Index: Makefile.in ================================================================== --- Makefile.in +++ Makefile.in @@ -33,11 +33,11 @@ TCC = ${CC} ${CFLAGS} -I. -I${TOP}/src -I${TOP}/ext/rtree -I${TOP}/ext/icu TCC += -I${TOP}/ext/fts3 -I${TOP}/ext/async -I${TOP}/ext/session TCC += -I${TOP}/ext/userauth # Define this for the autoconf-based build, so that the code knows it can -# include the generated config.h +# include the generated sqlite_cfg.h # TCC += -D_HAVE_SQLITE_CONFIG_H -DBUILD_sqlite # Define -DNDEBUG to compile without debugging (i.e., for production usage) # Omitting the define will cause extra debugging code to be inserted and @@ -138,10 +138,11 @@ GCOV_CFLAGS1 = -DSQLITE_COVERAGE_TEST=1 -fprofile-arcs -ftest-coverage GCOV_LDFLAGS1 = -lgcov USE_GCOV = @USE_GCOV@ LTCOMPILE_EXTRAS += $(GCOV_CFLAGS$(USE_GCOV)) LTLINK_EXTRAS += $(GCOV_LDFLAGS$(USE_GCOV)) +LTCOMPILE_EXTRAS += -DSQLITE_ENABLE_SHARED_SCHEMA # The directory into which to store package information for # Some standard variables and programs @@ -182,11 +183,11 @@ func.lo global.lo hash.lo \ icu.lo insert.lo json.lo legacy.lo loadext.lo \ main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \ memdb.lo memjournal.lo \ mutex.lo mutex_noop.lo mutex_unix.lo mutex_w32.lo \ - notify.lo opcodes.lo os.lo os_unix.lo os_win.lo \ + notify.lo opcodes.lo os.lo os_kv.lo os_unix.lo os_win.lo \ pager.lo parse.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ random.lo resolve.lo rowset.lo rtree.lo \ sqlite3session.lo select.lo sqlite3rbu.lo status.lo stmt.lo \ table.lo threads.lo tokenize.lo treeview.lo trigger.lo \ update.lo userauth.lo upsert.lo util.lo vacuum.lo \ @@ -255,10 +256,11 @@ $(TOP)/src/notify.c \ $(TOP)/src/os.c \ $(TOP)/src/os.h \ $(TOP)/src/os_common.h \ $(TOP)/src/os_setup.h \ + $(TOP)/src/os_kv.c \ $(TOP)/src/os_unix.c \ $(TOP)/src/os_win.c \ $(TOP)/src/os_win.h \ $(TOP)/src/pager.c \ $(TOP)/src/pager.h \ @@ -375,11 +377,11 @@ keywordhash.h \ opcodes.c \ opcodes.h \ parse.c \ parse.h \ - config.h \ + sqlite_cfg.h \ shell.c \ sqlite3.h # Source code to the test files. 
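The config.h to sqlite_cfg.h rename above pairs with the existing -D_HAVE_SQLITE_CONFIG_H define: that macro is how a translation unit knows the generated header exists. A minimal sketch of that include pattern, assuming the core keeps gating on the same macro (the actual include site is not part of this diff):

    /* Sketch only: pull in the autoconf-generated settings when the
    ** Makefile says they exist.  The macro name comes from the
    ** -D_HAVE_SQLITE_CONFIG_H flag above; the header name follows the
    ** rename to sqlite_cfg.h in this change. */
    #ifdef _HAVE_SQLITE_CONFIG_H
    #include "sqlite_cfg.h"   /* HAVE_FDATASYNC, HAVE_USLEEP, etc. */
    #endif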
# @@ -417,10 +419,11 @@ $(TOP)/src/test_osinst.c \ $(TOP)/src/test_pcache.c \ $(TOP)/src/test_quota.c \ $(TOP)/src/test_rtree.c \ $(TOP)/src/test_schema.c \ + $(TOP)/src/test_schemapool.c \ $(TOP)/src/test_server.c \ $(TOP)/src/test_superlock.c \ $(TOP)/src/test_syscall.c \ $(TOP)/src/test_tclsh.c \ $(TOP)/src/test_tclvar.c \ @@ -431,20 +434,21 @@ $(TOP)/src/test_window.c \ $(TOP)/src/test_wsd.c \ $(TOP)/ext/fts3/fts3_term.c \ $(TOP)/ext/fts3/fts3_test.c \ $(TOP)/ext/session/test_session.c \ - $(TOP)/ext/session/sqlite3changebatch.c \ + $(TOP)/ext/recover/sqlite3recover.c \ + $(TOP)/ext/recover/dbdata.c \ + $(TOP)/ext/recover/test_recover.c \ $(TOP)/ext/rbu/test_rbu.c # Statically linked extensions # TESTSRC += \ $(TOP)/ext/expert/sqlite3expert.c \ $(TOP)/ext/expert/test_expert.c \ $(TOP)/ext/misc/amatch.c \ - $(TOP)/ext/misc/bgckpt.c \ $(TOP)/ext/misc/appendvfs.c \ $(TOP)/ext/misc/carray.c \ $(TOP)/ext/misc/cksumvfs.c \ $(TOP)/ext/misc/closure.c \ $(TOP)/ext/misc/csv.c \ @@ -480,10 +484,11 @@ $(TOP)/src/attach.c \ $(TOP)/src/backup.c \ $(TOP)/src/bitvec.c \ $(TOP)/src/btree.c \ $(TOP)/src/build.c \ + $(TOP)/src/callback.c \ $(TOP)/src/ctime.c \ $(TOP)/src/date.c \ $(TOP)/src/dbpage.c \ $(TOP)/src/dbstat.c \ $(TOP)/src/expr.c \ @@ -492,10 +497,11 @@ $(TOP)/src/insert.c \ $(TOP)/src/wal.c \ $(TOP)/src/main.c \ $(TOP)/src/mem5.c \ $(TOP)/src/os.c \ + $(TOP)/src/os_kv.c \ $(TOP)/src/os_unix.c \ $(TOP)/src/os_win.c \ $(TOP)/src/pager.c \ $(TOP)/src/pragma.c \ $(TOP)/src/prepare.c \ @@ -555,11 +561,11 @@ $(TOP)/src/sqliteLimit.h \ $(TOP)/src/vdbe.h \ $(TOP)/src/vdbeInt.h \ $(TOP)/src/vxworks.h \ $(TOP)/src/whereInt.h \ - config.h + sqlite_cfg.h # Header files used by extensions # EXTHDR += \ $(TOP)/ext/fts1/fts1.h \ @@ -621,22 +627,31 @@ SHELL_OPT += -DSQLITE_ENABLE_DBPAGE_VTAB SHELL_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB SHELL_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB SHELL_OPT += -DSQLITE_ENABLE_OFFSET_SQL_FUNC FUZZERSHELL_OPT = -FUZZCHECK_OPT = -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_OSS_FUZZ +FUZZCHECK_OPT += -I$(TOP)/test +FUZZCHECK_OPT += -I$(TOP)/ext/recover +FUZZCHECK_OPT += -DSQLITE_OMIT_LOAD_EXTENSION +FUZZCHECK_OPT += -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_OSS_FUZZ FUZZCHECK_OPT += -DSQLITE_MAX_MEMORY=50000000 FUZZCHECK_OPT += -DSQLITE_PRINTF_PRECISION_LIMIT=1000 FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS4 FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS3_PARENTHESIS FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS5 FUZZCHECK_OPT += -DSQLITE_ENABLE_RTREE FUZZCHECK_OPT += -DSQLITE_ENABLE_GEOPOLY FUZZCHECK_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB FUZZCHECK_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB -FUZZCHECK_SRC = $(TOP)/test/fuzzcheck.c $(TOP)/test/ossfuzz.c $(TOP)/test/fuzzinvariants.c +FUZZCHECK_SRC += $(TOP)/test/fuzzcheck.c +FUZZCHECK_SRC += $(TOP)/test/ossfuzz.c +FUZZCHECK_SRC += $(TOP)/test/fuzzinvariants.c +FUZZCHECK_SRC += $(TOP)/ext/recover/dbdata.c +FUZZCHECK_SRC += $(TOP)/ext/recover/sqlite3recover.c +FUZZCHECK_SRC += $(TOP)/test/vt02.c DBFUZZ_OPT = +ST_OPT = -DSQLITE_OS_KV_OPTIONAL # This is the default Makefile target. The objects listed here # are what get build when you type just "make" with no arguments. 
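The new ST_OPT = -DSQLITE_OS_KV_OPTIONAL setting builds speedtest1 with the key/value VFS added in src/os_kv.c. A rough smoke-test sketch, assuming that option registers the VFS under the name "kvvfs" and that a plain name such as "local" is an acceptable database for it (neither detail is shown in this diff):

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db = 0;
      /* "kvvfs" and "local" are assumptions, not taken from this change. */
      int rc = sqlite3_open_v2("local", &db,
                   SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, "kvvfs");
      printf("kvvfs open: %s\n", rc==SQLITE_OK ? "ok" : sqlite3_errmsg(db));
      sqlite3_close(db);
      return 0;
    }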
# all: sqlite3.h libsqlite3.la sqlite3$(TEXE) $(HAVE_TCL:1=libtclsqlite3.la) @@ -681,11 +696,11 @@ fuzzershell$(TEXE): $(TOP)/tool/fuzzershell.c sqlite3.c sqlite3.h $(LTLINK) -o $@ $(FUZZERSHELL_OPT) \ $(TOP)/tool/fuzzershell.c sqlite3.c $(TLIBS) -fuzzcheck$(TEXE): $(FUZZCHECK_SRC) sqlite3.c sqlite3.h +fuzzcheck$(TEXE): $(FUZZCHECK_SRC) sqlite3.c sqlite3.h $(FUZZCHECK_DEP) $(LTLINK) -o $@ $(FUZZCHECK_OPT) $(FUZZCHECK_SRC) sqlite3.c $(TLIBS) ossshell$(TEXE): $(TOP)/test/ossfuzz.c $(TOP)/test/ossshell.c sqlite3.c sqlite3.h $(LTLINK) -o $@ $(FUZZCHECK_OPT) $(TOP)/test/ossshell.c \ $(TOP)/test/ossfuzz.c sqlite3.c $(TLIBS) @@ -938,10 +953,13 @@ $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pcache1.c os.lo: $(TOP)/src/os.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os.c +os_kv.lo: $(TOP)/src/os_kv.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_kv.c + os_unix.lo: $(TOP)/src/os_unix.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_unix.c os_win.lo: $(TOP)/src/os_win.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_win.c @@ -1102,10 +1120,13 @@ $(TOP)/ext/misc/uint.c \ $(TOP)/ext/expert/sqlite3expert.c \ $(TOP)/ext/expert/sqlite3expert.h \ $(TOP)/ext/misc/zipfile.c \ $(TOP)/ext/misc/memtrace.c \ + $(TOP)/ext/recover/dbdata.c \ + $(TOP)/ext/recover/sqlite3recover.c \ + $(TOP)/ext/recover/sqlite3recover.h \ $(TOP)/src/test_windirent.c shell.c: $(SHELL_SRC) $(TOP)/tool/mkshellc.tcl $(TCLSH_CMD) $(TOP)/tool/mkshellc.tcl >shell.c @@ -1387,11 +1408,11 @@ $(LTLINK) -I. -o $@ $(TOP)/tool/logest.c wordcount$(TEXE): $(TOP)/test/wordcount.c sqlite3.lo $(LTLINK) -o $@ $(TOP)/test/wordcount.c sqlite3.lo $(TLIBS) -speedtest1$(TEXE): $(TOP)/test/speedtest1.c sqlite3.c +speedtest1$(TEXE): $(TOP)/test/speedtest1.c sqlite3.c Makefile $(LTLINK) $(ST_OPT) -o $@ $(TOP)/test/speedtest1.c sqlite3.c $(TLIBS) startup$(TEXE): $(TOP)/test/startup.c sqlite3.c $(CC) -Os -g -DSQLITE_THREADSAFE=0 -o $@ $(TOP)/test/startup.c sqlite3.c $(TLIBS) @@ -1501,11 +1522,11 @@ rm -f dbhash dbhash.exe rm -f fts5.* fts5parse.* rm -f threadtest5 distclean: clean - rm -f config.h config.log config.status libtool Makefile sqlite3.pc \ + rm -f sqlite_cfg.h config.log config.status libtool Makefile sqlite3.pc \ $(TESTPROGS) # # Windows section # @@ -1522,145 +1543,10 @@ sqlite3.dll: $(REAL_LIBOBJ) sqlite3.def $(TCC) -shared -o $@ sqlite3.def \ -Wl,"--strip-all" $(REAL_LIBOBJ) - -# -# fiddle/wasm section -# -# Maintenance reminder: we can/should move this into the wasm-specific -# GNU Make makefile, but we currently need it here for access to -# $(SHELL_OPT). The rest of the wasm-related bits are handled via GNU -# Make in ext/wasm/... -# -wasm_dir = ext/wasm -wasm_dir_abs = $(TOP)/ext/wasm -# ^^^ some emcc opts require absolute paths -fiddle_dir = $(wasm_dir)/fiddle -fiddle_dir_abs = $(TOP)/$(fiddle_dir) -fiddle_module_js = $(fiddle_dir)/fiddle-module.js -#emcc_opt = -O0 -#emcc_opt = -O1 -#emcc_opt = -O2 -#emcc_opt = -O3 -emcc_opt = -Oz -emcc_flags = $(emcc_opt) \ - -sALLOW_TABLE_GROWTH \ - -sABORTING_MALLOC \ - -sSTRICT_JS \ - -sENVIRONMENT=web \ - -sMODULARIZE \ - -sEXPORTED_RUNTIME_METHODS=@$(wasm_dir_abs)/EXPORTED_RUNTIME_METHODS.fiddle \ - -sDYNAMIC_EXECUTION=0 \ - --minify 0 \ - -I. 
$(SHELL_OPT) \ - -DSQLITE_THREADSAFE=0 -DSQLITE_OMIT_UTF16 -DSQLITE_OMIT_DEPRECATED -$(fiddle_module_js): Makefile sqlite3.c shell.c \ - $(wasm_dir)/EXPORTED_RUNTIME_METHODS.fiddle \ - $(wasm_dir)/EXPORTED_FUNCTIONS.fiddle - emcc -o $@ $(emcc_flags) \ - -sEXPORT_NAME=initFiddleModule \ - -sEXPORTED_FUNCTIONS=@$(wasm_dir_abs)/EXPORTED_FUNCTIONS.fiddle \ - -DSQLITE_SHELL_FIDDLE \ - sqlite3.c shell.c - gzip < $@ > $@.gz - gzip < $(fiddle_dir)/fiddle-module.wasm > $(fiddle_dir)/fiddle-module.wasm.gz -$(fiddle_dir)/fiddle.js.gz: $(fiddle_dir)/fiddle.js - gzip < $< > $@ - -fiddle_generated = $(fiddle_module_js) $(fiddle_module_js).gz \ - $(fiddle_dir)/fiddle-module.wasm \ - $(fiddle_dir)/fiddle-module.wasm.gz \ - $(fiddle_dir)/fiddle.js.gz - -clean-fiddle: - rm -f $(fiddle_generated) -clean: clean-fiddle -fiddle: $(fiddle_module_js) $(fiddle_dir)/fiddle.js.gz -wasm: fiddle -######################################################################## -# Explanation of the emcc build flags follows. Full docs for these can -# be found at: -# -# https://github.com/emscripten-core/emscripten/blob/main/src/settings.js -# -# -sENVIRONMENT=web: elides bootstrap code related to non-web JS -# environments like node.js. Removing this makes the output a tiny -# tick larger but hypothetically makes it more portable to -# non-browser JS environments. -# -# -sMODULARIZE: changes how the generated code is structured to avoid -# declaring a global Module object and instead installing a function -# which loads and initializes the module. The function is named... -# -# -sEXPORT_NAME=jsFunctionName (see -sMODULARIZE) -# -# -sEXPORTED_RUNTIME_METHODS=@/absolute/path/to/file: a file -# containing a list of emscripten-supplied APIs, one per line, which -# must be exported into the generated JS. Must be an absolute path! -# -# -sEXPORTED_FUNCTIONS=@/absolute/path/to/file: a file containing a -# list of C functions, one per line, which must be exported via wasm -# so they're visible to JS. C symbols names in that file must all -# start with an underscore for reasons known only to the emcc -# developers. e.g., _sqlite3_open_v2 and _sqlite3_finalize. Must be -# an absolute path! -# -# -sSTRICT_JS ensures that the emitted JS code includes the 'use -# strict' option. Note that -sSTRICT is more broadly-scoped and -# results in build errors. -# -# -sALLOW_TABLE_GROWTH is required for (at a minimum) the UDF-binding -# feature. Without it, JS functions cannot be made to proxy C-side -# callbacks. -# -# -sABORTING_MALLOC causes the JS-bound _malloc() to abort rather than -# return 0 on OOM. If set to 0 then all code which uses _malloc() -# must, just like in C, check the result before using it, else -# they're likely to corrupt the JS/WASM heap by writing to its -# address of 0. It is, as of this writing, enabled in Emscripten by -# default but we enable it explicitly in case that default changes. -# -# -sDYNAMIC_EXECUTION=0 disables eval() and the Function constructor. -# If the build runs without these, it's preferable to use this flag -# because certain execution environments disallow those constructs. -# This flag is not strictly necessary, however. -# -# -sWASM_BIGINT is UNTESTED but "should" allow the int64-using C APIs -# to work with JS/wasm, insofar as the JS environment supports the -# BigInt type. That support requires an extremely recent browser: -# Safari didn't get that support until late 2020. -# -# --no-entry: for compiling library code with no main(). 
If this is -# not supplied and the code has a main(), it is called as part of the -# module init process. Note that main() is #if'd out of shell.c -# (renamed) when building in wasm mode. -# -# --pre-js/--post-js=FILE relative or absolute paths to JS files to -# prepend/append to the emcc-generated bootstrapping JS. It's -# easier/faster to develop with separate JS files (reduces rebuilding -# requirements) but certain configurations, namely -sMODULARIZE, may -# require using at least a --pre-js file. They can be used -# individually and need not be paired. -# -# -O0..-O3 and -Oz: optimization levels affect not only C-style -# optimization but whether or not the resulting generated JS code -# gets minified. -O0 compiles _much_ more quickly than -O3 or -Oz, -# and doesn't minimize any JS code, so is recommended for -# development. -O3 or -Oz are recommended for deployment, but -# primarily because -Oz will shrink the wasm file notably. JS-side -# minification makes little difference in terms of overall -# distributable size. -# -# --minify 0: disables minification of the generated JS code, -# regardless of optimization level. Minification of the JS has -# minimal overall effect in the larger scheme of things and results -# in JS files which can neither be edited nor viewed as text files in -# Fossil (which flags them as binary because of their extreme line -# lengths). Interestingly, whether or not the comments in the -# generated JS file get stripped is unaffected by this setting and -# depends entirely on the optimization level. Higher optimization -# levels reduce the size of the JS considerably even without -# minification. -# -######################################################################## +# +# Fiddle app +# +fiddle: sqlite3.c shell.c + make -C ext/wasm fiddle emcc_opt=-Os Index: Makefile.msc ================================================================== --- Makefile.msc +++ Makefile.msc @@ -1249,11 +1249,11 @@ func.lo global.lo hash.lo \ icu.lo insert.lo json.lo legacy.lo loadext.lo \ main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \ memdb.lo memjournal.lo \ mutex.lo mutex_noop.lo mutex_unix.lo mutex_w32.lo \ - notify.lo opcodes.lo os.lo os_unix.lo os_win.lo \ + notify.lo opcodes.lo os.lo os_kv.lo os_unix.lo os_win.lo \ pager.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ random.lo resolve.lo rowset.lo rtree.lo \ sqlite3session.lo select.lo sqlite3rbu.lo status.lo stmt.lo \ table.lo threads.lo tokenize.lo treeview.lo trigger.lo \ update.lo upsert.lo util.lo vacuum.lo \ @@ -1330,10 +1330,11 @@ $(TOP)\src\mutex_noop.c \ $(TOP)\src\mutex_unix.c \ $(TOP)\src\mutex_w32.c \ $(TOP)\src\notify.c \ $(TOP)\src\os.c \ + $(TOP)\src\os_kv.c \ $(TOP)\src\os_unix.c \ $(TOP)\src\os_win.c # Core source code files, part 2. 
# @@ -1535,10 +1536,11 @@ $(TOP)\src\test_osinst.c \ $(TOP)\src\test_pcache.c \ $(TOP)\src\test_quota.c \ $(TOP)\src\test_rtree.c \ $(TOP)\src\test_schema.c \ + $(TOP)\src\test_schemapool.c \ $(TOP)\src\test_server.c \ $(TOP)\src\test_superlock.c \ $(TOP)\src\test_syscall.c \ $(TOP)\src\test_tclsh.c \ $(TOP)\src\test_tclvar.c \ @@ -1558,11 +1560,10 @@ TESTEXT = \ $(TOP)\ext\expert\sqlite3expert.c \ $(TOP)\ext\expert\test_expert.c \ $(TOP)\ext\misc\amatch.c \ $(TOP)\ext\misc\appendvfs.c \ - $(TOP)\ext\misc\bgckpt.c \ $(TOP)\ext\misc\carray.c \ $(TOP)\ext\misc\cksumvfs.c \ $(TOP)\ext\misc\closure.c \ $(TOP)\ext\misc\csv.c \ $(TOP)\ext\misc\decimal.c \ @@ -1586,10 +1587,13 @@ $(TOP)\ext\misc\spellfix.c \ $(TOP)\ext\misc\totype.c \ $(TOP)\ext\misc\unionvtab.c \ $(TOP)\ext\misc\wholenumber.c \ $(TOP)\ext\rtree\test_rtreedoc.c \ + $(TOP)\ext\recover\sqlite3recover.c \ + $(TOP)\ext\recover\test_recover.c \ + $(TOP)\ext\recover\dbdata.c \ fts5.c # If use of zlib is enabled, add the "zipfile.c" source file. # !IF $(USE_ZLIB)!=0 @@ -1696,19 +1700,29 @@ # <> # Extra compiler options for various test tools. # MPTESTER_COMPILE_OPTS = -DSQLITE_ENABLE_FTS5 FUZZERSHELL_COMPILE_OPTS = -FUZZCHECK_OPTS = -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_OSS_FUZZ -DSQLITE_MAX_MEMORY=50000000 -DSQLITE_PRINTF_PRECISION_LIMIT=1000 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -I$(TOP)\test -I$(TOP)\ext\recover +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_MEMSYS5 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_OSS_FUZZ +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_MAX_MEMORY=50000000 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_PRINTF_PRECISION_LIMIT=1000 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_OMIT_LOAD_EXTENSION FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS4 FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS5 FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_RTREE FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_GEOPOLY FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_DBSTAT_VTAB FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_BYTECODE_VTAB +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\fuzzcheck.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\ossfuzz.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\fuzzinvariants.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\vt02.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\recover\dbdata.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\recover\sqlite3recover.c -FUZZCHECK_SRC = $(TOP)\test\fuzzcheck.c $(TOP)\test\ossfuzz.c $(TOP)\test\fuzzinvariants.c OSSSHELL_SRC = $(TOP)\test\ossshell.c $(TOP)\test\ossfuzz.c DBFUZZ_COMPILE_OPTS = -DSQLITE_THREADSAFE=0 -DSQLITE_OMIT_LOAD_EXTENSION KV_COMPILE_OPTS = -DSQLITE_THREADSAFE=0 -DSQLITE_DIRECT_OVERFLOW_READ ST_COMPILE_OPTS = -DSQLITE_THREADSAFE=0 @@ -2052,10 +2066,13 @@ $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\pcache1.c os.lo: $(TOP)\src\os.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\os.c +os_kv.lo: $(TOP)\src\os_kv.c $(HDR) + $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\os_kv.c + os_unix.lo: $(TOP)\src\os_unix.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\os_unix.c os_win.lo: $(TOP)\src\os_win.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\os_win.c @@ -2223,10 +2240,13 @@ $(TOP)\ext\misc\shathree.c \ $(TOP)\ext\misc\uint.c \ $(TOP)\ext\expert\sqlite3expert.c \ $(TOP)\ext\expert\sqlite3expert.h \ $(TOP)\ext\misc\memtrace.c \ + $(TOP)/ext/recover/dbdata.c \ + $(TOP)/ext/recover/sqlite3recover.c \ + $(TOP)/ext/recover/sqlite3recover.h \ $(TOP)\src\test_windirent.c # If use of zlib is 
enabled, add the "zipfile.c" source file. # !IF $(USE_ZLIB)!=0 Index: VERSION ================================================================== --- VERSION +++ VERSION @@ -1,1 +1,1 @@ -3.40.0 +3.40.1 DELETED config.h.in Index: config.h.in ================================================================== --- config.h.in +++ /dev/null @@ -1,131 +0,0 @@ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Define to 1 if you have the header file. */ -#undef HAVE_DLFCN_H - -/* Define to 1 if you have the `fdatasync' function. */ -#undef HAVE_FDATASYNC - -/* Define to 1 if you have the `gmtime_r' function. */ -#undef HAVE_GMTIME_R - -/* Define to 1 if the system has the type `int16_t'. */ -#undef HAVE_INT16_T - -/* Define to 1 if the system has the type `int32_t'. */ -#undef HAVE_INT32_T - -/* Define to 1 if the system has the type `int64_t'. */ -#undef HAVE_INT64_T - -/* Define to 1 if the system has the type `int8_t'. */ -#undef HAVE_INT8_T - -/* Define to 1 if the system has the type `intptr_t'. */ -#undef HAVE_INTPTR_T - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the `isnan' function. */ -#undef HAVE_ISNAN - -/* Define to 1 if you have the `localtime_r' function. */ -#undef HAVE_LOCALTIME_R - -/* Define to 1 if you have the `localtime_s' function. */ -#undef HAVE_LOCALTIME_S - -/* Define to 1 if you have the header file. */ -#undef HAVE_MALLOC_H - -/* Define to 1 if you have the `malloc_usable_size' function. */ -#undef HAVE_MALLOC_USABLE_SIZE - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the pread() function. */ -#undef HAVE_PREAD - -/* Define to 1 if you have the pread64() function. */ -#undef HAVE_PREAD64 - -/* Define to 1 if you have the pwrite() function. */ -#undef HAVE_PWRITE - -/* Define to 1 if you have the pwrite64() function. */ -#undef HAVE_PWRITE64 - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the strchrnul() function */ -#undef HAVE_STRCHRNUL - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if the system has the type `uint16_t'. */ -#undef HAVE_UINT16_T - -/* Define to 1 if the system has the type `uint32_t'. */ -#undef HAVE_UINT32_T - -/* Define to 1 if the system has the type `uint64_t'. */ -#undef HAVE_UINT64_T - -/* Define to 1 if the system has the type `uint8_t'. */ -#undef HAVE_UINT8_T - -/* Define to 1 if the system has the type `uintptr_t'. */ -#undef HAVE_UINTPTR_T - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to 1 if you have the `usleep' function. */ -#undef HAVE_USLEEP - -/* Define to 1 if you have the utime() library function. */ -#undef HAVE_UTIME - -/* Define to the sub-directory in which libtool stores uninstalled libraries. - */ -#undef LT_OBJDIR - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. 
*/ -#undef PACKAGE_TARNAME - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* Number of bits in a file offset, on hosts where this is settable. */ -#undef _FILE_OFFSET_BITS - -/* Define for large files, on AIX-style hosts. */ -#undef _LARGE_FILES Index: configure ================================================================== --- configure +++ configure @@ -1,8 +1,8 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for sqlite 3.40.0. +# Generated by GNU Autoconf 2.69 for sqlite 3.40.1. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # @@ -724,12 +724,12 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='sqlite' PACKAGE_TARNAME='sqlite' -PACKAGE_VERSION='3.40.0' -PACKAGE_STRING='sqlite 3.40.0' +PACKAGE_VERSION='3.40.1' +PACKAGE_STRING='sqlite 3.40.1' PACKAGE_BUGREPORT='' PACKAGE_URL='' # Factoring default headers for most tests. ac_includes_default="\ @@ -1466,11 +1466,11 @@ # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures sqlite 3.40.0 to adapt to many kinds of systems. +\`configure' configures sqlite 3.40.1 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. @@ -1531,11 +1531,11 @@ _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of sqlite 3.40.0:";; + short | recursive ) echo "Configuration of sqlite 3.40.1:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options @@ -1659,11 +1659,11 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -sqlite configure 3.40.0 +sqlite configure 3.40.1 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. @@ -2078,11 +2078,11 @@ } # ac_fn_c_check_header_mongrel cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by sqlite $as_me 3.40.0, which was +It was created by sqlite $as_me 3.40.1, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF @@ -11873,11 +11873,11 @@ fi ######### # Output the config header -ac_config_headers="$ac_config_headers config.h" +ac_config_headers="$ac_config_headers sqlite_cfg.h" ######### # Generate the output files. # @@ -12388,11 +12388,11 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by sqlite $as_me 3.40.0, which was +This file was extended by sqlite $as_me 3.40.1, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS @@ -12454,11 +12454,11 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -sqlite config.status 3.40.0 +sqlite config.status 3.40.1 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation @@ -12836,11 +12836,11 @@ # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; - "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; + "sqlite_cfg.h") CONFIG_HEADERS="$CONFIG_HEADERS sqlite_cfg.h" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "sqlite3.pc") CONFIG_FILES="$CONFIG_FILES sqlite3.pc" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac Index: configure.ac ================================================================== --- configure.ac +++ configure.ac @@ -804,11 +804,11 @@ fi AC_SUBST(AMALGAMATION_LINE_MACROS) ######### # Output the config header -AC_CONFIG_HEADERS(config.h) +AC_CONFIG_HEADERS(sqlite_cfg.h) ######### # Generate the output files. # AC_SUBST(BUILD_CFLAGS) DELETED doc/begin_concurrent.md Index: doc/begin_concurrent.md ================================================================== --- doc/begin_concurrent.md +++ /dev/null @@ -1,107 +0,0 @@ - -Begin Concurrent -================ - -## Overview - -Usually, SQLite allows at most one writer to proceed concurrently. The -BEGIN CONCURRENT enhancement allows multiple writers to process write -transactions simultanously if the database is in "wal" or "wal2" mode, -although the system still serializes COMMIT commands. - -When a write-transaction is opened with "BEGIN CONCURRENT", actually -locking the database is deferred until a COMMIT is executed. This means -that any number of transactions started with BEGIN CONCURRENT may proceed -concurrently. The system uses optimistic page-level-locking to prevent -conflicting concurrent transactions from being committed. - -When a BEGIN CONCURRENT transaction is committed, the system checks whether -or not any of the database pages that the transaction has read have been -modified since the BEGIN CONCURRENT was opened. In other words - it asks -if the transaction being committed operates on a different set of data than -all other concurrently executing transactions. If the answer is "yes, this -transaction did not read or modify any data modified by any concurrent -transaction", then the transaction is committed as normal. Otherwise, if the -transaction does conflict, it cannot be committed and an SQLITE_BUSY_SNAPSHOT -error is returned. At this point, all the client can do is ROLLBACK the -transaction. - -If SQLITE_BUSY_SNAPSHOT is returned, messages are output via the sqlite3_log -mechanism indicating the page and table or index on which the conflict -occurred. This can be useful when optimizing concurrency. - -## Application Programming Notes - -In order to serialize COMMIT processing, SQLite takes a lock on the database -as part of each COMMIT command and releases it before returning. At most one -writer may hold this lock at any one time. 
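The notes being removed above describe the only recovery from an SQLITE_BUSY_SNAPSHOT conflict: roll the transaction back and run it again. A minimal retry sketch of that pattern, only meaningful on builds from the begin-concurrent branch whose documentation is deleted here; run_concurrent_txn() and the exec_work() callback are hypothetical application code, not from the tree:

    #include "sqlite3.h"

    static int run_concurrent_txn(sqlite3 *db, int (*exec_work)(sqlite3*)){
      int rc, bRetry;
      for(;;){
        rc = sqlite3_exec(db, "BEGIN CONCURRENT", 0, 0, 0);
        if( rc!=SQLITE_OK ) return rc;
        rc = exec_work(db);                       /* application reads/writes */
        if( rc==SQLITE_OK ) rc = sqlite3_exec(db, "COMMIT", 0, 0, 0);
        if( rc==SQLITE_OK ) return SQLITE_OK;
        /* Page-level conflict: ROLLBACK is the only option, then retry. */
        bRetry = (sqlite3_extended_errcode(db)==SQLITE_BUSY_SNAPSHOT);
        sqlite3_exec(db, "ROLLBACK", 0, 0, 0);
        if( !bRetry ) return rc;
      }
    }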
If a writer cannot obtain the lock, -it uses SQLite's busy-handler to pause and retry for a while: - - - https://www.sqlite.org/c3ref/busy_handler.html - - -If there is significant contention for the writer lock, this mechanism can be -inefficient. In this case it is better for the application to use a mutex or -some other mechanism that supports blocking to ensure that at most one writer -is attempting to COMMIT a BEGIN CONCURRENT transaction at a time. This is -usually easier if all writers are part of the same operating system process. - -If all database clients (readers and writers) are located in the same OS -process, and if that OS is a Unix variant, then it can be more efficient to -the built-in VFS "unix-excl" instead of the default "unix". This is because it -uses more efficient locking primitives. - -The key to maximizing concurrency using BEGIN CONCURRENT is to ensure that -there are a large number of non-conflicting transactions. In SQLite, each -table and each index is stored as a separate b-tree, each of which is -distributed over a discrete set of database pages. This means that: - - * Two transactions that write to different sets of tables never - conflict, and that - - * Two transactions that write to the same tables or indexes only - conflict if the values of the keys (either primary keys or indexed - rows) are fairly close together. For example, given a large - table with the schema: - -
     CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB);
- - writing two rows with adjacent values for "a" probably will cause a - conflict (as the two keys are stored on the same page), but writing two - rows with vastly different values for "a" will not (as the keys will likly - be stored on different pages). - -Note that, in SQLite, if values are not explicitly supplied for an INTEGER -PRIMARY KEY, as for example in: - -> - INSERT INTO t1(b) VALUES(<blob-value>); - -then monotonically increasing values are assigned automatically. This is -terrible for concurrency, as it all but ensures that all new rows are -added to the same database page. In such situations, it is better to -explicitly assign random values to INTEGER PRIMARY KEY fields. - -This problem also comes up for non-WITHOUT ROWID tables that do not have an -explicit INTEGER PRIMARY KEY column. In these cases each table has an implicit -INTEGER PRIMARY KEY column that is assigned increasing values, leading to the -same problem as omitting to assign a value to an explicit INTEGER PRIMARY KEY -column. - -For both explicit and implicit INTEGER PRIMARY KEYs, it is possible to have -SQLite assign values at random (instead of the monotonically increasing -values) by writing a row with a rowid equal to the largest possible signed -64-bit integer to the table. For example: - - INSERT INTO t1(a) VALUES(9223372036854775807); - -Applications should take care not to malfunction due to the presence of such -rows. - -The nature of some types of indexes, for example indexes on timestamp fields, -can also cause problems (as concurrent transactions may assign similar -timestamps that will be stored on the same db page to new records). In these -cases the database schema may need to be rethought to increase the concurrency -provided by page-level-locking. - ADDED doc/shared_schema.md Index: doc/shared_schema.md ================================================================== --- /dev/null +++ doc/shared_schema.md @@ -0,0 +1,142 @@ + +Shared-Schema Mode Notes +======================== + +The [reuse-schema](/timeline?r=reuse-schema) branch contains changes +to allow SQLite connections to share schemas +between database connections within the same process in order to save memory. +Schemas may be shared between multiple databases attached to the same or +distinct connection handles. + +Compile with -DSQLITE\_ENABLE\_SHARED\_SCHEMA in order to enable the +shared-schema enhancement. Enabling the shared-schema enhancement causes +approximately a 0.1% increase in CPU cycles consumed and about a 3000-byte +increase in the size of the library, even if shared-schema is never used. + +Assuming the compile-time requirements are satisfied, the shared-schema +feature is engaged by opening the database connection using the +sqlite3_open_v2() API with the SQLITE_OPEN_SHARED_SCHEMA +flag specified. The main database and any attached databases will then share +an in-memory Schema object with any other database opened within the process +for which: + + * the contents of the sqlite_master table, including all object names, + SQL statements and root pages are identical, and + * have the same values for the schema-cookie. + +Temp databases (those populated with "CREATE TEMP TABLE" and similar +statements) never share schemas. + +Connections opened with the SQLITE_OPEN_SHARED_SCHEMA flag +specified may not modify any database schema except that belonging to the +temp database in anyway. 
This includes creating or dropping database +objects, vacuuming the database, or running ANALYZE when the +sqlite_stat\[14\] tables do not exist. + +For SQLITE_OPEN_SHARED_SCHEMA connections, the +SQLITE_DBSTATUS_SCHEMA_USED sqlite3_db_status() verb +distributes the memory used for a shared schema object evenly between all +database connections that share it. + +## The ".shared-schema" Command + +The shell tool on this branch contains a special dot-command to help with +managing databases. The ".shared-schema" dot-command can be used to test +whether or not two databases are similar enough to share in-memory schemas, +and to fix minor problems that prevent them from doing so. To test if +two or more databases are compatible, one database is opened directly using +the shell tool and the following command issued: + + .shared-schema check <database-1> [<database-2>]... + +where <database-1> etc. are replaced with the names of database files +on disk. For each database specified on the command line, a single line of +output is produced. If the database can share an in-memory schema with the +main database opened by the shell tool, the output is of the form: + + <database> is compatible + +Otherwise, if the database cannot share a schema with the main db, the output +is of the form: + + <database> is NOT compatible (<reason>) + +where <reason> indicates the cause of the incompatibility. <reason> +is always one of the following. +
    +
  • objects - the databases contain a different set of schema objects + (tables, indexes, views and triggers). +
  • SQL - the databases contain the same set of objects, but the SQL + statements used to create them were not the same. + +
  • root pages - the databases contain the same set of objects created + by the same SQL statements, but the root pages are not the same. + +
  • order of sqlite_master rows - the databases contain the same + set of objects created by the same SQL statements with the same root pages, + but the order of the rows in the sqlite_master tables is different. +
  • schema cookie - the database schemas are compatible, but the + schema cookie values ("PRAGMA schema_version") are different. +
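For reference alongside the checks above, a minimal sketch of how a connection engages shared-schema mode and reads its apportioned schema memory, using the sqlite3_open_v2() flag and sqlite3_db_status() verb described earlier. These symbols exist only in -DSQLITE_ENABLE_SHARED_SCHEMA builds of this branch; open_shared(), "one.db" and "two.db" are hypothetical names used for illustration:

    #include "sqlite3.h"

    static int open_shared(void){
      sqlite3 *db = 0;
      int nCur = 0, nHi = 0;
      int rc = sqlite3_open_v2("one.db", &db,
          SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_SHARED_SCHEMA, 0);
      if( rc==SQLITE_OK ){
        /* "aux" may share main's Schema object if one.db and two.db are
        ** compatible in the sense checked by ".shared-schema check". */
        rc = sqlite3_exec(db, "ATTACH 'two.db' AS aux", 0, 0, 0);
      }
      if( rc==SQLITE_OK ){
        /* This connection's apportioned share of schema memory. */
        sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &nCur, &nHi, 0);
      }
      sqlite3_close(db);
      return rc;
    }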
+ +The final three problems in the list above can be fixed using the +.shared-schema command. To modify such a database so that it can share a +schema with the main database, the following shell command is used: + + .shared-schema fix []... + +If a database can be modified so that it may share a schema with the main +database opened by the shell tool, output is as follows: + + Fixing ... is compatible + +If a database does not require modification, or cannot be modified such that +it can share a schema with the main database, the output of "fix" is identical +to that of the "check" command. + +## Implementation Notes + +A single Schema object is never used by more than one database simultaneously, +regardless of whether or not those databases are attached to the same or +different database handles. Instead, a pool of schema objects is maintained +for each unique sqlite_master-contents/schema-cookie combination +opened within the process. Each time database schemas are required by a +connection, for example as part of an sqlite3_prepare\*(), +sqlite3_blob_open() or sqlite3_blob_open() call, it obtains +the minimum number of schemas required from the various schema-pools, returning +them at the end of the call. This means that a single schema-pool only ever +contains more than one copy of the schema if: + + * Two threads require schemas from the same pool at the same time, or + * A single sqlite3_prepare\*() call requires schemas for two or more + attached databases that use the same schema-pool. + +The size of a schema-pool never shrinks. Each schema pool always maintains +a number of schema objects equal to the highwater mark of schema objects +simultaneously required by clients. + +This approach is preferred to allowing multiple databases to use the same +Schema object simultaneously for three reasons: + + * The Schema object is not completely read-only. For example, the + Index.zIdxAff string is allocated lazily. + * Throughout the statement compiler, SQLite uses variables like + Table.pSchema and Index.pSchema with the sqlite3SchemaToIndex() routine + in order to determine which attached database a Table or Index object + resides in. This mechanism does not work if the same Schema may be + used by two or more attached databases. + * It may be easier to modify this approach in order to allow + SQLITE_OPEN_SHARED_SCHEMA connections to modify database + schemas, should that be required. + +SQLITE_OPEN_SHARED_SCHEMA connections do not store their +virtual-table handles in the Table.pVTable list of each table. This would not +work, as (a) there is no guarantee that a connection will be assigned the same +Schema object each time it requests one from a schema-pool and (b) a single +Schema (and therefore Table) object may correspond to tables in two or more +databases attached to a single connection. Instead, all virtual-table handles +associated with a single database are stored in a linked-list headed at +Db.pVTable. DELETED doc/wal2.md Index: doc/wal2.md ================================================================== --- doc/wal2.md +++ /dev/null @@ -1,98 +0,0 @@ - -Wal2 Mode Notes -=============== - -## Activating/Deactivating Wal2 Mode - -"Wal2" mode is very similar to "wal" mode. To change a database to wal2 mode, -use the command: - -> - PRAGMA journal_mode = wal2; - -It is not possible to change a database directly from "wal" mode to "wal2" -mode. Instead, it must first be changed to rollback mode. 
So, to change a wal -mode database to wal2 mode, the following two commands may be used: - -> - PRAGMA journal_mode = delete; - PRAGMA journal_mode = wal2; - -A database in wal2 mode may only be accessed by versions of SQLite compiled -from this branch. Attempting to use any other version of SQLite results in an -SQLITE_NOTADB error. A wal2 mode database may be changed back to rollback mode -(making it accessible by all versions of SQLite) using: - -> - PRAGMA journal_mode = delete; - -## The Advantage of Wal2 Mode - -In legacy wal mode, when a writer writes data to the database, it doesn't -modify the database file directly. Instead, it appends new data to the -"<database>-wal" file. Readers read data from both the original database -file and the "<database>-wal" file. At some point, data is copied from the -"<database>-wal" file into the database file, after which the wal file can -be deleted or overwritten. Copying data from the wal file into the database -file is called a "checkpoint", and may be done explictly (either by "PRAGMA -wal_checkpoint" or sqlite3_wal_checkpoint_v2()), or -automatically (by configuring "PRAGMA wal_autocheckpoint" - this is the -default). - -Checkpointers do not block writers, and writers do not block checkpointers. -However, if a writer writes to the database while a checkpoint is ongoing, -then the new data is appended to the end of the wal file. This means that, -even following the checkpoint, the wal file cannot be overwritten or deleted, -and so all subsequent transactions must also be appended to the wal file. The -work of the checkpointer is not wasted - SQLite remembers which parts of the -wal file have already been copied into the db file so that the next checkpoint -does not have to do so again - but it does mean that the wal file may grow -indefinitely if the checkpointer never gets a chance to finish without a -writer appending to the wal file. There are also circumstances in which -long-running readers may prevent a checkpointer from checkpointing the entire -wal file - also causing the wal file to grow indefinitely in a busy system. - -Wal2 mode does not have this problem. In wal2 mode, wal files do not grow -indefinitely even if the checkpointer never has a chance to finish -uninterrupted. - -In wal2 mode, the system uses two wal files instead of one. The files are named -"<database>-wal" and "<database>-wal2", where "<database>" is of -course the name of the database file. When data is written to the database, the -writer begins by appending the new data to the first wal file. Once the first -wal file has grown large enough, writers switch to appending data to the second -wal file. At this point the first wal file can be checkpointed (after which it -can be overwritten). Then, once the second wal file has grown large enough and -the first wal file has been checkpointed, writers switch back to the first wal -file. And so on. - -## Application Programming - -From the point of view of the user, the main differences between wal and -wal2 mode are to do with checkpointing: - - * In wal mode, a checkpoint may be attempted at any time. In wal2 - mode, the checkpointer has to wait until writers have switched - to the "other" wal file before a checkpoint can take place. - - * In wal mode, the wal-hook (callback registered using - sqlite3_wal_hook()) is invoked after a transaction is committed - with the total number of pages in the wal file as an argument. 
In wal2 - mode, the argument is either the total number of uncheckpointed pages in - both wal files, or - if the "other" wal file is empty or already - checkpointed - 0. - -Clients are recommended to use the same strategies for checkpointing wal2 mode -databases as for wal databases - by registering a wal-hook using -sqlite3_wal_hook() and attempting a checkpoint when the parameter -exceeds a certain threshold. - -However, it should be noted that although the wal-hook is invoked after each -transaction is committed to disk and database locks released, it is still -invoked from within the sqlite3_step() call used to execute the "COMMIT" -command. In BEGIN CONCURRENT systems, where the "COMMIT" is often protected by -an application mutex, this may reduce concurrency. In such systems, instead of -executing a checkpoint from within the wal-hook, a thread might defer this -action until after the application mutex has been released. - - Index: ext/fts5/fts5_index.c ================================================================== --- ext/fts5/fts5_index.c +++ ext/fts5/fts5_index.c @@ -6276,10 +6276,11 @@ i64 iRowid = fts5MultiIterRowid(pIter); char *z = (char*)fts5MultiIterTerm(pIter, &n); /* If this is a new term, query for it. Update cksum3 with the results. */ fts5TestTerm(p, &term, z, n, cksum2, &cksum3); + if( p->rc ) break; if( eDetail==FTS5_DETAIL_NONE ){ if( 0==fts5MultiIterIsEmpty(p, pIter) ){ cksum2 ^= sqlite3Fts5IndexEntryCksum(iRowid, 0, 0, -1, z, n); } Index: ext/fts5/fts5_main.c ================================================================== --- ext/fts5/fts5_main.c +++ ext/fts5/fts5_main.c @@ -258,11 +258,11 @@ p->ts.eState = 1; p->ts.iSavepoint = -1; break; case FTS5_SYNC: - assert( p->ts.eState==1 ); + assert( p->ts.eState==1 || p->ts.eState==2 ); p->ts.eState = 2; break; case FTS5_COMMIT: assert( p->ts.eState==2 ); @@ -273,25 +273,25 @@ assert( p->ts.eState==1 || p->ts.eState==2 || p->ts.eState==0 ); p->ts.eState = 0; break; case FTS5_SAVEPOINT: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint>=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint; break; case FTS5_RELEASE: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint<=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint-1; break; case FTS5_ROLLBACKTO: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=-1 ); /* The following assert() can fail if another vtab strikes an error ** within an xSavepoint() call then SQLite calls xRollbackTo() - without ** having called xSavepoint() on this vtab. */ /* assert( iSavepoint<=p->ts.iSavepoint ); */ @@ -1623,11 +1623,11 @@ Fts5Config *pConfig = pTab->p.pConfig; int eType0; /* value_type() of apVal[0] */ int rc = SQLITE_OK; /* Return code */ /* A transaction must be open when this is called. 
*/ - assert( pTab->ts.eState==1 ); + assert( pTab->ts.eState==1 || pTab->ts.eState==2 ); assert( pVtab->zErrMsg==0 ); assert( nArg==1 || nArg==(2+pConfig->nCol+2) ); assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER || sqlite3_value_type(apVal[0])==SQLITE_NULL @@ -1730,11 +1730,10 @@ } } } } - sqlite3Fts5IndexCloseReader(pTab->p.pIndex); pTab->p.pConfig->pzErrmsg = 0; return rc; } /* DELETED ext/fts5/test/fts5concurrent.test Index: ext/fts5/test/fts5concurrent.test ================================================================== --- ext/fts5/test/fts5concurrent.test +++ /dev/null @@ -1,55 +0,0 @@ -# 2022 May 09 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#************************************************************************* -# This file implements regression tests for SQLite library. The -# focus of this script is testing the FTS5 module. -# - -source [file join [file dirname [info script]] fts5_common.tcl] -set testprefix fts5concurrent - -# If SQLITE_ENABLE_FTS5 is not defined, omit this file. -ifcapable !fts5 { - finish_test - return -} - -do_execsql_test 1.0 { - CREATE VIRTUAL TABLE ft USING fts5(line, tokenize=trigram); -} - -do_execsql_test 1.1 { - BEGIN CONCURRENT; - INSERT INTO ft VALUES( hex(randomblob(50)) ); - COMMIT -} {} - -do_execsql_test 1.2 { - BEGIN CONCURRENT; - WITH s(i) AS ( - SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<50 - ) - INSERT INTO ft SELECT hex(randomblob(50)) FROM s; - - WITH s(i) AS ( - SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<50 - ) - INSERT INTO ft SELECT hex(randomblob(50)) FROM s; - - WITH s(i) AS ( - SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<50 - ) - INSERT INTO ft SELECT hex(randomblob(50)) FROM s; - COMMIT; -} - - -finish_test - Index: ext/fts5/test/fts5misc.test ================================================================== --- ext/fts5/test/fts5misc.test +++ ext/fts5/test/fts5misc.test @@ -349,8 +349,99 @@ } {SQLITE_OK} do_test 13.3 { sqlite3_errmsg db } {not an error} + +#------------------------------------------------------------------------- +reset_db +db close +sqlite3 db test.db -uri 1 + +do_execsql_test 14.0 { + PRAGMA locking_mode=EXCLUSIVE; + BEGIN; + ATTACH 'file:/one?vfs=memdb' AS aux1; + ATTACH 'file:/one?vfs=memdb' AS aux2; + CREATE VIRTUAL TABLE t1 USING fts5(x); +} {exclusive} +do_catchsql_test 14.1 { + ANALYZE; +} {1 {database is locked}} +do_catchsql_test 14.2 { + COMMIT; +} {1 {database is locked}} +do_catchsql_test 14.3 { + COMMIT; +} {1 {database is locked}} +do_catchsql_test 14.4 { + ROLLBACK; +} {0 {}} + +#------------------------------------------------------------------------- +reset_db +sqlite3 db2 test.db + +do_execsql_test 15.0 { + CREATE TABLE t1(a, b); + BEGIN; + SELECT * FROM t1; +} + +do_execsql_test -db db2 15.1 { + BEGIN; + CREATE VIRTUAL TABLE x1 USING fts5(y); +} +do_test 15.2 { + list [catch { db2 eval COMMIT } msg] $msg +} {1 {database is locked}} +do_execsql_test -db db2 15.3 { + SAVEPOINT one; +} {} +do_execsql_test 15.4 END +do_test 15.4 { + list [catch { db2 eval COMMIT } msg] $msg +} {0 {}} + +#------------------------------------------------------------------------- +reset_db +forcedelete test.db2 +sqlite3 db2 test.db +do_execsql_test 16.0 { + + ATTACH 'test.db2' AS aux; + CREATE TABLE aux.t2(x,y); + INSERT INTO t2 VALUES(1, 2); + CREATE VIRTUAL TABLE x1 USING 
fts5(a); + BEGIN; + INSERT INTO x1 VALUES('abc'); + INSERT INTO t2 VALUES(3, 4); +} + +do_execsql_test -db db2 16.1 { + ATTACH 'test.db2' AS aux; + BEGIN; + SELECT * FROM t2 +} {1 2} + +do_catchsql_test 16.2 { + COMMIT; +} {1 {database is locked}} + +do_execsql_test 16.3 { + INSERT INTO x1 VALUES('def'); +} + +do_execsql_test -db db2 16.4 { + END +} + +do_execsql_test 16.5 { + COMMIT +} + +do_execsql_test -db db2 16.6 { + SELECT * FROM x1 +} {abc def} finish_test DELETED ext/misc/bgckpt.c Index: ext/misc/bgckpt.c ================================================================== --- ext/misc/bgckpt.c +++ /dev/null @@ -1,244 +0,0 @@ -/* -** 2017-10-11 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -*/ - -#if !defined(SQLITE_TEST) || defined(SQLITE_OS_UNIX) - -#include "sqlite3.h" -#include -#include - -/* -** API declarations. -*/ -typedef struct Checkpointer Checkpointer; -int sqlite3_bgckpt_create(const char *zFilename, Checkpointer **pp); -int sqlite3_bgckpt_checkpoint(Checkpointer *p, int bBlock); -void sqlite3_bgckpt_destroy(Checkpointer *p); - - -struct Checkpointer { - sqlite3 *db; /* Database handle */ - - pthread_t thread; /* Background thread */ - pthread_mutex_t mutex; - pthread_cond_t cond; - - int rc; /* Error from "PRAGMA wal_checkpoint" */ - int bCkpt; /* True if checkpoint requested */ - int bExit; /* True if exit requested */ -}; - -static void *bgckptThreadMain(void *pCtx){ - int rc = SQLITE_OK; - Checkpointer *p = (Checkpointer*)pCtx; - - while( rc==SQLITE_OK ){ - int bExit; - - pthread_mutex_lock(&p->mutex); - if( p->bCkpt==0 && p->bExit==0 ){ - pthread_cond_wait(&p->cond, &p->mutex); - } - p->bCkpt = 0; - bExit = p->bExit; - pthread_mutex_unlock(&p->mutex); - - if( bExit ) break; - rc = sqlite3_exec(p->db, "PRAGMA wal_checkpoint", 0, 0, 0); - if( rc==SQLITE_BUSY ){ - rc = SQLITE_OK; - } - } - - pthread_mutex_lock(&p->mutex); - p->rc = rc; - pthread_mutex_unlock(&p->mutex); - return 0; -} - -void sqlite3_bgckpt_destroy(Checkpointer *p){ - if( p ){ - void *ret = 0; - - /* Signal the background thread to exit */ - pthread_mutex_lock(&p->mutex); - p->bExit = 1; - pthread_cond_broadcast(&p->cond); - pthread_mutex_unlock(&p->mutex); - - pthread_join(p->thread, &ret); - sqlite3_close(p->db); - sqlite3_free(p); - } -} - - -int sqlite3_bgckpt_create(const char *zFilename, Checkpointer **pp){ - Checkpointer *pNew = 0; - int rc; - - pNew = (Checkpointer*)sqlite3_malloc(sizeof(Checkpointer)); - if( pNew==0 ){ - rc = SQLITE_NOMEM; - }else{ - memset(pNew, 0, sizeof(Checkpointer)); - rc = sqlite3_open(zFilename, &pNew->db); - } - - if( rc==SQLITE_OK ){ - pthread_mutex_init(&pNew->mutex, 0); - pthread_cond_init(&pNew->cond, 0); - pthread_create(&pNew->thread, 0, bgckptThreadMain, (void*)pNew); - } - - if( rc!=SQLITE_OK ){ - sqlite3_bgckpt_destroy(pNew); - pNew = 0; - } - *pp = pNew; - return rc; -} - -int sqlite3_bgckpt_checkpoint(Checkpointer *p, int bBlock){ - int rc; - pthread_mutex_lock(&p->mutex); - rc = p->rc; - if( rc==SQLITE_OK ){ - p->bCkpt = 1; - pthread_cond_broadcast(&p->cond); - } - pthread_mutex_unlock(&p->mutex); - return rc; -} - -#ifdef SQLITE_TEST - -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -# ifndef SQLITE_TCLAPI -# 
define SQLITE_TCLAPI -# endif -#endif - -const char *sqlite3ErrName(int rc); - -static void SQLITE_TCLAPI bgckpt_del(void * clientData){ - Checkpointer *pCkpt = (Checkpointer*)clientData; - sqlite3_bgckpt_destroy(pCkpt); -} - -/* -** Tclcmd: $ckpt SUBCMD ... -*/ -static int SQLITE_TCLAPI bgckpt_obj_cmd( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - Checkpointer *pCkpt = (Checkpointer*)clientData; - const char *aCmd[] = { "checkpoint", "destroy", 0 }; - int iCmd; - - if( objc<2 ){ - Tcl_WrongNumArgs(interp, 1, objv, "SUBCMD ..."); - return TCL_ERROR; - } - - if( Tcl_GetIndexFromObj(interp, objv[1], aCmd, "sub-command", 0, &iCmd) ){ - return TCL_ERROR; - } - - switch( iCmd ){ - case 0: { - int rc; - int bBlock = 0; - - if( objc>3 ){ - Tcl_WrongNumArgs(interp, 2, objv, "?BLOCKING?"); - return TCL_ERROR; - } - if( objc==3 && Tcl_GetBooleanFromObj(interp, objv[2], &bBlock) ){ - return TCL_ERROR; - } - - rc = sqlite3_bgckpt_checkpoint(pCkpt, bBlock); - if( rc!=SQLITE_OK ){ - Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3ErrName(rc), -1)); - return TCL_ERROR; - } - break; - } - - case 1: { - Tcl_DeleteCommand(interp, Tcl_GetString(objv[0])); - break; - } - } - - return TCL_OK; -} - -/* -** Tclcmd: bgckpt CMDNAME FILENAME -*/ -static int SQLITE_TCLAPI bgckpt_cmd( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - const char *zCmd; - const char *zFilename; - int rc; - Checkpointer *pCkpt; - - if( objc!=3 ){ - Tcl_WrongNumArgs(interp, 1, objv, "CMDNAME FILENAME"); - return TCL_ERROR; - } - zCmd = Tcl_GetString(objv[1]); - zFilename = Tcl_GetString(objv[2]); - - rc = sqlite3_bgckpt_create(zFilename, &pCkpt); - if( rc!=SQLITE_OK ){ - Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3ErrName(rc), -1)); - return TCL_ERROR; - } - - Tcl_CreateObjCommand(interp, zCmd, bgckpt_obj_cmd, (void*)pCkpt, bgckpt_del); - Tcl_SetObjResult(interp, objv[1]); - return TCL_OK; -} - -int Bgckpt_Init(Tcl_Interp *interp){ - Tcl_CreateObjCommand(interp, "bgckpt", bgckpt_cmd, 0, 0); - return TCL_OK; -} -#endif /* SQLITE_TEST */ - -#else -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -# ifndef SQLITE_TCLAPI -# define SQLITE_TCLAPI -# endif -#endif -int Bgckpt_Init(Tcl_Interp *interp){ return TCL_OK; } -#endif - Index: ext/misc/cksumvfs.c ================================================================== --- ext/misc/cksumvfs.c +++ ext/misc/cksumvfs.c @@ -45,11 +45,11 @@ ** connection. All subsequent database connections that are opened ** will include this extension. For example: ** ** sqlite3 *db; ** sqlite3_open(":memory:", &db); -** sqlite3_load_extention(db, "./cksumvfs"); +** sqlite3_load_extension(db, "./cksumvfs"); ** sqlite3_close(db); ** ** If this extension is compiled with -DSQLITE_CKSUMVFS_STATIC and ** statically linked against the application, initialize it using ** a single API call as follows: DELETED ext/misc/dbdata.c Index: ext/misc/dbdata.c ================================================================== --- ext/misc/dbdata.c +++ /dev/null @@ -1,851 +0,0 @@ -/* -** 2019-04-17 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
-** -****************************************************************************** -** -** This file contains an implementation of two eponymous virtual tables, -** "sqlite_dbdata" and "sqlite_dbptr". Both modules require that the -** "sqlite_dbpage" eponymous virtual table be available. -** -** SQLITE_DBDATA: -** sqlite_dbdata is used to extract data directly from a database b-tree -** page and its associated overflow pages, bypassing the b-tree layer. -** The table schema is equivalent to: -** -** CREATE TABLE sqlite_dbdata( -** pgno INTEGER, -** cell INTEGER, -** field INTEGER, -** value ANY, -** schema TEXT HIDDEN -** ); -** -** IMPORTANT: THE VIRTUAL TABLE SCHEMA ABOVE IS SUBJECT TO CHANGE. IN THE -** FUTURE NEW NON-HIDDEN COLUMNS MAY BE ADDED BETWEEN "value" AND -** "schema". -** -** Each page of the database is inspected. If it cannot be interpreted as -** a b-tree page, or if it is a b-tree page containing 0 entries, the -** sqlite_dbdata table contains no rows for that page. Otherwise, the -** table contains one row for each field in the record associated with -** each cell on the page. For intkey b-trees, the key value is stored in -** field -1. -** -** For example, for the database: -** -** CREATE TABLE t1(a, b); -- root page is page 2 -** INSERT INTO t1(rowid, a, b) VALUES(5, 'v', 'five'); -** INSERT INTO t1(rowid, a, b) VALUES(10, 'x', 'ten'); -** -** the sqlite_dbdata table contains, as well as from entries related to -** page 1, content equivalent to: -** -** INSERT INTO sqlite_dbdata(pgno, cell, field, value) VALUES -** (2, 0, -1, 5 ), -** (2, 0, 0, 'v' ), -** (2, 0, 1, 'five'), -** (2, 1, -1, 10 ), -** (2, 1, 0, 'x' ), -** (2, 1, 1, 'ten' ); -** -** If database corruption is encountered, this module does not report an -** error. Instead, it attempts to extract as much data as possible and -** ignores the corruption. -** -** SQLITE_DBPTR: -** The sqlite_dbptr table has the following schema: -** -** CREATE TABLE sqlite_dbptr( -** pgno INTEGER, -** child INTEGER, -** schema TEXT HIDDEN -** ); -** -** It contains one entry for each b-tree pointer between a parent and -** child page in the database. -*/ -#if !defined(SQLITEINT_H) -#include "sqlite3ext.h" - -typedef unsigned char u8; - -#endif -SQLITE_EXTENSION_INIT1 -#include -#include - -#define DBDATA_PADDING_BYTES 100 - -typedef struct DbdataTable DbdataTable; -typedef struct DbdataCursor DbdataCursor; - -/* Cursor object */ -struct DbdataCursor { - sqlite3_vtab_cursor base; /* Base class. Must be first */ - sqlite3_stmt *pStmt; /* For fetching database pages */ - - int iPgno; /* Current page number */ - u8 *aPage; /* Buffer containing page */ - int nPage; /* Size of aPage[] in bytes */ - int nCell; /* Number of cells on aPage[] */ - int iCell; /* Current cell number */ - int bOnePage; /* True to stop after one page */ - int szDb; - sqlite3_int64 iRowid; - - /* Only for the sqlite_dbdata table */ - u8 *pRec; /* Buffer containing current record */ - int nRec; /* Size of pRec[] in bytes */ - int nHdr; /* Size of header in bytes */ - int iField; /* Current field number */ - u8 *pHdrPtr; - u8 *pPtr; - - sqlite3_int64 iIntkey; /* Integer key value */ -}; - -/* Table object */ -struct DbdataTable { - sqlite3_vtab base; /* Base class. 
Must be first */ - sqlite3 *db; /* The database connection */ - sqlite3_stmt *pStmt; /* For fetching database pages */ - int bPtr; /* True for sqlite3_dbptr table */ -}; - -/* Column and schema definitions for sqlite_dbdata */ -#define DBDATA_COLUMN_PGNO 0 -#define DBDATA_COLUMN_CELL 1 -#define DBDATA_COLUMN_FIELD 2 -#define DBDATA_COLUMN_VALUE 3 -#define DBDATA_COLUMN_SCHEMA 4 -#define DBDATA_SCHEMA \ - "CREATE TABLE x(" \ - " pgno INTEGER," \ - " cell INTEGER," \ - " field INTEGER," \ - " value ANY," \ - " schema TEXT HIDDEN" \ - ")" - -/* Column and schema definitions for sqlite_dbptr */ -#define DBPTR_COLUMN_PGNO 0 -#define DBPTR_COLUMN_CHILD 1 -#define DBPTR_COLUMN_SCHEMA 2 -#define DBPTR_SCHEMA \ - "CREATE TABLE x(" \ - " pgno INTEGER," \ - " child INTEGER," \ - " schema TEXT HIDDEN" \ - ")" - -/* -** Connect to an sqlite_dbdata (pAux==0) or sqlite_dbptr (pAux!=0) virtual -** table. -*/ -static int dbdataConnect( - sqlite3 *db, - void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVtab, - char **pzErr -){ - DbdataTable *pTab = 0; - int rc = sqlite3_declare_vtab(db, pAux ? DBPTR_SCHEMA : DBDATA_SCHEMA); - - if( rc==SQLITE_OK ){ - pTab = (DbdataTable*)sqlite3_malloc64(sizeof(DbdataTable)); - if( pTab==0 ){ - rc = SQLITE_NOMEM; - }else{ - memset(pTab, 0, sizeof(DbdataTable)); - pTab->db = db; - pTab->bPtr = (pAux!=0); - } - } - - *ppVtab = (sqlite3_vtab*)pTab; - return rc; -} - -/* -** Disconnect from or destroy a sqlite_dbdata or sqlite_dbptr virtual table. -*/ -static int dbdataDisconnect(sqlite3_vtab *pVtab){ - DbdataTable *pTab = (DbdataTable*)pVtab; - if( pTab ){ - sqlite3_finalize(pTab->pStmt); - sqlite3_free(pVtab); - } - return SQLITE_OK; -} - -/* -** This function interprets two types of constraints: -** -** schema=? -** pgno=? -** -** If neither are present, idxNum is set to 0. If schema=? is present, -** the 0x01 bit in idxNum is set. If pgno=? is present, the 0x02 bit -** in idxNum is set. -** -** If both parameters are present, schema is in position 0 and pgno in -** position 1. -*/ -static int dbdataBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdx){ - DbdataTable *pTab = (DbdataTable*)tab; - int i; - int iSchema = -1; - int iPgno = -1; - int colSchema = (pTab->bPtr ? DBPTR_COLUMN_SCHEMA : DBDATA_COLUMN_SCHEMA); - - for(i=0; inConstraint; i++){ - struct sqlite3_index_constraint *p = &pIdx->aConstraint[i]; - if( p->op==SQLITE_INDEX_CONSTRAINT_EQ ){ - if( p->iColumn==colSchema ){ - if( p->usable==0 ) return SQLITE_CONSTRAINT; - iSchema = i; - } - if( p->iColumn==DBDATA_COLUMN_PGNO && p->usable ){ - iPgno = i; - } - } - } - - if( iSchema>=0 ){ - pIdx->aConstraintUsage[iSchema].argvIndex = 1; - pIdx->aConstraintUsage[iSchema].omit = 1; - } - if( iPgno>=0 ){ - pIdx->aConstraintUsage[iPgno].argvIndex = 1 + (iSchema>=0); - pIdx->aConstraintUsage[iPgno].omit = 1; - pIdx->estimatedCost = 100; - pIdx->estimatedRows = 50; - - if( pTab->bPtr==0 && pIdx->nOrderBy && pIdx->aOrderBy[0].desc==0 ){ - int iCol = pIdx->aOrderBy[0].iColumn; - if( pIdx->nOrderBy==1 ){ - pIdx->orderByConsumed = (iCol==0 || iCol==1); - }else if( pIdx->nOrderBy==2 && pIdx->aOrderBy[1].desc==0 && iCol==0 ){ - pIdx->orderByConsumed = (pIdx->aOrderBy[1].iColumn==1); - } - } - - }else{ - pIdx->estimatedCost = 100000000; - pIdx->estimatedRows = 1000000000; - } - pIdx->idxNum = (iSchema>=0 ? 0x01 : 0x00) | (iPgno>=0 ? 0x02 : 0x00); - return SQLITE_OK; -} - -/* -** Open a new sqlite_dbdata or sqlite_dbptr cursor. 
-*/ -static int dbdataOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ - DbdataCursor *pCsr; - - pCsr = (DbdataCursor*)sqlite3_malloc64(sizeof(DbdataCursor)); - if( pCsr==0 ){ - return SQLITE_NOMEM; - }else{ - memset(pCsr, 0, sizeof(DbdataCursor)); - pCsr->base.pVtab = pVTab; - } - - *ppCursor = (sqlite3_vtab_cursor *)pCsr; - return SQLITE_OK; -} - -/* -** Restore a cursor object to the state it was in when first allocated -** by dbdataOpen(). -*/ -static void dbdataResetCursor(DbdataCursor *pCsr){ - DbdataTable *pTab = (DbdataTable*)(pCsr->base.pVtab); - if( pTab->pStmt==0 ){ - pTab->pStmt = pCsr->pStmt; - }else{ - sqlite3_finalize(pCsr->pStmt); - } - pCsr->pStmt = 0; - pCsr->iPgno = 1; - pCsr->iCell = 0; - pCsr->iField = 0; - pCsr->bOnePage = 0; - sqlite3_free(pCsr->aPage); - sqlite3_free(pCsr->pRec); - pCsr->pRec = 0; - pCsr->aPage = 0; -} - -/* -** Close an sqlite_dbdata or sqlite_dbptr cursor. -*/ -static int dbdataClose(sqlite3_vtab_cursor *pCursor){ - DbdataCursor *pCsr = (DbdataCursor*)pCursor; - dbdataResetCursor(pCsr); - sqlite3_free(pCsr); - return SQLITE_OK; -} - -/* -** Utility methods to decode 16 and 32-bit big-endian unsigned integers. -*/ -static unsigned int get_uint16(unsigned char *a){ - return (a[0]<<8)|a[1]; -} -static unsigned int get_uint32(unsigned char *a){ - return ((unsigned int)a[0]<<24) - | ((unsigned int)a[1]<<16) - | ((unsigned int)a[2]<<8) - | ((unsigned int)a[3]); -} - -/* -** Load page pgno from the database via the sqlite_dbpage virtual table. -** If successful, set (*ppPage) to point to a buffer containing the page -** data, (*pnPage) to the size of that buffer in bytes and return -** SQLITE_OK. In this case it is the responsibility of the caller to -** eventually free the buffer using sqlite3_free(). -** -** Or, if an error occurs, set both (*ppPage) and (*pnPage) to 0 and -** return an SQLite error code. -*/ -static int dbdataLoadPage( - DbdataCursor *pCsr, /* Cursor object */ - unsigned int pgno, /* Page number of page to load */ - u8 **ppPage, /* OUT: pointer to page buffer */ - int *pnPage /* OUT: Size of (*ppPage) in bytes */ -){ - int rc2; - int rc = SQLITE_OK; - sqlite3_stmt *pStmt = pCsr->pStmt; - - *ppPage = 0; - *pnPage = 0; - sqlite3_bind_int64(pStmt, 2, pgno); - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - int nCopy = sqlite3_column_bytes(pStmt, 0); - if( nCopy>0 ){ - u8 *pPage; - pPage = (u8*)sqlite3_malloc64(nCopy + DBDATA_PADDING_BYTES); - if( pPage==0 ){ - rc = SQLITE_NOMEM; - }else{ - const u8 *pCopy = sqlite3_column_blob(pStmt, 0); - memcpy(pPage, pCopy, nCopy); - memset(&pPage[nCopy], 0, DBDATA_PADDING_BYTES); - } - *ppPage = pPage; - *pnPage = nCopy; - } - } - rc2 = sqlite3_reset(pStmt); - if( rc==SQLITE_OK ) rc = rc2; - - return rc; -} - -/* -** Read a varint. Put the value in *pVal and return the number of bytes. -*/ -static int dbdataGetVarint(const u8 *z, sqlite3_int64 *pVal){ - sqlite3_int64 v = 0; - int i; - for(i=0; i<8; i++){ - v = (v<<7) + (z[i]&0x7f); - if( (z[i]&0x80)==0 ){ *pVal = v; return i+1; } - } - v = (v<<8) + (z[i]&0xff); - *pVal = v; - return 9; -} - -/* -** Return the number of bytes of space used by an SQLite value of type -** eType. 
-*/ -static int dbdataValueBytes(int eType){ - switch( eType ){ - case 0: case 8: case 9: - case 10: case 11: - return 0; - case 1: - return 1; - case 2: - return 2; - case 3: - return 3; - case 4: - return 4; - case 5: - return 6; - case 6: - case 7: - return 8; - default: - if( eType>0 ){ - return ((eType-12) / 2); - } - return 0; - } -} - -/* -** Load a value of type eType from buffer pData and use it to set the -** result of context object pCtx. -*/ -static void dbdataValue( - sqlite3_context *pCtx, - int eType, - u8 *pData, - int nData -){ - if( eType>=0 && dbdataValueBytes(eType)<=nData ){ - switch( eType ){ - case 0: - case 10: - case 11: - sqlite3_result_null(pCtx); - break; - - case 8: - sqlite3_result_int(pCtx, 0); - break; - case 9: - sqlite3_result_int(pCtx, 1); - break; - - case 1: case 2: case 3: case 4: case 5: case 6: case 7: { - sqlite3_uint64 v = (signed char)pData[0]; - pData++; - switch( eType ){ - case 7: - case 6: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2; - case 5: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2; - case 4: v = (v<<8) + pData[0]; pData++; - case 3: v = (v<<8) + pData[0]; pData++; - case 2: v = (v<<8) + pData[0]; pData++; - } - - if( eType==7 ){ - double r; - memcpy(&r, &v, sizeof(r)); - sqlite3_result_double(pCtx, r); - }else{ - sqlite3_result_int64(pCtx, (sqlite3_int64)v); - } - break; - } - - default: { - int n = ((eType-12) / 2); - if( eType % 2 ){ - sqlite3_result_text(pCtx, (const char*)pData, n, SQLITE_TRANSIENT); - }else{ - sqlite3_result_blob(pCtx, pData, n, SQLITE_TRANSIENT); - } - } - } - } -} - -/* -** Move an sqlite_dbdata or sqlite_dbptr cursor to the next entry. -*/ -static int dbdataNext(sqlite3_vtab_cursor *pCursor){ - DbdataCursor *pCsr = (DbdataCursor*)pCursor; - DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; - - pCsr->iRowid++; - while( 1 ){ - int rc; - int iOff = (pCsr->iPgno==1 ? 100 : 0); - int bNextPage = 0; - - if( pCsr->aPage==0 ){ - while( 1 ){ - if( pCsr->bOnePage==0 && pCsr->iPgno>pCsr->szDb ) return SQLITE_OK; - rc = dbdataLoadPage(pCsr, pCsr->iPgno, &pCsr->aPage, &pCsr->nPage); - if( rc!=SQLITE_OK ) return rc; - if( pCsr->aPage ) break; - pCsr->iPgno++; - } - pCsr->iCell = pTab->bPtr ? -2 : 0; - pCsr->nCell = get_uint16(&pCsr->aPage[iOff+3]); - } - - if( pTab->bPtr ){ - if( pCsr->aPage[iOff]!=0x02 && pCsr->aPage[iOff]!=0x05 ){ - pCsr->iCell = pCsr->nCell; - } - pCsr->iCell++; - if( pCsr->iCell>=pCsr->nCell ){ - sqlite3_free(pCsr->aPage); - pCsr->aPage = 0; - if( pCsr->bOnePage ) return SQLITE_OK; - pCsr->iPgno++; - }else{ - return SQLITE_OK; - } - }else{ - /* If there is no record loaded, load it now. */ - if( pCsr->pRec==0 ){ - int bHasRowid = 0; - int nPointer = 0; - sqlite3_int64 nPayload = 0; - sqlite3_int64 nHdr = 0; - int iHdr; - int U, X; - int nLocal; - - switch( pCsr->aPage[iOff] ){ - case 0x02: - nPointer = 4; - break; - case 0x0a: - break; - case 0x0d: - bHasRowid = 1; - break; - default: - /* This is not a b-tree page with records on it. Continue. 
*/ - pCsr->iCell = pCsr->nCell; - break; - } - - if( pCsr->iCell>=pCsr->nCell ){ - bNextPage = 1; - }else{ - - iOff += 8 + nPointer + pCsr->iCell*2; - if( iOff>pCsr->nPage ){ - bNextPage = 1; - }else{ - iOff = get_uint16(&pCsr->aPage[iOff]); - } - - /* For an interior node cell, skip past the child-page number */ - iOff += nPointer; - - /* Load the "byte of payload including overflow" field */ - if( bNextPage || iOff>pCsr->nPage ){ - bNextPage = 1; - }else{ - iOff += dbdataGetVarint(&pCsr->aPage[iOff], &nPayload); - } - - /* If this is a leaf intkey cell, load the rowid */ - if( bHasRowid && !bNextPage && iOffnPage ){ - iOff += dbdataGetVarint(&pCsr->aPage[iOff], &pCsr->iIntkey); - } - - /* Figure out how much data to read from the local page */ - U = pCsr->nPage; - if( bHasRowid ){ - X = U-35; - }else{ - X = ((U-12)*64/255)-23; - } - if( nPayload<=X ){ - nLocal = nPayload; - }else{ - int M, K; - M = ((U-12)*32/255)-23; - K = M+((nPayload-M)%(U-4)); - if( K<=X ){ - nLocal = K; - }else{ - nLocal = M; - } - } - - if( bNextPage || nLocal+iOff>pCsr->nPage ){ - bNextPage = 1; - }else{ - - /* Allocate space for payload. And a bit more to catch small buffer - ** overruns caused by attempting to read a varint or similar from - ** near the end of a corrupt record. */ - pCsr->pRec = (u8*)sqlite3_malloc64(nPayload+DBDATA_PADDING_BYTES); - if( pCsr->pRec==0 ) return SQLITE_NOMEM; - memset(pCsr->pRec, 0, nPayload+DBDATA_PADDING_BYTES); - pCsr->nRec = nPayload; - - /* Load the nLocal bytes of payload */ - memcpy(pCsr->pRec, &pCsr->aPage[iOff], nLocal); - iOff += nLocal; - - /* Load content from overflow pages */ - if( nPayload>nLocal ){ - sqlite3_int64 nRem = nPayload - nLocal; - unsigned int pgnoOvfl = get_uint32(&pCsr->aPage[iOff]); - while( nRem>0 ){ - u8 *aOvfl = 0; - int nOvfl = 0; - int nCopy; - rc = dbdataLoadPage(pCsr, pgnoOvfl, &aOvfl, &nOvfl); - assert( rc!=SQLITE_OK || aOvfl==0 || nOvfl==pCsr->nPage ); - if( rc!=SQLITE_OK ) return rc; - if( aOvfl==0 ) break; - - nCopy = U-4; - if( nCopy>nRem ) nCopy = nRem; - memcpy(&pCsr->pRec[nPayload-nRem], &aOvfl[4], nCopy); - nRem -= nCopy; - - pgnoOvfl = get_uint32(aOvfl); - sqlite3_free(aOvfl); - } - } - - iHdr = dbdataGetVarint(pCsr->pRec, &nHdr); - pCsr->nHdr = nHdr; - pCsr->pHdrPtr = &pCsr->pRec[iHdr]; - pCsr->pPtr = &pCsr->pRec[pCsr->nHdr]; - pCsr->iField = (bHasRowid ? -1 : 0); - } - } - }else{ - pCsr->iField++; - if( pCsr->iField>0 ){ - sqlite3_int64 iType; - if( pCsr->pHdrPtr>&pCsr->pRec[pCsr->nRec] ){ - bNextPage = 1; - }else{ - pCsr->pHdrPtr += dbdataGetVarint(pCsr->pHdrPtr, &iType); - pCsr->pPtr += dbdataValueBytes(iType); - } - } - } - - if( bNextPage ){ - sqlite3_free(pCsr->aPage); - sqlite3_free(pCsr->pRec); - pCsr->aPage = 0; - pCsr->pRec = 0; - if( pCsr->bOnePage ) return SQLITE_OK; - pCsr->iPgno++; - }else{ - if( pCsr->iField<0 || pCsr->pHdrPtr<&pCsr->pRec[pCsr->nHdr] ){ - return SQLITE_OK; - } - - /* Advance to the next cell. The next iteration of the loop will load - ** the record and so on. */ - sqlite3_free(pCsr->pRec); - pCsr->pRec = 0; - pCsr->iCell++; - } - } - } - - assert( !"can't get here" ); - return SQLITE_OK; -} - -/* -** Return true if the cursor is at EOF. -*/ -static int dbdataEof(sqlite3_vtab_cursor *pCursor){ - DbdataCursor *pCsr = (DbdataCursor*)pCursor; - return pCsr->aPage==0; -} - -/* -** Determine the size in pages of database zSchema (where zSchema is -** "main", "temp" or the name of an attached database) and set -** pCsr->szDb accordingly. If successful, return SQLITE_OK. Otherwise, -** an SQLite error code. 
-*/ -static int dbdataDbsize(DbdataCursor *pCsr, const char *zSchema){ - DbdataTable *pTab = (DbdataTable*)pCsr->base.pVtab; - char *zSql = 0; - int rc, rc2; - sqlite3_stmt *pStmt = 0; - - zSql = sqlite3_mprintf("PRAGMA %Q.page_count", zSchema); - if( zSql==0 ) return SQLITE_NOMEM; - rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pStmt, 0); - sqlite3_free(zSql); - if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){ - pCsr->szDb = sqlite3_column_int(pStmt, 0); - } - rc2 = sqlite3_finalize(pStmt); - if( rc==SQLITE_OK ) rc = rc2; - return rc; -} - -/* -** xFilter method for sqlite_dbdata and sqlite_dbptr. -*/ -static int dbdataFilter( - sqlite3_vtab_cursor *pCursor, - int idxNum, const char *idxStr, - int argc, sqlite3_value **argv -){ - DbdataCursor *pCsr = (DbdataCursor*)pCursor; - DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; - int rc = SQLITE_OK; - const char *zSchema = "main"; - - dbdataResetCursor(pCsr); - assert( pCsr->iPgno==1 ); - if( idxNum & 0x01 ){ - zSchema = (const char*)sqlite3_value_text(argv[0]); - } - if( idxNum & 0x02 ){ - pCsr->iPgno = sqlite3_value_int(argv[(idxNum & 0x01)]); - pCsr->bOnePage = 1; - }else{ - pCsr->nPage = dbdataDbsize(pCsr, zSchema); - rc = dbdataDbsize(pCsr, zSchema); - } - - if( rc==SQLITE_OK ){ - if( pTab->pStmt ){ - pCsr->pStmt = pTab->pStmt; - pTab->pStmt = 0; - }else{ - rc = sqlite3_prepare_v2(pTab->db, - "SELECT data FROM sqlite_dbpage(?) WHERE pgno=?", -1, - &pCsr->pStmt, 0 - ); - } - } - if( rc==SQLITE_OK ){ - rc = sqlite3_bind_text(pCsr->pStmt, 1, zSchema, -1, SQLITE_TRANSIENT); - }else{ - pTab->base.zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pTab->db)); - } - if( rc==SQLITE_OK ){ - rc = dbdataNext(pCursor); - } - return rc; -} - -/* -** Return a column for the sqlite_dbdata or sqlite_dbptr table. -*/ -static int dbdataColumn( - sqlite3_vtab_cursor *pCursor, - sqlite3_context *ctx, - int i -){ - DbdataCursor *pCsr = (DbdataCursor*)pCursor; - DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; - if( pTab->bPtr ){ - switch( i ){ - case DBPTR_COLUMN_PGNO: - sqlite3_result_int64(ctx, pCsr->iPgno); - break; - case DBPTR_COLUMN_CHILD: { - int iOff = pCsr->iPgno==1 ? 100 : 0; - if( pCsr->iCell<0 ){ - iOff += 8; - }else{ - iOff += 12 + pCsr->iCell*2; - if( iOff>pCsr->nPage ) return SQLITE_OK; - iOff = get_uint16(&pCsr->aPage[iOff]); - } - if( iOff<=pCsr->nPage ){ - sqlite3_result_int64(ctx, get_uint32(&pCsr->aPage[iOff])); - } - break; - } - } - }else{ - switch( i ){ - case DBDATA_COLUMN_PGNO: - sqlite3_result_int64(ctx, pCsr->iPgno); - break; - case DBDATA_COLUMN_CELL: - sqlite3_result_int(ctx, pCsr->iCell); - break; - case DBDATA_COLUMN_FIELD: - sqlite3_result_int(ctx, pCsr->iField); - break; - case DBDATA_COLUMN_VALUE: { - if( pCsr->iField<0 ){ - sqlite3_result_int64(ctx, pCsr->iIntkey); - }else{ - sqlite3_int64 iType; - dbdataGetVarint(pCsr->pHdrPtr, &iType); - dbdataValue( - ctx, iType, pCsr->pPtr, &pCsr->pRec[pCsr->nRec] - pCsr->pPtr - ); - } - break; - } - } - } - return SQLITE_OK; -} - -/* -** Return the rowid for an sqlite_dbdata or sqlite_dptr table. 
-*/ -static int dbdataRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ - DbdataCursor *pCsr = (DbdataCursor*)pCursor; - *pRowid = pCsr->iRowid; - return SQLITE_OK; -} - - -/* -** Invoke this routine to register the "sqlite_dbdata" virtual table module -*/ -static int sqlite3DbdataRegister(sqlite3 *db){ - static sqlite3_module dbdata_module = { - 0, /* iVersion */ - 0, /* xCreate */ - dbdataConnect, /* xConnect */ - dbdataBestIndex, /* xBestIndex */ - dbdataDisconnect, /* xDisconnect */ - 0, /* xDestroy */ - dbdataOpen, /* xOpen - open a cursor */ - dbdataClose, /* xClose - close a cursor */ - dbdataFilter, /* xFilter - configure scan constraints */ - dbdataNext, /* xNext - advance a cursor */ - dbdataEof, /* xEof - check for end of scan */ - dbdataColumn, /* xColumn - read data */ - dbdataRowid, /* xRowid - read data */ - 0, /* xUpdate */ - 0, /* xBegin */ - 0, /* xSync */ - 0, /* xCommit */ - 0, /* xRollback */ - 0, /* xFindMethod */ - 0, /* xRename */ - 0, /* xSavepoint */ - 0, /* xRelease */ - 0, /* xRollbackTo */ - 0 /* xShadowName */ - }; - - int rc = sqlite3_create_module(db, "sqlite_dbdata", &dbdata_module, 0); - if( rc==SQLITE_OK ){ - rc = sqlite3_create_module(db, "sqlite_dbptr", &dbdata_module, (void*)1); - } - return rc; -} - -#ifdef _WIN32 -__declspec(dllexport) -#endif -int sqlite3_dbdata_init( - sqlite3 *db, - char **pzErrMsg, - const sqlite3_api_routines *pApi -){ - SQLITE_EXTENSION_INIT2(pApi); - return sqlite3DbdataRegister(db); -} Index: ext/misc/regexp.c ================================================================== --- ext/misc/regexp.c +++ ext/misc/regexp.c @@ -183,11 +183,11 @@ }else if( (c&0xf0)==0xe0 && p->i+1mx && (p->z[p->i]&0xc0)==0x80 && (p->z[p->i+1]&0xc0)==0x80 ){ c = (c&0x0f)<<12 | ((p->z[p->i]&0x3f)<<6) | (p->z[p->i+1]&0x3f); p->i += 2; if( c<=0x7ff || (c>=0xd800 && c<=0xdfff) ) c = 0xfffd; - }else if( (c&0xf8)==0xf0 && p->i+3mx && (p->z[p->i]&0xc0)==0x80 + }else if( (c&0xf8)==0xf0 && p->i+2mx && (p->z[p->i]&0xc0)==0x80 && (p->z[p->i+1]&0xc0)==0x80 && (p->z[p->i+2]&0xc0)==0x80 ){ c = (c&0x07)<<18 | ((p->z[p->i]&0x3f)<<12) | ((p->z[p->i+1]&0x3f)<<6) | (p->z[p->i+2]&0x3f); p->i += 3; if( c<=0xffff || c>0x10ffff ) c = 0xfffd; @@ -710,19 +710,19 @@ /* The following is a performance optimization. If the regex begins with ** ".*" (if the input regex lacks an initial "^") and afterwards there are ** one or more matching characters, enter those matching characters into ** zInit[]. The re_match() routine can then search ahead in the input ** string looking for the initial match without having to run the whole - ** regex engine over the string. Do not worry able trying to match + ** regex engine over the string. Do not worry about trying to match ** unicode characters beyond plane 0 - those are very rare and this is ** just an optimization. 
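This optimization stores the UTF-8 encoding of the literal prefix in zInit[]. The corrected bounds in the hunk that follows match the UTF-8 size classes: code points up to 0x7f take one byte, up to 0x7ff two bytes, and the rest of plane 0 three bytes (the old x<=0xfff test wrongly routed code points 0x800..0xfff into the two-byte branch). A standalone sketch of the same encoding rule, using a hypothetical helper name that is not part of regexp.c:

    #include <assert.h>

    /* Encode plane-0 code point x as UTF-8 into z[]; return the byte count.
    ** Mirrors the zInit[] logic in the hunk below. */
    static int utf8Encode(unsigned x, unsigned char *z){
      if( x<=0x7f ){
        z[0] = (unsigned char)x;
        return 1;
      }else if( x<=0x7ff ){
        z[0] = (unsigned char)(0xc0 | (x>>6));
        z[1] = (unsigned char)(0x80 | (x&0x3f));
        return 2;
      }else{
        z[0] = (unsigned char)(0xe0 | (x>>12));
        z[1] = (unsigned char)(0x80 | ((x>>6)&0x3f));
        z[2] = (unsigned char)(0x80 | (x&0x3f));
        return 3;
      }
    }

    int main(void){
      unsigned char z[4];
      assert( utf8Encode(0x24, z)==1 );   /* '$' encodes as 24 */
      assert( utf8Encode(0x915, z)==3 );  /* U+0915 encodes as e0 a4 95; the old
                                          ** x<=0xfff bound emitted only 2 bytes */
      return 0;
    }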
*/ if( pRe->aOp[0]==RE_OP_ANYSTAR && !noCase ){ for(j=0, i=1; j<(int)sizeof(pRe->zInit)-2 && pRe->aOp[i]==RE_OP_MATCH; i++){ unsigned x = pRe->aArg[i]; - if( x<=127 ){ + if( x<=0x7f ){ pRe->zInit[j++] = (unsigned char)x; - }else if( x<=0xfff ){ + }else if( x<=0x7ff ){ pRe->zInit[j++] = (unsigned char)(0xc0 | (x>>6)); pRe->zInit[j++] = 0x80 | (x&0x3f); }else if( x<=0xffff ){ pRe->zInit[j++] = (unsigned char)(0xe0 | (x>>12)); pRe->zInit[j++] = 0x80 | ((x>>6)&0x3f); ADDED ext/rbu/rburename.test Index: ext/rbu/rburename.test ================================================================== --- /dev/null +++ ext/rbu/rburename.test @@ -0,0 +1,54 @@ +# 2022 November 07 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# + +source [file join [file dirname [info script]] rbu_common.tcl] +set ::testprefix rburename + + +do_execsql_test 1.0 { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t1 VALUES(5, 6); +} + +forcedelete test.db-vacuum + +proc my_rename {old new} { + lappend ::my_rename_calls [list [file tail $old] [file tail $new]] + file rename $old $new +} + +do_test 1.1 { + sqlite3rbu_vacuum rbu test.db + rbu rename_handler my_rename + while {[rbu step]=="SQLITE_OK"} {} + rbu close +} SQLITE_DONE + +do_test 1.2 { + set ::my_rename_calls +} {{test.db-oal test.db-wal}} + +proc my_rename {old new} { + error "something went wrong" +} + +do_test 1.3 { + sqlite3rbu_vacuum rbu test.db + rbu rename_handler my_rename + while {[rbu step]=="SQLITE_OK"} {} + list [catch { rbu close } msg] $msg +} {1 SQLITE_IOERR} + +finish_test Index: ext/rbu/rbuvacuum2.test ================================================================== --- ext/rbu/rbuvacuum2.test +++ ext/rbu/rbuvacuum2.test @@ -225,14 +225,15 @@ sqlite3rbu_vacuum rbu test.db test.db2 while {[rbu state]!="checkpoint"} { rbu step } rbu close } {SQLITE_OK} -do_execsql_test 6.2 { - SELECT 1 FROM sqlite_master LIMIT 1; - PRAGMA wal_checkpoint; -} {1 0 4 4} +do_test 6.2 { + execsql { SELECT 1 FROM sqlite_master LIMIT 1 } + execsql { PRAGMA wal_checkpoint } + execsql { SELECT 1 FROM sqlite_master LIMIT 1 } +} {1} do_test 6.3 { sqlite3rbu_vacuum rbu test.db test.db2 while {[rbu step]!="SQLITE_DONE"} { rbu step } rbu close Index: ext/rbu/sqlite3rbu.c ================================================================== --- ext/rbu/sqlite3rbu.c +++ ext/rbu/sqlite3rbu.c @@ -391,10 +391,12 @@ const char *zVfsName; /* Name of automatically created rbu vfs */ rbu_file *pTargetFd; /* File handle open on target db */ int nPagePerSector; /* Pages per sector for pTargetFd */ i64 iOalSz; i64 nPhaseOneStep; + void *pRenameArg; + int (*xRename)(void*, const char*, const char*); /* The following state variables are used as part of the incremental ** checkpoint stage (eStage==RBU_STAGE_CKPT). See comments surrounding ** function rbuSetupCheckpoint() for details. 
*/ u32 iMaxFrame; /* Largest iWalFrame value in aFrame[] */ @@ -2779,11 +2781,11 @@ if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p); if( p->zState==0 ){ const char *zFile = sqlite3_db_filename(p->dbRbu, "main"); - p->zState = rbuMPrintf(p, "file://%s-vacuum?modeof=%s", zFile, zFile); + p->zState = rbuMPrintf(p, "file:///%s-vacuum?modeof=%s", zFile, zFile); } } /* If using separate RBU and state databases, attach the state database to ** the RBU db handle now. */ @@ -3239,36 +3241,11 @@ assert( p->rc==SQLITE_OK ); p->rc = rbuLockDatabase(dbMain); } if( p->rc==SQLITE_OK ){ -#if defined(_WIN32_WCE) - { - LPWSTR zWideOal; - LPWSTR zWideWal; - - zWideOal = rbuWinUtf8ToUnicode(zOal); - if( zWideOal ){ - zWideWal = rbuWinUtf8ToUnicode(zWal); - if( zWideWal ){ - if( MoveFileW(zWideOal, zWideWal) ){ - p->rc = SQLITE_OK; - }else{ - p->rc = SQLITE_IOERR; - } - sqlite3_free(zWideWal); - }else{ - p->rc = SQLITE_IOERR_NOMEM; - } - sqlite3_free(zWideOal); - }else{ - p->rc = SQLITE_IOERR_NOMEM; - } - } -#else - p->rc = rename(zOal, zWal) ? SQLITE_IOERR : SQLITE_OK; -#endif + p->rc = p->xRename(p->pRenameArg, zOal, zWal); } if( p->rc!=SQLITE_OK || rbuIsVacuum(p) || rbuExclusiveCheckpoint(dbMain)==0 @@ -4003,10 +3980,11 @@ if( p ){ RbuState *pState = 0; /* Create the custom VFS. */ memset(p, 0, sizeof(sqlite3rbu)); + sqlite3rbu_rename_handler(p, 0, 0); rbuCreateVfs(p); /* Open the target, RBU and state databases */ if( p->rc==SQLITE_OK ){ char *pCsr = (char*)&p[1]; @@ -4393,10 +4371,58 @@ } p->rc = rc; return rc; } + +/* +** Default xRename callback for RBU. +*/ +static int xDefaultRename(void *pArg, const char *zOld, const char *zNew){ + int rc = SQLITE_OK; +#if defined(_WIN32_WCE) + { + LPWSTR zWideOld; + LPWSTR zWideNew; + + zWideOld = rbuWinUtf8ToUnicode(zOld); + if( zWideOld ){ + zWideNew = rbuWinUtf8ToUnicode(zNew); + if( zWideNew ){ + if( MoveFileW(zWideOld, zWideNew) ){ + rc = SQLITE_OK; + }else{ + rc = SQLITE_IOERR; + } + sqlite3_free(zWideNew); + }else{ + rc = SQLITE_IOERR_NOMEM; + } + sqlite3_free(zWideOld); + }else{ + rc = SQLITE_IOERR_NOMEM; + } + } +#else + rc = rename(zOld, zNew) ? SQLITE_IOERR : SQLITE_OK; +#endif + return rc; +} + +void sqlite3rbu_rename_handler( + sqlite3rbu *pRbu, + void *pArg, + int (*xRename)(void *pArg, const char *zOld, const char *zNew) +){ + if( xRename ){ + pRbu->xRename = xRename; + pRbu->pRenameArg = pArg; + }else{ + pRbu->xRename = xDefaultRename; + pRbu->pRenameArg = 0; + } +} /************************************************************************** ** Beginning of RBU VFS shim methods. The VFS shim modifies the behaviour ** of a standard VFS in the following ways: ** Index: ext/rbu/sqlite3rbu.h ================================================================== --- ext/rbu/sqlite3rbu.h +++ ext/rbu/sqlite3rbu.h @@ -541,10 +541,38 @@ #define SQLITE_RBU_STATE_CHECKPOINT 3 #define SQLITE_RBU_STATE_DONE 4 #define SQLITE_RBU_STATE_ERROR 5 SQLITE_API int sqlite3rbu_state(sqlite3rbu *pRbu); + +/* +** As part of applying an RBU update or performing an RBU vacuum operation, +** the system must at one point move the *-oal file to the equivalent *-wal +** path. Normally, it does this by invoking POSIX function rename(2) directly. +** Except on WINCE platforms, where it uses win32 API MoveFileW(). This +** function may be used to register a callback that the RBU module will invoke +** instead of one of these APIs. 
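A minimal sketch of registering such a callback from application code, assuming the sqlite3rbu_open(), sqlite3rbu_step() and sqlite3rbu_close() interfaces declared elsewhere in this header (the helper names below are hypothetical):

    #include <stdio.h>
    #include "sqlite3rbu.h"

    /* Custom rename callback: log the move, then fall back to rename(2),
    ** much like the xDefaultRename() added to sqlite3rbu.c in this change. */
    static int myRename(void *pArg, const char *zOld, const char *zNew){
      fprintf((FILE*)pArg, "moving %s -> %s\n", zOld, zNew);
      return rename(zOld, zNew) ? SQLITE_IOERR : SQLITE_OK;
    }

    /* Apply the RBU update in zRbu to target database zTarget. */
    static int applyUpdate(const char *zTarget, const char *zRbu){
      char *zErr = 0;
      int rc;
      sqlite3rbu *p = sqlite3rbu_open(zTarget, zRbu, 0);
      sqlite3rbu_rename_handler(p, (void*)stderr, myRename);
      while( sqlite3rbu_step(p)==SQLITE_OK ){ }
      rc = sqlite3rbu_close(p, &zErr);
      if( rc!=SQLITE_OK && rc!=SQLITE_DONE ){
        fprintf(stderr, "rbu error: %s\n", zErr ? zErr : "?");
      }
      sqlite3_free(zErr);
      return rc;
    }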
+** +** If a callback is registered with an RBU handle, it invokes it instead +** of rename(2) when it needs to move a file within the file-system. The +** first argument passed to the xRename() callback is a copy of the second +** argument (pArg) passed to this function. The second is the full path +** to the file to move and the third the full path to which it should be +** moved. The callback function should return SQLITE_OK to indicate +** success. If an error occurs, it should return an SQLite error code. +** In this case the RBU operation will be abandoned and the error returned +** to the RBU user. +** +** Passing a NULL pointer in place of the xRename argument to this function +** restores the default behaviour. +*/ +SQLITE_API void sqlite3rbu_rename_handler( + sqlite3rbu *pRbu, + void *pArg, + int (*xRename)(void *pArg, const char *zOld, const char *zNew) +); + /* ** Create an RBU VFS named zName that accesses the underlying file-system ** via existing VFS zParent. Or, if the zParent parameter is passed NULL, ** then the new RBU VFS uses the default system VFS to access the file-system. Index: ext/rbu/test_rbu.c ================================================================== --- ext/rbu/test_rbu.c +++ ext/rbu/test_rbu.c @@ -24,10 +24,18 @@ # ifndef SQLITE_TCLAPI # define SQLITE_TCLAPI # endif #endif #include +#include + +typedef struct TestRbu TestRbu; +struct TestRbu { + sqlite3rbu *pRbu; + Tcl_Interp *interp; + Tcl_Obj *xRename; +}; /* From main.c */ extern const char *sqlite3ErrName(int); extern int sqlite3TestMakePointerStr(Tcl_Interp*, char*, void*); @@ -53,19 +61,34 @@ } Tcl_DecrRefCount(pScript); } +static int xRenameCallback(void *pArg, const char *zOld, const char *zNew){ + int rc = SQLITE_OK; + TestRbu *pTest = (TestRbu*)pArg; + Tcl_Obj *pEval = Tcl_DuplicateObj(pTest->xRename); + + Tcl_IncrRefCount(pEval); + Tcl_ListObjAppendElement(pTest->interp, pEval, Tcl_NewStringObj(zOld, -1)); + Tcl_ListObjAppendElement(pTest->interp, pEval, Tcl_NewStringObj(zNew, -1)); + + rc = Tcl_EvalObjEx(pTest->interp, pEval, TCL_GLOBAL_ONLY); + Tcl_DecrRefCount(pEval); + + return rc ? 
SQLITE_IOERR : SQLITE_OK; +} static int SQLITE_TCLAPI test_sqlite3rbu_cmd( ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ int ret = TCL_OK; - sqlite3rbu *pRbu = (sqlite3rbu*)clientData; + TestRbu *pTest = (TestRbu*)clientData; + sqlite3rbu *pRbu = pTest->pRbu; struct RbuCmd { const char *zName; int nArg; const char *zUsage; } aCmd[] = { @@ -80,10 +103,11 @@ {"progress", 2, ""}, /* 8 */ {"close_no_error", 2, ""}, /* 9 */ {"temp_size_limit", 3, "LIMIT"}, /* 10 */ {"temp_size", 2, ""}, /* 11 */ {"dbRbu_eval", 3, "SQL"}, /* 12 */ + {"rename_handler", 3, "SCRIPT"},/* 13 */ {0,0,0} }; int iCmd; if( objc<2 ){ @@ -125,10 +149,12 @@ Tcl_AppendResult(interp, " - ", zErrmsg, 0); sqlite3_free(zErrmsg); } ret = TCL_ERROR; } + if( pTest->xRename ) Tcl_DecrRefCount(pTest->xRename); + ckfree(pTest); break; } case 2: /* create_rbu_delta */ { sqlite3 *db = sqlite3rbu_db(pRbu, 0); @@ -211,18 +237,43 @@ case 11: /* temp_size */ { sqlite3_int64 sz = sqlite3rbu_temp_size(pRbu); Tcl_SetObjResult(interp, Tcl_NewWideIntObj(sz)); break; } + + case 13: /* rename_handler */ { + Tcl_Obj *pScript = objv[2]; + assert( !sqlite3_stricmp(aCmd[13].zName, "rename_handler") ); + if( Tcl_GetCharLength(pScript)==0 ){ + sqlite3rbu_rename_handler(pRbu, 0, 0); + }else{ + pTest->xRename = Tcl_DuplicateObj(pScript); + Tcl_IncrRefCount(pTest->xRename); + sqlite3rbu_rename_handler(pRbu, pTest, xRenameCallback); + } + break; + } default: /* seems unlikely */ assert( !"cannot happen" ); break; } return ret; } + +static void createRbuWrapper( + Tcl_Interp *interp, + const char *zCmd, + sqlite3rbu *pRbu +){ + TestRbu *pTest = (TestRbu*)ckalloc(sizeof(TestRbu)); + memset(pTest, 0, sizeof(TestRbu)); + pTest->pRbu = pRbu; + pTest->interp = interp; + Tcl_CreateObjCommand(interp, zCmd, test_sqlite3rbu_cmd, (ClientData)pTest, 0); +} /* ** Tclcmd: sqlite3rbu CMD ?? */ static int SQLITE_TCLAPI test_sqlite3rbu( @@ -245,11 +296,11 @@ zTarget = Tcl_GetString(objv[2]); zRbu = Tcl_GetString(objv[3]); if( objc==5 ) zStateDb = Tcl_GetString(objv[4]); pRbu = sqlite3rbu_open(zTarget, zRbu, zStateDb); - Tcl_CreateObjCommand(interp, zCmd, test_sqlite3rbu_cmd, (ClientData)pRbu, 0); + createRbuWrapper(interp, zCmd, pRbu); Tcl_SetObjResult(interp, objv[1]); return TCL_OK; } /* @@ -274,11 +325,11 @@ zTarget = Tcl_GetString(objv[2]); if( objc==4 ) zStateDb = Tcl_GetString(objv[3]); if( zStateDb && zStateDb[0]=='\0' ) zStateDb = 0; pRbu = sqlite3rbu_vacuum(zTarget, zStateDb); - Tcl_CreateObjCommand(interp, zCmd, test_sqlite3rbu_cmd, (ClientData)pRbu, 0); + createRbuWrapper(interp, zCmd, pRbu); Tcl_SetObjResult(interp, objv[1]); return TCL_OK; } /* ADDED ext/recover/dbdata.c Index: ext/recover/dbdata.c ================================================================== --- /dev/null +++ ext/recover/dbdata.c @@ -0,0 +1,942 @@ +/* +** 2019-04-17 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains an implementation of two eponymous virtual tables, +** "sqlite_dbdata" and "sqlite_dbptr". Both modules require that the +** "sqlite_dbpage" eponymous virtual table be available. 
+** +** SQLITE_DBDATA: +** sqlite_dbdata is used to extract data directly from a database b-tree +** page and its associated overflow pages, bypassing the b-tree layer. +** The table schema is equivalent to: +** +** CREATE TABLE sqlite_dbdata( +** pgno INTEGER, +** cell INTEGER, +** field INTEGER, +** value ANY, +** schema TEXT HIDDEN +** ); +** +** IMPORTANT: THE VIRTUAL TABLE SCHEMA ABOVE IS SUBJECT TO CHANGE. IN THE +** FUTURE NEW NON-HIDDEN COLUMNS MAY BE ADDED BETWEEN "value" AND +** "schema". +** +** Each page of the database is inspected. If it cannot be interpreted as +** a b-tree page, or if it is a b-tree page containing 0 entries, the +** sqlite_dbdata table contains no rows for that page. Otherwise, the +** table contains one row for each field in the record associated with +** each cell on the page. For intkey b-trees, the key value is stored in +** field -1. +** +** For example, for the database: +** +** CREATE TABLE t1(a, b); -- root page is page 2 +** INSERT INTO t1(rowid, a, b) VALUES(5, 'v', 'five'); +** INSERT INTO t1(rowid, a, b) VALUES(10, 'x', 'ten'); +** +** the sqlite_dbdata table contains, as well as from entries related to +** page 1, content equivalent to: +** +** INSERT INTO sqlite_dbdata(pgno, cell, field, value) VALUES +** (2, 0, -1, 5 ), +** (2, 0, 0, 'v' ), +** (2, 0, 1, 'five'), +** (2, 1, -1, 10 ), +** (2, 1, 0, 'x' ), +** (2, 1, 1, 'ten' ); +** +** If database corruption is encountered, this module does not report an +** error. Instead, it attempts to extract as much data as possible and +** ignores the corruption. +** +** SQLITE_DBPTR: +** The sqlite_dbptr table has the following schema: +** +** CREATE TABLE sqlite_dbptr( +** pgno INTEGER, +** child INTEGER, +** schema TEXT HIDDEN +** ); +** +** It contains one entry for each b-tree pointer between a parent and +** child page in the database. +*/ + +#if !defined(SQLITEINT_H) +#include "sqlite3ext.h" + +typedef unsigned char u8; +typedef unsigned int u32; + +#endif +SQLITE_EXTENSION_INIT1 +#include +#include + +#ifndef SQLITE_OMIT_VIRTUALTABLE + +#define DBDATA_PADDING_BYTES 100 + +typedef struct DbdataTable DbdataTable; +typedef struct DbdataCursor DbdataCursor; + +/* Cursor object */ +struct DbdataCursor { + sqlite3_vtab_cursor base; /* Base class. Must be first */ + sqlite3_stmt *pStmt; /* For fetching database pages */ + + int iPgno; /* Current page number */ + u8 *aPage; /* Buffer containing page */ + int nPage; /* Size of aPage[] in bytes */ + int nCell; /* Number of cells on aPage[] */ + int iCell; /* Current cell number */ + int bOnePage; /* True to stop after one page */ + int szDb; + sqlite3_int64 iRowid; + + /* Only for the sqlite_dbdata table */ + u8 *pRec; /* Buffer containing current record */ + sqlite3_int64 nRec; /* Size of pRec[] in bytes */ + sqlite3_int64 nHdr; /* Size of header in bytes */ + int iField; /* Current field number */ + u8 *pHdrPtr; + u8 *pPtr; + u32 enc; /* Text encoding */ + + sqlite3_int64 iIntkey; /* Integer key value */ +}; + +/* Table object */ +struct DbdataTable { + sqlite3_vtab base; /* Base class. 
Must be first */ + sqlite3 *db; /* The database connection */ + sqlite3_stmt *pStmt; /* For fetching database pages */ + int bPtr; /* True for sqlite3_dbptr table */ +}; + +/* Column and schema definitions for sqlite_dbdata */ +#define DBDATA_COLUMN_PGNO 0 +#define DBDATA_COLUMN_CELL 1 +#define DBDATA_COLUMN_FIELD 2 +#define DBDATA_COLUMN_VALUE 3 +#define DBDATA_COLUMN_SCHEMA 4 +#define DBDATA_SCHEMA \ + "CREATE TABLE x(" \ + " pgno INTEGER," \ + " cell INTEGER," \ + " field INTEGER," \ + " value ANY," \ + " schema TEXT HIDDEN" \ + ")" + +/* Column and schema definitions for sqlite_dbptr */ +#define DBPTR_COLUMN_PGNO 0 +#define DBPTR_COLUMN_CHILD 1 +#define DBPTR_COLUMN_SCHEMA 2 +#define DBPTR_SCHEMA \ + "CREATE TABLE x(" \ + " pgno INTEGER," \ + " child INTEGER," \ + " schema TEXT HIDDEN" \ + ")" + +/* +** Connect to an sqlite_dbdata (pAux==0) or sqlite_dbptr (pAux!=0) virtual +** table. +*/ +static int dbdataConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + DbdataTable *pTab = 0; + int rc = sqlite3_declare_vtab(db, pAux ? DBPTR_SCHEMA : DBDATA_SCHEMA); + + if( rc==SQLITE_OK ){ + pTab = (DbdataTable*)sqlite3_malloc64(sizeof(DbdataTable)); + if( pTab==0 ){ + rc = SQLITE_NOMEM; + }else{ + memset(pTab, 0, sizeof(DbdataTable)); + pTab->db = db; + pTab->bPtr = (pAux!=0); + } + } + + *ppVtab = (sqlite3_vtab*)pTab; + return rc; +} + +/* +** Disconnect from or destroy a sqlite_dbdata or sqlite_dbptr virtual table. +*/ +static int dbdataDisconnect(sqlite3_vtab *pVtab){ + DbdataTable *pTab = (DbdataTable*)pVtab; + if( pTab ){ + sqlite3_finalize(pTab->pStmt); + sqlite3_free(pVtab); + } + return SQLITE_OK; +} + +/* +** This function interprets two types of constraints: +** +** schema=? +** pgno=? +** +** If neither are present, idxNum is set to 0. If schema=? is present, +** the 0x01 bit in idxNum is set. If pgno=? is present, the 0x02 bit +** in idxNum is set. +** +** If both parameters are present, schema is in position 0 and pgno in +** position 1. +*/ +static int dbdataBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdx){ + DbdataTable *pTab = (DbdataTable*)tab; + int i; + int iSchema = -1; + int iPgno = -1; + int colSchema = (pTab->bPtr ? DBPTR_COLUMN_SCHEMA : DBDATA_COLUMN_SCHEMA); + + for(i=0; inConstraint; i++){ + struct sqlite3_index_constraint *p = &pIdx->aConstraint[i]; + if( p->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + if( p->iColumn==colSchema ){ + if( p->usable==0 ) return SQLITE_CONSTRAINT; + iSchema = i; + } + if( p->iColumn==DBDATA_COLUMN_PGNO && p->usable ){ + iPgno = i; + } + } + } + + if( iSchema>=0 ){ + pIdx->aConstraintUsage[iSchema].argvIndex = 1; + pIdx->aConstraintUsage[iSchema].omit = 1; + } + if( iPgno>=0 ){ + pIdx->aConstraintUsage[iPgno].argvIndex = 1 + (iSchema>=0); + pIdx->aConstraintUsage[iPgno].omit = 1; + pIdx->estimatedCost = 100; + pIdx->estimatedRows = 50; + + if( pTab->bPtr==0 && pIdx->nOrderBy && pIdx->aOrderBy[0].desc==0 ){ + int iCol = pIdx->aOrderBy[0].iColumn; + if( pIdx->nOrderBy==1 ){ + pIdx->orderByConsumed = (iCol==0 || iCol==1); + }else if( pIdx->nOrderBy==2 && pIdx->aOrderBy[1].desc==0 && iCol==0 ){ + pIdx->orderByConsumed = (pIdx->aOrderBy[1].iColumn==1); + } + } + + }else{ + pIdx->estimatedCost = 100000000; + pIdx->estimatedRows = 1000000000; + } + pIdx->idxNum = (iSchema>=0 ? 0x01 : 0x00) | (iPgno>=0 ? 0x02 : 0x00); + return SQLITE_OK; +} + +/* +** Open a new sqlite_dbdata or sqlite_dbptr cursor. 
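Given the column layout declared above, and assuming the module has already been registered on the connection (for example via the sqlite3_dbdata_init() entry point at the end of this file) and that the build provides the sqlite_dbpage virtual table it reads pages through, sqlite_dbdata can be queried like any other table. A minimal sketch with a hypothetical helper name:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Print every decoded field of every cell on page 2 of the "main"
    ** database; for the t1 example in the header comment this includes the
    ** field -1 rows holding the rowids 5 and 10. */
    static int dumpPage2(sqlite3 *db){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v2(db,
          "SELECT cell, field, quote(value) FROM sqlite_dbdata WHERE pgno=2",
          -1, &pStmt, 0);
      if( rc==SQLITE_OK ){
        while( sqlite3_step(pStmt)==SQLITE_ROW ){
          printf("cell=%d field=%d value=%s\n",
              sqlite3_column_int(pStmt, 0),
              sqlite3_column_int(pStmt, 1),
              (const char*)sqlite3_column_text(pStmt, 2));
        }
        rc = sqlite3_finalize(pStmt);
      }
      return rc;
    }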
+*/ +static int dbdataOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + DbdataCursor *pCsr; + + pCsr = (DbdataCursor*)sqlite3_malloc64(sizeof(DbdataCursor)); + if( pCsr==0 ){ + return SQLITE_NOMEM; + }else{ + memset(pCsr, 0, sizeof(DbdataCursor)); + pCsr->base.pVtab = pVTab; + } + + *ppCursor = (sqlite3_vtab_cursor *)pCsr; + return SQLITE_OK; +} + +/* +** Restore a cursor object to the state it was in when first allocated +** by dbdataOpen(). +*/ +static void dbdataResetCursor(DbdataCursor *pCsr){ + DbdataTable *pTab = (DbdataTable*)(pCsr->base.pVtab); + if( pTab->pStmt==0 ){ + pTab->pStmt = pCsr->pStmt; + }else{ + sqlite3_finalize(pCsr->pStmt); + } + pCsr->pStmt = 0; + pCsr->iPgno = 1; + pCsr->iCell = 0; + pCsr->iField = 0; + pCsr->bOnePage = 0; + sqlite3_free(pCsr->aPage); + sqlite3_free(pCsr->pRec); + pCsr->pRec = 0; + pCsr->aPage = 0; +} + +/* +** Close an sqlite_dbdata or sqlite_dbptr cursor. +*/ +static int dbdataClose(sqlite3_vtab_cursor *pCursor){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + dbdataResetCursor(pCsr); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* +** Utility methods to decode 16 and 32-bit big-endian unsigned integers. +*/ +static u32 get_uint16(unsigned char *a){ + return (a[0]<<8)|a[1]; +} +static u32 get_uint32(unsigned char *a){ + return ((u32)a[0]<<24) + | ((u32)a[1]<<16) + | ((u32)a[2]<<8) + | ((u32)a[3]); +} + +/* +** Load page pgno from the database via the sqlite_dbpage virtual table. +** If successful, set (*ppPage) to point to a buffer containing the page +** data, (*pnPage) to the size of that buffer in bytes and return +** SQLITE_OK. In this case it is the responsibility of the caller to +** eventually free the buffer using sqlite3_free(). +** +** Or, if an error occurs, set both (*ppPage) and (*pnPage) to 0 and +** return an SQLite error code. +*/ +static int dbdataLoadPage( + DbdataCursor *pCsr, /* Cursor object */ + u32 pgno, /* Page number of page to load */ + u8 **ppPage, /* OUT: pointer to page buffer */ + int *pnPage /* OUT: Size of (*ppPage) in bytes */ +){ + int rc2; + int rc = SQLITE_OK; + sqlite3_stmt *pStmt = pCsr->pStmt; + + *ppPage = 0; + *pnPage = 0; + if( pgno>0 ){ + sqlite3_bind_int64(pStmt, 2, pgno); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + int nCopy = sqlite3_column_bytes(pStmt, 0); + if( nCopy>0 ){ + u8 *pPage; + pPage = (u8*)sqlite3_malloc64(nCopy + DBDATA_PADDING_BYTES); + if( pPage==0 ){ + rc = SQLITE_NOMEM; + }else{ + const u8 *pCopy = sqlite3_column_blob(pStmt, 0); + memcpy(pPage, pCopy, nCopy); + memset(&pPage[nCopy], 0, DBDATA_PADDING_BYTES); + } + *ppPage = pPage; + *pnPage = nCopy; + } + } + rc2 = sqlite3_reset(pStmt); + if( rc==SQLITE_OK ) rc = rc2; + } + + return rc; +} + +/* +** Read a varint. Put the value in *pVal and return the number of bytes. +*/ +static int dbdataGetVarint(const u8 *z, sqlite3_int64 *pVal){ + sqlite3_uint64 u = 0; + int i; + for(i=0; i<8; i++){ + u = (u<<7) + (z[i]&0x7f); + if( (z[i]&0x80)==0 ){ *pVal = (sqlite3_int64)u; return i+1; } + } + u = (u<<8) + (z[i]&0xff); + *pVal = (sqlite3_int64)u; + return 9; +} + +/* +** Like dbdataGetVarint(), but set the output to 0 if it is less than 0 +** or greater than 0xFFFFFFFF. This can be used for all varints in an +** SQLite database except for key values in intkey tables. 
+*/ +static int dbdataGetVarintU32(const u8 *z, sqlite3_int64 *pVal){ + sqlite3_int64 val; + int nRet = dbdataGetVarint(z, &val); + if( val<0 || val>0xFFFFFFFF ) val = 0; + *pVal = val; + return nRet; +} + +/* +** Return the number of bytes of space used by an SQLite value of type +** eType. +*/ +static int dbdataValueBytes(int eType){ + switch( eType ){ + case 0: case 8: case 9: + case 10: case 11: + return 0; + case 1: + return 1; + case 2: + return 2; + case 3: + return 3; + case 4: + return 4; + case 5: + return 6; + case 6: + case 7: + return 8; + default: + if( eType>0 ){ + return ((eType-12) / 2); + } + return 0; + } +} + +/* +** Load a value of type eType from buffer pData and use it to set the +** result of context object pCtx. +*/ +static void dbdataValue( + sqlite3_context *pCtx, + u32 enc, + int eType, + u8 *pData, + sqlite3_int64 nData +){ + if( eType>=0 && dbdataValueBytes(eType)<=nData ){ + switch( eType ){ + case 0: + case 10: + case 11: + sqlite3_result_null(pCtx); + break; + + case 8: + sqlite3_result_int(pCtx, 0); + break; + case 9: + sqlite3_result_int(pCtx, 1); + break; + + case 1: case 2: case 3: case 4: case 5: case 6: case 7: { + sqlite3_uint64 v = (signed char)pData[0]; + pData++; + switch( eType ){ + case 7: + case 6: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2; + case 5: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2; + case 4: v = (v<<8) + pData[0]; pData++; + case 3: v = (v<<8) + pData[0]; pData++; + case 2: v = (v<<8) + pData[0]; pData++; + } + + if( eType==7 ){ + double r; + memcpy(&r, &v, sizeof(r)); + sqlite3_result_double(pCtx, r); + }else{ + sqlite3_result_int64(pCtx, (sqlite3_int64)v); + } + break; + } + + default: { + int n = ((eType-12) / 2); + if( eType % 2 ){ + switch( enc ){ +#ifndef SQLITE_OMIT_UTF16 + case SQLITE_UTF16BE: + sqlite3_result_text16be(pCtx, (void*)pData, n, SQLITE_TRANSIENT); + break; + case SQLITE_UTF16LE: + sqlite3_result_text16le(pCtx, (void*)pData, n, SQLITE_TRANSIENT); + break; +#endif + default: + sqlite3_result_text(pCtx, (char*)pData, n, SQLITE_TRANSIENT); + break; + } + }else{ + sqlite3_result_blob(pCtx, pData, n, SQLITE_TRANSIENT); + } + } + } + } +} + +/* +** Move an sqlite_dbdata or sqlite_dbptr cursor to the next entry. +*/ +static int dbdataNext(sqlite3_vtab_cursor *pCursor){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; + + pCsr->iRowid++; + while( 1 ){ + int rc; + int iOff = (pCsr->iPgno==1 ? 100 : 0); + int bNextPage = 0; + + if( pCsr->aPage==0 ){ + while( 1 ){ + if( pCsr->bOnePage==0 && pCsr->iPgno>pCsr->szDb ) return SQLITE_OK; + rc = dbdataLoadPage(pCsr, pCsr->iPgno, &pCsr->aPage, &pCsr->nPage); + if( rc!=SQLITE_OK ) return rc; + if( pCsr->aPage ) break; + if( pCsr->bOnePage ) return SQLITE_OK; + pCsr->iPgno++; + } + pCsr->iCell = pTab->bPtr ? -2 : 0; + pCsr->nCell = get_uint16(&pCsr->aPage[iOff+3]); + } + + if( pTab->bPtr ){ + if( pCsr->aPage[iOff]!=0x02 && pCsr->aPage[iOff]!=0x05 ){ + pCsr->iCell = pCsr->nCell; + } + pCsr->iCell++; + if( pCsr->iCell>=pCsr->nCell ){ + sqlite3_free(pCsr->aPage); + pCsr->aPage = 0; + if( pCsr->bOnePage ) return SQLITE_OK; + pCsr->iPgno++; + }else{ + return SQLITE_OK; + } + }else{ + /* If there is no record loaded, load it now. 
*/ + if( pCsr->pRec==0 ){ + int bHasRowid = 0; + int nPointer = 0; + sqlite3_int64 nPayload = 0; + sqlite3_int64 nHdr = 0; + int iHdr; + int U, X; + int nLocal; + + switch( pCsr->aPage[iOff] ){ + case 0x02: + nPointer = 4; + break; + case 0x0a: + break; + case 0x0d: + bHasRowid = 1; + break; + default: + /* This is not a b-tree page with records on it. Continue. */ + pCsr->iCell = pCsr->nCell; + break; + } + + if( pCsr->iCell>=pCsr->nCell ){ + bNextPage = 1; + }else{ + + iOff += 8 + nPointer + pCsr->iCell*2; + if( iOff>pCsr->nPage ){ + bNextPage = 1; + }else{ + iOff = get_uint16(&pCsr->aPage[iOff]); + } + + /* For an interior node cell, skip past the child-page number */ + iOff += nPointer; + + /* Load the "byte of payload including overflow" field */ + if( bNextPage || iOff>pCsr->nPage ){ + bNextPage = 1; + }else{ + iOff += dbdataGetVarintU32(&pCsr->aPage[iOff], &nPayload); + } + + /* If this is a leaf intkey cell, load the rowid */ + if( bHasRowid && !bNextPage && iOffnPage ){ + iOff += dbdataGetVarint(&pCsr->aPage[iOff], &pCsr->iIntkey); + } + + /* Figure out how much data to read from the local page */ + U = pCsr->nPage; + if( bHasRowid ){ + X = U-35; + }else{ + X = ((U-12)*64/255)-23; + } + if( nPayload<=X ){ + nLocal = nPayload; + }else{ + int M, K; + M = ((U-12)*32/255)-23; + K = M+((nPayload-M)%(U-4)); + if( K<=X ){ + nLocal = K; + }else{ + nLocal = M; + } + } + + if( bNextPage || nLocal+iOff>pCsr->nPage ){ + bNextPage = 1; + }else{ + + /* Allocate space for payload. And a bit more to catch small buffer + ** overruns caused by attempting to read a varint or similar from + ** near the end of a corrupt record. */ + pCsr->pRec = (u8*)sqlite3_malloc64(nPayload+DBDATA_PADDING_BYTES); + if( pCsr->pRec==0 ) return SQLITE_NOMEM; + memset(pCsr->pRec, 0, nPayload+DBDATA_PADDING_BYTES); + pCsr->nRec = nPayload; + + /* Load the nLocal bytes of payload */ + memcpy(pCsr->pRec, &pCsr->aPage[iOff], nLocal); + iOff += nLocal; + + /* Load content from overflow pages */ + if( nPayload>nLocal ){ + sqlite3_int64 nRem = nPayload - nLocal; + u32 pgnoOvfl = get_uint32(&pCsr->aPage[iOff]); + while( nRem>0 ){ + u8 *aOvfl = 0; + int nOvfl = 0; + int nCopy; + rc = dbdataLoadPage(pCsr, pgnoOvfl, &aOvfl, &nOvfl); + assert( rc!=SQLITE_OK || aOvfl==0 || nOvfl==pCsr->nPage ); + if( rc!=SQLITE_OK ) return rc; + if( aOvfl==0 ) break; + + nCopy = U-4; + if( nCopy>nRem ) nCopy = nRem; + memcpy(&pCsr->pRec[nPayload-nRem], &aOvfl[4], nCopy); + nRem -= nCopy; + + pgnoOvfl = get_uint32(aOvfl); + sqlite3_free(aOvfl); + } + } + + iHdr = dbdataGetVarintU32(pCsr->pRec, &nHdr); + if( nHdr>nPayload ) nHdr = 0; + pCsr->nHdr = nHdr; + pCsr->pHdrPtr = &pCsr->pRec[iHdr]; + pCsr->pPtr = &pCsr->pRec[pCsr->nHdr]; + pCsr->iField = (bHasRowid ? -1 : 0); + } + } + }else{ + pCsr->iField++; + if( pCsr->iField>0 ){ + sqlite3_int64 iType; + if( pCsr->pHdrPtr>&pCsr->pRec[pCsr->nRec] ){ + bNextPage = 1; + }else{ + pCsr->pHdrPtr += dbdataGetVarintU32(pCsr->pHdrPtr, &iType); + pCsr->pPtr += dbdataValueBytes(iType); + } + } + } + + if( bNextPage ){ + sqlite3_free(pCsr->aPage); + sqlite3_free(pCsr->pRec); + pCsr->aPage = 0; + pCsr->pRec = 0; + if( pCsr->bOnePage ) return SQLITE_OK; + pCsr->iPgno++; + }else{ + if( pCsr->iField<0 || pCsr->pHdrPtr<&pCsr->pRec[pCsr->nHdr] ){ + return SQLITE_OK; + } + + /* Advance to the next cell. The next iteration of the loop will load + ** the record and so on. 
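As a worked example of the local-payload calculation above, for a rowid-table leaf page (bHasRowid set) with a 4096-byte page: U = 4096, so X = U-35 = 4061 and M = ((U-12)*32/255)-23 = 489. A record with nPayload = 5000 exceeds X, so K = M + ((nPayload-M) % (U-4)) = 489 + (4511 % 4092) = 908; since K <= X, nLocal = 908 bytes are read from the b-tree page itself and the remaining 4092 bytes come from a single overflow page (each overflow page carries U-4 bytes of content after its 4-byte next-page pointer).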
*/ + sqlite3_free(pCsr->pRec); + pCsr->pRec = 0; + pCsr->iCell++; + } + } + } + + assert( !"can't get here" ); + return SQLITE_OK; +} + +/* +** Return true if the cursor is at EOF. +*/ +static int dbdataEof(sqlite3_vtab_cursor *pCursor){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + return pCsr->aPage==0; +} + +/* +** Return true if nul-terminated string zSchema ends in "()". Or false +** otherwise. +*/ +static int dbdataIsFunction(const char *zSchema){ + size_t n = strlen(zSchema); + if( n>2 && zSchema[n-2]=='(' && zSchema[n-1]==')' ){ + return (int)n-2; + } + return 0; +} + +/* +** Determine the size in pages of database zSchema (where zSchema is +** "main", "temp" or the name of an attached database) and set +** pCsr->szDb accordingly. If successful, return SQLITE_OK. Otherwise, +** an SQLite error code. +*/ +static int dbdataDbsize(DbdataCursor *pCsr, const char *zSchema){ + DbdataTable *pTab = (DbdataTable*)pCsr->base.pVtab; + char *zSql = 0; + int rc, rc2; + int nFunc = 0; + sqlite3_stmt *pStmt = 0; + + if( (nFunc = dbdataIsFunction(zSchema))>0 ){ + zSql = sqlite3_mprintf("SELECT %.*s(0)", nFunc, zSchema); + }else{ + zSql = sqlite3_mprintf("PRAGMA %Q.page_count", zSchema); + } + if( zSql==0 ) return SQLITE_NOMEM; + + rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pStmt, 0); + sqlite3_free(zSql); + if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){ + pCsr->szDb = sqlite3_column_int(pStmt, 0); + } + rc2 = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ) rc = rc2; + return rc; +} + +/* +** Attempt to figure out the encoding of the database by retrieving page 1 +** and inspecting the header field. If successful, set the pCsr->enc variable +** and return SQLITE_OK. Otherwise, return an SQLite error code. +*/ +static int dbdataGetEncoding(DbdataCursor *pCsr){ + int rc = SQLITE_OK; + int nPg1 = 0; + u8 *aPg1 = 0; + rc = dbdataLoadPage(pCsr, 1, &aPg1, &nPg1); + assert( rc!=SQLITE_OK || nPg1==0 || nPg1>=512 ); + if( rc==SQLITE_OK && nPg1>0 ){ + pCsr->enc = get_uint32(&aPg1[56]); + } + sqlite3_free(aPg1); + return rc; +} + + +/* +** xFilter method for sqlite_dbdata and sqlite_dbptr. +*/ +static int dbdataFilter( + sqlite3_vtab_cursor *pCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; + int rc = SQLITE_OK; + const char *zSchema = "main"; + + dbdataResetCursor(pCsr); + assert( pCsr->iPgno==1 ); + if( idxNum & 0x01 ){ + zSchema = (const char*)sqlite3_value_text(argv[0]); + if( zSchema==0 ) zSchema = ""; + } + if( idxNum & 0x02 ){ + pCsr->iPgno = sqlite3_value_int(argv[(idxNum & 0x01)]); + pCsr->bOnePage = 1; + }else{ + rc = dbdataDbsize(pCsr, zSchema); + } + + if( rc==SQLITE_OK ){ + int nFunc = 0; + if( pTab->pStmt ){ + pCsr->pStmt = pTab->pStmt; + pTab->pStmt = 0; + }else if( (nFunc = dbdataIsFunction(zSchema))>0 ){ + char *zSql = sqlite3_mprintf("SELECT %.*s(?2)", nFunc, zSchema); + if( zSql==0 ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pStmt, 0); + sqlite3_free(zSql); + } + }else{ + rc = sqlite3_prepare_v2(pTab->db, + "SELECT data FROM sqlite_dbpage(?) WHERE pgno=?", -1, + &pCsr->pStmt, 0 + ); + } + } + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_text(pCsr->pStmt, 1, zSchema, -1, SQLITE_TRANSIENT); + }else{ + pTab->base.zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pTab->db)); + } + + /* Try to determine the encoding of the db by inspecting the header + ** field on page 1. 
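For reference, the value read here is the 4-byte big-endian integer at byte offset 56 of the database header: 1 for UTF-8, 2 for UTF-16le and 3 for UTF-16be, which line up with the SQLITE_UTF8, SQLITE_UTF16LE and SQLITE_UTF16BE constants that dbdataValue() compares the cursor's enc field against.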
*/ + if( rc==SQLITE_OK ){ + rc = dbdataGetEncoding(pCsr); + } + + if( rc==SQLITE_OK ){ + rc = dbdataNext(pCursor); + } + return rc; +} + +/* +** Return a column for the sqlite_dbdata or sqlite_dbptr table. +*/ +static int dbdataColumn( + sqlite3_vtab_cursor *pCursor, + sqlite3_context *ctx, + int i +){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + DbdataTable *pTab = (DbdataTable*)pCursor->pVtab; + if( pTab->bPtr ){ + switch( i ){ + case DBPTR_COLUMN_PGNO: + sqlite3_result_int64(ctx, pCsr->iPgno); + break; + case DBPTR_COLUMN_CHILD: { + int iOff = pCsr->iPgno==1 ? 100 : 0; + if( pCsr->iCell<0 ){ + iOff += 8; + }else{ + iOff += 12 + pCsr->iCell*2; + if( iOff>pCsr->nPage ) return SQLITE_OK; + iOff = get_uint16(&pCsr->aPage[iOff]); + } + if( iOff<=pCsr->nPage ){ + sqlite3_result_int64(ctx, get_uint32(&pCsr->aPage[iOff])); + } + break; + } + } + }else{ + switch( i ){ + case DBDATA_COLUMN_PGNO: + sqlite3_result_int64(ctx, pCsr->iPgno); + break; + case DBDATA_COLUMN_CELL: + sqlite3_result_int(ctx, pCsr->iCell); + break; + case DBDATA_COLUMN_FIELD: + sqlite3_result_int(ctx, pCsr->iField); + break; + case DBDATA_COLUMN_VALUE: { + if( pCsr->iField<0 ){ + sqlite3_result_int64(ctx, pCsr->iIntkey); + }else if( &pCsr->pRec[pCsr->nRec] >= pCsr->pPtr ){ + sqlite3_int64 iType; + dbdataGetVarintU32(pCsr->pHdrPtr, &iType); + dbdataValue( + ctx, pCsr->enc, iType, pCsr->pPtr, + &pCsr->pRec[pCsr->nRec] - pCsr->pPtr + ); + } + break; + } + } + } + return SQLITE_OK; +} + +/* +** Return the rowid for an sqlite_dbdata or sqlite_dptr table. +*/ +static int dbdataRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + DbdataCursor *pCsr = (DbdataCursor*)pCursor; + *pRowid = pCsr->iRowid; + return SQLITE_OK; +} + + +/* +** Invoke this routine to register the "sqlite_dbdata" virtual table module +*/ +static int sqlite3DbdataRegister(sqlite3 *db){ + static sqlite3_module dbdata_module = { + 0, /* iVersion */ + 0, /* xCreate */ + dbdataConnect, /* xConnect */ + dbdataBestIndex, /* xBestIndex */ + dbdataDisconnect, /* xDisconnect */ + 0, /* xDestroy */ + dbdataOpen, /* xOpen - open a cursor */ + dbdataClose, /* xClose - close a cursor */ + dbdataFilter, /* xFilter - configure scan constraints */ + dbdataNext, /* xNext - advance a cursor */ + dbdataEof, /* xEof - check for end of scan */ + dbdataColumn, /* xColumn - read data */ + dbdataRowid, /* xRowid - read data */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindMethod */ + 0, /* xRename */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0, /* xRollbackTo */ + 0 /* xShadowName */ + }; + + int rc = sqlite3_create_module(db, "sqlite_dbdata", &dbdata_module, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3_create_module(db, "sqlite_dbptr", &dbdata_module, (void*)1); + } + return rc; +} + +#ifdef _WIN32 +__declspec(dllexport) +#endif +int sqlite3_dbdata_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi); + return sqlite3DbdataRegister(db); +} + +#endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */ ADDED ext/recover/recover1.test Index: ext/recover/recover1.test ================================================================== --- /dev/null +++ ext/recover/recover1.test @@ -0,0 +1,320 @@ +# 2022 August 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recover1 + +proc compare_result {db1 db2 sql} { + set r1 [$db1 eval $sql] + set r2 [$db2 eval $sql] + if {$r1 != $r2} { + puts "r1: $r1" + puts "r2: $r2" + error "mismatch for $sql" + } + return "" +} + +proc compare_dbs {db1 db2} { + compare_result $db1 $db2 "SELECT sql FROM sqlite_master ORDER BY 1" + foreach tbl [$db1 eval {SELECT name FROM sqlite_master WHERE type='table'}] { + compare_result $db1 $db2 "SELECT * FROM $tbl" + } + + compare_result $db1 $db2 "PRAGMA page_size" + compare_result $db1 $db2 "PRAGMA auto_vacuum" + compare_result $db1 $db2 "PRAGMA encoding" + compare_result $db1 $db2 "PRAGMA user_version" + compare_result $db1 $db2 "PRAGMA application_id" +} + +proc do_recover_test {tn} { + forcedelete test.db2 + forcedelete rstate.db + + uplevel [list do_test $tn.1 { + set R [sqlite3_recover_init db main test.db2] + $R config testdb rstate.db + $R run + $R finish + } {}] + + sqlite3 db2 test.db2 + uplevel [list do_test $tn.2 [list compare_dbs db db2] {}] + db2 close + + forcedelete test.db2 + forcedelete rstate.db + + uplevel [list do_test $tn.3 { + set ::sqlhook [list] + set R [sqlite3_recover_init_sql db main my_sql_hook] + $R config testdb rstate.db + $R config rowids 1 + $R run + $R finish + } {}] + + sqlite3 db2 test.db2 + execsql [join $::sqlhook ";"] db2 + db2 close + sqlite3 db2 test.db2 + uplevel [list do_test $tn.4 [list compare_dbs db db2] {}] + db2 close +} + +proc my_sql_hook {sql} { + lappend ::sqlhook $sql + return 0 +} + +do_execsql_test 1.0 { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + CREATE TABLE t2(a INTEGER PRIMARY KEY, b) WITHOUT ROWID; + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<10 + ) + INSERT INTO t1 SELECT i*2, hex(randomblob(250)) FROM s; + INSERT INTO t2 SELECT * FROM t1; +} + +do_recover_test 1 + +do_execsql_test 2.0 { + ALTER TABLE t1 ADD COLUMN c DEFAULT 'xyz' +} +do_recover_test 2 + +do_execsql_test 3.0 { + CREATE INDEX i1 ON t1(c); +} +do_recover_test 3 + +do_execsql_test 4.0 { + CREATE VIEW v1 AS SELECT * FROM t2; +} +do_recover_test 4 + +do_execsql_test 5.0 { + CREATE UNIQUE INDEX i2 ON t1(c, b); +} +do_recover_test 5 + +#-------------------------------------------------------------------------- +# +reset_db +do_execsql_test 6.0 { + CREATE TABLE t1( + a INTEGER PRIMARY KEY, + b INT, + c TEXT, + d INT GENERATED ALWAYS AS (a*abs(b)) VIRTUAL, + e TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) STORED, + f TEXT GENERATED ALWAYS AS (substr(c,b,b+1)) STORED + ); + + INSERT INTO t1 VALUES(1, 2, 'hello world'); +} +do_recover_test 6 + +do_execsql_test 7.0 { + CREATE TABLE t2(i, j GENERATED ALWAYS AS (i+1) STORED, k); + INSERT INTO t2 VALUES(10, 'ten'); +} +do_execsql_test 7.1 { + SELECT * FROM t2 +} {10 11 ten} + +do_recover_test 7.2 + +#-------------------------------------------------------------------------- +# +reset_db +do_execsql_test 8.0 { + CREATE TABLE x1(a INTEGER PRIMARY KEY AUTOINCREMENT, b, c); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<2 + ) + INSERT INTO x1(b, c) SELECT hex(randomblob(100)), hex(randomblob(100)) FROM s; + + CREATE INDEX x1b ON x1(b); + CREATE INDEX x1cb ON x1(c, b); + DELETE FROM x1 WHERE a>50; + + ANALYZE; +} + +do_recover_test 8 + +#------------------------------------------------------------------------- +reset_db +ifcapable fts5 { + do_execsql_test 9.1 { + CREATE VIRTUAL TABLE ft5 USING fts5(a, b); + INSERT INTO 
ft5 VALUES('hello', 'world'); + } + do_recover_test 9 +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 10.1 { + CREATE TABLE x1(a PRIMARY KEY, str TEXT) WITHOUT ROWID; + INSERT INTO x1 VALUES(1, ' + \nhello\012world(\n0)(\n1) + '); + INSERT INTO x1 VALUES(2, ' + \nhello + '); +} +do_execsql_test 10.2 " + INSERT INTO x1 VALUES(3, '\012hello there\015world'); + INSERT INTO x1 VALUES(4, '\015hello there\015world'); +" +do_recover_test 10 + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 11.1 { + PRAGMA page_size = 4096; + PRAGMA encoding='utf16'; + PRAGMA auto_vacuum = 2; + PRAGMA user_version = 45; + PRAGMA application_id = 22; + + CREATE TABLE u1(u, v); + INSERT INTO u1 VALUES('edvin marton', 'bond'); + INSERT INTO u1 VALUES(1, 4.0); +} +do_execsql_test 11.1a { + PRAGMA auto_vacuum; +} {2} + +do_recover_test 11 + +do_test 12.1 { + set R [sqlite3_recover_init db "" test.db2] + $R config lostandfound "" + $R config invalid xyz +} {12} +do_test 12.2 { + $R run + $R run +} {0} + +do_test 12.3 { + $R finish +} {} + + + +#------------------------------------------------------------------------- +reset_db +file_control_reservebytes db 16 +do_execsql_test 12.1 { + PRAGMA auto_vacuum = 2; + PRAGMA user_version = 45; + PRAGMA application_id = 22; + + CREATE TABLE u1(u, v); + CREATE UNIQUE INDEX i1 ON u1(u, v); + INSERT INTO u1 VALUES(1, 2), (3, 4); + + CREATE TABLE u2(u, v); + CREATE UNIQUE INDEX i2 ON u1(u, v); + INSERT INTO u2 VALUES(hex(randomblob(500)), hex(randomblob(1000))); + INSERT INTO u2 VALUES(hex(randomblob(500)), hex(randomblob(1000))); + INSERT INTO u2 VALUES(hex(randomblob(500)), hex(randomblob(1000))); + INSERT INTO u2 VALUES(hex(randomblob(50000)), hex(randomblob(20000))); +} + +do_recover_test 12 + +#------------------------------------------------------------------------- +reset_db +sqlite3 db "" +do_recover_test 13 + +do_execsql_test 14.1 { + PRAGMA auto_vacuum = 2; + PRAGMA user_version = 45; + PRAGMA application_id = 22; + + CREATE TABLE u1(u, v); + CREATE UNIQUE INDEX i1 ON u1(u, v); + INSERT INTO u1 VALUES(1, 2), (3, 4); + + CREATE TABLE u2(u, v); + CREATE UNIQUE INDEX i2 ON u1(u, v); + INSERT INTO u2 VALUES(hex(randomblob(500)), hex(randomblob(1000))); + INSERT INTO u2 VALUES(hex(randomblob(500)), hex(randomblob(1000))); + INSERT INTO u2 VALUES(hex(randomblob(500)), hex(randomblob(1000))); + INSERT INTO u2 VALUES(hex(randomblob(50000)), hex(randomblob(20000))); +} +do_recover_test 14 + +#------------------------------------------------------------------------- +reset_db +execsql { + PRAGMA journal_mode=OFF; + PRAGMA mmap_size=10; +} +do_execsql_test 15.1 { + CREATE TABLE t1(x); +} {} +do_recover_test 15 + +#------------------------------------------------------------------------- +reset_db +if {[wal_is_capable]} { + do_execsql_test 16.1 { + PRAGMA journal_mode = wal; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1), (2), (3); + } {wal} + do_test 16.2 { + set R [sqlite3_recover_init db main test.db2] + $R run + $R finish + } {} + do_execsql_test 16.3 { + SELECT * FROM t1; + } {1 2 3} + + do_execsql_test 16.4 { + BEGIN; + SELECT * FROM t1; + } {1 2 3} + do_test 16.5 { + set R [sqlite3_recover_init db main test.db2] + $R run + list [catch { $R finish } msg] $msg + } {1 {cannot start a transaction within a transaction}} + do_execsql_test 16.6 { + SELECT * FROM t1; + } {1 2 3} + do_execsql_test 16.7 { + INSERT INTO t1 VALUES(4); + } + do_test 16.8 { + set R 
[sqlite3_recover_init db main test.db2] + $R run + list [catch { $R finish } msg] $msg + } {1 {cannot start a transaction within a transaction}} + do_execsql_test 16.9 { + SELECT * FROM t1; + COMMIT; + } {1 2 3 4} +} + +finish_test + ADDED ext/recover/recover_common.tcl Index: ext/recover/recover_common.tcl ================================================================== --- /dev/null +++ ext/recover/recover_common.tcl @@ -0,0 +1,14 @@ + + +if {![info exists testdir]} { + set testdir [file join [file dirname [info script]] .. .. test] +} +source $testdir/tester.tcl + +if {[info commands sqlite3_recover_init]==""} { + finish_test + return -code return +} + + + ADDED ext/recover/recoverclobber.test Index: ext/recover/recoverclobber.test ================================================================== --- /dev/null +++ ext/recover/recoverclobber.test @@ -0,0 +1,50 @@ +# 2019 April 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests for the SQLITE_RECOVER_ROWIDS option. +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recoverclobber + +proc recover {db output} { + set R [sqlite3_recover_init db main test.db2] + $R run + $R finish +} + +forcedelete test.db2 +do_execsql_test 1.0 { + ATTACH 'test.db2' AS aux; + CREATE TABLE aux.x1(x, one); + INSERT INTO x1 VALUES(1, 'one'), (2, 'two'), (3, 'three'); + + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 1), (2, 2), (3, 3), (4, 4); + + DETACH aux; +} + +breakpoint +do_test 1.1 { + recover db test.db2 +} {} + +do_execsql_test 1.2 { + ATTACH 'test.db2' AS aux; + SELECT * FROM aux.t1; +} {1 1 2 2 3 3 4 4} + +do_catchsql_test 1.3 { + SELECT * FROM aux.x1; +} {1 {no such table: aux.x1}} + +finish_test ADDED ext/recover/recovercorrupt.test Index: ext/recover/recovercorrupt.test ================================================================== --- /dev/null +++ ext/recover/recovercorrupt.test @@ -0,0 +1,67 @@ +# 2022 August 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recovercorrupt + +database_may_be_corrupt + +do_execsql_test 1.0 { + PRAGMA page_size = 512; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(2, hex(randomblob(100)), randomblob(200)); + CREATE INDEX i1 ON t1(b, c); + CREATE TABLE t2(a PRIMARY KEY, b, c) WITHOUT ROWID; + INSERT INTO t2 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(2, hex(randomblob(100)), randomblob(200)); + ANALYZE; + PRAGMA writable_schema = 1; + DELETE FROM sqlite_schema WHERE name='t2'; +} + +do_test 1.1 { + expr [file size test.db]>3072 +} {1} + +proc toggle_bit {blob bit} { + set byte [expr {$bit / 8}] + set bit [expr {$bit & 0x0F}] + binary scan $blob a${byte}ca* A x B + set x [expr {$x ^ (1 << $bit)}] + binary format a*ca* $A $x $B +} + + +db_save_and_close +for {set ii 0} {$ii < 10000} {incr ii} { + db_restore_and_reopen + db func toggle_bit toggle_bit + set bitsperpage [expr 512*8] + + set pg [expr {($ii / $bitsperpage)+1}] + set byte [expr {$ii % $bitsperpage}] + db eval { + UPDATE sqlite_dbpage SET data = toggle_bit(data, $byte) WHERE pgno=$pg + } + + set R [sqlite3_recover_init db main test.db2] + $R config lostandfound lost_and_found + $R run + do_test 1.2.$ii { + $R finish + } {} +} + + +finish_test + ADDED ext/recover/recovercorrupt2.test Index: ext/recover/recovercorrupt2.test ================================================================== --- /dev/null +++ ext/recover/recovercorrupt2.test @@ -0,0 +1,289 @@ +# 2022 August 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recovercorrupt2 + +do_execsql_test 1.0 { + PRAGMA page_size = 512; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(2, hex(randomblob(100)), randomblob(200)); + CREATE INDEX i1 ON t1(b, c); + CREATE TABLE t2(a PRIMARY KEY, b, c) WITHOUT ROWID; + INSERT INTO t2 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(2, hex(randomblob(100)), randomblob(200)); + ANALYZE; + PRAGMA writable_schema = 1; + UPDATE sqlite_schema SET sql = 'CREATE INDEX i1 ON o(world)' WHERE name='i1'; + DELETE FROM sqlite_schema WHERE name='sqlite_stat4'; +} + +do_test 1.1 { + set R [sqlite3_recover_init db main test.db2] + $R run + $R finish +} {} + +sqlite3 db2 test.db2 +do_execsql_test -db db2 1.2 { + SELECT sql FROM sqlite_schema +} { + {CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c)} + {CREATE TABLE t2(a PRIMARY KEY, b, c) WITHOUT ROWID} + {CREATE TABLE sqlite_stat1(tbl,idx,stat)} +} +db2 close + +do_execsql_test 1.3 { + PRAGMA writable_schema = 1; + UPDATE sqlite_schema SET sql = 'CREATE TABLE t2 syntax error!' 
WHERE name='t2'; +} + +do_test 1.4 { + set R [sqlite3_recover_init db main test.db2] + $R run + $R finish +} {} + +sqlite3 db2 test.db2 +do_execsql_test -db db2 1.5 { + SELECT sql FROM sqlite_schema +} { + {CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c)} + {CREATE TABLE sqlite_stat1(tbl,idx,stat)} +} +db2 close + +#------------------------------------------------------------------------- +# +reset_db +do_test 2.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +| size 8192 pagesize 4096 filename x3.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 02 00 00 00 02 .....@ ........ +| 32: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 04 ................ +| 48: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 ................ +| 80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ................ +| 96: 00 2e 63 00 0d 00 00 00 01 0f d8 00 0f d8 00 00 ..c............. +| 4048: 00 00 00 00 00 00 00 00 26 01 06 17 11 11 01 39 ........&......9 +| 4064: 74 61 62 6c 65 74 31 74 31 02 43 52 45 41 54 45 tablet1t1.CREATE +| 4080: 20 54 41 42 4c 45 20 74 31 28 61 2c 62 2c 63 29 TABLE t1(a,b,c) +| page 2 offset 4096 +| 0: 0d 00 00 00 01 0f ce 00 0f ce 00 00 00 00 00 00 ................ +| 4032: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ..............(. +| 4048: ff ff ff ff ff ff ff 28 04 27 25 23 61 61 61 61 .........'%#aaaa +| 4064: 61 61 61 61 61 61 61 61 61 62 62 62 62 62 62 62 aaaaaaaaabbbbbbb +| 4080: 62 62 62 62 62 63 63 63 63 63 63 63 63 63 63 63 bbbbbccccccccccc +| end x3.db +}]} {} + +do_test 2.1 { + set R [sqlite3_recover_init db main test.db2] + $R run + $R finish +} {} + +sqlite3 db2 test.db2 +do_execsql_test -db db2 2.2 { + SELECT sql FROM sqlite_schema +} { + {CREATE TABLE t1(a,b,c)} +} +do_execsql_test -db db2 2.3 { + SELECT * FROM t1 +} {} +db2 close + +#------------------------------------------------------------------------- +# +reset_db +do_test 3.0 { + sqlite3 db {} + db deserialize [decode_hexdb { + .open --hexdb + | size 4096 pagesize 1024 filename corrupt032.txt.db + | page 1 offset 0 + | 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. + | 16: 04 00 01 01 08 40 20 20 00 00 00 02 00 00 00 03 .....@ ........ + | 32: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 04 ................ + | 48: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 ................ + | 80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 ................ + | 96: 00 2e 24 80 0d 00 00 00 01 03 d4 00 03 d4 00 00 ..$............. + | 976: 00 00 00 00 22 01 06 17 11 11 01 31 74 61 62 6c ...........1tabl + | 992: 65 74 31 74 31 02 43 52 45 41 54 45 20 54 41 42 et1t1.CREATE TAB + | 1008: 4c 45 20 74 31 28 78 29 00 00 00 00 00 00 00 00 LE t1(x)........ + | page 2 offset 1024 + | 0: 0d 00 00 00 01 02 06 00 02 06 00 00 00 00 00 00 ................ + | 512: 00 00 00 00 00 00 8b 60 01 03 97 46 00 00 00 00 .......`...F.... + | 1008: 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 00 ................ + | end corrupt032.txt.db +}]} {} + +do_test 3.1 { + set R [sqlite3_recover_init db main test.db2] + $R run + $R finish +} {} + +#------------------------------------------------------------------------- +# +reset_db +do_test 4.0 { + sqlite3 db {} + db deserialize [decode_hexdb { + .open --hexdb + | size 4096 pagesize 4096 filename crash-00f2d3627f1b43.db + | page 1 offset 0 + | 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. + | 16: 00 01 01 02 00 40 20 20 01 00 ff 00 42 01 10 01 .....@ ....B... 
+ | 32: ef 00 00 87 00 ff ff ff f0 01 01 10 ff ff 00 00 ................ + | end crash-00f2d3627f1b43.db +}]} {} + +do_test 4.1 { + set R [sqlite3_recover_init db main test.db2] + catch { $R run } + list [catch { $R finish } msg] $msg +} {1 {unable to open database file}} + +#------------------------------------------------------------------------- +# +reset_db +do_test 5.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +.open --hexdb +| size 16384 pagesize 4096 filename crash-7b75760a4c5f15.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 04 .....@ ........ +| 32: 00 00 00 00 00 00 00 00 00 00 00 03 00 00 00 00 ................ +| 96: 00 00 00 00 0d 00 00 00 03 0f 4e 00 0f bc 0f 90 ..........N..... +| 112: 0f 4e 00 00 00 00 00 00 00 00 00 00 00 00 00 00 .N.............. +| 3904: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 40 03 ..............@. +| 3920: 06 17 11 11 01 6d 74 61 62 6c 65 74 32 74 32 04 .....mtablet2t2. +| 3936: 43 52 45 41 54 45 20 54 41 42 4c 45 20 74 32 28 CREATE TABLE t2( +| 3952: 78 2c 79 2c 7a 20 50 52 49 4d 41 52 59 20 4b 45 x,y,z PRIMARY KE +| 3968: 59 29 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 Y) WITHOUT ROWID +| 3984: 2a 02 06 17 13 11 01 3f 69 6e 64 65 78 74 31 61 *......?indext1a +| 4000: 74 31 03 43 52 45 41 54 45 20 49 4e 44 45 58 20 t1.CREATE INDEX +| 4016: 74 31 61 20 4f 4e 20 74 31 28 61 29 42 01 06 17 t1a ON t1(a)B... +| 4032: 11 11 01 71 74 61 62 6c 65 74 31 74 31 02 43 52 ...qtablet1t1.CR +| 4048: 45 41 54 45 20 54 41 42 4c 45 20 74 31 28 61 20 EATE TABLE t1(a +| 4064: 49 4e 54 2c 62 20 54 45 58 54 2c 63 20 42 4c 4f INT,b TEXT,c BLO +| 4080: 42 2c 64 20 52 45 41 4c 29 20 53 54 52 49 43 54 B,d REAL) STRICT +| page 2 offset 4096 +| 0: 0d 00 00 00 14 0c ae 00 0f df 0f bd 0f 9a 0f 76 ...............v +| 16: 0f 51 0f 2b 0f 04 0e dc 0e b3 0e 89 0e 5e 0e 32 .Q.+.........^.2 +| 32: 0e 05 0d 1a 0d a8 0d 78 0d 47 0d 15 0c e2 00 00 .......x.G...... +| 3232: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 32 14 ..............2. +| 3248: 05 06 3f 34 07 15 f4 c9 23 af e2 b3 b6 61 62 63 ..?4....#....abc +| 3264: 30 32 30 78 79 7a 01 00 00 00 00 00 00 00 00 00 020xyz.......... +| 3280: 00 00 00 00 00 00 00 00 00 00 c3 b0 96 7e fb 4e .............~.N +| 3296: c5 4c 31 13 05 06 1f 32 07 dd f2 2a a5 7e b2 4d .L1....2...*.~.M +| 3312: 82 61 62 63 30 31 39 78 79 7a 01 00 00 00 00 00 .abc019xyz...... +| 3328: 00 00 00 00 00 00 00 00 00 00 00 00 00 c3 a3 d6 ................ +| 3344: e9 f1 c2 fd f3 30 12 05 06 1f 30 07 8f 8f f5 c4 .....0....0..... +| 3360: 35 b6 7f 8d 61 62 63 30 31 38 00 00 00 00 00 00 5...abc018...... +| 3376: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 43 ...............C +| 3392: b2 13 1f 9d 56 8a 47 21 b1 05 06 1f 2e 07 7f 46 ....V.G!.......F +| 3408: 91 03 3f 97 fb f7 61 62 63 30 00 00 00 00 00 00 ..?...abc0...... +| 3440: c3 bb d8 96 86 c2 e8 2b 2e 10 05 06 1f 2c 07 6d .......+.....,.m +| 3456: 85 7b ce d0 32 d2 54 61 62 63 30 00 00 00 00 00 ....2.Tabc0..... +| 3488: 43 a1 eb 44 14 dc 03 7b 2d 0f 05 06 1f 2a 07 d9 C..D....-....*.. +| 3504: ab ec bf 34 51 70 f3 61 62 63 30 31 35 78 79 7a ...4Qp.abc015xyz +| 3520: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3 ................ +| 3536: b6 3d f4 46 b1 6a af 2c 0e 05 06 1f 28 07 36 75 .=.F.j.,....(.6u +| 3552: e9 a2 bd 05 04 ea 61 62 63 30 31 34 78 79 7a 00 ......abc014xyz. 
+| 3568: 00 00 00 00 00 00 00 00 00 00 00 00 00 c3 ab 23 ...............# +| 3584: a7 6a 34 ca f8 2b 0d 05 06 1f 26 07 48 45 ab e0 .j4..+....&.HE.. +| 3600: 8c 7c ff 0c 61 62 63 30 31 33 78 79 7a 00 00 00 .|..abc013xyz... +| 3616: 00 00 00 00 0d d0 00 00 00 00 43 b8 d3 93 f4 92 ..........C..... +| 3632: 5b 7a 2a 0c 05 06 1f 24 07 be 6d 1e db 61 5d 80 [z*....$..m..a]. +| 3648: 9f 61 62 63 30 31 32 78 79 7a 00 00 00 00 00 00 .abc012xyz...... +| 3664: 00 00 00 00 00 00 43 b5 a1 a4 af 7b c6 60 29 0b ......C......`). +| 3680: 05 06 1f 22 07 6e a2 a3 64 68 d4 a6 bd 61 62 63 .....n..dh...abc +| 3696: 30 31 31 78 79 7a 00 00 00 00 00 00 00 00 00 00 011xyz.......... +| 3712: 00 c3 c4 1e ff 0f fc e6 ff 28 0a 05 06 1f 20 07 .........(.... . +| 3728: 50 f9 4a bb a5 7a 1e ca 61 62 63 30 31 30 78 79 P.J..z..abc010xy +| 3744: 7a 00 00 00 00 00 00 00 00 00 00 c3 a7 90 ed d9 z............... +| 3760: 5c 2c d5 27 09 05 06 1f 1e 07 90 8e 1d d9 1c 3a .,.'...........: +| 3776: e8 c1 61 62 63 30 30 39 78 79 7a 00 00 00 00 00 ..abc009xyz..... +| 3792: 00 00 00 00 43 a7 97 87 cf b0 ff 79 26 08 05 06 ....C......y&... +| 3808: 1f 1c 07 86 65 f6 7c 50 7a 2c 76 61 62 63 30 30 ....e.|Pz,vabc00 +| 3824: 38 78 79 7a 00 00 00 00 00 00 00 00 c3 b0 e3 4c 8xyz...........L +| 3840: 4f d3 41 b5 25 07 05 06 1f 1a 07 8b 20 e5 68 11 O.A.%....... .h. +| 3856: 13 55 87 61 62 63 30 30 37 78 79 7a 00 00 00 00 .U.abc007xyz.... +| 3872: 00 00 00 c3 b6 a3 74 f1 9c 33 f8 24 06 05 06 1f ......t..3.$.... +| 3888: 18 07 97 3c bc 34 49 94 54 ab 61 62 63 30 30 36 ...<.4I.T.abc006 +| 3904: 78 79 7a 00 00 00 00 00 00 c3 88 00 c2 ca 4c 4d xyz...........LM +| 3920: d3 23 05 05 06 1f 16 07 59 37 11 10 e9 e5 3d d5 .#......Y7....=. +| 3936: 61 62 63 30 30 35 78 79 7a 00 00 00 00 00 c3 c0 abc005xyz....... +| 3952: 15 12 67 ed 4b 79 22 04 05 06 1f 14 07 93 39 01 ..g.Ky........9. +| 3968: 7f b8 c7 99 58 61 62 63 30 30 34 78 79 7a 00 00 ....Xabc004xyz.. +| 3984: 09 c0 43 bf e0 e7 6d 70 fd 61 21 03 05 06 1f 12 ..C...mp.a!..... +| 4000: 07 b6 df 8d 8b 27 08 22 5a 61 62 63 30 30 33 78 .....'..Zabc003x +| 4016: 79 7a 00 00 00 c3 c7 ea 0f dc dd 32 22 20 02 05 yz.........2. .. +| 4032: 06 1f 10 07 2f a6 da 71 df 66 b3 b5 61 62 63 30 ..../..q.f..abc0 +| 4048: 30 32 78 79 7a 00 00 c3 ce d9 8d e9 ec 20 45 1f 02xyz........ E. +| 4064: 01 05 06 1f 0e 07 5a 47 53 20 3b 48 8f c0 61 62 ......ZGS ;H..ab +| 4080: 63 30 30 31 78 79 7a 00 c3 c9 e6 81 f8 d9 24 04 c001xyz.......$. +| page 3 offset 8192 +| 0: 0a 00 00 00 14 0e fd 00 0f f3 0f e6 0f d9 0f cc ................ +| 16: 0f bf 0f b2 0f a5 0f 98 0f 8b 0f 7e 0f 71 0f 64 ...........~.q.d +| 32: 0f 57 0f 4a 0f 3d 0f 30 0f 24 00 00 00 00 00 00 .W.J.=.0.$...... +| 3824: 00 00 00 00 00 00 00 00 00 00 00 00 00 0c 03 06 ................ +| 3840: 01 7f 46 91 03 3f 97 fb f7 11 0c 03 06 01 6e a2 ..F..?........n. +| 3856: a3 64 68 d4 a6 bd 0b 0c 03 06 01 6d 85 7b ce d0 .dh........m.... +| 3872: 32 d2 54 10 0b 03 06 09 5a 47 53 20 3b 48 8f c0 2.T.....ZGS ;H.. +| 3888: 0c 03 06 01 59 37 11 10 e9 e5 3d d5 05 0c 03 06 ....Y7....=..... +| 3904: 01 50 f9 4a bb a5 7a 1e ca 0a 0c 03 06 01 48 45 .P.J..z.......HE +| 3920: ab e0 8c 7c ff 0c 0d 0c 03 06 01 36 75 e9 a2 bd ...|.......6u... +| 3936: 05 04 ea 0e 0c 03 06 01 2f a6 da 71 df 66 b3 b5 ......../..q.f.. +| 3952: 02 0c 03 06 01 15 f4 c9 23 af e2 b3 b6 14 0c 03 ........#....... +| 3968: 06 01 dd f2 2a a5 7e b2 4d 82 13 0c 03 06 01 d9 ....*.~.M....... +| 3984: ab ec bf 34 51 70 f3 0f 0c 03 06 01 be 6d 1e db ...4Qp.......m.. 
+| 4000: 61 5d 80 9f 0c 0c 03 06 01 b6 df 8d 8b 27 08 22 a]...........'.. +| 4016: 5a 03 0c 03 06 01 97 3c bc 34 49 94 54 ab 06 0c Z......<.4I.T... +| 4032: 03 06 01 93 39 01 7f b8 c7 99 58 04 0c 03 06 01 ....9.....X..... +| 4048: 90 8e 1d d9 1c 3a e8 c1 09 0c 03 06 01 8f 8f f5 .....:.......... +| 4064: c4 35 b6 7f 8d 12 0c 03 06 01 8b 20 e5 68 11 13 .5......... .h.. +| 4080: 55 87 07 0c 03 06 01 86 65 f6 7c 50 7a 2b 06 08 U.......e.|Pz+.. +| page 4 offset 12288 +| 0: 0a 00 00 00 14 0f 62 00 0f 7a 0f a1 0f c9 0f d9 ......b..z...... +| 16: 0f 81 0f d1 0f f1 0f f9 0f e1 0f 89 0e 6a 0f c1 .............j.. +| 32: 0f 91 0f 99 0f b9 0f 72 0f 62 0f e9 0f b1 0f a9 .......r.b...... +| 3936: 00 00 07 04 01 01 01 11 0e 9e 07 04 01 01 01 0b ................ +| 3952: 31 16 07 04 01 01 01 10 37 36 06 04 09 01 01 ab 1.......76...... +| 3968: 58 07 04 01 01 01 05 1c 28 07 04 01 01 01 0a 10 X.......(....... +| 3984: cf 07 04 01 01 01 0d b2 e3 07 04 01 01 01 0e d3 ................ +| 4000: f2 07 04 01 01 01 02 41 ad 07 04 01 01 01 14 3e .......A.......> +| 4016: 22 07 04 01 01 01 13 27 45 07 04 01 01 01 0f ad .......'E....... +| 4032: dd 07 04 01 01 01 0c 2e a1 07 04 01 01 01 03 df ................ +| 4048: e1 07 04 01 01 01 06 59 a7 07 04 01 01 01 04 27 .......Y.......' +| 4064: bd 07 04 01 01 01 09 d0 e0 07 04 01 01 01 12 39 ...............9 +| 4080: 4f 07 04 01 01 01 07 c4 11 06 04 00 00 00 00 00 O............... +| end crash-7b75760a4c5f15.db +}]} {} + +do_test 5.1 { + set R [sqlite3_recover_init db main test.db2] + catch { $R run } + list [catch { $R finish } msg] $msg +} {0 {}} + +finish_test + ADDED ext/recover/recoverfault.test Index: ext/recover/recoverfault.test ================================================================== --- /dev/null +++ ext/recover/recoverfault.test @@ -0,0 +1,84 @@ +# 2022 August 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recoverfault + + +#-------------------------------------------------------------------------- +proc compare_result {db1 db2 sql} { + set r1 [$db1 eval $sql] + set r2 [$db2 eval $sql] + if {$r1 != $r2} { + puts "r1: $r1" + puts "r2: $r2" + error "mismatch for $sql" + } + return "" +} + +proc compare_dbs {db1 db2} { + compare_result $db1 $db2 "SELECT sql FROM sqlite_master ORDER BY 1" + foreach tbl [$db1 eval {SELECT name FROM sqlite_master WHERE type='table'}] { + compare_result $db1 $db2 "SELECT * FROM $tbl" + } +} +#-------------------------------------------------------------------------- + +do_execsql_test 1.0 { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(2, hex(randomblob(1000)), randomblob(2000)); + CREATE INDEX i1 ON t1(b, c); + ANALYZE; +} +faultsim_save_and_close + +do_faultsim_test 1 -faults oom* -prep { + catch { db2 close } + faultsim_restore_and_reopen +} -body { + set R [sqlite3_recover_init db main test.db2] + $R run + $R finish +} -test { + faultsim_test_result {0 {}} {1 {}} + if {$testrc==0} { + sqlite3 db2 test.db2 + compare_dbs db db2 + db2 close + } +} + +faultsim_restore_and_reopen +do_execsql_test 2.0 { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t2 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(2, hex(randomblob(1000)), hex(randomblob(2000))); + PRAGMA writable_schema = 1; + DELETE FROM sqlite_schema WHERE name='t2'; +} +faultsim_save_and_close + +do_faultsim_test 2 -faults oom* -prep { + faultsim_restore_and_reopen +} -body { + set R [sqlite3_recover_init db main test.db2] + $R config lostandfound lost_and_found + $R run + $R finish +} -test { + faultsim_test_result {0 {}} {1 {}} +} + +finish_test + ADDED ext/recover/recoverfault2.test Index: ext/recover/recoverfault2.test ================================================================== --- /dev/null +++ ext/recover/recoverfault2.test @@ -0,0 +1,102 @@ +# 2022 August 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recoverfault2 + + +#-------------------------------------------------------------------------- +proc compare_result {db1 db2 sql} { + set r1 [$db1 eval $sql] + set r2 [$db2 eval $sql] + if {$r1 != $r2} { + puts "r1: $r1" + puts "r2: $r2" + error "mismatch for $sql" + } + return "" +} + +proc compare_dbs {db1 db2} { + compare_result $db1 $db2 "SELECT sql FROM sqlite_master ORDER BY 1" + foreach tbl [$db1 eval {SELECT name FROM sqlite_master WHERE type='table'}] { + compare_result $db1 $db2 "SELECT * FROM $tbl" + } +} +#-------------------------------------------------------------------------- + +do_execsql_test 1.0 " + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(2, '\012hello\015world\012today\n'); +" +faultsim_save_and_close + +proc my_sql_hook {sql} { + lappend ::lSql $sql + return 0 +} + +do_faultsim_test 1 -faults oom* -prep { + catch { db2 close } + faultsim_restore_and_reopen + set ::lSql [list] +} -body { + set R [sqlite3_recover_init_sql db main my_sql_hook] + $R run + $R finish +} -test { + faultsim_test_result {0 {}} {1 {}} + if {$testrc==0} { + sqlite3 db2 "" + db2 eval [join $::lSql ";"] + compare_dbs db db2 + db2 close + } +} + +ifcapable utf16 { + reset_db + do_execsql_test 2.0 " + PRAGMA encoding='utf-16'; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(2, '\012hello\015world\012today\n'); + " + faultsim_save_and_close + + proc my_sql_hook {sql} { + lappend ::lSql $sql + return 0 + } + + do_faultsim_test 2 -faults oom-t* -prep { + catch { db2 close } + faultsim_restore_and_reopen + set ::lSql [list] + } -body { + set R [sqlite3_recover_init_sql db main my_sql_hook] + $R run + $R finish + } -test { + faultsim_test_result {0 {}} {1 {}} + if {$testrc==0} { + sqlite3 db2 "" + db2 eval [join $::lSql ";"] + compare_dbs db db2 + db2 close + } + } +} + + + +finish_test + ADDED ext/recover/recoverold.test Index: ext/recover/recoverold.test ================================================================== --- /dev/null +++ ext/recover/recoverold.test @@ -0,0 +1,189 @@ +# 2019 April 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recoverold + +proc compare_result {db1 db2 sql} { + set r1 [$db1 eval $sql] + set r2 [$db2 eval $sql] + if {$r1 != $r2} { + puts "sql: $sql" + puts "r1: $r1" + puts "r2: $r2" + error "mismatch for $sql" + } + return "" +} + +proc compare_dbs {db1 db2} { + compare_result $db1 $db2 "SELECT sql FROM sqlite_master ORDER BY 1" + foreach tbl [$db1 eval {SELECT name FROM sqlite_master WHERE type='table'}] { + compare_result $db1 $db2 "SELECT * FROM $tbl" + } +} + +proc do_recover_test {tn {tsql {}} {res {}}} { + forcedelete test.db2 + forcedelete rstate.db + + set R [sqlite3_recover_init db main test.db2] + $R config lostandfound lost_and_found + $R run + $R finish + + sqlite3 db2 test.db2 + + if {$tsql==""} { + uplevel [list do_test $tn.1 [list compare_dbs db db2] {}] + } else { + uplevel [list do_execsql_test -db db2 $tn.1 $tsql $res] + } + db2 close + + forcedelete test.db2 + forcedelete rstate.db + + set ::sqlhook [list] + set R [sqlite3_recover_init_sql db main my_sql_hook] + $R config lostandfound lost_and_found + $R run + $R finish + + sqlite3 db2 test.db2 + db2 eval [join $::sqlhook ";"] + + + db cache flush + if {$tsql==""} { + compare_dbs db db2 + uplevel [list do_test $tn.sql [list compare_dbs db db2] {}] + } else { + uplevel [list do_execsql_test -db db2 $tn.sql $tsql $res] + } + db2 close +} + +proc my_sql_hook {sql} { + lappend ::sqlhook $sql + return 0 +} + + +set doc { + hello + world +} +do_execsql_test 1.1.1 { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t1 VALUES(1, 4, X'1234567800'); + INSERT INTO t1 VALUES(2, 'test', 8.1); + INSERT INTO t1 VALUES(3, $doc, 8.4); +} +do_recover_test 1.1.2 + +do_execsql_test 1.2.1 " + DELETE FROM t1; + INSERT INTO t1 VALUES(13, 'hello\r\nworld', 13); +" +do_recover_test 1.2.2 + +do_execsql_test 1.3.1 " + CREATE TABLE t2(i INTEGER PRIMARY KEY AUTOINCREMENT, b, c); + INSERT INTO t2 VALUES(NULL, 1, 2); + INSERT INTO t2 VALUES(NULL, 3, 4); + INSERT INTO t2 VALUES(NULL, 5, 6); + CREATE TABLE t3(i INTEGER PRIMARY KEY AUTOINCREMENT, b, c); + INSERT INTO t3 VALUES(NULL, 1, 2); + INSERT INTO t3 VALUES(NULL, 3, 4); + INSERT INTO t3 VALUES(NULL, 5, 6); + DELETE FROM t2; +" +do_recover_test 1.3.2 + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 2.1.0 { + PRAGMA auto_vacuum = 0; + CREATE TABLE t1(a, b, c, PRIMARY KEY(b, c)) WITHOUT ROWID; + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + INSERT INTO t1 VALUES(7, 8, 9); +} + +do_recover_test 2.1.1 + +do_execsql_test 2.2.0 { + PRAGMA writable_schema = 1; + DELETE FROM sqlite_master WHERE name='t1'; +} +do_recover_test 2.2.1 { + SELECT name FROM sqlite_master +} {lost_and_found} + +do_execsql_test 2.3.0 { + CREATE TABLE lost_and_found(a, b, c); +} +do_recover_test 2.3.1 { + SELECT name FROM sqlite_master +} {lost_and_found lost_and_found_0} + +do_execsql_test 2.4.0 { + CREATE TABLE lost_and_found_0(a, b, c); +} +do_recover_test 2.4.1 { + SELECT name FROM sqlite_master; + SELECT * FROM lost_and_found_1; +} {lost_and_found lost_and_found_0 lost_and_found_1 + 2 2 3 {} 2 3 1 + 2 2 3 {} 5 6 4 + 2 2 3 {} 8 9 7 +} + +do_execsql_test 2.5 { + CREATE TABLE x1(a, b, c); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<100 + ) + INSERT INTO x1 SELECT i, i, hex(randomblob(500)) FROM s; + DROP TABLE x1; +} +do_recover_test 2.5.1 { + SELECT name FROM 
sqlite_master; + SELECT * FROM lost_and_found_1; +} {lost_and_found lost_and_found_0 lost_and_found_1 + 2 2 3 {} 2 3 1 + 2 2 3 {} 5 6 4 + 2 2 3 {} 8 9 7 +} + +ifcapable !secure_delete { + do_test 2.6 { + forcedelete test.db2 + set R [sqlite3_recover_init db main test.db2] + $R config lostandfound lost_and_found + $R config freelistcorrupt 1 + $R run + $R finish + sqlite3 db2 test.db2 + execsql { SELECT count(*) FROM lost_and_found_1; } db2 + } {103} + db2 close +} + +#------------------------------------------------------------------------- +breakpoint +reset_db +do_recover_test 3.0 + +finish_test ADDED ext/recover/recoverpgsz.test Index: ext/recover/recoverpgsz.test ================================================================== --- /dev/null +++ ext/recover/recoverpgsz.test @@ -0,0 +1,100 @@ +# 2022 October 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +source [file join [file dirname [info script]] recover_common.tcl] + +db close +sqlite3_test_control_pending_byte 0x1000000 + +set testprefix recoverpgsz + +foreach {pgsz bOverflow} { + 512 0 1024 0 2048 0 4096 0 8192 0 16384 0 32768 0 65536 0 + 512 1 1024 1 2048 1 4096 1 8192 1 16384 1 32768 1 65536 1 +} { + reset_db + execsql "PRAGMA page_size = $pgsz" + execsql "PRAGMA auto_vacuum = 0" + do_execsql_test 1.$pgsz.$bOverflow.1 { + CREATE TABLE t1(a, b, c); + CREATE INDEX i1 ON t1(b, a, c); + INSERT INTO t1(a, b) VALUES(1, 2), (3, 4), (5, 6); + DELETE FROM t1 WHERE a=3; + } + if {$bOverflow} { + do_execsql_test 1.$pgsz.$bOverflow.1a { + UPDATE t1 SET c = randomblob(100000); + } + } + db close + + + set fd [open test.db] + fconfigure $fd -encoding binary -translation binary + seek $fd $pgsz + set pg1 [read $fd $pgsz] + set pg2 [read $fd $pgsz] + close $fd + + set fd2 [open test.db2 w] + fconfigure $fd2 -encoding binary -translation binary + seek $fd2 $pgsz + puts -nonewline $fd2 $pg1 + close $fd2 + + sqlite3 db2 test.db2 + do_test 1.$pgsz.$bOverflow.2 { + set R [sqlite3_recover_init db2 main test.db3] + $R run + $R finish + } {} + + sqlite3 db3 test.db3 + do_test 1.$pgsz.$bOverflow.3 { + db3 eval { SELECT * FROM sqlite_schema } + db3 eval { PRAGMA page_size } + } $pgsz + + db2 close + db3 close + + forcedelete test.db3 + forcedelete test.db2 + + set fd2 [open test.db2 w] + fconfigure $fd2 -encoding binary -translation binary + seek $fd2 $pgsz + puts -nonewline $fd2 $pg2 + close $fd2 + + sqlite3 db2 test.db2 + do_test 1.$pgsz.$bOverflow.4 { + set R [sqlite3_recover_init db2 main test.db3] + $R run + $R finish + } {} + + sqlite3 db3 test.db3 + do_test 1.$pgsz.$bOverflow.5 { + db3 eval { SELECT * FROM sqlite_schema } + db3 eval { PRAGMA page_size } + } $pgsz + + db2 close + db3 close +} + + +finish_test + + + ADDED ext/recover/recoverrowid.test Index: ext/recover/recoverrowid.test ================================================================== --- /dev/null +++ ext/recover/recoverrowid.test @@ -0,0 +1,50 @@ +# 2022 September 07 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# Tests for the SQLITE_RECOVER_ROWIDS option. +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recoverrowid + +proc recover {db bRowids output} { + forcedelete $output + + set R [sqlite3_recover_init db main test.db2] + $R config rowids $bRowids + $R run + $R finish +} + +do_execsql_test 1.0 { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 1), (2, 2), (3, 3), (4, 4); + DELETE FROM t1 WHERE a IN (1, 3); +} + +do_test 1.1 { + recover db 0 test.db2 + sqlite3 db2 test.db2 + execsql { SELECT rowid, a, b FROM t1 ORDER BY rowid} db2 +} {1 2 2 2 4 4} + +do_test 1.2 { + db2 close + recover db 1 test.db2 + sqlite3 db2 test.db2 + execsql { SELECT rowid, a, b FROM t1 ORDER BY rowid} db2 +} {2 2 2 4 4 4} +db2 close + + + + +finish_test ADDED ext/recover/recoverslowidx.test Index: ext/recover/recoverslowidx.test ================================================================== --- /dev/null +++ ext/recover/recoverslowidx.test @@ -0,0 +1,87 @@ +# 2022 September 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests for the SQLITE_RECOVER_SLOWINDEXES option. +# + +source [file join [file dirname [info script]] recover_common.tcl] +set testprefix recoverslowidx + +do_execsql_test 1.0 { + PRAGMA auto_vacuum = 0; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a); + INSERT INTO t1 VALUES(1, 1), (2, 2), (3, 3), (4, 4); +} + +proc my_sql_hook {sql} { + lappend ::lSql $sql + return 0 +} + +do_test 1.1 { + set lSql [list] + set R [sqlite3_recover_init_sql db main my_sql_hook] + while {[$R step]==0} { } + $R finish +} {} + +do_test 1.2 { + set lSql +} [list {*}{ + {BEGIN} + {PRAGMA writable_schema = on} + {PRAGMA encoding = 'UTF-8'} + {PRAGMA page_size = '1024'} + {PRAGMA auto_vacuum = '0'} + {PRAGMA user_version = '0'} + {PRAGMA application_id = '0'} + {CREATE TABLE t1(a, b)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (1, 1, 1)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (2, 2, 2)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (3, 3, 3)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (4, 4, 4)} + {CREATE INDEX i1 ON t1(a)} + {PRAGMA writable_schema = off} + {COMMIT} +}] + +do_test 1.3 { + set lSql [list] + set R [sqlite3_recover_init_sql db main my_sql_hook] + $R config slowindexes 1 + while {[$R step]==0} { } + $R finish +} {} + +do_test 1.4 { + set lSql +} [list {*}{ + {BEGIN} + {PRAGMA writable_schema = on} + {PRAGMA encoding = 'UTF-8'} + {PRAGMA page_size = '1024'} + {PRAGMA auto_vacuum = '0'} + {PRAGMA user_version = '0'} + {PRAGMA application_id = '0'} + {CREATE TABLE t1(a, b)} + {CREATE INDEX i1 ON t1(a)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (1, 1, 1)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (2, 2, 2)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (3, 3, 3)} + {INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (4, 4, 4)} + {PRAGMA writable_schema = off} + {COMMIT} +}] + + +finish_test + ADDED ext/recover/recoversql.test Index: ext/recover/recoversql.test ================================================================== --- /dev/null +++ ext/recover/recoversql.test @@ -0,0 +1,52 @@ +# 2022 
September 13
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+#
+
+source [file join [file dirname [info script]] recover_common.tcl]
+set testprefix recoversql
+
+do_execsql_test 1.0 {
+ CREATE TABLE "x.1" (x, y);
+ INSERT INTO "x.1" VALUES(1, 1), (2, 2), (3, 3);
+ CREATE INDEX "i.1" ON "x.1"(y, x);
+}
+
+proc sql_hook {sql} {
+ incr ::iSqlHook
+ if {$::iSqlHook==$::sql_hook_cnt} { return 4 }
+ return 0
+}
+
+do_test 1.1 {
+ set ::sql_hook_cnt -1
+ set ::iSqlHook 0
+ set R [sqlite3_recover_init_sql db main sql_hook]
+ $R run
+ $R finish
+} {}
+
+set nSqlCall $iSqlHook
+
+for {set ii 1} {$ii<$nSqlCall} {incr ii} {
+ set iSqlHook 0
+ set sql_hook_cnt $ii
+ do_test 1.$ii.a {
+ set R [sqlite3_recover_init_sql db main sql_hook]
+ $R run
+ } {1}
+ do_test 1.$ii.b {
+ list [catch { $R finish } msg] $msg
+ } {1 {callback returned an error - 4}}
+}
+
+
+finish_test
ADDED ext/recover/sqlite3recover.c
Index: ext/recover/sqlite3recover.c
==================================================================
--- /dev/null
+++ ext/recover/sqlite3recover.c
@@ -0,0 +1,2861 @@
+/*
+** 2022-08-27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+*/
+
+
+#include "sqlite3recover.h"
+#include <assert.h>
+#include <string.h>
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+
+/*
+** Declaration for public API function in file dbdata.c. This may be called
+** with NULL as the final two arguments to register the sqlite_dbptr and
+** sqlite_dbdata virtual tables with a database handle.
+*/
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+int sqlite3_dbdata_init(sqlite3*, char**, const sqlite3_api_routines*);
+
+typedef unsigned int u32;
+typedef unsigned char u8;
+typedef sqlite3_int64 i64;
+
+typedef struct RecoverTable RecoverTable;
+typedef struct RecoverColumn RecoverColumn;
+
+/*
+** When recovering rows of data that can be associated with table
+** definitions recovered from the sqlite_schema table, each table is
+** represented by an instance of the following object.
+**
+** iRoot:
+** The root page in the original database. Not necessarily (and usually
+** not) the same in the recovered database.
+**
+** zTab:
+** Name of the table.
+**
+** nCol/aCol[]:
+** aCol[] is an array of nCol columns, in the order in which they appear
+** in the table.
+**
+** bIntkey:
+** Set to true for intkey tables, false for WITHOUT ROWID.
+**
+** iRowidBind:
+** Each column in the aCol[] array has associated with it the index of
+** the bind parameter its values will be bound to in the INSERT statement
+** used to construct the output database. If the table has a rowid
+** but not an INTEGER PRIMARY KEY column, then iRowidBind contains the
+** index of the bind parameter to which the rowid value should be bound.
+** Otherwise, it contains -1. If the table does contain an INTEGER PRIMARY
+** KEY column, then the rowid value should be bound to the index associated
+** with the column.
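+**
+** As an editorial illustration (not part of the original source): for a
+** recovered schema entry "CREATE TABLE t1(a, b)" with rowid recovery
+** enabled, the INSERT used to repopulate the output database takes a form
+** similar to the statements captured by recoverslowidx.test above:
+**
+**   INSERT OR IGNORE INTO 't1'(_rowid_, 'a', 'b') VALUES (?1, ?2, ?3)
+**
+** In this sketch iRowidBind would identify the _rowid_ parameter (?1), and
+** the two RecoverColumn entries would carry the bind indexes of the
+** remaining parameters. Had t1 declared an INTEGER PRIMARY KEY, no
+** separate _rowid_ parameter would be needed and iRowidBind would be -1.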
+** +** pNext: +** All RecoverTable objects used by the recovery operation are allocated +** and populated as part of creating the recovered database schema in +** the output database, before any non-schema data are recovered. They +** are then stored in a singly-linked list linked by this variable beginning +** at sqlite3_recover.pTblList. +*/ +struct RecoverTable { + u32 iRoot; /* Root page in original database */ + char *zTab; /* Name of table */ + int nCol; /* Number of columns in table */ + RecoverColumn *aCol; /* Array of columns */ + int bIntkey; /* True for intkey, false for without rowid */ + int iRowidBind; /* If >0, bind rowid to INSERT here */ + RecoverTable *pNext; +}; + +/* +** Each database column is represented by an instance of the following object +** stored in the RecoverTable.aCol[] array of the associated table. +** +** iField: +** The index of the associated field within database records. Or -1 if +** there is no associated field (e.g. for virtual generated columns). +** +** iBind: +** The bind index of the INSERT statement to bind this columns values +** to. Or 0 if there is no such index (iff (iField<0)). +** +** bIPK: +** True if this is the INTEGER PRIMARY KEY column. +** +** zCol: +** Name of column. +** +** eHidden: +** A RECOVER_EHIDDEN_* constant value (see below for interpretation of each). +*/ +struct RecoverColumn { + int iField; /* Field in record on disk */ + int iBind; /* Binding to use in INSERT */ + int bIPK; /* True for IPK column */ + char *zCol; + int eHidden; +}; + +#define RECOVER_EHIDDEN_NONE 0 /* Normal database column */ +#define RECOVER_EHIDDEN_HIDDEN 1 /* Column is __HIDDEN__ */ +#define RECOVER_EHIDDEN_VIRTUAL 2 /* Virtual generated column */ +#define RECOVER_EHIDDEN_STORED 3 /* Stored generated column */ + +/* +** Bitmap object used to track pages in the input database. Allocated +** and manipulated only by the following functions: +** +** recoverBitmapAlloc() +** recoverBitmapFree() +** recoverBitmapSet() +** recoverBitmapQuery() +** +** nPg: +** Largest page number that may be stored in the bitmap. The range +** of valid keys is 1 to nPg, inclusive. +** +** aElem[]: +** Array large enough to contain a bit for each key. For key value +** iKey, the associated bit is the bit (iKey%32) of aElem[iKey/32]. +** In other words, the following is true if bit iKey is set, or +** false if it is clear: +** +** (aElem[iKey/32] & (1 << (iKey%32))) ? 1 : 0 +*/ +typedef struct RecoverBitmap RecoverBitmap; +struct RecoverBitmap { + i64 nPg; /* Size of bitmap */ + u32 aElem[1]; /* Array of 32-bit bitmasks */ +}; + +/* +** State variables (part of the sqlite3_recover structure) used while +** recovering data for tables identified in the recovered schema (state +** RECOVER_STATE_WRITING). +*/ +typedef struct RecoverStateW1 RecoverStateW1; +struct RecoverStateW1 { + sqlite3_stmt *pTbls; + sqlite3_stmt *pSel; + sqlite3_stmt *pInsert; + int nInsert; + + RecoverTable *pTab; /* Table currently being written */ + int nMax; /* Max column count in any schema table */ + sqlite3_value **apVal; /* Array of nMax values */ + int nVal; /* Number of valid entries in apVal[] */ + int bHaveRowid; + i64 iRowid; + i64 iPrevPage; + int iPrevCell; +}; + +/* +** State variables (part of the sqlite3_recover structure) used while +** recovering data destined for the lost and found table (states +** RECOVER_STATE_LOSTANDFOUND[123]). 
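+**
+** For orientation (an editorial note, inferred from the lost-and-found
+** output exercised by recoverold.test above): rows that cannot be
+** attributed to any recovered table are written to a table with a schema
+** along the lines of:
+**
+**   CREATE TABLE lost_and_found(
+**     rootpgno INTEGER,   -- root page of the tree the row was assigned to
+**     pgno INTEGER,       -- page on which the row was found
+**     nfield INTEGER,     -- number of fields in the recovered record
+**     id INTEGER,         -- rowid value, where one could be recovered
+**     c0, c1, c2          -- one column per recovered field value
+**   );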
+*/ +typedef struct RecoverStateLAF RecoverStateLAF; +struct RecoverStateLAF { + RecoverBitmap *pUsed; + i64 nPg; /* Size of db in pages */ + sqlite3_stmt *pAllAndParent; + sqlite3_stmt *pMapInsert; + sqlite3_stmt *pMaxField; + sqlite3_stmt *pUsedPages; + sqlite3_stmt *pFindRoot; + sqlite3_stmt *pInsert; /* INSERT INTO lost_and_found ... */ + sqlite3_stmt *pAllPage; + sqlite3_stmt *pPageData; + sqlite3_value **apVal; + int nMaxField; +}; + +/* +** Main recover handle structure. +*/ +struct sqlite3_recover { + /* Copies of sqlite3_recover_init[_sql]() parameters */ + sqlite3 *dbIn; /* Input database */ + char *zDb; /* Name of input db ("main" etc.) */ + char *zUri; /* URI for output database */ + void *pSqlCtx; /* SQL callback context */ + int (*xSql)(void*,const char*); /* Pointer to SQL callback function */ + + /* Values configured by sqlite3_recover_config() */ + char *zStateDb; /* State database to use (or NULL) */ + char *zLostAndFound; /* Name of lost-and-found table (or NULL) */ + int bFreelistCorrupt; /* SQLITE_RECOVER_FREELIST_CORRUPT setting */ + int bRecoverRowid; /* SQLITE_RECOVER_ROWIDS setting */ + int bSlowIndexes; /* SQLITE_RECOVER_SLOWINDEXES setting */ + + int pgsz; + int detected_pgsz; + int nReserve; + u8 *pPage1Disk; + u8 *pPage1Cache; + + /* Error code and error message */ + int errCode; /* For sqlite3_recover_errcode() */ + char *zErrMsg; /* For sqlite3_recover_errmsg() */ + + int eState; + int bCloseTransaction; + + /* Variables used with eState==RECOVER_STATE_WRITING */ + RecoverStateW1 w1; + + /* Variables used with states RECOVER_STATE_LOSTANDFOUND[123] */ + RecoverStateLAF laf; + + /* Fields used within sqlite3_recover_run() */ + sqlite3 *dbOut; /* Output database */ + sqlite3_stmt *pGetPage; /* SELECT against input db sqlite_dbdata */ + RecoverTable *pTblList; /* List of tables recovered from schema */ +}; + +/* +** The various states in which an sqlite3_recover object may exist: +** +** RECOVER_STATE_INIT: +** The object is initially created in this state. sqlite3_recover_step() +** has yet to be called. This is the only state in which it is permitted +** to call sqlite3_recover_config(). +** +** RECOVER_STATE_WRITING: +** +** RECOVER_STATE_LOSTANDFOUND1: +** State to populate the bitmap of pages used by other tables or the +** database freelist. +** +** RECOVER_STATE_LOSTANDFOUND2: +** Populate the recovery.map table - used to figure out a "root" page +** for each lost page from in the database from which records are +** extracted. +** +** RECOVER_STATE_LOSTANDFOUND3: +** Populate the lost-and-found table itself. +*/ +#define RECOVER_STATE_INIT 0 +#define RECOVER_STATE_WRITING 1 +#define RECOVER_STATE_LOSTANDFOUND1 2 +#define RECOVER_STATE_LOSTANDFOUND2 3 +#define RECOVER_STATE_LOSTANDFOUND3 4 +#define RECOVER_STATE_SCHEMA2 5 +#define RECOVER_STATE_DONE 6 + + +/* +** Global variables used by this extension. +*/ +typedef struct RecoverGlobal RecoverGlobal; +struct RecoverGlobal { + const sqlite3_io_methods *pMethods; + sqlite3_recover *p; +}; +static RecoverGlobal recover_g; + +/* +** Use this static SQLite mutex to protect the globals during the +** first call to sqlite3_recover_step(). +*/ +#define RECOVER_MUTEX_ID SQLITE_MUTEX_STATIC_APP2 + + +/* +** Default value for SQLITE_RECOVER_ROWIDS (sqlite3_recover.bRecoverRowid). 
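+** A non-zero default means that, unless the option is explicitly cleared
+** via sqlite3_recover_config(), an attempt is made to recover and preserve
+** the original rowid values; with the option cleared, recovered rows are
+** simply assigned new rowids (recoverrowid.test above shows the difference).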
+*/ +#define RECOVER_ROWID_DEFAULT 1 + +/* +** Mutex handling: +** +** recoverEnterMutex() - Enter the recovery mutex +** recoverLeaveMutex() - Leave the recovery mutex +** recoverAssertMutexHeld() - Assert that the recovery mutex is held +*/ +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE==0 +# define recoverEnterMutex() +# define recoverLeaveMutex() +#else +static void recoverEnterMutex(void){ + sqlite3_mutex_enter(sqlite3_mutex_alloc(RECOVER_MUTEX_ID)); +} +static void recoverLeaveMutex(void){ + sqlite3_mutex_leave(sqlite3_mutex_alloc(RECOVER_MUTEX_ID)); +} +#endif +#if SQLITE_THREADSAFE+0>=1 && defined(SQLITE_DEBUG) +static void recoverAssertMutexHeld(void){ + assert( sqlite3_mutex_held(sqlite3_mutex_alloc(RECOVER_MUTEX_ID)) ); +} +#else +# define recoverAssertMutexHeld() +#endif + + +/* +** Like strlen(). But handles NULL pointer arguments. +*/ +static int recoverStrlen(const char *zStr){ + if( zStr==0 ) return 0; + return (int)(strlen(zStr)&0x7fffffff); +} + +/* +** This function is a no-op if the recover handle passed as the first +** argument already contains an error (if p->errCode!=SQLITE_OK). +** +** Otherwise, an attempt is made to allocate, zero and return a buffer nByte +** bytes in size. If successful, a pointer to the new buffer is returned. Or, +** if an OOM error occurs, NULL is returned and the handle error code +** (p->errCode) set to SQLITE_NOMEM. +*/ +static void *recoverMalloc(sqlite3_recover *p, i64 nByte){ + void *pRet = 0; + assert( nByte>0 ); + if( p->errCode==SQLITE_OK ){ + pRet = sqlite3_malloc64(nByte); + if( pRet ){ + memset(pRet, 0, nByte); + }else{ + p->errCode = SQLITE_NOMEM; + } + } + return pRet; +} + +/* +** Set the error code and error message for the recover handle passed as +** the first argument. The error code is set to the value of parameter +** errCode. +** +** Parameter zFmt must be a printf() style formatting string. The handle +** error message is set to the result of using any trailing arguments for +** parameter substitutions in the formatting string. +** +** For example: +** +** recoverError(p, SQLITE_ERROR, "no such table: %s", zTablename); +*/ +static int recoverError( + sqlite3_recover *p, + int errCode, + const char *zFmt, ... +){ + char *z = 0; + va_list ap; + va_start(ap, zFmt); + if( zFmt ){ + z = sqlite3_vmprintf(zFmt, ap); + va_end(ap); + } + sqlite3_free(p->zErrMsg); + p->zErrMsg = z; + p->errCode = errCode; + return errCode; +} + + +/* +** This function is a no-op if p->errCode is initially other than SQLITE_OK. +** In this case it returns NULL. +** +** Otherwise, an attempt is made to allocate and return a bitmap object +** large enough to store a bit for all page numbers between 1 and nPg, +** inclusive. The bitmap is initially zeroed. +*/ +static RecoverBitmap *recoverBitmapAlloc(sqlite3_recover *p, i64 nPg){ + int nElem = (nPg+1+31) / 32; + int nByte = sizeof(RecoverBitmap) + nElem*sizeof(u32); + RecoverBitmap *pRet = (RecoverBitmap*)recoverMalloc(p, nByte); + + if( pRet ){ + pRet->nPg = nPg; + } + return pRet; +} + +/* +** Free a bitmap object allocated by recoverBitmapAlloc(). +*/ +static void recoverBitmapFree(RecoverBitmap *pMap){ + sqlite3_free(pMap); +} + +/* +** Set the bit associated with page iPg in bitvec pMap. +*/ +static void recoverBitmapSet(RecoverBitmap *pMap, i64 iPg){ + if( iPg<=pMap->nPg ){ + int iElem = (iPg / 32); + int iBit = (iPg % 32); + pMap->aElem[iElem] |= (((u32)1) << iBit); + } +} + +/* +** Query bitmap object pMap for the state of the bit associated with page +** iPg. 
Return 1 if it is set, or 0 otherwise. +*/ +static int recoverBitmapQuery(RecoverBitmap *pMap, i64 iPg){ + int ret = 1; + if( iPg<=pMap->nPg && iPg>0 ){ + int iElem = (iPg / 32); + int iBit = (iPg % 32); + ret = (pMap->aElem[iElem] & (((u32)1) << iBit)) ? 1 : 0; + } + return ret; +} + +/* +** Set the recover handle error to the error code and message returned by +** calling sqlite3_errcode() and sqlite3_errmsg(), respectively, on database +** handle db. +*/ +static int recoverDbError(sqlite3_recover *p, sqlite3 *db){ + return recoverError(p, sqlite3_errcode(db), "%s", sqlite3_errmsg(db)); +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). +** +** Otherwise, it attempts to prepare the SQL statement in zSql against +** database handle db. If successful, the statement handle is returned. +** Or, if an error occurs, NULL is returned and an error left in the +** recover handle. +*/ +static sqlite3_stmt *recoverPrepare( + sqlite3_recover *p, + sqlite3 *db, + const char *zSql +){ + sqlite3_stmt *pStmt = 0; + if( p->errCode==SQLITE_OK ){ + if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0) ){ + recoverDbError(p, db); + } + } + return pStmt; +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). +** +** Otherwise, argument zFmt is used as a printf() style format string, +** along with any trailing arguments, to create an SQL statement. This +** SQL statement is prepared against database handle db and, if successful, +** the statment handle returned. Or, if an error occurs - either during +** the printf() formatting or when preparing the resulting SQL - an +** error code and message are left in the recover handle. +*/ +static sqlite3_stmt *recoverPreparePrintf( + sqlite3_recover *p, + sqlite3 *db, + const char *zFmt, ... +){ + sqlite3_stmt *pStmt = 0; + if( p->errCode==SQLITE_OK ){ + va_list ap; + char *z; + va_start(ap, zFmt); + z = sqlite3_vmprintf(zFmt, ap); + va_end(ap); + if( z==0 ){ + p->errCode = SQLITE_NOMEM; + }else{ + pStmt = recoverPrepare(p, db, z); + sqlite3_free(z); + } + } + return pStmt; +} + +/* +** Reset SQLite statement handle pStmt. If the call to sqlite3_reset() +** indicates that an error occurred, and there is not already an error +** in the recover handle passed as the first argument, set the error +** code and error message appropriately. +** +** This function returns a copy of the statement handle pointer passed +** as the second argument. +*/ +static sqlite3_stmt *recoverReset(sqlite3_recover *p, sqlite3_stmt *pStmt){ + int rc = sqlite3_reset(pStmt); + if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT && p->errCode==SQLITE_OK ){ + recoverDbError(p, sqlite3_db_handle(pStmt)); + } + return pStmt; +} + +/* +** Finalize SQLite statement handle pStmt. If the call to sqlite3_reset() +** indicates that an error occurred, and there is not already an error +** in the recover handle passed as the first argument, set the error +** code and error message appropriately. +*/ +static void recoverFinalize(sqlite3_recover *p, sqlite3_stmt *pStmt){ + sqlite3 *db = sqlite3_db_handle(pStmt); + int rc = sqlite3_finalize(pStmt); + if( rc!=SQLITE_OK && p->errCode==SQLITE_OK ){ + recoverDbError(p, db); + } +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). A copy of p->errCode is returned in this +** case. +** +** Otherwise, execute SQL script zSql. If successful, return SQLITE_OK. 
+** Or, if an error occurs, leave an error code and message in the recover
+** handle and return a copy of the error code.
+*/
+static int recoverExec(sqlite3_recover *p, sqlite3 *db, const char *zSql){
+ if( p->errCode==SQLITE_OK ){
+ int rc = sqlite3_exec(db, zSql, 0, 0, 0);
+ if( rc ){
+ recoverDbError(p, db);
+ }
+ }
+ return p->errCode;
+}
+
+/*
+** Bind the value pVal to parameter iBind of statement pStmt. Leave an
+** error in the recover handle passed as the first argument if an error
+** (e.g. an OOM) occurs.
+*/
+static void recoverBindValue(
+ sqlite3_recover *p,
+ sqlite3_stmt *pStmt,
+ int iBind,
+ sqlite3_value *pVal
+){
+ if( p->errCode==SQLITE_OK ){
+ int rc = sqlite3_bind_value(pStmt, iBind, pVal);
+ if( rc ) recoverError(p, rc, 0);
+ }
+}
+
+/*
+** This function is a no-op if recover handle p already contains an error
+** (if p->errCode!=SQLITE_OK). NULL is returned in this case.
+**
+** Otherwise, an attempt is made to interpret zFmt as a printf() style
+** formatting string and the result of using the trailing arguments for
+** parameter substitution with it written into a buffer obtained from
+** sqlite3_malloc(). If successful, a pointer to the buffer is returned.
+** It is the responsibility of the caller to eventually free the buffer
+** using sqlite3_free().
+**
+** Or, if an error occurs, an error code and message is left in the recover
+** handle and NULL returned.
+*/
+static char *recoverMPrintf(sqlite3_recover *p, const char *zFmt, ...){
+ va_list ap;
+ char *z;
+ va_start(ap, zFmt);
+ z = sqlite3_vmprintf(zFmt, ap);
+ va_end(ap);
+ if( p->errCode==SQLITE_OK ){
+ if( z==0 ) p->errCode = SQLITE_NOMEM;
+ }else{
+ sqlite3_free(z);
+ z = 0;
+ }
+ return z;
+}
+
+/*
+** This function is a no-op if recover handle p already contains an error
+** (if p->errCode!=SQLITE_OK). Zero is returned in this case.
+**
+** Otherwise, execute "PRAGMA page_count" against the input database. If
+** successful, return the integer result. Or, if an error occurs, leave an
+** error code and error message in the sqlite3_recover handle and return
+** zero.
+*/
+static i64 recoverPageCount(sqlite3_recover *p){
+ i64 nPg = 0;
+ if( p->errCode==SQLITE_OK ){
+ sqlite3_stmt *pStmt = 0;
+ pStmt = recoverPreparePrintf(p, p->dbIn, "PRAGMA %Q.page_count", p->zDb);
+ if( pStmt ){
+ sqlite3_step(pStmt);
+ nPg = sqlite3_column_int64(pStmt, 0);
+ }
+ recoverFinalize(p, pStmt);
+ }
+ return nPg;
+}
+
+/*
+** Implementation of SQL scalar function "read_i32". The first argument to
+** this function must be a blob. The second a non-negative integer. This
+** function reads and returns a 32-bit big-endian integer from byte
+** offset (4*<i>) of the blob.
+**
+** SELECT read_i32(<blob>, <i>)
+*/
+static void recoverReadI32(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ const unsigned char *pBlob;
+ int nBlob;
+ int iInt;
+
+ assert( argc==2 );
+ nBlob = sqlite3_value_bytes(argv[0]);
+ pBlob = (const unsigned char*)sqlite3_value_blob(argv[0]);
+ iInt = sqlite3_value_int(argv[1]) & 0xFFFF;
+
+ if( (iInt+1)*4<=nBlob ){
+ const unsigned char *a = &pBlob[iInt*4];
+ i64 iVal = ((i64)a[0]<<24)
+ + ((i64)a[1]<<16)
+ + ((i64)a[2]<< 8)
+ + ((i64)a[3]<< 0);
+ sqlite3_result_int64(context, iVal);
+ }
+}
+
+/*
+** Implementation of SQL scalar function "page_is_used". This function
+** is used as part of the procedure for locating orphan rows for the
+** lost-and-found table, and it depends on those routines having populated
+** the sqlite3_recover.laf.pUsed variable.
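+** (The pUsed bitmap is allocated with recoverBitmapAlloc() and populated
+** via recoverBitmapSet() as part of the RECOVER_STATE_LOSTANDFOUND1 pass
+** described above.)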
+** +** The only argument to this function is a page-number. It returns true +** if the page has already been used somehow during data recovery, or false +** otherwise. +** +** SELECT page_is_used(); +*/ +static void recoverPageIsUsed( + sqlite3_context *pCtx, + int nArg, + sqlite3_value **apArg +){ + sqlite3_recover *p = (sqlite3_recover*)sqlite3_user_data(pCtx); + i64 pgno = sqlite3_value_int64(apArg[0]); + assert( nArg==1 ); + sqlite3_result_int(pCtx, recoverBitmapQuery(p->laf.pUsed, pgno)); +} + +/* +** The implementation of a user-defined SQL function invoked by the +** sqlite_dbdata and sqlite_dbptr virtual table modules to access pages +** of the database being recovered. +** +** This function always takes a single integer argument. If the argument +** is zero, then the value returned is the number of pages in the db being +** recovered. If the argument is greater than zero, it is a page number. +** The value returned in this case is an SQL blob containing the data for +** the identified page of the db being recovered. e.g. +** +** SELECT getpage(0); -- return number of pages in db +** SELECT getpage(4); -- return page 4 of db as a blob of data +*/ +static void recoverGetPage( + sqlite3_context *pCtx, + int nArg, + sqlite3_value **apArg +){ + sqlite3_recover *p = (sqlite3_recover*)sqlite3_user_data(pCtx); + i64 pgno = sqlite3_value_int64(apArg[0]); + sqlite3_stmt *pStmt = 0; + + assert( nArg==1 ); + if( pgno==0 ){ + i64 nPg = recoverPageCount(p); + sqlite3_result_int64(pCtx, nPg); + return; + }else{ + if( p->pGetPage==0 ){ + pStmt = p->pGetPage = recoverPreparePrintf( + p, p->dbIn, "SELECT data FROM sqlite_dbpage(%Q) WHERE pgno=?", p->zDb + ); + }else if( p->errCode==SQLITE_OK ){ + pStmt = p->pGetPage; + } + + if( pStmt ){ + sqlite3_bind_int64(pStmt, 1, pgno); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + const u8 *aPg; + int nPg; + assert( p->errCode==SQLITE_OK ); + aPg = sqlite3_column_blob(pStmt, 0); + nPg = sqlite3_column_bytes(pStmt, 0); + if( pgno==1 && nPg==p->pgsz && 0==memcmp(p->pPage1Cache, aPg, nPg) ){ + aPg = p->pPage1Disk; + } + sqlite3_result_blob(pCtx, aPg, nPg-p->nReserve, SQLITE_TRANSIENT); + } + recoverReset(p, pStmt); + } + } + + if( p->errCode ){ + if( p->zErrMsg ) sqlite3_result_error(pCtx, p->zErrMsg, -1); + sqlite3_result_error_code(pCtx, p->errCode); + } +} + +/* +** Find a string that is not found anywhere in z[]. Return a pointer +** to that string. +** +** Try to use zA and zB first. If both of those are already found in z[] +** then make up some string and store it in the buffer zBuf. +*/ +static const char *recoverUnusedString( + const char *z, /* Result must not appear anywhere in z */ + const char *zA, const char *zB, /* Try these first */ + char *zBuf /* Space to store a generated string */ +){ + unsigned i = 0; + if( strstr(z, zA)==0 ) return zA; + if( strstr(z, zB)==0 ) return zB; + do{ + sqlite3_snprintf(20,zBuf,"(%s%u)", zA, i++); + }while( strstr(z,zBuf)!=0 ); + return zBuf; +} + +/* +** Implementation of scalar SQL function "escape_crnl". The argument passed to +** this function is the output of built-in function quote(). If the first +** character of the input is "'", indicating that the value passed to quote() +** was a text value, then this function searches the input for "\n" and "\r" +** characters and adds a wrapper similar to the following: +** +** replace(replace(, '\n', char(10), '\r', char(13)); +** +** Or, if the first character of the input is not "'", then a copy of the input +** is returned. 
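+**
+** For example (an illustrative sketch only, assuming the two-character
+** sequence "\n" does not already appear in the text), a quoted value
+** containing a single newline character produces output similar to:
+**
+**     replace('first line\nsecond line', '\n', char(10))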
+*/ +static void recoverEscapeCrnl( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *zText = (const char*)sqlite3_value_text(argv[0]); + if( zText && zText[0]=='\'' ){ + int nText = sqlite3_value_bytes(argv[0]); + int i; + char zBuf1[20]; + char zBuf2[20]; + const char *zNL = 0; + const char *zCR = 0; + int nCR = 0; + int nNL = 0; + + for(i=0; zText[i]; i++){ + if( zNL==0 && zText[i]=='\n' ){ + zNL = recoverUnusedString(zText, "\\n", "\\012", zBuf1); + nNL = (int)strlen(zNL); + } + if( zCR==0 && zText[i]=='\r' ){ + zCR = recoverUnusedString(zText, "\\r", "\\015", zBuf2); + nCR = (int)strlen(zCR); + } + } + + if( zNL || zCR ){ + int iOut = 0; + i64 nMax = (nNL > nCR) ? nNL : nCR; + i64 nAlloc = nMax * nText + (nMax+64)*2; + char *zOut = (char*)sqlite3_malloc64(nAlloc); + if( zOut==0 ){ + sqlite3_result_error_nomem(context); + return; + } + + if( zNL && zCR ){ + memcpy(&zOut[iOut], "replace(replace(", 16); + iOut += 16; + }else{ + memcpy(&zOut[iOut], "replace(", 8); + iOut += 8; + } + for(i=0; zText[i]; i++){ + if( zText[i]=='\n' ){ + memcpy(&zOut[iOut], zNL, nNL); + iOut += nNL; + }else if( zText[i]=='\r' ){ + memcpy(&zOut[iOut], zCR, nCR); + iOut += nCR; + }else{ + zOut[iOut] = zText[i]; + iOut++; + } + } + + if( zNL ){ + memcpy(&zOut[iOut], ",'", 2); iOut += 2; + memcpy(&zOut[iOut], zNL, nNL); iOut += nNL; + memcpy(&zOut[iOut], "', char(10))", 12); iOut += 12; + } + if( zCR ){ + memcpy(&zOut[iOut], ",'", 2); iOut += 2; + memcpy(&zOut[iOut], zCR, nCR); iOut += nCR; + memcpy(&zOut[iOut], "', char(13))", 12); iOut += 12; + } + + sqlite3_result_text(context, zOut, iOut, SQLITE_TRANSIENT); + sqlite3_free(zOut); + return; + } + } + + sqlite3_result_value(context, argv[0]); +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). A copy of the error code is returned in +** this case. +** +** Otherwise, attempt to populate temporary table "recovery.schema" with the +** parts of the database schema that can be extracted from the input database. +** +** If no error occurs, SQLITE_OK is returned. Otherwise, an error code +** and error message are left in the recover handle and a copy of the +** error code returned. It is not considered an error if part of all of +** the database schema cannot be recovered due to corruption. +*/ +static int recoverCacheSchema(sqlite3_recover *p){ + return recoverExec(p, p->dbOut, + "WITH RECURSIVE pages(p) AS (" + " SELECT 1" + " UNION" + " SELECT child FROM sqlite_dbptr('getpage()'), pages WHERE pgno=p" + ")" + "INSERT INTO recovery.schema SELECT" + " max(CASE WHEN field=0 THEN value ELSE NULL END)," + " max(CASE WHEN field=1 THEN value ELSE NULL END)," + " max(CASE WHEN field=2 THEN value ELSE NULL END)," + " max(CASE WHEN field=3 THEN value ELSE NULL END)," + " max(CASE WHEN field=4 THEN value ELSE NULL END)" + "FROM sqlite_dbdata('getpage()') WHERE pgno IN (" + " SELECT p FROM pages" + ") GROUP BY pgno, cell" + ); +} + +/* +** If this recover handle is not in SQL callback mode (i.e. was not created +** using sqlite3_recover_init_sql()) of if an error has already occurred, +** this function is a no-op. Otherwise, issue a callback with SQL statement +** zSql as the parameter. +** +** If the callback returns non-zero, set the recover handle error code to +** the value returned (so that the caller will abandon processing). 
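+**
+** For example, an application using the SQL callback interface might
+** supply an xSql function like the following (hypothetical application
+** code, shown as a sketch only), which prints each statement and
+** returns 0 so that recovery continues:
+**
+**     static int xSqlPrint(void *pCtx, const char *zSql){
+**       printf("%s;\n", zSql);
+**       return 0;
+**     }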
+*/
+static void recoverSqlCallback(sqlite3_recover *p, const char *zSql){
+  if( p->errCode==SQLITE_OK && p->xSql ){
+    int res = p->xSql(p->pSqlCtx, zSql);
+    if( res ){
+      recoverError(p, SQLITE_ERROR, "callback returned an error - %d", res);
+    }
+  }
+}
+
+/*
+** Transfer the following settings from the input database to the output
+** database:
+**
+**   + page-size,
+**   + auto-vacuum settings,
+**   + database encoding,
+**   + user-version (PRAGMA user_version), and
+**   + application-id (PRAGMA application_id).
+*/
+static void recoverTransferSettings(sqlite3_recover *p){
+  const char *aPragma[] = {
+    "encoding",
+    "page_size",
+    "auto_vacuum",
+    "user_version",
+    "application_id"
+  };
+  int ii;
+
+  /* Truncate the output database to 0 pages in size. This is done by
+  ** opening a new, empty, temp db, then using the backup API to clobber
+  ** any existing output db with a copy of it. */
+  if( p->errCode==SQLITE_OK ){
+    sqlite3 *db2 = 0;
+    int rc = sqlite3_open("", &db2);
+    if( rc!=SQLITE_OK ){
+      recoverDbError(p, db2);
+      return;
+    }
+
+    for(ii=0; ii<(int)(sizeof(aPragma)/sizeof(aPragma[0])); ii++){
+      const char *zPrag = aPragma[ii];
+      sqlite3_stmt *p1 = 0;
+      p1 = recoverPreparePrintf(p, p->dbIn, "PRAGMA %Q.%s", p->zDb, zPrag);
+      if( p->errCode==SQLITE_OK && sqlite3_step(p1)==SQLITE_ROW ){
+        const char *zArg = (const char*)sqlite3_column_text(p1, 0);
+        char *z2 = recoverMPrintf(p, "PRAGMA %s = %Q", zPrag, zArg);
+        recoverSqlCallback(p, z2);
+        recoverExec(p, db2, z2);
+        sqlite3_free(z2);
+        if( zArg==0 ){
+          recoverError(p, SQLITE_NOMEM, 0);
+        }
+      }
+      recoverFinalize(p, p1);
+    }
+    recoverExec(p, db2, "CREATE TABLE t1(a); DROP TABLE t1;");
+
+    if( p->errCode==SQLITE_OK ){
+      sqlite3 *db = p->dbOut;
+      sqlite3_backup *pBackup = sqlite3_backup_init(db, "main", db2, "main");
+      if( pBackup ){
+        sqlite3_backup_step(pBackup, -1);
+        p->errCode = sqlite3_backup_finish(pBackup);
+      }else{
+        recoverDbError(p, db);
+      }
+    }
+
+    sqlite3_close(db2);
+  }
+}
+
+/*
+** This function is a no-op if recover handle p already contains an error
+** (if p->errCode!=SQLITE_OK). A copy of the error code is returned in
+** this case.
+**
+** Otherwise, an attempt is made to open the output database, attach
+** and create the schema of the temporary database used to store
+** intermediate data, and to register all required user functions and
+** virtual table modules with the output handle.
+**
+** If no error occurs, SQLITE_OK is returned. Otherwise, an error code
+** and error message are left in the recover handle and a copy of the
+** error code returned.
+*/
+static int recoverOpenOutput(sqlite3_recover *p){
+  struct Func {
+    const char *zName;
+    int nArg;
+    void (*xFunc)(sqlite3_context*,int,sqlite3_value **);
+  } aFunc[] = {
+    { "getpage", 1, recoverGetPage },
+    { "page_is_used", 1, recoverPageIsUsed },
+    { "read_i32", 2, recoverReadI32 },
+    { "escape_crnl", 1, recoverEscapeCrnl },
+  };
+
+  const int flags = SQLITE_OPEN_URI|SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE;
+  sqlite3 *db = 0;                /* New database handle */
+  int ii;                         /* For iterating through aFunc[] */
+
+  assert( p->dbOut==0 );
+
+  if( sqlite3_open_v2(p->zUri, &db, flags, 0) ){
+    recoverDbError(p, db);
+  }
+
+  /* Register the sqlite_dbdata and sqlite_dbptr virtual table modules.
+  ** These two are registered with the output database handle - this
+  ** module depends on the input handle supporting the sqlite_dbpage
+  ** virtual table only. */
+  if( p->errCode==SQLITE_OK ){
+    p->errCode = sqlite3_dbdata_init(db, 0, 0);
+  }
+
+  /* Register the custom user-functions with the output handle. */
+  for(ii=0; p->errCode==SQLITE_OK && ii<(int)(sizeof(aFunc)/sizeof(aFunc[0])); ii++){
+    p->errCode = sqlite3_create_function(db, aFunc[ii].zName,
+        aFunc[ii].nArg, SQLITE_UTF8, (void*)p, aFunc[ii].xFunc, 0, 0
+    );
+  }
+
+  p->dbOut = db;
+  return p->errCode;
+}
+
+/*
+** Attach the auxiliary database 'recovery' to the output database handle.
+** This temporary database is used during the recovery process and then
+** discarded.
+*/
+static void recoverOpenRecovery(sqlite3_recover *p){
+  char *zSql = recoverMPrintf(p, "ATTACH %Q AS recovery;", p->zStateDb);
+  recoverExec(p, p->dbOut, zSql);
+  recoverExec(p, p->dbOut,
+      "PRAGMA writable_schema = 1;"
+      "CREATE TABLE recovery.map(pgno INTEGER PRIMARY KEY, parent INT);"
+      "CREATE TABLE recovery.schema(type, name, tbl_name, rootpage, sql);"
+  );
+  sqlite3_free(zSql);
+}
+
+
+/*
+** This function is a no-op if recover handle p already contains an error
+** (if p->errCode!=SQLITE_OK).
+**
+** Otherwise, argument zName must be the name of a table that has just been
+** created in the output database. This function queries the output db
+** for the schema of said table, and creates a RecoverTable object to
+** store the schema in memory. The new RecoverTable object is linked into
+** the list at sqlite3_recover.pTblList.
+**
+** Parameter iRoot must be the root page of table zName in the INPUT
+** database.
+*/
+static void recoverAddTable(
+  sqlite3_recover *p,
+  const char *zName,              /* Name of table created in output db */
+  i64 iRoot                       /* Root page of same table in INPUT db */
+){
+  sqlite3_stmt *pStmt = recoverPreparePrintf(p, p->dbOut,
+      "PRAGMA table_xinfo(%Q)", zName
+  );
+
+  if( pStmt ){
+    int iPk = -1;
+    int iBind = 1;
+    RecoverTable *pNew = 0;
+    int nCol = 0;
+    int nName = recoverStrlen(zName);
+    int nByte = 0;
+    while( sqlite3_step(pStmt)==SQLITE_ROW ){
+      nCol++;
+      nByte += (sqlite3_column_bytes(pStmt, 1)+1);
+    }
+    nByte += sizeof(RecoverTable) + nCol*sizeof(RecoverColumn) + nName+1;
+    recoverReset(p, pStmt);
+
+    pNew = recoverMalloc(p, nByte);
+    if( pNew ){
+      int i = 0;
+      int iField = 0;
+      char *csr = 0;
+      pNew->aCol = (RecoverColumn*)&pNew[1];
+      pNew->zTab = csr = (char*)&pNew->aCol[nCol];
+      pNew->nCol = nCol;
+      pNew->iRoot = iRoot;
+      memcpy(csr, zName, nName);
+      csr += nName+1;
+
+      for(i=0; sqlite3_step(pStmt)==SQLITE_ROW; i++){
+        int iPKF = sqlite3_column_int(pStmt, 5);
+        int n = sqlite3_column_bytes(pStmt, 1);
+        const char *z = (const char*)sqlite3_column_text(pStmt, 1);
+        const char *zType = (const char*)sqlite3_column_text(pStmt, 2);
+        int eHidden = sqlite3_column_int(pStmt, 6);
+
+        if( iPk==-1 && iPKF==1 && !sqlite3_stricmp("integer", zType) ) iPk = i;
+        if( iPKF>1 ) iPk = -2;
+        pNew->aCol[i].zCol = csr;
+        pNew->aCol[i].eHidden = eHidden;
+        if( eHidden==RECOVER_EHIDDEN_VIRTUAL ){
+          pNew->aCol[i].iField = -1;
+        }else{
+          pNew->aCol[i].iField = iField++;
+        }
+        if( eHidden!=RECOVER_EHIDDEN_VIRTUAL
+         && eHidden!=RECOVER_EHIDDEN_STORED
+        ){
+          pNew->aCol[i].iBind = iBind++;
+        }
+        memcpy(csr, z, n);
+        csr += (n+1);
+      }
+
+      pNew->pNext = p->pTblList;
+      p->pTblList = pNew;
+      pNew->bIntkey = 1;
+    }
+
+    recoverFinalize(p, pStmt);
+
+    pStmt = recoverPreparePrintf(p, p->dbOut, "PRAGMA index_xinfo(%Q)", zName);
+    while( pStmt && sqlite3_step(pStmt)==SQLITE_ROW ){
+      int iField = sqlite3_column_int(pStmt, 0);
+      int iCol = sqlite3_column_int(pStmt, 1);
+
+      assert( iField<pNew->nCol && iCol<pNew->nCol );
+      pNew->aCol[iCol].iField = iField;
+
+      pNew->bIntkey = 0;
+      iPk = -2;
+    }
+    recoverFinalize(p, pStmt);
+
+    if( p->errCode==SQLITE_OK ){
+      if( iPk>=0 ){
+        pNew->aCol[iPk].bIPK = 1;
+      }else if( pNew->bIntkey ){
pNew->iRowidBind = iBind++; + } + } + } +} + +/* +** This function is called after recoverCacheSchema() has cached those parts +** of the input database schema that could be recovered in temporary table +** "recovery.schema". This function creates in the output database copies +** of all parts of that schema that must be created before the tables can +** be populated. Specifically, this means: +** +** * all tables that are not VIRTUAL, and +** * UNIQUE indexes. +** +** If the recovery handle uses SQL callbacks, then callbacks containing +** the associated "CREATE TABLE" and "CREATE INDEX" statements are made. +** +** Additionally, records are added to the sqlite_schema table of the +** output database for any VIRTUAL tables. The CREATE VIRTUAL TABLE +** records are written directly to sqlite_schema, not actually executed. +** If the handle is in SQL callback mode, then callbacks are invoked +** with equivalent SQL statements. +*/ +static int recoverWriteSchema1(sqlite3_recover *p){ + sqlite3_stmt *pSelect = 0; + sqlite3_stmt *pTblname = 0; + + pSelect = recoverPrepare(p, p->dbOut, + "WITH dbschema(rootpage, name, sql, tbl, isVirtual, isIndex) AS (" + " SELECT rootpage, name, sql, " + " type='table', " + " sql LIKE 'create virtual%'," + " (type='index' AND (sql LIKE '%unique%' OR ?1))" + " FROM recovery.schema" + ")" + "SELECT rootpage, tbl, isVirtual, name, sql" + " FROM dbschema " + " WHERE tbl OR isIndex" + " ORDER BY tbl DESC, name=='sqlite_sequence' DESC" + ); + + pTblname = recoverPrepare(p, p->dbOut, + "SELECT name FROM sqlite_schema " + "WHERE type='table' ORDER BY rowid DESC LIMIT 1" + ); + + if( pSelect ){ + sqlite3_bind_int(pSelect, 1, p->bSlowIndexes); + while( sqlite3_step(pSelect)==SQLITE_ROW ){ + i64 iRoot = sqlite3_column_int64(pSelect, 0); + int bTable = sqlite3_column_int(pSelect, 1); + int bVirtual = sqlite3_column_int(pSelect, 2); + const char *zName = (const char*)sqlite3_column_text(pSelect, 3); + const char *zSql = (const char*)sqlite3_column_text(pSelect, 4); + char *zFree = 0; + int rc = SQLITE_OK; + + if( bVirtual ){ + zSql = (const char*)(zFree = recoverMPrintf(p, + "INSERT INTO sqlite_schema VALUES('table', %Q, %Q, 0, %Q)", + zName, zName, zSql + )); + } + rc = sqlite3_exec(p->dbOut, zSql, 0, 0, 0); + if( rc==SQLITE_OK ){ + recoverSqlCallback(p, zSql); + if( bTable && !bVirtual ){ + if( SQLITE_ROW==sqlite3_step(pTblname) ){ + const char *zTbl = (const char*)sqlite3_column_text(pTblname, 0); + recoverAddTable(p, zTbl, iRoot); + } + recoverReset(p, pTblname); + } + }else if( rc!=SQLITE_ERROR ){ + recoverDbError(p, p->dbOut); + } + sqlite3_free(zFree); + } + } + recoverFinalize(p, pSelect); + recoverFinalize(p, pTblname); + + return p->errCode; +} + +/* +** This function is called after the output database has been populated. It +** adds all recovered schema elements that were not created in the output +** database by recoverWriteSchema1() - everything except for tables and +** UNIQUE indexes. Specifically: +** +** * views, +** * triggers, +** * non-UNIQUE indexes. +** +** If the recover handle is in SQL callback mode, then equivalent callbacks +** are issued to create the schema elements. +*/ +static int recoverWriteSchema2(sqlite3_recover *p){ + sqlite3_stmt *pSelect = 0; + + pSelect = recoverPrepare(p, p->dbOut, + p->bSlowIndexes ? 
+ "SELECT rootpage, sql FROM recovery.schema " + " WHERE type!='table' AND type!='index'" + : + "SELECT rootpage, sql FROM recovery.schema " + " WHERE type!='table' AND (type!='index' OR sql NOT LIKE '%unique%')" + ); + + if( pSelect ){ + while( sqlite3_step(pSelect)==SQLITE_ROW ){ + const char *zSql = (const char*)sqlite3_column_text(pSelect, 1); + int rc = sqlite3_exec(p->dbOut, zSql, 0, 0, 0); + if( rc==SQLITE_OK ){ + recoverSqlCallback(p, zSql); + }else if( rc!=SQLITE_ERROR ){ + recoverDbError(p, p->dbOut); + } + } + } + recoverFinalize(p, pSelect); + + return p->errCode; +} + +/* +** This function is a no-op if recover handle p already contains an error +** (if p->errCode!=SQLITE_OK). In this case it returns NULL. +** +** Otherwise, if the recover handle is configured to create an output +** database (was created by sqlite3_recover_init()), then this function +** prepares and returns an SQL statement to INSERT a new record into table +** pTab, assuming the first nField fields of a record extracted from disk +** are valid. +** +** For example, if table pTab is: +** +** CREATE TABLE name(a, b GENERATED ALWAYS AS (a+1) STORED, c, d, e); +** +** And nField is 4, then the SQL statement prepared and returned is: +** +** INSERT INTO (a, c, d) VALUES (?1, ?2, ?3); +** +** In this case even though 4 values were extracted from the input db, +** only 3 are written to the output, as the generated STORED column +** cannot be written. +** +** If the recover handle is in SQL callback mode, then the SQL statement +** prepared is such that evaluating it returns a single row containing +** a single text value - itself an SQL statement similar to the above, +** except with SQL literals in place of the variables. For example: +** +** SELECT 'INSERT INTO (a, c, d) VALUES (' +** || quote(?1) || ', ' +** || quote(?2) || ', ' +** || quote(?3) || ')'; +** +** In either case, it is the responsibility of the caller to eventually +** free the statement handle using sqlite3_finalize(). +*/ +static sqlite3_stmt *recoverInsertStmt( + sqlite3_recover *p, + RecoverTable *pTab, + int nField +){ + sqlite3_stmt *pRet = 0; + const char *zSep = ""; + const char *zSqlSep = ""; + char *zSql = 0; + char *zFinal = 0; + char *zBind = 0; + int ii; + int bSql = p->xSql ? 
1 : 0;
+
+  if( nField<=0 ) return 0;
+
+  assert( nField<=pTab->nCol );
+
+  zSql = recoverMPrintf(p, "INSERT OR IGNORE INTO %Q(", pTab->zTab);
+
+  if( pTab->iRowidBind ){
+    assert( pTab->bIntkey );
+    zSql = recoverMPrintf(p, "%z_rowid_", zSql);
+    if( bSql ){
+      zBind = recoverMPrintf(p, "%zquote(?%d)", zBind, pTab->iRowidBind);
+    }else{
+      zBind = recoverMPrintf(p, "%z?%d", zBind, pTab->iRowidBind);
+    }
+    zSqlSep = "||', '||";
+    zSep = ", ";
+  }
+
+  for(ii=0; ii<nField; ii++){
+    int eHidden = pTab->aCol[ii].eHidden;
+    if( eHidden!=RECOVER_EHIDDEN_VIRTUAL
+     && eHidden!=RECOVER_EHIDDEN_STORED
+    ){
+      assert( pTab->aCol[ii].iField>=0 && pTab->aCol[ii].iBind>=1 );
+      zSql = recoverMPrintf(p, "%z%s%Q", zSql, zSep, pTab->aCol[ii].zCol);
+
+      if( bSql ){
+        zBind = recoverMPrintf(p,
+            "%z%sescape_crnl(quote(?%d))", zBind, zSqlSep, pTab->aCol[ii].iBind
+        );
+        zSqlSep = "||', '||";
+      }else{
+        zBind = recoverMPrintf(p, "%z%s?%d", zBind, zSep, pTab->aCol[ii].iBind);
+      }
+      zSep = ", ";
+    }
+  }
+
+  if( bSql ){
+    zFinal = recoverMPrintf(p, "SELECT %Q || ') VALUES (' || %s || ')'",
+        zSql, zBind
+    );
+  }else{
+    zFinal = recoverMPrintf(p, "%s) VALUES (%s)", zSql, zBind);
+  }
+
+  pRet = recoverPrepare(p, p->dbOut, zFinal);
+  sqlite3_free(zSql);
+  sqlite3_free(zBind);
+  sqlite3_free(zFinal);
+
+  return pRet;
+}
+
+
+/*
+** Search the list of RecoverTable objects at p->pTblList for one that
+** has root page iRoot in the input database. If such an object is found,
+** return a pointer to it. Otherwise, return NULL.
+*/
+static RecoverTable *recoverFindTable(sqlite3_recover *p, u32 iRoot){
+  RecoverTable *pRet = 0;
+  for(pRet=p->pTblList; pRet && pRet->iRoot!=iRoot; pRet=pRet->pNext);
+  return pRet;
+}
+
+/*
+** This function attempts to create a lost and found table within the
+** output db. If successful, it returns a pointer to a buffer containing
+** the name of the new table. It is the responsibility of the caller to
+** eventually free this buffer using sqlite3_free().
+**
+** If an error occurs, NULL is returned and an error code and error
+** message left in the recover handle.
+*/
+static char *recoverLostAndFoundCreate(
+  sqlite3_recover *p,             /* Recover object */
+  int nField                      /* Number of column fields in new table */
+){
+  char *zTbl = 0;
+  sqlite3_stmt *pProbe = 0;
+  int ii = 0;
+
+  pProbe = recoverPrepare(p, p->dbOut,
+      "SELECT 1 FROM sqlite_schema WHERE name=?"
+  );
+  for(ii=-1; zTbl==0 && p->errCode==SQLITE_OK && ii<1000; ii++){
+    int bFail = 0;
+    if( ii<0 ){
+      zTbl = recoverMPrintf(p, "%s", p->zLostAndFound);
+    }else{
+      zTbl = recoverMPrintf(p, "%s_%d", p->zLostAndFound, ii);
+    }
+
+    if( p->errCode==SQLITE_OK ){
+      sqlite3_bind_text(pProbe, 1, zTbl, -1, SQLITE_STATIC);
+      if( SQLITE_ROW==sqlite3_step(pProbe) ){
+        bFail = 1;
+      }
+      recoverReset(p, pProbe);
+    }
+
+    if( bFail ){
+      sqlite3_clear_bindings(pProbe);
+      sqlite3_free(zTbl);
+      zTbl = 0;
+    }
+  }
+  recoverFinalize(p, pProbe);
+
+  if( zTbl ){
+    const char *zSep = 0;
+    char *zField = 0;
+    char *zSql = 0;
+
+    zSep = "rootpgno INTEGER, pgno INTEGER, nfield INTEGER, id INTEGER, ";
+    for(ii=0; p->errCode==SQLITE_OK && ii<nField; ii++){
+      zField = recoverMPrintf(p, "%z%sc%d", zField, zSep, ii);
+      zSep = ", ";
+    }
+
+    zSql = recoverMPrintf(p, "CREATE TABLE %s(%s)", zTbl, zField);
+    sqlite3_free(zField);
+
+    recoverExec(p, p->dbOut, zSql);
+    recoverSqlCallback(p, zSql);
+    sqlite3_free(zSql);
+  }else if( p->errCode==SQLITE_OK ){
+    recoverError(
+        p, SQLITE_ERROR, "failed to create %s output table", p->zLostAndFound
+    );
+  }
+
+  return zTbl;
+}
+
+/*
+** Synthesize and prepare an INSERT statement to write to the lost_and_found
+** table in the output database. The name of the table is zTab, and it has
+** nField c* fields.
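+**
+** For example, with zTab set to "lost_and_found" (a hypothetical name)
+** and nField==2, the statement prepared when no SQL callback is
+** configured is equivalent to:
+**
+**     INSERT INTO lost_and_found VALUES(?, ?, ?, ?, ?, ?)
+**
+** i.e. the four fixed columns (rootpgno, pgno, nfield, id) followed by
+** the two c* columns.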
+*/ +static sqlite3_stmt *recoverLostAndFoundInsert( + sqlite3_recover *p, + const char *zTab, + int nField +){ + int nTotal = nField + 4; + int ii; + char *zBind = 0; + sqlite3_stmt *pRet = 0; + + if( p->xSql==0 ){ + for(ii=0; iidbOut, "INSERT INTO %s VALUES(%s)", zTab, zBind + ); + }else{ + const char *zSep = ""; + for(ii=0; iidbOut, "SELECT 'INSERT INTO %s VALUES(' || %s || ')'", zTab, zBind + ); + } + + sqlite3_free(zBind); + return pRet; +} + +/* +** Input database page iPg contains data that will be written to the +** lost-and-found table of the output database. This function attempts +** to identify the root page of the tree that page iPg belonged to. +** If successful, it sets output variable (*piRoot) to the page number +** of the root page and returns SQLITE_OK. Otherwise, if an error occurs, +** an SQLite error code is returned and the final value of *piRoot +** undefined. +*/ +static int recoverLostAndFoundFindRoot( + sqlite3_recover *p, + i64 iPg, + i64 *piRoot +){ + RecoverStateLAF *pLaf = &p->laf; + + if( pLaf->pFindRoot==0 ){ + pLaf->pFindRoot = recoverPrepare(p, p->dbOut, + "WITH RECURSIVE p(pgno) AS (" + " SELECT ?" + " UNION" + " SELECT parent FROM recovery.map AS m, p WHERE m.pgno=p.pgno" + ") " + "SELECT p.pgno FROM p, recovery.map m WHERE m.pgno=p.pgno " + " AND m.parent IS NULL" + ); + } + if( p->errCode==SQLITE_OK ){ + sqlite3_bind_int64(pLaf->pFindRoot, 1, iPg); + if( sqlite3_step(pLaf->pFindRoot)==SQLITE_ROW ){ + *piRoot = sqlite3_column_int64(pLaf->pFindRoot, 0); + }else{ + *piRoot = iPg; + } + recoverReset(p, pLaf->pFindRoot); + } + return p->errCode; +} + +/* +** Recover data from page iPage of the input database and write it to +** the lost-and-found table in the output database. +*/ +static void recoverLostAndFoundOnePage(sqlite3_recover *p, i64 iPage){ + RecoverStateLAF *pLaf = &p->laf; + sqlite3_value **apVal = pLaf->apVal; + sqlite3_stmt *pPageData = pLaf->pPageData; + sqlite3_stmt *pInsert = pLaf->pInsert; + + int nVal = -1; + int iPrevCell = 0; + i64 iRoot = 0; + int bHaveRowid = 0; + i64 iRowid = 0; + int ii = 0; + + if( recoverLostAndFoundFindRoot(p, iPage, &iRoot) ) return; + sqlite3_bind_int64(pPageData, 1, iPage); + while( p->errCode==SQLITE_OK && SQLITE_ROW==sqlite3_step(pPageData) ){ + int iCell = sqlite3_column_int64(pPageData, 0); + int iField = sqlite3_column_int64(pPageData, 1); + + if( iPrevCell!=iCell && nVal>=0 ){ + /* Insert the new row */ + sqlite3_bind_int64(pInsert, 1, iRoot); /* rootpgno */ + sqlite3_bind_int64(pInsert, 2, iPage); /* pgno */ + sqlite3_bind_int(pInsert, 3, nVal); /* nfield */ + if( bHaveRowid ){ + sqlite3_bind_int64(pInsert, 4, iRowid); /* id */ + } + for(ii=0; iinMaxField ){ + sqlite3_value *pVal = sqlite3_column_value(pPageData, 2); + apVal[iField] = sqlite3_value_dup(pVal); + assert( iField==nVal || (nVal==-1 && iField==0) ); + nVal = iField+1; + if( apVal[iField]==0 ){ + recoverError(p, SQLITE_NOMEM, 0); + } + } + + iPrevCell = iCell; + } + recoverReset(p, pPageData); + + for(ii=0; iilaf; + if( p->errCode==SQLITE_OK ){ + if( pLaf->pInsert==0 ){ + return SQLITE_DONE; + }else{ + if( p->errCode==SQLITE_OK ){ + int res = sqlite3_step(pLaf->pAllPage); + if( res==SQLITE_ROW ){ + i64 iPage = sqlite3_column_int64(pLaf->pAllPage, 0); + if( recoverBitmapQuery(pLaf->pUsed, iPage)==0 ){ + recoverLostAndFoundOnePage(p, iPage); + } + }else{ + recoverReset(p, pLaf->pAllPage); + return SQLITE_DONE; + } + } + } + } + return SQLITE_OK; +} + +/* +** Initialize resources required in RECOVER_STATE_LOSTANDFOUND3 +** state - during which 
the lost-and-found table of the output database +** is populated with recovered data that can not be assigned to any +** recovered schema object. +*/ +static void recoverLostAndFound3Init(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + + if( pLaf->nMaxField>0 ){ + char *zTab = 0; /* Name of lost_and_found table */ + + zTab = recoverLostAndFoundCreate(p, pLaf->nMaxField); + pLaf->pInsert = recoverLostAndFoundInsert(p, zTab, pLaf->nMaxField); + sqlite3_free(zTab); + + pLaf->pAllPage = recoverPreparePrintf(p, p->dbOut, + "WITH RECURSIVE seq(ii) AS (" + " SELECT 1 UNION ALL SELECT ii+1 FROM seq WHERE ii<%lld" + ")" + "SELECT ii FROM seq" , p->laf.nPg + ); + pLaf->pPageData = recoverPrepare(p, p->dbOut, + "SELECT cell, field, value " + "FROM sqlite_dbdata('getpage()') d WHERE d.pgno=? " + "UNION ALL " + "SELECT -1, -1, -1" + ); + + pLaf->apVal = (sqlite3_value**)recoverMalloc(p, + pLaf->nMaxField*sizeof(sqlite3_value*) + ); + } +} + +/* +** Initialize resources required in RECOVER_STATE_WRITING state - during which +** tables recovered from the schema of the input database are populated with +** recovered data. +*/ +static int recoverWriteDataInit(sqlite3_recover *p){ + RecoverStateW1 *p1 = &p->w1; + RecoverTable *pTbl = 0; + int nByte = 0; + + /* Figure out the maximum number of columns for any table in the schema */ + assert( p1->nMax==0 ); + for(pTbl=p->pTblList; pTbl; pTbl=pTbl->pNext){ + if( pTbl->nCol>p1->nMax ) p1->nMax = pTbl->nCol; + } + + /* Allocate an array of (sqlite3_value*) in which to accumulate the values + ** that will be written to the output database in a single row. */ + nByte = sizeof(sqlite3_value*) * (p1->nMax+1); + p1->apVal = (sqlite3_value**)recoverMalloc(p, nByte); + if( p1->apVal==0 ) return p->errCode; + + /* Prepare the SELECT to loop through schema tables (pTbls) and the SELECT + ** to loop through cells that appear to belong to a single table (pSel). */ + p1->pTbls = recoverPrepare(p, p->dbOut, + "SELECT rootpage FROM recovery.schema " + " WHERE type='table' AND (sql NOT LIKE 'create virtual%')" + " ORDER BY (tbl_name='sqlite_sequence') ASC" + ); + p1->pSel = recoverPrepare(p, p->dbOut, + "WITH RECURSIVE pages(page) AS (" + " SELECT ?1" + " UNION" + " SELECT child FROM sqlite_dbptr('getpage()'), pages " + " WHERE pgno=page" + ") " + "SELECT page, cell, field, value " + "FROM sqlite_dbdata('getpage()') d, pages p WHERE p.page=d.pgno " + "UNION ALL " + "SELECT 0, 0, 0, 0" + ); + + return p->errCode; +} + +/* +** Clean up resources allocated by recoverWriteDataInit() (stuff in +** sqlite3_recover.w1). +*/ +static void recoverWriteDataCleanup(sqlite3_recover *p){ + RecoverStateW1 *p1 = &p->w1; + int ii; + for(ii=0; iinVal; ii++){ + sqlite3_value_free(p1->apVal[ii]); + } + sqlite3_free(p1->apVal); + recoverFinalize(p, p1->pInsert); + recoverFinalize(p, p1->pTbls); + recoverFinalize(p, p1->pSel); + memset(p1, 0, sizeof(*p1)); +} + +/* +** Perform one step (sqlite3_recover_step()) of work for the connection +** passed as the only argument, which is guaranteed to be in +** RECOVER_STATE_WRITING state - during which tables recovered from the +** schema of the input database are populated with recovered data. 
+*/ +static int recoverWriteDataStep(sqlite3_recover *p){ + RecoverStateW1 *p1 = &p->w1; + sqlite3_stmt *pSel = p1->pSel; + sqlite3_value **apVal = p1->apVal; + + if( p->errCode==SQLITE_OK && p1->pTab==0 ){ + if( sqlite3_step(p1->pTbls)==SQLITE_ROW ){ + i64 iRoot = sqlite3_column_int64(p1->pTbls, 0); + p1->pTab = recoverFindTable(p, iRoot); + + recoverFinalize(p, p1->pInsert); + p1->pInsert = 0; + + /* If this table is unknown, return early. The caller will invoke this + ** function again and it will move on to the next table. */ + if( p1->pTab==0 ) return p->errCode; + + /* If this is the sqlite_sequence table, delete any rows added by + ** earlier INSERT statements on tables with AUTOINCREMENT primary + ** keys before recovering its contents. The p1->pTbls SELECT statement + ** is rigged to deliver "sqlite_sequence" last of all, so we don't + ** worry about it being modified after it is recovered. */ + if( sqlite3_stricmp("sqlite_sequence", p1->pTab->zTab)==0 ){ + recoverExec(p, p->dbOut, "DELETE FROM sqlite_sequence"); + recoverSqlCallback(p, "DELETE FROM sqlite_sequence"); + } + + /* Bind the root page of this table within the original database to + ** SELECT statement p1->pSel. The SELECT statement will then iterate + ** through cells that look like they belong to table pTab. */ + sqlite3_bind_int64(pSel, 1, iRoot); + + p1->nVal = 0; + p1->bHaveRowid = 0; + p1->iPrevPage = -1; + p1->iPrevCell = -1; + }else{ + return SQLITE_DONE; + } + } + assert( p->errCode!=SQLITE_OK || p1->pTab ); + + if( p->errCode==SQLITE_OK && sqlite3_step(pSel)==SQLITE_ROW ){ + RecoverTable *pTab = p1->pTab; + + i64 iPage = sqlite3_column_int64(pSel, 0); + int iCell = sqlite3_column_int(pSel, 1); + int iField = sqlite3_column_int(pSel, 2); + sqlite3_value *pVal = sqlite3_column_value(pSel, 3); + int bNewCell = (p1->iPrevPage!=iPage || p1->iPrevCell!=iCell); + + assert( bNewCell==0 || (iField==-1 || iField==0) ); + assert( bNewCell || iField==p1->nVal || p1->nVal==pTab->nCol ); + + if( bNewCell ){ + int ii = 0; + if( p1->nVal>=0 ){ + if( p1->pInsert==0 || p1->nVal!=p1->nInsert ){ + recoverFinalize(p, p1->pInsert); + p1->pInsert = recoverInsertStmt(p, pTab, p1->nVal); + p1->nInsert = p1->nVal; + } + if( p1->nVal>0 ){ + sqlite3_stmt *pInsert = p1->pInsert; + for(ii=0; iinCol; ii++){ + RecoverColumn *pCol = &pTab->aCol[ii]; + int iBind = pCol->iBind; + if( iBind>0 ){ + if( pCol->bIPK ){ + sqlite3_bind_int64(pInsert, iBind, p1->iRowid); + }else if( pCol->iFieldnVal ){ + recoverBindValue(p, pInsert, iBind, apVal[pCol->iField]); + } + } + } + if( p->bRecoverRowid && pTab->iRowidBind>0 && p1->bHaveRowid ){ + sqlite3_bind_int64(pInsert, pTab->iRowidBind, p1->iRowid); + } + if( SQLITE_ROW==sqlite3_step(pInsert) ){ + const char *z = (const char*)sqlite3_column_text(pInsert, 0); + recoverSqlCallback(p, z); + } + recoverReset(p, pInsert); + assert( p->errCode || pInsert ); + if( pInsert ) sqlite3_clear_bindings(pInsert); + } + } + + for(ii=0; iinVal; ii++){ + sqlite3_value_free(apVal[ii]); + apVal[ii] = 0; + } + p1->nVal = -1; + p1->bHaveRowid = 0; + } + + if( iPage!=0 ){ + if( iField<0 ){ + p1->iRowid = sqlite3_column_int64(pSel, 3); + assert( p1->nVal==-1 ); + p1->nVal = 0; + p1->bHaveRowid = 1; + }else if( iFieldnCol ){ + assert( apVal[iField]==0 ); + apVal[iField] = sqlite3_value_dup( pVal ); + if( apVal[iField]==0 ){ + recoverError(p, SQLITE_NOMEM, 0); + } + p1->nVal = iField+1; + } + p1->iPrevCell = iCell; + p1->iPrevPage = iPage; + } + }else{ + recoverReset(p, pSel); + p1->pTab = 0; + } + + return p->errCode; +} + +/* 
+** Initialize resources required by sqlite3_recover_step() in +** RECOVER_STATE_LOSTANDFOUND1 state - during which the set of pages not +** already allocated to a recovered schema element is determined. +*/ +static void recoverLostAndFound1Init(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + sqlite3_stmt *pStmt = 0; + + assert( p->laf.pUsed==0 ); + pLaf->nPg = recoverPageCount(p); + pLaf->pUsed = recoverBitmapAlloc(p, pLaf->nPg); + + /* Prepare a statement to iterate through all pages that are part of any tree + ** in the recoverable part of the input database schema to the bitmap. And, + ** if !p->bFreelistCorrupt, add all pages that appear to be part of the + ** freelist. */ + pStmt = recoverPrepare( + p, p->dbOut, + "WITH trunk(pgno) AS (" + " SELECT read_i32(getpage(1), 8) AS x WHERE x>0" + " UNION" + " SELECT read_i32(getpage(trunk.pgno), 0) AS x FROM trunk WHERE x>0" + ")," + "trunkdata(pgno, data) AS (" + " SELECT pgno, getpage(pgno) FROM trunk" + ")," + "freelist(data, n, freepgno) AS (" + " SELECT data, min(16384, read_i32(data, 1)-1), pgno FROM trunkdata" + " UNION ALL" + " SELECT data, n-1, read_i32(data, 2+n) FROM freelist WHERE n>=0" + ")," + "" + "roots(r) AS (" + " SELECT 1 UNION ALL" + " SELECT rootpage FROM recovery.schema WHERE rootpage>0" + ")," + "used(page) AS (" + " SELECT r FROM roots" + " UNION" + " SELECT child FROM sqlite_dbptr('getpage()'), used " + " WHERE pgno=page" + ") " + "SELECT page FROM used" + " UNION ALL " + "SELECT freepgno FROM freelist WHERE NOT ?" + ); + if( pStmt ) sqlite3_bind_int(pStmt, 1, p->bFreelistCorrupt); + pLaf->pUsedPages = pStmt; +} + +/* +** Perform one step (sqlite3_recover_step()) of work for the connection +** passed as the only argument, which is guaranteed to be in +** RECOVER_STATE_LOSTANDFOUND1 state - during which the set of pages not +** already allocated to a recovered schema element is determined. +*/ +static int recoverLostAndFound1Step(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + int rc = p->errCode; + if( rc==SQLITE_OK ){ + rc = sqlite3_step(pLaf->pUsedPages); + if( rc==SQLITE_ROW ){ + i64 iPg = sqlite3_column_int64(pLaf->pUsedPages, 0); + recoverBitmapSet(pLaf->pUsed, iPg); + rc = SQLITE_OK; + }else{ + recoverFinalize(p, pLaf->pUsedPages); + pLaf->pUsedPages = 0; + } + } + return rc; +} + +/* +** Initialize resources required by RECOVER_STATE_LOSTANDFOUND2 +** state - during which the pages identified in RECOVER_STATE_LOSTANDFOUND1 +** are sorted into sets that likely belonged to the same database tree. +*/ +static void recoverLostAndFound2Init(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + + assert( p->laf.pAllAndParent==0 ); + assert( p->laf.pMapInsert==0 ); + assert( p->laf.pMaxField==0 ); + assert( p->laf.nMaxField==0 ); + + pLaf->pMapInsert = recoverPrepare(p, p->dbOut, + "INSERT OR IGNORE INTO recovery.map(pgno, parent) VALUES(?, ?)" + ); + pLaf->pAllAndParent = recoverPreparePrintf(p, p->dbOut, + "WITH RECURSIVE seq(ii) AS (" + " SELECT 1 UNION ALL SELECT ii+1 FROM seq WHERE ii<%lld" + ")" + "SELECT pgno, child FROM sqlite_dbptr('getpage()') " + " UNION ALL " + "SELECT NULL, ii FROM seq", p->laf.nPg + ); + pLaf->pMaxField = recoverPreparePrintf(p, p->dbOut, + "SELECT max(field)+1 FROM sqlite_dbdata('getpage') WHERE pgno = ?" 
+ ); +} + +/* +** Perform one step (sqlite3_recover_step()) of work for the connection +** passed as the only argument, which is guaranteed to be in +** RECOVER_STATE_LOSTANDFOUND2 state - during which the pages identified +** in RECOVER_STATE_LOSTANDFOUND1 are sorted into sets that likely belonged +** to the same database tree. +*/ +static int recoverLostAndFound2Step(sqlite3_recover *p){ + RecoverStateLAF *pLaf = &p->laf; + if( p->errCode==SQLITE_OK ){ + int res = sqlite3_step(pLaf->pAllAndParent); + if( res==SQLITE_ROW ){ + i64 iChild = sqlite3_column_int(pLaf->pAllAndParent, 1); + if( recoverBitmapQuery(pLaf->pUsed, iChild)==0 ){ + sqlite3_bind_int64(pLaf->pMapInsert, 1, iChild); + sqlite3_bind_value(pLaf->pMapInsert, 2, + sqlite3_column_value(pLaf->pAllAndParent, 0) + ); + sqlite3_step(pLaf->pMapInsert); + recoverReset(p, pLaf->pMapInsert); + sqlite3_bind_int64(pLaf->pMaxField, 1, iChild); + if( SQLITE_ROW==sqlite3_step(pLaf->pMaxField) ){ + int nMax = sqlite3_column_int(pLaf->pMaxField, 0); + if( nMax>pLaf->nMaxField ) pLaf->nMaxField = nMax; + } + recoverReset(p, pLaf->pMaxField); + } + }else{ + recoverFinalize(p, pLaf->pAllAndParent); + pLaf->pAllAndParent =0; + return SQLITE_DONE; + } + } + return p->errCode; +} + +/* +** Free all resources allocated as part of sqlite3_recover_step() calls +** in one of the RECOVER_STATE_LOSTANDFOUND[123] states. +*/ +static void recoverLostAndFoundCleanup(sqlite3_recover *p){ + recoverBitmapFree(p->laf.pUsed); + p->laf.pUsed = 0; + sqlite3_finalize(p->laf.pUsedPages); + sqlite3_finalize(p->laf.pAllAndParent); + sqlite3_finalize(p->laf.pMapInsert); + sqlite3_finalize(p->laf.pMaxField); + sqlite3_finalize(p->laf.pFindRoot); + sqlite3_finalize(p->laf.pInsert); + sqlite3_finalize(p->laf.pAllPage); + sqlite3_finalize(p->laf.pPageData); + p->laf.pUsedPages = 0; + p->laf.pAllAndParent = 0; + p->laf.pMapInsert = 0; + p->laf.pMaxField = 0; + p->laf.pFindRoot = 0; + p->laf.pInsert = 0; + p->laf.pAllPage = 0; + p->laf.pPageData = 0; + sqlite3_free(p->laf.apVal); + p->laf.apVal = 0; +} + +/* +** Free all resources allocated as part of sqlite3_recover_step() calls. +*/ +static void recoverFinalCleanup(sqlite3_recover *p){ + RecoverTable *pTab = 0; + RecoverTable *pNext = 0; + + recoverWriteDataCleanup(p); + recoverLostAndFoundCleanup(p); + + for(pTab=p->pTblList; pTab; pTab=pNext){ + pNext = pTab->pNext; + sqlite3_free(pTab); + } + p->pTblList = 0; + sqlite3_finalize(p->pGetPage); + p->pGetPage = 0; + sqlite3_file_control(p->dbIn, p->zDb, SQLITE_FCNTL_RESET_CACHE, 0); + + { +#ifndef NDEBUG + int res = +#endif + sqlite3_close(p->dbOut); + assert( res==SQLITE_OK ); + } + p->dbOut = 0; +} + +/* +** Decode and return an unsigned 16-bit big-endian integer value from +** buffer a[]. +*/ +static u32 recoverGetU16(const u8 *a){ + return (((u32)a[0])<<8) + ((u32)a[1]); +} + +/* +** Decode and return an unsigned 32-bit big-endian integer value from +** buffer a[]. +*/ +static u32 recoverGetU32(const u8 *a){ + return (((u32)a[0])<<24) + (((u32)a[1])<<16) + (((u32)a[2])<<8) + ((u32)a[3]); +} + +/* +** Decode an SQLite varint from buffer a[]. Write the decoded value to (*pVal) +** and return the number of bytes consumed. +*/ +static int recoverGetVarint(const u8 *a, i64 *pVal){ + sqlite3_uint64 u = 0; + int i; + for(i=0; i<8; i++){ + u = (u<<7) + (a[i]&0x7f); + if( (a[i]&0x80)==0 ){ *pVal = (sqlite3_int64)u; return i+1; } + } + u = (u<<8) + (a[i]&0xff); + *pVal = (sqlite3_int64)u; + return 9; +} + +/* +** The second argument points to a buffer n bytes in size. 
If this buffer +** or a prefix thereof appears to contain a well-formed SQLite b-tree page, +** return the page-size in bytes. Otherwise, if the buffer does not +** appear to contain a well-formed b-tree page, return 0. +*/ +static int recoverIsValidPage(u8 *aTmp, const u8 *a, int n){ + u8 *aUsed = aTmp; + int nFrag = 0; + int nActual = 0; + int iFree = 0; + int nCell = 0; /* Number of cells on page */ + int iCellOff = 0; /* Offset of cell array in page */ + int iContent = 0; + int eType = 0; + int ii = 0; + + eType = (int)a[0]; + if( eType!=0x02 && eType!=0x05 && eType!=0x0A && eType!=0x0D ) return 0; + + iFree = (int)recoverGetU16(&a[1]); + nCell = (int)recoverGetU16(&a[3]); + iContent = (int)recoverGetU16(&a[5]); + if( iContent==0 ) iContent = 65536; + nFrag = (int)a[7]; + + if( iContent>n ) return 0; + + memset(aUsed, 0, n); + memset(aUsed, 0xFF, iContent); + + /* Follow the free-list. This is the same format for all b-tree pages. */ + if( iFree && iFree<=iContent ) return 0; + while( iFree ){ + int iNext = 0; + int nByte = 0; + if( iFree>(n-4) ) return 0; + iNext = recoverGetU16(&a[iFree]); + nByte = recoverGetU16(&a[iFree+2]); + if( iFree+nByte>n ) return 0; + if( iNext && iNextiContent ) return 0; + for(ii=0; iin ){ + return 0; + } + if( eType==0x05 || eType==0x02 ) nByte += 4; + nByte += recoverGetVarint(&a[iOff+nByte], &nPayload); + if( eType==0x0D ){ + i64 dummy = 0; + nByte += recoverGetVarint(&a[iOff+nByte], &dummy); + } + if( eType!=0x05 ){ + int X = (eType==0x0D) ? n-35 : (((n-12)*64/255)-23); + int M = ((n-12)*32/255)-23; + int K = M+((nPayload-M)%(n-4)); + + if( nPayloadn ){ + return 0; + } + for(iByte=iOff; iByte<(iOff+nByte); iByte++){ + if( aUsed[iByte]!=0 ){ + return 0; + } + aUsed[iByte] = 0xFF; + } + } + + nActual = 0; + for(ii=0; iipMethods!=&recover_methods ); + return pFd->pMethods->xClose(pFd); +} + +/* +** Write value v to buffer a[] as a 16-bit big-endian unsigned integer. +*/ +static void recoverPutU16(u8 *a, u32 v){ + a[0] = (v>>8) & 0x00FF; + a[1] = (v>>0) & 0x00FF; +} + +/* +** Write value v to buffer a[] as a 32-bit big-endian unsigned integer. +*/ +static void recoverPutU32(u8 *a, u32 v){ + a[0] = (v>>24) & 0x00FF; + a[1] = (v>>16) & 0x00FF; + a[2] = (v>>8) & 0x00FF; + a[3] = (v>>0) & 0x00FF; +} + +/* +** Detect the page-size of the database opened by file-handle pFd by +** searching the first part of the file for a well-formed SQLite b-tree +** page. If parameter nReserve is non-zero, then as well as searching for +** a b-tree page with zero reserved bytes, this function searches for one +** with nReserve reserved bytes at the end of it. +** +** If successful, set variable p->detected_pgsz to the detected page-size +** in bytes and return SQLITE_OK. Or, if no error occurs but no valid page +** can be found, return SQLITE_OK but leave p->detected_pgsz set to 0. Or, +** if an error occurs (e.g. an IO or OOM error), then an SQLite error code +** is returned. The final value of p->detected_pgsz is undefined in this +** case. 
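+**
+** The search is bounded: at most the first four 64KiB blocks of the file
+** are read, and only candidate page-sizes between 512 and 65536 bytes are
+** tried. For example, a database created with 4096 byte pages and no
+** reserved space is normally detected as soon as a single intact 4096
+** byte b-tree page is found within that region.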
+*/ +static int recoverVfsDetectPagesize( + sqlite3_recover *p, /* Recover handle */ + sqlite3_file *pFd, /* File-handle open on input database */ + u32 nReserve, /* Possible nReserve value */ + i64 nSz /* Size of database file in bytes */ +){ + int rc = SQLITE_OK; + const int nMin = 512; + const int nMax = 65536; + const int nMaxBlk = 4; + u32 pgsz = 0; + int iBlk = 0; + u8 *aPg = 0; + u8 *aTmp = 0; + int nBlk = 0; + + aPg = (u8*)sqlite3_malloc(2*nMax); + if( aPg==0 ) return SQLITE_NOMEM; + aTmp = &aPg[nMax]; + + nBlk = (nSz+nMax-1)/nMax; + if( nBlk>nMaxBlk ) nBlk = nMaxBlk; + + do { + for(iBlk=0; rc==SQLITE_OK && iBlk=((iBlk+1)*nMax)) ? nMax : (nSz % nMax); + memset(aPg, 0, nMax); + rc = pFd->pMethods->xRead(pFd, aPg, nByte, iBlk*nMax); + if( rc==SQLITE_OK ){ + int pgsz2; + for(pgsz2=(pgsz ? pgsz*2 : nMin); pgsz2<=nMax; pgsz2=pgsz2*2){ + int iOff; + for(iOff=0; iOff(u32)p->detected_pgsz ){ + p->detected_pgsz = pgsz; + p->nReserve = nReserve; + } + if( nReserve==0 ) break; + nReserve = 0; + }while( 1 ); + + p->detected_pgsz = pgsz; + sqlite3_free(aPg); + return rc; +} + +/* +** The xRead() method of the wrapper VFS. This is used to intercept calls +** to read page 1 of the input database. +*/ +static int recoverVfsRead(sqlite3_file *pFd, void *aBuf, int nByte, i64 iOff){ + int rc = SQLITE_OK; + if( pFd->pMethods==&recover_methods ){ + pFd->pMethods = recover_g.pMethods; + rc = pFd->pMethods->xRead(pFd, aBuf, nByte, iOff); + if( nByte==16 ){ + sqlite3_randomness(16, aBuf); + }else + if( rc==SQLITE_OK && iOff==0 && nByte>=108 ){ + /* Ensure that the database has a valid header file. The only fields + ** that really matter to recovery are: + ** + ** + Database page size (16-bits at offset 16) + ** + Size of db in pages (32-bits at offset 28) + ** + Database encoding (32-bits at offset 56) + ** + ** Also preserved are: + ** + ** + first freelist page (32-bits at offset 32) + ** + size of freelist (32-bits at offset 36) + ** + the wal-mode flags (16-bits at offset 18) + ** + ** We also try to preserve the auto-vacuum, incr-value, user-version + ** and application-id fields - all 32 bit quantities at offsets + ** 52, 60, 64 and 68. All other fields are set to known good values. + ** + ** Byte offset 105 should also contain the page-size as a 16-bit + ** integer. 
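+      **
+      ** Note that these 16-bit page-size fields store a 65536 byte page
+      ** size as the value 1, which is why the code below maps a stored
+      ** value of 0x01 to 65536 when reading, and writes 1 back when the
+      ** effective page-size is 65536.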
+      */
+      const int aPreserve[] = {32, 36, 52, 60, 64, 68};
+      u8 aHdr[108] = {
+        0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66,
+        0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00,
+        0xFF, 0xFF, 0x01, 0x01, 0x00, 0x40, 0x20, 0x20,
+        0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+        0x00, 0x00, 0x10, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x2e, 0x5b, 0x30,
+
+        0x0D, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00
+      };
+      u8 *a = (u8*)aBuf;
+
+      u32 pgsz = recoverGetU16(&a[16]);
+      u32 nReserve = a[20];
+      u32 enc = recoverGetU32(&a[56]);
+      u32 dbsz = 0;
+      i64 dbFileSize = 0;
+      int ii;
+      sqlite3_recover *p = recover_g.p;
+
+      if( pgsz==0x01 ) pgsz = 65536;
+      rc = pFd->pMethods->xFileSize(pFd, &dbFileSize);
+
+      if( rc==SQLITE_OK && p->detected_pgsz==0 ){
+        rc = recoverVfsDetectPagesize(p, pFd, nReserve, dbFileSize);
+      }
+      if( p->detected_pgsz ){
+        pgsz = p->detected_pgsz;
+        nReserve = p->nReserve;
+      }
+
+      if( pgsz ){
+        dbsz = dbFileSize / pgsz;
+      }
+      if( enc!=SQLITE_UTF8 && enc!=SQLITE_UTF16BE && enc!=SQLITE_UTF16LE ){
+        enc = SQLITE_UTF8;
+      }
+
+      sqlite3_free(p->pPage1Cache);
+      p->pPage1Cache = 0;
+      p->pPage1Disk = 0;
+
+      p->pgsz = nByte;
+      p->pPage1Cache = (u8*)recoverMalloc(p, nByte*2);
+      if( p->pPage1Cache ){
+        p->pPage1Disk = &p->pPage1Cache[nByte];
+        memcpy(p->pPage1Disk, aBuf, nByte);
+        aHdr[18] = a[18];
+        aHdr[19] = a[19];
+        recoverPutU32(&aHdr[28], dbsz);
+        recoverPutU32(&aHdr[56], enc);
+        recoverPutU16(&aHdr[105], pgsz-nReserve);
+        if( pgsz==65536 ) pgsz = 1;
+        recoverPutU16(&aHdr[16], pgsz);
+        aHdr[20] = nReserve;
+        for(ii=0; ii<(int)(sizeof(aPreserve)/sizeof(aPreserve[0])); ii++){
+          memcpy(&aHdr[aPreserve[ii]], &a[aPreserve[ii]], 4);
+        }
+        memcpy(aBuf, aHdr, sizeof(aHdr));
+        memset(&((u8*)aBuf)[sizeof(aHdr)], 0, nByte-sizeof(aHdr));
+
+        memcpy(p->pPage1Cache, aBuf, nByte);
+      }else{
+        rc = p->errCode;
+      }
+
+    }
+    pFd->pMethods = &recover_methods;
+  }else{
+    rc = pFd->pMethods->xRead(pFd, aBuf, nByte, iOff);
+  }
+  return rc;
+}
+
+/*
+** Used to make sqlite3_io_methods wrapper methods less verbose.
+*/
+#define RECOVER_VFS_WRAPPER(code)                         \
+  int rc = SQLITE_OK;                                     \
+  if( pFd->pMethods==&recover_methods ){                  \
+    pFd->pMethods = recover_g.pMethods;                   \
+    rc = code;                                            \
+    pFd->pMethods = &recover_methods;                     \
+  }else{                                                  \
+    rc = code;                                            \
+  }                                                       \
+  return rc;
+
+/*
+** Methods of the wrapper VFS. All methods except for xRead() and xClose()
+** simply uninstall the sqlite3_io_methods wrapper, invoke the equivalent
+** method on the lower level VFS, then reinstall the wrapper before returning.
+** Those that return an integer value use the RECOVER_VFS_WRAPPER macro.
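+**
+** For example, recoverVfsSync() below expands to roughly the following
+** (sketch of the macro expansion only):
+**
+**     int rc = SQLITE_OK;
+**     if( pFd->pMethods==&recover_methods ){
+**       pFd->pMethods = recover_g.pMethods;       /* uninstall wrapper */
+**       rc = pFd->pMethods->xSync(pFd, flags);    /* call real method  */
+**       pFd->pMethods = &recover_methods;         /* reinstall wrapper */
+**     }else{
+**       rc = pFd->pMethods->xSync(pFd, flags);
+**     }
+**     return rc;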
+*/ +static int recoverVfsWrite( + sqlite3_file *pFd, const void *aBuf, int nByte, i64 iOff +){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xWrite(pFd, aBuf, nByte, iOff) + ); +} +static int recoverVfsTruncate(sqlite3_file *pFd, sqlite3_int64 size){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xTruncate(pFd, size) + ); +} +static int recoverVfsSync(sqlite3_file *pFd, int flags){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xSync(pFd, flags) + ); +} +static int recoverVfsFileSize(sqlite3_file *pFd, sqlite3_int64 *pSize){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xFileSize(pFd, pSize) + ); +} +static int recoverVfsLock(sqlite3_file *pFd, int eLock){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xLock(pFd, eLock) + ); +} +static int recoverVfsUnlock(sqlite3_file *pFd, int eLock){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xUnlock(pFd, eLock) + ); +} +static int recoverVfsCheckReservedLock(sqlite3_file *pFd, int *pResOut){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xCheckReservedLock(pFd, pResOut) + ); +} +static int recoverVfsFileControl(sqlite3_file *pFd, int op, void *pArg){ + RECOVER_VFS_WRAPPER ( + (pFd->pMethods ? pFd->pMethods->xFileControl(pFd, op, pArg) : SQLITE_NOTFOUND) + ); +} +static int recoverVfsSectorSize(sqlite3_file *pFd){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xSectorSize(pFd) + ); +} +static int recoverVfsDeviceCharacteristics(sqlite3_file *pFd){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xDeviceCharacteristics(pFd) + ); +} +static int recoverVfsShmMap( + sqlite3_file *pFd, int iPg, int pgsz, int bExtend, void volatile **pp +){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xShmMap(pFd, iPg, pgsz, bExtend, pp) + ); +} +static int recoverVfsShmLock(sqlite3_file *pFd, int offset, int n, int flags){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xShmLock(pFd, offset, n, flags) + ); +} +static void recoverVfsShmBarrier(sqlite3_file *pFd){ + if( pFd->pMethods==&recover_methods ){ + pFd->pMethods = recover_g.pMethods; + pFd->pMethods->xShmBarrier(pFd); + pFd->pMethods = &recover_methods; + }else{ + pFd->pMethods->xShmBarrier(pFd); + } +} +static int recoverVfsShmUnmap(sqlite3_file *pFd, int deleteFlag){ + RECOVER_VFS_WRAPPER ( + pFd->pMethods->xShmUnmap(pFd, deleteFlag) + ); +} + +static int recoverVfsFetch( + sqlite3_file *pFd, + sqlite3_int64 iOff, + int iAmt, + void **pp +){ + *pp = 0; + return SQLITE_OK; +} +static int recoverVfsUnfetch(sqlite3_file *pFd, sqlite3_int64 iOff, void *p){ + return SQLITE_OK; +} + +/* +** Install the VFS wrapper around the file-descriptor open on the input +** database for recover handle p. Mutex RECOVER_MUTEX_ID must be held +** when this function is called. +*/ +static void recoverInstallWrapper(sqlite3_recover *p){ + sqlite3_file *pFd = 0; + assert( recover_g.pMethods==0 ); + recoverAssertMutexHeld(); + sqlite3_file_control(p->dbIn, p->zDb, SQLITE_FCNTL_FILE_POINTER, (void*)&pFd); + assert( pFd==0 || pFd->pMethods!=&recover_methods ); + if( pFd && pFd->pMethods ){ + int iVersion = 1 + (pFd->pMethods->iVersion>1 && pFd->pMethods->xShmMap!=0); + recover_g.pMethods = pFd->pMethods; + recover_g.p = p; + recover_methods.iVersion = iVersion; + pFd->pMethods = &recover_methods; + } +} + +/* +** Uninstall the VFS wrapper that was installed around the file-descriptor open +** on the input database for recover handle p. Mutex RECOVER_MUTEX_ID must be +** held when this function is called. 
+*/ +static void recoverUninstallWrapper(sqlite3_recover *p){ + sqlite3_file *pFd = 0; + recoverAssertMutexHeld(); + sqlite3_file_control(p->dbIn, p->zDb,SQLITE_FCNTL_FILE_POINTER,(void*)&pFd); + if( pFd && pFd->pMethods ){ + pFd->pMethods = recover_g.pMethods; + recover_g.pMethods = 0; + recover_g.p = 0; + } +} + +/* +** This function does the work of a single sqlite3_recover_step() call. It +** is guaranteed that the handle is not in an error state when this +** function is called. +*/ +static void recoverStep(sqlite3_recover *p){ + assert( p && p->errCode==SQLITE_OK ); + switch( p->eState ){ + case RECOVER_STATE_INIT: + /* This is the very first call to sqlite3_recover_step() on this object. + */ + recoverSqlCallback(p, "BEGIN"); + recoverSqlCallback(p, "PRAGMA writable_schema = on"); + + recoverEnterMutex(); + recoverInstallWrapper(p); + + /* Open the output database. And register required virtual tables and + ** user functions with the new handle. */ + recoverOpenOutput(p); + + /* Open transactions on both the input and output databases. */ + sqlite3_file_control(p->dbIn, p->zDb, SQLITE_FCNTL_RESET_CACHE, 0); + recoverExec(p, p->dbIn, "PRAGMA writable_schema = on"); + recoverExec(p, p->dbIn, "BEGIN"); + if( p->errCode==SQLITE_OK ) p->bCloseTransaction = 1; + recoverExec(p, p->dbIn, "SELECT 1 FROM sqlite_schema"); + recoverTransferSettings(p); + recoverOpenRecovery(p); + recoverCacheSchema(p); + + recoverUninstallWrapper(p); + recoverLeaveMutex(); + + recoverExec(p, p->dbOut, "BEGIN"); + + recoverWriteSchema1(p); + p->eState = RECOVER_STATE_WRITING; + break; + + case RECOVER_STATE_WRITING: { + if( p->w1.pTbls==0 ){ + recoverWriteDataInit(p); + } + if( SQLITE_DONE==recoverWriteDataStep(p) ){ + recoverWriteDataCleanup(p); + if( p->zLostAndFound ){ + p->eState = RECOVER_STATE_LOSTANDFOUND1; + }else{ + p->eState = RECOVER_STATE_SCHEMA2; + } + } + break; + } + + case RECOVER_STATE_LOSTANDFOUND1: { + if( p->laf.pUsed==0 ){ + recoverLostAndFound1Init(p); + } + if( SQLITE_DONE==recoverLostAndFound1Step(p) ){ + p->eState = RECOVER_STATE_LOSTANDFOUND2; + } + break; + } + case RECOVER_STATE_LOSTANDFOUND2: { + if( p->laf.pAllAndParent==0 ){ + recoverLostAndFound2Init(p); + } + if( SQLITE_DONE==recoverLostAndFound2Step(p) ){ + p->eState = RECOVER_STATE_LOSTANDFOUND3; + } + break; + } + + case RECOVER_STATE_LOSTANDFOUND3: { + if( p->laf.pInsert==0 ){ + recoverLostAndFound3Init(p); + } + if( SQLITE_DONE==recoverLostAndFound3Step(p) ){ + p->eState = RECOVER_STATE_SCHEMA2; + } + break; + } + + case RECOVER_STATE_SCHEMA2: { + int rc = SQLITE_OK; + + recoverWriteSchema2(p); + p->eState = RECOVER_STATE_DONE; + + /* If no error has occurred, commit the write transaction on the output + ** database. Regardless of whether or not an error has occurred, make + ** an attempt to end the read transaction on the input database. */ + recoverExec(p, p->dbOut, "COMMIT"); + rc = sqlite3_exec(p->dbIn, "END", 0, 0, 0); + if( p->errCode==SQLITE_OK ) p->errCode = rc; + + recoverSqlCallback(p, "PRAGMA writable_schema = off"); + recoverSqlCallback(p, "COMMIT"); + p->eState = RECOVER_STATE_DONE; + recoverFinalCleanup(p); + break; + }; + + case RECOVER_STATE_DONE: { + /* no-op */ + break; + }; + } +} + + +/* +** This is a worker function that does the heavy lifting for both init +** functions: +** +** sqlite3_recover_init() +** sqlite3_recover_init_sql() +** +** All this function does is allocate space for the recover handle and +** take copies of the input parameters. 
All the real work is done within +** sqlite3_recover_run(). +*/ +sqlite3_recover *recoverInit( + sqlite3* db, + const char *zDb, + const char *zUri, /* Output URI for _recover_init() */ + int (*xSql)(void*, const char*),/* SQL callback for _recover_init_sql() */ + void *pSqlCtx /* Context arg for _recover_init_sql() */ +){ + sqlite3_recover *pRet = 0; + int nDb = 0; + int nUri = 0; + int nByte = 0; + + if( zDb==0 ){ zDb = "main"; } + + nDb = recoverStrlen(zDb); + nUri = recoverStrlen(zUri); + + nByte = sizeof(sqlite3_recover) + nDb+1 + nUri+1; + pRet = (sqlite3_recover*)sqlite3_malloc(nByte); + if( pRet ){ + memset(pRet, 0, nByte); + pRet->dbIn = db; + pRet->zDb = (char*)&pRet[1]; + pRet->zUri = &pRet->zDb[nDb+1]; + memcpy(pRet->zDb, zDb, nDb); + if( nUri>0 && zUri ) memcpy(pRet->zUri, zUri, nUri); + pRet->xSql = xSql; + pRet->pSqlCtx = pSqlCtx; + pRet->bRecoverRowid = RECOVER_ROWID_DEFAULT; + } + + return pRet; +} + +/* +** Initialize a recovery handle that creates a new database containing +** the recovered data. +*/ +sqlite3_recover *sqlite3_recover_init( + sqlite3* db, + const char *zDb, + const char *zUri +){ + return recoverInit(db, zDb, zUri, 0, 0); +} + +/* +** Initialize a recovery handle that returns recovered data in the +** form of SQL statements via a callback. +*/ +sqlite3_recover *sqlite3_recover_init_sql( + sqlite3* db, + const char *zDb, + int (*xSql)(void*, const char*), + void *pSqlCtx +){ + return recoverInit(db, zDb, 0, xSql, pSqlCtx); +} + +/* +** Return the handle error message, if any. +*/ +const char *sqlite3_recover_errmsg(sqlite3_recover *p){ + return (p && p->errCode!=SQLITE_NOMEM) ? p->zErrMsg : "out of memory"; +} + +/* +** Return the handle error code. +*/ +int sqlite3_recover_errcode(sqlite3_recover *p){ + return p ? p->errCode : SQLITE_NOMEM; +} + +/* +** Configure the handle. +*/ +int sqlite3_recover_config(sqlite3_recover *p, int op, void *pArg){ + int rc = SQLITE_OK; + if( p==0 ){ + rc = SQLITE_NOMEM; + }else if( p->eState!=RECOVER_STATE_INIT ){ + rc = SQLITE_MISUSE; + }else{ + switch( op ){ + case 789: + /* This undocumented magic configuration option is used to set the + ** name of the auxiliary database that is ATTACH-ed to the database + ** connection and used to hold state information during the + ** recovery process. This option is for debugging use only and + ** is subject to change or removal at any time. */ + sqlite3_free(p->zStateDb); + p->zStateDb = recoverMPrintf(p, "%s", (char*)pArg); + break; + + case SQLITE_RECOVER_LOST_AND_FOUND: { + const char *zArg = (const char*)pArg; + sqlite3_free(p->zLostAndFound); + if( zArg ){ + p->zLostAndFound = recoverMPrintf(p, "%s", zArg); + }else{ + p->zLostAndFound = 0; + } + break; + } + + case SQLITE_RECOVER_FREELIST_CORRUPT: + p->bFreelistCorrupt = *(int*)pArg; + break; + + case SQLITE_RECOVER_ROWIDS: + p->bRecoverRowid = *(int*)pArg; + break; + + case SQLITE_RECOVER_SLOWINDEXES: + p->bSlowIndexes = *(int*)pArg; + break; + + default: + rc = SQLITE_NOTFOUND; + break; + } + } + + return rc; +} + +/* +** Do a unit of work towards the recovery job. Return SQLITE_OK if +** no error has occurred but database recovery is not finished, SQLITE_DONE +** if database recovery has been successfully completed, or an SQLite +** error code if an error has occurred. 
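+**
+** A typical driving loop is therefore (sketch; sqlite3_recover_run()
+** below drives recovery this way):
+**
+**     int rc;
+**     while( (rc = sqlite3_recover_step(p))==SQLITE_OK );
+**     /* rc is now SQLITE_DONE on success, or an SQLite error code */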
+*/ +int sqlite3_recover_step(sqlite3_recover *p){ + if( p==0 ) return SQLITE_NOMEM; + if( p->errCode==SQLITE_OK ) recoverStep(p); + if( p->eState==RECOVER_STATE_DONE && p->errCode==SQLITE_OK ){ + return SQLITE_DONE; + } + return p->errCode; +} + +/* +** Do the configured recovery operation. Return SQLITE_OK if successful, or +** else an SQLite error code. +*/ +int sqlite3_recover_run(sqlite3_recover *p){ + while( SQLITE_OK==sqlite3_recover_step(p) ); + return sqlite3_recover_errcode(p); +} + + +/* +** Free all resources associated with the recover handle passed as the only +** argument. The results of using a handle with any sqlite3_recover_** +** API function after it has been passed to this function are undefined. +** +** A copy of the value returned by the first call made to sqlite3_recover_run() +** on this handle is returned, or SQLITE_OK if sqlite3_recover_run() has +** not been called on this handle. +*/ +int sqlite3_recover_finish(sqlite3_recover *p){ + int rc; + if( p==0 ){ + rc = SQLITE_NOMEM; + }else{ + recoverFinalCleanup(p); + if( p->bCloseTransaction && sqlite3_get_autocommit(p->dbIn)==0 ){ + rc = sqlite3_exec(p->dbIn, "END", 0, 0, 0); + if( p->errCode==SQLITE_OK ) p->errCode = rc; + } + rc = p->errCode; + sqlite3_free(p->zErrMsg); + sqlite3_free(p->zStateDb); + sqlite3_free(p->zLostAndFound); + sqlite3_free(p->pPage1Cache); + sqlite3_free(p); + } + return rc; +} + +#endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */ ADDED ext/recover/sqlite3recover.h Index: ext/recover/sqlite3recover.h ================================================================== --- /dev/null +++ ext/recover/sqlite3recover.h @@ -0,0 +1,249 @@ +/* +** 2022-08-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the public interface to the "recover" extension - +** an SQLite extension designed to recover data from corrupted database +** files. +*/ + +/* +** OVERVIEW: +** +** To use the API to recover data from a corrupted database, an +** application: +** +** 1) Creates an sqlite3_recover handle by calling either +** sqlite3_recover_init() or sqlite3_recover_init_sql(). +** +** 2) Configures the new handle using one or more calls to +** sqlite3_recover_config(). +** +** 3) Executes the recovery by repeatedly calling sqlite3_recover_step() on +** the handle until it returns something other than SQLITE_OK. If it +** returns SQLITE_DONE, then the recovery operation completed without +** error. If it returns some other non-SQLITE_OK value, then an error +** has occurred. +** +** 4) Retrieves any error code and English language error message using the +** sqlite3_recover_errcode() and sqlite3_recover_errmsg() APIs, +** respectively. +** +** 5) Destroys the sqlite3_recover handle and frees all resources +** using sqlite3_recover_finish(). +** +** The application may abandon the recovery operation at any point +** before it is finished by passing the sqlite3_recover handle to +** sqlite3_recover_finish(). This is not an error, but the final state +** of the output database, or the results of running the partial script +** delivered to the SQL callback, are undefined. 
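+**
+** The following is an illustrative sketch only - the output database name
+** "recovered.db" and the lost-and-found table name "lost_and_found" are
+** arbitrary examples, and error handling is reduced to returning the final
+** error code. It applies the steps above to recover database "main" of an
+** open handle db:
+**
+**   int recoverDb(sqlite3 *db){
+**     // Step 1: create a handle that writes into a new database file.
+**     sqlite3_recover *p = sqlite3_recover_init(db, "main", "recovered.db");
+**     if( p ){
+**       // Step 2 (optional): salvage unattributable pages into a
+**       // lost-and-found table in the output database.
+**       sqlite3_recover_config(p, SQLITE_RECOVER_LOST_AND_FOUND,
+**           (void*)"lost_and_found");
+**       // Step 3: do units of work until SQLITE_DONE or an error.
+**       while( sqlite3_recover_step(p)==SQLITE_OK );
+**       // Step 4: on error, sqlite3_recover_errmsg(p) describes it.
+**     }
+**     // Step 5: always release the handle; returns the final error code.
+**     return sqlite3_recover_finish(p);
+**   }
+**
+** The convenience wrapper sqlite3_recover_run() may be used in place of
+** the explicit sqlite3_recover_step() loop.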
+*/
+
+#ifndef _SQLITE_RECOVER_H
+#define _SQLITE_RECOVER_H
+
+#include "sqlite3.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** An instance of the sqlite3_recover object represents a recovery
+** operation in progress.
+**
+** Constructors:
+**
+**    sqlite3_recover_init()
+**    sqlite3_recover_init_sql()
+**
+** Destructor:
+**
+**    sqlite3_recover_finish()
+**
+** Methods:
+**
+**    sqlite3_recover_config()
+**    sqlite3_recover_errcode()
+**    sqlite3_recover_errmsg()
+**    sqlite3_recover_run()
+**    sqlite3_recover_step()
+*/
+typedef struct sqlite3_recover sqlite3_recover;
+
+/*
+** These two APIs attempt to create and return a new sqlite3_recover object.
+** In both cases the first two arguments identify the (possibly
+** corrupt) database to recover data from. The first argument is an open
+** database handle and the second the name of a database attached to that
+** handle (i.e. "main", "temp" or the name of an attached database).
+**
+** If sqlite3_recover_init() is used to create the new sqlite3_recover
+** handle, then data is recovered into a new database, identified by
+** string parameter zUri. zUri may be an absolute or relative file path,
+** or may be an SQLite URI. If the identified database file already exists,
+** it is overwritten.
+**
+** If sqlite3_recover_init_sql() is invoked, then any recovered data will
+** be returned to the user as a series of SQL statements. Executing these
+** SQL statements results in the same database as would have been created
+** had sqlite3_recover_init() been used. For each SQL statement in the
+** output, the callback function passed as the third argument (xSql) is
+** invoked once. The callback is passed a copy of the fourth argument to
+** this function (pCtx) as its first parameter, and a pointer to a
+** nul-terminated buffer containing the SQL statement formatted as UTF-8
+** as its second. If the xSql callback returns any value other than
+** SQLITE_OK, then processing is immediately abandoned and the value
+** returned is used as the recover handle error code (see below).
+**
+** If an out-of-memory error occurs, NULL may be returned instead of
+** a valid handle. In all other cases, it is the responsibility of the
+** application to avoid resource leaks by ensuring that
+** sqlite3_recover_finish() is called on all allocated handles.
+*/
+sqlite3_recover *sqlite3_recover_init(
+  sqlite3* db,
+  const char *zDb,
+  const char *zUri
+);
+sqlite3_recover *sqlite3_recover_init_sql(
+  sqlite3* db,
+  const char *zDb,
+  int (*xSql)(void*, const char*),
+  void *pCtx
+);
+
+/*
+** Configure an sqlite3_recover object that has just been created using
+** sqlite3_recover_init() or sqlite3_recover_init_sql(). This function
+** may only be called before the first call to sqlite3_recover_step()
+** or sqlite3_recover_run() on the object.
+**
+** The second argument passed to this function must be one of the
+** SQLITE_RECOVER_* symbols defined below. Valid values for the third argument
+** depend on the specific SQLITE_RECOVER_* symbol in use.
+**
+** SQLITE_OK is returned if the configuration operation was successful,
+** or an SQLite error code otherwise.
+*/
+int sqlite3_recover_config(sqlite3_recover*, int op, void *pArg);
+
+/*
+** SQLITE_RECOVER_LOST_AND_FOUND:
+**   The pArg argument points to a string buffer containing the name
+**   of a "lost-and-found" table in the output database, or NULL. If
+**   the argument is non-NULL and the database contains seemingly
+**   valid pages that cannot be associated with any table in the
+**   recovered part of the schema, data is extracted from these
+**   pages to add to the lost-and-found table.
+**
+** SQLITE_RECOVER_FREELIST_CORRUPT:
+**   The pArg value must actually be a pointer to a value of type
+**   int containing value 0 or 1 cast as a (void*). If this option is set
+**   (argument is 1) and a lost-and-found table has been configured using
+**   SQLITE_RECOVER_LOST_AND_FOUND, then it is assumed that the freelist is
+**   corrupt and an attempt is made to recover records from pages that
+**   appear to be linked into the freelist. Otherwise, pages on the freelist
+**   are ignored. Setting this option can recover more data from the
+**   database, but often ends up "recovering" deleted records. The default
+**   value is 0 (clear).
+**
+** SQLITE_RECOVER_ROWIDS:
+**   The pArg value must actually be a pointer to a value of type
+**   int containing value 0 or 1 cast as a (void*). If this option is set
+**   (argument is 1), then an attempt is made to recover rowid values
+**   that are not also INTEGER PRIMARY KEY values. If this option is
+**   clear, then new rowids are assigned to all recovered rows. The
+**   default value is 1 (set).
+**
+** SQLITE_RECOVER_SLOWINDEXES:
+**   The pArg value must actually be a pointer to a value of type
+**   int containing value 0 or 1 cast as a (void*). If this option is clear
+**   (argument is 0), then when creating an output database, the recover
+**   module creates and populates non-UNIQUE indexes right at the end of the
+**   recovery operation - after all recoverable data has been inserted
+**   into the new database. This is faster overall, but means that the
+**   final call to sqlite3_recover_step() for a recovery operation may
+**   need to create a large number of indexes, which may be very slow.
+**
+**   Or, if this option is set (argument is 1), then non-UNIQUE indexes
+**   are created in the output database before it is populated with
+**   recovered data. This is slower overall, but avoids the slow call
+**   to sqlite3_recover_step() at the end of the recovery operation.
+**
+**   The default option value is 0.
+*/
+#define SQLITE_RECOVER_LOST_AND_FOUND   1
+#define SQLITE_RECOVER_FREELIST_CORRUPT 2
+#define SQLITE_RECOVER_ROWIDS           3
+#define SQLITE_RECOVER_SLOWINDEXES      4
+
+/*
+** Perform a unit of work towards the recovery operation. This function
+** must normally be called multiple times to complete database recovery.
+**
+** If no error occurs but the recovery operation is not completed, this
+** function returns SQLITE_OK. If recovery has been completed successfully
+** then SQLITE_DONE is returned. If an error has occurred, then an SQLite
+** error code (e.g. SQLITE_IOERR or SQLITE_NOMEM) is returned. It is not
+** considered an error if some or all of the data cannot be recovered
+** due to database corruption.
+**
+** Once sqlite3_recover_step() has returned a value other than SQLITE_OK,
+** all further such calls on the same recover handle are no-ops that return
+** the same non-SQLITE_OK value.
+*/
+int sqlite3_recover_step(sqlite3_recover*);
+
+/*
+** Run the recovery operation to completion. Return SQLITE_OK if successful,
+** or an SQLite error code otherwise.
Calling this function is the same +** as executing: +** +** while( SQLITE_OK==sqlite3_recover_step(p) ); +** return sqlite3_recover_errcode(p); +*/ +int sqlite3_recover_run(sqlite3_recover*); + +/* +** If an error has been encountered during a prior call to +** sqlite3_recover_step(), then this function attempts to return a +** pointer to a buffer containing an English language explanation of +** the error. If no error message is available, or if an out-of memory +** error occurs while attempting to allocate a buffer in which to format +** the error message, NULL is returned. +** +** The returned buffer remains valid until the sqlite3_recover handle is +** destroyed using sqlite3_recover_finish(). +*/ +const char *sqlite3_recover_errmsg(sqlite3_recover*); + +/* +** If this function is called on an sqlite3_recover handle after +** an error occurs, an SQLite error code is returned. Otherwise, SQLITE_OK. +*/ +int sqlite3_recover_errcode(sqlite3_recover*); + +/* +** Clean up a recovery object created by a call to sqlite3_recover_init(). +** The results of using a recovery object with any API after it has been +** passed to this function are undefined. +** +** This function returns the same value as sqlite3_recover_errcode(). +*/ +int sqlite3_recover_finish(sqlite3_recover*); + + +#ifdef __cplusplus +} /* end of the 'extern "C"' block */ +#endif + +#endif /* ifndef _SQLITE_RECOVER_H */ ADDED ext/recover/test_recover.c Index: ext/recover/test_recover.c ================================================================== --- /dev/null +++ ext/recover/test_recover.c @@ -0,0 +1,311 @@ +/* +** 2022-08-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** +*/ + +#include "sqlite3recover.h" +#include "sqliteInt.h" + +#include +#include + +#ifndef SQLITE_OMIT_VIRTUALTABLE + +typedef struct TestRecover TestRecover; +struct TestRecover { + sqlite3_recover *p; + Tcl_Interp *interp; + Tcl_Obj *pScript; +}; + +static int xSqlCallback(void *pSqlArg, const char *zSql){ + TestRecover *p = (TestRecover*)pSqlArg; + Tcl_Obj *pEval = 0; + int res = 0; + + pEval = Tcl_DuplicateObj(p->pScript); + Tcl_IncrRefCount(pEval); + + res = Tcl_ListObjAppendElement(p->interp, pEval, Tcl_NewStringObj(zSql, -1)); + if( res==TCL_OK ){ + res = Tcl_EvalObjEx(p->interp, pEval, 0); + } + + Tcl_DecrRefCount(pEval); + if( res ){ + Tcl_BackgroundError(p->interp); + return TCL_ERROR; + }else{ + Tcl_Obj *pObj = Tcl_GetObjResult(p->interp); + if( Tcl_GetCharLength(pObj)==0 ){ + res = 0; + }else if( Tcl_GetIntFromObj(p->interp, pObj, &res) ){ + Tcl_BackgroundError(p->interp); + return TCL_ERROR; + } + } + return res; +} + +static int getDbPointer(Tcl_Interp *interp, Tcl_Obj *pObj, sqlite3 **pDb){ + Tcl_CmdInfo info; + if( 0==Tcl_GetCommandInfo(interp, Tcl_GetString(pObj), &info) ){ + Tcl_AppendResult(interp, "no such handle: ", Tcl_GetString(pObj), 0); + return TCL_ERROR; + } + *pDb = *(sqlite3 **)info.objClientData; + return TCL_OK; +} + +/* +** Implementation of the command created by [sqlite3_recover_init]: +** +** $cmd config OP ARG +** $cmd run +** $cmd errmsg +** $cmd errcode +** $cmd finalize +*/ +static int testRecoverCmd( + void *clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + static struct RecoverSub { + const char *zSub; + int nArg; + const char *zMsg; + } aSub[] = { + { "config", 2, "ARG" }, /* 0 */ + { "run", 0, "" }, /* 1 */ + { "errmsg", 0, "" }, /* 2 */ + { "errcode", 0, "" }, /* 3 */ + { "finish", 0, "" }, /* 4 */ + { "step", 0, "" }, /* 5 */ + { 0 } + }; + int rc = TCL_OK; + int iSub = 0; + TestRecover *pTest = (TestRecover*)clientData; + + if( objc<2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SUBCOMMAND ..."); + return TCL_ERROR; + } + rc = Tcl_GetIndexFromObjStruct(interp, + objv[1], aSub, sizeof(aSub[0]), "sub-command", 0, &iSub + ); + if( rc!=TCL_OK ) return rc; + if( (objc-2)!=aSub[iSub].nArg ){ + Tcl_WrongNumArgs(interp, 2, objv, aSub[iSub].zMsg); + return TCL_ERROR; + } + + switch( iSub ){ + case 0: assert( sqlite3_stricmp("config", aSub[iSub].zSub)==0 ); { + const char *aOp[] = { + "testdb", /* 0 */ + "lostandfound", /* 1 */ + "freelistcorrupt", /* 2 */ + "rowids", /* 3 */ + "slowindexes", /* 4 */ + "invalid", /* 5 */ + 0 + }; + int iOp = 0; + int res = 0; + if( Tcl_GetIndexFromObj(interp, objv[2], aOp, "option", 0, &iOp) ){ + return TCL_ERROR; + } + switch( iOp ){ + case 0: + res = sqlite3_recover_config(pTest->p, + 789, (void*)Tcl_GetString(objv[3]) /* MAGIC NUMBER! */ + ); + break; + case 1: { + const char *zStr = Tcl_GetString(objv[3]); + res = sqlite3_recover_config(pTest->p, + SQLITE_RECOVER_LOST_AND_FOUND, (void*)(zStr[0] ? 
zStr : 0) + ); + break; + } + case 2: { + int iVal = 0; + if( Tcl_GetBooleanFromObj(interp, objv[3], &iVal) ) return TCL_ERROR; + res = sqlite3_recover_config(pTest->p, + SQLITE_RECOVER_FREELIST_CORRUPT, (void*)&iVal + ); + break; + } + case 3: { + int iVal = 0; + if( Tcl_GetBooleanFromObj(interp, objv[3], &iVal) ) return TCL_ERROR; + res = sqlite3_recover_config(pTest->p, + SQLITE_RECOVER_ROWIDS, (void*)&iVal + ); + break; + } + case 4: { + int iVal = 0; + if( Tcl_GetBooleanFromObj(interp, objv[3], &iVal) ) return TCL_ERROR; + res = sqlite3_recover_config(pTest->p, + SQLITE_RECOVER_SLOWINDEXES, (void*)&iVal + ); + break; + } + case 5: { + res = sqlite3_recover_config(pTest->p, 12345, 0); + break; + } + } + Tcl_SetObjResult(interp, Tcl_NewIntObj(res)); + break; + } + case 1: assert( sqlite3_stricmp("run", aSub[iSub].zSub)==0 ); { + int res = sqlite3_recover_run(pTest->p); + Tcl_SetObjResult(interp, Tcl_NewIntObj(res)); + break; + } + case 2: assert( sqlite3_stricmp("errmsg", aSub[iSub].zSub)==0 ); { + const char *zErr = sqlite3_recover_errmsg(pTest->p); + Tcl_SetObjResult(interp, Tcl_NewStringObj(zErr, -1)); + break; + } + case 3: assert( sqlite3_stricmp("errcode", aSub[iSub].zSub)==0 ); { + int errCode = sqlite3_recover_errcode(pTest->p); + Tcl_SetObjResult(interp, Tcl_NewIntObj(errCode)); + break; + } + case 4: assert( sqlite3_stricmp("finish", aSub[iSub].zSub)==0 ); { + int res = sqlite3_recover_errcode(pTest->p); + int res2; + if( res!=SQLITE_OK ){ + const char *zErr = sqlite3_recover_errmsg(pTest->p); + Tcl_SetObjResult(interp, Tcl_NewStringObj(zErr, -1)); + } + res2 = sqlite3_recover_finish(pTest->p); + assert( res2==res ); + if( res ) return TCL_ERROR; + break; + } + case 5: assert( sqlite3_stricmp("step", aSub[iSub].zSub)==0 ); { + int res = sqlite3_recover_step(pTest->p); + Tcl_SetObjResult(interp, Tcl_NewIntObj(res)); + break; + } + } + + return TCL_OK; +} + +/* +** sqlite3_recover_init DB DBNAME URI +*/ +static int test_sqlite3_recover_init( + void *clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + static int iTestRecoverCmd = 1; + + TestRecover *pNew = 0; + sqlite3 *db = 0; + const char *zDb = 0; + const char *zUri = 0; + char zCmd[128]; + int bSql = clientData ? 1 : 0; + + if( objc!=4 ){ + const char *zErr = (bSql ? "DB DBNAME SCRIPT" : "DB DBNAME URI"); + Tcl_WrongNumArgs(interp, 1, objv, zErr); + return TCL_ERROR; + } + if( getDbPointer(interp, objv[1], &db) ) return TCL_ERROR; + zDb = Tcl_GetString(objv[2]); + if( zDb[0]=='\0' ) zDb = 0; + + pNew = ckalloc(sizeof(TestRecover)); + if( bSql==0 ){ + zUri = Tcl_GetString(objv[3]); + pNew->p = sqlite3_recover_init(db, zDb, zUri); + }else{ + pNew->interp = interp; + pNew->pScript = objv[3]; + Tcl_IncrRefCount(pNew->pScript); + pNew->p = sqlite3_recover_init_sql(db, zDb, xSqlCallback, (void*)pNew); + } + + sprintf(zCmd, "sqlite_recover%d", iTestRecoverCmd++); + Tcl_CreateObjCommand(interp, zCmd, testRecoverCmd, (void*)pNew, 0); + + Tcl_SetObjResult(interp, Tcl_NewStringObj(zCmd, -1)); + return TCL_OK; +} + +/* +** Declaration for public API function in file dbdata.c. This may be called +** with NULL as the final two arguments to register the sqlite_dbptr and +** sqlite_dbdata virtual tables with a database handle. 
+*/ +#ifdef _WIN32 +__declspec(dllexport) +#endif +int sqlite3_dbdata_init(sqlite3*, char**, const sqlite3_api_routines*); + +/* +** sqlite3_recover_init DB DBNAME URI +*/ +static int test_sqlite3_dbdata_init( + void *clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db = 0; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; + } + if( getDbPointer(interp, objv[1], &db) ) return TCL_ERROR; + sqlite3_dbdata_init(db, 0, 0); + + Tcl_ResetResult(interp); + return TCL_OK; +} + +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +int TestRecover_Init(Tcl_Interp *interp){ +#ifndef SQLITE_OMIT_VIRTUALTABLE + struct Cmd { + const char *zCmd; + Tcl_ObjCmdProc *xProc; + void *pArg; + } aCmd[] = { + { "sqlite3_recover_init", test_sqlite3_recover_init, 0 }, + { "sqlite3_recover_init_sql", test_sqlite3_recover_init, (void*)1 }, + { "sqlite3_dbdata_init", test_sqlite3_dbdata_init, (void*)1 }, + }; + int i; + + for(i=0; izCmd, p->xProc, p->pArg, 0); + } +#endif + return TCL_OK; +} + Index: ext/rtree/rtree.c ================================================================== --- ext/rtree/rtree.c +++ ext/rtree/rtree.c @@ -3233,11 +3233,11 @@ return SQLITE_LOCKED_VTAB; } rtreeReference(pRtree); assert(nData>=1); - cell.iRowid = 0; /* Used only to suppress a compiler warning */ + memset(&cell, 0, sizeof(cell)); /* Constraint handling. A write operation on an r-tree table may return ** SQLITE_CONSTRAINT for two reasons: ** ** 1. A duplicate rowid value, or DELETED ext/session/changebatch1.test Index: ext/session/changebatch1.test ================================================================== --- ext/session/changebatch1.test +++ /dev/null @@ -1,222 +0,0 @@ -# 2016 August 23 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. -# - -if {![info exists testdir]} { - set testdir [file join [file dirname [info script]] .. .. test] -} -source $testdir/tester.tcl -ifcapable !session {finish_test; return} - -set testprefix changebatch1 - - -proc sql_to_changeset {method sql} { - sqlite3session S db main - S attach * - execsql $sql - set ret [S $method] - S delete - return $ret -} - -proc do_changebatch_test {tn method args} { - set C [list] - foreach a $args { - lappend C [sql_to_changeset $method $a] - } - - sqlite3changebatch cb db - set i 1 - foreach ::cs [lrange $C 0 end-1] { - set rc [cb add $::cs] - if {$rc!="SQLITE_OK"} { error "expected SQLITE_OK, got $rc (i=$i)" } - incr i - } - - set ::cs [lindex $C end] - do_test $tn { cb add [set ::cs] } SQLITE_CONSTRAINT - cb delete -} - -proc do_changebatch_test1 {tn args} { - uplevel do_changebatch_test $tn changeset $args -} -proc do_changebatch_test2 {tn args} { - uplevel do_changebatch_test $tn fullchangeset $args -} - -#------------------------------------------------------------------------- -# The body of the following loop contains tests for database schemas -# that do not feature multi-column UNIQUE constraints. In this case -# it doesn't matter if the changesets are generated using -# sqlite3session_changeset() or sqlite3session_fullchangeset(). 
-# -foreach {tn testfunction} { - 1 do_changebatch_test1 - 2 do_changebatch_test2 -} { - reset_db - - #------------------------------------------------------------------------- - # - do_execsql_test $tn.1.0 { - CREATE TABLE t1(a PRIMARY KEY, b); - } - - $testfunction $tn.1.1 { - INSERT INTO t1 VALUES(1, 1); - } { - DELETE FROM t1 WHERE a=1; - } - - do_execsql_test $tn.1.2.0 { - INSERT INTO t1 VALUES(1, 1); - INSERT INTO t1 VALUES(2, 2); - INSERT INTO t1 VALUES(3, 3); - } - $testfunction $tn.1.2.1 { - DELETE FROM t1 WHERE a=2; - } { - INSERT INTO t1 VALUES(2, 2); - } - - #------------------------------------------------------------------------- - # - do_execsql_test $tn.2.0 { - CREATE TABLE x1(a, b PRIMARY KEY, c UNIQUE); - CREATE TABLE x2(a PRIMARY KEY, b UNIQUE, c UNIQUE); - CREATE INDEX x1a ON x1(a); - - INSERT INTO x1 VALUES(1, 1, 'a'); - INSERT INTO x1 VALUES(1, 2, 'b'); - INSERT INTO x1 VALUES(1, 3, 'c'); - } - - $testfunction $tn.2.1 { - DELETE FROM x1 WHERE b=2; - } { - UPDATE x1 SET c='b' WHERE b=3; - } - - $testfunction $tn.2.2 { - DELETE FROM x1 WHERE b=1; - } { - INSERT INTO x1 VALUES(1, 5, 'a'); - } - - set L [list] - for {set i 1000} {$i < 10000} {incr i} { - lappend L "INSERT INTO x2 VALUES($i, $i, 'x' || $i)" - } - lappend L "DELETE FROM x2 WHERE b=1005" - $testfunction $tn.2.3 {*}$L - - execsql { INSERT INTO x1 VALUES('f', 'f', 'f') } - $testfunction $tn.2.4 { - INSERT INTO x2 VALUES('f', 'f', 'f'); - } { - INSERT INTO x1 VALUES('g', 'g', 'g'); - } { - DELETE FROM x1 WHERE b='f'; - } { - INSERT INTO x2 VALUES('g', 'g', 'g'); - } { - INSERT INTO x1 VALUES('f', 'f', 'f'); - } - - execsql { - DELETE FROM x1; - INSERT INTO x1 VALUES(1.5, 1.5, 1.5); - } - $testfunction $tn.2.5 { - DELETE FROM x1 WHERE b BETWEEN 1 AND 2; - } { - INSERT INTO x1 VALUES(2.5, 2.5, 2.5); - } { - INSERT INTO x1 VALUES(1.5, 1.5, 1.5); - } - - execsql { - DELETE FROM x2; - INSERT INTO x2 VALUES(X'abcd', X'1234', X'7890'); - INSERT INTO x2 VALUES(X'0000', X'0000', X'0000'); - } - breakpoint - $testfunction $tn.2.6 { - UPDATE x2 SET c = X'1234' WHERE a=X'abcd'; - INSERT INTO x2 VALUES(X'1234', X'abcd', X'7890'); - } { - DELETE FROM x2 WHERE b=X'0000'; - } { - INSERT INTO x2 VALUES(1, X'0000', NULL); - } -} - -#------------------------------------------------------------------------- -# Test some multi-column UNIQUE constraints. First Using _changeset() to -# demonstrate the problem, then using _fullchangeset() to show that it has -# been fixed. 
-# -reset_db -do_execsql_test 3.0 { - CREATE TABLE y1(a PRIMARY KEY, b, c, UNIQUE(b, c)); - INSERT INTO y1 VALUES(1, 1, 1); - INSERT INTO y1 VALUES(2, 2, 2); - INSERT INTO y1 VALUES(3, 3, 3); - INSERT INTO y1 VALUES(4, 3, 4); - BEGIN; -} - -do_test 3.1.1 { - set c1 [sql_to_changeset changeset { DELETE FROM y1 WHERE a=4 }] - set c2 [sql_to_changeset changeset { UPDATE y1 SET c=4 WHERE a=3 }] - sqlite3changebatch cb db - cb add $c1 - cb add $c2 -} {SQLITE_OK} -do_test 3.1.2 { - cb delete - execsql ROLLBACK -} {} - -do_test 3.1.1 { - set c1 [sql_to_changeset fullchangeset { DELETE FROM y1 WHERE a=4 }] - set c2 [sql_to_changeset fullchangeset { UPDATE y1 SET c=4 WHERE a=3 }] - sqlite3changebatch cb db - cb add $c1 - cb add $c2 -} {SQLITE_CONSTRAINT} -do_test 3.1.2 { - cb delete -} {} - -#------------------------------------------------------------------------- -# -reset_db -do_execsql_test 4.0 { - CREATE TABLE t1(x, y, z, PRIMARY KEY(x, y), UNIQUE(z)); -} - -do_test 4.1 { - set c1 [sql_to_changeset fullchangeset { INSERT INTO t1 VALUES(1, 2, 3) }] - execsql { - DROP TABLE t1; - CREATE TABLE t1(w, x, y, z, PRIMARY KEY(x, y), UNIQUE(z)); - } - sqlite3changebatch cb db - list [catch { cb add $c1 } msg] $msg -} {1 SQLITE_RANGE} - - - -finish_test DELETED ext/session/changebatchfault.test Index: ext/session/changebatchfault.test ================================================================== --- ext/session/changebatchfault.test +++ /dev/null @@ -1,42 +0,0 @@ -# 2011 Mar 21 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# -# The focus of this file is testing the session module. -# - -if {![info exists testdir]} { - set testdir [file join [file dirname [info script]] .. .. 
test] -} -source [file join [file dirname [info script]] session_common.tcl] -source $testdir/tester.tcl -ifcapable !session {finish_test; return} -set testprefix changebatchfault - -do_execsql_test 1.0 { - CREATE TABLE t1(a, b, c PRIMARY KEY, UNIQUE(a, b)); - INSERT INTO t1 VALUES('a', 'a', 'a'); - INSERT INTO t1 VALUES('b', 'b', 'b'); -} - -set ::c1 [changeset_from_sql { delete from t1 where c='a' }] -set ::c2 [changeset_from_sql { insert into t1 values('c', 'c', 'c') }] - -do_faultsim_test 1 -faults oom-* -body { - sqlite3changebatch cb db - cb add $::c1 - cb add $::c2 -} -test { - faultsim_test_result {0 SQLITE_OK} {1 SQLITE_NOMEM} - catch { cb delete } -} - - -finish_test Index: ext/session/sessionH.test ================================================================== --- ext/session/sessionH.test +++ ext/session/sessionH.test @@ -27,11 +27,11 @@ } do_then_apply_sql { WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERe i<10000 ) - INSERT INTO t1 SELECT 'abcde', randomblob(18), i FROM s; + INSERT INTO t1 SELECT 'abcde', randomblob(16), i FROM s; } compare_db db db2 } {} #------------------------------------------------------------------------ DELETED ext/session/sqlite3changebatch.c Index: ext/session/sqlite3changebatch.c ================================================================== --- ext/session/sqlite3changebatch.c +++ /dev/null @@ -1,485 +0,0 @@ - -#if !defined(SQLITE_TEST) || (defined(SQLITE_ENABLE_SESSION) && defined(SQLITE_ENABLE_PREUPDATE_HOOK)) - -#include "sqlite3session.h" -#include "sqlite3changebatch.h" - -#include -#include - -typedef struct BatchTable BatchTable; -typedef struct BatchIndex BatchIndex; -typedef struct BatchIndexEntry BatchIndexEntry; -typedef struct BatchHash BatchHash; - -struct sqlite3_changebatch { - sqlite3 *db; /* Database handle used to read schema */ - BatchTable *pTab; /* First in linked list of tables */ - int iChangesetId; /* Current changeset id */ - int iNextIdxId; /* Next available index id */ - int nEntry; /* Number of entries in hash table */ - int nHash; /* Number of hash buckets */ - BatchIndexEntry **apHash; /* Array of hash buckets */ -}; - -struct BatchTable { - BatchIndex *pIdx; /* First in linked list of UNIQUE indexes */ - BatchTable *pNext; /* Next table */ - char zTab[1]; /* Table name */ -}; - -struct BatchIndex { - BatchIndex *pNext; /* Next index on same table */ - int iId; /* Index id (assigned internally) */ - int bPk; /* True for PK index */ - int nCol; /* Size of aiCol[] array */ - int *aiCol; /* Array of columns that make up index */ -}; - -struct BatchIndexEntry { - BatchIndexEntry *pNext; /* Next colliding hash table entry */ - int iChangesetId; /* Id of associated changeset */ - int iIdxId; /* Id of index this key is from */ - int szRecord; - char aRecord[1]; -}; - -/* -** Allocate and zero a block of nByte bytes. Must be freed using cbFree(). -*/ -static void *cbMalloc(int *pRc, int nByte){ - void *pRet; - - if( *pRc ){ - pRet = 0; - }else{ - pRet = sqlite3_malloc(nByte); - if( pRet ){ - memset(pRet, 0, nByte); - }else{ - *pRc = SQLITE_NOMEM; - } - } - - return pRet; -} - -/* -** Free an allocation made by cbMalloc(). -*/ -static void cbFree(void *p){ - sqlite3_free(p); -} - -/* -** Return the hash bucket that pEntry belongs in. 
-*/ -static int cbHash(sqlite3_changebatch *p, BatchIndexEntry *pEntry){ - unsigned int iHash = (unsigned int)pEntry->iIdxId; - unsigned char *pEnd = (unsigned char*)&pEntry->aRecord[pEntry->szRecord]; - unsigned char *pIter; - - for(pIter=(unsigned char*)pEntry->aRecord; pIternHash); -} - -/* -** Resize the hash table. -*/ -static int cbHashResize(sqlite3_changebatch *p){ - int rc = SQLITE_OK; - BatchIndexEntry **apNew; - int nNew = (p->nHash ? p->nHash*2 : 512); - int i; - - apNew = cbMalloc(&rc, sizeof(BatchIndexEntry*) * nNew); - if( rc==SQLITE_OK ){ - int nHash = p->nHash; - p->nHash = nNew; - for(i=0; iapHash[i])!=0 ){ - int iHash = cbHash(p, pEntry); - p->apHash[i] = pEntry->pNext; - pEntry->pNext = apNew[iHash]; - apNew[iHash] = pEntry; - } - } - - cbFree(p->apHash); - p->apHash = apNew; - } - - return rc; -} - - -/* -** Allocate a new sqlite3_changebatch object. -*/ -int sqlite3changebatch_new(sqlite3 *db, sqlite3_changebatch **pp){ - sqlite3_changebatch *pRet; - int rc = SQLITE_OK; - *pp = pRet = (sqlite3_changebatch*)cbMalloc(&rc, sizeof(sqlite3_changebatch)); - if( pRet ){ - pRet->db = db; - } - return rc; -} - -/* -** Add a BatchIndex entry for index zIdx to table pTab. -*/ -static int cbAddIndex( - sqlite3_changebatch *p, - BatchTable *pTab, - const char *zIdx, - int bPk -){ - int nCol = 0; - sqlite3_stmt *pIndexInfo = 0; - BatchIndex *pNew = 0; - int rc; - char *zIndexInfo; - - zIndexInfo = (char*)sqlite3_mprintf("PRAGMA main.index_info = %Q", zIdx); - if( zIndexInfo ){ - rc = sqlite3_prepare_v2(p->db, zIndexInfo, -1, &pIndexInfo, 0); - sqlite3_free(zIndexInfo); - }else{ - rc = SQLITE_NOMEM; - } - - if( rc==SQLITE_OK ){ - while( SQLITE_ROW==sqlite3_step(pIndexInfo) ){ nCol++; } - rc = sqlite3_reset(pIndexInfo); - } - - pNew = (BatchIndex*)cbMalloc(&rc, sizeof(BatchIndex) + sizeof(int) * nCol); - if( rc==SQLITE_OK ){ - pNew->nCol = nCol; - pNew->bPk = bPk; - pNew->aiCol = (int*)&pNew[1]; - pNew->iId = p->iNextIdxId++; - while( SQLITE_ROW==sqlite3_step(pIndexInfo) ){ - int i = sqlite3_column_int(pIndexInfo, 0); - int j = sqlite3_column_int(pIndexInfo, 1); - pNew->aiCol[i] = j; - } - rc = sqlite3_reset(pIndexInfo); - } - - if( rc==SQLITE_OK ){ - pNew->pNext = pTab->pIdx; - pTab->pIdx = pNew; - }else{ - cbFree(pNew); - } - sqlite3_finalize(pIndexInfo); - - return rc; -} - -/* -** Free the object passed as the first argument. -*/ -static void cbFreeTable(BatchTable *pTab){ - BatchIndex *pIdx; - BatchIndex *pIdxNext; - for(pIdx=pTab->pIdx; pIdx; pIdx=pIdxNext){ - pIdxNext = pIdx->pNext; - cbFree(pIdx); - } - cbFree(pTab); -} - -/* -** Find or create the BatchTable object named zTab. 
-*/ -static int cbFindTable( - sqlite3_changebatch *p, - const char *zTab, - BatchTable **ppTab -){ - BatchTable *pRet = 0; - int rc = SQLITE_OK; - - for(pRet=p->pTab; pRet; pRet=pRet->pNext){ - if( 0==sqlite3_stricmp(zTab, pRet->zTab) ) break; - } - - if( pRet==0 ){ - int nTab = strlen(zTab); - pRet = (BatchTable*)cbMalloc(&rc, nTab + sizeof(BatchTable)); - if( pRet ){ - sqlite3_stmt *pIndexList = 0; - char *zIndexList = 0; - int rc2; - memcpy(pRet->zTab, zTab, nTab); - - zIndexList = sqlite3_mprintf("PRAGMA main.index_list = %Q", zTab); - if( zIndexList==0 ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_prepare_v2(p->db, zIndexList, -1, &pIndexList, 0); - sqlite3_free(zIndexList); - } - - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pIndexList) ){ - if( sqlite3_column_int(pIndexList, 2) ){ - const char *zIdx = (const char*)sqlite3_column_text(pIndexList, 1); - const char *zTyp = (const char*)sqlite3_column_text(pIndexList, 3); - rc = cbAddIndex(p, pRet, zIdx, (zTyp[0]=='p')); - } - } - rc2 = sqlite3_finalize(pIndexList); - if( rc==SQLITE_OK ) rc = rc2; - - if( rc==SQLITE_OK ){ - pRet->pNext = p->pTab; - p->pTab = pRet; - }else{ - cbFreeTable(pRet); - pRet = 0; - } - } - } - - *ppTab = pRet; - return rc; -} - -/* -** Extract value iVal from the changeset iterator passed as the first -** argument. Set *ppVal to point to the value before returning. -** -** This function attempts to extract the value using function xVal -** (which is always either sqlite3changeset_new or sqlite3changeset_old). -** If the call returns SQLITE_OK but does not supply an sqlite3_value* -** pointer, an attempt to extract the value is made using the xFallback -** function. -*/ -static int cbGetChangesetValue( - sqlite3_changeset_iter *pIter, - int (*xVal)(sqlite3_changeset_iter*,int,sqlite3_value**), - int (*xFallback)(sqlite3_changeset_iter*,int,sqlite3_value**), - int iVal, - sqlite3_value **ppVal -){ - int rc = xVal(pIter, iVal, ppVal); - if( rc==SQLITE_OK && *ppVal==0 && xFallback ){ - rc = xFallback(pIter, iVal, ppVal); - } - return rc; -} - -static int cbAddToHash( - sqlite3_changebatch *p, - sqlite3_changeset_iter *pIter, - BatchIndex *pIdx, - int (*xVal)(sqlite3_changeset_iter*,int,sqlite3_value**), - int (*xFallback)(sqlite3_changeset_iter*,int,sqlite3_value**), - int *pbConf -){ - BatchIndexEntry *pNew; - int sz = pIdx->nCol; - int i; - int iOut = 0; - int rc = SQLITE_OK; - - for(i=0; rc==SQLITE_OK && inCol; i++){ - sqlite3_value *pVal; - rc = cbGetChangesetValue(pIter, xVal, xFallback, pIdx->aiCol[i], &pVal); - if( rc==SQLITE_OK ){ - int eType = 0; - if( pVal ) eType = sqlite3_value_type(pVal); - switch( eType ){ - case 0: - case SQLITE_NULL: - return SQLITE_OK; - - case SQLITE_INTEGER: - sz += 8; - break; - case SQLITE_FLOAT: - sz += 8; - break; - - default: - assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB ); - sz += sqlite3_value_bytes(pVal); - break; - } - } - } - - pNew = cbMalloc(&rc, sizeof(BatchIndexEntry) + sz); - if( pNew ){ - pNew->iChangesetId = p->iChangesetId; - pNew->iIdxId = pIdx->iId; - pNew->szRecord = sz; - - for(i=0; inCol; i++){ - int eType; - sqlite3_value *pVal; - rc = cbGetChangesetValue(pIter, xVal, xFallback, pIdx->aiCol[i], &pVal); - if( rc!=SQLITE_OK ) break; /* coverage: condition is never true */ - eType = sqlite3_value_type(pVal); - pNew->aRecord[iOut++] = eType; - switch( eType ){ - case SQLITE_INTEGER: { - sqlite3_int64 i64 = sqlite3_value_int64(pVal); - memcpy(&pNew->aRecord[iOut], &i64, 8); - iOut += 8; - break; - } - case SQLITE_FLOAT: { - double d64 = 
sqlite3_value_double(pVal); - memcpy(&pNew->aRecord[iOut], &d64, sizeof(double)); - iOut += sizeof(double); - break; - } - - default: { - int nByte = sqlite3_value_bytes(pVal); - const char *z = (const char*)sqlite3_value_blob(pVal); - memcpy(&pNew->aRecord[iOut], z, nByte); - iOut += nByte; - break; - } - } - } - } - - if( rc==SQLITE_OK && p->nEntry>=(p->nHash/2) ){ - rc = cbHashResize(p); - } - - if( rc==SQLITE_OK ){ - BatchIndexEntry *pIter; - int iHash = cbHash(p, pNew); - - assert( iHash>=0 && iHashnHash ); - for(pIter=p->apHash[iHash]; pIter; pIter=pIter->pNext){ - if( pNew->szRecord==pIter->szRecord - && 0==memcmp(pNew->aRecord, pIter->aRecord, pNew->szRecord) - ){ - if( pNew->iChangesetId!=pIter->iChangesetId ){ - *pbConf = 1; - } - cbFree(pNew); - pNew = 0; - break; - } - } - - if( pNew ){ - pNew->pNext = p->apHash[iHash]; - p->apHash[iHash] = pNew; - p->nEntry++; - } - }else{ - cbFree(pNew); - } - - return rc; -} - - -/* -** Add a changeset to the current batch. -*/ -int sqlite3changebatch_add(sqlite3_changebatch *p, void *pBuf, int nBuf){ - sqlite3_changeset_iter *pIter; /* Iterator opened on pBuf/nBuf */ - int rc; /* Return code */ - int bConf = 0; /* Conflict was detected */ - - rc = sqlite3changeset_start(&pIter, nBuf, pBuf); - if( rc==SQLITE_OK ){ - int rc2; - for(rc2 = sqlite3changeset_next(pIter); - rc2==SQLITE_ROW; - rc2 = sqlite3changeset_next(pIter) - ){ - BatchTable *pTab; - BatchIndex *pIdx; - const char *zTab; /* Table this change applies to */ - int nCol; /* Number of columns in table */ - int op; /* UPDATE, INSERT or DELETE */ - - sqlite3changeset_op(pIter, &zTab, &nCol, &op, 0); - assert( op==SQLITE_INSERT || op==SQLITE_UPDATE || op==SQLITE_DELETE ); - - rc = cbFindTable(p, zTab, &pTab); - assert( pTab || rc!=SQLITE_OK ); - if( pTab ){ - for(pIdx=pTab->pIdx; pIdx && rc==SQLITE_OK; pIdx=pIdx->pNext){ - if( op==SQLITE_UPDATE && pIdx->bPk ) continue; - if( op==SQLITE_UPDATE || op==SQLITE_DELETE ){ - rc = cbAddToHash(p, pIter, pIdx, sqlite3changeset_old, 0, &bConf); - } - if( op==SQLITE_UPDATE || op==SQLITE_INSERT ){ - rc = cbAddToHash(p, pIter, pIdx, - sqlite3changeset_new, sqlite3changeset_old, &bConf - ); - } - } - } - if( rc!=SQLITE_OK ) break; - } - - rc2 = sqlite3changeset_finalize(pIter); - if( rc==SQLITE_OK ) rc = rc2; - } - - if( rc==SQLITE_OK && bConf ){ - rc = SQLITE_CONSTRAINT; - } - p->iChangesetId++; - return rc; -} - -/* -** Zero an existing changebatch object. -*/ -void sqlite3changebatch_zero(sqlite3_changebatch *p){ - int i; - for(i=0; inHash; i++){ - BatchIndexEntry *pEntry; - BatchIndexEntry *pNext; - for(pEntry=p->apHash[i]; pEntry; pEntry=pNext){ - pNext = pEntry->pNext; - cbFree(pEntry); - } - } - cbFree(p->apHash); - p->nHash = 0; - p->apHash = 0; -} - -/* -** Delete a changebatch object. -*/ -void sqlite3changebatch_delete(sqlite3_changebatch *p){ - BatchTable *pTab; - BatchTable *pTabNext; - - sqlite3changebatch_zero(p); - for(pTab=p->pTab; pTab; pTab=pTabNext){ - pTabNext = pTab->pNext; - cbFreeTable(pTab); - } - cbFree(p); -} - -/* -** Return the db handle. 
-*/ -sqlite3 *sqlite3changebatch_db(sqlite3_changebatch *p){ - return p->db; -} - -#endif /* SQLITE_ENABLE_SESSION && SQLITE_ENABLE_PREUPDATE_HOOK */ DELETED ext/session/sqlite3changebatch.h Index: ext/session/sqlite3changebatch.h ================================================================== --- ext/session/sqlite3changebatch.h +++ /dev/null @@ -1,82 +0,0 @@ - -#if !defined(SQLITECHANGEBATCH_H_) -#define SQLITECHANGEBATCH_H_ 1 - -typedef struct sqlite3_changebatch sqlite3_changebatch; - -/* -** Create a new changebatch object for detecting conflicts between -** changesets associated with a schema equivalent to that of the "main" -** database of the open database handle db passed as the first -** parameter. It is the responsibility of the caller to ensure that -** the database handle is not closed until after the changebatch -** object has been deleted. -** -** A changebatch object is used to detect batches of non-conflicting -** changesets. Changesets that do not conflict may be applied to the -** target database in any order without affecting the final state of -** the database. -** -** The changebatch object only works reliably if PRIMARY KEY and UNIQUE -** constraints on tables affected by the changesets use collation -** sequences that are equivalent to built-in collation sequence -** BINARY for the == operation. -** -** If successful, SQLITE_OK is returned and (*pp) set to point to -** the new changebatch object. If an error occurs, an SQLite error -** code is returned and the final value of (*pp) is undefined. -*/ -int sqlite3changebatch_new(sqlite3 *db, sqlite3_changebatch **pp); - -/* -** Argument p points to a buffer containing a changeset n bytes in -** size. Assuming no error occurs, this function returns SQLITE_OK -** if the changeset does not conflict with any changeset passed -** to an sqlite3changebatch_add() call made on the same -** sqlite3_changebatch* handle since the most recent call to -** sqlite3changebatch_zero(). If the changeset does conflict with -** an earlier such changeset, SQLITE_CONSTRAINT is returned. Or, -** if an error occurs, some other SQLite error code may be returned. -** -** One changeset is said to conflict with another if -** either: -** -** * the two changesets contain operations (INSERT, UPDATE or -** DELETE) on the same row, identified by primary key, or -** -** * the two changesets contain operations (INSERT, UPDATE or -** DELETE) on rows with identical values in any combination -** of fields constrained by a UNIQUE constraint. -** -** Even if this function returns SQLITE_CONFLICT, the current -** changeset is added to the internal data structures - so future -** calls to this function may conflict with it. If this function -** returns any result code other than SQLITE_OK or SQLITE_CONFLICT, -** the result of any future call to sqlite3changebatch_add() is -** undefined. -** -** Only changesets may be passed to this function. Passing a -** patchset to this function results in an SQLITE_MISUSE error. -*/ -int sqlite3changebatch_add(sqlite3_changebatch*, void *p, int n); - -/* -** Zero a changebatch object. This causes the records of all earlier -** calls to sqlite3changebatch_add() to be discarded. -*/ -void sqlite3changebatch_zero(sqlite3_changebatch*); - -/* -** Return a copy of the first argument passed to the sqlite3changebatch_new() -** call used to create the changebatch object passed as the only argument -** to this function. -*/ -sqlite3 *sqlite3changebatch_db(sqlite3_changebatch*); - -/* -** Delete a changebatch object. 
-*/ -void sqlite3changebatch_delete(sqlite3_changebatch*); - -#endif /* !defined(SQLITECHANGEBATCH_H_) */ - Index: ext/session/sqlite3session.c ================================================================== --- ext/session/sqlite3session.c +++ ext/session/sqlite3session.c @@ -23,17 +23,10 @@ # else # define SESSIONS_STRM_CHUNK_SIZE 1024 # endif #endif -/* -** The three different types of changesets generated. -*/ -#define SESSIONS_PATCHSET 0 -#define SESSIONS_CHANGESET 1 -#define SESSIONS_FULLCHANGESET 2 - static int sessions_strm_chunk_size = SESSIONS_STRM_CHUNK_SIZE; typedef struct SessionHook SessionHook; struct SessionHook { void *pCtx; @@ -2238,11 +2231,11 @@ ** original values of any fields that have been modified. The new.* record ** contains the new values of only those fields that have been modified. */ static int sessionAppendUpdate( SessionBuffer *pBuf, /* Buffer to append to */ - int ePatchset, /* True for "patchset", 0 for "changeset" */ + int bPatchset, /* True for "patchset", 0 for "changeset" */ sqlite3_stmt *pStmt, /* Statement handle pointing at new row */ SessionChange *p, /* Object containing old values */ u8 *abPK /* Boolean array - true for PK columns */ ){ int rc = SQLITE_OK; @@ -2302,21 +2295,21 @@ /* If at least one field has been modified, this is not a no-op. */ if( bChanged ) bNoop = 0; /* Add a field to the old.* record. This is omitted if this modules is ** currently generating a patchset. */ - if( ePatchset!=SESSIONS_PATCHSET ){ - if( ePatchset==SESSIONS_FULLCHANGESET || bChanged || abPK[i] ){ + if( bPatchset==0 ){ + if( bChanged || abPK[i] ){ sessionAppendBlob(pBuf, pCsr, nAdvance, &rc); }else{ sessionAppendByte(pBuf, 0, &rc); } } /* Add a field to the new.* record. Or the only record if currently ** generating a patchset. */ - if( bChanged || (ePatchset==SESSIONS_PATCHSET && abPK[i]) ){ + if( bChanged || (bPatchset && abPK[i]) ){ sessionAppendCol(&buf2, pStmt, i, &rc); }else{ sessionAppendByte(&buf2, 0, &rc); } @@ -2338,21 +2331,21 @@ ** the changeset format if argument bPatchset is zero, or the patchset ** format otherwise. */ static int sessionAppendDelete( SessionBuffer *pBuf, /* Buffer to append to */ - int eChangeset, /* One of SESSIONS_CHANGESET etc. */ + int bPatchset, /* True for "patchset", 0 for "changeset" */ SessionChange *p, /* Object containing old values */ int nCol, /* Number of columns in table */ u8 *abPK /* Boolean array - true for PK columns */ ){ int rc = SQLITE_OK; sessionAppendByte(pBuf, SQLITE_DELETE, &rc); sessionAppendByte(pBuf, p->bIndirect, &rc); - if( eChangeset!=SESSIONS_PATCHSET ){ + if( bPatchset==0 ){ sessionAppendBlob(pBuf, p->aRecord, p->nRecord, &rc); }else{ int i; u8 *a = p->aRecord; for(i=0; inCol, pRc); sessionAppendBlob(pBuf, pTab->abPK, pTab->nCol, pRc); sessionAppendBlob(pBuf, (u8 *)pTab->zName, (int)strlen(pTab->zName)+1, pRc); } @@ -2544,11 +2537,11 @@ ** occurs, an SQLite error code is returned and both output variables set ** to 0. */ static int sessionGenerateChangeset( sqlite3_session *pSession, /* Session object */ - int ePatchset, /* One of SESSIONS_CHANGESET etc. 
*/ + int bPatchset, /* True for patchset, false for changeset */ int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut, /* First argument for xOutput */ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ void **ppChangeset /* OUT: Buffer containing changeset */ ){ @@ -2591,11 +2584,11 @@ if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){ rc = SQLITE_SCHEMA; } /* Write a table header */ - sessionAppendTableHdr(&buf, ePatchset, pTab, &rc); + sessionAppendTableHdr(&buf, bPatchset, pTab, &rc); /* Build and compile a statement to execute: */ if( rc==SQLITE_OK ){ rc = sessionSelectStmt( db, pSession->zDb, zName, nCol, azCol, abPK, &pSel); @@ -2616,14 +2609,14 @@ for(iCol=0; iColop!=SQLITE_INSERT ){ - rc = sessionAppendDelete(&buf, ePatchset, p, nCol, abPK); + rc = sessionAppendDelete(&buf, bPatchset, p, nCol, abPK); } if( rc==SQLITE_OK ){ rc = sqlite3_reset(pSel); } @@ -2679,12 +2672,11 @@ void **ppChangeset /* OUT: Buffer containing changeset */ ){ int rc; if( pnChangeset==0 || ppChangeset==0 ) return SQLITE_MISUSE; - rc = sessionGenerateChangeset( - pSession, SESSIONS_CHANGESET, 0, 0, pnChangeset, ppChangeset); + rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset,ppChangeset); assert( rc || pnChangeset==0 || pSession->bEnableSize==0 || *pnChangeset<=pSession->nMaxChangesetSize ); return rc; } @@ -2696,12 +2688,11 @@ sqlite3_session *pSession, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ){ if( xOutput==0 ) return SQLITE_MISUSE; - return sessionGenerateChangeset( - pSession, SESSIONS_CHANGESET, xOutput, pOut, 0, 0); + return sessionGenerateChangeset(pSession, 0, xOutput, pOut, 0, 0); } /* ** Streaming version of sqlite3session_patchset(). */ @@ -2709,12 +2700,11 @@ sqlite3_session *pSession, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ){ if( xOutput==0 ) return SQLITE_MISUSE; - return sessionGenerateChangeset( - pSession, SESSIONS_PATCHSET, xOutput, pOut, 0, 0); + return sessionGenerateChangeset(pSession, 1, xOutput, pOut, 0, 0); } /* ** Obtain a patchset object containing all changes recorded by the ** session object passed as the first argument. @@ -2726,23 +2716,12 @@ sqlite3_session *pSession, /* Session object */ int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */ void **ppPatchset /* OUT: Buffer containing changeset */ ){ if( pnPatchset==0 || ppPatchset==0 ) return SQLITE_MISUSE; - return sessionGenerateChangeset( - pSession, SESSIONS_PATCHSET, 0, 0, pnPatchset, ppPatchset); -} - -int sqlite3session_fullchangeset( - sqlite3_session *pSession, /* Session object */ - int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ - void **ppChangeset /* OUT: Buffer containing changeset */ -){ - return sessionGenerateChangeset( - pSession, SESSIONS_FULLCHANGESET, 0, 0, pnChangeset, ppChangeset); -} - + return sessionGenerateChangeset(pSession, 1, 0, 0, pnPatchset, ppPatchset); +} /* ** Enable or disable the session object passed as the first argument. */ int sqlite3session_enable(sqlite3_session *pSession, int bEnable){ @@ -3346,10 +3325,26 @@ } }else if( p->bInvert ){ if( p->op==SQLITE_INSERT ) p->op = SQLITE_DELETE; else if( p->op==SQLITE_DELETE ) p->op = SQLITE_INSERT; } + + /* If this is an UPDATE that is part of a changeset, then check that + ** there are no fields in the old.* record that are not (a) PK fields, + ** or (b) also present in the new.* record. + ** + ** Such records are technically corrupt, but the rebaser was at one + ** point generating them. 
Under most circumstances this is benign, but + ** can cause spurious SQLITE_RANGE errors when applying the changeset. */ + if( p->bPatchset==0 && p->op==SQLITE_UPDATE){ + for(i=0; inCol; i++){ + if( p->abPK[i]==0 && p->apValue[i+p->nCol]==0 ){ + sqlite3ValueFree(p->apValue[i]); + p->apValue[i] = 0; + } + } + } } return SQLITE_ROW; } @@ -5282,15 +5277,14 @@ /* Create the serialized output changeset based on the contents of the ** hash tables attached to the SessionTable objects in list p->pList. */ for(pTab=pGrp->pList; rc==SQLITE_OK && pTab; pTab=pTab->pNext){ - int eChangeset = pGrp->bPatch ? SESSIONS_PATCHSET : SESSIONS_CHANGESET; int i; if( pTab->nEntry==0 ) continue; - sessionAppendTableHdr(&buf, eChangeset, pTab, &rc); + sessionAppendTableHdr(&buf, pGrp->bPatch, pTab, &rc); for(i=0; inChange; i++){ SessionChange *p; for(p=pTab->apChange[i]; p; p=p->pNext){ sessionAppendByte(&buf, p->op, &rc); sessionAppendByte(&buf, p->bIndirect, &rc); @@ -5543,11 +5537,11 @@ int n2 = sessionSerialLen(a2); if( pIter->abPK[i] || a2[0]==0 ){ if( !pIter->abPK[i] && a1[0] ) bData = 1; memcpy(pOut, a1, n1); pOut += n1; - }else if( a2[0]!=0xFF ){ + }else if( a2[0]!=0xFF && a1[0] ){ bData = 1; memcpy(pOut, a2, n2); pOut += n2; }else{ *pOut++ = '\0'; Index: ext/session/sqlite3session.h ================================================================== --- ext/session/sqlite3session.h +++ ext/session/sqlite3session.h @@ -348,36 +348,10 @@ ** Or, if one field of a row is updated while a session is disabled, and ** another field of the same row is updated while the session is enabled, the ** resulting changeset will contain an UPDATE change that updates both fields. */ int sqlite3session_changeset( - sqlite3_session *pSession, /* Session object */ - int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ - void **ppChangeset /* OUT: Buffer containing changeset */ -); - -/* -** CAPI3REF: Generate A Full Changeset From A Session Object -** -** This function is similar to sqlite3session_changeset(), except that for -** each row affected by an UPDATE statement, all old.* values are recorded -** as part of the changeset, not just those modified. -*/ -int sqlite3session_fullchangeset( - sqlite3_session *pSession, /* Session object */ - int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ - void **ppChangeset /* OUT: Buffer containing changeset */ -); - -/* -** CAPI3REF: Generate A Full Changeset From A Session Object -** -** This function is similar to sqlite3session_changeset(), except that for -** each row affected by an UPDATE statement, all old.* values are recorded -** as part of the changeset, not just those modified. 
-*/ -int sqlite3session_fullchangeset( sqlite3_session *pSession, /* Session object */ int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ void **ppChangeset /* OUT: Buffer containing changeset */ ); Index: ext/session/test_session.c ================================================================== --- ext/session/test_session.c +++ ext/session/test_session.c @@ -96,10 +96,23 @@ return rc; } /************************************************************************/ + +#ifdef SQLITE_DEBUG +static int sqlite3_test_changeset(int, void *, char **); +static void assert_changeset_is_ok(int n, void *p){ + int rc = 0; + char *z = 0; + rc = sqlite3_test_changeset(n, p, &z); + assert( z==0 ); +} +#else +# define assert_changeset_is_ok(n,p) +#endif + /* ** Tclcmd: sql_exec_changeset DB SQL */ static int SQLITE_TCLAPI test_sql_exec_changeset( void * clientData, @@ -125,10 +138,11 @@ Tcl_ResetResult(interp); Tcl_AppendResult(interp, "error in sql_exec_changeset()", 0); return TCL_ERROR; } + assert_changeset_is_ok(nChangeset, pChangeset); Tcl_SetObjResult(interp, Tcl_NewByteArrayObj(pChangeset, nChangeset)); sqlite3_free(pChangeset); return TCL_OK; } @@ -219,11 +233,10 @@ ** $session delete ** $session enable BOOL ** $session indirect INTEGER ** $session patchset ** $session table_filter SCRIPT -** $session fullchangeset */ static int SQLITE_TCLAPI test_session_cmd( void *clientData, Tcl_Interp *interp, int objc, @@ -233,24 +246,24 @@ sqlite3_session *pSession = p->pSession; static struct SessionSubcmd { const char *zSub; int nArg; const char *zMsg; + int iSub; } aSub[] = { - { "attach", 1, "TABLE" }, /* 0 */ - { "changeset", 0, "" }, /* 1 */ - { "delete", 0, "" }, /* 2 */ - { "enable", 1, "BOOL" }, /* 3 */ - { "indirect", 1, "BOOL" }, /* 4 */ - { "isempty", 0, "" }, /* 5 */ - { "table_filter", 1, "SCRIPT" }, /* 6 */ + { "attach", 1, "TABLE", }, /* 0 */ + { "changeset", 0, "", }, /* 1 */ + { "delete", 0, "", }, /* 2 */ + { "enable", 1, "BOOL", }, /* 3 */ + { "indirect", 1, "BOOL", }, /* 4 */ + { "isempty", 0, "", }, /* 5 */ + { "table_filter", 1, "SCRIPT", }, /* 6 */ { "patchset", 0, "", }, /* 7 */ - { "diff", 2, "FROMDB TBL" }, /* 8 */ - { "fullchangeset",0, "" }, /* 9 */ - { "memory_used", 0, "", }, /* 10 */ - { "changeset_size", 0, "", }, /* 11 */ - { "object_config_size", 1, "INTEGER", }, /* 12 */ + { "diff", 2, "FROMDB TBL", }, /* 8 */ + { "memory_used", 0, "", }, /* 9 */ + { "changeset_size", 0, "", }, /* 10 */ + { "object_config_size", 1, "INTEGER", }, /* 11 */ { 0 } }; int iSub; int rc; @@ -276,40 +289,37 @@ return test_session_error(interp, rc, 0); } break; } - case 9: /* fullchangeset */ case 7: /* patchset */ case 1: { /* changeset */ TestSessionsBlob o = {0, 0}; - if( iSub!=9 && test_tcl_integer(interp, SESSION_STREAM_TCL_VAR) ){ + if( test_tcl_integer(interp, SESSION_STREAM_TCL_VAR) ){ void *pCtx = (void*)&o; if( iSub==7 ){ rc = sqlite3session_patchset_strm(pSession, testStreamOutput, pCtx); }else{ rc = sqlite3session_changeset_strm(pSession, testStreamOutput, pCtx); } }else{ if( iSub==7 ){ rc = sqlite3session_patchset(pSession, &o.n, &o.p); - }else if( iSub==9 ){ - rc = sqlite3session_fullchangeset(pSession, &o.n, &o.p); }else{ rc = sqlite3session_changeset(pSession, &o.n, &o.p); } } if( rc==SQLITE_OK ){ + assert_changeset_is_ok(o.n, o.p); Tcl_SetObjResult(interp, Tcl_NewByteArrayObj(o.p, o.n)); } sqlite3_free(o.p); if( rc!=SQLITE_OK ){ return test_session_error(interp, rc, 0); } break; } - case 2: /* delete */ Tcl_DeleteCommand(interp, Tcl_GetString(objv[0])); break; @@ -357,22 
+367,22 @@ return test_session_error(interp, rc, zErr); } break; } - case 10: { /* memory_used */ + case 9: { /* memory_used */ sqlite3_int64 nMalloc = sqlite3session_memory_used(pSession); Tcl_SetObjResult(interp, Tcl_NewWideIntObj(nMalloc)); break; } - case 11: { + case 10: { sqlite3_int64 nSize = sqlite3session_changeset_size(pSession); Tcl_SetObjResult(interp, Tcl_NewWideIntObj(nSize)); break; } - case 12: { + case 11: { int rc; int iArg; if( Tcl_GetIntFromObj(interp, objv[2], &iArg) ){ return TCL_ERROR; } @@ -524,11 +534,11 @@ Tcl_BackgroundError(interp); } Tcl_DecrRefCount(pEval); return res; -} +} static int test_conflict_handler( void *pCtx, /* Pointer to TestConflictHandler structure */ int eConf, /* DATA, MISSING, CONFLICT, CONSTRAINT */ sqlite3_changeset_iter *pIter /* Handle describing change and conflict */ @@ -956,10 +966,11 @@ rc = sqlite3changeset_invert(sIn.nData, sIn.aData, &sOut.n, &sOut.p); } if( rc!=SQLITE_OK ){ rc = test_session_error(interp, rc, 0); }else{ + assert_changeset_is_ok(sOut.n, sOut.p); Tcl_SetObjResult(interp,Tcl_NewByteArrayObj((unsigned char*)sOut.p,sOut.n)); } sqlite3_free(sOut.p); return rc; } @@ -1004,10 +1015,11 @@ } if( rc!=SQLITE_OK ){ rc = test_session_error(interp, rc, 0); }else{ + assert_changeset_is_ok(sOut.n, sOut.p); Tcl_SetObjResult(interp,Tcl_NewByteArrayObj((unsigned char*)sOut.p,sOut.n)); } sqlite3_free(sOut.p); return rc; } @@ -1166,131 +1178,10 @@ } return TCL_OK; } -#include "sqlite3changebatch.h" - -typedef struct TestChangebatch TestChangebatch; -struct TestChangebatch { - sqlite3_changebatch *pChangebatch; -}; - -/* -** Tclcmd: $changebatch add BLOB -** $changebatch zero -** $changebatch delete -*/ -static int SQLITE_TCLAPI test_changebatch_cmd( - void *clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - TestChangebatch *p = (TestChangebatch*)clientData; - sqlite3_changebatch *pChangebatch = p->pChangebatch; - struct SessionSubcmd { - const char *zSub; - int nArg; - const char *zMsg; - int iSub; - } aSub[] = { - { "add", 1, "CHANGESET", }, /* 0 */ - { "zero", 0, "", }, /* 1 */ - { "delete", 0, "", }, /* 2 */ - { 0 } - }; - int iSub; - int rc; - - if( objc<2 ){ - Tcl_WrongNumArgs(interp, 1, objv, "SUBCOMMAND ..."); - return TCL_ERROR; - } - rc = Tcl_GetIndexFromObjStruct(interp, - objv[1], aSub, sizeof(aSub[0]), "sub-command", 0, &iSub - ); - if( rc!=TCL_OK ) return rc; - if( objc!=2+aSub[iSub].nArg ){ - Tcl_WrongNumArgs(interp, 2, objv, aSub[iSub].zMsg); - return TCL_ERROR; - } - - switch( iSub ){ - case 0: { /* add */ - int nArg; - unsigned char *pArg = Tcl_GetByteArrayFromObj(objv[2], &nArg); - rc = sqlite3changebatch_add(pChangebatch, pArg, nArg); - if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT ){ - return test_session_error(interp, rc, 0); - }else{ - extern const char *sqlite3ErrName(int); - Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3ErrName(rc), -1)); - } - break; - } - - case 1: { /* zero */ - sqlite3changebatch_zero(pChangebatch); - break; - } - - case 2: /* delete */ - Tcl_DeleteCommand(interp, Tcl_GetString(objv[0])); - break; - } - - return TCL_OK; -} - -static void SQLITE_TCLAPI test_changebatch_del(void *clientData){ - TestChangebatch *p = (TestChangebatch*)clientData; - sqlite3changebatch_delete(p->pChangebatch); - ckfree((char*)p); -} - -/* -** Tclcmd: sqlite3changebatch CMD DB-HANDLE -*/ -static int SQLITE_TCLAPI test_sqlite3changebatch( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - sqlite3 *db; - Tcl_CmdInfo info; - int rc; /* sqlite3session_create() 
return code */ - TestChangebatch *p; /* New wrapper object */ - - if( objc!=3 ){ - Tcl_WrongNumArgs(interp, 1, objv, "CMD DB-HANDLE"); - return TCL_ERROR; - } - - if( 0==Tcl_GetCommandInfo(interp, Tcl_GetString(objv[2]), &info) ){ - Tcl_AppendResult(interp, "no such handle: ", Tcl_GetString(objv[2]), 0); - return TCL_ERROR; - } - db = *(sqlite3 **)info.objClientData; - - p = (TestChangebatch*)ckalloc(sizeof(TestChangebatch)); - memset(p, 0, sizeof(TestChangebatch)); - rc = sqlite3changebatch_new(db, &p->pChangebatch); - if( rc!=SQLITE_OK ){ - ckfree((char*)p); - return test_session_error(interp, rc, 0); - } - - Tcl_CreateObjCommand( - interp, Tcl_GetString(objv[1]), test_changebatch_cmd, (ClientData)p, - test_changebatch_del - ); - Tcl_SetObjResult(interp, objv[1]); - return TCL_OK; -} - /* ** tclcmd: CMD configure REBASE-BLOB ** tclcmd: CMD rebase CHANGESET ** tclcmd: CMD delete */ @@ -1360,10 +1251,11 @@ }else{ rc = sqlite3rebaser_rebase(p, sStr.nData, sStr.aData, &sOut.n, &sOut.p); } if( rc==SQLITE_OK ){ + assert_changeset_is_ok(sOut.n, sOut.p); Tcl_SetObjResult(interp, Tcl_NewByteArrayObj(sOut.p, sOut.n)); } sqlite3_free(sOut.p); break; } @@ -1405,10 +1297,103 @@ (ClientData)pNew, test_rebaser_del ); Tcl_SetObjResult(interp, objv[1]); return TCL_OK; } + +/* +** +*/ +static int sqlite3_test_changeset( + int nChangeset, + void *pChangeset, + char **pzErr +){ + sqlite3_changeset_iter *pIter = 0; + char *zErr = 0; + int rc = SQLITE_OK; + int bPatch = (nChangeset>0 && ((char*)pChangeset)[0]=='P'); + + rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset); + if( rc==SQLITE_OK ){ + int rc2; + while( rc==SQLITE_OK && SQLITE_ROW==sqlite3changeset_next(pIter) ){ + unsigned char *aPk = 0; + int nCol = 0; + int op = 0; + const char *zTab = 0; + + sqlite3changeset_pk(pIter, &aPk, &nCol); + sqlite3changeset_op(pIter, &zTab, &nCol, &op, 0); + + if( op==SQLITE_UPDATE ){ + int iCol; + for(iCol=0; iColzCmd, p->xProc, 0, 0); } - - Tcl_CreateObjCommand( - interp, "sqlite3changebatch", test_sqlite3changebatch, 0, 0 - ); return TCL_OK; } #endif /* SQLITE_TEST && SQLITE_SESSION && SQLITE_PREUPDATE_HOOK */ DELETED ext/wasm/EXPORTED_RUNTIME_METHODS.fiddle Index: ext/wasm/EXPORTED_RUNTIME_METHODS.fiddle ================================================================== --- ext/wasm/EXPORTED_RUNTIME_METHODS.fiddle +++ /dev/null @@ -1,14 +0,0 @@ -FS -addFunction -allocateUTF8OnStack -ccall -cwrap -getValue -intArrayFromString -lengthBytesUTF8 -removeFunction -setValue -stackAlloc -stackRestore -stackSave -stringToUTF8Array Index: ext/wasm/GNUmakefile ================================================================== --- ext/wasm/GNUmakefile +++ ext/wasm/GNUmakefile @@ -1,39 +1,116 @@ -# This GNU makefile exists primarily to simplify/speed up development -# of the sqlite3 WASM components. It is not part of the canonical -# build process. -# -# Maintenance notes: the fiddle build is currently performed in the -# top-level ../../Makefile.in. It may be moved into this file at some -# point, as GNU Make has been deemed acceptable for the WASM-related -# components (whereas POSIX Make is required for the more conventional -# components). -SHELL := $(shell which bash 2>/dev/null) -all: - -.PHONY: fiddle -ifneq (,$(wildcard /home/stephan)) - fiddle_opt ?= -O0 -else - fiddle_opt = -Os -endif -fiddle: - $(MAKE) -C ../.. 
fiddle -e emcc_opt=$(fiddle_opt) - -clean: - $(MAKE) -C ../../ clean-fiddle - -rm -f $(CLEAN_FILES) - +####################################################################### +# This GNU makefile drives the build of the sqlite3 WASM +# components. It is not part of the canonical build process. +# +# This build assumes a Linux platform and is not intended for +# general-purpose client-level use, except for creating builds with +# custom configurations. It is primarily intended for the sqlite +# project's own development of the JS/WASM components. +# +# Primary targets: +# +# default, all = build in dev mode +# +# o0, o1, o2, o3, os, oz = full clean/rebuild with the -Ox level indicated +# by the target name. Rebuild is necessary for all components to get +# the desired optimization level. +# +# dist = create end user deliverables. Add dist.build=oX to build +# with a specific optimization level, where oX is one of the +# above-listed o? target names. +# +# clean = clean up +######################################################################## +SHELL := $(shell which bash 2>/dev/null) MAKEFILE := $(lastword $(MAKEFILE_LIST)) +CLEAN_FILES := +DISTCLEAN_FILES := ./--dummy-- +default: all +release: oz + +# Emscripten SDK home dir and related binaries... +EMSDK_HOME ?= $(word 1,$(wildcard $(HOME)/emsdk $(HOME)/src/emsdk)) +emcc.bin ?= $(word 1,$(wildcard $(EMSDK_HOME)/upstream/emscripten/emcc) $(shell which emcc)) +ifeq (,$(emcc.bin)) + $(error Cannot find emcc.) +endif + +wasm-strip ?= $(shell which wasm-strip 2>/dev/null) +ifeq (,$(filter clean,$(MAKECMDGOALS))) +ifeq (,$(wasm-strip)) + $(info WARNING: *******************************************************************) + $(info WARNING: builds using -O2/-O3/-Os/-Oz will minify WASM-exported names,) + $(info WARNING: breaking _All The Things_. The workaround for that is to build) + $(info WARNING: with -g3 (which explodes the file size) and then strip the debug) + $(info WARNING: info after compilation, using wasm-strip, to shrink the wasm file.) + $(info WARNING: wasm-strip was not found in the PATH so we cannot strip those.) + $(info WARNING: If this build uses any optimization level higher than -O1 then) + $(info WARNING: the ***resulting JS code WILL NOT BE USABLE***.) + $(info WARNING: wasm-strip is part of the wabt package:) + $(info WARNING: https://github.com/WebAssembly/wabt) + $(info WARNING: on Ubuntu-like systems it can be installed with:) + $(info WARNING: sudo apt install wabt) + $(info WARNING: *******************************************************************) +endif +endif # 'make clean' check + +ifeq (,$(wasm-strip)) + maybe-wasm-strip = echo "not wasm-stripping" +else + maybe-wasm-strip = $(wasm-strip) +endif + dir.top := ../.. -# Reminder: some Emscripten flags require absolute paths -dir.wasm := $(patsubst %/,%,$(dir $(abspath $(MAKEFILE)))) +# Reminder: some Emscripten flags require absolute paths but we want +# relative paths for most stuff simply to reduce noise. The +# $(abspath...) GNU make function can transform relative paths to +# absolute. +dir.wasm := $(patsubst %/,%,$(dir $(MAKEFILE))) dir.api := api dir.jacc := jaccwabyt dir.common := common -CLEAN_FILES := *~ $(dir.jacc)/*~ $(dir.api)/*~ $(dir.common)/*~ +dir.fiddle := fiddle +dir.tool := $(dir.top)/tool +######################################################################## +# dir.dout = output dir for deliverables. 
+# +# MAINTENANCE REMINDER: the output .js and .wasm files of emcc must be +# in _this_ dir, rather than a subdir, or else parts of the generated +# code get confused and cannot load property. Specifically, when X.js +# loads X.wasm, whether or not X.js uses the correct path for X.wasm +# depends on how it's loaded: an HTML script tag will resolve it +# intuitively, whereas a Worker's call to importScripts() will not. +# That's a fundamental incompatibility with how URL resolution in +# JS happens between those two contexts. See: +# +# https://zzz.buzz/2017/03/14/relative-uris-in-web-development/ +# +# We unfortunately have no way, from Worker-initiated code, to +# automatically resolve the path from X.js to X.wasm. +# +# We have an "only slightly unsightly" solution for our main builds +# but it does not work for the WASMFS builds, so those builds have to +# be built to _this_ directory and can only run when the client app is +# loaded from the same directory. +dir.dout := $(dir.wasm)/jswasm +# dir.tmp = output dir for intermediary build files, as opposed to +# end-user deliverables. +dir.tmp := $(dir.wasm)/bld +CLEAN_FILES += $(dir.tmp)/* $(dir.dout)/* +ifeq (,$(wildcard $(dir.dout))) + dir._tmp := $(shell mkdir -p $(dir.dout)) +endif +ifeq (,$(wildcard $(dir.tmp))) + dir._tmp := $(shell mkdir -p $(dir.tmp)) +endif +cflags.common := -I. -I.. -I$(dir.top) +CLEAN_FILES += *~ $(dir.jacc)/*~ $(dir.api)/*~ $(dir.common)/*~ +emcc.WASM_BIGINT ?= 1 +sqlite3.c := $(dir.top)/sqlite3.c +sqlite3.h := $(dir.top)/sqlite3.h SQLITE_OPT = \ -DSQLITE_ENABLE_FTS4 \ -DSQLITE_ENABLE_RTREE \ -DSQLITE_ENABLE_EXPLAIN_COMMENTS \ -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION \ @@ -43,25 +120,35 @@ -DSQLITE_ENABLE_BYTECODE_VTAB \ -DSQLITE_ENABLE_OFFSET_SQL_FUNC \ -DSQLITE_OMIT_LOAD_EXTENSION \ -DSQLITE_OMIT_DEPRECATED \ -DSQLITE_OMIT_UTF16 \ - -DSQLITE_THREADSAFE=0 -#SQLITE_OPT += -DSQLITE_ENABLE_MEMSYS5 -$(dir.top)/sqlite3.c: - $(MAKE) -C $(dir.top) sqlite3.c - -# SQLITE_OMIT_LOAD_EXTENSION: if this is true, sqlite3_vfs::xDlOpen -# and friends may be NULL. - + -DSQLITE_OMIT_SHARED_CACHE \ + -DSQLITE_OMIT_WAL \ + -DSQLITE_THREADSAFE=0 \ + -DSQLITE_TEMP_STORE=3 \ + -DSQLITE_OS_KV_OPTIONAL=1 \ + '-DSQLITE_DEFAULT_UNIX_VFS="unix-none"' \ + -DSQLITE_USE_URI=1 \ + -DSQLITE_WASM_ENABLE_C_TESTS +# ^^^ most flags are set in sqlite3-wasm.c but we need them +# made explicit here for building speedtest1.c. + +ifneq (,$(filter release,$(MAKECMDGOALS))) +emcc_opt ?= -Oz -flto +else emcc_opt ?= -O0 -.PHONY: release -release: - $(MAKE) 'emcc_opt=-Os -g3' -# ^^^^^ target-specific vars, e.g.: -# release: emcc_opt=... -# apparently only work for file targets, not PHONY targets? +# ^^^^ build times for -O levels higher than 0 are painful at +# dev-time. +endif +# When passing emcc_opt from the CLI, += and re-assignment have no +# effect, so emcc_opt+=-g3 doesn't work. So... +emcc_opt_full := $(emcc_opt) -g3 +# ^^^ ALWAYS use -g3. See below for why. +# +# ^^^ -flto improves runtime speed at -O0 considerably but doubles +# build time. # # ^^^^ -O3, -Oz, -Os minify symbol names and there appears to be no # way around that except to use -g3, but -g3 causes the binary file # size to absolutely explode (approx. 5x larger). This minification # utterly breaks the resulting module, making it unsable except as @@ -75,37 +162,23 @@ # debugging symbols. That results in a small build with unmangled # symbol names. -Oz gives ever-so-slightly better compression than # -Os: not quite 1% in some completely unscientific tests. 
Runtime # speed for the unit tests is all over the place either way so it's # difficult to say whether -Os gives any speed benefit over -Oz. +# +# (Much later: -O2 consistently gives the best speeds.) ######################################################################## -# Emscripten SDK home dir and related binaries... -EMSDK_HOME ?= $(word 1,$(wildcard $(HOME)/src/emsdk $(HOME)/emsdk)) -emcc.bin ?= $(word 1,$(wildcard $(shell which emcc) $(EMSDK_HOME)/upstream/emscripten/emcc)) -ifeq (,$(emcc.bin)) - $(error Cannot find emcc.) -endif - -wasm-strip ?= $(shell which wasm-strip 2>/dev/null) -ifeq (,$(filter clean,$(MAKECMDGOALS))) -ifeq (,$(wasm-strip)) - $(info WARNING: *******************************************************************) - $(info WARNING: builds using -O3/-Os/-Oz will minify WASM-exported names,) - $(info WARNING: breaking _All The Things_. The workaround for that is to build) - $(info WARNING: with -g3 (which explodes the file size) and then strip the debug) - $(info WARNING: info after compilation, using wasm-strip, to shrink the wasm file.) - $(info WARNING: wasm-strip was not found in the PATH so we cannot strip those.) - $(info WARNING: If this build uses any optimization level higher than -O2 then) - $(info WARNING: the ***resulting WASM binary WILL NOT BE USABLE***.) - $(info WARNING: wasm-strip is part of the wabt package:) - $(info WARNING: https://github.com/WebAssembly/wabt) - $(info WARNING: on Ubuntu-like systems it can be installed with:) - $(info WARNING: sudo apt install wabt) - $(info WARNING: *******************************************************************) -endif -endif # 'make clean' check + +$(sqlite3.c) $(sqlite3.h): + $(MAKE) -C $(dir.top) sqlite3.c + +.PHONY: clean distclean +clean: + -rm -f $(CLEAN_FILES) +distclean: clean + -rm -f $(DISTCLEAN_FILES) ifeq (release,$(filter release,$(MAKECMDGOALS))) ifeq (,$(wasm-strip)) $(error Cannot make release-quality binary because wasm-strip is not available. \ See notes in the warning above) @@ -112,176 +185,471 @@ endif else $(info Development build. Use '$(MAKE) release' for a smaller release build.) endif -EXPORTED_FUNCTIONS.api.in := $(dir.api)/EXPORTED_FUNCTIONS.sqlite3-api \ - $(dir.jacc)/jaccwabyt_test.exports +bin.version-info := $(dir.wasm)/version-info +# ^^^^ NOT in $(dir.tmp) because we need it to survive the cleanup +# process for the dist build to work properly. 
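For orientation: the $(bin.version-info) tool introduced above (and built just below) feeds the generated $(sqlite3-api-build-version.js), whose recipe appears further down. The generated file amounts to roughly the following sketch; the initializers.push() wrapper matches that recipe, but the JSON field names shown here are illustrative assumptions, not the tool's actual output.

```
/* Rough sketch of the generated sqlite3-api-build-version.js. The
   initializers.push() wrapper mirrors the makefile recipe below; the
   JSON payload comes from `version-info --json`, whose exact field
   names are assumed here for illustration only. */
self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
  sqlite3.version = {"libVersion": "X.Y.Z", "sourceId": "..."};
});
```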
+$(bin.version-info): $(dir.wasm)/version-info.c $(sqlite3.h) $(MAKEFILE) + $(CC) -O0 -I$(dir.top) -o $@ $< +DISTCLEAN_FILES += $(bin.version-info) + +bin.stripccomments := $(dir.tool)/stripccomments +$(bin.stripccomments): $(bin.stripccomments).c $(MAKEFILE) + $(CC) -o $@ $< +DISTCLEAN_FILES += $(bin.stripccomments) -EXPORTED_FUNCTIONS.api: $(EXPORTED_FUNCTIONS.api.in) $(MAKEFILE) +EXPORTED_FUNCTIONS.api.in := $(abspath $(dir.api)/EXPORTED_FUNCTIONS.sqlite3-api) +EXPORTED_FUNCTIONS.api := $(dir.tmp)/EXPORTED_FUNCTIONS.api +$(EXPORTED_FUNCTIONS.api): $(EXPORTED_FUNCTIONS.api.in) $(MAKEFILE) cat $(EXPORTED_FUNCTIONS.api.in) > $@ -CLEAN_FILES += EXPORTED_FUNCTIONS.api - -sqlite3-api.jses := \ - $(dir.api)/sqlite3-api-prologue.js \ - $(dir.common)/whwasmutil.js \ - $(dir.jacc)/jaccwabyt.js \ - $(dir.api)/sqlite3-api-glue.js \ - $(dir.api)/sqlite3-api-oo1.js \ - $(dir.api)/sqlite3-api-worker.js \ - $(dir.api)/sqlite3-api-opfs.js \ - $(dir.api)/sqlite3-api-cleanup.js - -sqlite3-api.js := $(dir.api)/sqlite3-api.js -CLEAN_FILES += $(sqlite3-api.js) + +sqlite3-license-version.js := $(dir.tmp)/sqlite3-license-version.js +sqlite3-license-version-header.js := $(dir.api)/sqlite3-license-version-header.js +sqlite3-api-build-version.js := $(dir.tmp)/sqlite3-api-build-version.js +# sqlite3-api.jses = the list of JS files which make up $(sqlite3-api.js), in +# the order they need to be assembled. +sqlite3-api.jses := $(sqlite3-license-version.js) +sqlite3-api.jses += $(dir.api)/sqlite3-api-prologue.js +sqlite3-api.jses += $(dir.common)/whwasmutil.js +sqlite3-api.jses += $(dir.jacc)/jaccwabyt.js +sqlite3-api.jses += $(dir.api)/sqlite3-api-glue.js +sqlite3-api.jses += $(sqlite3-api-build-version.js) +sqlite3-api.jses += $(dir.api)/sqlite3-api-oo1.js +sqlite3-api.jses += $(dir.api)/sqlite3-api-worker1.js +sqlite3-api.jses += $(dir.api)/sqlite3-api-opfs.js +sqlite3-api.jses += $(dir.api)/sqlite3-api-cleanup.js + +# "External" API files which are part of our distribution +# but not part of the sqlite3-api.js amalgamation. +SOAP.js := $(dir.api)/sqlite3-opfs-async-proxy.js +sqlite3-worker1.js := $(dir.api)/sqlite3-worker1.js +sqlite3-worker1-promiser.js := $(dir.api)/sqlite3-worker1-promiser.js +define CP_XAPI +sqlite3-api.ext.jses += $$(dir.dout)/$$(notdir $(1)) +$$(dir.dout)/$$(notdir $(1)): $(1) $$(MAKEFILE) + cp $$< $$@ +endef +$(foreach X,$(SOAP.js) $(sqlite3-worker1.js) $(sqlite3-worker1-promiser.js),\ + $(eval $(call CP_XAPI,$(X)))) +all: $(sqlite3-api.ext.jses) + +sqlite3-api.js := $(dir.tmp)/sqlite3-api.js $(sqlite3-api.js): $(sqlite3-api.jses) $(MAKEFILE) @echo "Making $@..." @for i in $(sqlite3-api.jses); do \ echo "/* BEGIN FILE: $$i */"; \ cat $$i; \ echo "/* END FILE: $$i */"; \ done > $@ -post-js.js := $(dir.api)/post-js.js -CLEAN_FILES += $(post-js.js) +$(sqlite3-api-build-version.js): $(bin.version-info) $(MAKEFILE) + @echo "Making $@..." + @{ \ + echo 'self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){'; \ + echo -n ' sqlite3.version = '; \ + $(bin.version-info) --json; \ + echo ';'; \ + echo '});'; \ + } > $@ + +######################################################################## +# --post-js and --pre-js are emcc flags we use to append/prepend JS to +# the generated emscripten module file. +pre-js.js := $(dir.api)/pre-js.js +post-js.js := $(dir.tmp)/post-js.js post-jses := \ $(dir.api)/post-js-header.js \ $(sqlite3-api.js) \ $(dir.api)/post-js-footer.js - $(post-js.js): $(post-jses) $(MAKEFILE) @echo "Making $@..." 
@for i in $(post-jses); do \ echo "/* BEGIN FILE: $$i */"; \ cat $$i; \ echo "/* END FILE: $$i */"; \ done > $@ +extern-post-js.js := $(dir.api)/extern-post-js.js +extern-pre-js.js := $(dir.api)/extern-pre-js.js +pre-post-common.flags := \ + --post-js=$(post-js.js) \ + --extern-post-js=$(extern-post-js.js) \ + --extern-pre-js=$(sqlite3-license-version.js) +pre-post-jses.deps := $(post-js.js) \ + $(extern-post-js.js) $(extern-pre-js.js) $(sqlite3-license-version.js) +$(sqlite3-license-version.js): $(sqlite3.h) $(sqlite3-license-version-header.js) $(MAKEFILE) + @echo "Making $@..."; { \ + cat $(sqlite3-license-version-header.js); \ + echo '/*'; \ + echo '** This code was built from sqlite3 version...'; \ + echo "** "; \ + awk -e '/define SQLITE_VERSION/{$$1=""; print "**" $$0}' \ + -e '/define SQLITE_SOURCE_ID/{$$1=""; print "**" $$0}' $(sqlite3.h); \ + echo '*/'; \ + } > $@ +######################################################################## +# call-make-pre-js creates rules for pre-js-$(1).js. $1 = the base +# name of the JS file on whose behalf this pre-js is for. +define call-make-pre-js +pre-post-$(1).flags ?= +$$(dir.tmp)/pre-js-$(1).js: $$(pre-js.js) $$(MAKEFILE) + cp $$(pre-js.js) $$@ + @if [ sqlite3-wasmfs = $(1) ]; then \ + echo "delete Module[xNameOfInstantiateWasm] /*for WASMFS build*/;"; \ + elif [ sqlite3 != $(1) ]; then \ + echo "Module[xNameOfInstantiateWasm].uri = '$(1).wasm';"; \ + fi >> $$@ +pre-post-$(1).deps := $$(pre-post-jses.deps) $$(dir.tmp)/pre-js-$(1).js +pre-post-$(1).flags += --pre-js=$$(dir.tmp)/pre-js-$(1).js +endef +#$(error $(call call-make-pre-js,sqlite3-wasmfs)) +# /post-js and pre-js +######################################################################## ######################################################################## -# emcc flags for .c/.o/.wasm. -emcc.flags = +# emcc flags for .c/.o/.wasm/.js. +emcc.flags := #emcc.flags += -v # _very_ loud but also informative about what it's doing +# -g3 is needed to keep -O2 and higher from creating broken JS via +# minification. ######################################################################## # emcc flags for .c/.o. emcc.cflags := emcc.cflags += -std=c99 -fPIC # -------------^^^^^^^^ we currently need c99 for WASM-specific sqlite3 APIs. -emcc.cflags += -I. -I$(dir.top) # $(SQLITE_OPT) +emcc.cflags += -I. -I$(dir.top) ######################################################################## # emcc flags specific to building the final .js/.wasm file... 
emcc.jsflags := -fPIC +emcc.jsflags += --minify 0 emcc.jsflags += --no-entry -emcc.jsflags += -sENVIRONMENT=web emcc.jsflags += -sMODULARIZE emcc.jsflags += -sSTRICT_JS emcc.jsflags += -sDYNAMIC_EXECUTION=0 emcc.jsflags += -sNO_POLYFILL -emcc.jsflags += -sEXPORTED_FUNCTIONS=@$(dir.wasm)/EXPORTED_FUNCTIONS.api -emcc.jsflags += -sEXPORTED_RUNTIME_METHODS=FS,wasmMemory # wasmMemory==>for -sIMPORTED_MEMORY +emcc.jsflags += -sEXPORTED_FUNCTIONS=@$(EXPORTED_FUNCTIONS.api) +emcc.exportedRuntimeMethods := \ + -sEXPORTED_RUNTIME_METHODS=FS,wasmMemory + # FS ==> stdio/POSIX I/O proxies + # wasmMemory ==> required by our code for use with -sIMPORTED_MEMORY +emcc.jsflags += $(emcc.exportedRuntimeMethods) emcc.jsflags += -sUSE_CLOSURE_COMPILER=0 emcc.jsflags += -sIMPORTED_MEMORY -#emcc.jsflags += -sINITIAL_MEMORY=13107200 +emcc.environment := -sENVIRONMENT=web,worker +######################################################################## +# -sINITIAL_MEMORY: How much memory we need to start with is governed +# at least in part by whether -sALLOW_MEMORY_GROWTH is enabled. If so, +# we can start with less. If not, we need as much as we'll ever +# possibly use (which, of course, we can't know for sure). Note, +# however, that speedtest1 shows that performance for even moderate +# workloads MAY suffer considerably if we start small and have to grow +# at runtime. e.g. OPFS-backed (speedtest1 --size 75) take MAY take X +# time with 16mb+ memory and 3X time when starting with 8MB. However, +# such test results are inconsistent due to browser internals which +# are opaque to us. +emcc.jsflags += -sALLOW_MEMORY_GROWTH +emcc.INITIAL_MEMORY.128 := 13107200 +emcc.INITIAL_MEMORY.96 := 100663296 +emcc.INITIAL_MEMORY.64 := 64225280 +emcc.INITIAL_MEMORY.32 := 33554432 +emcc.INITIAL_MEMORY.16 := 16777216 +emcc.INITIAL_MEMORY.8 := 8388608 +emcc.INITIAL_MEMORY ?= 16 +ifeq (,$(emcc.INITIAL_MEMORY.$(emcc.INITIAL_MEMORY))) +$(error emcc.INITIAL_MEMORY must be one of: 8, 16, 32, 64, 96, 128 (megabytes)) +endif +emcc.jsflags += -sINITIAL_MEMORY=$(emcc.INITIAL_MEMORY.$(emcc.INITIAL_MEMORY)) +# /INITIAL_MEMORY +######################################################################## + +emcc.jsflags += $(emcc.environment) #emcc.jsflags += -sTOTAL_STACK=4194304 -emcc.jsflags += -sEXPORT_NAME=sqlite3InitModule + +sqlite3.js.init-func := sqlite3InitModule +# ^^^^ $(sqlite3.js.init-func) symbol name is hard-coded in +# $(extern-post-js.js) as well as in numerous docs. If changed, it +# needs to be globally modified in *.js and all related documentation. + +emcc.jsflags += -sEXPORT_NAME=$(sqlite3.js.init-func) emcc.jsflags += -sGLOBAL_BASE=4096 # HYPOTHETICALLY keep func table indexes from overlapping w/ heap addr. -emcc.jsflags +=--post-js=$(post-js.js) #emcc.jsflags += -sSTRICT # fails due to missing __syscall_...() #emcc.jsflags += -sALLOW_UNIMPLEMENTED_SYSCALLS #emcc.jsflags += -sFILESYSTEM=0 # only for experimentation. sqlite3 needs the FS API #emcc.jsflags += -sABORTING_MALLOC -emcc.jsflags += -sALLOW_MEMORY_GROWTH emcc.jsflags += -sALLOW_TABLE_GROWTH +# -sALLOW_TABLE_GROWTH is required for installing new SQL UDFs emcc.jsflags += -Wno-limited-postlink-optimizations # ^^^^^ it likes to warn when we have "limited optimizations" via the -g3 flag. 
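As a point of reference for the flags above, here is a minimal sketch of how a page that has loaded the resulting sqlite3.js might call the module factory. The promise resolving to the sqlite3 namespace, rather than the raw Emscripten module, is the behavior installed by extern-post-js.js later in this patch.

```
/* Minimal client-side sketch (assumes sqlite3.js has already been
   loaded via a script tag or importScripts()). The global factory name
   comes from -sEXPORT_NAME=sqlite3InitModule; extern-post-js.js, shown
   later in this patch, makes the returned promise resolve to the
   sqlite3 namespace object instead of the raw Emscripten module. */
sqlite3InitModule().then(function(sqlite3){
  console.log("sqlite3 version:", sqlite3.version);
});
```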
-#emcc.jsflags += -sMALLOC=emmalloc -#emcc.jsflags += -sMALLOC=dlmalloc # a good 8k larger than emmalloc #emcc.jsflags += -sSTANDALONE_WASM # causes OOM errors, not sure why -#emcc.jsflags += --import=foo_bar -#emcc.jsflags += --no-gc-sections # https://lld.llvm.org/WebAssembly.html emcc.jsflags += -sERROR_ON_UNDEFINED_SYMBOLS=0 emcc.jsflags += -sLLD_REPORT_UNDEFINED #emcc.jsflags += --allow-undefined -emcc.jsflags += --import-undefined +#emcc.jsflags += --import-undefined #emcc.jsflags += --unresolved-symbols=import-dynamic --experimental-pic -#emcc.jsflags += --experimental-pic --unresolved-symbols=ingore-all --import-undefined +#emcc.jsflags += --experimental-pic --unresolved-symbols=ingore-all --import-undefined #emcc.jsflags += --unresolved-symbols=ignore-all -enable_bigint ?= 1 -ifneq (0,$(enable_bigint)) -emcc.jsflags += -sWASM_BIGINT -endif -emcc.jsflags += -sMEMORY64=0 -# ^^^^ MEMORY64=1 fails to load, erroring with: +emcc.jsflags += -sWASM_BIGINT=$(emcc.WASM_BIGINT) + +######################################################################## +# -sMEMORY64=1 fails to load, erroring with: # invalid memory limits flags 0x5 # (enable via --experimental-wasm-memory64) # # ^^^^ MEMORY64=2 builds and loads but dies when we do things like: # -# new Uint8Array(heapWrappers().HEAP8U.buffer, ptr, n) +# new Uint8Array(wasm.heap8u().buffer, ptr, n) # # because ptr is now a BigInt, so is invalid for passing to arguments -# which have strict must-be-a-number requirements. +# which have strict must-be-a-Number requirements. +######################################################################## + + +######################################################################## +# -sSINGLE_FILE: +# https://github.com/emscripten-core/emscripten/blob/main/src/settings.js#L1704 +# -sSINGLE_FILE=1 would be really nice but we have to build with -g3 +# for -O2 and higher to work (else minification breaks the code) and +# cannot wasm-strip the binary before it gets encoded into the JS +# file. The result is that the generated JS file is, because of the -g3 +# debugging info, _huge_. ######################################################################## - -sqlite3.js := $(dir.api)/sqlite3.js -sqlite3.wasm := $(dir.api)/sqlite3.wasm -$(dir.api)/sqlite3-wasm.o: emcc.cflags += $(SQLITE_OPT) -$(dir.api)/sqlite3-wasm.o: $(dir.top)/sqlite3.c -$(dir.api)/wasm_util.o: emcc.cflags += $(SQLITE_OPT) -sqlite3.wasm.c := $(dir.api)/sqlite3-wasm.c \ - $(dir.jacc)/jaccwabyt_test.c -# ^^^ FIXME (how?): jaccwabyt_test.c is only needed for the test -# apps. However, we want to test the release builds with those apps, -# so we cannot simply elide that file in release builds. That -# component is critical to the VFS bindings so needs to be tested -# along with the core APIs. 
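Regarding the -sMEMORY64 note above: the failure mode is a plain JavaScript restriction on typed-array offsets, sketched here with arbitrary values (not code from this tree).

```
/* Typed-array views require Number offsets, so a 64-bit (BigInt)
   pointer cannot be passed where a Number is expected, which is what
   breaks new Uint8Array(wasm.heap8u().buffer, ptr, n) under MEMORY64=2. */
const buf = new ArrayBuffer(16);
new Uint8Array(buf, 4, 4);    /* OK: Number byte offset */
try{
  new Uint8Array(buf, 4n, 4); /* throws: BigInt is not implicitly coerced to Number */
}catch(e){
  console.log("BigInt offset rejected:", e.name); /* "TypeError" */
}
```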
-define WASM_C_COMPILE -$(1).o := $$(subst .c,.o,$(1)) -sqlite3.wasm.obj += $$($(1).o) -$$($(1).o): $$(MAKEFILE) $(1) - $$(emcc.bin) $$(emcc_opt) $$(emcc.flags) $$(emcc.cflags) -c $(1) -o $$@ -CLEAN_FILES += $$($(1).o) -endef -$(foreach c,$(sqlite3.wasm.c),$(eval $(call WASM_C_COMPILE,$(c)))) -$(sqlite3.js): +######################################################################## +# AN EXPERIMENT: undocumented Emscripten feature: if the target file +# extension is "mjs", it defaults to ES6 module builds: +# https://github.com/emscripten-core/emscripten/issues/14383 +ifeq (,$(filter esm,$(MAKECMDGOALS))) +sqlite3.js.ext := js +else +esm.deps := $(filter-out esm,$(MAKECMDGOALS)) +esm: $(if $(esm.deps),$(esm.deps),all) +sqlite3.js.ext := mjs +endif +# /esm +######################################################################## +sqlite3.js := $(dir.dout)/sqlite3.$(sqlite3.js.ext) +sqlite3.wasm := $(dir.dout)/sqlite3.wasm +sqlite3-wasm.c := $(dir.api)/sqlite3-wasm.c +# sqlite3-wasm.o vs sqlite3-wasm.c: building against the latter +# (predictably) results in a slightly faster binary, but we're close +# enough to the target speed requirements that the 500ms makes a +# difference. Thus we build all binaries against sqlite3-wasm.c +# instead of building a shared copy of sqlite3-wasm.o. +$(eval $(call call-make-pre-js,sqlite3)) +$(sqlite3.js): $(sqlite3.js): $(MAKEFILE) $(sqlite3.wasm.obj) \ - EXPORTED_FUNCTIONS.api \ - $(post-js.js) - $(emcc.bin) -o $@ $(emcc_opt) $(emcc.flags) $(emcc.jsflags) $(sqlite3.wasm.obj) + $(EXPORTED_FUNCTIONS.api) \ + $(pre-post-sqlite3.deps) + @echo "Building $@ ..." + $(emcc.bin) -o $@ $(emcc_opt_full) $(emcc.flags) \ + $(emcc.jsflags) $(pre-post-common.flags) $(pre-post-sqlite3.flags) \ + $(cflags.common) $(SQLITE_OPT) $(sqlite3-wasm.c) chmod -x $(sqlite3.wasm) -ifneq (,$(wasm-strip)) - $(wasm-strip) $(sqlite3.wasm) -endif + $(maybe-wasm-strip) $(sqlite3.wasm) @ls -la $@ $(sqlite3.wasm) - +$(sqlite3.wasm): $(sqlite3.js) CLEAN_FILES += $(sqlite3.js) $(sqlite3.wasm) all: $(sqlite3.js) +wasm: $(sqlite3.js) # End main Emscripten-based module build ######################################################################## +######################################################################## +# batch-runner.js... +dir.sql := sql +speedtest1 := ../../speedtest1 +speedtest1.c := ../../test/speedtest1.c +speedtest1.sql := $(dir.sql)/speedtest1.sql +speedtest1.cliflags := --size 25 --big-transactions +$(speedtest1): + $(MAKE) -C ../.. speedtest1 +$(speedtest1.sql): $(speedtest1) $(MAKEFILE) + $(speedtest1) $(speedtest1.cliflags) --script $@ +batch-runner.list: $(MAKEFILE) $(speedtest1.sql) $(dir.sql)/000-mandelbrot.sql + bash split-speedtest1-script.sh $(dir.sql)/speedtest1.sql + ls -1 $(dir.sql)/*.sql | grep -v speedtest1.sql | sort > $@ +clean-batch: + rm -f batch-runner.list $(dir.sql)/speedtest1*.sql +# ^^^ we don't do this along with 'clean' because we clean/rebuild on +# a regular basis with different -Ox flags and rebuilding the batch +# pieces each time is an unnecessary time sink. +batch: batch-runner.list +all: batch +# end batch-runner.js +######################################################################## +# speedtest1.js... 
+# speedtest1-common.eflags = emcc flags used by multiple builds of speedtest1 +# speedtest1.eflags = emcc flags used by main build of speedtest1 +speedtest1-common.eflags := $(emcc_opt_full) +speedtest1.eflags := +speedtest1.eflags += -sENVIRONMENT=web +speedtest1.eflags += -sALLOW_MEMORY_GROWTH +speedtest1.eflags += -sINITIAL_MEMORY=$(emcc.INITIAL_MEMORY.$(emcc.INITIAL_MEMORY)) +speedtest1-common.eflags += -sINVOKE_RUN=0 +speedtest1-common.eflags += --no-entry +#speedtest1-common.eflags += -flto +speedtest1-common.eflags += -sABORTING_MALLOC +speedtest1-common.eflags += -sSTRICT_JS +speedtest1-common.eflags += -sMODULARIZE +speedtest1-common.eflags += -Wno-limited-postlink-optimizations +EXPORTED_FUNCTIONS.speedtest1 := $(abspath $(dir.tmp)/EXPORTED_FUNCTIONS.speedtest1) +speedtest1-common.eflags += -sEXPORTED_FUNCTIONS=@$(EXPORTED_FUNCTIONS.speedtest1) +speedtest1-common.eflags += $(emcc.exportedRuntimeMethods) +speedtest1-common.eflags += -sALLOW_TABLE_GROWTH +speedtest1-common.eflags += -sDYNAMIC_EXECUTION=0 +speedtest1-common.eflags += --minify 0 +speedtest1-common.eflags += -sEXPORT_NAME=$(sqlite3.js.init-func) +speedtest1-common.eflags += -sWASM_BIGINT=$(emcc.WASM_BIGINT) +speedtest1-common.eflags += $(pre-post-common.flags) +speedtest1.exit-runtime0 := -sEXIT_RUNTIME=0 +speedtest1.exit-runtime1 := -sEXIT_RUNTIME=1 +# Re -sEXIT_RUNTIME=1 vs 0: if it's 1 and speedtest1 crashes, we get +# this error from emscripten: +# +# > native function `free` called after runtime exit (use +# NO_EXIT_RUNTIME to keep it alive after main() exits)) +# +# If it's 0 and it crashes, we get: +# +# > stdio streams had content in them that was not flushed. you should +# set EXIT_RUNTIME to 1 (see the FAQ), or make sure to emit a newline +# when you printf etc. +# +# and pending output is not flushed because it didn't end with a +# newline (by design). The lesser of the two evils seems to be +# -sEXIT_RUNTIME=1 but we need EXIT_RUNTIME=0 for the worker-based app +# which runs speedtest1 multiple times. + +$(EXPORTED_FUNCTIONS.speedtest1): $(EXPORTED_FUNCTIONS.api) + @echo "Making $@ ..." + @{ echo _wasm_main; cat $(EXPORTED_FUNCTIONS.api); } > $@ +speedtest1.js := $(dir.dout)/speedtest1.js +speedtest1.wasm := $(subst .js,.wasm,$(speedtest1.js)) +speedtest1.cflags := $(cflags.common) -DSQLITE_SPEEDTEST1_WASM +speedtest1.cses := $(speedtest1.c) $(sqlite3-wasm.c) +$(eval $(call call-make-pre-js,speedtest1)) +$(speedtest1.js): $(MAKEFILE) $(speedtest1.cses) \ + $(pre-post-speedtest1.deps) \ + $(EXPORTED_FUNCTIONS.speedtest1) + @echo "Building $@ ..." + $(emcc.bin) \ + $(speedtest1.eflags) $(speedtest1-common.eflags) $(speedtest1.cflags) \ + $(pre-post-speedtest1.flags) \ + $(SQLITE_OPT) \ + $(speedtest1.exit-runtime0) \ + -o $@ $(speedtest1.cses) -lm + $(maybe-wasm-strip) $(speedtest1.wasm) + ls -la $@ $(speedtest1.wasm) + +speedtest1: $(speedtest1.js) +all: speedtest1 +CLEAN_FILES += $(speedtest1.js) $(speedtest1.wasm) +# end speedtest1.js +######################################################################## + +######################################################################## +# Convenience rules to rebuild with various -Ox levels. Much +# experimentation shows -O2 to be the clear winner in terms of speed. +# Note that build times with anything higher than -O0 are somewhat +# painful. + +.PHONY: o0 o1 o2 o3 os oz +o-xtra := -flto +# ^^^^ -flto can have a considerably performance boost at -O0 but +# doubles the build time and seems to have negligible effect on +# higher optimization levels. 
+o0: clean + $(MAKE) -e "emcc_opt=-O0" +o1: clean + $(MAKE) -e "emcc_opt=-O1 $(o-xtra)" +o2: clean + $(MAKE) -e "emcc_opt=-O2 $(o-xtra)" +o3: clean + $(MAKE) -e "emcc_opt=-O3 $(o-xtra)" +os: clean + @echo "WARNING: -Os can result in a build with mysteriously missing pieces!" + $(MAKE) -e "emcc_opt=-Os $(o-xtra)" +oz: clean + $(MAKE) -e "emcc_opt=-Oz $(o-xtra)" + +######################################################################## +# Sub-makes... + +include fiddle.make + +# Only add wasmfs if wasmfs.enable=1 or we're running (dist)clean +wasmfs.enable ?= $(if $(filter %clean,$(MAKECMDGOALS)),1,0) +ifeq (1,$(wasmfs.enable)) +# wasmfs build disabled 2022-10-19 per /chat discussion. +# OPFS-over-wasmfs was initially a stopgap measure and a convenient +# point of comparison for the OPFS sqlite3_vfs's performance, but it +# currently doubles our deliverables and build maintenance burden for +# little, if any, benefit. +# +######################################################################## +# Some platforms do not support the WASMFS build. Raspberry Pi OS is one +# of them. As such platforms are discovered, add their (uname -m) name +# to PLATFORMS_WITH_NO_WASMFS to exclude the wasmfs build parts. +PLATFORMS_WITH_NO_WASMFS := aarch64 # add any others here +THIS_ARCH := $(shell /usr/bin/uname -m) +ifneq (,$(filter $(THIS_ARCH),$(PLATFORMS_WITH_NO_WASMFS))) +$(info This platform does not support the WASMFS build.) +HAVE_WASMFS := 0 +else +HAVE_WASMFS := 1 +include wasmfs.make +endif +endif +# /wasmfs +######################################################################## + +######################################################################## +# Create deliverables: +ifneq (,$(filter dist,$(MAKECMDGOALS))) +include dist.make +endif + +######################################################################## +# Push files to public wasm-testing.sqlite.org server +wasm-testing.include = $(dir.dout) *.js *.html \ + batch-runner.list $(dir.sql) $(dir.common) $(dir.fiddle) $(dir.jacc) +wasm-testing.exclude = sql/speedtest1.sql +wasm-testing.dir = /jail/sites/wasm-testing +wasm-testing.dest ?= wasm-testing:$(wasm-testing.dir) +# ---------------------^^^^^^^^^^^^ ssh alias +.PHONY: push-testing +push-testing: + rsync -z -e ssh --ignore-times --chown=stephan:www-data --group -r \ + $(patsubst %,--exclude=%,$(wasm-testing.exclude)) \ + $(wasm-testing.include) $(wasm-testing.dest) + @echo "Updating gzipped copies..."; \ + ssh wasm-testing 'cd $(wasm-testing.dir) && bash .gzip' || \ + echo "SSH failed: it's likely that stale content will be served via old gzip files." ######################################################################## -# fiddle_remote is the remote destination for the fiddle app. It -# must be a [user@]HOST:/path for rsync. -# Note that the target "should probably" contain a symlink of -# index.html -> fiddle.html. -fiddle_remote ?= -ifeq (,$(fiddle_remote)) -ifneq (,$(wildcard /home/stephan)) - fiddle_remote = wh:www/wh/sqlite3/. -else ifneq (,$(wildcard /home/drh)) - #fiddle_remote = if appropriate, add that user@host:/path here -endif -endif -$(fiddle_files): default -push-fiddle: $(fiddle_files) - @if [ x = "x$(fiddle_remote)" ]; then \ - echo "fiddle_remote must be a [user@]HOST:/path for rsync"; \ - exit 1; \ - fi - rsync -va fiddle/ $(fiddle_remote) -# end fiddle remote push +# If we find a copy of the sqlite.org/wasm docs checked out, copy +# certain files over to it, noting that some need automatable edits... 
+WDOCS.home ?= ../../../wdoc +.PHONY: update-docs +ifneq (,$(wildcard $(WDOCS.home)/api-index.md)) +WDOCS.jswasm := $(WDOCS.home)/jswasm +update-docs: $(bin.stripccomments) $(sqlite3.js) $(sqlite3.wasm) + @echo "Copying files to the /wasm docs. Be sure to use an -Oz build for this!" + cp $(sqlite3.wasm) $(WDOCS.jswasm)/. + $(bin.stripccomments) -k -k < $(sqlite3.js) \ + | sed -e '/^[ \t]*$$/d' > $(WDOCS.jswasm)/sqlite3.js + cp demo-123.js demo-123.html demo-123-worker.html $(WDOCS.home) + sed -n -e '/EXTRACT_BEGIN/,/EXTRACT_END/p' \ + module-symbols.html > $(WDOCS.home)/module-symbols.html +else +update-docs: + @echo "Cannot find wasm docs checkout."; \ + echo "Pass WDOCS.home=/path/to/wasm/docs/checkout or edit this makefile to suit."; \ + exit 127 +endif +# end /wasm docs ######################################################################## ADDED ext/wasm/README-dist.txt Index: ext/wasm/README-dist.txt ================================================================== --- /dev/null +++ ext/wasm/README-dist.txt @@ -0,0 +1,23 @@ +This is the README for the sqlite3 WASM/JS distribution. + +Main project page: https://sqlite.org + +Documentation: https://sqlite.org/wasm + +This archive contains the sqlite3.js and sqlite3.wasm file which make +up the sqlite3 WASM/JS build. + +The jswasm directory contains the core sqlite3 deliverables and the +top-level directory contains demonstration and test apps. Browsers +will not serve WASM files from file:// URLs, so the demo/test apps +require a web server and that server must include the following +headers in its response when serving the files: + + Cross-Origin-Opener-Policy: same-origin + Cross-Origin-Embedder-Policy: require-corp + +One simple way to get the demo apps up and running on Unix-style +systems is to install althttpd (https://sqlite.org/althttpd) and run: + + althttpd --enable-sab --page index.html + Index: ext/wasm/README.md ================================================================== --- ext/wasm/README.md +++ ext/wasm/README.md @@ -8,10 +8,11 @@ [here](https://emscripten.org/docs/getting_started/downloads.html) and summarized below for Linux environments: ``` # Clone the emscripten repository: +$ sudo apt install git $ git clone https://github.com/emscripten-core/emsdk.git $ cd emsdk # Download and install the latest SDK tools: $ ./emsdk install latest @@ -22,10 +23,11 @@ Those parts only need to be run once, but the SDK can be updated using: ``` $ git pull +$ ./emsdk install latest $ ./emsdk activate latest ``` The following needs to be run for each shell instance which needs the `emcc` compiler: @@ -53,51 +55,51 @@ ``` $ cd ext/wasm $ make ``` -That will generate the fiddle application under -[ext/fiddle](/dir/ext/wasm/fiddle), as `fiddle.html`. That application -cannot, due to XMLHttpRequest security limitations, run if the HTML -file is opened directly in the browser (i.e. if it is opened using a -`file://` URL), so it needs to be served via an HTTP server. For -example, using [althttpd][]: +That will generate the a number of files required for a handful of +test and demo applications which can be accessed via +`index.html`. WASM content cannot, due to XMLHttpRequest security +limitations, be loaded if the containing HTML file is opened directly +in the browser (i.e. if it is opened using a `file://` URL), so it +needs to be served via an HTTP server. 
For example, using +[althttpd][]: ``` -$ cd ext/wasm/fiddle -$ althttpd -page fiddle.html +$ cd ext/wasm +$ althttpd --enable-sab --max-age 1 --page index.html ``` -That will open the system's browser and run the fiddle app's page. +That will open the system's browser and run the index page, from which +all of the test and demo applications can be accessed. Note that when serving this app via [althttpd][], it must be a version -from 2022-05-17 or newer so that it recognizes the `.wasm` file -extension and responds with the mimetype `application/wasm`, as the -WASM loader is pedantic about that detail. - - -# Known Quirks and Limitations - -Some "impedence mismatch" between C and WASM/JavaScript is to be -expected. - -## No I/O - -sqlite3 shell commands which require file I/O or pipes are disabled in -the WASM build. - -## `exit()` Triggered from C - -When C code calls `exit()`, as happens (for example) when running an -"unsafe" command when safe mode is active, WASM's connection to the -sqlite3 shell environment has no sensible choice but to shut down -because `exit()` leaves it in a state we can no longer recover -from. The JavaScript-side application attempts to recognize this and -warn the user that restarting the application is necessary. Currently -the only way to restart it is to reload the page. Restructuring the -shell code such that it could be "rebooted" without restarting the -JS app would require some invasive changes which are not currently -on any TODO list but have not been entirely ruled out long-term. +from 2022-09-26 or newer so that it recognizes the `--enable-sab` +flag, which causes althttpd to emit two HTTP response headers which +are required to enable JavaScript's `SharedArrayBuffer` and `Atomics` +APIs. Those APIs are required in order to enable the OPFS-related +features in the apps which use them. + +# Testing on a remote machine that is accessed via SSH + +*NB: The following are developer notes, last validated on 2022-08-18* + + * Remote: Install git, emsdk, and althttpd + * Use a [version of althttpd][althttpd] from + September 26, 2022 or newer. + * Remote: Install the SQLite source tree. CD to ext/wasm + * Remote: "`make`" to build WASM + * Remote: `althttpd --enable-sab --port 8080 --popup` + * Local: `ssh -L 8180:localhost:8080 remote` + * Local: Point your web-browser at http://localhost:8180/index.html + +In order to enable [SharedArrayBuffers](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer), +the web-browser requires that the two extra Cross-Origin lines be present +in HTTP reply headers and that the request must come from "localhost". +Since the web-server is on a different machine from +the web-broser, the localhost requirement means that the connection must be tunneled +using SSH. [emscripten]: https://emscripten.org [althttpd]: https://sqlite.org/althttpd Index: ext/wasm/api/README.md ================================================================== --- ext/wasm/api/README.md +++ ext/wasm/api/README.md @@ -21,12 +21,10 @@ The overall idea is that the following files get concatenated together, in the listed order, the resulting file is loaded by a browser client: -- `post-js-header.js`\ - Emscripten-specific header for the `--post-js` input. - `sqlite3-api-prologue.js`\ Contains the initial bootstrap setup of the sqlite3 API objects. 
This is exposed as a function, rather than objects, so that the next step can pass in a config object which abstracts away parts of the WASM environment, to facilitate plugging it in to arbitrary @@ -45,50 +43,59 @@ and C structs, such that changes to the struct state from either JS or C are visible to the other end of the connection. This is also an independent spinoff project, conceived for the sqlite3 project but maintained separately. - `sqlite3-api-glue.js`\ - Invokes the function exposed by `sqlite3-api-prologue.js`, passing - it a configuration object to configure it for the current WASM - toolchain (noting that it currently requires Emscripten), then - removes that function from the global scope. The result of this file - is a global-scope `sqlite3` object which acts as a namespace for the - API's functionality. This object gets removed from the global scope - after the following files have attached their own features to it. + Invokes functionality exposed by the previous two files to + flesh out low-level parts of `sqlite3-api-prologue.js`. Most of + these pieces related to the `sqlite3.capi.wasm` object. +- `sqlite3-api-build-version.js`\ + Gets created by the build process and populates the + `sqlite3.version` object. This part is not critical, but records the + version of the library against which this module was built. - `sqlite3-api-oo1.js`\ Provides a high-level object-oriented wrapper to the lower-level C API, colloquially known as OO API #1. Its API is similar to other high-level sqlite3 JS wrappers and should feel relatively familiar to anyone familiar with such APIs. That said, it is not a "required component" and can be elided from builds which do not want it. -- `sqlite3-api-worker.js`\ +- `sqlite3-api-worker1.js`\ A Worker-thread-based API which uses OO API #1 to provide an interface to a database which can be driven from the main Window thread via the Worker message-passing interface. Like OO API #1, this is an optional component, offering one of any number of potential implementations for such an API. - - `sqlite3-worker.js`\ + - `sqlite3-worker1.js`\ Is not part of the amalgamated sources and is intended to be loaded by a client Worker thread. It loads the sqlite3 module - and runs the Worker API which is implemented in - `sqlite3-api-worker.js`. + and runs the Worker #1 API which is implemented in + `sqlite3-api-worker1.js`. + - `sqlite3-worker1-promiser.js`\ + Is likewise not part of the amalgamated sources and provides + a Promise-based interface into the Worker #1 API. This is + a far user-friendlier way to interface with databases running + in a Worker thread. - `sqlite3-api-opfs.js`\ - is an in-development/experimental sqlite3 VFS wrapper, the goal of - which being to use Google Chrome's Origin-Private FileSystem (OPFS) - storage layer to provide persistent storage for database files in a - browser. It is far from complete. -- `sqlite3-api-cleanup.js`\ - the previous files temporarily create global objects in order to - communicate their state to the files which follow them, and _this_ - file connects any final components together and cleans up those - globals. As of this writing, this code ensures that the previous - files leave no global symbols installed, and it moves the sqlite3 - namespace object into the in-scope Emscripten module. Abstracting - this for other WASM toolchains is TODO. -- `post-js-footer.js`\ - Emscripten-specific footer for the `--post-js` input. This closes - off the lexical scope opened by `post-js-header.js`. 
+ is an sqlite3 VFS implementation which supports Google Chrome's + Origin-Private FileSystem (OPFS) as a storage layer to provide + persistent storage for database files in a browser. It requires... + - `sqlite3-opfs-async-proxy.js`\ + is the asynchronous backend part of the OPFS proxy. It speaks + directly to the (async) OPFS API and channels those results back + to its synchronous counterpart. This file, because it must be + started in its own Worker, is not part of the amalgamation. +- **`api/sqlite3-api-cleanup.js`**\ + The previous files do not immediately extend the library. Instead + they add callback functions to be called during its + bootstrapping. Some also temporarily create global objects in order + to communicate their state to the files which follow them. This file + cleans up any dangling globals and runs the API bootstrapping + process, which is what finally executes the initialization code + installed by the previous files. As of this writing, this code + ensures that the previous files leave no more than a single global + symbol installed. When adapting the API for non-Emscripten + toolchains, this "should" be the only file where changes are needed. The build process glues those files together, resulting in `sqlite3-api.js`, which is everything except for the `post-js-*.js` files, and `sqlite3.js`, which is the Emscripten-generated amalgamated output and includes the `post-js-*.js` parts, as well as the @@ -97,5 +104,30 @@ The non-JS outlier file is `sqlite3-wasm.c`: it is a proxy for `sqlite3.c` which `#include`'s that file and adds a couple more WASM-specific helper functions, at least one of which requires access to private/static `sqlite3.c` internals. `sqlite3.wasm` is compiled from this file rather than `sqlite3.c`. + +The following files are part of the build process but are injected +into the build-generated `sqlite3.js` along with `sqlite3-api.js`. + +- `extern-pre-js.js`\ + Emscripten-specific header for Emscripten's `--extern-pre-js` + flag. As of this writing, that file is only used for experimentation + purposes and holds no code relevant to the production deliverables. +- `pre-js.js`\ + Emscripten-specific header for Emscripten's `--pre-js` flag. This + file is intended as a place to override certain Emscripten behavior + before it starts up, but corner-case Emscripten bugs keep that from + being a reality. +- `post-js-header.js`\ + Emscripten-specific header for the `--post-js` input. It opens up + a lexical scope by starting a post-run handler for Emscripten. +- `post-js-footer.js`\ + Emscripten-specific footer for the `--post-js` input. This closes + off the lexical scope opened by `post-js-header.js`. +- `extern-post-js.js`\ + Emscripten-specific header for Emscripten's `--extern-post-js` + flag. This file overwrites the Emscripten-installed + `sqlite3InitModule()` function with one which, after the module is + loaded, also initializes the asynchronous parts of the sqlite3 + module. For example, the OPFS VFS support. ADDED ext/wasm/api/extern-post-js.js Index: ext/wasm/api/extern-post-js.js ================================================================== --- /dev/null +++ ext/wasm/api/extern-post-js.js @@ -0,0 +1,103 @@ +/* extern-post-js.js must be appended to the resulting sqlite3.js + file. It gets its name from being used as the value for the + --extern-post-js=... Emscripten flag. Note that this code, unlike + most of the associated JS code, runs outside of the + Emscripten-generated module init scope, in the current + global scope. 
*/ +(function(){ + /** + In order to hide the sqlite3InitModule()'s resulting Emscripten + module from downstream clients (and simplify our documentation by + being able to elide those details), we rewrite + sqlite3InitModule() to return the sqlite3 object. + + Unfortunately, we cannot modify the module-loader/exporter-based + impls which Emscripten installs at some point in the file above + this. + */ + const originalInit = self.sqlite3InitModule; + if(!originalInit){ + throw new Error("Expecting self.sqlite3InitModule to be defined by the Emscripten build."); + } + /** + We need to add some state which our custom Module.locateFile() + can see, but an Emscripten limitation currently prevents us from + attaching it to the sqlite3InitModule function object: + + https://github.com/emscripten-core/emscripten/issues/18071 + + The only(?) current workaround is to temporarily stash this state + into the global scope and delete it when sqlite3InitModule() + is called. + */ + const initModuleState = self.sqlite3InitModuleState = Object.assign(Object.create(null),{ + moduleScript: self?.document?.currentScript, + isWorker: ('undefined' !== typeof WorkerGlobalScope), + location: self.location, + urlParams: new URL(self.location.href).searchParams + }); + initModuleState.debugModule = + (new URL(self.location.href).searchParams).has('sqlite3.debugModule') + ? (...args)=>console.warn('sqlite3.debugModule:',...args) + : ()=>{}; + + if(initModuleState.urlParams.has('sqlite3.dir')){ + initModuleState.sqlite3Dir = initModuleState.urlParams.get('sqlite3.dir') +'/'; + }else if(initModuleState.moduleScript){ + const li = initModuleState.moduleScript.src.split('/'); + li.pop(); + initModuleState.sqlite3Dir = li.join('/') + '/'; + } + + self.sqlite3InitModule = (...args)=>{ + //console.warn("Using replaced sqlite3InitModule()",self.location); + return originalInit(...args).then((EmscriptenModule)=>{ + if(self.window!==self && + (EmscriptenModule['ENVIRONMENT_IS_PTHREAD'] + || EmscriptenModule['_pthread_self'] + || 'function'===typeof threadAlert + || self.location.pathname.endsWith('.worker.js') + )){ + /** Workaround for wasmfs-generated worker, which calls this + routine from each individual thread and requires that its + argument be returned. All of the criteria above are fragile, + based solely on inspection of the offending code, not public + Emscripten details. */ + return EmscriptenModule; + } + EmscriptenModule.sqlite3.scriptInfo = initModuleState; + //console.warn("sqlite3.scriptInfo =",EmscriptenModule.sqlite3.scriptInfo); + const f = EmscriptenModule.sqlite3.asyncPostInit; + delete EmscriptenModule.sqlite3.asyncPostInit; + return f(); + }).catch((e)=>{ + console.error("Exception loading sqlite3 module:",e); + throw e; + }); + }; + self.sqlite3InitModule.ready = originalInit.ready; + + if(self.sqlite3InitModuleState.moduleScript){ + const sim = self.sqlite3InitModuleState; + let src = sim.moduleScript.src.split('/'); + src.pop(); + sim.scriptDir = src.join('/') + '/'; + } + initModuleState.debugModule('sqlite3InitModuleState =',initModuleState); + if(0){ + console.warn("Replaced sqlite3InitModule()"); + console.warn("self.location.href =",self.location.href); + if('undefined' !== typeof document){ + console.warn("document.currentScript.src =", + document?.currentScript?.src); + } + } + /* Replace the various module exports performed by the Emscripten + glue... 
*/ + if (typeof exports === 'object' && typeof module === 'object') + module.exports = sqlite3InitModule; + else if (typeof exports === 'object') + exports["sqlite3InitModule"] = sqlite3InitModule; + /* AMD modules get injected in a way we cannot override, + so we can't handle those here. */ +})(); ADDED ext/wasm/api/extern-pre-js.js Index: ext/wasm/api/extern-pre-js.js ================================================================== --- /dev/null +++ ext/wasm/api/extern-pre-js.js @@ -0,0 +1,7 @@ +/* extern-pre-js.js must be prepended to the resulting sqlite3.js + file. This file is currently only used for holding snippets during + test and development. + + It gets its name from being used as the value for the + --extern-pre-js=... Emscripten flag. +*/ Index: ext/wasm/api/post-js-footer.js ================================================================== --- ext/wasm/api/post-js-footer.js +++ ext/wasm/api/post-js-footer.js @@ -1,3 +1,4 @@ /* The current function scope was opened via post-js-header.js, which - gets prepended to this at build-time. */ + gets prepended to this at build-time. This file closes that + scope. */ })/*postRun.push(...)*/; Index: ext/wasm/api/post-js-header.js ================================================================== --- ext/wasm/api/post-js-header.js +++ ext/wasm/api/post-js-header.js @@ -3,24 +3,23 @@ post-js.js for use with Emscripten's --post-js flag. This code requires that it be running in that context. The Emscripten environment must have been set up already but it will not have loaded its WASM when the code in this file is run. The function it installs will be run after the WASM module is loaded, at which - point the sqlite3 WASM API bits will be set up. + point the sqlite3 JS API bits will get set up. */ if(!Module.postRun) Module.postRun = []; Module.postRun.push(function(Module/*the Emscripten-style module object*/){ 'use strict'; - /* This function will contain: + /* This function will contain at least the following: - post-js-header.js (this file) - sqlite3-api-prologue.js => Bootstrapping bits to attach the rest to - - sqlite3-api-whwasmutil.js => Replacements for much of Emscripten's glue - - sqlite3-api-jaccwabyt.js => Jaccwabyt (C/JS struct binding) + - common/whwasmutil.js => Replacements for much of Emscripten's glue + - jaccwaby/jaccwabyt.js => Jaccwabyt (C/JS struct binding) - sqlite3-api-glue.js => glues previous parts together - - sqlite3-api-oo.js => SQLite3 OO API #1. - - sqlite3-api-worker.js => Worker-based API + - sqlite3-api-oo.js => SQLite3 OO API #1 + - sqlite3-api-worker1.js => Worker-based API + - sqlite3-api-opfs.js => OPFS VFS - sqlite3-api-cleanup.js => final API cleanup - post-js-footer.js => closes this postRun() function - - Whew! */ ADDED ext/wasm/api/pre-js.js Index: ext/wasm/api/pre-js.js ================================================================== --- /dev/null +++ ext/wasm/api/pre-js.js @@ -0,0 +1,100 @@ +/** + BEGIN FILE: api/pre-js.js + + This file is intended to be prepended to the sqlite3.js build using + Emscripten's --pre-js=THIS_FILE flag (or equivalent). +*/ + +// See notes in extern-post-js.js +const sqlite3InitModuleState = self.sqlite3InitModuleState || Object.create(null); +delete self.sqlite3InitModuleState; +sqlite3InitModuleState.debugModule('self.location =',self.location); + +/** + This custom locateFile() tries to figure out where to load `path` + from. 
The intent is to provide a way for foo/bar/X.js loaded from a + Worker constructor or importScripts() to be able to resolve + foo/bar/X.wasm (in the latter case, with some help): + + 1) If URL param named the same as `path` is set, it is returned. + + 2) If sqlite3InitModuleState.sqlite3Dir is set, then (thatName + path) + is returned (note that it's assumed to end with '/'). + + 3) If this code is running in the main UI thread AND it was loaded + from a SCRIPT tag, the directory part of that URL is used + as the prefix. (This form of resolution unfortunately does not + function for scripts loaded via importScripts().) + + 4) If none of the above apply, (prefix+path) is returned. +*/ +Module['locateFile'] = function(path, prefix) { + let theFile; + const up = this.urlParams; + if(up.has(path)){ + theFile = up.get(path); + }else if(this.sqlite3Dir){ + theFile = this.sqlite3Dir + path; + }else if(this.scriptDir){ + theFile = this.scriptDir + path; + }else{ + theFile = prefix + path; + } + sqlite3InitModuleState.debugModule( + "locateFile(",arguments[0], ',', arguments[1],")", + 'sqlite3InitModuleState.scriptDir =',this.scriptDir, + 'up.entries() =',Array.from(up.entries()), + "result =", theFile + ); + return theFile; +}.bind(sqlite3InitModuleState); + +/** + Bug warning: a custom Module.instantiateWasm() does not work + in WASMFS builds: + + https://github.com/emscripten-core/emscripten/issues/17951 + + In such builds we must disable this. +*/ +const xNameOfInstantiateWasm = true + ? 'instantiateWasm' + : 'emscripten-bug-17951'; +Module[xNameOfInstantiateWasm] = function callee(imports,onSuccess){ + imports.env.foo = function(){}; + const uri = Module.locateFile( + callee.uri, ( + ('undefined'===typeof scriptDirectory/*var defined by Emscripten glue*/) + ? '' : scriptDirectory) + ); + sqlite3InitModuleState.debugModule( + "instantiateWasm() uri =", uri + ); + const wfetch = ()=>fetch(uri, {credentials: 'same-origin'}); + const loadWasm = WebAssembly.instantiateStreaming + ? async ()=>{ + return WebAssembly.instantiateStreaming(wfetch(), imports) + .then((arg)=>onSuccess(arg.instance, arg.module)); + } + : async ()=>{ // Safari < v15 + return wfetch() + .then(response => response.arrayBuffer()) + .then(bytes => WebAssembly.instantiate(bytes, imports)) + .then((arg)=>onSuccess(arg.instance, arg.module)); + }; + loadWasm(); + return {}; +}; +/* + It is literally impossible to reliably get the name of _this_ script + at runtime, so impossible to derive X.wasm from script name + X.js. Thus we need, at build-time, to redefine + Module[xNameOfInstantiateWasm].uri by appending it to a build-specific + copy of this file with the name of the wasm file. This is apparently + why Emscripten hard-codes the name of the wasm file into their glue + scripts. +*/ +Module[xNameOfInstantiateWasm].uri = 'sqlite3.wasm'; +/* END FILE: api/pre-js.js, noting that the build process may add a + line after this one to change the above .uri to a build-specific + one. */ Index: ext/wasm/api/sqlite3-api-cleanup.js ================================================================== --- ext/wasm/api/sqlite3-api-cleanup.js +++ ext/wasm/api/sqlite3-api-cleanup.js @@ -9,36 +9,62 @@ * May you share freely, never taking more than you give. *********************************************************************** This file is the tail end of the sqlite3-api.js constellation, - intended to be appended after all other files so that it can clean - up any global systems temporarily used for setting up the API's - various subsystems. 
+ intended to be appended after all other sqlite3-api-*.js files so + that it can finalize any setup and clean up any global symbols + temporarily used for setting up the API's various subsystems. */ 'use strict'; -self.sqlite3.postInit.forEach( - self.importScripts/*global is a Worker*/ - ? function(f){ - /** We try/catch/report for the sake of failures which happen in - a Worker, as those exceptions can otherwise get completely - swallowed, leading to confusing downstream errors which have - nothing to do with this failure. */ - try{ f(self, self.sqlite3) } - catch(e){ - console.error("Error in postInit() function:",e); - throw e; - } - } - : (f)=>f(self, self.sqlite3) -); -delete self.sqlite3.postInit; -if(self.location && +self.location.port > 1024){ - console.warn("Installing sqlite3 bits as global S for dev-testing purposes."); - self.S = self.sqlite3; -} -/* Clean up temporary global-scope references to our APIs... */ -self.sqlite3.config.Module.sqlite3 = self.sqlite3 -/* ^^^^ Currently needed by test code and Worker API setup */; -delete self.sqlite3.capi.util /* arguable, but these are (currently) internal-use APIs */; -delete self.sqlite3 /* clean up our global-scope reference */; -//console.warn("Module.sqlite3 =",Module.sqlite3); +if('undefined' !== typeof Module){ // presumably an Emscripten build + /** + Install a suitable default configuration for sqlite3ApiBootstrap(). + */ + const SABC = Object.assign( + Object.create(null), { + Module: Module /* ==> Currently needs to be exposed here for + test code. NOT part of the public API. */, + exports: Module['asm'], + memory: Module.wasmMemory /* gets set if built with -sIMPORT_MEMORY */ + }, + self.sqlite3ApiConfig || Object.create(null) + ); + + /** + For current (2022-08-22) purposes, automatically call + sqlite3ApiBootstrap(). That decision will be revisited at some + point, as we really want client code to be able to call this to + configure certain parts. Clients may modify + self.sqlite3ApiBootstrap.defaultConfig to tweak the default + configuration used by a no-args call to sqlite3ApiBootstrap(), + but must have first loaded their WASM module in order to be + able to provide the necessary configuration state. + */ + //console.warn("self.sqlite3ApiConfig = ",self.sqlite3ApiConfig); + self.sqlite3ApiConfig = SABC; + let sqlite3; + try{ + sqlite3 = self.sqlite3ApiBootstrap(); + }catch(e){ + console.error("sqlite3ApiBootstrap() error:",e); + throw e; + }finally{ + delete self.sqlite3ApiBootstrap; + delete self.sqlite3ApiConfig; + } + + if(self.location && +self.location.port > 1024){ + console.warn("Installing sqlite3 bits as global S for local dev/test purposes."); + self.S = sqlite3; + } + + /* Clean up temporary references to our APIs... */ + delete sqlite3.util /* arguable, but these are (currently) internal-use APIs */; + Module.sqlite3 = sqlite3 /* Needed for customized sqlite3InitModule() to be able to + pass the sqlite3 object off to the client. */; +}else{ + console.warn("This is not running in an Emscripten module context, so", + "self.sqlite3ApiBootstrap() is _not_ being called due to lack", + "of config info for the WASM environment.", + "It must be called manually."); +} Index: ext/wasm/api/sqlite3-api-glue.js ================================================================== --- ext/wasm/api/sqlite3-api-glue.js +++ ext/wasm/api/sqlite3-api-glue.js @@ -14,166 +14,18 @@ previous steps of the sqlite3-api.js bootstrapping process: sqlite3-api-prologue.js, whwasmutil.js, and jaccwabyt.js. 
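/* Editor's note: a hedged sketch, not part of this check-in, of the
   bootstrap-initializer pattern the sqlite3-api-*.js files in this
   diff rely on. Code loaded before sqlite3-api-cleanup.js runs may
   register a function which sqlite3ApiBootstrap() later invokes with
   the under-construction sqlite3 namespace object. The `myHelpers`
   property name is hypothetical. */
self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
  'use strict';
  sqlite3.myHelpers = {
    /* Map a numeric result code to its SQLITE_xxx name, falling back
       to the number itself. sqlite3_js_rc_str() is installed by the
       glue code shown later in this diff. */
    rcToString: (rc)=>sqlite3.capi.sqlite3_js_rc_str(rc) || String(rc)
  };
});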
It initializes the main API pieces so that the downstream components (e.g. sqlite3-api-oo1.js) have all that they need. */ -(function(self){ +self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){ 'use strict'; const toss = (...args)=>{throw new Error(args.join(' '))}; - - self.sqlite3 = self.sqlite3ApiBootstrap({ - Module: Module /* ==> Emscripten-style Module object. Currently - needs to be exposed here for test code. NOT part - of the public API. */, - exports: Module['asm'], - memory: Module.wasmMemory /* gets set if built with -sIMPORT_MEMORY */, - bigIntEnabled: !!self.BigInt64Array, - allocExportName: 'malloc', - deallocExportName: 'free' - }); - delete self.sqlite3ApiBootstrap; - - const sqlite3 = self.sqlite3; - const capi = sqlite3.capi, wasm = capi.wasm, util = capi.util; - self.WhWasmUtilInstaller(capi.wasm); + const toss3 = sqlite3.SQLite3Error.toss; + const capi = sqlite3.capi, wasm = sqlite3.wasm, util = sqlite3.util; + self.WhWasmUtilInstaller(wasm); delete self.WhWasmUtilInstaller; - if(0){ - /* "The problem" is that the following isn't type-safe. - OTOH, nothing about WASM pointers is. */ - /** - Add the `.pointer` xWrap() signature entry to extend - the `pointer` arg handler to check for a `pointer` - property. This can be used to permit, e.g., passing - an SQLite3.DB instance to a C-style sqlite3_xxx function - which takes an `sqlite3*` argument. - */ - const oldP = wasm.xWrap.argAdapter('pointer'); - const adapter = function(v){ - if(v && 'object'===typeof v && v.constructor){ - const x = v.pointer; - if(Number.isInteger(x)) return x; - else toss("Invalid (object) type for pointer-type argument."); - } - return oldP(v); - }; - wasm.xWrap.argAdapter('.pointer', adapter); - } - - // WhWasmUtil.xWrap() bindings... - { - /** - Add some descriptive xWrap() aliases for '*' intended to - (A) initially improve readability/correctness of capi.signatures - and (B) eventually perhaps provide some sort of type-safety - in their conversions. - */ - const aPtr = wasm.xWrap.argAdapter('*'); - wasm.xWrap.argAdapter('sqlite3*', aPtr)('sqlite3_stmt*', aPtr); - - /** - Populate api object with sqlite3_...() by binding the "raw" wasm - exports into type-converting proxies using wasm.xWrap(). - */ - for(const e of wasm.bindingSignatures){ - capi[e[0]] = wasm.xWrap.apply(null, e); - } - - /* For functions which cannot work properly unless - wasm.bigIntEnabled is true, install a bogus impl which - throws if called when bigIntEnabled is false. */ - const fI64Disabled = function(fname){ - return ()=>toss(fname+"() disabled due to lack", - "of BigInt support in this build."); - }; - for(const e of wasm.bindingSignatures.int64){ - capi[e[0]] = wasm.bigIntEnabled - ? wasm.xWrap.apply(null, e) - : fI64Disabled(e[0]); - } - - if(wasm.exports.sqlite3_wasm_db_error){ - util.sqlite3_wasm_db_error = capi.wasm.xWrap( - 'sqlite3_wasm_db_error', 'int', 'sqlite3*', 'int', 'string' - ); - }else{ - util.sqlite3_wasm_db_error = function(pDb,errCode,msg){ - console.warn("sqlite3_wasm_db_error() is not exported.",arguments); - return errCode; - }; - } - - /** - When registering a VFS and its related components it may be - necessary to ensure that JS keeps a reference to them to keep - them from getting garbage collected. Simply pass each such value - to this function and a reference will be held to it for the life - of the app. 
- */ - capi.sqlite3_vfs_register.addReference = function f(...args){ - if(!f._) f._ = []; - f._.push(...args); - }; - - }/*xWrap() bindings*/; - - /** - Scope-local holder of the two impls of sqlite3_prepare_v2/v3(). - */ - const __prepare = Object.create(null); - /** - This binding expects a JS string as its 2nd argument and - null as its final argument. In order to compile multiple - statements from a single string, the "full" impl (see - below) must be used. - */ - __prepare.basic = wasm.xWrap('sqlite3_prepare_v3', - "int", ["sqlite3*", "string", - "int"/*MUST always be negative*/, - "int", "**", - "**"/*MUST be 0 or null or undefined!*/]); - /** - Impl which requires that the 2nd argument be a pointer - to the SQL string, instead of being converted to a - string. This variant is necessary for cases where we - require a non-NULL value for the final argument - (exec()'ing multiple statements from one input - string). For simpler cases, where only the first - statement in the SQL string is required, the wrapper - named sqlite3_prepare_v2() is sufficient and easier to - use because it doesn't require dealing with pointers. - */ - __prepare.full = wasm.xWrap('sqlite3_prepare_v3', - "int", ["sqlite3*", "*", "int", "int", - "**", "**"]); - - /* Documented in the api object's initializer. */ - capi.sqlite3_prepare_v3 = function f(pDb, sql, sqlLen, prepFlags, ppStmt, pzTail){ - /* 2022-07-08: xWrap() 'string' arg handling may be able do this - special-case handling for us. It needs to be tested. Or maybe - not: we always want to treat pzTail as null when passed a - non-pointer SQL string and the argument adapters don't have - enough state to know that. Maybe they could/should, by passing - the currently-collected args as an array as the 2nd arg to the - argument adapters? Or maybe we collect all args in an array, - pass that to an optional post-args-collected callback, and give - it a chance to manipulate the args before we pass them on? */ - if(util.isSQLableTypedArray(sql)) sql = util.typedArrayToString(sql); - switch(typeof sql){ - case 'string': return __prepare.basic(pDb, sql, -1, prepFlags, ppStmt, null); - case 'number': return __prepare.full(pDb, sql, sqlLen||-1, prepFlags, ppStmt, pzTail); - default: - return util.sqlite3_wasm_db_error( - pDb, capi.SQLITE_MISUSE, - "Invalid SQL argument type for sqlite3_prepare_v2/v3()." - ); - } - }; - - capi.sqlite3_prepare_v2 = - (pDb, sql, sqlLen, ppStmt, pzTail)=>capi.sqlite3_prepare_v3(pDb, sql, sqlLen, 0, ppStmt, pzTail); - /** Install JS<->C struct bindings for the non-opaque struct types we need... */ sqlite3.StructBinder = self.Jaccwabyt({ heap: 0 ? wasm.memory : wasm.heap8u, @@ -183,29 +35,686 @@ bigIntEnabled: wasm.bigIntEnabled, memberPrefix: '$' }); delete self.Jaccwabyt; + if(0){ + /* "The problem" is that the following isn't even remotely + type-safe. OTOH, nothing about WASM pointers is. */ + const argPointer = wasm.xWrap.argAdapter('*'); + wasm.xWrap.argAdapter('StructType', (v)=>{ + if(v && v.constructor && v instanceof StructBinder.StructType){ + v = v.pointer; + } + return wasm.isPtr(v) + ? argPointer(v) + : toss("Invalid (object) type for StructType-type argument."); + }); + } + + {/* Convert Arrays and certain TypedArrays to strings for + 'flexible-string'-type arguments */ + const xString = wasm.xWrap.argAdapter('string'); + wasm.xWrap.argAdapter( + 'flexible-string', (v)=>xString(util.flexibleString(v)) + ); + } + + if(1){// WhWasmUtil.xWrap() bindings... 
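/* Editor's note: an illustrative sketch, not part of this check-in, of
   what the 'flexible-string' argument adapter registered above
   accomplishes. The real conversion lives in util.flexibleString();
   this standalone approximation only demonstrates the accepted input
   shapes. */
const flexibleStringSketch = function(v){
  if('string'===typeof v) return v;            // plain JS string
  if(Array.isArray(v)) return v.join('');      // array of string fragments
  if(v instanceof Uint8Array || v instanceof Int8Array){
    return new TextDecoder('utf-8').decode(v); // SQL provided in byte form
  }
  return v; // anything else is passed through as-is
};
// e.g. flexibleStringSketch(['SELECT 1;', 'SELECT 2;']) === 'SELECT 1;SELECT 2;'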
+ /** + Add some descriptive xWrap() aliases for '*' intended to (A) + initially improve readability/correctness of capi.signatures + and (B) eventually perhaps provide automatic conversion from + higher-level representations, e.g. capi.sqlite3_vfs to + `sqlite3_vfs*` via capi.sqlite3_vfs.pointer. + */ + const aPtr = wasm.xWrap.argAdapter('*'); + wasm.xWrap.argAdapter('sqlite3*', aPtr) + ('sqlite3_stmt*', aPtr) + ('sqlite3_context*', aPtr) + ('sqlite3_value*', aPtr) + ('sqlite3_vfs*', aPtr) + ('void*', aPtr); + wasm.xWrap.resultAdapter('sqlite3*', aPtr) + ('sqlite3_context*', aPtr) + ('sqlite3_stmt*', aPtr) + ('sqlite3_vfs*', aPtr) + ('void*', aPtr); + + /** + Populate api object with sqlite3_...() by binding the "raw" wasm + exports into type-converting proxies using wasm.xWrap(). + */ + for(const e of wasm.bindingSignatures){ + capi[e[0]] = wasm.xWrap.apply(null, e); + } + for(const e of wasm.bindingSignatures.wasm){ + wasm[e[0]] = wasm.xWrap.apply(null, e); + } + + /* For C API functions which cannot work properly unless + wasm.bigIntEnabled is true, install a bogus impl which + throws if called when bigIntEnabled is false. */ + const fI64Disabled = function(fname){ + return ()=>toss(fname+"() disabled due to lack", + "of BigInt support in this build."); + }; + for(const e of wasm.bindingSignatures.int64){ + capi[e[0]] = wasm.bigIntEnabled + ? wasm.xWrap.apply(null, e) + : fI64Disabled(e[0]); + } + + /* There's no(?) need to expose bindingSignatures to clients, + implicitly making it part of the public interface. */ + delete wasm.bindingSignatures; + + if(wasm.exports.sqlite3_wasm_db_error){ + util.sqlite3_wasm_db_error = wasm.xWrap( + 'sqlite3_wasm_db_error', 'int', 'sqlite3*', 'int', 'string' + ); + }else{ + util.sqlite3_wasm_db_error = function(pDb,errCode,msg){ + console.warn("sqlite3_wasm_db_error() is not exported.",arguments); + return errCode; + }; + } + + }/*xWrap() bindings*/; + + /** + When registering a VFS and its related components it may be + necessary to ensure that JS keeps a reference to them to keep + them from getting garbage collected. Simply pass each such value + to this function and a reference will be held to it for the life + of the app. + */ + capi.sqlite3_vfs_register.addReference = function f(...args){ + if(!f._) f._ = []; + f._.push(...args); + }; + + /** + Internal helper to assist in validating call argument counts in + the hand-written sqlite3_xyz() wrappers. We do this only for + consistency with non-special-case wrappings. + */ + const __dbArgcMismatch = (pDb,f,n)=>{ + return sqlite3.util.sqlite3_wasm_db_error(pDb, capi.SQLITE_MISUSE, + f+"() requires "+n+" argument"+ + (1===n?"":'s')+"."); + }; + + /** + Helper for flexible-string conversions which require a + byte-length counterpart argument. Passed a value and its + ostensible length, this function returns [V,N], where V + is either v or a transformed copy of v and N is either n, + -1, or the byte length of v (if it's a byte array). + */ + const __flexiString = function(v,n){ + if('string'===typeof v){ + n = -1; + }else if(util.isSQLableTypedArray(v)){ + n = v.byteLength; + v = util.typedArrayToString(v); + }else if(Array.isArray(v)){ + v = v.join(""); + n = -1; + } + return [v, n]; + }; + + if(1){/* Special-case handling of sqlite3_exec() */ + const __exec = wasm.xWrap("sqlite3_exec", "int", + ["sqlite3*", "flexible-string", "*", "*", "**"]); + /* Documented in the api object's initializer. 
*/ + capi.sqlite3_exec = function f(pDb, sql, callback, pVoid, pErrMsg){ + if(f.length!==arguments.length){ + return __dbArgcMismatch(pDb,"sqlite3_exec",f.length); + }else if('function' !== typeof callback){ + return __exec(pDb, sql, callback, pVoid, pErrMsg); + } + /* Wrap the callback in a WASM-bound function and convert the callback's + `(char**)` arguments to arrays of strings... */ + const cbwrap = function(pVoid, nCols, pColVals, pColNames){ + let rc = capi.SQLITE_ERROR; + try { + let aVals = [], aNames = [], i = 0, offset = 0; + for( ; i < nCols; offset += (wasm.ptrSizeof * ++i) ){ + aVals.push( wasm.cstringToJs(wasm.getPtrValue(pColVals + offset)) ); + aNames.push( wasm.cstringToJs(wasm.getPtrValue(pColNames + offset)) ); + } + rc = callback(pVoid, nCols, aVals, aNames) | 0; + /* The first 2 args of the callback are useless for JS but + we want the JS mapping of the C API to be as close to the + C API as possible. */ + }catch(e){ + /* If we set the db error state here, the higher-level exec() call + replaces it with its own, so we have no way of reporting the + exception message except the console. We must not propagate + exceptions through the C API. */ + } + return rc; + }; + let pFunc, rc; + try{ + pFunc = wasm.installFunction("ipipp", cbwrap); + rc = __exec(pDb, sql, pFunc, pVoid, pErrMsg); + }catch(e){ + rc = util.sqlite3_wasm_db_error(pDb, capi.SQLITE_ERROR, + "Error running exec(): "+e.message); + }finally{ + if(pFunc) wasm.uninstallFunction(pFunc); + } + return rc; + }; + }/*sqlite3_exec() proxy*/; + + if(1){/* Special-case handling of sqlite3_create_function_v2() + and sqlite3_create_window_function() */ + const sqlite3CreateFunction = wasm.xWrap( + "sqlite3_create_function_v2", "int", + ["sqlite3*", "string"/*funcName*/, "int"/*nArg*/, + "int"/*eTextRep*/, "*"/*pApp*/, + "*"/*xStep*/,"*"/*xFinal*/, "*"/*xValue*/, "*"/*xDestroy*/] + ); + const sqlite3CreateWindowFunction = wasm.xWrap( + "sqlite3_create_window_function", "int", + ["sqlite3*", "string"/*funcName*/, "int"/*nArg*/, + "int"/*eTextRep*/, "*"/*pApp*/, + "*"/*xStep*/,"*"/*xFinal*/, "*"/*xValue*/, + "*"/*xInverse*/, "*"/*xDestroy*/] + ); + + const __udfSetResult = function(pCtx, val){ + //console.warn("udfSetResult",typeof val, val); + switch(typeof val) { + case 'undefined': + /* Assume that the client already called sqlite3_result_xxx(). */ + break; + case 'boolean': + capi.sqlite3_result_int(pCtx, val ? 1 : 0); + break; + case 'bigint': + if(wasm.bigIntEnabled){ + if(util.bigIntFits64(val)) capi.sqlite3_result_int64(pCtx, val); + else toss3("BigInt value",val.toString(),"is too BigInt for int64."); + }else if(util.bigIntFits32(val)){ + capi.sqlite3_result_int(pCtx, Number(val)); + }else if(util.bigIntFitsDouble(val)){ + capi.sqlite3_result_double(pCtx, Number(val)); + }else{ + toss3("BigInt value",val.toString(),"is too BigInt."); + } + break; + case 'number': { + (util.isInt32(val) + ? 
capi.sqlite3_result_int + : capi.sqlite3_result_double)(pCtx, val); + break; + } + case 'string': + capi.sqlite3_result_text(pCtx, val, -1, capi.SQLITE_TRANSIENT); + break; + case 'object': + if(null===val/*yes, typeof null === 'object'*/) { + capi.sqlite3_result_null(pCtx); + break; + }else if(util.isBindableTypedArray(val)){ + const pBlob = wasm.allocFromTypedArray(val); + capi.sqlite3_result_blob( + pCtx, pBlob, val.byteLength, + wasm.exports[sqlite3.config.deallocExportName] + ); + break; + } + // else fall through + default: + toss3("Don't not how to handle this UDF result value:",(typeof val), val); + }; + }/*__udfSetResult()*/; + + const __udfConvertArgs = function(argc, pArgv){ + let i, pVal, valType, arg; + const tgt = []; + for(i = 0; i < argc; ++i){ + pVal = wasm.getPtrValue(pArgv + (wasm.ptrSizeof * i)); + /** + Curiously: despite ostensibly requiring 8-byte + alignment, the pArgv array is parcelled into chunks of + 4 bytes (1 pointer each). The values those point to + have 8-byte alignment but the individual argv entries + do not. + */ + valType = capi.sqlite3_value_type(pVal); + switch(valType){ + case capi.SQLITE_INTEGER: + if(wasm.bigIntEnabled){ + arg = capi.sqlite3_value_int64(pVal); + if(util.bigIntFitsDouble(arg)) arg = Number(arg); + } + else arg = capi.sqlite3_value_double(pVal)/*yes, double, for larger integers*/; + break; + case capi.SQLITE_FLOAT: + arg = capi.sqlite3_value_double(pVal); + break; + case capi.SQLITE_TEXT: + arg = capi.sqlite3_value_text(pVal); + break; + case capi.SQLITE_BLOB:{ + const n = capi.sqlite3_value_bytes(pVal); + const pBlob = capi.sqlite3_value_blob(pVal); + if(n && !pBlob) sqlite3.WasmAllocError.toss( + "Cannot allocate memory for blob argument of",n,"byte(s)" + ); + arg = n ? wasm.heap8u().slice(pBlob, pBlob + Number(n)) : null; + break; + } + case capi.SQLITE_NULL: + arg = null; break; + default: + toss3("Unhandled sqlite3_value_type()",valType, + "is possibly indicative of incorrect", + "pointer size assumption."); + } + tgt.push(arg); + } + return tgt; + }/*__udfConvertArgs()*/; + + const __udfSetError = (pCtx, e)=>{ + if(e instanceof sqlite3.WasmAllocError){ + capi.sqlite3_result_error_nomem(pCtx); + }else{ + const msg = ('string'===typeof e) ? 
e : e.message; + capi.sqlite3_result_error(pCtx, msg, -1); + } + }; + + const __xFunc = function(callback){ + return function(pCtx, argc, pArgv){ + try{ __udfSetResult(pCtx, callback(pCtx, ...__udfConvertArgs(argc, pArgv))) } + catch(e){ + //console.error('xFunc() caught:',e); + __udfSetError(pCtx, e); + } + }; + }; + + const __xInverseAndStep = function(callback){ + return function(pCtx, argc, pArgv){ + try{ callback(pCtx, ...__udfConvertArgs(argc, pArgv)) } + catch(e){ __udfSetError(pCtx, e) } + }; + }; + + const __xFinalAndValue = function(callback){ + return function(pCtx){ + try{ __udfSetResult(pCtx, callback(pCtx)) } + catch(e){ __udfSetError(pCtx, e) } + }; + }; + + const __xDestroy = function(callback){ + return function(pVoid){ + try{ callback(pVoid) } + catch(e){ console.error("UDF xDestroy method threw:",e) } + }; + }; + + const __xMap = Object.assign(Object.create(null), { + xFunc: {sig:'v(pip)', f:__xFunc}, + xStep: {sig:'v(pip)', f:__xInverseAndStep}, + xInverse: {sig:'v(pip)', f:__xInverseAndStep}, + xFinal: {sig:'v(p)', f:__xFinalAndValue}, + xValue: {sig:'v(p)', f:__xFinalAndValue}, + xDestroy: {sig:'v(p)', f:__xDestroy} + }); + + const __xWrapFuncs = function(theFuncs, tgtUninst){ + const rc = [] + let k; + for(k in theFuncs){ + let fArg = theFuncs[k]; + if('function'===typeof fArg){ + const w = __xMap[k]; + fArg = wasm.installFunction(w.sig, w.f(fArg)); + tgtUninst.push(fArg); + } + rc.push(fArg); + } + return rc; + }; + + /* Documented in the api object's initializer. */ + capi.sqlite3_create_function_v2 = function f( + pDb, funcName, nArg, eTextRep, pApp, + xFunc, //void (*xFunc)(sqlite3_context*,int,sqlite3_value**) + xStep, //void (*xStep)(sqlite3_context*,int,sqlite3_value**) + xFinal, //void (*xFinal)(sqlite3_context*) + xDestroy //void (*xDestroy)(void*) + ){ + if(f.length!==arguments.length){ + return __dbArgcMismatch(pDb,"sqlite3_create_function_v2",f.length); + } + /* Wrap the callbacks in a WASM-bound functions... */ + const uninstall = [/*funcs to uninstall on error*/]; + let rc; + try{ + const funcArgs = __xWrapFuncs({xFunc, xStep, xFinal, xDestroy}, + uninstall); + rc = sqlite3CreateFunction(pDb, funcName, nArg, eTextRep, + pApp, ...funcArgs); + }catch(e){ + console.error("sqlite3_create_function_v2() setup threw:",e); + for(let v of uninstall){ + wasm.uninstallFunction(v); + } + rc = util.sqlite3_wasm_db_error(pDb, capi.SQLITE_ERROR, + "Creation of UDF threw: "+e.message); + } + return rc; + }; + + capi.sqlite3_create_function = function f( + pDb, funcName, nArg, eTextRep, pApp, + xFunc, xStep, xFinal + ){ + return (f.length===arguments.length) + ? capi.sqlite3_create_function_v2(pDb, funcName, nArg, eTextRep, + pApp, xFunc, xStep, xFinal, 0) + : __dbArgcMismatch(pDb,"sqlite3_create_function",f.length); + }; + + /* Documented in the api object's initializer. */ + capi.sqlite3_create_window_function = function f( + pDb, funcName, nArg, eTextRep, pApp, + xStep, //void (*xStep)(sqlite3_context*,int,sqlite3_value**) + xFinal, //void (*xFinal)(sqlite3_context*) + xValue, //void (*xFinal)(sqlite3_context*) + xInverse,//void (*xStep)(sqlite3_context*,int,sqlite3_value**) + xDestroy //void (*xDestroy)(void*) + ){ + if(f.length!==arguments.length){ + return __dbArgcMismatch(pDb,"sqlite3_create_window_function",f.length); + } + /* Wrap the callbacks in a WASM-bound functions... 
*/ + const uninstall = [/*funcs to uninstall on error*/]; + let rc; + try{ + const funcArgs = __xWrapFuncs({xStep, xFinal, xValue, xInverse, xDestroy}, + uninstall); + rc = sqlite3CreateWindowFunction(pDb, funcName, nArg, eTextRep, + pApp, ...funcArgs); + }catch(e){ + console.error("sqlite3_create_window_function() setup threw:",e); + for(let v of uninstall){ + wasm.uninstallFunction(v); + } + rc = util.sqlite3_wasm_db_error(pDb, capi.SQLITE_ERROR, + "Creation of UDF threw: "+e.message); + } + return rc; + }; + /** + A helper for UDFs implemented in JS and bound to WASM by the + client. Given a JS value, udfSetResult(pCtx,X) calls one of the + sqlite3_result_xyz(pCtx,...) routines, depending on X's data + type: + + - `null`: sqlite3_result_null() + - `boolean`: sqlite3_result_int() + - `number`: sqlite3_result_int() or sqlite3_result_double() + - `string`: sqlite3_result_text() + - Uint8Array or Int8Array: sqlite3_result_blob() + - `undefined`: indicates that the UDF called one of the + `sqlite3_result_xyz()` routines on its own, making this + function a no-op. Results are _undefined_ if this function is + passed the `undefined` value but did _not_ call one of the + `sqlite3_result_xyz()` routines. + + Anything else triggers sqlite3_result_error(). + */ + capi.sqlite3_create_function_v2.udfSetResult = + capi.sqlite3_create_function.udfSetResult = + capi.sqlite3_create_window_function.udfSetResult = __udfSetResult; + + /** + A helper for UDFs implemented in JS and bound to WASM by the + client. When passed the + (argc,argv) values from the UDF-related functions which receive + them (xFunc, xStep, xInverse), it creates a JS array + representing those arguments, converting each to JS in a manner + appropriate to its data type: numeric, text, blob + (Uint8Array), or null. + + Results are undefined if it's passed anything other than those + two arguments from those specific contexts. + + Thus an argc of 4 will result in a length-4 array containing + the converted values from the corresponding argv. + + The conversion will throw only on allocation error or an internal + error. + */ + capi.sqlite3_create_function_v2.udfConvertArgs = + capi.sqlite3_create_function.udfConvertArgs = + capi.sqlite3_create_window_function.udfConvertArgs = __udfConvertArgs; + + /** + A helper for UDFs implemented in JS and bound to WASM by the + client. It expects to be a passed `(sqlite3_context*, Error)` + (an exception object or message string). And it sets the + current UDF's result to sqlite3_result_error_nomem() or + sqlite3_result_error(), depending on whether the 2nd argument + is a sqlite3.WasmAllocError object or not. + */ + capi.sqlite3_create_function_v2.udfSetError = + capi.sqlite3_create_function.udfSetError = + capi.sqlite3_create_window_function.udfSetError = __udfSetError; + + }/*sqlite3_create_function_v2() and sqlite3_create_window_function() proxies*/; + + if(1){/* Special-case handling of sqlite3_prepare_v2() and + sqlite3_prepare_v3() */ + /** + Scope-local holder of the two impls of sqlite3_prepare_v2/v3(). + */ + const __prepare = Object.create(null); + /** + This binding expects a JS string as its 2nd argument and + null as its final argument. In order to compile multiple + statements from a single string, the "full" impl (see + below) must be used. 
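/* Editor's note: a hedged usage sketch, not part of this check-in,
   showing the JS-string form accepted by the sqlite3_prepare_v3()
   wrapper defined in this section. It assumes an open `pDb`
   (sqlite3*) handle and the wasm.pstack utilities used elsewhere in
   this diff. The returned statement pointer must eventually be
   passed to capi.sqlite3_finalize() (not shown here). */
const prepareOneSketch = function(sqlite3, pDb, sql){
  const wasm = sqlite3.wasm, capi = sqlite3.capi;
  const stack = wasm.pstack.pointer;
  try{
    const ppStmt = wasm.pstack.alloc(8); // output (sqlite3_stmt**) slot
    // String SQL routes to the "basic" impl: pzTail must be null here.
    const rc = capi.sqlite3_prepare_v3(pDb, sql, -1, 0, ppStmt, null);
    return rc ? 0 : wasm.getPtrValue(ppStmt); // 0 on error, else sqlite3_stmt*
  }finally{
    wasm.pstack.restore(stack);
  }
};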
+ */ + __prepare.basic = wasm.xWrap('sqlite3_prepare_v3', + "int", ["sqlite3*", "string", + "int"/*ignored for this impl!*/, + "int", "**", + "**"/*MUST be 0 or null or undefined!*/]); + /** + Impl which requires that the 2nd argument be a pointer + to the SQL string, instead of being converted to a + string. This variant is necessary for cases where we + require a non-NULL value for the final argument + (exec()'ing multiple statements from one input + string). For simpler cases, where only the first + statement in the SQL string is required, the wrapper + named sqlite3_prepare_v2() is sufficient and easier to + use because it doesn't require dealing with pointers. + */ + __prepare.full = wasm.xWrap('sqlite3_prepare_v3', + "int", ["sqlite3*", "*", "int", "int", + "**", "**"]); + + /* Documented in the api object's initializer. */ + capi.sqlite3_prepare_v3 = function f(pDb, sql, sqlLen, prepFlags, ppStmt, pzTail){ + if(f.length!==arguments.length){ + return __dbArgcMismatch(pDb,"sqlite3_prepare_v3",f.length); + } + const [xSql, xSqlLen] = __flexiString(sql, sqlLen); + switch(typeof xSql){ + case 'string': return __prepare.basic(pDb, xSql, xSqlLen, prepFlags, ppStmt, null); + case 'number': return __prepare.full(pDb, xSql, xSqlLen, prepFlags, ppStmt, pzTail); + default: + return util.sqlite3_wasm_db_error( + pDb, capi.SQLITE_MISUSE, + "Invalid SQL argument type for sqlite3_prepare_v2/v3()." + ); + } + }; + + /* Documented in the api object's initializer. */ + capi.sqlite3_prepare_v2 = function f(pDb, sql, sqlLen, ppStmt, pzTail){ + return (f.length===arguments.length) + ? capi.sqlite3_prepare_v3(pDb, sql, sqlLen, 0, ppStmt, pzTail) + : __dbArgcMismatch(pDb,"sqlite3_prepare_v2",f.length); + }; + }/*sqlite3_prepare_v2/v3()*/; + {/* Import C-level constants and structs... */ const cJson = wasm.xCall('sqlite3_wasm_enum_json'); if(!cJson){ toss("Maintenance required: increase sqlite3_wasm_enum_json()'s", "static buffer size!"); } wasm.ctype = JSON.parse(wasm.cstringToJs(cJson)); //console.debug('wasm.ctype length =',wasm.cstrlen(cJson)); for(const t of ['access', 'blobFinalizers', 'dataTypes', - 'encodings', 'flock', 'ioCap', + 'encodings', 'fcntl', 'flock', 'ioCap', 'openFlags', 'prepareFlags', 'resultCodes', - 'syncFlags', 'udfFlags', 'version' + 'serialize', 'syncFlags', 'trace', 'udfFlags', + 'version' ]){ - for(const [k,v] of Object.entries(wasm.ctype[t])){ - capi[k] = v; + for(const e of Object.entries(wasm.ctype[t])){ + // ^^^ [k,v] there triggers a buggy code transormation via one + // of the Emscripten-driven optimizers. + capi[e[0]] = e[1]; + } + } + const __rcMap = Object.create(null); + for(const t of ['resultCodes']){ + for(const e of Object.entries(wasm.ctype[t])){ + __rcMap[e[1]] = e[0]; } } + /** + For the given integer, returns the SQLITE_xxx result code as a + string, or undefined if no such mapping is found. + */ + capi.sqlite3_js_rc_str = (rc)=>__rcMap[rc]; /* Bind all registered C-side structs... */ + const notThese = Object.assign(Object.create(null),{ + // Structs NOT to register + WasmTestStruct: true + }); + if(!util.isUIThread()){ + /* We remove the kvvfs VFS from Worker threads below. 
*/ + notThese.sqlite3_kvvfs_methods = true; + } for(const s of wasm.ctype.structs){ - capi[s.name] = sqlite3.StructBinder(s); + if(!notThese[s.name]){ + capi[s.name] = sqlite3.StructBinder(s); + } + } + }/*end C constant imports*/ + + const pKvvfs = capi.sqlite3_vfs_find("kvvfs"); + if( pKvvfs ){/* kvvfs-specific glue */ + if(util.isUIThread()){ + const kvvfsMethods = new capi.sqlite3_kvvfs_methods( + wasm.exports.sqlite3_wasm_kvvfs_methods() + ); + delete capi.sqlite3_kvvfs_methods; + + const kvvfsMakeKey = wasm.exports.sqlite3_wasm_kvvfsMakeKeyOnPstack, + pstack = wasm.pstack, + pAllocRaw = wasm.exports.sqlite3_wasm_pstack_alloc; + + const kvvfsStorage = (zClass)=> + ((115/*=='s'*/===wasm.getMemValue(zClass)) + ? sessionStorage : localStorage); + + /** + Implementations for members of the object referred to by + sqlite3_wasm_kvvfs_methods(). We swap out the native + implementations with these, which use localStorage or + sessionStorage for their backing store. + */ + const kvvfsImpls = { + xRead: (zClass, zKey, zBuf, nBuf)=>{ + const stack = pstack.pointer, + astack = wasm.scopedAllocPush(); + try { + const zXKey = kvvfsMakeKey(zClass,zKey); + if(!zXKey) return -3/*OOM*/; + const jKey = wasm.cstringToJs(zXKey); + const jV = kvvfsStorage(zClass).getItem(jKey); + if(!jV) return -1; + const nV = jV.length /* Note that we are relying 100% on v being + ASCII so that jV.length is equal to the + C-string's byte length. */; + if(nBuf<=0) return nV; + else if(1===nBuf){ + wasm.setMemValue(zBuf, 0); + return nV; + } + const zV = wasm.scopedAllocCString(jV); + if(nBuf > nV + 1) nBuf = nV + 1; + wasm.heap8u().copyWithin(zBuf, zV, zV + nBuf - 1); + wasm.setMemValue(zBuf + nBuf - 1, 0); + return nBuf - 1; + }catch(e){ + console.error("kvstorageRead()",e); + return -2; + }finally{ + pstack.restore(stack); + wasm.scopedAllocPop(astack); + } + }, + xWrite: (zClass, zKey, zData)=>{ + const stack = pstack.pointer; + try { + const zXKey = kvvfsMakeKey(zClass,zKey); + if(!zXKey) return 1/*OOM*/; + const jKey = wasm.cstringToJs(zXKey); + kvvfsStorage(zClass).setItem(jKey, wasm.cstringToJs(zData)); + return 0; + }catch(e){ + console.error("kvstorageWrite()",e); + return capi.SQLITE_IOERR; + }finally{ + pstack.restore(stack); + } + }, + xDelete: (zClass, zKey)=>{ + const stack = pstack.pointer; + try { + const zXKey = kvvfsMakeKey(zClass,zKey); + if(!zXKey) return 1/*OOM*/; + kvvfsStorage(zClass).removeItem(wasm.cstringToJs(zXKey)); + return 0; + }catch(e){ + console.error("kvstorageDelete()",e); + return capi.SQLITE_IOERR; + }finally{ + pstack.restore(stack); + } + } + }/*kvvfsImpls*/; + for(const k of Object.keys(kvvfsImpls)){ + kvvfsMethods[kvvfsMethods.memberKey(k)] = + wasm.installFunction( + kvvfsMethods.memberSignature(k), + kvvfsImpls[k] + ); + } + }else{ + /* Worker thread: unregister kvvfs to avoid it being used + for anything other than local/sessionStorage. It "can" + be used that way but it's not really intended to be. */ + capi.sqlite3_vfs_unregister(pKvvfs); } - } + }/*pKvvfs*/ -})(self); +}); Index: ext/wasm/api/sqlite3-api-oo1.js ================================================================== --- ext/wasm/api/sqlite3-api-oo1.js +++ ext/wasm/api/sqlite3-api-oo1.js @@ -12,15 +12,15 @@ This file contains the so-called OO #1 API wrapper for the sqlite3 WASM build. It requires that sqlite3-api-glue.js has already run and it installs its deliverable as self.sqlite3.oo1. 
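/* Editor's note: a simplified, JS-only illustration, not part of this
   check-in, of the backing-store selection rule used by the kvvfs
   glue above: a storage-class string beginning with 's' ("session...")
   maps to sessionStorage, anything else to localStorage. The real
   code inspects the first byte of a C-string via wasm.getMemValue(),
   and this only applies in the main UI thread. */
const kvvfsStorageSketch = (jClass)=>
  ('s'===jClass[0]) ? sessionStorage : localStorage;
// e.g. kvvfsStorageSketch('session') === sessionStorage
//      kvvfsStorageSketch('local')   === localStorage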
*/ -(function(self){ +self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){ const toss = (...args)=>{throw new Error(args.join(' '))}; + const toss3 = (...args)=>{throw new sqlite3.SQLite3Error(...args)}; - const sqlite3 = self.sqlite3 || toss("Missing main sqlite3 object."); - const capi = sqlite3.capi, util = capi.util; + const capi = sqlite3.capi, wasm = sqlite3.wasm, util = sqlite3.util; /* What follows is colloquially known as "OO API #1". It is a binding of the sqlite3 API which is designed to be run within the same thread (main or worker) as the one in which the sqlite3 WASM binding was initialized. This wrapper cannot use the sqlite3 binding if, e.g., the wrapper is in the main thread @@ -31,44 +31,190 @@ inadvertently, the underlying pointer values of DB and Stmt instances, we'll gate access to them via the `pointer` property accessor and store their real values in this map. Keys = DB/Stmt objects, values = pointer values. This also unifies how those are accessed, for potential use downstream via custom - capi.wasm.xWrap() function signatures which know how to extract + wasm.xWrap() function signatures which know how to extract it. */ const __ptrMap = new WeakMap(); /** - Map of DB instances to objects, each object being a map of UDF - names to wasm function _pointers_ added to that DB handle via - createFunction(). - */ - const __udfMap = new WeakMap(); - /** Map of DB instances to objects, each object being a map of Stmt wasm pointers to Stmt objects. */ const __stmtMap = new WeakMap(); /** If object opts has _its own_ property named p then that property's value is returned, else dflt is returned. */ - const getOwnOption = (opts, p, dflt)=> - opts.hasOwnProperty(p) ? opts[p] : dflt; + const getOwnOption = (opts, p, dflt)=>{ + const d = Object.getOwnPropertyDescriptor(opts,p); + return d ? d.value : dflt; + }; + + // Documented in DB.checkRc() + const checkSqlite3Rc = function(dbPtr, sqliteResultCode){ + if(sqliteResultCode){ + if(dbPtr instanceof DB) dbPtr = dbPtr.pointer; + toss3( + "sqlite result code",sqliteResultCode+":", + (dbPtr + ? capi.sqlite3_errmsg(dbPtr) + : capi.sqlite3_errstr(sqliteResultCode)) + ); + } + }; + + /** + sqlite3_trace_v2() callback which gets installed by the DB ctor + if its open-flags contain "t". + */ + const __dbTraceToConsole = + wasm.installFunction('i(ippp)', function(t,c,p,x){ + if(capi.SQLITE_TRACE_STMT===t){ + // x == SQL, p == sqlite3_stmt* + console.log("SQL TRACE #"+(++this.counter), + wasm.cstringToJs(x)); + } + }.bind({counter: 0})); + + /** + A map of sqlite3_vfs pointers to SQL code to run when the DB + constructor opens a database with the given VFS. + */ + const __vfsPostOpenSql = Object.create(null); + + /** + A proxy for DB class constructors. It must be called with the + being-construct DB object as its "this". See the DB constructor + for the argument docs. This is split into a separate function + in order to enable simple creation of special-case DB constructors, + e.g. JsStorageDb and OpfsDb. + + Expects to be passed a configuration object with the following + properties: + + - `.filename`: the db filename. It may be a special name like ":memory:" + or "". + + - `.flags`: as documented in the DB constructor. + + - `.vfs`: as documented in the DB constructor. + + It also accepts those as the first 3 arguments. + */ + const dbCtorHelper = function ctor(...args){ + if(!ctor._name2vfs){ + /** + Map special filenames which we handle here (instead of in C) + to some helpful metadata... 
+ + As of 2022-09-20, the C API supports the names :localStorage: + and :sessionStorage: for kvvfs. However, C code cannot + determine (without embedded JS code, e.g. via Emscripten's + EM_JS()) whether the kvvfs is legal in the current browser + context (namely the main UI thread). In order to help client + code fail early on, instead of it being delayed until they + try to read or write a kvvfs-backed db, we'll check for those + names here and throw if they're not legal in the current + context. + */ + ctor._name2vfs = Object.create(null); + const isWorkerThread = ('function'===typeof importScripts/*===running in worker thread*/) + ? (n)=>toss3("The VFS for",n,"is only available in the main window thread.") + : false; + ctor._name2vfs[':localStorage:'] = { + vfs: 'kvvfs', filename: isWorkerThread || (()=>'local') + }; + ctor._name2vfs[':sessionStorage:'] = { + vfs: 'kvvfs', filename: isWorkerThread || (()=>'session') + }; + } + const opt = ctor.normalizeArgs(...args); + let fn = opt.filename, vfsName = opt.vfs, flagsStr = opt.flags; + if(('string'!==typeof fn && 'number'!==typeof fn) + || 'string'!==typeof flagsStr + || (vfsName && ('string'!==typeof vfsName && 'number'!==typeof vfsName))){ + console.error("Invalid DB ctor args",opt,arguments); + toss3("Invalid arguments for DB constructor."); + } + let fnJs = ('number'===typeof fn) ? wasm.cstringToJs(fn) : fn; + const vfsCheck = ctor._name2vfs[fnJs]; + if(vfsCheck){ + vfsName = vfsCheck.vfs; + fn = fnJs = vfsCheck.filename(fnJs); + } + let pDb, oflags = 0; + if( flagsStr.indexOf('c')>=0 ){ + oflags |= capi.SQLITE_OPEN_CREATE | capi.SQLITE_OPEN_READWRITE; + } + if( flagsStr.indexOf('w')>=0 ) oflags |= capi.SQLITE_OPEN_READWRITE; + if( 0===oflags ) oflags |= capi.SQLITE_OPEN_READONLY; + oflags |= capi.SQLITE_OPEN_EXRESCODE; + const stack = wasm.pstack.pointer; + try { + const pPtr = wasm.pstack.allocPtr() /* output (sqlite3**) arg */; + let rc = capi.sqlite3_open_v2(fn, pPtr, oflags, vfsName || 0); + pDb = wasm.getPtrValue(pPtr); + checkSqlite3Rc(pDb, rc); + if(flagsStr.indexOf('t')>=0){ + capi.sqlite3_trace_v2(pDb, capi.SQLITE_TRACE_STMT, + __dbTraceToConsole, 0); + } + // Check for per-VFS post-open SQL... + const pVfs = capi.sqlite3_js_db_vfs(pDb); + //console.warn("Opened db",fn,"with vfs",vfsName,pVfs); + if(!pVfs) toss3("Internal error: cannot get VFS for new db handle."); + const postInitSql = __vfsPostOpenSql[pVfs]; + if(postInitSql){ + rc = capi.sqlite3_exec(pDb, postInitSql, 0, 0, 0); + checkSqlite3Rc(pDb, rc); + } + }catch( e ){ + if( pDb ) capi.sqlite3_close_v2(pDb); + throw e; + }finally{ + wasm.pstack.restore(stack); + } + this.filename = fnJs; + __ptrMap.set(this, pDb); + __stmtMap.set(this, Object.create(null)); + }; + + /** + Sets SQL which should be exec()'d on a DB instance after it is + opened with the given VFS pointer. This is intended only for use + by DB subclasses or sqlite3_vfs implementations. + */ + dbCtorHelper.setVfsPostOpenSql = function(pVfs, sql){ + __vfsPostOpenSql[pVfs] = sql; + }; /** - An Error subclass specifically for reporting DB-level errors and - enabling clients to unambiguously identify such exceptions. + A helper for DB constructors. It accepts either a single + config-style object or up to 3 arguments (filename, dbOpenFlags, + dbVfsName). It returns a new object containing: + + { filename: ..., flags: ..., vfs: ... } + + If passed an object, any additional properties it has are copied + as-is into the new object. 
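/* Editor's note: a hedged usage sketch, not part of this check-in, of
   the two equivalent argument forms accepted by the normalizeArgs()
   helper defined just below. The filename '/foo.db' is hypothetical.
   Both calls produce {filename:'/foo.db', flags:'ct', vfs:null}; the
   object form passes any extra properties through unchanged. */
const a = dbCtorHelper.normalizeArgs('/foo.db', 'ct');
const b = dbCtorHelper.normalizeArgs({filename: '/foo.db', flags: 'ct'});
// a.flags === b.flags === 'ct', and vfs defaults to null in both cases.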
*/ - class SQLite3Error extends Error { - constructor(...args){ - super(...args); - this.name = 'SQLite3Error'; - } - }; - const toss3 = (...args)=>{throw new SQLite3Error(args)}; - sqlite3.SQLite3Error = SQLite3Error; - + dbCtorHelper.normalizeArgs = function(filename=':memory:',flags = 'c',vfs = null){ + const arg = {}; + if(1===arguments.length && 'object'===typeof arguments[0]){ + const x = arguments[0]; + Object.keys(x).forEach((k)=>arg[k] = x[k]); + if(undefined===arg.flags) arg.flags = 'c'; + if(undefined===arg.vfs) arg.vfs = null; + if(undefined===arg.filename) arg.filename = ':memory:'; + }else{ + arg.filename = filename; + arg.flags = flags; + arg.vfs = vfs; + } + return arg; + }; /** The DB class provides a high-level OO wrapper around an sqlite3 db handle. The given db filename must be resolvable using whatever @@ -78,44 +224,66 @@ Note that the special sqlite3 db names ":memory:" and "" (temporary db) have their normal special meanings here and need not resolve to real filenames, but "" uses an on-storage temporary database and requires that the VFS support that. - The db is currently opened with a fixed set of flags: - (SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | - SQLITE_OPEN_EXRESCODE). This API will change in the future - permit the caller to provide those flags via an additional - argument. + The second argument specifies the open/create mode for the + database. It must be string containing a sequence of letters (in + any order, but case sensitive) specifying the mode: + + - "c": create if it does not exist, else fail if it does not + exist. Implies the "w" flag. + + - "w": write. Implies "r": a db cannot be write-only. + + - "r": read-only if neither "w" nor "c" are provided, else it + is ignored. + + - "t": enable tracing of SQL executed on this database handle, + sending it to `console.log()`. To disable it later, call + `sqlite3.capi.sqlite3_trace_v2(thisDb.pointer, 0, 0, 0)`. + + If "w" is not provided, the db is implicitly read-only, noting + that "rc" is meaningless + + Any other letters are currently ignored. The default is + "c". These modes are ignored for the special ":memory:" and "" + names and _may_ be ignored altogether for certain VFSes. + + The final argument is analogous to the final argument of + sqlite3_open_v2(): the name of an sqlite3 VFS. Pass a falsy value, + or none at all, to use the default. If passed a value, it must + be the string name of a VFS. + + The constructor optionally (and preferably) takes its arguments + in the form of a single configuration object with the following + properties: + + - `filename`: database file name + - `flags`: open-mode flags + - `vfs`: the VFS fname + + The `filename` and `vfs` arguments may be either JS strings or + C-strings allocated via WASM. `flags` is required to be a JS + string (because it's specific to this API, which is specific + to JS). For purposes of passing a DB instance to C-style sqlite3 - functions, its read-only `pointer` property holds its `sqlite3*` - pointer value. That property can also be used to check whether - this DB instance is still open. + functions, the DB object's read-only `pointer` property holds its + `sqlite3*` pointer value. That property can also be used to check + whether this DB instance is still open. + + In the main window thread, the filenames `":localStorage:"` and + `":sessionStorage:"` are special: they cause the db to use either + localStorage or sessionStorage for storing the database using + the kvvfs. 
If one of these names are used, they trump + any vfs name set in the arguments. */ - const DB = function ctor(fn=':memory:'){ - if('string'!==typeof fn){ - toss3("Invalid filename for DB constructor."); - } - const stack = capi.wasm.scopedAllocPush(); - let ptr; - try { - const ppDb = capi.wasm.scopedAllocPtr() /* output (sqlite3**) arg */; - const rc = capi.sqlite3_open_v2(fn, ppDb, capi.SQLITE_OPEN_READWRITE - | capi.SQLITE_OPEN_CREATE - | capi.SQLITE_OPEN_EXRESCODE, null); - ptr = capi.wasm.getMemValue(ppDb, '*'); - ctor.checkRc(ptr, rc); - }catch(e){ - if(ptr) capi.sqlite3_close_v2(ptr); - throw e; - } - finally{capi.wasm.scopedAllocPop(stack);} - this.filename = fn; - __ptrMap.set(this, ptr); - __stmtMap.set(this, Object.create(null)); - __udfMap.set(this, Object.create(null)); + const DB = function(...args){ + dbCtorHelper.apply(this, args); }; + DB.dbCtorHelper = dbCtorHelper; /** Internal-use enum for mapping JS types to DB-bindable types. These do not (and need not) line up with the SQLITE_type values. All values in this enum must be truthy and distinct @@ -127,11 +295,11 @@ string: 3, boolean: 4, blob: 5 }; BindTypes['undefined'] == BindTypes.null; - if(capi.wasm.bigIntEnabled){ + if(wasm.bigIntEnabled){ BindTypes.bigint = BindTypes.number; } /** This class wraps sqlite3_stmt. Calling this constructor @@ -139,10 +307,19 @@ new instances. For purposes of passing a Stmt instance to C-style sqlite3 functions, its read-only `pointer` property holds its `sqlite3_stmt*` pointer value. + + Other non-function properties include: + + - `db`: the DB object which created the statement. + + - `columnCount`: the number of result columns in the query, or 0 for + queries which cannot return results. + + - `parameterCount`: the number of bindable paramters in the query. */ const Stmt = function(){ if(BindTypes!==arguments[2]){ toss3("Do not call the Stmt constructor directly. Use DB.prepare()."); } @@ -161,37 +338,43 @@ /** Throws if ndx is not an integer or if it is out of range for stmt.columnCount, else returns stmt. Reminder: this will also fail after the statement is finalized but the resulting error will be about an out-of-bounds column - index. + index rather than a statement-is-finalized error. */ const affirmColIndex = function(stmt,ndx){ if((ndx !== (ndx|0)) || ndx<0 || ndx>=stmt.columnCount){ toss3("Column index",ndx,"is out of range."); } return stmt; }; /** - Expects to be passed (arguments) from DB.exec() and - DB.execMulti(). Does the argument processing/validation, throws - on error, and returns a new object on success: + Expects to be passed the `arguments` object from DB.exec(). Does + the argument processing/validation, throws on error, and returns + a new object on success: { sql: the SQL, opt: optionsObj, cbArg: function} - cbArg is only set if the opt.callback is set, in which case - it's a function which expects to be passed the current Stmt - and returns the callback argument of the type indicated by - the input arguments. + The opt object is a normalized copy of any passed to this + function. The sql will be converted to a string if it is provided + in one of the supported non-string formats. + + cbArg is only set if the opt.callback or opt.resultRows are set, + in which case it's a function which expects to be passed the + current Stmt and returns the callback argument of the type + indicated by the input arguments. 
*/ - const parseExecArgs = function(args){ + const parseExecArgs = function(db, args){ const out = Object.create(null); out.opt = Object.create(null); switch(args.length){ case 1: if('string'===typeof args[0] || util.isSQLableTypedArray(args[0])){ + out.sql = args[0]; + }else if(Array.isArray(args[0])){ out.sql = args[0]; }else if(args[0] && 'object'===typeof args[0]){ out.opt = args[0]; out.sql = out.opt.sql; } @@ -200,84 +383,146 @@ out.sql = args[0]; out.opt = args[1]; break; default: toss3("Invalid argument count for exec()."); }; - if(util.isSQLableTypedArray(out.sql)){ - out.sql = util.typedArrayToString(out.sql); - }else if(Array.isArray(out.sql)){ - out.sql = out.sql.join(''); - }else if('string'!==typeof out.sql){ - toss3("Missing SQL argument."); - } - if(out.opt.callback || out.opt.resultRows){ - switch((undefined===out.opt.rowMode) - ? 'stmt' : out.opt.rowMode) { - case 'object': out.cbArg = (stmt)=>stmt.get({}); break; + out.sql = util.flexibleString(out.sql); + if('string'!==typeof out.sql){ + toss3("Missing SQL argument or unsupported SQL value type."); + } + const opt = out.opt; + switch(opt.returnValue){ + case 'resultRows': + if(!opt.resultRows) opt.resultRows = []; + out.returnVal = ()=>opt.resultRows; + break; + case 'saveSql': + if(!opt.saveSql) opt.saveSql = []; + out.returnVal = ()=>opt.saveSql; + break; + case undefined: + case 'this': + out.returnVal = ()=>db; + break; + default: + toss3("Invalid returnValue value:",opt.returnValue); + } + if(opt.callback || opt.resultRows){ + switch((undefined===opt.rowMode) + ? 'array' : opt.rowMode) { + case 'object': out.cbArg = (stmt)=>stmt.get(Object.create(null)); break; case 'array': out.cbArg = (stmt)=>stmt.get([]); break; case 'stmt': - if(Array.isArray(out.opt.resultRows)){ - toss3("Invalid rowMode for resultRows array: must", + if(Array.isArray(opt.resultRows)){ + toss3("exec(): invalid rowMode for a resultRows array: must", "be one of 'array', 'object',", - "or a result column number."); + "a result column number, or column name reference."); } out.cbArg = (stmt)=>stmt; break; default: - if(util.isInt32(out.opt.rowMode)){ - out.cbArg = (stmt)=>stmt.get(out.opt.rowMode); + if(util.isInt32(opt.rowMode)){ + out.cbArg = (stmt)=>stmt.get(opt.rowMode); break; + }else if('string'===typeof opt.rowMode && opt.rowMode.length>1){ + /* "$X", ":X", and "@X" fetch column named "X" (case-sensitive!) */ + const prefix = opt.rowMode[0]; + if(':'===prefix || '@'===prefix || '$'===prefix){ + out.cbArg = function(stmt){ + const rc = stmt.get(this.obj)[this.colName]; + return (undefined===rc) ? toss3("exec(): unknown result column:",this.colName) : rc; + }.bind({ + obj:Object.create(null), + colName: opt.rowMode.substr(1) + }); + break; + } } - toss3("Invalid rowMode:",out.opt.rowMode); + toss3("Invalid rowMode:",opt.rowMode); } } return out; }; /** - Expects to be given a DB instance or an `sqlite3*` pointer, and an - sqlite3 API result code. If the result code is not falsy, this - function throws an SQLite3Error with an error message from - sqlite3_errmsg(), using dbPtr as the db handle. Note that if it's - passed a non-error code like SQLITE_ROW or SQLITE_DONE, it will - still throw but the error string might be "Not an error." The - various non-0 non-error codes need to be checked for in client - code where they are expected. + Internal impl of the DB.selectArray() and + selectObject() methods. 
+ */ + const __selectFirstRow = (db, sql, bind, getArg)=>{ + let stmt, rc; + try { + stmt = db.prepare(sql).bind(bind); + if(stmt.step()) rc = stmt.get(getArg); + }finally{ + if(stmt) stmt.finalize(); + } + return rc; + }; + + /** + Expects to be given a DB instance or an `sqlite3*` pointer (may + be null) and an sqlite3 API result code. If the result code is + not falsy, this function throws an SQLite3Error with an error + message from sqlite3_errmsg(), using dbPtr as the db handle, or + sqlite3_errstr() if dbPtr is falsy. Note that if it's passed a + non-error code like SQLITE_ROW or SQLITE_DONE, it will still + throw but the error string might be "Not an error." The various + non-0 non-error codes need to be checked for in + client code where they are expected. */ - DB.checkRc = function(dbPtr, sqliteResultCode){ - if(sqliteResultCode){ - if(dbPtr instanceof DB) dbPtr = dbPtr.pointer; - throw new SQLite3Error([ - "sqlite result code",sqliteResultCode+":", - capi.sqlite3_errmsg(dbPtr) || "Unknown db error." - ].join(' ')); - } - }; + DB.checkRc = checkSqlite3Rc; DB.prototype = { + /** Returns true if this db handle is open, else false. */ + isOpen: function(){ + return !!this.pointer; + }, + /** Throws if this given DB has been closed, else returns `this`. */ + affirmOpen: function(){ + return affirmDbOpen(this); + }, /** Finalizes all open statements and closes this database connection. This is a no-op if the db has already been closed. After calling close(), `this.pointer` will resolve to `undefined`, so that can be used to check whether the db instance is still opened. + + If this.onclose.before is a function then it is called before + any close-related cleanup. + + If this.onclose.after is a function then it is called after the + db is closed but before auxiliary state like this.filename is + cleared. + + Both onclose handlers are passed this object. If this db is not + opened, neither of the handlers are called. Any exceptions the + handlers throw are ignored because "destructors must not + throw." + + Note that garbage collection of a db handle, if it happens at + all, will never trigger close(), so onclose handlers are not a + reliable way to implement close-time cleanup or maintenance of + a db. */ close: function(){ if(this.pointer){ + if(this.onclose && (this.onclose.before instanceof Function)){ + try{this.onclose.before(this)} + catch(e){/*ignore*/} + } const pDb = this.pointer; - let s; - const that = this; Object.keys(__stmtMap.get(this)).forEach((k,s)=>{ if(s && s.pointer) s.finalize(); }); - Object.values(__udfMap.get(this)).forEach( - capi.wasm.uninstallFunction.bind(capi.wasm) - ); __ptrMap.delete(this); __stmtMap.delete(this); - __udfMap.delete(this); capi.sqlite3_close_v2(pDb); + if(this.onclose && (this.onclose.after instanceof Function)){ + try{this.onclose.after(this)} + catch(e){/*ignore*/} + } delete this.filename; } }, /** Returns the number of changes, as per sqlite3_changes() @@ -298,236 +543,236 @@ ? capi.sqlite3_changes64(p) : capi.sqlite3_changes(p); } }, /** - Similar to this.filename but will return NULL for - special names like ":memory:". Not of much use until - we have filesystem support. Throws if the DB has - been closed. If passed an argument it then it will return - the filename of the ATTACHEd db with that name, else it assumes - a name of `main`. - */ - fileName: function(dbName){ - return capi.sqlite3_db_filename(affirmDbOpen(this).pointer, dbName||"main"); - }, - /** - Returns true if this db instance has a name which resolves to a - file. 
If the name is "" or ":memory:", it resolves to false. - Note that it is not aware of the peculiarities of URI-style - names and a URI-style name for a ":memory:" db will fool it. - */ - hasFilename: function(){ - const fn = this.filename; - if(!fn || ':memory'===fn) return false; - return true; + Similar to the this.filename but returns the + sqlite3_db_filename() value for the given database name, + defaulting to "main". The argument may be either a JS string + or a pointer to a WASM-allocated C-string. + */ + dbFilename: function(dbName='main'){ + return capi.sqlite3_db_filename(affirmDbOpen(this).pointer, dbName); }, /** Returns the name of the given 0-based db number, as documented for sqlite3_db_name(). */ dbName: function(dbNumber=0){ return capi.sqlite3_db_name(affirmDbOpen(this).pointer, dbNumber); }, + /** + Returns the name of the sqlite3_vfs used by the given database + of this connection (defaulting to 'main'). The argument may be + either a JS string or a WASM C-string. Returns undefined if the + given db name is invalid. Throws if this object has been + close()d. + */ + dbVfsName: function(dbName=0){ + let rc; + const pVfs = capi.sqlite3_js_db_vfs( + affirmDbOpen(this).pointer, dbName + ); + if(pVfs){ + const v = new capi.sqlite3_vfs(pVfs); + try{ rc = wasm.cstringToJs(v.$zName) } + finally { v.dispose() } + } + return rc; + }, /** Compiles the given SQL and returns a prepared Stmt. This is the only way to create new Stmt objects. Throws on error. - The given SQL must be a string, a Uint8Array holding SQL, or a - WASM pointer to memory holding the NUL-terminated SQL string. - If the SQL contains no statements, an SQLite3Error is thrown. + The given SQL must be a string, a Uint8Array holding SQL, a + WASM pointer to memory holding the NUL-terminated SQL string, + or an array of strings. In the latter case, the array is + concatenated together, with no separators, to form the SQL + string (arrays are often a convenient way to formulate long + statements). If the SQL contains no statements, an + SQLite3Error is thrown. Design note: the C API permits empty SQL, reporting it as a 0 result code and a NULL stmt pointer. Supporting that case here would cause extra work for all clients: any use of the Stmt API on such a statement will necessarily throw, so clients would be required to check `stmt.pointer` after calling `prepare()` in order to determine whether the Stmt instance is empty or not. Long-time practice (with other sqlite3 script bindings) - suggests that the empty-prepare case is sufficiently rare (and - useless) that supporting it here would simply hurt overall - usability. + suggests that the empty-prepare case is sufficiently rare that + supporting it here would simply hurt overall usability. 
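/* Editor's note: a hedged usage sketch, not part of this check-in, of
   the prepare()/step()/get()/finalize() cycle documented here,
   mirroring the internal __selectFirstRow() helper shown above. `db`
   is assumed to be an open oo1 DB instance and `bind` an optional
   value suitable for Stmt.bind(). */
const selectAllRowsSketch = function(db, sql, bind){
  const rows = [];
  let stmt;
  try{
    stmt = db.prepare(sql);
    if(undefined!==bind) stmt.bind(bind);
    while(stmt.step()) rows.push(stmt.get([])); // one array per result row
  }finally{
    if(stmt) stmt.finalize(); // always release the underlying sqlite3_stmt
  }
  return rows;
};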
*/ prepare: function(sql){ affirmDbOpen(this); - const stack = capi.wasm.scopedAllocPush(); + const stack = wasm.pstack.pointer; let ppStmt, pStmt; try{ - ppStmt = capi.wasm.scopedAllocPtr()/* output (sqlite3_stmt**) arg */; + ppStmt = wasm.pstack.alloc(8)/* output (sqlite3_stmt**) arg */; DB.checkRc(this, capi.sqlite3_prepare_v2(this.pointer, sql, -1, ppStmt, null)); - pStmt = capi.wasm.getMemValue(ppStmt, '*'); + pStmt = wasm.getPtrValue(ppStmt); } - finally {capi.wasm.scopedAllocPop(stack)} + finally { + wasm.pstack.restore(stack); + } if(!pStmt) toss3("Cannot prepare empty SQL."); const stmt = new Stmt(this, pStmt, BindTypes); __stmtMap.get(this)[pStmt] = stmt; return stmt; }, /** - This function works like execMulti(), and takes most of the - same arguments, but is more efficient (performs much less - work) when the input SQL is only a single statement. If - passed a multi-statement SQL, it only processes the first - one. - - This function supports the following additional options not - supported by execMulti(): - - - .multi: if true, this function acts as a proxy for - execMulti() and behaves identically to that function. - - - .columnNames: if this is an array and the query has - result columns, the array is passed to - Stmt.getColumnNames() to append the column names to it - (regardless of whether the query produces any result - rows). If the query has no result columns, this value is - unchanged. - - The following options to execMulti() are _not_ supported by - this method (they are simply ignored): - - - .saveSql - */ - exec: function(/*(sql [,optionsObj]) or (optionsObj)*/){ - affirmDbOpen(this); - const arg = parseExecArgs(arguments); - if(!arg.sql) return this; - else if(arg.opt.multi){ - return this.execMulti(arg, undefined, BindTypes); - } - const opt = arg.opt; - let stmt, rowTarget; - try { - if(Array.isArray(opt.resultRows)){ - rowTarget = opt.resultRows; - } - stmt = this.prepare(arg.sql); - if(stmt.columnCount && Array.isArray(opt.columnNames)){ - stmt.getColumnNames(opt.columnNames); - } - if(opt.bind) stmt.bind(opt.bind); - if(opt.callback || rowTarget){ - while(stmt.step()){ - const row = arg.cbArg(stmt); - if(rowTarget) rowTarget.push(row); - if(opt.callback){ - stmt._isLocked = true; - opt.callback(row, stmt); - stmt._isLocked = false; - } - } - }else{ - stmt.step(); - } - }finally{ - if(stmt){ - delete stmt._isLocked; - stmt.finalize(); - } - } - return this; - }/*exec()*/, - /** Executes one or more SQL statements in the form of a single string. Its arguments must be either (sql,optionsObject) or - (optionsObject). In the latter case, optionsObject.sql - must contain the SQL to execute. Returns this - object. Throws on error. + (optionsObject). In the latter case, optionsObject.sql must + contain the SQL to execute. By default it returns this object + but that can be changed via the `returnValue` option as + described below. Throws on error. If no SQL is provided, or a non-string is provided, an exception is triggered. Empty SQL, on the other hand, is simply a no-op. The optional options object may contain any of the following properties: - - .sql = the SQL to run (unless it's provided as the first - argument). This must be of type string, Uint8Array, or an - array of strings (in which case they're concatenated - together as-is, with no separator between elements, - before evaluation). - - - .bind = a single value valid as an argument for - Stmt.bind(). This is ONLY applied to the FIRST non-empty - statement in the SQL which has any bindable - parameters. 
(Empty statements are skipped entirely.) - - - .callback = a function which gets called for each row of - the FIRST statement in the SQL which has result - _columns_, but only if that statement has any result - _rows_. The second argument passed to the callback is - always the current Stmt object (so that the caller may - collect column names, or similar). The first argument - passed to the callback defaults to the current Stmt - object but may be changed with ... - - - .rowMode = either a string describing what type of argument - should be passed as the first argument to the callback or an - integer representing a result column index. A `rowMode` of - 'object' causes the results of `stmt.get({})` to be passed to - the `callback` and/or appended to `resultRows`. A value of - 'array' causes the results of `stmt.get([])` to be passed to - passed on. A value of 'stmt' is equivalent to the default, - passing the current Stmt to the callback (noting that it's - always passed as the 2nd argument), but this mode will trigger - an exception if `resultRows` is an array. If `rowMode` is an - integer, only the single value from that result column will be - passed on. Any other value for the option triggers an - exception. - - - .resultRows: if this is an array, it functions similarly to - the `callback` option: each row of the result set (if any) of - the FIRST first statement which has result _columns_ is - appended to the array in the format specified for the `rowMode` - option, with the exception that the only legal values for - `rowMode` in this case are 'array' or 'object', neither of - which is the default. It is legal to use both `resultRows` and - `callback`, but `resultRows` is likely much simpler to use for - small data sets and can be used over a WebWorker-style message - interface. execMulti() throws if `resultRows` is set and - `rowMode` is 'stmt' (which is the default!). - - - saveSql = an optional array. If set, the SQL of each + - `sql` = the SQL to run (unless it's provided as the first + argument). This must be of type string, Uint8Array, or an array + of strings. In the latter case they're concatenated together + as-is, _with no separator_ between elements, before evaluation. + The array form is often simpler for long hand-written queries. + + - `bind` = a single value valid as an argument for + Stmt.bind(). This is _only_ applied to the _first_ non-empty + statement in the SQL which has any bindable parameters. (Empty + statements are skipped entirely.) + + - `saveSql` = an optional array. If set, the SQL of each executed statement is appended to this array before the - statement is executed (but after it is prepared - we - don't have the string until after that). Empty SQL - statements are elided. - - See also the exec() method, which is a close cousin of this - one. - - ACHTUNG #1: The callback MUST NOT modify the Stmt - object. Calling any of the Stmt.get() variants, - Stmt.getColumnName(), or similar, is legal, but calling - step() or finalize() is not. Routines which are illegal - in this context will trigger an exception. - - ACHTUNG #2: The semantics of the `bind` and `callback` - options may well change or those options may be removed - altogether for this function (but retained for exec()). - Generally speaking, neither bind parameters nor a callback - are generically useful when executing multi-statement SQL. + statement is executed (but after it is prepared - we don't have + the string until after that). 
Empty SQL statements are elided + but can have odd effects in the output. e.g. SQL of: `"select + 1; -- empty\n; select 2"` will result in an array containing + `["select 1;", "--empty \n; select 2"]`. That's simply how + sqlite3 records the SQL for the 2nd statement. + + ================================================================== + The following options apply _only_ to the _first_ statement + which has a non-zero result column count, regardless of whether + the statement actually produces any result rows. + ================================================================== + + - `columnNames`: if this is an array, the column names of the + result set are stored in this array before the callback (if + any) is triggered (regardless of whether the query produces any + result rows). If no statement has result columns, this value is + unchanged. Achtung: an SQL result may have multiple columns + with identical names. + + - `callback` = a function which gets called for each row of + the result set, but only if that statement has any result + _rows_. The callback's "this" is the options object, noting + that this function synthesizes one if the caller does not pass + one to exec(). The second argument passed to the callback is + always the current Stmt object, as it's needed if the caller + wants to fetch the column names or some such (noting that they + could also be fetched via `this.columnNames`, if the client + provides the `columnNames` option). + + ACHTUNG: The callback MUST NOT modify the Stmt object. Calling + any of the Stmt.get() variants, Stmt.getColumnName(), or + similar, is legal, but calling step() or finalize() is + not. Member methods which are illegal in this context will + trigger an exception. + + The first argument passed to the callback defaults to an array of + values from the current result row but may be changed with ... + + - `rowMode` = specifies the type of he callback's first argument. + It may be any of... + + A) A string describing what type of argument should be passed + as the first argument to the callback: + + A.1) `'array'` (the default) causes the results of + `stmt.get([])` to be passed to the `callback` and/or appended + to `resultRows` + + A.2) `'object'` causes the results of + `stmt.get(Object.create(null))` to be passed to the + `callback` and/or appended to `resultRows`. Achtung: an SQL + result may have multiple columns with identical names. In + that case, the right-most column will be the one set in this + object! + + A.3) `'stmt'` causes the current Stmt to be passed to the + callback, but this mode will trigger an exception if + `resultRows` is an array because appending the statement to + the array would be downright unhelpful. + + B) An integer, indicating a zero-based column in the result + row. Only that one single value will be passed on. + + C) A string with a minimum length of 2 and leading character of + ':', '$', or '@' will fetch the row as an object, extract that + one field, and pass that field's value to the callback. Note + that these keys are case-sensitive so must match the case used + in the SQL. e.g. `"select a A from t"` with a `rowMode` of + `'$A'` would work but `'$a'` would not. A reference to a column + not in the result set will trigger an exception on the first + row (as the check is not performed until rows are fetched). + Note also that `$` is a legal identifier character in JS so + need not be quoted. 
(Design note: those 3 characters were + chosen because they are the characters support for naming bound + parameters.) + + Any other `rowMode` value triggers an exception. + + - `resultRows`: if this is an array, it functions similarly to + the `callback` option: each row of the result set (if any), + with the exception that the `rowMode` 'stmt' is not legal. It + is legal to use both `resultRows` and `callback`, but + `resultRows` is likely much simpler to use for small data sets + and can be used over a WebWorker-style message interface. + exec() throws if `resultRows` is set and `rowMode` is 'stmt'. + + - `returnValue`: is a string specifying what this function + should return: + + A) The default value is `"this"`, meaning that the + DB object itself should be returned. + + B) `"resultRows"` means to return the value of the + `resultRows` option. If `resultRows` is not set, this + function behaves as if it were set to an empty array. + + C) `"saveSql"` means to return the value of the + `saveSql` option. If `saveSql` is not set, this + function behaves as if it were set to an empty array. + + Potential TODOs: + + - `bind`: permit an array of arrays/objects to bind. The first + sub-array would act on the first statement which has bindable + parameters (as it does now). The 2nd would act on the next such + statement, etc. + + - `callback` and `resultRows`: permit an array entries with + semantics similar to those described for `bind` above. + */ - execMulti: function(/*(sql [,obj]) || (obj)*/){ + exec: function(/*(sql [,obj]) || (obj)*/){ affirmDbOpen(this); - const wasm = capi.wasm; - const arg = (BindTypes===arguments[2] - /* ^^^ Being passed on from exec() */ - ? arguments[0] : parseExecArgs(arguments)); - if(!arg.sql) return this; + const arg = parseExecArgs(this, arguments); + if(!arg.sql){ + return toss3("exec() requires an SQL string."); + } const opt = arg.opt; const callback = opt.callback; - const resultRows = (Array.isArray(opt.resultRows) - ? opt.resultRows : undefined); - if(resultRows && 'stmt'===opt.rowMode){ - toss3("rowMode 'stmt' is not valid in combination", - "with a resultRows array."); - } - let rowMode = (((callback||resultRows) && (undefined!==opt.rowMode)) - ? opt.rowMode : undefined); + const resultRows = + Array.isArray(opt.resultRows) ? opt.resultRows : undefined; let stmt; let bind = opt.bind; + let evalFirstResult = !!(arg.cbArg || opt.columnNames) /* true to evaluate the first result-returning query */; const stack = wasm.scopedAllocPush(); try{ const isTA = util.isSQLableTypedArray(arg.sql) /* Optimization: if the SQL is a TypedArray we can save some string conversion costs. */; @@ -542,25 +787,25 @@ let pSql = pzTail + wasm.ptrSizeof; const pSqlEnd = pSql + sqlByteLen; if(isTA) wasm.heap8().set(arg.sql, pSql); else wasm.jstrcpy(arg.sql, wasm.heap8(), pSql, sqlByteLen, false); wasm.setMemValue(pSql + sqlByteLen, 0/*NUL terminator*/); - while(wasm.getMemValue(pSql, 'i8') - /* Maintenance reminder: ^^^^ _must_ be i8 or else we + while(pSql && wasm.getMemValue(pSql, 'i8') + /* Maintenance reminder:^^^ _must_ be 'i8' or else we will very likely cause an endless loop. What that's doing is checking for a terminating NUL byte. If we use i32 or similar then we read 4 bytes, read stuff around the NUL terminator, and get stuck in and endless loop at the end of the SQL, endlessly re-preparing an empty statement. 
*/ ){ - wasm.setMemValue(ppStmt, 0, wasm.ptrIR); - wasm.setMemValue(pzTail, 0, wasm.ptrIR); - DB.checkRc(this, capi.sqlite3_prepare_v2( - this.pointer, pSql, sqlByteLen, ppStmt, pzTail + wasm.setPtrValue(ppStmt, 0); + wasm.setPtrValue(pzTail, 0); + DB.checkRc(this, capi.sqlite3_prepare_v3( + this.pointer, pSql, sqlByteLen, 0, ppStmt, pzTail )); - const pStmt = wasm.getMemValue(ppStmt, wasm.ptrIR); - pSql = wasm.getMemValue(pzTail, wasm.ptrIR); + const pStmt = wasm.getPtrValue(ppStmt); + pSql = wasm.getPtrValue(pzTail); sqlByteLen = pSqlEnd - pSql; if(!pStmt) continue; if(Array.isArray(opt.saveSql)){ opt.saveSql.push(capi.sqlite3_sql(pStmt).trim()); } @@ -567,40 +812,42 @@ stmt = new Stmt(this, pStmt, BindTypes); if(bind && stmt.parameterCount){ stmt.bind(bind); bind = null; } - if(stmt.columnCount && undefined!==rowMode){ + if(evalFirstResult && stmt.columnCount){ /* Only forward SELECT results for the FIRST query in the SQL which potentially has them. */ - while(stmt.step()){ + evalFirstResult = false; + if(Array.isArray(opt.columnNames)){ + stmt.getColumnNames(opt.columnNames); + } + while(!!arg.cbArg && stmt.step()){ stmt._isLocked = true; const row = arg.cbArg(stmt); - if(callback) callback(row, stmt); if(resultRows) resultRows.push(row); + if(callback) callback.call(opt, row, stmt); stmt._isLocked = false; } - rowMode = undefined; }else{ - // Do we need to while(stmt.step()){} here? stmt.step(); } stmt.finalize(); stmt = null; } - }catch(e){ - console.warn("DB.execMulti() is propagating exception",opt,e); + }/*catch(e){ + console.warn("DB.exec() is propagating exception",opt,e); throw e; - }finally{ + }*/finally{ if(stmt){ delete stmt._isLocked; stmt.finalize(); } wasm.scopedAllocPop(stack); } - return this; - }/*execMulti()*/, + return arg.returnVal(); + }/*exec()*/, /** Creates a new scalar UDF (User-Defined Function) which is accessible via SQL code. This function may be called in any of the following forms: @@ -608,182 +855,173 @@ - (name, function, optionsObject) - (name, optionsObject) - (optionsObject) In the final two cases, the function must be defined as the - 'callback' property of the options object. In the final + `callback` property of the options object (optionally called + `xFunc` to align with the C API documentation). In the final case, the function's name must be the 'name' property. - This can only be used to create scalar functions, not - aggregate or window functions. UDFs cannot be removed from - a DB handle after they're added. + The first two call forms can only be used for creating scalar + functions. Creating an aggregate or window function requires + the options-object form (see below for details). + + UDFs cannot currently be removed from a DB handle after they're + added. More correctly, they can be removed as documented for + sqlite3_create_function_v2(), but doing so will "leak" the + JS-created WASM binding of those functions. On success, returns this object. Throws on error. - When called from SQL, arguments to the UDF, and its result, - will be converted between JS and SQL with as much fidelity - as is feasible, triggering an exception if a type - conversion cannot be determined. Some freedom is afforded - to numeric conversions due to friction between the JS and C - worlds: integers which are larger than 32 bits will be - treated as doubles, as JS does not support 64-bit integers - and it is (as of this writing) illegal to use WASM - functions which take or return 64-bit integers from JS. 
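(An aside, tying together the exec() interface documented and implemented above: the following sketch assumes `db` is an open DB instance and a hypothetical table `t(a,b)`.)

```
db.exec([
  "CREATE TABLE IF NOT EXISTS t(a,b);",
  "INSERT INTO t(a,b) VALUES(1,2),(3,4)"
]);
const rows = [], colNames = [];
db.exec({
  sql: "SELECT a, b FROM t WHERE a > ?",
  bind: [0],             // bound to the first statement with parameters
  rowMode: 'object',     // rows arrive as {columnName: value} objects
  columnNames: colNames, // populated before the first callback
  resultRows: rows,      // each row is also appended here
  callback: function(row){
    // "this" is the options object, so this.columnNames === colNames.
    console.log(row, this.columnNames);
  }
});
// rowMode as a '$'-prefixed column name plus returnValue='resultRows':
const aValues = db.exec({
  sql: "SELECT a FROM t ORDER BY a",
  rowMode: '$a',
  returnValue: 'resultRows' // e.g. [1, 3] instead of the db object
});
```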
- - The optional options object may contain flags to modify how + When called from SQL arguments to the UDF, and its result, + will be converted between JS and SQL with as much fidelity as + is feasible, triggering an exception if a type conversion + cannot be determined. The docs for sqlite3_create_function_v2() + describe the conversions in more detail. + + The values set in the options object differ for scalar and + aggregate functions: + + - Scalar: set the `xFunc` function-type property to the UDF + function. + + - Aggregate: set the `xStep` and `xFinal` function-type + properties to the "step" and "final" callbacks for the + aggregate. Do not set the `xFunc` property. + + - Window: set the `xStep`, `xFinal`, `xValue`, and `xInverse` + function-type properties. Do not set the `xFunc` property. + + The options object may optionally have an `xDestroy` + function-type property, as per sqlite3_create_function_v2(). + Its argument will be the WASM-pointer-type value of the `pApp` + property, and this function will throw if `pApp` is defined but + is not null, undefined, or a numeric (WASM pointer) + value. i.e. `pApp`, if set, must be value suitable for use as a + WASM pointer argument, noting that `null` or `undefined` will + translate to 0 for that purpose. + + The options object may contain flags to modify how the function is defined: - - .arity: the number of arguments which SQL calls to this - function expect or require. The default value is the - callback's length property (i.e. the number of declared - parameters it has). A value of -1 means that the function - is variadic and may accept any number of arguments, up to - sqlite3's compile-time limits. sqlite3 will enforce the - argument count if is zero or greater. - - The following properties correspond to flags documented at: + - `arity`: the number of arguments which SQL calls to this + function expect or require. The default value is `xFunc.length` + or `xStep.length` (i.e. the number of declared parameters it + has) **MINUS 1** (see below for why). As a special case, if the + `length` is 0, its arity is also 0 instead of -1. A negative + arity value means that the function is variadic and may accept + any number of arguments, up to sqlite3's compile-time + limits. sqlite3 will enforce the argument count if is zero or + greater. The callback always receives a pointer to an + `sqlite3_context` object as its first argument. Any arguments + after that are from SQL code. The leading context argument does + _not_ count towards the function's arity. See the docs for + sqlite3.capi.sqlite3_create_function_v2() for why that argument + is needed in the interface. + + The following options-object properties correspond to flags + documented at: https://sqlite.org/c3ref/create_function.html - - .deterministic = SQLITE_DETERMINISTIC - - .directOnly = SQLITE_DIRECTONLY - - .innocuous = SQLITE_INNOCUOUS - - Maintenance reminder: the ability to add new - WASM-accessible functions to the runtime requires that the - WASM build is compiled with emcc's `-sALLOW_TABLE_GROWTH` - flag. - */ - createFunction: function f(name, callback,opt){ + - `deterministic` = sqlite3.capi.SQLITE_DETERMINISTIC + - `directOnly` = sqlite3.capi.SQLITE_DIRECTONLY + - `innocuous` = sqlite3.capi.SQLITE_INNOCUOUS + + Sidebar: the ability to add new WASM-accessible functions to + the runtime requires that the WASM build is compiled with the + equivalent functionality as that provided by Emscripten's + `-sALLOW_TABLE_GROWTH` flag. 
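For illustration, a sketch of both call forms (assuming `db` is an open DB instance; the UDF and table names are arbitrary):

```
// Scalar UDF. Note the leading sqlite3_context pointer argument, which
// does not count towards the function's arity (here: arity 1).
db.createFunction({
  name: 'my_twice',
  deterministic: true,
  xFunc: (pCx, x)=>x * 2
});
db.selectValue("SELECT my_twice(21)"); // ==> 42

// Aggregate UDF via the (name, optionsObject) form. For simplicity this
// sketch keeps its state in a closure variable, which is only safe while
// the aggregate is not used in overlapping contexts (e.g. GROUP BY).
let sum = 0;
db.createFunction('my_sum', {
  xStep: (pCx, v)=>{ sum += v; },
  xFinal: (pCx)=>{ const rc = sum; sum = 0; return rc; }
});
db.selectValue("SELECT my_sum(a) FROM t"); // hypothetical table t
```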
+ */ + createFunction: function f(name, xFunc, opt){ + const isFunc = (f)=>(f instanceof Function); switch(arguments.length){ case 1: /* (optionsObject) */ opt = name; name = opt.name; - callback = opt.callback; + xFunc = opt.xFunc || 0; break; case 2: /* (name, callback|optionsObject) */ - if(!(callback instanceof Function)){ - opt = callback; - callback = opt.callback; + if(!isFunc(xFunc)){ + opt = xFunc; + xFunc = opt.xFunc || 0; } + break; + case 3: /* name, xFunc, opt */ break; default: break; } if(!opt) opt = {}; - if(!(callback instanceof Function)){ - toss3("Invalid arguments: expecting a callback function."); - }else if('string' !== typeof name){ + if('string' !== typeof name){ toss3("Invalid arguments: missing function name."); } - if(!f._extractArgs){ - /* Static init */ - f._extractArgs = function(argc, pArgv){ - let i, pVal, valType, arg; - const tgt = []; - for(i = 0; i < argc; ++i){ - pVal = capi.wasm.getMemValue(pArgv + (capi.wasm.ptrSizeof * i), - capi.wasm.ptrIR); - /** - Curiously: despite ostensibly requiring 8-byte - alignment, the pArgv array is parcelled into chunks of - 4 bytes (1 pointer each). The values those point to - have 8-byte alignment but the individual argv entries - do not. - */ - valType = capi.sqlite3_value_type(pVal); - switch(valType){ - case capi.SQLITE_INTEGER: - case capi.SQLITE_FLOAT: - arg = capi.sqlite3_value_double(pVal); - break; - case capi.SQLITE_TEXT: - arg = capi.sqlite3_value_text(pVal); - break; - case capi.SQLITE_BLOB:{ - const n = capi.sqlite3_value_bytes(pVal); - const pBlob = capi.sqlite3_value_blob(pVal); - arg = new Uint8Array(n); - let i; - const heap = n ? capi.wasm.heap8() : false; - for(i = 0; i < n; ++i) arg[i] = heap[pBlob+i]; - break; - } - case capi.SQLITE_NULL: - arg = null; break; - default: - toss3("Unhandled sqlite3_value_type()",valType, - "is possibly indicative of incorrect", - "pointer size assumption."); - } - tgt.push(arg); - } - return tgt; - }/*_extractArgs()*/; - f._setResult = function(pCx, val){ - switch(typeof val) { - case 'boolean': - capi.sqlite3_result_int(pCx, val ? 1 : 0); - break; - case 'number': { - (util.isInt32(val) - ? 
capi.sqlite3_result_int - : capi.sqlite3_result_double)(pCx, val); - break; - } - case 'string': - capi.sqlite3_result_text(pCx, val, -1, capi.SQLITE_TRANSIENT); - break; - case 'object': - if(null===val) { - capi.sqlite3_result_null(pCx); - break; - }else if(util.isBindableTypedArray(val)){ - const pBlob = capi.wasm.mallocFromTypedArray(val); - capi.sqlite3_result_blob(pCx, pBlob, val.byteLength, - capi.SQLITE_TRANSIENT); - capi.wasm.dealloc(pBlob); - break; - } - // else fall through - default: - toss3("Don't not how to handle this UDF result value:",val); - }; - }/*_setResult()*/; - }/*static init*/ - const wrapper = function(pCx, argc, pArgv){ - try{ - f._setResult(pCx, callback.apply(null, f._extractArgs(argc, pArgv))); - }catch(e){ - if(e instanceof capi.WasmAllocError){ - capi.sqlite3_result_error_nomem(pCx); - }else{ - capi.sqlite3_result_error(pCx, e.message, -1); - } - } - }; - const pUdf = capi.wasm.installFunction(wrapper, "v(iii)"); + let xStep = opt.xStep || 0; + let xFinal = opt.xFinal || 0; + const xValue = opt.xValue || 0; + const xInverse = opt.xInverse || 0; + let isWindow = undefined; + if(isFunc(xFunc)){ + isWindow = false; + if(isFunc(xStep) || isFunc(xFinal)){ + toss3("Ambiguous arguments: scalar or aggregate?"); + } + xStep = xFinal = null; + }else if(isFunc(xStep)){ + if(!isFunc(xFinal)){ + toss3("Missing xFinal() callback for aggregate or window UDF."); + } + xFunc = null; + }else if(isFunc(xFinal)){ + toss3("Missing xStep() callback for aggregate or window UDF."); + }else{ + toss3("Missing function-type properties."); + } + if(false === isWindow){ + if(isFunc(xValue) || isFunc(xInverse)){ + toss3("xValue and xInverse are not permitted for non-window UDFs."); + } + }else if(isFunc(xValue)){ + if(!isFunc(xInverse)){ + toss3("xInverse must be provided if xValue is."); + } + isWindow = true; + }else if(isFunc(xInverse)){ + toss3("xValue must be provided if xInverse is."); + } + const pApp = opt.pApp; + if(undefined!==pApp && + null!==pApp && + (('number'!==typeof pApp) || !util.isInt32(pApp))){ + toss3("Invalid value for pApp property. Must be a legal WASM pointer value."); + } + const xDestroy = opt.xDestroy || 0; + if(xDestroy && !isFunc(xDestroy)){ + toss3("xDestroy property must be a function."); + } let fFlags = 0 /*flags for sqlite3_create_function_v2()*/; if(getOwnOption(opt, 'deterministic')) fFlags |= capi.SQLITE_DETERMINISTIC; if(getOwnOption(opt, 'directOnly')) fFlags |= capi.SQLITE_DIRECTONLY; if(getOwnOption(opt, 'innocuous')) fFlags |= capi.SQLITE_INNOCUOUS; name = name.toLowerCase(); - try { - DB.checkRc(this, capi.sqlite3_create_function_v2( - this.pointer, name, - (opt.hasOwnProperty('arity') ? +opt.arity : callback.length), - capi.SQLITE_UTF8 | fFlags, null/*pApp*/, pUdf, - null/*xStep*/, null/*xFinal*/, null/*xDestroy*/)); - }catch(e){ - capi.wasm.uninstallFunction(pUdf); - throw e; - } - const udfMap = __udfMap.get(this); - if(udfMap[name]){ - try{capi.wasm.uninstallFunction(udfMap[name])} - catch(e){/*ignore*/} - } - udfMap[name] = pUdf; + const xArity = xFunc || xStep; + const arity = getOwnOption(opt, 'arity'); + const arityArg = ('number'===typeof arity + ? arity + : (xArity.length ? 
xArity.length-1/*for pCtx arg*/ : 0)); + let rc; + if( isWindow ){ + rc = capi.sqlite3_create_window_function( + this.pointer, name, arityArg, + capi.SQLITE_UTF8 | fFlags, pApp || 0, + xStep, xFinal, xValue, xInverse, xDestroy); + }else{ + rc = capi.sqlite3_create_function_v2( + this.pointer, name, arityArg, + capi.SQLITE_UTF8 | fFlags, pApp || 0, + xFunc, xStep, xFinal, xDestroy); + } + DB.checkRc(this, rc); return this; }/*createFunction()*/, /** Prepares the given SQL, step()s it one time, and returns the value of the first result column. If it has no results, @@ -796,11 +1034,11 @@ If passed a 3rd argument, it is expected to be one of the SQLITE_{typename} constants. Passing the undefined value is the same as not passing a value. - Throws on error (e.g. malformedSQL). + Throws on error (e.g. malformed SQL). */ selectValue: function(sql,bind,asType){ let stmt, rc; try { stmt = this.prepare(sql).bind(bind); @@ -808,10 +1046,42 @@ }finally{ if(stmt) stmt.finalize(); } return rc; }, + /** + Prepares the given SQL, step()s it one time, and returns an + array containing the values of the first result row. If it has + no results, `undefined` is returned. + + If passed a second argument other than `undefined`, it is + treated like an argument to Stmt.bind(), so may be any type + supported by that function. + + Throws on error (e.g. malformed SQL). + */ + selectArray: function(sql,bind){ + return __selectFirstRow(this, sql, bind, []); + }, + + /** + Prepares the given SQL, step()s it one time, and returns an + object containing the key/value pairs of the first result + row. If it has no results, `undefined` is returned. + + Note that the order of returned object's keys is not guaranteed + to be the same as the order of the fields in the query string. + + If passed a second argument other than `undefined`, it is + treated like an argument to Stmt.bind(), so may be any type + supported by that function. + + Throws on error (e.g. malformed SQL). + */ + selectObject: function(sql,bind){ + return __selectFirstRow(this, sql, bind, {}); + }, /** Returns the number of currently-opened Stmt handles for this db handle, or 0 if this DB instance is closed. */ @@ -818,52 +1088,50 @@ openStatementCount: function(){ return this.pointer ? Object.keys(__stmtMap.get(this)).length : 0; }, /** - This function currently does nothing and always throws. It - WILL BE REMOVED pending other refactoring, to eliminate a hard - dependency on Emscripten. This feature will be moved into a - higher-level API or a runtime-configurable feature. - - That said, what its replacement should eventually do is... - - Exports a copy of this db's file as a Uint8Array and - returns it. It is technically not legal to call this while - any prepared statement are currently active because, - depending on the platform, it might not be legal to read - the db while a statement is locking it. Throws if this db - is not open or has any opened statements. - - The resulting buffer can be passed to this class's - constructor to restore the DB. - - Maintenance reminder: the corresponding sql.js impl of this - feature closes the current db, finalizing any active - statements and (seemingly unnecessarily) destroys any UDFs, - copies the file, and then re-opens it (without restoring - the UDFs). Those gymnastics are not necessary on the tested - platform but might be necessary on others. Because of that - eventuality, this interface currently enforces that no - statements are active when this is run. It will throw if - any are. 
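As a usage sketch for the selectValue()/selectArray()/selectObject() convenience wrappers above and the transaction()/savepoint() helpers documented next (assuming `db` is an open DB instance and a hypothetical table `t(a,b)`):

```
db.transaction(function(D){
  // D === db. Throwing from here rolls the transaction back.
  D.exec("INSERT INTO t(a,b) VALUES(5,6)");
});
const count    = db.selectValue("SELECT count(*) FROM t");     // single value
const firstRow = db.selectArray("SELECT a,b FROM t LIMIT 1");  // e.g. [5, 6]
const asObject = db.selectObject("SELECT a,b FROM t LIMIT 1"); // e.g. {a:5, b:6}
const nothing  = db.selectArray("SELECT a FROM t WHERE 0");    // ==> undefined
// The optional 2nd argument is passed to Stmt.bind():
const b = db.selectValue("SELECT b FROM t WHERE a=?", [5]);
```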
+ Starts a transaction, calls the given callback, and then either + rolls back or commits the savepoint, depending on whether the + callback throws. The callback is passed this db object as its + only argument. On success, returns the result of the + callback. Throws on error. + + Note that transactions may not be nested, so this will throw if + it is called recursively. For nested transactions, use the + savepoint() method or manually manage SAVEPOINTs using exec(). + */ + transaction: function(callback){ + affirmDbOpen(this).exec("BEGIN"); + try { + const rc = callback(this); + this.exec("COMMIT"); + return rc; + }catch(e){ + this.exec("ROLLBACK"); + throw e; + } + }, + + /** + This works similarly to transaction() but uses sqlite3's SAVEPOINT + feature. This function starts a savepoint (with an unspecified name) + and calls the given callback function, passing it this db object. + If the callback returns, the savepoint is released (committed). If + the callback throws, the savepoint is rolled back. If it does not + throw, it returns the result of the callback. */ - exportBinaryImage: function(){ - toss3("exportBinaryImage() is slated for removal for portability reasons."); - /*********************** - The following is currently kept only for reference when - porting to some other layer, noting that we may well not be - able to implement this, at this level, when using the OPFS - VFS because of its exclusive locking policy. - - affirmDbOpen(this); - if(this.openStatementCount()>0){ - toss3("Cannot export with prepared statements active!", - "finalize() all statements and try again."); - } - return MODCFG.FS.readFile(this.filename, {encoding:"binary"}); - ***********************/ + savepoint: function(callback){ + affirmDbOpen(this).exec("SAVEPOINT oo1"); + try { + const rc = callback(this); + this.exec("RELEASE oo1"); + return rc; + }catch(e){ + this.exec("ROLLBACK to SAVEPOINT oo1; RELEASE SAVEPOINT oo1"); + throw e; + } } }/*DB.prototype*/; /** Throws if the given Stmt has been finalized, else stmt is @@ -884,11 +1152,11 @@ case BindTypes.null: case BindTypes.number: case BindTypes.string: return t; case BindTypes.bigint: - if(capi.wasm.bigIntEnabled) return t; + if(wasm.bigIntEnabled) return t; /* else fall through */ default: //console.log("isSupportedBindType",t,v); return util.isBindableTypedArray(v) ? BindTypes.blob : undefined; } @@ -944,45 +1212,44 @@ success. */ const bindOne = function f(stmt,ndx,bindType,val){ affirmUnlocked(stmt, 'bind()'); if(!f._){ - if(capi.wasm.bigIntEnabled){ - f._maxInt = BigInt("0x7fffffffffffffff"); - f._minInt = ~f._maxInt; - } + f._tooBigInt = (v)=>toss3( + "BigInt value is too big to store without precision loss:", v + ); /* Reminder: when not in BigInt mode, it's impossible for JS to represent a number out of the range we can bind, so we have no range checking. */ f._ = { string: function(stmt, ndx, val, asBlob){ if(1){ /* _Hypothetically_ more efficient than the impl in the 'else' block. */ - const stack = capi.wasm.scopedAllocPush(); + const stack = wasm.scopedAllocPush(); try{ - const n = capi.wasm.jstrlen(val); - const pStr = capi.wasm.scopedAlloc(n); - capi.wasm.jstrcpy(val, capi.wasm.heap8u(), pStr, n, false); + const n = wasm.jstrlen(val); + const pStr = wasm.scopedAlloc(n); + wasm.jstrcpy(val, wasm.heap8u(), pStr, n, false); const f = asBlob ? 
capi.sqlite3_bind_blob : capi.sqlite3_bind_text; return f(stmt.pointer, ndx, pStr, n, capi.SQLITE_TRANSIENT); }finally{ - capi.wasm.scopedAllocPop(stack); + wasm.scopedAllocPop(stack); } }else{ - const bytes = capi.wasm.jstrToUintArray(val,false); - const pStr = capi.wasm.alloc(bytes.length || 1); - capi.wasm.heap8u().set(bytes.length ? bytes : [0], pStr); + const bytes = wasm.jstrToUintArray(val,false); + const pStr = wasm.alloc(bytes.length || 1); + wasm.heap8u().set(bytes.length ? bytes : [0], pStr); try{ const f = asBlob ? capi.sqlite3_bind_blob : capi.sqlite3_bind_text; return f(stmt.pointer, ndx, pStr, bytes.length, capi.SQLITE_TRANSIENT); }finally{ - capi.wasm.dealloc(pStr); + wasm.dealloc(pStr); } } } }; - } + }/* static init */ affirmSupportedBindType(val); ndx = affirmParamIndex(stmt,ndx); let rc = 0; switch((null===val || undefined===val) ? BindTypes.null : bindType){ case BindTypes.null: @@ -992,19 +1259,28 @@ rc = f._.string(stmt, ndx, val, false); break; case BindTypes.number: { let m; if(util.isInt32(val)) m = capi.sqlite3_bind_int; - else if(capi.wasm.bigIntEnabled && ('bigint'===typeof val)){ - if(valf._maxInt){ - toss3("BigInt value is out of range for int64: "+val); - } - m = capi.sqlite3_bind_int64; - }else if(Number.isInteger(val)){ - m = capi.sqlite3_bind_int64; - }else{ - m = capi.sqlite3_bind_double; + else if('bigint'===typeof val){ + if(!util.bigIntFits64(val)){ + f._tooBigInt(val); + }else if(wasm.bigIntEnabled){ + m = capi.sqlite3_bind_int64; + }else if(util.bigIntFitsDouble(val)){ + val = Number(val); + m = capi.sqlite3_bind_double; + }else{ + f._tooBigInt(val); + } + }else{ // !int32, !bigint + val = Number(val); + if(wasm.bigIntEnabled && Number.isInteger(val)){ + m = capi.sqlite3_bind_int64; + }else{ + m = capi.sqlite3_bind_double; + } } rc = m(stmt.pointer, ndx, val); break; } case BindTypes.boolean: @@ -1016,35 +1292,35 @@ }else if(!util.isBindableTypedArray(val)){ toss3("Binding a value as a blob requires", "that it be a string, Uint8Array, or Int8Array."); }else if(1){ /* _Hypothetically_ more efficient than the impl in the 'else' block. */ - const stack = capi.wasm.scopedAllocPush(); + const stack = wasm.scopedAllocPush(); try{ - const pBlob = capi.wasm.scopedAlloc(val.byteLength || 1); - capi.wasm.heap8().set(val.byteLength ? val : [0], pBlob) + const pBlob = wasm.scopedAlloc(val.byteLength || 1); + wasm.heap8().set(val.byteLength ? val : [0], pBlob) rc = capi.sqlite3_bind_blob(stmt.pointer, ndx, pBlob, val.byteLength, capi.SQLITE_TRANSIENT); }finally{ - capi.wasm.scopedAllocPop(stack); + wasm.scopedAllocPop(stack); } }else{ - const pBlob = capi.wasm.mallocFromTypedArray(val); + const pBlob = wasm.allocFromTypedArray(val); try{ rc = capi.sqlite3_bind_blob(stmt.pointer, ndx, pBlob, val.byteLength, capi.SQLITE_TRANSIENT); }finally{ - capi.wasm.dealloc(pBlob); + wasm.dealloc(pBlob); } } break; } default: console.warn("Unsupported bind() argument type:",val); toss3("Unsupported bind() argument type: "+(typeof val)); } - if(rc) checkDbRc(stmt.db.pointer, rc); + if(rc) DB.checkRc(stmt.db.pointer, rc); return stmt; }; Stmt.prototype = { /** @@ -1057,10 +1333,11 @@ if(this.pointer){ affirmUnlocked(this,'finalize()'); delete __stmtMap.get(this.db)[this.pointer]; capi.sqlite3_finalize(this.pointer); __ptrMap.delete(this); + delete this._mayGet; delete this.columnCount; delete this.parameterCount; delete this.db; delete this._isLocked; } @@ -1103,35 +1380,36 @@ Bindable value types: - null is bound as NULL. 
- undefined as a standalone value is a no-op intended to - simplify certain client-side use cases: passing undefined - as a value to this function will not actually bind - anything and this function will skip confirmation that - binding is even legal. (Those semantics simplify certain - client-side uses.) Conversely, a value of undefined as an - array or object property when binding an array/object - (see below) is treated the same as null. - - - Numbers are bound as either doubles or integers: doubles - if they are larger than 32 bits, else double or int32, - depending on whether they have a fractional part. (It is, - as of this writing, illegal to call (from JS) a WASM - function which either takes or returns an int64.) - Booleans are bound as integer 0 or 1. It is not expected - the distinction of binding doubles which have no - fractional parts is integers is significant for the - majority of clients due to sqlite3's data typing - model. If capi.wasm.bigIntEnabled is true then this - routine will bind BigInt values as 64-bit integers. + simplify certain client-side use cases: passing undefined as + a value to this function will not actually bind anything and + this function will skip confirmation that binding is even + legal. (Those semantics simplify certain client-side uses.) + Conversely, a value of undefined as an array or object + property when binding an array/object (see below) is treated + the same as null. + + - Numbers are bound as either doubles or integers: doubles if + they are larger than 32 bits, else double or int32, depending + on whether they have a fractional part. Booleans are bound as + integer 0 or 1. It is not expected the distinction of binding + doubles which have no fractional parts is integers is + significant for the majority of clients due to sqlite3's data + typing model. If [BigInt] support is enabled then this + routine will bind BigInt values as 64-bit integers if they'll + fit in 64 bits. If that support disabled, it will store the + BigInt as an int32 or a double if it can do so without loss + of precision. If the BigInt is _too BigInt_ then it will + throw. - Strings are bound as strings (use bindAsBlob() to force - blob binding). + blob binding). - Uint8Array and Int8Array instances are bound as blobs. - (TODO: binding the other TypedArray types.) + (TODO: binding the other TypedArray types.) If passed an array, each element of the array is bound at the parameter index equal to the array index plus 1 (because arrays are 0-based but binding is 1-based). @@ -1226,26 +1504,70 @@ bindOne(this, ndx, BindTypes.blob, arg); this._mayGet = false; return this; }, /** - Steps the statement one time. If the result indicates that - a row of data is available, true is returned. If no row of - data is available, false is returned. Throws on error. + Steps the statement one time. If the result indicates that a + row of data is available, a truthy value is returned. + If no row of data is available, a falsy + value is returned. Throws on error. 
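A brief sketch tying the binding rules above to step()-based traversal (assuming `db` is an open DB instance and a hypothetical table `t(a,b)`):

```
const ins = db.prepare("INSERT INTO t(a,b) VALUES(?,?)");
try{
  // Array form: element 0 binds to parameter 1, element 1 to parameter 2.
  // stepReset() (documented just below) steps then resets the statement
  // so that it can be re-bound for the next row.
  ins.bind([1, "one"]).stepReset();
  // null binds an SQL NULL and a Uint8Array is bound as a blob.
  ins.bind([null, new Uint8Array([1,2,3])]).stepReset();
}finally{
  ins.finalize();
}
const q = db.prepare("SELECT a, b FROM t ORDER BY a");
try{
  while(q.step()) console.log(q.get(0), q.get(1));
}finally{
  q.finalize();
}
```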
*/ step: function(){ affirmUnlocked(this, 'step()'); const rc = capi.sqlite3_step(affirmStmtOpen(this).pointer); switch(rc){ case capi.SQLITE_DONE: return this._mayGet = false; case capi.SQLITE_ROW: return this._mayGet = true; default: this._mayGet = false; - console.warn("sqlite3_step() rc=",rc,"SQL =", - capi.sqlite3_sql(this.pointer)); - checkDbRc(this.db.pointer, rc); - }; + console.warn("sqlite3_step() rc=",rc, + capi.sqlite3_js_rc_str(rc), + "SQL =", capi.sqlite3_sql(this.pointer)); + DB.checkRc(this.db.pointer, rc); + } + }, + /** + Functions exactly like step() except that... + + 1) On success, it calls this.reset() and returns this object. + 2) On error, it throws and does not call reset(). + + This is intended to simplify constructs like: + + ``` + for(...) { + stmt.bind(...).stepReset(); + } + ``` + + Note that the reset() call makes it illegal to call this.get() + after the step. + */ + stepReset: function(){ + this.step(); + return this.reset(); + }, + /** + Functions like step() except that it finalizes this statement + immediately after stepping unless the step cannot be performed + because the statement is locked. Throws on error, but any error + other than the statement-is-locked case will also trigger + finalization of this statement. + + On success, it returns true if the step indicated that a row of + data was available, else it returns false. + + This is intended to simplify use cases such as: + + ``` + aDb.prepare("insert into foo(a) values(?)").bind(123).stepFinalize(); + ``` + */ + stepFinalize: function(){ + const rc = this.step(); + this.finalize(); + return rc; }, /** Fetches the value from the given 0-based column index of the current data row, throwing if index is out of range. @@ -1299,11 +1621,11 @@ switch(undefined===asType ? capi.sqlite3_column_type(this.pointer, ndx) : asType){ case capi.SQLITE_NULL: return null; case capi.SQLITE_INTEGER:{ - if(capi.wasm.bigIntEnabled){ + if(wasm.bigIntEnabled){ const rc = capi.sqlite3_column_int64(this.pointer, ndx); if(rc>=Number.MIN_SAFE_INTEGER && rc<=Number.MAX_SAFE_INTEGER){ /* Coerce "normal" number ranges to normal number values, and only return BigInt-type values for numbers out of this range. */ @@ -1330,12 +1652,12 @@ return capi.sqlite3_column_text(this.pointer, ndx); case capi.SQLITE_BLOB: { const n = capi.sqlite3_column_bytes(this.pointer, ndx), ptr = capi.sqlite3_column_blob(this.pointer, ndx), rc = new Uint8Array(n); - //heap = n ? capi.wasm.heap8() : false; - if(n) rc.set(capi.wasm.heap8u().slice(ptr, ptr+n), 0); + //heap = n ? wasm.heap8() : false; + if(n) rc.set(wasm.heap8u().slice(ptr, ptr+n), 0); //for(let i = 0; i < n; ++i) rc[i] = heap[ptr + i]; if(n && this.db._blobXfer instanceof Array){ /* This is an optimization soley for the Worker-based API. These values will be transfered to the main thread directly @@ -1345,11 +1667,11 @@ return rc; } default: toss3("Don't know how to translate", "type of result column #"+ndx+"."); } - abort("Not reached."); + toss3("Not reached."); }, /** Equivalent to get(ndx) but coerces the result to an integer. */ getInt: function(ndx){return this.get(ndx,capi.SQLITE_INTEGER)}, /** Equivalent to get(ndx) but coerces the result to a @@ -1372,11 +1694,11 @@ const s = this.get(ndx, capi.SQLITE_STRING); return null===s ? s : JSON.parse(s); }, // Design note: the only reason most of these getters have a 'get' // prefix is for consistency with getVALUE_TYPE(). 
The latter - // arguablly really need that prefix for API readability and the + // arguably really need that prefix for API readability and the // rest arguably don't, but consistency is a powerful thing. /** Returns the result column name of the given index, or throws if index is out of bounds or this statement has been finalized. This can be used without having run step() @@ -1393,13 +1715,12 @@ array, it is used as the target and all names are appended to it. Returns the target array. Throws if this statement cannot have result columns. This object's columnCount member holds the number of columns. */ - getColumnNames: function(tgt){ + getColumnNames: function(tgt=[]){ affirmColIndex(affirmStmtOpen(this),0); - if(!tgt) tgt = []; for(let i = 0; i < this.columnCount; ++i){ tgt.push(capi.sqlite3_column_name(this.pointer, i)); } return tgt; }, @@ -1423,16 +1744,57 @@ set: ()=>toss3("The pointer property is read-only.") } Object.defineProperty(Stmt.prototype, 'pointer', prop); Object.defineProperty(DB.prototype, 'pointer', prop); } - + /** The OO API's public namespace. */ sqlite3.oo1 = { version: { lib: capi.sqlite3_libversion(), ooApi: "0.1" }, DB, Stmt - }/*SQLite3 object*/; -})(self); + }/*oo1 object*/; + + if(util.isUIThread()){ + /** + Functionally equivalent to DB(storageName,'c','kvvfs') except + that it throws if the given storage name is not one of 'local' + or 'session'. + */ + sqlite3.oo1.JsStorageDb = function(storageName='session'){ + if('session'!==storageName && 'local'!==storageName){ + toss3("JsStorageDb db name must be one of 'session' or 'local'."); + } + dbCtorHelper.call(this, { + filename: storageName, + flags: 'c', + vfs: "kvvfs" + }); + }; + const jdb = sqlite3.oo1.JsStorageDb; + jdb.prototype = Object.create(DB.prototype); + /** Equivalent to sqlite3_js_kvvfs_clear(). */ + jdb.clearStorage = capi.sqlite3_js_kvvfs_clear; + /** + Clears this database instance's storage or throws if this + instance has been closed. Returns the number of + database blocks which were cleaned up. + */ + jdb.prototype.clearStorage = function(){ + return jdb.clearStorage(affirmDbOpen(this).filename); + }; + /** Equivalent to sqlite3_js_kvvfs_size(). */ + jdb.storageSize = capi.sqlite3_js_kvvfs_size; + /** + Returns the _approximate_ number of bytes this database takes + up in its storage or throws if this instance has been closed. + */ + jdb.prototype.storageSize = function(){ + return jdb.storageSize(affirmDbOpen(this).filename); + }; + }/*main-window-only bits*/ + +}); + Index: ext/wasm/api/sqlite3-api-opfs.js ================================================================== --- ext/wasm/api/sqlite3-api-opfs.js +++ ext/wasm/api/sqlite3-api-opfs.js @@ -1,7 +1,7 @@ /* - 2022-07-22 + 2022-09-18 The author disclaims copyright to this source code. In place of a legal notice, here is a blessing: * May you do good and not evil. @@ -8,386 +8,1304 @@ * May you find forgiveness for yourself and forgive others. * May you share freely, never taking more than you give. *********************************************************************** - This file contains extensions to the sqlite3 WASM API related to the - Origin-Private FileSystem (OPFS). It is intended to be appended to - the main JS deliverable somewhere after sqlite3-api-glue.js and - before sqlite3-api-cleanup.js. 
+ This file holds the synchronous half of an sqlite3_vfs + implementation which proxies, in a synchronous fashion, the + asynchronous Origin-Private FileSystem (OPFS) APIs using a second + Worker, implemented in sqlite3-opfs-async-proxy.js. This file is + intended to be appended to the main sqlite3 JS deliverable somewhere + after sqlite3-api-oo1.js and before sqlite3-api-cleanup.js. +*/ +'use strict'; +self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){ +/** + installOpfsVfs() returns a Promise which, on success, installs an + sqlite3_vfs named "opfs", suitable for use with all sqlite3 APIs + which accept a VFS. It is intended to be called via + sqlite3ApiBootstrap.initializersAsync or an equivalent mechanism. + + The installed VFS uses the Origin-Private FileSystem API for + all file storage. On error it is rejected with an exception + explaining the problem. Reasons for rejection include, but are + not limited to: + + - The counterpart Worker (see below) could not be loaded. + + - The environment does not support OPFS. That includes when + this function is called from the main window thread. Significant notes and limitations: - As of this writing, OPFS is still very much in flux and only available in bleeding-edge versions of Chrome (v102+, noting that that number will increase as the OPFS API matures). - - The _synchronous_ family of OPFS features (which is what this API - requires) are only available in non-shared Worker threads. This - file tries to detect that case and becomes a no-op if those - features do not seem to be available. -*/ - -// FileSystemHandle -// FileSystemDirectoryHandle -// FileSystemFileHandle -// FileSystemFileHandle.prototype.createSyncAccessHandle -self.sqlite3.postInit.push(function(self, sqlite3){ - const warn = console.warn.bind(console), - error = console.error.bind(console); - if(!self.importScripts || !self.FileSystemFileHandle - || !self.FileSystemFileHandle.prototype.createSyncAccessHandle){ - warn("OPFS not found or its sync API is not available in this environment."); - return; - }else if(!sqlite3.capi.wasm.bigIntEnabled){ - error("OPFS requires BigInt support but sqlite3.capi.wasm.bigIntEnabled is false."); - return; - } - //warn('self.FileSystemFileHandle =',self.FileSystemFileHandle); - //warn('self.FileSystemFileHandle.prototype =',self.FileSystemFileHandle.prototype); - const toss = (...args)=>{throw new Error(args.join(' '))}; - const capi = sqlite3.capi, - wasm = capi.wasm; - const sqlite3_vfs = capi.sqlite3_vfs - || toss("Missing sqlite3.capi.sqlite3_vfs object."); - const sqlite3_file = capi.sqlite3_file - || toss("Missing sqlite3.capi.sqlite3_file object."); - const sqlite3_io_methods = capi.sqlite3_io_methods - || toss("Missing sqlite3.capi.sqlite3_io_methods object."); - const StructBinder = sqlite3.StructBinder || toss("Missing sqlite3.StructBinder."); - const debug = console.debug.bind(console), - log = console.log.bind(console); - warn("UNDER CONSTRUCTION: setting up OPFS VFS..."); - - const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/; - const dVfs = pDVfs - ? new sqlite3_vfs(pDVfs) - : null /* dVfs will be null when sqlite3 is built with - SQLITE_OS_OTHER. Though we cannot currently handle - that case, the hope is to eventually be able to. 
*/; - const oVfs = new sqlite3_vfs(); - const oIom = new sqlite3_io_methods(); - oVfs.$iVersion = 2/*yes, two*/; - oVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof; - oVfs.$mxPathname = 1024/*sure, why not?*/; - oVfs.$zName = wasm.allocCString("opfs"); - oVfs.ondispose = [ - '$zName', oVfs.$zName, - 'cleanup dVfs', ()=>(dVfs ? dVfs.dispose() : null) - ]; - if(dVfs){ - oVfs.$xSleep = dVfs.$xSleep; - oVfs.$xRandomness = dVfs.$xRandomness; - } - // All C-side memory of oVfs is zeroed out, but just to be explicit: - oVfs.$xDlOpen = oVfs.$xDlError = oVfs.$xDlSym = oVfs.$xDlClose = null; - - /** - Pedantic sidebar about oVfs.ondispose: the entries in that array - are items to clean up when oVfs.dispose() is called, but in this - environment it will never be called. The VFS instance simply - hangs around until the WASM module instance is cleaned up. We - "could" _hypothetically_ clean it up by "importing" an - sqlite3_os_end() impl into the wasm build, but the shutdown order - of the wasm engine and the JS one are undefined so there is no - guaranty that the oVfs instance would be available in one - environment or the other when sqlite3_os_end() is called (_if_ it - gets called at all in a wasm build, which is undefined). - */ - - /** - Installs a StructBinder-bound function pointer member of the - given name and function in the given StructType target object. - It creates a WASM proxy for the given function and arranges for - that proxy to be cleaned up when tgt.dispose() is called. Throws - on the slightest hint of error (e.g. tgt is-not-a StructType, - name does not map to a struct-bound member, etc.). - - Returns a proxy for this function which is bound to tgt and takes - 2 args (name,func). That function returns the same thing, - permitting calls to be chained. - - If called with only 1 arg, it has no side effects but returns a - func with the same signature as described above. - */ - const installMethod = function callee(tgt, name, func){ - if(!(tgt instanceof StructBinder.StructType)){ - toss("Usage error: target object is-not-a StructType."); - } - if(1===arguments.length){ - return (n,f)=>callee(tgt,n,f); - } - if(!callee.argcProxy){ - callee.argcProxy = function(func,sig){ - return function(...args){ - if(func.length!==arguments.length){ - toss("Argument mismatch. Native signature is:",sig); - } - return func.apply(this, args); - } - }; - callee.removeFuncList = function(){ - if(this.ondispose.__removeFuncList){ - this.ondispose.__removeFuncList.forEach( - (v,ndx)=>{ - if('number'===typeof v){ - try{wasm.uninstallFunction(v)} - catch(e){/*ignore*/} - } - /* else it's a descriptive label for the next number in - the list. */ - } - ); - delete this.ondispose.__removeFuncList; - } - }; - }/*static init*/ - const sigN = tgt.memberSignature(name); - if(sigN.length<2){ - toss("Member",name," is not a function pointer. Signature =",sigN); - } - const memKey = tgt.memberKey(name); - //log("installMethod",tgt, name, sigN); - const fProxy = 1 - // We can remove this proxy middle-man once the VFS is working - ? 
callee.argcProxy(func, sigN) - : func; - const pFunc = wasm.installFunction(fProxy, tgt.memberSignature(name, true)); - tgt[memKey] = pFunc; - if(!tgt.ondispose) tgt.ondispose = []; - if(!tgt.ondispose.__removeFuncList){ - tgt.ondispose.push('ondispose.__removeFuncList handler', - callee.removeFuncList); - tgt.ondispose.__removeFuncList = []; - } - tgt.ondispose.__removeFuncList.push(memKey, pFunc); - return (n,f)=>callee(tgt, n, f); - }/*installMethod*/; - - /** - Map of sqlite3_file pointers to OPFS handles. - */ - const __opfsHandles = Object.create(null); - - const randomFilename = function f(len=16){ - if(!f._chars){ - f._chars = "abcdefghijklmnopqrstuvwxyz"+ - "ABCDEFGHIJKLMNOPQRSTUVWXYZ"+ - "012346789"; - f._n = f._chars.length; - } - const a = []; - let i = 0; - for( ; i < len; ++i){ - const ndx = Math.random() * (f._n * 64) % f._n | 0; - a[i] = f._chars[ndx]; - } - return a.join(''); - }; - - //const rootDir = await navigator.storage.getDirectory(); - - //////////////////////////////////////////////////////////////////////// - // Set up OPFS VFS methods... - let inst = installMethod(oVfs); - inst('xOpen', function(pVfs, zName, pFile, flags, pOutFlags){ - const f = new sqlite3_file(pFile); - f.$pMethods = oIom.pointer; - __opfsHandles[pFile] = f; - f.opfsHandle = null /* TODO */; - if(flags & capi.SQLITE_OPEN_DELETEONCLOSE){ - f.deleteOnClose = true; - } - f.filename = zName ? wasm.cstringToJs(zName) : randomFilename(); - error("OPFS sqlite3_vfs::xOpen is not yet full implemented."); - return capi.SQLITE_IOERR; - }) - ('xFullPathname', function(pVfs,zName,nOut,pOut){ - /* Until/unless we have some notion of "current dir" - in OPFS, simply copy zName to pOut... */ - const i = wasm.cstrncpy(pOut, zName, nOut); - return i SQLITE_DEFAULT_SECTOR_SIZE */; - //}) - - const rc = capi.sqlite3_vfs_register(oVfs.pointer, 0); - if(rc){ - oVfs.dispose(); - toss("sqlite3_vfs_register(OPFS) failed with rc",rc); - } - capi.sqlite3_vfs_register.addReference(oVfs, oIom); - warn("End of (very incomplete) OPFS setup.", oVfs); - //oVfs.dispose()/*only because we can't yet do anything with it*/; -}); + - The OPFS features used here are only available in dedicated Worker + threads. This file tries to detect that case, resulting in a + rejected Promise if those features do not seem to be available. + + - It requires the SharedArrayBuffer and Atomics classes, and the + former is only available if the HTTP server emits the so-called + COOP and COEP response headers. These features are required for + proxying OPFS's synchronous API via the synchronous interface + required by the sqlite3_vfs API. + + - This function may only be called a single time. When called, this + function removes itself from the sqlite3 object. + + All arguments to this function are for internal/development purposes + only. They do not constitute a public API and may change at any + time. + + The argument may optionally be a plain object with the following + configuration options: + + - proxyUri: as described above + + - verbose (=2): an integer 0-3. 0 disables all logging, 1 enables + logging of errors. 2 enables logging of warnings and errors. 3 + additionally enables debugging info. + + - sanityChecks (=false): if true, some basic sanity tests are + run on the OPFS VFS API after it's initialized, before the + returned Promise resolves. + + On success, the Promise resolves to the top-most sqlite3 namespace + object and that object gets a new object installed in its + `opfs` property, containing several OPFS-specific utilities. 
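Once the Promise resolves, typical client-side usage from a dedicated Worker looks roughly like the following sketch (the db file name is arbitrary and `sqlite3` is assumed to be the bootstrapped namespace object):

```
if(sqlite3.opfs){
  // Open (creating if needed) a database file stored via OPFS:
  const db = new sqlite3.oo1.DB('/my-app.db', 'c', 'opfs');
  try{
    db.exec("CREATE TABLE IF NOT EXISTS t(a,b)");
    console.log("Using VFS:", db.dbVfsName()); // ==> "opfs"
  }finally{
    db.close();
  }
}else{
  console.warn("OPFS VFS is not available in this environment.");
}
```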
+*/ +const installOpfsVfs = function callee(options){ + if(!self.SharedArrayBuffer || + !self.Atomics || + !self.FileSystemHandle || + !self.FileSystemDirectoryHandle || + !self.FileSystemFileHandle || + !self.FileSystemFileHandle.prototype.createSyncAccessHandle || + !navigator.storage.getDirectory){ + return Promise.reject( + new Error("This environment does not have OPFS support.") + ); + } + if(!options || 'object'!==typeof options){ + options = Object.create(null); + } + const urlParams = new URL(self.location.href).searchParams; + if(undefined===options.verbose){ + options.verbose = urlParams.has('opfs-verbose') ? 3 : 2; + } + if(undefined===options.sanityChecks){ + options.sanityChecks = urlParams.has('opfs-sanity-check'); + } + if(undefined===options.proxyUri){ + options.proxyUri = callee.defaultProxyUri; + } + + if('function' === typeof options.proxyUri){ + options.proxyUri = options.proxyUri(); + } + const thePromise = new Promise(function(promiseResolve, promiseReject_){ + const loggers = { + 0:console.error.bind(console), + 1:console.warn.bind(console), + 2:console.log.bind(console) + }; + const logImpl = (level,...args)=>{ + if(options.verbose>level) loggers[level]("OPFS syncer:",...args); + }; + const log = (...args)=>logImpl(2, ...args); + const warn = (...args)=>logImpl(1, ...args); + const error = (...args)=>logImpl(0, ...args); + const toss = function(...args){throw new Error(args.join(' '))}; + const capi = sqlite3.capi; + const wasm = sqlite3.wasm; + const sqlite3_vfs = capi.sqlite3_vfs; + const sqlite3_file = capi.sqlite3_file; + const sqlite3_io_methods = capi.sqlite3_io_methods; + /** + Generic utilities for working with OPFS. This will get filled out + by the Promise setup and, on success, installed as sqlite3.opfs. + */ + const opfsUtil = Object.create(null); + /** + Not part of the public API. Solely for internal/development + use. + */ + opfsUtil.metrics = { + dump: function(){ + let k, n = 0, t = 0, w = 0; + for(k in state.opIds){ + const m = metrics[k]; + n += m.count; + t += m.time; + w += m.wait; + m.avgTime = (m.count && m.time) ? (m.time / m.count) : 0; + m.avgWait = (m.count && m.wait) ? (m.wait / m.count) : 0; + } + console.log(self.location.href, + "metrics for",self.location.href,":",metrics, + "\nTotal of",n,"op(s) for",t, + "ms (incl. "+w+" ms of waiting on the async side)"); + console.log("Serialization metrics:",metrics.s11n); + W.postMessage({type:'opfs-async-metrics'}); + }, + reset: function(){ + let k; + const r = (m)=>(m.count = m.time = m.wait = 0); + for(k in state.opIds){ + r(metrics[k] = Object.create(null)); + } + let s = metrics.s11n = Object.create(null); + s = s.serialize = Object.create(null); + s.count = s.time = 0; + s = metrics.s11n.deserialize = Object.create(null); + s.count = s.time = 0; + } + }/*metrics*/; + const promiseReject = function(err){ + opfsVfs.dispose(); + return promiseReject_(err); + }; + const W = new Worker(options.proxyUri); + W._originalOnError = W.onerror /* will be restored later */; + W.onerror = function(err){ + // The error object doesn't contain any useful info when the + // failure is, e.g., that the remote script is 404. + error("Error initializing OPFS asyncer:",err); + promiseReject(new Error("Loading OPFS async Worker failed for unknown reasons.")); + }; + const pDVfs = capi.sqlite3_vfs_find(null)/*pointer to default VFS*/; + const dVfs = pDVfs + ? new sqlite3_vfs(pDVfs) + : null /* dVfs will be null when sqlite3 is built with + SQLITE_OS_OTHER. 
Though we cannot currently handle + that case, the hope is to eventually be able to. */; + const opfsVfs = new sqlite3_vfs(); + const opfsIoMethods = new sqlite3_io_methods(); + opfsVfs.$iVersion = 2/*yes, two*/; + opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof; + opfsVfs.$mxPathname = 1024/*sure, why not?*/; + opfsVfs.$zName = wasm.allocCString("opfs"); + // All C-side memory of opfsVfs is zeroed out, but just to be explicit: + opfsVfs.$xDlOpen = opfsVfs.$xDlError = opfsVfs.$xDlSym = opfsVfs.$xDlClose = null; + opfsVfs.ondispose = [ + '$zName', opfsVfs.$zName, + 'cleanup default VFS wrapper', ()=>(dVfs ? dVfs.dispose() : null), + 'cleanup opfsIoMethods', ()=>opfsIoMethods.dispose() + ]; + /** + Pedantic sidebar about opfsVfs.ondispose: the entries in that array + are items to clean up when opfsVfs.dispose() is called, but in this + environment it will never be called. The VFS instance simply + hangs around until the WASM module instance is cleaned up. We + "could" _hypothetically_ clean it up by "importing" an + sqlite3_os_end() impl into the wasm build, but the shutdown order + of the wasm engine and the JS one are undefined so there is no + guaranty that the opfsVfs instance would be available in one + environment or the other when sqlite3_os_end() is called (_if_ it + gets called at all in a wasm build, which is undefined). + */ + /** + State which we send to the async-api Worker or share with it. + This object must initially contain only cloneable or sharable + objects. After the worker's "inited" message arrives, other types + of data may be added to it. + + For purposes of Atomics.wait() and Atomics.notify(), we use a + SharedArrayBuffer with one slot reserved for each of the API + proxy's methods. The sync side of the API uses Atomics.wait() + on the corresponding slot and the async side uses + Atomics.notify() on that slot. + + The approach of using a single SAB to serialize comms for all + instances might(?) lead to deadlock situations in multi-db + cases. We should probably have one SAB here with a single slot + for locking a per-file initialization step and then allocate a + separate SAB like the above one for each file. That will + require a bit of acrobatics but should be feasible. The most + problematic part is that xOpen() would have to use + postMessage() to communicate its SharedArrayBuffer, and mixing + that approach with Atomics.wait/notify() gets a bit messy. + */ + const state = Object.create(null); + state.verbose = options.verbose; + state.littleEndian = (()=>{ + const buffer = new ArrayBuffer(2); + new DataView(buffer).setInt16(0, 256, true /* ==>littleEndian */); + // Int16Array uses the platform's endianness. + return new Int16Array(buffer)[0] === 256; + })(); + /** + Whether the async counterpart should log exceptions to + the serialization channel. That produces a great deal of + noise for seemingly innocuous things like xAccess() checks + for missing files, so this option may have one of 3 values: + + 0 = no exception logging + + 1 = only log exceptions for "significant" ops like xOpen(), + xRead(), and xWrite(). + + 2 = log all exceptions. + */ + state.asyncS11nExceptions = 1; + /* Size of file I/O buffer block. 64k = max sqlite3 page size, and + xRead/xWrite() will never deal in blocks larger than that. */ + state.fileBufferSize = 1024 * 64; + state.sabS11nOffset = state.fileBufferSize; + /** + The size of the block in our SAB for serializing arguments and + result values. 
Needs to be large enough to hold serialized + values of any of the proxied APIs. Filenames are the largest + part but are limited to opfsVfs.$mxPathname bytes. + */ + state.sabS11nSize = opfsVfs.$mxPathname * 2; + /** + The SAB used for all data I/O between the synchronous and + async halves (file i/o and arg/result s11n). + */ + state.sabIO = new SharedArrayBuffer( + state.fileBufferSize/* file i/o block */ + + state.sabS11nSize/* argument/result serialization block */ + ); + state.opIds = Object.create(null); + const metrics = Object.create(null); + { + /* Indexes for use in our SharedArrayBuffer... */ + let i = 0; + /* SAB slot used to communicate which operation is desired + between both workers. This worker writes to it and the other + listens for changes. */ + state.opIds.whichOp = i++; + /* Slot for storing return values. This worker listens to that + slot and the other worker writes to it. */ + state.opIds.rc = i++; + /* Each function gets an ID which this worker writes to + the whichOp slot. The async-api worker uses Atomic.wait() + on the whichOp slot to figure out which operation to run + next. */ + state.opIds.xAccess = i++; + state.opIds.xClose = i++; + state.opIds.xDelete = i++; + state.opIds.xDeleteNoWait = i++; + state.opIds.xFileControl = i++; + state.opIds.xFileSize = i++; + state.opIds.xLock = i++; + state.opIds.xOpen = i++; + state.opIds.xRead = i++; + state.opIds.xSleep = i++; + state.opIds.xSync = i++; + state.opIds.xTruncate = i++; + state.opIds.xUnlock = i++; + state.opIds.xWrite = i++; + state.opIds.mkdir = i++; + state.opIds['opfs-async-metrics'] = i++; + state.opIds['opfs-async-shutdown'] = i++; + /* The retry slot is used by the async part for wait-and-retry + semantics. Though we could hypothetically use the xSleep slot + for that, doing so might lead to undesired side effects. */ + state.opIds.retry = i++; + state.sabOP = new SharedArrayBuffer( + i * 4/* ==sizeof int32, noting that Atomics.wait() and friends + can only function on Int32Array views of an SAB. */); + opfsUtil.metrics.reset(); + } + /** + SQLITE_xxx constants to export to the async worker + counterpart... + */ + state.sq3Codes = Object.create(null); + [ + 'SQLITE_ACCESS_EXISTS', + 'SQLITE_ACCESS_READWRITE', + 'SQLITE_ERROR', + 'SQLITE_IOERR', + 'SQLITE_IOERR_ACCESS', + 'SQLITE_IOERR_CLOSE', + 'SQLITE_IOERR_DELETE', + 'SQLITE_IOERR_FSYNC', + 'SQLITE_IOERR_LOCK', + 'SQLITE_IOERR_READ', + 'SQLITE_IOERR_SHORT_READ', + 'SQLITE_IOERR_TRUNCATE', + 'SQLITE_IOERR_UNLOCK', + 'SQLITE_IOERR_WRITE', + 'SQLITE_LOCK_EXCLUSIVE', + 'SQLITE_LOCK_NONE', + 'SQLITE_LOCK_PENDING', + 'SQLITE_LOCK_RESERVED', + 'SQLITE_LOCK_SHARED', + 'SQLITE_MISUSE', + 'SQLITE_NOTFOUND', + 'SQLITE_OPEN_CREATE', + 'SQLITE_OPEN_DELETEONCLOSE', + 'SQLITE_OPEN_READONLY' + ].forEach((k)=>{ + if(undefined === (state.sq3Codes[k] = capi[k])){ + toss("Maintenance required: not found:",k); + } + }); + + /** + Runs the given operation (by name) in the async worker + counterpart, waits for its response, and returns the result + which the async worker writes to SAB[state.opIds.rc]. The + 2nd and subsequent arguments must be the aruguments for the + async op. 
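+
+     For illustration only: the async half of this proxy lives in a
+     separate Worker script and is not part of this file. Conceptually
+     it runs a dispatch loop much like the following sketch, in which
+     listenLoop and handleOneOp are hypothetical names, not the real
+     implementation:
+
+     ```
+     // Conceptual sketch of the async side's dispatch loop.
+     const listenLoop = async function(sabOPView, opIds, handleOneOp){
+       while(true){
+         Atomics.wait(sabOPView, opIds.whichOp, 0); // sleep until an op arrives
+         const which = Atomics.load(sabOPView, opIds.whichOp);
+         Atomics.store(sabOPView, opIds.whichOp, 0); // mark the slot as consumed
+         const rc = await handleOneOp(which);        // perform the OPFS work
+         Atomics.store(sabOPView, opIds.rc, rc);
+         Atomics.notify(sabOPView, opIds.rc);        // wakes opRun()'s Atomics.wait()
+       }
+     };
+     ```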
+  */
+  const opRun = (op,...args)=>{
+    const opNdx = state.opIds[op] || toss("Invalid op ID:",op);
+    state.s11n.serialize(...args);
+    Atomics.store(state.sabOPView, state.opIds.rc, -1);
+    Atomics.store(state.sabOPView, state.opIds.whichOp, opNdx);
+    Atomics.notify(state.sabOPView, state.opIds.whichOp)
+    /* async thread will take over here */;
+    const t = performance.now();
+    Atomics.wait(state.sabOPView, state.opIds.rc, -1)
+    /* When this wait() call returns, the async half will have
+       completed the operation and reported its results. */;
+    const rc = Atomics.load(state.sabOPView, state.opIds.rc);
+    metrics[op].wait += performance.now() - t;
+    if(rc && state.asyncS11nExceptions){
+      const err = state.s11n.deserialize();
+      if(err) error(op+"() async error:",...err);
+    }
+    return rc;
+  };
+
+  /**
+     Not part of the public API. Only for test/development use.
+  */
+  opfsUtil.debug = {
+    asyncShutdown: ()=>{
+      warn("Shutting down OPFS async listener. The OPFS VFS will no longer work.");
+      opRun('opfs-async-shutdown');
+    },
+    asyncRestart: ()=>{
+      warn("Attempting to restart OPFS VFS async listener. Might work, might not.");
+      W.postMessage({type: 'opfs-async-restart'});
+    }
+  };
+
+  const initS11n = ()=>{
+    /**
+       !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+       ACHTUNG: this code is 100% duplicated in the other half of
+       this proxy! The documentation is maintained in the
+       "synchronous half".
+       !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+       This proxy de/serializes cross-thread function arguments and
+       output-pointer values via the state.sabIO SharedArrayBuffer,
+       using the region defined by (state.sabS11nOffset,
+       state.sabS11nOffset + state.sabS11nSize]. Only one dataset is
+       recorded at a time.
+
+       This is not a general-purpose format. It only supports the
+       range of operations, and data sizes, needed by the
+       sqlite3_vfs and sqlite3_io_methods operations. Serialized
+       data are transient and this serialization algorithm may
+       change at any time.
+
+       The data format can be succinctly summarized as:
+
+       Nt...Td...D
+
+       Where:
+
+       - N = number of entries (1 byte)
+
+       - t = type ID of first argument (1 byte)
+
+       - ...T = type IDs of the 2nd and subsequent arguments (1 byte
+         each).
+
+       - d = raw bytes of first argument (per-type size).
+
+       - ...D = raw bytes of the 2nd and subsequent arguments (per-type
+         size).
+
+       All types except strings have fixed sizes. Strings are stored
+       using their TextEncoder/TextDecoder representations. It would
+       arguably make more sense to store them as Int16Arrays of
+       their JS character values, but how best/fastest to get that
+       in and out of string form is an open point. Initial
+       experimentation with that approach did not gain us any speed.
+
+       Historical note: this impl was initially about 1% this size by
+       using JSON.stringify/parse(), but using fit-to-purpose
+       serialization saves considerable runtime.
+    */
+    if(state.s11n) return state.s11n;
+    const textDecoder = new TextDecoder(),
+          textEncoder = new TextEncoder('utf-8'),
+          viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
+          viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
+    state.s11n = Object.create(null);
+    /* Only arguments and return values of these types may be
+       serialized. This covers the whole range of types needed by the
+       sqlite3_vfs API.
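+
+       As a concrete illustration of the Nt...Td...D layout (the
+       values here are hypothetical): serializing the two arguments
+       (3, "hi") writes one byte holding the argument count (2), one
+       byte holding the type ID for number, one byte holding the type
+       ID for string, then 8 bytes of float64 data for the value 3,
+       then a 4-byte length (2) followed by the 2 UTF-8 bytes of
+       "hi":
+
+       [02][number id][string id][float64: 3.0][int32: 2]['h']['i']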
*/ + const TypeIds = Object.create(null); + TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' }; + TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' }; + TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' }; + TypeIds.string = { id: 4 }; + + const getTypeId = (v)=>( + TypeIds[typeof v] + || toss("Maintenance required: this value type cannot be serialized.",v) + ); + const getTypeIdById = (tid)=>{ + switch(tid){ + case TypeIds.number.id: return TypeIds.number; + case TypeIds.bigint.id: return TypeIds.bigint; + case TypeIds.boolean.id: return TypeIds.boolean; + case TypeIds.string.id: return TypeIds.string; + default: toss("Invalid type ID:",tid); + } + }; + + /** + Returns an array of the deserialized state stored by the most + recent serialize() operation (from from this thread or the + counterpart thread), or null if the serialization buffer is + empty. If passed a truthy argument, the serialization buffer + is cleared after deserialization. + */ + state.s11n.deserialize = function(clear=false){ + ++metrics.s11n.deserialize.count; + const t = performance.now(); + const argc = viewU8[0]; + const rc = argc ? [] : null; + if(argc){ + const typeIds = []; + let offset = 1, i, n, v; + for(i = 0; i < argc; ++i, ++offset){ + typeIds.push(getTypeIdById(viewU8[offset])); + } + for(i = 0; i < argc; ++i){ + const t = typeIds[i]; + if(t.getter){ + v = viewDV[t.getter](offset, state.littleEndian); + offset += t.size; + }else{/*String*/ + n = viewDV.getInt32(offset, state.littleEndian); + offset += 4; + v = textDecoder.decode(viewU8.slice(offset, offset+n)); + offset += n; + } + rc.push(v); + } + } + if(clear) viewU8[0] = 0; + //log("deserialize:",argc, rc); + metrics.s11n.deserialize.time += performance.now() - t; + return rc; + }; + + /** + Serializes all arguments to the shared buffer for consumption + by the counterpart thread. + + This routine is only intended for serializing OPFS VFS + arguments and (in at least one special case) result values, + and the buffer is sized to be able to comfortably handle + those. + + If passed no arguments then it zeroes out the serialization + state. + */ + state.s11n.serialize = function(...args){ + const t = performance.now(); + ++metrics.s11n.serialize.count; + if(args.length){ + //log("serialize():",args); + const typeIds = []; + let i = 0, offset = 1; + viewU8[0] = args.length & 0xff /* header = # of args */; + for(; i < args.length; ++i, ++offset){ + /* Write the TypeIds.id value into the next args.length + bytes. */ + typeIds.push(getTypeId(args[i])); + viewU8[offset] = typeIds[i].id; + } + for(i = 0; i < args.length; ++i) { + /* Deserialize the following bytes based on their + corresponding TypeIds.id from the header. */ + const t = typeIds[i]; + if(t.setter){ + viewDV[t.setter](offset, args[i], state.littleEndian); + offset += t.size; + }else{/*String*/ + const s = textEncoder.encode(args[i]); + viewDV.setInt32(offset, s.byteLength, state.littleEndian); + offset += 4; + viewU8.set(s, offset); + offset += s.byteLength; + } + } + //log("serialize() result:",viewU8.slice(0,offset)); + }else{ + viewU8[0] = 0; + } + metrics.s11n.serialize.time += performance.now() - t; + }; + return state.s11n; + }/*initS11n()*/; + + /** + Generates a random ASCII string len characters long, intended for + use as a temporary file name. 
+ */ + const randomFilename = function f(len=16){ + if(!f._chars){ + f._chars = "abcdefghijklmnopqrstuvwxyz"+ + "ABCDEFGHIJKLMNOPQRSTUVWXYZ"+ + "012346789"; + f._n = f._chars.length; + } + const a = []; + let i = 0; + for( ; i < len; ++i){ + const ndx = Math.random() * (f._n * 64) % f._n | 0; + a[i] = f._chars[ndx]; + } + return a.join(''); + }; + + /** + Map of sqlite3_file pointers to objects constructed by xOpen(). + */ + const __openFiles = Object.create(null); + + /** + Installs a StructBinder-bound function pointer member of the + given name and function in the given StructType target object. + It creates a WASM proxy for the given function and arranges for + that proxy to be cleaned up when tgt.dispose() is called. Throws + on the slightest hint of error (e.g. tgt is-not-a StructType, + name does not map to a struct-bound member, etc.). + + Returns a proxy for this function which is bound to tgt and takes + 2 args (name,func). That function returns the same thing, + permitting calls to be chained. + + If called with only 1 arg, it has no side effects but returns a + func with the same signature as described above. + */ + const installMethod = function callee(tgt, name, func){ + if(!(tgt instanceof sqlite3.StructBinder.StructType)){ + toss("Usage error: target object is-not-a StructType."); + } + if(1===arguments.length){ + return (n,f)=>callee(tgt,n,f); + } + if(!callee.argcProxy){ + callee.argcProxy = function(func,sig){ + return function(...args){ + if(func.length!==arguments.length){ + toss("Argument mismatch. Native signature is:",sig); + } + return func.apply(this, args); + } + }; + callee.removeFuncList = function(){ + if(this.ondispose.__removeFuncList){ + this.ondispose.__removeFuncList.forEach( + (v,ndx)=>{ + if('number'===typeof v){ + try{wasm.uninstallFunction(v)} + catch(e){/*ignore*/} + } + /* else it's a descriptive label for the next number in + the list. */ + } + ); + delete this.ondispose.__removeFuncList; + } + }; + }/*static init*/ + const sigN = tgt.memberSignature(name); + if(sigN.length<2){ + toss("Member",name," is not a function pointer. Signature =",sigN); + } + const memKey = tgt.memberKey(name); + const fProxy = 0 + /** This middle-man proxy is only for use during development, to + confirm that we always pass the proper number of + arguments. We know that the C-level code will always use the + correct argument count. */ + ? callee.argcProxy(func, sigN) + : func; + const pFunc = wasm.installFunction(fProxy, tgt.memberSignature(name, true)); + tgt[memKey] = pFunc; + if(!tgt.ondispose) tgt.ondispose = []; + if(!tgt.ondispose.__removeFuncList){ + tgt.ondispose.push('ondispose.__removeFuncList handler', + callee.removeFuncList); + tgt.ondispose.__removeFuncList = []; + } + tgt.ondispose.__removeFuncList.push(memKey, pFunc); + return (n,f)=>callee(tgt, n, f); + }/*installMethod*/; + + const opTimer = Object.create(null); + opTimer.op = undefined; + opTimer.start = undefined; + const mTimeStart = (op)=>{ + opTimer.start = performance.now(); + opTimer.op = op; + ++metrics[op].count; + }; + const mTimeEnd = ()=>( + metrics[opTimer.op].time += performance.now() - opTimer.start + ); + + /** + Impls for the sqlite3_io_methods methods. Maintenance reminder: + members are in alphabetical order to simplify finding them. + */ + const ioSyncWrappers = { + xCheckReservedLock: function(pFile,pOut){ + /** + As of late 2022, only a single lock can be held on an OPFS + file. 
We have no way of checking whether any _other_ db + connection has a lock except by trying to obtain and (on + success) release a sync-handle for it, but doing so would + involve an inherent race condition. For the time being, + pending a better solution, we simply report whether the + given pFile instance has a lock. + */ + const f = __openFiles[pFile]; + wasm.setMemValue(pOut, f.lockMode ? 1 : 0, 'i32'); + return 0; + }, + xClose: function(pFile){ + mTimeStart('xClose'); + let rc = 0; + const f = __openFiles[pFile]; + if(f){ + delete __openFiles[pFile]; + rc = opRun('xClose', pFile); + if(f.sq3File) f.sq3File.dispose(); + } + mTimeEnd(); + return rc; + }, + xDeviceCharacteristics: function(pFile){ + //debug("xDeviceCharacteristics(",pFile,")"); + return capi.SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN; + }, + xFileControl: function(pFile, opId, pArg){ + mTimeStart('xFileControl'); + const rc = (capi.SQLITE_FCNTL_SYNC===opId) + ? opRun('xSync', pFile, 0) + : capi.SQLITE_NOTFOUND; + mTimeEnd(); + return rc; + }, + xFileSize: function(pFile,pSz64){ + mTimeStart('xFileSize'); + const rc = opRun('xFileSize', pFile); + if(0==rc){ + const sz = state.s11n.deserialize()[0]; + wasm.setMemValue(pSz64, sz, 'i64'); + } + mTimeEnd(); + return rc; + }, + xLock: function(pFile,lockType){ + mTimeStart('xLock'); + const f = __openFiles[pFile]; + let rc = 0; + if( capi.SQLITE_LOCK_NONE === f.lockType ) { + rc = opRun('xLock', pFile, lockType); + if( 0===rc ) f.lockType = lockType; + }else{ + f.lockType = lockType; + } + mTimeEnd(); + return rc; + }, + xRead: function(pFile,pDest,n,offset64){ + mTimeStart('xRead'); + const f = __openFiles[pFile]; + let rc; + try { + rc = opRun('xRead',pFile, n, Number(offset64)); + if(0===rc || capi.SQLITE_IOERR_SHORT_READ===rc){ + /** + Results get written to the SharedArrayBuffer f.sabView. + Because the heap is _not_ a SharedArrayBuffer, we have + to copy the results. TypedArray.set() seems to be the + fastest way to copy this. */ + wasm.heap8u().set(f.sabView.subarray(0, n), pDest); + } + }catch(e){ + error("xRead(",arguments,") failed:",e,f); + rc = capi.SQLITE_IOERR_READ; + } + mTimeEnd(); + return rc; + }, + xSync: function(pFile,flags){ + ++metrics.xSync.count; + return 0; // impl'd in xFileControl() + }, + xTruncate: function(pFile,sz64){ + mTimeStart('xTruncate'); + const rc = opRun('xTruncate', pFile, Number(sz64)); + mTimeEnd(); + return rc; + }, + xUnlock: function(pFile,lockType){ + mTimeStart('xUnlock'); + const f = __openFiles[pFile]; + let rc = 0; + if( capi.SQLITE_LOCK_NONE === lockType + && f.lockType ){ + rc = opRun('xUnlock', pFile, lockType); + } + if( 0===rc ) f.lockType = lockType; + mTimeEnd(); + return rc; + }, + xWrite: function(pFile,pSrc,n,offset64){ + mTimeStart('xWrite'); + const f = __openFiles[pFile]; + let rc; + try { + f.sabView.set(wasm.heap8u().subarray(pSrc, pSrc+n)); + rc = opRun('xWrite', pFile, n, Number(offset64)); + }catch(e){ + error("xWrite(",arguments,") failed:",e,f); + rc = capi.SQLITE_IOERR_WRITE; + } + mTimeEnd(); + return rc; + } + }/*ioSyncWrappers*/; + + /** + Impls for the sqlite3_vfs methods. Maintenance reminder: members + are in alphabetical order to simplify finding them. + */ + const vfsSyncWrappers = { + xAccess: function(pVfs,zName,flags,pOut){ + mTimeStart('xAccess'); + const rc = opRun('xAccess', wasm.cstringToJs(zName)); + wasm.setMemValue( pOut, (rc ? 
0 : 1), 'i32' ); + mTimeEnd(); + return 0; + }, + xCurrentTime: function(pVfs,pOut){ + /* If it turns out that we need to adjust for timezone, see: + https://stackoverflow.com/a/11760121/1458521 */ + wasm.setMemValue(pOut, 2440587.5 + (new Date().getTime()/86400000), + 'double'); + return 0; + }, + xCurrentTimeInt64: function(pVfs,pOut){ + // TODO: confirm that this calculation is correct + wasm.setMemValue(pOut, (2440587.5 * 86400000) + new Date().getTime(), + 'i64'); + return 0; + }, + xDelete: function(pVfs, zName, doSyncDir){ + mTimeStart('xDelete'); + opRun('xDelete', wasm.cstringToJs(zName), doSyncDir, false); + /* We're ignoring errors because we cannot yet differentiate + between harmless and non-harmless failures. */ + mTimeEnd(); + return 0; + }, + xFullPathname: function(pVfs,zName,nOut,pOut){ + /* Until/unless we have some notion of "current dir" + in OPFS, simply copy zName to pOut... */ + const i = wasm.cstrncpy(pOut, zName, nOut); + return ipMethods is NULL. */ + if(fh.readOnly){ + wasm.setMemValue(pOutFlags, capi.SQLITE_OPEN_READONLY, 'i32'); + } + __openFiles[pFile] = fh; + fh.sabView = state.sabFileBufView; + fh.sq3File = new sqlite3_file(pFile); + fh.sq3File.$pMethods = opfsIoMethods.pointer; + fh.lockType = capi.SQLITE_LOCK_NONE; + } + mTimeEnd(); + return rc; + }/*xOpen()*/ + }/*vfsSyncWrappers*/; + + if(dVfs){ + opfsVfs.$xRandomness = dVfs.$xRandomness; + opfsVfs.$xSleep = dVfs.$xSleep; + } + if(!opfsVfs.$xRandomness){ + /* If the default VFS has no xRandomness(), add a basic JS impl... */ + vfsSyncWrappers.xRandomness = function(pVfs, nOut, pOut){ + const heap = wasm.heap8u(); + let i = 0; + for(; i < nOut; ++i) heap[pOut + i] = (Math.random()*255000) & 0xFF; + return i; + }; + } + if(!opfsVfs.$xSleep){ + /* If we can inherit an xSleep() impl from the default VFS then + assume it's sane and use it, otherwise install a JS-based + one. */ + vfsSyncWrappers.xSleep = function(pVfs,ms){ + Atomics.wait(state.sabOPView, state.opIds.xSleep, 0, ms); + return 0; + }; + } + + /* Install the vfs/io_methods into their C-level shared instances... */ + for(let k of Object.keys(ioSyncWrappers)){ + installMethod(opfsIoMethods, k, ioSyncWrappers[k]); + } + for(let k of Object.keys(vfsSyncWrappers)){ + installMethod(opfsVfs, k, vfsSyncWrappers[k]); + } + + /** + Expects an OPFS file path. It gets resolved, such that ".." + components are properly expanded, and returned. If the 2nd arg + is true, the result is returned as an array of path elements, + else an absolute path string is returned. + */ + opfsUtil.getResolvedPath = function(filename,splitIt){ + const p = new URL(filename, "file://irrelevant").pathname; + return splitIt ? p.split('/').filter((v)=>!!v) : p; + }; + + /** + Takes the absolute path to a filesystem element. Returns an + array of [handleOfContainingDir, filename]. If the 2nd argument + is truthy then each directory element leading to the file is + created along the way. Throws if any creation or resolution + fails. + */ + opfsUtil.getDirForFilename = async function f(absFilename, createDirs = false){ + const path = opfsUtil.getResolvedPath(absFilename, true); + const filename = path.pop(); + let dh = opfsUtil.rootDirectory; + for(const dirName of path){ + if(dirName){ + dh = await dh.getDirectoryHandle(dirName, {create: !!createDirs}); + } + } + return [dh, filename]; + }; + + /** + Creates the given directory name, recursively, in + the OPFS filesystem. Returns true if it succeeds or the + directory already exists, else false. 
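+
+     A usage sketch (the directory name is hypothetical), callable
+     from any async context once the VFS has been installed and
+     sqlite3.opfs is therefore populated:
+
+     ```
+     // Creates /my-app/data, including any missing parent directories.
+     const ok = await sqlite3.opfs.mkdir('/my-app/data');
+     if(!ok) console.warn("mkdir() failed");
+     ```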
+ */ + opfsUtil.mkdir = async function(absDirName){ + try { + await opfsUtil.getDirForFilename(absDirName+"/filepart", true); + return true; + }catch(e){ + //console.warn("mkdir(",absDirName,") failed:",e); + return false; + } + }; + /** + Checks whether the given OPFS filesystem entry exists, + returning true if it does, false if it doesn't. + */ + opfsUtil.entryExists = async function(fsEntryName){ + try { + const [dh, fn] = await opfsUtil.getDirForFilename(fsEntryName); + await dh.getFileHandle(fn); + return true; + }catch(e){ + return false; + } + }; + + /** + Generates a random ASCII string, intended for use as a + temporary file name. Its argument is the length of the string, + defaulting to 16. + */ + opfsUtil.randomFilename = randomFilename; + + /** + Re-registers the OPFS VFS. This is intended only for odd use + cases which have to call sqlite3_shutdown() as part of their + initialization process, which will unregister the VFS + registered by installOpfsVfs(). If passed a truthy value, the + OPFS VFS is registered as the default VFS, else it is not made + the default. Returns the result of the the + sqlite3_vfs_register() call. + + Design note: the problem of having to re-register things after + a shutdown/initialize pair is more general. How to best plug + that in to the library is unclear. In particular, we cannot + hook in to any C-side calls to sqlite3_initialize(), so we + cannot add an after-initialize callback mechanism. + */ + opfsUtil.registerVfs = (asDefault=false)=>{ + return wasm.exports.sqlite3_vfs_register( + opfsVfs.pointer, asDefault ? 1 : 0 + ); + }; + + /** + Returns a promise which resolves to an object which represents + all files and directories in the OPFS tree. The top-most object + has two properties: `dirs` is an array of directory entries + (described below) and `files` is a list of file names for all + files in that directory. + + Traversal starts at sqlite3.opfs.rootDirectory. + + Each `dirs` entry is an object in this form: + + ``` + { name: directoryName, + dirs: [...subdirs], + files: [...file names] + } + ``` + + The `files` and `subdirs` entries are always set but may be + empty arrays. + + The returned object has the same structure but its `name` is + an empty string. All returned objects are created with + Object.create(null), so have no prototype. + + Design note: the entries do not contain more information, + e.g. file sizes, because getting such info is not only + expensive but is subject to locking-related errors. + */ + opfsUtil.treeList = async function(){ + const doDir = async function callee(dirHandle,tgt){ + tgt.name = dirHandle.name; + tgt.dirs = []; + tgt.files = []; + for await (const handle of dirHandle.values()){ + if('directory' === handle.kind){ + const subDir = Object.create(null); + tgt.dirs.push(subDir); + await callee(handle, subDir); + }else{ + tgt.files.push(handle.name); + } + } + }; + const root = Object.create(null); + await doDir(opfsUtil.rootDirectory, root); + return root; + }; + + /** + Irrevocably deletes _all_ files in the current origin's OPFS. + Obviously, this must be used with great caution. It may throw + an exception if removal of anything fails (e.g. a file is + locked), but the precise conditions under which it will throw + are not documented (so we cannot tell you what they are). 
+  */
+  opfsUtil.rmfr = async function(){
+    const dir = opfsUtil.rootDirectory, opt = {recursive: true};
+    for await (const handle of dir.values()){
+      dir.removeEntry(handle.name, opt);
+    }
+  };
+
+  /**
+     Deletes the given OPFS filesystem entry. As this environment
+     has no notion of "current directory", the given name must be an
+     absolute path. If the 2nd argument is truthy, deletion is
+     recursive (use with caution!).
+
+     The returned Promise resolves to true if the deletion was
+     successful, else false (but...). The OPFS API reports the
+     reason for the failure only in human-readable form, not
+     exceptions which can be type-checked to determine the
+     failure. Because of that...
+
+     If the final argument is truthy then this function will
+     propagate any exception on error, rather than returning false.
+  */
+  opfsUtil.unlink = async function(fsEntryName, recursive = false,
+                                   throwOnError = false){
+    try {
+      const [hDir, filenamePart] =
+            await opfsUtil.getDirForFilename(fsEntryName, false);
+      await hDir.removeEntry(filenamePart, {recursive});
+      return true;
+    }catch(e){
+      if(throwOnError){
+        throw new Error("unlink("+arguments[0]+") failed: "+e.message,{
+          cause: e
+        });
+      }
+      return false;
+    }
+  };
+
+  /**
+     Traverses the OPFS filesystem, calling a callback for each
+     entry. The argument may be either a callback function or an
+     options object with any of the following properties:
+
+     - `callback`: function which gets called for each filesystem
+       entry. It gets passed 3 arguments: 1) the
+       FileSystemFileHandle or FileSystemDirectoryHandle of each
+       entry (noting that both are instanceof FileSystemHandle). 2)
+       the FileSystemDirectoryHandle of the parent directory. 3) the
+       current depth level, with 0 being at the top of the tree
+       relative to the starting directory. If the callback returns a
+       literal false, as opposed to any other falsy value, traversal
+       stops without an error. Any exceptions it throws are
+       propagated. Results are undefined if the callback manipulates
+       the filesystem (e.g. removing or adding entries) because how
+       OPFS iterators behave in the face of such changes is
+       undocumented.
+
+     - `recursive` [bool=true]: specifies whether to recurse into
+       subdirectories or not. Whether recursion is depth-first or
+       breadth-first is unspecified!
+
+     - `directory` [FileSystemDirectoryEntry=sqlite3.opfs.rootDirectory]
+       specifies the starting directory.
+
+     If this function is passed a function, it is assumed to be the
+     callback.
+
+     Returns a promise because it has to (by virtue of being async)
+     but that promise has no specific meaning: the traversal it
+     performs is synchronous. The promise must be used to catch any
+     exceptions propagated by the callback, however.
+
+     TODO: add an option which specifies whether to traverse
+     depth-first or breadth-first. We currently do depth-first but
+     an incremental file browsing widget would benefit more from
+     breadth-first.
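+
+     A usage sketch (illustrative only) which logs every entry under
+     the OPFS root, indented by depth:
+
+     ```
+     await sqlite3.opfs.traverse({
+       recursive: true,
+       directory: sqlite3.opfs.rootDirectory,
+       callback: (handle, parentDir, depth)=>{
+         console.log("  ".repeat(depth) + handle.kind + ": " + handle.name);
+       }
+     });
+     ```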
+ */ + opfsUtil.traverse = async function(opt){ + const defaultOpt = { + recursive: true, + directory: opfsUtil.rootDirectory + }; + if('function'===typeof opt){ + opt = {callback:opt}; + } + opt = Object.assign(defaultOpt, opt||{}); + const doDir = async function callee(dirHandle, depth){ + for await (const handle of dirHandle.values()){ + if(false === opt.callback(handle, dirHandle, depth)) return false; + else if(opt.recursive && 'directory' === handle.kind){ + if(false === await callee(handle, depth + 1)) break; + } + } + }; + doDir(opt.directory, 0); + }; + + //TODO to support fiddle and worker1 db upload: + //opfsUtil.createFile = function(absName, content=undefined){...} + + if(sqlite3.oo1){ + opfsUtil.OpfsDb = function(...args){ + const opt = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs(...args); + opt.vfs = opfsVfs.$zName; + sqlite3.oo1.DB.dbCtorHelper.call(this, opt); + }; + opfsUtil.OpfsDb.prototype = Object.create(sqlite3.oo1.DB.prototype); + sqlite3.oo1.DB.dbCtorHelper.setVfsPostOpenSql( + opfsVfs.pointer, + [ + /* Truncate journal mode is faster than delete or wal for + this vfs, per speedtest1. */ + "pragma journal_mode=truncate;" + /* + This vfs benefits hugely from cache on moderate/large + speedtest1 --size 50 and --size 100 workloads. We currently + rely on setting a non-default cache size when building + sqlite3.wasm. If that policy changes, the cache can + be set here. + */ + //"pragma cache_size=-8388608;" + ].join('') + ); + } + + /** + Potential TODOs: + + - Expose one or both of the Worker objects via opfsUtil and + publish an interface for proxying the higher-level OPFS + features like getting a directory listing. + */ + const sanityCheck = function(){ + const scope = wasm.scopedAllocPush(); + const sq3File = new sqlite3_file(); + try{ + const fid = sq3File.pointer; + const openFlags = capi.SQLITE_OPEN_CREATE + | capi.SQLITE_OPEN_READWRITE + //| capi.SQLITE_OPEN_DELETEONCLOSE + | capi.SQLITE_OPEN_MAIN_DB; + const pOut = wasm.scopedAlloc(8); + const dbFile = "/sanity/check/file"+randomFilename(8); + const zDbFile = wasm.scopedAllocCString(dbFile); + let rc; + state.s11n.serialize("This is ä string."); + rc = state.s11n.deserialize(); + log("deserialize() says:",rc); + if("This is ä string."!==rc[0]) toss("String d13n error."); + vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut); + rc = wasm.getMemValue(pOut,'i32'); + log("xAccess(",dbFile,") exists ?=",rc); + rc = vfsSyncWrappers.xOpen(opfsVfs.pointer, zDbFile, + fid, openFlags, pOut); + log("open rc =",rc,"state.sabOPView[xOpen] =", + state.sabOPView[state.opIds.xOpen]); + if(0!==rc){ + error("open failed with code",rc); + return; + } + vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut); + rc = wasm.getMemValue(pOut,'i32'); + if(!rc) toss("xAccess() failed to detect file."); + rc = ioSyncWrappers.xSync(sq3File.pointer, 0); + if(rc) toss('sync failed w/ rc',rc); + rc = ioSyncWrappers.xTruncate(sq3File.pointer, 1024); + if(rc) toss('truncate failed w/ rc',rc); + wasm.setMemValue(pOut,0,'i64'); + rc = ioSyncWrappers.xFileSize(sq3File.pointer, pOut); + if(rc) toss('xFileSize failed w/ rc',rc); + log("xFileSize says:",wasm.getMemValue(pOut, 'i64')); + rc = ioSyncWrappers.xWrite(sq3File.pointer, zDbFile, 10, 1); + if(rc) toss("xWrite() failed!"); + const readBuf = wasm.scopedAlloc(16); + rc = ioSyncWrappers.xRead(sq3File.pointer, readBuf, 6, 2); + wasm.setMemValue(readBuf+6,0); + let jRead = wasm.cstringToJs(readBuf); + log("xRead() got:",jRead); + if("sanity"!==jRead) toss("Unexpected xRead() value."); + 
if(vfsSyncWrappers.xSleep){ + log("xSleep()ing before close()ing..."); + vfsSyncWrappers.xSleep(opfsVfs.pointer,2000); + log("waking up from xSleep()"); + } + rc = ioSyncWrappers.xClose(fid); + log("xClose rc =",rc,"sabOPView =",state.sabOPView); + log("Deleting file:",dbFile); + vfsSyncWrappers.xDelete(opfsVfs.pointer, zDbFile, 0x1234); + vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut); + rc = wasm.getMemValue(pOut,'i32'); + if(rc) toss("Expecting 0 from xAccess(",dbFile,") after xDelete()."); + warn("End of OPFS sanity checks."); + }finally{ + sq3File.dispose(); + wasm.scopedAllocPop(scope); + } + }/*sanityCheck()*/; + + W.onmessage = function({data}){ + //log("Worker.onmessage:",data); + switch(data.type){ + case 'opfs-async-loaded': + /*Arrives as soon as the asyc proxy finishes loading. + Pass our config and shared state on to the async worker.*/ + W.postMessage({type: 'opfs-async-init',args: state}); + break; + case 'opfs-async-inited':{ + /*Indicates that the async partner has received the 'init' + and has finished initializing, so the real work can + begin...*/ + try { + const rc = capi.sqlite3_vfs_register(opfsVfs.pointer, 0); + if(rc){ + toss("sqlite3_vfs_register(OPFS) failed with rc",rc); + } + if(opfsVfs.pointer !== capi.sqlite3_vfs_find("opfs")){ + toss("BUG: sqlite3_vfs_find() failed for just-installed OPFS VFS"); + } + capi.sqlite3_vfs_register.addReference(opfsVfs, opfsIoMethods); + state.sabOPView = new Int32Array(state.sabOP); + state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize); + state.sabS11nView = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize); + initS11n(); + if(options.sanityChecks){ + warn("Running sanity checks because of opfs-sanity-check URL arg..."); + sanityCheck(); + } + navigator.storage.getDirectory().then((d)=>{ + W.onerror = W._originalOnError; + delete W._originalOnError; + sqlite3.opfs = opfsUtil; + opfsUtil.rootDirectory = d; + log("End of OPFS sqlite3_vfs setup.", opfsVfs); + promiseResolve(sqlite3); + }); + }catch(e){ + error(e); + promiseReject(e); + } + break; + } + default: + promiseReject(e); + error("Unexpected message from the async worker:",data); + break; + }/*switch(data.type)*/ + }/*W.onmessage()*/; + })/*thePromise*/; + return thePromise; +}/*installOpfsVfs()*/; +installOpfsVfs.defaultProxyUri = + "sqlite3-opfs-async-proxy.js"; +self.sqlite3ApiBootstrap.initializersAsync.push(async (sqlite3)=>{ + if(sqlite3.scriptInfo && !sqlite3.scriptInfo.isWorker){ + return; + } + try{ + let proxyJs = installOpfsVfs.defaultProxyUri; + if(sqlite3.scriptInfo.sqlite3Dir){ + installOpfsVfs.defaultProxyUri = + sqlite3.scriptInfo.sqlite3Dir + proxyJs; + //console.warn("installOpfsVfs.defaultProxyUri =",installOpfsVfs.defaultProxyUri); + } + return installOpfsVfs().catch((e)=>{ + console.warn("Ignoring inability to install OPFS sqlite3_vfs:",e.message); + }); + }catch(e){ + console.error("installOpfsVfs() exception:",e); + throw e; + } +}); +}/*sqlite3ApiBootstrap.initializers.push()*/); Index: ext/wasm/api/sqlite3-api-prologue.js ================================================================== --- ext/wasm/api/sqlite3-api-prologue.js +++ ext/wasm/api/sqlite3-api-prologue.js @@ -15,26 +15,33 @@ file into an Emscripten Module.postRun() handler which has a parameter named "Module" (the Emscripten Module object). The exact requirements, conventions, and build process are very much under construction and will be (re)documented once they've stopped fluctuating so much. 
- Specific goals of this project: + Project home page: https://sqlite.org + + Documentation home page: https://sqlite.org/wasm + + Specific goals of this subproject: - Except where noted in the non-goals, provide a more-or-less feature-complete wrapper to the sqlite3 C API, insofar as WASM - feature parity with C allows for. In fact, provide at least 3 + feature parity with C allows for. In fact, provide at least 4 APIs... - 1) Bind a low-level sqlite3 API which is as close to the native - one as feasible in terms of usage. + 1) 1-to-1 bindings as exported from WASM, with no automatic + type conversions between JS and C. + + 2) A binding of (1) which provides certain JS/C type conversions + to greatly simplify its use. - 2) A higher-level API, more akin to sql.js and node.js-style + 3) A higher-level API, more akin to sql.js and node.js-style implementations. This one speaks directly to the low-level API. This API must be used from the same thread as the low-level API. - 3) A second higher-level API which speaks to the previous APIs via + 4) A second higher-level API which speaks to the previous APIs via worker messages. This one is intended for use in the main thread, with the lower-level APIs installed in a Worker thread, and talking to them via Worker messages. Because Workers are asynchronouns and have only a single message channel, some acrobatics are needed here to feed async work results back to @@ -41,23 +48,22 @@ the client (as we cannot simply pass around callbacks between the main and Worker threads). - Insofar as possible, support client-side storage using JS filesystem APIs. As of this writing, such things are still very - much TODO. Initial testing with using IndexedDB as backing storage - showed it to work reasonably well, but it's also too easy to - corrupt by using a web page in two browser tabs because IndexedDB - lacks the locking features needed to support that. + much under development. Specific non-goals of this project: - As WASM is a web-centric technology and UTF-8 is the King of Encodings in that realm, there are no currently plans to support the UTF16-related sqlite3 APIs. They would add a complication to the bindings for no appreciable benefit. Though web-related - implementation details take priority, the lower-level WASM module - "should" work in non-web WASM environments. + implementation details take priority, and the JavaScript + components of the API specifically focus on browser clients, the + lower-level WASM module "should" work in non-web WASM + environments. - Supporting old or niche-market platforms. WASM is built for a modern web and requires modern platforms. - Though scalar User-Defined Functions (UDFs) may be created in @@ -76,45 +82,263 @@ C-bound callback functions). These APIs have a considerably different shape than sql.js's, however. */ /** - This global symbol is is only a temporary measure: the JS-side - post-processing will remove that object from the global scope when - setup is complete. We require it there temporarily in order to glue - disparate parts together during the loading of the API (which spans - several components). + sqlite3ApiBootstrap() is the only global symbol persistently + exposed by this API. It is intended to be called one time at the + end of the API amalgamation process, passed configuration details + for the current environment, and then optionally be removed from + the global object using `delete self.sqlite3ApiBootstrap`. 
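+
+  For illustration, a typical bootstrap sequence in an
+  Emscripten-based build might look like the following sketch. The
+  placement inside a Module.postRun() handler and the config values
+  shown are assumptions based on the config documentation below, not
+  requirements:
+
+  ```
+  // Inside an Emscripten Module.postRun() handler...
+  self.sqlite3ApiConfig = {
+    exports: Module['asm'],
+    memory: Module.wasmMemory // only when built with -sIMPORT_MEMORY
+  };
+  const sqlite3 = self.sqlite3ApiBootstrap();
+  delete self.sqlite3ApiBootstrap; // optional, as described above
+  ```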
- This function requires a configuration object intended to abstract + This function expects a configuration object, intended to abstract away details specific to any given WASM environment, primarily so - that it can be used without any _direct_ dependency on Emscripten. - (That said, OO API #1 requires, as of this writing, Emscripten's - virtual filesystem API. Baby steps.) + that it can be used without any _direct_ dependency on + Emscripten. (Note the default values for the config object!) The + config object is only honored the first time this is + called. Subsequent calls ignore the argument and return the same + (configured) object which gets initialized by the first call. + This function will throw if any of the required config options are + missing. + + The config object properties include: + + - `exports`[^1]: the "exports" object for the current WASM + environment. In an Emscripten-based build, this should be set to + `Module['asm']`. + + - `memory`[^1]: optional WebAssembly.Memory object, defaulting to + `exports.memory`. In Emscripten environments this should be set + to `Module.wasmMemory` if the build uses `-sIMPORT_MEMORY`, or be + left undefined/falsy to default to `exports.memory` when using + WASM-exported memory. + + - `bigIntEnabled`: true if BigInt support is enabled. Defaults to + true if `self.BigInt64Array` is available, else false. Some APIs + will throw exceptions if called without BigInt support, as BigInt + is required for marshalling C-side int64 into and out of JS. + + - `allocExportName`: the name of the function, in `exports`, of the + `malloc(3)`-compatible routine for the WASM environment. Defaults + to `"malloc"`. + + - `deallocExportName`: the name of the function, in `exports`, of + the `free(3)`-compatible routine for the WASM + environment. Defaults to `"free"`. + + - `wasmfsOpfsDir`[^1]: if the environment supports persistent + storage, this directory names the "mount point" for that + directory. It must be prefixed by `/` and may contain only a + single directory-name part. Using the root directory name is not + supported by any current persistent backend. This setting is + only used in WASMFS-enabled builds. + + + [^1] = This property may optionally be a function, in which case this + function re-assigns it to the value returned from that function, + enabling delayed evaluation. + */ -self.sqlite3ApiBootstrap = function(config){ - 'use strict'; +'use strict'; +self.sqlite3ApiBootstrap = function sqlite3ApiBootstrap( + apiConfig = (self.sqlite3ApiConfig || sqlite3ApiBootstrap.defaultConfig) +){ + if(sqlite3ApiBootstrap.sqlite3){ /* already initalized */ + console.warn("sqlite3ApiBootstrap() called multiple times.", + "Config and external initializers are ignored on calls after the first."); + return sqlite3ApiBootstrap.sqlite3; + } + const config = Object.assign(Object.create(null),{ + exports: undefined, + memory: undefined, + bigIntEnabled: (()=>{ + if('undefined'!==typeof Module){ + /* Emscripten module will contain HEAPU64 when built with + -sWASM_BIGINT=1, else it will not. */ + return !!Module.HEAPU64; + } + return !!self.BigInt64Array; + })(), + allocExportName: 'malloc', + deallocExportName: 'free', + wasmfsOpfsDir: '/opfs' + }, apiConfig || {}); - /** Throws a new Error, the message of which is the concatenation - all args with a space between each. */ - const toss = (...args)=>{throw new Error(args.join(' '))}; + [ + // If any of these config options are functions, replace them with + // the result of calling that function... 
+    'exports', 'memory', 'wasmfsOpfsDir'
+  ].forEach((k)=>{
+    if('function' === typeof config[k]){
+      config[k] = config[k]();
+    }
+  });
+
+  /**
+     The main sqlite3 binding API gets installed into this object,
+     mimicking the C API as closely as we can. The numerous members
+     whose names have the prefixes 'sqlite3_' and 'SQLITE_' behave,
+     insofar as possible, identically to the C-native counterparts,
+     as documented at:
+
+     https://www.sqlite.org/c3ref/intro.html
+
+     A very few exceptions require an additional level of proxy
+     function or may otherwise require special attention in the WASM
+     environment, and all such cases are documented somewhere below
+     in this file or in sqlite3-api-glue.js. capi members which are
+     not documented are installed as 1-to-1 proxies for their
+     C-side counterparts.
+  */
+  const capi = Object.create(null);
+  /**
+     Holds state which is specific to the WASM-related
+     infrastructure and glue code. It is not expected that client
+     code will normally need these, but they're exposed here in case
+     it does. These APIs are _not_ to be considered an
+     official/stable part of the sqlite3 WASM API. They may change
+     as the developers' experience suggests appropriate changes.
+
+     Note that a number of members of this object are injected
+     dynamically after the api object is fully constructed, so
+     not all are documented in this file.
+  */
+  const wasm = Object.create(null);
+
+  /** Internal helper for SQLite3Error ctor. */
+  const __rcStr = (rc)=>{
+    return (capi.sqlite3_js_rc_str && capi.sqlite3_js_rc_str(rc))
+           || ("Unknown result code #"+rc);
+  };
+
+  /** Internal helper for SQLite3Error ctor. */
+  const __isInt = (n)=>'number'===typeof n && n===(n | 0);
+
+  /**
+     An Error subclass specifically for reporting DB-level errors and
+     enabling clients to unambiguously identify such exceptions.
+     The C-level APIs never throw, but some of the higher-level
+     C-style APIs do and the object-oriented APIs use exceptions
+     exclusively to report errors.
+  */
+  class SQLite3Error extends Error {
+    /**
+       Constructs this object with a message depending on its arguments:
+
+       - If it's passed only a single integer argument, it is assumed
+         to be an sqlite3 C API result code. The message becomes the
+         result of sqlite3.capi.sqlite3_js_rc_str() or (if that returns
+         falsy) a synthesized string which contains that integer.
+
+       - If passed 2 arguments and the 2nd is an object, it behaves
+         like the Error(string,object) constructor except that the first
+         argument is subject to the is-integer semantics from the
+         previous point.
+
+       - Else all arguments are concatenated with a space between each
+         one, using args.join(' '), to create the error message.
+    */
+    constructor(...args){
+      if(1===args.length && __isInt(args[0])){
+        super(__rcStr(args[0]));
+      }else if(2===args.length && 'object'===typeof args[1]){
+        if(__isInt(args[0])) super(__rcStr(args[0]), args[1]);
+        else super(...args);
+      }else{
+        super(args.join(' '));
+      }
+      this.name = 'SQLite3Error';
+    }
+  };
+
+  /**
+     Functionally equivalent to the SQLite3Error constructor but may
+     be used as part of an expression, e.g.:
+
+     ```
+     return someFunction(x) || SQLite3Error.toss(...);
+     ```
+  */
+  SQLite3Error.toss = (...args)=>{
+    throw new SQLite3Error(...args);
+  };
+  const toss3 = SQLite3Error.toss;
+
+  if(config.wasmfsOpfsDir && !/^\/[^/]+$/.test(config.wasmfsOpfsDir)){
+    toss3("config.wasmfsOpfsDir must be falsy or in the form '/dir-name'.");
+  }
   /**
      Returns true if n is a 32-bit (signed) integer, else false.
This is used for determining when we need to switch to double-type DB operations for integer values in order to keep more precision. */ - const isInt32 = function(n){ + const isInt32 = (n)=>{ return ('bigint'!==typeof n /*TypeError: can't convert BigInt to number*/) && !!(n===(n|0) && n<=2147483647 && n>=-2147483648); }; + /** + Returns true if the given BigInt value is small enough to fit + into an int64 value, else false. + */ + const bigIntFits64 = function f(b){ + if(!f._max){ + f._max = BigInt("0x7fffffffffffffff"); + f._min = ~f._max; + } + return b >= f._min && b <= f._max; + }; + + /** + Returns true if the given BigInt value is small enough to fit + into an int32, else false. + */ + const bigIntFits32 = (b)=>(b >= (-0x7fffffffn - 1n) && b <= 0x7fffffffn); + + /** + Returns true if the given BigInt value is small enough to fit + into a double value without loss of precision, else false. + */ + const bigIntFitsDouble = function f(b){ + if(!f._min){ + f._min = Number.MIN_SAFE_INTEGER; + f._max = Number.MAX_SAFE_INTEGER; + } + return b >= f._min && b <= f._max; + }; /** Returns v if v appears to be a TypedArray, else false. */ const isTypedArray = (v)=>{ return (v && v.constructor && isInt32(v.constructor.BYTES_PER_ELEMENT)) ? v : false; }; + + /** Internal helper to use in operations which need to distinguish + between TypedArrays which are backed by a SharedArrayBuffer + from those which are not. */ + const __SAB = ('undefined'===typeof SharedArrayBuffer) + ? function(){} : SharedArrayBuffer; + /** Returns true if the given TypedArray object is backed by a + SharedArrayBuffer, else false. */ + const isSharedTypedArray = (aTypedArray)=>(aTypedArray.buffer instanceof __SAB); + + /** + Returns either aTypedArray.slice(begin,end) (if + aTypedArray.buffer is a SharedArrayBuffer) or + aTypedArray.subarray(begin,end) (if it's not). + + This distinction is important for APIs which don't like to + work on SABs, e.g. TextDecoder, and possibly for our + own APIs which work on memory ranges which "might" be + modified by other threads while they're working. + */ + const typedArrayPart = (aTypedArray, begin, end)=>{ + return isSharedTypedArray(aTypedArray) + ? aTypedArray.slice(begin, end) + : aTypedArray.subarray(begin, end); + }; + /** Returns true if v appears to be one of our bind()-able TypedArray types: Uint8Array or Int8Array. Support for TypedArrays with element sizes >1 is TODO. */ @@ -137,103 +361,208 @@ /** Returns true if isBindableTypedArray(v) does, else throws with a message that v is not a supported TypedArray value. */ const affirmBindableTypedArray = (v)=>{ return isBindableTypedArray(v) - || toss("Value is not of a supported TypedArray type."); + || toss3("Value is not of a supported TypedArray type."); }; const utf8Decoder = new TextDecoder('utf-8'); - const typedArrayToString = (str)=>utf8Decoder.decode(str); + + /** + Uses TextDecoder to decode the given half-open range of the + given TypedArray to a string. This differs from a simple + call to TextDecoder in that it accounts for whether the + first argument is backed by a SharedArrayBuffer or not, + and can work more efficiently if it's not (TextDecoder + refuses to act upon an SAB). + */ + const typedArrayToString = function(typedArray, begin, end){ + return utf8Decoder.decode(typedArrayPart(typedArray, begin,end)); + }; + + /** + If v is-a Array, its join("") result is returned. If + isSQLableTypedArray(v) is true then typedArrayToString(v) is + returned. 
If it looks like a WASM pointer, wasm.cstringToJs(v) is + returned. Else v is returned as-is. + */ + const flexibleString = function(v){ + if(isSQLableTypedArray(v)) return typedArrayToString(v); + else if(Array.isArray(v)) return v.join(""); + else if(wasm.isPtr(v)) v = wasm.cstringToJs(v); + return v; + }; /** An Error subclass specifically for reporting Wasm-level malloc() failure and enabling clients to unambiguously identify such exceptions. */ class WasmAllocError extends Error { + /** + If called with 2 arguments and the 2nd one is an object, it + behaves like the Error constructor, else it concatenates all + arguments together with a single space between each to + construct an error message string. As a special case, if + called with no arguments then it uses a default error + message. + */ constructor(...args){ - super(...args); + if(2===args.length && 'object'===typeof args){ + super(...args); + }else if(args.length){ + super(args.join(' ')); + }else{ + super("Allocation failed."); + } this.name = 'WasmAllocError'; } }; - - /** - The main sqlite3 binding API gets installed into this object, - mimicking the C API as closely as we can. The numerous members - names with prefixes 'sqlite3_' and 'SQLITE_' behave, insofar as - possible, identically to the C-native counterparts, as documented at: - - https://www.sqlite.org/c3ref/intro.html - - A very few exceptions require an additional level of proxy - function or may otherwise require special attention in the WASM - environment, and all such cases are document here. Those not - documented here are installed as 1-to-1 proxies for their C-side - counterparts. - */ - const capi = { - /** - An Error subclass which is thrown by this object's alloc() method - on OOM. - */ - WasmAllocError: WasmAllocError, - /** - The API's one single point of access to the WASM-side memory - allocator. Works like malloc(3) (and is likely bound to - malloc()) but throws an WasmAllocError if allocation fails. It is - important that any code which might pass through the sqlite3 C - API NOT throw and must instead return SQLITE_NOMEM (or - equivalent, depending on the context). - - That said, very few cases in the API can result in - client-defined functions propagating exceptions via the C-style - API. Most notably, this applies ot User-defined SQL Functions - (UDFs) registered via sqlite3_create_function_v2(). For that - specific case it is recommended that all UDF creation be - funneled through a utility function and that a wrapper function - be added around the UDF which catches any exception and sets - the error state to OOM. (The overall complexity of registering - UDFs essentially requires a helper for doing so!) - */ - alloc: undefined/*installed later*/, - /** - The API's one single point of access to the WASM-side memory - deallocator. Works like free(3) (and is likely bound to - free()). - */ - dealloc: undefined/*installed later*/, - /** - When using sqlite3_open_v2() it is important to keep the following - in mind: - - https://www.sqlite.org/c3ref/open.html - - - The flags for use with its 3rd argument are installed in this - object using the C-cide names, e.g. SQLITE_OPEN_CREATE. - - - If the combination of flags passed to it are invalid, - behavior is undefined. Thus is is never okay to call this - with fewer than 3 arguments, as JS will default the - missing arguments to `undefined`, which will result in a - flag value of 0. Most of the available SQLITE_OPEN_xxx - flags are meaningless in the WASM build, e.g. 
the mutext- - and cache-related flags, but they are retained in this - API for consistency's sake. - - - The final argument to this function specifies the VFS to - use, which is largely (but not entirely!) meaningless in - the WASM environment. It should always be null or - undefined, and it is safe to elide that argument when - calling this function. - */ - sqlite3_open_v2: function(filename,dbPtrPtr,flags,vfsStr){}/*installed later*/, + /** + Functionally equivalent to the WasmAllocError constructor but may + be used as part of an expression, e.g.: + + ``` + return someAllocatingFunction(x) || WasmAllocError.toss(...); + ``` + */ + WasmAllocError.toss = (...args)=>{ + throw new WasmAllocError(...args); + }; + + Object.assign(capi, { + /** + sqlite3_create_function_v2() differs from its native + counterpart only in the following ways: + + 1) The fourth argument (`eTextRep`) argument must not specify + any encoding other than sqlite3.SQLITE_UTF8. The JS API does not + currently support any other encoding and likely never + will. This function does not replace that argument on its own + because it may contain other flags. + + 2) Any of the four final arguments may be either WASM pointers + (assumed to be function pointers) or JS Functions. In the + latter case, each gets bound to WASM using + sqlite3.capi.wasm.installFunction() and that wrapper is passed + on to the native implementation. + + The semantics of JS functions are: + + xFunc: is passed `(pCtx, ...values)`. Its return value becomes + the new SQL function's result. + + xStep: is passed `(pCtx, ...values)`. Its return value is + ignored. + + xFinal: is passed `(pCtx)`. Its return value becomes the new + aggregate SQL function's result. + + xDestroy: is passed `(void*)`. Its return value is ignored. The + pointer passed to it is the one from the 5th argument to + sqlite3_create_function_v2(). + + Note that: + + - `pCtx` in the above descriptions is a `sqlite3_context*`. At + least 99 times out of a hundred, that initial argument will + be irrelevant for JS UDF bindings, but it needs to be there + so that the cases where it _is_ relevant, in particular with + window and aggregate functions, have full access to the + lower-level sqlite3 APIs. + + - When wrapping JS functions, the remaining arguments are passd + to them as positional arguments, not as an array of + arguments, because that allows callback definitions to be + more JS-idiomatic than C-like. For example `(pCtx,a,b)=>a+b` + is more intuitive and legible than + `(pCtx,args)=>args[0]+args[1]`. For cases where an array of + arguments would be more convenient, the callbacks simply need + to be declared like `(pCtx,...args)=>{...}`, in which case + `args` will be an array. + + - If a JS wrapper throws, it gets translated to + sqlite3_result_error() or sqlite3_result_error_nomem(), + depending on whether the exception is an + sqlite3.WasmAllocError object or not. + + - When passing on WASM function pointers, arguments are _not_ + converted or reformulated. They are passed on as-is in raw + pointer form using their native C signatures. Only JS + functions passed in to this routine, and thus wrapped by this + routine, get automatic conversions of arguments and result + values. The routines which perform those conversions are + exposed for client-side use as + sqlite3_create_function_v2.convertUdfArgs() and + sqlite3_create_function_v2.setUdfResult(). sqlite3_create_function() + and sqlite3_create_window_function() have those same methods. 
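+
+      For illustration, a minimal scalar UDF registration using a JS
+      callback might look like the following sketch, where pDb is a
+      hypothetical (sqlite3*) WASM pointer to an open database:
+
+      ```
+      // Registers a 2-argument scalar SQL function named js_add().
+      capi.sqlite3_create_function_v2(
+        pDb, "js_add", 2, capi.SQLITE_UTF8, 0,
+        (pCtx, a, b)=>a+b, // xFunc: its return value becomes the result
+        0, 0, 0            // xStep, xFinal, xDestroy: unused here
+      );
+      // After which: SELECT js_add(1,2) evaluates to 3.
+      ```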
+ + For xFunc(), xStep(), and xFinal(): + + - When called from SQL, arguments to the UDF, and its result, + will be converted between JS and SQL with as much fidelity as + is feasible, triggering an exception if a type conversion + cannot be determined. Some freedom is afforded to numeric + conversions due to friction between the JS and C worlds: + integers which are larger than 32 bits may be treated as + doubles or BigInts. + + If any JS-side bound functions throw, those exceptions are + intercepted and converted to database-side errors with the + exception of xDestroy(): any exception from it is ignored, + possibly generating a console.error() message. Destructors + must not throw. + + Once installed, there is currently no way to uninstall the + automatically-converted WASM-bound JS functions from WASM. They + can be uninstalled from the database as documented in the C + API, but this wrapper currently has no infrastructure in place + to also free the WASM-bound JS wrappers, effectively resulting + in a memory leak if the client uninstalls the UDF. Improving that + is a potential TODO, but removing client-installed UDFs is rare + in practice. If this factor is relevant for a given client, + they can create WASM-bound JS functions themselves, hold on to their + pointers, and pass the pointers in to here. Later on, they can + free those pointers (using `wasm.uninstallFunction()` or + equivalent). + + C reference: https://www.sqlite.org/c3ref/create_function.html + + Maintenance reminder: the ability to add new + WASM-accessible functions to the runtime requires that the + WASM build is compiled with emcc's `-sALLOW_TABLE_GROWTH` + flag. + */ + sqlite3_create_function_v2: function( + pDb, funcName, nArg, eTextRep, pApp, + xFunc, xStep, xFinal, xDestroy + ){/*installed later*/}, + /** + Equivalent to passing the same arguments to + sqlite3_create_function_v2(), with 0 as the final argument. + */ + sqlite3_create_function:function( + pDb, funcName, nArg, eTextRep, pApp, + xFunc, xStep, xFinal + ){/*installed later*/}, + /** + The sqlite3_create_window_function() JS wrapper differs from + its native implementation in the exact same way that + sqlite3_create_function_v2() does. The additional function, + xInverse(), is treated identically to xStep() by the wrapping + layer. + */ + sqlite3_create_window_function: function( + pDb, funcName, nArg, eTextRep, pApp, + xStep, xFinal, xValue, xInverse, xDestroy + ){/*installed later*/}, /** The sqlite3_prepare_v3() binding handles two different uses with differing JS/WASM semantics: - 1) sqlite3_prepare_v3(pDb, sqlString, -1, prepFlags, ppStmt [, null]) + 1) sqlite3_prepare_v3(pDb, sqlString, -1, prepFlags, ppStmt , null) 2) sqlite3_prepare_v3(pDb, sqlPointer, sqlByteLen, prepFlags, ppStmt, sqlPointerToPointer) Note that the SQL length argument (the 3rd argument) must, for usage (1), always be negative because it must be a byte length @@ -250,133 +579,192 @@ hold SQL). 
If it is, this function assumes case (1) and calls the underyling C function with the equivalent of: (pDb, sqlAsString, -1, prepFlags, ppStmt, null) - The pzTail argument is ignored in this case because its result - is meaningless when a string-type value is passed through - (because the string goes through another level of internal + The `pzTail` argument is ignored in this case because its + result is meaningless when a string-type value is passed + through: the string goes through another level of internal conversion for WASM's sake and the result pointer would refer to that transient conversion's memory, not the passed-in - string). + string. If the sql argument is not a string, it must be a _pointer_ to a NUL-terminated string which was allocated in the WASM memory - (e.g. using cwapi.wasm.alloc() or equivalent). In that case, + (e.g. using capi.wasm.alloc() or equivalent). In that case, the final argument may be 0/null/undefined or must be a pointer to which the "tail" of the compiled SQL is written, as documented for the C-side sqlite3_prepare_v3(). In case (2), the underlying C function is called with the equivalent of: - (pDb, sqlAsPointer, (sqlByteLen||-1), prepFlags, ppStmt, pzTail) + (pDb, sqlAsPointer, sqlByteLen, prepFlags, ppStmt, pzTail) It returns its result and compiled statement as documented in the C API. Fetching the output pointers (5th and 6th - parameters) requires using capi.wasm.getMemValue() (or - equivalent) and the pzTail will point to an address relative to - the sqlAsPointer value. + parameters) requires using `capi.wasm.getMemValue()` (or + equivalent) and the `pzTail` will point to an address relative to + the `sqlAsPointer` value. If passed an invalid 2nd argument type, this function will - return SQLITE_MISUSE but will unfortunately be able to return - any additional error information because we have no way to set - the db's error state such that this function could return a - non-0 integer and the client could call sqlite3_errcode() or - sqlite3_errmsg() to fetch it. See the RFE at: - - https://sqlite.org/forum/forumpost/f9eb79b11aefd4fc81d - - The alternative would be to throw an exception for that case, - but that would be in strong constrast to the rest of the - C-level API and seems likely to cause more confusion. - - Side-note: in the C API the function does not fail if provided - an empty string but its result output pointer will be NULL. + return SQLITE_MISUSE and sqlite3_errmsg() will contain a string + describing the problem. + + Side-note: if given an empty string, or one which contains only + comments or an empty SQL expression, 0 is returned but the result + output pointer will be NULL. */ - sqlite3_prepare_v3: function(dbPtr, sql, sqlByteLen, prepFlags, - stmtPtrPtr, strPtrPtr){}/*installed later*/, + sqlite3_prepare_v3: (dbPtr, sql, sqlByteLen, prepFlags, + stmtPtrPtr, strPtrPtr)=>{}/*installed later*/, /** Equivalent to calling sqlite3_prapare_v3() with 0 as its 4th argument. */ - sqlite3_prepare_v2: function(dbPtr, sql, sqlByteLen, stmtPtrPtr, - strPtrPtr){}/*installed later*/, - - /** - Various internal-use utilities are added here as needed. They - are bound to an object only so that we have access to them in - the differently-scoped steps of the API bootstrapping - process. At the end of the API setup process, this object gets - removed. 
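A minimal sketch of usage (1) described above, assuming a valid `sqlite3*` in `pDb`, that `wasm` refers to the helper object these comments call `capi.wasm`, that `wasm.getPtrValue()` (or an equivalent pointer-fetching helper) is available, and that result-code constants such as `capi.SQLITE_ROW` are defined:

```
// Hedged sketch: prepare SQL given as a JS string, step through the
// result rows, then finalize. ppStmt is a WASM-side sqlite3_stmt**.
const ppStmt = wasm.alloc(wasm.ptrSizeof);
try {
  const rc = capi.sqlite3_prepare_v3(pDb, "SELECT 1, 'hi'", -1, 0, ppStmt, null);
  if(rc) throw new Error(capi.sqlite3_errmsg(pDb));
  const pStmt = wasm.getPtrValue(ppStmt);
  while(capi.SQLITE_ROW === capi.sqlite3_step(pStmt)){
    console.log(capi.sqlite3_column_text(pStmt, 0),
                capi.sqlite3_column_text(pStmt, 1));
  }
  capi.sqlite3_finalize(pStmt);
} finally {
  wasm.dealloc(ppStmt);
}
```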
- */ - util:{ - isInt32, isTypedArray, isBindableTypedArray, isSQLableTypedArray, - affirmBindableTypedArray, typedArrayToString - }, - - /** - Holds state which are specific to the WASM-related - infrastructure and glue code. It is not expected that client - code will normally need these, but they're exposed here in case - it does. These APIs are _not_ to be considered an - official/stable part of the sqlite3 WASM API. They may change - as the developers' experience suggests appropriate changes. - - Note that a number of members of this object are injected - dynamically after the api object is fully constructed, so - not all are documented inline here. - */ - wasm: { - //^^^ TODO?: move wasm from sqlite3.capi.wasm to sqlite3.wasm - /** - Emscripten APIs have a deep-seated assumption that all pointers - are 32 bits. We'll remain optimistic that that won't always be - the case and will use this constant in places where we might - otherwise use a hard-coded 4. - */ - ptrSizeof: config.wasmPtrSizeof || 4, - /** - The WASM IR (Intermediate Representation) value for - pointer-type values. It MUST refer to a value type of the - size described by this.ptrSizeof _or_ it may be any value - which ends in '*', which Emscripten's glue code internally - translates to i32. - */ - ptrIR: config.wasmPtrIR || "i32", - /** - True if BigInt support was enabled via (e.g.) the - Emscripten -sWASM_BIGINT flag, else false. When - enabled, certain 64-bit sqlite3 APIs are enabled which - are not otherwise enabled due to JS/WASM int64 - impedence mismatches. - */ - bigIntEnabled: !!config.bigIntEnabled, - /** - The symbols exported by the WASM environment. - */ - exports: config.exports - || toss("Missing API config.exports (WASM module exports)."), - - /** - When Emscripten compiles with `-sIMPORT_MEMORY`, it - initalizes the heap and imports it into wasm, as opposed to - the other way around. In this case, the memory is not - available via this.exports.memory. - */ - memory: config.memory || config.exports['memory'] - || toss("API config object requires a WebAssembly.Memory object", - "in either config.exports.memory (exported)", - "or config.memory (imported)."), - /* Many more wasm-related APIs get installed later on. */ - }/*wasm*/ - }/*capi*/; - - /** - capi.wasm.alloc()'s srcTypedArray.byteLength bytes, + sqlite3_prepare_v2: (dbPtr, sql, sqlByteLen, + stmtPtrPtr,strPtrPtr)=>{}/*installed later*/, + + /** + This binding enables the callback argument to be a JavaScript. + + If the callback is a function, then for the duration of the + sqlite3_exec() call, it installs a WASM-bound function which + acts as a proxy for the given callback. That proxy will also + perform a conversion of the callback's arguments from + `(char**)` to JS arrays of strings. However, for API + consistency's sake it will still honor the C-level callback + parameter order and will call it like: + + `callback(pVoid, colCount, listOfValues, listOfColNames)` + + If the callback is not a JS function then this binding performs + no translation of the callback, but the sql argument is still + converted to a WASM string for the call using the + "flexible-string" argument converter. + */ + sqlite3_exec: (pDb, sql, callback, pVoid, pErrMsg)=>{}/*installed later*/, + + /** + If passed a single argument which appears to be a byte-oriented + TypedArray (Int8Array or Uint8Array), this function treats that + TypedArray as an output target, fetches `theArray.byteLength` + bytes of randomness, and populates the whole array with it. 
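For illustration, a hedged sketch of the JS-callback form of `sqlite3_exec()` described above, assuming a valid `sqlite3*` in `pDb`:

```
// Hedged sketch: run a query with a JS callback. The callback keeps
// the C-style argument order but receives JS arrays of strings.
const rc = capi.sqlite3_exec(
  pDb, "SELECT name, type FROM sqlite_schema",
  (pVoid, nCols, aVals, aNames) => {
    console.log(aNames.join(", "), "=", aVals.join(", "));
    return 0; // presumably 0 = keep iterating, as with the native callback
  },
  0, 0 // pVoid and pErrMsg (unused here)
);
if(rc) console.error("exec failed:", capi.sqlite3_errmsg(pDb));
```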
As + a special case, if the array's length is 0, this function + behaves as if it were passed (0,0). When called this way, it + returns its argument, else it returns the `undefined` value. + + If called with any other arguments, they are passed on as-is + to the C API. Results are undefined if passed any incompatible + values. + */ + sqlite3_randomness: (n, outPtr)=>{/*installed later*/}, + }/*capi*/); + + /** + Various internal-use utilities are added here as needed. They + are bound to an object only so that we have access to them in + the differently-scoped steps of the API bootstrapping + process. At the end of the API setup process, this object gets + removed. These are NOT part of the public API. + */ + const util = { + affirmBindableTypedArray, flexibleString, + bigIntFits32, bigIntFits64, bigIntFitsDouble, + isBindableTypedArray, + isInt32, isSQLableTypedArray, isTypedArray, + typedArrayToString, + isUIThread: ()=>'undefined'===typeof WorkerGlobalScope, + isSharedTypedArray, + typedArrayPart + }; + + Object.assign(wasm, { + /** + Emscripten APIs have a deep-seated assumption that all pointers + are 32 bits. We'll remain optimistic that that won't always be + the case and will use this constant in places where we might + otherwise use a hard-coded 4. + */ + ptrSizeof: config.wasmPtrSizeof || 4, + /** + The WASM IR (Intermediate Representation) value for + pointer-type values. It MUST refer to a value type of the + size described by this.ptrSizeof _or_ it may be any value + which ends in '*', which Emscripten's glue code internally + translates to i32. + */ + ptrIR: config.wasmPtrIR || "i32", + /** + True if BigInt support was enabled via (e.g.) the + Emscripten -sWASM_BIGINT flag, else false. When + enabled, certain 64-bit sqlite3 APIs are enabled which + are not otherwise enabled due to JS/WASM int64 + impedence mismatches. + */ + bigIntEnabled: !!config.bigIntEnabled, + /** + The symbols exported by the WASM environment. + */ + exports: config.exports + || toss3("Missing API config.exports (WASM module exports)."), + + /** + When Emscripten compiles with `-sIMPORT_MEMORY`, it + initalizes the heap and imports it into wasm, as opposed to + the other way around. In this case, the memory is not + available via this.exports.memory. + */ + memory: config.memory || config.exports['memory'] + || toss3("API config object requires a WebAssembly.Memory object", + "in either config.exports.memory (exported)", + "or config.memory (imported)."), + + /** + The API's one single point of access to the WASM-side memory + allocator. Works like malloc(3) (and is likely bound to + malloc()) but throws an WasmAllocError if allocation fails. It is + important that any code which might pass through the sqlite3 C + API NOT throw and must instead return SQLITE_NOMEM (or + equivalent, depending on the context). + + Very few cases in the sqlite3 JS APIs can result in + client-defined functions propagating exceptions via the C-style + API. Most notably, this applies to WASM-bound JS functions + which are created directly by clients and passed on _as WASM + function pointers_ to functions such as + sqlite3_create_function_v2(). Such bindings created + transparently by this API will automatically use wrappers which + catch exceptions and convert them to appropriate error codes. + + For cases where non-throwing allocation is required, use + sqlite3.wasm.alloc.impl(), which is direct binding of the + underlying C-level allocator. 
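A short sketch of the `sqlite3_randomness()` wrapper documented a bit further up, showing both the TypedArray form and the pass-through C-style form (the latter assumes `wasm.alloc()`/`wasm.dealloc()` as described above):

```
// Hedged sketch: fill a Uint8Array with random bytes.
const bytes = new Uint8Array(16);
capi.sqlite3_randomness(bytes);          // returns its argument
console.log("random bytes:", Array.from(bytes));

// The C-style form still works, provided outPtr points to at least
// n bytes of WASM-side memory:
const p = wasm.alloc(8);
capi.sqlite3_randomness(8, p);
wasm.dealloc(p);
```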
+ + Design note: this function is not named "malloc" primarily + because Emscripten uses that name and we wanted to avoid any + confusion early on in this code's development, when it still + had close ties to Emscripten's glue code. + */ + alloc: undefined/*installed later*/, + + /** + The API's one single point of access to the WASM-side memory + deallocator. Works like free(3) (and is likely bound to + free()). + + Design note: this function is not named "free" for the same + reason that this.alloc() is not called this.malloc(). + */ + dealloc: undefined/*installed later*/ + + /* Many more wasm-related APIs get installed later on. */ + }/*wasm*/); + + /** + wasm.alloc()'s srcTypedArray.byteLength bytes, populates them with the values from the source TypedArray, and returns the pointer to that memory. The returned pointer must eventually be passed to - capi.wasm.dealloc() to clean it up. + wasm.dealloc() to clean it up. As a special case, to avoid further special cases where this is used, if srcTypedArray.byteLength is 0, it allocates a single byte and sets it to the value 0. Even in such cases, calls must behave as if the @@ -385,29 +773,33 @@ ACHTUNG: this currently only works for Uint8Array and Int8Array types and will throw if srcTypedArray is of any other type. */ - capi.wasm.mallocFromTypedArray = function(srcTypedArray){ + wasm.allocFromTypedArray = function(srcTypedArray){ affirmBindableTypedArray(srcTypedArray); - const pRet = this.alloc(srcTypedArray.byteLength || 1); - this.heapForSize(srcTypedArray.constructor).set(srcTypedArray.byteLength ? srcTypedArray : [0], pRet); + const pRet = wasm.alloc(srcTypedArray.byteLength || 1); + wasm.heapForSize(srcTypedArray.constructor).set( + srcTypedArray.byteLength ? srcTypedArray : [0], pRet + ); return pRet; - }.bind(capi.wasm); + }; const keyAlloc = config.allocExportName || 'malloc', keyDealloc = config.deallocExportName || 'free'; for(const key of [keyAlloc, keyDealloc]){ - const f = capi.wasm.exports[key]; - if(!(f instanceof Function)) toss("Missing required exports[",key,"] function."); - } - capi.wasm.alloc = function(n){ - const m = this.exports[keyAlloc](n); - if(!m) throw new WasmAllocError("Failed to allocate "+n+" bytes."); - return m; - }.bind(capi.wasm) - capi.wasm.dealloc = (m)=>capi.wasm.exports[keyDealloc](m); + const f = wasm.exports[key]; + if(!(f instanceof Function)) toss3("Missing required exports[",key,"] function."); + } + + wasm.alloc = function f(n){ + const m = f.impl(n); + if(!m) throw new WasmAllocError("Failed to allocate",n," bytes."); + return m; + }; + wasm.alloc.impl = wasm.exports[keyAlloc]; + wasm.dealloc = wasm.exports[keyDealloc]; /** Reports info about compile-time options using sqlite_compileoption_get() and sqlite3_compileoption_used(). It has several distinct uses: @@ -434,11 +826,11 @@ Compile-time option names may optionally include their "SQLITE_" prefix. When it returns an object of all options, the prefix is elided. */ - capi.wasm.compileOptionUsed = function f(optName){ + wasm.compileOptionUsed = function f(optName){ if(!arguments.length){ if(f._result) return f._result; else if(!f._opt){ f._rx = /^([^=]+)=(.+)/; f._rxInt = /^-?\d+$/; @@ -470,30 +862,43 @@ return ( 'string'===typeof optName ) ? !!capi.sqlite3_compileoption_used(optName) : false; }/*compileOptionUsed()*/; - capi.wasm.bindingSignatures = [ - /** - Signatures for the WASM-exported C-side functions. 
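As a usage sketch for the allocator pieces installed above (`wasm.allocFromTypedArray()`, `wasm.dealloc()`, `wasm.heap8u()`), assuming the library has finished bootstrapping:

```
// Hedged sketch: copy a JS-side byte array into the WASM heap and back.
const src = new Uint8Array([1, 2, 3, 4]);
const ptr = wasm.allocFromTypedArray(src); // copies src into the heap
try {
  const copy = wasm.heap8u().slice(ptr, ptr + src.byteLength);
  console.log("round-tripped:", Array.from(copy));
} finally {
  wasm.dealloc(ptr); // required: allocFromTypedArray() does not auto-free
}
```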
Each entry - is an array with 2+ elements: - - ["c-side name", - "result type" (capi.wasm.xWrap() syntax), - [arg types in xWrap() syntax] - // ^^^ this needn't strictly be an array: it can be subsequent - // elements instead: [x,y,z] is equivalent to x,y,z - ] - */ + /** + Signatures for the WASM-exported C-side functions. Each entry + is an array with 2+ elements: + + [ "c-side name", + "result type" (wasm.xWrap() syntax), + [arg types in xWrap() syntax] + // ^^^ this needn't strictly be an array: it can be subsequent + // elements instead: [x,y,z] is equivalent to x,y,z + ] + + Note that support for the API-specific data types in the + result/argument type strings gets plugged in at a later phase in + the API initialization process. + */ + wasm.bindingSignatures = [ // Please keep these sorted by function name! - ["sqlite3_bind_blob","int", "sqlite3_stmt*", "int", "*", "int", "*"], + ["sqlite3_aggregate_context","void*", "sqlite3_context*", "int"], + ["sqlite3_bind_blob","int", "sqlite3_stmt*", "int", "*", "int", "*" + /* TODO: we should arguably write a custom wrapper which knows + how to handle Blob, TypedArrays, and JS strings. */ + ], ["sqlite3_bind_double","int", "sqlite3_stmt*", "int", "f64"], ["sqlite3_bind_int","int", "sqlite3_stmt*", "int", "int"], ["sqlite3_bind_null",undefined, "sqlite3_stmt*", "int"], ["sqlite3_bind_parameter_count", "int", "sqlite3_stmt*"], ["sqlite3_bind_parameter_index","int", "sqlite3_stmt*", "string"], - ["sqlite3_bind_text","int", "sqlite3_stmt*", "int", "string", "int", "int"], + ["sqlite3_bind_text","int", "sqlite3_stmt*", "int", "string", "int", "int" + /* We should arguably create a hand-written binding of + bind_text() which does more flexible text conversion, along + the lines of sqlite3_prepare_v3(). The slightly problematic + part is the final argument (text destructor). */ + ], ["sqlite3_close_v2", "int", "sqlite3*"], ["sqlite3_changes", "int", "sqlite3*"], ["sqlite3_clear_bindings","int", "sqlite3_stmt*"], ["sqlite3_column_blob","*", "sqlite3_stmt*", "int"], ["sqlite3_column_bytes","int", "sqlite3_stmt*", "int"], @@ -503,37 +908,50 @@ ["sqlite3_column_name","string", "sqlite3_stmt*", "int"], ["sqlite3_column_text","string", "sqlite3_stmt*", "int"], ["sqlite3_column_type","int", "sqlite3_stmt*", "int"], ["sqlite3_compileoption_get", "string", "int"], ["sqlite3_compileoption_used", "int", "string"], - ["sqlite3_create_function_v2", "int", - "sqlite3*", "string", "int", "int", "*", "*", "*", "*", "*"], + /* sqlite3_create_function(), sqlite3_create_function_v2(), and + sqlite3_create_window_function() use hand-written bindings to + simplify handling of their function-type arguments. */ ["sqlite3_data_count", "int", "sqlite3_stmt*"], ["sqlite3_db_filename", "string", "sqlite3*", "string"], + ["sqlite3_db_handle", "sqlite3*", "sqlite3_stmt*"], ["sqlite3_db_name", "string", "sqlite3*", "int"], + ["sqlite3_deserialize", "int", "sqlite3*", "string", "*", "i64", "i64", "int"] + /* Careful! Short version: de/serialize() are problematic because they + might use a different allocator than the user for managing the + deserialized block. de/serialize() are ONLY safe to use with + sqlite3_malloc(), sqlite3_free(), and its 64-bit variants. */, ["sqlite3_errmsg", "string", "sqlite3*"], ["sqlite3_error_offset", "int", "sqlite3*"], ["sqlite3_errstr", "string", "int"], - //["sqlite3_exec", "int", "sqlite3*", "string", "*", "*", "**"], - // ^^^ TODO: we need a wrapper to support passing a function pointer or a function - // for the callback. 
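The signatures listed above become thin one-to-one wrappers installed via `wasm.xWrap()` in a later phase. A hedged sketch of the `sqlite3_bind_*()` family, assuming a valid `sqlite3_stmt*` in `pStmt` (e.g. prepared from `INSERT INTO t(a,b) VALUES(:a,:b)`) and that constants such as `capi.SQLITE_DONE` are defined:

```
// Hedged sketch: bind numeric values by parameter name, step once,
// then reset the statement for reuse.
const ixA = capi.sqlite3_bind_parameter_index(pStmt, ":a");
const ixB = capi.sqlite3_bind_parameter_index(pStmt, ":b");
capi.sqlite3_bind_int(pStmt, ixA, 42);
capi.sqlite3_bind_double(pStmt, ixB, 19.73);
if(capi.SQLITE_DONE !== capi.sqlite3_step(pStmt)){
  console.error(capi.sqlite3_errmsg(capi.sqlite3_db_handle(pStmt)));
}
capi.sqlite3_clear_bindings(pStmt);
capi.sqlite3_reset(pStmt);
```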
+ /*["sqlite3_exec", "int", "sqlite3*", "string", "*", "*", "**" + Handled seperately to perform translation of the callback + into a WASM-usable one. ],*/ ["sqlite3_expanded_sql", "string", "sqlite3_stmt*"], ["sqlite3_extended_errcode", "int", "sqlite3*"], ["sqlite3_extended_result_codes", "int", "sqlite3*", "int"], + ["sqlite3_file_control", "int", "sqlite3*", "string", "int", "*"], ["sqlite3_finalize", "int", "sqlite3_stmt*"], + ["sqlite3_free", undefined,"*"], ["sqlite3_initialize", undefined], - ["sqlite3_interrupt", undefined, "sqlite3*" - /* ^^^ we cannot actually currently support this because JS is + /*["sqlite3_interrupt", undefined, "sqlite3*" + ^^^ we cannot actually currently support this because JS is single-threaded and we don't have a portable way to access a DB - from 2 SharedWorkers concurrently. */], + from 2 SharedWorkers concurrently. ],*/ ["sqlite3_libversion", "string"], ["sqlite3_libversion_number", "int"], + ["sqlite3_malloc", "*","int"], ["sqlite3_open", "int", "string", "*"], ["sqlite3_open_v2", "int", "string", "*", "int", "string"], /* sqlite3_prepare_v2() and sqlite3_prepare_v3() are handled separately due to us requiring two different sets of semantics for those, depending on how their SQL argument is provided. */ + /* sqlite3_randomness() uses a hand-written wrapper to extend + the range of supported argument types. */ + ["sqlite3_realloc", "*","*","int"], ["sqlite3_reset", "int", "sqlite3_stmt*"], ["sqlite3_result_blob",undefined, "*", "*", "int", "*"], ["sqlite3_result_double",undefined, "*", "f64"], ["sqlite3_result_error",undefined, "*", "string", "int"], ["sqlite3_result_error_code", undefined, "*", "int"], @@ -540,54 +958,645 @@ ["sqlite3_result_error_nomem", undefined, "*"], ["sqlite3_result_error_toobig", undefined, "*"], ["sqlite3_result_int",undefined, "*", "int"], ["sqlite3_result_null",undefined, "*"], ["sqlite3_result_text",undefined, "*", "string", "int", "*"], + ["sqlite3_serialize","*", "sqlite3*", "string", "*", "int"], + ["sqlite3_shutdown", undefined], ["sqlite3_sourceid", "string"], ["sqlite3_sql", "string", "sqlite3_stmt*"], ["sqlite3_step", "int", "sqlite3_stmt*"], ["sqlite3_strglob", "int", "string","string"], ["sqlite3_strlike", "int", "string","string","int"], + ["sqlite3_trace_v2", "int", "sqlite3*", "int", "*", "*"], ["sqlite3_total_changes", "int", "sqlite3*"], - ["sqlite3_value_blob", "*", "*"], - ["sqlite3_value_bytes","int", "*"], - ["sqlite3_value_double","f64", "*"], - ["sqlite3_value_text", "string", "*"], - ["sqlite3_value_type", "int", "*"], + ["sqlite3_uri_boolean", "int", "string", "string", "int"], + ["sqlite3_uri_key", "string", "string", "int"], + ["sqlite3_uri_parameter", "string", "string", "string"], + ["sqlite3_user_data","void*", "sqlite3_context*"], + ["sqlite3_value_blob", "*", "sqlite3_value*"], + ["sqlite3_value_bytes","int", "sqlite3_value*"], + ["sqlite3_value_double","f64", "sqlite3_value*"], + ["sqlite3_value_int","int", "sqlite3_value*"], + ["sqlite3_value_text", "string", "sqlite3_value*"], + ["sqlite3_value_type", "int", "sqlite3_value*"], ["sqlite3_vfs_find", "*", "string"], - ["sqlite3_vfs_register", "int", "*", "int"] - ]/*capi.wasm.bindingSignatures*/; + ["sqlite3_vfs_register", "int", "sqlite3_vfs*", "int"], + ["sqlite3_vfs_unregister", "int", "sqlite3_vfs*"] + ]/*wasm.bindingSignatures*/; - if(false && capi.wasm.compileOptionUsed('SQLITE_ENABLE_NORMALIZE')){ + if(false && wasm.compileOptionUsed('SQLITE_ENABLE_NORMALIZE')){ /* ^^^ "the problem" is that this is an option feature and the build-time 
function-export list does not currently take optional features into account. */ - capi.wasm.bindingSignatures.push(["sqlite3_normalized_sql", "string", "sqlite3_stmt*"]); + wasm.bindingSignatures.push(["sqlite3_normalized_sql", "string", "sqlite3_stmt*"]); } /** Functions which require BigInt (int64) support are separated from the others because we need to conditionally bind them or apply dummy impls, depending on the capabilities of the environment. */ - capi.wasm.bindingSignatures.int64 = [ - ["sqlite3_bind_int64","int", ["sqlite3_stmt*", "int", "i64"]], - ["sqlite3_changes64","i64", ["sqlite3*"]], - ["sqlite3_column_int64","i64", ["sqlite3_stmt*", "int"]], - ["sqlite3_total_changes64", "i64", ["sqlite3*"]] + wasm.bindingSignatures.int64 = [ + ["sqlite3_bind_int64","int", ["sqlite3_stmt*", "int", "i64"]], + ["sqlite3_changes64","i64", ["sqlite3*"]], + ["sqlite3_column_int64","i64", ["sqlite3_stmt*", "int"]], + ["sqlite3_malloc64", "*","i64"], + ["sqlite3_msize", "i64", "*"], + ["sqlite3_realloc64", "*","*", "i64"], + ["sqlite3_result_int64",undefined, "*", "i64"], + ["sqlite3_total_changes64", "i64", ["sqlite3*"]], + ["sqlite3_uri_int64", "i64", ["string", "string", "i64"]], + ["sqlite3_value_int64","i64", "sqlite3_value*"], + ]; + + /** + Functions which are intended solely for API-internal use by the + WASM components, not client code. These get installed into + sqlite3.wasm. + */ + wasm.bindingSignatures.wasm = [ + ["sqlite3_wasm_db_reset", "int", "sqlite3*"], + ["sqlite3_wasm_db_vfs", "sqlite3_vfs*", "sqlite3*","string"], + ["sqlite3_wasm_vfs_create_file", "int", + "sqlite3_vfs*","string","*", "int"], + ["sqlite3_wasm_vfs_unlink", "int", "sqlite3_vfs*","string"] ]; + + + /** + sqlite3.wasm.pstack (pseudo-stack) holds a special-case + stack-style allocator intended only for use with _small_ data of + not more than (in total) a few kb in size, managed as if it were + stack-based. + + It has only a single intended usage: + + ``` + const stackPos = pstack.pointer; + try{ + const ptr = pstack.alloc(8); + // ==> pstack.pointer === ptr + const otherPtr = pstack.alloc(8); + // ==> pstack.pointer === otherPtr + ... + }finally{ + pstack.restore(stackPos); + // ==> pstack.pointer === stackPos + } + ``` + + This allocator is much faster than a general-purpose one but is + limited to usage patterns like the one shown above. + + It operates from a static range of memory which lives outside of + space managed by Emscripten's stack-management, so does not + collide with Emscripten-provided stack allocation APIs. The + memory lives in the WASM heap and can be used with routines such + as wasm.setMemValue() and any wasm.heap8u().slice(). + */ + wasm.pstack = Object.assign(Object.create(null),{ + /** + Sets the current pstack position to the given pointer. Results + are undefined if the passed-in value did not come from + this.pointer. + */ + restore: wasm.exports.sqlite3_wasm_pstack_restore, + /** + Attempts to allocate the given number of bytes from the + pstack. On success, it zeroes out a block of memory of the + given size, adjusts the pstack pointer, and returns a pointer + to the memory. On error, returns throws a WasmAllocError. The + memory must eventually be released using restore(). + + This method always adjusts the given value to be a multiple + of 8 bytes because failing to do so can lead to incorrect + results when reading and writing 64-bit values from/to the WASM + heap. Similarly, the returned address is always 8-byte aligned. 
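A small sketch of the alignment point made above: round-tripping a 64-bit value through pstack memory. It assumes a BigInt-enabled build (`wasm.bigIntEnabled`) and that `wasm.setMemValue()`/`wasm.getMemValue()` behave as referenced in these docs; those helper signatures are an assumption here, not something this file defines:

```
// Hedged sketch: write and read an i64 via pstack-allocated memory.
const stackPos = wasm.pstack.pointer;
try {
  const p = wasm.pstack.alloc(8);            // 8-byte aligned, zeroed
  wasm.setMemValue(p, 0x1122334455n, 'i64'); // assumed helper signature
  console.log("read back:", wasm.getMemValue(p, 'i64'));
} finally {
  wasm.pstack.restore(stackPos);             // always restore
}
```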
+ */ + alloc: (n)=>{ + return wasm.exports.sqlite3_wasm_pstack_alloc(n) + || WasmAllocError.toss("Could not allocate",n, + "bytes from the pstack."); + }, + /** + alloc()'s n chunks, each sz bytes, as a single memory block and + returns the addresses as an array of n element, each holding + the address of one chunk. + + Throws a WasmAllocError if allocation fails. + + Example: + + ``` + const [p1, p2, p3] = wasm.pstack.allocChunks(3,4); + ``` + */ + allocChunks: (n,sz)=>{ + const mem = wasm.pstack.alloc(n * sz); + const rc = []; + let i = 0, offset = 0; + for(; i < n; offset = (sz * ++i)){ + rc.push(mem + offset); + } + return rc; + }, + /** + A convenience wrapper for allocChunks() which sizes each chunk + as either 8 bytes (safePtrSize is truthy) or wasm.ptrSizeof (if + safePtrSize is falsy). + + How it returns its result differs depending on its first + argument: if it's 1, it returns a single pointer value. If it's + more than 1, it returns the same as allocChunks(). + + When a returned pointers will refer to a 64-bit value, e.g. a + double or int64, and that value must be written or fetched, + e.g. using wasm.setMemValue() or wasm.getMemValue(), it is + important that the pointer in question be aligned to an 8-byte + boundary or else it will not be fetched or written properly and + will corrupt or read neighboring memory. + + However, when all pointers involved point to "small" data, it + is safe to pass a falsy value to save a tiny bit of memory. + */ + allocPtr: (n=1,safePtrSize=true)=>{ + return 1===n + ? wasm.pstack.alloc(safePtrSize ? 8 : wasm.ptrSizeof) + : wasm.pstack.allocChunks(n, safePtrSize ? 8 : wasm.ptrSizeof); + } + })/*wasm.pstack*/; + Object.defineProperties(wasm.pstack, { + /** + sqlite3.wasm.pstack.pointer resolves to the current pstack + position pointer. This value is intended _only_ to be saved + for passing to restore(). Writing to this memory, without + first reserving it via wasm.pstack.alloc() and friends, leads + to undefined results. + */ + pointer: { + configurable: false, iterable: true, writeable: false, + get: wasm.exports.sqlite3_wasm_pstack_ptr + //Whether or not a setter as an alternative to restore() is + //clearer or would just lead to confusion is unclear. + //set: wasm.exports.sqlite3_wasm_pstack_restore + }, + /** + sqlite3.wasm.pstack.quota to the total number of bytes + available in the pstack, including any space which is currently + allocated. This value is a compile-time constant. + */ + quota: { + configurable: false, iterable: true, writeable: false, + get: wasm.exports.sqlite3_wasm_pstack_quota + }, + /** + sqlite3.wasm.pstack.remaining resolves to the amount of space + remaining in the pstack. + */ + remaining: { + configurable: false, iterable: true, writeable: false, + get: wasm.exports.sqlite3_wasm_pstack_remaining + } + })/*wasm.pstack properties*/; + + capi.sqlite3_randomness = (...args)=>{ + if(1===args.length && util.isTypedArray(args[0]) + && 1===args[0].BYTES_PER_ELEMENT){ + const ta = args[0]; + if(0===ta.byteLength){ + wasm.exports.sqlite3_randomness(0,0); + return ta; + } + const stack = wasm.pstack.pointer; + try { + let n = ta.byteLength, offset = 0; + const r = wasm.exports.sqlite3_randomness; + const heap = wasm.heap8u(); + const nAlloc = n < 512 ? n : 512; + const ptr = wasm.pstack.alloc(nAlloc); + do{ + const j = (n>nAlloc ? nAlloc : n); + r(j, ptr); + ta.set(typedArrayPart(heap, ptr, ptr+j), offset); + n -= j; + offset += j; + } while(n > 0); + }catch(e){ + console.error("Highly unexpected (and ignored!) 
"+ + "exception in sqlite3_randomness():",e); + }finally{ + wasm.pstack.restore(stack); + } + return ta; + } + wasm.exports.sqlite3_randomness(...args); + }; + + /** State for sqlite3_wasmfs_opfs_dir(). */ + let __wasmfsOpfsDir = undefined; + /** + If the wasm environment has a WASMFS/OPFS-backed persistent + storage directory, its path is returned by this function. If it + does not then it returns "" (noting that "" is a falsy value). + + The first time this is called, this function inspects the current + environment to determine whether persistence support is available + and, if it is, enables it (if needed). + + This function currently only recognizes the WASMFS/OPFS storage + combination and its path refers to storage rooted in the + Emscripten-managed virtual filesystem. + */ + capi.sqlite3_wasmfs_opfs_dir = function(){ + if(undefined !== __wasmfsOpfsDir) return __wasmfsOpfsDir; + // If we have no OPFS, there is no persistent dir + const pdir = config.wasmfsOpfsDir; + if(!pdir + || !self.FileSystemHandle + || !self.FileSystemDirectoryHandle + || !self.FileSystemFileHandle){ + return __wasmfsOpfsDir = ""; + } + try{ + if(pdir && 0===wasm.xCallWrapped( + 'sqlite3_wasm_init_wasmfs', 'i32', ['string'], pdir + )){ + return __wasmfsOpfsDir = pdir; + }else{ + return __wasmfsOpfsDir = ""; + } + }catch(e){ + // sqlite3_wasm_init_wasmfs() is not available + return __wasmfsOpfsDir = ""; + } + }; + + /** + Experimental and subject to change or removal. + + Returns true if sqlite3.capi.sqlite3_wasmfs_opfs_dir() is a + non-empty string and the given name starts with (that string + + '/'), else returns false. + */ + capi.sqlite3_wasmfs_filename_is_persistent = function(name){ + const p = capi.sqlite3_wasmfs_opfs_dir(); + return (p && name) ? name.startsWith(p+'/') : false; + }; + + // This bit is highly arguable and is incompatible with the fiddle shell. + if(false && 0===wasm.exports.sqlite3_vfs_find(0)){ + /* Assume that sqlite3_initialize() has not yet been called. + This will be the case in an SQLITE_OS_KV build. */ + wasm.exports.sqlite3_initialize(); + } + + /** + Given an `sqlite3*`, an sqlite3_vfs name, and an optional db name + (defaulting to "main"), returns a truthy value (see below) if + that db uses that VFS, else returns false. If pDb is falsy then + the 3rd argument is ignored and this function returns a truthy + value if the default VFS name matches that of the 2nd + argument. Results are undefined if pDb is truthy but refers to an + invalid pointer. The 3rd argument specifies the database name of + the given database connection to check, defaulting to the main + db. + + The 2nd and 3rd arguments may either be a JS string or a WASM + C-string. If the 2nd argument is a NULL WASM pointer, the default + VFS is assumed. If the 3rd is a NULL WASM pointer, "main" is + assumed. + + The truthy value it returns is a pointer to the `sqlite3_vfs` + object. + + To permit safe use of this function from APIs which may be called + via the C stack (like SQL UDFs), this function does not throw: if + bad arguments cause a conversion error when passing into + wasm-space, false is returned. + */ + capi.sqlite3_js_db_uses_vfs = function(pDb,vfsName,dbName=0){ + try{ + const pK = capi.sqlite3_vfs_find(vfsName); + if(!pK) return false; + else if(!pDb){ + return pK===capi.sqlite3_vfs_find(0) ? pK : false; + }else{ + return pK===capi.sqlite3_js_db_vfs(pDb,dbName) ? pK : false; + } + }catch(e){ + /* Ignore - probably bad args to a wasm-bound function. 
*/ + return false; + } + }; + + /** + Returns an array of the names of all currently-registered sqlite3 + VFSes. + */ + capi.sqlite3_js_vfs_list = function(){ + const rc = []; + let pVfs = capi.sqlite3_vfs_find(0); + while(pVfs){ + const oVfs = new capi.sqlite3_vfs(pVfs); + rc.push(wasm.cstringToJs(oVfs.$zName)); + pVfs = oVfs.$pNext; + oVfs.dispose(); + } + return rc; + }; + + /** + Serializes the given `sqlite3*` pointer to a Uint8Array, as per + sqlite3_serialize(). On success it returns a Uint8Array. On + error it throws with a description of the problem. + */ + capi.sqlite3_js_db_export = function(pDb){ + if(!pDb) toss3('Invalid sqlite3* argument.'); + if(!wasm.bigIntEnabled) toss3('BigInt64 support is not enabled.'); + const stack = wasm.pstack.pointer; + let pOut; + try{ + const pSize = wasm.pstack.alloc(8/*i64*/ + wasm.ptrSizeof); + const ppOut = pSize + 8; + /** + Maintenance reminder, since this cost a full hour of grief + and confusion: if the order of pSize/ppOut are reversed in + that memory block, fetching the value of pSize after the + export reads a garbage size because it's not on an 8-byte + memory boundary! + */ + let rc = wasm.exports.sqlite3_wasm_db_serialize( + pDb, ppOut, pSize, 0 + ); + if(rc){ + toss3("Database serialization failed with code", + sqlite3.capi.sqlite3_js_rc_str(rc)); + } + pOut = wasm.getPtrValue(ppOut); + const nOut = wasm.getMemValue(pSize, 'i64'); + rc = nOut + ? wasm.heap8u().slice(pOut, pOut + Number(nOut)) + : new Uint8Array(); + return rc; + }finally{ + if(pOut) wasm.exports.sqlite3_free(pOut); + wasm.pstack.restore(stack); + } + }; + + /** + Given a `sqlite3*` and a database name (JS string or WASM + C-string pointer, which may be 0), returns a pointer to the + sqlite3_vfs responsible for it. If the given db name is null/0, + or not provided, then "main" is assumed. + */ + capi.sqlite3_js_db_vfs = + (dbPointer, dbName=0)=>wasm.sqlite3_wasm_db_vfs(dbPointer, dbName); + + /** + A thin wrapper around capi.sqlite3_aggregate_context() which + behaves the same except that it throws a WasmAllocError if that + function returns 0. As a special case, if n is falsy it does + _not_ throw if that function returns 0. That special case is + intended for use with xFinal() implementations. + */ + capi.sqlite3_js_aggregate_context = (pCtx, n)=>{ + return capi.sqlite3_aggregate_context(pCtx, n) + || (n ? WasmAllocError.toss("Cannot allocate",n, + "bytes for sqlite3_aggregate_context()") + : 0); + }; + + if( util.isUIThread() ){ + /* Features specific to the main window thread... */ + + /** + Internal helper for sqlite3_js_kvvfs_clear() and friends. + Its argument should be one of ('local','session',""). + */ + const __kvvfsInfo = function(which){ + const rc = Object.create(null); + rc.prefix = 'kvvfs-'+which; + rc.stores = []; + if('session'===which || ""===which) rc.stores.push(self.sessionStorage); + if('local'===which || ""===which) rc.stores.push(self.localStorage); + return rc; + }; + + /** + Clears all storage used by the kvvfs DB backend, deleting any + DB(s) stored there. Its argument must be either 'session', + 'local', or "". In the first two cases, only sessionStorage + resp. localStorage is cleared. If it's an empty string (the + default) then both are cleared. Only storage keys which match + the pattern used by kvvfs are cleared: any other client-side + data are retained. + + This function is only available in the main window thread. + + Returns the number of entries cleared. 
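A usage sketch combining `sqlite3_js_vfs_list()`, `sqlite3_js_db_uses_vfs()`, and `sqlite3_js_db_export()` (the latter requires a BigInt-enabled build). `pDb` is assumed to be a valid `sqlite3*` opened in this thread:

```
// Hedged sketch: inspect registered VFSes and export an open db.
const vfsNames = capi.sqlite3_js_vfs_list();
console.log("Registered VFSes:", vfsNames);
console.log("db uses", vfsNames[0], "?",
            !!capi.sqlite3_js_db_uses_vfs(pDb, vfsNames[0]));
const byteArray = capi.sqlite3_js_db_export(pDb); // Uint8Array
console.log("db image is", byteArray.byteLength, "bytes");
// byteArray could now be persisted, e.g. via a Blob download or
// IndexedDB; that part is left to the client.
```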
+ */ + capi.sqlite3_js_kvvfs_clear = function(which=""){ + let rc = 0; + const kvinfo = __kvvfsInfo(which); + kvinfo.stores.forEach((s)=>{ + const toRm = [] /* keys to remove */; + let i; + for( i = 0; i < s.length; ++i ){ + const k = s.key(i); + if(k.startsWith(kvinfo.prefix)) toRm.push(k); + } + toRm.forEach((kk)=>s.removeItem(kk)); + rc += toRm.length; + }); + return rc; + }; + + /** + This routine guesses the approximate amount of + window.localStorage and/or window.sessionStorage in use by the + kvvfs database backend. Its argument must be one of + ('session', 'local', ""). In the first two cases, only + sessionStorage resp. localStorage is counted. If it's an empty + string (the default) then both are counted. Only storage keys + which match the pattern used by kvvfs are counted. The returned + value is the "length" value of every matching key and value, + noting that JavaScript stores each character in 2 bytes. + + Note that the returned size is not authoritative from the + perspective of how much data can fit into localStorage and + sessionStorage, as the precise algorithms for determining + those limits are unspecified and may include per-entry + overhead invisible to clients. + */ + capi.sqlite3_js_kvvfs_size = function(which=""){ + let sz = 0; + const kvinfo = __kvvfsInfo(which); + kvinfo.stores.forEach((s)=>{ + let i; + for(i = 0; i < s.length; ++i){ + const k = s.key(i); + if(k.startsWith(kvinfo.prefix)){ + sz += k.length; + sz += s.getItem(k).length; + } + } + }); + return sz * 2 /* because JS uses 2-byte char encoding */; + }; + + }/* main-window-only bits */ + /* The remainder of the API will be set up in later steps. */ - return { + const sqlite3 = { + WasmAllocError: WasmAllocError, + SQLite3Error: SQLite3Error, capi, - postInit: [ - /* some pieces of the API may install functions into this array, - and each such function will be called, passed (self,sqlite3), - at the very end of the API load/init process, where self is - the current global object and sqlite3 is the object returned - from sqlite3ApiBootstrap(). This array will be removed at the - end of the API setup process. */], - /** Config is needed downstream for gluing pieces together. It - will be removed at the end of the API setup process. */ - config + util, + wasm, + config, + /** + Holds the version info of the sqlite3 source tree from which + the generated sqlite3-api.js gets built. Note that its version + may well differ from that reported by sqlite3_libversion(), but + that should be considered a source file mismatch, as the JS and + WASM files are intended to be built and distributed together. + + This object is initially a placeholder which gets replaced by a + build-generated object. + */ + version: Object.create(null), + /** + Performs any optional asynchronous library-level initialization + which might be required. This function returns a Promise which + resolves to the sqlite3 namespace object. Any error in the + async init will be fatal to the init as a whole, but init + routines are themselves welcome to install dummy catch() + handlers which are not fatal if their failure should be + considered non-fatal. If called more than once, the second and + subsequent calls are no-ops which return a pre-resolved + Promise. + + Ideally this function is called as part of the Promise chain + which handles the loading and bootstrapping of the API. If not + then it must be called by client-level code, which must not use + the library until the returned promise resolves. 
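A sketch of the two kvvfs helpers above, usable only from the main window thread; the 4 MB threshold is arbitrary and purely illustrative:

```
// Hedged sketch: report and optionally clear kvvfs-backed storage.
const approxBytes = capi.sqlite3_js_kvvfs_size();  // both storage areas
console.log("kvvfs is using roughly", approxBytes, "bytes");
if(approxBytes > 4 * 1024 * 1024){
  const n = capi.sqlite3_js_kvvfs_clear('local');  // only localStorage
  console.log("Removed", n, "kvvfs entries from localStorage.");
}
```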
+ + Bug: if called while a prior call is still resolving, the 2nd + call will resolve prematurely, before the 1st call has finished + resolving. The current build setup precludes that possibility, + so it's only a hypothetical problem if/when this function + ever needs to be invoked by clients. + + In Emscripten-based builds, this function is called + automatically and deleted from this object. + */ + asyncPostInit: async function(){ + let lip = sqlite3ApiBootstrap.initializersAsync; + delete sqlite3ApiBootstrap.initializersAsync; + if(!lip || !lip.length) return Promise.resolve(sqlite3); + // Is it okay to resolve these in parallel or do we need them + // to resolve in order? We currently only have 1, so it + // makes no difference. + lip = lip.map((f)=>{ + const p = (f instanceof Promise) ? f : f(sqlite3); + return p.catch((e)=>{ + console.error("an async sqlite3 initializer failed:",e); + throw e; + }); + }); + //let p = lip.shift(); + //while(lip.length) p = p.then(lip.shift()); + //return p.then(()=>sqlite3); + return Promise.all(lip).then(()=>sqlite3); + }, + /** + scriptInfo ideally gets injected into this object by the + infrastructure which assembles the JS/WASM module. It contains + state which must be collected before sqlite3ApiBootstrap() can + be declared. It is not necessarily available to any + sqlite3ApiBootstrap.initializers but "should" be in place (if + it's added at all) by the time that + sqlite3ApiBootstrap.initializersAsync is processed. + + This state is not part of the public API, only intended for use + with the sqlite3 API bootstrapping and wasm-loading process. + */ + scriptInfo: undefined }; + try{ + sqlite3ApiBootstrap.initializers.forEach((f)=>{ + f(sqlite3); + }); + }catch(e){ + /* If we don't report this here, it can get completely swallowed + up and disappear into the abyss of Promises and Workers. */ + console.error("sqlite3 bootstrap initializer threw:",e); + throw e; + } + delete sqlite3ApiBootstrap.initializers; + sqlite3ApiBootstrap.sqlite3 = sqlite3; + return sqlite3; }/*sqlite3ApiBootstrap()*/; +/** + self.sqlite3ApiBootstrap.initializers is an internal detail used by + the various pieces of the sqlite3 API's amalgamation process. It + must not be modified by client code except when plugging such code + into the amalgamation process. + + Each component of the amalgamation is expected to append a function + to this array. When sqlite3ApiBootstrap() is called for the first + time, each such function will be called (in their appended order) + and passed the sqlite3 namespace object, into which they can install + their features (noting that most will also require that certain + features alread have been installed). At the end of that process, + this array is deleted. + + Note that the order of insertion into this array is significant for + some pieces. e.g. sqlite3.capi and sqlite3.wasm cannot be fully + utilized until the whwasmutil.js part is plugged in via + sqlite3-api-glue.js. +*/ +self.sqlite3ApiBootstrap.initializers = []; +/** + self.sqlite3ApiBootstrap.initializersAsync is an internal detail + used by the sqlite3 API's amalgamation process. It must not be + modified by client code except when plugging such code into the + amalgamation process. + + The counterpart of self.sqlite3ApiBootstrap.initializers, + specifically for initializers which are asynchronous. All entries in + this list must be either async functions, non-async functions which + return a Promise, or a Promise. 
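A sketch of how a build component and a client might use the pieces above: registering a synchronous initializer, bootstrapping, then running the async post-init step (which, per the notes above, is invoked automatically in Emscripten-based builds). The name `myHypotheticalExtension` is illustrative only:

```
// Hedged sketch: an amalgamation component plugs itself in. This must
// run before sqlite3ApiBootstrap() is called.
self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
  sqlite3.myHypotheticalExtension = Object.create(null); // illustrative
});
// ...later, once all pieces are loaded (non-Emscripten builds only)...
const sqlite3 = self.sqlite3ApiBootstrap();
sqlite3.asyncPostInit().then((sqlite3)=>{
  console.log("sqlite3 version info:", sqlite3.version);
});
```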
Each function in the list is called + with the sqlite3 ojbect as its only argument. + + The resolved value of any Promise is ignored and rejection will kill + the asyncPostInit() process (at an indeterminate point because all + of them are run asynchronously in parallel). + + This list is not processed until the client calls + sqlite3.asyncPostInit(). This means, for example, that intializers + added to self.sqlite3ApiBootstrap.initializers may push entries to + this list. +*/ +self.sqlite3ApiBootstrap.initializersAsync = []; +/** + Client code may assign sqlite3ApiBootstrap.defaultConfig an + object-type value before calling sqlite3ApiBootstrap() (without + arguments) in order to tell that call to use this object as its + default config value. The intention of this is to provide + downstream clients with a reasonably flexible approach for plugging in + an environment-suitable configuration without having to define a new + global-scope symbol. +*/ +self.sqlite3ApiBootstrap.defaultConfig = Object.create(null); +/** + Placeholder: gets installed by the first call to + self.sqlite3ApiBootstrap(). However, it is recommended that the + caller of sqlite3ApiBootstrap() capture its return value and delete + self.sqlite3ApiBootstrap after calling it. It returns the same + value which will be stored here. +*/ +self.sqlite3ApiBootstrap.sqlite3 = undefined; + DELETED ext/wasm/api/sqlite3-api-worker.js Index: ext/wasm/api/sqlite3-api-worker.js ================================================================== --- ext/wasm/api/sqlite3-api-worker.js +++ /dev/null @@ -1,420 +0,0 @@ -/* - 2022-07-22 - - The author disclaims copyright to this source code. In place of a - legal notice, here is a blessing: - - * May you do good and not evil. - * May you find forgiveness for yourself and forgive others. - * May you share freely, never taking more than you give. - - *********************************************************************** - - This file implements a Worker-based wrapper around SQLite3 OO API - #1. - - In order to permit this API to be loaded in worker threads without - automatically registering onmessage handlers, initializing the - worker API requires calling initWorkerAPI(). If this function - is called from a non-worker thread then it throws an exception. - - When initialized, it installs message listeners to receive messages - from the main thread and then it posts a message in the form: - - ``` - {type:'sqlite3-api',data:'worker-ready'} - ``` - - This file requires that the core C-style sqlite3 API and OO API #1 - have been loaded and that self.sqlite3 contains both, - as documented for those APIs. -*/ -self.sqlite3.initWorkerAPI = function(){ - 'use strict'; - /** - UNDER CONSTRUCTION - - We need an API which can proxy the DB API via a Worker message - interface. The primary quirky factor in such an API is that we - cannot pass callback functions between the window thread and a - worker thread, so we have to receive all db results via - asynchronous message-passing. That requires an asychronous API - with a distinctly different shape that the main OO API. - - Certain important considerations here include: - - - Support only one db connection or multiple? The former is far - easier, but there's always going to be a user out there who wants - to juggle six database handles at once. Do we add that complexity - or tell such users to write their own code using the provided - lower-level APIs? 
- - - Fetching multiple results: do we pass them on as a series of - messages, with start/end messages on either end, or do we collect - all results and bundle them back in a single message? The former - is, generically speaking, more memory-efficient but the latter - far easier to implement in this environment. The latter is - untennable for large data sets. Despite a web page hypothetically - being a relatively limited environment, there will always be - those users who feel that they should/need to be able to work - with multi-hundred-meg (or larger) blobs, and passing around - arrays of those may quickly exhaust the JS engine's memory. - - TODOs include, but are not limited to: - - - The ability to manage multiple DB handles. This can - potentially be done via a simple mapping of DB.filename or - DB.pointer (`sqlite3*` handle) to DB objects. The open() - interface would need to provide an ID (probably DB.pointer) back - to the user which can optionally be passed as an argument to - the other APIs (they'd default to the first-opened DB, for - ease of use). Client-side usability of this feature would - benefit from making another wrapper class (or a singleton) - available to the main thread, with that object proxying all(?) - communication with the worker. - - - Revisit how virtual files are managed. We currently delete DBs - from the virtual filesystem when we close them, for the sake of - saving memory (the VFS lives in RAM). Supporting multiple DBs may - require that we give up that habit. Similarly, fully supporting - ATTACH, where a user can upload multiple DBs and ATTACH them, - also requires the that we manage the VFS entries better. - */ - const toss = (...args)=>{throw new Error(args.join(' '))}; - if('function' !== typeof importScripts){ - toss("Cannot initalize the sqlite3 worker API in the main thread."); - } - const self = this.self; - const sqlite3 = this.sqlite3 || toss("Missing this.sqlite3 object."); - const SQLite3 = sqlite3.oo1 || toss("Missing this.sqlite3.oo1 OO API."); - const DB = SQLite3.DB; - - /** - Returns the app-wide unique ID for the given db, creating one if - needed. - */ - const getDbId = function(db){ - let id = wState.idMap.get(db); - if(id) return id; - id = 'db#'+(++wState.idSeq)+'@'+db.pointer; - /** ^^^ can't simply use db.pointer b/c closing/opening may re-use - the same address, which could map pending messages to a wrong - instance. */ - wState.idMap.set(db, id); - return id; - }; - - /** - Helper for managing Worker-level state. - */ - const wState = { - defaultDb: undefined, - idSeq: 0, - idMap: new WeakMap, - open: function(arg){ - // TODO: if arg is a filename, look for a db in this.dbs with the - // same filename and close/reopen it (or just pass it back as is?). - if(!arg && this.defaultDb) return this.defaultDb; - //???if(this.defaultDb) this.defaultDb.close(); - let db; - db = (Array.isArray(arg) ? new DB(...arg) : new DB(arg)); - this.dbs[getDbId(db)] = db; - if(!this.defaultDb) this.defaultDb = db; - return db; - }, - close: function(db,alsoUnlink){ - if(db){ - delete this.dbs[getDbId(db)]; - db.close(alsoUnlink); - if(db===this.defaultDb) this.defaultDb = undefined; - } - }, - post: function(type,data,xferList){ - if(xferList){ - self.postMessage({type, data},xferList); - xferList.length = 0; - }else{ - self.postMessage({type, data}); - } - }, - /** Map of DB IDs to DBs. */ - dbs: Object.create(null), - getDb: function(id,require=true){ - return this.dbs[id] - || (require ? 
toss("Unknown (or closed) DB ID:",id) : undefined); - } - }; - - /** Throws if the given db is falsy or not opened. */ - const affirmDbOpen = function(db = wState.defaultDb){ - return (db && db.pointer) ? db : toss("DB is not opened."); - }; - - /** Extract dbId from the given message payload. */ - const getMsgDb = function(msgData,affirmExists=true){ - const db = wState.getDb(msgData.dbId,false) || wState.defaultDb; - return affirmExists ? affirmDbOpen(db) : db; - }; - - const getDefaultDbId = function(){ - return wState.defaultDb && getDbId(wState.defaultDb); - }; - - /** - A level of "organizational abstraction" for the Worker - API. Each method in this object must map directly to a Worker - message type key. The onmessage() dispatcher attempts to - dispatch all inbound messages to a method of this object, - passing it the event.data part of the inbound event object. All - methods must return a plain Object containing any response - state, which the dispatcher may amend. All methods must throw - on error. - */ - const wMsgHandler = { - xfer: [/*Temp holder for "transferable" postMessage() state.*/], - /** - Proxy for DB.exec() which expects a single argument of type - string (SQL to execute) or an options object in the form - expected by exec(). The notable differences from exec() - include: - - - The default value for options.rowMode is 'array' because - the normal default cannot cross the window/Worker boundary. - - - A function-type options.callback property cannot cross - the window/Worker boundary, so is not useful here. If - options.callback is a string then it is assumed to be a - message type key, in which case a callback function will be - applied which posts each row result via: - - postMessage({type: thatKeyType, data: theRow}) - - And, at the end of the result set (whether or not any - result rows were produced), it will post an identical - message with data:null to alert the caller than the result - set is completed. - - The callback proxy must not recurse into this interface, or - results are undefined. (It hypothetically cannot recurse - because an exec() call will be tying up the Worker thread, - causing any recursion attempt to wait until the first - exec() is completed.) - - The response is the input options object (or a synthesized - one if passed only a string), noting that - options.resultRows and options.columnNames may be populated - by the call to exec(). - - This opens/creates the Worker's db if needed. - */ - exec: function(ev){ - const opt = ( - 'string'===typeof ev.data - ) ? {sql: ev.data} : (ev.data || Object.create(null)); - if(undefined===opt.rowMode){ - /* Since the default rowMode of 'stmt' is not useful - for the Worker interface, we'll default to - something else. */ - opt.rowMode = 'array'; - }else if('stmt'===opt.rowMode){ - toss("Invalid rowMode for exec(): stmt mode", - "does not work in the Worker API."); - } - const db = getMsgDb(ev); - if(opt.callback || Array.isArray(opt.resultRows)){ - // Part of a copy-avoidance optimization for blobs - db._blobXfer = this.xfer; - } - const callbackMsgType = opt.callback; - if('string' === typeof callbackMsgType){ - /* Treat this as a worker message type and post each - row as a message of that type. 
*/ - const that = this; - opt.callback = - (row)=>wState.post(callbackMsgType,row,this.xfer); - } - try { - db.exec(opt); - if(opt.callback instanceof Function){ - opt.callback = callbackMsgType; - wState.post(callbackMsgType, null); - } - }/*catch(e){ - console.warn("Worker is propagating:",e);throw e; - }*/finally{ - delete db._blobXfer; - if(opt.callback){ - opt.callback = callbackMsgType; - } - } - return opt; - }/*exec()*/, - /** - TO(re)DO, once we can abstract away access to the - JS environment's virtual filesystem. Currently this - always throws. - - Response is (should be) an object: - - { - buffer: Uint8Array (db file contents), - filename: the current db filename, - mimetype: 'application/x-sqlite3' - } - - TODO is to determine how/whether this feature can support - exports of ":memory:" and "" (temp file) DBs. The latter is - ostensibly easy because the file is (potentially) on disk, but - the former does not have a structure which maps directly to a - db file image. - */ - export: function(ev){ - toss("export() requires reimplementing for portability reasons."); - /**const db = getMsgDb(ev); - const response = { - buffer: db.exportBinaryImage(), - filename: db.filename, - mimetype: 'application/x-sqlite3' - }; - this.xfer.push(response.buffer.buffer); - return response;**/ - }/*export()*/, - /** - Proxy for the DB constructor. Expects to be passed a single - object or a falsy value to use defaults. The object may - have a filename property to name the db file (see the DB - constructor for peculiarities and transformations) and/or a - buffer property (a Uint8Array holding a complete database - file's contents). The response is an object: - - { - filename: db filename (possibly differing from the input), - - id: an opaque ID value intended for future distinction - between multiple db handles. Messages including a specific - ID will use the DB for that ID. - - } - - If the Worker's db is currently opened, this call closes it - before proceeding. - */ - open: function(ev){ - wState.close(/*true???*/); - const args = [], data = (ev.data || {}); - if(data.simulateError){ - toss("Throwing because of open.simulateError flag."); - } - if(data.filename) args.push(data.filename); - if(data.buffer){ - args.push(data.buffer); - this.xfer.push(data.buffer.buffer); - } - const db = wState.open(args); - return { - filename: db.filename, - dbId: getDbId(db) - }; - }, - /** - Proxy for DB.close(). If ev.data may either be a boolean or - an object with an `unlink` property. If that value is - truthy then the db file (if the db is currently open) will - be unlinked from the virtual filesystem, else it will be - kept intact. The response object is: - - { - filename: db filename _if_ the db is opened when this - is called, else the undefined value - } - */ - close: function(ev){ - const db = getMsgDb(ev,false); - const response = { - filename: db && db.filename - }; - if(db){ - wState.close(db, !!((ev.data && 'object'===typeof ev.data) - ? ev.data.unlink : ev.data)); - } - return response; - }, - toss: function(ev){ - toss("Testing worker exception"); - } - }/*wMsgHandler*/; - - /** - UNDER CONSTRUCTION! - - A subset of the DB API is accessible via Worker messages in the - form: - - { type: apiCommand, - dbId: optional DB ID value (else uses a default db handle) - data: apiArguments - } - - As a rule, these commands respond with a postMessage() of their - own in the same form, but will, if needed, transform the `data` - member to an object and may add state to it. 
The responses - always have an object-format `data` part. If the inbound `data` - is an object which has a `messageId` property, that property is - always mirrored in the result object, for use in client-side - dispatching of these asynchronous results. Exceptions thrown - during processing result in an `error`-type event with a - payload in the form: - - { - message: error string, - errorClass: class name of the error type, - dbId: DB handle ID, - input: ev.data, - [messageId: if set in the inbound message] - } - - The individual APIs are documented in the wMsgHandler object. - */ - self.onmessage = function(ev){ - ev = ev.data; - let response, dbId = ev.dbId, evType = ev.type; - const arrivalTime = performance.now(); - try { - if(wMsgHandler.hasOwnProperty(evType) && - wMsgHandler[evType] instanceof Function){ - response = wMsgHandler[evType](ev); - }else{ - toss("Unknown db worker message type:",ev.type); - } - }catch(err){ - evType = 'error'; - response = { - message: err.message, - errorClass: err.name, - input: ev - }; - if(err.stack){ - response.stack = ('string'===typeof err.stack) - ? err.stack.split('\n') : err.stack; - } - if(0) console.warn("Worker is propagating an exception to main thread.", - "Reporting it _here_ for the stack trace:",err,response); - } - if(!response.messageId && ev.data - && 'object'===typeof ev.data && ev.data.messageId){ - response.messageId = ev.data.messageId; - } - if(!dbId){ - dbId = response.dbId/*from 'open' cmd*/ - || getDefaultDbId(); - } - if(!response.dbId) response.dbId = dbId; - // Timing info is primarily for use in testing this API. It's not part of - // the public API. arrivalTime = when the worker got the message. - response.workerReceivedTime = arrivalTime; - response.workerRespondTime = performance.now(); - response.departureTime = ev.departureTime; - wState.post(evType, response, wMsgHandler.xfer); - }; - setTimeout(()=>self.postMessage({type:'sqlite3-api',data:'worker-ready'}), 0); -}.bind({self, sqlite3: self.sqlite3}); ADDED ext/wasm/api/sqlite3-api-worker1.js Index: ext/wasm/api/sqlite3-api-worker1.js ================================================================== --- /dev/null +++ ext/wasm/api/sqlite3-api-worker1.js @@ -0,0 +1,654 @@ +/* + 2022-07-22 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + This file implements the initializer for the sqlite3 "Worker API + #1", a very basic DB access API intended to be scripted from a main + window thread via Worker-style messages. Because of limitations in + that type of communication, this API is minimalistic and only + capable of serving relatively basic DB requests (e.g. it cannot + process nested query loops concurrently). + + This file requires that the core C-style sqlite3 API and OO API #1 + have been loaded. +*/ + +/** + sqlite3.initWorker1API() implements a Worker-based wrapper around + SQLite3 OO API #1, colloquially known as "Worker API #1". + + In order to permit this API to be loaded in worker threads without + automatically registering onmessage handlers, initializing the + worker API requires calling initWorker1API(). If this function is + called from a non-worker thread then it throws an exception. It + must only be called once per Worker. 
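A main-thread sketch of driving this API, assuming the worker script (its name here is illustrative) loads the library and calls `sqlite3.initWorker1API()`; the message shapes follow the formats documented below:

```
// Hedged sketch: main-thread side of Worker API #1.
const w = new Worker("sqlite3-worker1.js"); // file name is illustrative
w.onmessage = function(ev){
  const msg = ev.data;
  if("sqlite3-api"===msg.type && "worker1-ready"===msg.result){
    // The worker is initialized: open a db...
    w.postMessage({type: "open", messageId: "open-1",
                   args: {filename: "mydb.sqlite3"}});
  }else if("open"===msg.type){
    // ...then run some SQL against it.
    w.postMessage({type: "exec", dbId: msg.dbId, messageId: "exec-1",
                   args: "CREATE TABLE IF NOT EXISTS t(a,b)"});
  }else if("error"===msg.type){
    console.error("worker1 error:", msg.result);
  }else{
    console.log("worker1 response:", msg);
  }
};
```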
+ + When initialized, it installs message listeners to receive Worker + messages and then it posts a message in the form: + + ``` + {type:'sqlite3-api', result:'worker1-ready'} + ``` + + to let the client know that it has been initialized. Clients may + optionally depend on this function not returning until + initialization is complete, as the initialization is synchronous. + In some contexts, however, listening for the above message is + a better fit. + + Note that the worker-based interface can be slightly quirky because + of its async nature. In particular, any number of messages may be posted + to the worker before it starts handling any of them. If, e.g., an + "open" operation fails, any subsequent messages will fail. The + Promise-based wrapper for this API (`sqlite3-worker1-promiser.js`) + is more comfortable to use in that regard. + + The documentation for the input and output worker messages for + this API follows... + + ==================================================================== + Common message format... + + Each message posted to the worker has an operation-independent + envelope and operation-dependent arguments: + + ``` + { + type: string, // one of: 'open', 'close', 'exec', 'config-get' + + messageId: OPTIONAL arbitrary value. The worker will copy it as-is + into response messages to assist in client-side dispatching. + + dbId: a db identifier string (returned by 'open') which tells the + operation which database instance to work on. If not provided, the + first-opened db is used. This is an "opaque" value, with no + inherently useful syntax or information. Its value is subject to + change with any given build of this API and cannot be used as a + basis for anything useful beyond its one intended purpose. + + args: ...operation-dependent arguments... + + // the framework may add other properties for testing or debugging + // purposes. + + } + ``` + + Response messages, posted back to the main thread, look like: + + ``` + { + type: string. Same as above except for error responses, which have the type + 'error', + + messageId: same value, if any, provided by the inbound message + + dbId: the id of the db which was operated on, if any, as returned + by the corresponding 'open' operation. + + result: ...operation-dependent result... + + } + ``` + + ==================================================================== + Error responses + + Errors are reported messages in an operation-independent format: + + ``` + { + type: "error", + + messageId: ...as above..., + + dbId: ...as above... + + result: { + + operation: type of the triggering operation: 'open', 'close', ... + + message: ...error message text... + + errorClass: string. The ErrorClass.name property from the thrown exception. + + input: the message object which triggered the error. + + stack: _if available_, a stack trace array. + + } + + } + ``` + + + ==================================================================== + "config-get" + + This operation fetches the serializable parts of the sqlite3 API + configuration. + + Message format: + + ``` + { + type: "config-get", + messageId: ...as above..., + args: currently ignored and may be elided. + } + ``` + + Response: + + ``` + { + type: "config-get", + messageId: ...as above..., + result: { + + version: sqlite3.version object + + bigIntEnabled: bool. True if BigInt support is enabled. + + wasmfsOpfsDir: path prefix, if any, _intended_ for use with + WASMFS OPFS persistent storage. 
+ + wasmfsOpfsEnabled: true if persistent storage is enabled in the + current environment. Only files stored under wasmfsOpfsDir + will persist using that mechanism, however. It is legal to use + the non-WASMFS OPFS VFS to open a database via a URI-style + db filename. + + vfsList: result of sqlite3.capi.sqlite3_js_vfs_list() + } + } + ``` + + + ==================================================================== + "open" a database + + Message format: + + ``` + { + type: "open", + messageId: ...as above..., + args:{ + + filename [=":memory:" or "" (unspecified)]: the db filename. + See the sqlite3.oo1.DB constructor for peculiarities and + transformations, + + vfs: sqlite3_vfs name. Ignored if filename is ":memory:" or "". + This may change how the given filename is resolved. + } + } + ``` + + Response: + + ``` + { + type: "open", + messageId: ...as above..., + result: { + filename: db filename, possibly differing from the input. + + dbId: an opaque ID value which must be passed in the message + envelope to other calls in this API to tell them which db to + use. If it is not provided to future calls, they will default to + operating on the least-recently-opened db. This property is, for + API consistency's sake, also part of the containing message + envelope. Only the `open` operation includes it in the `result` + property. + + persistent: true if the given filename resides in the + known-persistent storage, else false. + + vfs: name of the VFS the "main" db is using. + } + } + ``` + + ==================================================================== + "close" a database + + Message format: + + ``` + { + type: "close", + messageId: ...as above... + dbId: ...as above... + args: OPTIONAL {unlink: boolean} + } + ``` + + If the `dbId` does not refer to an opened ID, this is a no-op. If + the `args` object contains a truthy `unlink` value then the database + will be unlinked (deleted) after closing it. The inability to close a + db (because it's not opened) or delete its file does not trigger an + error. + + Response: + + ``` + { + type: "close", + messageId: ...as above..., + result: { + + filename: filename of closed db, or undefined if no db was closed + + } + } + ``` + + ==================================================================== + "exec" SQL + + All SQL execution is processed through the exec operation. It offers + most of the features of the oo1.DB.exec() method, with a few limitations + imposed by the state having to cross thread boundaries. + + Message format: + + ``` + { + type: "exec", + messageId: ...as above... + dbId: ...as above... + args: string (SQL) or {... see below ...} + } + ``` + + Response: + + ``` + { + type: "exec", + messageId: ...as above..., + dbId: ...as above... + result: { + input arguments, possibly modified. See below. + } + } + ``` + + The arguments are in the same form accepted by oo1.DB.exec(), with + the exceptions noted below. + + A function-type args.callback property cannot cross + the window/Worker boundary, so is not useful here. If + args.callback is a string then it is assumed to be a + message type key, in which case a callback function will be + applied which posts each row result via: + + postMessage({type: thatKeyType, + rowNumber: 1-based-#, + row: theRow, + columnNames: anArray + }) + + And, at the end of the result set (whether or not any result rows + were produced), it will post an identical message with + (row=undefined, rowNumber=null) to alert the caller than the result + set is completed. 
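
  As an illustration of the string-callback mechanism just described, a
  hypothetical main-thread round trip might look like the following
  (`w`, `theDbId`, and the 'my-rows' key are placeholders chosen by the
  client, not part of this patch):

  ```
  w.postMessage({
    type: 'exec',
    dbId: theDbId,                  // from a prior 'open' response
    messageId: 'exec-1',
    args: {
      sql: 'SELECT a, b FROM t',
      rowMode: 'object',
      callback: 'my-rows'           // string => rows arrive as 'my-rows' messages
    }
  });
  w.addEventListener('message', function(ev){
    const msg = ev.data;
    if('my-rows' === msg.type){
      if(null === msg.rowNumber){   // end-of-results sentinel (row===undefined)
        console.log('query finished; columns were', msg.columnNames);
      }else{
        console.log('row', msg.rowNumber, msg.row);
      }
    }else if('error' === msg.type){
      console.error('exec failed:', msg.result.message);
    }
  });
  ```
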
Note that a row value of `null` is a legal row + result for certain arg.rowMode values. + + (Design note: we don't use (row=undefined, rowNumber=undefined) to + indicate end-of-results because fetching those would be + indistinguishable from fetching from an empty object unless the + client used hasOwnProperty() (or similar) to distinguish "missing + property" from "property with the undefined value". Similarly, + `null` is a legal value for `row` in some case , whereas the db + layer won't emit a result value of `undefined`.) + + The callback proxy must not recurse into this interface. An exec() + call will tie up the Worker thread, causing any recursion attempt + to wait until the first exec() is completed. + + The response is the input options object (or a synthesized one if + passed only a string), noting that options.resultRows and + options.columnNames may be populated by the call to db.exec(). + +*/ +self.sqlite3ApiBootstrap.initializers.push(function(sqlite3){ +sqlite3.initWorker1API = function(){ + 'use strict'; + const toss = (...args)=>{throw new Error(args.join(' '))}; + if('function' !== typeof importScripts){ + toss("initWorker1API() must be run from a Worker thread."); + } + const self = this.self; + const sqlite3 = this.sqlite3 || toss("Missing this.sqlite3 object."); + const DB = sqlite3.oo1.DB; + + /** + Returns the app-wide unique ID for the given db, creating one if + needed. + */ + const getDbId = function(db){ + let id = wState.idMap.get(db); + if(id) return id; + id = 'db#'+(++wState.idSeq)+'@'+db.pointer; + /** ^^^ can't simply use db.pointer b/c closing/opening may re-use + the same address, which could map pending messages to a wrong + instance. */ + wState.idMap.set(db, id); + return id; + }; + + /** + Internal helper for managing Worker-level state. + */ + const wState = { + /** + Each opened DB is added to this.dbList, and the first entry in + that list is the default db. As each db is closed, its entry is + removed from the list. + */ + dbList: [], + /** Sequence number of dbId generation. */ + idSeq: 0, + /** Map of DB instances to dbId. */ + idMap: new WeakMap, + /** Temp holder for "transferable" postMessage() state. */ + xfer: [], + open: function(opt){ + const db = new DB(opt); + this.dbs[getDbId(db)] = db; + if(this.dbList.indexOf(db)<0) this.dbList.push(db); + return db; + }, + close: function(db,alsoUnlink){ + if(db){ + delete this.dbs[getDbId(db)]; + const filename = db.filename; + const pVfs = sqlite3.wasm.sqlite3_wasm_db_vfs(db.pointer, 0); + db.close(); + const ddNdx = this.dbList.indexOf(db); + if(ddNdx>=0) this.dbList.splice(ddNdx, 1); + if(alsoUnlink && filename && pVfs){ + sqlite3.wasm.sqlite3_wasm_vfs_unlink(pVfs, filename); + } + } + }, + /** + Posts the given worker message value. If xferList is provided, + it must be an array, in which case a copy of it passed as + postMessage()'s second argument and xferList.length is set to + 0. + */ + post: function(msg,xferList){ + if(xferList && xferList.length){ + self.postMessage( msg, Array.from(xferList) ); + xferList.length = 0; + }else{ + self.postMessage(msg); + } + }, + /** Map of DB IDs to DBs. */ + dbs: Object.create(null), + /** Fetch the DB for the given id. Throw if require=true and the + id is not valid, else return the db or undefined. */ + getDb: function(id,require=true){ + return this.dbs[id] + || (require ? toss("Unknown (or closed) DB ID:",id) : undefined); + } + }; + + /** Throws if the given db is falsy or not opened, else returns its + argument. 
*/ + const affirmDbOpen = function(db = wState.dbList[0]){ + return (db && db.pointer) ? db : toss("DB is not opened."); + }; + + /** Extract dbId from the given message payload. */ + const getMsgDb = function(msgData,affirmExists=true){ + const db = wState.getDb(msgData.dbId,false) || wState.dbList[0]; + return affirmExists ? affirmDbOpen(db) : db; + }; + + const getDefaultDbId = function(){ + return wState.dbList[0] && getDbId(wState.dbList[0]); + }; + + const guessVfs = function(filename){ + const m = /^file:.+(vfs=(\w+))/.exec(filename); + return sqlite3.capi.sqlite3_vfs_find(m ? m[2] : 0); + }; + + const isSpecialDbFilename = (n)=>{ + return ""===n || ':'===n[0]; + }; + + /** + A level of "organizational abstraction" for the Worker1 + API. Each method in this object must map directly to a Worker1 + message type key. The onmessage() dispatcher attempts to + dispatch all inbound messages to a method of this object, + passing it the event.data part of the inbound event object. All + methods must return a plain Object containing any result + state, which the dispatcher may amend. All methods must throw + on error. + */ + const wMsgHandler = { + open: function(ev){ + const oargs = Object.create(null), args = (ev.args || Object.create(null)); + if(args.simulateError){ // undocumented internal testing option + toss("Throwing because of simulateError flag."); + } + const rc = Object.create(null); + const pDir = sqlite3.capi.sqlite3_wasmfs_opfs_dir(); + let byteArray, pVfs; + oargs.vfs = args.vfs; + if(isSpecialDbFilename(args.filename)){ + oargs.filename = args.filename || ""; + }else{ + oargs.filename = args.filename; + byteArray = args.byteArray; + if(byteArray) pVfs = guessVfs(args.filename); + } + if(pVfs){ + /* 2022-11-02: this feature is as-yet untested except that + sqlite3_wasm_vfs_create_file() has been tested from the + browser dev console. */ + let pMem; + try{ + pMem = sqlite3.wasm.allocFromTypedArray(byteArray); + const rc = sqlite3.wasm.sqlite3_wasm_vfs_create_file( + pVfs, oargs.filename, pMem, byteArray.byteLength + ); + if(rc) sqlite3.SQLite3Error.toss(rc); + }catch(e){ + throw new sqlite3.SQLite3Error( + e.name+' creating '+args.filename+": "+e.message, { + cause: e + } + ); + }finally{ + if(pMem) sqlite3.wasm.dealloc(pMem); + } + } + const db = wState.open(oargs); + rc.filename = db.filename; + rc.persistent = (!!pDir && db.filename.startsWith(pDir+'/')) + || !!sqlite3.capi.sqlite3_js_db_uses_vfs(db.pointer, "opfs"); + rc.dbId = getDbId(db); + rc.vfs = db.dbVfsName(); + return rc; + }, + + close: function(ev){ + const db = getMsgDb(ev,false); + const response = { + filename: db && db.filename + }; + if(db){ + const doUnlink = ((ev.args && 'object'===typeof ev.args) + ? !!ev.args.unlink : false); + wState.close(db, doUnlink); + } + return response; + }, + + exec: function(ev){ + const rc = ( + 'string'===typeof ev.args + ) ? {sql: ev.args} : (ev.args || Object.create(null)); + if('stmt'===rc.rowMode){ + toss("Invalid rowMode for 'exec': stmt mode", + "does not work in the Worker API."); + }else if(!rc.sql){ + toss("'exec' requires input SQL."); + } + const db = getMsgDb(ev); + if(rc.callback || Array.isArray(rc.resultRows)){ + // Part of a copy-avoidance optimization for blobs + db._blobXfer = wState.xfer; + } + const theCallback = rc.callback; + let rowNumber = 0; + const hadColNames = !!rc.columnNames; + if('string' === typeof theCallback){ + if(!hadColNames) rc.columnNames = []; + /* Treat this as a worker message type and post each + row as a message of that type. 
*/ + rc.callback = function(row,stmt){ + wState.post({ + type: theCallback, + columnNames: rc.columnNames, + rowNumber: ++rowNumber, + row: row + }, wState.xfer); + } + } + try { + db.exec(rc); + if(rc.callback instanceof Function){ + rc.callback = theCallback; + /* Post a sentinel message to tell the client that the end + of the result set has been reached (possibly with zero + rows). */ + wState.post({ + type: theCallback, + columnNames: rc.columnNames, + rowNumber: null /*null to distinguish from "property not set"*/, + row: undefined /*undefined because null is a legal row value + for some rowType values, but undefined is not*/ + }); + } + }finally{ + delete db._blobXfer; + if(rc.callback) rc.callback = theCallback; + } + return rc; + }/*exec()*/, + + 'config-get': function(){ + const rc = Object.create(null), src = sqlite3.config; + [ + 'wasmfsOpfsDir', 'bigIntEnabled' + ].forEach(function(k){ + if(Object.getOwnPropertyDescriptor(src, k)) rc[k] = src[k]; + }); + rc.wasmfsOpfsEnabled = !!sqlite3.capi.sqlite3_wasmfs_opfs_dir(); + rc.version = sqlite3.version; + rc.vfsList = sqlite3.capi.sqlite3_js_vfs_list(); + rc.opfsEnabled = !!sqlite3.opfs; + return rc; + }, + + /** + Exports the database to a byte array, as per + sqlite3_serialize(). Response is an object: + + { + byteArray: Uint8Array (db file contents), + filename: the current db filename, + mimetype: 'application/x-sqlite3' + } + */ + export: function(ev){ + const db = getMsgDb(ev); + const response = { + byteArray: sqlite3.capi.sqlite3_js_db_export(db.pointer), + filename: db.filename, + mimetype: 'application/x-sqlite3' + }; + wState.xfer.push(response.byteArray.buffer); + return response; + }/*export()*/, + + toss: function(ev){ + toss("Testing worker exception"); + }, + + 'opfs-tree': async function(ev){ + if(!sqlite3.opfs) toss("OPFS support is unavailable."); + const response = await sqlite3.opfs.treeList(); + return response; + } + }/*wMsgHandler*/; + + self.onmessage = async function(ev){ + ev = ev.data; + let result, dbId = ev.dbId, evType = ev.type; + const arrivalTime = performance.now(); + try { + if(wMsgHandler.hasOwnProperty(evType) && + wMsgHandler[evType] instanceof Function){ + result = await wMsgHandler[evType](ev); + }else{ + toss("Unknown db worker message type:",ev.type); + } + }catch(err){ + evType = 'error'; + result = { + operation: ev.type, + message: err.message, + errorClass: err.name, + input: ev + }; + if(err.stack){ + result.stack = ('string'===typeof err.stack) + ? err.stack.split(/\n\s*/) : err.stack; + } + if(0) console.warn("Worker is propagating an exception to main thread.", + "Reporting it _here_ for the stack trace:",err,result); + } + if(!dbId){ + dbId = result.dbId/*from 'open' cmd*/ + || getDefaultDbId(); + } + // Timing info is primarily for use in testing this API. It's not part of + // the public API. arrivalTime = when the worker got the message. + wState.post({ + type: evType, + dbId: dbId, + messageId: ev.messageId, + workerReceivedTime: arrivalTime, + workerRespondTime: performance.now(), + departureTime: ev.departureTime, + // TODO: move the timing bits into... 
+ //timing:{ + // departure: ev.departureTime, + // workerReceived: arrivalTime, + // workerResponse: performance.now(); + //}, + result: result + }, wState.xfer); + }; + self.postMessage({type:'sqlite3-api',result:'worker1-ready'}); +}.bind({self, sqlite3}); +}); ADDED ext/wasm/api/sqlite3-license-version-header.js Index: ext/wasm/api/sqlite3-license-version-header.js ================================================================== --- /dev/null +++ ext/wasm/api/sqlite3-license-version-header.js @@ -0,0 +1,25 @@ +/* +** LICENSE for the sqlite3 WebAssembly/JavaScript APIs. +** +** This bundle (typically released as sqlite3.js or sqlite3-wasmfs.js) +** is an amalgamation of JavaScript source code from two projects: +** +** 1) https://emscripten.org: the Emscripten "glue code" is covered by +** the terms of the MIT license and University of Illinois/NCSA +** Open Source License, as described at: +** +** https://emscripten.org/docs/introducing_emscripten/emscripten_license.html +** +** 2) https://sqlite.org: all code and documentation labeled as being +** from this source are released under the same terms as the sqlite3 +** C library: +** +** 2022-10-16 +** +** The author disclaims copyright to this source code. In place of a +** legal notice, here is a blessing: +** +** * May you do good and not evil. +** * May you find forgiveness for yourself and forgive others. +** * May you share freely, never taking more than you give. +*/ ADDED ext/wasm/api/sqlite3-opfs-async-proxy.js Index: ext/wasm/api/sqlite3-opfs-async-proxy.js ================================================================== --- /dev/null +++ ext/wasm/api/sqlite3-opfs-async-proxy.js @@ -0,0 +1,830 @@ +/* + 2022-09-16 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + A Worker which manages asynchronous OPFS handles on behalf of a + synchronous API which controls it via a combination of Worker + messages, SharedArrayBuffer, and Atomics. It is the asynchronous + counterpart of the API defined in sqlite3-api-opfs.js. + + Highly indebted to: + + https://github.com/rhashimoto/wa-sqlite/blob/master/src/examples/OriginPrivateFileSystemVFS.js + + for demonstrating how to use the OPFS APIs. + + This file is to be loaded as a Worker. It does not have any direct + access to the sqlite3 JS/WASM bits, so any bits which it needs (most + notably SQLITE_xxx integer codes) have to be imported into it via an + initialization process. + + This file represents an implementation detail of a larger piece of + code, and not a public interface. Its details may change at any time + and are not intended to be used by any client-level code. +*/ +"use strict"; +const toss = function(...args){throw new Error(args.join(' '))}; +if(self.window === self){ + toss("This code cannot run from the main thread.", + "Load it as a Worker from a separate Worker."); +}else if(!navigator.storage.getDirectory){ + toss("This API requires navigator.storage.getDirectory."); +} + +/** + Will hold state copied to this object from the syncronous side of + this API. 
+*/ +const state = Object.create(null); + +/** + verbose: + + 0 = no logging output + 1 = only errors + 2 = warnings and errors + 3 = debug, warnings, and errors +*/ +state.verbose = 2; + +const loggers = { + 0:console.error.bind(console), + 1:console.warn.bind(console), + 2:console.log.bind(console) +}; +const logImpl = (level,...args)=>{ + if(state.verbose>level) loggers[level]("OPFS asyncer:",...args); +}; +const log = (...args)=>logImpl(2, ...args); +const warn = (...args)=>logImpl(1, ...args); +const error = (...args)=>logImpl(0, ...args); +const metrics = Object.create(null); +metrics.reset = ()=>{ + let k; + const r = (m)=>(m.count = m.time = m.wait = 0); + for(k in state.opIds){ + r(metrics[k] = Object.create(null)); + } + let s = metrics.s11n = Object.create(null); + s = s.serialize = Object.create(null); + s.count = s.time = 0; + s = metrics.s11n.deserialize = Object.create(null); + s.count = s.time = 0; +}; +metrics.dump = ()=>{ + let k, n = 0, t = 0, w = 0; + for(k in state.opIds){ + const m = metrics[k]; + n += m.count; + t += m.time; + w += m.wait; + m.avgTime = (m.count && m.time) ? (m.time / m.count) : 0; + } + console.log(self.location.href, + "metrics for",self.location.href,":\n", + metrics, + "\nTotal of",n,"op(s) for",t,"ms", + "approx",w,"ms spent waiting on OPFS APIs."); + console.log("Serialization metrics:",metrics.s11n); +}; + +/** + __openFiles is a map of sqlite3_file pointers (integers) to + metadata related to a given OPFS file handles. The pointers are, in + this side of the interface, opaque file handle IDs provided by the + synchronous part of this constellation. Each value is an object + with a structure demonstrated in the xOpen() impl. +*/ +const __openFiles = Object.create(null); +/** + __autoLocks is a Set of sqlite3_file pointers (integers) which were + "auto-locked". i.e. those for which we obtained a sync access + handle without an explicit xLock() call. Such locks will be + released during db connection idle time, whereas a sync access + handle obtained via xLock(), or subsequently xLock()'d after + auto-acquisition, will not be released until xUnlock() is called. + + Maintenance reminder: if we relinquish auto-locks at the end of the + operation which acquires them, we pay a massive performance + penalty: speedtest1 benchmarks take up to 4x as long. By delaying + the lock release until idle time, the hit is negligible. +*/ +const __autoLocks = new Set(); + +/** + Expects an OPFS file path. It gets resolved, such that ".." + components are properly expanded, and returned. If the 2nd arg is + true, the result is returned as an array of path elements, else an + absolute path string is returned. +*/ +const getResolvedPath = function(filename,splitIt){ + const p = new URL( + filename, 'file://irrelevant' + ).pathname; + return splitIt ? p.split('/').filter((v)=>!!v) : p; +}; + +/** + Takes the absolute path to a filesystem element. Returns an array + of [handleOfContainingDir, filename]. If the 2nd argument is truthy + then each directory element leading to the file is created along + the way. Throws if any creation or resolution fails. 
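
  As a concrete illustration of the URL trick used by getResolvedPath()
  above (illustrative only; the 'file://irrelevant' base merely gives the
  URL parser something to resolve against):

  ```
  // ".." and "." components are folded away by the URL parser:
  new URL('a/../x/./y.db', 'file://irrelevant').pathname;  // => '/x/y.db'
  // With splitIt=true the same path comes back as elements:
  //   getResolvedPath('a/../x/./y.db', true) => ['x', 'y.db']
  // so getDirForFilename() below would hand back [<handle of '/x'>, 'y.db'].
  ```
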
+*/ +const getDirForFilename = async function f(absFilename, createDirs = false){ + const path = getResolvedPath(absFilename, true); + const filename = path.pop(); + let dh = state.rootDir; + for(const dirName of path){ + if(dirName){ + dh = await dh.getDirectoryHandle(dirName, {create: !!createDirs}); + } + } + return [dh, filename]; +}; + +/** + An error class specifically for use with getSyncHandle(), the goal + of which is to eventually be able to distinguish unambiguously + between locking-related failures and other types, noting that we + cannot currently do so because createSyncAccessHandle() does not + define its exceptions in the required level of detail. +*/ +class GetSyncHandleError extends Error { + constructor(errorObject, ...msg){ + super(); + this.error = errorObject; + this.message = [ + ...msg, ': Original exception ['+errorObject.name+']:', + errorObject.message + ].join(' '); + this.name = 'GetSyncHandleError'; + } +}; + +/** + Returns the sync access handle associated with the given file + handle object (which must be a valid handle object, as created by + xOpen()), lazily opening it if needed. + + In order to help alleviate cross-tab contention for a dabase, + if an exception is thrown while acquiring the handle, this routine + will wait briefly and try again, up to 3 times. If acquisition + still fails at that point it will give up and propagate the + exception. +*/ +const getSyncHandle = async (fh)=>{ + if(!fh.syncHandle){ + const t = performance.now(); + log("Acquiring sync handle for",fh.filenameAbs); + const maxTries = 4, msBase = 300; + let i = 1, ms = msBase; + for(; true; ms = msBase * ++i){ + try { + //if(i<3) toss("Just testing getSyncHandle() wait-and-retry."); + //TODO? A config option which tells it to throw here + //randomly every now and then, for testing purposes. + fh.syncHandle = await fh.fileHandle.createSyncAccessHandle(); + break; + }catch(e){ + if(i === maxTries){ + throw new GetSyncHandleError( + e, "Error getting sync handle.",maxTries, + "attempts failed.",fh.filenameAbs + ); + } + warn("Error getting sync handle. Waiting",ms, + "ms and trying again.",fh.filenameAbs,e); + Atomics.wait(state.sabOPView, state.opIds.retry, 0, ms); + } + } + log("Got sync handle for",fh.filenameAbs,'in',performance.now() - t,'ms'); + if(!fh.xLock){ + __autoLocks.add(fh.fid); + log("Auto-locked",fh.fid,fh.filenameAbs); + } + } + return fh.syncHandle; +}; + +/** + If the given file-holding object has a sync handle attached to it, + that handle is remove and asynchronously closed. Though it may + sound sensible to continue work as soon as the close() returns + (noting that it's asynchronous), doing so can cause operations + performed soon afterwards, e.g. a call to getSyncHandle() to fail + because they may happen out of order from the close(). OPFS does + not guaranty that the actual order of operations is retained in + such cases. i.e. always "await" on the result of this function. +*/ +const closeSyncHandle = async (fh)=>{ + if(fh.syncHandle){ + log("Closing sync handle for",fh.filenameAbs); + const h = fh.syncHandle; + delete fh.syncHandle; + delete fh.xLock; + __autoLocks.delete(fh.fid); + return h.close(); + } +}; + +/** + A proxy for closeSyncHandle() which is guaranteed to not throw. + + This function is part of a lock/unlock step in functions which + require a sync access handle but may be called without xLock() + having been called first. Such calls need to release that + handle to avoid locking the file for all of time. 
This is an + _attempt_ at reducing cross-tab contention but it may prove + to be more of a problem than a solution and may need to be + removed. +*/ +const closeSyncHandleNoThrow = async (fh)=>{ + try{await closeSyncHandle(fh)} + catch(e){ + warn("closeSyncHandleNoThrow() ignoring:",e,fh); + } +}; + +/** + Stores the given value at state.sabOPView[state.opIds.rc] and then + Atomics.notify()'s it. +*/ +const storeAndNotify = (opName, value)=>{ + log(opName+"() => notify(",value,")"); + Atomics.store(state.sabOPView, state.opIds.rc, value); + Atomics.notify(state.sabOPView, state.opIds.rc); +}; + +/** + Throws if fh is a file-holding object which is flagged as read-only. +*/ +const affirmNotRO = function(opName,fh){ + if(fh.readOnly) toss(opName+"(): File is read-only: "+fh.filenameAbs); +}; +const affirmLocked = function(opName,fh){ + //if(!fh.syncHandle) toss(opName+"(): File does not have a lock: "+fh.filenameAbs); + /** + Currently a no-op, as speedtest1 triggers xRead() without a + lock (that seems like a bug but it's currently uninvestigated). + This means, however, that some OPFS VFS routines may trigger + acquisition of a lock but never let it go until xUnlock() is + called (which it likely won't be if xLock() was not called). + */ +}; + +/** + We track 2 different timers: the "metrics" timer records how much + time we spend performing work. The "wait" timer records how much + time we spend waiting on the underlying OPFS timer. See the calls + to mTimeStart(), mTimeEnd(), wTimeStart(), and wTimeEnd() + throughout this file to see how they're used. +*/ +const __mTimer = Object.create(null); +__mTimer.op = undefined; +__mTimer.start = undefined; +const mTimeStart = (op)=>{ + __mTimer.start = performance.now(); + __mTimer.op = op; + //metrics[op] || toss("Maintenance required: missing metrics for",op); + ++metrics[op].count; +}; +const mTimeEnd = ()=>( + metrics[__mTimer.op].time += performance.now() - __mTimer.start +); +const __wTimer = Object.create(null); +__wTimer.op = undefined; +__wTimer.start = undefined; +const wTimeStart = (op)=>{ + __wTimer.start = performance.now(); + __wTimer.op = op; + //metrics[op] || toss("Maintenance required: missing metrics for",op); +}; +const wTimeEnd = ()=>( + metrics[__wTimer.op].wait += performance.now() - __wTimer.start +); + +/** + Gets set to true by the 'opfs-async-shutdown' command to quit the + wait loop. This is only intended for debugging purposes: we cannot + inspect this file's state while the tight waitLoop() is running and + need a way to stop that loop for introspection purposes. +*/ +let flagAsyncShutdown = false; + + +/** + Asynchronous wrappers for sqlite3_vfs and sqlite3_io_methods + methods, as well as helpers like mkdir(). Maintenance reminder: + members are in alphabetical order to simplify finding them. 
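
  The storeAndNotify() helper above is one half of a rendezvous over the
  SharedArrayBuffer; the other half lives in the synchronous OPFS VFS
  code (sqlite3-api-opfs.js, not part of this hunk). A generic,
  simplified sketch of the pattern, with made-up slot names (the real
  protocol also serializes arguments and an operation id):

  ```
  const sab  = new SharedArrayBuffer(8);
  const view = new Int32Array(sab);      // [1] = result slot

  // "Async" side (this file): publish a result and wake the waiter.
  function respond(value){
    Atomics.store(view, 1, value);
    Atomics.notify(view, 1);
  }

  // "Sync" side: block until the result slot changes from 0.
  // (Atomics.wait() is only permitted in worker threads.)
  function waitForResult(){
    Atomics.wait(view, 1, 0);
    return Atomics.load(view, 1);
  }
  ```
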
+*/ +const vfsAsyncImpls = { + 'opfs-async-metrics': async ()=>{ + mTimeStart('opfs-async-metrics'); + metrics.dump(); + storeAndNotify('opfs-async-metrics', 0); + mTimeEnd(); + }, + 'opfs-async-shutdown': async ()=>{ + flagAsyncShutdown = true; + storeAndNotify('opfs-async-shutdown', 0); + }, + mkdir: async (dirname)=>{ + mTimeStart('mkdir'); + let rc = 0; + wTimeStart('mkdir'); + try { + await getDirForFilename(dirname+"/filepart", true); + }catch(e){ + state.s11n.storeException(2,e); + rc = state.sq3Codes.SQLITE_IOERR; + }finally{ + wTimeEnd(); + } + storeAndNotify('mkdir', rc); + mTimeEnd(); + }, + xAccess: async (filename)=>{ + mTimeStart('xAccess'); + /* OPFS cannot support the full range of xAccess() queries sqlite3 + calls for. We can essentially just tell if the file is + accessible, but if it is it's automatically writable (unless + it's locked, which we cannot(?) know without trying to open + it). OPFS does not have the notion of read-only. + + The return semantics of this function differ from sqlite3's + xAccess semantics because we are limited in what we can + communicate back to our synchronous communication partner: 0 = + accessible, non-0 means not accessible. + */ + let rc = 0; + wTimeStart('xAccess'); + try{ + const [dh, fn] = await getDirForFilename(filename); + await dh.getFileHandle(fn); + }catch(e){ + state.s11n.storeException(2,e); + rc = state.sq3Codes.SQLITE_IOERR; + }finally{ + wTimeEnd(); + } + storeAndNotify('xAccess', rc); + mTimeEnd(); + }, + xClose: async function(fid/*sqlite3_file pointer*/){ + const opName = 'xClose'; + mTimeStart(opName); + __autoLocks.delete(fid); + const fh = __openFiles[fid]; + let rc = 0; + wTimeStart(opName); + if(fh){ + delete __openFiles[fid]; + await closeSyncHandle(fh); + if(fh.deleteOnClose){ + try{ await fh.dirHandle.removeEntry(fh.filenamePart) } + catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) } + } + }else{ + state.s11n.serialize(); + rc = state.sq3Codes.SQLITE_NOTFOUND; + } + wTimeEnd(); + storeAndNotify(opName, rc); + mTimeEnd(); + }, + xDelete: async function(...args){ + mTimeStart('xDelete'); + const rc = await vfsAsyncImpls.xDeleteNoWait(...args); + storeAndNotify('xDelete', rc); + mTimeEnd(); + }, + xDeleteNoWait: async function(filename, syncDir = 0, recursive = false){ + /* The syncDir flag is, for purposes of the VFS API's semantics, + ignored here. However, if it has the value 0x1234 then: after + deleting the given file, recursively try to delete any empty + directories left behind in its wake (ignoring any errors and + stopping at the first failure). + + That said: we don't know for sure that removeEntry() fails if + the dir is not empty because the API is not documented. It has, + however, a "recursive" flag which defaults to false, so + presumably it will fail if the dir is not empty and that flag + is false. 
+ */ + let rc = 0; + wTimeStart('xDelete'); + try { + while(filename){ + const [hDir, filenamePart] = await getDirForFilename(filename, false); + if(!filenamePart) break; + await hDir.removeEntry(filenamePart, {recursive}); + if(0x1234 !== syncDir) break; + recursive = false; + filename = getResolvedPath(filename, true); + filename.pop(); + filename = filename.join('/'); + } + }catch(e){ + state.s11n.storeException(2,e); + rc = state.sq3Codes.SQLITE_IOERR_DELETE; + } + wTimeEnd(); + return rc; + }, + xFileSize: async function(fid/*sqlite3_file pointer*/){ + mTimeStart('xFileSize'); + const fh = __openFiles[fid]; + let rc; + wTimeStart('xFileSize'); + try{ + affirmLocked('xFileSize',fh); + rc = await (await getSyncHandle(fh)).getSize(); + state.s11n.serialize(Number(rc)); + rc = 0; + }catch(e){ + state.s11n.storeException(2,e); + rc = state.sq3Codes.SQLITE_IOERR; + } + wTimeEnd(); + storeAndNotify('xFileSize', rc); + mTimeEnd(); + }, + xLock: async function(fid/*sqlite3_file pointer*/, + lockType/*SQLITE_LOCK_...*/){ + mTimeStart('xLock'); + const fh = __openFiles[fid]; + let rc = 0; + const oldLockType = fh.xLock; + fh.xLock = lockType; + if( !fh.syncHandle ){ + wTimeStart('xLock'); + try { + await getSyncHandle(fh); + __autoLocks.delete(fid); + }catch(e){ + state.s11n.storeException(1,e); + rc = state.sq3Codes.SQLITE_IOERR_LOCK; + fh.xLock = oldLockType; + } + wTimeEnd(); + } + storeAndNotify('xLock',rc); + mTimeEnd(); + }, + xOpen: async function(fid/*sqlite3_file pointer*/, filename, + flags/*SQLITE_OPEN_...*/){ + const opName = 'xOpen'; + mTimeStart(opName); + const deleteOnClose = (state.sq3Codes.SQLITE_OPEN_DELETEONCLOSE & flags); + const create = (state.sq3Codes.SQLITE_OPEN_CREATE & flags); + wTimeStart('xOpen'); + try{ + let hDir, filenamePart; + try { + [hDir, filenamePart] = await getDirForFilename(filename, !!create); + }catch(e){ + state.s11n.storeException(1,e); + storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND); + mTimeEnd(); + wTimeEnd(); + return; + } + const hFile = await hDir.getFileHandle(filenamePart, {create}); + /** + wa-sqlite, at this point, grabs a SyncAccessHandle and + assigns it to the syncHandle prop of the file state + object, but only for certain cases and it's unclear why it + places that limitation on it. + */ + wTimeEnd(); + __openFiles[fid] = Object.assign(Object.create(null),{ + fid: fid, + filenameAbs: filename, + filenamePart: filenamePart, + dirHandle: hDir, + fileHandle: hFile, + sabView: state.sabFileBufView, + readOnly: create + ? 
false : (state.sq3Codes.SQLITE_OPEN_READONLY & flags), + deleteOnClose: deleteOnClose + }); + storeAndNotify(opName, 0); + }catch(e){ + wTimeEnd(); + error(opName,e); + state.s11n.storeException(1,e); + storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR); + } + mTimeEnd(); + }, + xRead: async function(fid/*sqlite3_file pointer*/,n,offset64){ + mTimeStart('xRead'); + let rc = 0, nRead; + const fh = __openFiles[fid]; + try{ + affirmLocked('xRead',fh); + wTimeStart('xRead'); + nRead = (await getSyncHandle(fh)).read( + fh.sabView.subarray(0, n), + {at: Number(offset64)} + ); + wTimeEnd(); + if(nRead < n){/* Zero-fill remaining bytes */ + fh.sabView.fill(0, nRead, n); + rc = state.sq3Codes.SQLITE_IOERR_SHORT_READ; + } + }catch(e){ + if(undefined===nRead) wTimeEnd(); + error("xRead() failed",e,fh); + state.s11n.storeException(1,e); + rc = state.sq3Codes.SQLITE_IOERR_READ; + } + storeAndNotify('xRead',rc); + mTimeEnd(); + }, + xSync: async function(fid/*sqlite3_file pointer*/,flags/*ignored*/){ + mTimeStart('xSync'); + const fh = __openFiles[fid]; + let rc = 0; + if(!fh.readOnly && fh.syncHandle){ + try { + wTimeStart('xSync'); + await fh.syncHandle.flush(); + }catch(e){ + state.s11n.storeException(2,e); + rc = state.sq3Codes.SQLITE_IOERR_FSYNC; + } + wTimeEnd(); + } + storeAndNotify('xSync',rc); + mTimeEnd(); + }, + xTruncate: async function(fid/*sqlite3_file pointer*/,size){ + mTimeStart('xTruncate'); + let rc = 0; + const fh = __openFiles[fid]; + wTimeStart('xTruncate'); + try{ + affirmLocked('xTruncate',fh); + affirmNotRO('xTruncate', fh); + await (await getSyncHandle(fh)).truncate(size); + }catch(e){ + error("xTruncate():",e,fh); + state.s11n.storeException(2,e); + rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE; + } + wTimeEnd(); + storeAndNotify('xTruncate',rc); + mTimeEnd(); + }, + xUnlock: async function(fid/*sqlite3_file pointer*/, + lockType/*SQLITE_LOCK_...*/){ + mTimeStart('xUnlock'); + let rc = 0; + const fh = __openFiles[fid]; + if( state.sq3Codes.SQLITE_LOCK_NONE===lockType + && fh.syncHandle ){ + wTimeStart('xUnlock'); + try { await closeSyncHandle(fh) } + catch(e){ + state.s11n.storeException(1,e); + rc = state.sq3Codes.SQLITE_IOERR_UNLOCK; + } + wTimeEnd(); + } + storeAndNotify('xUnlock',rc); + mTimeEnd(); + }, + xWrite: async function(fid/*sqlite3_file pointer*/,n,offset64){ + mTimeStart('xWrite'); + let rc; + const fh = __openFiles[fid]; + wTimeStart('xWrite'); + try{ + affirmLocked('xWrite',fh); + affirmNotRO('xWrite', fh); + rc = ( + n === (await getSyncHandle(fh)) + .write(fh.sabView.subarray(0, n), + {at: Number(offset64)}) + ) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE; + }catch(e){ + error("xWrite():",e,fh); + state.s11n.storeException(1,e); + rc = state.sq3Codes.SQLITE_IOERR_WRITE; + } + wTimeEnd(); + storeAndNotify('xWrite',rc); + mTimeEnd(); + } +}/*vfsAsyncImpls*/; + +const initS11n = ()=>{ + /** + ACHTUNG: this code is 100% duplicated in the other half of this + proxy! The documentation is maintained in the "synchronous half". 
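
  To make the wire format below concrete, here is a worked example of
  what serialize(7, "hi") writes into the shared buffer (an editorial
  aside, not part of the patch; multi-byte values use the byte order
  given by state.littleEndian):

  ```
  byte 0       : 0x02          argument count
  byte 1       : 0x01          type id of arg 0 (number)
  byte 2       : 0x04          type id of arg 1 (string)
  bytes 3..10  : Float64(7)    value of arg 0
  bytes 11..14 : Int32(2)      UTF-8 byte length of arg 1
  bytes 15..16 : 'h','i'       UTF-8 bytes of arg 1
  ```

  deserialize() walks the same layout in reverse; storeException()
  reuses the same region to pass an error string back to the caller.
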
+ */ + if(state.s11n) return state.s11n; + const textDecoder = new TextDecoder(), + textEncoder = new TextEncoder('utf-8'), + viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize), + viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize); + state.s11n = Object.create(null); + const TypeIds = Object.create(null); + TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' }; + TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' }; + TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' }; + TypeIds.string = { id: 4 }; + const getTypeId = (v)=>( + TypeIds[typeof v] + || toss("Maintenance required: this value type cannot be serialized.",v) + ); + const getTypeIdById = (tid)=>{ + switch(tid){ + case TypeIds.number.id: return TypeIds.number; + case TypeIds.bigint.id: return TypeIds.bigint; + case TypeIds.boolean.id: return TypeIds.boolean; + case TypeIds.string.id: return TypeIds.string; + default: toss("Invalid type ID:",tid); + } + }; + state.s11n.deserialize = function(clear=false){ + ++metrics.s11n.deserialize.count; + const t = performance.now(); + const argc = viewU8[0]; + const rc = argc ? [] : null; + if(argc){ + const typeIds = []; + let offset = 1, i, n, v; + for(i = 0; i < argc; ++i, ++offset){ + typeIds.push(getTypeIdById(viewU8[offset])); + } + for(i = 0; i < argc; ++i){ + const t = typeIds[i]; + if(t.getter){ + v = viewDV[t.getter](offset, state.littleEndian); + offset += t.size; + }else{/*String*/ + n = viewDV.getInt32(offset, state.littleEndian); + offset += 4; + v = textDecoder.decode(viewU8.slice(offset, offset+n)); + offset += n; + } + rc.push(v); + } + } + if(clear) viewU8[0] = 0; + //log("deserialize:",argc, rc); + metrics.s11n.deserialize.time += performance.now() - t; + return rc; + }; + state.s11n.serialize = function(...args){ + const t = performance.now(); + ++metrics.s11n.serialize.count; + if(args.length){ + //log("serialize():",args); + const typeIds = []; + let i = 0, offset = 1; + viewU8[0] = args.length & 0xff /* header = # of args */; + for(; i < args.length; ++i, ++offset){ + /* Write the TypeIds.id value into the next args.length + bytes. */ + typeIds.push(getTypeId(args[i])); + viewU8[offset] = typeIds[i].id; + } + for(i = 0; i < args.length; ++i) { + /* Deserialize the following bytes based on their + corresponding TypeIds.id from the header. */ + const t = typeIds[i]; + if(t.setter){ + viewDV[t.setter](offset, args[i], state.littleEndian); + offset += t.size; + }else{/*String*/ + const s = textEncoder.encode(args[i]); + viewDV.setInt32(offset, s.byteLength, state.littleEndian); + offset += 4; + viewU8.set(s, offset); + offset += s.byteLength; + } + } + //log("serialize() result:",viewU8.slice(0,offset)); + }else{ + viewU8[0] = 0; + } + metrics.s11n.serialize.time += performance.now() - t; + }; + + state.s11n.storeException = state.asyncS11nExceptions + ? ((priority,e)=>{ + if(priority<=state.asyncS11nExceptions){ + state.s11n.serialize([e.name,': ',e.message].join("")); + } + }) + : ()=>{}; + + return state.s11n; +}/*initS11n()*/; + +const waitLoop = async function f(){ + const opHandlers = Object.create(null); + for(let k of Object.keys(state.opIds)){ + const vi = vfsAsyncImpls[k]; + if(!vi) continue; + const o = Object.create(null); + opHandlers[state.opIds[k]] = o; + o.key = k; + o.f = vi; + } + /** + waitTime is how long (ms) to wait for each Atomics.wait(). + We need to wake up periodically to give the thread a chance + to do other things. 
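
  For clarity, the three possible outcomes of the timed wait used below
  (a small aside; `int32view`, `index`, and `expected` are placeholders):

  ```
  const r = Atomics.wait(int32view, index, expected, 500);
  // 'not-equal' : the slot already held something other than `expected`
  // 'ok'        : another thread Atomics.notify()'d the slot within 500 ms
  // 'timed-out' : nothing arrived; waitLoop() uses this as its idle cue,
  //               e.g. to release auto-acquired sync access handles
  ```
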
+ */ + const waitTime = 500; + while(!flagAsyncShutdown){ + try { + if('timed-out'===Atomics.wait( + state.sabOPView, state.opIds.whichOp, 0, waitTime + )){ + if(__autoLocks.size){ + /* Release all auto-locks. */ + for(const fid of __autoLocks){ + const fh = __openFiles[fid]; + await closeSyncHandleNoThrow(fh); + log("Auto-unlocked",fid,fh.filenameAbs); + } + } + continue; + } + const opId = Atomics.load(state.sabOPView, state.opIds.whichOp); + Atomics.store(state.sabOPView, state.opIds.whichOp, 0); + const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId); + const args = state.s11n.deserialize( + true /* clear s11n to keep the caller from confusing this with + an exception string written by the upcoming + operation */ + ) || []; + //warn("waitLoop() whichOp =",opId, hnd, args); + if(hnd.f) await hnd.f(...args); + else error("Missing callback for opId",opId); + }catch(e){ + error('in waitLoop():',e); + } + } +}; + +navigator.storage.getDirectory().then(function(d){ + const wMsg = (type)=>postMessage({type}); + state.rootDir = d; + self.onmessage = function({data}){ + switch(data.type){ + case 'opfs-async-init':{ + /* Receive shared state from synchronous partner */ + const opt = data.args; + state.littleEndian = opt.littleEndian; + state.asyncS11nExceptions = opt.asyncS11nExceptions; + state.verbose = opt.verbose ?? 2; + state.fileBufferSize = opt.fileBufferSize; + state.sabS11nOffset = opt.sabS11nOffset; + state.sabS11nSize = opt.sabS11nSize; + state.sabOP = opt.sabOP; + state.sabOPView = new Int32Array(state.sabOP); + state.sabIO = opt.sabIO; + state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize); + state.sabS11nView = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize); + state.opIds = opt.opIds; + state.sq3Codes = opt.sq3Codes; + Object.keys(vfsAsyncImpls).forEach((k)=>{ + if(!Number.isFinite(state.opIds[k])){ + toss("Maintenance required: missing state.opIds[",k,"]"); + } + }); + initS11n(); + metrics.reset(); + log("init state",state); + wMsg('opfs-async-inited'); + waitLoop(); + break; + } + case 'opfs-async-restart': + if(flagAsyncShutdown){ + warn("Restarting after opfs-async-shutdown. Might or might not work."); + flagAsyncShutdown = false; + waitLoop(); + } + break; + case 'opfs-async-metrics': + metrics.dump(); + break; + } + }; + wMsg('opfs-async-loaded'); +}).catch((e)=>error("error initializing OPFS asyncer:",e)); Index: ext/wasm/api/sqlite3-wasm.c ================================================================== --- ext/wasm/api/sqlite3-wasm.c +++ ext/wasm/api/sqlite3-wasm.c @@ -1,6 +1,301 @@ -#include "sqlite3.c" +/* +** This file requires access to sqlite3.c static state in order to +** implement certain WASM-specific features, and thus directly +** includes that file. Unlike the rest of sqlite3.c, this file +** requires compiling with -std=c99 (or equivalent, or a later C +** version) because it makes use of features not available in C89. +** +** At its simplest, to build sqlite3.wasm either place this file +** in the same directory as sqlite3.c/h before compilation or use the +** -I/path flag to tell the compiler where to find both of those +** files, then compile this file. For example: +** +** emcc -o sqlite3.wasm ... -I/path/to/sqlite3-c-and-h sqlite3-wasm.c +*/ +#define SQLITE_WASM +#ifdef SQLITE_WASM_ENABLE_C_TESTS +/* +** Code blocked off by SQLITE_WASM_TESTS is intended solely for use in +** unit/regression testing. They may be safely omitted from +** client-side builds. 
The main unit test script, tester1.js, will +** skip related tests if it doesn't find the corresponding functions +** in the WASM exports. +*/ +# define SQLITE_WASM_TESTS 1 +#else +# define SQLITE_WASM_TESTS 0 +#endif + +/* +** Threading and file locking: JS is single-threaded. Each Worker +** thread is a separate instance of the JS engine so can never access +** the same db handle as another thread, thus multi-threading support +** is unnecessary in the library. Because the filesystems are virtual +** and local to a given wasm runtime instance, two Workers can never +** access the same db file at once, with the exception of OPFS. As of +** this writing (2022-09-30), OPFS exclusively locks a file when +** opening it, so two Workers can never open the same OPFS-backed file +** at once. That situation will change if and when lower-level locking +** features are added to OPFS (as is currently planned, per folks +** involved with its development). +** +** Summary: except for the case of future OPFS, which supports +** locking, and any similar future filesystems, threading and file +** locking support are unnecessary in the wasm build. +*/ + +/* +** Undefine any SQLITE_... config flags which we specifically do not +** want undefined. Please keep these alphabetized. +*/ +#undef SQLITE_OMIT_DESERIALIZE +#undef SQLITE_OMIT_MEMORYDB + +/* +** Define any SQLITE_... config defaults we want if they aren't +** overridden by the builder. Please keep these alphabetized. +*/ + +/**********************************************************************/ +/* SQLITE_D... */ +#ifndef SQLITE_DEFAULT_CACHE_SIZE +/* +** The OPFS impls benefit tremendously from an increased cache size +** when working on large workloads, e.g. speedtest1 --size 50 or +** higher. On smaller workloads, e.g. speedtest1 --size 25, they +** clearly benefit from having 4mb of cache, but not as much as a +** larger cache benefits the larger workloads. Speed differences +** between 2x and nearly 3x have been measured with ample page cache. +*/ +# define SQLITE_DEFAULT_CACHE_SIZE -16384 +#endif +#if 0 && !defined(SQLITE_DEFAULT_PAGE_SIZE) +/* TODO: experiment with this. */ +# define SQLITE_DEFAULT_PAGE_SIZE 8192 /*4096*/ +#endif +#ifndef SQLITE_DEFAULT_UNIX_VFS +# define SQLITE_DEFAULT_UNIX_VFS "unix-none" +#endif +#undef SQLITE_DQS +#define SQLITE_DQS 0 + +/**********************************************************************/ +/* SQLITE_ENABLE_... */ +#ifndef SQLITE_ENABLE_BYTECODE_VTAB +# define SQLITE_ENABLE_BYTECODE_VTAB 1 +#endif +#ifndef SQLITE_ENABLE_DBPAGE_VTAB +# define SQLITE_ENABLE_DBPAGE_VTAB 1 +#endif +#ifndef SQLITE_ENABLE_DBSTAT_VTAB +# define SQLITE_ENABLE_DBSTAT_VTAB 1 +#endif +#ifndef SQLITE_ENABLE_EXPLAIN_COMMENTS +# define SQLITE_ENABLE_EXPLAIN_COMMENTS 1 +#endif +#ifndef SQLITE_ENABLE_FTS4 +# define SQLITE_ENABLE_FTS4 1 +#endif +#ifndef SQLITE_ENABLE_OFFSET_SQL_FUNC +# define SQLITE_ENABLE_OFFSET_SQL_FUNC 1 +#endif +#ifndef SQLITE_ENABLE_RTREE +# define SQLITE_ENABLE_RTREE 1 +#endif +#ifndef SQLITE_ENABLE_STMTVTAB +# define SQLITE_ENABLE_STMTVTAB 1 +#endif +#ifndef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION +# define SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION +#endif + +/**********************************************************************/ +/* SQLITE_O... 
*/ +#ifndef SQLITE_OMIT_DEPRECATED +# define SQLITE_OMIT_DEPRECATED 1 +#endif +#ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION 1 +#endif +#ifndef SQLITE_OMIT_SHARED_CACHE +# define SQLITE_OMIT_SHARED_CACHE 1 +#endif +#ifndef SQLITE_OMIT_UTF16 +# define SQLITE_OMIT_UTF16 1 +#endif +#ifndef SQLITE_OMIT_WAL +# define SQLITE_OMIT_WAL 1 +#endif +#ifndef SQLITE_OS_KV_OPTIONAL +# define SQLITE_OS_KV_OPTIONAL 1 +#endif + +/**********************************************************************/ +/* SQLITE_T... */ +#ifndef SQLITE_TEMP_STORE +# define SQLITE_TEMP_STORE 3 +#endif +#ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 0 +#endif + +/**********************************************************************/ +/* SQLITE_USE_... */ +#ifndef SQLITE_USE_URI +# define SQLITE_USE_URI 1 +#endif + +#include +#include "sqlite3.c" /* yes, .c instead of .h. */ + +#if defined(__EMSCRIPTEN__) +# include +#endif + +/* +** SQLITE_WASM_KEEP is functionally identical to EMSCRIPTEN_KEEPALIVE +** but is not Emscripten-specific. It explicitly marks functions for +** export into the target wasm file without requiring explicit listing +** of those functions in Emscripten's -sEXPORTED_FUNCTIONS=... list +** (or equivalent in other build platforms). Any function with neither +** this attribute nor which is listed as an explicit export will not +** be exported from the wasm file (but may still be used internally +** within the wasm file). +** +** The functions in this file (sqlite3-wasm.c) which require exporting +** are marked with this flag. They may also be added to any explicit +** build-time export list but need not be. All of these APIs are +** intended for use only within the project's own JS/WASM code, and +** not by client code, so an argument can be made for reducing their +** visibility by not including them in any build-time export lists. +** +** 2022-09-11: it's not yet _proven_ that this approach works in +** non-Emscripten builds. If not, such builds will need to export +** those using the --export=... wasm-ld flag (or equivalent). As of +** this writing we are tied to Emscripten for various reasons +** and cannot test the library with other build environments. +*/ +#define SQLITE_WASM_KEEP __attribute__((used,visibility("default"))) +// See also: +//__attribute__((export_name("theExportedName"), used, visibility("default"))) + + +#if 0 +/* +** An EXPERIMENT in implementing a stack-based allocator analog to +** Emscripten's stackSave(), stackAlloc(), stackRestore(). +** Unfortunately, this cannot work together with Emscripten because +** Emscripten defines its own native one and we'd stomp on each +** other's memory. Other than that complication, basic tests show it +** to work just fine. +** +** Another option is to malloc() a chunk of our own and call that our +** "stack". 
+*/ +SQLITE_WASM_KEEP void * sqlite3_wasm_stack_end(void){ + extern void __heap_base + /* see https://stackoverflow.com/questions/10038964 */; + return &__heap_base; +} +SQLITE_WASM_KEEP void * sqlite3_wasm_stack_begin(void){ + extern void __data_end; + return &__data_end; +} +static void * pWasmStackPtr = 0; +SQLITE_WASM_KEEP void * sqlite3_wasm_stack_ptr(void){ + if(!pWasmStackPtr) pWasmStackPtr = sqlite3_wasm_stack_end(); + return pWasmStackPtr; +} +SQLITE_WASM_KEEP void sqlite3_wasm_stack_restore(void * p){ + pWasmStackPtr = p; +} +SQLITE_WASM_KEEP void * sqlite3_wasm_stack_alloc(int n){ + if(n<=0) return 0; + n = (n + 7) & ~7 /* align to 8-byte boundary */; + unsigned char * const p = (unsigned char *)sqlite3_wasm_stack_ptr(); + unsigned const char * const b = (unsigned const char *)sqlite3_wasm_stack_begin(); + if(b + n >= p || b + n < b/*overflow*/) return 0; + return pWasmStackPtr = p - n; +} +#endif /* stack allocator experiment */ + +/* +** State for the "pseudo-stack" allocator implemented in +** sqlite3_wasm_pstack_xyz(). In order to avoid colliding with +** Emscripten-controled stack space, it carves out a bit of stack +** memory to use for that purpose. This memory ends up in the +** WASM-managed memory, such that routines which manipulate the wasm +** heap can also be used to manipulate this memory. +** +** This particular allocator is intended for small allocations such as +** storage for output pointers. We cannot reasonably size it large +** enough for general-purpose string conversions because some of our +** tests use input files (strings) of 16MB+. +*/ +static unsigned char PStack_mem[512 * 8] = {0}; +static struct { + unsigned const char * const pBegin;/* Start (inclusive) of memory */ + unsigned const char * const pEnd; /* One-after-the-end of memory */ + unsigned char * pPos; /* Current stack pointer */ +} PStack = { + &PStack_mem[0], + &PStack_mem[0] + sizeof(PStack_mem), + &PStack_mem[0] + sizeof(PStack_mem) +}; +/* +** Returns the current pstack position. +*/ +SQLITE_WASM_KEEP void * sqlite3_wasm_pstack_ptr(void){ + return PStack.pPos; +} +/* +** Sets the pstack position poitner to p. Results are undefined if the +** given value did not come from sqlite3_wasm_pstack_ptr(). +*/ +SQLITE_WASM_KEEP void sqlite3_wasm_pstack_restore(unsigned char * p){ + assert(p>=PStack.pBegin && p<=PStack.pEnd && p>=PStack.pPos); + assert(0==(p & 0x7)); + if(p>=PStack.pBegin && p<=PStack.pEnd /*&& p>=PStack.pPos*/){ + PStack.pPos = p; + } +} +/* +** Allocate and zero out n bytes from the pstack. Returns a pointer to +** the memory on success, 0 on error (including a negative n value). n +** is always adjusted to be a multiple of 8 and returned memory is +** always zeroed out before returning (because this keeps the client +** JS code from having to do so, and most uses of the pstack will +** call for doing so). +*/ +SQLITE_WASM_KEEP void * sqlite3_wasm_pstack_alloc(int n){ + if( n<=0 ) return 0; + //if( n & 0x7 ) n += 8 - (n & 0x7) /* align to 8-byte boundary */; + n = (n + 7) & ~7 /* align to 8-byte boundary */; + if( PStack.pBegin + n > PStack.pPos /*not enough space left*/ + || PStack.pBegin + n <= PStack.pBegin /*overflow*/ ) return 0; + memset((PStack.pPos = PStack.pPos - n), 0, (unsigned int)n); + return PStack.pPos; +} +/* +** Return the number of bytes left which can be +** sqlite3_wasm_pstack_alloc()'d. 
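
A sketch of how the project's own JS side might drive this allocator,
assuming the functions below are reachable through the bindings' wasm
exports object (the access path shown is an assumption, and these
routines are internal to the project's JS/WASM glue, not client API):

```
// Illustrative only. Allocate scratch space for an output pointer,
// use it, then restore the pstack to exactly where it was.
const W = sqlite3.wasm.exports;                 /* assumed access path */
const savedPos = W.sqlite3_wasm_pstack_ptr();
try {
  const pOut = W.sqlite3_wasm_pstack_alloc(8);  /* 8 zeroed bytes */
  if(!pOut) throw new Error("pstack exhausted");
  /* ...pass pOut to a C-level API which writes a result through it... */
} finally {
  W.sqlite3_wasm_pstack_restore(savedPos);      /* always restore */
}
```
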
+*/ +SQLITE_WASM_KEEP int sqlite3_wasm_pstack_remaining(void){ + assert(PStack.pPos >= PStack.pBegin); + assert(PStack.pPos <= PStack.pEnd); + return (int)(PStack.pPos - PStack.pBegin); +} + +/* +** Return the total number of bytes available in the pstack, including +** any space which is currently allocated. This value is a +** compile-time constant. +*/ +SQLITE_WASM_KEEP int sqlite3_wasm_pstack_quota(void){ + return (int)(PStack.pEnd - PStack.pBegin); +} /* ** This function is NOT part of the sqlite3 public API. It is strictly ** for use by the sqlite project's own JS/WASM bindings. ** @@ -12,76 +307,226 @@ ** Wasm bindings such as sqlite3_prepare_v2/v3(), and definitely not ** from client code. ** ** Returns err_code. */ -int sqlite3_wasm_db_error(sqlite3*db, int err_code, - const char *zMsg){ - if(0!=zMsg){ +SQLITE_WASM_KEEP +int sqlite3_wasm_db_error(sqlite3*db, int err_code, const char *zMsg){ + if( 0!=zMsg ){ const int nMsg = sqlite3Strlen30(zMsg); sqlite3ErrorWithMsg(db, err_code, "%.*s", nMsg, zMsg); }else{ sqlite3ErrorWithMsg(db, err_code, NULL); } return err_code; } +#if SQLITE_WASM_TESTS +struct WasmTestStruct { + int v4; + void * ppV; + const char * cstr; + int64_t v8; + void (*xFunc)(void*); +}; +typedef struct WasmTestStruct WasmTestStruct; +SQLITE_WASM_KEEP +void sqlite3_wasm_test_struct(WasmTestStruct * s){ + if(s){ + s->v4 *= 2; + s->v8 = s->v4 * 2; + s->ppV = s; + s->cstr = __FILE__; + if(s->xFunc) s->xFunc(s); + } + return; +} +#endif /* SQLITE_WASM_TESTS */ + /* ** This function is NOT part of the sqlite3 public API. It is strictly ** for use by the sqlite project's own JS/WASM bindings. Unlike the ** rest of the sqlite3 API, this part requires C99 for snprintf() and ** variadic macros. ** ** Returns a string containing a JSON-format "enum" of C-level -** constants intended to be imported into the JS environment. The JSON -** is initialized the first time this function is called and that -** result is reused for all future calls. +** constants and struct-related metadata intended to be imported into +** the JS environment. The JSON is initialized the first time this +** function is called and that result is reused for all future calls. ** ** If this function returns NULL then it means that the internal -** buffer is not large enough for the generated JSON. In debug builds -** that will trigger an assert(). +** buffer is not large enough for the generated JSON and needs to be +** increased. In debug builds that will trigger an assert(). */ +SQLITE_WASM_KEEP const char * sqlite3_wasm_enum_json(void){ - static char strBuf[1024 * 8] = {0} /* where the JSON goes */; - int n = 0, childCount = 0, structCount = 0 + static char aBuffer[1024 * 12] = {0} /* where the JSON goes */; + int n = 0, nChildren = 0, nStruct = 0 /* output counters for figuring out where commas go */; - char * pos = &strBuf[1] /* skip first byte for now to help protect + char * zPos = &aBuffer[1] /* skip first byte for now to help protect ** against a small race condition */; - char const * const zEnd = pos + sizeof(strBuf) /* one-past-the-end */; - if(strBuf[0]) return strBuf; - /* Leave strBuf[0] at 0 until the end to help guard against a tiny + char const * const zEnd = &aBuffer[0] + sizeof(aBuffer) /* one-past-the-end */; + if(aBuffer[0]) return aBuffer; + /* Leave aBuffer[0] at 0 until the end to help guard against a tiny ** race condition. 
If this is called twice concurrently, they might - ** end up both writing to strBuf, but they'll both write the same + ** end up both writing to aBuffer, but they'll both write the same ** thing, so that's okay. If we set byte 0 up front then the 2nd ** instance might return and use the string before the 1st instance ** is done filling it. */ /* Core output macros... */ -#define lenCheck assert(pos < zEnd - 128 \ +#define lenCheck assert(zPos < zEnd - 128 \ && "sqlite3_wasm_enum_json() buffer is too small."); \ - if(pos >= zEnd - 128) return 0 + if( zPos >= zEnd - 128 ) return 0 #define outf(format,...) \ - pos += snprintf(pos, ((size_t)(zEnd - pos)), format, __VA_ARGS__); \ + zPos += snprintf(zPos, ((size_t)(zEnd - zPos)), format, __VA_ARGS__); \ lenCheck #define out(TXT) outf("%s",TXT) #define CloseBrace(LEVEL) \ - assert(LEVEL<5); memset(pos, '}', LEVEL); pos+=LEVEL; lenCheck + assert(LEVEL<5); memset(zPos, '}', LEVEL); zPos+=LEVEL; lenCheck /* Macros for emitting maps of integer- and string-type macros to ** their values. */ #define DefGroup(KEY) n = 0; \ - outf("%s\"" #KEY "\": {",(childCount++ ? "," : "")); + outf("%s\"" #KEY "\": {",(nChildren++ ? "," : "")); #define DefInt(KEY) \ outf("%s\"%s\": %d", (n++ ? ", " : ""), #KEY, (int)KEY) #define DefStr(KEY) \ outf("%s\"%s\": \"%s\"", (n++ ? ", " : ""), #KEY, KEY) #define _DefGroup CloseBrace(1) - DefGroup(version) { - DefInt(SQLITE_VERSION_NUMBER); - DefStr(SQLITE_VERSION); - DefStr(SQLITE_SOURCE_ID); + /* The following groups are sorted alphabetic by group name. */ + DefGroup(access){ + DefInt(SQLITE_ACCESS_EXISTS); + DefInt(SQLITE_ACCESS_READWRITE); + DefInt(SQLITE_ACCESS_READ)/*docs say this is unused*/; + } _DefGroup; + + DefGroup(blobFinalizers) { + /* SQLITE_STATIC/TRANSIENT need to be handled explicitly as + ** integers to avoid casting-related warnings. */ + out("\"SQLITE_STATIC\":0, \"SQLITE_TRANSIENT\":-1"); + } _DefGroup; + + DefGroup(dataTypes) { + DefInt(SQLITE_INTEGER); + DefInt(SQLITE_FLOAT); + DefInt(SQLITE_TEXT); + DefInt(SQLITE_BLOB); + DefInt(SQLITE_NULL); + } _DefGroup; + + DefGroup(encodings) { + /* Noting that the wasm binding only aims to support UTF-8. 
*/ + DefInt(SQLITE_UTF8); + DefInt(SQLITE_UTF16LE); + DefInt(SQLITE_UTF16BE); + DefInt(SQLITE_UTF16); + /*deprecated DefInt(SQLITE_ANY); */ + DefInt(SQLITE_UTF16_ALIGNED); + } _DefGroup; + + DefGroup(fcntl) { + DefInt(SQLITE_FCNTL_LOCKSTATE); + DefInt(SQLITE_FCNTL_GET_LOCKPROXYFILE); + DefInt(SQLITE_FCNTL_SET_LOCKPROXYFILE); + DefInt(SQLITE_FCNTL_LAST_ERRNO); + DefInt(SQLITE_FCNTL_SIZE_HINT); + DefInt(SQLITE_FCNTL_CHUNK_SIZE); + DefInt(SQLITE_FCNTL_FILE_POINTER); + DefInt(SQLITE_FCNTL_SYNC_OMITTED); + DefInt(SQLITE_FCNTL_WIN32_AV_RETRY); + DefInt(SQLITE_FCNTL_PERSIST_WAL); + DefInt(SQLITE_FCNTL_OVERWRITE); + DefInt(SQLITE_FCNTL_VFSNAME); + DefInt(SQLITE_FCNTL_POWERSAFE_OVERWRITE); + DefInt(SQLITE_FCNTL_PRAGMA); + DefInt(SQLITE_FCNTL_BUSYHANDLER); + DefInt(SQLITE_FCNTL_TEMPFILENAME); + DefInt(SQLITE_FCNTL_MMAP_SIZE); + DefInt(SQLITE_FCNTL_TRACE); + DefInt(SQLITE_FCNTL_HAS_MOVED); + DefInt(SQLITE_FCNTL_SYNC); + DefInt(SQLITE_FCNTL_COMMIT_PHASETWO); + DefInt(SQLITE_FCNTL_WIN32_SET_HANDLE); + DefInt(SQLITE_FCNTL_WAL_BLOCK); + DefInt(SQLITE_FCNTL_ZIPVFS); + DefInt(SQLITE_FCNTL_RBU); + DefInt(SQLITE_FCNTL_VFS_POINTER); + DefInt(SQLITE_FCNTL_JOURNAL_POINTER); + DefInt(SQLITE_FCNTL_WIN32_GET_HANDLE); + DefInt(SQLITE_FCNTL_PDB); + DefInt(SQLITE_FCNTL_BEGIN_ATOMIC_WRITE); + DefInt(SQLITE_FCNTL_COMMIT_ATOMIC_WRITE); + DefInt(SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE); + DefInt(SQLITE_FCNTL_LOCK_TIMEOUT); + DefInt(SQLITE_FCNTL_DATA_VERSION); + DefInt(SQLITE_FCNTL_SIZE_LIMIT); + DefInt(SQLITE_FCNTL_CKPT_DONE); + DefInt(SQLITE_FCNTL_RESERVE_BYTES); + DefInt(SQLITE_FCNTL_CKPT_START); + DefInt(SQLITE_FCNTL_EXTERNAL_READER); + DefInt(SQLITE_FCNTL_CKSM_FILE); + } _DefGroup; + + DefGroup(flock) { + DefInt(SQLITE_LOCK_NONE); + DefInt(SQLITE_LOCK_SHARED); + DefInt(SQLITE_LOCK_RESERVED); + DefInt(SQLITE_LOCK_PENDING); + DefInt(SQLITE_LOCK_EXCLUSIVE); + } _DefGroup; + + DefGroup(ioCap) { + DefInt(SQLITE_IOCAP_ATOMIC); + DefInt(SQLITE_IOCAP_ATOMIC512); + DefInt(SQLITE_IOCAP_ATOMIC1K); + DefInt(SQLITE_IOCAP_ATOMIC2K); + DefInt(SQLITE_IOCAP_ATOMIC4K); + DefInt(SQLITE_IOCAP_ATOMIC8K); + DefInt(SQLITE_IOCAP_ATOMIC16K); + DefInt(SQLITE_IOCAP_ATOMIC32K); + DefInt(SQLITE_IOCAP_ATOMIC64K); + DefInt(SQLITE_IOCAP_SAFE_APPEND); + DefInt(SQLITE_IOCAP_SEQUENTIAL); + DefInt(SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN); + DefInt(SQLITE_IOCAP_POWERSAFE_OVERWRITE); + DefInt(SQLITE_IOCAP_IMMUTABLE); + DefInt(SQLITE_IOCAP_BATCH_ATOMIC); + } _DefGroup; + + DefGroup(openFlags) { + /* Noting that not all of these will have any effect in + ** WASM-space. */ + DefInt(SQLITE_OPEN_READONLY); + DefInt(SQLITE_OPEN_READWRITE); + DefInt(SQLITE_OPEN_CREATE); + DefInt(SQLITE_OPEN_URI); + DefInt(SQLITE_OPEN_MEMORY); + DefInt(SQLITE_OPEN_NOMUTEX); + DefInt(SQLITE_OPEN_FULLMUTEX); + DefInt(SQLITE_OPEN_SHAREDCACHE); + DefInt(SQLITE_OPEN_PRIVATECACHE); + DefInt(SQLITE_OPEN_EXRESCODE); + DefInt(SQLITE_OPEN_NOFOLLOW); + /* OPEN flags for use with VFSes... 
*/ + DefInt(SQLITE_OPEN_MAIN_DB); + DefInt(SQLITE_OPEN_MAIN_JOURNAL); + DefInt(SQLITE_OPEN_TEMP_DB); + DefInt(SQLITE_OPEN_TEMP_JOURNAL); + DefInt(SQLITE_OPEN_TRANSIENT_DB); + DefInt(SQLITE_OPEN_SUBJOURNAL); + DefInt(SQLITE_OPEN_SUPER_JOURNAL); + DefInt(SQLITE_OPEN_WAL); + DefInt(SQLITE_OPEN_DELETEONCLOSE); + DefInt(SQLITE_OPEN_EXCLUSIVE); + } _DefGroup; + + DefGroup(prepareFlags) { + DefInt(SQLITE_PREPARE_PERSISTENT); + DefInt(SQLITE_PREPARE_NORMALIZE); + DefInt(SQLITE_PREPARE_NO_VTAB); } _DefGroup; DefGroup(resultCodes) { DefInt(SQLITE_OK); DefInt(SQLITE_ERROR); @@ -112,11 +557,10 @@ DefInt(SQLITE_NOTADB); DefInt(SQLITE_NOTICE); DefInt(SQLITE_WARNING); DefInt(SQLITE_ROW); DefInt(SQLITE_DONE); - // Extended Result Codes DefInt(SQLITE_ERROR_MISSING_COLLSEQ); DefInt(SQLITE_ERROR_RETRY); DefInt(SQLITE_ERROR_SNAPSHOT); DefInt(SQLITE_IOERR_READ); @@ -191,108 +635,40 @@ DefInt(SQLITE_AUTH_USER); DefInt(SQLITE_OK_LOAD_PERMANENTLY); //DefInt(SQLITE_OK_SYMLINK) /* internal use only */; } _DefGroup; - DefGroup(dataTypes) { - DefInt(SQLITE_INTEGER); - DefInt(SQLITE_FLOAT); - DefInt(SQLITE_TEXT); - DefInt(SQLITE_BLOB); - DefInt(SQLITE_NULL); - } _DefGroup; - - DefGroup(encodings) { - /* Noting that the wasm binding only aims to support UTF-8. */ - DefInt(SQLITE_UTF8); - DefInt(SQLITE_UTF16LE); - DefInt(SQLITE_UTF16BE); - DefInt(SQLITE_UTF16); - /*deprecated DefInt(SQLITE_ANY); */ - DefInt(SQLITE_UTF16_ALIGNED); - } _DefGroup; - - DefGroup(blobFinalizers) { - /* SQLITE_STATIC/TRANSIENT need to be handled explicitly as - ** integers to avoid casting-related warnings. */ - out("\"SQLITE_STATIC\":0, \"SQLITE_TRANSIENT\":-1"); + DefGroup(serialize){ + DefInt(SQLITE_SERIALIZE_NOCOPY); + DefInt(SQLITE_DESERIALIZE_FREEONCLOSE); + DefInt(SQLITE_DESERIALIZE_READONLY); + DefInt(SQLITE_DESERIALIZE_RESIZEABLE); + } _DefGroup; + + DefGroup(syncFlags) { + DefInt(SQLITE_SYNC_NORMAL); + DefInt(SQLITE_SYNC_FULL); + DefInt(SQLITE_SYNC_DATAONLY); + } _DefGroup; + + DefGroup(trace) { + DefInt(SQLITE_TRACE_STMT); + DefInt(SQLITE_TRACE_PROFILE); + DefInt(SQLITE_TRACE_ROW); + DefInt(SQLITE_TRACE_CLOSE); } _DefGroup; DefGroup(udfFlags) { DefInt(SQLITE_DETERMINISTIC); DefInt(SQLITE_DIRECTONLY); DefInt(SQLITE_INNOCUOUS); } _DefGroup; - DefGroup(openFlags) { - /* Noting that not all of these will have any effect in WASM-space. */ - DefInt(SQLITE_OPEN_READONLY); - DefInt(SQLITE_OPEN_READWRITE); - DefInt(SQLITE_OPEN_CREATE); - DefInt(SQLITE_OPEN_URI); - DefInt(SQLITE_OPEN_MEMORY); - DefInt(SQLITE_OPEN_NOMUTEX); - DefInt(SQLITE_OPEN_FULLMUTEX); - DefInt(SQLITE_OPEN_SHAREDCACHE); - DefInt(SQLITE_OPEN_PRIVATECACHE); - DefInt(SQLITE_OPEN_EXRESCODE); - DefInt(SQLITE_OPEN_NOFOLLOW); - /* OPEN flags for use with VFSes... 
*/ - DefInt(SQLITE_OPEN_MAIN_DB); - DefInt(SQLITE_OPEN_MAIN_JOURNAL); - DefInt(SQLITE_OPEN_TEMP_DB); - DefInt(SQLITE_OPEN_TEMP_JOURNAL); - DefInt(SQLITE_OPEN_TRANSIENT_DB); - DefInt(SQLITE_OPEN_SUBJOURNAL); - DefInt(SQLITE_OPEN_SUPER_JOURNAL); - DefInt(SQLITE_OPEN_WAL); - DefInt(SQLITE_OPEN_DELETEONCLOSE); - DefInt(SQLITE_OPEN_EXCLUSIVE); - } _DefGroup; - - DefGroup(syncFlags) { - DefInt(SQLITE_SYNC_NORMAL); - DefInt(SQLITE_SYNC_FULL); - DefInt(SQLITE_SYNC_DATAONLY); - } _DefGroup; - - DefGroup(prepareFlags) { - DefInt(SQLITE_PREPARE_PERSISTENT); - DefInt(SQLITE_PREPARE_NORMALIZE); - DefInt(SQLITE_PREPARE_NO_VTAB); - } _DefGroup; - - DefGroup(flock) { - DefInt(SQLITE_LOCK_NONE); - DefInt(SQLITE_LOCK_SHARED); - DefInt(SQLITE_LOCK_RESERVED); - DefInt(SQLITE_LOCK_PENDING); - DefInt(SQLITE_LOCK_EXCLUSIVE); - } _DefGroup; - - DefGroup(ioCap) { - DefInt(SQLITE_IOCAP_ATOMIC); - DefInt(SQLITE_IOCAP_ATOMIC512); - DefInt(SQLITE_IOCAP_ATOMIC1K); - DefInt(SQLITE_IOCAP_ATOMIC2K); - DefInt(SQLITE_IOCAP_ATOMIC4K); - DefInt(SQLITE_IOCAP_ATOMIC8K); - DefInt(SQLITE_IOCAP_ATOMIC16K); - DefInt(SQLITE_IOCAP_ATOMIC32K); - DefInt(SQLITE_IOCAP_ATOMIC64K); - DefInt(SQLITE_IOCAP_SAFE_APPEND); - DefInt(SQLITE_IOCAP_SEQUENTIAL); - DefInt(SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN); - DefInt(SQLITE_IOCAP_POWERSAFE_OVERWRITE); - DefInt(SQLITE_IOCAP_IMMUTABLE); - DefInt(SQLITE_IOCAP_BATCH_ATOMIC); - } _DefGroup; - - DefGroup(access){ - DefInt(SQLITE_ACCESS_EXISTS); - DefInt(SQLITE_ACCESS_READWRITE); - DefInt(SQLITE_ACCESS_READ)/*docs say this is unused*/; + DefGroup(version) { + DefInt(SQLITE_VERSION_NUMBER); + DefStr(SQLITE_VERSION); + DefStr(SQLITE_SOURCE_ID); } _DefGroup; #undef DefGroup #undef DefStr #undef DefInt @@ -317,11 +693,11 @@ */ /** Macros for emitting StructBinder description. */ #define StructBinder__(TYPE) \ n = 0; \ - outf("%s{", (structCount++ ? ", " : "")); \ + outf("%s{", (nStruct++ ? ", " : "")); \ out("\"name\": \"" # TYPE "\","); \ outf("\"sizeof\": %d", (int)sizeof(TYPE)); \ out(",\"members\": {"); #define StructBinder_(T) StructBinder__(T) /** ^^^ indirection needed to expand CurrentStruct */ @@ -333,11 +709,11 @@ (n++ ? ", " : ""), #MEMBER, \ (int)offsetof(CurrentStruct,MEMBER), \ (int)sizeof(((CurrentStruct*)0)->MEMBER), \ SIG) - structCount = 0; + nStruct = 0; out(", \"structs\": ["); { #define CurrentStruct sqlite3_vfs StructBinder { M(iVersion,"i"); @@ -389,20 +765,41 @@ } _StructBinder; #undef CurrentStruct #define CurrentStruct sqlite3_file StructBinder { - M(pMethods,"P"); + M(pMethods,"p"); + } _StructBinder; +#undef CurrentStruct + +#define CurrentStruct sqlite3_kvvfs_methods + StructBinder { + M(xRead,"i(sspi)"); + M(xWrite,"i(sss)"); + M(xDelete,"i(ss)"); + M(nKeySize,"i"); + } _StructBinder; +#undef CurrentStruct + +#if SQLITE_WASM_TESTS +#define CurrentStruct WasmTestStruct + StructBinder { + M(v4,"i"); + M(cstr,"s"); + M(ppV,"p"); + M(v8,"j"); + M(xFunc,"v(p)"); } _StructBinder; #undef CurrentStruct +#endif } out( "]"/*structs*/); out("}"/*top-level object*/); - *pos = 0; - strBuf[0] = '{'/*end of the race-condition workaround*/; - return strBuf; + *zPos = 0; + aBuffer[0] = '{'/*end of the race-condition workaround*/; + return aBuffer; #undef StructBinder #undef StructBinder_ #undef StructBinder__ #undef M #undef _StructBinder @@ -409,5 +806,376 @@ #undef CloseBrace #undef out #undef outf #undef lenCheck } + +/* +** This function is NOT part of the sqlite3 public API. It is strictly +** for use by the sqlite project's own JS/WASM bindings. 
+**
+** This function invokes the xDelete method of the given VFS (or the
+** default VFS if pVfs is NULL), passing on the given filename. If
+** zName is NULL, no default VFS is found, or it has no xDelete
+** method, SQLITE_MISUSE is returned, else the result of the xDelete()
+** call is returned.
+*/
+SQLITE_WASM_KEEP
+int sqlite3_wasm_vfs_unlink(sqlite3_vfs *pVfs, const char * zName){
+  int rc = SQLITE_MISUSE /* ??? */;
+  if( 0==pVfs && 0!=zName ) pVfs = sqlite3_vfs_find(0);
+  if( zName && pVfs && pVfs->xDelete ){
+    rc = pVfs->xDelete(pVfs, zName, 1);
+  }
+  return rc;
+}
+
+/*
+** This function is NOT part of the sqlite3 public API. It is strictly
+** for use by the sqlite project's own JS/WASM bindings.
+**
+** Returns a pointer to the given DB's VFS for the given DB name,
+** defaulting to "main" if zDbName is 0. Returns 0 if no db with the
+** given name is open.
+*/
+SQLITE_WASM_KEEP
+sqlite3_vfs * sqlite3_wasm_db_vfs(sqlite3 *pDb, const char *zDbName){
+  sqlite3_vfs * pVfs = 0;
+  sqlite3_file_control(pDb, zDbName ? zDbName : "main",
+                       SQLITE_FCNTL_VFS_POINTER, &pVfs);
+  return pVfs;
+}
+
+/*
+** This function is NOT part of the sqlite3 public API. It is strictly
+** for use by the sqlite project's own JS/WASM bindings.
+**
+** This function resets the given db pointer's database as described at
+**
+** https://www.sqlite.org/c3ref/c_dbconfig_defensive.html#sqlitedbconfigresetdatabase
+**
+** Returns 0 on success, an SQLITE_xxx code on error. Returns
+** SQLITE_MISUSE if pDb is NULL.
+*/
+SQLITE_WASM_KEEP
+int sqlite3_wasm_db_reset(sqlite3*pDb){
+  int rc = SQLITE_MISUSE;
+  if( pDb ){
+    rc = sqlite3_db_config(pDb, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0);
+    if( 0==rc ) rc = sqlite3_exec(pDb, "VACUUM", 0, 0, 0);
+    sqlite3_db_config(pDb, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
+  }
+  return rc;
+}
+
+/*
+** Uses the given database's VFS xRead to stream the db file's
+** contents out to the given callback. The callback gets a single
+** chunk of size n (its 2nd argument) on each call and must return 0
+** on success, non-0 on error. This function returns 0 on success,
+** SQLITE_NOTFOUND if no db is open, or propagates any other non-0
+** code from the callback. Note that this is not thread-friendly: it
+** expects that it will be the only thread reading the db file and
+** takes no measures to ensure that is the case.
+**
+** This implementation appears to work fine, but
+** sqlite3_wasm_db_serialize() is arguably the better way to achieve
+** this.
+*/
+SQLITE_WASM_KEEP
+int sqlite3_wasm_db_export_chunked( sqlite3* pDb,
+                  int (*xCallback)(unsigned const char *zOut, int n) ){
+  sqlite3_int64 nSize = 0;
+  sqlite3_int64 nPos = 0;
+  sqlite3_file * pFile = 0;
+  unsigned char buf[1024 * 8];
+  int nBuf = (int)sizeof(buf);
+  int rc = pDb
+    ? sqlite3_file_control(pDb, "main",
+                           SQLITE_FCNTL_FILE_POINTER, &pFile)
+    : SQLITE_NOTFOUND;
+  if( rc ) return rc;
+  rc = pFile->pMethods->xFileSize(pFile, &nSize);
+  if( rc ) return rc;
+  if(nSize % nBuf){
+    /* DB size is not an even multiple of the buffer size. Reduce
+    ** buffer size so that we do not unduly inflate the db size
+    ** with zero-padding when exporting. */
+    if(0 == nSize % 4096) nBuf = 4096;
+    else if(0 == nSize % 2048) nBuf = 2048;
+    else if(0 == nSize % 1024) nBuf = 1024;
+    else nBuf = 512;
+  }
+  for( ; 0==rc && nPos<nSize; nPos += nBuf ){
+    rc = pFile->pMethods->xRead(pFile, buf, nBuf, nPos);
+    if(SQLITE_IOERR_SHORT_READ == rc){
+      rc = (nPos + nBuf) < nSize ?
rc : 0/*assume EOF*/; + } + if( 0==rc ) rc = xCallback(buf, nBuf); + } + return rc; +} + +/* +** A proxy for sqlite3_serialize() which serializes the "main" schema +** of pDb, placing the serialized output in pOut and nOut. nOut may be +** NULL. If pDb or pOut are NULL then SQLITE_MISUSE is returned. If +** allocation of the serialized copy fails, SQLITE_NOMEM is returned. +** On success, 0 is returned and `*pOut` will contain a pointer to the +** memory unless mFlags includes SQLITE_SERIALIZE_NOCOPY and the +** database has no contiguous memory representation, in which case +** `*pOut` will be NULL but 0 will be returned. +** +** If `*pOut` is not NULL, the caller is responsible for passing it to +** sqlite3_free() to free it. +*/ +SQLITE_WASM_KEEP +int sqlite3_wasm_db_serialize( sqlite3 *pDb, unsigned char **pOut, + sqlite3_int64 *nOut, unsigned int mFlags ){ + unsigned char * z; + if( !pDb || !pOut ) return SQLITE_MISUSE; + if(nOut) *nOut = 0; + z = sqlite3_serialize(pDb, "main", nOut, mFlags); + if( z || (SQLITE_SERIALIZE_NOCOPY & mFlags) ){ + *pOut = z; + return 0; + }else{ + return SQLITE_NOMEM; + } +} + +/* +** This function is NOT part of the sqlite3 public API. It is strictly +** for use by the sqlite project's own JS/WASM bindings. +** +** Creates a new file using the I/O API of the given VFS, containing +** the given number of bytes of the given data. If the file exists, +** it is truncated to the given length and populated with the given +** data. +** +** This function exists so that we can implement the equivalent of +** Emscripten's FS.createDataFile() in a VFS-agnostic way. This +** functionality is intended for use in uploading database files. +** +** If pVfs is NULL, sqlite3_vfs_find(0) is used. +** +** If zFile is NULL, pVfs is NULL (and sqlite3_vfs_find(0) returns +** NULL), or nData is negative, SQLITE_MISUSE are returned. +** +** On success, it creates a new file with the given name, populated +** with the fist nData bytes of pData. If pData is NULL, the file is +** created and/or truncated to nData bytes. +** +** Whether or not directory components of zFilename are created +** automatically or not is unspecified: that detail is left to the +** VFS. The "opfs" VFS, for example, create them. +** +** Not all VFSes support this functionality, e.g. the "kvvfs" does +** not. +** +** If an error happens while populating or truncating the file, the +** target file will be deleted (if needed) if this function created +** it. If this function did not create it, it is not deleted but may +** be left in an undefined state. +** +** Returns 0 on success. On error, it returns a code described above +** or propagates a code from one of the I/O methods. +** +** Design note: nData is an integer, instead of int64, for WASM +** portability, so that the API can still work in builds where BigInt +** support is disabled or unavailable. +*/ +SQLITE_WASM_KEEP +int sqlite3_wasm_vfs_create_file( sqlite3_vfs *pVfs, + const char *zFilename, + const unsigned char * pData, + int nData ){ + int rc; + sqlite3_file *pFile = 0; + sqlite3_io_methods const *pIo; + const int openFlags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE; + int flagsOut = 0; + int fileExisted = 0; + int doUnlock = 0; + const unsigned char *pPos = pData; + const int blockSize = 512 + /* Because we are using pFile->pMethods->xWrite() for writing, and + ** it may have a buffer limit related to sqlite3's pager size, we + ** conservatively write in 512-byte blocks (smallest page + ** size). 
*/;
+
+  if( !pVfs ) pVfs = sqlite3_vfs_find(0);
+  if( !pVfs || !zFilename || nData<0 ) return SQLITE_MISUSE;
+  pVfs->xAccess(pVfs, zFilename, SQLITE_ACCESS_EXISTS, &fileExisted);
+  rc = sqlite3OsOpenMalloc(pVfs, zFilename, &pFile, openFlags, &flagsOut);
+  if(rc) return rc;
+  pIo = pFile->pMethods;
+  if( pIo->xLock ) {
+    /* We need xLock() in order to accommodate the OPFS VFS, as it
+    ** obtains a writeable handle via the lock operation and releases
+    ** it in xUnlock(). If we don't do those here, we have to add code
+    ** to the VFS to account for whether it was locked before
+    ** xFileSize(), xTruncate(), and the like, and release the lock
+    ** only if it was unlocked when the op was started. */
+    rc = pIo->xLock(pFile, SQLITE_LOCK_EXCLUSIVE);
+    doUnlock = 0==rc;
+  }
+  if( 0==rc) rc = pIo->xTruncate(pFile, nData);
+  if( 0==rc && 0!=pData && nData>0 ){
+    while( 0==rc && nData>0 ){
+      const int n = nData>=blockSize ? blockSize : nData;
+      rc = pIo->xWrite(pFile, pPos, n, (sqlite3_int64)(pPos - pData));
+      nData -= n;
+      pPos += n;
+    }
+    if( 0==rc && nData>0 ){
+      assert( nData<blockSize );
+      rc = pIo->xWrite(pFile, pPos, nData, (sqlite3_int64)(pPos - pData));
+    }
+  }
+  if( pIo->xUnlock && doUnlock!=0 ) pIo->xUnlock(pFile, SQLITE_LOCK_NONE);
+  pIo->xClose(pFile);
+  if( rc!=0 && 0==fileExisted ){
+    pVfs->xDelete(pVfs, zFilename, 1);
+  }
+  return rc;
+}
+
+/*
+** This function is NOT part of the sqlite3 public API. It is strictly
+** for use by the sqlite project's own JS/WASM bindings.
+**
+** Allocates sqlite3KvvfsMethods.nKeySize bytes from
+** sqlite3_wasm_pstack_alloc() and returns 0 if that allocation fails,
+** else it passes that string to kvstorageMakeKey() and returns a
+** NUL-terminated pointer to that string. It is up to the caller to
+** use sqlite3_wasm_pstack_restore() to free the returned pointer.
+*/
+SQLITE_WASM_KEEP
+char * sqlite3_wasm_kvvfsMakeKeyOnPstack(const char *zClass,
+                                         const char *zKeyIn){
+  assert(sqlite3KvvfsMethods.nKeySize>24);
+  char *zKeyOut =
+    (char *)sqlite3_wasm_pstack_alloc(sqlite3KvvfsMethods.nKeySize);
+  if(zKeyOut){
+    kvstorageMakeKey(zClass, zKeyIn, zKeyOut);
+  }
+  return zKeyOut;
+}
+
+/*
+** This function is NOT part of the sqlite3 public API. It is strictly
+** for use by the sqlite project's own JS/WASM bindings.
+**
+** Returns the pointer to the singleton object which holds the kvvfs
+** I/O methods and associated state.
+*/
+SQLITE_WASM_KEEP
+sqlite3_kvvfs_methods * sqlite3_wasm_kvvfs_methods(void){
+  return &sqlite3KvvfsMethods;
+}
+
+#if defined(__EMSCRIPTEN__) && defined(SQLITE_ENABLE_WASMFS)
+#include <emscripten/wasmfs.h>
+
+/*
+** This function is NOT part of the sqlite3 public API. It is strictly
+** for use by the sqlite project's own JS/WASM bindings, specifically
+** only when building with Emscripten's WASMFS support.
+**
+** This function should only be called if the JS side detects the
+** existence of the Origin-Private FileSystem (OPFS) APIs in the
+** client. The first time it is called, this function instantiates a
+** WASMFS backend impl for OPFS. On success, subsequent calls are
+** no-ops.
+**
+** This function may be passed a "mount point" name, which must have a
+** leading "/" and is currently restricted to a single path component,
+** e.g. "/foo" is legal but "/foo/" and "/foo/bar" are not. If it is
+** NULL or empty, it defaults to "/opfs".
+**
+** Returns 0 on success, SQLITE_NOMEM if instantiation of the backend
+** object fails, SQLITE_IOERR if mkdir() of the zMountPoint dir in
+** the virtual FS fails.
In builds compiled without SQLITE_ENABLE_WASMFS +** defined, SQLITE_NOTFOUND is returned without side effects. +*/ +SQLITE_WASM_KEEP +int sqlite3_wasm_init_wasmfs(const char *zMountPoint){ + static backend_t pOpfs = 0; + if( !zMountPoint || !*zMountPoint ) zMountPoint = "/opfs"; + if( !pOpfs ){ + pOpfs = wasmfs_create_opfs_backend(); + } + /** It's not enough to instantiate the backend. We have to create a + mountpoint in the VFS and attach the backend to it. */ + if( pOpfs && 0!=access(zMountPoint, F_OK) ){ + /* Note that this check and is not robust but it will + hypothetically suffice for the transient wasm-based virtual + filesystem we're currently running in. */ + const int rc = wasmfs_create_directory(zMountPoint, 0777, pOpfs); + /*emscripten_console_logf("OPFS mkdir(%s) rc=%d", zMountPoint, rc);*/ + if(rc) return SQLITE_IOERR; + } + return pOpfs ? 0 : SQLITE_NOMEM; +} +#else +SQLITE_WASM_KEEP +int sqlite3_wasm_init_wasmfs(const char *zUnused){ + //emscripten_console_warn("WASMFS OPFS is not compiled in."); + if(zUnused){/*unused*/} + return SQLITE_NOTFOUND; +} +#endif /* __EMSCRIPTEN__ && SQLITE_ENABLE_WASMFS */ + +#if SQLITE_WASM_TESTS + +SQLITE_WASM_KEEP +int sqlite3_wasm_test_intptr(int * p){ + return *p = *p * 2; +} + +SQLITE_WASM_KEEP +int64_t sqlite3_wasm_test_int64_max(void){ + return (int64_t)0x7fffffffffffffff; +} + +SQLITE_WASM_KEEP +int64_t sqlite3_wasm_test_int64_min(void){ + return ~sqlite3_wasm_test_int64_max(); +} + +SQLITE_WASM_KEEP +int64_t sqlite3_wasm_test_int64_times2(int64_t x){ + return x * 2; +} + +SQLITE_WASM_KEEP +void sqlite3_wasm_test_int64_minmax(int64_t * min, int64_t *max){ + *max = sqlite3_wasm_test_int64_max(); + *min = sqlite3_wasm_test_int64_min(); + /*printf("minmax: min=%lld, max=%lld\n", *min, *max);*/ +} + +SQLITE_WASM_KEEP +int64_t sqlite3_wasm_test_int64ptr(int64_t * p){ + /*printf("sqlite3_wasm_test_int64ptr( @%lld = 0x%llx )\n", (int64_t)p, *p);*/ + return *p = *p * 2; +} + +SQLITE_WASM_KEEP +void sqlite3_wasm_test_stack_overflow(int recurse){ + if(recurse) sqlite3_wasm_test_stack_overflow(recurse); +} + +/* For testing the 'string-free' whwasmutil.xWrap() conversion. */ +SQLITE_WASM_KEEP +char * sqlite3_wasm_test_str_hello(int fail){ + char * s = fail ? 0 : (char *)malloc(6); + if(s){ + memcpy(s, "hello", 5); + s[5] = 0; + } + return s; +} +#endif /* SQLITE_WASM_TESTS */ + +#undef SQLITE_WASM_KEEP DELETED ext/wasm/api/sqlite3-worker.js Index: ext/wasm/api/sqlite3-worker.js ================================================================== --- ext/wasm/api/sqlite3-worker.js +++ /dev/null @@ -1,31 +0,0 @@ -/* - 2022-05-23 - - The author disclaims copyright to this source code. In place of a - legal notice, here is a blessing: - - * May you do good and not evil. - * May you find forgiveness for yourself and forgive others. - * May you share freely, never taking more than you give. - - *********************************************************************** - - This is a JS Worker file for the main sqlite3 api. It loads - sqlite3.js, initializes the module, and postMessage()'s a message - after the module is initialized: - - {type: 'sqlite3-api', data: 'worker-ready'} - - This seemingly superfluous level of indirection is necessary when - loading sqlite3.js via a Worker. Instantiating a worker with new - Worker("sqlite.js") will not (cannot) call sqlite3InitModule() to - initialize the module due to a timing/order-of-operations conflict - (and that symbol is not exported in a way that a Worker loading it - that way can see it). 
Thus JS code wanting to load the sqlite3 - Worker-specific API needs to pass _this_ file (or equivalent) to the - Worker constructor and then listen for an event in the form shown - above in order to know when the module has completed initialization. -*/ -"use strict"; -importScripts('sqlite3.js'); -sqlite3InitModule().then((EmscriptenModule)=>EmscriptenModule.sqlite3.initWorkerAPI()); ADDED ext/wasm/api/sqlite3-worker1-promiser.js Index: ext/wasm/api/sqlite3-worker1-promiser.js ================================================================== --- /dev/null +++ ext/wasm/api/sqlite3-worker1-promiser.js @@ -0,0 +1,259 @@ +/* + 2022-08-24 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + This file implements a Promise-based proxy for the sqlite3 Worker + API #1. It is intended to be included either from the main thread or + a Worker, but only if (A) the environment supports nested Workers + and (B) it's _not_ a Worker which loads the sqlite3 WASM/JS + module. This file's features will load that module and provide a + slightly simpler client-side interface than the slightly-lower-level + Worker API does. + + This script necessarily exposes one global symbol, but clients may + freely `delete` that symbol after calling it. +*/ +'use strict'; +/** + Configures an sqlite3 Worker API #1 Worker such that it can be + manipulated via a Promise-based interface and returns a factory + function which returns Promises for communicating with the worker. + This proxy has an _almost_ identical interface to the normal + worker API, with any exceptions documented below. + + It requires a configuration object with the following properties: + + - `worker` (required): a Worker instance which loads + `sqlite3-worker1.js` or a functional equivalent. Note that the + promiser factory replaces the worker.onmessage property. This + config option may alternately be a function, in which case this + function re-assigns this property with the result of calling that + function, enabling delayed instantiation of a Worker. + + - `onready` (optional, but...): this callback is called with no + arguments when the worker fires its initial + 'sqlite3-api'/'worker1-ready' message, which it does when + sqlite3.initWorker1API() completes its initialization. This is + the simplest way to tell the worker to kick off work at the + earliest opportunity. + + - `onunhandled` (optional): a callback which gets passed the + message event object for any worker.onmessage() events which + are not handled by this proxy. Ideally that "should" never + happen, as this proxy aims to handle all known message types. + + - `generateMessageId` (optional): a function which, when passed an + about-to-be-posted message object, generates a _unique_ message ID + for the message, which this API then assigns as the messageId + property of the message. It _must_ generate unique IDs on each call + so that dispatching can work. If not defined, a default generator + is used (which should be sufficient for most or all cases). + + - `debug` (optional): a console.debug()-style function for logging + information about messages. 
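A minimal configuration sketch based on the options listed above; the worker file name matches the sqlite3-worker1.js script added later in this diff, while the onready and debug bodies are illustrative only:

```
const sq3Promiser = sqlite3Worker1Promiser({
  /* Lazily instantiate the Worker which loads the Worker1 API. */
  worker: ()=> new Worker("sqlite3-worker1.js"),
  /* Fired when the worker posts its 'sqlite3-api'/'worker1-ready' message. */
  onready: ()=> console.log("sqlite3 Worker1 API is ready"),
  /* Optional logging of message traffic. */
  debug: (...args)=>console.debug("promiser:",...args)
});
```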
+ + This function returns a stateful factory function with the + following interfaces: + + - Promise function(messageType, messageArgs) + - Promise function({message object}) + + The first form expects the "type" and "args" values for a Worker + message. The second expects an object in the form {type:..., + args:...} plus any other properties the client cares to set. This + function will always set the `messageId` property on the object, + even if it's already set, and will set the `dbId` property to the + current database ID if it is _not_ set in the message object. + + The function throws on error. + + The function installs a temporary message listener, posts a + message to the configured Worker, and handles the message's + response via the temporary message listener. The then() callback + of the returned Promise is passed the `message.data` property from + the resulting message, i.e. the payload from the worker, stripped + of the lower-level event state which the onmessage() handler + receives. + + Example usage: + + ``` + const config = {...}; + const sq3Promiser = sqlite3Worker1Promiser(config); + sq3Promiser('open', {filename:"/foo.db"}).then(function(msg){ + console.log("open response",msg); // => {type:'open', result: {filename:'/foo.db'}, ...} + }); + sq3Promiser({type:'close'}).then((msg)=>{ + console.log("close response",msg); // => {type:'close', result: {filename:'/foo.db'}, ...} + }); + ``` + + Differences from Worker API #1: + + - exec's {callback: STRING} option does not work via this + interface (it triggers an exception), but {callback: function} + does and works exactly like the STRING form does in the Worker: + the callback is called one time for each row of the result set, + passed the same worker message format as the worker API emits: + + {type:typeString, + row:VALUE, + rowNumber:1-based-#, + columnNames: array} + + Where `typeString` is an internally-synthesized message type string + used temporarily for worker message dispatching. It can be ignored + by all client code except that which tests this API. The `row` + property contains the row result in the form implied by the + `rowMode` option (defaulting to `'array'`). The `rowNumber` is a + 1-based integer value incremented by 1 on each call into th + callback. + + At the end of the result set, the same event is fired with + (row=undefined, rowNumber=null) to indicate that + the end of the result set has been reached. Note that the rows + arrive via worker-posted messages, with all the implications + of that. +*/ +self.sqlite3Worker1Promiser = function callee(config = callee.defaultConfig){ + // Inspired by: https://stackoverflow.com/a/52439530 + if(1===arguments.length && 'function'===typeof arguments[0]){ + const f = config; + config = Object.assign(Object.create(null), callee.defaultConfig); + config.onready = f; + }else{ + config = Object.assign(Object.create(null), callee.defaultConfig, config); + } + const handlerMap = Object.create(null); + const noop = function(){}; + const err = config.onerror + || noop /* config.onerror is intentionally undocumented + pending finding a less ambiguous name */; + const debug = config.debug || noop; + const idTypeMap = config.generateMessageId ? 
undefined : Object.create(null); + const genMsgId = config.generateMessageId || function(msg){ + return msg.type+'#'+(idTypeMap[msg.type] = (idTypeMap[msg.type]||0) + 1); + }; + const toss = (...args)=>{throw new Error(args.join(' '))}; + if(!config.worker) config.worker = callee.defaultConfig.worker; + if('function'===typeof config.worker) config.worker = config.worker(); + let dbId; + config.worker.onmessage = function(ev){ + ev = ev.data; + debug('worker1.onmessage',ev); + let msgHandler = handlerMap[ev.messageId]; + if(!msgHandler){ + if(ev && 'sqlite3-api'===ev.type && 'worker1-ready'===ev.result) { + /*fired one time when the Worker1 API initializes*/ + if(config.onready) config.onready(); + return; + } + msgHandler = handlerMap[ev.type] /* check for exec per-row callback */; + if(msgHandler && msgHandler.onrow){ + msgHandler.onrow(ev); + return; + } + if(config.onunhandled) config.onunhandled(arguments[0]); + else err("sqlite3Worker1Promiser() unhandled worker message:",ev); + return; + } + delete handlerMap[ev.messageId]; + switch(ev.type){ + case 'error': + msgHandler.reject(ev); + return; + case 'open': + if(!dbId) dbId = ev.dbId; + break; + case 'close': + if(ev.dbId===dbId) dbId = undefined; + break; + default: + break; + } + try {msgHandler.resolve(ev)} + catch(e){msgHandler.reject(e)} + }/*worker.onmessage()*/; + return function(/*(msgType, msgArgs) || (msgEnvelope)*/){ + let msg; + if(1===arguments.length){ + msg = arguments[0]; + }else if(2===arguments.length){ + msg = { + type: arguments[0], + args: arguments[1] + }; + }else{ + toss("Invalid arugments for sqlite3Worker1Promiser()-created factory."); + } + if(!msg.dbId) msg.dbId = dbId; + msg.messageId = genMsgId(msg); + msg.departureTime = performance.now(); + const proxy = Object.create(null); + proxy.message = msg; + let rowCallbackId /* message handler ID for exec on-row callback proxy */; + if('exec'===msg.type && msg.args){ + if('function'===typeof msg.args.callback){ + rowCallbackId = msg.messageId+':row'; + proxy.onrow = msg.args.callback; + msg.args.callback = rowCallbackId; + handlerMap[rowCallbackId] = proxy; + }else if('string' === typeof msg.args.callback){ + toss("exec callback may not be a string when using the Promise interface."); + /** + Design note: the reason for this limitation is that this + API takes over worker.onmessage() and the client has no way + of adding their own message-type handlers to it. Per-row + callbacks are implemented as short-lived message.type + mappings for worker.onmessage(). + + We "could" work around this by providing a new + config.fallbackMessageHandler (or some such) which contains + a map of event type names to callbacks. Seems like overkill + for now, seeing as the client can pass callback functions + to this interface (whereas the string-form "callback" is + needed for the over-the-Worker interface). 
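A hedged sketch of the function-form callback described above, assuming a promiser instance created as in the earlier example, an open db containing a table t, and that the Worker1 'exec' message carries its SQL in a `sql` property and its row shape in `rowMode` (as the surrounding documentation implies):

```
sq3Promiser('exec', {
  sql: "SELECT a, b FROM t",
  rowMode: 'array',
  callback: function(msg){
    if( undefined === msg.row ){
      /* End-of-result-set marker: row===undefined, rowNumber===null. */
      return;
    }
    console.log("row #"+msg.rowNumber, msg.row);
  }
}).then((msg)=>console.log("exec completed:", msg.type));
```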
+ */ + } + } + //debug("requestWork", msg); + let p = new Promise(function(resolve, reject){ + proxy.resolve = resolve; + proxy.reject = reject; + handlerMap[msg.messageId] = proxy; + debug("Posting",msg.type,"message to Worker dbId="+(dbId||'default')+':',msg); + config.worker.postMessage(msg); + }); + if(rowCallbackId) p = p.finally(()=>delete handlerMap[rowCallbackId]); + return p; + }; +}/*sqlite3Worker1Promiser()*/; +self.sqlite3Worker1Promiser.defaultConfig = { + worker: function(){ + let theJs = "sqlite3-worker1.js"; + if(this.currentScript){ + const src = this.currentScript.src.split('/'); + src.pop(); + theJs = src.join('/')+'/' + theJs; + //console.warn("promiser currentScript, theJs =",this.currentScript,theJs); + }else{ + //console.warn("promiser self.location =",self.location); + const urlParams = new URL(self.location.href).searchParams; + if(urlParams.has('sqlite3.dir')){ + theJs = urlParams.get('sqlite3.dir') + '/' + theJs; + } + } + return new Worker(theJs + self.location.search); + }.bind({ + currentScript: self?.document?.currentScript + }), + onerror: (...args)=>console.error('worker1 promiser error',...args) +}; ADDED ext/wasm/api/sqlite3-worker1.js Index: ext/wasm/api/sqlite3-worker1.js ================================================================== --- /dev/null +++ ext/wasm/api/sqlite3-worker1.js @@ -0,0 +1,49 @@ +/* + 2022-05-23 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + This is a JS Worker file for the main sqlite3 api. It loads + sqlite3.js, initializes the module, and postMessage()'s a message + after the module is initialized: + + {type: 'sqlite3-api', result: 'worker1-ready'} + + This seemingly superfluous level of indirection is necessary when + loading sqlite3.js via a Worker. Instantiating a worker with new + Worker("sqlite.js") will not (cannot) call sqlite3InitModule() to + initialize the module due to a timing/order-of-operations conflict + (and that symbol is not exported in a way that a Worker loading it + that way can see it). Thus JS code wanting to load the sqlite3 + Worker-specific API needs to pass _this_ file (or equivalent) to the + Worker constructor and then listen for an event in the form shown + above in order to know when the module has completed initialization. + + This file accepts a URL arguments to adjust how it loads sqlite3.js: + + - `sqlite3.dir`, if set, treats the given directory name as the + directory from which `sqlite3.js` will be loaded. +*/ +"use strict"; +(()=>{ + const urlParams = new URL(self.location.href).searchParams; + let theJs = 'sqlite3.js'; + if(urlParams.has('sqlite3.dir')){ + theJs = urlParams.get('sqlite3.dir') + '/' + theJs; + } + //console.warn("worker1 theJs =",theJs); + importScripts(theJs); + sqlite3InitModule().then((sqlite3)=>{ + if(sqlite3.capi.sqlite3_wasmfs_opfs_dir){ + sqlite3.capi.sqlite3_wasmfs_opfs_dir(); + } + sqlite3.initWorker1API(); + }); +})(); ADDED ext/wasm/batch-runner.html Index: ext/wasm/batch-runner.html ================================================================== --- /dev/null +++ ext/wasm/batch-runner.html @@ -0,0 +1,90 @@ + + + + + + + + + sqlite3-api batch SQL runner + + +
[The markup of batch-runner.html did not survive extraction; only fragments of the page's visible text remain. Recoverable content: the page title and heading "sqlite3-api batch SQL runner"; an "Initializing app..." status block noting that initialization may take a moment on a slow connection and that the JavaScript console may contain clues if it fails; a "Downloading..." indicator; a note that the page is for batch-running extracts from the output of speedtest1 --script, as well as other standalone SQL scripts; and a warning that the page requires a generated input list file, produced by running "make batch" from this directory.]
ADDED ext/wasm/batch-runner.js
Index: ext/wasm/batch-runner.js
==================================================================
--- /dev/null
+++ ext/wasm/batch-runner.js
@@ -0,0 +1,588 @@
+/*
+  2022-08-29
+
+  The author disclaims copyright to this source code. In place of a
+  legal notice, here is a blessing:
+
+  * May you do good and not evil.
+  * May you find forgiveness for yourself and forgive others.
+  * May you share freely, never taking more than you give.
+
+  ***********************************************************************
+
+  A basic batch SQL runner for sqlite3-api.js. This file must be run in
+  the main JS thread and sqlite3.js must have been loaded before it.
+ const rc = sqlite3.wasm.exports.sqlite3_wasm_db_reset(db.handle); + App.logHtml("reset db rc =",rc,db.id, db.filename); + }; + + + const E = (s)=>document.querySelector(s); + const App = { + e: { + output: E('#test-output'), + selSql: E('#sql-select'), + btnRun: E('#sql-run'), + btnRunNext: E('#sql-run-next'), + btnRunRemaining: E('#sql-run-remaining'), + btnExportMetrics: E('#export-metrics'), + btnClear: E('#output-clear'), + btnReset: E('#db-reset'), + cbReverseLog: E('#cb-reverse-log-order'), + selImpl: E('#select-impl'), + fsToolbar: E('#toolbar') + }, + db: Object.create(null), + dbs: Object.create(null), + cache:{}, + log: console.log.bind(console), + warn: console.warn.bind(console), + cls: function(){this.e.output.innerHTML = ''}, + logHtml2: function(cssClass,...args){ + const ln = document.createElement('div'); + if(cssClass) ln.classList.add(cssClass); + ln.append(document.createTextNode(args.join(' '))); + this.e.output.append(ln); + //this.e.output.lastElementChild.scrollIntoViewIfNeeded(); + }, + logHtml: function(...args){ + console.log(...args); + if(1) this.logHtml2('', ...args); + }, + logErr: function(...args){ + console.error(...args); + if(1) this.logHtml2('error', ...args); + }, + + execSql: async function(name,sql){ + const db = this.getSelectedDb(); + const banner = "========================================"; + this.logHtml(banner, + "Running",name,'('+sql.length,'bytes) using',db.id); + const capi = this.sqlite3.capi, wasm = this.sqlite3.wasm; + let pStmt = 0, pSqlBegin; + const stack = wasm.scopedAllocPush(); + const metrics = db.metrics = Object.create(null); + metrics.prepTotal = metrics.stepTotal = 0; + metrics.stmtCount = 0; + metrics.malloc = 0; + metrics.strcpy = 0; + this.blockControls(true); + if(this.gotErr){ + this.logErr("Cannot run SQL: error cleanup is pending."); + return; + } + // Run this async so that the UI can be updated for the above header... + const endRun = ()=>{ + metrics.evalSqlEnd = performance.now(); + metrics.evalTimeTotal = (metrics.evalSqlEnd - metrics.evalSqlStart); + this.logHtml(db.id,"metrics:",JSON.stringify(metrics, undefined, ' ')); + this.logHtml("prepare() count:",metrics.stmtCount); + this.logHtml("Time in prepare_v2():",metrics.prepTotal,"ms", + "("+(metrics.prepTotal / metrics.stmtCount),"ms per prepare())"); + this.logHtml("Time in step():",metrics.stepTotal,"ms", + "("+(metrics.stepTotal / metrics.stmtCount),"ms per step())"); + this.logHtml("Total runtime:",metrics.evalTimeTotal,"ms"); + this.logHtml("Overhead (time - prep - step):", + (metrics.evalTimeTotal - metrics.prepTotal - metrics.stepTotal)+"ms"); + this.logHtml(banner,"End of",name); + }; + + let runner; + if('websql'===db.id){ + const who = this; + runner = function(resolve, reject){ + /* WebSQL cannot execute multiple statements, nor can it execute SQL without + an explicit transaction. Thus we have to do some fragile surgery on the + input SQL. Since we're only expecting carefully curated inputs, the hope is + that this will suffice. PS: it also can't run most SQL functions, e.g. even + instr() results in "not authorized". 
*/ + if('string'!==typeof sql){ // assume TypedArray + sql = new TextDecoder().decode(sql); + } + sql = sql.replace(/-- [^\n]+\n/g,''); // comment lines interfere with our split() + const sqls = sql.split(/;+\n/); + const rxBegin = /^BEGIN/i, rxCommit = /^COMMIT/i; + try { + const nextSql = ()=>{ + let x = sqls.shift(); + while(sqls.length && !x) x = sqls.shift(); + return x && x.trim(); + }; + const who = this; + const transaction = function(tx){ + try { + let s; + /* Try to approximate the spirit of the input scripts + by running batches bound by BEGIN/COMMIT statements. */ + for(s = nextSql(); !!s; s = nextSql()){ + if(rxBegin.test(s)) continue; + else if(rxCommit.test(s)) break; + //console.log("websql sql again",sqls.length, s); + ++metrics.stmtCount; + const t = performance.now(); + tx.executeSql(s,[], ()=>{}, (t,e)=>{ + console.error("WebSQL error",e,"SQL =",s); + who.logErr(e.message); + //throw e; + return false; + }); + metrics.stepTotal += performance.now() - t; + } + }catch(e){ + who.logErr("transaction():",e.message); + throw e; + } + }; + const n = sqls.length; + const nextBatch = function(){ + if(sqls.length){ + console.log("websql sqls.length",sqls.length,'of',n); + db.handle.transaction(transaction, (e)=>{ + who.logErr("Ignoring and contiuing:",e.message) + //reject(e); + return false; + }, nextBatch); + }else{ + resolve(who); + } + }; + metrics.evalSqlStart = performance.now(); + nextBatch(); + }catch(e){ + //this.gotErr = e; + console.error("websql error:",e); + who.logErr(e.message); + //reject(e); + } + }.bind(this); + }else{/*sqlite3 db...*/ + runner = function(resolve, reject){ + metrics.evalSqlStart = performance.now(); + try { + let t; + let sqlByteLen = sql.byteLength; + const [ppStmt, pzTail] = wasm.scopedAllocPtr(2); + t = performance.now(); + pSqlBegin = wasm.scopedAlloc( sqlByteLen + 1/*SQL + NUL*/) || toss("alloc(",sqlByteLen,") failed"); + metrics.malloc = performance.now() - t; + metrics.byteLength = sqlByteLen; + let pSql = pSqlBegin; + const pSqlEnd = pSqlBegin + sqlByteLen; + t = performance.now(); + wasm.heap8().set(sql, pSql); + wasm.setMemValue(pSql + sqlByteLen, 0); + metrics.strcpy = performance.now() - t; + let breaker = 0; + while(pSql && wasm.getMemValue(pSql,'i8')){ + wasm.setPtrValue(ppStmt, 0); + wasm.setPtrValue(pzTail, 0); + t = performance.now(); + let rc = capi.sqlite3_prepare_v3( + db.handle, pSql, sqlByteLen, 0, ppStmt, pzTail + ); + metrics.prepTotal += performance.now() - t; + checkSqliteRc(db.handle, rc); + pStmt = wasm.getPtrValue(ppStmt); + pSql = wasm.getPtrValue(pzTail); + sqlByteLen = pSqlEnd - pSql; + if(!pStmt) continue/*empty statement*/; + ++metrics.stmtCount; + t = performance.now(); + rc = capi.sqlite3_step(pStmt); + capi.sqlite3_finalize(pStmt); + pStmt = 0; + metrics.stepTotal += performance.now() - t; + switch(rc){ + case capi.SQLITE_ROW: + case capi.SQLITE_DONE: break; + default: checkSqliteRc(db.handle, rc); toss("Not reached."); + } + } + resolve(this); + }catch(e){ + if(pStmt) capi.sqlite3_finalize(pStmt); + //this.gotErr = e; + reject(e); + }finally{ + capi.sqlite3_exec(db.handle,"rollback;",0,0,0); + wasm.scopedAllocPop(stack); + } + }.bind(this); + } + let p; + if(1){ + p = new Promise(function(res,rej){ + setTimeout(()=>runner(res, rej), 50)/*give UI a chance to output the "running" banner*/; + }); + }else{ + p = new Promise(runner); + } + return p.catch( + (e)=>this.logErr("Error via execSql("+name+",...):",e.message) + ).finally(()=>{ + endRun(); + this.blockControls(false); + }); + }, + + clearDb: function(){ 
+ const db = this.getSelectedDb(); + if('websql'===db.id){ + this.logErr("TODO: clear websql db."); + return; + } + if(!db.handle) return; + const capi = this.sqlite3, wasm = this.sqlite3.wasm; + //const scope = wasm.scopedAllocPush( + this.logErr("TODO: clear db"); + }, + + /** + Loads batch-runner.list and populates the selection list from + it. Returns a promise which resolves to nothing in particular + when it completes. Only intended to be run once at the start + of the app. + */ + loadSqlList: async function(){ + const sel = this.e.selSql; + sel.innerHTML = ''; + this.blockControls(true); + const infile = 'batch-runner.list'; + this.logHtml("Loading list of SQL files:", infile); + let txt; + try{ + const r = await fetch(infile); + if(404 === r.status){ + toss("Missing file '"+infile+"'."); + } + if(!r.ok) toss("Loading",infile,"failed:",r.statusText); + txt = await r.text(); + const warning = E('#warn-list'); + if(warning) warning.remove(); + }catch(e){ + this.logErr(e.message); + throw e; + }finally{ + this.blockControls(false); + } + const list = txt.split(/\n+/); + let opt; + if(0){ + opt = document.createElement('option'); + opt.innerText = "Select file to evaluate..."; + opt.value = ''; + opt.disabled = true; + opt.selected = true; + sel.appendChild(opt); + } + list.forEach(function(fn){ + if(!fn) return; + opt = document.createElement('option'); + opt.value = fn; + opt.innerText = fn.split('/').pop(); + sel.appendChild(opt); + }); + this.logHtml("Loaded",infile); + }, + + /** Fetch ./fn and return its contents as a Uint8Array. */ + fetchFile: async function(fn, cacheIt=false){ + if(cacheIt && this.cache[fn]) return this.cache[fn]; + this.logHtml("Fetching",fn,"..."); + let sql; + try { + const r = await fetch(fn); + if(!r.ok) toss("Fetch failed:",r.statusText); + sql = new Uint8Array(await r.arrayBuffer()); + }catch(e){ + this.logErr(e.message); + throw e; + } + this.logHtml("Fetched",sql.length,"bytes from",fn); + if(cacheIt) this.cache[fn] = sql; + return sql; + }/*fetchFile()*/, + + /** Disable or enable certain UI controls. */ + blockControls: function(disable){ + //document.querySelectorAll('.disable-during-eval').forEach((e)=>e.disabled = disable); + this.e.fsToolbar.disabled = disable; + }, + + /** + Converts this.metrics() to a form which is suitable for easy conversion to + CSV. It returns an array of arrays. The first sub-array is the column names. + The 2nd and subsequent are the values, one per test file (only the most recent + metrics are kept for any given file). + */ + metricsToArrays: function(){ + const rc = []; + Object.keys(this.dbs).sort().forEach((k)=>{ + const d = this.dbs[k]; + const m = d.metrics; + delete m.evalSqlStart; + delete m.evalSqlEnd; + const mk = Object.keys(m).sort(); + if(!rc.length){ + rc.push(['db', ...mk]); + } + const row = [k.split('/').pop()/*remove dir prefix from filename*/]; + rc.push(row); + row.push(...mk.map((kk)=>m[kk])); + }); + return rc; + }, + + metricsToBlob: function(colSeparator='\t'){ + const ar = [], ma = this.metricsToArrays(); + if(!ma.length){ + this.logErr("Metrics are empty. 
Run something."); + return; + } + ma.forEach(function(row){ + ar.push(row.join(colSeparator),'\n'); + }); + return new Blob(ar); + }, + + downloadMetrics: function(){ + const b = this.metricsToBlob(); + if(!b) return; + const url = URL.createObjectURL(b); + const a = document.createElement('a'); + a.href = url; + a.download = 'batch-runner-js-'+((new Date().getTime()/1000) | 0)+'.csv'; + this.logHtml("Triggering download of",a.download); + document.body.appendChild(a); + a.click(); + setTimeout(()=>{ + document.body.removeChild(a); + URL.revokeObjectURL(url); + }, 500); + }, + + /** + Fetch file fn and eval it as an SQL blob. This is an async + operation and returns a Promise which resolves to this + object on success. + */ + evalFile: async function(fn){ + const sql = await this.fetchFile(fn); + return this.execSql(fn,sql); + }/*evalFile()*/, + + /** + Clears all DB tables in all _opened_ databases. Because of + disparities between backends, we cannot simply "unlink" the + databases to clean them up. + */ + clearStorage: function(onlySelectedDb=false){ + const list = onlySelectedDb + ? [('boolean'===typeof onlySelectedDb) + ? this.dbs[this.e.selImpl.value] + : onlySelectedDb] + : Object.values(this.dbs); + for(let db of list){ + if(db && db.handle){ + this.logHtml("Clearing db",db.id); + db.clear(); + } + } + }, + + /** + Fetches the handle of the db associated with + this.e.selImpl.value, opening it if needed. + */ + getSelectedDb: function(){ + if(!this.dbs.memdb){ + for(let opt of this.e.selImpl.options){ + const d = this.dbs[opt.value] = Object.create(null); + d.id = opt.value; + switch(d.id){ + case 'virtualfs': + d.filename = 'file:/virtualfs.sqlite3?vfs=unix-none'; + break; + case 'memdb': + d.filename = ':memory:'; + break; + case 'wasmfs-opfs': + d.filename = 'file:'+( + this.sqlite3.capi.sqlite3_wasmfs_opfs_dir() + )+'/wasmfs-opfs.sqlite3b'; + break; + case 'websql': + d.filename = 'websql.db'; + break; + default: + this.logErr("Unhandled db selection option (see details in the console).",opt); + toss("Unhandled db init option"); + } + } + }/*first-time init*/ + const dbId = this.e.selImpl.value; + const d = this.dbs[dbId]; + if(d.handle) return d; + if('websql' === dbId){ + d.handle = self.openDatabase('batch-runner', '0.1', 'foo', 1024 * 1024 * 50); + d.clear = ()=>clearDbWebSQL(d); + d.handle.transaction(function(tx){ + tx.executeSql("PRAGMA cache_size="+cacheSize); + App.logHtml(dbId,"cache_size =",cacheSize); + }); + }else{ + const capi = this.sqlite3.capi, wasm = this.sqlite3.wasm; + const stack = wasm.scopedAllocPush(); + let pDb = 0; + try{ + const oFlags = capi.SQLITE_OPEN_CREATE | capi.SQLITE_OPEN_READWRITE; + const ppDb = wasm.scopedAllocPtr(); + const rc = capi.sqlite3_open_v2(d.filename, ppDb, oFlags, null); + pDb = wasm.getPtrValue(ppDb) + if(rc) toss("sqlite3_open_v2() failed with code",rc); + capi.sqlite3_exec(pDb, "PRAGMA cache_size="+cacheSize, 0, 0, 0); + this.logHtml(dbId,"cache_size =",cacheSize); + }catch(e){ + if(pDb) capi.sqlite3_close_v2(pDb); + }finally{ + wasm.scopedAllocPop(stack); + } + d.handle = pDb; + d.clear = ()=>clearDbSqlite(d); + } + d.clear(); + this.logHtml("Opened db:",dbId,d.filename); + console.log("db =",d); + return d; + }, + + run: function(sqlite3){ + delete this.run; + this.sqlite3 = sqlite3; + const capi = sqlite3.capi, wasm = sqlite3.wasm; + this.logHtml("Loaded module:",capi.sqlite3_libversion(), capi.sqlite3_sourceid()); + this.logHtml("WASM heap size =",wasm.heap8().length); + this.loadSqlList(); + 
if(capi.sqlite3_wasmfs_opfs_dir()){ + E('#warn-opfs').classList.remove('hidden'); + }else{ + E('#warn-opfs').remove(); + E('option[value=wasmfs-opfs]').disabled = true; + } + if('function' === typeof self.openDatabase){ + E('#warn-websql').classList.remove('hidden'); + }else{ + E('option[value=websql]').disabled = true; + E('#warn-websql').remove(); + } + const who = this; + if(this.e.cbReverseLog.checked){ + this.e.output.classList.add('reverse'); + } + this.e.cbReverseLog.addEventListener('change', function(){ + who.e.output.classList[this.checked ? 'add' : 'remove']('reverse'); + }, false); + this.e.btnClear.addEventListener('click', ()=>this.cls(), false); + this.e.btnRun.addEventListener('click', function(){ + if(!who.e.selSql.value) return; + who.evalFile(who.e.selSql.value); + }, false); + this.e.btnRunNext.addEventListener('click', function(){ + ++who.e.selSql.selectedIndex; + if(!who.e.selSql.value) return; + who.evalFile(who.e.selSql.value); + }, false); + this.e.btnReset.addEventListener('click', function(){ + who.clearStorage(true); + }, false); + this.e.btnExportMetrics.addEventListener('click', function(){ + who.logHtml2('warning',"Triggering download of metrics CSV. Check your downloads folder."); + who.downloadMetrics(); + //const m = who.metricsToArrays(); + //console.log("Metrics:",who.metrics, m); + }); + this.e.selImpl.addEventListener('change', function(){ + who.getSelectedDb(); + }); + this.e.btnRunRemaining.addEventListener('click', async function(){ + let v = who.e.selSql.value; + const timeStart = performance.now(); + while(v){ + await who.evalFile(v); + if(who.gotError){ + who.logErr("Error handling script",v,":",who.gotError.message); + break; + } + ++who.e.selSql.selectedIndex; + v = who.e.selSql.value; + } + const timeTotal = performance.now() - timeStart; + who.logHtml("Run-remaining time:",timeTotal,"ms ("+(timeTotal/1000/60)+" minute(s))"); + who.clearStorage(); + }, false); + }/*run()*/ + }/*App*/; + + self.sqlite3TestModule.initSqlite3().then(function(sqlite3_){ + sqlite3 = sqlite3_; + self.App = App /* only to facilitate dev console access */; + App.run(sqlite3); + }); +})(); Index: ext/wasm/common/SqliteTestUtil.js ================================================================== --- ext/wasm/common/SqliteTestUtil.js +++ ext/wasm/common/SqliteTestUtil.js @@ -111,36 +111,77 @@ returns falsy. */ throwUnless: function(expr, msg){ ++this.counter; if(!this.toBool(expr)) throw new Error(msg || "throwUnless() failed"); return this; + }, + + /** + Parses window.location.search-style string into an object + containing key/value pairs of URL arguments (already + urldecoded). The object is created using Object.create(null), + so contains only parsed-out properties and has no prototype + (and thus no inherited properties). + + If the str argument is not passed (arguments.length==0) then + window.location.search.substring(1) is used by default. If + neither str is passed in nor window exists then false is returned. + + On success it returns an Object containing the key/value pairs + parsed from the string. Keys which have no value are treated + has having the boolean true value. + + Pedantic licensing note: this code has appeared in other source + trees, but was originally written by the same person who pasted + it into those trees. 
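A hedged usage sketch for the helper documented above, assuming the containing utility object is exported as self.SqliteTestUtil (implied by the file name but not visible in this hunk):

```
/* Explicit-string form; keys without a value become boolean true. */
const args = self.SqliteTestUtil.processUrlArgs("sqlite3.dir=jswasm&verbose");
console.log(args["sqlite3.dir"]); // "jswasm"
console.log(args.verbose);        // true

/* No-argument form: parses window.location.search (minus its leading '?'),
   or returns false if neither a string nor a window object is available. */
const urlArgs = self.SqliteTestUtil.processUrlArgs();
```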
+ */ + processUrlArgs: function(str) { + if( 0 === arguments.length ) { + if( ('undefined' === typeof window) || + !window.location || + !window.location.search ) return false; + else str = (''+window.location.search).substring(1); + } + if( ! str ) return false; + str = (''+str).split(/#/,2)[0]; // remove #... to avoid it being added as part of the last value. + const args = Object.create(null); + const sp = str.split(/&+/); + const rx = /^([^=]+)(=(.+))?/; + var i, m; + for( i in sp ) { + m = rx.exec( sp[i] ); + if( ! m ) continue; + args[decodeURIComponent(m[1])] = (m[3] ? decodeURIComponent(m[3]) : true); + } + return args; } }; /** This is a module object for use with the emscripten-installed sqlite3InitModule() factory function. */ self.sqlite3TestModule = { + /** + Array of functions to call after Emscripten has initialized the + wasm module. Each gets passed the Emscripten module object + (which is _this_ object). + */ postRun: [ /* function(theModule){...} */ ], //onRuntimeInitialized: function(){}, /* Proxy for C-side stdout output. */ - print: function(){ - console.log.apply(console, Array.prototype.slice.call(arguments)); - }, + print: (...args)=>{console.log(...args)}, /* Proxy for C-side stderr output. */ - printErr: function(){ - console.error.apply(console, Array.prototype.slice.call(arguments)); - }, + printErr: (...args)=>{console.error(...args)}, /** - Called by the module init bits to report loading - progress. It gets passed an empty argument when loading is - done (after onRuntimeInitialized() and any this.postRun - callbacks have been run). + Called by the Emscripten module init bits to report loading + progress. It gets passed an empty argument when loading is done + (after onRuntimeInitialized() and any this.postRun callbacks + have been run). */ setStatus: function f(text){ if(!f.last){ f.last = { text: '', step: 0 }; f.ui = { @@ -166,8 +207,30 @@ delete f.ui.progress; delete f.ui.spinner; } f.ui.status.classList.add('hidden'); } + }, + /** + Config options used by the Emscripten-dependent initialization + which happens via this.initSqlite3(). This object gets + (indirectly) passed to sqlite3ApiBootstrap() to configure the + sqlite3 API. + */ + sqlite3ApiConfig: { + wasmfsOpfsDir: "/opfs" + }, + /** + Intended to be called by apps which need to call the + Emscripten-installed sqlite3InitModule() routine. This function + temporarily installs this.sqlite3ApiConfig into the self + object, calls it sqlite3InitModule(), and removes + self.sqlite3ApiConfig after initialization is done. Returns the + promise from sqlite3InitModule(), and the next then() handler + will get the sqlite3 API object as its argument. 
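A minimal usage sketch, mirroring how the demo pages in this tree
consume it (the logging call is illustrative only):

```
self.sqlite3TestModule.initSqlite3().then(function(sqlite3){
  console.log('sqlite3', sqlite3.capi.sqlite3_libversion(),
              sqlite3.capi.sqlite3_sourceid());
});
```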
+ */ + initSqlite3: function(){ + self.sqlite3ApiConfig = this.sqlite3ApiConfig; + return self.sqlite3InitModule(this).finally(()=>delete self.sqlite3ApiConfig); } }; })(self/*window or worker*/); Index: ext/wasm/common/testing.css ================================================================== --- ext/wasm/common/testing.css +++ ext/wasm/common/testing.css @@ -1,5 +1,10 @@ +body { + display: flex; + flex-direction: column; + flex-wrap: wrap; +} textarea { font-family: monospace; } header { font-size: 130%; @@ -27,6 +32,32 @@ .center { text-align: center; } .error { color: red; background-color: yellow; } -#test-output { font-family: monospace } +.strong { font-weight: 700 } +.warning { color: firebrick; } +.green { color: darkgreen; } +.tests-pass { background-color: green; color: white } +.tests-fail { background-color: red; color: yellow } +.faded { opacity: 0.5; } +.group-start { color: blue; } +.group-end { color: blue; } +.input-wrapper { + white-space: nowrap; + display: flex; + align-items: center; +} +#test-output { + border: 1px inset; + border-radius: 0.25em; + padding: 0.25em; + /*max-height: 30em;*/ + overflow: auto; + white-space: break-spaces; + display: flex; flex-direction: column; + font-family: monospace; +} +#test-output.reverse { + flex-direction: column-reverse; +} +label[for] { cursor: pointer } Index: ext/wasm/common/whwasmutil.js ================================================================== --- ext/wasm/common/whwasmutil.js +++ ext/wasm/common/whwasmutil.js @@ -13,15 +13,21 @@ The whwasmutil is developed in conjunction with the Jaccwabyt project: https://fossil.wanderinghorse.net/r/jaccwabyt + and sqlite3: + + https://sqlite.org + + This file is kept in sync between both of those trees. + Maintenance reminder: If you're reading this in a tree other than - the Jaccwabyt tree, note that this copy may be replaced with + one of those listed above, note that this copy may be replaced with upstream copies of that one from time to time. Thus the code - installed by this function "should not" be edited outside of that - project, else it risks getting overwritten. + installed by this function "should not" be edited outside of those + projects, else it risks getting overwritten. */ /** This function is intended to simplify porting around various bits of WASM-related utility code from project to project. @@ -61,11 +67,11 @@ if allocation is available. - WASM-exported "indirect function table" access and manipulation. e.g. creating new WASM-side functions using JS functions, analog to Emscripten's addFunction() and - removeFunction() but slightly different. + uninstallFunction() but slightly different. - Get/set specific heap memory values, analog to Emscripten's getValue() and setValue(). - String length counting in UTF-8 bytes (C-style and JS strings). @@ -107,12 +113,12 @@ this code. In an Enscripten environment it must be set to `Module['asm']`. The exports object must contain a minimum of the following symbols: - `memory`: a WebAssembly.Memory object representing the WASM - memory. _Alternately_, the `memory` property can be set on the - target instance, in particular if the WASM heap memory is + memory. _Alternately_, the `memory` property can be set as + `target.memory`, in particular if the WASM heap memory is initialized in JS an _imported_ into WASM, as opposed to being initialized in WASM and exported to JS. - `__indirect_function_table`: the WebAssembly.Table object which holds WASM-exported functions. 
This API does not strictly @@ -130,11 +136,15 @@ the target object. It "should" be set to true if the WASM environment is compiled with BigInt support, else it must be false. If it is false, certain BigInt-related features will trigger an exception if invoked. This property, if not set when this is called, will get a default value of true only if the BigInt64Array - constructor is available, else it will default to false. + constructor is available, else it will default to false. Note that + having the BigInt type is not sufficient for full int64 integration + with WASM: the target WASM file must also have been built with + that support. In Emscripten that's done using the `-sWASM_BIGINT` + flag. Some optional APIs require that the target have the following methods: - 'alloc()` must behave like C's `malloc()`, allocating N bytes of @@ -210,13 +220,14 @@ /** Pointers in WASM are currently assumed to be 32-bit, but someday that will certainly change. */ const ptrIR = target.pointerIR || 'i32'; - const ptrSizeof = ('i32'===ptrIR ? 4 - : ('i64'===ptrIR - ? 8 : toss("Unhandled ptrSizeof:",ptrIR))); + const ptrSizeof = target.ptrSizeof = + ('i32'===ptrIR ? 4 + : ('i64'===ptrIR + ? 8 : toss("Unhandled ptrSizeof:",ptrIR))); /** Stores various cached state. */ const cache = Object.create(null); /** Previously-recorded size of cache.memory.buffer, noted so that we can recreate the view objects if the heap grows. */ cache.heapSize = 0; @@ -292,11 +303,11 @@ Returns an integer-based TypedArray view of the WASM heap memory buffer associated with the given block size. If passed an integer as the first argument and unsigned is truthy then the "U" (unsigned) variant of that view is returned, else the signed variant is returned. If passed a TypedArray value, the - 2nd argument is ignores. Note that Float32Array and + 2nd argument is ignored. Note that Float32Array and Float64Array views are not supported by this function. Note that growth of the heap will invalidate any references to this heap, so do not hold a reference longer than needed and do not use a reference after any operation which may @@ -324,19 +335,19 @@ case 32: return unsigned ? c.HEAP32U : c.HEAP32; case 64: if(c.HEAP64) return unsigned ? c.HEAP64U : c.HEAP64; break; default: - if(this.bigIntEnabled){ + if(target.bigIntEnabled){ if(n===self['BigUint64Array']) return c.HEAP64U; else if(n===self['BigInt64Array']) return c.HEAP64; break; } } toss("Invalid heapForSize() size: expecting 8, 16, 32,", "or (if BigInt is enabled) 64."); - }.bind(target); + }; /** Returns the WASM-exported "indirect function table." */ target.functionTable = function(){ @@ -344,35 +355,37 @@ /** -----------------^^^^^ "seems" to be a standardized export name. From Emscripten release notes from 2020-09-10: - Use `__indirect_function_table` as the import name for the table, which is what LLVM does. */ - }.bind(target); + }; /** Given a function pointer, returns the WASM function table entry if found, else returns a falsy value. */ target.functionEntry = function(fptr){ - const ft = this.functionTable(); + const ft = target.functionTable(); return fptr < ft.length ? ft.get(fptr) : undefined; - }.bind(target); + }; /** Creates a WASM function which wraps the given JS function and returns the JS binding of that WASM function. The signature - argument must be the Jaccwabyt-format or Emscripten + string must be the Jaccwabyt-format or Emscripten addFunction()-format function signature string. 
In short: in may have one of the following formats: - - Emscripten: `x...`, where the first x is a letter representing + - Emscripten: `"x..."`, where the first x is a letter representing the result type and subsequent letters represent the argument - types. See below. + types. Functions with no arguments have only a single + letter. See below. - - Jaccwabyt: `x(...)` where `x` is the letter representing the + - Jaccwabyt: `"x(...)"` where `x` is the letter representing the result type and letters in the parens (if any) represent the - argument types. See below. + argument types. Functions with no arguments use `x()`. See + below. Supported letters: - `i` = int32 - `p` = int32 ("pointer") @@ -389,21 +402,29 @@ - `s`, `P`: same as `p` Sidebar: this code is developed together with Jaccwabyt, thus the support for its signature format. + + The arguments may be supplied in either order: (func,sig) or + (sig,func). */ target.jsFuncToWasm = function f(func, sig){ /** Attribution: adapted up from Emscripten-generated glue code, refactored primarily for efficiency's sake, eliminating call-local functions and superfluous temporary arrays. */ if(!f._){/*static init...*/ f._ = { // Map of signature letters to type IR values - sigTypes: Object.create(null), + sigTypes: Object.assign(Object.create(null),{ + i: 'i32', p: 'i32', P: 'i32', s: 'i32', + j: 'i64', f: 'f32', d: 'f64' + }), // Map of type IR values to WASM type code values - typeCodes: Object.create(null), + typeCodes: Object.assign(Object.create(null),{ + f64: 0x7c, f32: 0x7d, i64: 0x7e, i32: 0x7f + }), /** Encodes n, which must be <2^14 (16384), into target array tgt, as a little-endian value, using the given method ('push' or 'unshift'). */ uleb128Encode: function(tgt, method, n){ if(n<128) tgt[method](n); @@ -428,26 +449,27 @@ signature is invalid. */ /******** // only valid for use with the WebAssembly.Function ctor, which // is not yet documented on MDN. sigToWasm: function(sig){ const rc = {parameters:[], results: []}; - if('v'!==sig[0]) rc.results.push(f._.letterType(sig[0])); + if('v'!==sig[0]) rc.results.push(f.sigTypes(sig[0])); for(const x of f._.sigParams(sig)){ - rc.parameters.push(f._.letterType(x)); + rc.parameters.push(f._.typeCodes(x)); } return rc; },************/ /** Pushes the WASM data type code for the given signature letter to the given target array. Throws if letter is invalid. */ pushSigType: (dest, letter)=>dest.push(f._.typeCodes[f._.letterType(letter)]) }; - f._.sigTypes.i = f._.sigTypes.p = f._.sigTypes.P = f._.sigTypes.s = 'i32'; - f._.sigTypes.j = 'i64'; f._.sigTypes.f = 'f32'; f._.sigTypes.d = 'f64'; - f._.typeCodes['i32'] = 0x7f; f._.typeCodes['i64'] = 0x7e; - f._.typeCodes['f32'] = 0x7d; f._.typeCodes['f64'] = 0x7c; }/*static init*/ + if('string'===typeof func){ + const x = sig; + sig = func; + func = x; + } const sigParams = f._.sigParams(sig); const wasmCode = [0x01/*count: 1*/, 0x60/*function*/]; f._.uleb128Encode(wasmCode, 'push', sigParams.length); for(const x of sigParams) f._.pushSigType(wasmCode, x); if('v'===sig[0]) wasmCode.push(0); @@ -480,12 +502,15 @@ this.jsFuncToWasm(). It uses that function to create a WASM-exported function, installs that function to the next available slot of this.functionTable(), and returns the function's index in that table (which acts as a pointer to that function). The returned pointer can be passed to - removeFunction() to uninstall it and free up the table slot for + uninstallFunction() to uninstall it and free up the table slot for reuse. 
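A hypothetical usage sketch (`wasm` stands for whatever object this
installer was applied to, e.g. sqlite3.wasm in this tree; the callback
body and its C-side consumer are assumptions):

```
// Expose a JS callback as a WASM function pointer with signature int(*)(void*)...
const fptr = wasm.installFunction(function(pArg){ return 0; }, 'i(p)');
// ...hand fptr to C code expecting such a callback, then free the table slot:
wasm.uninstallFunction(fptr);
```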
+ + If passed (string,function) arguments then it treats the first + argument as the signature and second as the function. As a special case, if the passed-in function is a WASM-exported function then the signature argument is ignored and func is installed as-is, without requiring re-compilation/re-wrapping. @@ -496,17 +521,25 @@ `-sALLOW_TABLE_GROWTH` flag. Sidebar: this function differs from Emscripten's addFunction() _primarily_ in that it does not share that function's undocumented behavior of reusing a function if it's passed to - addFunction() more than once, which leads to removeFunction() + addFunction() more than once, which leads to uninstallFunction() breaking clients which do not take care to avoid that case: https://github.com/emscripten-core/emscripten/issues/17323 */ target.installFunction = function f(func, sig){ - const ft = this.functionTable(); + if(2!==arguments.length){ + toss("installFunction() requires exactly 2 arguments"); + } + if('string'===typeof func){ + const x = sig; + sig = func; + func = x; + } + const ft = target.functionTable(); const oldLen = ft.length; let ptr; while(cache.freeFuncIndexes.length){ ptr = cache.freeFuncIndexes.pop(); if(ft.get(ptr)){ /* Table was modified via a different API */ @@ -530,17 +563,17 @@ throw e; } } // It's not a WASM-exported function, so compile one... try { - ft.set(ptr, this.jsFuncToWasm(func, sig)); + ft.set(ptr, target.jsFuncToWasm(func, sig)); }catch(e){ if(ptr===oldLen) cache.freeFuncIndexes.push(oldLen); throw e; } return ptr; - }.bind(target); + }; /** Requires a pointer value previously returned from this.installFunction(). Removes that function from the WASM function table, marks its table slot as free for re-use, and @@ -549,16 +582,16 @@ ptr was not returned by that function. The returned function may be passed back to installFunction() to reinstall it. */ target.uninstallFunction = function(ptr){ const fi = cache.freeFuncIndexes; - const ft = this.functionTable(); + const ft = target.functionTable(); fi.push(ptr); const rc = ft.get(ptr); ft.set(ptr, null); return rc; - }.bind(target); + }; /** Given a WASM heap memory address and a data type name in the form (i8, i16, i32, i64, float (or f32), double (or f64)), this fetches the numeric value from that address and returns it as a @@ -600,10 +633,14 @@ As a rule setMemValue() must be called to set (typically zero out) the pointer's value, else it will contain an essentially random value. + ACHTUNG: calling this often, e.g. in a loop, can have a noticably + painful impact on performance. Rather than doing so, use + heapForSize() to fetch the heap object and read directly from it. + See: setMemValue() */ target.getMemValue = function(ptr, type='i8'){ if(type.endsWith('*')) type = ptrIR; const c = (cache.memory && cache.heapSize === cache.memory.buffer.byteLength) @@ -612,18 +649,18 @@ case 'i1': case 'i8': return c.HEAP8[ptr>>0]; case 'i16': return c.HEAP16[ptr>>1]; case 'i32': return c.HEAP32[ptr>>2]; case 'i64': - if(this.bigIntEnabled) return BigInt(c.HEAP64[ptr>>3]); + if(target.bigIntEnabled) return BigInt(c.HEAP64[ptr>>3]); break; case 'float': case 'f32': return c.HEAP32F[ptr>>2]; case 'double': case 'f64': return Number(c.HEAP64F[ptr>>3]); default: break; } toss('Invalid type for getMemValue():',type); - }.bind(target); + }; /** The counterpart of getMemValue(), this sets a numeric value at the given WASM heap address, using the type to define how many bytes are written. Throws if given an invalid type. 
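For example, a sketch assuming `wasm` refers to an object this
installer was applied to (e.g. sqlite3.wasm):

```
const p = wasm.alloc(4);              // room for one 32-bit value
wasm.setMemValue(p, 123, 'i32');      // write it...
const v = wasm.getMemValue(p, 'i32'); // ...and read it back (123)
wasm.dealloc(p);
```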
See @@ -630,10 +667,14 @@ getMemValue() for details about the type argument. If the 3rd argument ends with `*` then it is treated as a pointer type and this function behaves as if the 3rd argument were `i32`. This function returns itself. + + ACHTUNG: calling this often, e.g. in a loop, can have a noticably + painful impact on performance. Rather than doing so, use + heapForSize() to fetch the heap object and assign directly to it. */ target.setMemValue = function f(ptr, value, type='i8'){ if (type.endsWith('*')) type = ptrIR; const c = (cache.memory && cache.heapSize === cache.memory.buffer.byteLength) ? cache : heapWrappers(); @@ -652,10 +693,36 @@ case 'double': case 'f64': c.HEAP64F[ptr>>3] = value; return f; } toss('Invalid type for setMemValue(): ' + type); }; + + /** Convenience form of getMemValue() intended for fetching + pointer-to-pointer values. */ + target.getPtrValue = (ptr)=>target.getMemValue(ptr, ptrIR); + + /** Convenience form of setMemValue() intended for setting + pointer-to-pointer values. */ + target.setPtrValue = (ptr, value)=>target.setMemValue(ptr, value, ptrIR); + + /** + Returns true if the given value appears to be legal for use as + a WASM pointer value. Its _range_ of values is not (cannot be) + validated except to ensure that it is a 32-bit integer with a + value of 0 or greater. Likewise, it cannot verify whether the + value actually refers to allocated memory in the WASM heap. + */ + target.isPtr32 = (ptr)=>('number'===typeof ptr && (ptr===(ptr|0)) && ptr>=0); + + /** + isPtr() is an alias for isPtr32(). If/when 64-bit WASM pointer + support becomes widespread, it will become an alias for either + isPtr32() or the as-yet-hypothetical isPtr64(), depending on a + configuration option. + */ + target.isPtr = target.isPtr32; + /** Expects ptr to be a pointer into the WASM heap memory which refers to a NUL-terminated C-style string encoded as UTF-8. Returns the length, in bytes, of the string, as for `strlen(3)`. As a special case, if !ptr then it it returns `null`. Throws if @@ -666,26 +733,34 @@ const h = heapWrappers().HEAP8U; let pos = ptr; for( ; h[pos] !== 0; ++pos ){} return pos - ptr; }; + + /** Internal helper to use in operations which need to distinguish + between SharedArrayBuffer heap memory and non-shared heap. */ + const __SAB = ('undefined'===typeof SharedArrayBuffer) + ? function(){} : SharedArrayBuffer; + const __utf8Decode = function(arrayBuffer, begin, end){ + return cache.utf8Decoder.decode( + (arrayBuffer.buffer instanceof __SAB) + ? arrayBuffer.slice(begin, end) + : arrayBuffer.subarray(begin, end) + ); + }; /** Expects ptr to be a pointer into the WASM heap memory which refers to a NUL-terminated C-style string encoded as UTF-8. This function counts its byte length using cstrlen() then returns a JS-format string representing its contents. As a special case, if ptr is falsy, `null` is returned. */ target.cstringToJs = function(ptr){ - const n = this.cstrlen(ptr); - if(null===n) return n; - return n - ? cache.utf8Decoder.decode( - new Uint8Array(heapWrappers().HEAP8U.buffer, ptr, n) - ) : ""; - }.bind(target); + const n = target.cstrlen(ptr); + return n ? __utf8Decode(heapWrappers().HEAP8U, ptr, ptr+n) : (null===n ? n : ""); + }; /** Given a JS string, this function returns its UTF-8 length in bytes. Returns null if str is not a string. */ @@ -809,20 +884,20 @@ this way, and converting such strings back to JS strings will have undefined results. 
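A loose sketch (`pSrc` is a hypothetical pointer to an existing
NUL-terminated C-string, `wasm` is the object this installer was
applied to, and memory management is the caller's responsibility):

```
const n = wasm.cstrlen(pSrc) + 1;  // +1 for the terminating NUL
const pDst = wasm.alloc(n);
wasm.cstrncpy(pDst, pSrc, n);
```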
*/ target.cstrncpy = function(tgtPtr, srcPtr, n){ if(!tgtPtr || !srcPtr) toss("cstrncpy() does not accept NULL strings."); - if(n<0) n = this.cstrlen(strPtr)+1; + if(n<0) n = target.cstrlen(strPtr)+1; else if(!(n>0)) return 0; - const heap = this.heap8u(); + const heap = target.heap8u(); let i = 0, ch; for(; i < n && (ch = heap[srcPtr+i]); ++i){ heap[tgtPtr+i] = ch; } if(icache.scopedAlloc.length, set: ()=>toss("The 'active' property is read-only.") @@ -1002,62 +1077,119 @@ */ target.scopedAllocCString = (jstr, returnWithLength=false)=>__allocCStr(jstr, returnWithLength, target.scopedAlloc, 'scopedAllocCString()'); + // impl for allocMainArgv() and scopedAllocMainArgv(). + const __allocMainArgv = function(isScoped, list){ + if(!list.length) toss("Cannot allocate empty array."); + const pList = target[ + isScoped ? 'scopedAlloc' : 'alloc' + ](list.length * target.ptrSizeof); + let i = 0; + list.forEach((e)=>{ + target.setPtrValue(pList + (target.ptrSizeof * i++), + target[ + isScoped ? 'scopedAllocCString' : 'allocCString' + ](""+e)); + }); + return pList; + }; + + /** + Creates an array, using scopedAlloc(), suitable for passing to a + C-level main() routine. The input is a collection with a length + property and a forEach() method. A block of memory list.length + entries long is allocated and each pointer-sized block of that + memory is populated with a scopedAllocCString() conversion of the + (""+value) of each element. Returns a pointer to the start of the + list, suitable for passing as the 2nd argument to a C-style + main() function. + + Throws if list.length is falsy or scopedAllocPush() is not active. + */ + target.scopedAllocMainArgv = (list)=>__allocMainArgv(true, list); + + /** + Identical to scopedAllocMainArgv() but uses alloc() instead of + scopedAllocMainArgv + */ + target.allocMainArgv = (list)=>__allocMainArgv(false, list); + /** Wraps function call func() in a scopedAllocPush() and scopedAllocPop() block, such that all calls to scopedAlloc() and friends from within that call will have their memory freed automatically when func() returns. If func throws or propagates an exception, the scope is still popped, otherwise it returns the result of calling func(). */ target.scopedAllocCall = function(func){ - this.scopedAllocPush(); - try{ return func() } finally{ this.scopedAllocPop() } - }.bind(target); + target.scopedAllocPush(); + try{ return func() } finally{ target.scopedAllocPop() } + }; /** Internal impl for allocPtr() and scopedAllocPtr(). */ - const __allocPtr = function(howMany, method){ - __affirmAlloc(this, method); - let m = this[method](howMany * ptrSizeof); - this.setMemValue(m, 0, ptrIR) + const __allocPtr = function(howMany, safePtrSize, method){ + __affirmAlloc(target, method); + const pIr = safePtrSize ? 'i64' : ptrIR; + let m = target[method](howMany * (safePtrSize ? 8 : ptrSizeof)); + target.setMemValue(m, 0, pIr) if(1===howMany){ return m; } const a = [m]; for(let i = 1; i < howMany; ++i){ - m += ptrSizeof; + m += (safePtrSize ? 8 : ptrSizeof); a[i] = m; - this.setMemValue(m, 0, ptrIR); + target.setMemValue(m, 0, pIr); } return a; - }.bind(target); + }; /** - Allocates a single chunk of memory capable of holding `howMany` - pointers and zeroes them out. If `howMany` is 1 then the memory - chunk is returned directly, else an array of pointer addresses is - returned, which can optionally be used with "destructuring - assignment" like this: + Allocates one or more pointers as a single chunk of memory and + zeroes them out. 
+ + The first argument is the number of pointers to allocate. The + second specifies whether they should use a "safe" pointer size (8 + bytes) or whether they may use the default pointer size + (typically 4 but also possibly 8). + + How the result is returned depends on its first argument: if + passed 1, it returns the allocated memory address. If passed more + than one then an array of pointer addresses is returned, which + can optionally be used with "destructuring assignment" like this: ``` const [p1, p2, p3] = allocPtr(3); ``` ACHTUNG: when freeing the memory, pass only the _first_ result value to dealloc(). The others are part of the same memory chunk and must not be freed separately. + + The reason for the 2nd argument is.. + + When one of the returned pointers will refer to a 64-bit value, + e.g. a double or int64, an that value must be written or fetched, + e.g. using setMemValue() or getMemValue(), it is important that + the pointer in question be aligned to an 8-byte boundary or else + it will not be fetched or written properly and will corrupt or + read neighboring memory. It is only safe to pass false when the + client code is certain that it will only get/fetch 4-byte values + (or smaller). */ - target.allocPtr = (howMany=1)=>__allocPtr(howMany, 'alloc'); + target.allocPtr = + (howMany=1, safePtrSize=true)=>__allocPtr(howMany, safePtrSize, 'alloc'); /** Identical to allocPtr() except that it allocates using scopedAlloc() instead of alloc(). */ - target.scopedAllocPtr = (howMany=1)=>__allocPtr(howMany, 'scopedAlloc'); + target.scopedAllocPtr = + (howMany=1, safePtrSize=true)=>__allocPtr(howMany, safePtrSize, 'scopedAlloc'); /** If target.exports[name] exists, it is returned, else an exception is thrown. */ @@ -1068,31 +1200,31 @@ const __argcMismatch = (f,n)=>toss(f+"() requires",n,"argument(s)."); /** Looks up a WASM-exported function named fname from - target.exports. If found, it is called, passed all remaining + target.exports. If found, it is called, passed all remaining arguments, and its return value is returned to xCall's caller. If not found, an exception is thrown. This function does no - conversion of argument or return types, but see xWrap() - and xCallWrapped() for variants which do. + conversion of argument or return types, but see xWrap() and + xCallWrapped() for variants which do. As a special case, if passed only 1 argument after the name and that argument in an Array, that array's entries become the function arguments. (This is not an ambiguous case because it's not legal to pass an Array object to a WASM function.) */ target.xCall = function(fname, ...args){ - const f = this.xGet(fname); + const f = target.xGet(fname); if(!(f instanceof Function)) toss("Exported symbol",fname,"is not a function."); if(f.length!==args.length) __argcMismatch(fname,f.length) /* This is arguably over-pedantic but we want to help clients keep from shooting themselves in the foot when calling C APIs. */; return (2===arguments.length && Array.isArray(arguments[1])) ? f.apply(null, arguments[1]) : f.apply(null, args); - }.bind(target); + }; /** State for use with xWrap() */ cache.xWrap = Object.create(null); @@ -1100,25 +1232,32 @@ /** Map of type names to argument conversion functions. */ cache.xWrap.convert.arg = Object.create(null); /** Map of type names to return result conversion functions. 
*/ cache.xWrap.convert.result = Object.create(null); - xcv.arg.i64 = (i)=>BigInt(i); + if(target.bigIntEnabled){ + xcv.arg.i64 = (i)=>BigInt(i); + } xcv.arg.i32 = (i)=>(i | 0); xcv.arg.i16 = (i)=>((i | 0) & 0xFFFF); xcv.arg.i8 = (i)=>((i | 0) & 0xFF); xcv.arg.f32 = xcv.arg.float = (i)=>Number(i).valueOf(); xcv.arg.f64 = xcv.arg.double = xcv.arg.f32; xcv.arg.int = xcv.arg.i32; - xcv.result['*'] = xcv.result['pointer'] = xcv.arg[ptrIR]; - - for(const t of ['i8', 'i16', 'i32', 'int', 'i64', - 'f32', 'float', 'f64', 'double']){ - xcv.arg[t+'*'] = xcv.result[t+'*'] = xcv.arg[ptrIR] - xcv.result[t] = xcv.arg[t] || toss("Missing arg converter:",t); - } - xcv.arg['**'] = xcv.arg[ptrIR]; + xcv.result['*'] = xcv.result['pointer'] = xcv.arg['**'] = xcv.arg[ptrIR]; + xcv.result['number'] = (v)=>Number(v); + + { /* Copy certain xcv.arg[...] handlers to xcv.result[...] and + add pointer-style variants of them. */ + const copyToResult = ['i8', 'i16', 'i32', 'int', + 'f32', 'float', 'f64', 'double']; + if(target.bigIntEnabled) copyToResult.push('i64'); + for(const t of copyToResult){ + xcv.arg[t+'*'] = xcv.result[t+'*'] = xcv.arg[ptrIR]; + xcv.result[t] = xcv.arg[t] || toss("Missing arg converter:",t); + } + } /** In order for args of type string to work in various contexts in the sqlite3 API, we need to pass them on as, variably, a C-string or a pointer value. Thus for ARGs of type 'string' and @@ -1132,21 +1271,22 @@ TODO? Permit an Int8Array/Uint8Array and convert it to a string? Would that be too much magic concentrated in one place, ready to backfire? */ - xcv.arg.string = xcv.arg['pointer'] = xcv.arg['*'] = function(v){ - if('string'===typeof v) return target.scopedAllocCString(v); - return v ? xcv.arg[ptrIR](v) : null; - }; - xcv.result.string = (i)=>target.cstringToJs(i); - xcv.result['string:free'] = function(i){ + xcv.arg.string = xcv.arg.utf8 = xcv.arg['pointer'] = xcv.arg['*'] + = function(v){ + if('string'===typeof v) return target.scopedAllocCString(v); + return v ? xcv.arg[ptrIR](v) : null; + }; + xcv.result.string = xcv.result.utf8 = (i)=>target.cstringToJs(i); + xcv.result['string:free'] = xcv.result['utf8:free'] = (i)=>{ try { return i ? target.cstringToJs(i) : null } finally{ target.dealloc(i) } }; xcv.result.json = (i)=>JSON.parse(target.cstringToJs(i)); - xcv.result['json:free'] = function(i){ + xcv.result['json:free'] = (i)=>{ try{ return i ? JSON.parse(target.cstringToJs(i)) : null } finally{ target.dealloc(i) } } xcv.result['void'] = (v)=>undefined; xcv.result['null'] = (v)=>v; @@ -1162,23 +1302,23 @@ the value will always be treated like -1 (which has a useful case in the sqlite3 bindings). */ xcv.arg['func-ptr'] = function(v){ if(!(v instanceof Function)) return xcv.arg[ptrIR]; - const f = this.jsFuncToWasm(v, WHAT_SIGNATURE); - }.bind(target); + const f = target.jsFuncToWasm(v, WHAT_SIGNATURE); + }; } - const __xArgAdapter = + const __xArgAdapterCheck = (t)=>xcv.arg[t] || toss("Argument adapter not found:",t); - const __xResultAdapter = + const __xResultAdapterCheck = (t)=>xcv.result[t] || toss("Result adapter not found:",t); - cache.xWrap.convertArg = (t,v)=>__xArgAdapter(t)(v); + cache.xWrap.convertArg = (t,v)=>__xArgAdapterCheck(t)(v); cache.xWrap.convertResult = - (t,v)=>(null===t ? v : (t ? __xResultAdapter(t)(v) : undefined)); + (t,v)=>(null===t ? v : (t ? __xResultAdapterCheck(t)(v) : undefined)); /** Creates a wrapper for the WASM-exported function fname. 
Uses xGet() to fetch the exported function (which throws on error) and returns either that function or a wrapper for that @@ -1237,47 +1377,67 @@ - `**` (args): is simply a descriptive alias for the WASM pointer type. It's primarily intended to mark output-pointer arguments. - `i64` (args and results): passes the value to BigInt() to - convert it to an int64. + convert it to an int64. Only available if bigIntEnabled is + true. - `f32` (`float`), `f64` (`double`) (args and results): pass their argument to Number(). i.e. the adaptor does not currently distinguish between the two types of floating-point numbers. + - `number` (results): converts the result to a JS Number using + Number(theValue).valueOf(). Note that this is for result + conversions only, as it's not possible to generically know + which type of number to convert arguments to. + Non-numeric conversions include: - - `string` (args): has two different semantics in order to - accommodate various uses of certain C APIs (e.g. output-style - strings)... - - - If the arg is a string, it creates a _temporary_ C-string to - pass to the exported function, cleaning it up before the - wrapper returns. If a long-lived C-string pointer is - required, that requires client-side code to create the - string, then pass its pointer to the function. + - `string` or `utf8` (args): has two different semantics in order + to accommodate various uses of certain C APIs + (e.g. output-style strings)... + + - If the arg is a string, it creates a _temporary_ + UTF-8-encoded C-string to pass to the exported function, + cleaning it up before the wrapper returns. If a long-lived + C-string pointer is required, that requires client-side code + to create the string, then pass its pointer to the function. - Else the arg is assumed to be a pointer to a string the client has already allocated and it's passed on as a WASM pointer. - - `string` (results): treats the result value as a const C-string, - copies it to a JS string, and returns that JS string. - - - `string:free` (results): treats the result value as a non-const - C-string, ownership of which has just been transfered to the - caller. It copies the C-string to a JS string, frees the - C-string, and returns the JS string. If such a result value is - NULL, the JS result is `null`. + - `string` or `utf8` (results): treats the result value as a + const C-string, encoded as UTF-8, copies it to a JS string, + and returns that JS string. + + - `string:free` or `utf8:free) (results): treats the result value + as a non-const UTF-8 C-string, ownership of which has just been + transfered to the caller. It copies the C-string to a JS + string, frees the C-string, and returns the JS string. If such + a result value is NULL, the JS result is `null`. Achtung: when + using an API which returns results from a specific allocator, + e.g. `my_malloc()`, this conversion _is not legal_. Instead, an + equivalent conversion which uses the appropriate deallocator is + required. For example: + +```js + target.xWrap.resultAdaptor('string:my_free',(i)=>{ + try { return i ? target.cstringToJs(i) : null } + finally{ target.exports.my_free(i) } + }; +``` - `json` (results): treats the result as a const C-string and returns the result of passing the converted-to-JS string to JSON.parse(). Returns `null` if the C-string is a NULL pointer. - `json:free` (results): works exactly like `string:free` but - returns the same thing as the `json` adapter. + returns the same thing as the `json` adapter. 
Note the + warning in `string:free` regarding maching allocators and + deallocators. The type names for results and arguments are validated when xWrap() is called and any unknown names will trigger an exception. @@ -1308,38 +1468,36 @@ */ target.xWrap = function(fname, resultType, ...argTypes){ if(3===arguments.length && Array.isArray(arguments[2])){ argTypes = arguments[2]; } - const xf = this.xGet(fname); - if(argTypes.length!==xf.length) __argcMismatch(fname, xf.length) + const xf = target.xGet(fname); + if(argTypes.length!==xf.length) __argcMismatch(fname, xf.length); if((null===resultType) && 0===xf.length){ /* Func taking no args with an as-is return. We don't need a wrapper. */ return xf; } /*Verify the arg type conversions are valid...*/; - if(undefined!==resultType && null!==resultType) __xResultAdapter(resultType); - argTypes.forEach(__xArgAdapter) + if(undefined!==resultType && null!==resultType) __xResultAdapterCheck(resultType); + argTypes.forEach(__xArgAdapterCheck); if(0===xf.length){ // No args to convert, so we can create a simpler wrapper... - return function(){ - return (arguments.length - ? __argcMismatch(fname, xf.length) - : cache.xWrap.convertResult(resultType, xf.call(null))); - }; + return (...args)=>(args.length + ? __argcMismatch(fname, xf.length) + : cache.xWrap.convertResult(resultType, xf.call(null))); } return function(...args){ if(args.length!==xf.length) __argcMismatch(fname, xf.length); - const scope = this.scopedAllocPush(); + const scope = target.scopedAllocPush(); try{ const rc = xf.apply(null,args.map((v,i)=>cache.xWrap.convertArg(argTypes[i], v))); return cache.xWrap.convertResult(resultType, rc); }finally{ - this.scopedAllocPop(scope); + target.scopedAllocPop(scope); } - }.bind(this); - }.bind(target)/*xWrap()*/; + }; + }/*xWrap()*/; /** Internal impl for xWrap.resultAdapter() and argAdaptor(). */ const __xAdapter = function(func, argc, typeName, adapter, modeName, xcvPart){ if('string'===typeof typeName){ if(1===argc) return xcvPart[typeName]; @@ -1426,26 +1584,25 @@ exported function to call. The 2nd its the name of its result type, as documented for xWrap(). The 3rd is an array of argument type name, as documented for xWrap() (use a falsy value or an empty array for nullary functions). The 4th+ arguments are arguments for the call, with the special case that if the 4th - argument is an array, it is used as the arguments for the call - (again, falsy or an empty array for nullary functions). Returns - the converted result of the call. + argument is an array, it is used as the arguments for the + call. Returns the converted result of the call. - This is just a thin wrapp around xWrap(). If the given function + This is just a thin wrapper around xWrap(). If the given function is to be called more than once, it's more efficient to use xWrap() to create a wrapper, then to call that wrapper as many times as needed. For one-shot calls, however, this variant is arguably more efficient because it will hypothetically free the wrapper function quickly. */ target.xCallWrapped = function(fname, resultType, argTypes, ...args){ if(Array.isArray(arguments[3])) args = arguments[3]; - return this.xWrap(fname, resultType, argTypes||[]).apply(null, args||[]); - }.bind(target); - + return target.xWrap(fname, resultType, argTypes||[]).apply(null, args||[]); + }; + return target; }; /** yawl (Yet Another Wasm Loader) provides very basic wasm loader. @@ -1453,15 +1610,15 @@ - `uri`: required URI of the WASM file to load. - `onload(loadResult,config)`: optional callback. 
The first argument is the result object from - WebAssembly.instanitate[Streaming](). The 2nd is the config + WebAssembly.instantiate[Streaming](). The 2nd is the config object passed to this function. Described in more detail below. - `imports`: optional imports object for - WebAssembly.instantiate[Streaming](). The default is am empty set + WebAssembly.instantiate[Streaming](). The default is an empty set of imports. If the module requires any imports, this object must include them. - `wasmUtilTarget`: optional object suitable for passing to WhWasmUtilInstaller(). If set, it gets passed to that function @@ -1520,14 +1677,15 @@ tgt.memory = (config.imports && config.imports.env && config.imports.env.memory) || toss("Missing 'memory' object!"); } if(!tgt.alloc && arg.instance.exports.malloc){ + const exports = arg.instance.exports; tgt.alloc = function(n){ - return this(n) || toss("Allocation of",n,"bytes failed."); - }.bind(arg.instance.exports.malloc); - tgt.dealloc = function(m){this(m)}.bind(arg.instance.exports.free); + return exports.malloc(n) || toss("Allocation of",n,"bytes failed."); + }; + tgt.dealloc = function(m){exports.free(m)}; } wui(tgt); } if(config.onload) config.onload(arg,config); return arg /* for any then() handler attached to ADDED ext/wasm/demo-123-worker.html Index: ext/wasm/demo-123-worker.html ================================================================== --- /dev/null +++ ext/wasm/demo-123-worker.html @@ -0,0 +1,44 @@ + + + + + + + Hello, sqlite3 + + + +

1-2-sqlite3 worker demo
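Conceptually this page just runs demo-123.js in a Worker and renders
the log messages it posts back; a hypothetical sketch of that wiring
(the directory name and the rendering logic are assumptions):

```
const w = new Worker('demo-123.js?sqlite3.dir=jswasm');
w.onmessage = function(ev){
  // demo-123.js posts {type:'log', payload:{cssClass, args}} messages
  console.log(ev.data.payload.args.join(' '));
};
```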

+ + + ADDED ext/wasm/demo-123.html Index: ext/wasm/demo-123.html ================================================================== --- /dev/null +++ ext/wasm/demo-123.html @@ -0,0 +1,24 @@ + + + + + + + Hello, sqlite3 + + + +

1-2-sqlite3 demo

+ + + + ADDED ext/wasm/demo-123.js Index: ext/wasm/demo-123.js ================================================================== --- /dev/null +++ ext/wasm/demo-123.js @@ -0,0 +1,289 @@ +/* + 2022-09-19 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + A basic demonstration of the SQLite3 "OO#1" API. +*/ +'use strict'; +(function(){ + /** + Set up our output channel differently depending + on whether we are running in a worker thread or + the main (UI) thread. + */ + let logHtml; + if(self.window === self /* UI thread */){ + console.log("Running demo from main UI thread."); + logHtml = function(cssClass,...args){ + const ln = document.createElement('div'); + if(cssClass) ln.classList.add(cssClass); + ln.append(document.createTextNode(args.join(' '))); + document.body.append(ln); + }; + }else{ /* Worker thread */ + console.log("Running demo from Worker thread."); + logHtml = function(cssClass,...args){ + postMessage({ + type:'log', + payload:{cssClass, args} + }); + }; + } + const log = (...args)=>logHtml('',...args); + const warn = (...args)=>logHtml('warning',...args); + const error = (...args)=>logHtml('error',...args); + + const demo1 = function(sqlite3){ + const capi = sqlite3.capi/*C-style API*/, + oo = sqlite3.oo1/*high-level OO API*/; + log("sqlite3 version",capi.sqlite3_libversion(), capi.sqlite3_sourceid()); + const db = new oo.DB("/mydb.sqlite3",'ct'); + log("transient db =",db.filename); + /** + Never(!) rely on garbage collection to clean up DBs and + (especially) prepared statements. Always wrap their lifetimes + in a try/finally construct, as demonstrated below. By and + large, client code can entirely avoid lifetime-related + complications of prepared statement objects by using the + DB.exec() method for SQL execution. + */ + try { + log("Create a table..."); + db.exec("CREATE TABLE IF NOT EXISTS t(a,b)"); + //Equivalent: + db.exec({ + sql:"CREATE TABLE IF NOT EXISTS t(a,b)" + // ... numerous other options ... + }); + // SQL can be either a string or a byte array + // or an array of strings which get concatenated + // together as-is (so be sure to end each statement + // with a semicolon). + + log("Insert some data using exec()..."); + let i; + for( i = 20; i <= 25; ++i ){ + db.exec({ + sql: "insert into t(a,b) values (?,?)", + // bind by parameter index... + bind: [i, i*2] + }); + db.exec({ + sql: "insert into t(a,b) values ($a,$b)", + // bind by parameter name... + bind: {$a: i * 10, $b: i * 20} + }); + } + + log("Insert using a prepared statement..."); + let q = db.prepare([ + // SQL may be a string or array of strings + // (concatenated w/o separators). + "insert into t(a,b) ", + "values(?,?)" + ]); + try { + for( i = 100; i < 103; ++i ){ + q.bind( [i, i*2] ).step(); + q.reset(); + } + // Equivalent... 
+ for( i = 103; i <= 105; ++i ){ + q.bind(1, i).bind(2, i*2).stepReset(); + } + }finally{ + q.finalize(); + } + + log("Query data with exec() using rowMode 'array'..."); + db.exec({ + sql: "select a from t order by a limit 3", + rowMode: 'array', // 'array' (default), 'object', or 'stmt' + callback: function(row){ + log("row ",++this.counter,"=",row); + }.bind({counter: 0}) + }); + + log("Query data with exec() using rowMode 'object'..."); + db.exec({ + sql: "select a as aa, b as bb from t order by aa limit 3", + rowMode: 'object', + callback: function(row){ + log("row ",++this.counter,"=",JSON.stringify(row)); + }.bind({counter: 0}) + }); + + log("Query data with exec() using rowMode 'stmt'..."); + db.exec({ + sql: "select a from t order by a limit 3", + rowMode: 'stmt', + callback: function(row){ + log("row ",++this.counter,"get(0) =",row.get(0)); + }.bind({counter: 0}) + }); + + log("Query data with exec() using rowMode INTEGER (result column index)..."); + db.exec({ + sql: "select a, b from t order by a limit 3", + rowMode: 1, // === result column 1 + callback: function(row){ + log("row ",++this.counter,"b =",row); + }.bind({counter: 0}) + }); + + log("Query data with exec() using rowMode $COLNAME (result column name)..."); + db.exec({ + sql: "select a a, b from t order by a limit 3", + rowMode: '$a', + callback: function(value){ + log("row ",++this.counter,"a =",value); + }.bind({counter: 0}) + }); + + log("Query data with exec() without a callback..."); + let resultRows = []; + db.exec({ + sql: "select a, b from t order by a limit 3", + rowMode: 'object', + resultRows: resultRows + }); + log("Result rows:",JSON.stringify(resultRows,undefined,2)); + + log("Create a scalar UDF..."); + db.createFunction({ + name: 'twice', + xFunc: function(pCx, arg){ // note the call arg count + return arg + arg; + } + }); + log("Run scalar UDF and collect result column names..."); + let columnNames = []; + db.exec({ + sql: "select a, twice(a), twice(''||a) from t order by a desc limit 3", + columnNames: columnNames, + rowMode: 'stmt', + callback: function(row){ + log("a =",row.get(0), "twice(a) =", row.get(1), + "twice(''||a) =",row.get(2)); + } + }); + log("Result column names:",columnNames); + + try{ + log("The following use of the twice() UDF will", + "fail because of incorrect arg count..."); + db.exec("select twice(1,2,3)"); + }catch(e){ + warn("Got expected exception:",e.message); + } + + try { + db.transaction( function(D) { + D.exec("delete from t"); + log("In transaction: count(*) from t =",db.selectValue("select count(*) from t")); + throw new sqlite3.SQLite3Error("Demonstrating transaction() rollback"); + }); + }catch(e){ + if(e instanceof sqlite3.SQLite3Error){ + log("Got expected exception from db.transaction():",e.message); + log("count(*) from t =",db.selectValue("select count(*) from t")); + }else{ + throw e; + } + } + + try { + db.savepoint( function(D) { + D.exec("delete from t"); + log("In savepoint: count(*) from t =",db.selectValue("select count(*) from t")); + D.savepoint(function(DD){ + const rows = []; + DD.exec({ + sql: ["insert into t(a,b) values(99,100);", + "select count(*) from t"], + rowMode: 0, + resultRows: rows + }); + log("In nested savepoint. 
Row count =",rows[0]); + throw new sqlite3.SQLite3Error("Demonstrating nested savepoint() rollback"); + }) + }); + }catch(e){ + if(e instanceof sqlite3.SQLite3Error){ + log("Got expected exception from nested db.savepoint():",e.message); + log("count(*) from t =",db.selectValue("select count(*) from t")); + }else{ + throw e; + } + } + }finally{ + db.close(); + } + + log("That's all, folks!"); + + /** + Some of the features of the OO API not demonstrated above... + + - get change count (total or statement-local, 32- or 64-bit) + - get a DB's file name + + Misc. Stmt features: + + - Various forms of bind() + - clearBindings() + - reset() + - Various forms of step() + - Variants of get() for explicit type treatment/conversion, + e.g. getInt(), getFloat(), getBlob(), getJSON() + - getColumnName(ndx), getColumnNames() + - getParamIndex(name) + */ + }/*demo1()*/; + + log("Loading and initializing sqlite3 module..."); + if(self.window!==self) /*worker thread*/{ + /* + If sqlite3.js is in a directory other than this script, in order + to get sqlite3.js to resolve sqlite3.wasm properly, we have to + explicitly tell it where sqlite3.js is being loaded from. We do + that by passing the `sqlite3.dir=theDirName` URL argument to + _this_ script. That URL argument will be seen by the JS/WASM + loader and it will adjust the sqlite3.wasm path accordingly. If + sqlite3.js/.wasm are in the same directory as this script then + that's not needed. + + URL arguments passed as part of the filename via importScripts() + are simply lost, and such scripts see the self.location of + _this_ script. + */ + let sqlite3Js = 'sqlite3.js'; + const urlParams = new URL(self.location.href).searchParams; + if(urlParams.has('sqlite3.dir')){ + sqlite3Js = urlParams.get('sqlite3.dir') + '/' + sqlite3Js; + } + importScripts(sqlite3Js); + } + self.sqlite3InitModule({ + // We can redirect any stdout/stderr from the module + // like so... + print: log, + printErr: error + }).then(function(sqlite3){ + //console.log('sqlite3 =',sqlite3); + log("Done initializing. Running demo..."); + try { + demo1(sqlite3); + }catch(e){ + error("Exception:",e.message); + } + }); +})(); ADDED ext/wasm/demo-jsstorage.html Index: ext/wasm/demo-jsstorage.html ================================================================== --- /dev/null +++ ext/wasm/demo-jsstorage.html @@ -0,0 +1,49 @@ + + + + + + + + + sqlite3-kvvfs.js tests + + +
sqlite3-kvvfs.js tests
+ +
+
+
Initializing app...
+
+ On a slow internet connection this may take a moment. If this + message displays for "a long time", initialization may have + failed and the JavaScript console may contain clues as to why.
+
+
Downloading...
+
+ +
+
+ Options +
+ + + + + +
+
+
+ + + + + + ADDED ext/wasm/demo-jsstorage.js Index: ext/wasm/demo-jsstorage.js ================================================================== --- /dev/null +++ ext/wasm/demo-jsstorage.js @@ -0,0 +1,114 @@ +/* + 2022-09-12 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + A basic test script for sqlite3.wasm with kvvfs support. This file + must be run in main JS thread and sqlite3.js must have been loaded + before it. +*/ +'use strict'; +(function(){ + const T = self.SqliteTestUtil; + const toss = function(...args){throw new Error(args.join(' '))}; + const debug = console.debug.bind(console); + const eOutput = document.querySelector('#test-output'); + const logC = console.log.bind(console) + const logE = function(domElement){ + eOutput.append(domElement); + }; + const logHtml = function(cssClass,...args){ + const ln = document.createElement('div'); + if(cssClass) ln.classList.add(cssClass); + ln.append(document.createTextNode(args.join(' '))); + logE(ln); + } + const log = function(...args){ + logC(...args); + logHtml('',...args); + }; + const warn = function(...args){ + logHtml('warning',...args); + }; + const error = function(...args){ + logHtml('error',...args); + }; + + const runTests = function(sqlite3){ + const capi = sqlite3.capi, + oo = sqlite3.oo1, + wasm = sqlite3.wasm; + log("Loaded module:",capi.sqlite3_libversion(), capi.sqlite3_sourceid()); + T.assert( 0 !== capi.sqlite3_vfs_find(null) ); + if(!capi.sqlite3_vfs_find('kvvfs')){ + error("This build is not kvvfs-capable."); + return; + } + + const dbStorage = 0 ? 'session' : 'local'; + const theStore = 's'===dbStorage[0] ? sessionStorage : localStorage; + const db = new oo.JsStorageDb( dbStorage ); + // Or: oo.DB(dbStorage, 'c', 'kvvfs') + log("db.storageSize():",db.storageSize()); + document.querySelector('#btn-clear-storage').addEventListener('click',function(){ + const sz = db.clearStorage(); + log("kvvfs",db.filename+"Storage cleared:",sz,"entries."); + }); + document.querySelector('#btn-clear-log').addEventListener('click',function(){ + eOutput.innerText = ''; + }); + document.querySelector('#btn-init-db').addEventListener('click',function(){ + try{ + const saveSql = []; + db.exec({ + sql: ["drop table if exists t;", + "create table if not exists t(a);", + "insert into t(a) values(?),(?),(?)"], + bind: [performance.now() >> 0, + (performance.now() * 2) >> 0, + (performance.now() / 2) >> 0], + saveSql + }); + console.log("saveSql =",saveSql,theStore); + log("DB (re)initialized."); + }catch(e){ + error(e.message); + } + }); + const btnSelect = document.querySelector('#btn-select1'); + btnSelect.addEventListener('click',function(){ + log("DB rows:"); + try{ + db.exec({ + sql: "select * from t order by a", + rowMode: 0, + callback: (v)=>log(v) + }); + }catch(e){ + error(e.message); + } + }); + document.querySelector('#btn-storage-size').addEventListener('click',function(){ + log("size.storageSize(",dbStorage,") says", db.storageSize(), + "bytes"); + }); + log("Storage backend:",db.filename); + if(0===db.selectValue('select count(*) from sqlite_master')){ + log("DB is empty. Use the init button to populate it."); + }else{ + log("DB contains data from a previous session. 
Use the Clear Ctorage button to delete it."); + btnSelect.click(); + } + }; + + sqlite3InitModule(self.sqlite3TestModule).then((sqlite3)=>{ + runTests(sqlite3); + }); +})(); ADDED ext/wasm/demo-worker1-promiser.html Index: ext/wasm/demo-worker1-promiser.html ================================================================== --- /dev/null +++ ext/wasm/demo-worker1-promiser.html @@ -0,0 +1,34 @@ + + + + + + + + + worker-promise tests + + +
worker-promise tests
+ +
+
+
Initializing app...
+
+ On a slow internet connection this may take a moment. If this + message displays for "a long time", initialization may have + failed and the JavaScript console may contain clues as to why. +
+
+
Downloading...
+
+ +
+
Most stuff on this page happens in the dev console.
+
+
+ + + + + ADDED ext/wasm/demo-worker1-promiser.js Index: ext/wasm/demo-worker1-promiser.js ================================================================== --- /dev/null +++ ext/wasm/demo-worker1-promiser.js @@ -0,0 +1,270 @@ +/* + 2022-08-23 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + Demonstration of the sqlite3 Worker API #1 Promiser: a Promise-based + proxy for for the sqlite3 Worker #1 API. +*/ +'use strict'; +(function(){ + const T = self.SqliteTestUtil; + const eOutput = document.querySelector('#test-output'); + const warn = console.warn.bind(console); + const error = console.error.bind(console); + const log = console.log.bind(console); + const logHtml = async function(cssClass,...args){ + log.apply(this, args); + const ln = document.createElement('div'); + if(cssClass) ln.classList.add(cssClass); + ln.append(document.createTextNode(args.join(' '))); + eOutput.append(ln); + }; + + let startTime; + const testCount = async ()=>{ + logHtml("","Total test count:",T.counter+". Total time =",(performance.now() - startTime),"ms"); + }; + + //why is this triggered even when we catch() a Promise? + //window.addEventListener('unhandledrejection', function(event) { + // warn('unhandledrejection',event); + //}); + + const promiserConfig = { + worker: ()=>{ + const w = new Worker("jswasm/sqlite3-worker1.js"); + w.onerror = (event)=>error("worker.onerror",event); + return w; + }, + debug: 1 ? undefined : (...args)=>console.debug('worker debug',...args), + onunhandled: function(ev){ + error("Unhandled worker message:",ev.data); + }, + onready: function(){ + self.sqlite3TestModule.setStatus(null)/*hide the HTML-side is-loading spinner*/; + runTests(); + }, + onerror: function(ev){ + error("worker1 error:",ev); + } + }; + const workerPromise = self.sqlite3Worker1Promiser(promiserConfig); + delete self.sqlite3Worker1Promiser; + + const wtest = async function(msgType, msgArgs, callback){ + if(2===arguments.length && 'function'===typeof msgArgs){ + callback = msgArgs; + msgArgs = undefined; + } + const p = workerPromise({type: msgType, args:msgArgs}); + return callback ? 
p.then(callback).finally(testCount) : p; + }; + + const runTests = async function(){ + const dbFilename = '/testing2.sqlite3'; + startTime = performance.now(); + + let sqConfig; + await wtest('config-get', (ev)=>{ + const r = ev.result; + log('sqlite3.config subset:', r); + T.assert('boolean' === typeof r.bigIntEnabled) + .assert('string'===typeof r.wasmfsOpfsDir) + .assert('boolean' === typeof r.wasmfsOpfsEnabled); + sqConfig = r; + }); + logHtml('', + "Sending 'open' message and waiting for its response before continuing..."); + + await wtest('open', { + filename: dbFilename, + simulateError: 0 /* if true, fail the 'open' */, + }, function(ev){ + const r = ev.result; + log("then open result",r); + T.assert(ev.dbId === r.dbId) + .assert(ev.messageId) + .assert('string' === typeof r.vfs); + promiserConfig.dbId = ev.dbId; + }).then(runTests2); + }; + + const runTests2 = async function(){ + const mustNotReach = ()=>toss("This is not supposed to be reached."); + + await wtest('exec',{ + sql: ["create table t(a,b)", + "insert into t(a,b) values(1,2),(3,4),(5,6)" + ].join(';'), + multi: true, + resultRows: [], columnNames: [] + }, function(ev){ + ev = ev.result; + T.assert(0===ev.resultRows.length) + .assert(0===ev.columnNames.length); + }); + + await wtest('exec',{ + sql: 'select a a, b b from t order by a', + resultRows: [], columnNames: [], + }, function(ev){ + ev = ev.result; + T.assert(3===ev.resultRows.length) + .assert(1===ev.resultRows[0][0]) + .assert(6===ev.resultRows[2][1]) + .assert(2===ev.columnNames.length) + .assert('b'===ev.columnNames[1]); + }); + + await wtest('exec',{ + sql: 'select a a, b b from t order by a', + resultRows: [], columnNames: [], + rowMode: 'object' + }, function(ev){ + ev = ev.result; + T.assert(3===ev.resultRows.length) + .assert(1===ev.resultRows[0].a) + .assert(6===ev.resultRows[2].b) + }); + + await wtest( + 'exec', + {sql:'intentional_error'}, + mustNotReach + ).catch((e)=>{ + warn("Intentional error:",e); + }); + + await wtest('exec',{ + sql:'select 1 union all select 3', + resultRows: [], + }, function(ev){ + ev = ev.result; + T.assert(2 === ev.resultRows.length) + .assert(1 === ev.resultRows[0][0]) + .assert(3 === ev.resultRows[1][0]); + }); + + const resultRowTest1 = function f(ev){ + if(undefined === f.counter) f.counter = 0; + if(null === ev.rowNumber){ + /* End of result set. */ + T.assert(undefined === ev.row) + .assert(2===ev.columnNames.length) + .assert('a'===ev.columnNames[0]) + .assert('B'===ev.columnNames[1]); + }else{ + T.assert(ev.rowNumber > 0); + ++f.counter; + } + log("exec() result row:",ev); + T.assert(null === ev.rowNumber || 'number' === typeof ev.row.B); + }; + await wtest('exec',{ + sql: 'select a a, b B from t order by a limit 3', + callback: resultRowTest1, + rowMode: 'object' + }, function(ev){ + T.assert(3===resultRowTest1.counter); + resultRowTest1.counter = 0; + }); + + const resultRowTest2 = function f(ev){ + if(null === ev.rowNumber){ + /* End of result set. 
*/ + T.assert(undefined === ev.row) + .assert(1===ev.columnNames.length) + .assert('a'===ev.columnNames[0]) + }else{ + T.assert(ev.rowNumber > 0); + f.counter = ev.rowNumber; + } + log("exec() result row:",ev); + T.assert(null === ev.rowNumber || 'number' === typeof ev.row); + }; + await wtest('exec',{ + sql: 'select a a from t limit 3', + callback: resultRowTest2, + rowMode: 0 + }, function(ev){ + T.assert(3===resultRowTest2.counter); + }); + + const resultRowTest3 = function f(ev){ + if(null === ev.rowNumber){ + T.assert(3===ev.columnNames.length) + .assert('foo'===ev.columnNames[0]) + .assert('bar'===ev.columnNames[1]) + .assert('baz'===ev.columnNames[2]); + }else{ + f.counter = ev.rowNumber; + T.assert('number' === typeof ev.row); + } + }; + await wtest('exec',{ + sql: "select 'foo' foo, a bar, 'baz' baz from t limit 2", + callback: resultRowTest3, + columnNames: [], + rowMode: ':bar' + }, function(ev){ + log("exec() result row:",ev); + T.assert(2===resultRowTest3.counter); + }); + + await wtest('exec',{ + multi: true, + sql:[ + 'pragma foreign_keys=0;', + // ^^^ arbitrary query with no result columns + 'select a, b from t order by a desc; select a from t;' + // multi-exec only honors results from the first + // statement with result columns (regardless of whether) + // it has any rows). + ], + rowMode: 1, + resultRows: [] + },function(ev){ + const rows = ev.result.resultRows; + T.assert(3===rows.length). + assert(6===rows[0]); + }); + + await wtest('exec',{sql: 'delete from t where a>3'}); + + await wtest('exec',{ + sql: 'select count(a) from t', + resultRows: [] + },function(ev){ + ev = ev.result; + T.assert(1===ev.resultRows.length) + .assert(2===ev.resultRows[0][0]); + }); + + await wtest('export', function(ev){ + ev = ev.result; + T.assert('string' === typeof ev.filename) + .assert(ev.byteArray instanceof Uint8Array) + .assert(ev.byteArray.length > 1024) + .assert('application/x-sqlite3' === ev.mimetype); + }); + + /***** close() tests must come last. *****/ + await wtest('close',{},function(ev){ + T.assert('string' === typeof ev.result.filename); + }); + + await wtest('close', (ev)=>{ + T.assert(undefined === ev.result.filename); + }).finally(()=>logHtml('',"That's all, folks!")); + }/*runTests2()*/; + + log("Init complete, but async init bits may still be running."); +})(); ADDED ext/wasm/demo-worker1.html Index: ext/wasm/demo-worker1.html ================================================================== --- /dev/null +++ ext/wasm/demo-worker1.html @@ -0,0 +1,34 @@ + + + + + + + + + + sqlite3-worker1.js tests + + +
sqlite3-worker1.js tests
+ +
+
+
Initializing app...
+
+ On a slow internet connection this may take a moment. If this + message displays for "a long time", initialization may have + failed and the JavaScript console may contain clues as to why. 
+
+
Downloading...
+
+ +
+
Most stuff on this page happens in the dev console.
+
+
+ + + + ADDED ext/wasm/demo-worker1.js Index: ext/wasm/demo-worker1.js ================================================================== --- /dev/null +++ ext/wasm/demo-worker1.js @@ -0,0 +1,345 @@ +/* + 2022-05-22 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + A basic test script for sqlite3-worker1.js. + + Note that the wrapper interface demonstrated in + demo-worker1-promiser.js is much easier to use from client code, as it + lacks the message-passing acrobatics demonstrated in this file. +*/ +'use strict'; +(function(){ + const T = self.SqliteTestUtil; + const SW = new Worker("jswasm/sqlite3-worker1.js"); + const DbState = { + id: undefined + }; + const eOutput = document.querySelector('#test-output'); + const log = console.log.bind(console); + const logHtml = function(cssClass,...args){ + log.apply(this, args); + const ln = document.createElement('div'); + if(cssClass) ln.classList.add(cssClass); + ln.append(document.createTextNode(args.join(' '))); + eOutput.append(ln); + }; + const warn = console.warn.bind(console); + const error = console.error.bind(console); + const toss = (...args)=>{throw new Error(args.join(' '))}; + + SW.onerror = function(event){ + error("onerror",event); + }; + + let startTime; + + /** + A queue for callbacks which are to be run in response to async + DB commands. See the notes in runTests() for why we need + this. The event-handling plumbing of this file requires that + any DB command which includes a `messageId` property also have + a queued callback entry, as the existence of that property in + response payloads is how it knows whether or not to shift an + entry off of the queue. + */ + const MsgHandlerQueue = { + queue: [], + id: 0, + push: function(type,callback){ + this.queue.push(callback); + return type + '-' + (++this.id); + }, + shift: function(){ + return this.queue.shift(); + } + }; + + const testCount = ()=>{ + logHtml("","Total test count:",T.counter+". Total time =",(performance.now() - startTime),"ms"); + }; + + const logEventResult = function(ev){ + const evd = ev.result; + logHtml(evd.errorClass ? 'error' : '', + "runOneTest",ev.messageId,"Worker time =", + (ev.workerRespondTime - ev.workerReceivedTime),"ms.", + "Round-trip event time =", + (performance.now() - ev.departureTime),"ms.", + (evd.errorClass ? ev.message : "")//, JSON.stringify(evd) + ); + }; + + const runOneTest = function(eventType, eventArgs, callback){ + T.assert(eventArgs && 'object'===typeof eventArgs); + /* ^^^ that is for the testing and messageId-related code, not + a hard requirement of all of the Worker-exposed APIs. */ + const messageId = MsgHandlerQueue.push(eventType,function(ev){ + logEventResult(ev); + if(callback instanceof Function){ + callback(ev); + testCount(); + } + }); + const msg = { + type: eventType, + args: eventArgs, + dbId: DbState.id, + messageId: messageId, + departureTime: performance.now() + }; + log("Posting",eventType,"message to worker dbId="+(DbState.id||'default')+':',msg); + SW.postMessage(msg); + }; + + /** Methods which map directly to onmessage() event.type keys. + They get passed the inbound event.data. 
*/ + const dbMsgHandler = { + open: function(ev){ + DbState.id = ev.dbId; + log("open result",ev); + }, + exec: function(ev){ + log("exec result",ev); + }, + export: function(ev){ + log("export result",ev); + }, + error: function(ev){ + error("ERROR from the worker:",ev); + logEventResult(ev); + }, + resultRowTest1: function f(ev){ + if(undefined === f.counter) f.counter = 0; + if(null === ev.rowNumber){ + /* End of result set. */ + T.assert(undefined === ev.row) + .assert(Array.isArray(ev.columnNames)) + .assert(ev.columnNames.length); + }else{ + T.assert(ev.rowNumber > 0); + ++f.counter; + } + //log("exec() result row:",ev); + T.assert(null === ev.rowNumber || 'number' === typeof ev.row.b); + } + }; + + /** + "The problem" now is that the test results are async. We + know, however, that the messages posted to the worker will + be processed in the order they are passed to it, so we can + create a queue of callbacks to handle them. The problem + with that approach is that it's not error-handling + friendly, in that an error can cause us to bypass a result + handler queue entry. We have to perform some extra + acrobatics to account for that. + + Problem #2 is that we cannot simply start posting events: we + first have to post an 'open' event, wait for it to respond, and + collect its db ID before continuing. If we don't wait, we may + well fire off 10+ messages before the open actually responds. + */ + const runTests2 = function(){ + const mustNotReach = ()=>{ + throw new Error("This is not supposed to be reached."); + }; + runOneTest('exec',{ + sql: ["create table t(a,b);", + "insert into t(a,b) values(1,2),(3,4),(5,6)" + ], + resultRows: [], columnNames: [] + }, function(ev){ + ev = ev.result; + T.assert(0===ev.resultRows.length) + .assert(0===ev.columnNames.length); + }); + runOneTest('exec',{ + sql: 'select a a, b b from t order by a', + resultRows: [], columnNames: [], saveSql:[] + }, function(ev){ + ev = ev.result; + T.assert(3===ev.resultRows.length) + .assert(1===ev.resultRows[0][0]) + .assert(6===ev.resultRows[2][1]) + .assert(2===ev.columnNames.length) + .assert('b'===ev.columnNames[1]); + }); + //if(1){ error("Returning prematurely for testing."); return; } + runOneTest('exec',{ + sql: 'select a a, b b from t order by a', + resultRows: [], columnNames: [], + rowMode: 'object' + }, function(ev){ + ev = ev.result; + T.assert(3===ev.resultRows.length) + .assert(1===ev.resultRows[0].a) + .assert(6===ev.resultRows[2].b) + }); + runOneTest('exec',{sql:'intentional_error'}, mustNotReach); + // Ensure that the message-handler queue survives ^^^ that error... + runOneTest('exec',{ + sql:'select 1', + resultRows: [], + //rowMode: 'array', // array is the default in the Worker interface + }, function(ev){ + ev = ev.result; + T.assert(1 === ev.resultRows.length) + .assert(1 === ev.resultRows[0][0]); + }); + runOneTest('exec',{ + sql: 'select a a, b b from t order by a', + callback: 'resultRowTest1', + rowMode: 'object' + }, function(ev){ + T.assert(3===dbMsgHandler.resultRowTest1.counter); + dbMsgHandler.resultRowTest1.counter = 0; + }); + runOneTest('exec',{ + sql:[ + "pragma foreign_keys=0;", + // ^^^ arbitrary query with no result columns + "select a, b from t order by a desc;", + "select a from t;" + // multi-statement exec only honors results from the first + // statement with result columns (regardless of whether) + // it has any rows). + ], + rowMode: 1, + resultRows: [] + },function(ev){ + const rows = ev.result.resultRows; + T.assert(3===rows.length). 
+ assert(6===rows[0]); + }); + runOneTest('exec',{sql: 'delete from t where a>3'}); + runOneTest('exec',{ + sql: 'select count(a) from t', + resultRows: [] + },function(ev){ + ev = ev.result; + T.assert(1===ev.resultRows.length) + .assert(2===ev.resultRows[0][0]); + }); + runOneTest('export',{}, function(ev){ + ev = ev.result; + log("export result:",ev); + T.assert('string' === typeof ev.filename) + .assert(ev.byteArray instanceof Uint8Array) + .assert(ev.byteArray.length > 1024) + .assert('application/x-sqlite3' === ev.mimetype); + }); + /***** close() tests must come last. *****/ + runOneTest('close',{unlink:true},function(ev){ + ev = ev.result; + T.assert('string' === typeof ev.filename); + }); + runOneTest('close',{unlink:true},function(ev){ + ev = ev.result; + T.assert(undefined === ev.filename); + logHtml('warning',"This is the final test."); + }); + logHtml('warning',"Finished posting tests. Waiting on async results."); + }; + + const runTests = function(){ + /** + Design decision time: all remaining tests depend on the 'open' + command having succeeded. In order to support multiple DBs, the + upcoming commands ostensibly have to know the ID of the DB they + want to talk to. We have two choices: + + 1) We run 'open' and wait for its response, which contains the + db id. + + 2) We have the Worker automatically use the current "default + db" (the one which was most recently opened) if no db id is + provided in the message. When we do this, the main thread may + well fire off _all_ of the test messages before the 'open' + actually responds, but because the messages are handled on a + FIFO basis, those after the initial 'open' will pick up the + "default" db. However, if the open fails, then all pending + messages (until next next 'open', at least) except for 'close' + will fail and we have no way of cancelling them once they've + been posted to the worker. + + Which approach we use below depends on the boolean value of + waitForOpen. + */ + const waitForOpen = 1, + simulateOpenError = 0 /* if true, the remaining tests will + all barf if waitForOpen is + false. */; + logHtml('', + "Sending 'open' message and",(waitForOpen ? "" : "NOT ")+ + "waiting for its response before continuing."); + startTime = performance.now(); + runOneTest('open', { + filename:'testing2.sqlite3', + simulateError: simulateOpenError + }, function(ev){ + log("open result",ev); + T.assert('testing2.sqlite3'===ev.result.filename) + .assert(ev.dbId) + .assert(ev.messageId) + .assert('string' === typeof ev.result.vfs); + DbState.id = ev.dbId; + if(waitForOpen) setTimeout(runTests2, 0); + }); + if(!waitForOpen) runTests2(); + }; + + SW.onmessage = function(ev){ + if(!ev.data || 'object'!==typeof ev.data){ + warn("Unknown sqlite3-worker message type:",ev); + return; + } + ev = ev.data/*expecting a nested object*/; + //log("main window onmessage:",ev); + if(ev.result && ev.messageId){ + /* We're expecting a queued-up callback handler. 
*/ + const f = MsgHandlerQueue.shift(); + if('error'===ev.type){ + dbMsgHandler.error(ev); + return; + } + T.assert(f instanceof Function); + f(ev); + return; + } + switch(ev.type){ + case 'sqlite3-api': + switch(ev.result){ + case 'worker1-ready': + log("Message:",ev); + self.sqlite3TestModule.setStatus(null); + runTests(); + return; + default: + warn("Unknown sqlite3-api message type:",ev); + return; + } + default: + if(dbMsgHandler.hasOwnProperty(ev.type)){ + try{dbMsgHandler[ev.type](ev);} + catch(err){ + error("Exception while handling db result message", + ev,":",err); + } + return; + } + warn("Unknown sqlite3-api message type:",ev); + } + }; + log("Init complete, but async init bits may still be running."); + log("Installing Worker into global scope SW for dev purposes."); + self.SW = SW; +})(); ADDED ext/wasm/dist.make Index: ext/wasm/dist.make ================================================================== --- /dev/null +++ ext/wasm/dist.make @@ -0,0 +1,101 @@ +#!/do/not/make +#^^^ help emacs select edit mode +# +# Intended to include'd by ./GNUmakefile. +# +# 'make dist' rules for creating a distribution archive of the WASM/JS +# pieces, noting that we only build a dist of the built files, not the +# numerous pieces required to build them. +####################################################################### +MAKEFILE.dist := $(lastword $(MAKEFILE_LIST)) + +######################################################################## +# Chicken/egg situation: we need $(bin.version-info) to get the version +# info for the archive name, but that binary may not yet be built, and +# won't be built until we expand the dependencies. We have to use a +# temporary name for the archive. +dist-name = sqlite-wasm-TEMP +#ifeq (0,1) +# $(info WARNING *******************************************************************) +# $(info ** Be sure to create the desired build configuration before creating the) +# $(info ** distribution archive. Use one of the following targets to do so:) +# $(info **) +# $(info ** o2: builds with -O2, resulting in the fastest builds) +# $(info ** oz: builds with -Oz, resulting in the smallest builds) +# $(info /WARNING *******************************************************************) +#endif + +######################################################################## +# dist.build must be the name of a target which triggers the +# build of the files to be packed into the dist archive. The +# intention is that it be one of (o0, o1, o2, o3, os, oz), each of +# which uses like-named -Ox optimization level flags. The o2 target +# provides the best overall runtime speeds. The oz target provides +# slightly slower speeds (roughly 10%) with significantly smaller WASM +# file sizes. Note that -O2 (the o2 target) results in faster binaries +# than both -O3 and -Os (the o3 and os targets) in all tests run to +# date. 
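+#
+# A usage sketch, assuming GNU make is invoked from the ext/wasm directory
+# (this file is include'd by ./GNUmakefile there): because dist.build is
+# assigned with ?=, it may be overridden on the command line to choose the
+# optimization target baked into the archive, e.g.:
+#
+#   make dist dist.build=o2   # favor runtime speed
+#   make dist                 # default (oz): smallest wasm file
+#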
+dist.build ?= oz + +dist-dir.top := $(dist-name) +dist-dir.jswasm := $(dist-dir.top)/$(notdir $(dir.dout)) +dist-dir.common := $(dist-dir.top)/common +dist.top.extras := \ + demo-123.html demo-123-worker.html demo-123.js \ + tester1.html tester1-worker.html tester1.js \ + demo-jsstorage.html demo-jsstorage.js \ + demo-worker1.html demo-worker1.js \ + demo-worker1-promiser.html demo-worker1-promiser.js +dist.jswasm.extras := $(sqlite3-api.ext.jses) $(sqlite3.wasm) +dist.common.extras := \ + $(wildcard $(dir.common)/*.css) \ + $(dir.common)/SqliteTestUtil.js + +.PHONY: dist +######################################################################## +# dist: create the end-user deliverable archive. +# +# Maintenance reminder: because dist depends on $(dist.build), and +# $(dist.build) will depend on clean, having any deps on +# $(dist-archive) which themselves may be cleaned up by the clean +# target will lead to grief in parallel builds (-j #). Thus +# $(dist-target)'s deps must be trimmed to non-generated files or +# files which are _not_ cleaned up by the clean target. +# +# Note that we require $(bin.version-info) in order to figure out the +# dist file's name, so cannot (without a recursive make) have the +# target name equal to the archive name. +dist: \ + $(bin.stripccomments) $(bin.version-info) \ + $(dist.build) \ + $(MAKEFILE) $(MAKEFILE.dist) + @echo "Making end-user deliverables..." + @rm -fr $(dist-dir.top) + @mkdir -p $(dist-dir.jswasm) $(dist-dir.common) + @cp -p $(dist.top.extras) $(dist-dir.top) + @cp -p README-dist.txt $(dist-dir.top)/README.txt + @cp -p index-dist.html $(dist-dir.top)/index.html + @cp -p $(dist.jswasm.extras) $(dist-dir.jswasm) + @$(bin.stripccomments) -k -k < $(sqlite3.js) \ + > $(dist-dir.jswasm)/$(notdir $(sqlite3.js)) + @cp -p $(dist.common.extras) $(dist-dir.common) + @set -e; \ + vnum=$$($(bin.version-info) --download-version); \ + vdir=sqlite-wasm-$$vnum; \ + arczip=$$vdir.zip; \ + echo "Making $$arczip ..."; \ + rm -fr $$arczip $$vdir; \ + mv $(dist-dir.top) $$vdir; \ + zip -qr $$arczip $$vdir; \ + rm -fr $$vdir; \ + ls -la $$arczip; \ + set +e; \ + unzip -lv $$arczip || echo "Missing unzip app? Not fatal." + +# We need a separate `clean` rule to account for weirdness in +# a sub-make, where we get a copy of the $(dist-name) dir +# copied into the new $(dist-name) dir. +.PHONY: dist-clean +clean: dist-clean +dist-clean: + rm -fr $(dist-name) $(wildcard sqlite-wasm-*.zip) ADDED ext/wasm/fiddle.make Index: ext/wasm/fiddle.make ================================================================== --- /dev/null +++ ext/wasm/fiddle.make @@ -0,0 +1,194 @@ +#!/do/not/make +#^^^ help emacs select edit mode +# +# Intended to include'd by ./GNUmakefile. +####################################################################### +MAKEFILE.fiddle := $(lastword $(MAKEFILE_LIST)) + +######################################################################## +# shell.c and its build flags... +make-np-0 := make -C $(dir.top) -n -p +make-np-1 := sed -e 's/(TOP)/(dir.top)/g' +$(eval $(shell $(make-np-0) | grep -e '^SHELL_OPT ' | $(make-np-1))) +$(eval $(shell $(make-np-0) | grep -e '^SHELL_SRC ' | $(make-np-1))) +# ^^^ can't do that in 1 invocation b/c newlines get stripped +ifeq (,$(SHELL_OPT)) +$(error Could not parse SHELL_OPT from $(dir.top)/Makefile.) +endif +ifeq (,$(SHELL_SRC)) +$(error Could not parse SHELL_SRC from $(dir.top)/Makefile.) 
+endif +$(dir.top)/shell.c: $(SHELL_SRC) $(dir.top)/tool/mkshellc.tcl + $(MAKE) -C $(dir.top) shell.c +# /shell.c +######################################################################## + +EXPORTED_FUNCTIONS.fiddle := $(dir.tmp)/EXPORTED_FUNCTIONS.fiddle +fiddle.emcc-flags = \ + $(emcc.cflags) $(emcc_opt_full) \ + --minify 0 \ + -sALLOW_TABLE_GROWTH \ + -sABORTING_MALLOC \ + -sSTRICT_JS \ + -sENVIRONMENT=web,worker \ + -sMODULARIZE \ + -sDYNAMIC_EXECUTION=0 \ + -sWASM_BIGINT=$(emcc.WASM_BIGINT) \ + -sEXPORT_NAME=$(sqlite3.js.init-func) \ + -Wno-limited-postlink-optimizations \ + $(sqlite3.js.flags.--post-js) \ + $(emcc.exportedRuntimeMethods) \ + -sEXPORTED_FUNCTIONS=@$(abspath $(EXPORTED_FUNCTIONS.fiddle)) \ + $(SQLITE_OPT) $(SHELL_OPT) \ + -DSQLITE_SHELL_FIDDLE +# -D_POSIX_C_SOURCE is needed for strdup() with emcc + +fiddle.EXPORTED_FUNCTIONS.in := \ + EXPORTED_FUNCTIONS.fiddle.in \ + $(EXPORTED_FUNCTIONS.api) + +$(EXPORTED_FUNCTIONS.fiddle): $(fiddle.EXPORTED_FUNCTIONS.in) $(MAKEFILE.fiddle) + sort -u $(fiddle.EXPORTED_FUNCTIONS.in) > $@ + +fiddle-module.js := $(dir.fiddle)/fiddle-module.js +fiddle-module.wasm := $(subst .js,.wasm,$(fiddle-module.js)) +fiddle.cses := $(dir.top)/shell.c $(sqlite3-wasm.c) + +fiddle.SOAP.js := $(dir.fiddle)/$(notdir $(SOAP.js)) +$(fiddle.SOAP.js): $(SOAP.js) + cp $< $@ + +$(eval $(call call-make-pre-js,fiddle-module)) +$(fiddle-module.js): $(MAKEFILE) $(MAKEFILE.fiddle) \ + $(EXPORTED_FUNCTIONS.fiddle) \ + $(fiddle.cses) $(pre-post-fiddle-module.deps) $(fiddle.SOAP.js) + $(emcc.bin) -o $@ $(fiddle.emcc-flags) \ + $(pre-post-common.flags) $(pre-post-fiddle-module.flags) \ + $(fiddle.cses) + $(maybe-wasm-strip) $(fiddle-module.wasm) + gzip < $@ > $@.gz + gzip < $(fiddle-module.wasm) > $(fiddle-module.wasm).gz + +$(dir.fiddle)/fiddle.js.gz: $(dir.fiddle)/fiddle.js + gzip < $< > $@ + +clean: clean-fiddle +clean-fiddle: + rm -f $(fiddle-module.js) $(fiddle-module.js).gz \ + $(fiddle-module.wasm) $(fiddle-module.wasm).gz \ + $(dir.fiddle)/$(SOAP.js) \ + $(dir.fiddle)/fiddle-module.worker.js \ + EXPORTED_FUNCTIONS.fiddle +.PHONY: fiddle +fiddle: $(fiddle-module.js) $(dir.fiddle)/fiddle.js.gz +all: fiddle + +######################################################################## +# fiddle_remote is the remote destination for the fiddle app. It +# must be a [user@]HOST:/path for rsync. +# Note that the target "should probably" contain a symlink of +# index.html -> fiddle.html. +fiddle_remote ?= +ifeq (,$(fiddle_remote)) +ifneq (,$(wildcard /home/stephan)) + fiddle_remote = wh:www/wh/sqlite3/. +else ifneq (,$(wildcard /home/drh)) + #fiddle_remote = if appropriate, add that user@host:/path here +endif +endif +push-fiddle: fiddle + @if [ x = "x$(fiddle_remote)" ]; then \ + echo "fiddle_remote must be a [user@]HOST:/path for rsync"; \ + exit 1; \ + fi + rsync -va fiddle/ $(fiddle_remote) +# end fiddle remote push +######################################################################## + + +######################################################################## +# Explanation of the emcc build flags follows. Full docs for these can +# be found at: +# +# https://github.com/emscripten-core/emscripten/blob/main/src/settings.js +# +# -sENVIRONMENT=web: elides bootstrap code related to non-web JS +# environments like node.js. Removing this makes the output a tiny +# tick larger but hypothetically makes it more portable to +# non-browser JS environments. 
+# +# -sMODULARIZE: changes how the generated code is structured to avoid +# declaring a global Module object and instead installing a function +# which loads and initializes the module. The function is named... +# +# -sEXPORT_NAME=jsFunctionName (see -sMODULARIZE) +# +# -sEXPORTED_RUNTIME_METHODS=@/absolute/path/to/file: a file +# containing a list of emscripten-supplied APIs, one per line, which +# must be exported into the generated JS. Must be an absolute path! +# +# -sEXPORTED_FUNCTIONS=@/absolute/path/to/file: a file containing a +# list of C functions, one per line, which must be exported via wasm +# so they're visible to JS. C symbols names in that file must all +# start with an underscore for reasons known only to the emcc +# developers. e.g., _sqlite3_open_v2 and _sqlite3_finalize. Must be +# an absolute path! +# +# -sSTRICT_JS ensures that the emitted JS code includes the 'use +# strict' option. Note that -sSTRICT is more broadly-scoped and +# results in build errors. +# +# -sALLOW_TABLE_GROWTH is required for (at a minimum) the UDF-binding +# feature. Without it, JS functions cannot be made to proxy C-side +# callbacks. +# +# -sABORTING_MALLOC causes the JS-bound _malloc() to abort rather than +# return 0 on OOM. If set to 0 then all code which uses _malloc() +# must, just like in C, check the result before using it, else +# they're likely to corrupt the JS/WASM heap by writing to its +# address of 0. It is, as of this writing, enabled in Emscripten by +# default but we enable it explicitly in case that default changes. +# +# -sDYNAMIC_EXECUTION=0 disables eval() and the Function constructor. +# If the build runs without these, it's preferable to use this flag +# because certain execution environments disallow those constructs. +# This flag is not strictly necessary, however. +# +# -sWASM_BIGINT is UNTESTED but "should" allow the int64-using C APIs +# to work with JS/wasm, insofar as the JS environment supports the +# BigInt type. That support requires an extremely recent browser: +# Safari didn't get that support until late 2020. +# +# --no-entry: for compiling library code with no main(). If this is +# not supplied and the code has a main(), it is called as part of the +# module init process. Note that main() is #if'd out of shell.c +# (renamed) when building in wasm mode. +# +# --pre-js/--post-js=FILE relative or absolute paths to JS files to +# prepend/append to the emcc-generated bootstrapping JS. It's +# easier/faster to develop with separate JS files (reduces rebuilding +# requirements) but certain configurations, namely -sMODULARIZE, may +# require using at least a --pre-js file. They can be used +# individually and need not be paired. +# +# -O0..-O3 and -Oz: optimization levels affect not only C-style +# optimization but whether or not the resulting generated JS code +# gets minified. -O0 compiles _much_ more quickly than -O3 or -Oz, +# and doesn't minimize any JS code, so is recommended for +# development. -O3 or -Oz are recommended for deployment, but +# primarily because -Oz will shrink the wasm file notably. JS-side +# minification makes little difference in terms of overall +# distributable size. +# +# --minify 0: disables minification of the generated JS code, +# regardless of optimization level. Minification of the JS has +# minimal overall effect in the larger scheme of things and results +# in JS files which can neither be edited nor viewed as text files in +# Fossil (which flags them as binary because of their extreme line +# lengths). 
Interestingly, whether or not the comments in the +# generated JS file get stripped is unaffected by this setting and +# depends entirely on the optimization level. Higher optimization +# levels reduce the size of the JS considerably even without +# minification. +# +######################################################################## Index: ext/wasm/fiddle/fiddle-worker.js ================================================================== --- ext/wasm/fiddle/fiddle-worker.js +++ ext/wasm/fiddle/fiddle-worker.js @@ -87,215 +87,293 @@ Noting that it happens in Firefox as well as Chrome. Harmless but annoying. */ "use strict"; (function(){ - /** - Posts a message in the form {type,data} unless passed more than 2 - args, in which case it posts {type, data:[arg1...argN]}. - */ - const wMsg = function(type,data){ - postMessage({ - type, - data: arguments.length<3 - ? data - : Array.prototype.slice.call(arguments,1) - }); - }; - - const stdout = function(){wMsg('stdout', Array.prototype.slice.call(arguments));}; - const stderr = function(){wMsg('stderr', Array.prototype.slice.call(arguments));}; - - self.onerror = function(/*message, source, lineno, colno, error*/) { - const err = arguments[4]; - if(err && 'ExitStatus'==err.name){ - /* This is relevant for the sqlite3 shell binding but not the - lower-level binding. */ - fiddleModule.isDead = true; - stderr("FATAL ERROR:", err.message); - stderr("Restarting the app requires reloading the page."); - wMsg('error', err); - } - console.error(err); - fiddleModule.setStatus('Exception thrown, see JavaScript console: '+err); - }; - - const Sqlite3Shell = { - /** Returns the name of the currently-opened db. */ - dbFilename: function f(){ - if(!f._) f._ = fiddleModule.cwrap('fiddle_db_filename', "string", ['string']); - return f._(); - }, - /** - Runs the given text through the shell as if it had been typed - in by a user. Fires a working/start event before it starts and - working/end event when it finishes. - */ - exec: function f(sql){ - if(!f._) f._ = fiddleModule.cwrap('fiddle_exec', null, ['string']); - if(fiddleModule.isDead){ - stderr("shell module has exit()ed. Cannot run SQL."); - return; - } - wMsg('working','start'); - try { - if(f._running){ - stderr('Cannot run multiple commands concurrently.'); - }else{ - f._running = true; - f._(sql); - } - } finally { - delete f._running; - wMsg('working','end'); - } - }, - resetDb: function f(){ - if(!f._) f._ = fiddleModule.cwrap('fiddle_reset_db', null); - stdout("Resetting database."); - f._(); - stdout("Reset",this.dbFilename()); - }, - /* Interrupt can't work: this Worker is tied up working, so won't get the - interrupt event which would be needed to perform the interrupt. */ - interrupt: function f(){ - if(!f._) f._ = fiddleModule.cwrap('fiddle_interrupt', null); - stdout("Requesting interrupt."); - f._(); - } - }; - - self.onmessage = function f(ev){ - ev = ev.data; - if(!f.cache){ - f.cache = { - prevFilename: null - }; - } - //console.debug("worker: onmessage.data",ev); - switch(ev.type){ - case 'shellExec': Sqlite3Shell.exec(ev.data); return; - case 'db-reset': Sqlite3Shell.resetDb(); return; - case 'interrupt': Sqlite3Shell.interrupt(); return; - /** Triggers the export of the current db. Fires an - event in the form: - - {type:'db-export', - data:{ - filename: name of db, - buffer: contents of the db file (Uint8Array), - error: on error, a message string and no buffer property. 
- } - } - */ - case 'db-export': { - const fn = Sqlite3Shell.dbFilename(); - stdout("Exporting",fn+"."); - const fn2 = fn ? fn.split(/[/\\]/).pop() : null; - try{ - if(!fn2) throw new Error("DB appears to be closed."); - wMsg('db-export',{ - filename: fn2, - buffer: fiddleModule.FS.readFile(fn, {encoding:"binary"}) - }); - }catch(e){ - /* Post a failure message so that UI elements disabled - during the export can be re-enabled. */ - wMsg('db-export',{ - filename: fn, - error: e.message - }); - } - return; - } - case 'open': { - /* Expects: { - buffer: ArrayBuffer | Uint8Array, - filename: for logging/informational purposes only - } */ - const opt = ev.data; - let buffer = opt.buffer; - if(buffer instanceof Uint8Array){ - }else if(buffer instanceof ArrayBuffer){ - buffer = new Uint8Array(buffer); - }else{ - stderr("'open' expects {buffer:Uint8Array} containing an uploaded db."); - return; - } - const fn = ( - opt.filename - ? opt.filename.split(/[/\\]/).pop().replace('"','_') - : ("db-"+((Math.random() * 10000000) | 0)+ - "-"+((Math.random() * 10000000) | 0)+".sqlite3") - ); - /* We cannot delete the existing db file until the new one - is installed, which means that we risk overflowing our - quota (if any) by having both the previous and current - db briefly installed in the virtual filesystem. */ - fiddleModule.FS.createDataFile("/", fn, buffer, true, true); - const oldName = Sqlite3Shell.dbFilename(); - Sqlite3Shell.exec('.open "/'+fn+'"'); - if(oldName && oldName !== fn){ - try{fiddleModule.FS.unlink(oldName);} - catch(e){/*ignored*/} - } - stdout("Replaced DB with",fn+"."); - return; - } - }; - console.warn("Unknown fiddle-worker message type:",ev); - }; - - /** - emscripten module for use with build mode -sMODULARIZE. - */ - const fiddleModule = { - print: stdout, - printErr: stderr, - /** - Intercepts status updates from the emscripting module init - and fires worker events with a type of 'status' and a - payload of: - - { - text: string | null, // null at end of load process - step: integer // starts at 1, increments 1 per call - } - - We have no way of knowing in advance how many steps will - be processed/posted, so creating a "percentage done" view is - not really practical. One can be approximated by giving it a - current value of message.step and max value of message.step+1, - though. - - When work is finished, a message with a text value of null is - submitted. - - After a message with text==null is posted, the module may later - post messages about fatal problems, e.g. an exit() being - triggered, so it is recommended that UI elements for posting - status messages not be outright removed from the DOM when - text==null, and that they instead be hidden until/unless - text!=null. - */ - setStatus: function f(text){ - if(!f.last) f.last = { step: 0, text: '' }; - else if(text === f.last.text) return; - f.last.text = text; - wMsg('module',{ - type:'status', - data:{step: ++f.last.step, text: text||null} - }); - } - }; - - importScripts('fiddle-module.js'); - /** - initFiddleModule() is installed via fiddle-module.js due to - building with: - - emcc ... -sMODULARIZE=1 -sEXPORT_NAME=initFiddleModule - */ - initFiddleModule(fiddleModule).then(function(thisModule){ - wMsg('fiddle-ready'); - }); + /** + Posts a message in the form {type,data}. If passed more than 2 + args, the 3rd must be an array of "transferable" values to pass + as the 2nd argument to postMessage(). 
*/ + const wMsg = + (type,data,transferables)=>{ + postMessage({type, data}, transferables || []); + }; + const stdout = (...args)=>wMsg('stdout', args); + const stderr = (...args)=>wMsg('stderr', args); + const toss = (...args)=>{ + throw new Error(args.join(' ')); + }; + const fixmeOPFS = "(FIXME: won't work with OPFS-over-sqlite3_vfs.)"; + let sqlite3 /* gets assigned when the wasm module is loaded */; + + self.onerror = function(/*message, source, lineno, colno, error*/) { + const err = arguments[4]; + if(err && 'ExitStatus'==err.name){ + /* This is relevant for the sqlite3 shell binding but not the + lower-level binding. */ + fiddleModule.isDead = true; + stderr("FATAL ERROR:", err.message); + stderr("Restarting the app requires reloading the page."); + wMsg('error', err); + } + console.error(err); + fiddleModule.setStatus('Exception thrown, see JavaScript console: '+err); + }; + + const Sqlite3Shell = { + /** Returns the name of the currently-opened db. */ + dbFilename: function f(){ + if(!f._) f._ = sqlite3.wasm.xWrap('fiddle_db_filename', "string", ['string']); + return f._(0); + }, + dbHandle: function f(){ + if(!f._) f._ = sqlite3.wasm.xWrap("fiddle_db_handle", "sqlite3*"); + return f._(); + }, + dbIsOpfs: function f(){ + return sqlite3.opfs && sqlite3.capi.sqlite3_js_db_uses_vfs( + this.dbHandle(), "opfs" + ); + }, + runMain: function f(){ + if(f.argv) return 0===f.argv.rc; + const dbName = "/fiddle.sqlite3"; + f.argv = [ + 'sqlite3-fiddle.wasm', + '-bail', '-safe', + dbName + /* Reminder: because of how we run fiddle, we have to ensure + that any argv strings passed to its main() are valid until + the wasm environment shuts down. */ + ]; + const capi = sqlite3.capi, wasm = sqlite3.wasm; + /* We need to call sqlite3_shutdown() in order to avoid numerous + legitimate warnings from the shell about it being initialized + after sqlite3_initialize() has been called. This means, + however, that any initialization done by the JS code may need + to be re-done (e.g. re-registration of dynamically-loaded + VFSes). We need a more generic approach to running such + init-level code. */ + capi.sqlite3_shutdown(); + f.argv.pArgv = wasm.allocMainArgv(f.argv); + f.argv.rc = wasm.exports.fiddle_main( + f.argv.length, f.argv.pArgv + ); + if(f.argv.rc){ + stderr("Fatal error initializing sqlite3 shell."); + fiddleModule.isDead = true; + return false; + } + stdout("SQLite version", capi.sqlite3_libversion(), + capi.sqlite3_sourceid().substr(0,19)); + stdout('Welcome to the "fiddle" shell.'); + if(sqlite3.opfs){ + stdout("\nOPFS is available. To open a persistent db, use:\n\n", + " .open file:name?vfs=opfs\n\nbut note that some", + "features (e.g. upload) do not yet work with OPFS."); + sqlite3.opfs.registerVfs(); + } + stdout('\nEnter ".help" for usage hints.'); + this.exec([ // initialization commands... + '.nullvalue NULL', + '.headers on' + ].join('\n')); + return true; + }, + /** + Runs the given text through the shell as if it had been typed + in by a user. Fires a working/start event before it starts and + working/end event when it finishes. + */ + exec: function f(sql){ + if(!f._){ + if(!this.runMain()) return; + f._ = sqlite3.wasm.xWrap('fiddle_exec', null, ['string']); + } + if(fiddleModule.isDead){ + stderr("shell module has exit()ed. 
Cannot run SQL."); + return; + } + wMsg('working','start'); + try { + if(f._running){ + stderr('Cannot run multiple commands concurrently.'); + }else if(sql){ + if(Array.isArray(sql)) sql = sql.join(''); + f._running = true; + f._(sql); + } + }finally{ + delete f._running; + wMsg('working','end'); + } + }, + resetDb: function f(){ + if(!f._) f._ = sqlite3.wasm.xWrap('fiddle_reset_db', null); + stdout("Resetting database."); + f._(); + stdout("Reset",this.dbFilename()); + }, + /* Interrupt can't work: this Worker is tied up working, so won't get the + interrupt event which would be needed to perform the interrupt. */ + interrupt: function f(){ + if(!f._) f._ = sqlite3.wasm.xWrap('fiddle_interrupt', null); + stdout("Requesting interrupt."); + f._(); + } + }; + + self.onmessage = function f(ev){ + ev = ev.data; + if(!f.cache){ + f.cache = { + prevFilename: null + }; + } + //console.debug("worker: onmessage.data",ev); + switch(ev.type){ + case 'shellExec': Sqlite3Shell.exec(ev.data); return; + case 'db-reset': Sqlite3Shell.resetDb(); return; + case 'interrupt': Sqlite3Shell.interrupt(); return; + /** Triggers the export of the current db. Fires an + event in the form: + + {type:'db-export', + data:{ + filename: name of db, + buffer: contents of the db file (Uint8Array), + error: on error, a message string and no buffer property. + } + } + */ + case 'db-export': { + const fn = Sqlite3Shell.dbFilename(); + stdout("Exporting",fn+"."); + const fn2 = fn ? fn.split(/[/\\]/).pop() : null; + try{ + if(!fn2) toss("DB appears to be closed."); + const buffer = sqlite3.capi.sqlite3_js_db_export( + Sqlite3Shell.dbHandle() + ); + wMsg('db-export',{filename: fn2, buffer: buffer.buffer}, [buffer.buffer]); + }catch(e){ + console.error("Export failed:",e); + /* Post a failure message so that UI elements disabled + during the export can be re-enabled. */ + wMsg('db-export',{ + filename: fn, + error: e.message + }); + } + return; + } + case 'open': { + /* Expects: { + buffer: ArrayBuffer | Uint8Array, + filename: the filename for the db. Any dir part is + stripped. + } + */ + const opt = ev.data; + let buffer = opt.buffer; + stderr('open():',fixmeOPFS); + if(buffer instanceof ArrayBuffer){ + buffer = new Uint8Array(buffer); + }else if(!(buffer instanceof Uint8Array)){ + stderr("'open' expects {buffer:Uint8Array} containing an uploaded db."); + return; + } + const fn = ( + opt.filename + ? opt.filename.split(/[/\\]/).pop().replace('"','_') + : ("db-"+((Math.random() * 10000000) | 0)+ + "-"+((Math.random() * 10000000) | 0)+".sqlite3") + ); + try { + /* We cannot delete the existing db file until the new one + is installed, which means that we risk overflowing our + quota (if any) by having both the previous and current + db briefly installed in the virtual filesystem. */ + const fnAbs = '/'+fn; + const oldName = Sqlite3Shell.dbFilename(); + if(oldName && oldName===fnAbs){ + /* We cannot create the replacement file while the current file + is opened, nor does the shell have a .close command, so we + must temporarily switch to another db... 
*/ + Sqlite3Shell.exec('.open :memory:'); + fiddleModule.FS.unlink(fnAbs); + } + fiddleModule.FS.createDataFile("/", fn, buffer, true, true); + Sqlite3Shell.exec('.open "'+fnAbs+'"'); + if(oldName && oldName!==fnAbs){ + try{fiddleModule.fsUnlink(oldName)} + catch(e){/*ignored*/} + } + stdout("Replaced DB with",fn+"."); + }catch(e){ + stderr("Error installing db",fn+":",e.message); + } + return; + } + }; + console.warn("Unknown fiddle-worker message type:",ev); + }; + + /** + emscripten module for use with build mode -sMODULARIZE. + */ + const fiddleModule = { + print: stdout, + printErr: stderr, + /** + Intercepts status updates from the emscripting module init + and fires worker events with a type of 'status' and a + payload of: + + { + text: string | null, // null at end of load process + step: integer // starts at 1, increments 1 per call + } + + We have no way of knowing in advance how many steps will + be processed/posted, so creating a "percentage done" view is + not really practical. One can be approximated by giving it a + current value of message.step and max value of message.step+1, + though. + + When work is finished, a message with a text value of null is + submitted. + + After a message with text==null is posted, the module may later + post messages about fatal problems, e.g. an exit() being + triggered, so it is recommended that UI elements for posting + status messages not be outright removed from the DOM when + text==null, and that they instead be hidden until/unless + text!=null. + */ + setStatus: function f(text){ + if(!f.last) f.last = { step: 0, text: '' }; + else if(text === f.last.text) return; + f.last.text = text; + wMsg('module',{ + type:'status', + data:{step: ++f.last.step, text: text||null} + }); + } + }; + + importScripts('fiddle-module.js'+self.location.search); + /** + initFiddleModule() is installed via fiddle-module.js due to + building with: + + emcc ... -sMODULARIZE=1 -sEXPORT_NAME=initFiddleModule + */ + sqlite3InitModule(fiddleModule).then((_sqlite3)=>{ + sqlite3 = _sqlite3; + const dbVfs = sqlite3.wasm.xWrap('fiddle_db_vfs', "*", ['string']); + fiddleModule.fsUnlink = (fn)=>{ + return sqlite3.wasm.sqlite3_wasm_vfs_unlink(dbVfs(0), fn); + }; + wMsg('fiddle-ready'); + })/*then()*/; })(); Index: ext/wasm/fiddle/fiddle.js ================================================================== --- ext/wasm/fiddle/fiddle.js +++ ext/wasm/fiddle/fiddle.js @@ -13,199 +13,199 @@ This is the main entry point for the sqlite3 fiddle app. It sets up the various UI bits, loads a Worker for the db connection, and manages the communication between the UI and worker. */ (function(){ - 'use strict'; - /* Recall that the 'self' symbol, except where locally - overwritten, refers to the global window or worker object. */ - - const storage = (function(NS/*namespace object in which to store this module*/){ - /* Pedantic licensing note: this code originated in the Fossil SCM - source tree, where it has a different license, but the person who - ported it into sqlite is the same one who wrote it for fossil. */ - 'use strict'; - NS = NS||{}; - - /** - This module provides a basic wrapper around localStorage - or sessionStorage or a dummy proxy object if neither - of those are available. 
- */ - const tryStorage = function f(obj){ - if(!f.key) f.key = 'storage.access.check'; - try{ - obj.setItem(f.key, 'f'); - const x = obj.getItem(f.key); - obj.removeItem(f.key); - if(x!=='f') throw new Error(f.key+" failed") - return obj; - }catch(e){ - return undefined; - } - }; - - /** Internal storage impl for this module. */ - const $storage = - tryStorage(window.localStorage) - || tryStorage(window.sessionStorage) - || tryStorage({ - // A basic dummy xyzStorage stand-in - $$$:{}, - setItem: function(k,v){this.$$$[k]=v}, - getItem: function(k){ - return this.$$$.hasOwnProperty(k) ? this.$$$[k] : undefined; - }, - removeItem: function(k){delete this.$$$[k]}, - clear: function(){this.$$$={}} - }); - - /** - For the dummy storage we need to differentiate between - $storage and its real property storage for hasOwnProperty() - to work properly... - */ - const $storageHolder = $storage.hasOwnProperty('$$$') ? $storage.$$$ : $storage; - - /** - A prefix which gets internally applied to all storage module - property keys so that localStorage and sessionStorage across the - same browser profile instance do not "leak" across multiple apps - being hosted by the same origin server. Such cross-polination is - still there but, with this key prefix applied, it won't be - immediately visible via the storage API. - - With this in place we can justify using localStorage instead of - sessionStorage. - - One implication of using localStorage and sessionStorage is that - their scope (the same "origin" and client application/profile) - allows multiple apps on the same origin to use the same - storage. Thus /appA/foo could then see changes made via - /appB/foo. The data do not cross user- or browser boundaries, - though, so it "might" arguably be called a - feature. storageKeyPrefix was added so that we can sandbox that - state for each separate app which shares an origin. - - See: https://fossil-scm.org/forum/forumpost/4afc4d34de - - Sidebar: it might seem odd to provide a key prefix and stick all - properties in the topmost level of the storage object. We do that - because adding a layer of object to sandbox each app would mean - (de)serializing that whole tree on every storage property change. - e.g. instead of storageObject.projectName.foo we have - storageObject[storageKeyPrefix+'foo']. That's soley for - efficiency's sake (in terms of battery life and - environment-internal storage-level effort). - */ - const storageKeyPrefix = ( - $storageHolder===$storage/*localStorage or sessionStorage*/ - ? ( - (NS.config ? - (NS.config.projectCode || NS.config.projectName - || NS.config.shortProjectName) - : false) - || window.location.pathname - )+'::' : ( - '' /* transient storage */ - ) - ); - - /** - A proxy for localStorage or sessionStorage or a - page-instance-local proxy, if neither one is availble. - - Which exact storage implementation is uses is unspecified, and - apps must not rely on it. - */ - NS.storage = { - storageKeyPrefix: storageKeyPrefix, - /** Sets the storage key k to value v, implicitly converting - it to a string. */ - set: (k,v)=>$storage.setItem(storageKeyPrefix+k,v), - /** Sets storage key k to JSON.stringify(v). */ - setJSON: (k,v)=>$storage.setItem(storageKeyPrefix+k,JSON.stringify(v)), - /** Returns the value for the given storage key, or - dflt if the key is not found in the storage. */ - get: (k,dflt)=>$storageHolder.hasOwnProperty( - storageKeyPrefix+k - ) ? $storage.getItem(storageKeyPrefix+k) : dflt, - /** Returns true if the given key has a value of "true". 
If the - key is not found, it returns true if the boolean value of dflt - is "true". (Remember that JS persistent storage values are all - strings.) */ - getBool: function(k,dflt){ - return 'true'===this.get(k,''+(!!dflt)); - }, - /** Returns the JSON.parse()'d value of the given - storage key's value, or dflt is the key is not - found or JSON.parse() fails. */ - getJSON: function f(k,dflt){ - try { - const x = this.get(k,f); - return x===f ? dflt : JSON.parse(x); - } - catch(e){return dflt} - }, - /** Returns true if the storage contains the given key, - else false. */ - contains: (k)=>$storageHolder.hasOwnProperty(storageKeyPrefix+k), - /** Removes the given key from the storage. Returns this. */ - remove: function(k){ - $storage.removeItem(storageKeyPrefix+k); - return this; - }, - /** Clears ALL keys from the storage. Returns this. */ - clear: function(){ - this.keys().forEach((k)=>$storage.removeItem(/*w/o prefix*/k)); - return this; - }, - /** Returns an array of all keys currently in the storage. */ - keys: ()=>Object.keys($storageHolder).filter((v)=>(v||'').startsWith(storageKeyPrefix)), - /** Returns true if this storage is transient (only available - until the page is reloaded), indicating that fileStorage - and sessionStorage are unavailable. */ - isTransient: ()=>$storageHolder!==$storage, - /** Returns a symbolic name for the current storage mechanism. */ - storageImplName: function(){ - if($storage===window.localStorage) return 'localStorage'; - else if($storage===window.sessionStorage) return 'sessionStorage'; - else return 'transient'; - }, - - /** - Returns a brief help text string for the currently-selected - storage type. - */ - storageHelpDescription: function(){ - return { - localStorage: "Browser-local persistent storage with an "+ - "unspecified long-term lifetime (survives closing the browser, "+ - "but maybe not a browser upgrade).", - sessionStorage: "Storage local to this browser tab, "+ - "lost if this tab is closed.", - "transient": "Transient storage local to this invocation of this page." - }[this.storageImplName()]; - } - }; - return NS.storage; - })({})/*storage API setup*/; - - - /** Name of the stored copy of SqliteFiddle.config. */ - const configStorageKey = 'sqlite3-fiddle-config'; - - /** - The SqliteFiddle object is intended to be the primary - app-level object for the main-thread side of the sqlite - fiddle application. It uses a worker thread to load the - sqlite WASM module and communicate with it. - */ - const SF/*local convenience alias*/ - = window.SqliteFiddle/*canonical name*/ = { - /* Config options. */ - config: { + 'use strict'; + /* Recall that the 'self' symbol, except where locally + overwritten, refers to the global window or worker object. */ + + const storage = (function(NS/*namespace object in which to store this module*/){ + /* Pedantic licensing note: this code originated in the Fossil SCM + source tree, where it has a different license, but the person who + ported it into sqlite is the same one who wrote it for fossil. */ + 'use strict'; + NS = NS||{}; + + /** + This module provides a basic wrapper around localStorage + or sessionStorage or a dummy proxy object if neither + of those are available. + */ + const tryStorage = function f(obj){ + if(!f.key) f.key = 'storage.access.check'; + try{ + obj.setItem(f.key, 'f'); + const x = obj.getItem(f.key); + obj.removeItem(f.key); + if(x!=='f') throw new Error(f.key+" failed") + return obj; + }catch(e){ + return undefined; + } + }; + + /** Internal storage impl for this module. 
*/ + const $storage = + tryStorage(window.localStorage) + || tryStorage(window.sessionStorage) + || tryStorage({ + // A basic dummy xyzStorage stand-in + $$$:{}, + setItem: function(k,v){this.$$$[k]=v}, + getItem: function(k){ + return this.$$$.hasOwnProperty(k) ? this.$$$[k] : undefined; + }, + removeItem: function(k){delete this.$$$[k]}, + clear: function(){this.$$$={}} + }); + + /** + For the dummy storage we need to differentiate between + $storage and its real property storage for hasOwnProperty() + to work properly... + */ + const $storageHolder = $storage.hasOwnProperty('$$$') ? $storage.$$$ : $storage; + + /** + A prefix which gets internally applied to all storage module + property keys so that localStorage and sessionStorage across the + same browser profile instance do not "leak" across multiple apps + being hosted by the same origin server. Such cross-polination is + still there but, with this key prefix applied, it won't be + immediately visible via the storage API. + + With this in place we can justify using localStorage instead of + sessionStorage. + + One implication of using localStorage and sessionStorage is that + their scope (the same "origin" and client application/profile) + allows multiple apps on the same origin to use the same + storage. Thus /appA/foo could then see changes made via + /appB/foo. The data do not cross user- or browser boundaries, + though, so it "might" arguably be called a + feature. storageKeyPrefix was added so that we can sandbox that + state for each separate app which shares an origin. + + See: https://fossil-scm.org/forum/forumpost/4afc4d34de + + Sidebar: it might seem odd to provide a key prefix and stick all + properties in the topmost level of the storage object. We do that + because adding a layer of object to sandbox each app would mean + (de)serializing that whole tree on every storage property change. + e.g. instead of storageObject.projectName.foo we have + storageObject[storageKeyPrefix+'foo']. That's soley for + efficiency's sake (in terms of battery life and + environment-internal storage-level effort). + */ + const storageKeyPrefix = ( + $storageHolder===$storage/*localStorage or sessionStorage*/ + ? ( + (NS.config ? + (NS.config.projectCode || NS.config.projectName + || NS.config.shortProjectName) + : false) + || window.location.pathname + )+'::' : ( + '' /* transient storage */ + ) + ); + + /** + A proxy for localStorage or sessionStorage or a + page-instance-local proxy, if neither one is availble. + + Which exact storage implementation is uses is unspecified, and + apps must not rely on it. + */ + NS.storage = { + storageKeyPrefix: storageKeyPrefix, + /** Sets the storage key k to value v, implicitly converting + it to a string. */ + set: (k,v)=>$storage.setItem(storageKeyPrefix+k,v), + /** Sets storage key k to JSON.stringify(v). */ + setJSON: (k,v)=>$storage.setItem(storageKeyPrefix+k,JSON.stringify(v)), + /** Returns the value for the given storage key, or + dflt if the key is not found in the storage. */ + get: (k,dflt)=>$storageHolder.hasOwnProperty( + storageKeyPrefix+k + ) ? $storage.getItem(storageKeyPrefix+k) : dflt, + /** Returns true if the given key has a value of "true". If the + key is not found, it returns true if the boolean value of dflt + is "true". (Remember that JS persistent storage values are all + strings.) 
*/ + getBool: function(k,dflt){ + return 'true'===this.get(k,''+(!!dflt)); + }, + /** Returns the JSON.parse()'d value of the given + storage key's value, or dflt is the key is not + found or JSON.parse() fails. */ + getJSON: function f(k,dflt){ + try { + const x = this.get(k,f); + return x===f ? dflt : JSON.parse(x); + } + catch(e){return dflt} + }, + /** Returns true if the storage contains the given key, + else false. */ + contains: (k)=>$storageHolder.hasOwnProperty(storageKeyPrefix+k), + /** Removes the given key from the storage. Returns this. */ + remove: function(k){ + $storage.removeItem(storageKeyPrefix+k); + return this; + }, + /** Clears ALL keys from the storage. Returns this. */ + clear: function(){ + this.keys().forEach((k)=>$storage.removeItem(/*w/o prefix*/k)); + return this; + }, + /** Returns an array of all keys currently in the storage. */ + keys: ()=>Object.keys($storageHolder).filter((v)=>(v||'').startsWith(storageKeyPrefix)), + /** Returns true if this storage is transient (only available + until the page is reloaded), indicating that fileStorage + and sessionStorage are unavailable. */ + isTransient: ()=>$storageHolder!==$storage, + /** Returns a symbolic name for the current storage mechanism. */ + storageImplName: function(){ + if($storage===window.localStorage) return 'localStorage'; + else if($storage===window.sessionStorage) return 'sessionStorage'; + else return 'transient'; + }, + + /** + Returns a brief help text string for the currently-selected + storage type. + */ + storageHelpDescription: function(){ + return { + localStorage: "Browser-local persistent storage with an "+ + "unspecified long-term lifetime (survives closing the browser, "+ + "but maybe not a browser upgrade).", + sessionStorage: "Storage local to this browser tab, "+ + "lost if this tab is closed.", + "transient": "Transient storage local to this invocation of this page." + }[this.storageImplName()]; + } + }; + return NS.storage; + })({})/*storage API setup*/; + + + /** Name of the stored copy of SqliteFiddle.config. */ + const configStorageKey = 'sqlite3-fiddle-config'; + + /** + The SqliteFiddle object is intended to be the primary + app-level object for the main-thread side of the sqlite + fiddle application. It uses a worker thread to load the + sqlite WASM module and communicate with it. + */ + const SF/*local convenience alias*/ + = window.SqliteFiddle/*canonical name*/ = { + /* Config options. */ + config: { /* If true, SqliteFiddle.echo() will auto-scroll the output widget to the bottom when it receives output, else it won't. */ autoScrollOutput: true, /* If true, the output area will be cleared before each @@ -217,593 +217,599 @@ echoToConsole: false, /* If true, display input/output areas side-by-side. */ sideBySide: true, /* If true, swap positions of the input/output areas. */ swapInOut: false - }, - /** - Emits the given text, followed by a line break, to the - output widget. If given more than one argument, they are - join()'d together with a space between each. As a special - case, if passed a single array, that array is used in place - of the arguments array (this is to facilitate receiving - lists of arguments via worker events). - */ - echo: function f(text) { + }, + /** + Emits the given text, followed by a line break, to the + output widget. If given more than one argument, they are + join()'d together with a space between each. 
As a special + case, if passed a single array, that array is used in place + of the arguments array (this is to facilitate receiving + lists of arguments via worker events). + */ + echo: function f(text) { /* Maintenance reminder: we currently require/expect a textarea output element. It might be nice to extend this to behave differently if the output element is a non-textarea element, in which case it would need to append the given text as a TEXT node and add a line break. */ if(!f._){ - f._ = document.getElementById('output'); - f._.value = ''; // clear browser cache + f._ = document.getElementById('output'); + f._.value = ''; // clear browser cache } if(arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' '); else if(1===arguments.length && Array.isArray(text)) text = text.join(' '); // These replacements are necessary if you render to raw HTML //text = text.replace(/&/g, "&"); //text = text.replace(//g, ">"); //text = text.replace('\n', '
', 'g'); if(null===text){/*special case: clear output*/ - f._.value = ''; - return; + f._.value = ''; + return; }else if(this.echo._clearPending){ - delete this.echo._clearPending; - f._.value = ''; + delete this.echo._clearPending; + f._.value = ''; } if(this.config.echoToConsole) console.log(text); if(this.jqTerm) this.jqTerm.echo(text); f._.value += text + "\n"; if(this.config.autoScrollOutput){ - f._.scrollTop = f._.scrollHeight; + f._.scrollTop = f._.scrollHeight; } - }, - _msgMap: {}, - /** Adds a worker message handler for messages of the given - type. */ - addMsgHandler: function f(type,callback){ + }, + _msgMap: {}, + /** Adds a worker message handler for messages of the given + type. */ + addMsgHandler: function f(type,callback){ if(Array.isArray(type)){ - type.forEach((t)=>this.addMsgHandler(t, callback)); - return this; + type.forEach((t)=>this.addMsgHandler(t, callback)); + return this; } (this._msgMap.hasOwnProperty(type) ? this._msgMap[type] : (this._msgMap[type] = [])).push(callback); return this; - }, - /** Given a worker message, runs all handlers for msg.type. */ - runMsgHandlers: function(msg){ + }, + /** Given a worker message, runs all handlers for msg.type. */ + runMsgHandlers: function(msg){ const list = (this._msgMap.hasOwnProperty(msg.type) ? this._msgMap[msg.type] : false); if(!list){ - console.warn("No handlers found for message type:",msg); - return false; + console.warn("No handlers found for message type:",msg); + return false; } //console.debug("runMsgHandlers",msg); list.forEach((f)=>f(msg)); return true; - }, - /** Removes all message handlers for the given message type. */ - clearMsgHandlers: function(type){ - delete this._msgMap[type]; - return this; - }, - /* Posts a message in the form {type, data} to the db worker. Returns this. */ - wMsg: function(type,data){ - this.worker.postMessage({type, data}); - return this; - }, - /** - Prompts for confirmation and, if accepted, deletes - all content and tables in the (transient) database. - */ - resetDb: function(){ - if(window.confirm("Really destroy all content and tables " - +"in the (transient) db?")){ - this.wMsg('db-reset'); - } - return this; - }, - /** Stores this object's config in the browser's storage. */ - storeConfig: function(){ - storage.setJSON(configStorageKey,this.config); - } - }; - - if(1){ /* Restore SF.config */ - const storedConfig = storage.getJSON(configStorageKey); - if(storedConfig){ - /* Copy all properties to SF.config which are currently in - storedConfig. We don't bother copying any other - properties: those have been removed from the app in the - meantime. */ - Object.keys(SF.config).forEach(function(k){ - if(storedConfig.hasOwnProperty(k)){ - SF.config[k] = storedConfig[k]; - } - }); - } - } - - SF.worker = new Worker('fiddle-worker.js'); - SF.worker.onmessage = (ev)=>SF.runMsgHandlers(ev.data); - SF.addMsgHandler(['stdout', 'stderr'], (ev)=>SF.echo(ev.data)); - - /* querySelectorAll() proxy */ - const EAll = function(/*[element=document,] cssSelector*/){ - return (arguments.length>1 ? arguments[0] : document) - .querySelectorAll(arguments[arguments.length-1]); - }; - /* querySelector() proxy */ - const E = function(/*[element=document,] cssSelector*/){ - return (arguments.length>1 ? arguments[0] : document) - .querySelector(arguments[arguments.length-1]); - }; - - /** Handles status updates from the Module object. 
*/ - SF.addMsgHandler('module', function f(ev){ - ev = ev.data; - if('status'!==ev.type){ - console.warn("Unexpected module-type message:",ev); - return; - } - if(!f.ui){ - f.ui = { - status: E('#module-status'), - progress: E('#module-progress'), - spinner: E('#module-spinner') - }; - } - const msg = ev.data; - if(f.ui.progres){ - progress.value = msg.step; - progress.max = msg.step + 1/*we don't know how many steps to expect*/; - } - if(1==msg.step){ - f.ui.progress.classList.remove('hidden'); - f.ui.spinner.classList.remove('hidden'); - } - if(msg.text){ - f.ui.status.classList.remove('hidden'); - f.ui.status.innerText = msg.text; - }else{ - if(f.ui.progress){ - f.ui.progress.remove(); - f.ui.spinner.remove(); - delete f.ui.progress; - delete f.ui.spinner; - } - f.ui.status.classList.add('hidden'); - /* The module can post messages about fatal problems, - e.g. an exit() being triggered or assertion failure, - after the last "load" message has arrived, so - leave f.ui.status and message listener intact. */ - } - }); - - /** - The 'fiddle-ready' event is fired (with no payload) when the - wasm module has finished loading. Interestingly, that happens - _before_ the final module:status event */ - SF.addMsgHandler('fiddle-ready', function(){ - SF.clearMsgHandlers('fiddle-ready'); - self.onSFLoaded(); - }); - - /** - Performs all app initialization which must wait until after the - worker module is loaded. This function removes itself when it's - called. - */ - self.onSFLoaded = function(){ - delete this.onSFLoaded; - // Unhide all elements which start out hidden - EAll('.initially-hidden').forEach((e)=>e.classList.remove('initially-hidden')); - E('#btn-reset').addEventListener('click',()=>SF.resetDb()); - const taInput = E('#input'); - const btnClearIn = E('#btn-clear'); - btnClearIn.addEventListener('click',function(){ - taInput.value = ''; - },false); - // Ctrl-enter and shift-enter both run the current SQL. - taInput.addEventListener('keydown',function(ev){ - if((ev.ctrlKey || ev.shiftKey) && 13 === ev.keyCode){ - ev.preventDefault(); - ev.stopPropagation(); - btnShellExec.click(); - } - }, false); - const taOutput = E('#output'); - const btnClearOut = E('#btn-clear-output'); - btnClearOut.addEventListener('click',function(){ - taOutput.value = ''; - if(SF.jqTerm) SF.jqTerm.clear(); - },false); - const btnShellExec = E('#btn-shell-exec'); - btnShellExec.addEventListener('click',function(ev){ - let sql; - ev.preventDefault(); - if(taInput.selectionStart e.addEventListener('click', cmdClick, false) - ); - - btnInterrupt.addEventListener('click',function(){ - SF.wMsg('interrupt'); - }); - - /** Initiate a download of the db. */ - const btnExport = E('#btn-export'); - const eLoadDb = E('#load-db'); - const btnLoadDb = E('#btn-load-db'); - btnLoadDb.addEventListener('click', ()=>eLoadDb.click()); - /** - Enables (if passed true) or disables all UI elements which - "might," if timed "just right," interfere with an - in-progress db import/export/exec operation. - */ - const enableMutatingElements = function f(enable){ - if(!f._elems){ - f._elems = [ - /* UI elements to disable while import/export are - running. Normally the export is fast enough - that this won't matter, but we really don't - want to be reading (from outside of sqlite) the - db when the user taps btnShellExec. */ - btnShellExec, btnExport, eLoadDb - ]; - } - f._elems.forEach( enable - ? 
(e)=>e.removeAttribute('disabled') - : (e)=>e.setAttribute('disabled','disabled') ); - }; - btnExport.addEventListener('click',function(){ - enableMutatingElements(false); - SF.wMsg('db-export'); - }); - SF.addMsgHandler('db-export', function(ev){ - enableMutatingElements(true); - ev = ev.data; - if(ev.error){ - SF.echo("Export failed:",ev.error); - return; - } - const blob = new Blob([ev.buffer], {type:"application/x-sqlite3"}); - const a = document.createElement('a'); - document.body.appendChild(a); - a.href = window.URL.createObjectURL(blob); - a.download = ev.filename; - a.addEventListener('click',function(){ - setTimeout(function(){ - SF.echo("Exported (possibly auto-downloaded):",ev.filename); - window.URL.revokeObjectURL(a.href); - a.remove(); - },500); - }); - a.click(); - }); - /** - Handle load/import of an external db file. - */ - eLoadDb.addEventListener('change',function(){ - const f = this.files[0]; - const r = new FileReader(); - const status = {loaded: 0, total: 0}; - enableMutatingElements(false); - r.addEventListener('loadstart', function(){ - SF.echo("Loading",f.name,"..."); - }); - r.addEventListener('progress', function(ev){ - SF.echo("Loading progress:",ev.loaded,"of",ev.total,"bytes."); - }); - const that = this; - r.addEventListener('load', function(){ - enableMutatingElements(true); - SF.echo("Loaded",f.name+". Opening db..."); - SF.wMsg('open',{ - filename: f.name, - buffer: this.result - }); - }); - r.addEventListener('error',function(){ - enableMutatingElements(true); - SF.echo("Loading",f.name,"failed for unknown reasons."); - }); - r.addEventListener('abort',function(){ - enableMutatingElements(true); - SF.echo("Cancelled loading of",f.name+"."); - }); - r.readAsArrayBuffer(f); - }); - - EAll('fieldset.collapsible').forEach(function(fs){ - const btnToggle = E(fs,'legend > .fieldset-toggle'), - content = EAll(fs,':scope > div'); - btnToggle.addEventListener('click', function(){ - fs.classList.toggle('collapsed'); - content.forEach((d)=>d.classList.toggle('hidden')); - }, false); - }); - - /** - Given a DOM element, this routine measures its "effective - height", which is the bounding top/bottom range of this element - and all of its children, recursively. For some DOM structure - cases, a parent may have a reported height of 0 even though - children have non-0 sizes. - - Returns 0 if !e or if the element really has no height. - */ - const effectiveHeight = function f(e){ - if(!e) return 0; - if(!f.measure){ - f.measure = function callee(e, depth){ - if(!e) return; - const m = e.getBoundingClientRect(); - if(0===depth){ - callee.top = m.top; - callee.bottom = m.bottom; - }else{ - callee.top = m.top ? Math.min(callee.top, m.top) : callee.top; - callee.bottom = Math.max(callee.bottom, m.bottom); - } - Array.prototype.forEach.call(e.children,(e)=>callee(e,depth+1)); - if(0===depth){ - //console.debug("measure() height:",e.className, callee.top, callee.bottom, (callee.bottom - callee.top)); - f.extra += callee.bottom - callee.top; - } - return f.extra; - }; - } - f.extra = 0; - f.measure(e,0); - return f.extra; - }; - - /** - Returns a function, that, as long as it continues to be invoked, - will not be triggered. The function will be called after it stops - being called for N milliseconds. If `immediate` is passed, call - the callback immediately and hinder future invocations until at - least the given time has passed. - - If passed only 1 argument, or passed a falsy 2nd argument, - the default wait time set in this function's $defaultDelay - property is used. 
- - Source: underscore.js, by way of https://davidwalsh.name/javascript-debounce-function - */ - const debounce = function f(func, wait, immediate) { - var timeout; - if(!wait) wait = f.$defaultDelay; - return function() { - const context = this, args = Array.prototype.slice.call(arguments); - const later = function() { - timeout = undefined; - if(!immediate) func.apply(context, args); - }; - const callNow = immediate && !timeout; - clearTimeout(timeout); - timeout = setTimeout(later, wait); - if(callNow) func.apply(context, args); - }; - }; - debounce.$defaultDelay = 500 /*arbitrary*/; - - const ForceResizeKludge = (function(){ - /* Workaround for Safari mayhem regarding use of vh CSS - units.... We cannot use vh units to set the main view - size because Safari chokes on that, so we calculate - that height here. Larger than ~95% is too big for - Firefox on Android, causing the input area to move - off-screen. */ - const appViews = EAll('.app-view'); - const elemsToCount = [ - /* Elements which we need to always count in the - visible body size. */ - E('body > header'), - E('body > footer') - ]; - const resized = function f(){ - if(f.$disabled) return; - const wh = window.innerHeight; - var ht; - var extra = 0; - elemsToCount.forEach((e)=>e ? extra += effectiveHeight(e) : false); - ht = wh - extra; - appViews.forEach(function(e){ - e.style.height = - e.style.maxHeight = [ - "calc(", (ht>=100 ? ht : 100), "px", - " - 2em"/*fudge value*/,")" - /* ^^^^ hypothetically not needed, but both - Chrome/FF on Linux will force scrollbars on the - body if this value is too small. */ - ].join(''); - }); - }; - resized.$disabled = true/*gets deleted when setup is finished*/; - window.addEventListener('resize', debounce(resized, 250), false); - return resized; - })(); - - /** Set up a selection list of examples */ - (function(){ - const xElem = E('#select-examples'); - const examples = [ - {name: "Help", sql: -`-- ================================================ --- Use ctrl-enter or shift-enter to execute sqlite3 --- shell commands and SQL. --- If a subset of the text is currently selected, --- only that part is executed. 
--- ================================================ -.help`}, - {name: "Timer on", sql: ".timer on"}, - {name: "Setup table T", sql:`.nullvalue NULL -CREATE TABLE t(a,b); -INSERT INTO t(a,b) VALUES('abc',123),('def',456),(NULL,789),('ghi',012); -SELECT * FROM t;`}, - {name: "Table list", sql: ".tables"}, - {name: "Box Mode", sql: ".mode box"}, - {name: "JSON Mode", sql: ".mode json"}, - {name: "Mandlebrot", sql: `WITH RECURSIVE - xaxis(x) AS (VALUES(-2.0) UNION ALL SELECT x+0.05 FROM xaxis WHERE x<1.2), - yaxis(y) AS (VALUES(-1.0) UNION ALL SELECT y+0.1 FROM yaxis WHERE y<1.0), - m(iter, cx, cy, x, y) AS ( - SELECT 0, x, y, 0.0, 0.0 FROM xaxis, yaxis - UNION ALL - SELECT iter+1, cx, cy, x*x-y*y + cx, 2.0*x*y + cy FROM m - WHERE (x*x + y*y) < 4.0 AND iter<28 - ), - m2(iter, cx, cy) AS ( - SELECT max(iter), cx, cy FROM m GROUP BY cx, cy - ), - a(t) AS ( - SELECT group_concat( substr(' .+*#', 1+min(iter/7,4), 1), '') - FROM m2 GROUP BY cy - ) -SELECT group_concat(rtrim(t),x'0a') as Mandelbrot FROM a;`} - ]; - const newOpt = function(lbl,val){ - const o = document.createElement('option'); - o.value = val; - if(!val) o.setAttribute('disabled',true); - o.appendChild(document.createTextNode(lbl)); - xElem.appendChild(o); - }; - newOpt("Examples (replaces input!)"); - examples.forEach((o)=>newOpt(o.name, o.sql)); - //xElem.setAttribute('disabled',true); - xElem.selectedIndex = 0; - xElem.addEventListener('change', function(){ - taInput.value = '-- ' + - this.selectedOptions[0].innerText + - '\n' + this.value; - SF.dbExec(this.value); - }); - })()/* example queries */; - - SF.echo(null/*clear any output generated by the init process*/); - if(window.jQuery && window.jQuery.terminal){ - /* Set up the terminal-style view... */ - const eTerm = window.jQuery('#view-terminal').empty(); - SF.jqTerm = eTerm.terminal(SF.dbExec.bind(SF),{ - prompt: 'sqlite> ', - greetings: false /* note that the docs incorrectly call this 'greeting' */ - }); - /* Set up a button to toggle the views... */ - const head = E('header#titlebar'); - const btnToggleView = document.createElement('button'); - btnToggleView.appendChild(document.createTextNode("Toggle View")); - head.appendChild(btnToggleView); - btnToggleView.addEventListener('click',function f(){ - EAll('.app-view').forEach(e=>e.classList.toggle('hidden')); - if(document.body.classList.toggle('terminal-mode')){ - ForceResizeKludge(); - } - }, false); - btnToggleView.click()/*default to terminal view*/; - } - SF.dbExec(null/*init the db and output the header*/); - SF.echo('This experimental app is provided in the hope that it', - 'may prove interesting or useful but is not an officially', - 'supported deliverable of the sqlite project. It is subject to', - 'any number of changes or outright removal at any time.\n'); - delete ForceResizeKludge.$disabled; - ForceResizeKludge(); - - btnShellExec.click(); - }/*onSFLoaded()*/; + }, + /** Removes all message handlers for the given message type. */ + clearMsgHandlers: function(type){ + delete this._msgMap[type]; + return this; + }, + /* Posts a message in the form {type, data} to the db worker. Returns this. */ + wMsg: function(type,data,transferables){ + this.worker.postMessage({type, data}, transferables || []); + return this; + }, + /** + Prompts for confirmation and, if accepted, deletes + all content and tables in the (transient) database. 
+ */ + resetDb: function(){ + if(window.confirm("Really destroy all content and tables " + +"in the (transient) db?")){ + this.wMsg('db-reset'); + } + return this; + }, + /** Stores this object's config in the browser's storage. */ + storeConfig: function(){ + storage.setJSON(configStorageKey,this.config); + } + }; + + if(1){ /* Restore SF.config */ + const storedConfig = storage.getJSON(configStorageKey); + if(storedConfig){ + /* Copy all properties to SF.config which are currently in + storedConfig. We don't bother copying any other + properties: those have been removed from the app in the + meantime. */ + Object.keys(SF.config).forEach(function(k){ + if(storedConfig.hasOwnProperty(k)){ + SF.config[k] = storedConfig[k]; + } + }); + } + } + + SF.worker = new Worker('fiddle-worker.js'+self.location.search); + SF.worker.onmessage = (ev)=>SF.runMsgHandlers(ev.data); + SF.addMsgHandler(['stdout', 'stderr'], (ev)=>SF.echo(ev.data)); + + /* querySelectorAll() proxy */ + const EAll = function(/*[element=document,] cssSelector*/){ + return (arguments.length>1 ? arguments[0] : document) + .querySelectorAll(arguments[arguments.length-1]); + }; + /* querySelector() proxy */ + const E = function(/*[element=document,] cssSelector*/){ + return (arguments.length>1 ? arguments[0] : document) + .querySelector(arguments[arguments.length-1]); + }; + + /** Handles status updates from the Emscripten Module object. */ + SF.addMsgHandler('module', function f(ev){ + ev = ev.data; + if('status'!==ev.type){ + console.warn("Unexpected module-type message:",ev); + return; + } + if(!f.ui){ + f.ui = { + status: E('#module-status'), + progress: E('#module-progress'), + spinner: E('#module-spinner') + }; + } + const msg = ev.data; + if(f.ui.progres){ + progress.value = msg.step; + progress.max = msg.step + 1/*we don't know how many steps to expect*/; + } + if(1==msg.step){ + f.ui.progress.classList.remove('hidden'); + f.ui.spinner.classList.remove('hidden'); + } + if(msg.text){ + f.ui.status.classList.remove('hidden'); + f.ui.status.innerText = msg.text; + }else{ + if(f.ui.progress){ + f.ui.progress.remove(); + f.ui.spinner.remove(); + delete f.ui.progress; + delete f.ui.spinner; + } + f.ui.status.classList.add('hidden'); + /* The module can post messages about fatal problems, + e.g. an exit() being triggered or assertion failure, + after the last "load" message has arrived, so + leave f.ui.status and message listener intact. */ + } + }); + + /** + The 'fiddle-ready' event is fired (with no payload) when the + wasm module has finished loading. Interestingly, that happens + _before_ the final module:status event */ + SF.addMsgHandler('fiddle-ready', function(){ + SF.clearMsgHandlers('fiddle-ready'); + self.onSFLoaded(); + }); + + /** + Performs all app initialization which must wait until after the + worker module is loaded. This function removes itself when it's + called. + */ + self.onSFLoaded = function(){ + delete this.onSFLoaded; + // Unhide all elements which start out hidden + EAll('.initially-hidden').forEach((e)=>e.classList.remove('initially-hidden')); + E('#btn-reset').addEventListener('click',()=>SF.resetDb()); + const taInput = E('#input'); + const btnClearIn = E('#btn-clear'); + btnClearIn.addEventListener('click',function(){ + taInput.value = ''; + },false); + // Ctrl-enter and shift-enter both run the current SQL. 
+ taInput.addEventListener('keydown',function(ev){ + if((ev.ctrlKey || ev.shiftKey) && 13 === ev.keyCode){ + ev.preventDefault(); + ev.stopPropagation(); + btnShellExec.click(); + } + }, false); + const taOutput = E('#output'); + const btnClearOut = E('#btn-clear-output'); + btnClearOut.addEventListener('click',function(){ + taOutput.value = ''; + if(SF.jqTerm) SF.jqTerm.clear(); + },false); + const btnShellExec = E('#btn-shell-exec'); + btnShellExec.addEventListener('click',function(ev){ + let sql; + ev.preventDefault(); + if(taInput.selectionStart e.addEventListener('click', cmdClick, false) + ); + + btnInterrupt.addEventListener('click',function(){ + SF.wMsg('interrupt'); + }); + + /** Initiate a download of the db. */ + const btnExport = E('#btn-export'); + const eLoadDb = E('#load-db'); + const btnLoadDb = E('#btn-load-db'); + btnLoadDb.addEventListener('click', ()=>eLoadDb.click()); + /** + Enables (if passed true) or disables all UI elements which + "might," if timed "just right," interfere with an + in-progress db import/export/exec operation. + */ + const enableMutatingElements = function f(enable){ + if(!f._elems){ + f._elems = [ + /* UI elements to disable while import/export are + running. Normally the export is fast enough + that this won't matter, but we really don't + want to be reading (from outside of sqlite) the + db when the user taps btnShellExec. */ + btnShellExec, btnExport, eLoadDb + ]; + } + f._elems.forEach( enable + ? (e)=>e.removeAttribute('disabled') + : (e)=>e.setAttribute('disabled','disabled') ); + }; + btnExport.addEventListener('click',function(){ + enableMutatingElements(false); + SF.wMsg('db-export'); + }); + SF.addMsgHandler('db-export', function(ev){ + enableMutatingElements(true); + ev = ev.data; + if(ev.error){ + SF.echo("Export failed:",ev.error); + return; + } + const blob = new Blob([ev.buffer], + {type:"application/x-sqlite3"}); + const a = document.createElement('a'); + document.body.appendChild(a); + a.href = window.URL.createObjectURL(blob); + a.download = ev.filename; + a.addEventListener('click',function(){ + setTimeout(function(){ + SF.echo("Exported (possibly auto-downloaded):",ev.filename); + window.URL.revokeObjectURL(a.href); + a.remove(); + },500); + }); + a.click(); + }); + /** + Handle load/import of an external db file. + */ + eLoadDb.addEventListener('change',function(){ + const f = this.files[0]; + const r = new FileReader(); + const status = {loaded: 0, total: 0}; + enableMutatingElements(false); + r.addEventListener('loadstart', function(){ + SF.echo("Loading",f.name,"..."); + }); + r.addEventListener('progress', function(ev){ + SF.echo("Loading progress:",ev.loaded,"of",ev.total,"bytes."); + }); + const that = this; + r.addEventListener('load', function(){ + enableMutatingElements(true); + SF.echo("Loaded",f.name+". 
Opening db..."); + SF.wMsg('open',{ + filename: f.name, + buffer: this.result + }, [this.result]); + }); + r.addEventListener('error',function(){ + enableMutatingElements(true); + SF.echo("Loading",f.name,"failed for unknown reasons."); + }); + r.addEventListener('abort',function(){ + enableMutatingElements(true); + SF.echo("Cancelled loading of",f.name+"."); + }); + r.readAsArrayBuffer(f); + }); + + EAll('fieldset.collapsible').forEach(function(fs){ + const btnToggle = E(fs,'legend > .fieldset-toggle'), + content = EAll(fs,':scope > div'); + btnToggle.addEventListener('click', function(){ + fs.classList.toggle('collapsed'); + content.forEach((d)=>d.classList.toggle('hidden')); + }, false); + }); + + /** + Given a DOM element, this routine measures its "effective + height", which is the bounding top/bottom range of this element + and all of its children, recursively. For some DOM structure + cases, a parent may have a reported height of 0 even though + children have non-0 sizes. + + Returns 0 if !e or if the element really has no height. + */ + const effectiveHeight = function f(e){ + if(!e) return 0; + if(!f.measure){ + f.measure = function callee(e, depth){ + if(!e) return; + const m = e.getBoundingClientRect(); + if(0===depth){ + callee.top = m.top; + callee.bottom = m.bottom; + }else{ + callee.top = m.top ? Math.min(callee.top, m.top) : callee.top; + callee.bottom = Math.max(callee.bottom, m.bottom); + } + Array.prototype.forEach.call(e.children,(e)=>callee(e,depth+1)); + if(0===depth){ + //console.debug("measure() height:",e.className, callee.top, callee.bottom, (callee.bottom - callee.top)); + f.extra += callee.bottom - callee.top; + } + return f.extra; + }; + } + f.extra = 0; + f.measure(e,0); + return f.extra; + }; + + /** + Returns a function, that, as long as it continues to be invoked, + will not be triggered. The function will be called after it stops + being called for N milliseconds. If `immediate` is passed, call + the callback immediately and hinder future invocations until at + least the given time has passed. + + If passed only 1 argument, or passed a falsy 2nd argument, + the default wait time set in this function's $defaultDelay + property is used. + + Source: underscore.js, by way of https://davidwalsh.name/javascript-debounce-function + */ + const debounce = function f(func, wait, immediate) { + var timeout; + if(!wait) wait = f.$defaultDelay; + return function() { + const context = this, args = Array.prototype.slice.call(arguments); + const later = function() { + timeout = undefined; + if(!immediate) func.apply(context, args); + }; + const callNow = immediate && !timeout; + clearTimeout(timeout); + timeout = setTimeout(later, wait); + if(callNow) func.apply(context, args); + }; + }; + debounce.$defaultDelay = 500 /*arbitrary*/; + + const ForceResizeKludge = (function(){ + /* Workaround for Safari mayhem regarding use of vh CSS + units.... We cannot use vh units to set the main view + size because Safari chokes on that, so we calculate + that height here. Larger than ~95% is too big for + Firefox on Android, causing the input area to move + off-screen. */ + const appViews = EAll('.app-view'); + const elemsToCount = [ + /* Elements which we need to always count in the + visible body size. */ + E('body > header'), + E('body > footer') + ]; + const resized = function f(){ + if(f.$disabled) return; + const wh = window.innerHeight; + var ht; + var extra = 0; + elemsToCount.forEach((e)=>e ? 
extra += effectiveHeight(e) : false); + ht = wh - extra; + appViews.forEach(function(e){ + e.style.height = + e.style.maxHeight = [ + "calc(", (ht>=100 ? ht : 100), "px", + " - 2em"/*fudge value*/,")" + /* ^^^^ hypothetically not needed, but both + Chrome/FF on Linux will force scrollbars on the + body if this value is too small. */ + ].join(''); + }); + }; + resized.$disabled = true/*gets deleted when setup is finished*/; + window.addEventListener('resize', debounce(resized, 250), false); + return resized; + })(); + + /** Set up a selection list of examples */ + (function(){ + const xElem = E('#select-examples'); + const examples = [ + {name: "Help", sql: [ + "-- ================================================\n", + "-- Use ctrl-enter or shift-enter to execute sqlite3\n", + "-- shell commands and SQL.\n", + "-- If a subset of the text is currently selected,\n", + "-- only that part is executed.\n", + "-- ================================================\n", + ".help\n" + ]}, + //{name: "Timer on", sql: ".timer on"}, + // ^^^ re-enable if emscripten re-enables getrusage() + {name: "Setup table T", sql:[ + ".nullvalue NULL\n", + "CREATE TABLE t(a,b);\n", + "INSERT INTO t(a,b) VALUES('abc',123),('def',456),(NULL,789),('ghi',012);\n", + "SELECT * FROM t;\n" + ]}, + {name: "Table list", sql: ".tables"}, + {name: "Box Mode", sql: ".mode box"}, + {name: "JSON Mode", sql: ".mode json"}, + {name: "Mandlebrot", sql:[ + "WITH RECURSIVE", + " xaxis(x) AS (VALUES(-2.0) UNION ALL SELECT x+0.05 FROM xaxis WHERE x<1.2),\n", + " yaxis(y) AS (VALUES(-1.0) UNION ALL SELECT y+0.1 FROM yaxis WHERE y<1.0),\n", + " m(iter, cx, cy, x, y) AS (\n", + " SELECT 0, x, y, 0.0, 0.0 FROM xaxis, yaxis\n", + " UNION ALL\n", + " SELECT iter+1, cx, cy, x*x-y*y + cx, 2.0*x*y + cy FROM m \n", + " WHERE (x*x + y*y) < 4.0 AND iter<28\n", + " ),\n", + " m2(iter, cx, cy) AS (\n", + " SELECT max(iter), cx, cy FROM m GROUP BY cx, cy\n", + " ),\n", + " a(t) AS (\n", + " SELECT group_concat( substr(' .+*#', 1+min(iter/7,4), 1), '') \n", + " FROM m2 GROUP BY cy\n", + " )\n", + "SELECT group_concat(rtrim(t),x'0a') as Mandelbrot FROM a;\n", + ]} + ]; + const newOpt = function(lbl,val){ + const o = document.createElement('option'); + if(Array.isArray(val)) val = val.join(''); + o.value = val; + if(!val) o.setAttribute('disabled',true); + o.appendChild(document.createTextNode(lbl)); + xElem.appendChild(o); + }; + newOpt("Examples (replaces input!)"); + examples.forEach((o)=>newOpt(o.name, o.sql)); + //xElem.setAttribute('disabled',true); + xElem.selectedIndex = 0; + xElem.addEventListener('change', function(){ + taInput.value = '-- ' + + this.selectedOptions[0].innerText + + '\n' + this.value; + SF.dbExec(this.value); + }); + })()/* example queries */; + + //SF.echo(null/*clear any output generated by the init process*/); + if(window.jQuery && window.jQuery.terminal){ + /* Set up the terminal-style view... */ + const eTerm = window.jQuery('#view-terminal').empty(); + SF.jqTerm = eTerm.terminal(SF.dbExec.bind(SF),{ + prompt: 'sqlite> ', + greetings: false /* note that the docs incorrectly call this 'greeting' */ + }); + /* Set up a button to toggle the views... 
*/ + const head = E('header#titlebar'); + const btnToggleView = document.createElement('button'); + btnToggleView.appendChild(document.createTextNode("Toggle View")); + head.appendChild(btnToggleView); + btnToggleView.addEventListener('click',function f(){ + EAll('.app-view').forEach(e=>e.classList.toggle('hidden')); + if(document.body.classList.toggle('terminal-mode')){ + ForceResizeKludge(); + } + }, false); + btnToggleView.click()/*default to terminal view*/; + } + SF.echo('This experimental app is provided in the hope that it', + 'may prove interesting or useful but is not an officially', + 'supported deliverable of the sqlite project. It is subject to', + 'any number of changes or outright removal at any time.\n'); + const urlParams = new URL(self.location.href).searchParams; + SF.dbExec(urlParams.get('sql') || null); + delete ForceResizeKludge.$disabled; + ForceResizeKludge(); + }/*onSFLoaded()*/; })(); ADDED ext/wasm/index-dist.html Index: ext/wasm/index-dist.html ================================================================== --- /dev/null +++ ext/wasm/index-dist.html @@ -0,0 +1,90 @@ + + + + + + + sqlite3 WASM Demo Page Index + + + +
sqlite3 WASM demo pages
+
+
Below is the list of demo pages for the sqlite3 WASM + builds. The intent is that this page be run + using the functional equivalent of:
+
althttpd -enable-sab -page index.html
+
and the individual pages be started in their own tab. + Warnings and Caveats: +
    +
  • Some of these pages require that the web server emit the + so-called + COOP + and + COEP + headers. althttpd requires the + -enable-sab flag for that. +
  • +
+
+
The tests and demos... +
    +
  • Core-most tests +
      +
    • tester1: Core unit and + regression tests for the various APIs and surrounding + utility code.
    • +
    • tester1-worker: same thing + but running in a Worker.
    • +
    +
  • +
  • Higher-level apps and demos... +
      +
    • demo-123 provides a + no-nonsense example of adding sqlite3 support to a web + page in the UI thread (a minimal sketch of this pattern appears after this list).
    • +
    • demo-123-worker is + the same as demo-123 but loads and runs + sqlite3 from a Worker thread.
    • +
    • demo-jsstorage: very basic + demo of using the key-value VFS for storing a persistent db + in JS localStorage or sessionStorage.
    • +
    • demo-worker1: + Worker-based wrapper of the OO API #1. Its Promise-based + wrapper is significantly easier to use, however.
    • +
    • demo-worker1-promiser: + a demo of the Promise-based wrapper of the Worker1 API.
    • +
    +
  • +
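As referenced in the demo-123 entry above, the general shape of adding sqlite3 to a page in the UI thread is sketched below. This is a rough illustration, not the demo's actual code: it assumes the page has already loaded the sqlite3 JS/WASM bundle via a script tag, and the table name t is purely illustrative.

    // Minimal sketch of UI-thread usage, assuming sqlite3.js has been loaded:
    sqlite3InitModule().then((sqlite3)=>{
      const db = new sqlite3.oo1.DB(':memory:');
      try {
        db.exec("CREATE TABLE t(a); INSERT INTO t(a) VALUES(1),(2),(3);");
        console.log("sum of t.a =", db.selectValue("SELECT sum(a) FROM t"));
      } finally {
        db.close();
      }
    });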
+
+ + + + ADDED ext/wasm/index.html Index: ext/wasm/index.html ================================================================== --- /dev/null +++ ext/wasm/index.html @@ -0,0 +1,115 @@ + + + + + + + + sqlite3 WASM Testing Page Index + + +
sqlite3 WASM test pages
+
+
Below is the list of test pages for the sqlite3 WASM + builds. All of them require that this directory have been + "make"d first. The intent is that this page be run + using:
+
althttpd -enable-sab -page index.html
+
and the individual tests be started in their own tab. + Warnings and Caveats: +
    +
  • Some of these pages require that + the web server emit the so-called + COOP + and + COEP + headers. althttpd requires the + -enable-sab flag for that. A sketch of the equivalent raw response headers follows this list. +
  • +
  • Any OPFS-related pages require a very recent version of + Chrome or Chromium (v102 at least, possibly newer). OPFS + support in the other major browsers is pending. Development + and testing are currently done against a dev-channel release + of Chrome (v107 as of 2022-09-26). +
  • +
  • Whether or not WASMFS/OPFS support is enabled on any given + page may depend on build-time options which are off by + default. +
  • +
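Regarding the COOP/COEP caveat above: a web server other than althttpd would need to emit the functional equivalent of the following two HTTP response headers in order for SharedArrayBuffer to be available. This is a generic sketch of the required header values, not althttpd-specific configuration, and the exact configuration syntax depends on the server:

    Cross-Origin-Opener-Policy: same-origin
    Cross-Origin-Embedder-Policy: require-corp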
+
+
The tests and demos... +
    +
  • Core-most tests +
      +
    • tester1: Core unit and + regression tests for the various APIs and surrounding + utility code.
    • +
    • tester1-worker: same thing + but running in a Worker.
    • +
    +
  • +
  • High-level apps and demos... +
      +
    • fiddle is an HTML front-end + to a wasm build of the sqlite3 shell.
    • +
    • demo-123 provides a + no-nonsense example of adding sqlite3 support to a web + page in the UI thread.
    • +
    • demo-123-worker is + the same as demo-123 but loads and runs + sqlite3 from a Worker thread.
    • +
    • demo-jsstorage: very basic + demo of using the key-value VFS for storing a persistent db + in JS localStorage or sessionStorage (a hypothetical usage sketch appears after this list).
    • +
    • demo-worker1: + Worker-based wrapper of the OO API #1. Its Promise-based + wrapper is significantly easier to use, however.
    • +
    • demo-worker1-promiser: + a demo of the Promise-based wrapper of the Worker1 API.
    • +
    +
  • +
  • speedtest1 ports (sqlite3's primary benchmarking tool)... + +
  • +
  • The obligatory "misc." category... + +
  • + +
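As noted in the demo-jsstorage entry above, the key-value VFS persists a database into localStorage or sessionStorage. The following is a hypothetical sketch only: it assumes this build exposes a kvvfs-backed convenience wrapper named sqlite3.oo1.JsStorageDb, which may differ from the actual API; demo-jsstorage is the canonical reference.

    // Hypothetical sketch: a db persisted in window.localStorage via kvvfs.
    sqlite3InitModule().then((sqlite3)=>{
      const db = new sqlite3.oo1.JsStorageDb('local') /* or 'session' */;
      try {
        db.exec("CREATE TABLE IF NOT EXISTS t(a); INSERT INTO t(a) VALUES(1);");
        console.log("rows persisted so far:",
                    db.selectValue("SELECT count(*) FROM t"));
      } finally {
        db.close();
      }
    });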
+
+ + + + Index: ext/wasm/jaccwabyt/jaccwabyt.js ================================================================== --- ext/wasm/jaccwabyt/jaccwabyt.js +++ ext/wasm/jaccwabyt/jaccwabyt.js @@ -359,11 +359,11 @@ Uses __lookupMember(obj.structInfo,memberName) to find a member, throwing if not found. Returns its signature, either in this framework's native format or in Emscripten format. */ const __memberSignature = function f(obj,memberName,emscriptenFormat=false){ - if(!f._) f._ = (x)=>x.replace(/[^vipPsjrd]/g,'').replace(/[pPs]/g,'i'); + if(!f._) f._ = (x)=>x.replace(/[^vipPsjrd]/g,"").replace(/[pPs]/g,'i'); const m = __lookupMember(obj.structInfo, memberName, true); return emscriptenFormat ? f._(m.signature) : m.signature; }; /** @@ -392,11 +392,21 @@ return a; }); const __utf8Decoder = new TextDecoder('utf-8'); const __utf8Encoder = new TextEncoder(); - + /** Internal helper to use in operations which need to distinguish + between SharedArrayBuffer heap memory and non-shared heap. */ + const __SAB = ('undefined'===typeof SharedArrayBuffer) + ? function(){} : SharedArrayBuffer; + const __utf8Decode = function(arrayBuffer, begin, end){ + return __utf8Decoder.decode( + (arrayBuffer.buffer instanceof __SAB) + ? arrayBuffer.slice(begin, end) + : arrayBuffer.subarray(begin, end) + ); + }; /** Uses __lookupMember() to find the given obj.structInfo key. Returns that member if it is a string, else returns false. If the member is not found, throws if tossIfNotFound is true, else returns false. @@ -435,12 +445,11 @@ const mem = heap(); for( ; mem[pos]!==0; ++pos ) { //log("mem[",pos,"]",mem[pos]); }; //log("addr =",addr,"pos =",pos); - if(addr===pos) return ""; - return __utf8Decoder.decode(new Uint8Array(mem.buffer, addr, pos-addr)); + return (addr===pos) ? "" : __utf8Decode(mem, addr, pos); }; /** Adds value v to obj.ondispose, creating ondispose, or converting it to an array, if needed. Index: ext/wasm/jaccwabyt/jaccwabyt.md ================================================================== --- ext/wasm/jaccwabyt/jaccwabyt.md +++ ext/wasm/jaccwabyt/jaccwabyt.md @@ -807,13 +807,11 @@ - `pointer` A read-only numeric property which is the "pointer" returned by the configured allocator when this object is constructed. After `dispose()` (inherited from [StructType][]) is called, this property - has the `undefined` value. When passing instances of this struct to - C-bound code, `pointer` is the value which must be passed in place - of a C-side struct pointer. When calling C-side code which takes a + has the `undefined` value. When calling C-side code which takes a pointer to a struct of this type, simply pass it `myStruct.pointer`. Appendices ============================================================ DELETED ext/wasm/jaccwabyt/jaccwabyt_test.c Index: ext/wasm/jaccwabyt/jaccwabyt_test.c ================================================================== --- ext/wasm/jaccwabyt/jaccwabyt_test.c +++ /dev/null @@ -1,178 +0,0 @@ -#include -#include /* memset() */ -#include /* offsetof() */ -#include /* snprintf() */ -#include /* int64_t */ -/*#include */ /* malloc/free(), needed for emscripten exports. */ -extern void * malloc(size_t); -extern void free(void *); - -/* -** 2022-06-25 -** -** The author disclaims copyright to this source code. In place of a -** legal notice, here is a blessing: -** -** * May you do good and not evil. -** * May you find forgiveness for yourself and forgive others. -** * May you share freely, never taking more than you give. 
-** -*********************************************************************** -** -** Utility functions for use with the emscripten/WASM bits. These -** functions ARE NOT part of the sqlite3 public API. They are strictly -** for internal use by the JS/WASM bindings. -** -** This file is intended to be WASM-compiled together with sqlite3.c, -** e.g.: -** -** emcc ... sqlite3.c wasm_util.c -*/ - -/* -** Experimenting with output parameters. -*/ -int jaccwabyt_test_intptr(int * p){ - if(1==((int)p)%3){ - /* kludge to get emscripten to export malloc() and free() */; - free(malloc(0)); - } - return *p = *p * 2; -} -int64_t jaccwabyt_test_int64_max(void){ - return (int64_t)0x7fffffffffffffff; -} -int64_t jaccwabyt_test_int64_min(void){ - return ~jaccwabyt_test_int64_max(); -} -int64_t jaccwabyt_test_int64_times2(int64_t x){ - return x * 2; -} - -void jaccwabyt_test_int64_minmax(int64_t * min, int64_t *max){ - *max = jaccwabyt_test_int64_max(); - *min = jaccwabyt_test_int64_min(); - /*printf("minmax: min=%lld, max=%lld\n", *min, *max);*/ -} -int64_t jaccwabyt_test_int64ptr(int64_t * p){ - /*printf("jaccwabyt_test_int64ptr( @%lld = 0x%llx )\n", (int64_t)p, *p);*/ - return *p = *p * 2; -} - -void jaccwabyt_test_stack_overflow(int recurse){ - if(recurse) jaccwabyt_test_stack_overflow(recurse); -} - -struct WasmTestStruct { - int v4; - void * ppV; - const char * cstr; - int64_t v8; - void (*xFunc)(void*); -}; -typedef struct WasmTestStruct WasmTestStruct; -void jaccwabyt_test_struct(WasmTestStruct * s){ - if(s){ - s->v4 *= 2; - s->v8 = s->v4 * 2; - s->ppV = s; - s->cstr = __FILE__; - if(s->xFunc) s->xFunc(s); - } - return; -} - -/** For testing the 'string-free' whwasmutil.xWrap() conversion. */ -char * jaccwabyt_test_str_hello(int fail){ - char * s = fail ? 0 : (char *)malloc(6); - if(s){ - memcpy(s, "hello", 5); - s[5] = 0; - } - return s; -} - -/* -** Returns a NUL-terminated string containing a JSON-format metadata -** regarding C structs, for use with the StructBinder API. The -** returned memory is static and is only written to the first time -** this is called. -*/ -const char * jaccwabyt_test_ctype_json(void){ - static char strBuf[1024 * 8] = {0}; - int n = 0, structCount = 0, groupCount = 0; - char * pos = &strBuf[1] /* skip first byte for now to help protect - against a small race condition */; - char const * const zEnd = pos + sizeof(strBuf); - if(strBuf[0]) return strBuf; - /* Leave first strBuf[0] at 0 until the end to help guard against a - tiny race condition. If this is called twice concurrently, they - might end up both writing to strBuf, but they'll both write the - same thing, so that's okay. If we set byte 0 up front then the - 2nd instance might return a partially-populated string. */ - - //////////////////////////////////////////////////////////////////// - // First we need to build up our macro framework... - //////////////////////////////////////////////////////////////////// - // Core output macros... -#define lenCheck assert(pos < zEnd - 100) -#define outf(format,...) \ - pos += snprintf(pos, ((size_t)(zEnd - pos)), format, __VA_ARGS__); \ - lenCheck -#define out(TXT) outf("%s",TXT) -#define CloseBrace(LEVEL) \ - assert(LEVEL<5); memset(pos, '}', LEVEL); pos+=LEVEL; lenCheck - - //////////////////////////////////////////////////////////////////// - // Macros for emitting StructBinder descriptions... -#define StructBinder__(TYPE) \ - n = 0; \ - outf("%s{", (structCount++ ? 
", " : "")); \ - out("\"name\": \"" # TYPE "\","); \ - outf("\"sizeof\": %d", (int)sizeof(TYPE)); \ - out(",\"members\": {"); -#define StructBinder_(T) StructBinder__(T) -// ^^^ indirection needed to expand CurrentStruct -#define StructBinder StructBinder_(CurrentStruct) -#define _StructBinder CloseBrace(2) -#define M(MEMBER,SIG) \ - outf("%s\"%s\": " \ - "{\"offset\":%d,\"sizeof\": %d,\"signature\":\"%s\"}", \ - (n++ ? ", " : ""), #MEMBER, \ - (int)offsetof(CurrentStruct,MEMBER), \ - (int)sizeof(((CurrentStruct*)0)->MEMBER), \ - SIG) - // End of macros - //////////////////////////////////////////////////////////////////// - - out("\"structs\": ["); { - -#define CurrentStruct WasmTestStruct - StructBinder { - M(v4,"i"); - M(cstr,"s"); - M(ppV,"p"); - M(v8,"j"); - M(xFunc,"v(p)"); - } _StructBinder; -#undef CurrentStruct - - } out( "]"/*structs*/); - out("}"/*top-level object*/); - *pos = 0; - strBuf[0] = '{'/*end of the race-condition workaround*/; - return strBuf; -#undef DefGroup -#undef Def -#undef _DefGroup -#undef StructBinder -#undef StructBinder_ -#undef StructBinder__ -#undef M -#undef _StructBinder -#undef CurrentStruct -#undef CloseBrace -#undef out -#undef outf -#undef lenCheck -} DELETED ext/wasm/jaccwabyt/jaccwabyt_test.exports Index: ext/wasm/jaccwabyt/jaccwabyt_test.exports ================================================================== --- ext/wasm/jaccwabyt/jaccwabyt_test.exports +++ /dev/null @@ -1,10 +0,0 @@ -_jaccwabyt_test_intptr -_jaccwabyt_test_int64ptr -_jaccwabyt_test_int64_max -_jaccwabyt_test_int64_min -_jaccwabyt_test_int64_minmax -_jaccwabyt_test_int64_times2 -_jaccwabyt_test_struct -_jaccwabyt_test_ctype_json -_jaccwabyt_test_stack_overflow -_jaccwabyt_test_str_hello ADDED ext/wasm/module-symbols.html Index: ext/wasm/module-symbols.html ================================================================== --- /dev/null +++ ext/wasm/module-symbols.html @@ -0,0 +1,333 @@ + + + + + + + sqlite3 Module Symbols + + + +
+ + +

Loading WASM module... + If this takes "a long time" it may have failed and the browser's + dev console may contain hints as to why. +

+ +

+ This page lists the SQLite3 APIs exported + by sqlite3.wasm and exposed to clients + by sqlite3.js. These lists are generated dynamically + by loading the JS/WASM module and introspecting it, with the following + caveats: +

+ +
    +
  • Some APIs are explicitly filtered out of these lists because + they are strictly for internal use within the JS/WASM APIs and + their own test code. +
  • +
  • This page runs in the main UI thread so it cannot see features + which are only available in a Worker thread. If this page were + to function via a Worker, it would not be able to see + functionality only available in the main thread. Starting a + Worker here to fetch those symbols requires loading a second + copy of the sqlite3 WASM module and JS code. +
  • +
+ +
+ +

This page exposes a global symbol named sqlite3 + which can be inspected using the browser's dev tools. +
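A minimal sketch of that sort of introspection, runnable from the dev console once the module has finished loading (it relies only on the sqlite3.version and sqlite3.capi properties listed on this page):

    // Rough counts of what the capi namespace exposes:
    console.log("version:", sqlite3.version);
    console.log("sqlite3_...() functions:",
      Object.keys(sqlite3.capi).filter((k)=>k.startsWith('sqlite3_')).length);
    console.log("SQLITE_... constants:",
      Object.keys(sqlite3.capi).filter((k)=>k.startsWith('SQLITE_')).length);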

+ +

Jump to...

+ + + +

sqlite3 Namespace

+

+ The sqlite3 namespace object exposes the following... +

+
+ + +

sqlite3.version Object

+

+ The sqlite3.version object exposes the following... +

+
+ + +

sqlite3_...() Function List

+ +

The sqlite3.capi namespace exposes the following + sqlite3_...() + functions... +

+
+

+ = function is specific to the JS/WASM + bindings, not part of the C API. +

+ + +

SQLITE_... Constants

+ +

The sqlite3.capi namespace exposes the following + SQLITE_... + constants... +

+
+ + +

sqlite3.oo1 Namespace

+

+ The sqlite3.oo1 namespace exposes the following... +

+
+ + +

sqlite3.wasm Namespace

+

+ The sqlite3.wasm namespace exposes the + following... +

+
+ + +

sqlite3.wasm.pstack Namespace

+

+ The sqlite3.wasm.pstack namespace exposes the + following... +

+
+ + +

Compilation Options

+

+ SQLITE_... compilation options used in this build + of sqlite3.wasm... +

+
+ +
+ + +
+ ADDED ext/wasm/scratchpad-wasmfs-main.html Index: ext/wasm/scratchpad-wasmfs-main.html ================================================================== --- /dev/null +++ ext/wasm/scratchpad-wasmfs-main.html @@ -0,0 +1,40 @@ + + + + + + + + + sqlite3 WASMFS/OPFS Main-thread Scratchpad + + +
sqlite3 WASMFS/OPFS Main-thread Scratchpad
+ +
+
+
Initializing app...
+
+ On a slow internet connection this may take a moment. If this + message displays for "a long time", initialization may have + failed and the JavaScript console may contain clues as to why. +
+
+
Downloading...
+
+ +
+

Scratchpad/test app for the WASMFS/OPFS integration in the + main window thread. This page requires that the sqlite3 API have + been built with WASMFS support. If OPFS support is available then + it "should" persist a database across reloads (watch the dev console + output), otherwise it will not. +

+

All stuff on this page happens in the dev console.

+
+
+ + + + + ADDED ext/wasm/scratchpad-wasmfs-main.js Index: ext/wasm/scratchpad-wasmfs-main.js ================================================================== --- /dev/null +++ ext/wasm/scratchpad-wasmfs-main.js @@ -0,0 +1,70 @@ +/* + 2022-05-22 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + A basic test script for sqlite3-api.js. This file must be run in + main JS thread and sqlite3.js must have been loaded before it. +*/ +'use strict'; +(function(){ + const toss = function(...args){throw new Error(args.join(' '))}; + const log = console.log.bind(console), + warn = console.warn.bind(console), + error = console.error.bind(console); + + const stdout = log; + const stderr = error; + + const test1 = function(db){ + db.exec("create table if not exists t(a);") + .transaction(function(db){ + db.prepare("insert into t(a) values(?)") + .bind(new Date().getTime()) + .stepFinalize(); + stdout("Number of values in table t:", + db.selectValue("select count(*) from t")); + }); + }; + + const runTests = function(sqlite3){ + const capi = sqlite3.capi, + oo = sqlite3.oo1, + wasm = sqlite3.wasm; + stdout("Loaded sqlite3:",capi.sqlite3_libversion(), capi.sqlite3_sourceid()); + const persistentDir = capi.sqlite3_wasmfs_opfs_dir(); + if(persistentDir){ + stdout("Persistent storage dir:",persistentDir); + }else{ + stderr("No persistent storage available."); + } + const startTime = performance.now(); + let db; + try { + db = new oo.DB(persistentDir+'/foo.db'); + stdout("DB filename:",db.filename); + const banner1 = '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', + banner2 = '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'; + [ + test1 + ].forEach((f)=>{ + const n = performance.now(); + stdout(banner1,"Running",f.name+"()..."); + f(db, sqlite3); + stdout(banner2,f.name+"() took ",(performance.now() - n),"ms"); + }); + }finally{ + if(db) db.close(); + } + stdout("Total test time:",(performance.now() - startTime),"ms"); + }; + + sqlite3InitModule(self.sqlite3TestModule).then(runTests); +})(); ADDED ext/wasm/speedtest1-wasmfs.html Index: ext/wasm/speedtest1-wasmfs.html ================================================================== --- /dev/null +++ ext/wasm/speedtest1-wasmfs.html @@ -0,0 +1,149 @@ + + + + + + + + + speedtest1-wasmfs.wasm + + +
speedtest1-wasmfs.wasm
+ + +
+
+
Initializing app...
+
+ On a slow internet connection this may take a moment. If this + message displays for "a long time", initialization may have + failed and the JavaScript console may contain clues as to why. +
+
+
Downloading...
+
+ +
+
This page starts running the main exe when it loads, which will + block the UI until it finishes! Adding UI controls to manually configure and start it + is a TODO.
+ +
Achtung: running it with the dev tools open may + drastically slow it down. For faster results, keep the dev + tools closed when running it! +
+
Output is delayed/buffered because we cannot update the UI while the + speedtest is running. Output will appear below when ready... +
+ + + + + ADDED ext/wasm/speedtest1-worker.html Index: ext/wasm/speedtest1-worker.html ================================================================== --- /dev/null +++ ext/wasm/speedtest1-worker.html @@ -0,0 +1,372 @@ + + + + + + + + + speedtest1.wasm Worker + + +
speedtest1.wasm Worker
+ + +
+
+
Initializing app...
+
+ On a slow internet connection this may take a moment. If this + message displays for "a long time", initialization may have + failed and the JavaScript console may contain clues as to why. +
+
+
Downloading...
+
+ +
+ +
+ + + + +
+
+
+
+ Tips: +
    +
  • Control-click the flags to (de)select multiple flags.
  • +
  • The --big-transactions flag is important for two + of the bigger tests. Without it, those tests create a + combined total of 140k implicit transactions, reducing their + speed to an absolute crawl, especially when WASMFS is + activated. A sketch of passing such flags to the worker follows these tips. +
  • +
  • The easiest way to try different optimization levels is, + from this directory: +
    $ rm -f speedtest1.js; make -e emcc_opt='-O2' speedtest1.js
    + Then reload this page. -O2 seems to consistently produce the fastest results. +
  • +
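As mentioned in the --big-transactions tip above, this page drives speedtest1 by posting messages to a Worker created from speedtest1-worker.js. A minimal sketch of that protocol, based on the worker's message handler shown below (the flag values are just an example; the actual page may construct the worker differently):

    // Assumes the page created the worker roughly like this:
    const w = new Worker('speedtest1-worker.js');
    w.onmessage = (ev)=>console.log(ev.data.type, ev.data.data);
    // After the worker posts {type:'ready'}, start a run:
    w.postMessage({type:'run', data:['--size','25','--big-transactions']});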
+
+ + + + ADDED ext/wasm/speedtest1-worker.js Index: ext/wasm/speedtest1-worker.js ================================================================== --- /dev/null +++ ext/wasm/speedtest1-worker.js @@ -0,0 +1,99 @@ +'use strict'; +(function(){ + let speedtestJs = 'speedtest1.js'; + const urlParams = new URL(self.location.href).searchParams; + if(urlParams.has('sqlite3.dir')){ + speedtestJs = urlParams.get('sqlite3.dir') + '/' + speedtestJs; + } + importScripts('common/whwasmutil.js', speedtestJs); + /** + If this environment contains OPFS, this function initializes it and + returns the name of the dir on which OPFS is mounted, else it returns + an empty string. + */ + const wasmfsDir = function f(wasmUtil){ + if(undefined !== f._) return f._; + const pdir = '/opfs'; + if( !self.FileSystemHandle + || !self.FileSystemDirectoryHandle + || !self.FileSystemFileHandle){ + return f._ = ""; + } + try{ + if(0===wasmUtil.xCallWrapped( + 'sqlite3_wasm_init_wasmfs', 'i32', ['string'], pdir + )){ + return f._ = pdir; + }else{ + return f._ = ""; + } + }catch(e){ + // sqlite3_wasm_init_wasmfs() is not available + return f._ = ""; + } + }; + wasmfsDir._ = undefined; + + const mPost = function(msgType,payload){ + postMessage({type: msgType, data: payload}); + }; + + const App = Object.create(null); + App.logBuffer = []; + const logMsg = (type,msgArgs)=>{ + const msg = msgArgs.join(' '); + App.logBuffer.push(msg); + mPost(type,msg); + }; + const log = (...args)=>logMsg('stdout',args); + const logErr = (...args)=>logMsg('stderr',args); + + const runSpeedtest = function(cliFlagsArray){ + const scope = App.wasm.scopedAllocPush(); + const dbFile = App.pDir+"/speedtest1.sqlite3"; + try{ + const argv = [ + "speedtest1.wasm", ...cliFlagsArray, dbFile + ]; + App.logBuffer.length = 0; + mPost('run-start', [...argv]); + App.wasm.xCall('wasm_main', argv.length, + App.wasm.scopedAllocMainArgv(argv)); + }catch(e){ + mPost('error',e.message); + }finally{ + App.wasm.scopedAllocPop(scope); + mPost('run-end', App.logBuffer.join('\n')); + App.logBuffer.length = 0; + } + }; + + self.onmessage = function(msg){ + msg = msg.data; + switch(msg.type){ + case 'run': runSpeedtest(msg.data || []); break; + default: + logErr("Unhandled worker message type:",msg.type); + break; + } + }; + + const EmscriptenModule = { + print: log, + printErr: logErr, + setStatus: (text)=>mPost('load-status',text) + }; + self.sqlite3InitModule(EmscriptenModule).then((sqlite3)=>{ + const S = sqlite3; + App.vfsUnlink = function(pDb, fname){ + const pVfs = S.wasm.sqlite3_wasm_db_vfs(pDb, 0); + if(pVfs) S.wasm.sqlite3_wasm_vfs_unlink(pVfs, fname||0); + }; + App.pDir = wasmfsDir(S.wasm); + App.wasm = S.wasm; + //if(App.pDir) log("Persistent storage:",pDir); + //else log("Using transient storage."); + mPost('ready',true); + log("Registered VFSes:", ...S.capi.sqlite3_js_vfs_list()); + }); +})(); ADDED ext/wasm/speedtest1.html Index: ext/wasm/speedtest1.html ================================================================== --- /dev/null +++ ext/wasm/speedtest1.html @@ -0,0 +1,174 @@ + + + + + + + + + speedtest1.wasm + + +
speedtest1.wasm
+ + +
+
+
Initializing app...
+
+ On a slow internet connection this may take a moment. If this + message displays for "a long time", initialization may have + failed and the JavaScript console may contain clues as to why. +
+
+
Downloading...
+
+ +
+
This page starts running the main exe when it loads, which will + block the UI until it finishes! Adding UI controls to manually configure and start it + is a TODO.
+
+
Achtung: running it with the dev tools open may + drastically slow it down. For faster results, keep the dev + tools closed when running it! +
+
Output is delayed/buffered because we cannot update the UI while the + speedtest is running. Output will appear below when ready... +
+ + + + + ADDED ext/wasm/split-speedtest1-script.sh Index: ext/wasm/split-speedtest1-script.sh ================================================================== --- /dev/null +++ ext/wasm/split-speedtest1-script.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Expects $1 to be a (speedtest1 --script) output file. Output is a +# series of SQL files extracted from that file. +infile=${1:?arg = speedtest1 --script output file} +testnums=$(grep -e '^-- begin test' "$infile" | cut -d' ' -f4) +if [ x = "x${testnums}" ]; then + echo "Could not parse any begin/end blocks out of $infile" 1>&2 + exit 1 +fi +odir=${infile%%/*} +if [ "$odir" = "$infile" ]; then odir="."; fi +#echo testnums=$testnums +for n in $testnums; do + ofile=$odir/$(printf "speedtest1-%03d.sql" $n) + sed -n -e "/^-- begin test $n /,/^-- end test $n\$/p" $infile > $ofile + echo -e "$n\t$ofile" +done ADDED ext/wasm/sql/000-mandelbrot.sql Index: ext/wasm/sql/000-mandelbrot.sql ================================================================== --- /dev/null +++ ext/wasm/sql/000-mandelbrot.sql @@ -0,0 +1,17 @@ +WITH RECURSIVE + xaxis(x) AS (VALUES(-2.0) UNION ALL SELECT x+0.05 FROM xaxis WHERE x<1.2), + yaxis(y) AS (VALUES(-1.0) UNION ALL SELECT y+0.1 FROM yaxis WHERE y<1.0), + m(iter, cx, cy, x, y) AS ( + SELECT 0, x, y, 0.0, 0.0 FROM xaxis, yaxis + UNION ALL + SELECT iter+1, cx, cy, x*x-y*y + cx, 2.0*x*y + cy FROM m + WHERE (x*x + y*y) < 4.0 AND iter<28 + ), + m2(iter, cx, cy) AS ( + SELECT max(iter), cx, cy FROM m GROUP BY cx, cy + ), + a(t) AS ( + SELECT group_concat( substr(' .+*#', 1+min(iter/7,4), 1), '') + FROM m2 GROUP BY cy + ) +SELECT group_concat(rtrim(t),x'0a') as Mandelbrot FROM a; ADDED ext/wasm/sql/001-sudoku.sql Index: ext/wasm/sql/001-sudoku.sql ================================================================== --- /dev/null +++ ext/wasm/sql/001-sudoku.sql @@ -0,0 +1,28 @@ +WITH RECURSIVE + input(sud) AS ( + VALUES('53..7....6..195....98....6.8...6...34..8.3..17...2...6.6....28....419..5....8..79') + ), + digits(z, lp) AS ( + VALUES('1', 1) + UNION ALL SELECT + CAST(lp+1 AS TEXT), lp+1 FROM digits WHERE lp<9 + ), + x(s, ind) AS ( + SELECT sud, instr(sud, '.') FROM input + UNION ALL + SELECT + substr(s, 1, ind-1) || z || substr(s, ind+1), + instr( substr(s, 1, ind-1) || z || substr(s, ind+1), '.' ) + FROM x, digits AS z + WHERE ind>0 + AND NOT EXISTS ( + SELECT 1 + FROM digits AS lp + WHERE z.z = substr(s, ((ind-1)/9)*9 + lp, 1) + OR z.z = substr(s, ((ind-1)%9) + (lp-1)*9 + 1, 1) + OR z.z = substr(s, (((ind-1)/3) % 3) * 3 + + ((ind-1)/27) * 27 + lp + + ((lp-1) / 3) * 6, 1) + ) + ) +SELECT s FROM x WHERE ind=0; ADDED ext/wasm/test-opfs-vfs.html Index: ext/wasm/test-opfs-vfs.html ================================================================== --- /dev/null +++ ext/wasm/test-opfs-vfs.html @@ -0,0 +1,26 @@ + + + + + + + + + Async-behind-Sync experiment + + +
Async-behind-Sync sqlite3_vfs
+
This performs a sanity test of the "opfs" sqlite3_vfs. + See the dev console for all output. +
+
+ Use this link to delete the persistent OPFS-side db (if any). +
+
+ + + ADDED ext/wasm/test-opfs-vfs.js Index: ext/wasm/test-opfs-vfs.js ================================================================== --- /dev/null +++ ext/wasm/test-opfs-vfs.js @@ -0,0 +1,85 @@ +/* + 2022-09-17 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + A testing ground for the OPFS VFS. +*/ +'use strict'; +const tryOpfsVfs = async function(sqlite3){ + const toss = function(...args){throw new Error(args.join(' '))}; + const logPrefix = "OPFS tester:"; + const log = (...args)=>console.log(logPrefix,...args); + const warn = (...args)=>console.warn(logPrefix,...args); + const error = (...args)=>console.error(logPrefix,...args); + const opfs = sqlite3.opfs; + log("tryOpfsVfs()"); + if(!sqlite3.opfs){ + const e = toss("OPFS is not available."); + error(e); + throw e; + } + const capi = sqlite3.capi; + const pVfs = capi.sqlite3_vfs_find("opfs") || toss("Missing 'opfs' VFS."); + const oVfs = capi.sqlite3_vfs.instanceForPointer(pVfs) || toss("Unexpected instanceForPointer() result.");; + log("OPFS VFS:",pVfs, oVfs); + + const wait = async (ms)=>{ + return new Promise((resolve)=>setTimeout(resolve, ms)); + }; + + const urlArgs = new URL(self.location.href).searchParams; + const dbFile = "my-persistent.db"; + if(urlArgs.has('delete')) sqlite3.opfs.unlink(dbFile); + + const db = new opfs.OpfsDb(dbFile,'ct'); + log("db file:",db.filename); + try{ + if(opfs.entryExists(dbFile)){ + let n = db.selectValue("select count(*) from sqlite_schema"); + log("Persistent data found. sqlite_schema entry count =",n); + } + db.transaction((db)=>{ + db.exec({ + sql:[ + "create table if not exists t(a);", + "insert into t(a) values(?),(?),(?);", + ], + bind: [performance.now() | 0, + (performance.now() |0) / 2, + (performance.now() |0) / 4] + }); + }); + log("count(*) from t =",db.selectValue("select count(*) from t")); + + // Some sanity checks of the opfs utility functions... + const testDir = '/sqlite3-opfs-'+opfs.randomFilename(12); + const aDir = testDir+'/test/dir'; + await opfs.mkdir(aDir) || toss("mkdir failed"); + await opfs.mkdir(aDir) || toss("mkdir must pass if the dir exists"); + await opfs.unlink(testDir+'/test') && toss("delete 1 should have failed (dir not empty)"); + //await opfs.entryExists(testDir) + await opfs.unlink(testDir+'/test/dir') || toss("delete 2 failed"); + await opfs.unlink(testDir+'/test/dir') && toss("delete 2b should have failed (dir already deleted)"); + await opfs.unlink(testDir, true) || toss("delete 3 failed"); + await opfs.entryExists(testDir) && toss("entryExists(",testDir,") should have failed"); + }finally{ + db.close(); + } + + log("Done!"); +}/*tryOpfsVfs()*/; + +importScripts('jswasm/sqlite3.js'); +self.sqlite3InitModule() + .then((sqlite3)=>tryOpfsVfs(sqlite3)) + .catch((e)=>{ + console.error("Error initializing module:",e); + }); ADDED ext/wasm/tester1-worker.html Index: ext/wasm/tester1-worker.html ================================================================== --- /dev/null +++ ext/wasm/tester1-worker.html @@ -0,0 +1,63 @@ + + + + + + + + + sqlite3 tester #1 (Worker thread) + + + +

sqlite3 WASM/JS tester #1 (Worker thread)

+
+ See tester1.html for the UI-thread variant.
+
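The scripting for this page is likewise reduced to its text here. tester1.js (added below) reports its output from a Worker by posting messages of the shapes {type:'log', payload:{cssClass, args}} and {type:'test-result', payload:{pass}}. A minimal sketch of the Worker-side wiring such a page needs; the element IDs are borrowed from the script's UI-thread branch and are assumptions, not the original markup:

    // Hedged sketch of a host page consuming tester1.js's Worker messages.
    // #test-output and #color-target are assumed IDs taken from the
    // UI-thread code path of tester1.js.
    const w = new Worker('tester1.js');
    w.onmessage = function({data}){
      if('log' === data.type){
        const ln = document.createElement('div');
        for(const c of [].concat(data.payload.cssClass || [])){
          if(c) ln.classList.add(c);
        }
        ln.textContent = data.payload.args.join(' ');
        document.querySelector('#test-output').append(ln);
      }else if('test-result' === data.type){
        document.querySelector('#color-target')
          .classList.add(data.payload.pass ? 'tests-pass' : 'tests-fail');
      }
    };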
+ + + ADDED ext/wasm/tester1.html Index: ext/wasm/tester1.html ================================================================== --- /dev/null +++ ext/wasm/tester1.html @@ -0,0 +1,28 @@ + + + + + + + + + sqlite3 tester #1 (UI thread) + + + +

sqlite3 WASM/JS tester #1 (UI thread)

+
+ See tester1-worker.html for the Worker-thread variant.
+
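Only the body text of this page survives here. When tester1.js (added below) runs in the UI thread it queries three elements: #test-output (the log container), #cb-log-reverse (a checkbox toggling reverse-ordered log output), and #color-target (given a tests-pass or tests-fail class when the run finishes). A sketch of the minimal DOM it expects, reconstructed from those selectors rather than from the original tester1.html source:

    // Assumed minimal DOM for tester1.js in the UI thread; derived from the
    // selectors the script uses, not from the original markup.
    document.body.innerHTML = [
      "<h1 id='color-target'>sqlite3 WASM/JS tester #1 (UI thread)</h1>",
      "<label><input type='checkbox' id='cb-log-reverse'> Reverse log order</label>",
      "<div id='test-output'></div>"
    ].join('\n');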
+ + + + ADDED ext/wasm/tester1.js Index: ext/wasm/tester1.js ================================================================== --- /dev/null +++ ext/wasm/tester1.js @@ -0,0 +1,1864 @@ +/* + 2022-10-12 + + The author disclaims copyright to this source code. In place of a + legal notice, here is a blessing: + + * May you do good and not evil. + * May you find forgiveness for yourself and forgive others. + * May you share freely, never taking more than you give. + + *********************************************************************** + + Main functional and regression tests for the sqlite3 WASM API. + + This mini-framework works like so: + + This script adds a series of test groups, each of which contains an + arbitrary number of tests, into a queue. After loading of the + sqlite3 WASM/JS module is complete, that queue is processed. If any + given test fails, the whole thing fails. This script is built such + that it can run from the main UI thread or worker thread. Test + groups and individual tests can be assigned a predicate function + which determines whether to run them or not, and this is + specifically intended to be used to toggle certain tests on or off + for the main/worker threads. + + Each test group defines a state object which gets applied as each + test function's `this`. Test functions can use that to, e.g., set up + a db in an early test and close it in a later test. Each test gets + passed the sqlite3 namespace object as its only argument. +*/ +'use strict'; +(function(){ + /** + Set up our output channel differently depending + on whether we are running in a worker thread or + the main (UI) thread. + */ + let logClass; + /* Predicate for tests/groups. */ + const isUIThread = ()=>(self.window===self && self.document); + /* Predicate for tests/groups. */ + const isWorker = ()=>!isUIThread(); + /* Predicate for tests/groups. */ + const testIsTodo = ()=>false; + const haveWasmCTests = ()=>{ + return !!wasm.exports.sqlite3_wasm_test_intptr; + }; + { + const mapToString = (v)=>{ + switch(typeof v){ + case 'number': case 'string': case 'boolean': + case 'undefined': case 'bigint': + return ''+v; + default: break; + } + if(null===v) return 'null'; + if(v instanceof Error){ + v = { + message: v.message, + stack: v.stack, + errorClass: v.name + }; + } + return JSON.stringify(v,undefined,2); + }; + const normalizeArgs = (args)=>args.map(mapToString); + if( isUIThread() ){ + console.log("Running in the UI thread."); + const logTarget = document.querySelector('#test-output'); + logClass = function(cssClass,...args){ + const ln = document.createElement('div'); + if(cssClass){ + for(const c of (Array.isArray(cssClass) ? cssClass : [cssClass])){ + ln.classList.add(c); + } + } + ln.append(document.createTextNode(normalizeArgs(args).join(' '))); + logTarget.append(ln); + }; + const cbReverse = document.querySelector('#cb-log-reverse'); + const cbReverseKey = 'tester1:cb-log-reverse'; + const cbReverseIt = ()=>{ + logTarget.classList[cbReverse.checked ? 'add' : 'remove']('reverse'); + //localStorage.setItem(cbReverseKey, cbReverse.checked ? 
1 : 0); + }; + cbReverse.addEventListener('change', cbReverseIt, true); + /*if(localStorage.getItem(cbReverseKey)){ + cbReverse.checked = !!(+localStorage.getItem(cbReverseKey)); + }*/ + cbReverseIt(); + }else{ /* Worker thread */ + console.log("Running in a Worker thread."); + logClass = function(cssClass,...args){ + postMessage({ + type:'log', + payload:{cssClass, args: normalizeArgs(args)} + }); + }; + } + } + const reportFinalTestStatus = function(pass){ + if(isUIThread()){ + const e = document.querySelector('#color-target'); + e.classList.add(pass ? 'tests-pass' : 'tests-fail'); + }else{ + postMessage({type:'test-result', payload:{pass}}); + } + }; + const log = (...args)=>{ + //console.log(...args); + logClass('',...args); + } + const warn = (...args)=>{ + console.warn(...args); + logClass('warning',...args); + } + const error = (...args)=>{ + console.error(...args); + logClass('error',...args); + }; + + const toss = (...args)=>{ + error(...args); + throw new Error(args.join(' ')); + }; + const tossQuietly = (...args)=>{ + throw new Error(args.join(' ')); + }; + + const roundMs = (ms)=>Math.round(ms*100)/100; + + /** + Helpers for writing sqlite3-specific tests. + */ + const TestUtil = { + /** Running total of the number of tests run via + this API. */ + counter: 0, + /* Separator line for log messages. */ + separator: '------------------------------------------------------------', + /** + If expr is a function, it is called and its result + is returned, coerced to a bool, else expr, coerced to + a bool, is returned. + */ + toBool: function(expr){ + return (expr instanceof Function) ? !!expr() : !!expr; + }, + /** Throws if expr is false. If expr is a function, it is called + and its result is evaluated. If passed multiple arguments, + those after the first are a message string which get applied + as an exception message if the assertion fails. The message + arguments are concatenated together with a space between each. + */ + assert: function f(expr, ...msg){ + ++this.counter; + if(!this.toBool(expr)){ + throw new Error(msg.length ? msg.join(' ') : "Assertion failed."); + } + return this; + }, + /** Calls f() and squelches any exception it throws. If it + does not throw, this function throws. */ + mustThrow: function(f, msg){ + ++this.counter; + let err; + try{ f(); } catch(e){err=e;} + if(!err) throw new Error(msg || "Expected exception."); + return this; + }, + /** + Works like mustThrow() but expects filter to be a regex, + function, or string to match/filter the resulting exception + against. If f() does not throw, this test fails and an Error is + thrown. If filter is a regex, the test passes if + filter.test(error.message) passes. If it's a function, the test + passes if filter(error) returns truthy. If it's a string, the + test passes if the filter matches the exception message + precisely. In all other cases the test fails, throwing an + Error. + + If it throws, msg is used as the error report unless it's falsy, + in which case a default is used. 
+ */ + mustThrowMatching: function(f, filter, msg){ + ++this.counter; + let err; + try{ f(); } catch(e){err=e;} + if(!err) throw new Error(msg || "Expected exception."); + let pass = false; + if(filter instanceof RegExp) pass = filter.test(err.message); + else if(filter instanceof Function) pass = filter(err); + else if('string' === typeof filter) pass = (err.message === filter); + if(!pass){ + throw new Error(msg || ("Filter rejected this exception: "+err.message)); + } + return this; + }, + /** Throws if expr is truthy or expr is a function and expr() + returns truthy. */ + throwIf: function(expr, msg){ + ++this.counter; + if(this.toBool(expr)) throw new Error(msg || "throwIf() failed"); + return this; + }, + /** Throws if expr is falsy or expr is a function and expr() + returns falsy. */ + throwUnless: function(expr, msg){ + ++this.counter; + if(!this.toBool(expr)) throw new Error(msg || "throwUnless() failed"); + return this; + }, + eqApprox: (v1,v2,factor=0.05)=>(v1>=(v2-factor) && v1<=(v2+factor)), + TestGroup: (function(){ + let groupCounter = 0; + const TestGroup = function(name, predicate){ + this.number = ++groupCounter; + this.name = name; + this.predicate = predicate; + this.tests = []; + }; + TestGroup.prototype = { + addTest: function(testObj){ + this.tests.push(testObj); + return this; + }, + run: async function(sqlite3){ + log(TestUtil.separator); + logClass('group-start',"Group #"+this.number+':',this.name); + const indent = ' '; + if(this.predicate && !this.predicate(sqlite3)){ + logClass('warning',indent, + "SKIPPING group because predicate says to."); + return; + } + const assertCount = TestUtil.counter; + const groupState = Object.create(null); + const skipped = []; + let runtime = 0, i = 0; + for(const t of this.tests){ + ++i; + const n = this.number+"."+i; + log(indent, n+":", t.name); + if(t.predicate && !t.predicate(sqlite3)){ + logClass('warning', indent, indent, + 'SKIPPING because predicate says to'); + skipped.push( n+': '+t.name ); + }else{ + const tc = TestUtil.counter, now = performance.now(); + await t.test.call(groupState, sqlite3); + const then = performance.now(); + runtime += then - now; + logClass('faded',indent, indent, + TestUtil.counter - tc, 'assertion(s) in', + roundMs(then-now),'ms'); + } + } + logClass('green', + "Group #"+this.number+":",(TestUtil.counter - assertCount), + "assertion(s) in",roundMs(runtime),"ms"); + if(skipped.length){ + logClass('warning',"SKIPPED test(s) in group",this.number+":",skipped); + } + } + }; + return TestGroup; + })()/*TestGroup*/, + testGroups: [], + currentTestGroup: undefined, + addGroup: function(name, predicate){ + this.testGroups.push( this.currentTestGroup = + new this.TestGroup(name, predicate) ); + return this; + }, + addTest: function(name, callback){ + let predicate; + if(1===arguments.length){ + const opt = arguments[0]; + predicate = opt.predicate; + name = opt.name; + callback = opt.test; + } + this.currentTestGroup.addTest({ + name, predicate, test: callback + }); + return this; + }, + runTests: async function(sqlite3){ + return new Promise(async function(pok,pnok){ + try { + let runtime = 0; + for(let g of this.testGroups){ + const now = performance.now(); + await g.run(sqlite3); + runtime += performance.now() - now; + } + log(TestUtil.separator); + logClass(['strong','green'], + "Done running tests.",TestUtil.counter,"assertions in", + roundMs(runtime),'ms'); + pok(); + reportFinalTestStatus(true); + }catch(e){ + error(e); + pnok(e); + reportFinalTestStatus(false); + } + }.bind(this)); + } + 
}/*TestUtil*/; + const T = TestUtil; + T.g = T.addGroup; + T.t = T.addTest; + let capi, wasm/*assigned after module init*/; + //////////////////////////////////////////////////////////////////////// + // End of infrastructure setup. Now define the tests... + //////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////// + T.g('Basic sanity checks') + .t('Namespace object checks', function(sqlite3){ + const wasmCtypes = wasm.ctype; + T.assert(wasmCtypes.structs[0].name==='sqlite3_vfs'). + assert(wasmCtypes.structs[0].members.szOsFile.sizeof>=4). + assert(wasmCtypes.structs[1/*sqlite3_io_methods*/ + ].members.xFileSize.offset>0); + [ /* Spot-check a handful of constants to make sure they got installed... */ + 'SQLITE_SCHEMA','SQLITE_NULL','SQLITE_UTF8', + 'SQLITE_STATIC', 'SQLITE_DIRECTONLY', + 'SQLITE_OPEN_CREATE', 'SQLITE_OPEN_DELETEONCLOSE' + ].forEach((k)=>T.assert('number' === typeof capi[k])); + [/* Spot-check a few of the WASM API methods. */ + 'alloc', 'dealloc', 'installFunction' + ].forEach((k)=>T.assert(wasm[k] instanceof Function)); + + T.assert(capi.sqlite3_errstr(capi.SQLITE_IOERR_ACCESS).indexOf("I/O")>=0). + assert(capi.sqlite3_errstr(capi.SQLITE_CORRUPT).indexOf('malformed')>0). + assert(capi.sqlite3_errstr(capi.SQLITE_OK) === 'not an error'); + + try { + throw new sqlite3.WasmAllocError; + }catch(e){ + T.assert(e instanceof Error) + .assert(e instanceof sqlite3.WasmAllocError) + .assert("Allocation failed." === e.message); + } + try { + throw new sqlite3.WasmAllocError("test",{ + cause: 3 + }); + }catch(e){ + T.assert(3 === e.cause) + .assert("test" === e.message); + } + try {throw new sqlite3.WasmAllocError("test","ing",".")} + catch(e){T.assert("test ing ." === e.message)} + + try{ throw new sqlite3.SQLite3Error(capi.SQLITE_SCHEMA) } + catch(e){ T.assert('SQLITE_SCHEMA' === e.message) } + try{ sqlite3.SQLite3Error.toss(capi.SQLITE_CORRUPT,{cause: true}) } + catch(e){ + T.assert('SQLITE_CORRUPT'===e.message) + .assert(true===e.cause); + } + }) + //////////////////////////////////////////////////////////////////// + .t('strglob/strlike', function(sqlite3){ + T.assert(0===capi.sqlite3_strglob("*.txt", "foo.txt")). + assert(0!==capi.sqlite3_strglob("*.txt", "foo.xtx")). + assert(0===capi.sqlite3_strlike("%.txt", "foo.txt", 0)). + assert(0!==capi.sqlite3_strlike("%.txt", "foo.xtx", 0)); + }) + //////////////////////////////////////////////////////////////////// + ;/*end of basic sanity checks*/ + + //////////////////////////////////////////////////////////////////// + T.g('C/WASM Utilities') + .t('sqlite3.wasm namespace', function(sqlite3){ + const w = wasm; + const chr = (x)=>x.charCodeAt(0); + //log("heap getters..."); + { + const li = [8, 16, 32]; + if(w.bigIntEnabled) li.push(64); + for(const n of li){ + const bpe = n/8; + const s = w.heapForSize(n,false); + T.assert(bpe===s.BYTES_PER_ELEMENT). + assert(w.heapForSize(s.constructor) === s); + const u = w.heapForSize(n,true); + T.assert(bpe===u.BYTES_PER_ELEMENT). + assert(s!==u). 
+ assert(w.heapForSize(u.constructor) === u); + } + } + + // isPtr32() + { + const ip = w.isPtr32; + T.assert(ip(0)) + .assert(!ip(-1)) + .assert(!ip(1.1)) + .assert(!ip(0xffffffff)) + .assert(ip(0x7fffffff)) + .assert(!ip()) + .assert(!ip(null)/*might change: under consideration*/) + ; + } + + //log("jstrlen()..."); + { + T.assert(3 === w.jstrlen("abc")).assert(4 === w.jstrlen("äbc")); + } + + //log("jstrcpy()..."); + { + const fillChar = 10; + let ua = new Uint8Array(8), rc, + refill = ()=>ua.fill(fillChar); + refill(); + rc = w.jstrcpy("hello", ua); + T.assert(6===rc).assert(0===ua[5]).assert(chr('o')===ua[4]); + refill(); + ua[5] = chr('!'); + rc = w.jstrcpy("HELLO", ua, 0, -1, false); + T.assert(5===rc).assert(chr('!')===ua[5]).assert(chr('O')===ua[4]); + refill(); + rc = w.jstrcpy("the end", ua, 4); + //log("rc,ua",rc,ua); + T.assert(4===rc).assert(0===ua[7]). + assert(chr('e')===ua[6]).assert(chr('t')===ua[4]); + refill(); + rc = w.jstrcpy("the end", ua, 4, -1, false); + T.assert(4===rc).assert(chr(' ')===ua[7]). + assert(chr('e')===ua[6]).assert(chr('t')===ua[4]); + refill(); + rc = w.jstrcpy("", ua, 0, 1, true); + //log("rc,ua",rc,ua); + T.assert(1===rc).assert(0===ua[0]); + refill(); + rc = w.jstrcpy("x", ua, 0, 1, true); + //log("rc,ua",rc,ua); + T.assert(1===rc).assert(0===ua[0]); + refill(); + rc = w.jstrcpy('äbä', ua, 0, 1, true); + T.assert(1===rc, 'Must not write partial multi-byte char.') + .assert(0===ua[0]); + refill(); + rc = w.jstrcpy('äbä', ua, 0, 2, true); + T.assert(1===rc, 'Must not write partial multi-byte char.') + .assert(0===ua[0]); + refill(); + rc = w.jstrcpy('äbä', ua, 0, 2, false); + T.assert(2===rc).assert(fillChar!==ua[1]).assert(fillChar===ua[2]); + }/*jstrcpy()*/ + + //log("cstrncpy()..."); + { + const scope = w.scopedAllocPush(); + try { + let cStr = w.scopedAllocCString("hello"); + const n = w.cstrlen(cStr); + let cpy = w.scopedAlloc(n+10); + let rc = w.cstrncpy(cpy, cStr, n+10); + T.assert(n+1 === rc). + assert("hello" === w.cstringToJs(cpy)). + assert(chr('o') === w.getMemValue(cpy+n-1)). + assert(0 === w.getMemValue(cpy+n)); + let cStr2 = w.scopedAllocCString("HI!!!"); + rc = w.cstrncpy(cpy, cStr2, 3); + T.assert(3===rc). + assert("HI!lo" === w.cstringToJs(cpy)). + assert(chr('!') === w.getMemValue(cpy+2)). 
+ assert(chr('l') === w.getMemValue(cpy+3)); + }finally{ + w.scopedAllocPop(scope); + } + } + + //log("jstrToUintArray()..."); + { + let a = w.jstrToUintArray("hello", false); + T.assert(5===a.byteLength).assert(chr('o')===a[4]); + a = w.jstrToUintArray("hello", true); + T.assert(6===a.byteLength).assert(chr('o')===a[4]).assert(0===a[5]); + a = w.jstrToUintArray("äbä", false); + T.assert(5===a.byteLength).assert(chr('b')===a[2]); + a = w.jstrToUintArray("äbä", true); + T.assert(6===a.byteLength).assert(chr('b')===a[2]).assert(0===a[5]); + } + + //log("allocCString()..."); + { + const cstr = w.allocCString("hällo, world"); + const n = w.cstrlen(cstr); + T.assert(13 === n) + .assert(0===w.getMemValue(cstr+n)) + .assert(chr('d')===w.getMemValue(cstr+n-1)); + } + + //log("scopedAlloc() and friends..."); + { + const alloc = w.alloc, dealloc = w.dealloc; + w.alloc = w.dealloc = null; + T.assert(!w.scopedAlloc.level) + .mustThrowMatching(()=>w.scopedAlloc(1), /^No scopedAllocPush/) + .mustThrowMatching(()=>w.scopedAllocPush(), /missing alloc/); + w.alloc = alloc; + T.mustThrowMatching(()=>w.scopedAllocPush(), /missing alloc/); + w.dealloc = dealloc; + T.mustThrowMatching(()=>w.scopedAllocPop(), /^Invalid state/) + .mustThrowMatching(()=>w.scopedAlloc(1), /^No scopedAllocPush/) + .mustThrowMatching(()=>w.scopedAlloc.level=0, /read-only/); + const asc = w.scopedAllocPush(); + let asc2; + try { + const p1 = w.scopedAlloc(16), + p2 = w.scopedAlloc(16); + T.assert(1===w.scopedAlloc.level) + .assert(Number.isFinite(p1)) + .assert(Number.isFinite(p2)) + .assert(asc[0] === p1) + .assert(asc[1]===p2); + asc2 = w.scopedAllocPush(); + const p3 = w.scopedAlloc(16); + T.assert(2===w.scopedAlloc.level) + .assert(Number.isFinite(p3)) + .assert(2===asc.length) + .assert(p3===asc2[0]); + + const [z1, z2, z3] = w.scopedAllocPtr(3); + T.assert('number'===typeof z1).assert(z2>z1).assert(z3>z2) + .assert(0===w.getMemValue(z1,'i32'), 'allocPtr() must zero the targets') + .assert(0===w.getMemValue(z3,'i32')); + }finally{ + // Pop them in "incorrect" order to make sure they behave: + w.scopedAllocPop(asc); + T.assert(0===asc.length); + T.mustThrowMatching(()=>w.scopedAllocPop(asc), + /^Invalid state object/); + if(asc2){ + T.assert(2===asc2.length,'Should be p3 and z1'); + w.scopedAllocPop(asc2); + T.assert(0===asc2.length); + T.mustThrowMatching(()=>w.scopedAllocPop(asc2), + /^Invalid state object/); + } + } + T.assert(0===w.scopedAlloc.level); + w.scopedAllocCall(function(){ + T.assert(1===w.scopedAlloc.level); + const [cstr, n] = w.scopedAllocCString("hello, world", true); + T.assert(12 === n) + .assert(0===w.getMemValue(cstr+n)) + .assert(chr('d')===w.getMemValue(cstr+n-1)); + }); + }/*scopedAlloc()*/ + + //log("xCall()..."); + { + const pJson = w.xCall('sqlite3_wasm_enum_json'); + T.assert(Number.isFinite(pJson)).assert(w.cstrlen(pJson)>300); + } + + //log("xWrap()..."); + { + T.mustThrowMatching(()=>w.xWrap('sqlite3_libversion',null,'i32'), + /requires 0 arg/). + assert(w.xWrap.resultAdapter('i32') instanceof Function). 
+ assert(w.xWrap.argAdapter('i32') instanceof Function); + let fw = w.xWrap('sqlite3_libversion','utf8'); + T.mustThrowMatching(()=>fw(1), /requires 0 arg/); + let rc = fw(); + T.assert('string'===typeof rc).assert(rc.length>5); + rc = w.xCallWrapped('sqlite3_wasm_enum_json','*'); + T.assert(rc>0 && Number.isFinite(rc)); + rc = w.xCallWrapped('sqlite3_wasm_enum_json','utf8'); + T.assert('string'===typeof rc).assert(rc.length>300); + if(haveWasmCTests()){ + fw = w.xWrap('sqlite3_wasm_test_str_hello', 'utf8:free',['i32']); + rc = fw(0); + T.assert('hello'===rc); + rc = fw(1); + T.assert(null===rc); + + if(w.bigIntEnabled){ + w.xWrap.resultAdapter('thrice', (v)=>3n*BigInt(v)); + w.xWrap.argAdapter('twice', (v)=>2n*BigInt(v)); + fw = w.xWrap('sqlite3_wasm_test_int64_times2','thrice','twice'); + rc = fw(1); + T.assert(12n===rc); + + w.scopedAllocCall(function(){ + let pI1 = w.scopedAlloc(8), pI2 = pI1+4; + w.setMemValue(pI1, 0,'*')(pI2, 0, '*'); + let f = w.xWrap('sqlite3_wasm_test_int64_minmax',undefined,['i64*','i64*']); + let r1 = w.getMemValue(pI1, 'i64'), r2 = w.getMemValue(pI2, 'i64'); + T.assert(!Number.isSafeInteger(r1)).assert(!Number.isSafeInteger(r2)); + }); + } + } + } + }/*WhWasmUtil*/) + + //////////////////////////////////////////////////////////////////// + .t('sqlite3.StructBinder (jaccwabyt)', function(sqlite3){ + const S = sqlite3, W = S.wasm; + const MyStructDef = { + sizeof: 16, + members: { + p4: {offset: 0, sizeof: 4, signature: "i"}, + pP: {offset: 4, sizeof: 4, signature: "P"}, + ro: {offset: 8, sizeof: 4, signature: "i", readOnly: true}, + cstr: {offset: 12, sizeof: 4, signature: "s"} + } + }; + if(W.bigIntEnabled){ + const m = MyStructDef; + m.members.p8 = {offset: m.sizeof, sizeof: 8, signature: "j"}; + m.sizeof += m.members.p8.sizeof; + } + const StructType = S.StructBinder.StructType; + const K = S.StructBinder('my_struct',MyStructDef); + T.mustThrowMatching(()=>K(), /via 'new'/). + mustThrowMatching(()=>new K('hi'), /^Invalid pointer/); + const k1 = new K(), k2 = new K(); + try { + T.assert(k1.constructor === K). + assert(K.isA(k1)). + assert(k1 instanceof K). + assert(K.prototype.lookupMember('p4').key === '$p4'). + assert(K.prototype.lookupMember('$p4').name === 'p4'). + mustThrowMatching(()=>K.prototype.lookupMember('nope'), /not a mapped/). + assert(undefined === K.prototype.lookupMember('nope',false)). + assert(k1 instanceof StructType). + assert(StructType.isA(k1)). + assert(K.resolveToInstance(k1.pointer)===k1). + mustThrowMatching(()=>K.resolveToInstance(null,true), /is-not-a my_struct/). + assert(k1 === StructType.instanceForPointer(k1.pointer)). + mustThrowMatching(()=>k1.$ro = 1, /read-only/); + Object.keys(MyStructDef.members).forEach(function(key){ + key = K.memberKey(key); + T.assert(0 == k1[key], + "Expecting allocation to zero the memory "+ + "for "+key+" but got: "+k1[key]+ + " from "+k1.memoryDump()); + }); + T.assert('number' === typeof k1.pointer). + mustThrowMatching(()=>k1.pointer = 1, /pointer/). + assert(K.instanceForPointer(k1.pointer) === k1); + k1.$p4 = 1; k1.$pP = 2; + T.assert(1 === k1.$p4).assert(2 === k1.$pP); + if(MyStructDef.members.$p8){ + k1.$p8 = 1/*must not throw despite not being a BigInt*/; + k1.$p8 = BigInt(Number.MAX_SAFE_INTEGER * 2); + T.assert(BigInt(2 * Number.MAX_SAFE_INTEGER) === k1.$p8); + } + T.assert(!k1.ondispose); + k1.setMemberCString('cstr', "A C-string."); + T.assert(Array.isArray(k1.ondispose)). + assert(k1.ondispose[0] === k1.$cstr). + assert('number' === typeof k1.$cstr). + assert('A C-string.' 
=== k1.memberToJsString('cstr')); + k1.$pP = k2; + T.assert(k1.$pP === k2); + k1.$pP = null/*null is special-cased to 0.*/; + T.assert(0===k1.$pP); + let ptr = k1.pointer; + k1.dispose(); + T.assert(undefined === k1.pointer). + assert(undefined === K.instanceForPointer(ptr)). + mustThrowMatching(()=>{k1.$pP=1}, /disposed instance/); + const k3 = new K(); + ptr = k3.pointer; + T.assert(k3 === K.instanceForPointer(ptr)); + K.disposeAll(); + T.assert(ptr). + assert(undefined === k2.pointer). + assert(undefined === k3.pointer). + assert(undefined === K.instanceForPointer(ptr)); + }finally{ + k1.dispose(); + k2.dispose(); + } + + if(!W.bigIntEnabled){ + log("Skipping WasmTestStruct tests: BigInt not enabled."); + return; + } + + const WTStructDesc = + W.ctype.structs.filter((e)=>'WasmTestStruct'===e.name)[0]; + const autoResolvePtr = true /* EXPERIMENTAL */; + if(autoResolvePtr){ + WTStructDesc.members.ppV.signature = 'P'; + } + const WTStruct = S.StructBinder(WTStructDesc); + //log(WTStruct.structName, WTStruct.structInfo); + const wts = new WTStruct(); + //log("WTStruct.prototype keys:",Object.keys(WTStruct.prototype)); + try{ + T.assert(wts.constructor === WTStruct). + assert(WTStruct.memberKeys().indexOf('$ppV')>=0). + assert(wts.memberKeys().indexOf('$v8')>=0). + assert(!K.isA(wts)). + assert(WTStruct.isA(wts)). + assert(wts instanceof WTStruct). + assert(wts instanceof StructType). + assert(StructType.isA(wts)). + assert(wts === StructType.instanceForPointer(wts.pointer)); + T.assert(wts.pointer>0).assert(0===wts.$v4).assert(0n===wts.$v8). + assert(0===wts.$ppV).assert(0===wts.$xFunc). + assert(WTStruct.instanceForPointer(wts.pointer) === wts); + const testFunc = + W.xGet('sqlite3_wasm_test_struct'/*name gets mangled in -O3 builds!*/); + let counter = 0; + //log("wts.pointer =",wts.pointer); + const wtsFunc = function(arg){ + /*log("This from a JS function called from C, "+ + "which itself was called from JS. arg =",arg);*/ + ++counter; + T.assert(WTStruct.instanceForPointer(arg) === wts); + if(3===counter){ + tossQuietly("Testing exception propagation."); + } + } + wts.$v4 = 10; wts.$v8 = 20; + wts.$xFunc = W.installFunction(wtsFunc, wts.memberSignature('xFunc')) + T.assert(0===counter).assert(10 === wts.$v4).assert(20n === wts.$v8) + .assert(0 === wts.$ppV).assert('number' === typeof wts.$xFunc) + .assert(0 === wts.$cstr) + .assert(wts.memberIsString('$cstr')) + .assert(!wts.memberIsString('$v4')) + .assert(null === wts.memberToJsString('$cstr')) + .assert(W.functionEntry(wts.$xFunc) instanceof Function); + /* It might seem silly to assert that the values match + what we just set, but recall that all of those property + reads and writes are, via property interceptors, + actually marshaling their data to/from a raw memory + buffer, so merely reading them back is actually part of + testing the struct-wrapping API. */ + + testFunc(wts.pointer); + //log("wts.pointer, wts.$ppV",wts.pointer, wts.$ppV); + T.assert(1===counter).assert(20 === wts.$v4).assert(40n === wts.$v8) + .assert(autoResolvePtr ? (wts.$ppV === wts) : (wts.$ppV === wts.pointer)) + .assert('string' === typeof wts.memberToJsString('cstr')) + .assert(wts.memberToJsString('cstr') === wts.memberToJsString('$cstr')) + .mustThrowMatching(()=>wts.memberToJsString('xFunc'), + /Invalid member type signature for C-string/) + ; + testFunc(wts.pointer); + T.assert(2===counter).assert(40 === wts.$v4).assert(80n === wts.$v8) + .assert(autoResolvePtr ? 
(wts.$ppV === wts) : (wts.$ppV === wts.pointer)); + /** The 3rd call to wtsFunc throw from JS, which is called + from C, which is called from JS. Let's ensure that + that exception propagates back here... */ + T.mustThrowMatching(()=>testFunc(wts.pointer),/^Testing/); + W.uninstallFunction(wts.$xFunc); + wts.$xFunc = 0; + if(autoResolvePtr){ + wts.$ppV = 0; + T.assert(!wts.$ppV); + //WTStruct.debugFlags(0x03); + wts.$ppV = wts; + T.assert(wts === wts.$ppV) + //WTStruct.debugFlags(0); + } + wts.setMemberCString('cstr', "A C-string."); + T.assert(Array.isArray(wts.ondispose)). + assert(wts.ondispose[0] === wts.$cstr). + assert('A C-string.' === wts.memberToJsString('cstr')); + const ptr = wts.pointer; + wts.dispose(); + T.assert(ptr).assert(undefined === wts.pointer). + assert(undefined === WTStruct.instanceForPointer(ptr)) + }finally{ + wts.dispose(); + } + }/*StructBinder*/) + + //////////////////////////////////////////////////////////////////// + .t('sqlite3.StructBinder part 2', function(sqlite3){ + // https://www.sqlite.org/c3ref/vfs.html + // https://www.sqlite.org/c3ref/io_methods.html + const sqlite3_io_methods = capi.sqlite3_io_methods, + sqlite3_vfs = capi.sqlite3_vfs, + sqlite3_file = capi.sqlite3_file; + //log("struct sqlite3_file", sqlite3_file.memberKeys()); + //log("struct sqlite3_vfs", sqlite3_vfs.memberKeys()); + //log("struct sqlite3_io_methods", sqlite3_io_methods.memberKeys()); + const installMethod = function callee(tgt, name, func){ + if(1===arguments.length){ + return (n,f)=>callee(tgt,n,f); + } + if(!callee.argcProxy){ + callee.argcProxy = function(func,sig){ + return function(...args){ + if(func.length!==arguments.length){ + toss("Argument mismatch. Native signature is:",sig); + } + return func.apply(this, args); + } + }; + callee.ondisposeRemoveFunc = function(){ + if(this.__ondispose){ + const who = this; + this.__ondispose.forEach( + (v)=>{ + if('number'===typeof v){ + try{wasm.uninstallFunction(v)} + catch(e){/*ignore*/} + }else{/*wasm function wrapper property*/ + delete who[v]; + } + } + ); + delete this.__ondispose; + } + }; + }/*static init*/ + const sigN = tgt.memberSignature(name), + memKey = tgt.memberKey(name); + //log("installMethod",tgt, name, sigN); + if(!tgt.__ondispose){ + T.assert(undefined === tgt.ondispose); + tgt.ondispose = [callee.ondisposeRemoveFunc]; + tgt.__ondispose = []; + } + const fProxy = callee.argcProxy(func, sigN); + const pFunc = wasm.installFunction(fProxy, tgt.memberSignature(name, true)); + tgt[memKey] = pFunc; + /** + ACHTUNG: function pointer IDs are from a different pool than + allocation IDs, starting at 1 and incrementing in steps of 1, + so if we set tgt[memKey] to those values, we'd very likely + later misinterpret them as plain old pointer addresses unless + unless we use some silly heuristic like "all values <5k are + presumably function pointers," or actually perform a function + lookup on every pointer to first see if it's a function. That + would likely work just fine, but would be kludgy. + + It turns out that "all values less than X are functions" is + essentially how it works in wasm: a function pointer is + reported to the client as its index into the + __indirect_function_table. + + So... once jaccwabyt can be told how to access the + function table, it could consider all pointer values less + than that table's size to be functions. As "real" pointer + values start much, much higher than the function table size, + that would likely work reasonably well. e.g. 
the object + pointer address for sqlite3's default VFS is (in this local + setup) 65104, whereas the function table has fewer than 600 + entries. + */ + const wrapperKey = '$'+memKey; + tgt[wrapperKey] = fProxy; + tgt.__ondispose.push(pFunc, wrapperKey); + //log("tgt.__ondispose =",tgt.__ondispose); + return (n,f)=>callee(tgt, n, f); + }/*installMethod*/; + + const installIOMethods = function instm(iom){ + (iom instanceof capi.sqlite3_io_methods) || toss("Invalid argument type."); + if(!instm._requireFileArg){ + instm._requireFileArg = function(arg,methodName){ + arg = capi.sqlite3_file.resolveToInstance(arg); + if(!arg){ + err("sqlite3_io_methods::xClose() was passed a non-sqlite3_file."); + } + return arg; + }; + instm._methods = { + // https://sqlite.org/c3ref/io_methods.html + xClose: /*i(P)*/function(f){ + /* int (*xClose)(sqlite3_file*) */ + log("xClose(",f,")"); + if(!(f = instm._requireFileArg(f,'xClose'))) return capi.SQLITE_MISUSE; + f.dispose(/*noting that f has externally-owned memory*/); + return 0; + }, + xRead: /*i(Ppij)*/function(f,dest,n,offset){ + /* int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst) */ + log("xRead(",arguments,")"); + if(!(f = instm._requireFileArg(f))) return capi.SQLITE_MISUSE; + wasm.heap8().fill(0, dest + offset, n); + return 0; + }, + xWrite: /*i(Ppij)*/function(f,dest,n,offset){ + /* int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst) */ + log("xWrite(",arguments,")"); + if(!(f=instm._requireFileArg(f,'xWrite'))) return capi.SQLITE_MISUSE; + return 0; + }, + xTruncate: /*i(Pj)*/function(f){ + /* int (*xTruncate)(sqlite3_file*, sqlite3_int64 size) */ + log("xTruncate(",arguments,")"); + if(!(f=instm._requireFileArg(f,'xTruncate'))) return capi.SQLITE_MISUSE; + return 0; + }, + xSync: /*i(Pi)*/function(f){ + /* int (*xSync)(sqlite3_file*, int flags) */ + log("xSync(",arguments,")"); + if(!(f=instm._requireFileArg(f,'xSync'))) return capi.SQLITE_MISUSE; + return 0; + }, + xFileSize: /*i(Pp)*/function(f,pSz){ + /* int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize) */ + log("xFileSize(",arguments,")"); + if(!(f=instm._requireFileArg(f,'xFileSize'))) return capi.SQLITE_MISUSE; + wasm.setMemValue(pSz, 0/*file size*/); + return 0; + }, + xLock: /*i(Pi)*/function(f){ + /* int (*xLock)(sqlite3_file*, int) */ + log("xLock(",arguments,")"); + if(!(f=instm._requireFileArg(f,'xLock'))) return capi.SQLITE_MISUSE; + return 0; + }, + xUnlock: /*i(Pi)*/function(f){ + /* int (*xUnlock)(sqlite3_file*, int) */ + log("xUnlock(",arguments,")"); + if(!(f=instm._requireFileArg(f,'xUnlock'))) return capi.SQLITE_MISUSE; + return 0; + }, + xCheckReservedLock: /*i(Pp)*/function(){ + /* int (*xCheckReservedLock)(sqlite3_file*, int *pResOut) */ + log("xCheckReservedLock(",arguments,")"); + return 0; + }, + xFileControl: /*i(Pip)*/function(){ + /* int (*xFileControl)(sqlite3_file*, int op, void *pArg) */ + log("xFileControl(",arguments,")"); + return capi.SQLITE_NOTFOUND; + }, + xSectorSize: /*i(P)*/function(){ + /* int (*xSectorSize)(sqlite3_file*) */ + log("xSectorSize(",arguments,")"); + return 0/*???*/; + }, + xDeviceCharacteristics:/*i(P)*/function(){ + /* int (*xDeviceCharacteristics)(sqlite3_file*) */ + log("xDeviceCharacteristics(",arguments,")"); + return 0; + } + }; + }/*static init*/ + iom.$iVersion = 1; + Object.keys(instm._methods).forEach( + (k)=>installMethod(iom, k, instm._methods[k]) + ); + }/*installIOMethods()*/; + + const iom = new sqlite3_io_methods, sfile = new sqlite3_file; + const err = console.error.bind(console); + 
try { + const IOM = sqlite3_io_methods, S3F = sqlite3_file; + //log("iom proto",iom,iom.constructor.prototype); + //log("sfile",sfile,sfile.constructor.prototype); + T.assert(0===sfile.$pMethods).assert(iom.pointer > 0); + //log("iom",iom); + sfile.$pMethods = iom.pointer; + T.assert(iom.pointer === sfile.$pMethods) + .assert(IOM.resolveToInstance(iom)) + .assert(undefined ===IOM.resolveToInstance(sfile)) + .mustThrow(()=>IOM.resolveToInstance(0,true)) + .assert(S3F.resolveToInstance(sfile.pointer)) + .assert(undefined===S3F.resolveToInstance(iom)) + .assert(iom===IOM.resolveToInstance(sfile.$pMethods)); + T.assert(0===iom.$iVersion); + installIOMethods(iom); + T.assert(1===iom.$iVersion); + //log("iom.__ondispose",iom.__ondispose); + T.assert(Array.isArray(iom.__ondispose)).assert(iom.__ondispose.length>10); + }finally{ + iom.dispose(); + T.assert(undefined === iom.__ondispose); + } + + const dVfs = new sqlite3_vfs(capi.sqlite3_vfs_find(null)); + try { + const SB = sqlite3.StructBinder; + T.assert(dVfs instanceof SB.StructType) + .assert(dVfs.pointer) + .assert('sqlite3_vfs' === dVfs.structName) + .assert(!!dVfs.structInfo) + .assert(SB.StructType.hasExternalPointer(dVfs)) + .assert(dVfs.$iVersion>0) + .assert('number'===typeof dVfs.$zName) + .assert('number'===typeof dVfs.$xSleep) + .assert(wasm.functionEntry(dVfs.$xOpen)) + .assert(dVfs.memberIsString('zName')) + .assert(dVfs.memberIsString('$zName')) + .assert(!dVfs.memberIsString('pAppData')) + .mustThrowMatching(()=>dVfs.memberToJsString('xSleep'), + /Invalid member type signature for C-string/) + .mustThrowMatching(()=>dVfs.memberSignature('nope'), /nope is not a mapped/) + .assert('string' === typeof dVfs.memberToJsString('zName')) + .assert(dVfs.memberToJsString('zName')===dVfs.memberToJsString('$zName')) + ; + //log("Default VFS: @",dVfs.pointer); + Object.keys(sqlite3_vfs.structInfo.members).forEach(function(mname){ + const mk = sqlite3_vfs.memberKey(mname), mbr = sqlite3_vfs.structInfo.members[mname], + addr = dVfs[mk], prefix = 'defaultVfs.'+mname; + if(1===mbr.signature.length){ + let sep = '?', val = undefined; + switch(mbr.signature[0]){ + // TODO: move this into an accessor, e.g. 
getPreferredValue(member) + case 'i': case 'j': case 'f': case 'd': sep = '='; val = dVfs[mk]; break + case 'p': case 'P': sep = '@'; val = dVfs[mk]; break; + case 's': sep = '='; + val = dVfs.memberToJsString(mname); + break; + } + //log(prefix, sep, val); + }else{ + //log(prefix," = funcptr @",addr, wasm.functionEntry(addr)); + } + }); + }finally{ + dVfs.dispose(); + T.assert(undefined===dVfs.pointer); + } + }/*StructBinder part 2*/) + + //////////////////////////////////////////////////////////////////// + .t('sqlite3.wasm.pstack', function(sqlite3){ + const P = wasm.pstack; + const isAllocErr = (e)=>e instanceof sqlite3.WasmAllocError; + const stack = P.pointer; + T.assert(0===stack % 8 /* must be 8-byte aligned */); + try{ + const remaining = P.remaining; + T.assert(P.quota >= 4096) + .assert(remaining === P.quota) + .mustThrowMatching(()=>P.alloc(0), isAllocErr) + .mustThrowMatching(()=>P.alloc(-1), isAllocErr); + let p1 = P.alloc(12); + T.assert(p1 === stack - 16/*8-byte aligned*/) + .assert(P.pointer === p1); + let p2 = P.alloc(7); + T.assert(p2 === p1-8/*8-byte aligned, stack grows downwards*/) + .mustThrowMatching(()=>P.alloc(remaining), isAllocErr) + .assert(24 === stack - p2) + .assert(P.pointer === p2); + let n = remaining - (stack - p2); + let p3 = P.alloc(n); + T.assert(p3 === stack-remaining) + .mustThrowMatching(()=>P.alloc(1), isAllocErr); + }finally{ + P.restore(stack); + } + + T.assert(P.pointer === stack); + try { + const [p1, p2, p3] = P.allocChunks(3,4); + T.assert(P.pointer === stack-16/*always rounded to multiple of 8*/) + .assert(p2 === p1 + 4) + .assert(p3 === p2 + 4); + T.mustThrowMatching(()=>P.allocChunks(1024, 1024 * 16), + (e)=>e instanceof sqlite3.WasmAllocError) + }finally{ + P.restore(stack); + } + + T.assert(P.pointer === stack); + try { + let [p1, p2, p3] = P.allocPtr(3,false); + let sPos = stack-16/*always rounded to multiple of 8*/; + T.assert(P.pointer === sPos) + .assert(p2 === p1 + 4) + .assert(p3 === p2 + 4); + [p1, p2, p3] = P.allocPtr(3); + T.assert(P.pointer === sPos-24/*3 x 8 bytes*/) + .assert(p2 === p1 + 8) + .assert(p3 === p2 + 8); + p1 = P.allocPtr(); + T.assert('number'===typeof p1); + }finally{ + P.restore(stack); + } + }/*pstack tests*/) + + //////////////////////////////////////////////////////////////////// + ;/*end of C/WASM utils checks*/ + + T.g('sqlite3_randomness()') + .t('To memory buffer', function(sqlite3){ + const stack = wasm.pstack.pointer; + try{ + const n = 520; + const p = wasm.pstack.alloc(n); + T.assert(0===wasm.getMemValue(p)) + .assert(0===wasm.getMemValue(p+n-1)); + T.assert(undefined === capi.sqlite3_randomness(n - 10, p)); + let j, check = 0; + const heap = wasm.heap8u(); + for(j = 0; j < 10 && 0===check; ++j){ + check += heap[p + j]; + } + T.assert(check > 0); + check = 0; + // Ensure that the trailing bytes were not modified... 
+ for(j = n - 10; j < n && 0===check; ++j){ + check += heap[p + j]; + } + T.assert(0===check); + }finally{ + wasm.pstack.restore(stack); + } + }) + .t('To byte array', function(sqlite3){ + const ta = new Uint8Array(117); + let i, n = 0; + for(i=0; i0); + const t0 = new Uint8Array(0); + T.assert(t0 === capi.sqlite3_randomness(t0), + "0-length array is a special case"); + }) + ;/*end sqlite3_randomness() checks*/ + + //////////////////////////////////////////////////////////////////////// + T.g('sqlite3.oo1') + .t('Create db', function(sqlite3){ + const dbFile = '/tester1.db'; + wasm.sqlite3_wasm_vfs_unlink(0, dbFile); + const db = this.db = new sqlite3.oo1.DB(dbFile); + T.assert(Number.isInteger(db.pointer)) + .mustThrowMatching(()=>db.pointer=1, /read-only/) + .assert(0===sqlite3.capi.sqlite3_extended_result_codes(db.pointer,1)) + .assert('main'===db.dbName(0)) + .assert('string' === typeof db.dbVfsName()); + // Custom db error message handling via sqlite3_prepare_v2/v3() + let rc = capi.sqlite3_prepare_v3(db.pointer, {/*invalid*/}, -1, 0, null, null); + T.assert(capi.SQLITE_MISUSE === rc) + .assert(0 === capi.sqlite3_errmsg(db.pointer).indexOf("Invalid SQL")) + .assert(dbFile === db.dbFilename()) + .assert(!db.dbFilename('nope')); + }) + + //////////////////////////////////////////////////////////////////// + .t('DB.Stmt', function(S){ + let st = this.db.prepare( + new TextEncoder('utf-8').encode("select 3 as a") + ); + //debug("statement =",st); + try { + T.assert(Number.isInteger(st.pointer)) + .mustThrowMatching(()=>st.pointer=1, /read-only/) + .assert(1===this.db.openStatementCount()) + .assert(!st._mayGet) + .assert('a' === st.getColumnName(0)) + .assert(1===st.columnCount) + .assert(0===st.parameterCount) + .mustThrow(()=>st.bind(1,null)) + .assert(true===st.step()) + .assert(3 === st.get(0)) + .mustThrow(()=>st.get(1)) + .mustThrow(()=>st.get(0,~capi.SQLITE_INTEGER)) + .assert(3 === st.get(0,capi.SQLITE_INTEGER)) + .assert(3 === st.getInt(0)) + .assert('3' === st.get(0,capi.SQLITE_TEXT)) + .assert('3' === st.getString(0)) + .assert(3.0 === st.get(0,capi.SQLITE_FLOAT)) + .assert(3.0 === st.getFloat(0)) + .assert(3 === st.get({}).a) + .assert(3 === st.get([])[0]) + .assert(3 === st.getJSON(0)) + .assert(st.get(0,capi.SQLITE_BLOB) instanceof Uint8Array) + .assert(1===st.get(0,capi.SQLITE_BLOB).length) + .assert(st.getBlob(0) instanceof Uint8Array) + .assert('3'.charCodeAt(0) === st.getBlob(0)[0]) + .assert(st._mayGet) + .assert(false===st.step()) + .assert(!st._mayGet) + ; + T.assert(0===capi.sqlite3_strglob("*.txt", "foo.txt")). + assert(0!==capi.sqlite3_strglob("*.txt", "foo.xtx")). + assert(0===capi.sqlite3_strlike("%.txt", "foo.txt", 0)). + assert(0!==capi.sqlite3_strlike("%.txt", "foo.xtx", 0)); + }finally{ + st.finalize(); + } + T.assert(!st.pointer) + .assert(0===this.db.openStatementCount()); + }) + + //////////////////////////////////////////////////////////////////////// + .t('sqlite3_js_...()', function(){ + const db = this.db; + if(1){ + const vfsList = capi.sqlite3_js_vfs_list(); + T.assert(vfsList.length>1); + T.assert('string'===typeof vfsList[0]); + //log("vfsList =",vfsList); + for(const v of vfsList){ + T.assert('string' === typeof v) + .assert(capi.sqlite3_vfs_find(v) > 0); + } + } + /** + Trivia: the magic db name ":memory:" does not actually use the + "memdb" VFS unless "memdb" is _explicitly_ provided as the VFS + name. Instead, it uses the default VFS with an in-memory btree. + Thus this.db's VFS may not be memdb even though it's an in-memory + db. 
+ */ + const pVfsMem = capi.sqlite3_vfs_find('memdb'), + pVfsDflt = capi.sqlite3_vfs_find(0), + pVfsDb = capi.sqlite3_js_db_vfs(db.pointer); + T.assert(pVfsMem > 0) + .assert(pVfsDflt > 0) + .assert(pVfsDb > 0) + .assert(pVfsMem !== pVfsDflt + /* memdb lives on top of the default vfs */) + .assert(pVfsDb === pVfsDflt || pVfsdb === pVfsMem) + ; + /*const vMem = new capi.sqlite3_vfs(pVfsMem), + vDflt = new capi.sqlite3_vfs(pVfsDflt), + vDb = new capi.sqlite3_vfs(pVfsDb);*/ + const duv = capi.sqlite3_js_db_uses_vfs; + T.assert(pVfsDflt === duv(db.pointer, 0) + || pVfsMem === duv(db.pointer,0)) + .assert(!duv(db.pointer, "foo")) + ; + }/*sqlite3_js_...()*/) + + //////////////////////////////////////////////////////////////////// + .t('Table t', function(sqlite3){ + const db = this.db; + let list = []; + let rc = db.exec({ + sql:['CREATE TABLE t(a,b);', + // ^^^ using TEMP TABLE breaks the db export test + "INSERT INTO t(a,b) VALUES(1,2),(3,4),", + "(?,?),('blob',X'6869')"/*intentionally missing semicolon to test for + off-by-one bug in string-to-WASM conversion*/], + saveSql: list, + bind: [5,6] + }); + //debug("Exec'd SQL:", list); + T.assert(rc === db) + .assert(2 === list.length) + .assert('string'===typeof list[1]) + .assert(4===db.changes()); + if(wasm.bigIntEnabled){ + T.assert(4n===db.changes(false,true)); + } + let blob = db.selectValue("select b from t where a='blob'"); + T.assert(blob instanceof Uint8Array). + assert(0x68===blob[0] && 0x69===blob[1]); + blob = null; + let counter = 0, colNames = []; + list.length = 0; + db.exec(new TextEncoder('utf-8').encode("SELECT a a, b b FROM t"),{ + rowMode: 'object', + resultRows: list, + columnNames: colNames, + callback: function(row,stmt){ + ++counter; + T.assert((row.a%2 && row.a<6) || 'blob'===row.a); + } + }); + T.assert(2 === colNames.length) + .assert('a' === colNames[0]) + .assert(4 === counter) + .assert(4 === list.length); + list.length = 0; + db.exec("SELECT a a, b b FROM t",{ + rowMode: 'array', + callback: function(row,stmt){ + ++counter; + T.assert(Array.isArray(row)) + .assert((0===row[1]%2 && row[1]<7) + || (row[1] instanceof Uint8Array)); + } + }); + T.assert(8 === counter); + T.assert(Number.MIN_SAFE_INTEGER === + db.selectValue("SELECT "+Number.MIN_SAFE_INTEGER)). + assert(Number.MAX_SAFE_INTEGER === + db.selectValue("SELECT "+Number.MAX_SAFE_INTEGER)); + if(wasm.bigIntEnabled && haveWasmCTests()){ + const mI = wasm.xCall('sqlite3_wasm_test_int64_max'); + const b = BigInt(Number.MAX_SAFE_INTEGER * 2); + T.assert(b === db.selectValue("SELECT "+b)). + assert(b === db.selectValue("SELECT ?", b)). + assert(mI == db.selectValue("SELECT $x", {$x:mI})); + }else{ + /* Curiously, the JS spec seems to be off by one with the definitions + of MIN/MAX_SAFE_INTEGER: + + https://github.com/emscripten-core/emscripten/issues/17391 */ + T.mustThrow(()=>db.selectValue("SELECT "+(Number.MAX_SAFE_INTEGER+1))). 
+ mustThrow(()=>db.selectValue("SELECT "+(Number.MIN_SAFE_INTEGER-1))); + } + + let st = db.prepare("update t set b=:b where a='blob'"); + try { + const ndx = st.getParamIndex(':b'); + T.assert(1===ndx); + st.bindAsBlob(ndx, "ima blob").reset(true); + } finally { + st.finalize(); + } + + try { + db.prepare("/*empty SQL*/"); + toss("Must not be reached."); + }catch(e){ + T.assert(e instanceof sqlite3.SQLite3Error) + .assert(0==e.message.indexOf('Cannot prepare empty')); + } + }) + + //////////////////////////////////////////////////////////////////////// + .t('selectArray/Object()', function(sqlite3){ + const db = this.db; + let rc = db.selectArray('select a, b from t where a=?', 5); + T.assert(Array.isArray(rc)) + .assert(2===rc.length) + .assert(5===rc[0] && 6===rc[1]); + rc = db.selectArray('select a, b from t where b=-1'); + T.assert(undefined === rc); + rc = db.selectObject('select a A, b b from t where b=?', 6); + T.assert(rc && 'object'===typeof rc) + .assert(5===rc.A) + .assert(6===rc.b); + rc = db.selectArray('select a, b from t where b=-1'); + T.assert(undefined === rc); + }) + + //////////////////////////////////////////////////////////////////////// + .t('sqlite3_js_db_export()', function(){ + const db = this.db; + const xp = capi.sqlite3_js_db_export(db.pointer); + T.assert(xp instanceof Uint8Array) + .assert(xp.byteLength>0) + .assert(0 === xp.byteLength % 512); + }/*sqlite3_js_db_export()*/) + + //////////////////////////////////////////////////////////////////// + .t('Scalar UDFs', function(sqlite3){ + const db = this.db; + db.createFunction("foo",(pCx,a,b)=>a+b); + T.assert(7===db.selectValue("select foo(3,4)")). + assert(5===db.selectValue("select foo(3,?)",2)). + assert(5===db.selectValue("select foo(?,?2)",[1,4])). + assert(5===db.selectValue("select foo($a,$b)",{$a:0,$b:5})); + db.createFunction("bar", { + arity: -1, + xFunc: (pCx,...args)=>{ + let rc = 0; + for(const v of args) rc += v; + return rc; + } + }).createFunction({ + name: "asis", + xFunc: (pCx,arg)=>arg + }); + T.assert(0===db.selectValue("select bar()")). + assert(1===db.selectValue("select bar(1)")). + assert(3===db.selectValue("select bar(1,2)")). + assert(-1===db.selectValue("select bar(1,2,-4)")). + assert('hi' === db.selectValue("select asis('hi')")). + assert('hi' === db.selectValue("select ?",'hi')). + assert(null === db.selectValue("select null")). + assert(null === db.selectValue("select asis(null)")). + assert(1 === db.selectValue("select ?",1)). + assert(2 === db.selectValue("select ?",[2])). + assert(3 === db.selectValue("select $a",{$a:3})). + assert(T.eqApprox(3.1,db.selectValue("select 3.0 + 0.1"))). + assert(T.eqApprox(1.3,db.selectValue("select asis(1 + 0.3)"))); + + let blobArg = new Uint8Array(2); + blobArg.set([0x68, 0x69], 0); + let blobRc = db.selectValue("select asis(?1)", blobArg); + T.assert(blobRc instanceof Uint8Array). + assert(2 === blobRc.length). + assert(0x68==blobRc[0] && 0x69==blobRc[1]); + blobRc = db.selectValue("select asis(X'6869')"); + T.assert(blobRc instanceof Uint8Array). + assert(2 === blobRc.length). + assert(0x68==blobRc[0] && 0x69==blobRc[1]); + + blobArg = new Int8Array(2); + blobArg.set([0x68, 0x69]); + //debug("blobArg=",blobArg); + blobRc = db.selectValue("select asis(?1)", blobArg); + T.assert(blobRc instanceof Uint8Array). 
+ assert(2 === blobRc.length); + //debug("blobRc=",blobRc); + T.assert(0x68==blobRc[0] && 0x69==blobRc[1]); + }) + + //////////////////////////////////////////////////////////////////// + .t({ + name: 'Aggregate UDFs', + test: function(sqlite3){ + const db = this.db; + const sjac = capi.sqlite3_js_aggregate_context; + db.createFunction({ + name: 'summer', + xStep: (pCtx, n)=>{ + const ac = sjac(pCtx, 4); + wasm.setMemValue(ac, wasm.getMemValue(ac,'i32') + Number(n), 'i32'); + }, + xFinal: (pCtx)=>{ + const ac = sjac(pCtx, 0); + return ac ? wasm.getMemValue(ac,'i32') : 0; + } + }); + let v = db.selectValue([ + "with cte(v) as (", + "select 3 union all select 5 union all select 7", + ") select summer(v), summer(v+1) from cte" + /* ------------------^^^^^^^^^^^ ensures that we're handling + sqlite3_aggregate_context() properly. */ + ]); + T.assert(15===v); + T.mustThrowMatching(()=>db.selectValue("select summer(1,2)"), + /wrong number of arguments/); + + db.createFunction({ + name: 'summerN', + arity: -1, + xStep: (pCtx, ...args)=>{ + const ac = sjac(pCtx, 4); + let sum = wasm.getMemValue(ac, 'i32'); + for(const v of args) sum += Number(v); + wasm.setMemValue(ac, sum, 'i32'); + }, + xFinal: (pCtx)=>{ + const ac = sjac(pCtx, 0); + capi.sqlite3_result_int( pCtx, ac ? wasm.getMemValue(ac,'i32') : 0 ); + // xFinal() may either return its value directly or call + // sqlite3_result_xyz() and return undefined. Both are + // functionally equivalent. + } + }); + T.assert(18===db.selectValue('select summerN(1,8,9), summerN(2,3,4)')); + T.mustThrowMatching(()=>{ + db.createFunction('nope',{ + xFunc: ()=>{}, xStep: ()=>{} + }); + }, /scalar or aggregate\?/); + T.mustThrowMatching(()=>{ + db.createFunction('nope',{xStep: ()=>{}}); + }, /Missing xFinal/); + T.mustThrowMatching(()=>{ + db.createFunction('nope',{xFinal: ()=>{}}); + }, /Missing xStep/); + T.mustThrowMatching(()=>{ + db.createFunction('nope',{}); + }, /Missing function-type properties/); + T.mustThrowMatching(()=>{ + db.createFunction('nope',{xFunc:()=>{}, xDestroy:'nope'}); + }, /xDestroy property must be a function/); + T.mustThrowMatching(()=>{ + db.createFunction('nope',{xFunc:()=>{}, pApp:'nope'}); + }, /Invalid value for pApp/); + } + }/*aggregate UDFs*/) + + //////////////////////////////////////////////////////////////////////// + .t({ + name: 'Aggregate UDFs (64-bit)', + predicate: ()=>wasm.bigIntEnabled, + test: function(sqlite3){ + const db = this.db; + const sjac = capi.sqlite3_js_aggregate_context; + db.createFunction({ + name: 'summer64', + xStep: (pCtx, n)=>{ + const ac = sjac(pCtx, 8); + wasm.setMemValue(ac, wasm.getMemValue(ac,'i64') + BigInt(n), 'i64'); + }, + xFinal: (pCtx)=>{ + const ac = sjac(pCtx, 0); + return ac ? wasm.getMemValue(ac,'i64') : 0n; + } + }); + let v = db.selectValue([ + "with cte(v) as (", + "select 9007199254740991 union all select 1 union all select 2", + ") select summer64(v), summer64(v+1) from cte" + ]); + T.assert(9007199254740994n===v); + } + }/*aggregate UDFs*/) + + //////////////////////////////////////////////////////////////////// + .t({ + name: 'Window UDFs', + test: function(){ + /* Example window function, table, and results taken from: + https://sqlite.org/windowfunctions.html#udfwinfunc */ + const db = this.db; + const sjac = (cx,n=4)=>capi.sqlite3_js_aggregate_context(cx,n); + const xValueFinal = (pCtx)=>{ + const ac = sjac(pCtx, 0); + return ac ? 
wasm.getMemValue(ac,'i32') : 0; + }; + const xStepInverse = (pCtx, n)=>{ + const ac = sjac(pCtx); + wasm.setMemValue(ac, wasm.getMemValue(ac,'i32') + Number(n), 'i32'); + }; + db.createFunction({ + name: 'winsumint', + xStep: (pCtx, n)=>xStepInverse(pCtx, n), + xInverse: (pCtx, n)=>xStepInverse(pCtx, -n), + xFinal: xValueFinal, + xValue: xValueFinal + }); + db.exec([ + "CREATE TEMP TABLE twin(x, y); INSERT INTO twin VALUES", + "('a', 4),('b', 5),('c', 3),('d', 8),('e', 1)" + ]); + let rc = db.exec({ + returnValue: 'resultRows', + sql:[ + "SELECT x, winsumint(y) OVER (", + "ORDER BY x ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING", + ") AS sum_y ", + "FROM twin ORDER BY x;" + ] + }); + T.assert(Array.isArray(rc)) + .assert(5 === rc.length); + let count = 0; + for(const row of rc){ + switch(++count){ + case 1: T.assert('a'===row[0] && 9===row[1]); break; + case 2: T.assert('b'===row[0] && 12===row[1]); break; + case 3: T.assert('c'===row[0] && 16===row[1]); break; + case 4: T.assert('d'===row[0] && 12===row[1]); break; + case 5: T.assert('e'===row[0] && 9===row[1]); break; + default: toss("Too many rows to window function."); + } + } + const resultRows = []; + rc = db.exec({ + resultRows, + returnValue: 'resultRows', + sql:[ + "SELECT x, winsumint(y) OVER (", + "ORDER BY x ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING", + ") AS sum_y ", + "FROM twin ORDER BY x;" + ] + }); + T.assert(rc === resultRows) + .assert(5 === rc.length); + + rc = db.exec({ + returnValue: 'saveSql', + sql: "select 1; select 2; -- empty\n; select 3" + }); + T.assert(Array.isArray(rc)) + .assert(3===rc.length) + .assert('select 1;' === rc[0]) + .assert('select 2;' === rc[1]) + .assert('-- empty\n; select 3' === rc[2] + /* Strange but true. */); + + T.mustThrowMatching(()=>{ + db.exec({sql:'', returnValue: 'nope'}); + }, /^Invalid returnValue/); + + db.exec("DROP TABLE twin"); + } + }/*window UDFs*/) + + //////////////////////////////////////////////////////////////////// + .t("ATTACH", function(){ + const db = this.db; + const resultRows = []; + db.exec({ + sql:new TextEncoder('utf-8').encode([ + // ^^^ testing string-vs-typedarray handling in exec() + "attach 'session' as foo;", + "create table foo.bar(a);", + "insert into foo.bar(a) values(1),(2),(3);", + "select a from foo.bar order by a;" + ].join('')), + rowMode: 0, + resultRows + }); + T.assert(3===resultRows.length) + .assert(2===resultRows[1]); + T.assert(2===db.selectValue('select a from foo.bar where a>1 order by a')); + let colCount = 0, rowCount = 0; + const execCallback = function(pVoid, nCols, aVals, aNames){ + colCount = nCols; + ++rowCount; + T.assert(2===aVals.length) + .assert(2===aNames.length) + .assert(+(aVals[1]) === 2 * +(aVals[0])); + }; + let rc = capi.sqlite3_exec( + db.pointer, "select a, a*2 from foo.bar", execCallback, + 0, 0 + ); + T.assert(0===rc).assert(3===rowCount).assert(2===colCount); + rc = capi.sqlite3_exec( + db.pointer, "select a from foo.bar", ()=>{ + tossQuietly("Testing throwing from exec() callback."); + }, 0, 0 + ); + T.assert(capi.SQLITE_ABORT === rc); + db.exec("detach foo"); + T.mustThrow(()=>db.exec("select * from foo.bar")); + }) + + //////////////////////////////////////////////////////////////////// + .t({ + name: 'C-side WASM tests (if compiled in)', + predicate: haveWasmCTests, + test: function(){ + const w = wasm, db = this.db; + const stack = w.scopedAllocPush(); + let ptrInt; + const origValue = 512; + const ptrValType = 'i32'; + try{ + ptrInt = w.scopedAlloc(4); + w.setMemValue(ptrInt,origValue, ptrValType); + const cf = 
w.xGet('sqlite3_wasm_test_intptr'); + const oldPtrInt = ptrInt; + //log('ptrInt',ptrInt); + //log('getMemValue(ptrInt)',w.getMemValue(ptrInt)); + T.assert(origValue === w.getMemValue(ptrInt, ptrValType)); + const rc = cf(ptrInt); + //log('cf(ptrInt)',rc); + //log('ptrInt',ptrInt); + //log('getMemValue(ptrInt)',w.getMemValue(ptrInt,ptrValType)); + T.assert(2*origValue === rc). + assert(rc === w.getMemValue(ptrInt,ptrValType)). + assert(oldPtrInt === ptrInt); + const pi64 = w.scopedAlloc(8)/*ptr to 64-bit integer*/; + const o64 = 0x010203040506/*>32-bit integer*/; + const ptrType64 = 'i64'; + if(w.bigIntEnabled){ + w.setMemValue(pi64, o64, ptrType64); + //log("pi64 =",pi64, "o64 = 0x",o64.toString(16), o64); + const v64 = ()=>w.getMemValue(pi64,ptrType64) + //log("getMemValue(pi64)",v64()); + T.assert(v64() == o64); + //T.assert(o64 === w.getMemValue(pi64, ptrType64)); + const cf64w = w.xGet('sqlite3_wasm_test_int64ptr'); + cf64w(pi64); + //log("getMemValue(pi64)",v64()); + T.assert(v64() == BigInt(2 * o64)); + cf64w(pi64); + T.assert(v64() == BigInt(4 * o64)); + + const biTimes2 = w.xGet('sqlite3_wasm_test_int64_times2'); + T.assert(BigInt(2 * o64) === + biTimes2(BigInt(o64)/*explicit conv. required to avoid TypeError + in the call :/ */)); + + const pMin = w.scopedAlloc(16); + const pMax = pMin + 8; + const g64 = (p)=>w.getMemValue(p,ptrType64); + w.setMemValue(pMin, 0, ptrType64); + w.setMemValue(pMax, 0, ptrType64); + const minMaxI64 = [ + w.xCall('sqlite3_wasm_test_int64_min'), + w.xCall('sqlite3_wasm_test_int64_max') + ]; + T.assert(minMaxI64[0] < BigInt(Number.MIN_SAFE_INTEGER)). + assert(minMaxI64[1] > BigInt(Number.MAX_SAFE_INTEGER)); + //log("int64_min/max() =",minMaxI64, typeof minMaxI64[0]); + w.xCall('sqlite3_wasm_test_int64_minmax', pMin, pMax); + T.assert(g64(pMin) === minMaxI64[0], "int64 mismatch"). + assert(g64(pMax) === minMaxI64[1], "int64 mismatch"); + //log("pMin",g64(pMin), "pMax",g64(pMax)); + w.setMemValue(pMin, minMaxI64[0], ptrType64); + T.assert(g64(pMin) === minMaxI64[0]). + assert(minMaxI64[0] === db.selectValue("select ?",g64(pMin))). + assert(minMaxI64[1] === db.selectValue("select ?",g64(pMax))); + const rxRange = /too big/; + T.mustThrowMatching(()=>{db.prepare("select ?").bind(minMaxI64[0] - BigInt(1))}, + rxRange). + mustThrowMatching(()=>{db.prepare("select ?").bind(minMaxI64[1] + BigInt(1))}, + (e)=>rxRange.test(e.message)); + }else{ + log("No BigInt support. Skipping related tests."); + log("\"The problem\" here is that we can manipulate, at the byte level,", + "heap memory to set 64-bit values, but we can't get those values", + "back into JS because of the lack of 64-bit integer support."); + } + }finally{ + const x = w.scopedAlloc(1), y = w.scopedAlloc(1), z = w.scopedAlloc(1); + //log("x=",x,"y=",y,"z=",z); // just looking at the alignment + w.scopedAllocPop(stack); + } + } + }/* jaccwabyt-specific tests */) + + .t('Close db', function(){ + T.assert(this.db).assert(Number.isInteger(this.db.pointer)); + wasm.exports.sqlite3_wasm_db_reset(this.db.pointer); + this.db.close(); + T.assert(!this.db.pointer); + }) + ;/* end of oo1 checks */ + + //////////////////////////////////////////////////////////////////////// + T.g('kvvfs') + .t('kvvfs sanity checks', function(sqlite3){ + if(isWorker()){ + T.assert( + !capi.sqlite3_vfs_find('kvvfs'), + "Expecting kvvfs to be unregistered." 
+ ); + log("kvvfs is (correctly) unavailable in a Worker."); + return; + } + const filename = 'session'; + const pVfs = capi.sqlite3_vfs_find('kvvfs'); + T.assert(pVfs); + const JDb = sqlite3.oo1.JsStorageDb; + const unlink = ()=>JDb.clearStorage(filename); + unlink(); + let db = new JDb(filename); + try { + db.exec([ + 'create table kvvfs(a);', + 'insert into kvvfs(a) values(1),(2),(3)' + ]); + T.assert(3 === db.selectValue('select count(*) from kvvfs')); + db.close(); + db = new JDb(filename); + db.exec('insert into kvvfs(a) values(4),(5),(6)'); + T.assert(6 === db.selectValue('select count(*) from kvvfs')); + }finally{ + db.close(); + unlink(); + } + }/*kvvfs sanity checks*/) + ;/* end kvvfs tests */ + + //////////////////////////////////////////////////////////////////////// + T.g('OPFS (Worker thread only and only in supported browsers)', + (sqlite3)=>{return !!sqlite3.opfs}) + .t({ + name: 'OPFS sanity checks', + test: async function(sqlite3){ + const opfs = sqlite3.opfs; + const filename = 'sqlite3-tester1.db'; + const pVfs = capi.sqlite3_vfs_find('opfs'); + T.assert(pVfs); + const unlink = (fn=filename)=>wasm.sqlite3_wasm_vfs_unlink(pVfs,fn); + unlink(); + let db = new opfs.OpfsDb(filename); + try { + db.exec([ + 'create table p(a);', + 'insert into p(a) values(1),(2),(3)' + ]); + T.assert(3 === db.selectValue('select count(*) from p')); + db.close(); + db = new opfs.OpfsDb(filename); + db.exec('insert into p(a) values(4),(5),(6)'); + T.assert(6 === db.selectValue('select count(*) from p')); + }finally{ + db.close(); + unlink(); + } + + if(1){ + // Sanity-test sqlite3_wasm_vfs_create_file()... + const fSize = 1379; + let sh; + try{ + T.assert(!(await opfs.entryExists(filename))); + let rc = wasm.sqlite3_wasm_vfs_create_file( + pVfs, filename, null, fSize + ); + T.assert(0===rc) + .assert(await opfs.entryExists(filename)); + const fh = await opfs.rootDirectory.getFileHandle(filename); + sh = await fh.createSyncAccessHandle(); + T.assert(fSize === await sh.getSize()); + }finally{ + if(sh) await sh.close(); + unlink(); + } + } + + // Some sanity checks of the opfs utility functions... + const testDir = '/sqlite3-opfs-'+opfs.randomFilename(12); + const aDir = testDir+'/test/dir'; + T.assert(await opfs.mkdir(aDir), "mkdir failed") + .assert(await opfs.mkdir(aDir), "mkdir must pass if the dir exists") + .assert(!(await opfs.unlink(testDir+'/test')), "delete 1 should have failed (dir not empty)") + .assert((await opfs.unlink(testDir+'/test/dir')), "delete 2 failed") + .assert(!(await opfs.unlink(testDir+'/test/dir')), + "delete 2b should have failed (dir already deleted)") + .assert((await opfs.unlink(testDir, true)), "delete 3 failed") + .assert(!(await opfs.entryExists(testDir)), + "entryExists(",testDir,") should have failed"); + } + }/*OPFS sanity checks*/) + ;/* end OPFS tests */ + + //////////////////////////////////////////////////////////////////////// + log("Loading and initializing sqlite3 WASM module..."); + if(!isUIThread()){ + /* + If sqlite3.js is in a directory other than this script, in order + to get sqlite3.js to resolve sqlite3.wasm properly, we have to + explicitly tell it where sqlite3.js is being loaded from. We do + that by passing the `sqlite3.dir=theDirName` URL argument to + _this_ script. That URL argument will be seen by the JS/WASM + loader and it will adjust the sqlite3.wasm path accordingly. If + sqlite3.js/.wasm are in the same directory as this script then + that's not needed. 
+ + URL arguments passed as part of the filename via importScripts() + are simply lost, and such scripts see the self.location of + _this_ script. + */ + let sqlite3Js = 'sqlite3.js'; + const urlParams = new URL(self.location.href).searchParams; + if(urlParams.has('sqlite3.dir')){ + sqlite3Js = urlParams.get('sqlite3.dir') + '/' + sqlite3Js; + } + importScripts(sqlite3Js); + } + self.sqlite3InitModule({ + print: log, + printErr: error + }).then(function(sqlite3){ + //console.log('sqlite3 =',sqlite3); + log("Done initializing WASM/JS bits. Running tests..."); + capi = sqlite3.capi; + wasm = sqlite3.wasm; + log("sqlite3 version:",capi.sqlite3_libversion(), + capi.sqlite3_sourceid()); + if(wasm.bigIntEnabled){ + log("BigInt/int64 support is enabled."); + }else{ + logClass('warning',"BigInt/int64 support is disabled."); + } + if(haveWasmCTests()){ + log("sqlite3_wasm_test_...() APIs are available."); + }else{ + logClass('warning',"sqlite3_wasm_test_...() APIs unavailable."); + } + TestUtil.runTests(sqlite3); + }); +})(); DELETED ext/wasm/testing1.html Index: ext/wasm/testing1.html ================================================================== --- ext/wasm/testing1.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - sqlite3-api.js tests - - -
sqlite3-api.js tests
- -
-
-
Initializing app...
-
- On a slow internet connection this may take a moment. If this - message displays for "a long time", initialization may have - failed and the JavaScript console may contain clues as to why. -
-
-
Downloading...
-
- -
-
Most stuff on this page happens in the dev console.
-
-
- - - - - DELETED ext/wasm/testing1.js Index: ext/wasm/testing1.js ================================================================== --- ext/wasm/testing1.js +++ /dev/null @@ -1,1080 +0,0 @@ -/* - 2022-05-22 - - The author disclaims copyright to this source code. In place of a - legal notice, here is a blessing: - - * May you do good and not evil. - * May you find forgiveness for yourself and forgive others. - * May you share freely, never taking more than you give. - - *********************************************************************** - - A basic test script for sqlite3-api.js. This file must be run in - main JS thread and sqlite3.js must have been loaded before it. -*/ -'use strict'; -(function(){ - const T = self.SqliteTestUtil; - const toss = function(...args){throw new Error(args.join(' '))}; - const debug = console.debug.bind(console); - const eOutput = document.querySelector('#test-output'); - const log = console.log.bind(console) - const logHtml = function(...args){ - log.apply(this, args); - const ln = document.createElement('div'); - ln.append(document.createTextNode(args.join(' '))); - eOutput.append(ln); - }; - - const eqApprox = function(v1,v2,factor=0.05){ - //debug('eqApprox',v1, v2); - return v1>=(v2-factor) && v1<=(v2+factor); - }; - - const testBasicSanity = function(db,sqlite3){ - const capi = sqlite3.capi; - log("Basic sanity tests..."); - T.assert(Number.isInteger(db.pointer)). - mustThrowMatching(()=>db.pointer=1, /read-only/). - assert(0===capi.sqlite3_extended_result_codes(db.pointer,1)). - assert('main'===db.dbName(0)); - let pId; - let st = db.prepare( - new TextEncoder('utf-8').encode("select 3 as a") - /* Testing handling of Uint8Array input */ - ); - //debug("statement =",st); - try { - T.assert(Number.isInteger(st.pointer)) - .mustThrowMatching(()=>st.pointer=1, /read-only/) - .assert(1===db.openStatementCount()) - .assert(!st._mayGet) - .assert('a' === st.getColumnName(0)) - .assert(1===st.columnCount) - .assert(0===st.parameterCount) - .mustThrow(()=>st.bind(1,null)) - .assert(true===st.step()) - .assert(3 === st.get(0)) - .mustThrow(()=>st.get(1)) - .mustThrow(()=>st.get(0,~capi.SQLITE_INTEGER)) - .assert(3 === st.get(0,capi.SQLITE_INTEGER)) - .assert(3 === st.getInt(0)) - .assert('3' === st.get(0,capi.SQLITE_TEXT)) - .assert('3' === st.getString(0)) - .assert(3.0 === st.get(0,capi.SQLITE_FLOAT)) - .assert(3.0 === st.getFloat(0)) - .assert(3 === st.get({}).a) - .assert(3 === st.get([])[0]) - .assert(3 === st.getJSON(0)) - .assert(st.get(0,capi.SQLITE_BLOB) instanceof Uint8Array) - .assert(1===st.get(0,capi.SQLITE_BLOB).length) - .assert(st.getBlob(0) instanceof Uint8Array) - .assert('3'.charCodeAt(0) === st.getBlob(0)[0]) - .assert(st._mayGet) - .assert(false===st.step()) - .assert(!st._mayGet) - ; - pId = st.pointer; - T.assert(0===capi.sqlite3_strglob("*.txt", "foo.txt")). - assert(0!==capi.sqlite3_strglob("*.txt", "foo.xtx")). - assert(0===capi.sqlite3_strlike("%.txt", "foo.txt", 0)). 
- assert(0!==capi.sqlite3_strlike("%.txt", "foo.xtx", 0)); - }finally{ - st.finalize(); - } - T.assert(!st.pointer) - .assert(0===db.openStatementCount()); - let list = []; - db.exec({ - sql:['CREATE TABLE t(a,b);', - "INSERT INTO t(a,b) VALUES(1,2),(3,4),", - "(?,?),('blob',X'6869')"/*intentionally missing semicolon to test for - off-by-one bug in string-to-WASM conversion*/], - multi: true, - saveSql: list, - bind: [5,6] - }); - //debug("Exec'd SQL:", list); - T.assert(2 === list.length) - .assert('string'===typeof list[1]) - .assert(4===db.changes()); - if(capi.wasm.bigIntEnabled){ - T.assert(4n===db.changes(false,true)); - } - let blob = db.selectValue("select b from t where a='blob'"); - T.assert(blob instanceof Uint8Array). - assert(0x68===blob[0] && 0x69===blob[1]); - blob = null; - - let counter = 0, colNames = []; - list.length = 0; - db.exec(new TextEncoder('utf-8').encode("SELECT a a, b b FROM t"),{ - rowMode: 'object', - resultRows: list, - columnNames: colNames, - callback: function(row,stmt){ - ++counter; - T.assert((row.a%2 && row.a<6) || 'blob'===row.a); - } - }); - T.assert(2 === colNames.length) - .assert('a' === colNames[0]) - .assert(4 === counter) - .assert(4 === list.length); - list.length = 0; - db.exec("SELECT a a, b b FROM t",{ - rowMode: 'array', - callback: function(row,stmt){ - ++counter; - T.assert(Array.isArray(row)) - .assert((0===row[1]%2 && row[1]<7) - || (row[1] instanceof Uint8Array)); - } - }); - T.assert(8 === counter); - T.assert(Number.MIN_SAFE_INTEGER === - db.selectValue("SELECT "+Number.MIN_SAFE_INTEGER)). - assert(Number.MAX_SAFE_INTEGER === - db.selectValue("SELECT "+Number.MAX_SAFE_INTEGER)); - if(capi.wasm.bigIntEnabled){ - const mI = capi.wasm.xCall('jaccwabyt_test_int64_max'); - const b = BigInt(Number.MAX_SAFE_INTEGER * 2); - T.assert(b === db.selectValue("SELECT "+b)). - assert(b === db.selectValue("SELECT ?", b)). - assert(mI == db.selectValue("SELECT $x", {$x:mI})); - }else{ - /* Curiously, the JS spec seems to be off by one with the definitions - of MIN/MAX_SAFE_INTEGER: - - https://github.com/emscripten-core/emscripten/issues/17391 */ - T.mustThrow(()=>db.selectValue("SELECT "+(Number.MAX_SAFE_INTEGER+1))). - mustThrow(()=>db.selectValue("SELECT "+(Number.MIN_SAFE_INTEGER-1))); - } - - st = db.prepare("update t set b=:b where a='blob'"); - try { - const ndx = st.getParamIndex(':b'); - T.assert(1===ndx); - st.bindAsBlob(ndx, "ima blob").reset(true); - } finally { - st.finalize(); - } - - try { - throw new capi.WasmAllocError; - }catch(e){ - T.assert(e instanceof Error) - .assert(e instanceof capi.WasmAllocError); - } - - try { - db.prepare("/*empty SQL*/"); - toss("Must not be reached."); - }catch(e){ - T.assert(e instanceof sqlite3.SQLite3Error) - .assert(0==e.message.indexOf('Cannot prepare empty')); - } - - T.assert(capi.sqlite3_errstr(capi.SQLITE_IOERR_ACCESS).indexOf("I/O")>=0). - assert(capi.sqlite3_errstr(capi.SQLITE_CORRUPT).indexOf('malformed')>0). 
- assert(capi.sqlite3_errstr(capi.SQLITE_OK) === 'not an error'); - - // Custom db error message handling via sqlite3_prepare_v2/v3() - if(capi.wasm.exports.sqlite3_wasm_db_error){ - log("Testing custom error message via prepare_v3()..."); - let rc = capi.sqlite3_prepare_v3(db.pointer, [/*invalid*/], -1, 0, null, null); - T.assert(capi.SQLITE_MISUSE === rc) - .assert(0 === capi.sqlite3_errmsg(db.pointer).indexOf("Invalid SQL")); - log("errmsg =",capi.sqlite3_errmsg(db.pointer)); - } - }/*testBasicSanity()*/; - - const testUDF = function(db){ - db.createFunction("foo",function(a,b){return a+b}); - T.assert(7===db.selectValue("select foo(3,4)")). - assert(5===db.selectValue("select foo(3,?)",2)). - assert(5===db.selectValue("select foo(?,?2)",[1,4])). - assert(5===db.selectValue("select foo($a,$b)",{$a:0,$b:5})); - db.createFunction("bar", { - arity: -1, - callback: function(){ - var rc = 0; - for(let i = 0; i < arguments.length; ++i) rc += arguments[i]; - return rc; - } - }).createFunction({ - name: "asis", - callback: (arg)=>arg - }); - - //log("Testing DB::selectValue() w/ UDF..."); - T.assert(0===db.selectValue("select bar()")). - assert(1===db.selectValue("select bar(1)")). - assert(3===db.selectValue("select bar(1,2)")). - assert(-1===db.selectValue("select bar(1,2,-4)")). - assert('hi'===db.selectValue("select asis('hi')")); - - T.assert('hi' === db.selectValue("select ?",'hi')). - assert(null===db.selectValue("select null")). - assert(null === db.selectValue("select ?",null)). - assert(null === db.selectValue("select ?",[null])). - assert(null === db.selectValue("select $a",{$a:null})). - assert(eqApprox(3.1,db.selectValue("select 3.0 + 0.1"))). - assert(eqApprox(1.3,db.selectValue("select asis(1 + 0.3)"))) - ; - - //log("Testing binding and UDF propagation of blobs..."); - let blobArg = new Uint8Array(2); - blobArg.set([0x68, 0x69], 0); - let blobRc = db.selectValue("select asis(?1)", blobArg); - T.assert(blobRc instanceof Uint8Array). - assert(2 === blobRc.length). - assert(0x68==blobRc[0] && 0x69==blobRc[1]); - blobRc = db.selectValue("select asis(X'6869')"); - T.assert(blobRc instanceof Uint8Array). - assert(2 === blobRc.length). - assert(0x68==blobRc[0] && 0x69==blobRc[1]); - - blobArg = new Int8Array(2); - blobArg.set([0x68, 0x69]); - //debug("blobArg=",blobArg); - blobRc = db.selectValue("select asis(?1)", blobArg); - T.assert(blobRc instanceof Uint8Array). 
- assert(2 === blobRc.length); - //debug("blobRc=",blobRc); - T.assert(0x68==blobRc[0] && 0x69==blobRc[1]); - }; - - const testAttach = function(db){ - const resultRows = []; - db.exec({ - sql:new TextEncoder('utf-8').encode([ - // ^^^ testing string-vs-typedarray handling in execMulti() - "attach 'foo.db' as foo;", - "create table foo.bar(a);", - "insert into foo.bar(a) values(1),(2),(3);", - "select a from foo.bar order by a;" - ].join('')), - multi: true, - rowMode: 0, - resultRows - }); - T.assert(3===resultRows.length) - .assert(2===resultRows[1]); - T.assert(2===db.selectValue('select a from foo.bar where a>1 order by a')); - db.exec("detach foo"); - T.mustThrow(()=>db.exec("select * from foo.bar")); - }; - - const testIntPtr = function(db,S,Module){ - const w = S.capi.wasm; - const stack = w.scopedAllocPush(); - let ptrInt; - const origValue = 512; - const ptrValType = 'i32'; - try{ - ptrInt = w.scopedAlloc(4); - w.setMemValue(ptrInt,origValue, ptrValType); - const cf = w.xGet('jaccwabyt_test_intptr'); - const oldPtrInt = ptrInt; - //log('ptrInt',ptrInt); - //log('getMemValue(ptrInt)',w.getMemValue(ptrInt)); - T.assert(origValue === w.getMemValue(ptrInt, ptrValType)); - const rc = cf(ptrInt); - //log('cf(ptrInt)',rc); - //log('ptrInt',ptrInt); - //log('getMemValue(ptrInt)',w.getMemValue(ptrInt,ptrValType)); - T.assert(2*origValue === rc). - assert(rc === w.getMemValue(ptrInt,ptrValType)). - assert(oldPtrInt === ptrInt); - const pi64 = w.scopedAlloc(8)/*ptr to 64-bit integer*/; - const o64 = 0x010203040506/*>32-bit integer*/; - const ptrType64 = 'i64'; - if(w.bigIntEnabled){ - log("BigInt support is enabled..."); - w.setMemValue(pi64, o64, ptrType64); - //log("pi64 =",pi64, "o64 = 0x",o64.toString(16), o64); - const v64 = ()=>w.getMemValue(pi64,ptrType64) - //log("getMemValue(pi64)",v64()); - T.assert(v64() == o64); - //T.assert(o64 === w.getMemValue(pi64, ptrType64)); - const cf64w = w.xGet('jaccwabyt_test_int64ptr'); - cf64w(pi64); - //log("getMemValue(pi64)",v64()); - T.assert(v64() == BigInt(2 * o64)); - cf64w(pi64); - T.assert(v64() == BigInt(4 * o64)); - - const biTimes2 = w.xGet('jaccwabyt_test_int64_times2'); - T.assert(BigInt(2 * o64) === - biTimes2(BigInt(o64)/*explicit conv. required to avoid TypeError - in the call :/ */)); - - const pMin = w.scopedAlloc(16); - const pMax = pMin + 8; - const g64 = (p)=>w.getMemValue(p,ptrType64); - w.setMemValue(pMin, 0, ptrType64); - w.setMemValue(pMax, 0, ptrType64); - const minMaxI64 = [ - w.xCall('jaccwabyt_test_int64_min'), - w.xCall('jaccwabyt_test_int64_max') - ]; - T.assert(minMaxI64[0] < BigInt(Number.MIN_SAFE_INTEGER)). - assert(minMaxI64[1] > BigInt(Number.MAX_SAFE_INTEGER)); - //log("int64_min/max() =",minMaxI64, typeof minMaxI64[0]); - w.xCall('jaccwabyt_test_int64_minmax', pMin, pMax); - T.assert(g64(pMin) === minMaxI64[0], "int64 mismatch"). - assert(g64(pMax) === minMaxI64[1], "int64 mismatch"); - //log("pMin",g64(pMin), "pMax",g64(pMax)); - w.setMemValue(pMin, minMaxI64[0], ptrType64); - T.assert(g64(pMin) === minMaxI64[0]). - assert(minMaxI64[0] === db.selectValue("select ?",g64(pMin))). - assert(minMaxI64[1] === db.selectValue("select ?",g64(pMax))); - const rxRange = /out of range for int64/; - T.mustThrowMatching(()=>{db.prepare("select ?").bind(minMaxI64[0] - BigInt(1))}, - rxRange). - mustThrowMatching(()=>{db.prepare("select ?").bind(minMaxI64[1] + BigInt(1))}, - (e)=>rxRange.test(e.message)); - }else{ - log("No BigInt support. 
Skipping related tests."); - log("\"The problem\" here is that we can manipulate, at the byte level,", - "heap memory to set 64-bit values, but we can't get those values", - "back into JS because of the lack of 64-bit integer support."); - } - }finally{ - const x = w.scopedAlloc(1), y = w.scopedAlloc(1), z = w.scopedAlloc(1); - //log("x=",x,"y=",y,"z=",z); // just looking at the alignment - w.scopedAllocPop(stack); - } - }/*testIntPtr()*/; - - const testStructStuff = function(db,S,M){ - const W = S.capi.wasm, C = S; - /** Maintenance reminder: the rest of this function is copy/pasted - from the upstream jaccwabyt tests. */ - log("Jaccwabyt tests..."); - const MyStructDef = { - sizeof: 16, - members: { - p4: {offset: 0, sizeof: 4, signature: "i"}, - pP: {offset: 4, sizeof: 4, signature: "P"}, - ro: {offset: 8, sizeof: 4, signature: "i", readOnly: true}, - cstr: {offset: 12, sizeof: 4, signature: "s"} - } - }; - if(W.bigIntEnabled){ - const m = MyStructDef; - m.members.p8 = {offset: m.sizeof, sizeof: 8, signature: "j"}; - m.sizeof += m.members.p8.sizeof; - } - const StructType = C.StructBinder.StructType; - const K = C.StructBinder('my_struct',MyStructDef); - T.mustThrowMatching(()=>K(), /via 'new'/). - mustThrowMatching(()=>new K('hi'), /^Invalid pointer/); - const k1 = new K(), k2 = new K(); - try { - T.assert(k1.constructor === K). - assert(K.isA(k1)). - assert(k1 instanceof K). - assert(K.prototype.lookupMember('p4').key === '$p4'). - assert(K.prototype.lookupMember('$p4').name === 'p4'). - mustThrowMatching(()=>K.prototype.lookupMember('nope'), /not a mapped/). - assert(undefined === K.prototype.lookupMember('nope',false)). - assert(k1 instanceof StructType). - assert(StructType.isA(k1)). - assert(K.resolveToInstance(k1.pointer)===k1). - mustThrowMatching(()=>K.resolveToInstance(null,true), /is-not-a my_struct/). - assert(k1 === StructType.instanceForPointer(k1.pointer)). - mustThrowMatching(()=>k1.$ro = 1, /read-only/); - Object.keys(MyStructDef.members).forEach(function(key){ - key = K.memberKey(key); - T.assert(0 == k1[key], - "Expecting allocation to zero the memory "+ - "for "+key+" but got: "+k1[key]+ - " from "+k1.memoryDump()); - }); - T.assert('number' === typeof k1.pointer). - mustThrowMatching(()=>k1.pointer = 1, /pointer/). - assert(K.instanceForPointer(k1.pointer) === k1); - k1.$p4 = 1; k1.$pP = 2; - T.assert(1 === k1.$p4).assert(2 === k1.$pP); - if(MyStructDef.members.$p8){ - k1.$p8 = 1/*must not throw despite not being a BigInt*/; - k1.$p8 = BigInt(Number.MAX_SAFE_INTEGER * 2); - T.assert(BigInt(2 * Number.MAX_SAFE_INTEGER) === k1.$p8); - } - T.assert(!k1.ondispose); - k1.setMemberCString('cstr', "A C-string."); - T.assert(Array.isArray(k1.ondispose)). - assert(k1.ondispose[0] === k1.$cstr). - assert('number' === typeof k1.$cstr). - assert('A C-string.' === k1.memberToJsString('cstr')); - k1.$pP = k2; - T.assert(k1.$pP === k2); - k1.$pP = null/*null is special-cased to 0.*/; - T.assert(0===k1.$pP); - let ptr = k1.pointer; - k1.dispose(); - T.assert(undefined === k1.pointer). - assert(undefined === K.instanceForPointer(ptr)). - mustThrowMatching(()=>{k1.$pP=1}, /disposed instance/); - const k3 = new K(); - ptr = k3.pointer; - T.assert(k3 === K.instanceForPointer(ptr)); - K.disposeAll(); - T.assert(ptr). - assert(undefined === k2.pointer). - assert(undefined === k3.pointer). 
- assert(undefined === K.instanceForPointer(ptr)); - }finally{ - k1.dispose(); - k2.dispose(); - } - - if(!W.bigIntEnabled){ - log("Skipping WasmTestStruct tests: BigInt not enabled."); - return; - } - - const ctype = W.xCallWrapped('jaccwabyt_test_ctype_json', 'json'); - log("Struct descriptions:",ctype.structs); - const WTStructDesc = - ctype.structs.filter((e)=>'WasmTestStruct'===e.name)[0]; - const autoResolvePtr = true /* EXPERIMENTAL */; - if(autoResolvePtr){ - WTStructDesc.members.ppV.signature = 'P'; - } - const WTStruct = C.StructBinder(WTStructDesc); - log(WTStruct.structName, WTStruct.structInfo); - const wts = new WTStruct(); - log("WTStruct.prototype keys:",Object.keys(WTStruct.prototype)); - try{ - T.assert(wts.constructor === WTStruct). - assert(WTStruct.memberKeys().indexOf('$ppV')>=0). - assert(wts.memberKeys().indexOf('$v8')>=0). - assert(!K.isA(wts)). - assert(WTStruct.isA(wts)). - assert(wts instanceof WTStruct). - assert(wts instanceof StructType). - assert(StructType.isA(wts)). - assert(wts === StructType.instanceForPointer(wts.pointer)); - T.assert(wts.pointer>0).assert(0===wts.$v4).assert(0n===wts.$v8). - assert(0===wts.$ppV).assert(0===wts.$xFunc). - assert(WTStruct.instanceForPointer(wts.pointer) === wts); - const testFunc = - W.xGet('jaccwabyt_test_struct'/*name gets mangled in -O3 builds!*/); - let counter = 0; - log("wts.pointer =",wts.pointer); - const wtsFunc = function(arg){ - log("This from a JS function called from C, "+ - "which itself was called from JS. arg =",arg); - ++counter; - T.assert(WTStruct.instanceForPointer(arg) === wts); - if(3===counter){ - toss("Testing exception propagation."); - } - } - wts.$v4 = 10; wts.$v8 = 20; - wts.$xFunc = W.installFunction(wtsFunc, wts.memberSignature('xFunc')) - /* ^^^ compiles wtsFunc to WASM and returns its new function pointer */; - T.assert(0===counter).assert(10 === wts.$v4).assert(20n === wts.$v8) - .assert(0 === wts.$ppV).assert('number' === typeof wts.$xFunc) - .assert(0 === wts.$cstr) - .assert(wts.memberIsString('$cstr')) - .assert(!wts.memberIsString('$v4')) - .assert(null === wts.memberToJsString('$cstr')) - .assert(W.functionEntry(wts.$xFunc) instanceof Function); - /* It might seem silly to assert that the values match - what we just set, but recall that all of those property - reads and writes are, via property interceptors, - actually marshaling their data to/from a raw memory - buffer, so merely reading them back is actually part of - testing the struct-wrapping API. */ - - testFunc(wts.pointer); - log("wts.pointer, wts.$ppV",wts.pointer, wts.$ppV); - T.assert(1===counter).assert(20 === wts.$v4).assert(40n === wts.$v8) - .assert(autoResolvePtr ? (wts.$ppV === wts) : (wts.$ppV === wts.pointer)) - .assert('string' === typeof wts.memberToJsString('cstr')) - .assert(wts.memberToJsString('cstr') === wts.memberToJsString('$cstr')) - .mustThrowMatching(()=>wts.memberToJsString('xFunc'), - /Invalid member type signature for C-string/) - ; - testFunc(wts.pointer); - T.assert(2===counter).assert(40 === wts.$v4).assert(80n === wts.$v8) - .assert(autoResolvePtr ? (wts.$ppV === wts) : (wts.$ppV === wts.pointer)); - /** The 3rd call to wtsFunc throw from JS, which is called - from C, which is called from JS. Let's ensure that - that exception propagates back here... 
*/ - T.mustThrowMatching(()=>testFunc(wts.pointer),/^Testing/); - W.uninstallFunction(wts.$xFunc); - wts.$xFunc = 0; - if(autoResolvePtr){ - wts.$ppV = 0; - T.assert(!wts.$ppV); - WTStruct.debugFlags(0x03); - wts.$ppV = wts; - T.assert(wts === wts.$ppV) - WTStruct.debugFlags(0); - } - wts.setMemberCString('cstr', "A C-string."); - T.assert(Array.isArray(wts.ondispose)). - assert(wts.ondispose[0] === wts.$cstr). - assert('A C-string.' === wts.memberToJsString('cstr')); - const ptr = wts.pointer; - wts.dispose(); - T.assert(ptr).assert(undefined === wts.pointer). - assert(undefined === WTStruct.instanceForPointer(ptr)) - }finally{ - wts.dispose(); - } - }/*testStructStuff()*/; - - const testSqliteStructs = function(db,sqlite3,M){ - log("Tinkering with sqlite3_io_methods..."); - // https://www.sqlite.org/c3ref/vfs.html - // https://www.sqlite.org/c3ref/io_methods.html - const capi = sqlite3.capi, W = capi.wasm; - const sqlite3_io_methods = capi.sqlite3_io_methods, - sqlite3_vfs = capi.sqlite3_vfs, - sqlite3_file = capi.sqlite3_file; - log("struct sqlite3_file", sqlite3_file.memberKeys()); - log("struct sqlite3_vfs", sqlite3_vfs.memberKeys()); - log("struct sqlite3_io_methods", sqlite3_io_methods.memberKeys()); - - const installMethod = function callee(tgt, name, func){ - if(1===arguments.length){ - return (n,f)=>callee(tgt,n,f); - } - if(!callee.argcProxy){ - callee.argcProxy = function(func,sig){ - return function(...args){ - if(func.length!==arguments.length){ - toss("Argument mismatch. Native signature is:",sig); - } - return func.apply(this, args); - } - }; - callee.ondisposeRemoveFunc = function(){ - if(this.__ondispose){ - const who = this; - this.__ondispose.forEach( - (v)=>{ - if('number'===typeof v){ - try{capi.wasm.uninstallFunction(v)} - catch(e){/*ignore*/} - }else{/*wasm function wrapper property*/ - delete who[v]; - } - } - ); - delete this.__ondispose; - } - }; - }/*static init*/ - const sigN = tgt.memberSignature(name), - memKey = tgt.memberKey(name); - //log("installMethod",tgt, name, sigN); - if(!tgt.__ondispose){ - T.assert(undefined === tgt.ondispose); - tgt.ondispose = [callee.ondisposeRemoveFunc]; - tgt.__ondispose = []; - } - const fProxy = callee.argcProxy(func, sigN); - const pFunc = capi.wasm.installFunction(fProxy, tgt.memberSignature(name, true)); - tgt[memKey] = pFunc; - /** - ACHTUNG: function pointer IDs are from a different pool than - allocation IDs, starting at 1 and incrementing in steps of 1, - so if we set tgt[memKey] to those values, we'd very likely - later misinterpret them as plain old pointer addresses unless - unless we use some silly heuristic like "all values <5k are - presumably function pointers," or actually perform a function - lookup on every pointer to first see if it's a function. That - would likely work just fine, but would be kludgy. - - It turns out that "all values less than X are functions" is - essentially how it works in wasm: a function pointer is - reported to the client as its index into the - __indirect_function_table. - - So... once jaccwabyt can be told how to access the - function table, it could consider all pointer values less - than that table's size to be functions. As "real" pointer - values start much, much higher than the function table size, - that would likely work reasonably well. e.g. the object - pointer address for sqlite3's default VFS is (in this local - setup) 65104, whereas the function table has fewer than 600 - entries. 
- */ - const wrapperKey = '$'+memKey; - tgt[wrapperKey] = fProxy; - tgt.__ondispose.push(pFunc, wrapperKey); - //log("tgt.__ondispose =",tgt.__ondispose); - return (n,f)=>callee(tgt, n, f); - }/*installMethod*/; - - const installIOMethods = function instm(iom){ - (iom instanceof capi.sqlite3_io_methods) || toss("Invalid argument type."); - if(!instm._requireFileArg){ - instm._requireFileArg = function(arg,methodName){ - arg = capi.sqlite3_file.resolveToInstance(arg); - if(!arg){ - err("sqlite3_io_methods::xClose() was passed a non-sqlite3_file."); - } - return arg; - }; - instm._methods = { - // https://sqlite.org/c3ref/io_methods.html - xClose: /*i(P)*/function(f){ - /* int (*xClose)(sqlite3_file*) */ - log("xClose(",f,")"); - if(!(f = instm._requireFileArg(f,'xClose'))) return capi.SQLITE_MISUSE; - f.dispose(/*noting that f has externally-owned memory*/); - return 0; - }, - xRead: /*i(Ppij)*/function(f,dest,n,offset){ - /* int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst) */ - log("xRead(",arguments,")"); - if(!(f = instm._requireFileArg(f))) return capi.SQLITE_MISUSE; - capi.wasm.heap8().fill(0, dest + offset, n); - return 0; - }, - xWrite: /*i(Ppij)*/function(f,dest,n,offset){ - /* int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst) */ - log("xWrite(",arguments,")"); - if(!(f=instm._requireFileArg(f,'xWrite'))) return capi.SQLITE_MISUSE; - return 0; - }, - xTruncate: /*i(Pj)*/function(f){ - /* int (*xTruncate)(sqlite3_file*, sqlite3_int64 size) */ - log("xTruncate(",arguments,")"); - if(!(f=instm._requireFileArg(f,'xTruncate'))) return capi.SQLITE_MISUSE; - return 0; - }, - xSync: /*i(Pi)*/function(f){ - /* int (*xSync)(sqlite3_file*, int flags) */ - log("xSync(",arguments,")"); - if(!(f=instm._requireFileArg(f,'xSync'))) return capi.SQLITE_MISUSE; - return 0; - }, - xFileSize: /*i(Pp)*/function(f,pSz){ - /* int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize) */ - log("xFileSize(",arguments,")"); - if(!(f=instm._requireFileArg(f,'xFileSize'))) return capi.SQLITE_MISUSE; - capi.wasm.setMemValue(pSz, 0/*file size*/); - return 0; - }, - xLock: /*i(Pi)*/function(f){ - /* int (*xLock)(sqlite3_file*, int) */ - log("xLock(",arguments,")"); - if(!(f=instm._requireFileArg(f,'xLock'))) return capi.SQLITE_MISUSE; - return 0; - }, - xUnlock: /*i(Pi)*/function(f){ - /* int (*xUnlock)(sqlite3_file*, int) */ - log("xUnlock(",arguments,")"); - if(!(f=instm._requireFileArg(f,'xUnlock'))) return capi.SQLITE_MISUSE; - return 0; - }, - xCheckReservedLock: /*i(Pp)*/function(){ - /* int (*xCheckReservedLock)(sqlite3_file*, int *pResOut) */ - log("xCheckReservedLock(",arguments,")"); - return 0; - }, - xFileControl: /*i(Pip)*/function(){ - /* int (*xFileControl)(sqlite3_file*, int op, void *pArg) */ - log("xFileControl(",arguments,")"); - return capi.SQLITE_NOTFOUND; - }, - xSectorSize: /*i(P)*/function(){ - /* int (*xSectorSize)(sqlite3_file*) */ - log("xSectorSize(",arguments,")"); - return 0/*???*/; - }, - xDeviceCharacteristics:/*i(P)*/function(){ - /* int (*xDeviceCharacteristics)(sqlite3_file*) */ - log("xDeviceCharacteristics(",arguments,")"); - return 0; - } - }; - }/*static init*/ - iom.$iVersion = 1; - Object.keys(instm._methods).forEach( - (k)=>installMethod(iom, k, instm._methods[k]) - ); - }/*installIOMethods()*/; - - const iom = new sqlite3_io_methods, sfile = new sqlite3_file; - const err = console.error.bind(console); - try { - const IOM = sqlite3_io_methods, S3F = sqlite3_file; - //log("iom proto",iom,iom.constructor.prototype); - 
//log("sfile",sfile,sfile.constructor.prototype); - T.assert(0===sfile.$pMethods).assert(iom.pointer > 0); - //log("iom",iom); - /** Some of the following tests require that pMethods has a - signature of "P", as opposed to "p". */ - sfile.$pMethods = iom; - T.assert(iom === sfile.$pMethods); - sfile.$pMethods = iom.pointer; - T.assert(iom === sfile.$pMethods) - .assert(IOM.resolveToInstance(iom)) - .assert(undefined ===IOM.resolveToInstance(sfile)) - .mustThrow(()=>IOM.resolveToInstance(0,true)) - .assert(S3F.resolveToInstance(sfile.pointer)) - .assert(undefined===S3F.resolveToInstance(iom)); - T.assert(0===iom.$iVersion); - installIOMethods(iom); - T.assert(1===iom.$iVersion); - //log("iom.__ondispose",iom.__ondispose); - T.assert(Array.isArray(iom.__ondispose)).assert(iom.__ondispose.length>10); - }finally{ - iom.dispose(); - T.assert(undefined === iom.__ondispose); - } - - const dVfs = new sqlite3_vfs(capi.sqlite3_vfs_find(null)); - try { - const SB = sqlite3.StructBinder; - T.assert(dVfs instanceof SB.StructType) - .assert(dVfs.pointer) - .assert('sqlite3_vfs' === dVfs.structName) - .assert(!!dVfs.structInfo) - .assert(SB.StructType.hasExternalPointer(dVfs)) - .assert(3===dVfs.$iVersion) - .assert('number'===typeof dVfs.$zName) - .assert('number'===typeof dVfs.$xSleep) - .assert(capi.wasm.functionEntry(dVfs.$xOpen)) - .assert(dVfs.memberIsString('zName')) - .assert(dVfs.memberIsString('$zName')) - .assert(!dVfs.memberIsString('pAppData')) - .mustThrowMatching(()=>dVfs.memberToJsString('xSleep'), - /Invalid member type signature for C-string/) - .mustThrowMatching(()=>dVfs.memberSignature('nope'), /nope is not a mapped/) - .assert('string' === typeof dVfs.memberToJsString('zName')) - .assert(dVfs.memberToJsString('zName')===dVfs.memberToJsString('$zName')) - ; - log("Default VFS: @",dVfs.pointer); - Object.keys(sqlite3_vfs.structInfo.members).forEach(function(mname){ - const mk = sqlite3_vfs.memberKey(mname), mbr = sqlite3_vfs.structInfo.members[mname], - addr = dVfs[mk], prefix = 'defaultVfs.'+mname; - if(1===mbr.signature.length){ - let sep = '?', val = undefined; - switch(mbr.signature[0]){ - // TODO: move this into an accessor, e.g. getPreferredValue(member) - case 'i': case 'j': case 'f': case 'd': sep = '='; val = dVfs[mk]; break - case 'p': case 'P': sep = '@'; val = dVfs[mk]; break; - case 's': sep = '='; - //val = capi.wasm.UTF8ToString(addr); - val = dVfs.memberToJsString(mname); - break; - } - log(prefix, sep, val); - } - else{ - log(prefix," = funcptr @",addr, capi.wasm.functionEntry(addr)); - } - }); - }finally{ - dVfs.dispose(); - T.assert(undefined===dVfs.pointer); - } - }/*testSqliteStructs()*/; - - const testWasmUtil = function(DB,S){ - const w = S.capi.wasm; - /** - Maintenance reminder: the rest of this function is part of the - upstream Jaccwabyt tree. - */ - const chr = (x)=>x.charCodeAt(0); - log("heap getters..."); - { - const li = [8, 16, 32]; - if(w.bigIntEnabled) li.push(64); - for(const n of li){ - const bpe = n/8; - const s = w.heapForSize(n,false); - T.assert(bpe===s.BYTES_PER_ELEMENT). - assert(w.heapForSize(s.constructor) === s); - const u = w.heapForSize(n,true); - T.assert(bpe===u.BYTES_PER_ELEMENT). - assert(s!==u). 
- assert(w.heapForSize(u.constructor) === u); - } - } - - log("jstrlen()..."); - { - T.assert(3 === w.jstrlen("abc")).assert(4 === w.jstrlen("äbc")); - } - - log("jstrcpy()..."); - { - const fillChar = 10; - let ua = new Uint8Array(8), rc, - refill = ()=>ua.fill(fillChar); - refill(); - rc = w.jstrcpy("hello", ua); - T.assert(6===rc).assert(0===ua[5]).assert(chr('o')===ua[4]); - refill(); - ua[5] = chr('!'); - rc = w.jstrcpy("HELLO", ua, 0, -1, false); - T.assert(5===rc).assert(chr('!')===ua[5]).assert(chr('O')===ua[4]); - refill(); - rc = w.jstrcpy("the end", ua, 4); - //log("rc,ua",rc,ua); - T.assert(4===rc).assert(0===ua[7]). - assert(chr('e')===ua[6]).assert(chr('t')===ua[4]); - refill(); - rc = w.jstrcpy("the end", ua, 4, -1, false); - T.assert(4===rc).assert(chr(' ')===ua[7]). - assert(chr('e')===ua[6]).assert(chr('t')===ua[4]); - refill(); - rc = w.jstrcpy("", ua, 0, 1, true); - //log("rc,ua",rc,ua); - T.assert(1===rc).assert(0===ua[0]); - refill(); - rc = w.jstrcpy("x", ua, 0, 1, true); - //log("rc,ua",rc,ua); - T.assert(1===rc).assert(0===ua[0]); - refill(); - rc = w.jstrcpy('äbä', ua, 0, 1, true); - T.assert(1===rc, 'Must not write partial multi-byte char.') - .assert(0===ua[0]); - refill(); - rc = w.jstrcpy('äbä', ua, 0, 2, true); - T.assert(1===rc, 'Must not write partial multi-byte char.') - .assert(0===ua[0]); - refill(); - rc = w.jstrcpy('äbä', ua, 0, 2, false); - T.assert(2===rc).assert(fillChar!==ua[1]).assert(fillChar===ua[2]); - }/*jstrcpy()*/ - - log("cstrncpy()..."); - { - w.scopedAllocPush(); - try { - let cStr = w.scopedAllocCString("hello"); - const n = w.cstrlen(cStr); - let cpy = w.scopedAlloc(n+10); - let rc = w.cstrncpy(cpy, cStr, n+10); - T.assert(n+1 === rc). - assert("hello" === w.cstringToJs(cpy)). - assert(chr('o') === w.getMemValue(cpy+n-1)). - assert(0 === w.getMemValue(cpy+n)); - let cStr2 = w.scopedAllocCString("HI!!!"); - rc = w.cstrncpy(cpy, cStr2, 3); - T.assert(3===rc). - assert("HI!lo" === w.cstringToJs(cpy)). - assert(chr('!') === w.getMemValue(cpy+2)). 
- assert(chr('l') === w.getMemValue(cpy+3)); - }finally{ - w.scopedAllocPop(); - } - } - - log("jstrToUintArray()..."); - { - let a = w.jstrToUintArray("hello", false); - T.assert(5===a.byteLength).assert(chr('o')===a[4]); - a = w.jstrToUintArray("hello", true); - T.assert(6===a.byteLength).assert(chr('o')===a[4]).assert(0===a[5]); - a = w.jstrToUintArray("äbä", false); - T.assert(5===a.byteLength).assert(chr('b')===a[2]); - a = w.jstrToUintArray("äbä", true); - T.assert(6===a.byteLength).assert(chr('b')===a[2]).assert(0===a[5]); - } - - log("allocCString()..."); - { - const cstr = w.allocCString("hällo, world"); - const n = w.cstrlen(cstr); - T.assert(13 === n) - .assert(0===w.getMemValue(cstr+n)) - .assert(chr('d')===w.getMemValue(cstr+n-1)); - } - - log("scopedAlloc() and friends..."); - { - const alloc = w.alloc, dealloc = w.dealloc; - w.alloc = w.dealloc = null; - T.assert(!w.scopedAlloc.level) - .mustThrowMatching(()=>w.scopedAlloc(1), /^No scopedAllocPush/) - .mustThrowMatching(()=>w.scopedAllocPush(), /missing alloc/); - w.alloc = alloc; - T.mustThrowMatching(()=>w.scopedAllocPush(), /missing alloc/); - w.dealloc = dealloc; - T.mustThrowMatching(()=>w.scopedAllocPop(), /^Invalid state/) - .mustThrowMatching(()=>w.scopedAlloc(1), /^No scopedAllocPush/) - .mustThrowMatching(()=>w.scopedAlloc.level=0, /read-only/); - const asc = w.scopedAllocPush(); - let asc2; - try { - const p1 = w.scopedAlloc(16), - p2 = w.scopedAlloc(16); - T.assert(1===w.scopedAlloc.level) - .assert(Number.isFinite(p1)) - .assert(Number.isFinite(p2)) - .assert(asc[0] === p1) - .assert(asc[1]===p2); - asc2 = w.scopedAllocPush(); - const p3 = w.scopedAlloc(16); - T.assert(2===w.scopedAlloc.level) - .assert(Number.isFinite(p3)) - .assert(2===asc.length) - .assert(p3===asc2[0]); - - const [z1, z2, z3] = w.scopedAllocPtr(3); - T.assert('number'===typeof z1).assert(z2>z1).assert(z3>z2) - .assert(0===w.getMemValue(z1,'i32'), 'allocPtr() must zero the targets') - .assert(0===w.getMemValue(z3,'i32')); - }finally{ - // Pop them in "incorrect" order to make sure they behave: - w.scopedAllocPop(asc); - T.assert(0===asc.length); - T.mustThrowMatching(()=>w.scopedAllocPop(asc), - /^Invalid state object/); - if(asc2){ - T.assert(2===asc2.length,'Should be p3 and z1'); - w.scopedAllocPop(asc2); - T.assert(0===asc2.length); - T.mustThrowMatching(()=>w.scopedAllocPop(asc2), - /^Invalid state object/); - } - } - T.assert(0===w.scopedAlloc.level); - w.scopedAllocCall(function(){ - T.assert(1===w.scopedAlloc.level); - const [cstr, n] = w.scopedAllocCString("hello, world", true); - T.assert(12 === n) - .assert(0===w.getMemValue(cstr+n)) - .assert(chr('d')===w.getMemValue(cstr+n-1)); - }); - }/*scopedAlloc()*/ - - log("xCall()..."); - { - const pJson = w.xCall('jaccwabyt_test_ctype_json'); - T.assert(Number.isFinite(pJson)).assert(w.cstrlen(pJson)>300); - } - - log("xWrap()..."); - { - //int jaccwabyt_test_intptr(int * p); - //int64_t jaccwabyt_test_int64_max(void) - //int64_t jaccwabyt_test_int64_min(void) - //int64_t jaccwabyt_test_int64_times2(int64_t x) - //void jaccwabyt_test_int64_minmax(int64_t * min, int64_t *max) - //int64_t jaccwabyt_test_int64ptr(int64_t * p) - //const char * jaccwabyt_test_ctype_json(void) - T.mustThrowMatching(()=>w.xWrap('jaccwabyt_test_ctype_json',null,'i32'), - /requires 0 arg/). - assert(w.xWrap.resultAdapter('i32') instanceof Function). 
- assert(w.xWrap.argAdapter('i32') instanceof Function); - let fw = w.xWrap('jaccwabyt_test_ctype_json','string'); - T.mustThrowMatching(()=>fw(1), /requires 0 arg/); - let rc = fw(); - T.assert('string'===typeof rc).assert(rc.length>300); - rc = w.xCallWrapped('jaccwabyt_test_ctype_json','*'); - T.assert(rc>0 && Number.isFinite(rc)); - rc = w.xCallWrapped('jaccwabyt_test_ctype_json','string'); - T.assert('string'===typeof rc).assert(rc.length>300); - fw = w.xWrap('jaccwabyt_test_str_hello', 'string:free',['i32']); - rc = fw(0); - T.assert('hello'===rc); - rc = fw(1); - T.assert(null===rc); - - w.xWrap.resultAdapter('thrice', (v)=>3n*BigInt(v)); - w.xWrap.argAdapter('twice', (v)=>2n*BigInt(v)); - fw = w.xWrap('jaccwabyt_test_int64_times2','thrice','twice'); - rc = fw(1); - T.assert(12n===rc); - - w.scopedAllocCall(function(){ - let pI1 = w.scopedAlloc(8), pI2 = pI1+4; - w.setMemValue(pI1, 0,'*')(pI2, 0, '*'); - let f = w.xWrap('jaccwabyt_test_int64_minmax',undefined,['i64*','i64*']); - let r1 = w.getMemValue(pI1, 'i64'), r2 = w.getMemValue(pI2, 'i64'); - T.assert(!Number.isSafeInteger(r1)).assert(!Number.isSafeInteger(r2)); - }); - } - }/*testWasmUtil()*/; - - const runTests = function(Module){ - //log("Module",Module); - const sqlite3 = Module.sqlite3, - capi = sqlite3.capi, - oo = sqlite3.oo1, - wasm = capi.wasm; - log("Loaded module:",capi.sqlite3_libversion(), capi.sqlite3_sourceid()); - log("Build options:",wasm.compileOptionUsed()); - - if(1){ - /* Let's grab those last few lines of test coverage for - sqlite3-api.js... */ - const rc = wasm.compileOptionUsed(['COMPILER']); - T.assert(1 === rc.COMPILER); - const obj = {COMPILER:undefined}; - wasm.compileOptionUsed(obj); - T.assert(1 === obj.COMPILER); - } - log("WASM heap size =",wasm.heap8().length); - //log("capi.wasm.exports.__indirect_function_table",capi.wasm.exports.__indirect_function_table); - - const wasmCtypes = wasm.ctype; - //log("wasmCtypes",wasmCtypes); - T.assert(wasmCtypes.structs[0].name==='sqlite3_vfs'). - assert(wasmCtypes.structs[0].members.szOsFile.sizeof>=4). - assert(wasmCtypes.structs[1/*sqlite3_io_methods*/ - ].members.xFileSize.offset>0); - //log(wasmCtypes.structs[0].name,"members",wasmCtypes.structs[0].members); - [ /* Spot-check a handful of constants to make sure they got installed... */ - 'SQLITE_SCHEMA','SQLITE_NULL','SQLITE_UTF8', - 'SQLITE_STATIC', 'SQLITE_DIRECTONLY', - 'SQLITE_OPEN_CREATE', 'SQLITE_OPEN_DELETEONCLOSE' - ].forEach(function(k){ - T.assert('number' === typeof capi[k]); - }); - [/* Spot-check a few of the WASM API methods. 
*/ - 'alloc', 'dealloc', 'installFunction' - ].forEach(function(k){ - T.assert(capi.wasm[k] instanceof Function); - }); - - const db = new oo.DB(':memory:'), startTime = performance.now(); - try { - log("DB filename:",db.filename,db.fileName()); - const banner1 = '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', - banner2 = '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'; - [ - testWasmUtil, testBasicSanity, testUDF, - testAttach, testIntPtr, testStructStuff, - testSqliteStructs - ].forEach((f)=>{ - const t = T.counter, n = performance.now(); - logHtml(banner1,"Running",f.name+"()..."); - f(db, sqlite3, Module); - logHtml(banner2,f.name+"():",T.counter - t,'tests in',(performance.now() - n),"ms"); - }); - }finally{ - db.close(); - } - logHtml("Total Test count:",T.counter,"in",(performance.now() - startTime),"ms"); - log('capi.wasm.exports',capi.wasm.exports); - }; - - sqlite3InitModule(self.sqlite3TestModule).then(function(theModule){ - /** Use a timeout so that we are (hopefully) out from under - the module init stack when our setup gets run. Just on - principle, not because we _need_ to be. */ - //console.debug("theModule =",theModule); - //setTimeout(()=>runTests(theModule), 0); - // ^^^ Chrome warns: "VIOLATION: setTimeout() handler took A WHOLE 50ms!" - self._MODULE = theModule /* this is only to facilitate testing from the console */ - runTests(theModule); - }); -})(); DELETED ext/wasm/testing2.html Index: ext/wasm/testing2.html ================================================================== --- ext/wasm/testing2.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - sqlite3-worker.js tests - - -
sqlite3-worker.js tests
- -
-
-
Initializing app...
-
- On a slow internet connection this may take a moment. If this - message displays for "a long time", initialization may have - failed and the JavaScript console may contain clues as to why. -
-
-
Downloading...
-
- -
-
Most stuff on this page happens in the dev console.
-
-
- - - - DELETED ext/wasm/testing2.js Index: ext/wasm/testing2.js ================================================================== --- ext/wasm/testing2.js +++ /dev/null @@ -1,340 +0,0 @@ -/* - 2022-05-22 - - The author disclaims copyright to this source code. In place of a - legal notice, here is a blessing: - - * May you do good and not evil. - * May you find forgiveness for yourself and forgive others. - * May you share freely, never taking more than you give. - - *********************************************************************** - - A basic test script for sqlite3-worker.js. -*/ -'use strict'; -(function(){ - const T = self.SqliteTestUtil; - const SW = new Worker("api/sqlite3-worker.js"); - const DbState = { - id: undefined - }; - const eOutput = document.querySelector('#test-output'); - const log = console.log.bind(console) - const logHtml = function(cssClass,...args){ - log.apply(this, args); - const ln = document.createElement('div'); - if(cssClass) ln.classList.add(cssClass); - ln.append(document.createTextNode(args.join(' '))); - eOutput.append(ln); - }; - const warn = console.warn.bind(console); - const error = console.error.bind(console); - const toss = (...args)=>{throw new Error(args.join(' '))}; - /** Posts a worker message as {type:type, data:data}. */ - const wMsg = function(type,data){ - log("Posting message to worker dbId="+(DbState.id||'default')+':',data); - SW.postMessage({ - type, - dbId: DbState.id, - data, - departureTime: performance.now() - }); - return SW; - }; - - SW.onerror = function(event){ - error("onerror",event); - }; - - let startTime; - - /** - A queue for callbacks which are to be run in response to async - DB commands. See the notes in runTests() for why we need - this. The event-handling plumbing of this file requires that - any DB command which includes a `messageId` property also have - a queued callback entry, as the existence of that property in - response payloads is how it knows whether or not to shift an - entry off of the queue. - */ - const MsgHandlerQueue = { - queue: [], - id: 0, - push: function(type,callback){ - this.queue.push(callback); - return type + '-' + (++this.id); - }, - shift: function(){ - return this.queue.shift(); - } - }; - - const testCount = ()=>{ - logHtml("","Total test count:",T.counter+". Total time =",(performance.now() - startTime),"ms"); - }; - - const logEventResult = function(evd){ - logHtml(evd.errorClass ? 'error' : '', - "runOneTest",evd.messageId,"Worker time =", - (evd.workerRespondTime - evd.workerReceivedTime),"ms.", - "Round-trip event time =", - (performance.now() - evd.departureTime),"ms.", - (evd.errorClass ? evd.message : "") - ); - }; - - const runOneTest = function(eventType, eventData, callback){ - T.assert(eventData && 'object'===typeof eventData); - /* ^^^ that is for the testing and messageId-related code, not - a hard requirement of all of the Worker-exposed APIs. */ - eventData.messageId = MsgHandlerQueue.push(eventType,function(ev){ - logEventResult(ev.data); - if(callback instanceof Function){ - callback(ev); - testCount(); - } - }); - wMsg(eventType, eventData); - }; - - /** Methods which map directly to onmessage() event.type keys. - They get passed the inbound event.data. 
*/ - const dbMsgHandler = { - open: function(ev){ - DbState.id = ev.dbId; - log("open result",ev.data); - }, - exec: function(ev){ - log("exec result",ev.data); - }, - export: function(ev){ - log("export result",ev.data); - }, - error: function(ev){ - error("ERROR from the worker:",ev.data); - logEventResult(ev.data); - }, - resultRowTest1: function f(ev){ - if(undefined === f.counter) f.counter = 0; - if(ev.data) ++f.counter; - //log("exec() result row:",ev.data); - T.assert(null===ev.data || 'number' === typeof ev.data.b); - } - }; - - /** - "The problem" now is that the test results are async. We - know, however, that the messages posted to the worker will - be processed in the order they are passed to it, so we can - create a queue of callbacks to handle them. The problem - with that approach is that it's not error-handling - friendly, in that an error can cause us to bypass a result - handler queue entry. We have to perform some extra - acrobatics to account for that. - - Problem #2 is that we cannot simply start posting events: we - first have to post an 'open' event, wait for it to respond, and - collect its db ID before continuing. If we don't wait, we may - well fire off 10+ messages before the open actually responds. - */ - const runTests2 = function(){ - const mustNotReach = ()=>{ - throw new Error("This is not supposed to be reached."); - }; - runOneTest('exec',{ - sql: ["create table t(a,b)", - "insert into t(a,b) values(1,2),(3,4),(5,6)" - ].join(';'), - multi: true, - resultRows: [], columnNames: [] - }, function(ev){ - ev = ev.data; - T.assert(0===ev.resultRows.length) - .assert(0===ev.columnNames.length); - }); - runOneTest('exec',{ - sql: 'select a a, b b from t order by a', - resultRows: [], columnNames: [], - }, function(ev){ - ev = ev.data; - T.assert(3===ev.resultRows.length) - .assert(1===ev.resultRows[0][0]) - .assert(6===ev.resultRows[2][1]) - .assert(2===ev.columnNames.length) - .assert('b'===ev.columnNames[1]); - }); - runOneTest('exec',{ - sql: 'select a a, b b from t order by a', - resultRows: [], columnNames: [], - rowMode: 'object' - }, function(ev){ - ev = ev.data; - T.assert(3===ev.resultRows.length) - .assert(1===ev.resultRows[0].a) - .assert(6===ev.resultRows[2].b) - }); - runOneTest('exec',{sql:'intentional_error'}, mustNotReach); - // Ensure that the message-handler queue survives ^^^ that error... - runOneTest('exec',{ - sql:'select 1', - resultRows: [], - //rowMode: 'array', // array is the default in the Worker interface - }, function(ev){ - ev = ev.data; - T.assert(1 === ev.resultRows.length) - .assert(1 === ev.resultRows[0][0]); - }); - runOneTest('exec',{ - sql: 'select a a, b b from t order by a', - callback: 'resultRowTest1', - rowMode: 'object' - }, function(ev){ - T.assert(3===dbMsgHandler.resultRowTest1.counter); - dbMsgHandler.resultRowTest1.counter = 0; - }); - runOneTest('exec',{ - multi: true, - sql:[ - 'pragma foreign_keys=0;', - // ^^^ arbitrary query with no result columns - 'select a, b from t order by a desc; select a from t;' - // multi-exec only honors results from the first - // statement with result columns (regardless of whether) - // it has any rows). - ], - rowMode: 1, - resultRows: [] - },function(ev){ - const rows = ev.data.resultRows; - T.assert(3===rows.length). 
- assert(6===rows[0]); - }); - runOneTest('exec',{sql: 'delete from t where a>3'}); - runOneTest('exec',{ - sql: 'select count(a) from t', - resultRows: [] - },function(ev){ - ev = ev.data; - T.assert(1===ev.resultRows.length) - .assert(2===ev.resultRows[0][0]); - }); - if(0){ - // export requires reimpl. for portability reasons. - runOneTest('export',{}, function(ev){ - ev = ev.data; - T.assert('string' === typeof ev.filename) - .assert(ev.buffer instanceof Uint8Array) - .assert(ev.buffer.length > 1024) - .assert('application/x-sqlite3' === ev.mimetype); - }); - } - /***** close() tests must come last. *****/ - runOneTest('close',{unlink:true},function(ev){ - ev = ev.data; - T.assert('string' === typeof ev.filename); - }); - runOneTest('close',{unlink:true},function(ev){ - ev = ev.data; - T.assert(undefined === ev.filename); - }); - }; - - const runTests = function(){ - /** - Design decision time: all remaining tests depend on the 'open' - command having succeeded. In order to support multiple DBs, the - upcoming commands ostensibly have to know the ID of the DB they - want to talk to. We have two choices: - - 1) We run 'open' and wait for its response, which contains the - db id. - - 2) We have the Worker automatically use the current "default - db" (the one which was most recently opened) if no db id is - provided in the message. When we do this, the main thread may - well fire off _all_ of the test messages before the 'open' - actually responds, but because the messages are handled on a - FIFO basis, those after the initial 'open' will pick up the - "default" db. However, if the open fails, then all pending - messages (until next next 'open', at least) except for 'close' - will fail and we have no way of cancelling them once they've - been posted to the worker. - - We currently do (2) because (A) it's certainly the most - client-friendly thing to do and (B) it seems likely that most - apps using this API will only have a single db to work with so - won't need to juggle multiple DB ids. If we revert to (1) then - the following call to runTests2() needs to be moved into the - callback function of the runOneTest() check for the 'open' - command. Note, also, that using approach (2) does not keep the - user from instead using approach (1), noting that doing so - requires explicit handling of the 'open' message to account for - it. - */ - const waitForOpen = 1, - simulateOpenError = 0 /* if true, the remaining tests will - all barf if waitForOpen is - false. */; - logHtml('', - "Sending 'open' message and",(waitForOpen ? "" : "NOT ")+ - "waiting for its response before continuing."); - startTime = performance.now(); - runOneTest('open', { - filename:'testing2.sqlite3', - simulateError: simulateOpenError - }, function(ev){ - //log("open result",ev); - T.assert('testing2.sqlite3'===ev.data.filename) - .assert(ev.data.dbId) - .assert(ev.data.messageId); - DbState.id = ev.data.dbId; - if(waitForOpen) setTimeout(runTests2, 0); - }); - if(!waitForOpen) runTests2(); - }; - - SW.onmessage = function(ev){ - if(!ev.data || 'object'!==typeof ev.data){ - warn("Unknown sqlite3-worker message type:",ev); - return; - } - ev = ev.data/*expecting a nested object*/; - //log("main window onmessage:",ev); - if(ev.data && ev.data.messageId){ - /* We're expecting a queued-up callback handler. 
*/ - const f = MsgHandlerQueue.shift(); - if('error'===ev.type){ - dbMsgHandler.error(ev); - return; - } - T.assert(f instanceof Function); - f(ev); - return; - } - switch(ev.type){ - case 'sqlite3-api': - switch(ev.data){ - case 'worker-ready': - log("Message:",ev); - self.sqlite3TestModule.setStatus(null); - runTests(); - return; - default: - warn("Unknown sqlite3-api message type:",ev); - return; - } - default: - if(dbMsgHandler.hasOwnProperty(ev.type)){ - try{dbMsgHandler[ev.type](ev);} - catch(err){ - error("Exception while handling db result message", - ev,":",err); - } - return; - } - warn("Unknown sqlite3-api message type:",ev); - } - }; - log("Init complete, but async init bits may still be running."); -})(); ADDED ext/wasm/version-info.c Index: ext/wasm/version-info.c ================================================================== --- /dev/null +++ ext/wasm/version-info.c @@ -0,0 +1,106 @@ +/* +** 2022-10-16 +** +** The author disclaims copyright to this source code. In place of a +** legal notice, here is a blessing: +** +** * May you do good and not evil. +** * May you find forgiveness for yourself and forgive others. +** * May you share freely, never taking more than you give. +** +************************************************************************* +** This file simply outputs sqlite3 version information in JSON form, +** intended for embedding in the sqlite3 JS API build. +*/ +#ifdef TEST_VERSION +/*3029003 3039012*/ +#define SQLITE_VERSION "X.Y.Z" +#define SQLITE_VERSION_NUMBER TEST_VERSION +#define SQLITE_SOURCE_ID "dummy" +#else +#include "sqlite3.h" +#endif +#include <stdio.h> +#include <string.h> +static void usage(const char *zAppName){ + puts("Emits version info about the sqlite3 it is built against."); + printf("Usage: %s [--quote] --INFO-FLAG:\n\n", zAppName); + puts(" --version Emit SQLITE_VERSION (3.X.Y)"); + puts(" --version-number Emit SQLITE_VERSION_NUMBER (30XXYYZZ)"); + puts(" --download-version Emit /download.html version number (3XXYYZZ)"); + puts(" --source-id Emit SQLITE_SOURCE_ID"); + puts(" --json Emit all info in JSON form"); + puts("\nThe non-JSON formats may be modified by:\n"); + puts(" --quote Add double quotes around output."); +} + +int main(int argc, char const * const * argv){ + int fJson = 0; + int fVersion = 0; + int fVersionNumber = 0; + int fDlVersion = 0; + int dlVersion = 0; + int fSourceInfo = 0; + int fQuote = 0; + int nFlags = 0; + int i; + + for( i = 1; i < argc; ++i ){ + const char * zArg = argv[i]; + while('-'==*zArg) ++zArg; + if( 0==strcmp("version", zArg) ){ + fVersion = 1; + }else if( 0==strcmp("version-number", zArg) ){ + fVersionNumber = 1; + }else if( 0==strcmp("download-version", zArg) ){ + fDlVersion = 1; + }else if( 0==strcmp("source-id", zArg) ){ + fSourceInfo = 1; + }else if( 0==strcmp("json", zArg) ){ + fJson = 1; + }else if( 0==strcmp("quote", zArg) ){ + fQuote = 1; + --nFlags; + }else{ + printf("Unhandled flag: %s\n", argv[i]); + usage(argv[0]); + return 1; + } + ++nFlags; + } + + if( 0==nFlags ) fJson = 1; + + { + const int v = SQLITE_VERSION_NUMBER; + int ver[4] = {0,0,0,0}; + ver[0] = (v / 1000000) * 1000000; + ver[1] = v % 1000000 / 100 * 1000; + ver[2] = v % 100 * 100; + dlVersion = ver[0] + ver[1] + ver[2] + ver[3]; + } + if( fJson ){ + printf("{\"libVersion\": \"%s\", " + "\"libVersionNumber\": %d, " + "\"sourceId\": \"%s\"," + "\"downloadVersion\": %d}"/*missing newline is intentional*/, + SQLITE_VERSION, + SQLITE_VERSION_NUMBER, + SQLITE_SOURCE_ID, + dlVersion); + }else{ + if(fQuote) printf("%c", '"'); + if( fVersion ){ +
printf("%s", SQLITE_VERSION); + }else if( fVersionNumber ){ + printf("%d", SQLITE_VERSION_NUMBER); + }else if( fSourceInfo ){ + printf("%s", SQLITE_SOURCE_ID); + }else if( fDlVersion ){ + printf("%d", dlVersion); + } + if(fQuote) printf("%c", '"'); + puts(""); + } + return 0; +} ADDED ext/wasm/wasmfs.make Index: ext/wasm/wasmfs.make ================================================================== --- /dev/null +++ ext/wasm/wasmfs.make @@ -0,0 +1,113 @@ +#!/usr/bin/make +#^^^^ help emacs select makefile mode +# +# This is a sub-make for building a standalone wasmfs-based +# sqlite3.wasm. It is intended to be "include"d from the main +# GNUMakefile. +######################################################################## +MAKEFILE.wasmfs := $(lastword $(MAKEFILE_LIST)) + +# Maintenance reminder: these particular files cannot be built into a +# subdirectory because loading of the auxiliary +# sqlite3-wasmfs.worker.js file it creates fails if sqlite3-wasmfs.js +# is loaded from any directory other than the one in which the +# containing HTML lives. Similarly, they cannot be loaded from a +# Worker to an Emscripten quirk regarding loading nested Workers. +dir.wasmfs := $(dir.wasm) +sqlite3-wasmfs.js := $(dir.wasmfs)/sqlite3-wasmfs.js +sqlite3-wasmfs.wasm := $(dir.wasmfs)/sqlite3-wasmfs.wasm + +CLEAN_FILES += $(sqlite3-wasmfs.js) $(sqlite3-wasmfs.wasm) \ + $(subst .js,.worker.js,$(sqlite3-wasmfs.js)) + +######################################################################## +# emcc flags for .c/.o. +sqlite3-wasmfs.cflags := +sqlite3-wasmfs.cflags += -std=c99 -fPIC +sqlite3-wasmfs.cflags += -pthread +sqlite3-wasmfs.cflags += $(cflags.common) +sqlite3-wasmfs.cflags += $(SQLITE_OPT) -DSQLITE_ENABLE_WASMFS + +######################################################################## +# emcc flags specific to building the final .js/.wasm file... +sqlite3-wasmfs.jsflags := -fPIC +sqlite3-wasmfs.jsflags += --no-entry +sqlite3-wasmfs.jsflags += --minify 0 +sqlite3-wasmfs.jsflags += -sMODULARIZE +sqlite3-wasmfs.jsflags += -sSTRICT_JS +sqlite3-wasmfs.jsflags += -sDYNAMIC_EXECUTION=0 +sqlite3-wasmfs.jsflags += -sNO_POLYFILL +sqlite3-wasmfs.jsflags += -sEXPORTED_FUNCTIONS=@$(abspath $(dir.api)/EXPORTED_FUNCTIONS.sqlite3-api) +sqlite3-wasmfs.jsflags += -sEXPORTED_RUNTIME_METHODS=FS,wasmMemory,allocateUTF8OnStack + # wasmMemory ==> for -sIMPORTED_MEMORY + # allocateUTF8OnStack ==> wasmfs internals +sqlite3-wasmfs.jsflags += -sUSE_CLOSURE_COMPILER=0 +sqlite3-wasmfs.jsflags += -sIMPORTED_MEMORY +#sqlite3-wasmfs.jsflags += -sINITIAL_MEMORY=13107200 +#sqlite3-wasmfs.jsflags += -sTOTAL_STACK=4194304 +sqlite3-wasmfs.jsflags += -sEXPORT_NAME=$(sqlite3.js.init-func) +sqlite3-wasmfs.jsflags += -sGLOBAL_BASE=4096 # HYPOTHETICALLY keep func table indexes from overlapping w/ heap addr. +#sqlite3-wasmfs.jsflags += -sFILESYSTEM=0 # only for experimentation. sqlite3 needs the FS API +# Perhaps the wasmfs build doesn't? +#sqlite3-wasmfs.jsflags += -sABORTING_MALLOC +sqlite3-wasmfs.jsflags += -sALLOW_TABLE_GROWTH +sqlite3-wasmfs.jsflags += -Wno-limited-postlink-optimizations +# ^^^^^ it likes to warn when we have "limited optimizations" via the -g3 flag. 
+sqlite3-wasmfs.jsflags += -sERROR_ON_UNDEFINED_SYMBOLS=0 +sqlite3-wasmfs.jsflags += -sLLD_REPORT_UNDEFINED +#sqlite3-wasmfs.jsflags += --import-undefined +sqlite3-wasmfs.jsflags += -sMEMORY64=0 +sqlite3-wasmfs.jsflags += -sINITIAL_MEMORY=128450560 +# ^^^^ 64MB is not enough for WASMFS/OPFS test runs using batch-runner.js +sqlite3-wasmfs.fsflags := -pthread -sWASMFS -sPTHREAD_POOL_SIZE=2 -sENVIRONMENT=web,worker +# -sPTHREAD_POOL_SIZE values of 2 or higher trigger that bug. +sqlite3-wasmfs.jsflags += $(sqlite3-wasmfs.fsflags) +#sqlite3-wasmfs.jsflags += -sALLOW_MEMORY_GROWTH +#^^^ using ALLOW_MEMORY_GROWTH produces a warning from emcc: +# USE_PTHREADS + ALLOW_MEMORY_GROWTH may run non-wasm code slowly, +# see https://github.com/WebAssembly/design/issues/1271 [-Wpthreads-mem-growth] +sqlite3-wasmfs.jsflags += -sWASM_BIGINT=$(emcc.WASM_BIGINT) +$(eval $(call call-make-pre-js,sqlite3-wasmfs)) +sqlite3-wasmfs.jsflags += $(pre-post-common.flags) $(pre-post-sqlite3-wasmfs.flags) +$(sqlite3-wasmfs.js): $(sqlite3-wasm.c) \ + $(EXPORTED_FUNCTIONS.api) $(MAKEFILE) $(MAKEFILE.wasmfs) \ + $(pre-post-sqlite3-wasmfs.deps) + @echo "Building $@ ..." + $(emcc.bin) -o $@ $(emcc_opt_full) $(emcc.flags) \ + $(sqlite3-wasmfs.cflags) $(sqlite3-wasmfs.jsflags) \ + $(sqlite3-wasm.c) + chmod -x $(sqlite3-wasmfs.wasm) + $(maybe-wasm-strip) $(sqlite3-wasmfs.wasm) + @ls -la $@ $(sqlite3-wasmfs.wasm) +$(sqlite3-wasmfs.wasm): $(sqlite3-wasmfs.js) +wasmfs: $(sqlite3-wasmfs.js) +all: wasmfs + +######################################################################## +# speedtest1 for wasmfs. +speedtest1-wasmfs.js := $(dir.wasmfs)/speedtest1-wasmfs.js +speedtest1-wasmfs.wasm := $(subst .js,.wasm,$(speedtest1-wasmfs.js)) +speedtest1-wasmfs.eflags := $(sqlite3-wasmfs.fsflags) +speedtest1-wasmfs.eflags += $(SQLITE_OPT) -DSQLITE_ENABLE_WASMFS +speedtest1-wasmfs.eflags += -sALLOW_MEMORY_GROWTH=0 +speedtest1-wasmfs.eflags += -sINITIAL_MEMORY=$(emcc.INITIAL_MEMORY.128) +$(eval $(call call-make-pre-js,speedtest1-wasmfs)) +$(speedtest1-wasmfs.js): $(speedtest1.cses) $(sqlite3-wasmfs.js) \ + $(MAKEFILE) $(MAKEFILE.wasmfs) \ + $(pre-post-speedtest1-wasmfs.deps) \ + $(EXPORTED_FUNCTIONS.speedtest1) + @echo "Building $@ ..." + $(emcc.bin) \ + $(speedtest1-wasmfs.eflags) $(speedtest1-common.eflags) \ + $(pre-post-speedtest1-wasmfs.flags) \ + $(speedtest1.cflags) \ + $(sqlite3-wasmfs.cflags) \ + -o $@ $(speedtest1.cses) -lm + $(maybe-wasm-strip) $(speedtest1-wasmfs.wasm) + ls -la $@ $(speedtest1-wasmfs.wasm) + +speedtest1: $(speedtest1-wasmfs.js) +CLEAN_FILES += $(speedtest1-wasmfs.js) $(speedtest1-wasmfs.wasm) \ + $(subst .js,.worker.js,$(speedtest1-wasmfs.js)) +# end speedtest1.js +######################################################################## Index: magic.txt ================================================================== --- magic.txt +++ magic.txt @@ -7,26 +7,27 @@ # using: # # PRAGMA application_id = INTEGER; # # INTEGER can be any signed 32-bit integer. That integer is written as -# a 4-byte big-endian integer into offset 68 of the database header. +# a 4-byte big-endian integer into offset 68 of the database header. # # The Monotone application used "PRAGMA user_version=1598903374;" to set # its identifier long before "PRAGMA application_id" became available. # The user_version is very similar to application_id except that it is -# stored at offset 68 instead of offset 60. The application_id pragma +# stored at offset 60 instead of offset 68. The application_id pragma # is preferred. 
The rule using offset 60 for Monotone is for historical # compatibility only. # 0 string =SQLite\ format\ 3 ->68 belong =0x0f055112 Fossil checkout - ->68 belong =0x0f055113 Fossil global configuration - +>68 belong =0x0f055112 Fossil checkout - +>68 belong =0x0f055113 Fossil global configuration - >68 belong =0x0f055111 Fossil repository - >68 belong =0x42654462 Bentley Systems BeSQLite Database - >68 belong =0x42654c6e Bentley Systems Localization File - >60 belong =0x5f4d544e Monotone source repository - >68 belong =0x47504b47 OGC GeoPackage file - >68 belong =0x47503130 OGC GeoPackage version 1.0 file - >68 belong =0x45737269 Esri Spatially-Enabled Database - >68 belong =0x4d504258 MBTiles tileset - +>68 belong =0x6a035744 TeXnicard card database >0 string =SQLite SQLite3 database Index: main.mk ================================================================== --- main.mk +++ main.mk @@ -66,11 +66,11 @@ fts3_write.o fts5.o func.o global.o hash.o \ icu.o insert.o json.o legacy.o loadext.o \ main.o malloc.o mem0.o mem1.o mem2.o mem3.o mem5.o \ memdb.o memjournal.o \ mutex.o mutex_noop.o mutex_unix.o mutex_w32.o \ - notify.o opcodes.o os.o os_unix.o os_win.o \ + notify.o opcodes.o os.o os_kv.o os_unix.o os_win.o \ pager.o pcache.o pcache1.o pragma.o prepare.o printf.o \ random.o resolve.o rowset.o rtree.o \ select.o sqlite3rbu.o status.o stmt.o \ table.o threads.o tokenize.o treeview.o trigger.o \ update.o upsert.o userauth.o util.o vacuum.o \ @@ -132,10 +132,11 @@ $(TOP)/src/notify.c \ $(TOP)/src/os.c \ $(TOP)/src/os.h \ $(TOP)/src/os_common.h \ $(TOP)/src/os_setup.h \ + $(TOP)/src/os_kv.c \ $(TOP)/src/os_unix.c \ $(TOP)/src/os_win.c \ $(TOP)/src/os_win.h \ $(TOP)/src/pager.c \ $(TOP)/src/pager.h \ @@ -342,10 +343,11 @@ $(TOP)/src/test_osinst.c \ $(TOP)/src/test_pcache.c \ $(TOP)/src/test_quota.c \ $(TOP)/src/test_rtree.c \ $(TOP)/src/test_schema.c \ + $(TOP)/src/test_schemapool.c \ $(TOP)/src/test_server.c \ $(TOP)/src/test_sqllog.c \ $(TOP)/src/test_superlock.c \ $(TOP)/src/test_syscall.c \ $(TOP)/src/test_tclsh.c \ @@ -360,11 +362,10 @@ # Extensions to be statically loaded. 
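
The offsets discussed in the magic.txt comment above are easy to verify directly: application_id is the 4-byte big-endian word at offset 68 of the database header, and user_version is the word at offset 60. Below is a minimal standalone sketch, not part of the patch, that reads both fields from a database file; the path "test.db" is a placeholder.

/*
** Sketch: decode the user_version (offset 60) and application_id
** (offset 68) fields of an SQLite database header, both stored as
** 4-byte big-endian integers.  "test.db" is a placeholder path.
*/
#include <stdio.h>

static unsigned int be32(const unsigned char *p){
  return ((unsigned int)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
}

int main(void){
  unsigned char hdr[100];
  FILE *f = fopen("test.db", "rb");
  if( f==0 || fread(hdr, 1, sizeof(hdr), f)!=sizeof(hdr) ){
    if( f ) fclose(f);
    fprintf(stderr, "cannot read database header\n");
    return 1;
  }
  fclose(f);
  printf("user_version   = %u\n", be32(&hdr[60]));
  printf("application_id = 0x%08x\n", be32(&hdr[68]));
  return 0;
}
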
# TESTSRC += \ $(TOP)/ext/misc/amatch.c \ $(TOP)/ext/misc/appendvfs.c \ - $(TOP)/ext/misc/bgckpt.c \ $(TOP)/ext/misc/carray.c \ $(TOP)/ext/misc/cksumvfs.c \ $(TOP)/ext/misc/closure.c \ $(TOP)/ext/misc/csv.c \ $(TOP)/ext/misc/decimal.c \ @@ -388,11 +389,14 @@ $(TOP)/ext/misc/wholenumber.c \ $(TOP)/ext/misc/zipfile.c \ $(TOP)/ext/fts5/fts5_tcl.c \ $(TOP)/ext/fts5/fts5_test_mi.c \ $(TOP)/ext/fts5/fts5_test_tok.c \ - $(TOP)/ext/rtree/test_rtreedoc.c + $(TOP)/ext/rtree/test_rtreedoc.c \ + $(TOP)/ext/recover/sqlite3recover.c \ + $(TOP)/ext/recover/dbdata.c \ + $(TOP)/ext/recover/test_recover.c #TESTSRC += $(TOP)/ext/fts2/fts2_tokenizer.c #TESTSRC += $(TOP)/ext/fts3/fts3_tokenizer.c @@ -399,10 +403,11 @@ TESTSRC2 = \ $(TOP)/src/attach.c \ $(TOP)/src/backup.c \ $(TOP)/src/btree.c \ $(TOP)/src/build.c \ + $(TOP)/src/callback.c \ $(TOP)/src/date.c \ $(TOP)/src/dbpage.c \ $(TOP)/src/dbstat.c \ $(TOP)/src/expr.c \ $(TOP)/src/func.c \ @@ -410,10 +415,11 @@ $(TOP)/src/insert.c \ $(TOP)/src/wal.c \ $(TOP)/src/main.c \ $(TOP)/src/mem5.c \ $(TOP)/src/os.c \ + $(TOP)/src/os_kv.c \ $(TOP)/src/os_unix.c \ $(TOP)/src/os_win.c \ $(TOP)/src/pager.c \ $(TOP)/src/pragma.c \ $(TOP)/src/prepare.c \ @@ -442,11 +448,10 @@ $(TOP)/ext/fts3/fts3_tokenizer.c \ $(TOP)/ext/fts3/fts3_write.c \ $(TOP)/ext/async/sqlite3async.c \ $(TOP)/ext/misc/stmt.c \ $(TOP)/ext/session/sqlite3session.c \ - $(TOP)/ext/session/sqlite3changebatch.c \ $(TOP)/ext/session/test_session.c \ fts5.c # Header files used by all library source files. # @@ -539,21 +544,26 @@ SHELL_OPT += -DSQLITE_ENABLE_STMTVTAB SHELL_OPT += -DSQLITE_ENABLE_DBPAGE_VTAB SHELL_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB SHELL_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB SHELL_OPT += -DSQLITE_ENABLE_OFFSET_SQL_FUNC -FUZZCHECK_OPT = -DSQLITE_ENABLE_MEMSYS5 +FUZZCHECK_OPT += -I$(TOP)/test +FUZZCHECK_OPT += -I$(TOP)/ext/recover +FUZZCHECK_OPT += -DSQLITE_ENABLE_MEMSYS5 FUZZCHECK_OPT += -DSQLITE_MAX_MEMORY=50000000 FUZZCHECK_OPT += -DSQLITE_PRINTF_PRECISION_LIMIT=1000 FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS4 FUZZCHECK_OPT += -DSQLITE_ENABLE_RTREE FUZZCHECK_OPT += -DSQLITE_ENABLE_GEOPOLY FUZZCHECK_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB FUZZCHECK_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB FUZZSRC += $(TOP)/test/fuzzcheck.c FUZZSRC += $(TOP)/test/ossfuzz.c +FUZZSRC += $(TOP)/test/vt02.c FUZZSRC += $(TOP)/test/fuzzinvariants.c +FUZZSRC += $(TOP)/ext/recover/dbdata.c +FUZZSRC += $(TOP)/ext/recover/sqlite3recover.c DBFUZZ_OPT = KV_OPT = -DSQLITE_THREADSAFE=0 -DSQLITE_DIRECT_OVERFLOW_READ ST_OPT = -DSQLITE_THREADSAFE=0 # This is the default Makefile target. The objects listed here @@ -608,11 +618,11 @@ dbfuzz2$(EXE): $(TOP)/test/dbfuzz2.c sqlite3.c sqlite3.h $(TCCX) -I. 
-g -O0 -DSTANDALONE -o dbfuzz2$(EXE) \ $(DBFUZZ2_OPTS) $(TOP)/test/dbfuzz2.c sqlite3.c $(TLIBS) $(THREADLIB) -fuzzcheck$(EXE): $(FUZZSRC) sqlite3.c sqlite3.h +fuzzcheck$(EXE): $(FUZZSRC) sqlite3.c sqlite3.h $(FUZZDEP) $(TCCX) -o fuzzcheck$(EXE) -DSQLITE_THREADSAFE=0 -DSQLITE_OMIT_LOAD_EXTENSION \ -DSQLITE_ENABLE_MEMSYS5 $(FUZZCHECK_OPT) -DSQLITE_OSS_FUZZ \ $(FUZZSRC) sqlite3.c $(TLIBS) $(THREADLIB) ossshell$(EXE): $(TOP)/test/ossfuzz.c $(TOP)/test/ossshell.c sqlite3.c sqlite3.h @@ -759,11 +769,13 @@ $(TOP)/ext/misc/uint.c \ $(TOP)/ext/expert/sqlite3expert.c \ $(TOP)/ext/expert/sqlite3expert.h \ $(TOP)/ext/misc/zipfile.c \ $(TOP)/ext/misc/memtrace.c \ - $(TOP)/ext/misc/dbdata.c \ + $(TOP)/ext/recover/dbdata.c \ + $(TOP)/ext/recover/sqlite3recover.c \ + $(TOP)/ext/recover/sqlite3recover.h \ $(TOP)/src/test_windirent.c shell.c: $(SHELL_SRC) $(TOP)/tool/mkshellc.tcl tclsh $(TOP)/tool/mkshellc.tcl >shell.c @@ -1001,19 +1013,15 @@ THREADTEST3_SRC = $(TOP)/test/threadtest3.c \ $(TOP)/test/tt3_checkpoint.c \ $(TOP)/test/tt3_index.c \ $(TOP)/test/tt3_vacuum.c \ $(TOP)/test/tt3_stress.c \ - $(TOP)/test/tt3_bcwal2.c \ $(TOP)/test/tt3_lookaside1.c threadtest3$(EXE): sqlite3.o $(THREADTEST3_SRC) $(TOP)/src/test_multiplex.c $(TCCX) $(TOP)/test/threadtest3.c $(TOP)/src/test_multiplex.c sqlite3.o -o $@ $(THREADLIB) -bc_test1$(EXE): sqlite3.o $(TOP)/test/bc_test1.c $(TOP)/test/tt3_core.c - $(TCCX) $(TOP)/test/bc_test1.c sqlite3.o -o $@ $(THREADLIB) - threadtest: threadtest3$(EXE) ./threadtest3$(EXE) TEST_EXTENSION = $(SHPREFIX)testloadext.$(SO) $(TEST_EXTENSION): $(TOP)/src/test_loadext.c ADDED sqlite_cfg.h.in Index: sqlite_cfg.h.in ================================================================== --- /dev/null +++ sqlite_cfg.h.in @@ -0,0 +1,142 @@ +/* sqlite_cfg.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the `fdatasync' function. */ +#undef HAVE_FDATASYNC + +/* Define to 1 if you have the `gmtime_r' function. */ +#undef HAVE_GMTIME_R + +/* Define to 1 if the system has the type `int16_t'. */ +#undef HAVE_INT16_T + +/* Define to 1 if the system has the type `int32_t'. */ +#undef HAVE_INT32_T + +/* Define to 1 if the system has the type `int64_t'. */ +#undef HAVE_INT64_T + +/* Define to 1 if the system has the type `int8_t'. */ +#undef HAVE_INT8_T + +/* Define to 1 if the system has the type `intptr_t'. */ +#undef HAVE_INTPTR_T + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `isnan' function. */ +#undef HAVE_ISNAN + +/* Define to 1 if you have the `localtime_r' function. */ +#undef HAVE_LOCALTIME_R + +/* Define to 1 if you have the `localtime_s' function. */ +#undef HAVE_LOCALTIME_S + +/* Define to 1 if you have the header file. */ +#undef HAVE_MALLOC_H + +/* Define to 1 if you have the `malloc_usable_size' function. */ +#undef HAVE_MALLOC_USABLE_SIZE + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the `pread' function. */ +#undef HAVE_PREAD + +/* Define to 1 if you have the `pread64' function. */ +#undef HAVE_PREAD64 + +/* Define to 1 if you have the `pwrite' function. */ +#undef HAVE_PWRITE + +/* Define to 1 if you have the `pwrite64' function. */ +#undef HAVE_PWRITE64 + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the `strchrnul' function. 
*/ +#undef HAVE_STRCHRNUL + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if the system has the type `uint16_t'. */ +#undef HAVE_UINT16_T + +/* Define to 1 if the system has the type `uint32_t'. */ +#undef HAVE_UINT32_T + +/* Define to 1 if the system has the type `uint64_t'. */ +#undef HAVE_UINT64_T + +/* Define to 1 if the system has the type `uint8_t'. */ +#undef HAVE_UINT8_T + +/* Define to 1 if the system has the type `uintptr_t'. */ +#undef HAVE_UINTPTR_T + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if you have the `usleep' function. */ +#undef HAVE_USLEEP + +/* Define to 1 if you have the `utime' function. */ +#undef HAVE_UTIME + +/* Define to 1 if you have the header file. */ +#undef HAVE_ZLIB_H + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#undef LT_OBJDIR + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Enable large inode numbers on Mac OS X 10.5. */ +#ifndef _DARWIN_USE_64_BIT_INODE +# define _DARWIN_USE_64_BIT_INODE 1 +#endif + +/* Number of bits in a file offset, on hosts where this is settable. */ +#undef _FILE_OFFSET_BITS + +/* Define for large files, on AIX-style hosts. */ +#undef _LARGE_FILES Index: src/alter.c ================================================================== --- src/alter.c +++ src/alter.c @@ -110,12 +110,12 @@ */ static void renameReloadSchema(Parse *pParse, int iDb, u16 p5){ Vdbe *v = pParse->pVdbe; if( v ){ sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, iDb, 0, p5); - if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, 1, 0, p5); + sqlite3VdbeAddParseSchemaOp(pParse, iDb, 0, p5); + if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse, 1, 0, p5); } } /* ** Generate code to implement the "ALTER TABLE xxx RENAME TO yyy" Index: src/analyze.c ================================================================== --- src/analyze.c +++ src/analyze.c @@ -210,10 +210,11 @@ if( iregRoot. This is important ** because the OpenWrite opcode below will be needing it. 
*/ + sqlite3SchemaWritable(pParse, iDb); sqlite3NestedParse(pParse, "CREATE TABLE %Q.%s(%s)", pDb->zDbSName, zTab, aTable[i].zCols ); aRoot[i] = (u32)pParse->regRoot; aCreateTbl[i] = OPFLAG_P2ISREG; @@ -951,10 +952,11 @@ assert( k>=0 && knColumn ); i = pIdx->aiColumn[k]; if( NEVER(i==XN_ROWID) ){ VdbeComment((v,"%s.rowid",pIdx->zName)); }else if( i==XN_EXPR ){ + assert( pIdx->bHasExpr ); VdbeComment((v,"%s.expr(%d)",pIdx->zName, k)); }else{ VdbeComment((v,"%s.%s", pIdx->zName, pIdx->pTable->aCol[i].zCnName)); } } Index: src/attach.c ================================================================== --- src/attach.c +++ src/attach.c @@ -203,18 +203,18 @@ ** If this fails, or if opening the file failed, then close the file and ** remove the entry from the db->aDb[] array. i.e. put everything back the ** way we found it. */ if( rc==SQLITE_OK ){ - sqlite3BtreeEnterAll(db); db->init.iDb = 0; db->mDbFlags &= ~(DBFLAG_SchemaKnownOk); - if( !REOPEN_AS_MEMDB(db) ){ + if( !IsSharedSchema(db) && !REOPEN_AS_MEMDB(db) ){ + sqlite3BtreeEnterAll(db); rc = sqlite3Init(db, &zErrDyn); + sqlite3BtreeLeaveAll(db); + assert( zErrDyn==0 || rc!=SQLITE_OK ); } - sqlite3BtreeLeaveAll(db); - assert( zErrDyn==0 || rc!=SQLITE_OK ); } #ifdef SQLITE_USER_AUTHENTICATION if( rc==SQLITE_OK && !REOPEN_AS_MEMDB(db) ){ u8 newAuth = 0; rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth); @@ -310,10 +310,11 @@ pTrig->pTabSchema = pTrig->pSchema; } pEntry = sqliteHashNext(pEntry); } + (void)sqlite3SchemaDisconnect(db, i, 0); sqlite3BtreeClose(pDb->pBt); pDb->pBt = 0; pDb->pSchema = 0; sqlite3CollapseDatabaseArray(db); return; Index: src/bitvec.c ================================================================== --- src/bitvec.c +++ src/bitvec.c @@ -169,16 +169,10 @@ int sqlite3BitvecSet(Bitvec *p, u32 i){ u32 h; if( p==0 ) return SQLITE_OK; assert( i>0 ); assert( i<=p->iSize ); - if( i>p->iSize || i==0 ){ - sqlite3_log(SQLITE_ERROR, - "Bitvec: setting bit %d of bitvec size %d\n", (int)i, (int)p->iSize - ); - abort(); - } i--; while((p->iSize > BITVEC_NBIT) && p->iDivisor) { u32 bin = i/p->iDivisor; i = i%p->iDivisor; if( p->u.apSub[bin]==0 ){ Index: src/btree.c ================================================================== --- src/btree.c +++ src/btree.c @@ -12,11 +12,10 @@ ** This file implements an external (disk-based) database using BTrees. ** See the header comment on "btreeInt.h" for additional information. ** Including a description of file format and an overview of operation. */ #include "btreeInt.h" -#include "vdbeInt.h" /* ** The header string that appears at the beginning of every ** SQLite database. */ @@ -479,244 +478,11 @@ } } #endif /* SQLITE_OMIT_SHARED_CACHE */ -#ifndef SQLITE_OMIT_CONCURRENT -/* -** The following structure - BtreePtrmap - stores the in-memory pointer map -** used for newly allocated pages in CONCURRENT transactions. Such pages are -** always allocated in a contiguous block (from the end of the file) starting -** with page BtreePtrmap.iFirst. 
-*/ -typedef struct RollbackEntry RollbackEntry; -typedef struct PtrmapEntry PtrmapEntry; -struct PtrmapEntry { - Pgno parent; - u8 eType; -}; -struct RollbackEntry { - Pgno pgno; - Pgno parent; - u8 eType; -}; -struct BtreePtrmap { - Pgno iFirst; /* First new page number aPtr[0] */ - - int nPtrAlloc; /* Allocated size of aPtr[] array */ - PtrmapEntry *aPtr; /* Array of parent page numbers */ - - int nSvpt; /* Used size of aSvpt[] array */ - int nSvptAlloc; /* Allocated size of aSvpt[] */ - int *aSvpt; /* First aRollback[] entry for savepoint i */ - - int nRollback; /* Used size of aRollback[] array */ - int nRollbackAlloc; /* Allocated size of aRollback[] array */ - RollbackEntry *aRollback; /* Array of rollback entries */ -}; - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** If page number pgno is greater than or equal to BtreePtrmap.iFirst, -** store an entry for it in the pointer-map structure. -*/ -static int btreePtrmapStore( - BtShared *pBt, - Pgno pgno, - u8 eType, - Pgno parent -){ - BtreePtrmap *pMap = pBt->pMap; - if( pgno>=pMap->iFirst ){ - int iEntry = pgno - pMap->iFirst; - - /* Grow the aPtr[] array as required */ - while( iEntry>=pMap->nPtrAlloc ){ - int nNew = pMap->nPtrAlloc ? pMap->nPtrAlloc*2 : 16; - PtrmapEntry *aNew = (PtrmapEntry*)sqlite3_realloc( - pMap->aPtr, nNew*sizeof(PtrmapEntry) - ); - if( aNew==0 ){ - return SQLITE_NOMEM; - }else{ - int nByte = (nNew-pMap->nPtrAlloc)*sizeof(PtrmapEntry); - memset(&aNew[pMap->nPtrAlloc], 0, nByte); - pMap->aPtr = aNew; - pMap->nPtrAlloc = nNew; - } - } - - /* Add an entry to the rollback log if required */ - if( pMap->nSvpt>0 && pMap->aPtr[iEntry].parent ){ - if( pMap->nRollback>=pMap->nRollbackAlloc ){ - int nNew = pMap->nRollback ? pMap->nRollback*2 : 16; - RollbackEntry *aNew = (RollbackEntry*)sqlite3_realloc( - pMap->aRollback, nNew*sizeof(RollbackEntry) - ); - if( aNew==0 ){ - return SQLITE_NOMEM; - }else{ - pMap->aRollback = aNew; - pMap->nRollbackAlloc = nNew; - } - } - - pMap->aRollback[pMap->nRollback].pgno = pgno; - pMap->aRollback[pMap->nRollback].parent = pMap->aPtr[iEntry].parent; - pMap->aRollback[pMap->nRollback].eType = pMap->aPtr[iEntry].eType; - pMap->nRollback++; - } - - /* Update the aPtr[] array */ - pMap->aPtr[iEntry].parent = parent; - pMap->aPtr[iEntry].eType = eType; - } - - return SQLITE_OK; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** Open savepoint iSavepoint, if it is not already open. -*/ -static int btreePtrmapBegin(BtShared *pBt, int nSvpt){ - BtreePtrmap *pMap = pBt->pMap; - if( pMap && nSvpt>pMap->nSvpt ){ - int i; - if( nSvpt>=pMap->nSvptAlloc ){ - int nNew = pMap->nSvptAlloc ? pMap->nSvptAlloc*2 : 16; - int *aNew = sqlite3_realloc(pMap->aSvpt, sizeof(int) * nNew); - if( aNew==0 ){ - return SQLITE_NOMEM; - }else{ - pMap->aSvpt = aNew; - pMap->nSvptAlloc = nNew; - } - } - - for(i=pMap->nSvpt; iaSvpt[i] = pMap->nRollback; - } - pMap->nSvpt = nSvpt; - } - - return SQLITE_OK; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** Rollback (if op==SAVEPOINT_ROLLBACK) or release (if op==SAVEPOINT_RELEASE) -** savepoint iSvpt. 
-*/ -static void btreePtrmapEnd(BtShared *pBt, int op, int iSvpt){ - BtreePtrmap *pMap = pBt->pMap; - if( pMap ){ - assert( op==SAVEPOINT_ROLLBACK || op==SAVEPOINT_RELEASE ); - assert( iSvpt>=0 || (iSvpt==-1 && op==SAVEPOINT_ROLLBACK) ); - if( iSvpt<0 ){ - pMap->nSvpt = 0; - pMap->nRollback = 0; - memset(pMap->aPtr, 0, sizeof(Pgno) * pMap->nPtrAlloc); - }else if( iSvptnSvpt ){ - if( op==SAVEPOINT_ROLLBACK ){ - int ii; - for(ii=pMap->nRollback-1; ii>=pMap->aSvpt[iSvpt]; ii--){ - RollbackEntry *p = &pMap->aRollback[ii]; - PtrmapEntry *pEntry = &pMap->aPtr[p->pgno - pMap->iFirst]; - pEntry->parent = p->parent; - pEntry->eType = p->eType; - } - } - pMap->nSvpt = iSvpt + (op==SAVEPOINT_ROLLBACK); - pMap->nRollback = pMap->aSvpt[iSvpt]; - } - } -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** This function is called after an CONCURRENT transaction is opened on the -** database. It allocates the BtreePtrmap structure used to track pointers -** to allocated pages and zeroes the nFree/iTrunk fields in the database -** header on page 1. -*/ -static int btreePtrmapAllocate(BtShared *pBt){ - int rc = SQLITE_OK; - if( pBt->pMap==0 ){ - BtreePtrmap *pMap = sqlite3_malloc(sizeof(BtreePtrmap)); - if( pMap==0 ){ - rc = SQLITE_NOMEM; - }else{ - memset(&pBt->pPage1->aData[32], 0, sizeof(u32)*2); - memset(pMap, 0, sizeof(BtreePtrmap)); - pMap->iFirst = pBt->nPage + 1; - pBt->pMap = pMap; - } - } - return rc; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** Free any BtreePtrmap structure allocated by an earlier call to -** btreePtrmapAllocate(). -*/ -static void btreePtrmapDelete(BtShared *pBt){ - BtreePtrmap *pMap = pBt->pMap; - if( pMap ){ - sqlite3_free(pMap->aRollback); - sqlite3_free(pMap->aPtr); - sqlite3_free(pMap->aSvpt); - sqlite3_free(pMap); - pBt->pMap = 0; - } -} - -/* -** Check that the pointer-map does not contain any entries with a parent -** page of 0. Call sqlite3_log() multiple times to output the entire -** data structure if it does. 
-*/ -static void btreePtrmapCheck(BtShared *pBt, Pgno nPage){ - Pgno i; - int bProblem = 0; - BtreePtrmap *p = pBt->pMap; - - for(i=p->iFirst; i<=nPage; i++){ - PtrmapEntry *pEntry = &p->aPtr[i-p->iFirst]; - if( pEntry->eType==PTRMAP_OVERFLOW1 - || pEntry->eType==PTRMAP_OVERFLOW2 - || pEntry->eType==PTRMAP_BTREE - ){ - if( pEntry->parent==0 ){ - bProblem = 1; - break; - } - } - } - - if( bProblem ){ - for(i=p->iFirst; i<=nPage; i++){ - PtrmapEntry *pEntry = &p->aPtr[i-p->iFirst]; - sqlite3_log(SQLITE_CORRUPT, - "btreePtrmapCheck: pgno=%d eType=%d parent=%d", - (int)i, (int)pEntry->eType, (int)pEntry->parent - ); - } - abort(); - } -} - -#else /* SQLITE_OMIT_CONCURRENT */ -# define btreePtrmapAllocate(x) SQLITE_OK -# define btreePtrmapDelete(x) -# define btreePtrmapBegin(x,y) SQLITE_OK -# define btreePtrmapEnd(x,y,z) -# define btreePtrmapCheck(y,z) -#endif /* SQLITE_OMIT_CONCURRENT */ - -static void releasePage(MemPage *pPage); /* Forward reference */ +static void releasePage(MemPage *pPage); /* Forward reference */ static void releasePageOne(MemPage *pPage); /* Forward reference */ static void releasePageNotNull(MemPage *pPage); /* Forward reference */ /* ***** This routine is used inside of assert() only **** @@ -1237,17 +1003,10 @@ assert( sqlite3_mutex_held(pBt->mutex) ); /* The super-journal page number must never be used as a pointer map page */ assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) ); -#ifndef SQLITE_OMIT_CONCURRENT - if( pBt->pMap ){ - *pRC = btreePtrmapStore(pBt, key, eType, parent); - return; - } -#endif - assert( pBt->autoVacuum ); if( key==0 ){ *pRC = SQLITE_CORRUPT_BKPT; return; } @@ -1894,11 +1653,10 @@ /* Remove the slot from the free-list. Update the number of ** fragmented bytes within the page. */ memcpy(&aData[iAddr], &aData[pc], 2); aData[hdr+7] += (u8)x; - testcase( pc+x>maxPC ); return &aData[pc]; }else if( x+pc > maxPC ){ /* This slot extends off the end of the usable part of the page */ *pRc = SQLITE_CORRUPT_PAGE(pPg); return 0; @@ -2550,21 +2308,10 @@ testcase( pgno==0 ); assert( pgno!=0 || rc!=SQLITE_OK ); return rc; } -#ifndef SQLITE_OMIT_CONCURRENT -/* -** Set the value of the MemPage.pgnoRoot variable, if it exists. -*/ -static void setMempageRoot(MemPage *pPg, u32 pgnoRoot){ - pPg->pgnoRoot = pgnoRoot; -} -#else -# define setMempageRoot(x,y) -#endif - /* ** Release a MemPage. This should be called once for each prior ** call to btreeGetPage. ** ** Page1 is a special case and must be released using releasePageOne(). @@ -3464,14 +3211,14 @@ } if( page1[19]>1 ){ goto page1_init_failed; } #else - if( page1[18]>3 ){ + if( page1[18]>2 ){ pBt->btsFlags |= BTS_READ_ONLY; } - if( page1[19]>3 ){ + if( page1[19]>2 ){ goto page1_init_failed; } /* If the read version is set to 2, this database should be accessed ** in WAL mode. If the log is not already open, open it now. Then @@ -3479,13 +3226,13 @@ ** The caller detects this and calls this function again. This is ** required as the version of page 1 currently in the page1 buffer ** may not be the latest version - there may be a newer one in the log ** file. 
*/ - if( page1[19]>=2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){ + if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){ int isOpen = 0; - rc = sqlite3PagerOpenWal(pBt->pPager, (page1[19]==3), &isOpen); + rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen); if( rc!=SQLITE_OK ){ goto page1_init_failed; }else{ setDefaultSyncFlag(pBt, SQLITE_DEFAULT_WAL_SYNCHRONOUS+1); if( isOpen==0 ){ @@ -3739,11 +3486,10 @@ */ int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){ BtShared *pBt = p->pBt; Pager *pPager = pBt->pPager; int rc = SQLITE_OK; - int bConcurrent = (p->db->eConcurrent && !ISAUTOVACUUM); sqlite3BtreeEnter(p); btreeIntegrity(p); /* If the btree is already in a write-transaction, or it @@ -3823,20 +3569,15 @@ ** file is not pBt->pageSize. In this case lockBtree() will update ** pBt->pageSize to the page-size of the file on disk. */ while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) ); - if( pBt->aSchemaVersion ){ - pBt->aSchemaVersion[SCHEMA_VERSION_AFTERLOCKBTREE] = sqlite3STimeNow(); - } - if( rc==SQLITE_OK && wrflag ){ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 ){ rc = SQLITE_READONLY; }else{ - int exFlag = bConcurrent ? -1 : (wrflag>1); - rc = sqlite3PagerBegin(pPager, exFlag, sqlite3TempInMemory(p->db)); + rc = sqlite3PagerBegin(pPager, wrflag>1, sqlite3TempInMemory(p->db)); if( rc==SQLITE_OK ){ rc = newDatabase(pBt); }else if( rc==SQLITE_BUSY_SNAPSHOT && pBt->inTransaction==TRANS_NONE ){ /* if there was no transaction opened when this function was ** called and SQLITE_BUSY_SNAPSHOT is returned, change the error @@ -3896,33 +3637,20 @@ } } } trans_begun: -#ifndef SQLITE_OMIT_CONCURRENT - if( bConcurrent && rc==SQLITE_OK && sqlite3PagerIsWal(pBt->pPager) ){ - rc = sqlite3PagerBeginConcurrent(pBt->pPager); - if( rc==SQLITE_OK && wrflag ){ - rc = btreePtrmapAllocate(pBt); - } - } -#endif - if( rc==SQLITE_OK ){ if( pSchemaVersion ){ *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]); } if( wrflag ){ /* This call makes sure that the pager has the correct number of ** open savepoints. If the second parameter is greater than 0 and ** the sub-journal is not already open, then it will be opened here. */ - int nSavepoint = p->db->nSavepoint; - rc = sqlite3PagerOpenSavepoint(pPager, nSavepoint); - if( rc==SQLITE_OK && nSavepoint ){ - rc = btreePtrmapBegin(pBt, nSavepoint); - } + rc = sqlite3PagerOpenSavepoint(pPager, p->db->nSavepoint); } } btreeIntegrity(p); sqlite3BtreeLeave(p); @@ -4012,10 +3740,13 @@ put4byte(pCell+info.nSize-4, iTo); break; } } }else{ + if( pCell+4 > pPage->aData+pPage->pBt->usableSize ){ + return SQLITE_CORRUPT_PAGE(pPage); + } if( get4byte(pCell)==iFrom ){ put4byte(pCell, iTo); break; } } @@ -4383,179 +4114,10 @@ #else /* ifndef SQLITE_OMIT_AUTOVACUUM */ # define setChildPtrmaps(x) SQLITE_OK #endif -#ifndef SQLITE_OMIT_CONCURRENT -/* -** This function is called as part of merging an CONCURRENT transaction with -** the snapshot at the head of the wal file. It relocates all pages in the -** range iFirst..iLast, inclusive. It is assumed that the BtreePtrmap -** structure at BtShared.pMap contains the location of the pointers to each -** page in the range. -** -** If pnCurrent is NULL, then all pages in the range are moved to currently -** free locations (i.e. free-list entries) within the database file before page -** iFirst. -** -** Or, if pnCurrent is not NULL, then it points to a value containing the -** current size of the database file in pages. 
In this case, all pages are -** relocated to the end of the database file - page iFirst is relocated to -** page (*pnCurrent+1), page iFirst+1 to page (*pnCurrent+2), and so on. -** Value *pnCurrent is set to the new size of the database before this -** function returns. -** -** If no error occurs, SQLITE_OK is returned. Otherwise, an SQLite error code. -*/ -static int btreeRelocateRange( - BtShared *pBt, /* B-tree handle */ - Pgno iFirst, /* First page to relocate */ - Pgno iLast, /* Last page to relocate */ - Pgno *pnCurrent /* If not NULL, IN/OUT: Database size */ -){ - int rc = SQLITE_OK; - BtreePtrmap *pMap = pBt->pMap; - Pgno iPg; - - for(iPg=iFirst; iPg<=iLast && rc==SQLITE_OK; iPg++){ - MemPage *pFree = 0; /* Page allocated from free-list */ - MemPage *pPg = 0; - Pgno iNew; /* New page number for pPg */ - PtrmapEntry *pEntry; /* Pointer map entry for page iPg */ - - if( iPg==PENDING_BYTE_PAGE(pBt) ) continue; - pEntry = &pMap->aPtr[iPg - pMap->iFirst]; - - if( pEntry->eType==PTRMAP_FREEPAGE ){ - Pgno dummy; - rc = allocateBtreePage(pBt, &pFree, &dummy, iPg, BTALLOC_EXACT); - if( pFree ){ - assert( sqlite3PagerPageRefcount(pFree->pDbPage)==1 ); - sqlite3PcacheDrop(pFree->pDbPage); - } - assert( rc!=SQLITE_OK || dummy==iPg ); - }else if( pnCurrent ){ - btreeGetPage(pBt, iPg, &pPg, 0); - assert( sqlite3PagerIswriteable(pPg->pDbPage) ); - assert( sqlite3PagerPageRefcount(pPg->pDbPage)==1 ); - iNew = ++(*pnCurrent); - if( iNew==PENDING_BYTE_PAGE(pBt) ) iNew = ++(*pnCurrent); - rc = relocatePage(pBt, pPg, pEntry->eType, pEntry->parent, iNew, 1); - releasePageNotNull(pPg); - }else{ - rc = allocateBtreePage(pBt, &pFree, &iNew, iFirst-1, BTALLOC_LE); - assert( rc!=SQLITE_OK || iNeweType, pEntry->parent,iNew,1); - releasePage(pPg); - } - } - } - return rc; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** The b-tree handle passed as the only argument is about to commit an -** CONCURRENT transaction. At this point it is guaranteed that this is -** possible - the wal WRITER lock is held and it is known that there are -** no conflicts with committed transactions. -*/ -static int btreeFixUnlocked(Btree *p){ - BtShared *pBt = p->pBt; - MemPage *pPage1 = pBt->pPage1; - u8 *p1 = pPage1->aData; - Pager *pPager = pBt->pPager; - int rc = SQLITE_OK; - - /* If page 1 of the database is not writable, then no pages were allocated - ** or freed by this transaction. In this case no special handling is - ** required. Otherwise, if page 1 is dirty, proceed. */ - BtreePtrmap *pMap = pBt->pMap; - Pgno iTrunk = get4byte(&p1[32]); - Pgno nPage = btreePagecount(pBt); - u32 nFree = get4byte(&p1[36]); - - assert( pBt->pMap ); - rc = sqlite3PagerUpgradeSnapshot(pPager, pPage1->pDbPage); - assert( p1==pPage1->aData ); - - if( rc==SQLITE_OK ){ - Pgno nHPage = get4byte(&p1[28]); - Pgno nFin = nHPage; /* Size of db after transaction merge */ - - if( sqlite3PagerIswriteable(pPage1->pDbPage) ){ - Pgno iHTrunk = get4byte(&p1[32]); - u32 nHFree = get4byte(&p1[36]); - - btreePtrmapCheck(pBt, nPage); - - /* Attach the head database free list to the end of the current - ** transactions free-list (if any). 
*/ - if( iTrunk!=0 ){ - put4byte(&p1[36], nHFree + nFree); - put4byte(&p1[32], iTrunk); - while( iTrunk ){ - DbPage *pTrunk = sqlite3PagerLookup(pPager, iTrunk); - iTrunk = get4byte((u8*)pTrunk->pData); - if( iTrunk==0 ){ - put4byte((u8*)pTrunk->pData, iHTrunk); - } - sqlite3PagerUnref(pTrunk); - }; - } - - if( nHPage<(pMap->iFirst-1) ){ - /* The database consisted of (pMap->iFirst-1) pages when the current - ** concurrent transaction was opened. And an concurrent transaction may - ** not be executed on an auto-vacuum database - so the db should - ** not have shrunk since the transaction was opened. Therefore nHPage - ** should be set to (pMap->iFirst-1) or greater. */ - rc = SQLITE_CORRUPT_BKPT; - }else{ - /* The current transaction allocated pages pMap->iFirst through - ** nPage (inclusive) at the end of the database file. Meanwhile, - ** other transactions have allocated (iFirst..nHPage). So move - ** pages (iFirst..MIN(nPage,nHPage)) to (MAX(nPage,nHPage)+1). */ - Pgno iLast = MIN(nPage, nHPage); /* Last page to move */ - Pgno nCurrent; /* Current size of db */ - - nCurrent = MAX(nPage, nHPage); - pBt->nPage = nCurrent; - rc = btreeRelocateRange(pBt, pMap->iFirst, iLast, &nCurrent); - - /* There are now no collisions with the snapshot at the head of the - ** database file. So at this point it would be possible to write - ** the transaction out to disk. Before doing so though, attempt to - ** relocate some of the new pages to free locations within the body - ** of the database file (i.e. free-list entries). */ - if( rc==SQLITE_OK ){ - assert( nCurrent!=PENDING_BYTE_PAGE(pBt) ); - sqlite3PagerSetDbsize(pBt->pPager, nCurrent); - nFree = get4byte(&p1[36]); - nFin = nCurrent-nFree; - if( nCurrent>PENDING_BYTE_PAGE(pBt) && nFin<=PENDING_BYTE_PAGE(pBt) ){ - nFin--; - } - nFin = MAX(nFin, nHPage); - rc = btreeRelocateRange(pBt, nFin+1, nCurrent, 0); - } - - put4byte(&p1[28], nFin); - } - } - sqlite3PagerSetDbsize(pPager, nFin); - } - - return rc; -} -#else -# define btreeFixUnlocked(X) SQLITE_OK -#endif /* SQLITE_OMIT_CONCURRENT */ - /* ** This routine does the first phase of a two-phase commit. This routine ** causes a rollback journal to be created (if it does not already exist) ** and populated with enough information so that if a power loss occurs ** the database can be restored to its original state by playing back @@ -4583,14 +4145,12 @@ int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zSuperJrnl){ int rc = SQLITE_OK; if( p->inTrans==TRANS_WRITE ){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); - #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ - assert( ISCONCURRENT==0 ); rc = autoVacuumCommit(p); if( rc!=SQLITE_OK ){ sqlite3BtreeLeave(p); return rc; } @@ -4597,16 +4157,11 @@ } if( pBt->bDoTruncate ){ sqlite3PagerTruncateImage(pBt->pPager, pBt->nPage); } #endif - if( rc==SQLITE_OK && ISCONCURRENT && p->db->eConcurrent==CONCURRENT_OPEN ){ - rc = btreeFixUnlocked(p); - } - if( rc==SQLITE_OK ){ - rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zSuperJrnl, 0); - } + rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zSuperJrnl, 0); sqlite3BtreeLeave(p); } return rc; } @@ -4645,15 +4200,10 @@ ** pager if this call closed the only read or write transaction. */ p->inTrans = TRANS_NONE; unlockBtreeIfUnused(pBt); } - /* If this was an CONCURRENT transaction, delete the pBt->pMap object. - ** Also call PagerEndConcurrent() to ensure that the pager has discarded - ** the record of all pages read within the transaction. 
*/ - btreePtrmapDelete(pBt); - sqlite3PagerEndConcurrent(pBt->pPager); btreeIntegrity(p); } /* ** Commit the transaction currently in progress. @@ -4879,13 +4429,10 @@ ** an index greater than all savepoints created explicitly using ** SQL statements. It is illegal to open, release or rollback any ** such savepoints while the statement transaction savepoint is active. */ rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); - if( rc==SQLITE_OK ){ - rc = btreePtrmapBegin(pBt, iStatement); - } sqlite3BtreeLeave(p); return rc; } /* @@ -4905,11 +4452,10 @@ if( p && p->inTrans==TRANS_WRITE ){ BtShared *pBt = p->pBt; assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) ); sqlite3BtreeEnter(p); - btreePtrmapEnd(pBt, op, iSavepoint); if( op==SAVEPOINT_ROLLBACK ){ rc = saveAllCursors(pBt, 0, 0); } if( rc==SQLITE_OK ){ rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); @@ -5703,13 +5249,10 @@ ** the new child page does not match the flags field of the parent (i.e. ** if an intkey page appears to be the parent of a non-intkey page, or ** vice-versa). */ static int moveToChild(BtCursor *pCur, u32 newPgno){ - BtShared *pBt = pCur->pBt; - int rc; - assert( cursorOwnsBtShared(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPageiPage>=0 ); if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){ @@ -5719,16 +5262,12 @@ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); pCur->aiIdx[pCur->iPage] = pCur->ix; pCur->apPage[pCur->iPage] = pCur->pPage; pCur->ix = 0; pCur->iPage++; - rc = getAndInitPage(pBt, newPgno, &pCur->pPage, - pCur, pCur->curPagerFlags); - if( rc==SQLITE_OK ){ - setMempageRoot(pCur->pPage, pCur->pgnoRoot); - } - return rc; + return getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur, + pCur->curPagerFlags); } #ifdef SQLITE_DEBUG /* ** Page pParent is an internal (non-leaf) tree page. This function @@ -5836,11 +5375,10 @@ 0, pCur->curPagerFlags); if( rc!=SQLITE_OK ){ pCur->eState = CURSOR_INVALID; return rc; } - setMempageRoot(pCur->pPage, pCur->pgnoRoot); pCur->iPage = 0; pCur->curIntKey = pCur->pPage->intKey; } pRoot = pCur->pPage; assert( pRoot->pgno==pCur->pgnoRoot || CORRUPT_DB ); @@ -6539,18 +6077,11 @@ } } pPage = pCur->pPage; idx = ++pCur->ix; - if( !pPage->isInit || sqlite3FaultSim(412) ){ - /* The only known way for this to happen is for there to be a - ** recursive SQL function that does a DELETE operation as part of a - ** SELECT which deletes content out from under an active cursor - ** in a corrupt database file where the table being DELETE-ed from - ** has pages in common with the table being queried. See TH3 - ** module cov1/btree78.test testcase 220 (2018-06-08) for an - ** example. */ + if( NEVER(!pPage->isInit) || sqlite3FaultSim(412) ){ return SQLITE_CORRUPT_BKPT; } if( idx>=pPage->nCell ){ if( !pPage->leaf ){ @@ -6719,29 +6250,20 @@ MemPage *pTrunk = 0; MemPage *pPrevTrunk = 0; Pgno mxPage; /* Total size of the database file */ assert( sqlite3_mutex_held(pBt->mutex) ); - assert( eMode==BTALLOC_ANY || (nearby>0 && REQUIRE_PTRMAP ) ); + assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) ); pPage1 = pBt->pPage1; mxPage = btreePagecount(pBt); - /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36 - ** stores stores the total number of pages on the freelist. */ + /* EVIDENCE-OF: R-21003-45125 The 4-byte big-endian integer at offset 36 + ** stores the total number of pages on the freelist. 
*/ n = get4byte(&pPage1->aData[36]); testcase( n==mxPage-1 ); if( n>=mxPage ){ return SQLITE_CORRUPT_BKPT; } - - /* Ensure page 1 is writable. This function will either change the number - ** of pages in the free-list or the size of the database file. Since both - ** of these operations involve modifying page 1 header fields, page 1 - ** will definitely be written by this transaction. If this is an CONCURRENT - ** transaction, ensure the BtreePtrmap structure has been allocated. */ - rc = sqlite3PagerWrite(pPage1->pDbPage); - if( rc ) return rc; - if( n>0 ){ /* There are pages on the freelist. Reuse one of those pages. */ Pgno iTrunk; u8 searchList = 0; /* If the free-list must be searched for 'nearby' */ u32 nSearch = 0; /* Count of the number of search attempts */ @@ -6748,33 +6270,32 @@ /* If eMode==BTALLOC_EXACT and a query of the pointer-map ** shows that the page 'nearby' is somewhere on the free-list, then ** the entire-list will be searched for that page. */ +#ifndef SQLITE_OMIT_AUTOVACUUM if( eMode==BTALLOC_EXACT ){ - assert( ISAUTOVACUUM!=ISCONCURRENT ); - if( ISAUTOVACUUM ){ - if( nearby<=mxPage ){ - u8 eType; - assert( nearby>0 ); - assert( pBt->autoVacuum ); - rc = ptrmapGet(pBt, nearby, &eType, 0); - if( rc ) return rc; - if( eType==PTRMAP_FREEPAGE ){ - searchList = 1; - } - } - }else{ - searchList = 1; + if( nearby<=mxPage ){ + u8 eType; + assert( nearby>0 ); + assert( pBt->autoVacuum ); + rc = ptrmapGet(pBt, nearby, &eType, 0); + if( rc ) return rc; + if( eType==PTRMAP_FREEPAGE ){ + searchList = 1; + } } }else if( eMode==BTALLOC_LE ){ searchList = 1; } +#endif /* Decrement the free-list count by 1. Set iTrunk to the index of the ** first free-list trunk page. iPrevTrunk is initially 1. */ + rc = sqlite3PagerWrite(pPage1->pDbPage); + if( rc ) return rc; put4byte(&pPage1->aData[36], n-1); /* The code within this loop is run only once if the 'searchList' variable ** is not true. Otherwise, it runs once for each trunk-page on the ** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT) @@ -7078,11 +6599,11 @@ } /* If the database supports auto-vacuum, write an entry in the pointer-map ** to indicate that the page is free. */ - if( REQUIRE_PTRMAP ){ + if( ISAUTOVACUUM ){ ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc); if( rc ) goto freepage_out; } /* Now manipulate the actual database free-list structure. There are two @@ -7409,11 +6930,11 @@ PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt) ); } #endif rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0); - +#ifndef SQLITE_OMIT_AUTOVACUUM /* If the database supports auto-vacuum, and the second or subsequent ** overflow page is being allocated, add an entry to the pointer-map ** for that page now. ** ** If this is the first overflow page, then write a partial entry @@ -7420,17 +6941,18 @@ ** to the pointer-map. If we write nothing to this pointer-map slot, ** then the optimistic overflow chain processing in clearCell() ** may misinterpret the uninitialized values and delete the ** wrong pages from the database. */ - if( REQUIRE_PTRMAP && rc==SQLITE_OK ){ + if( pBt->autoVacuum && rc==SQLITE_OK ){ u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1); ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap, &rc); if( rc ){ releasePage(pOvfl); } } +#endif if( rc ){ releasePage(pToRelease); return rc; } @@ -7567,11 +7089,10 @@ ** balancing, and the dividers are adjacent and sorted. 
*/ assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */ assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */ }else{ - BtShared *pBt = pPage->pBt; int rc = sqlite3PagerWrite(pPage->pDbPage); if( rc!=SQLITE_OK ){ *pRC = rc; return; } @@ -7582,11 +7103,11 @@ if( rc ){ *pRC = rc; return; } /* The allocateSpace() routine guarantees the following properties ** if it returns successfully */ assert( idx >= 0 ); assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); - assert( idx+sz <= (int)pBt->usableSize ); + assert( idx+sz <= (int)pPage->pBt->usableSize ); pPage->nFree -= (u16)(2 + sz); if( iChild ){ /* In a corrupt database where an entry in the cell index section of ** a btree page has a value of 3 or less, the pCell value might point ** as many as 4 bytes in front of the start of the aData buffer for @@ -7602,16 +7123,18 @@ put2byte(pIns, idx); pPage->nCell++; /* increment the cell count */ if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++; assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB ); - if( REQUIRE_PTRMAP ){ +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pPage->pBt->autoVacuum ){ /* The cell may contain a pointer to an overflow page. If so, write ** the entry for the overflow page into the pointer map. */ ptrmapPutOvflPtr(pPage, pPage, pCell, pRC); } +#endif } } /* ** The following parameters determine how many adjacent pages get involved @@ -8147,11 +7670,11 @@ ** of the parent page are still manipulated by thh code below. ** That is Ok, at this point the parent page is guaranteed to ** be marked as dirty. Returning an error code will cause a ** rollback, undoing any changes made to the parent page. */ - if( REQUIRE_PTRMAP ){ + if( ISAUTOVACUUM ){ ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc); if( szCell>pNew->minLocal ){ ptrmapPutOvflPtr(pNew, pNew, pCell, &rc); } } @@ -8285,11 +7808,11 @@ } /* If this is an auto-vacuum database, update the pointer-map entries ** for any b-tree or overflow pages that pTo now contains the pointers to. */ - if( REQUIRE_PTRMAP ){ + if( ISAUTOVACUUM ){ *pRC = setChildPtrmaps(pTo); } } } @@ -8336,12 +7859,11 @@ static int balance_nonroot( MemPage *pParent, /* Parent page of siblings being balanced */ int iParentIdx, /* Index of "the page" in pParent */ u8 *aOvflSpace, /* page-size bytes of space for parent ovfl */ int isRoot, /* True if pParent is a root-page */ - int bBulk, /* True if this call is part of a bulk load */ - Pgno pgnoRoot /* Root page of b-tree being balanced */ + int bBulk /* True if this call is part of a bulk load */ ){ BtShared *pBt; /* The whole database */ int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */ int nNew = 0; /* Number of pages in apNew[] */ int nOld; /* Number of pages in apOld[] */ @@ -8432,11 +7954,10 @@ if( rc ){ memset(apOld, 0, (i)*sizeof(MemPage*)); goto balance_cleanup; } } - setMempageRoot(apOld[i], pgnoRoot); nMaxCells += apOld[i]->nCell + ArraySize(pParent->apOvfl); if( (i--)==0 ) break; if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){ apDiv[i] = pParent->apOvfl[0]; @@ -8773,11 +8294,11 @@ apNew[i] = pNew; nNew++; cntOld[i] = b.nCell; /* Set the pointer-map entry for the new sibling page. 
*/ - if( REQUIRE_PTRMAP ){ + if( ISAUTOVACUUM ){ ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc); if( rc!=SQLITE_OK ){ goto balance_cleanup; } } @@ -8866,11 +8387,11 @@ ** If the sibling pages are not leaves, then the pointer map entry ** associated with the right-child of each sibling may also need to be ** updated. This happens below, after the sibling pages have been ** populated, not here. */ - if( REQUIRE_PTRMAP ){ + if( ISAUTOVACUUM ){ MemPage *pOld; MemPage *pNew = pOld = apNew[0]; int cntOldNext = pNew->nCell + pNew->nOverflow; int iNew = 0; int iOld = 0; @@ -9059,11 +8580,11 @@ - apNew[0]->nCell*2) || rc!=SQLITE_OK ); copyNodeContent(apNew[0], pParent, &rc); freePage(apNew[0], &rc); - }else if( REQUIRE_PTRMAP && !leafCorrection ){ + }else if( ISAUTOVACUUM && !leafCorrection ){ /* Fix the pointer map entries associated with the right-child of each ** sibling page. All other pointer map entries have already been taken ** care of. */ for(i=0; iaData[8]); @@ -9142,11 +8663,11 @@ */ rc = sqlite3PagerWrite(pRoot->pDbPage); if( rc==SQLITE_OK ){ rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0); copyNodeContent(pRoot, pChild, &rc); - if( REQUIRE_PTRMAP ){ + if( ISAUTOVACUUM ){ ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc); } } if( rc ){ *ppChild = 0; @@ -9246,10 +8767,15 @@ assert( pCur->pPage->nOverflow ); } }else{ break; } + }else if( sqlite3PagerPageRefcount(pPage->pDbPage)>1 ){ + /* The page being written is not a root page, and there is currently + ** more than one reference to it. This only happens if the page is one + ** of its own ancestor pages. Corruption. */ + rc = SQLITE_CORRUPT_BKPT; }else{ MemPage * const pParent = pCur->apPage[iPage-1]; int const iIdx = pCur->aiIdx[iPage-1]; rc = sqlite3PagerWrite(pParent->pDbPage); @@ -9300,11 +8826,11 @@ ** copied either into the body of a database page or into the new ** pSpace buffer passed to the latter call to balance_nonroot(). */ u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize); rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1, - pCur->hints&BTREE_BULKLOAD, pCur->pgnoRoot); + pCur->hints&BTREE_BULKLOAD); if( pFree ){ /* If pFree is not NULL, it points to the pSpace buffer used ** by a previous call to balance_nonroot(). Its contents are ** now stored either on real database pages or within the ** new pSpace buffer, so it may be safely freed here. */ @@ -9669,11 +9195,11 @@ } BTREE_CLEAR_CELL(rc, pPage, oldCell, info); testcase( pCur->curFlags & BTCF_ValidOvfl ); invalidateOverflowCache(pCur); if( info.nSize==szNew && info.nLocal==info.nPayload - && (!REQUIRE_PTRMAP || szNewminLocal) + && (!ISAUTOVACUUM || szNewminLocal) ){ /* Overwrite the old cell with the new if they are the same size. ** We could also try to do this if the old cell is smaller, then add ** the leftover space to the free list. 
But experiments show that ** doing that is no faster then skipping this optimization and just @@ -10256,12 +9782,11 @@ */ static int clearDatabasePage( BtShared *pBt, /* The BTree that contains the table */ Pgno pgno, /* Page number to clear */ int freePageFlag, /* Deallocate page if true */ - i64 *pnChange, /* Add number of Cells freed to this counter */ - Pgno pgnoRoot + i64 *pnChange /* Add number of Cells freed to this counter */ ){ MemPage *pPage; int rc; unsigned char *pCell; int i; @@ -10272,11 +9797,10 @@ if( pgno>btreePagecount(pBt) ){ return SQLITE_CORRUPT_BKPT; } rc = getAndInitPage(pBt, pgno, &pPage, 0, 0); if( rc ) return rc; - setMempageRoot(pPage, pgnoRoot); if( (pBt->openFlags & BTREE_SINGLE)==0 && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1)) ){ rc = SQLITE_CORRUPT_BKPT; goto cleardatabasepage_out; @@ -10283,20 +9807,18 @@ } hdr = pPage->hdrOffset; for(i=0; inCell; i++){ pCell = findCell(pPage, i); if( !pPage->leaf ){ - rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange, pgnoRoot); + rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); if( rc ) goto cleardatabasepage_out; } BTREE_CLEAR_CELL(rc, pPage, pCell, info); if( rc ) goto cleardatabasepage_out; } if( !pPage->leaf ){ - rc = clearDatabasePage( - pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange, pgnoRoot - ); + rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange); if( rc ) goto cleardatabasepage_out; if( pPage->intKey ) pnChange = 0; } if( pnChange ){ testcase( !pPage->intKey ); @@ -10338,11 +9860,11 @@ ** is the root of a table b-tree - if it is not, the following call is ** a no-op). */ if( p->hasIncrblobCur ){ invalidateIncrblobCursors(p, (Pgno)iTable, 0, 1); } - rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange, (Pgno)iTable); + rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange); } sqlite3BtreeLeave(p); return rc; } @@ -11245,16 +10767,14 @@ #endif checkTreePage(&sCheck, aRoot[i], ¬Used, LARGEST_INT64); } pBt->db->flags = savedDbFlags; - /* Make sure every page in the file is referenced. Skip this if the - ** database is currently being written by a CONCURRENT transaction (it - ** may fail as pages that were part of the free-list when the transaction - ** was opened cannot be counted). */ + /* Make sure every page in the file is referenced + */ if( !bPartial ){ - for(i=1; ISCONCURRENT==0 && i<=sCheck.nPage && sCheck.mxErr; i++){ + for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ #ifdef SQLITE_OMIT_AUTOVACUUM if( getPageReferenced(&sCheck, i)==0 ){ checkAppendMsg(&sCheck, "Page %d is never used", i); } #else @@ -11503,11 +11023,11 @@ */ int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ BtShared *pBt = pBtree->pBt; int rc; /* Return code */ - assert( iVersion==1 || iVersion==2 || iVersion==3 ); + assert( iVersion==1 || iVersion==2 ); /* If setting the version fields to 1, do not automatically open the ** WAL connection, even if the version fields are currently set to 2. */ pBt->btsFlags &= ~BTS_NO_WAL; @@ -11551,103 +11071,18 @@ ** Return the size of the header added to each page by this module. */ int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); } /* -** This function is called to ensure that all locks required to commit the -** current write-transaction to the database file are held. If the db is -** in rollback mode, this means the EXCLUSIVE lock on the database file. -** -** Or, if this is an CONCURRENT transaction on a wal-mode database, the WRITER -** lock on the wal file. 
In this case this function also checks that the -** CONCURRENT transaction can be safely committed (does not commit with any -** other transaction committed since it was opened). -** -** SQLITE_OK is returned if successful. SQLITE_BUSY if the required locks -** cannot be obtained due to a conflicting lock. If the locks cannot be -** obtained for an CONCURRENT transaction due to a conflict with an already -** committed transaction, SQLITE_BUSY_SNAPSHOT is returned. Otherwise, if -** some other error (OOM, IO, etc.) occurs, the relevant SQLite error code -** is returned. +** If no transaction is active and the database is not a temp-db, clear +** the in-memory pager cache. */ -int sqlite3BtreeExclusiveLock(Btree *p){ - int rc; - Pgno pgno = 0; +void sqlite3BtreeClearCache(Btree *p){ BtShared *pBt = p->pBt; - assert( p->inTrans==TRANS_WRITE && pBt->pPage1 ); - sqlite3BtreeEnter(p); - rc = sqlite3PagerExclusiveLock(pBt->pPager, - (p->db->eConcurrent==CONCURRENT_SCHEMA) ? 0 : pBt->pPage1->pDbPage, - &pgno - ); -#ifdef SQLITE_OMIT_CONCURRENT - assert( pgno==0 ); -#else - if( rc==SQLITE_BUSY_SNAPSHOT && pgno ){ - PgHdr *pPg = 0; - int rc2 = sqlite3PagerGet(pBt->pPager, pgno, &pPg, 0); - if( rc2==SQLITE_OK ){ - int bWrite = -1; - const char *zObj = 0; - const char *zTab = 0; - char zContent[17]; - - if( pPg ){ - Pgno pgnoRoot = 0; - HashElem *pE; - Schema *pSchema; - u8 *aData = (u8*)sqlite3PagerGetData(pPg); - int i; - for(i=0; i<8; i++){ - static const char hexdigits[] = { - '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' - }; - zContent[i*2] = hexdigits[(aData[i] >> 4)]; - zContent[i*2+1] = hexdigits[(aData[i] & 0xF)]; - } - zContent[16] = '\0'; - - pgnoRoot = ((MemPage*)sqlite3PagerGetExtra(pPg))->pgnoRoot; - bWrite = sqlite3PagerIswriteable(pPg); - sqlite3PagerUnref(pPg); - - pSchema = sqlite3SchemaGet(p->db, p); - if( pSchema ){ - for(pE=sqliteHashFirst(&pSchema->tblHash); pE; pE=sqliteHashNext(pE)){ - Table *pTab = (Table *)sqliteHashData(pE); - if( pTab->tnum==(int)pgnoRoot ){ - zObj = pTab->zName; - zTab = 0; - }else{ - Index *pIdx; - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->tnum==(int)pgnoRoot ){ - zObj = pIdx->zName; - zTab = pTab->zName; - } - } - } - } - } - } - - sqlite3_log(SQLITE_OK, - "cannot commit CONCURRENT transaction " - "- conflict at page %d " - "(%s page; part of db %s %s%s%s; content=%s...)", - (int)pgno, - (bWrite==0?"read-only":(bWrite>0?"read/write":"unknown")), - (zTab ? "index" : "table"), - (zTab ? zTab : ""), (zTab ? "." : ""), (zObj ? zObj : "UNKNOWN"), - zContent - ); - } - } -#endif - sqlite3BtreeLeave(p); - return rc; + if( pBt->inTransaction==TRANS_NONE ){ + sqlite3PagerClearCache(pBt->pPager); + } } #if !defined(SQLITE_OMIT_SHARED_CACHE) /* ** Return true if the Btree passed as the only argument is sharable. 
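
Related to the read/write format-version checks in the btree.c hunks above (page1[18], page1[19], and the tightened assert in sqlite3BtreeSetVersion), the sketch below, not part of the patch, shows how those two header bytes are interpreted once the value 3 is no longer accepted: 1 means a legacy rollback-journal database and 2 means WAL, while a larger write version forces read-only access and a larger read version makes the open fail. The sample byte values are illustrative only.

/*
** Sketch of the file-format version bytes at header offsets 18 (write
** version) and 19 (read version).  The sample values are assumptions.
*/
#include <stdio.h>

static const char *fmtName(unsigned char v){
  if( v==1 ) return "legacy (rollback journal)";
  if( v==2 ) return "WAL";
  return "newer than this library understands";
}

int main(void){
  unsigned char writeVersion = 2;  /* would be read from offset 18 */
  unsigned char readVersion  = 2;  /* would be read from offset 19 */
  printf("write version %u: %s\n", writeVersion, fmtName(writeVersion));
  printf("read  version %u: %s\n", readVersion,  fmtName(readVersion));
  return 0;
}
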
@@ -11664,10 +11099,5 @@ int sqlite3BtreeConnectionCount(Btree *p){ testcase( p->sharable ); return p->pBt->nRef; } #endif - -void sqlite3BtreeIsSchemaVersion(Btree *p, u64 *a){ - p->pBt->aSchemaVersion = a; - sqlite3PagerIsSchemaVersion(p->pBt->pPager, a); -} Index: src/btree.h ================================================================== --- src/btree.h +++ src/btree.h @@ -348,12 +348,10 @@ sqlite3_uint64 sqlite3BtreeSeekCount(Btree*); #else # define sqlite3BtreeSeekCount(X) 0 #endif -int sqlite3BtreeExclusiveLock(Btree *pBt); - #ifndef NDEBUG int sqlite3BtreeCursorIsValid(BtCursor*); #endif int sqlite3BtreeCursorIsValidNN(BtCursor*); @@ -367,10 +365,12 @@ #ifndef SQLITE_OMIT_WAL int sqlite3BtreeCheckpoint(Btree*, int, int *, int *); #endif int sqlite3BtreeTransferRow(BtCursor*, BtCursor*, i64); + +void sqlite3BtreeClearCache(Btree*); /* ** If we are not using shared cache, then there is no need to ** use mutexes to access the BtShared structures. So make the ** Enter and Leave procedures no-ops. @@ -408,8 +408,7 @@ # define sqlite3BtreeHoldsMutex(X) 1 # define sqlite3BtreeHoldsAllMutexes(X) 1 # define sqlite3SchemaMutexHeld(X,Y,Z) 1 #endif -void sqlite3BtreeIsSchemaVersion(Btree *p, u64 *a); #endif /* SQLITE_BTREE_H */ Index: src/btreeInt.h ================================================================== --- src/btreeInt.h +++ src/btreeInt.h @@ -230,11 +230,10 @@ /* Forward declarations */ typedef struct MemPage MemPage; typedef struct BtLock BtLock; typedef struct CellInfo CellInfo; -typedef struct BtreePtrmap BtreePtrmap; /* ** This is a magic string that appears at the beginning of every ** SQLite database in order to identify the file as a real database. ** @@ -274,13 +273,10 @@ struct MemPage { u8 isInit; /* True if previously initialized. MUST BE FIRST! */ u8 intKey; /* True if table b-trees. False for index b-trees */ u8 intKeyLeaf; /* True if the leaf of an intKey table */ Pgno pgno; /* Page number for this page */ -#ifndef SQLITE_OMIT_CONCURRENT - Pgno pgnoRoot; /* Root page of b-tree that this page belongs to */ -#endif /* Only the first 8 bytes (above) are zeroed by pager.c when a new page ** is allocated. All fields that follow must be initialized before use */ u8 leaf; /* True if a leaf page */ u8 hdrOffset; /* 100 for page 1. 0 otherwise */ u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */ @@ -458,15 +454,11 @@ BtShared *pNext; /* Next on a list of sharable BtShared structs */ BtLock *pLock; /* List of locks held on this shared-btree struct */ Btree *pWriter; /* Btree with currently open write transaction */ #endif u8 *pTmpSpace; /* Temp space sufficient to hold a single cell */ -#ifndef SQLITE_OMIT_CONCURRENT - BtreePtrmap *pMap; -#endif int nPreformatSize; /* Size of last cell written by TransferRow() */ - u64 *aSchemaVersion; }; /* ** Allowed values for BtShared.btsFlags */ @@ -679,23 +671,16 @@ ** if the database supports auto-vacuum or not. Because it is used ** within an expression that is an argument to another macro ** (sqliteMallocRaw), it is not possible to use conditional compilation. ** So, this macro is defined instead. 
*/ -#ifdef SQLITE_OMIT_AUTOVACUUM -#define ISAUTOVACUUM 0 +#ifndef SQLITE_OMIT_AUTOVACUUM +#define ISAUTOVACUUM (pBt->autoVacuum) #else -#define ISAUTOVACUUM (pBt->autoVacuum) +#define ISAUTOVACUUM 0 #endif -#ifdef SQLITE_OMIT_CONCURRENT -# define ISCONCURRENT 0 -#else -# define ISCONCURRENT (pBt->pMap!=0) -#endif - -#define REQUIRE_PTRMAP (ISAUTOVACUUM || ISCONCURRENT) /* ** This structure is passed around through all the sanity checking routines ** in order to keep track of some global state information. ** Index: src/build.c ================================================================== --- src/build.c +++ src/build.c @@ -338,10 +338,43 @@ int sqlite3UserAuthTable(const char *zTable){ return sqlite3_stricmp(zTable, "sqlite_user")==0; } #endif +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +/* +** If this database connection was opened with the SQLITE_OPEN_SHARED_SCHEMA +** flag specified, then ensure that the database schema for database iDb +** is loaded. Either by obtaining a Schema object from the schema-pool, or +** by reading the contents of the sqlite_master table. Unless it is NULL, +** the location indicated by parameter pbUnload is set to 1 if a shared-schema +** is loaded. +** +** If the database handle was not opened with SQLITE_OPEN_SHARED_SCHEMA, or +** if the schema for database iDb is already loaded, this function is a no-op. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. If +** an error code is returned, (*pzErr) may be set to point to a buffer +** containing an error message. It is the responsibility of the caller to +** eventually free this buffer using sqlite3_free(). +*/ +int sqlite3SchemaLoad(sqlite3 *db, int iDb, int *pbUnload, char **pzErr){ + int rc = SQLITE_OK; + if( IsSharedSchema(db) + && DbHasProperty(db, iDb, DB_SchemaLoaded)==0 + && (db->init.busy==0 || (iDb!=1 && db->init.iDb==1)) + ){ + struct sqlite3InitInfo sv = db->init; + memset(&db->init, 0, sizeof(struct sqlite3InitInfo)); + rc = sqlite3InitOne(db, iDb, pzErr, 0); + db->init = sv; + if( pbUnload && rc==SQLITE_OK && iDb!=1 ) *pbUnload = 1; + } + return rc; +} +#endif + /* ** Locate the in-memory structure that describes a particular database ** table given the name of that table and (optionally) the name of the ** database containing the table. Return NULL if not found. ** @@ -363,63 +396,45 @@ ** exists */ if( db->auth.authLevelnDb; i++){ - if( sqlite3StrICmp(zDatabase, db->aDb[i].zDbSName)==0 ) break; - } - if( i>=db->nDb ){ - /* No match against the official names. But always match "main" - ** to schema 0 as a legacy fallback. 
*/ - if( sqlite3StrICmp(zDatabase,"main")==0 ){ - i = 0; - }else{ - return 0; - } - } - p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash, zName); - if( p==0 && sqlite3StrNICmp(zName, "sqlite_", 7)==0 ){ - if( i==1 ){ - if( sqlite3StrICmp(zName+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 - || sqlite3StrICmp(zName+7, &PREFERRED_SCHEMA_TABLE[7])==0 - || sqlite3StrICmp(zName+7, &LEGACY_SCHEMA_TABLE[7])==0 - ){ - p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash, - LEGACY_TEMP_SCHEMA_TABLE); - } - }else{ - if( sqlite3StrICmp(zName+7, &PREFERRED_SCHEMA_TABLE[7])==0 ){ - p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash, - LEGACY_SCHEMA_TABLE); - } - } - } - }else{ - /* Match against TEMP first */ - p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash, zName); - if( p ) return p; - /* The main database is second */ - p = sqlite3HashFind(&db->aDb[0].pSchema->tblHash, zName); - if( p ) return p; - /* Attached databases are in order of attachment */ - for(i=2; inDb; i++){ - assert( sqlite3SchemaMutexHeld(db, i, 0) ); - p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash, zName); - if( p ) break; - } - if( p==0 && sqlite3StrNICmp(zName, "sqlite_", 7)==0 ){ - if( sqlite3StrICmp(zName+7, &PREFERRED_SCHEMA_TABLE[7])==0 ){ - p = sqlite3HashFind(&db->aDb[0].pSchema->tblHash, LEGACY_SCHEMA_TABLE); - }else if( sqlite3StrICmp(zName+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){ - p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash, - LEGACY_TEMP_SCHEMA_TABLE); - } - } - } - return p; + while(1){ + for(i=OMIT_TEMPDB; inDb; i++){ + int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ + if( zDatabase==0 || sqlite3DbIsNamed(db, j, zDatabase) ){ + int bUnload = 0; + assert( sqlite3SchemaMutexHeld(db, j, 0) ); + if( IsSharedSchema(db) ){ + Parse *pParse = db->pParse; + if( pParse && pParse->nErr==0 ){ + pParse->rc = sqlite3SchemaLoad(db, j, &bUnload, &pParse->zErrMsg); + if( pParse->rc ) pParse->nErr++; + } + } + p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName); + if( p ) return p; + if( bUnload ){ + sqlite3SchemaRelease(db, j); + } + } + } + /* Not found. If the name we were looking for was temp.sqlite_master + ** then change the name to sqlite_temp_master and try again. */ + if( sqlite3StrICmp(zName, PREFERRED_SCHEMA_TABLE)==0 ){ + zName = LEGACY_SCHEMA_TABLE; + continue; + } + if( sqlite3StrICmp(zName, PREFERRED_TEMP_SCHEMA_TABLE)==0 ){ + zName = LEGACY_TEMP_SCHEMA_TABLE; + continue; + } + if( sqlite3StrICmp(zName, LEGACY_SCHEMA_TABLE)!=0 ) break; + if( sqlite3_stricmp(zDatabase, db->aDb[1].zDbSName)!=0 ) break; + zName = LEGACY_TEMP_SCHEMA_TABLE; + } + return 0; } /* ** Locate the in-memory structure that describes a particular database ** table given the name of that table and (optionally) the name of the @@ -440,10 +455,11 @@ sqlite3 *db = pParse->db; /* Read the database schema. If an error occurs, leave an error message ** and code in pParse and return NULL. */ if( (db->mDbFlags & DBFLAG_SchemaKnownOk)==0 + && !IsSharedSchema(db) && SQLITE_OK!=sqlite3ReadSchema(pParse) ){ return 0; } @@ -451,36 +467,46 @@ if( p==0 ){ #ifndef SQLITE_OMIT_VIRTUALTABLE /* If zName is the not the name of a table in the schema created using ** CREATE, then check to see if it is the name of an virtual table that ** can be an eponymous virtual table. 
*/ - if( pParse->disableVtab==0 && db->init.busy==0 ){ + if( (pParse->prepFlags & SQLITE_PREPARE_NO_VTAB)==0 && db->init.busy==0 ){ Module *pMod = (Module*)sqlite3HashFind(&db->aModule, zName); if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){ pMod = sqlite3PragmaVtabRegister(db, zName); } - if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){ - testcase( pMod->pEpoTab==0 ); - return pMod->pEpoTab; + if( pMod ){ + if( IsSharedSchema(db) && pParse->nErr==0 ){ + int bDummy = 0; + pParse->rc = sqlite3SchemaLoad(db, 0, &bDummy, &pParse->zErrMsg); + if( pParse->rc ) pParse->nErr++; + (void)bDummy; + } + if( sqlite3VtabEponymousTableInit(pParse, pMod) ){ + Table *pEpoTab = pMod->pEpoTab; + if( pEpoTab ){ + assert( IsSharedSchema(db)||pEpoTab->pSchema==db->aDb[0].pSchema ); + pEpoTab->pSchema = db->aDb[0].pSchema; /* For SHARED_SCHEMA mode */ + } + return pEpoTab; + } } } #endif if( flags & LOCATE_NOERR ) return 0; pParse->checkSchema = 1; - }else if( IsVirtual(p) && pParse->disableVtab ){ + }else if( IsVirtual(p) && (pParse->prepFlags & SQLITE_PREPARE_NO_VTAB)!=0 ){ p = 0; } - if( p==0 ){ + if( p==0 && (!IsSharedSchema(db) || pParse->nErr==0) ){ const char *zMsg = flags & LOCATE_VIEW ? "no such view" : "no such table"; if( zDbase ){ sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName); }else{ sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName); } - }else{ - assert( HasRowid(p) || p->iPKey<0 ); } return p; } @@ -645,15 +671,14 @@ assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); DbSetProperty(db, iDb, DB_ResetWanted); DbSetProperty(db, 1, DB_ResetWanted); db->mDbFlags &= ~DBFLAG_SchemaKnownOk; } - if( db->nSchemaLock==0 ){ for(i=0; inDb; i++){ if( DbHasProperty(db, i, DB_ResetWanted) ){ - sqlite3SchemaClear(db->aDb[i].pSchema); + sqlite3SchemaClearOrDisconnect(db, i); } } } } @@ -662,20 +687,21 @@ ** "main" and "temp") for a single database connection. */ void sqlite3ResetAllSchemasOfConnection(sqlite3 *db){ int i; sqlite3BtreeEnterAll(db); - for(i=0; inDb; i++){ + for(i=0; inDb; i=(i?i+1:2)){ Db *pDb = &db->aDb[i]; if( pDb->pSchema ){ if( db->nSchemaLock==0 ){ - sqlite3SchemaClear(pDb->pSchema); + sqlite3SchemaClearOrDisconnect(db, i); }else{ DbSetProperty(db, i, DB_ResetWanted); } } } + sqlite3SchemaClear(db->aDb[1].pSchema); db->mDbFlags &= ~(DBFLAG_SchemaChange|DBFLAG_SchemaKnownOk); sqlite3VtabUnlockList(db); sqlite3BtreeLeaveAll(db); if( db->nSchemaLock==0 ){ sqlite3CollapseDatabaseArray(db); @@ -1279,11 +1305,11 @@ ** and types will be used, so there is no need to test for namespace ** collisions. */ if( !IN_SPECIAL_PARSE ){ char *zDb = db->aDb[iDb].zDbSName; - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + if( !IsSharedSchema(db) && SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto begin_table_error; } pTable = sqlite3FindTable(db, zName, zDb); if( pTable ){ if( !noErr ){ @@ -2274,11 +2300,12 @@ } /* Recompute the colNotIdxed field of the Index. ** ** colNotIdxed is a bitmask that has a 0 bit representing each indexed -** columns that are within the first 63 columns of the table. The +** columns that are within the first 63 columns of the table and a 1 for +** all other bits (all columns that are not in the index). The ** high-order bit of colNotIdxed is always 1. All unindexed columns ** of the table have a 1. 
** ** 2019-10-24: For the purpose of this computation, virtual columns are ** not considered to be covered by the index, even if they are in the @@ -2302,11 +2329,11 @@ testcase( x==BMS-2 ); if( xcolNotIdxed = ~m; - assert( (pIdx->colNotIdxed>>63)==1 ); + assert( (pIdx->colNotIdxed>>63)==1 ); /* See note-20221022-a */ } /* ** This routine runs at the end of parsing a CREATE TABLE statement that ** has a WITHOUT ROWID clause. The job of this routine is to convert both @@ -2904,11 +2931,11 @@ } } #endif /* Reparse everything to update our internal data structures */ - sqlite3VdbeAddParseSchemaOp(v, iDb, + sqlite3VdbeAddParseSchemaOp(pParse, iDb, sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName),0); } /* Add the table to the in-memory representation of the database. */ @@ -3463,11 +3490,11 @@ if( db->mallocFailed ){ goto exit_drop_table; } assert( pParse->nErr==0 ); assert( pName->nSrc==1 ); - if( sqlite3ReadSchema(pParse) ) goto exit_drop_table; + if( !IsSharedSchema(db) && sqlite3ReadSchema(pParse) ) goto exit_drop_table; if( noErr ) db->suppressErr++; assert( isView==0 || isView==LOCATE_VIEW ); pTab = sqlite3LocateTableItem(pParse, isView, &pName->a[0]); if( noErr ) db->suppressErr--; @@ -3478,10 +3505,11 @@ } goto exit_drop_table; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDb>=0 && iDbnDb ); + sqlite3SchemaWritable(pParse, iDb); /* If pTab is a virtual table, call ViewGetColumnNames() to ensure ** it is initialized. */ if( IsVirtual(pTab) && sqlite3ViewGetColumnNames(pParse, pTab) ){ @@ -3932,11 +3960,11 @@ } assert( db->mallocFailed==0 ); if( IN_DECLARE_VTAB && idxType!=SQLITE_IDXTYPE_PRIMARYKEY ){ goto exit_create_index; } - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + if( !IsSharedSchema(db) && SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto exit_create_index; } if( sqlite3HasExplicitNulls(pParse, pList) ){ goto exit_create_index; } @@ -4190,10 +4218,11 @@ pList = 0; } j = XN_EXPR; pIndex->aiColumn[i] = XN_EXPR; pIndex->uniqNotNull = 0; + pIndex->bHasExpr = 1; }else{ j = pCExpr->iColumn; assert( j<=0x7fff ); if( j<0 ){ j = pTab->iPKey; @@ -4201,10 +4230,11 @@ if( pTab->aCol[j].notNull==0 ){ pIndex->uniqNotNull = 0; } if( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ){ pIndex->bHasVCol = 1; + pIndex->bHasExpr = 1; } } pIndex->aiColumn[i] = (i16)j; } zColl = 0; @@ -4430,11 +4460,11 @@ ** to invalidate all pre-compiled statements. 
*/ if( pTblName ){ sqlite3RefillIndex(pParse, pIndex, iMem); sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddParseSchemaOp(v, iDb, + sqlite3VdbeAddParseSchemaOp(pParse, iDb, sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName), 0); sqlite3VdbeAddOp2(v, OP_Expire, 0, 1); } sqlite3VdbeJumpHere(v, (int)pIndex->tnum); @@ -4580,10 +4610,11 @@ sqlite3ErrorMsg(pParse, "index associated with UNIQUE " "or PRIMARY KEY constraint cannot be dropped", 0); goto exit_drop_index; } iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); + sqlite3SchemaWritable(pParse, iDb); #ifndef SQLITE_OMIT_AUTHORIZATION { int code = SQLITE_DROP_INDEX; Table *pTab = pIndex->pTable; const char *zDb = db->aDb[iDb].zDbSName; @@ -5116,11 +5147,11 @@ if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ){ return; } v = sqlite3GetVdbe(pParse); if( !v ) return; - if( type==TK_IMMEDIATE || type==TK_EXCLUSIVE ){ + if( type!=TK_DEFERRED ){ for(i=0; inDb; i++){ int eTxnType; Btree *pBt = db->aDb[i].pBt; if( pBt && sqlite3BtreeIsReadonly(pBt) ){ eTxnType = 0; /* Read txn */ @@ -5131,11 +5162,11 @@ } sqlite3VdbeAddOp2(v, OP_Transaction, i, eTxnType); sqlite3VdbeUsesBtree(v, i); } } - sqlite3VdbeAddOp3(v, OP_AutoCommit, 0, 0, (type==TK_CONCURRENT)); + sqlite3VdbeAddOp0(v, OP_AutoCommit); } /* ** Generate VDBE code for a COMMIT or ROLLBACK statement. ** Code for ROLLBACK is generated if eType==TK_ROLLBACK. Otherwise Index: src/callback.c ================================================================== --- src/callback.c +++ src/callback.c @@ -14,10 +14,67 @@ ** of user defined functions and collation sequences. */ #include "sqliteInt.h" +/* +** Connections opened with the SQLITE_OPEN_SHARED_SCHEMA flag specified +** may use SchemaPool objects for any database that is not the temp db +** (iDb==1). For such databases (type "struct Db") there are three states +** the Schema/SchemaPool object may be in. +** +** 1) pSPool==0, pSchema points to an empty object allocated by +** sqlite3_malloc(). DB_SchemaLoaded flag is clear. +** +** 2) pSPool!=0, pSchema points to a populated object owned by the +** SchemaPool. DB_SchemaLoaded flag is set. +** +** 3) pSPool!=0, pSchema points to the SchemaPool's static object +** (SchemaPool.sSchema). 
+*/ +struct SchemaPool { + int nRef; /* Number of pointers to this object */ + int nDelete; /* Schema objects deleted by ReleaseAll() */ + u64 cksum; /* Checksum for this Schema contents */ + Schema *pSchema; /* Linked list of Schema objects */ + Schema sSchema; /* The single dummy schema object */ + SchemaPool *pNext; /* Next element in schemaPoolList */ +}; + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +#ifdef SQLITE_DEBUG +static void assert_schema_state_ok(sqlite3 *db){ + if( IsSharedSchema(db) && db->eOpenState!=SQLITE_STATE_ZOMBIE ){ + int i; + for(i=0; inDb; i++){ + if( i!=1 ){ + Db *pDb = &db->aDb[i]; + Btree *pBt = pDb->pBt; + if( pBt==0 ) continue; + assert( sqlite3BtreeSchema(pBt, 0, 0)==0 ); + assert( pDb->pSchema ); + if( pDb->pSPool ){ + if( DbHasProperty(db, i, DB_SchemaLoaded)==0 ){ + assert( pDb->pSchema->tblHash.count==0 ); + assert( pDb->pSchema==&pDb->pSPool->sSchema ); + }else{ + assert( pDb->pSchema!=&pDb->pSPool->sSchema ); + } + }else{ + assert( DbHasProperty(db, i, DB_SchemaLoaded)==0 ); + assert( pDb->pSchema->tblHash.count==0 ); + assert( pDb->pSchema!=&pDb->pSPool->sSchema ); + } + } + } + } +} +#else +# define assert_schema_state_ok(x) +#endif +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ + /* ** Invoke the 'collation needed' callback to request a collation sequence ** in the encoding enc of name zName, length nName. */ static void callCollNeeded(sqlite3 *db, int enc, const char *zName){ @@ -514,19 +571,321 @@ } pSchema->schemaFlags &= ~(DB_SchemaLoaded|DB_ResetWanted); } /* -** Find and return the schema associated with a BTree. Create -** a new one if necessary. +** If this database was opened with the SQLITE_OPEN_SHARED_SCHEMA flag +** and iDb!=1, then disconnect from the schema-pool associated with +** database iDb. Otherwise, clear the Schema object belonging to +** database iDb. +** +** If an OOM error occurs while disconnecting from a schema-pool, +** the db->mallocFailed flag is set. +*/ +void sqlite3SchemaClearOrDisconnect(sqlite3 *db, int iDb){ + Db *pDb = &db->aDb[iDb]; +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( IsSharedSchema(db) && iDb!=1 && pDb->pSPool ){ + sqlite3SchemaDisconnect(db, iDb, 1); + }else +#endif + { + sqlite3SchemaClear(pDb->pSchema); + } +} + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +/* +** Global linked list of SchemaPool objects. Read and write access must +** be protected by the SQLITE_MUTEX_STATIC_MASTER mutex. +*/ +static SchemaPool *SQLITE_WSD schemaPoolList = 0; + +#ifdef SQLITE_TEST +/* +** Return a pointer to the head of the linked list of SchemaPool objects. +** This is used by the virtual table in file test_schemapool.c. +*/ +SchemaPool *sqlite3SchemaPoolList(void){ return schemaPoolList; } +#endif + +/* +** Database handle db was opened with the SHARED_SCHEMA flag, and database +** iDb is currently connected to a schema-pool. When this function is called, +** (*pnByte) is set to nInit plus the amount of memory used to store a +** single instance of the Schema objects managed by the schema-pool. +** This function adjusts (*pnByte) sot hat it is set to nInit plus +** (nSchema/nRef) of the amount of memory used by a single Schema object, +** where nSchema is the number of Schema objects allocated by this pool, +** and nRef is the number of connections to the schema-pool. 
+*/ +void sqlite3SchemaAdjustUsed(sqlite3 *db, int iDb, int nInit, int *pnByte){ + SchemaPool *pSPool = db->aDb[iDb].pSPool; + int nSchema = 0; + Schema *p; + sqlite3_mutex_enter( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + for(p=pSPool->pSchema; p; p=p->pNext){ + nSchema++; + } + *pnByte = nInit + ((*pnByte - nInit) * nSchema) / pSPool->nRef; + sqlite3_mutex_leave( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); +} + +/* +** Check that the schema of db iDb is writable (either because it is the +** temp db schema or because the db handle was opened without +** SQLITE_OPEN_SHARED_SCHEMA). If so, do nothing. Otherwise, leave an +** error in the Parse object. +*/ +void sqlite3SchemaWritable(Parse *pParse, int iDb){ + if( iDb!=1 && IsSharedSchema(pParse->db) && IN_DECLARE_VTAB==0 ){ + sqlite3ErrorMsg(pParse, "attempt to modify read-only schema"); + } +} + +/* +** The schema object passed as the only argument was allocated using +** sqlite3_malloc() and then populated using the usual mechanism. This +** function frees both the Schema object and its contents. +*/ +static void schemaDelete(Schema *pSchema){ + sqlite3SchemaClear((void*)pSchema); + sqlite3_free(pSchema); +} + +/* +** When this function is called, the database connection Db must be +** using a schema-pool (Db.pSPool!=0) and must currently have Db.pSchema +** set to point to a populated schema object checked out from the +** schema-pool. It is also assumed that the STATIC_MASTER mutex is held. +** This function returns the Schema object to the schema-pool and sets +** Db.pSchema to point to the schema-pool's static, empty, Schema object. +*/ +static void schemaRelease(sqlite3 *db, Db *pDb){ + Schema *pRelease = pDb->pSchema; + SchemaPool *pSPool = pDb->pSPool; + + assert( pDb->pSchema->iGeneration==pSPool->sSchema.iGeneration ); + pDb->pSchema = &pSPool->sSchema; + + assert( pDb->pSPool && pRelease ); + assert( pRelease->schemaFlags & DB_SchemaLoaded ); + assert( (pDb->pSchema->schemaFlags & DB_SchemaLoaded)==0 ); + assert( sqlite3_mutex_held(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER)) ); + + /* If the DBFLAG_FreeSchema flag is set and the database connection holds + ** at least one other copy of the schema being released, delete it instead + ** of returning it to the schema-pool. */ + if( db->mDbFlags & DBFLAG_FreeSchema ){ + int i; + for(i=0; inDb; i++){ + Db *p = &db->aDb[i]; + if( p!=pDb && p->pSchema!=&pSPool->sSchema && pDb->pSPool==p->pSPool ){ + pSPool->nDelete++; + schemaDelete(pRelease); + return; + } + } + } + + pRelease->pNext = pDb->pSPool->pSchema; + pDb->pSPool->pSchema = pRelease; +} + +/* +** The schema for database iDb of database handle db, which was opened +** with SQLITE_OPEN_SHARED_SCHEMA, has just been parsed. This function either +** finds a matching SchemaPool object on the global list (schemaPoolList) or +** else allocates a new one and sets the Db.pSPool variable accordingly. +** +** SQLITE_OK is returned if no error occurs, or an SQLite error code +** (SQLITE_NOMEM) otherwise. +*/ +int sqlite3SchemaConnect(sqlite3 *db, int iDb, u64 cksum){ + Schema *pSchema = db->aDb[iDb].pSchema; + SchemaPool *p; + + assert( pSchema && iDb!=1 && db->aDb[iDb].pSPool==0 ); + + sqlite3_mutex_enter( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + + /* Search for a matching SchemaPool object */ + for(p=schemaPoolList; p; p=p->pNext){ + if( p->cksum==cksum && p->sSchema.schema_cookie==pSchema->schema_cookie ){ + break; + } + } + if( !p ){ + /* No SchemaPool object found. Allocate a new one. 
*/ + p = (SchemaPool*)sqlite3_malloc(sizeof(SchemaPool)); + if( p ){ + memset(p, 0, sizeof(SchemaPool)); + p->cksum = cksum; + p->pNext = schemaPoolList; + schemaPoolList = p; + + p->sSchema.schema_cookie = pSchema->schema_cookie; + p->sSchema.iGeneration = pSchema->iGeneration; + p->sSchema.file_format = pSchema->file_format; + p->sSchema.enc = pSchema->enc; + p->sSchema.cache_size = pSchema->cache_size; + } + } + + if( p ) p->nRef++; + + /* If the SchemaPool contains one or more free schemas at the moment, + ** delete one of them. */ + if( p && p->pSchema ){ + Schema *pDel = p->pSchema; + p->pSchema = pDel->pNext; + schemaDelete(pDel); + } + + sqlite3_mutex_leave( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + + db->aDb[iDb].pSPool = p; + return (p ? SQLITE_OK : SQLITE_NOMEM); +} + +/* +** If parameter iDb is 1 (the temp db), or if connection handle db was not +** opened with the SQLITE_OPEN_SHARED_SCHEMA flag, this function is a no-op. +** Otherwise, it disconnects from the schema-pool associated with database +** iDb, assuming it is connected. +** +** If parameter bNew is true, then Db.pSchema is set to point to a new, empty, +** Schema object obtained from sqlite3_malloc(). Or, if bNew is false, then +** Db.pSchema is set to NULL before returning. +** +** If the bNew parameter is true, then this function may allocate memory. +** If the allocation attempt fails, then SQLITE_NOMEM is returned and the +** schema-pool is not disconnected from. Or, if no OOM error occurs, +** SQLITE_OK is returned. +*/ +int sqlite3SchemaDisconnect(sqlite3 *db, int iDb, int bNew){ + int rc = SQLITE_OK; + if( IsSharedSchema(db) ){ + Db *pDb = &db->aDb[iDb]; + SchemaPool *pSPool = pDb->pSPool; + assert_schema_state_ok(db); + assert( pDb->pSchema ); + + if( pSPool==0 ){ + assert( pDb->pVTable==0 ); + assert( bNew==0 ); + schemaDelete(pDb->pSchema); + pDb->pSchema = 0; + }else{ + VTable *p; + VTable *pNext; + for(p=pDb->pVTable; p; p=pNext){ + pNext = p->pNext; + sqlite3VtabUnlock(p); + } + pDb->pVTable = 0; + sqlite3_mutex_enter( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + if( DbHasProperty(db, iDb, DB_SchemaLoaded) ){ + schemaRelease(db, pDb); + } + if( bNew ){ + Schema *pNew = sqlite3SchemaGet(db, 0); + if( pNew==0 ){ + rc = SQLITE_NOMEM; + }else{ + pDb->pSchema = pNew; + } + } + if( rc==SQLITE_OK ){ + assert( pSPool->nRef>=1 ); + pDb->pSPool = 0; + pSPool->nRef--; + if( pSPool->nRef<=0 ){ + SchemaPool **pp; + while( pSPool->pSchema ){ + Schema *pNext = pSPool->pSchema->pNext; + schemaDelete(pSPool->pSchema); + pSPool->pSchema = pNext; + } + for(pp=&schemaPoolList; (*pp)!=pSPool; pp=&((*pp)->pNext)); + *pp = pSPool->pNext; + sqlite3_free(pSPool); + } + } + sqlite3_mutex_leave( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + } + } + return rc; +} + +/* +** Extract and return a pointer to a schema object from the SchemaPool passed +** as the only argument, if one is available. If one is not available, return +** NULL. +*/ +Schema *sqlite3SchemaExtract(SchemaPool *pSPool){ + Schema *pRet = 0; + sqlite3_mutex_enter( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + if( pSPool->pSchema ){ + pRet = pSPool->pSchema; + pSPool->pSchema = pRet->pNext; + pRet->pNext = 0; + } + sqlite3_mutex_leave( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + return pRet; +} + +/* +** Return all sharable schemas held by database handle db back to their +** respective schema-pools. Db.pSchema variables are left pointing to +** the static, empty, Schema object owned by each schema-pool. 
+*/ +void sqlite3SchemaReleaseAll(sqlite3 *db){ + int i; + assert_schema_state_ok(db); + sqlite3_mutex_enter( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + for(i=0; inDb; i++){ + if( i!=1 ){ + Db *pDb = &db->aDb[i]; + if( pDb->pSPool && DbHasProperty(db,i,DB_SchemaLoaded) ){ + schemaRelease(db, pDb); + } + } + } + db->mDbFlags &= ~DBFLAG_FreeSchema; + sqlite3_mutex_leave( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); +} + +/* +** Release any sharable schema held by connection iDb of database handle +** db. Db.pSchema is left pointing to the static, empty, Schema object +** owned by the schema-pool. +*/ +void sqlite3SchemaRelease(sqlite3 *db, int iDb){ + Db *pDb = &db->aDb[iDb]; + assert( iDb!=1 ); + assert_schema_state_ok(db); + sqlite3_mutex_enter( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); + schemaRelease(db, pDb); + sqlite3_mutex_leave( sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER) ); +} + +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ + +/* +** In most cases, this function finds and returns the schema associated +** with BTree handle pBt, creating a new one if necessary. However, if +** the database handle was opened with the SQLITE_OPEN_SHARED_SCHEMA flag +** specified, a new, empty, Schema object in memory obtained by +** sqlite3_malloc() is always returned. */ Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ - Schema * p; - if( pBt ){ - p = (Schema *)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaClear); + Schema *p; + if( pBt && IsSharedSchema(db)==0 ){ + p = (Schema*)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaClear); }else{ - p = (Schema *)sqlite3DbMallocZero(0, sizeof(Schema)); + p = (Schema*)sqlite3DbMallocZero(0, sizeof(Schema)); } if( !p ){ sqlite3OomFault(db); }else if ( 0==p->file_format ){ sqlite3HashInit(&p->tblHash); Index: src/ctime.c ================================================================== --- src/ctime.c +++ src/ctime.c @@ -26,11 +26,11 @@ /* ** Include the configuration header output by 'configure' if we're using the ** autoconf-based build */ #if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) -#include "config.h" +#include "sqlite_cfg.h" #define SQLITECONFIG_H 1 #endif /* These macros are provided to "stringify" the value of the define ** for those options in which the value is meaningful. */ @@ -191,10 +191,13 @@ "DISABLE_PAGECACHE_OVERFLOW_STATS", #endif #ifdef SQLITE_DISABLE_SKIPAHEAD_DISTINCT "DISABLE_SKIPAHEAD_DISTINCT", #endif +#ifdef SQLITE_DQS + "DQS=" CTIMEOPT_VAL(SQLITE_DQS), +#endif #ifdef SQLITE_ENABLE_8_3_NAMES "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES), #endif #ifdef SQLITE_ENABLE_API_ARMOR "ENABLE_API_ARMOR", @@ -307,10 +310,13 @@ #ifdef SQLITE_ENABLE_RTREE "ENABLE_RTREE", #endif #ifdef SQLITE_ENABLE_SESSION "ENABLE_SESSION", +#endif +#if SQLITE_ENABLE_SHARED_SCHEMA + "ENABLE_SHARED_SCHEMA", #endif #ifdef SQLITE_ENABLE_SNAPSHOT "ENABLE_SNAPSHOT", #endif #ifdef SQLITE_ENABLE_SORTER_REFERENCES Index: src/dbpage.c ================================================================== --- src/dbpage.c +++ src/dbpage.c @@ -272,25 +272,31 @@ sqlite3_result_int(ctx, pCsr->pgno); break; } case 1: { /* data */ DbPage *pDbPage = 0; - rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0); - if( rc==SQLITE_OK ){ - sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage, - SQLITE_TRANSIENT); + if( pCsr->pgno==((PENDING_BYTE/pCsr->szPage)+1) ){ + /* The pending byte page. Assume it is zeroed out. 
Attempting to + ** request this page from the page is an SQLITE_CORRUPT error. */ + sqlite3_result_zeroblob(ctx, pCsr->szPage); + }else{ + rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0); + if( rc==SQLITE_OK ){ + sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage, + SQLITE_TRANSIENT); + } + sqlite3PagerUnref(pDbPage); } - sqlite3PagerUnref(pDbPage); break; } default: { /* schema */ sqlite3 *db = sqlite3_context_db_handle(ctx); sqlite3_result_text(ctx, db->aDb[pCsr->iDb].zDbSName, -1, SQLITE_STATIC); break; } } - return SQLITE_OK; + return rc; } static int dbpageRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ DbpageCursor *pCsr = (DbpageCursor *)pCursor; *pRowid = pCsr->pgno; @@ -346,15 +352,16 @@ goto update_fail; } pPager = sqlite3BtreePager(pBt); rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pDbPage, 0); if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(pDbPage); - if( rc==SQLITE_OK ){ - memcpy(sqlite3PagerGetData(pDbPage), - sqlite3_value_blob(argv[3]), - szPage); + const void *pData = sqlite3_value_blob(argv[3]); + assert( pData!=0 || pTab->db->mallocFailed ); + if( pData + && (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK + ){ + memcpy(sqlite3PagerGetData(pDbPage), pData, szPage); } } sqlite3PagerUnref(pDbPage); return rc; Index: src/expr.c ================================================================== --- src/expr.c +++ src/expr.c @@ -53,13 +53,12 @@ } op = pExpr->op; if( op==TK_REGISTER ) op = pExpr->op2; if( op==TK_COLUMN || op==TK_AGG_COLUMN ){ assert( ExprUseYTab(pExpr) ); - if( pExpr->y.pTab ){ - return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); - } + assert( pExpr->y.pTab!=0 ); + return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); } if( op==TK_SELECT ){ assert( ExprUseXSelect(pExpr) ); assert( pExpr->x.pSelect!=0 ); assert( pExpr->x.pSelect->pEList!=0 ); @@ -173,21 +172,18 @@ const Expr *p = pExpr; while( p ){ int op = p->op; if( op==TK_REGISTER ) op = p->op2; if( op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER ){ + int j; assert( ExprUseYTab(p) ); - if( p->y.pTab!=0 ){ - /* op==TK_REGISTER && p->y.pTab!=0 happens when pExpr was originally - ** a TK_COLUMN but was previously evaluated and cached in a register */ - int j = p->iColumn; - if( j>=0 ){ - const char *zColl = sqlite3ColumnColl(&p->y.pTab->aCol[j]); - pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); - } - break; - } + assert( p->y.pTab!=0 ); + if( (j = p->iColumn)>=0 ){ + const char *zColl = sqlite3ColumnColl(&p->y.pTab->aCol[j]); + pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); + } + break; } if( op==TK_CAST || op==TK_UPLUS ){ p = p->pLeft; continue; } @@ -3788,14 +3784,11 @@ int iCol, /* Index of the column to extract */ int regOut /* Extract the value into this register */ ){ Column *pCol; assert( v!=0 ); - if( pTab==0 ){ - sqlite3VdbeAddOp3(v, OP_Column, iTabCur, iCol, regOut); - return; - } + assert( pTab!=0 ); if( iCol<0 || iCol==pTab->iPKey ){ sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut); VdbeComment((v, "%s.rowid", pTab->zName)); }else{ int op; @@ -4041,10 +4034,57 @@ #endif /* !defined(SQLITE_UNTESTABLE) */ } return target; } +/* +** Check to see if pExpr is one of the indexed expressions on pParse->pIdxExpr. +** If it is, then resolve the expression by reading from the index and +** return the register into which the value has been read. If pExpr is +** not an indexed expression, then return negative. 
+*/ +static SQLITE_NOINLINE int sqlite3IndexedExprLookup( + Parse *pParse, /* The parsing context */ + Expr *pExpr, /* The expression to potentially bypass */ + int target /* Where to store the result of the expression */ +){ + IndexedExpr *p; + Vdbe *v; + for(p=pParse->pIdxExpr; p; p=p->pIENext){ + int iDataCur = p->iDataCur; + if( iDataCur<0 ) continue; + if( pParse->iSelfTab ){ + if( p->iDataCur!=pParse->iSelfTab-1 ) continue; + iDataCur = -1; + } + if( sqlite3ExprCompare(0, pExpr, p->pExpr, iDataCur)!=0 ) continue; + v = pParse->pVdbe; + assert( v!=0 ); + if( p->bMaybeNullRow ){ + /* If the index is on a NULL row due to an outer join, then we + ** cannot extract the value from the index. The value must be + ** computed using the original expression. */ + int addr = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp3(v, OP_IfNullRow, p->iIdxCur, addr+3, target); + VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target); + VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol)); + sqlite3VdbeGoto(v, 0); + p = pParse->pIdxExpr; + pParse->pIdxExpr = 0; + sqlite3ExprCode(pParse, pExpr, target); + pParse->pIdxExpr = p; + sqlite3VdbeJumpHere(v, addr+2); + }else{ + sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target); + VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol)); + } + return target; + } + return -1; /* Not found */ +} + /* ** Generate code into the current Vdbe to evaluate the given ** expression. Attempt to store the results in register "target". ** Return the register where results are stored. @@ -4069,10 +4109,15 @@ assert( v!=0 ); expr_code_doover: if( pExpr==0 ){ op = TK_NULL; + }else if( pParse->pIdxExpr!=0 + && !ExprHasProperty(pExpr, EP_Leaf) + && (r1 = sqlite3IndexedExprLookup(pParse, pExpr, target))>=0 + ){ + return r1; }else{ assert( !ExprHasVVAProperty(pExpr,EP_Immutable) ); op = pExpr->op; } switch( op ){ @@ -4114,15 +4159,12 @@ ** constant. 
*/ int aff; iReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft,target); assert( ExprUseYTab(pExpr) ); - if( pExpr->y.pTab ){ - aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); - }else{ - aff = pExpr->affExpr; - } + assert( pExpr->y.pTab!=0 ); + aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); if( aff>SQLITE_AFF_BLOB ){ static const char zAff[] = "B\000C\000D\000E"; assert( SQLITE_AFF_BLOB=='A' ); assert( SQLITE_AFF_TEXT=='B' ); sqlite3VdbeAddOp4(v, OP_Affinity, iReg, 1, 0, @@ -4180,16 +4222,14 @@ ** in the index refer to the table to which the index belongs */ iTab = pParse->iSelfTab - 1; } } assert( ExprUseYTab(pExpr) ); + assert( pExpr->y.pTab!=0 ); iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab, pExpr->iColumn, iTab, target, pExpr->op2); - if( pExpr->y.pTab==0 && pExpr->affExpr==SQLITE_AFF_REAL ){ - sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); - } return iReg; } case TK_INTEGER: { codeInteger(pParse, pExpr, 0, target); return target; @@ -5239,10 +5279,11 @@ case TK_ISNULL: case TK_NOTNULL: { assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL ); assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeTypeofColumn(v, r1); sqlite3VdbeAddOp2(v, op, r1, dest); VdbeCoverageIf(v, op==TK_ISNULL); VdbeCoverageIf(v, op==TK_NOTNULL); testcase( regFree1==0 ); break; @@ -5413,10 +5454,11 @@ break; } case TK_ISNULL: case TK_NOTNULL: { r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeTypeofColumn(v, r1); sqlite3VdbeAddOp2(v, op, r1, dest); testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL); testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL); testcase( regFree1==0 ); break; @@ -5566,11 +5608,17 @@ return 1; } if( pB->op==TK_COLLATE && sqlite3ExprCompare(pParse, pA,pB->pLeft,iTab)<2 ){ return 1; } - return 2; + if( pA->op==TK_AGG_COLUMN && pB->op==TK_COLUMN + && pB->iTable<0 && pA->iTable==iTab + ){ + /* fall through */ + }else{ + return 2; + } } assert( !ExprHasProperty(pA, EP_IntValue) ); assert( !ExprHasProperty(pB, EP_IntValue) ); if( pA->u.zToken ){ if( pA->op==TK_FUNCTION || pA->op==TK_AGG_FUNCTION ){ @@ -5868,14 +5916,14 @@ /* The y.pTab=0 assignment in wherecode.c always happens after the ** impliesNotNullRow() test */ assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) ); assert( pRight->op!=TK_COLUMN || ExprUseYTab(pRight) ); if( (pLeft->op==TK_COLUMN - && pLeft->y.pTab!=0 + && ALWAYS(pLeft->y.pTab!=0) && IsVirtual(pLeft->y.pTab)) || (pRight->op==TK_COLUMN - && pRight->y.pTab!=0 + && ALWAYS(pRight->y.pTab!=0) && IsVirtual(pRight->y.pTab)) ){ return WRC_Prune; } /* no break */ deliberate_fall_through Index: src/func.c ================================================================== --- src/func.c +++ src/func.c @@ -521,13 +521,12 @@ sqlite3_context *context, int NotUsed, sqlite3_value **NotUsed2 ){ sqlite_int64 r; - sqlite3 *db = sqlite3_context_db_handle(context); UNUSED_PARAMETER2(NotUsed, NotUsed2); - sqlite3FastRandomness(&db->sPrng, sizeof(r), &r); + sqlite3_randomness(sizeof(r), &r); if( r<0 ){ /* We need to prevent a random number of 0x8000000000000000 ** (or -9223372036854775808) since when you do abs() of that ** number of you get the same value back again. 
To do this ** in a way that is testable, mask the sign bit off of negative @@ -549,20 +548,19 @@ int argc, sqlite3_value **argv ){ sqlite3_int64 n; unsigned char *p; - sqlite3 *db = sqlite3_context_db_handle(context); assert( argc==1 ); UNUSED_PARAMETER(argc); n = sqlite3_value_int64(argv[0]); if( n<1 ){ n = 1; } p = contextMalloc(context, n); if( p ){ - sqlite3FastRandomness(&db->sPrng, n, p); + sqlite3_randomness(n, p); sqlite3_result_blob(context, (char*)p, n, sqlite3_free); } } /* @@ -740,11 +738,11 @@ ** ** For a case-insensitive search, set variable cx to be the same as ** c but in the other case and search the input string for either ** c or cx. */ - if( c<=0x80 ){ + if( c<0x80 ){ char zStop[3]; int bMatch; if( noCase ){ zStop[0] = sqlite3Toupper(c); zStop[1] = sqlite3Tolower(c); @@ -823,19 +821,31 @@ /* ** The sqlite3_strglob() interface. Return 0 on a match (like strcmp()) and ** non-zero if there is no match. */ int sqlite3_strglob(const char *zGlobPattern, const char *zString){ - return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '['); + if( zString==0 ){ + return zGlobPattern!=0; + }else if( zGlobPattern==0 ){ + return 1; + }else { + return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '['); + } } /* ** The sqlite3_strlike() interface. Return 0 on a match and non-zero for ** a miss - like strcmp(). */ int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){ - return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc); + if( zStr==0 ){ + return zPattern!=0; + }else if( zPattern==0 ){ + return 1; + }else{ + return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc); + } } /* ** Count the number of times that the LIKE operator (or GLOB which is ** just a variation of LIKE) gets called. This is used for testing Index: src/global.c ================================================================== --- src/global.c +++ src/global.c @@ -373,14 +373,10 @@ ** sqlite3StdTypeLen[] The length (in bytes) of each entry ** in sqlite3StdType[]. ** ** sqlite3StdTypeAffinity[] The affinity associated with each entry ** in sqlite3StdType[]. -** -** sqlite3StdTypeMap[] The type value (as returned from -** sqlite3_column_type() or sqlite3_value_type()) -** for each entry in sqlite3StdType[]. */ const unsigned char sqlite3StdTypeLen[] = { 3, 4, 3, 7, 4, 4 }; const char sqlite3StdTypeAffinity[] = { SQLITE_AFF_NUMERIC, SQLITE_AFF_BLOB, @@ -387,21 +383,13 @@ SQLITE_AFF_INTEGER, SQLITE_AFF_INTEGER, SQLITE_AFF_REAL, SQLITE_AFF_TEXT }; -const char sqlite3StdTypeMap[] = { - 0, - SQLITE_BLOB, - SQLITE_INTEGER, - SQLITE_INTEGER, - SQLITE_FLOAT, - SQLITE_TEXT -}; const char *sqlite3StdType[] = { "ANY", "BLOB", "INT", "INTEGER", "REAL", "TEXT" }; Index: src/insert.c ================================================================== --- src/insert.c +++ src/insert.c @@ -94,10 +94,11 @@ aff = pTab->aCol[x].affinity; }else if( x==XN_ROWID ){ aff = SQLITE_AFF_INTEGER; }else{ assert( x==XN_EXPR ); + assert( pIdx->bHasExpr ); assert( pIdx->aColExpr!=0 ); aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr); } if( affSQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC; @@ -106,10 +107,32 @@ pIdx->zColAff[n] = 0; } return pIdx->zColAff; } + +/* +** Compute an affinity string for a table. Space is obtained +** from sqlite3DbMalloc(). The caller is responsible for freeing +** the space when done. 
+*/ +char *sqlite3TableAffinityStr(sqlite3 *db, const Table *pTab){ + char *zColAff; + zColAff = (char *)sqlite3DbMallocRaw(db, pTab->nCol+1); + if( zColAff ){ + int i, j; + for(i=j=0; inCol; i++){ + if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){ + zColAff[j++] = pTab->aCol[i].affinity; + } + } + do{ + zColAff[j--] = 0; + }while( j>=0 && zColAff[j]<=SQLITE_AFF_BLOB ); + } + return zColAff; +} /* ** Make changes to the evolving bytecode to do affinity transformations ** of values that are about to be gathered into a row for table pTab. ** @@ -148,11 +171,11 @@ ** register set as the OP_MakeRecord. If iReg>0 then register iReg is ** the first of a series of registers that will form the new record. ** Apply the type checking to that array of registers. */ void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){ - int i, j; + int i; char *zColAff; if( pTab->tabFlags & TF_Strict ){ if( iReg==0 ){ /* Move the previous opcode (which should be OP_MakeRecord) forward ** by one slot and insert a new OP_TypeCheck where the current @@ -171,26 +194,15 @@ } return; } zColAff = pTab->zColAff; if( zColAff==0 ){ - sqlite3 *db = sqlite3VdbeDb(v); - zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1); + zColAff = sqlite3TableAffinityStr(0, pTab); if( !zColAff ){ - sqlite3OomFault(db); + sqlite3OomFault(sqlite3VdbeDb(v)); return; } - - for(i=j=0; inCol; i++){ - assert( pTab->aCol[i].affinity!=0 || sqlite3VdbeParser(v)->nErr>0 ); - if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){ - zColAff[j++] = pTab->aCol[i].affinity; - } - } - do{ - zColAff[j--] = 0; - }while( j>=0 && zColAff[j]<=SQLITE_AFF_BLOB ); pTab->zColAff = zColAff; } assert( zColAff!=0 ); i = sqlite3Strlen30NN(zColAff); if( i ){ Index: src/loadext.c ================================================================== --- src/loadext.c +++ src/loadext.c @@ -506,11 +506,13 @@ sqlite3_serialize, #else 0, 0, #endif - sqlite3_db_name + sqlite3_db_name, + /* Version 3.40.0 and later */ + sqlite3_value_encoding }; /* True if x is the directory separator character */ #if SQLITE_OS_WIN Index: src/main.c ================================================================== --- src/main.c +++ src/main.c @@ -1165,10 +1165,21 @@ for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){ Table *pTab = (Table *)sqliteHashData(p); if( IsVirtual(pTab) ) sqlite3VtabDisconnect(db, pTab); } } +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( IsSharedSchema(db) && i!=1 ){ + VTable *pVTable; + VTable *pNext; + for(pVTable=db->aDb[i].pVTable; pVTable; pVTable=pNext){ + pNext = pVTable->pNext; + sqlite3VtabUnlock(pVTable); + } + db->aDb[i].pVTable = 0; + } +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ } for(p=sqliteHashFirst(&db->aModule); p; p=sqliteHashNext(p)){ Module *pMod = (Module *)sqliteHashData(p); if( pMod->pEpoTab ){ sqlite3VtabDisconnect(db, pMod->pEpoTab); @@ -1333,10 +1344,11 @@ struct Db *pDb = &db->aDb[j]; if( pDb->pBt ){ sqlite3BtreeClose(pDb->pBt); pDb->pBt = 0; if( j!=1 ){ + (void)sqlite3SchemaDisconnect(db, j, 0); pDb->pSchema = 0; } } } /* Clear the TEMP schema separately and last */ @@ -2116,11 +2128,11 @@ #endif sqlite3_mutex_enter(db->mutex); rc = sqlite3FindFunction(db, zName, nArg, SQLITE_UTF8, 0)!=0; sqlite3_mutex_leave(db->mutex); if( rc ) return SQLITE_OK; - zCopy = sqlite3_mprintf(zName); + zCopy = sqlite3_mprintf("%s", zName); if( zCopy==0 ) return SQLITE_NOMEM; return sqlite3_create_function_v2(db, zName, nArg, SQLITE_UTF8, zCopy, sqlite3InvalidFunction, 0, 0, sqlite3_free); } @@ -3243,11 +3255,11 @@ db->nDb = 2; db->eOpenState = 
SQLITE_STATE_BUSY; db->aDb = db->aDbStatic; db->lookaside.bDisable = 1; db->lookaside.sz = 0; - sqlite3FastPrngInit(&db->sPrng); + assert( sizeof(db->aLimit)==sizeof(aHardLimit) ); memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit)); db->aLimit[SQLITE_LIMIT_WORKER_THREADS] = SQLITE_DEFAULT_WORKER_THREADS; db->autoCommit = 1; db->nextAutovac = -1; @@ -3350,10 +3362,23 @@ createCollation(db, "RTRIM", SQLITE_UTF8, 0, rtrimCollFunc, 0); if( db->mallocFailed ){ goto opendb_out; } +#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) + /* Process magic filenames ":localStorage:" and ":sessionStorage:" */ + if( zFilename && zFilename[0]==':' ){ + if( strcmp(zFilename, ":localStorage:")==0 ){ + zFilename = "file:local?vfs=kvvfs"; + flags |= SQLITE_OPEN_URI; + }else if( strcmp(zFilename, ":sessionStorage:")==0 ){ + zFilename = "file:session?vfs=kvvfs"; + flags |= SQLITE_OPEN_URI; + } + } +#endif /* SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) */ + /* Parse the filename/URI argument ** ** Only allow sensible combinations of bits in the flags argument. ** Throw an error if any non-sense combination is used. If we ** do not block illegal combinations here, it could trigger @@ -3380,10 +3405,16 @@ if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); sqlite3ErrorWithMsg(db, rc, zErrMsg ? "%s" : 0, zErrMsg); sqlite3_free(zErrMsg); goto opendb_out; } + assert( db->pVfs!=0 ); +#if SQLITE_OS_KV || defined(SQLITE_OS_KV_OPTIONAL) + if( sqlite3_stricmp(db->pVfs->zName, "kvvfs")==0 ){ + db->temp_store = 2; + } +#endif /* Open the backend database driver */ rc = sqlite3BtreeOpen(db->pVfs, zOpen, db, &db->aDb[0].pBt, 0, flags | SQLITE_OPEN_MAIN_DB); if( rc!=SQLITE_OK ){ @@ -3757,38 +3788,53 @@ char const **pzCollSeq, /* OUTPUT: Collation sequence name */ int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ int *pPrimaryKey, /* OUTPUT: True if column part of PK */ int *pAutoinc /* OUTPUT: True if column is auto-increment */ ){ - int rc; + int rc = SQLITE_OK; char *zErrMsg = 0; Table *pTab = 0; Column *pCol = 0; int iCol = 0; char const *zDataType = 0; char const *zCollSeq = 0; int notnull = 0; int primarykey = 0; int autoinc = 0; - + int bUnlock; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zTableName==0 ){ return SQLITE_MISUSE_BKPT; } #endif /* Ensure the database schema has been loaded */ sqlite3_mutex_enter(db->mutex); + bUnlock = sqlite3LockReusableSchema(db); sqlite3BtreeEnterAll(db); - rc = sqlite3Init(db, &zErrMsg); - if( SQLITE_OK!=rc ){ - goto error_out; + if( IsSharedSchema(db)==0 ){ + rc = sqlite3Init(db, &zErrMsg); } /* Locate the table in question */ - pTab = sqlite3FindTable(db, zTableName, zDbName); + if( rc==SQLITE_OK ){ +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + Parse sParse; /* Fake Parse object for FindTable */ + Parse *pSaved = db->pParse; + memset(&sParse, 0, sizeof(sParse)); + db->pParse = &sParse; +#endif + pTab = sqlite3FindTable(db, zTableName, zDbName); +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + sqlite3_free(sParse.zErrMsg); + rc = sParse.rc; + db->pParse = pSaved; +#endif + } + if( SQLITE_OK!=rc ) goto error_out; + if( !pTab || IsView(pTab) ){ pTab = 0; goto error_out; } @@ -3857,10 +3903,11 @@ rc = SQLITE_ERROR; } sqlite3ErrorWithMsg(db, rc, (zErrMsg?"%s":0), zErrMsg); sqlite3DbFree(db, zErrMsg); rc = sqlite3ApiExit(db, rc); + sqlite3UnlockReusableSchema(db, bUnlock); sqlite3_mutex_leave(db->mutex); return rc; } /* @@ -3928,10 +3975,13 @@ int iNew = *(int*)pArg; *(int*)pArg = sqlite3BtreeGetRequestedReserve(pBtree); if( iNew>=0 && iNew<=255 ){ 
sqlite3BtreeSetPageSize(pBtree, 0, iNew, 0); } + rc = SQLITE_OK; + }else if( op==SQLITE_FCNTL_RESET_CACHE ){ + sqlite3BtreeClearCache(pBtree); rc = SQLITE_OK; }else{ int nSave = db->busyHandler.nBusy; rc = sqlite3OsFileControl(fd, op, pArg); db->busyHandler.nBusy = nSave; @@ -4489,11 +4539,11 @@ ** functions. ** ** Memory layout must be compatible with that generated by the pager ** and expected by sqlite3_uri_parameter() and databaseName(). */ -char *sqlite3_create_filename( +const char *sqlite3_create_filename( const char *zDatabase, const char *zJournal, const char *zWal, int nParam, const char **azParam @@ -4525,14 +4575,14 @@ /* ** Free memory obtained from sqlite3_create_filename(). It is a severe ** error to call this routine with any parameter other than a pointer ** previously obtained from sqlite3_create_filename() or a NULL pointer. */ -void sqlite3_free_filename(char *p){ +void sqlite3_free_filename(const char *p){ if( p==0 ) return; - p = (char*)databaseName(p); - sqlite3_free(p - 4); + p = databaseName(p); + sqlite3_free((char*)p - 4); } /* ** This is a utility routine, useful to VFS implementations, that checks @@ -4779,12 +4829,12 @@ ** Recover as many snapshots as possible from the wal file associated with ** schema zDb of database db. */ int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){ int rc = SQLITE_ERROR; - int iDb; #ifndef SQLITE_OMIT_WAL + int iDb; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ return SQLITE_MISUSE_BKPT; } @@ -4813,39 +4863,10 @@ void sqlite3_snapshot_free(sqlite3_snapshot *pSnapshot){ sqlite3_free(pSnapshot); } #endif /* SQLITE_ENABLE_SNAPSHOT */ -SQLITE_EXPERIMENTAL int sqlite3_wal_info( - sqlite3 *db, const char *zDb, - unsigned int *pnPrior, unsigned int *pnFrame -){ - int rc = SQLITE_OK; - -#ifndef SQLITE_OMIT_WAL - Btree *pBt; - int iDb; - -#ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) ){ - return SQLITE_MISUSE_BKPT; - } -#endif - - sqlite3_mutex_enter(db->mutex); - iDb = sqlite3FindDbName(db, zDb); - if( iDb<0 ){ - return SQLITE_ERROR; - } - pBt = db->aDb[iDb].pBt; - rc = sqlite3PagerWalInfo(sqlite3BtreePager(pBt), pnPrior, pnFrame); - sqlite3_mutex_leave(db->mutex); -#endif /* SQLITE_OMIT_WAL */ - - return rc; -} - #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* ** Given the name of a compile-time option, return true if that option ** was used and false if not. ** Index: src/mem5.c ================================================================== --- src/mem5.c +++ src/mem5.c @@ -422,13 +422,17 @@ int iFullSz; if( n<=mem5.szAtom*2 ){ if( n<=mem5.szAtom ) return mem5.szAtom; return mem5.szAtom*2; } - if( n>0x40000000 ) return 0; + if( n>0x10000000 ){ + if( n>0x40000000 ) return 0; + if( n>0x20000000 ) return 0x40000000; + return 0x20000000; + } for(iFullSz=mem5.szAtom*8; iFullSz=n ) return iFullSz/2; + if( (iFullSz/2)>=(i64)n ) return iFullSz/2; return iFullSz; } /* ** Return the ceiling of the logarithm base 2 of iValue. 
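Note (reviewer annotation, not part of the patch): the mem5.c hunk above hardens the memsys5 round-up routine against allocation requests large enough to overflow the int arithmetic in the loop that grows iFullSz: anything above 0x40000000 bytes is refused, and requests between 0x10000000 and 0x40000000 are mapped straight to the next power of two instead of being fed to the loop. The sketch below restates that rounding rule in isolation; the function name roundupPow2 and the atom size of 256 are assumptions for illustration, not code from the patch.

/* Round nByte up to the next power of two, refusing any request that
** could overflow 32-bit signed arithmetic.  szAtom is the allocator's
** minimum block size and is assumed to be a power of two. */
#include <stdio.h>

static int roundupPow2(long long nByte, int szAtom){
  int iFullSz;
  if( nByte<=szAtom ) return szAtom;
  if( nByte>0x40000000 ) return 0;            /* more than 1GiB: refuse      */
  if( nByte>0x20000000 ) return 0x40000000;   /* keep the loop overflow-free */
  if( nByte>0x10000000 ) return 0x20000000;
  for(iFullSz=szAtom; iFullSz<nByte; iFullSz*=2){}
  return iFullSz;
}

int main(void){
  printf("%d\n", roundupPow2(1000, 256));        /* 1024                    */
  printf("%d\n", roundupPow2(0x18000000, 256));  /* 0x20000000 = 536870912  */
  printf("%d\n", roundupPow2(0x50000000, 256));  /* 0 (refused)             */
  return 0;
}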
Index: src/memdb.c ================================================================== --- src/memdb.c +++ src/memdb.c @@ -107,10 +107,11 @@ static int memdbWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); static int memdbTruncate(sqlite3_file*, sqlite3_int64 size); static int memdbSync(sqlite3_file*, int flags); static int memdbFileSize(sqlite3_file*, sqlite3_int64 *pSize); static int memdbLock(sqlite3_file*, int); +static int memdbUnlock(sqlite3_file*, int); /* static int memdbCheckReservedLock(sqlite3_file*, int *pResOut);// not used */ static int memdbFileControl(sqlite3_file*, int op, void *pArg); /* static int memdbSectorSize(sqlite3_file*); // not used */ static int memdbDeviceCharacteristics(sqlite3_file*); static int memdbFetch(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); @@ -165,11 +166,11 @@ memdbWrite, /* xWrite */ memdbTruncate, /* xTruncate */ memdbSync, /* xSync */ memdbFileSize, /* xFileSize */ memdbLock, /* xLock */ - memdbLock, /* xUnlock - same as xLock in this case */ + memdbUnlock, /* xUnlock */ 0, /* memdbCheckReservedLock, */ /* xCheckReservedLock */ memdbFileControl, /* xFileControl */ 0, /* memdbSectorSize,*/ /* xSectorSize */ memdbDeviceCharacteristics, /* xDeviceCharacteristics */ 0, /* xShmMap */ @@ -366,44 +367,86 @@ */ static int memdbLock(sqlite3_file *pFile, int eLock){ MemFile *pThis = (MemFile*)pFile; MemStore *p = pThis->pStore; int rc = SQLITE_OK; - if( eLock==pThis->eLock ) return SQLITE_OK; + if( eLock<=pThis->eLock ) return SQLITE_OK; memdbEnter(p); - if( eLock>SQLITE_LOCK_SHARED ){ - if( p->mFlags & SQLITE_DESERIALIZE_READONLY ){ - rc = SQLITE_READONLY; - }else if( pThis->eLock<=SQLITE_LOCK_SHARED ){ - if( p->nWrLock ){ - rc = SQLITE_BUSY; - }else{ - p->nWrLock = 1; - } - } - }else if( eLock==SQLITE_LOCK_SHARED ){ - if( pThis->eLock > SQLITE_LOCK_SHARED ){ - assert( p->nWrLock==1 ); - p->nWrLock = 0; - }else if( p->nWrLock ){ - rc = SQLITE_BUSY; - }else{ - p->nRdLock++; - } - }else{ - assert( eLock==SQLITE_LOCK_NONE ); - if( pThis->eLock>SQLITE_LOCK_SHARED ){ - assert( p->nWrLock==1 ); - p->nWrLock = 0; - } - assert( p->nRdLock>0 ); - p->nRdLock--; + + assert( p->nWrLock==0 || p->nWrLock==1 ); + assert( pThis->eLock<=SQLITE_LOCK_SHARED || p->nWrLock==1 ); + assert( pThis->eLock==SQLITE_LOCK_NONE || p->nRdLock>=1 ); + + if( eLock>SQLITE_LOCK_SHARED && (p->mFlags & SQLITE_DESERIALIZE_READONLY) ){ + rc = SQLITE_READONLY; + }else{ + switch( eLock ){ + case SQLITE_LOCK_SHARED: { + assert( pThis->eLock==SQLITE_LOCK_NONE ); + if( p->nWrLock>0 ){ + rc = SQLITE_BUSY; + }else{ + p->nRdLock++; + } + break; + }; + + case SQLITE_LOCK_RESERVED: + case SQLITE_LOCK_PENDING: { + assert( pThis->eLock>=SQLITE_LOCK_SHARED ); + if( ALWAYS(pThis->eLock==SQLITE_LOCK_SHARED) ){ + if( p->nWrLock>0 ){ + rc = SQLITE_BUSY; + }else{ + p->nWrLock = 1; + } + } + break; + } + + default: { + assert( eLock==SQLITE_LOCK_EXCLUSIVE ); + assert( pThis->eLock>=SQLITE_LOCK_SHARED ); + if( p->nRdLock>1 ){ + rc = SQLITE_BUSY; + }else if( pThis->eLock==SQLITE_LOCK_SHARED ){ + p->nWrLock = 1; + } + break; + } + } } if( rc==SQLITE_OK ) pThis->eLock = eLock; memdbLeave(p); return rc; } + +/* +** Unlock an memdb-file. 
+*/ +static int memdbUnlock(sqlite3_file *pFile, int eLock){ + MemFile *pThis = (MemFile*)pFile; + MemStore *p = pThis->pStore; + if( eLock>=pThis->eLock ) return SQLITE_OK; + memdbEnter(p); + + assert( eLock==SQLITE_LOCK_SHARED || eLock==SQLITE_LOCK_NONE ); + if( eLock==SQLITE_LOCK_SHARED ){ + if( ALWAYS(pThis->eLock>SQLITE_LOCK_SHARED) ){ + p->nWrLock--; + } + }else{ + if( pThis->eLock>SQLITE_LOCK_SHARED ){ + p->nWrLock--; + } + p->nRdLock--; + } + + pThis->eLock = eLock; + memdbLeave(p); + return SQLITE_OK; +} #if 0 /* ** This interface is only used for crash recovery, which does not ** occur on an in-memory database. @@ -508,11 +551,11 @@ int szName; UNUSED_PARAMETER(pVfs); memset(pFile, 0, sizeof(*pFile)); szName = sqlite3Strlen30(zName); - if( szName>1 && zName[0]=='/' ){ + if( szName>1 && (zName[0]=='/' || zName[0]=='\\') ){ int i; #ifndef SQLITE_MUTEX_OMIT sqlite3_mutex *pVfsMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); #endif sqlite3_mutex_enter(pVfsMutex); @@ -854,10 +897,17 @@ sqlite3_free(pData); } sqlite3_mutex_leave(db->mutex); return rc; } + +/* +** Return true if the VFS is the memvfs. +*/ +int sqlite3IsMemdb(const sqlite3_vfs *pVfs){ + return pVfs==&memdb_vfs; +} /* ** This routine is called when the extension is loaded. ** Register the new VFS. */ Index: src/os.c ================================================================== --- src/os.c +++ src/os.c @@ -104,13 +104,15 @@ DO_OS_MALLOC_TEST(id); return id->pMethods->xFileSize(id, pSize); } int sqlite3OsLock(sqlite3_file *id, int lockType){ DO_OS_MALLOC_TEST(id); + assert( lockType>=SQLITE_LOCK_SHARED && lockType<=SQLITE_LOCK_EXCLUSIVE ); return id->pMethods->xLock(id, lockType); } int sqlite3OsUnlock(sqlite3_file *id, int lockType){ + assert( lockType==SQLITE_LOCK_NONE || lockType==SQLITE_LOCK_SHARED ); return id->pMethods->xUnlock(id, lockType); } int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){ DO_OS_MALLOC_TEST(id); return id->pMethods->xCheckReservedLock(id, pResOut); ADDED src/os_kv.c Index: src/os_kv.c ================================================================== --- /dev/null +++ src/os_kv.c @@ -0,0 +1,972 @@ +/* +** 2022-09-06 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains an experimental VFS layer that operates on a +** Key/Value storage engine where both keys and values must be pure +** text. +*/ +#include +#if SQLITE_OS_KV || (SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL)) + +/***************************************************************************** +** Debugging logic +*/ + +/* SQLITE_KV_TRACE() is used for tracing calls to kvstorage routines. */ +#if 0 +#define SQLITE_KV_TRACE(X) printf X +#else +#define SQLITE_KV_TRACE(X) +#endif + +/* SQLITE_KV_LOG() is used for tracing calls to the VFS interface */ +#if 0 +#define SQLITE_KV_LOG(X) printf X +#else +#define SQLITE_KV_LOG(X) +#endif + + +/* +** Forward declaration of objects used by this VFS implementation +*/ +typedef struct KVVfsFile KVVfsFile; + +/* A single open file. There are only two files represented by this +** VFS - the database and the rollback journal. 
+*/ +struct KVVfsFile { + sqlite3_file base; /* IO methods */ + const char *zClass; /* Storage class */ + int isJournal; /* True if this is a journal file */ + unsigned int nJrnl; /* Space allocated for aJrnl[] */ + char *aJrnl; /* Journal content */ + int szPage; /* Last known page size */ + sqlite3_int64 szDb; /* Database file size. -1 means unknown */ +}; + +/* +** Methods for KVVfsFile +*/ +static int kvvfsClose(sqlite3_file*); +static int kvvfsReadDb(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int kvvfsReadJrnl(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int kvvfsWriteDb(sqlite3_file*,const void*,int iAmt, sqlite3_int64); +static int kvvfsWriteJrnl(sqlite3_file*,const void*,int iAmt, sqlite3_int64); +static int kvvfsTruncateDb(sqlite3_file*, sqlite3_int64 size); +static int kvvfsTruncateJrnl(sqlite3_file*, sqlite3_int64 size); +static int kvvfsSyncDb(sqlite3_file*, int flags); +static int kvvfsSyncJrnl(sqlite3_file*, int flags); +static int kvvfsFileSizeDb(sqlite3_file*, sqlite3_int64 *pSize); +static int kvvfsFileSizeJrnl(sqlite3_file*, sqlite3_int64 *pSize); +static int kvvfsLock(sqlite3_file*, int); +static int kvvfsUnlock(sqlite3_file*, int); +static int kvvfsCheckReservedLock(sqlite3_file*, int *pResOut); +static int kvvfsFileControlDb(sqlite3_file*, int op, void *pArg); +static int kvvfsFileControlJrnl(sqlite3_file*, int op, void *pArg); +static int kvvfsSectorSize(sqlite3_file*); +static int kvvfsDeviceCharacteristics(sqlite3_file*); + +/* +** Methods for sqlite3_vfs +*/ +static int kvvfsOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int kvvfsDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int kvvfsAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int kvvfsFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +static void *kvvfsDlOpen(sqlite3_vfs*, const char *zFilename); +static int kvvfsRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int kvvfsSleep(sqlite3_vfs*, int microseconds); +static int kvvfsCurrentTime(sqlite3_vfs*, double*); +static int kvvfsCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*); + +static sqlite3_vfs sqlite3OsKvvfsObject = { + 1, /* iVersion */ + sizeof(KVVfsFile), /* szOsFile */ + 1024, /* mxPathname */ + 0, /* pNext */ + "kvvfs", /* zName */ + 0, /* pAppData */ + kvvfsOpen, /* xOpen */ + kvvfsDelete, /* xDelete */ + kvvfsAccess, /* xAccess */ + kvvfsFullPathname, /* xFullPathname */ + kvvfsDlOpen, /* xDlOpen */ + 0, /* xDlError */ + 0, /* xDlSym */ + 0, /* xDlClose */ + kvvfsRandomness, /* xRandomness */ + kvvfsSleep, /* xSleep */ + kvvfsCurrentTime, /* xCurrentTime */ + 0, /* xGetLastError */ + kvvfsCurrentTimeInt64 /* xCurrentTimeInt64 */ +}; + +/* Methods for sqlite3_file objects referencing a database file +*/ +static sqlite3_io_methods kvvfs_db_io_methods = { + 1, /* iVersion */ + kvvfsClose, /* xClose */ + kvvfsReadDb, /* xRead */ + kvvfsWriteDb, /* xWrite */ + kvvfsTruncateDb, /* xTruncate */ + kvvfsSyncDb, /* xSync */ + kvvfsFileSizeDb, /* xFileSize */ + kvvfsLock, /* xLock */ + kvvfsUnlock, /* xUnlock */ + kvvfsCheckReservedLock, /* xCheckReservedLock */ + kvvfsFileControlDb, /* xFileControl */ + kvvfsSectorSize, /* xSectorSize */ + kvvfsDeviceCharacteristics, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0, /* xShmUnmap */ + 0, /* xFetch */ + 0 /* xUnfetch */ +}; + +/* Methods for sqlite3_file objects referencing a rollback journal +*/ +static sqlite3_io_methods kvvfs_jrnl_io_methods = { + 1, 
/* iVersion */ + kvvfsClose, /* xClose */ + kvvfsReadJrnl, /* xRead */ + kvvfsWriteJrnl, /* xWrite */ + kvvfsTruncateJrnl, /* xTruncate */ + kvvfsSyncJrnl, /* xSync */ + kvvfsFileSizeJrnl, /* xFileSize */ + kvvfsLock, /* xLock */ + kvvfsUnlock, /* xUnlock */ + kvvfsCheckReservedLock, /* xCheckReservedLock */ + kvvfsFileControlJrnl, /* xFileControl */ + kvvfsSectorSize, /* xSectorSize */ + kvvfsDeviceCharacteristics, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0, /* xShmUnmap */ + 0, /* xFetch */ + 0 /* xUnfetch */ +}; + +/****** Storage subsystem **************************************************/ +#include +#include +#include + +/* Forward declarations for the low-level storage engine +*/ +static int kvstorageWrite(const char*, const char *zKey, const char *zData); +static int kvstorageDelete(const char*, const char *zKey); +static int kvstorageRead(const char*, const char *zKey, char *zBuf, int nBuf); +#define KVSTORAGE_KEY_SZ 32 + +/* Expand the key name with an appropriate prefix and put the result +** zKeyOut[]. The zKeyOut[] buffer is assumed to hold at least +** KVSTORAGE_KEY_SZ bytes. +*/ +static void kvstorageMakeKey( + const char *zClass, + const char *zKeyIn, + char *zKeyOut +){ + sqlite3_snprintf(KVSTORAGE_KEY_SZ, zKeyOut, "kvvfs-%s-%s", zClass, zKeyIn); +} + +/* Write content into a key. zClass is the particular namespace of the +** underlying key/value store to use - either "local" or "session". +** +** Both zKey and zData are zero-terminated pure text strings. +** +** Return the number of errors. +*/ +static int kvstorageWrite( + const char *zClass, + const char *zKey, + const char *zData +){ + FILE *fd; + char zXKey[KVSTORAGE_KEY_SZ]; + kvstorageMakeKey(zClass, zKey, zXKey); + fd = fopen(zXKey, "wb"); + if( fd ){ + SQLITE_KV_TRACE(("KVVFS-WRITE %-15s (%d) %.50s%s\n", zXKey, + (int)strlen(zData), zData, + strlen(zData)>50 ? "..." : "")); + fputs(zData, fd); + fclose(fd); + return 0; + }else{ + return 1; + } +} + +/* Delete a key (with its corresponding data) from the key/value +** namespace given by zClass. If the key does not previously exist, +** this routine is a no-op. +*/ +static int kvstorageDelete(const char *zClass, const char *zKey){ + char zXKey[KVSTORAGE_KEY_SZ]; + kvstorageMakeKey(zClass, zKey, zXKey); + unlink(zXKey); + SQLITE_KV_TRACE(("KVVFS-DELETE %-15s\n", zXKey)); + return 0; +} + +/* Read the value associated with a zKey from the key/value namespace given +** by zClass and put the text data associated with that key in the first +** nBuf bytes of zBuf[]. The value might be truncated if zBuf is not large +** enough to hold it all. The value put into zBuf must always be zero +** terminated, even if it gets truncated because nBuf is not large enough. +** +** Return the total number of bytes in the data, without truncation, and +** not counting the final zero terminator. Return -1 if the key does +** not exist. +** +** If nBuf<=0 then this routine simply returns the size of the data without +** actually reading it. 
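+**
+** Callers therefore probe for the size first and then read, along these
+** lines (a sketch; kvvfsReadJrnl() below does exactly this for "jrnl"):
+**
+**     int sz = kvstorageRead(zClass, "jrnl", 0, 0);    /* -1 if missing */
+**     if( sz>0 ){
+**       char *z = sqlite3_malloc64( sz+1 );
+**       if( z ) kvstorageRead(zClass, "jrnl", z, sz+1);
+**     }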
+*/ +static int kvstorageRead( + const char *zClass, + const char *zKey, + char *zBuf, + int nBuf +){ + FILE *fd; + struct stat buf; + char zXKey[KVSTORAGE_KEY_SZ]; + kvstorageMakeKey(zClass, zKey, zXKey); + if( access(zXKey, R_OK)!=0 + || stat(zXKey, &buf)!=0 + || !S_ISREG(buf.st_mode) + ){ + SQLITE_KV_TRACE(("KVVFS-READ %-15s (-1)\n", zXKey)); + return -1; + } + if( nBuf<=0 ){ + return (int)buf.st_size; + }else if( nBuf==1 ){ + zBuf[0] = 0; + SQLITE_KV_TRACE(("KVVFS-READ %-15s (%d)\n", zXKey, + (int)buf.st_size)); + return (int)buf.st_size; + } + if( nBuf > buf.st_size + 1 ){ + nBuf = buf.st_size + 1; + } + fd = fopen(zXKey, "rb"); + if( fd==0 ){ + SQLITE_KV_TRACE(("KVVFS-READ %-15s (-1)\n", zXKey)); + return -1; + }else{ + sqlite3_int64 n = fread(zBuf, 1, nBuf-1, fd); + fclose(fd); + zBuf[n] = 0; + SQLITE_KV_TRACE(("KVVFS-READ %-15s (%lld) %.50s%s\n", zXKey, + n, zBuf, n>50 ? "..." : "")); + return (int)n; + } +} + +/* +** An internal level of indirection which enables us to replace the +** kvvfs i/o methods with JavaScript implementations in WASM builds. +** Maintenance reminder: if this struct changes in any way, the JSON +** rendering of its structure must be updated in +** sqlite3_wasm_enum_json(). There are no binary compatibility +** concerns, so it does not need an iVersion member. This file is +** necessarily always compiled together with sqlite3_wasm_enum_json(), +** and JS code dynamically creates the mapping of members based on +** that JSON description. +*/ +typedef struct sqlite3_kvvfs_methods sqlite3_kvvfs_methods; +struct sqlite3_kvvfs_methods { + int (*xRead)(const char *zClass, const char *zKey, char *zBuf, int nBuf); + int (*xWrite)(const char *zClass, const char *zKey, const char *zData); + int (*xDelete)(const char *zClass, const char *zKey); + const int nKeySize; +}; + +/* +** This object holds the kvvfs I/O methods which may be swapped out +** for JavaScript-side implementations in WASM builds. In such builds +** it cannot be const, but in native builds it should be so that +** the compiler can hopefully optimize this level of indirection out. +** That said, kvvfs is intended primarily for use in WASM builds. +** +** Note that this is not explicitly flagged as static because the +** amalgamation build will tag it with SQLITE_PRIVATE. +*/ +#ifndef SQLITE_WASM +const +#endif +sqlite3_kvvfs_methods sqlite3KvvfsMethods = { +kvstorageRead, +kvstorageWrite, +kvstorageDelete, +KVSTORAGE_KEY_SZ +}; + +/****** Utility subroutines ************************************************/ + +/* +** Encode binary into the text encoded used to persist on disk. +** The output text is stored in aOut[], which must be at least +** nData+1 bytes in length. +** +** Return the actual length of the encoded text, not counting the +** zero terminator at the end. +** +** Encoding format +** --------------- +** +** * Non-zero bytes are encoded as upper-case hexadecimal +** +** * A sequence of one or more zero-bytes that are not at the +** beginning of the buffer are encoded as a little-endian +** base-26 number using a..z. "a" means 0. "b" means 1, +** "z" means 25. "ab" means 26. "ac" means 52. And so forth. +** +** * Because there is no overlap between the encoding characters +** of hexadecimal and base-26 numbers, it is always clear where +** one stops and the next begins. 
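+**
+** For example, the five bytes 01 00 00 00 ff encode as the text
+** "01dFF": "01" for the first byte, "d" for the run of three zeros
+** (a=0, b=1, c=2, d=3), then "FF" for the final byte.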
+*/
+static int kvvfsEncode(const char *aData, int nData, char *aOut){
+  int i, j;
+  const unsigned char *a = (const unsigned char*)aData;
+  for(i=j=0; i<nData; i++){
+    unsigned char c = a[i];
+    if( c!=0 ){
+      aOut[j++] = "0123456789ABCDEF"[c>>4];
+      aOut[j++] = "0123456789ABCDEF"[c&0xf];
+    }else{
+      /* A sequence of 1 or more zeros is stored as a little-endian
+      ** base-26 number using a..z as the digits. So one zero is "b".
+      ** Two zeros is "c". 25 zeros is "z", 26 zeros is "ab", 27 is "bb",
+      ** and so forth.
+      */
+      int k;
+      for(k=1; i+k<nData && a[i+k]==0; k++){}
+      i += k-1;
+      while( k>0 ){
+        aOut[j++] = 'a'+(k%26);
+        k /= 26;
+      }
+    }
+  }
+  aOut[j] = 0;
+  return j;
+}
+
+static const signed char kvvfsHexValue[256] = {
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+   0,  1,  2,  3,  4,  5,  6,  7,  8,  9, -1, -1, -1, -1, -1, -1,
+  -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+/*
+** Decode the text encoding back to binary. The binary content is
+** written into pOut, which must be at least nOut bytes in length.
+**
+** The return value is the number of bytes actually written into aOut[].
+*/
+static int kvvfsDecode(const char *a, char *aOut, int nOut){
+  int i, j;
+  int c;
+  const unsigned char *aIn = (const unsigned char*)a;
+  i = 0;
+  j = 0;
+  while( 1 ){
+    c = kvvfsHexValue[aIn[i]];
+    if( c<0 ){
+      int n = 0;
+      int mult = 1;
+      c = aIn[i];
+      if( c==0 ) break;
+      while( c>='a' && c<='z' ){
+        n += (c - 'a')*mult;
+        mult *= 26;
+        c = aIn[++i];
+      }
+      if( j+n>nOut ) return -1;
+      memset(&aOut[j], 0, n);
+      j += n;
+      c = aIn[i];
+      if( c==0 ) break;
+    }else{
+      aOut[j] = c<<4;
+      c = kvvfsHexValue[aIn[++i]];
+      if( c<0 ) break;
+      aOut[j++] += c;
+      i++;
+    }
+  }
+  return j;
+}
+
+/*
+** Decode a complete journal file. Allocate space in pFile->aJrnl
+** and store the decoding there. Or leave pFile->aJrnl set to NULL
+** if an error is encountered.
+**
+** The first few characters of the text encoding will be a little-endian
+** base-26 number (digits a..z) that is the total number of bytes
+** in the decoded journal file image. This base-26 number is followed
+** by a single space, then the encoding of the journal. The space
+** separator is required to act as a terminator for the base-26 number.
+*/
+static void kvvfsDecodeJournal(
+  KVVfsFile *pFile,      /* Store decoding in pFile->aJrnl */
+  const char *zTxt,      /* Text encoding.
Zero-terminated */ + int nTxt /* Bytes in zTxt, excluding zero terminator */ +){ + unsigned int n = 0; + int c, i, mult; + i = 0; + mult = 1; + while( (c = zTxt[i++])>='a' && c<='z' ){ + n += (zTxt[i] - 'a')*mult; + mult *= 26; + } + sqlite3_free(pFile->aJrnl); + pFile->aJrnl = sqlite3_malloc64( n ); + if( pFile->aJrnl==0 ){ + pFile->nJrnl = 0; + return; + } + pFile->nJrnl = n; + n = kvvfsDecode(zTxt+i, pFile->aJrnl, pFile->nJrnl); + if( nnJrnl ){ + sqlite3_free(pFile->aJrnl); + pFile->aJrnl = 0; + pFile->nJrnl = 0; + } +} + +/* +** Read or write the "sz" element, containing the database file size. +*/ +static sqlite3_int64 kvvfsReadFileSize(KVVfsFile *pFile){ + char zData[50]; + zData[0] = 0; + sqlite3KvvfsMethods.xRead(pFile->zClass, "sz", zData, sizeof(zData)-1); + return strtoll(zData, 0, 0); +} +static int kvvfsWriteFileSize(KVVfsFile *pFile, sqlite3_int64 sz){ + char zData[50]; + sqlite3_snprintf(sizeof(zData), zData, "%lld", sz); + return sqlite3KvvfsMethods.xWrite(pFile->zClass, "sz", zData); +} + +/****** sqlite3_io_methods methods ******************************************/ + +/* +** Close an kvvfs-file. +*/ +static int kvvfsClose(sqlite3_file *pProtoFile){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + + SQLITE_KV_LOG(("xClose %s %s\n", pFile->zClass, + pFile->isJournal ? "journal" : "db")); + sqlite3_free(pFile->aJrnl); + return SQLITE_OK; +} + +/* +** Read from the -journal file. +*/ +static int kvvfsReadJrnl( + sqlite3_file *pProtoFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + KVVfsFile *pFile = (KVVfsFile*)pProtoFile; + assert( pFile->isJournal ); + SQLITE_KV_LOG(("xRead('%s-journal',%d,%lld)\n", pFile->zClass, iAmt, iOfst)); + if( pFile->aJrnl==0 ){ + int szTxt = kvstorageRead(pFile->zClass, "jrnl", 0, 0); + char *aTxt; + if( szTxt<=4 ){ + return SQLITE_IOERR; + } + aTxt = sqlite3_malloc64( szTxt+1 ); + if( aTxt==0 ) return SQLITE_NOMEM; + kvstorageRead(pFile->zClass, "jrnl", aTxt, szTxt+1); + kvvfsDecodeJournal(pFile, aTxt, szTxt); + sqlite3_free(aTxt); + if( pFile->aJrnl==0 ) return SQLITE_IOERR; + } + if( iOfst+iAmt>pFile->nJrnl ){ + return SQLITE_IOERR_SHORT_READ; + } + memcpy(zBuf, pFile->aJrnl+iOfst, iAmt); + return SQLITE_OK; +} + +/* +** Read from the database file. 
+*/
+static int kvvfsReadDb(
+  sqlite3_file *pProtoFile,
+  void *zBuf,
+  int iAmt,
+  sqlite_int64 iOfst
+){
+  KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+  unsigned int pgno;
+  int got, n;
+  char zKey[30];
+  char aData[133073];
+  assert( iOfst>=0 );
+  assert( iAmt>=0 );
+  SQLITE_KV_LOG(("xRead('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
+  if( iOfst+iAmt>=512 ){
+    if( (iOfst % iAmt)!=0 ){
+      return SQLITE_IOERR_READ;
+    }
+    if( (iAmt & (iAmt-1))!=0 || iAmt<512 || iAmt>65536 ){
+      return SQLITE_IOERR_READ;
+    }
+    pFile->szPage = iAmt;
+    pgno = 1 + iOfst/iAmt;
+  }else{
+    pgno = 1;
+  }
+  sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno);
+  got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, aData, sizeof(aData)-1);
+  if( got<0 ){
+    n = 0;
+  }else{
+    aData[got] = 0;
+    if( iOfst+iAmt<512 ){
+      int k = iOfst+iAmt;
+      aData[k*2] = 0;
+      n = kvvfsDecode(aData, &aData[2000], sizeof(aData)-2000);
+      if( n>=iOfst+iAmt ){
+        memcpy(zBuf, &aData[2000+iOfst], iAmt);
+        n = iAmt;
+      }else{
+        n = 0;
+      }
+    }else{
+      n = kvvfsDecode(aData, zBuf, iAmt);
+    }
+  }
+  if( n<iAmt ){
+    memset(zBuf+n, 0, iAmt-n);
+    return SQLITE_IOERR_SHORT_READ;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Write into the -journal file.
+*/
+static int kvvfsWriteJrnl(
+  sqlite3_file *pProtoFile,
+  const void *zBuf,
+  int iAmt,
+  sqlite_int64 iOfst
+){
+  KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+  sqlite3_int64 iEnd = iOfst+iAmt;
+  SQLITE_KV_LOG(("xWrite('%s-journal',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
+  if( iEnd>=0x10000000 ) return SQLITE_FULL;
+  if( pFile->aJrnl==0 || pFile->nJrnl<iEnd ){
+    char *aNew = sqlite3_realloc(pFile->aJrnl, iEnd);
+    if( aNew==0 ){
+      return SQLITE_IOERR_NOMEM;
+    }
+    pFile->aJrnl = aNew;
+    if( pFile->nJrnl<iOfst ){
+      memset(pFile->aJrnl+pFile->nJrnl, 0, iOfst-pFile->nJrnl);
+    }
+    pFile->nJrnl = iEnd;
+  }
+  memcpy(pFile->aJrnl+iOfst, zBuf, iAmt);
+  return SQLITE_OK;
+}
+
+/*
+** Write into the database file.
+*/
+static int kvvfsWriteDb(
+  sqlite3_file *pProtoFile,
+  const void *zBuf,
+  int iAmt,
+  sqlite_int64 iOfst
+){
+  KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+  unsigned int pgno;
+  char zKey[30];
+  char aData[131073];
+  SQLITE_KV_LOG(("xWrite('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
+  assert( iAmt>=512 && iAmt<=65536 );
+  assert( (iAmt & (iAmt-1))==0 );
+  assert( pFile->szPage<0 || pFile->szPage==iAmt );
+  pFile->szPage = iAmt;
+  pgno = 1 + iOfst/iAmt;
+  sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno);
+  kvvfsEncode(zBuf, iAmt, aData);
+  if( sqlite3KvvfsMethods.xWrite(pFile->zClass, zKey, aData) ){
+    return SQLITE_IOERR;
+  }
+  if( iOfst+iAmt > pFile->szDb ){
+    pFile->szDb = iOfst + iAmt;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Truncate an kvvfs-file.
+*/
+static int kvvfsTruncateJrnl(sqlite3_file *pProtoFile, sqlite_int64 size){
+  KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+  SQLITE_KV_LOG(("xTruncate('%s-journal',%lld)\n", pFile->zClass, size));
+  assert( size==0 );
+  sqlite3KvvfsMethods.xDelete(pFile->zClass, "jrnl");
+  sqlite3_free(pFile->aJrnl);
+  pFile->aJrnl = 0;
+  pFile->nJrnl = 0;
+  return SQLITE_OK;
+}
+static int kvvfsTruncateDb(sqlite3_file *pProtoFile, sqlite_int64 size){
+  KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+  if( pFile->szDb>size
+   && pFile->szPage>0
+   && (size % pFile->szPage)==0
+  ){
+    char zKey[50];
+    unsigned int pgno, pgnoMax;
+    SQLITE_KV_LOG(("xTruncate('%s-db',%lld)\n", pFile->zClass, size));
+    pgno = 1 + size/pFile->szPage;
+    pgnoMax = 2 + pFile->szDb/pFile->szPage;
+    while( pgno<=pgnoMax ){
+      sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno);
+      sqlite3KvvfsMethods.xDelete(pFile->zClass, zKey);
+      pgno++;
+    }
+    pFile->szDb = size;
+    return kvvfsWriteFileSize(pFile, size) ? SQLITE_IOERR : SQLITE_OK;
+  }
+  return SQLITE_IOERR;
+}
+
+/*
+** Sync an kvvfs-file.
+*/ +static int kvvfsSyncJrnl(sqlite3_file *pProtoFile, int flags){ + int i, n; + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + char *zOut; + SQLITE_KV_LOG(("xSync('%s-journal')\n", pFile->zClass)); + if( pFile->nJrnl<=0 ){ + return kvvfsTruncateJrnl(pProtoFile, 0); + } + zOut = sqlite3_malloc64( pFile->nJrnl*2 + 50 ); + if( zOut==0 ){ + return SQLITE_IOERR_NOMEM; + } + n = pFile->nJrnl; + i = 0; + do{ + zOut[i++] = 'a' + (n%26); + n /= 26; + }while( n>0 ); + zOut[i++] = ' '; + kvvfsEncode(pFile->aJrnl, pFile->nJrnl, &zOut[i]); + i = sqlite3KvvfsMethods.xWrite(pFile->zClass, "jrnl", zOut); + sqlite3_free(zOut); + return i ? SQLITE_IOERR : SQLITE_OK; +} +static int kvvfsSyncDb(sqlite3_file *pProtoFile, int flags){ + return SQLITE_OK; +} + +/* +** Return the current file-size of an kvvfs-file. +*/ +static int kvvfsFileSizeJrnl(sqlite3_file *pProtoFile, sqlite_int64 *pSize){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + SQLITE_KV_LOG(("xFileSize('%s-journal')\n", pFile->zClass)); + *pSize = pFile->nJrnl; + return SQLITE_OK; +} +static int kvvfsFileSizeDb(sqlite3_file *pProtoFile, sqlite_int64 *pSize){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + SQLITE_KV_LOG(("xFileSize('%s-db')\n", pFile->zClass)); + if( pFile->szDb>=0 ){ + *pSize = pFile->szDb; + }else{ + *pSize = kvvfsReadFileSize(pFile); + } + return SQLITE_OK; +} + +/* +** Lock an kvvfs-file. +*/ +static int kvvfsLock(sqlite3_file *pProtoFile, int eLock){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + assert( !pFile->isJournal ); + SQLITE_KV_LOG(("xLock(%s,%d)\n", pFile->zClass, eLock)); + + if( eLock!=SQLITE_LOCK_NONE ){ + pFile->szDb = kvvfsReadFileSize(pFile); + } + return SQLITE_OK; +} + +/* +** Unlock an kvvfs-file. +*/ +static int kvvfsUnlock(sqlite3_file *pProtoFile, int eLock){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + assert( !pFile->isJournal ); + SQLITE_KV_LOG(("xUnlock(%s,%d)\n", pFile->zClass, eLock)); + if( eLock==SQLITE_LOCK_NONE ){ + pFile->szDb = -1; + } + return SQLITE_OK; +} + +/* +** Check if another file-handle holds a RESERVED lock on an kvvfs-file. +*/ +static int kvvfsCheckReservedLock(sqlite3_file *pProtoFile, int *pResOut){ + SQLITE_KV_LOG(("xCheckReservedLock\n")); + *pResOut = 0; + return SQLITE_OK; +} + +/* +** File control method. For custom operations on an kvvfs-file. +*/ +static int kvvfsFileControlJrnl(sqlite3_file *pProtoFile, int op, void *pArg){ + SQLITE_KV_LOG(("xFileControl(%d) on journal\n", op)); + return SQLITE_NOTFOUND; +} +static int kvvfsFileControlDb(sqlite3_file *pProtoFile, int op, void *pArg){ + SQLITE_KV_LOG(("xFileControl(%d) on database\n", op)); + if( op==SQLITE_FCNTL_SYNC ){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + int rc = SQLITE_OK; + SQLITE_KV_LOG(("xSync('%s-db')\n", pFile->zClass)); + if( pFile->szDb>0 && 0!=kvvfsWriteFileSize(pFile, pFile->szDb) ){ + rc = SQLITE_IOERR; + } + return rc; + } + return SQLITE_NOTFOUND; +} + +/* +** Return the sector-size in bytes for an kvvfs-file. +*/ +static int kvvfsSectorSize(sqlite3_file *pFile){ + return 512; +} + +/* +** Return the device characteristic flags supported by an kvvfs-file. +*/ +static int kvvfsDeviceCharacteristics(sqlite3_file *pProtoFile){ + return 0; +} + +/****** sqlite3_vfs methods *************************************************/ + +/* +** Open an kvvfs file handle. 
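+**
+** Only four names are accepted: "local" and "session" select the two
+** databases this VFS can serve, and "local-journal"/"session-journal"
+** select their rollback journals.  A client reaches this VFS with
+** something along the lines of (a sketch, assuming a build that
+** registers the "kvvfs" VFS):
+**
+**     sqlite3 *db;
+**     sqlite3_open_v2("local", &db,
+**                     SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, "kvvfs");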
+*/
+static int kvvfsOpen(
+  sqlite3_vfs *pProtoVfs,
+  const char *zName,
+  sqlite3_file *pProtoFile,
+  int flags,
+  int *pOutFlags
+){
+  KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+  if( zName==0 ) zName = "";
+  SQLITE_KV_LOG(("xOpen(\"%s\")\n", zName));
+  if( strcmp(zName, "local")==0
+   || strcmp(zName, "session")==0
+  ){
+    pFile->isJournal = 0;
+    pFile->base.pMethods = &kvvfs_db_io_methods;
+  }else
+  if( strcmp(zName, "local-journal")==0
+   || strcmp(zName, "session-journal")==0
+  ){
+    pFile->isJournal = 1;
+    pFile->base.pMethods = &kvvfs_jrnl_io_methods;
+  }else{
+    return SQLITE_CANTOPEN;
+  }
+  if( zName[0]=='s' ){
+    pFile->zClass = "session";
+  }else{
+    pFile->zClass = "local";
+  }
+  pFile->aJrnl = 0;
+  pFile->nJrnl = 0;
+  pFile->szPage = -1;
+  pFile->szDb = -1;
+  return SQLITE_OK;
+}
+
+/*
+** Delete the file located at zPath. If the dirSync argument is true,
+** ensure the file-system modifications are synced to disk before
+** returning.
+*/
+static int kvvfsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){
+  if( strcmp(zPath, "local-journal")==0 ){
+    sqlite3KvvfsMethods.xDelete("local", "jrnl");
+  }else
+  if( strcmp(zPath, "session-journal")==0 ){
+    sqlite3KvvfsMethods.xDelete("session", "jrnl");
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Test for access permissions. Return true if the requested permission
+** is available, or false otherwise.
+*/
+static int kvvfsAccess(
+  sqlite3_vfs *pProtoVfs,
+  const char *zPath,
+  int flags,
+  int *pResOut
+){
+  SQLITE_KV_LOG(("xAccess(\"%s\")\n", zPath));
+  if( strcmp(zPath, "local-journal")==0 ){
+    *pResOut = sqlite3KvvfsMethods.xRead("local", "jrnl", 0, 0)>0;
+  }else
+  if( strcmp(zPath, "session-journal")==0 ){
+    *pResOut = sqlite3KvvfsMethods.xRead("session", "jrnl", 0, 0)>0;
+  }else
+  if( strcmp(zPath, "local")==0 ){
+    *pResOut = sqlite3KvvfsMethods.xRead("local", "sz", 0, 0)>0;
+  }else
+  if( strcmp(zPath, "session")==0 ){
+    *pResOut = sqlite3KvvfsMethods.xRead("session", "sz", 0, 0)>0;
+  }else
+  {
+    *pResOut = 0;
+  }
+  SQLITE_KV_LOG(("xAccess returns %d\n",*pResOut));
+  return SQLITE_OK;
+}
+
+/*
+** Populate buffer zOut with the full canonical pathname corresponding
+** to the pathname in zPath. zOut is guaranteed to point to a buffer
+** of at least (INST_MAX_PATHNAME+1) bytes.
+*/
+static int kvvfsFullPathname(
+  sqlite3_vfs *pVfs,
+  const char *zPath,
+  int nOut,
+  char *zOut
+){
+  size_t nPath;
+#ifdef SQLITE_OS_KV_ALWAYS_LOCAL
+  zPath = "local";
+#endif
+  nPath = strlen(zPath);
+  SQLITE_KV_LOG(("xFullPathname(\"%s\")\n", zPath));
+  if( nOut<nPath+1 ) return SQLITE_CANTOPEN;
+  memcpy(zOut, zPath, nPath+1);
+  return SQLITE_OK;
+}
+
+/*
+** Open the dynamic library located at zPath and return a handle.
+*/
+static void *kvvfsDlOpen(sqlite3_vfs *pVfs, const char *zPath){
+  return 0;
+}
+
+/*
+** Write nByte bytes of randomness into zBufOut.
+*/
+static int kvvfsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
+  memset(zBufOut, 0, nByte);
+  return nByte;
+}
+
+/*
+** Sleep for nMicro microseconds. Return the number of microseconds
+** actually slept.
+*/
+static int kvvfsSleep(sqlite3_vfs *pVfs, int nMicro){
+  return SQLITE_OK;
+}
+
+/*
+** Return the current time as a Julian Day number in *pTimeOut.
+*/
+static int kvvfsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){
+  sqlite3_int64 i = 0;
+  int rc;
+  rc = kvvfsCurrentTimeInt64(0, &i);
+  *pTimeOut = i/86400000.0;
+  return rc;
+}
+#include <sys/time.h>
+static int kvvfsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){
+  static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000;
+  struct timeval sNow;
+  (void)gettimeofday(&sNow, 0);  /* Cannot fail given valid arguments */
+  *pTimeOut = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000;
+  return SQLITE_OK;
+}
+#endif /* SQLITE_OS_KV || SQLITE_OS_UNIX */
+
+#if SQLITE_OS_KV
+/*
+** This routine is called to initialize the KV-vfs as the default VFS.
+*/ +int sqlite3_os_init(void){ + return sqlite3_vfs_register(&sqlite3OsKvvfsObject, 1); +} +int sqlite3_os_end(void){ + return SQLITE_OK; +} +#endif /* SQLITE_OS_KV */ + +#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) +int sqlite3KvvfsInit(void){ + return sqlite3_vfs_register(&sqlite3OsKvvfsObject, 0); +} +#endif Index: src/os_setup.h ================================================================== --- src/os_setup.h +++ src/os_setup.h @@ -18,40 +18,74 @@ /* ** Figure out if we are dealing with Unix, Windows, or some other operating ** system. ** -** After the following block of preprocess macros, all of SQLITE_OS_UNIX, -** SQLITE_OS_WIN, and SQLITE_OS_OTHER will defined to either 1 or 0. One of -** the three will be 1. The other two will be 0. -*/ -#if defined(SQLITE_OS_OTHER) -# if SQLITE_OS_OTHER==1 -# undef SQLITE_OS_UNIX -# define SQLITE_OS_UNIX 0 -# undef SQLITE_OS_WIN -# define SQLITE_OS_WIN 0 -# else -# undef SQLITE_OS_OTHER -# endif -#endif -#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER) -# define SQLITE_OS_OTHER 0 -# ifndef SQLITE_OS_WIN -# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \ - defined(__MINGW32__) || defined(__BORLANDC__) -# define SQLITE_OS_WIN 1 -# define SQLITE_OS_UNIX 0 -# else -# define SQLITE_OS_WIN 0 -# define SQLITE_OS_UNIX 1 -# endif -# else -# define SQLITE_OS_UNIX 0 -# endif -#else -# ifndef SQLITE_OS_WIN -# define SQLITE_OS_WIN 0 -# endif -#endif +** After the following block of preprocess macros, all of +** +** SQLITE_OS_KV +** SQLITE_OS_OTHER +** SQLITE_OS_UNIX +** SQLITE_OS_WIN +** +** will defined to either 1 or 0. One of them will be 1. The others will be 0. +** If none of the macros are initially defined, then select either +** SQLITE_OS_UNIX or SQLITE_OS_WIN depending on the target platform. +** +** If SQLITE_OS_OTHER=1 is specified at compile-time, then the application +** must provide its own VFS implementation together with sqlite3_os_init() +** and sqlite3_os_end() routines. 
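+**
+** Similarly, building with SQLITE_OS_KV=1 makes the key/value VFS in
+** os_kv.c the one and only (default) VFS and forces the related options
+** set below.  On unix, defining SQLITE_OS_KV_OPTIONAL instead keeps the
+** normal VFSes and additionally registers the key/value VFS, which can
+** then be looked up by name (sketch):
+**
+**     sqlite3_vfs *pKv = sqlite3_vfs_find("kvvfs");  /* non-default VFS */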
+*/ +#if !defined(SQLITE_OS_KV) && !defined(SQLITE_OS_OTHER) && \ + !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_WIN) +# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \ + defined(__MINGW32__) || defined(__BORLANDC__) +# define SQLITE_OS_WIN 1 +# define SQLITE_OS_UNIX 0 +# else +# define SQLITE_OS_WIN 0 +# define SQLITE_OS_UNIX 1 +# endif +#endif +#if SQLITE_OS_OTHER+1>1 +# undef SQLITE_OS_KV +# define SQLITE_OS_KV 0 +# undef SQLITE_OS_UNIX +# define SQLITE_OS_UNIX 0 +# undef SQLITE_OS_WIN +# define SQLITE_OS_WIN 0 +#endif +#if SQLITE_OS_KV+1>1 +# undef SQLITE_OS_OTHER +# define SQLITE_OS_OTHER 0 +# undef SQLITE_OS_UNIX +# define SQLITE_OS_UNIX 0 +# undef SQLITE_OS_WIN +# define SQLITE_OS_WIN 0 +# define SQLITE_OMIT_LOAD_EXTENSION 1 +# define SQLITE_OMIT_WAL 1 +# define SQLITE_OMIT_DEPRECATED 1 +# undef SQLITE_TEMP_STORE +# define SQLITE_TEMP_STORE 3 /* Always use memory for temporary storage */ +# define SQLITE_DQS 0 +# define SQLITE_OMIT_SHARED_CACHE 1 +# define SQLITE_OMIT_AUTOINIT 1 +#endif +#if SQLITE_OS_UNIX+1>1 +# undef SQLITE_OS_KV +# define SQLITE_OS_KV 0 +# undef SQLITE_OS_OTHER +# define SQLITE_OS_OTHER 0 +# undef SQLITE_OS_WIN +# define SQLITE_OS_WIN 0 +#endif +#if SQLITE_OS_WIN+1>1 +# undef SQLITE_OS_KV +# define SQLITE_OS_KV 0 +# undef SQLITE_OS_OTHER +# define SQLITE_OS_OTHER 0 +# undef SQLITE_OS_UNIX +# define SQLITE_OS_UNIX 0 +#endif + #endif /* SQLITE_OS_SETUP_H */ Index: src/os_unix.c ================================================================== --- src/os_unix.c +++ src/os_unix.c @@ -44,20 +44,10 @@ ** plus implementations of sqlite3_os_init() and sqlite3_os_end(). */ #include "sqliteInt.h" #if SQLITE_OS_UNIX /* This file is used on unix only */ -/* Turn this feature on in all builds for now */ -#define SQLITE_MUTEXFREE_SHMLOCK 1 -#define SQLITE_MFS_EXCLUSIVE 255 -#ifndef SQLITE_MFS_NSHARD -# define SQLITE_MFS_NSHARD 8 -#endif -#if SQLITE_MFS_NSHARD<1 -# error "SQLITE_MFS_NSHARD must be greater than 0" -#endif - /* ** There are various methods for file locking used for concurrency ** control: ** ** 1. POSIX locking (the default), @@ -95,17 +85,17 @@ #endif /* ** standard include files. */ -#include -#include +#include /* amalgamator: keep */ +#include /* amalgamator: keep */ #include #include -#include +#include /* amalgamator: keep */ #include -#include +#include /* amalgamator: keep */ #include #if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 # include #endif @@ -694,10 +684,13 @@ if( fd<0 ){ if( errno==EINTR ) continue; break; } if( fd>=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break; + if( (f & (O_EXCL|O_CREAT))==(O_EXCL|O_CREAT) ){ + (void)osUnlink(z); + } osClose(fd); sqlite3_log(SQLITE_WARNING, "attempt to open \"%s\" as file descriptor %d", z, fd); fd = -1; if( osOpen("/dev/null", O_RDONLY, m)<0 ) break; @@ -1185,14 +1178,10 @@ #endif #if OS_VXWORKS sem_t *pSem; /* Named POSIX semaphore */ char aSemName[MAX_PATHNAME+2]; /* Name of that semaphore */ #endif -#ifdef SQLITE_SHARED_MAPPING - sqlite3_int64 nSharedMapping; /* Size of mapped region in bytes */ - void *pSharedMapping; /* Memory mapped region */ -#endif }; /* ** A lists of all unixInodeInfo objects. 
** @@ -1341,17 +1330,10 @@ assert( unixFileMutexNotheld(pFile) ); if( ALWAYS(pInode) ){ pInode->nRef--; if( pInode->nRef==0 ){ assert( pInode->pShmNode==0 ); -#ifdef SQLITE_SHARED_MAPPING - if( pInode->pSharedMapping ){ - osMunmap(pInode->pSharedMapping, pInode->nSharedMapping); - pInode->pSharedMapping = 0; - pInode->nSharedMapping = 0; - } -#endif sqlite3_mutex_enter(pInode->pLockMutex); closePendingFds(pFile); sqlite3_mutex_leave(pInode->pLockMutex); if( pInode->pPrev ){ assert( pInode->pPrev->pNext==pInode ); @@ -2211,18 +2193,10 @@ /* ** Close the file. */ static int nolockClose(sqlite3_file *id) { -#ifdef SQLITE_SHARED_MAPPING - unixFile *pFd = (unixFile*)id; - if( pFd->pInode ){ - unixEnterMutex(); - releaseInodeInfo(pFd); - unixLeaveMutex(); - } -#endif return closeUnixFile(id); } /******************* End of the no-op lock implementation ********************* ******************************************************************************/ @@ -4072,13 +4046,10 @@ } *(i64*)pArg = pFile->mmapSizeMax; if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){ pFile->mmapSizeMax = newLimit; -#ifdef SQLITE_SHARED_MAPPING - if( pFile->pInode==0 ) -#endif if( pFile->mmapSize>0 ){ unixUnmapfile(pFile); rc = unixMapfile(pFile, -1); } } @@ -4324,44 +4295,11 @@ #ifdef SQLITE_DEBUG u8 exclMask; /* Mask of exclusive locks held */ u8 sharedMask; /* Mask of shared locks held */ u8 nextShmId; /* Next available unixShm.id value */ #endif - -#ifdef SQLITE_MUTEXFREE_SHMLOCK - /* In unix-excl mode, if SQLITE_MUTEXFREE_SHMLOCK is defined, all locks - ** are stored in the following 64-bit value. There are in total 8 - ** shm-locking slots, each of which are assigned 8-bits from the 64-bit - ** value. The least-significant 8 bits correspond to shm-locking slot - ** 0, and so on. - ** - ** If the 8-bits corresponding to a shm-locking locking slot are set to - ** 0xFF, then a write-lock is held on the slot. Or, if they are set to - ** a non-zero value smaller than 0xFF, then they represent the total - ** number of read-locks held on the slot. There is no way to distinguish - ** between a write-lock and 255 read-locks. */ - struct LockingSlot { - u32 nLock; - u64 aPadding[7]; - } aMFSlot[3 + SQLITE_MFS_NSHARD*5]; -#endif }; - -/* -** Atomic CAS primitive used in multi-process mode. Equivalent to: -** -** int unixCompareAndSwap(u32 *ptr, u32 oldval, u32 newval){ -** if( *ptr==oldval ){ -** *ptr = newval; -** return 1; -** } -** return 0; -** } -*/ -#define unixCompareAndSwap(ptr,oldval,newval) \ - __sync_bool_compare_and_swap(ptr,oldval,newval) - /* ** Structure used internally by this VFS to record the state of an ** open shared memory connection. 
** @@ -4379,13 +4317,10 @@ unixShm *pNext; /* Next unixShm with the same unixShmNode */ u8 hasMutex; /* True if holding the unixShmNode->pShmMutex */ u8 id; /* Id of this connection within its unixShmNode */ u16 sharedMask; /* Mask of shared locks held */ u16 exclMask; /* Mask of exclusive locks held */ -#ifdef SQLITE_MUTEXFREE_SHMLOCK - u8 aMFCurrent[8]; /* Current slot used for each shared lock */ -#endif }; /* ** Constants used for locking */ @@ -4929,91 +4864,10 @@ if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY; sqlite3_mutex_leave(pShmNode->pShmMutex); return rc; } -#ifdef SQLITE_MUTEXFREE_SHMLOCK -static int unixMutexFreeShmlock( - unixFile *pFd, /* Database file holding the shared memory */ - int ofst, /* First lock to acquire or release */ - int n, /* Number of locks to acquire or release */ - int flags /* What to do with the lock */ -){ - struct LockMapEntry { - int iFirst; - int nSlot; - } aMap[9] = { - { 0, 1 }, - { 1, 1 }, - { 2, 1 }, - { 3+0*SQLITE_MFS_NSHARD, SQLITE_MFS_NSHARD }, - { 3+1*SQLITE_MFS_NSHARD, SQLITE_MFS_NSHARD }, - { 3+2*SQLITE_MFS_NSHARD, SQLITE_MFS_NSHARD }, - { 3+3*SQLITE_MFS_NSHARD, SQLITE_MFS_NSHARD }, - { 3+4*SQLITE_MFS_NSHARD, SQLITE_MFS_NSHARD }, - { 3+5*SQLITE_MFS_NSHARD, 0 }, - }; - - unixShm *p = pFd->pShm; /* The shared memory being locked */ - unixShmNode *pShmNode = p->pShmNode; /* The underlying file iNode */ - - if( flags & SQLITE_SHM_SHARED ){ - /* SHARED locks */ - u32 iOld, iNew, *ptr; - int iIncr = -1; - if( (flags & SQLITE_SHM_UNLOCK)==0 ){ - p->aMFCurrent[ofst] = (p->aMFCurrent[ofst] + 1) % aMap[ofst].nSlot; - iIncr = 1; - } - ptr = &pShmNode->aMFSlot[aMap[ofst].iFirst + p->aMFCurrent[ofst]].nLock; - do { - iOld = *ptr; - iNew = iOld + iIncr; - if( iNew>SQLITE_MFS_EXCLUSIVE ){ - return SQLITE_BUSY; - } - }while( 0==unixCompareAndSwap(ptr, iOld, iNew) ); - }else{ - /* EXCLUSIVE locks */ - u16 mask = (1<<(ofst+n)) - (1<exclMask) ){ - int iFirst = aMap[ofst].iFirst; - int iLast = aMap[ofst+n].iFirst; - int i; - for(i=iFirst; iaMFSlot[i].nLock; - if( flags & SQLITE_SHM_UNLOCK ){ - assert( (*ptr)==SQLITE_MFS_EXCLUSIVE ); - *ptr = 0; - }else{ - u32 iOld; - do { - iOld = *ptr; - if( iOld>0 ){ - while( i>iFirst ){ - i--; - pShmNode->aMFSlot[i].nLock = 0; - } - return SQLITE_BUSY; - } - }while( 0==unixCompareAndSwap(ptr, iOld, SQLITE_MFS_EXCLUSIVE) ); - } - } - if( flags & SQLITE_SHM_UNLOCK ){ - p->exclMask &= ~mask; - }else{ - p->exclMask |= mask; - } - } - } - - return SQLITE_OK; -} -#else -# define unixMutexFreeShmlock(a,b,c,d) SQLITE_OK -#endif - /* ** Check that the pShmNode->aLock[] array comports with the locking bitmasks ** held by each client. Return true if it does, or false otherwise. This ** is to be used in an assert(). e.g. ** @@ -5080,15 +4934,10 @@ || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED) || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); assert( pShmNode->hShm>=0 || pDbFd->pInode->bProcessLock==1 ); assert( pShmNode->hShm<0 || pDbFd->pInode->bProcessLock==0 ); - - if( pDbFd->pInode->bProcessLock ){ - return unixMutexFreeShmlock(pDbFd, ofst, n, flags); - } - /* Check that, if this to be a blocking lock, no locks that occur later ** in the following list than the lock being obtained are already held: ** ** 1. Checkpointer lock (ofst==1). 
@@ -5197,20 +5046,16 @@ */ static void unixShmBarrier( sqlite3_file *fd /* Database file holding the shared memory */ ){ UNUSED_PARAMETER(fd); -#ifdef SQLITE_MUTEXFREE_SHMLOCK - __sync_synchronize(); -#else sqlite3MemoryBarrier(); /* compiler-defined memory barrier */ assert( fd->pMethods->xLock==nolockLock || unixFileMutexNotheld((unixFile*)fd) ); unixEnterMutex(); /* Also mutex, for redundancy */ unixLeaveMutex(); -#endif } /* ** Close a connection to shared-memory. Delete the underlying ** storage if deleteFlag is true. @@ -5275,13 +5120,10 @@ /* ** If it is currently memory mapped, unmap file pFd. */ static void unixUnmapfile(unixFile *pFd){ assert( pFd->nFetchOut==0 ); -#ifdef SQLITE_SHARED_MAPPING - if( pFd->pInode ) return; -#endif if( pFd->pMapRegion ){ osMunmap(pFd->pMapRegion, pFd->mmapSizeActual); pFd->pMapRegion = 0; pFd->mmapSize = 0; pFd->mmapSizeActual = 0; @@ -5409,32 +5251,10 @@ } if( nMap>pFd->mmapSizeMax ){ nMap = pFd->mmapSizeMax; } -#ifdef SQLITE_SHARED_MAPPING - if( pFd->pInode ){ - unixInodeInfo *pInode = pFd->pInode; - if( pFd->pMapRegion ) return SQLITE_OK; - unixEnterMutex(); - if( pInode->pSharedMapping==0 ){ - u8 *pNew = osMmap(0, nMap, PROT_READ, MAP_SHARED, pFd->h, 0); - if( pNew==MAP_FAILED ){ - unixLogError(SQLITE_OK, "mmap", pFd->zPath); - pFd->mmapSizeMax = 0; - }else{ - pInode->pSharedMapping = pNew; - pInode->nSharedMapping = nMap; - } - } - pFd->pMapRegion = pInode->pSharedMapping; - pFd->mmapSizeActual = pFd->mmapSize = pInode->nSharedMapping; - unixLeaveMutex(); - return SQLITE_OK; - } -#endif - assert( nMap>0 || (pFd->mmapSize==0 && pFd->pMapRegion==0) ); if( nMap!=pFd->mmapSize ){ unixRemapfile(pFd, nMap); } @@ -5869,13 +5689,10 @@ if( pLockingStyle == &posixIoMethods #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE || pLockingStyle == &nfsIoMethods #endif -#ifdef SQLITE_SHARED_MAPPING - || pLockingStyle == &nolockIoMethods -#endif ){ unixEnterMutex(); rc = findInodeInfo(pNew, &pNew->pInode); if( rc!=SQLITE_OK ){ /* If an error occurred in findInodeInfo(), close the file descriptor @@ -8249,10 +8066,13 @@ 0==strcmp(aVfs[i].zName,SQLITE_DEFAULT_UNIX_VFS)); #else sqlite3_vfs_register(&aVfs[i], i==0); #endif } +#ifdef SQLITE_OS_KV_OPTIONAL + sqlite3KvvfsInit(); +#endif unixBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); #ifndef SQLITE_OMIT_WAL /* Validate lock assumptions */ assert( SQLITE_SHM_NLOCK==8 ); /* Number of available locks */ Index: src/os_win.c ================================================================== --- src/os_win.c +++ src/os_win.c @@ -4723,13 +4723,14 @@ } return 0; } /* -** If sqlite3_temp_directory is not, take the mutex and return true. +** If sqlite3_temp_directory is defined, take the mutex and return true. ** -** If sqlite3_temp_directory is NULL, omit the mutex and return false. +** If sqlite3_temp_directory is NULL (undefined), omit the mutex and +** return false. */ static int winTempDirDefined(void){ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR)); if( sqlite3_temp_directory!=0 ) return 1; sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR)); Index: src/pager.c ================================================================== --- src/pager.c +++ src/pager.c @@ -19,11 +19,10 @@ ** another is writing. 
*/ #ifndef SQLITE_OMIT_DISKIO #include "sqliteInt.h" #include "wal.h" -#include "vdbeInt.h" /******************* NOTES ON THE DESIGN OF THE PAGER ************************ ** ** This comment block describes invariants that hold when using a rollback @@ -657,13 +656,10 @@ int errCode; /* One of several kinds of errors */ int nRec; /* Pages journalled since last j-header written */ u32 cksumInit; /* Quasi-random value added to every checksum */ u32 nSubRec; /* Number of records written to sub-journal */ Bitvec *pInJournal; /* One bit for each page in the database file */ -#ifndef SQLITE_OMIT_CONCURRENT - Bitvec *pAllRead; /* Pages read within current CONCURRENT trans. */ -#endif sqlite3_file *fd; /* File descriptor for database */ sqlite3_file *jfd; /* File descriptor for main journal */ sqlite3_file *sjfd; /* File descriptor for sub-journal */ i64 journalOff; /* Current write offset in the journal file */ i64 journalHdr; /* Byte offset to previous journal header */ @@ -702,11 +698,10 @@ PCache *pPCache; /* Pointer to page cache object */ #ifndef SQLITE_OMIT_WAL Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */ char *zWal; /* File name for write-ahead log */ #endif - u64 *aSchemaVersion; }; /* ** Indexes for use with Pager.aStat[]. The Pager.aStat[] array contains ** the values accessed by passing SQLITE_DBSTATUS_CACHE_HIT, CACHE_MISS @@ -792,10 +787,24 @@ # define USEFETCH(x) ((x)->bUseFetch) #else # define USEFETCH(x) 0 #endif +/* +** The argument to this macro is a file descriptor (type sqlite3_file*). +** Return 0 if it is not open, or non-zero (but not 1) if it is. +** +** This is so that expressions can be written as: +** +** if( isOpen(pPager->jfd) ){ ... +** +** instead of +** +** if( pPager->jfd->pMethods ){ ... +*/ +#define isOpen(pFd) ((pFd)->pMethods!=0) + #ifdef SQLITE_DIRECT_OVERFLOW_READ /* ** Return true if page pgno can be read directly from the database file ** by the b-tree layer. 
This is the case if: ** @@ -904,13 +913,11 @@ assert( p->eLock!=UNKNOWN_LOCK ); assert( pPager->errCode==SQLITE_OK ); if( !pagerUseWal(pPager) ){ assert( p->eLock>=RESERVED_LOCK ); } -#ifndef SQLITE_OMIT_CONCURRENT - assert( pPager->dbSize==pPager->dbOrigSize || pPager->pAllRead ); -#endif + assert( pPager->dbSize==pPager->dbOrigSize ); assert( pPager->dbOrigSize==pPager->dbFileSize ); assert( pPager->dbOrigSize==pPager->dbHintSize ); assert( pPager->setSuper==0 ); break; @@ -925,11 +932,10 @@ */ assert( p->eLock>=RESERVED_LOCK ); assert( isOpen(p->jfd) || p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_WAL - || p->journalMode==PAGER_JOURNALMODE_WAL2 ); } assert( pPager->dbOrigSize==pPager->dbFileSize ); assert( pPager->dbOrigSize==pPager->dbHintSize ); break; @@ -940,11 +946,10 @@ assert( !pagerUseWal(pPager) ); assert( p->eLock>=EXCLUSIVE_LOCK ); assert( isOpen(p->jfd) || p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_WAL - || p->journalMode==PAGER_JOURNALMODE_WAL2 || (sqlite3OsDeviceCharacteristics(p->fd)&SQLITE_IOCAP_BATCH_ATOMIC) ); assert( pPager->dbOrigSize<=pPager->dbHintSize ); break; @@ -953,11 +958,10 @@ assert( pPager->errCode==SQLITE_OK ); assert( !pagerUseWal(pPager) ); assert( isOpen(p->jfd) || p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_WAL - || p->journalMode==PAGER_JOURNALMODE_WAL2 || (sqlite3OsDeviceCharacteristics(p->fd)&SQLITE_IOCAP_BATCH_ATOMIC) ); break; case PAGER_ERROR: @@ -1794,57 +1798,10 @@ } } return rc; } -#ifndef SQLITE_OMIT_CONCURRENT -/* -** If they are not already, begin recording all pages read from the pager layer -** by the b-tree layer This is used by concurrent transactions. Return -** SQLITE_OK if successful, or an SQLite error code (SQLITE_NOMEM) if an error -** occurs. -*/ -int sqlite3PagerBeginConcurrent(Pager *pPager){ - int rc = SQLITE_OK; - if( pPager->pAllRead==0 ){ - pPager->pAllRead = sqlite3BitvecCreate(pPager->dbSize); - pPager->dbOrigSize = pPager->dbSize; - if( pPager->pAllRead==0 ){ - rc = SQLITE_NOMEM; - } - } - return rc; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** Stop recording all pages read from the pager layer by the b-tree layer -** and discard any current records. -*/ -void sqlite3PagerEndConcurrent(Pager *pPager){ - sqlite3BitvecDestroy(pPager->pAllRead); - pPager->pAllRead = 0; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** Return true if the database is in wal mode. False otherwise. -*/ -int sqlite3PagerIsWal(Pager *pPager){ - return pPager->pWal!=0; -} -#endif /* SQLITE_OMIT_CONCURRENT */ - -/* -** Free the Pager.pInJournal and Pager.pAllRead bitvec objects. -*/ -static void pagerFreeBitvecs(Pager *pPager){ - sqlite3BitvecDestroy(pPager->pInJournal); - pPager->pInJournal = 0; - sqlite3PagerEndConcurrent(pPager); -} - /* ** This function is a no-op if the pager is in exclusive mode and not ** in the ERROR state. Otherwise, it switches the pager to PAGER_OPEN ** state. 
** @@ -1865,11 +1822,12 @@ assert( pPager->eState==PAGER_READER || pPager->eState==PAGER_OPEN || pPager->eState==PAGER_ERROR ); - pagerFreeBitvecs(pPager); + sqlite3BitvecDestroy(pPager->pInJournal); + pPager->pInJournal = 0; releaseAllSavepoints(pPager); if( pagerUseWal(pPager) ){ assert( !isOpen(pPager->jfd) ); sqlite3WalEndReadTransaction(pPager->pWal); @@ -2098,11 +2056,11 @@ rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags); } } pPager->journalOff = 0; }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST - || (pPager->exclusiveMode && pPager->journalModeexclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) ){ rc = zeroJournalHdr(pPager, hasSuper||pPager->tempFile); pPager->journalOff = 0; }else{ /* This branch may be executed with Pager.journalMode==MEMORY if @@ -2112,12 +2070,11 @@ */ int bDelete = !pPager->tempFile; assert( sqlite3JournalIsInMemory(pPager->jfd)==0 ); assert( pPager->journalMode==PAGER_JOURNALMODE_DELETE || pPager->journalMode==PAGER_JOURNALMODE_MEMORY - || pPager->journalMode==PAGER_JOURNALMODE_WAL - || pPager->journalMode==PAGER_JOURNALMODE_WAL2 + || pPager->journalMode==PAGER_JOURNALMODE_WAL ); sqlite3OsClose(pPager->jfd); if( bDelete ){ rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, pPager->extraSync); } @@ -2133,11 +2090,12 @@ sqlite3PagerUnrefNotNull(p); } } #endif - pagerFreeBitvecs(pPager); + sqlite3BitvecDestroy(pPager->pInJournal); + pPager->pInJournal = 0; pPager->nRec = 0; if( rc==SQLITE_OK ){ if( MEMDB || pagerFlushOnCommit(pPager, bCommit) ){ sqlite3PcacheCleanAll(pPager->pPCache); }else{ @@ -3143,28 +3101,12 @@ ** ** + Discard the cached page (if refcount==0), or ** + Reload page content from the database (if refcount>0). */ pPager->dbSize = pPager->dbOrigSize; - rc = sqlite3WalUndo(pPager->pWal, pagerUndoCallback, (void *)pPager, -#ifdef SQLITE_OMIT_CONCURRENT - 0 -#else - pPager->pAllRead!=0 -#endif - ); + rc = sqlite3WalUndo(pPager->pWal, pagerUndoCallback, (void *)pPager); pList = sqlite3PcacheDirtyList(pPager->pPCache); - -#ifndef SQLITE_OMIT_CONCURRENT - /* If this is an CONCURRENT transaction, then page 1 must be reread from - ** the db file, even if it is not dirty. This is because the b-tree layer - ** may have already zeroed the nFree and iTrunk header fields. 
*/ - if( rc==SQLITE_OK && (pList==0 || pList->pgno!=1) && pPager->pAllRead ){ - rc = pagerUndoCallback((void*)pPager, 1); - } -#endif - while( pList && rc==SQLITE_OK ){ PgHdr *pNext = pList->pDirty; rc = pagerUndoCallback((void *)pPager, pList->pgno); pList = pNext; } @@ -3210,12 +3152,10 @@ nList = 0; for(p=pList; (*ppNext = p)!=0; p=p->pDirty){ if( p->pgno<=nTruncate ){ ppNext = &p->pDirty; nList++; - PAGERTRACE(("TO-WAL %d page %d hash(%08x)\n", - PAGERID(pPager), p->pgno, pager_pagehash(p))); } } assert( pList ); }else{ nList = 1; @@ -3265,21 +3205,11 @@ sqlite3WalEndReadTransaction(pPager->pWal); rc = sqlite3WalBeginReadTransaction(pPager->pWal, &changed); if( rc!=SQLITE_OK || changed ){ pager_reset(pPager); - if( pPager->aSchemaVersion ){ - pPager->aSchemaVersion[SCHEMA_VERSION_AFTERRESET] = sqlite3STimeNow(); - } if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0); - if( pPager->aSchemaVersion ){ - pPager->aSchemaVersion[SCHEMA_VERSION_AFTERUNFETCH] = sqlite3STimeNow(); - } - assert( pPager->journalMode==PAGER_JOURNALMODE_WAL - || pPager->journalMode==PAGER_JOURNALMODE_WAL2 - ); - pPager->journalMode = sqlite3WalJournalMode(pPager->pWal); } return rc; } #endif @@ -3371,13 +3301,13 @@ if( rc ) return rc; if( nPage==0 ){ rc = sqlite3OsDelete(pPager->pVfs, pPager->zWal, 0); }else{ testcase( sqlite3PcachePagecount(pPager->pPCache)==0 ); - rc = sqlite3PagerOpenWal(pPager, 0, 0); + rc = sqlite3PagerOpenWal(pPager, 0); } - }else if( pPager->journalMode>=PAGER_JOURNALMODE_WAL ){ + }else if( pPager->journalMode==PAGER_JOURNALMODE_WAL ){ pPager->journalMode = PAGER_JOURNALMODE_DELETE; } } } return rc; @@ -4293,11 +4223,11 @@ || pPager->eState==PAGER_WRITER_DBMOD ); assert( assert_pager_state(pPager) ); assert( !pagerUseWal(pPager) ); - rc = sqlite3PagerExclusiveLock(pPager, 0, 0); + rc = sqlite3PagerExclusiveLock(pPager); if( rc!=SQLITE_OK ) return rc; if( !pPager->noSync ){ assert( !pPager->tempFile ); if( isOpen(pPager->jfd) && pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){ @@ -4644,16 +4574,10 @@ } pPager->aStat[PAGER_STAT_SPILL]++; pPg->pDirty = 0; if( pagerUseWal(pPager) ){ -#ifndef SQLITE_OMIT_CONCURRENT - /* If the transaction is a "BEGIN CONCURRENT" transaction, the page - ** cannot be flushed to disk. Return early in this case. */ - if( pPager->pAllRead ) return SQLITE_OK; -#endif - /* Write a single frame for this page to the log. 
*/ rc = subjournalPageIfRequired(pPg); if( rc==SQLITE_OK ){ rc = pagerWalFrames(pPager, pPg, 0, 0); } @@ -4890,11 +4814,10 @@ nPathname + 1 + /* database filename */ nUriByte + /* query parameters */ nPathname + 8 + 1 + /* Journal filename */ #ifndef SQLITE_OMIT_WAL nPathname + 4 + 1 + /* WAL filename */ - nPathname + 5 + 1 + /* Second WAL filename */ #endif 3 /* Terminator */ ); assert( EIGHT_BYTE_ALIGNMENT(SQLITE_INT_TO_PTR(journalFileSize)) ); if( !pPtr ){ @@ -4943,12 +4866,10 @@ memcpy(pPtr, "-wal", 4); pPtr += 4 + 1; #ifdef SQLITE_ENABLE_8_3_NAMES sqlite3FileSuffix3(zFilename, pPager->zWal); pPtr = (u8*)(pPager->zWal + sqlite3Strlen30(pPager->zWal)+1); #endif - memcpy(pPtr, zPathname, nPathname); pPtr += nPathname; - memcpy(pPtr, "-wal2", 5); pPtr += 5 + 1; }else{ pPager->zWal = 0; } #endif (void)pPtr; /* Suppress warning about unused pPtr value */ @@ -5572,22 +5493,10 @@ assert( pPager->errCode==SQLITE_OK ); assert( pPager->eState>=PAGER_READER ); assert( assert_pager_state(pPager) ); assert( pPager->hasHeldSharedLock==1 ); -#ifndef SQLITE_OMIT_CONCURRENT - /* If this is an CONCURRENT transaction and the page being read was - ** present in the database file when the transaction was opened, - ** mark it as read in the pAllRead vector. */ - pPg = 0; - if( pPager->pAllRead && pgno<=pPager->dbOrigSize ){ - PAGERTRACE(("USING page %d\n", pgno)); - rc = sqlite3BitvecSet(pPager->pAllRead, pgno); - if( rc!=SQLITE_OK ) goto pager_acquire_err; - } -#endif - if( pgno==0 ) return SQLITE_CORRUPT_BKPT; pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3); if( pBase==0 ){ pPg = 0; rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase); @@ -5600,14 +5509,10 @@ pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase); assert( pPg==(*ppPage) ); assert( pPg->pgno==pgno ); assert( pPg->pPager==pPager || pPg->pPager==0 ); - if( pPager->aSchemaVersion ){ - pPager->aSchemaVersion[SCHEMA_VERSION_AFTERPCACHE] = sqlite3STimeNow(); - } - noContent = (flags & PAGER_GET_NOCONTENT)!=0; if( pPg->pPager && !noContent ){ /* In this case the pcache already contains an initialized copy of ** the page. Return without further ado. */ assert( pgno!=PAGER_SJ_PGNO(pPager) ); @@ -5928,18 +5833,15 @@ /* ** Begin a write-transaction on the specified pager object. If a ** write-transaction has already been opened, this function is a no-op. ** -** If the exFlag argument is 0, then acquire at least a RESERVED -** lock on the database file. If exFlag is >0, then acquire at least +** If the exFlag argument is false, then acquire at least a RESERVED +** lock on the database file. If exFlag is true, then acquire at least ** an EXCLUSIVE lock. If such a lock is already held, no locking ** functions need be called. ** -** If (exFlag<0) and the database is in WAL mode, do not take any locks. -** The transaction will run in CONCURRENT mode instead. -** ** If the subjInMemory argument is non-zero, then any sub-journal opened ** within this transaction will be opened as an in-memory file. This ** has no effect if the sub-journal is already opened (as it may be when ** running in exclusive mode) or if the transaction does not require a ** sub-journal. 
If the subjInMemory argument is zero, then any required @@ -5953,10 +5855,11 @@ assert( pPager->eState>=PAGER_READER && pPager->eStatesubjInMemory = (u8)subjInMemory; if( pPager->eState==PAGER_READER ){ assert( pPager->pInJournal==0 ); + if( pagerUseWal(pPager) ){ /* If the pager is configured to use locking_mode=exclusive, and an ** exclusive lock on the database is not already held, obtain it now. */ if( pPager->exclusiveMode && sqlite3WalExclusiveMode(pPager->pWal, -1) ){ @@ -5968,22 +5871,21 @@ } /* Grab the write lock on the log file. If successful, upgrade to ** PAGER_RESERVED state. Otherwise, return an error code to the caller. ** The busy-handler is not invoked if another connection already - ** holds the write-lock. If possible, the upper layer will call it. */ - if( exFlag>=0 ){ - rc = sqlite3WalBeginWriteTransaction(pPager->pWal); - } + ** holds the write-lock. If possible, the upper layer will call it. + */ + rc = sqlite3WalBeginWriteTransaction(pPager->pWal); }else{ /* Obtain a RESERVED lock on the database file. If the exFlag parameter ** is true, then immediately upgrade this to an EXCLUSIVE lock. The ** busy-handler callback can be used when upgrading to the EXCLUSIVE ** lock, but not when obtaining the RESERVED lock. */ rc = pagerLockDb(pPager, RESERVED_LOCK); - if( rc==SQLITE_OK && exFlag>0 ){ + if( rc==SQLITE_OK && exFlag ){ rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); } } if( rc==SQLITE_OK ){ @@ -6279,11 +6181,11 @@ /* ** Return TRUE if the page given in the argument was previously passed ** to sqlite3PagerWrite(). In other words, return TRUE if it is ok ** to change the content of the page. */ -#if !defined(SQLITE_OMIT_CONCURRENT) || !defined(NDEBUG) +#ifndef NDEBUG int sqlite3PagerIswriteable(DbPage *pPg){ return pPg->flags & PGHDR_WRITEABLE; } #endif @@ -6435,30 +6337,21 @@ } return rc; } /* -** This function is called to ensure that all locks required to commit the -** current write-transaction to the database file are held. If the db is -** in rollback mode, this means the EXCLUSIVE lock on the database file. -** -** Or, if this is a non-CONCURRENT transaction on a wal-mode database, this -** function is a no-op. -** -** If this is an CONCURRENT transaction on a wal-mode database, this function -** attempts to obtain the WRITER lock on the wal file and also checks to -** see that the transaction can be safely committed (does not commit with -** any other transaction committed since it was opened). -** -** If the required locks are already held or successfully obtained and -** the transaction can be committed, SQLITE_OK is returned. If a required lock -** cannot be obtained, SQLITE_BUSY is returned. Or, if the current transaction -** is CONCURRENT and cannot be committed due to a conflict, SQLITE_BUSY_SNAPSHOT -** is returned. Otherwise, if some other error occurs (IO error, OOM etc.), -** and SQLite error code is returned. -*/ -int sqlite3PagerExclusiveLock(Pager *pPager, PgHdr *pPage1, Pgno *piConflict){ +** This function may only be called while a write-transaction is active in +** rollback. If the connection is in WAL mode, this call is a no-op. +** Otherwise, if the connection does not already have an EXCLUSIVE lock on +** the database file, an attempt is made to obtain one. +** +** If the EXCLUSIVE lock is already held or the attempt to obtain it is +** successful, or the connection is in WAL mode, SQLITE_OK is returned. +** Otherwise, either SQLITE_BUSY or an SQLITE_IOERR_XXX error code is +** returned. 
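+**
+** In rollback mode the journal-sync logic earlier in this file obtains
+** this lock before syncing the journal, roughly:
+**
+**     rc = sqlite3PagerExclusiveLock(pPager);
+**     if( rc!=SQLITE_OK ) return rc;
+**     if( !pPager->noSync ){
+**       /* ... sync the journal, then continue with the commit ... */
+**     }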
+*/ +int sqlite3PagerExclusiveLock(Pager *pPager){ int rc = pPager->errCode; assert( assert_pager_state(pPager) ); if( rc==SQLITE_OK ){ assert( pPager->eState==PAGER_WRITER_CACHEMOD || pPager->eState==PAGER_WRITER_DBMOD @@ -6466,76 +6359,13 @@ ); assert( assert_pager_state(pPager) ); if( 0==pagerUseWal(pPager) ){ rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); } -#ifndef SQLITE_OMIT_CONCURRENT - else{ - if( pPager->pAllRead ){ - /* This is an CONCURRENT transaction. Attempt to lock the wal database - ** here. If SQLITE_BUSY (but not SQLITE_BUSY_SNAPSHOT) is returned, - ** invoke the busy-handler and try again for as long as it returns - ** non-zero. */ - do { - rc = sqlite3WalLockForCommit( - pPager->pWal, pPage1, pPager->pAllRead, piConflict - ); - }while( rc==SQLITE_BUSY - && pPager->xBusyHandler(pPager->pBusyHandlerArg) - ); - } - } -#endif /* SQLITE_OMIT_CONCURRENT */ - } - return rc; -} - -#ifndef SQLITE_OMIT_CONCURRENT -/* -** This function is called as part of committing an CONCURRENT transaction. -** At this point the wal WRITER lock is held, and all pages in the cache -** except for page 1 are compatible with the snapshot at the head of the -** wal file. -** -** This function updates the in-memory data structures and reloads the -** contents of page 1 so that the client is operating on the snapshot -** at the head of the wal file. -** -** SQLITE_OK is returned if successful, or an SQLite error code otherwise. -*/ -int sqlite3PagerUpgradeSnapshot(Pager *pPager, DbPage *pPage1){ - int rc; - - assert( pPager->pWal && pPager->pAllRead ); - rc = sqlite3WalUpgradeSnapshot(pPager->pWal); - if( rc==SQLITE_OK ){ - rc = readDbPage(pPage1); - } - - return rc; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** Set the in-memory cache of the database file size to nSz pages. -*/ -void sqlite3PagerSetDbsize(Pager *pPager, Pgno nSz){ - pPager->dbSize = nSz; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** If this is a WAL mode connection and the WRITER lock is currently held, -** relinquish it. -*/ -void sqlite3PagerDropExclusiveLock(Pager *pPager){ - if( pagerUseWal(pPager) ){ - sqlite3WalEndWriteTransaction(pPager->pWal); - } -} -#endif /* SQLITE_OMIT_CONCURRENT */ - + } + return rc; +} /* ** Sync the database file for the pager pPager. zSuper points to the name ** of a super-journal file that should be written into the individual ** journal file. zSuper may be NULL, which is interpreted as no @@ -7453,12 +7283,11 @@ assert( eMode==PAGER_JOURNALMODE_DELETE /* 0 */ || eMode==PAGER_JOURNALMODE_PERSIST /* 1 */ || eMode==PAGER_JOURNALMODE_OFF /* 2 */ || eMode==PAGER_JOURNALMODE_TRUNCATE /* 3 */ || eMode==PAGER_JOURNALMODE_MEMORY /* 4 */ - || eMode==PAGER_JOURNALMODE_WAL /* 5 */ - || eMode==PAGER_JOURNALMODE_WAL2 /* 6 */ ); + || eMode==PAGER_JOURNALMODE_WAL /* 5 */ ); /* This routine is only called from the OP_JournalMode opcode, and ** the logic there will never allow a temporary file to be changed ** to WAL mode. 
*/ @@ -7488,16 +7317,13 @@ assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 ); assert( (PAGER_JOURNALMODE_DELETE & 5)==0 ); assert( (PAGER_JOURNALMODE_MEMORY & 5)==4 ); assert( (PAGER_JOURNALMODE_OFF & 5)==0 ); assert( (PAGER_JOURNALMODE_WAL & 5)==5 ); - assert( (PAGER_JOURNALMODE_WAL2 & 5)==4 ); assert( isOpen(pPager->fd) || pPager->exclusiveMode ); - if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 - && eMode!=PAGER_JOURNALMODE_WAL2 /* TODO: fix this if possible */ - ){ + if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 ){ /* In this case we would like to delete the journal file. If it is ** not possible, then that is not a problem. Deleting the journal file ** here is an optimization only. ** ** Before deleting the journal file, obtain a RESERVED lock on the @@ -7666,11 +7492,11 @@ ** Call sqlite3WalOpen() to open the WAL handle. If the pager is in ** exclusive-locking mode when this function is called, take an EXCLUSIVE ** lock on the database file and use heap-memory to store the wal-index ** in. Otherwise, use the normal shared-memory. */ -static int pagerOpenWal(Pager *pPager, int bWal2){ +static int pagerOpenWal(Pager *pPager){ int rc = SQLITE_OK; assert( pPager->pWal==0 && pPager->tempFile==0 ); assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); @@ -7687,11 +7513,11 @@ ** (e.g. due to malloc() failure), return an error code. */ if( rc==SQLITE_OK ){ rc = sqlite3WalOpen(pPager->pVfs, pPager->fd, pPager->zWal, pPager->exclusiveMode, - pPager->journalSizeLimit, bWal2, &pPager->pWal + pPager->journalSizeLimit, &pPager->pWal ); } pagerFixMaplimit(pPager); return rc; @@ -7713,11 +7539,10 @@ ** the WAL file is already open, set *pbOpen to 1 and return SQLITE_OK ** without doing anything. */ int sqlite3PagerOpenWal( Pager *pPager, /* Pager object */ - int bWal2, /* Open in wal2 mode if not already open */ int *pbOpen /* OUT: Set to true if call is a no-op */ ){ int rc = SQLITE_OK; /* Return code */ assert( assert_pager_state(pPager) ); @@ -7730,13 +7555,13 @@ if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN; /* Close any rollback journal previously open */ sqlite3OsClose(pPager->jfd); - rc = pagerOpenWal(pPager, bWal2); + rc = pagerOpenWal(pPager); if( rc==SQLITE_OK ){ - pPager->journalMode = bWal2?PAGER_JOURNALMODE_WAL2:PAGER_JOURNALMODE_WAL; + pPager->journalMode = PAGER_JOURNALMODE_WAL; pPager->eState = PAGER_OPEN; } }else{ *pbOpen = 1; } @@ -7754,13 +7579,11 @@ ** If successful, the EXCLUSIVE lock is not released before returning. */ int sqlite3PagerCloseWal(Pager *pPager, sqlite3 *db){ int rc = SQLITE_OK; - assert( pPager->journalMode==PAGER_JOURNALMODE_WAL - || pPager->journalMode==PAGER_JOURNALMODE_WAL2 - ); + assert( pPager->journalMode==PAGER_JOURNALMODE_WAL ); /* If the log file is not already open, but does exist in the file-system, ** it may need to be checkpointed before the connection can switch to ** rollback mode. Open it now so this can happen. */ @@ -7771,11 +7594,11 @@ rc = sqlite3OsAccess( pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &logexists ); } if( rc==SQLITE_OK && logexists ){ - rc = pagerOpenWal(pPager, 0); + rc = pagerOpenWal(pPager); } } /* Checkpoint and close the log. Because an EXCLUSIVE lock is held on ** the database file, the log and log-summary files will be deleted. 
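With wal2 removed, the journal_mode pragma on this branch is back to the five rollback modes plus "wal". The sketch below switches modes and checks the string actually returned, since "PRAGMA journal_mode" reports the mode that took effect rather than the one requested. It is illustrative only; the database name "test.db" and the show_mode() helper are placeholders, not anything from the patch.

/* Illustrative sketch: request a journal mode and print what was granted. */
#include <stdio.h>
#include "sqlite3.h"

static void show_mode(sqlite3 *db, const char *zSet){
  sqlite3_stmt *pStmt = 0;
  char *zSql = sqlite3_mprintf("PRAGMA journal_mode=%s", zSet);
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("requested %-8s -> got %s\n",
             zSet, (const char*)sqlite3_column_text(pStmt, 0));
    }
    sqlite3_finalize(pStmt);
  }
  sqlite3_free(zSql);
}

int main(void){
  sqlite3 *db;
  sqlite3_open("test.db", &db);
  show_mode(db, "wal");       /* write-ahead logging */
  show_mode(db, "truncate");  /* back to a rollback journal */
  show_mode(db, "delete");
  sqlite3_close(db);
  return 0;
}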
@@ -7894,15 +7717,10 @@ assert( pPager->pWal ); sqlite3WalSnapshotUnlock(pPager->pWal); } #endif /* SQLITE_ENABLE_SNAPSHOT */ - -int sqlite3PagerWalInfo(Pager *pPager, u32 *pnPrior, u32 *pnFrame){ - return sqlite3WalInfo(pPager->pWal, pnPrior, pnFrame); -} - #endif /* !SQLITE_OMIT_WAL */ #ifdef SQLITE_ENABLE_ZIPVFS /* ** A read-lock must be held on the pager when this function is called. If @@ -7915,11 +7733,6 @@ assert( pPager->eState>=PAGER_READER ); return sqlite3WalFramesize(pPager->pWal); } #endif -void sqlite3PagerIsSchemaVersion(Pager *pPager, u64 *a){ - pPager->aSchemaVersion = a; - sqlite3WalIsSchemaVersion(pPager->pWal, a); -} - #endif /* SQLITE_OMIT_DISKIO */ Index: src/pager.h ================================================================== --- src/pager.h +++ src/pager.h @@ -80,27 +80,10 @@ #define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */ #define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */ #define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */ #define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */ #define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */ -#define PAGER_JOURNALMODE_WAL2 6 /* Use write-ahead logging mode 2 */ - -#define isWalMode(x) ((x)==PAGER_JOURNALMODE_WAL || (x)==PAGER_JOURNALMODE_WAL2) - -/* -** The argument to this macro is a file descriptor (type sqlite3_file*). -** Return 0 if it is not open, or non-zero (but not 1) if it is. -** -** This is so that expressions can be written as: -** -** if( isOpen(pPager->jfd) ){ ... -** -** instead of -** -** if( pPager->jfd->pMethods ){ ... -*/ -#define isOpen(pFd) ((pFd)->pMethods!=0) /* ** Flags that make up the mask passed to sqlite3PagerGet(). */ #define PAGER_GET_NOCONTENT 0x01 /* Do not load data from disk */ @@ -178,24 +161,23 @@ /* Functions used to manage pager transactions and savepoints. */ void sqlite3PagerPagecount(Pager*, int*); int sqlite3PagerBegin(Pager*, int exFlag, int); int sqlite3PagerCommitPhaseOne(Pager*,const char *zSuper, int); -int sqlite3PagerExclusiveLock(Pager*, DbPage *pPage1, Pgno*); +int sqlite3PagerExclusiveLock(Pager*); int sqlite3PagerSync(Pager *pPager, const char *zSuper); int sqlite3PagerCommitPhaseTwo(Pager*); int sqlite3PagerRollback(Pager*); int sqlite3PagerOpenSavepoint(Pager *pPager, int n); int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint); int sqlite3PagerSharedLock(Pager *pPager); - #ifndef SQLITE_OMIT_WAL int sqlite3PagerCheckpoint(Pager *pPager, sqlite3*, int, int*, int*); int sqlite3PagerWalSupported(Pager *pPager); int sqlite3PagerWalCallback(Pager *pPager); - int sqlite3PagerOpenWal(Pager *pPager, int, int *pisOpen); + int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen); int sqlite3PagerCloseWal(Pager *pPager, sqlite3*); # ifdef SQLITE_ENABLE_SNAPSHOT int sqlite3PagerSnapshotGet(Pager*, sqlite3_snapshot **ppSnapshot); int sqlite3PagerSnapshotOpen(Pager*, sqlite3_snapshot *pSnapshot); int sqlite3PagerSnapshotRecover(Pager *pPager); @@ -241,30 +223,14 @@ /* Functions used to truncate the database file. 
*/ void sqlite3PagerTruncateImage(Pager*,Pgno); void sqlite3PagerRekey(DbPage*, Pgno, u16); -#ifndef SQLITE_OMIT_CONCURRENT -void sqlite3PagerEndConcurrent(Pager*); -int sqlite3PagerBeginConcurrent(Pager*); -void sqlite3PagerDropExclusiveLock(Pager*); -int sqlite3PagerUpgradeSnapshot(Pager *pPager, DbPage*); -void sqlite3PagerSetDbsize(Pager *pPager, Pgno); -int sqlite3PagerIsWal(Pager*); -#else -# define sqlite3PagerEndConcurrent(x) -#endif - -#if defined(SQLITE_DEBUG) || !defined(SQLITE_OMIT_CONCURRENT) -int sqlite3PagerIswriteable(DbPage*); -#endif - -int sqlite3PagerWalInfo(Pager*, u32 *pnPrior, u32 *pnFrame); - /* Functions to support testing and debugging. */ #if !defined(NDEBUG) || defined(SQLITE_TEST) Pgno sqlite3PagerPagenumber(DbPage*); + int sqlite3PagerIswriteable(DbPage*); #endif #ifdef SQLITE_TEST int *sqlite3PagerStats(Pager*); void sqlite3PagerRefdump(Pager*); void disable_simulated_io_errors(void); @@ -272,8 +238,6 @@ #else # define disable_simulated_io_errors() # define enable_simulated_io_errors() #endif -void sqlite3PagerIsSchemaVersion(Pager*, u64*); - #endif /* SQLITE_PAGER_H */ Index: src/parse.y ================================================================== --- src/parse.y +++ src/parse.y @@ -103,17 +103,10 @@ ** ** Then the "b" IdList records the list "a,b,c". */ struct TrigEvent { int a; IdList * b; }; -/* -** Generate a syntax error -*/ -static void parserSyntaxError(Parse *pParse, Token *p){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", p); -} - struct FrameBound { int eType; Expr *pExpr; }; /* ** Disable lookaside memory allocation for objects that might be ** shared across database connections. @@ -169,20 +162,11 @@ trans_opt ::= TRANSACTION nm. %type transtype {int} transtype(A) ::= . {A = TK_DEFERRED;} transtype(A) ::= DEFERRED(X). {A = @X; /*A-overwrites-X*/} transtype(A) ::= IMMEDIATE(X). {A = @X; /*A-overwrites-X*/} -transtype(A) ::= ID(X). { - Token *p = &X; - if( p->n==9 && sqlite3_strnicmp(p->z,"exclusive",9)==0 ){ - A = TK_EXCLUSIVE; - }else if( p->n==10 && sqlite3_strnicmp(p->z,"concurrent",10)==0 ){ - A = TK_CONCURRENT; /*A-overwrites-X*/ - }else{ - parserSyntaxError(pParse, p); - } -} +transtype(A) ::= EXCLUSIVE(X). {A = @X; /*A-overwrites-X*/} cmd ::= COMMIT|END(X) trans_opt. {sqlite3EndTransaction(pParse,@X);} cmd ::= ROLLBACK(X) trans_opt. {sqlite3EndTransaction(pParse,@X);} savepoint_opt ::= SAVEPOINT. savepoint_opt ::= . @@ -309,10 +293,11 @@ // An IDENTIFIER can be a generic identifier, or one of several // keywords. Any non-standard keyword can also be an identifier. // %token_class id ID|INDEXED. + // And "ids" is an identifer-or-string. // %token_class ids ID|STRING. @@ -1132,11 +1117,11 @@ ** that look like this: #1 #2 ... These terms refer to registers ** in the virtual machine. #N is the N-th register. 
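With the transtype rules above reduced to the DEFERRED, IMMEDIATE and EXCLUSIVE keywords, and the ID-based rule that special-cased "concurrent" removed, BEGIN CONCURRENT is expected to be rejected as a syntax error again on this branch. The sketch below simply probes which BEGIN variants the linked library will prepare; it is illustrative only and assumes nothing beyond the public prepare API.

/* Illustrative sketch: probe which BEGIN variants the linked library parses. */
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  static const char *azBegin[] = {
    "BEGIN DEFERRED", "BEGIN IMMEDIATE", "BEGIN EXCLUSIVE", "BEGIN CONCURRENT"
  };
  sqlite3 *db;
  int i;
  sqlite3_open(":memory:", &db);
  for(i=0; i<4; i++){
    sqlite3_stmt *pStmt = 0;
    int rc = sqlite3_prepare_v2(db, azBegin[i], -1, &pStmt, 0);
    printf("%-18s -> %s\n", azBegin[i],
           rc==SQLITE_OK ? "parses" : sqlite3_errmsg(db));
    sqlite3_finalize(pStmt);   /* harmless if pStmt is NULL */
  }
  sqlite3_close(db);
  return 0;
}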
*/ Token t = X; /*A-overwrites-X*/ assert( t.n>=2 ); if( pParse->nested==0 ){ - parserSyntaxError(pParse, &t); + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); A = 0; }else{ A = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); if( A ) sqlite3GetInt32(&t.z[1], &A->iTable); } @@ -1914,11 +1899,10 @@ FUNCTION /* A function invocation */ UMINUS /* Unary minus */ UPLUS /* Unary plus */ TRUTH /* IS TRUE or IS FALSE or IS NOT TRUE or IS NOT FALSE */ REGISTER /* Reference to a VDBE register */ - CONCURRENT /* BEGIN CONCURRENT */ VECTOR /* Vector */ SELECT_COLUMN /* Choose a single column from a multi-column SELECT */ IF_NULL_ROW /* the if-null-row operator */ ASTERISK /* The "*" in count(*) and similar */ SPAN /* The span operator */ Index: src/pragma.c ================================================================== --- src/pragma.c +++ src/pragma.c @@ -260,20 +260,19 @@ */ const char *sqlite3JournalModename(int eMode){ static char * const azModeName[] = { "delete", "persist", "off", "truncate", "memory" #ifndef SQLITE_OMIT_WAL - , "wal", "wal2" + , "wal" #endif }; assert( PAGER_JOURNALMODE_DELETE==0 ); assert( PAGER_JOURNALMODE_PERSIST==1 ); assert( PAGER_JOURNALMODE_OFF==2 ); assert( PAGER_JOURNALMODE_TRUNCATE==3 ); assert( PAGER_JOURNALMODE_MEMORY==4 ); assert( PAGER_JOURNALMODE_WAL==5 ); - assert( PAGER_JOURNALMODE_WAL2==6 ); assert( eMode>=0 && eMode<=ArraySize(azModeName) ); if( eMode==ArraySize(azModeName) ) return 0; return azModeName[eMode]; } @@ -475,11 +474,17 @@ goto pragma_out; } /* Make sure the database schema is loaded if the pragma requires that */ if( (pPragma->mPragFlg & PragFlg_NeedSchema)!=0 ){ - if( sqlite3ReadSchema(pParse) ) goto pragma_out; + if( IsSharedSchema(db) && (zDb || (pPragma->mPragFlg & PragFlg_OneSchema)) ){ + assert( iDb>=0 && iDbnDb ); + pParse->rc = sqlite3SchemaLoad(db, iDb, 0, &pParse->zErrMsg); + if( pParse->rc ) goto pragma_out; + }else{ + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + } } /* Register the result column names for pragmas that return results */ if( (pPragma->mPragFlg & PragFlg_NoColumns)==0 && ((pPragma->mPragFlg & PragFlg_NoColumns1)==0 || zRight==0) @@ -1751,12 +1756,13 @@ Index *pIdx, *pPk; Index *pPrior = 0; /* Previous index */ int loopTop; int iDataCur, iIdxCur; int r1 = -1; - int bStrict; + int bStrict; /* True for a STRICT table */ int r2; /* Previous key for WITHOUT ROWID tables */ + int mxCol; /* Maximum non-virtual column number */ if( !IsOrdinaryTable(pTab) ) continue; if( pObjTab && pObjTab!=pTab ) continue; if( isQuick || HasRowid(pTab) ){ pPk = 0; @@ -1777,15 +1783,26 @@ } assert( pParse->nMem>=8+j ); assert( sqlite3NoTempsInRange(pParse,1,7+j) ); sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v); loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1); + + /* Fetch the right-most column from the table. This will cause + ** the entire record header to be parsed and sanity checked. It + ** will also prepopulate the cursor column cache that is used + ** by the OP_IsType code, so it is a required step. 
+ */ + mxCol = pTab->nCol-1; + while( mxCol>=0 + && ((pTab->aCol[mxCol].colFlags & COLFLAG_VIRTUAL)!=0 + || pTab->iPKey==mxCol) ) mxCol--; + if( mxCol>=0 ){ + sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, mxCol, 3); + sqlite3VdbeTypeofColumn(v, 3); + } + if( !isQuick ){ - /* Sanity check on record header decoding */ - sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nNVCol-1,3); - sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); - VdbeComment((v, "(right-most column)")); if( pPk ){ /* Verify WITHOUT ROWID keys are in ascending order */ int a1; char *zErr; a1 = sqlite3VdbeAddOp4Int(v, OP_IdxGT, iDataCur, 0,r2,pPk->nKeyCol); @@ -1801,48 +1818,126 @@ for(j=0; jnKeyCol; j++){ sqlite3ExprCodeLoadIndexColumn(pParse, pPk, iDataCur, j, r2+j); } } } - /* Verify that all NOT NULL columns really are NOT NULL. At the - ** same time verify the type of the content of STRICT tables */ + /* Verify datatypes for all columns: + ** + ** (1) NOT NULL columns may not contain a NULL + ** (2) Datatype must be exact for non-ANY columns in STRICT tables + ** (3) Datatype for TEXT columns in non-STRICT tables must be + ** NULL, TEXT, or BLOB. + ** (4) Datatype for numeric columns in non-STRICT tables must not + ** be a TEXT value that can be losslessly converted to numeric. + */ bStrict = (pTab->tabFlags & TF_Strict)!=0; for(j=0; jnCol; j++){ char *zErr; - Column *pCol = pTab->aCol + j; - int doError, jmp2; + Column *pCol = pTab->aCol + j; /* The column to be checked */ + int labelError; /* Jump here to report an error */ + int labelOk; /* Jump here if all looks ok */ + int p1, p3, p4; /* Operands to the OP_IsType opcode */ + int doTypeCheck; /* Check datatypes (besides NOT NULL) */ + if( j==pTab->iPKey ) continue; - if( pCol->notNull==0 && !bStrict ) continue; - doError = bStrict ? 
sqlite3VdbeMakeLabel(pParse) : 0; - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); - if( sqlite3VdbeGetLastOp(v)->opcode==OP_Column ){ - sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); + if( bStrict ){ + doTypeCheck = pCol->eCType>COLTYPE_ANY; + }else{ + doTypeCheck = pCol->affinity>SQLITE_AFF_BLOB; + } + if( pCol->notNull==0 && !doTypeCheck ) continue; + + /* Compute the operands that will be needed for OP_IsType */ + p4 = SQLITE_NULL; + if( pCol->colFlags & COLFLAG_VIRTUAL ){ + sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); + p1 = -1; + p3 = 3; + }else{ + if( pCol->iDflt ){ + sqlite3_value *pDfltValue = 0; + sqlite3ValueFromExpr(db, sqlite3ColumnExpr(pTab,pCol), ENC(db), + pCol->affinity, &pDfltValue); + if( pDfltValue ){ + p4 = sqlite3_value_type(pDfltValue); + sqlite3ValueFree(pDfltValue); + } + } + p1 = iDataCur; + if( !HasRowid(pTab) ){ + testcase( j!=sqlite3TableColumnToStorage(pTab, j) ); + p3 = sqlite3TableColumnToIndex(sqlite3PrimaryKeyIndex(pTab), j); + }else{ + p3 = sqlite3TableColumnToStorage(pTab,j); + testcase( p3!=j); + } } + + labelError = sqlite3VdbeMakeLabel(pParse); + labelOk = sqlite3VdbeMakeLabel(pParse); if( pCol->notNull ){ - jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v); + /* (1) NOT NULL columns may not contain a NULL */ + int jmp2 = sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + sqlite3VdbeChangeP5(v, 0x0f); + VdbeCoverage(v); zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, pCol->zCnName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); - if( bStrict && pCol->eCType!=COLTYPE_ANY ){ - sqlite3VdbeGoto(v, doError); - }else{ - integrityCheckResultRow(v); - } - sqlite3VdbeJumpHere(v, jmp2); - } - if( bStrict && pCol->eCType!=COLTYPE_ANY ){ - jmp2 = sqlite3VdbeAddOp3(v, OP_IsNullOrType, 3, 0, - sqlite3StdTypeMap[pCol->eCType-1]); + if( doTypeCheck ){ + sqlite3VdbeGoto(v, labelError); + sqlite3VdbeJumpHere(v, jmp2); + }else{ + /* VDBE byte code will fall thru */ + } + } + if( bStrict && doTypeCheck ){ + /* (2) Datatype must be exact for non-ANY columns in STRICT tables*/ + static unsigned char aStdTypeMask[] = { + 0x1f, /* ANY */ + 0x18, /* BLOB */ + 0x11, /* INT */ + 0x11, /* INTEGER */ + 0x13, /* REAL */ + 0x14 /* TEXT */ + }; + sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + assert( pCol->eCType>=1 && pCol->eCType<=sizeof(aStdTypeMask) ); + sqlite3VdbeChangeP5(v, aStdTypeMask[pCol->eCType-1]); VdbeCoverage(v); zErr = sqlite3MPrintf(db, "non-%s value in %s.%s", sqlite3StdType[pCol->eCType-1], pTab->zName, pTab->aCol[j].zCnName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); - sqlite3VdbeResolveLabel(v, doError); - integrityCheckResultRow(v); - sqlite3VdbeJumpHere(v, jmp2); + }else if( !bStrict && pCol->affinity==SQLITE_AFF_TEXT ){ + /* (3) Datatype for TEXT columns in non-STRICT tables must be + ** NULL, TEXT, or BLOB. */ + sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + sqlite3VdbeChangeP5(v, 0x1c); /* NULL, TEXT, or BLOB */ + VdbeCoverage(v); + zErr = sqlite3MPrintf(db, "NUMERIC value in %s.%s", + pTab->zName, pTab->aCol[j].zCnName); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); + }else if( !bStrict && pCol->affinity>=SQLITE_AFF_NUMERIC ){ + /* (4) Datatype for numeric columns in non-STRICT tables must not + ** be a TEXT value that can be converted to numeric. 
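All four of the column checks enumerated above surface as rows of the ordinary "PRAGMA integrity_check" result set. The sketch below just runs that pragma from the public API and prints each message; it is illustrative, the print_row() helper is invented for the example, and no particular output is assumed because whether rules (1) through (4) fire depends on how the offending values got into the file.

/* Illustrative sketch: run "PRAGMA integrity_check" on the database named on
** the command line and print every row of its result.  A healthy database
** produces the single row "ok". */
#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
  (void)pArg; (void)nCol; (void)azCol;
  printf("integrity_check: %s\n", azVal[0] ? azVal[0] : "NULL");
  return 0;
}

int main(int argc, char **argv){
  sqlite3 *db;
  if( argc<2 ) return 1;
  if( sqlite3_open(argv[1], &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "PRAGMA integrity_check", print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}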
*/ + sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + sqlite3VdbeChangeP5(v, 0x1b); /* NULL, INT, FLOAT, or BLOB */ + VdbeCoverage(v); + if( p1>=0 ){ + sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); + } + sqlite3VdbeAddOp4(v, OP_Affinity, 3, 1, 0, "C", P4_STATIC); + sqlite3VdbeAddOp4Int(v, OP_IsType, -1, labelOk, 3, p4); + sqlite3VdbeChangeP5(v, 0x1c); /* NULL, TEXT, or BLOB */ + VdbeCoverage(v); + zErr = sqlite3MPrintf(db, "TEXT value in %s.%s", + pTab->zName, pTab->aCol[j].zCnName); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); } + sqlite3VdbeResolveLabel(v, labelError); + integrityCheckResultRow(v); + sqlite3VdbeResolveLabel(v, labelOk); } /* Verify CHECK constraints */ if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){ ExprList *pCheck = sqlite3ExprListDup(db, pTab->pCheck, 0); if( db->mallocFailed==0 ){ @@ -2062,14 +2157,14 @@ ** ** The user-version is not used internally by SQLite. It may be used by ** applications for any purpose. */ case PragTyp_HEADER_VALUE: { - int iCookie = pPragma->iArg; /* Which cookie to read or write */ + int iCookie; /* Which cookie to read or write */ + iCookie = pPragma->iArg & PRAGMA_HEADER_VALUE_MASK; sqlite3VdbeUsesBtree(v, iDb); - if( iCookie==BTREE_SCHEMA_VERSION ) sqlite3VdbeIsSchemaVersion(v); - if( zRight && (pPragma->mPragFlg & PragFlg_ReadOnly)==0 ){ + if( zRight && (pPragma->iArg & PRAGMA_HEADER_VALUE_READONLY)==0 ){ /* Write the specified cookie value */ static const VdbeOpList setCookie[] = { { OP_Transaction, 0, 1, 0}, /* 0 */ { OP_SetCookie, 0, 0, 0}, /* 1 */ }; @@ -2080,10 +2175,15 @@ aOp[0].p1 = iDb; aOp[1].p1 = iDb; aOp[1].p2 = iCookie; aOp[1].p3 = sqlite3Atoi(zRight); aOp[1].p5 = 1; + if( iCookie==BTREE_SCHEMA_VERSION && (db->flags & SQLITE_Defensive)!=0 ){ + /* Do not allow the use of PRAGMA schema_version=VALUE in defensive + ** mode. Change the OP_SetCookie opcode into a no-op. */ + aOp[1].opcode = OP_Noop; + } }else{ /* Read the specified cookie value */ static const VdbeOpList readCookie[] = { { OP_Transaction, 0, 0, 0}, /* 0 */ { OP_ReadCookie, 0, 1, 0}, /* 1 */ Index: src/pragma.h ================================================================== --- src/pragma.h +++ src/pragma.h @@ -54,15 +54,22 @@ /* Property flags associated with various pragma. */ #define PragFlg_NeedSchema 0x01 /* Force schema load before running */ #define PragFlg_NoColumns 0x02 /* OP_ResultRow called with zero columns */ #define PragFlg_NoColumns1 0x04 /* zero columns if RHS argument is present */ -#define PragFlg_ReadOnly 0x08 /* Read-only HEADER_VALUE */ +#define PragFlg_OneSchema 0x08 /* Only a single schema required */ #define PragFlg_Result0 0x10 /* Acts as query when no argument */ #define PragFlg_Result1 0x20 /* Acts as query when has one argument */ #define PragFlg_SchemaOpt 0x40 /* Schema restricts name search if present */ #define PragFlg_SchemaReq 0x80 /* Schema required - "main" is default */ + +/* For PragTyp_HEADER_VALUE pragmas the Pragma.iArg value is set +** to the index of the header field to access (always 10 or less). +** Ored with HEADER_VALUE_READONLY if the field is read only. 
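Two behaviours from the pragma.c hunk above can be observed from the public API: header-value pragmas whose iArg carries the read-only bit (data_version, freelist_count) only ever read, and with defensive mode enabled a "PRAGMA schema_version=N" write is turned into a no-op. The sketch below is illustrative only; print_one() and the file name "demo.db" are placeholders, and the expected no-op follows from the hunk above rather than from anything this sketch enforces.

/* Illustrative sketch: with defensive mode on, writing schema_version is
** expected (per the hunk above) to be silently ignored, while reading
** schema_version or data_version still works. */
#include <stdio.h>
#include "sqlite3.h"

static int print_one(void *pLabel, int nCol, char **azVal, char **azCol){
  (void)nCol; (void)azCol;
  printf("%s = %s\n", (const char*)pLabel, azVal[0] ? azVal[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  sqlite3_open("demo.db", &db);
  sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, 1, 0);
  sqlite3_exec(db, "PRAGMA schema_version",       print_one, "before", 0);
  sqlite3_exec(db, "PRAGMA schema_version=12345", 0, 0, 0);
  sqlite3_exec(db, "PRAGMA schema_version",       print_one, "after", 0);
  sqlite3_exec(db, "PRAGMA data_version",         print_one, "data_version", 0);
  sqlite3_close(db);
  return 0;
}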
*/ +#define PRAGMA_HEADER_VALUE_READONLY 0x0100 +#define PRAGMA_HEADER_VALUE_MASK 0x00FF + /* Names of columns for pragmas that return multi-column result ** or that return single-column results where the name of the ** result column is different from the name of the pragma */ @@ -160,11 +167,11 @@ /* iArg: */ BTREE_APPLICATION_ID }, #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) {/* zName: */ "auto_vacuum", /* ePragTyp: */ PragTyp_AUTO_VACUUM, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1|PragFlg_OneSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_AUTOMATIC_INDEX) @@ -181,11 +188,11 @@ /* ColNames: */ 56, 1, /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) {/* zName: */ "cache_size", /* ePragTyp: */ PragTyp_CACHE_SIZE, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1|PragFlg_OneSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) {/* zName: */ "cache_spill", @@ -242,13 +249,13 @@ /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) {/* zName: */ "data_version", /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0, + /* ePragFlg: */ PragFlg_Result0, /* ColNames: */ 0, 0, - /* iArg: */ BTREE_DATA_VERSION }, + /* iArg: */ BTREE_DATA_VERSION|PRAGMA_HEADER_VALUE_READONLY }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) {/* zName: */ "database_list", /* ePragTyp: */ PragTyp_DATABASE_LIST, /* ePragFlg: */ PragFlg_Result0, @@ -256,11 +263,11 @@ /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) {/* zName: */ "default_cache_size", /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1|PragFlg_OneSchema, /* ColNames: */ 55, 1, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) @@ -293,11 +300,11 @@ /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FOREIGN_KEY) {/* zName: */ "foreign_key_list", /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt|PragFlg_OneSchema, /* ColNames: */ 0, 8, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) @@ -309,13 +316,13 @@ #endif #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) {/* zName: */ "freelist_count", /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0, + /* ePragFlg: */ PragFlg_Result0, /* ColNames: */ 0, 0, - /* iArg: */ BTREE_FREE_PAGE_COUNT }, + /* iArg: */ BTREE_FREE_PAGE_COUNT|PRAGMA_HEADER_VALUE_READONLY }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) {/* zName: */ "full_column_names", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, @@ -351,11 +358,11 @@ #endif #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) {/* zName: */ "incremental_vacuum", /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM, - /* ePragFlg: */ 
PragFlg_NeedSchema|PragFlg_NoColumns, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_NoColumns|PragFlg_OneSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) {/* zName: */ "index_info", @@ -382,11 +389,11 @@ /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) {/* zName: */ "journal_mode", /* ePragTyp: */ PragTyp_JOURNAL_MODE, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_OneSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, {/* zName: */ "journal_size_limit", /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT, /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq, @@ -420,11 +427,11 @@ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq, /* ColNames: */ 0, 0, /* iArg: */ 0 }, {/* zName: */ "max_page_count", /* ePragTyp: */ PragTyp_PAGE_COUNT, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_OneSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, {/* zName: */ "mmap_size", /* ePragTyp: */ PragTyp_MMAP_SIZE, /* ePragFlg: */ 0, @@ -439,29 +446,20 @@ /* ePragFlg: */ PragFlg_Result0, /* ColNames: */ 9, 1, /* iArg: */ 0 }, #endif #endif -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) -#if defined(SQLITE_ENABLE_NOOP_UPDATE) - {/* zName: */ "noop_update", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, - /* ColNames: */ 0, 0, - /* iArg: */ SQLITE_NoopUpdate }, -#endif #endif {/* zName: */ "optimize", /* ePragTyp: */ PragTyp_OPTIMIZE, /* ePragFlg: */ PragFlg_Result1|PragFlg_NeedSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) {/* zName: */ "page_count", /* ePragTyp: */ PragTyp_PAGE_COUNT, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_OneSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, {/* zName: */ "page_size", /* ePragTyp: */ PragTyp_PAGE_SIZE, /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, @@ -556,18 +554,18 @@ #endif #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) && defined(SQLITE_DEBUG) {/* zName: */ "stats", /* ePragTyp: */ PragTyp_STATS, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_OneSchema, /* ColNames: */ 33, 5, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) {/* zName: */ "synchronous", /* ePragTyp: */ PragTyp_SYNCHRONOUS, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1|PragFlg_OneSchema, /* ColNames: */ 0, 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) {/* zName: */ "table_info", @@ -652,11 +650,11 @@ /* ePragFlg: */ 0, /* ColNames: */ 0, 0, /* iArg: */ 0 }, {/* zName: */ "wal_checkpoint", /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, - /* ePragFlg: */ PragFlg_NeedSchema, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_OneSchema, /* ColNames: */ 50, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) {/* zName: */ "writable_schema", Index: src/prepare.c ================================================================== --- src/prepare.c +++ src/prepare.c @@ -41,10 +41,15 @@ zExtra ); pData->rc = SQLITE_ERROR; }else if( db->flags & SQLITE_WriteSchema ){ pData->rc = 
SQLITE_CORRUPT_BKPT; + }else if( IsSharedSchema(db) + && 0==sqlite3StrNICmp(zExtra, "malformed database schema", 17) + ){ + pData->rc = SQLITE_CORRUPT_BKPT; + *pData->pzErrMsg = sqlite3DbStrDup(db, zExtra); }else{ char *z; const char *zObj = azObj[1] ? azObj[1] : "?"; z = sqlite3MPrintf(db, "malformed database schema (%s)", zObj); if( zExtra && zExtra[0] ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra); @@ -51,10 +56,32 @@ *pData->pzErrMsg = z; pData->rc = SQLITE_CORRUPT_BKPT; } } +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +/* +** Update the Schema.cksum checksum to account for the database object +** specified by the three arguments following the first. +*/ +static void schemaUpdateChecksum( + InitData *pData, /* Schema parse context */ + const char *zName, /* Name of new database object */ + const char *zRoot, /* Root page of new database object */ + const char *zSql /* SQL used to create new database object */ +){ + int i; + u64 cksum = pData->cksum; + if( zName ){ + for(i=0; zName[i]; i++) cksum += (cksum<<3) + zName[i]; + } + if( zRoot ) for(i=0; zRoot[i]; i++) cksum += (cksum<<3) + zRoot[i]; + if( zSql ) for(i=0; zSql[i]; i++) cksum += (cksum<<3) + zSql[i]; + pData->cksum = cksum; +} +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ + /* ** Check to see if any sibling index (another index on the same table) ** of pIndex has the same root page number, and if it does, return true. ** This would indicate a corrupt schema. */ @@ -66,10 +93,19 @@ return 0; } /* forward declaration */ static int sqlite3Prepare( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + u32 prepFlags, /* Zero or more SQLITE_PREPARE_* flags */ + Vdbe *pReprepare, /* VM being reprepared */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +); +static int sqlite3LockAndPrepare( sqlite3 *db, /* Database handle. */ const char *zSql, /* UTF-8 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. 
*/ u32 prepFlags, /* Zero or more SQLITE_PREPARE_* flags */ Vdbe *pReprepare, /* VM being reprepared */ @@ -139,11 +175,15 @@ } } db->init.orphanTrigger = 0; db->init.azInit = (const char**)argv; pStmt = 0; +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + TESTONLY(rcp = ) sqlite3LockAndPrepare(db, argv[4], -1, 0, 0, &pStmt, 0); +#else TESTONLY(rcp = ) sqlite3Prepare(db, argv[4], -1, 0, 0, &pStmt, 0); +#endif rc = db->errCode; assert( (rc&0xFF)==(rcp&0xFF) ); db->init.iDb = saved_iDb; /* assert( saved_iDb==0 || (db->mDbFlags & DBFLAG_Vacuum)!=0 ); */ if( SQLITE_OK!=rc ){ @@ -151,11 +191,16 @@ assert( iDb==1 ); }else{ if( rc > pData->rc ) pData->rc = rc; if( rc==SQLITE_NOMEM ){ sqlite3OomFault(db); - }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){ + }else if( rc!=SQLITE_INTERRUPT +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + && (rc&0xFF)!=SQLITE_LOCKED + && (rc&0xFF)!=SQLITE_IOERR +#endif + ){ corruptSchema(pData, argv, sqlite3_errmsg(db)); } } } db->init.azInit = sqlite3StdType; /* Any array of string ptrs will do */ @@ -182,10 +227,16 @@ if( sqlite3Config.bExtraSchemaChecks ){ corruptSchema(pData, argv, "invalid rootpage"); } } } + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( IsSharedSchema(db) && iDb!=1 ){ + schemaUpdateChecksum(pData, argv[0], argv[1], argv[2]); + } +#endif return 0; } /* ** Attempt to read the database schema and initialize internal @@ -209,13 +260,31 @@ int openedTransaction = 0; int mask = ((db->mDbFlags & DBFLAG_EncodingFixed) | ~DBFLAG_EncodingFixed); assert( (db->mDbFlags & DBFLAG_SchemaKnownOk)==0 ); assert( iDb>=0 && iDbnDb ); - assert( db->aDb[iDb].pSchema ); + assert( db->aDb[iDb].pSchema || (IsSharedSchema(db) && iDb!=1) ); assert( sqlite3_mutex_held(db->mutex) ); assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); + + pDb = &db->aDb[iDb]; +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + assert( pDb->pSPool==0 || IsSharedSchema(db) ); + if( pDb->pSPool ){ + /* See if there is a free schema object in the schema-pool. If not, + ** disconnect from said schema pool and continue. This function will + ** connect to a (possibly different) schema-pool before returning. */ + Schema *pNew = sqlite3SchemaExtract(pDb->pSPool); + if( pNew ){ + pDb->pSchema = pNew; + return SQLITE_OK; + } + rc = sqlite3SchemaDisconnect(db, iDb, 1); + if( rc!=SQLITE_OK ) goto error_out; + assert( pDb->pSchema && pDb->pSPool==0 ); + } +#endif db->init.busy = 1; /* Construct the in-memory representation schema tables (sqlite_schema or ** sqlite_temp_schema) by invoking the parser directly. The appropriate @@ -233,10 +302,11 @@ initData.iDb = iDb; initData.rc = SQLITE_OK; initData.pzErrMsg = pzErrMsg; initData.mInitFlags = mFlags; initData.nInitRow = 0; + initData.cksum = 0; initData.mxPage = 0; sqlite3InitCallback(&initData, 5, (char **)azArg, 0); db->mDbFlags &= mask; if( initData.rc ){ rc = initData.rc; @@ -243,11 +313,10 @@ goto error_out; } /* Create a cursor to hold the database open */ - pDb = &db->aDb[iDb]; if( pDb->pBt==0 ){ assert( iDb==1 ); DbSetProperty(db, 1, DB_SchemaLoaded); rc = SQLITE_OK; goto error_out; @@ -401,10 +470,14 @@ ** table even when its contents have been corrupted. */ DbSetProperty(db, iDb, DB_SchemaLoaded); rc = SQLITE_OK; } + + if( rc==SQLITE_OK && iDb!=1 && IsSharedSchema(db) ){ + rc = sqlite3SchemaConnect(db, iDb, initData.cksum); + } /* Jump here for an error that occurs after successfully allocating ** curMain and calling sqlite3BtreeEnter(). For an error that occurs ** before that point, jump to error_out. 
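For reference, the rolling checksum that the shared-schema logic accumulates over each schema row (object name, root-page text, then CREATE statement text, via schemaUpdateChecksum() above) can be restated as a standalone program. This illustrates the arithmetic only; schema_cksum_step() and the sample strings are invented for the example, and the real routine operates on the InitData fields shown above.

/* Illustrative restatement of the shared-schema rolling checksum: every byte
** of each input string is folded in as cksum += (cksum<<3) + byte. */
#include <stdio.h>
typedef unsigned long long u64;

static u64 schema_cksum_step(u64 cksum, const char *z){
  int i;
  if( z ){
    for(i=0; z[i]; i++) cksum += (cksum<<3) + z[i];
  }
  return cksum;
}

int main(void){
  u64 cksum = 0;
  cksum = schema_cksum_step(cksum, "t1");                    /* object name */
  cksum = schema_cksum_step(cksum, "2");                     /* root page    */
  cksum = schema_cksum_step(cksum, "CREATE TABLE t1(a,b)");  /* SQL text     */
  printf("checksum = %llu\n", cksum);
  return 0;
}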
*/ @@ -423,10 +496,39 @@ } db->init.busy = 0; return rc; } + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +/* +** If this is a SHARED_SCHEMA connection and the DBFLAG_SchemaInUse flag +** is not currently set, set it and return non-zero. Otherwise, return 0. +*/ +int sqlite3LockReusableSchema(sqlite3 *db){ + if( IsSharedSchema(db) && (db->mDbFlags & DBFLAG_SchemaInuse)==0 ){ + db->mDbFlags |= DBFLAG_SchemaInuse; + return 1; + } + return 0; +} +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +/* +** This function is a no-op for non-SHARED_SCHEMA connections, or if bRelease +** is zero. Otherwise, clear the DBFLAG_SchemaInuse flag and release all +** schema references currently held. +*/ +void sqlite3UnlockReusableSchema(sqlite3 *db, int bRelease){ + if( bRelease ){ + db->mDbFlags &= ~DBFLAG_SchemaInuse; + sqlite3SchemaReleaseAll(db); + } +} +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ + /* ** Initialize all database files - the main database file, the file ** used to store temporary tables, and any additional database files ** created using ATTACH statements. Return a success code. If an ** error occurs, write an error message into *pzErrMsg. @@ -433,35 +535,38 @@ ** ** After a database is initialized, the DB_SchemaLoaded bit is set ** bit is set in the flags field of the Db structure. */ int sqlite3Init(sqlite3 *db, char **pzErrMsg){ - int i, rc; + int rc = SQLITE_OK; + int bReleaseSchema; + int i; int commit_internal = !(db->mDbFlags&DBFLAG_SchemaChange); + + bReleaseSchema = sqlite3LockReusableSchema(db); assert( sqlite3_mutex_held(db->mutex) ); assert( sqlite3BtreeHoldsMutex(db->aDb[0].pBt) ); assert( db->init.busy==0 ); ENC(db) = SCHEMA_ENC(db); assert( db->nDb>0 ); /* Do the main schema first */ if( !DbHasProperty(db, 0, DB_SchemaLoaded) ){ rc = sqlite3InitOne(db, 0, pzErrMsg, 0); - if( rc ) return rc; } /* All other schemas after the main schema. The "temp" schema must be last */ - for(i=db->nDb-1; i>0; i--){ + for(i=db->nDb-1; rc==SQLITE_OK && i>0; i--){ assert( i==1 || sqlite3BtreeHoldsMutex(db->aDb[i].pBt) ); if( !DbHasProperty(db, i, DB_SchemaLoaded) ){ rc = sqlite3InitOne(db, i, pzErrMsg, 0); - if( rc ) return rc; } } - if( commit_internal ){ + if( rc==SQLITE_OK && commit_internal ){ sqlite3CommitInternalChanges(db); } - return SQLITE_OK; + sqlite3UnlockReusableSchema(db, bReleaseSchema); + return rc; } /* ** This routine is a no-op if the database schema is already initialized. ** Otherwise, the schema is loaded. An error code is returned. @@ -469,15 +574,16 @@ int sqlite3ReadSchema(Parse *pParse){ int rc = SQLITE_OK; sqlite3 *db = pParse->db; assert( sqlite3_mutex_held(db->mutex) ); if( !db->init.busy ){ + db->mDbFlags |= DBFLAG_FreeSchema; /* For sharable-schema mode */ rc = sqlite3Init(db, &pParse->zErrMsg); if( rc!=SQLITE_OK ){ pParse->rc = rc; pParse->nErr++; - }else if( db->noSharedCache ){ + }else if( db->noSharedCache && !IsSharedSchema(db) ){ db->mDbFlags |= DBFLAG_SchemaKnownOk; } } return rc; } @@ -498,10 +604,14 @@ assert( sqlite3_mutex_held(db->mutex) ); for(iDb=0; iDbnDb; iDb++){ int openedTransaction = 0; /* True if a transaction is opened */ Btree *pBt = db->aDb[iDb].pBt; /* Btree database to read cookie from */ if( pBt==0 ) continue; + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( IsSharedSchema(db) && iDb!=1 && db->aDb[iDb].pSPool==0 ) continue; +#endif /* If there is not already a read-only (or read-write) transaction opened ** on the b-tree database, open one now. 
If a transaction is opened, it ** will be closed immediately after reading the meta-value. */ if( sqlite3BtreeTxnState(pBt)==SQLITE_TXN_NONE ){ @@ -703,11 +813,11 @@ */ if( prepFlags & SQLITE_PREPARE_PERSISTENT ){ sParse.disableLookaside++; DisableLookaside; } - sParse.disableVtab = (prepFlags & SQLITE_PREPARE_NO_VTAB)!=0; + sParse.prepFlags = prepFlags & 0xff; /* Check to verify that it is possible to get a read lock on all ** database schemas. The inability to get a read lock indicates that ** some other database connection is holding a write-lock, which in ** turn means that the other connection has made uncommitted changes @@ -744,11 +854,13 @@ } } } } - sqlite3VtabUnlockList(db); +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( db->pDisconnect ) sqlite3VtabUnlockList(db); +#endif if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){ char *zSqlCopy; int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; testcase( nBytes==mxLen ); @@ -826,19 +938,21 @@ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ int rc; int cnt = 0; + int bReleaseSchema = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( ppStmt==0 ) return SQLITE_MISUSE_BKPT; #endif *ppStmt = 0; if( !sqlite3SafetyCheckOk(db)||zSql==0 ){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(db->mutex); + bReleaseSchema = sqlite3LockReusableSchema(db); sqlite3BtreeEnterAll(db); do{ /* Make multiple attempts to compile the SQL, until it either succeeds ** or encounters a permanent error. A schema problem after one schema ** reset is considered a permanent error. */ @@ -845,11 +959,15 @@ rc = sqlite3Prepare(db, zSql, nBytes, prepFlags, pOld, ppStmt, pzTail); assert( rc==SQLITE_OK || *ppStmt==0 ); if( rc==SQLITE_OK || db->mallocFailed ) break; }while( (rc==SQLITE_ERROR_RETRY && (cnt++)errMask)==rc ); db->busyHandler.nBusy = 0; sqlite3_mutex_leave(db->mutex); return rc; Index: src/random.c ================================================================== --- src/random.c +++ src/random.c @@ -29,14 +29,14 @@ /* The RFC-7539 ChaCha20 block function */ #define ROTL(a,b) (((a) << (b)) | ((a) >> (32 - (b)))) -#define QR(a, b, c, d) ( \ - a += b, d ^= a, d = ROTL(d,16), \ - c += d, b ^= c, b = ROTL(b,12), \ - a += b, d ^= a, d = ROTL(d, 8), \ +#define QR(a, b, c, d) ( \ + a += b, d ^= a, d = ROTL(d,16), \ + c += d, b ^= c, b = ROTL(b,12), \ + a += b, d ^= a, d = ROTL(d, 8), \ c += d, b ^= c, b = ROTL(b, 7)) static void chacha_block(u32 *out, const u32 *in){ int i; u32 x[16]; memcpy(x, in, 64); @@ -127,32 +127,10 @@ wsdPrng.n = 64; } sqlite3_mutex_leave(mutex); } -/* -** Initialize a fast PRNG. A Fast PRNG is called "fast" because it does -** not need a mutex to operate, though it does use a mutex to initialize. -** The quality of the randomness is not as good as the global PRNG. -*/ -void sqlite3FastPrngInit(FastPrng *pPrng){ - sqlite3_randomness(sizeof(*pPrng), pPrng); - pPrng->x |= 1; -} - -/* -** Generate N bytes of pseudo-randomness using a FastPrng -*/ -void sqlite3FastRandomness(FastPrng *pPrng, int N, void *P){ - unsigned char *pOut = (unsigned char*)P; - while( N-->0 ){ - pPrng->x = ((pPrng->x)>>1) ^ ((1+~((pPrng->x)&1)) & 0xd0000001); - pPrng->y = (pPrng->y)*1103515245 + 12345; - *(pOut++) = (pPrng->x ^ pPrng->y) & 0xff; - } -} - #ifndef SQLITE_UNTESTABLE /* ** For testing purposes, we sometimes want to preserve the state of ** PRNG and restore the PRNG to its saved state at a later time, or ** to reset the PRNG to its initial state. 
These routines accomplish Index: src/select.c ================================================================== --- src/select.c +++ src/select.c @@ -1285,10 +1285,13 @@ testcase( eDest==SRT_Table ); testcase( eDest==SRT_EphemTab ); testcase( eDest==SRT_Fifo ); testcase( eDest==SRT_DistFifo ); sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg); + if( pDest->zAffSdst ){ + sqlite3VdbeChangeP4(v, -1, pDest->zAffSdst, nResultCol); + } #ifndef SQLITE_OMIT_CTE if( eDest==SRT_DistFifo ){ /* If the destination is DistFifo, then cursor (iParm+1) is open ** on an ephemeral index. If the current row is already present ** in the index, do not write it to the output. If not, add the @@ -2236,11 +2239,11 @@ if( nName>0 ){ for(j=nName-1; j>0 && sqlite3Isdigit(zName[j]); j--){} if( zName[j]==':' ) nName = j; } zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt); - if( cnt>3 ) sqlite3FastRandomness(&db->sPrng, sizeof(cnt), &cnt); + if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt); } pCol->zCnName = zName; pCol->hName = sqlite3StrIHash(zName); if( pX->fg.bNoExpand ){ pCol->colFlags |= COLFLAG_NOEXPAND; @@ -3688,12 +3691,12 @@ /* Jump to the this point in order to terminate the query. */ sqlite3VdbeResolveLabel(v, labelEnd); - /* Reassemble the compound query so that it will be freed correctly - ** by the calling function */ + /* Make arrangements to free the 2nd and subsequent arms of the compound + ** after the parse has finished */ if( pSplit->pPrior ){ sqlite3ParserAddCleanup(pParse, (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior); } pSplit->pPrior = pPrior; @@ -3748,10 +3751,11 @@ Parse *pParse; /* The parsing context */ int iTable; /* Replace references to this table */ int iNewTable; /* New table number */ int isOuterJoin; /* Add TK_IF_NULL_ROW opcodes on each replacement */ ExprList *pEList; /* Replacement expressions */ + ExprList *pCList; /* Collation sequences for replacement expr */ } SubstContext; /* Forward Declarations */ static void substExprList(SubstContext*, ExprList*); static void substSelect(SubstContext*, Select*, int); @@ -3789,13 +3793,14 @@ pExpr->op = TK_NULL; }else #endif { Expr *pNew; - Expr *pCopy = pSubst->pEList->a[pExpr->iColumn].pExpr; + int iColumn = pExpr->iColumn; + Expr *pCopy = pSubst->pEList->a[iColumn].pExpr; Expr ifNullRow; - assert( pSubst->pEList!=0 && pExpr->iColumnpEList->nExpr ); + assert( pSubst->pEList!=0 && iColumnpEList->nExpr ); assert( pExpr->pRight==0 ); if( sqlite3ExprIsVector(pCopy) ){ sqlite3VectorErrorMsg(pSubst->pParse, pCopy); }else{ sqlite3 *db = pSubst->pParse->db; @@ -3829,15 +3834,20 @@ ExprSetProperty(pExpr, EP_IntValue); } /* Ensure that the expression now has an implicit collation sequence, ** just as it did when it was a column of a view or sub-query. */ - if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE ){ - CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pExpr); - pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, - (pColl ? pColl->zName : "BINARY") + { + CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pExpr); + CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, + pSubst->pCList->a[iColumn].pExpr ); + if( pNat!=pColl || (pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE) ){ + pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, + (pColl ? 
pColl->zName : "BINARY") + ); + } } ExprClearProperty(pExpr, EP_Collate); } } }else{ @@ -4025,10 +4035,22 @@ w.xExprCallback = renumberCursorsCb; w.xSelectCallback = sqlite3SelectWalkNoop; sqlite3WalkSelect(&w, p); } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ + +/* +** If pSel is not part of a compound SELECT, return a pointer to its +** expression list. Otherwise, return a pointer to the expression list +** of the leftmost SELECT in the compound. +*/ +static ExprList *findLeftmostExprlist(Select *pSel){ + while( pSel->pPrior ){ + pSel = pSel->pPrior; + } + return pSel->pEList; +} #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** This routine attempts to flatten subqueries as a performance optimization. ** This routine returns 1 if it makes changes and 0 if no flattening occurs. @@ -4128,10 +4150,12 @@ ** (17e) the subquery may not contain window functions, and ** (17f) the subquery must not be the RHS of a LEFT JOIN. ** (17g) either the subquery is the first element of the outer ** query or there are no RIGHT or FULL JOINs in any arm ** of the subquery. (This is a duplicate of condition (27b).) +** (17h) The corresponding result set expressions in all arms of the +** compound must have the same affinity. ** ** The parent and sub-query may contain WHERE clauses. Subject to ** rules (11), (13) and (14), they may also contain ORDER BY, ** LIMIT and OFFSET clauses. The subquery cannot use any compound ** operator other than UNION ALL because all the other compound @@ -4304,10 +4328,11 @@ ** use only the UNION ALL operator. And none of the simple select queries ** that make up the compound SELECT are allowed to be aggregate or distinct ** queries. */ if( pSub->pPrior ){ + int ii; if( pSub->pOrderBy ){ return 0; /* Restriction (20) */ } if( isAgg || (p->selFlags & SF_Distinct)!=0 || isOuterJoin>0 ){ return 0; /* (17d1), (17d2), or (17f) */ @@ -4336,18 +4361,32 @@ testcase( pSub1->pSrc->nSrc>1 ); } /* Restriction (18). */ if( p->pOrderBy ){ - int ii; for(ii=0; iipOrderBy->nExpr; ii++){ if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0; } } /* Restriction (23) */ if( (p->selFlags & SF_Recursive) ) return 0; + + /* Restriction (17h) */ + for(ii=0; iipEList->nExpr; ii++){ + char aff; + assert( pSub->pEList->a[ii].pExpr!=0 ); + aff = sqlite3ExprAffinity(pSub->pEList->a[ii].pExpr); + for(pSub1=pSub->pPrior; pSub1; pSub1=pSub1->pPrior){ + assert( pSub1->pEList!=0 ); + assert( pSub1->pEList->nExpr>ii ); + assert( pSub1->pEList->a[ii].pExpr!=0 ); + if( sqlite3ExprAffinity(pSub1->pEList->a[ii].pExpr)!=aff ){ + return 0; + } + } + } if( pSrc->nSrc>1 ){ if( pParse->nSelect>500 ) return 0; if( OptimizationDisabled(db, SQLITE_FlttnUnionAll) ) return 0; aCsrMap = sqlite3DbMallocZero(db, ((i64)pParse->nTab+1)*sizeof(int)); @@ -4578,10 +4617,11 @@ x.pParse = pParse; x.iTable = iParent; x.iNewTable = iNewParent; x.isOuterJoin = isOuterJoin; x.pEList = pSub->pEList; + x.pCList = findLeftmostExprlist(pSub); substSelect(&x, pParent, 0); } /* The flattened query is a compound if either the inner or the ** outer query is a compound. */ @@ -4597,11 +4637,11 @@ if( pSub->pLimit ){ pParent->pLimit = pSub->pLimit; pSub->pLimit = 0; } - /* Recompute the SrcList_item.colUsed masks for the flattened + /* Recompute the SrcItem.colUsed masks for the flattened ** tables. */ for(i=0; ia[i+iFrom]); } } @@ -4986,10 +5026,17 @@ ** window over which any window-function is calculated. ** ** (7) The inner query is a Common Table Expression (CTE) that should ** be materialized. 
(This restriction is implemented in the calling ** routine.) +** +** (8) The subquery may not be a compound that uses UNION, INTERSECT, +** or EXCEPT. (We could, perhaps, relax this restriction to allow +** this case if none of the comparisons operators between left and +** right arms of the compound use a collation other than BINARY. +** But it is a lot of work to check that case for an obscure and +** minor optimization, so we omit it for now.) ** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. */ static int pushDownWhereTerms( @@ -5006,10 +5053,14 @@ #ifndef SQLITE_OMIT_WINDOWFUNC if( pSubq->pPrior ){ Select *pSel; for(pSel=pSubq; pSel; pSel=pSel->pPrior){ + u8 op = pSel->op; + assert( op==TK_ALL || op==TK_SELECT + || op==TK_UNION || op==TK_INTERSECT || op==TK_EXCEPT ); + if( op!=TK_ALL && op!=TK_SELECT ) return 0; /* restriction (8) */ if( pSel->pWin ) return 0; /* restriction (6b) */ } }else{ if( pSubq->pWin && pSubq->pWin->pPartition==0 ) return 0; } @@ -5060,10 +5111,11 @@ x.pParse = pParse; x.iTable = pSrc->iCursor; x.iNewTable = pSrc->iCursor; x.isOuterJoin = 0; x.pEList = pSubq->pEList; + x.pCList = findLeftmostExprlist(pSubq); pNew = substExpr(&x, pNew); #ifndef SQLITE_OMIT_WINDOWFUNC if( pSubq->pWin && 0==pushDownWindowCheck(pParse, pSubq, pNew) ){ /* Restriction 6c has prevented push-down in this case */ sqlite3ExprDelete(pParse->db, pNew); @@ -5584,13 +5636,13 @@ } } #endif /* -** The SrcList_item structure passed as the second argument represents a +** The SrcItem structure passed as the second argument represents a ** sub-query in the FROM clause of a SELECT statement. This function -** allocates and populates the SrcList_item.pTab object. If successful, +** allocates and populates the SrcItem.pTab object. If successful, ** SQLITE_OK is returned. Otherwise, if an OOM error is encountered, ** SQLITE_NOMEM. */ int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ Select *pSel = pFrom->pSelect; @@ -6419,11 +6471,11 @@ #endif } /* ** Check to see if the pThis entry of pTabList is a self-join of a prior view. -** If it is, then return the SrcList_item for the prior view. If it is not, +** If it is, then return the SrcItem for the prior view. If it is not, ** then return 0. */ static SrcItem *isSelfJoinView( SrcList *pTabList, /* Search for self-joins in this FROM clause */ SrcItem *pThis /* Search for prior reference to this subquery */ @@ -7037,11 +7089,14 @@ }else{ VdbeNoopComment((v, "materialize %!S", pItem)); } sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); ExplainQueryPlan((pParse, 1, "MATERIALIZE %!S", pItem)); + dest.zAffSdst = sqlite3TableAffinityStr(db, pItem->pTab); sqlite3Select(pParse, pSub, &dest); + sqlite3DbFree(db, dest.zAffSdst); + dest.zAffSdst = 0; pItem->pTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1); VdbeComment((v, "end %!S", pItem)); sqlite3VdbeJumpHere(v, topAddr); @@ -7463,11 +7518,11 @@ ** in the right order to begin with. */ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, pDistinct, - 0, (sDistinct.isTnct==2 ? WHERE_DISTINCTBY : WHERE_GROUPBY) + p, (sDistinct.isTnct==2 ? WHERE_DISTINCTBY : WHERE_GROUPBY) | (orderByGrp ? 
WHERE_SORTBYGROUP : 0) | distFlag, 0 ); if( pWInfo==0 ){ sqlite3ExprListDelete(db, pDistinct); goto select_end; @@ -7762,11 +7817,11 @@ assert( minMaxFlag==WHERE_ORDERBY_NORMAL || pMinMaxOrderBy!=0 ); assert( pMinMaxOrderBy==0 || pMinMaxOrderBy->nExpr==1 ); SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy, - pDistinct, 0, minMaxFlag|distFlag, 0); + pDistinct, p, minMaxFlag|distFlag, 0); if( pWInfo==0 ){ goto select_end; } SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); eDist = sqlite3WhereIsDistinct(pWInfo); Index: src/shell.c.in ================================================================== --- src/shell.c.in +++ src/shell.c.in @@ -14,10 +14,12 @@ */ #if (defined(_WIN32) || defined(WIN32)) && !defined(_CRT_SECURE_NO_WARNINGS) /* This needs to come before any includes for MSVC compiler */ #define _CRT_SECURE_NO_WARNINGS #endif +typedef unsigned int u32; +typedef unsigned short int u16; /* ** Optionally #include a user-defined header, whereby compilation options ** may be set prior to where they take effect, but after platform setup. ** If SQLITE_CUSTOM_INCLUDE=? is defined, its value names the #include @@ -82,10 +84,18 @@ # ifndef _FILE_OFFSET_BITS # define _FILE_OFFSET_BITS 64 # endif # define _LARGEFILE_SOURCE 1 #endif + +#if defined(SQLITE_SHELL_FIDDLE) && !defined(_POSIX_SOURCE) +/* +** emcc requires _POSIX_SOURCE (or one of several similar defines) +** to expose strdup(). +*/ +# define _POSIX_SOURCE +#endif #include #include #include #include @@ -238,10 +248,22 @@ # define setTextMode(X,Y) #endif /* True if the timer is enabled */ static int enableTimer = 0; + +/* A version of strcmp() that works with NULL values */ +static int cli_strcmp(const char *a, const char *b){ + if( a==0 ) a = ""; + if( b==0 ) b = ""; + return strcmp(a,b); +} +static int cli_strncmp(const char *a, const char *b, size_t n){ + if( a==0 ) a = ""; + if( b==0 ) b = ""; + return strncmp(a,b,n); +} /* Return the current wall-clock time */ static sqlite3_int64 timeOfDay(void){ static sqlite3_vfs *clockVfs = 0; sqlite3_int64 t; @@ -527,10 +549,11 @@ */ static void utf8_width_print(FILE *pOut, int w, const char *zUtf){ int i; int n; int aw = w<0 ? -w : w; + if( zUtf==0 ) zUtf = ""; for(i=n=0; zUtf[i]; i++){ if( (zUtf[i]&0xc0)!=0x80 ){ n++; if( n==aw ){ do{ i++; }while( (zUtf[i]&0xc0)==0x80 ); @@ -670,11 +693,11 @@ /* For interactive input on Windows systems, translate the ** multi-byte characterset characters into UTF-8. */ if( stdin_is_interactive && in==stdin ){ char *zTrans = sqlite3_win32_mbcs_to_utf8_v2(zLine, 0); if( zTrans ){ - int nTrans = strlen30(zTrans)+1; + i64 nTrans = strlen(zTrans)+1; if( nTrans>nLine ){ zLine = realloc(zLine, nTrans); shell_check_oom(zLine); } memcpy(zLine, zTrans, nTrans); @@ -806,13 +829,13 @@ ** ** If the third argument, quote, is not '\0', then it is used as a ** quote character for zAppend. */ static void appendText(ShellText *p, const char *zAppend, char quote){ - int len; - int i; - int nAppend = strlen30(zAppend); + i64 len; + i64 i; + i64 nAppend = strlen30(zAppend); len = nAppend+p->n+1; if( quote ){ len += 2; for(i=0; iautoEQPtest ){ utf8_printf(p->out, "%d,%d,%s\n", iEqpId, p2, zText); } pNew = sqlite3_malloc64( sizeof(*pNew) + nText ); shell_check_oom(pNew); @@ -2024,18 +2058,18 @@ /* Render a single level of the graph that has iEqpId as its parent. Called ** recursively to render sublevels. 
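The cli_strcmp()/cli_strncmp() helpers above exist because strcmp() and strncmp() have undefined behaviour when handed a NULL pointer, and several of the shell's inputs (option values, column text) can legitimately be NULL. A minimal restatement of the idiom, using an invented safe_strcmp() name purely for illustration:

/* Illustrative sketch of the NULL-tolerant comparison idiom used above:
** treat a NULL string as "" instead of passing it to strcmp(). */
#include <stdio.h>
#include <string.h>

static int safe_strcmp(const char *a, const char *b){
  if( a==0 ) a = "";
  if( b==0 ) b = "";
  return strcmp(a, b);
}

int main(void){
  const char *zMaybe = 0;   /* e.g. a column value that came back NULL */
  if( safe_strcmp(zMaybe, "table")==0 ){
    printf("matched\n");
  }else{
    printf("no match (NULL compares like \"\")\n");
  }
  return 0;
}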
*/ static void eqp_render_level(ShellState *p, int iEqpId){ EQPGraphRow *pRow, *pNext; - int n = strlen30(p->sGraph.zPrefix); + i64 n = strlen(p->sGraph.zPrefix); char *z; for(pRow = eqp_next_row(p, iEqpId, 0); pRow; pRow = pNext){ pNext = eqp_next_row(p, iEqpId, pRow); z = pRow->zText; utf8_printf(p->out, "%s%s%s\n", p->sGraph.zPrefix, pNext ? "|--" : "`--", z); - if( n<(int)sizeof(p->sGraph.zPrefix)-7 ){ + if( n<(i64)sizeof(p->sGraph.zPrefix)-7 ){ memcpy(&p->sGraph.zPrefix[n], pNext ? "| " : " ", 4); eqp_render_level(p, pRow->iEqpId); p->sGraph.zPrefix[n] = 0; } } @@ -2631,10 +2665,11 @@ if( len>78 ){ len = 78; while( (zSql[len]&0xc0)==0x80 ) len--; } zCode = sqlite3_mprintf("%.*s", len, zSql); + shell_check_oom(zCode); for(i=0; zCode[i]; i++){ if( IsSpace(zSql[i]) ) zCode[i] = ' '; } if( iOffset<25 ){ zMsg = sqlite3_mprintf("\n %z\n %*s^--- error here", zCode, iOffset, ""); }else{ zMsg = sqlite3_mprintf("\n %z\n %*serror here ---^", zCode, iOffset-14, ""); @@ -2747,11 +2782,11 @@ { "cancelled_write_bytes: ", "Cancelled write bytes:" }, }; int i; for(i=0; icMode = p->mode; sqlite3_reset(pSql); return; } } @@ -3785,14 +3820,14 @@ for(i=1; rc==SQLITE_OK && i=2 && 0==strncmp(z, "-verbose", n) ){ + if( n>=2 && 0==cli_strncmp(z, "-verbose", n) ){ pState->expert.bVerbose = 1; } - else if( n>=2 && 0==strncmp(z, "-sample", n) ){ + else if( n>=2 && 0==cli_strncmp(z, "-sample", n) ){ if( i==(nArg-1) ){ raw_printf(stderr, "option requires an argument: %s\n", z); rc = SQLITE_ERROR; }else{ iSample = (int)integerValue(azArg[++i]); @@ -4136,22 +4171,24 @@ UNUSED_PARAMETER(azNotUsed); if( nArg!=3 || azArg==0 ) return 0; zTable = azArg[0]; zType = azArg[1]; zSql = azArg[2]; + if( zTable==0 ) return 0; + if( zType==0 ) return 0; dataOnly = (p->shellFlgs & SHFLG_DumpDataOnly)!=0; noSys = (p->shellFlgs & SHFLG_DumpNoSys)!=0; - if( strcmp(zTable, "sqlite_sequence")==0 && !noSys ){ + if( cli_strcmp(zTable, "sqlite_sequence")==0 && !noSys ){ if( !dataOnly ) raw_printf(p->out, "DELETE FROM sqlite_sequence;\n"); }else if( sqlite3_strglob("sqlite_stat?", zTable)==0 && !noSys ){ if( !dataOnly ) raw_printf(p->out, "ANALYZE sqlite_schema;\n"); - }else if( strncmp(zTable, "sqlite_", 7)==0 ){ + }else if( cli_strncmp(zTable, "sqlite_", 7)==0 ){ return 0; }else if( dataOnly ){ /* no-op */ - }else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){ + }else if( cli_strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){ char *zIns; if( !p->writableSchema ){ raw_printf(p->out, "PRAGMA writable_schema=ON;\n"); p->writableSchema = 1; } @@ -4165,11 +4202,11 @@ return 0; }else{ printSchemaLine(p->out, zSql, ";\n"); } - if( strcmp(zType, "table")==0 ){ + if( cli_strcmp(zType, "table")==0 ){ ShellText sSelect; ShellText sTable; char **azCol; int i; char *savedDestTable; @@ -4328,11 +4365,11 @@ ".clone NEWDB Clone data into NEWDB from the existing database", #endif ".connection [close] [#] Open or close an auxiliary database connection", ".databases List names and files of attached databases", ".dbconfig ?op? ?val? List or change sqlite3_db_config() options", -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) +#if SQLITE_SHELL_HAVE_RECOVER ".dbinfo ?DB? Show status information about the database", #endif ".dump ?OBJECTS? 
Render database content as SQL", " Options:", " --data-only Output only INSERT statements", @@ -4409,11 +4446,11 @@ " insert SQL insert statements for TABLE", " json Results in a JSON array", " line One value per line", " list Values delimited by \"|\"", " markdown Markdown table format", - " qbox Shorthand for \"box --width 60 --quote\"", + " qbox Shorthand for \"box --wrap 60 --quote\"", " quote Escape answers as for SQL", " table ASCII-art table", " tabs Tab-separated values", " tcl TCL list elements", " OPTIONS: (for columnar modes or insert mode):", @@ -4476,14 +4513,13 @@ #ifndef SQLITE_SHELL_FIDDLE ".quit Exit this program", ".read FILE Read input from FILE or command output", " If FILE begins with \"|\", it is a command that generates the input.", #endif -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) +#if SQLITE_SHELL_HAVE_RECOVER ".recover Recover as much data as possible from corrupt db.", - " --freelist-corrupt Assume the freelist is corrupt", - " --recovery-db NAME Store recovery metadata in database file NAME", + " --ignore-freelist Ignore pages that appear to be on db freelist", " --lost-and-found TABLE Alternative name for the lost-and-found table", " --no-rowids Do not attempt to recover rowid values", " that are not also INTEGER PRIMARY KEYs", #endif #ifndef SQLITE_SHELL_FIDDLE @@ -4521,10 +4557,16 @@ " --sha3-224 Use the sha3-224 algorithm", " --sha3-256 Use the sha3-256 algorithm (default)", " --sha3-384 Use the sha3-384 algorithm", " --sha3-512 Use the sha3-512 algorithm", " Any other argument is a LIKE pattern for tables to hash", +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) + ".shared-schema CMD DB1 DB2 ...", + " Commands:", + " check Determine if DB1, DB2, etc have identical schemas", + " fix Attempt to make DB1, DB2, etc compatible", +#endif #if !defined(SQLITE_NOHAVE_SYSTEM) && !defined(SQLITE_SHELL_FIDDLE) ".shell CMD ARGS... Run CMD ARGS... in a system shell", #endif ".show Show the current values for various settings", ".stats ?ARG? Show stats or turn stats on or off", @@ -4584,13 +4626,13 @@ int j = 0; int n = 0; char *zPat; if( zPattern==0 || zPattern[0]=='0' - || strcmp(zPattern,"-a")==0 - || strcmp(zPattern,"-all")==0 - || strcmp(zPattern,"--all")==0 + || cli_strcmp(zPattern,"-a")==0 + || cli_strcmp(zPattern,"-all")==0 + || cli_strcmp(zPattern,"--all")==0 ){ /* Show all commands, but only one line per command */ if( zPattern==0 ) zPattern = ""; for(i=0; iin ){ fclose(in); }else{ while( fgets(zLine, sizeof(zLine), p->in)!=0 ){ nLine++; - if(strncmp(zLine, "| end ", 6)==0 ) break; + if(cli_strncmp(zLine, "| end ", 6)==0 ) break; } p->lineno = nLine; } sqlite3_free(a); utf8_printf(stderr,"Error on line %d of --hexdb input\n", nLine); @@ -4943,32 +4985,32 @@ sqlite3_value **argv ){ const char *zText = (const char*)sqlite3_value_text(argv[0]); UNUSED_PARAMETER(argc); if( zText && zText[0]=='\'' ){ - int nText = sqlite3_value_bytes(argv[0]); - int i; + i64 nText = sqlite3_value_bytes(argv[0]); + i64 i; char zBuf1[20]; char zBuf2[20]; const char *zNL = 0; const char *zCR = 0; - int nCR = 0; - int nNL = 0; + i64 nCR = 0; + i64 nNL = 0; for(i=0; zText[i]; i++){ if( zNL==0 && zText[i]=='\n' ){ zNL = unused_string(zText, "\\n", "\\012", zBuf1); - nNL = (int)strlen(zNL); + nNL = strlen(zNL); } if( zCR==0 && zText[i]=='\r' ){ zCR = unused_string(zText, "\\r", "\\015", zBuf2); - nCR = (int)strlen(zCR); + nCR = strlen(zCR); } } if( zNL || zCR ){ - int iOut = 0; + i64 iOut = 0; i64 nMax = (nNL > nCR) ? 
nNL : nCR; i64 nAlloc = nMax * nText + (nMax+64)*2; char *zOut = (char*)sqlite3_malloc64(nAlloc); if( zOut==0 ){ sqlite3_result_error_nomem(context); @@ -5060,10 +5102,15 @@ } case SHELL_OPEN_READONLY: { sqlite3_open_v2(zDbFilename, &p->db, SQLITE_OPEN_READONLY|p->openFlags, 0); break; + } + case SHELL_OPEN_SHAREDSCHEMA: { + sqlite3_open_v2(p->pAuxDb->zDbFilename, &p->db, + SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_SHARED_SCHEMA,0); + break; } case SHELL_OPEN_UNSPEC: case SHELL_OPEN_NORMAL: { sqlite3_open_v2(zDbFilename, &p->db, SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|p->openFlags, 0); @@ -5091,11 +5138,11 @@ sqlite3_series_init(p->db, 0, 0); #ifndef SQLITE_SHELL_FIDDLE sqlite3_fileio_init(p->db, 0, 0); sqlite3_completion_init(p->db, 0, 0); #endif -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) +#if SQLITE_SHELL_HAVE_RECOVER sqlite3_dbdata_init(p->db, 0, 0); #endif #ifdef SQLITE_HAVE_ZLIB if( !p->bSafeModePersist ){ sqlite3_zipfile_init(p->db, 0, 0); @@ -5205,12 +5252,12 @@ #elif HAVE_LINENOISE /* ** Linenoise completion callback */ static void linenoise_completion(const char *zLine, linenoiseCompletions *lc){ - int nLine = strlen30(zLine); - int i, iStart; + i64 nLine = strlen(zLine); + i64 i, iStart; sqlite3_stmt *pStmt = 0; char *zSql; char zBuf[1000]; if( nLine>sizeof(zBuf)-30 ) return; @@ -5344,15 +5391,15 @@ ** recognized and do the right thing. NULL is returned if the output ** filename is "off". */ static FILE *output_file_open(const char *zFile, int bTextMode){ FILE *f; - if( strcmp(zFile,"stdout")==0 ){ + if( cli_strcmp(zFile,"stdout")==0 ){ f = stdout; - }else if( strcmp(zFile, "stderr")==0 ){ + }else if( cli_strcmp(zFile, "stderr")==0 ){ f = stderr; - }else if( strcmp(zFile, "off")==0 ){ + }else if( cli_strcmp(zFile, "off")==0 ){ f = 0; }else{ f = fopen(zFile, bTextMode ? "w" : "wb"); if( f==0 ){ utf8_printf(stderr, "Error: cannot open \"%s\"\n", zFile); @@ -5372,11 +5419,11 @@ void *pX /* Auxiliary output */ ){ ShellState *p = (ShellState*)pArg; sqlite3_stmt *pStmt; const char *zSql; - int nSql; + i64 nSql; if( p->traceOut==0 ) return 0; if( mType==SQLITE_TRACE_CLOSE ){ utf8_printf(p->traceOut, "-- closing database connection\n"); return 0; } @@ -5400,21 +5447,22 @@ break; } } } if( zSql==0 ) return 0; - nSql = strlen30(zSql); + nSql = strlen(zSql); + if( nSql>1000000000 ) nSql = 1000000000; while( nSql>0 && zSql[nSql-1]==';' ){ nSql--; } switch( mType ){ case SQLITE_TRACE_ROW: case SQLITE_TRACE_STMT: { - utf8_printf(p->traceOut, "%.*s;\n", nSql, zSql); + utf8_printf(p->traceOut, "%.*s;\n", (int)nSql, zSql); break; } case SQLITE_TRACE_PROFILE: { sqlite3_int64 nNanosec = *(sqlite3_int64*)pX; - utf8_printf(p->traceOut, "%.*s; -- %lld ns\n", nSql, zSql, nNanosec); + utf8_printf(p->traceOut, "%.*s; -- %lld ns\n", (int)nSql, zSql, nNanosec); break; } } return 0; } @@ -5959,11 +6007,11 @@ } raw_printf(p->out, "\n"); } if( zDb==0 ){ zSchemaTab = sqlite3_mprintf("main.sqlite_schema"); - }else if( strcmp(zDb,"temp")==0 ){ + }else if( cli_strcmp(zDb,"temp")==0 ){ zSchemaTab = sqlite3_mprintf("%s", "sqlite_temp_schema"); }else{ zSchemaTab = sqlite3_mprintf("\"%w\".sqlite_schema", zDb); } for(i=0; idb, zDb, SQLITE_FCNTL_DATA_VERSION, &iDataVersion); utf8_printf(p->out, "%-20s %u\n", "data version", iDataVersion); return 0; } -#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) - && defined(SQLITE_ENABLE_DBPAGE_VTAB) */ +#endif /* SQLITE_SHELL_HAVE_RECOVER */ /* ** Print the current sqlite3_errmsg() value to stderr and return 1. 
*/ static int shellDatabaseError(sqlite3 *db){ @@ -6093,11 +6140,11 @@ */ static int optionMatch(const char *zStr, const char *zOpt){ if( zStr[0]!='-' ) return 0; zStr++; if( zStr[0]=='-' ) zStr++; - return strcmp(zStr, zOpt)==0; + return cli_strcmp(zStr, zOpt)==0; } /* ** Delete a file. */ @@ -7249,11 +7296,10 @@ } /* End of the ".archive" or ".ar" command logic *******************************************************************************/ #endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_HAVE_ZLIB) */ -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) /* ** If (*pRc) is not SQLITE_OK when this function is called, it is a no-op. ** Otherwise, the SQL statement or statements in zSql are executed using ** database connection db and the error code written to *pRc before ** this function returns. @@ -7288,364 +7334,287 @@ } sqlite3_free(z); } } -/* -** If *pRc is not SQLITE_OK when this function is called, it is a no-op. -** Otherwise, an attempt is made to allocate, zero and return a pointer -** to a buffer nByte bytes in size. If an OOM error occurs, *pRc is set -** to SQLITE_NOMEM and NULL returned. -*/ -static void *shellMalloc(int *pRc, sqlite3_int64 nByte){ - void *pRet = 0; - if( *pRc==SQLITE_OK ){ - pRet = sqlite3_malloc64(nByte); - if( pRet==0 ){ - *pRc = SQLITE_NOMEM; - }else{ - memset(pRet, 0, nByte); - } - } - return pRet; -} - -/* -** If *pRc is not SQLITE_OK when this function is called, it is a no-op. -** Otherwise, zFmt is treated as a printf() style string. The result of -** formatting it along with any trailing arguments is written into a -** buffer obtained from sqlite3_malloc(), and pointer to which is returned. -** It is the responsibility of the caller to eventually free this buffer -** using a call to sqlite3_free(). -** -** If an OOM error occurs, (*pRc) is set to SQLITE_NOMEM and a NULL -** pointer returned. -*/ -static char *shellMPrintf(int *pRc, const char *zFmt, ...){ - char *z = 0; - if( *pRc==SQLITE_OK ){ - va_list ap; - va_start(ap, zFmt); - z = sqlite3_vmprintf(zFmt, ap); - va_end(ap); - if( z==0 ){ - *pRc = SQLITE_NOMEM; - } - } - return z; -} - - -/* -** When running the ".recover" command, each output table, and the special -** orphaned row table if it is required, is represented by an instance -** of the following struct. -*/ -typedef struct RecoverTable RecoverTable; -struct RecoverTable { - char *zQuoted; /* Quoted version of table name */ - int nCol; /* Number of columns in table */ - char **azlCol; /* Array of column lists */ - int iPk; /* Index of IPK column */ -}; - -/* -** Free a RecoverTable object allocated by recoverFindTable() or -** recoverOrphanTable(). -*/ -static void recoverFreeTable(RecoverTable *pTab){ - if( pTab ){ - sqlite3_free(pTab->zQuoted); - if( pTab->azlCol ){ - int i; - for(i=0; i<=pTab->nCol; i++){ - sqlite3_free(pTab->azlCol[i]); - } - sqlite3_free(pTab->azlCol); - } - sqlite3_free(pTab); - } -} - -/* -** This function is a no-op if (*pRc) is not SQLITE_OK when it is called. -** Otherwise, it allocates and returns a RecoverTable object based on the -** final four arguments passed to this function. It is the responsibility -** of the caller to eventually free the returned object using -** recoverFreeTable(). 
-*/ -static RecoverTable *recoverNewTable( - int *pRc, /* IN/OUT: Error code */ - const char *zName, /* Name of table */ - const char *zSql, /* CREATE TABLE statement */ - int bIntkey, - int nCol -){ - sqlite3 *dbtmp = 0; /* sqlite3 handle for testing CREATE TABLE */ - int rc = *pRc; - RecoverTable *pTab = 0; - - pTab = (RecoverTable*)shellMalloc(&rc, sizeof(RecoverTable)); - if( rc==SQLITE_OK ){ - int nSqlCol = 0; - int bSqlIntkey = 0; - sqlite3_stmt *pStmt = 0; - - rc = sqlite3_open("", &dbtmp); - if( rc==SQLITE_OK ){ - sqlite3_create_function(dbtmp, "shell_idquote", 1, SQLITE_UTF8, 0, - shellIdQuote, 0, 0); - } - if( rc==SQLITE_OK ){ - rc = sqlite3_exec(dbtmp, "PRAGMA writable_schema = on", 0, 0, 0); - } - if( rc==SQLITE_OK ){ - rc = sqlite3_exec(dbtmp, zSql, 0, 0, 0); - if( rc==SQLITE_ERROR ){ - rc = SQLITE_OK; - goto finished; - } - } - shellPreparePrintf(dbtmp, &rc, &pStmt, - "SELECT count(*) FROM pragma_table_info(%Q)", zName - ); - if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - nSqlCol = sqlite3_column_int(pStmt, 0); - } - shellFinalize(&rc, pStmt); - - if( rc!=SQLITE_OK || nSqlColiPk to the index - ** of the column, where columns are 0-numbered from left to right. - ** Or, if this is a WITHOUT ROWID table or if there is no IPK column, - ** leave zPk as "_rowid_" and pTab->iPk at -2. */ - pTab->iPk = -2; - if( bIntkey ){ - shellPreparePrintf(dbtmp, &rc, &pPkFinder, - "SELECT cid, name FROM pragma_table_info(%Q) " - " WHERE pk=1 AND type='integer' COLLATE nocase" - " AND NOT EXISTS (SELECT cid FROM pragma_table_info(%Q) WHERE pk=2)" - , zName, zName - ); - if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pPkFinder) ){ - pTab->iPk = sqlite3_column_int(pPkFinder, 0); - zPk = (const char*)sqlite3_column_text(pPkFinder, 1); - if( zPk==0 ){ zPk = "_"; /* Defensive. Should never happen */ } - } - } - - pTab->zQuoted = shellMPrintf(&rc, "\"%w\"", zName); - pTab->azlCol = (char**)shellMalloc(&rc, sizeof(char*) * (nSqlCol+1)); - pTab->nCol = nSqlCol; - - if( bIntkey ){ - pTab->azlCol[0] = shellMPrintf(&rc, "\"%w\"", zPk); - }else{ - pTab->azlCol[0] = shellMPrintf(&rc, ""); - } - i = 1; - shellPreparePrintf(dbtmp, &rc, &pStmt, - "SELECT %Q || group_concat(shell_idquote(name), ', ') " - " FILTER (WHERE cid!=%d) OVER (ORDER BY %s cid) " - "FROM pragma_table_info(%Q)", - bIntkey ? ", " : "", pTab->iPk, - bIntkey ? "" : "(CASE WHEN pk=0 THEN 1000000 ELSE pk END), ", - zName - ); - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - const char *zText = (const char*)sqlite3_column_text(pStmt, 0); - pTab->azlCol[i] = shellMPrintf(&rc, "%s%s", pTab->azlCol[0], zText); - i++; - } - shellFinalize(&rc, pStmt); - - shellFinalize(&rc, pPkFinder); - } - } - - finished: - sqlite3_close(dbtmp); - *pRc = rc; - if( rc!=SQLITE_OK || (pTab && pTab->zQuoted==0) ){ - recoverFreeTable(pTab); - pTab = 0; - } - return pTab; -} - -/* -** This function is called to search the schema recovered from the -** sqlite_schema table of the (possibly) corrupt database as part -** of a ".recover" command. Specifically, for a table with root page -** iRoot and at least nCol columns. Additionally, if bIntkey is 0, the -** table must be a WITHOUT ROWID table, or if non-zero, not one of -** those. -** -** If a table is found, a (RecoverTable*) object is returned. Or, if -** no such table is found, but bIntkey is false and iRoot is the -** root page of an index in the recovered schema, then (*pbNoop) is -** set to true and NULL returned. 
Or, if there is no such table or -** index, NULL is returned and (*pbNoop) set to 0, indicating that -** the caller should write data to the orphans table. -*/ -static RecoverTable *recoverFindTable( - ShellState *pState, /* Shell state object */ - int *pRc, /* IN/OUT: Error code */ - int iRoot, /* Root page of table */ - int bIntkey, /* True for an intkey table */ - int nCol, /* Number of columns in table */ - int *pbNoop /* OUT: True if iRoot is root of index */ -){ - sqlite3_stmt *pStmt = 0; - RecoverTable *pRet = 0; - int bNoop = 0; - const char *zSql = 0; - const char *zName = 0; - - /* Search the recovered schema for an object with root page iRoot. */ - shellPreparePrintf(pState->db, pRc, &pStmt, - "SELECT type, name, sql FROM recovery.schema WHERE rootpage=%d", iRoot - ); - while( *pRc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - const char *zType = (const char*)sqlite3_column_text(pStmt, 0); - if( bIntkey==0 && sqlite3_stricmp(zType, "index")==0 ){ - bNoop = 1; - break; - } - if( sqlite3_stricmp(zType, "table")==0 ){ - zName = (const char*)sqlite3_column_text(pStmt, 1); - zSql = (const char*)sqlite3_column_text(pStmt, 2); - if( zName!=0 && zSql!=0 ){ - pRet = recoverNewTable(pRc, zName, zSql, bIntkey, nCol); - break; - } - } - } - - shellFinalize(pRc, pStmt); - *pbNoop = bNoop; - return pRet; -} - -/* -** Return a RecoverTable object representing the orphans table. -*/ -static RecoverTable *recoverOrphanTable( - ShellState *pState, /* Shell state object */ - int *pRc, /* IN/OUT: Error code */ - const char *zLostAndFound, /* Base name for orphans table */ - int nCol /* Number of user data columns */ -){ - RecoverTable *pTab = 0; - if( nCol>=0 && *pRc==SQLITE_OK ){ - int i; - - /* This block determines the name of the orphan table. The prefered - ** name is zLostAndFound. But if that clashes with another name - ** in the recovered schema, try zLostAndFound_0, zLostAndFound_1 - ** and so on until a non-clashing name is found. 
*/ - int iTab = 0; - char *zTab = shellMPrintf(pRc, "%s", zLostAndFound); - sqlite3_stmt *pTest = 0; - shellPrepare(pState->db, pRc, - "SELECT 1 FROM recovery.schema WHERE name=?", &pTest - ); - if( pTest ) sqlite3_bind_text(pTest, 1, zTab, -1, SQLITE_TRANSIENT); - while( *pRc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pTest) ){ - shellReset(pRc, pTest); - sqlite3_free(zTab); - zTab = shellMPrintf(pRc, "%s_%d", zLostAndFound, iTab++); - sqlite3_bind_text(pTest, 1, zTab, -1, SQLITE_TRANSIENT); - } - shellFinalize(pRc, pTest); - - pTab = (RecoverTable*)shellMalloc(pRc, sizeof(RecoverTable)); - if( pTab ){ - pTab->zQuoted = shellMPrintf(pRc, "\"%w\"", zTab); - pTab->nCol = nCol; - pTab->iPk = -2; - if( nCol>0 ){ - pTab->azlCol = (char**)shellMalloc(pRc, sizeof(char*) * (nCol+1)); - if( pTab->azlCol ){ - pTab->azlCol[nCol] = shellMPrintf(pRc, ""); - for(i=nCol-1; i>=0; i--){ - pTab->azlCol[i] = shellMPrintf(pRc, "%s, NULL", pTab->azlCol[i+1]); - } - } - } - - if( *pRc!=SQLITE_OK ){ - recoverFreeTable(pTab); - pTab = 0; - }else{ - raw_printf(pState->out, - "CREATE TABLE %s(rootpgno INTEGER, " - "pgno INTEGER, nfield INTEGER, id INTEGER", pTab->zQuoted - ); - for(i=0; iout, ", c%d", i); - } - raw_printf(pState->out, ");\n"); - } - } - sqlite3_free(zTab); - } - return pTab; +static int sharedSchemaFix(ShellState *pState, const char *zDb){ + int rc = SQLITE_OK; + i64 iLast = 0; + int iCookie = 0; + int iAutoVacuum = 0; + sqlite3_stmt *pStmt = 0; + + shellExecPrintf(pState->db, &rc, "ATTACH '%q' AS _shared_schema_tmp", zDb); + shellExecPrintf(pState->db, &rc, "PRAGMA writable_schema = 1"); + shellExecPrintf(pState->db, &rc, "BEGIN"); + shellPreparePrintf(pState->db, &rc, &pStmt, + "SELECT max(rowid) FROM _shared_schema_tmp.sqlite_master" + ); + sqlite3_step(pStmt); + iLast = sqlite3_column_int64(pStmt, 0); + shellFinalize(&rc, pStmt); + shellPreparePrintf(pState->db, &rc, &pStmt, + "INSERT INTO _shared_schema_tmp.sqlite_master SELECT " + " type, name, tbl_name, (" + " SELECT rootpage FROM _shared_schema_tmp.sqlite_master WHERE " + " type IS o.type AND name IS o.name AND rowid<=?" + " ), sql FROM main.sqlite_master AS o" + ); + sqlite3_bind_int64(pStmt, 1, iLast); + sqlite3_step(pStmt); + shellFinalize(&rc, pStmt); + + shellExecPrintf(pState->db, &rc, + "DELETE FROM _shared_schema_tmp.sqlite_master WHERE rowid<=%lld", + iLast + ); + shellExecPrintf(pState->db, &rc, "COMMIT"); + sqlite3_exec(pState->db, "PRAGMA writable_schema = 0", 0, 0, 0); + + /* Copy the auto-vacuum setting from main to the target db */ + shellPreparePrintf(pState->db, &rc, &pStmt, "PRAGMA main.auto_vacuum"); + sqlite3_step(pStmt); + iAutoVacuum = sqlite3_column_int(pStmt, 0); + shellFinalize(&rc, pStmt); + shellExecPrintf(pState->db, &rc, + "PRAGMA _shared_schema_tmp.auto_vacuum = %d", iAutoVacuum + ); + + /* Vacuum the db in order to standardize the rootpage numbers. 
*/ + shellExecPrintf(pState->db, &rc, "VACUUM _shared_schema_tmp"); + + /* Set the schema-cookie value to the same as database "main" */ + shellPreparePrintf(pState->db, &rc, &pStmt, "PRAGMA main.schema_version"); + sqlite3_step(pStmt); + iCookie = sqlite3_column_int(pStmt, 0); + shellFinalize(&rc, pStmt); + shellExecPrintf(pState->db, &rc, + "PRAGMA _shared_schema_tmp.schema_version = %d", iCookie + ); + + sqlite3_exec(pState->db, "DETACH _shared_schema_tmp", 0, 0, 0); + return rc; +} + +static int sharedSchemaCheck(ShellState *pState, const char *zDb, int *peFix){ + int rc = SQLITE_OK; + int bFailed = 0; + sqlite3_stmt *pStmt = 0; + + if( peFix ) *peFix = 0; + shellExecPrintf(pState->db, &rc, "ATTACH '%q' AS _shared_schema_tmp", zDb); + + /* Check if this database has the same set of objects as the current db */ + shellPreparePrintf(pState->db, &rc, &pStmt, + "SELECT type, name FROM _shared_schema_tmp.sqlite_master AS o " + "WHERE NOT EXISTS (" + " SELECT 1 FROM main.sqlite_master " + " WHERE name IS o.name AND type IS o.type" + ")" + " UNION ALL " + "SELECT type, name FROM main.sqlite_master AS o " + "WHERE NOT EXISTS (" + " SELECT 1 FROM _shared_schema_tmp.sqlite_master " + " WHERE name IS o.name AND type IS o.type" + ")" + ); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + utf8_printf(pState->out, "%s is NOT compatible (objects)\n", zDb); + bFailed = 1; + } + shellFinalize(&rc, pStmt); + + /* Check if this database has the same set of SQL statements as the + ** current db. */ + if( bFailed==0 ){ + shellPreparePrintf(pState->db, &rc, &pStmt, + "SELECT 1 FROM _shared_schema_tmp.sqlite_master AS o " + "WHERE sql IS NOT (" + " SELECT sql FROM main.sqlite_master " + " WHERE name IS o.name AND type IS o.type" + ")" + ); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + utf8_printf(pState->out, "%s is NOT compatible (SQL)\n", zDb); + bFailed = 1; + } + shellFinalize(&rc, pStmt); + } + + /* Check if this database has the same set of root pages as the current + ** db. */ + if( bFailed==0 ){ + shellPreparePrintf(pState->db, &rc, &pStmt, + "SELECT 1 FROM _shared_schema_tmp.sqlite_master AS o " + "WHERE rootpage IS NOT (" + " SELECT rootpage FROM main.sqlite_master " + " WHERE name IS o.name AND type IS o.type" + ")" + ); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + if( peFix==0 ){ + utf8_printf(pState->out, "%s is NOT compatible (root pages)\n", zDb); + } + bFailed = 1; + if( peFix ) *peFix = 1; + } + shellFinalize(&rc, pStmt); + } + + if( bFailed==0 ){ + shellPreparePrintf(pState->db, &rc, &pStmt, + "SELECT 1 WHERE (" + " SELECT group_concat(rootpage || '.' || name || '.' || sql, '.') " + " FROM _shared_schema_tmp.sqlite_master" + ") IS NOT (" + " SELECT group_concat(rootpage || '.' || name || '.' 
|| sql, '.') " + " FROM main.sqlite_master" + ")" + ); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + if( peFix==0 ){ + utf8_printf(pState->out, + "%s is NOT compatible (order of sqlite_master rows)\n", zDb + ); + } + bFailed = 1; + if( peFix ) *peFix = 2; + } + shellFinalize(&rc, pStmt); + } + + if( bFailed==0 ){ + int iMain = -1; + int iNew = +1; + shellPreparePrintf(pState->db, &rc, &pStmt, + "PRAGMA main.schema_version" + ); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + iMain = sqlite3_column_int(pStmt, 0); + } + shellFinalize(&rc, pStmt); + shellPreparePrintf(pState->db, &rc, &pStmt, + "PRAGMA _shared_schema_tmp.schema_version" + ); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + iNew = sqlite3_column_int(pStmt, 0); + } + shellFinalize(&rc, pStmt); + if( rc==SQLITE_OK && iMain!=iNew ){ + if( peFix==0 ){ + utf8_printf(pState->out, + "%s is NOT compatible (schema cookie)\n", zDb + ); + } + bFailed = 1; + if( peFix ) *peFix = 3; + } + } + + if( rc==SQLITE_OK && bFailed==0 ){ + utf8_printf(pState->out, "%s is compatible\n", zDb); + } + + sqlite3_exec(pState->db, "DETACH _shared_schema_tmp", 0, 0, 0); + return rc; +} + +/* +** .shared-schema check|fix DB1 DB2... +*/ +static int sharedSchemaDotCommand( + ShellState *pState, /* Current shell tool state */ + char **azArg, /* Array of arguments passed to dot command */ + int nArg /* Number of entries in azArg[] */ +){ + int rc = SQLITE_OK; + int bFix = 0; /* Fix databases if possible */ + int n1; + int i; + if( nArg<3 ){ + goto shared_schema_usage; + } + + n1 = (int)strlen(azArg[1]); + if( n1>0 && n1<=3 && memcmp("fix", azArg[1], n1)==0 ){ + bFix = 1; + }else if( n1==0 || n1>5 || memcmp("check", azArg[1], n1) ){ + goto shared_schema_usage; + } + + for(i=2; rc==SQLITE_OK && iout, "Fixing %s... ", azArg[i]); + fflush(pState->out); + rc = sharedSchemaFix(pState, azArg[i]); + if( rc==SQLITE_OK ){ + rc = sharedSchemaCheck(pState, azArg[i], &eFix); + if( rc==SQLITE_OK && eFix ){ + utf8_printf(pState->out, "VACUUMing main... "); + fflush(pState->out); + rc = sqlite3_exec(pState->db, "VACUUM main", 0, 0, 0); + if( rc==SQLITE_OK ){ + rc = sharedSchemaCheck(pState, azArg[i], 0); + } + } + } + } + } + + return rc; + shared_schema_usage: + raw_printf(stderr, "usage: .shared-schema check|fix DB1 DB2...\n"); + return SQLITE_ERROR; +} + +#if SQLITE_SHELL_HAVE_RECOVER +/* +** This function is used as a callback by the recover extension. Simply +** print the supplied SQL statement to stdout. +*/ +static int recoverSqlCb(void *pCtx, const char *zSql){ + ShellState *pState = (ShellState*)pCtx; + utf8_printf(pState->out, "%s;\n", zSql); + return SQLITE_OK; } /* ** This function is called to recover data from the database. A script ** to construct a new database containing all recovered data is output ** on stream pState->out. */ static int recoverDatabaseCmd(ShellState *pState, int nArg, char **azArg){ int rc = SQLITE_OK; - sqlite3_stmt *pLoop = 0; /* Loop through all root pages */ - sqlite3_stmt *pPages = 0; /* Loop through all pages in a group */ - sqlite3_stmt *pCells = 0; /* Loop through all cells in a page */ - const char *zRecoveryDb = ""; /* Name of "recovery" database */ - const char *zLostAndFound = "lost_and_found"; - int i; - int nOrphan = -1; - RecoverTable *pOrphan = 0; - - int bFreelist = 1; /* 0 if --freelist-corrupt is specified */ + const char *zRecoveryDb = ""; /* Name of "recovery" database. 
Debug only */ + const char *zLAF = "lost_and_found"; + int bFreelist = 1; /* 0 if --ignore-freelist is specified */ int bRowids = 1; /* 0 if --no-rowids */ + sqlite3_recover *p = 0; + int i = 0; + for(i=1; iout, azArg[0]); return 1; } } - shellExecPrintf(pState->db, &rc, - /* Attach an in-memory database named 'recovery'. Create an indexed - ** cache of the sqlite_dbptr virtual table. */ - "PRAGMA writable_schema = on;" - "ATTACH %Q AS recovery;" - "DROP TABLE IF EXISTS recovery.dbptr;" - "DROP TABLE IF EXISTS recovery.freelist;" - "DROP TABLE IF EXISTS recovery.map;" - "DROP TABLE IF EXISTS recovery.schema;" - "CREATE TABLE recovery.freelist(pgno INTEGER PRIMARY KEY);", zRecoveryDb - ); - - if( bFreelist ){ - shellExec(pState->db, &rc, - "WITH trunk(pgno) AS (" - " SELECT shell_int32(" - " (SELECT data FROM sqlite_dbpage WHERE pgno=1), 8) AS x " - " WHERE x>0" - " UNION" - " SELECT shell_int32(" - " (SELECT data FROM sqlite_dbpage WHERE pgno=trunk.pgno), 0) AS x " - " FROM trunk WHERE x>0" - ")," - "freelist(data, n, freepgno) AS (" - " SELECT data, min(16384, shell_int32(data, 1)-1), t.pgno " - " FROM trunk t, sqlite_dbpage s WHERE s.pgno=t.pgno" - " UNION ALL" - " SELECT data, n-1, shell_int32(data, 2+n) " - " FROM freelist WHERE n>=0" - ")" - "REPLACE INTO recovery.freelist SELECT freepgno FROM freelist;" - ); - } - - /* If this is an auto-vacuum database, add all pointer-map pages to - ** the freelist table. Do this regardless of whether or not - ** --freelist-corrupt was specified. */ - shellExec(pState->db, &rc, - "WITH ptrmap(pgno) AS (" - " SELECT 2 WHERE shell_int32(" - " (SELECT data FROM sqlite_dbpage WHERE pgno=1), 13" - " )" - " UNION ALL " - " SELECT pgno+1+(SELECT page_size FROM pragma_page_size)/5 AS pp " - " FROM ptrmap WHERE pp<=(SELECT page_count FROM pragma_page_count)" - ")" - "REPLACE INTO recovery.freelist SELECT pgno FROM ptrmap" - ); - - shellExec(pState->db, &rc, - "CREATE TABLE recovery.dbptr(" - " pgno, child, PRIMARY KEY(child, pgno)" - ") WITHOUT ROWID;" - "INSERT OR IGNORE INTO recovery.dbptr(pgno, child) " - " SELECT * FROM sqlite_dbptr" - " WHERE pgno NOT IN freelist AND child NOT IN freelist;" - - /* Delete any pointer to page 1. This ensures that page 1 is considered - ** a root page, regardless of how corrupt the db is. */ - "DELETE FROM recovery.dbptr WHERE child = 1;" - - /* Delete all pointers to any pages that have more than one pointer - ** to them. Such pages will be treated as root pages when recovering - ** data. */ - "DELETE FROM recovery.dbptr WHERE child IN (" - " SELECT child FROM recovery.dbptr GROUP BY child HAVING count(*)>1" - ");" - - /* Create the "map" table that will (eventually) contain instructions - ** for dealing with each page in the db that contains one or more - ** records. */ - "CREATE TABLE recovery.map(" - "pgno INTEGER PRIMARY KEY, maxlen INT, intkey, root INT" - ");" - - /* Populate table [map]. If there are circular loops of pages in the - ** database, the following adds all pages in such a loop to the map - ** as individual root pages. This could be handled better. 
*/ - "WITH pages(i, maxlen) AS (" - " SELECT page_count, (" - " SELECT max(field+1) FROM sqlite_dbdata WHERE pgno=page_count" - " ) FROM pragma_page_count WHERE page_count>0" - " UNION ALL" - " SELECT i-1, (" - " SELECT max(field+1) FROM sqlite_dbdata WHERE pgno=i-1" - " ) FROM pages WHERE i>=2" - ")" - "INSERT INTO recovery.map(pgno, maxlen, intkey, root) " - " SELECT i, maxlen, NULL, (" - " WITH p(orig, pgno, parent) AS (" - " SELECT 0, i, (SELECT pgno FROM recovery.dbptr WHERE child=i)" - " UNION " - " SELECT i, p.parent, " - " (SELECT pgno FROM recovery.dbptr WHERE child=p.parent) FROM p" - " )" - " SELECT pgno FROM p WHERE (parent IS NULL OR pgno = orig)" - ") " - "FROM pages WHERE maxlen IS NOT NULL AND i NOT IN freelist;" - "UPDATE recovery.map AS o SET intkey = (" - " SELECT substr(data, 1, 1)==X'0D' FROM sqlite_dbpage WHERE pgno=o.pgno" - ");" - - /* Extract data from page 1 and any linked pages into table - ** recovery.schema. With the same schema as an sqlite_schema table. */ - "CREATE TABLE recovery.schema(type, name, tbl_name, rootpage, sql);" - "INSERT INTO recovery.schema SELECT " - " max(CASE WHEN field=0 THEN value ELSE NULL END)," - " max(CASE WHEN field=1 THEN value ELSE NULL END)," - " max(CASE WHEN field=2 THEN value ELSE NULL END)," - " max(CASE WHEN field=3 THEN value ELSE NULL END)," - " max(CASE WHEN field=4 THEN value ELSE NULL END)" - "FROM sqlite_dbdata WHERE pgno IN (" - " SELECT pgno FROM recovery.map WHERE root=1" - ")" - "GROUP BY pgno, cell;" - "CREATE INDEX recovery.schema_rootpage ON schema(rootpage);" - ); - - /* Open a transaction, then print out all non-virtual, non-"sqlite_%" - ** CREATE TABLE statements that extracted from the existing schema. */ - if( rc==SQLITE_OK ){ - sqlite3_stmt *pStmt = 0; - /* ".recover" might output content in an order which causes immediate - ** foreign key constraints to be violated. So disable foreign-key - ** constraint enforcement to prevent problems when running the output - ** script. */ - raw_printf(pState->out, "PRAGMA foreign_keys=OFF;\n"); - raw_printf(pState->out, "BEGIN;\n"); - raw_printf(pState->out, "PRAGMA writable_schema = on;\n"); - shellPrepare(pState->db, &rc, - "SELECT sql FROM recovery.schema " - "WHERE type='table' AND sql LIKE 'create table%'", &pStmt - ); - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - const char *zCreateTable = (const char*)sqlite3_column_text(pStmt, 0); - raw_printf(pState->out, "CREATE TABLE IF NOT EXISTS %s;\n", - &zCreateTable[12] - ); - } - shellFinalize(&rc, pStmt); - } - - /* Figure out if an orphan table will be required. And if so, how many - ** user columns it should contain */ - shellPrepare(pState->db, &rc, - "SELECT coalesce(max(maxlen), -2) FROM recovery.map WHERE root>1" - , &pLoop - ); - if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pLoop) ){ - nOrphan = sqlite3_column_int(pLoop, 0); - } - shellFinalize(&rc, pLoop); - pLoop = 0; - - shellPrepare(pState->db, &rc, - "SELECT pgno FROM recovery.map WHERE root=?", &pPages - ); - - shellPrepare(pState->db, &rc, - "SELECT max(field), group_concat(shell_escape_crnl(quote" - "(case when (? AND field<0) then NULL else value end)" - "), ', ')" - ", min(field) " - "FROM sqlite_dbdata WHERE pgno = ? AND field != ?" - "GROUP BY cell", &pCells - ); - - /* Loop through each root page. 
*/ - shellPrepare(pState->db, &rc, - "SELECT root, intkey, max(maxlen) FROM recovery.map" - " WHERE root>1 GROUP BY root, intkey ORDER BY root=(" - " SELECT rootpage FROM recovery.schema WHERE name='sqlite_sequence'" - ")", &pLoop - ); - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pLoop) ){ - int iRoot = sqlite3_column_int(pLoop, 0); - int bIntkey = sqlite3_column_int(pLoop, 1); - int nCol = sqlite3_column_int(pLoop, 2); - int bNoop = 0; - RecoverTable *pTab; - - assert( bIntkey==0 || bIntkey==1 ); - pTab = recoverFindTable(pState, &rc, iRoot, bIntkey, nCol, &bNoop); - if( bNoop || rc ) continue; - if( pTab==0 ){ - if( pOrphan==0 ){ - pOrphan = recoverOrphanTable(pState, &rc, zLostAndFound, nOrphan); - } - pTab = pOrphan; - if( pTab==0 ) break; - } - - if( 0==sqlite3_stricmp(pTab->zQuoted, "\"sqlite_sequence\"") ){ - raw_printf(pState->out, "DELETE FROM sqlite_sequence;\n"); - } - sqlite3_bind_int(pPages, 1, iRoot); - if( bRowids==0 && pTab->iPk<0 ){ - sqlite3_bind_int(pCells, 1, 1); - }else{ - sqlite3_bind_int(pCells, 1, 0); - } - sqlite3_bind_int(pCells, 3, pTab->iPk); - - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pPages) ){ - int iPgno = sqlite3_column_int(pPages, 0); - sqlite3_bind_int(pCells, 2, iPgno); - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pCells) ){ - int nField = sqlite3_column_int(pCells, 0); - int iMin = sqlite3_column_int(pCells, 2); - const char *zVal = (const char*)sqlite3_column_text(pCells, 1); - - RecoverTable *pTab2 = pTab; - if( pTab!=pOrphan && (iMin<0)!=bIntkey ){ - if( pOrphan==0 ){ - pOrphan = recoverOrphanTable(pState, &rc, zLostAndFound, nOrphan); - } - pTab2 = pOrphan; - if( pTab2==0 ) break; - } - - nField = nField+1; - if( pTab2==pOrphan ){ - raw_printf(pState->out, - "INSERT INTO %s VALUES(%d, %d, %d, %s%s%s);\n", - pTab2->zQuoted, iRoot, iPgno, nField, - iMin<0 ? 
"" : "NULL, ", zVal, pTab2->azlCol[nField] - ); - }else{ - raw_printf(pState->out, "INSERT INTO %s(%s) VALUES( %s );\n", - pTab2->zQuoted, pTab2->azlCol[nField], zVal - ); - } - } - shellReset(&rc, pCells); - } - shellReset(&rc, pPages); - if( pTab!=pOrphan ) recoverFreeTable(pTab); - } - shellFinalize(&rc, pLoop); - shellFinalize(&rc, pPages); - shellFinalize(&rc, pCells); - recoverFreeTable(pOrphan); - - /* The rest of the schema */ - if( rc==SQLITE_OK ){ - sqlite3_stmt *pStmt = 0; - shellPrepare(pState->db, &rc, - "SELECT sql, name FROM recovery.schema " - "WHERE sql NOT LIKE 'create table%'", &pStmt - ); - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - const char *zSql = (const char*)sqlite3_column_text(pStmt, 0); - if( sqlite3_strnicmp(zSql, "create virt", 11)==0 ){ - const char *zName = (const char*)sqlite3_column_text(pStmt, 1); - char *zPrint = shellMPrintf(&rc, - "INSERT INTO sqlite_schema VALUES('table', %Q, %Q, 0, %Q)", - zName, zName, zSql - ); - raw_printf(pState->out, "%s;\n", zPrint); - sqlite3_free(zPrint); - }else{ - raw_printf(pState->out, "%s;\n", zSql); - } - } - shellFinalize(&rc, pStmt); - } - - if( rc==SQLITE_OK ){ - raw_printf(pState->out, "PRAGMA writable_schema = off;\n"); - raw_printf(pState->out, "COMMIT;\n"); - } - sqlite3_exec(pState->db, "DETACH recovery", 0, 0, 0); - return rc; -} -#endif /* !(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) */ + p = sqlite3_recover_init_sql( + pState->db, "main", recoverSqlCb, (void*)pState + ); + + sqlite3_recover_config(p, 789, (void*)zRecoveryDb); /* Debug use only */ + sqlite3_recover_config(p, SQLITE_RECOVER_LOST_AND_FOUND, (void*)zLAF); + sqlite3_recover_config(p, SQLITE_RECOVER_ROWIDS, (void*)&bRowids); + sqlite3_recover_config(p, SQLITE_RECOVER_FREELIST_CORRUPT,(void*)&bFreelist); + + sqlite3_recover_run(p); + if( sqlite3_recover_errcode(p)!=SQLITE_OK ){ + const char *zErr = sqlite3_recover_errmsg(p); + int errCode = sqlite3_recover_errcode(p); + raw_printf(stderr, "sql error: %s (%d)\n", zErr, errCode); + } + rc = sqlite3_recover_finish(p); + return rc; +} +#endif /* SQLITE_SHELL_HAVE_RECOVER */ /* * zAutoColumn(zCol, &db, ?) => Maybe init db, add column zCol to it. * zAutoColumn(0, &db, ?) 
=> (db!=0) Form columns spec for CREATE TABLE, @@ -8199,11 +7912,11 @@ n = strlen30(azArg[0]); c = azArg[0][0]; clearTempFile(p); #ifndef SQLITE_OMIT_AUTHORIZATION - if( c=='a' && strncmp(azArg[0], "auth", n)==0 ){ + if( c=='a' && cli_strncmp(azArg[0], "auth", n)==0 ){ if( nArg!=2 ){ raw_printf(stderr, "Usage: .auth ON|OFF\n"); rc = 1; goto meta_command_exit; } @@ -8218,20 +7931,20 @@ }else #endif #if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_HAVE_ZLIB) \ && !defined(SQLITE_SHELL_FIDDLE) - if( c=='a' && strncmp(azArg[0], "archive", n)==0 ){ + if( c=='a' && cli_strncmp(azArg[0], "archive", n)==0 ){ open_db(p, 0); failIfSafeMode(p, "cannot run .archive in safe mode"); rc = arDotCommand(p, 0, azArg, nArg); }else #endif #ifndef SQLITE_SHELL_FIDDLE - if( (c=='b' && n>=3 && strncmp(azArg[0], "backup", n)==0) - || (c=='s' && n>=3 && strncmp(azArg[0], "save", n)==0) + if( (c=='b' && n>=3 && cli_strncmp(azArg[0], "backup", n)==0) + || (c=='s' && n>=3 && cli_strncmp(azArg[0], "save", n)==0) ){ const char *zDestFile = 0; const char *zDb = 0; sqlite3 *pDest; sqlite3_backup *pBackup; @@ -8241,14 +7954,14 @@ failIfSafeMode(p, "cannot run .%s in safe mode", azArg[0]); for(j=1; j=3 && strncmp(azArg[0], "bail", n)==0 ){ + if( c=='b' && n>=3 && cli_strncmp(azArg[0], "bail", n)==0 ){ if( nArg==2 ){ bail_on_error = booleanValue(azArg[1]); }else{ raw_printf(stderr, "Usage: .bail on|off\n"); rc = 1; } }else - if( c=='b' && n>=3 && strncmp(azArg[0], "binary", n)==0 ){ + if( c=='b' && n>=3 && cli_strncmp(azArg[0], "binary", n)==0 ){ if( nArg==2 ){ if( booleanValue(azArg[1]) ){ setBinaryMode(p->out, 1); }else{ setTextMode(p->out, 1); @@ -8321,16 +8034,16 @@ }else /* The undocumented ".breakpoint" command causes a call to the no-op ** routine named test_breakpoint(). */ - if( c=='b' && n>=3 && strncmp(azArg[0], "breakpoint", n)==0 ){ + if( c=='b' && n>=3 && cli_strncmp(azArg[0], "breakpoint", n)==0 ){ test_breakpoint(); }else #ifndef SQLITE_SHELL_FIDDLE - if( c=='c' && strcmp(azArg[0],"cd")==0 ){ + if( c=='c' && cli_strcmp(azArg[0],"cd")==0 ){ failIfSafeMode(p, "cannot run .cd in safe mode"); if( nArg==2 ){ #if defined(_WIN32) || defined(WIN32) wchar_t *z = sqlite3_win32_utf8_to_unicode(azArg[1]); rc = !SetCurrentDirectoryW(z); @@ -8347,11 +8060,11 @@ rc = 1; } }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ - if( c=='c' && n>=3 && strncmp(azArg[0], "changes", n)==0 ){ + if( c=='c' && n>=3 && cli_strncmp(azArg[0], "changes", n)==0 ){ if( nArg==2 ){ setOrClearFlag(p, SHFLG_CountChanges, azArg[1]); }else{ raw_printf(stderr, "Usage: .changes on|off\n"); rc = 1; @@ -8361,11 +8074,11 @@ #ifndef SQLITE_SHELL_FIDDLE /* Cancel output redirection, if it is currently set (by .testcase) ** Then read the content of the testcase-out.txt file and compare against ** azArg[1]. If there are differences, report an error and exit. 
*/ - if( c=='c' && n>=3 && strncmp(azArg[0], "check", n)==0 ){ + if( c=='c' && n>=3 && cli_strncmp(azArg[0], "check", n)==0 ){ char *zRes = 0; output_reset(p); if( nArg!=2 ){ raw_printf(stderr, "Usage: .check GLOB-PATTERN\n"); rc = 2; @@ -8384,11 +8097,11 @@ sqlite3_free(zRes); }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ #ifndef SQLITE_SHELL_FIDDLE - if( c=='c' && strncmp(azArg[0], "clone", n)==0 ){ + if( c=='c' && cli_strncmp(azArg[0], "clone", n)==0 ){ failIfSafeMode(p, "cannot run .clone in safe mode"); if( nArg==2 ){ tryToClone(p, azArg[1]); }else{ raw_printf(stderr, "Usage: .clone FILENAME\n"); @@ -8395,11 +8108,11 @@ rc = 1; } }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ - if( c=='c' && strncmp(azArg[0], "connection", n)==0 ){ + if( c=='c' && cli_strncmp(azArg[0], "connection", n)==0 ){ if( nArg==1 ){ /* List available connections */ int i; for(i=0; iaAuxDb); i++){ const char *zFile = p->aAuxDb[i].zDbFilename; @@ -8422,11 +8135,11 @@ p->pAuxDb->db = p->db; p->pAuxDb = &p->aAuxDb[i]; globalDb = p->db = p->pAuxDb->db; p->pAuxDb->db = 0; } - }else if( nArg==3 && strcmp(azArg[1], "close")==0 + }else if( nArg==3 && cli_strcmp(azArg[1], "close")==0 && IsDigit(azArg[2][0]) && azArg[2][1]==0 ){ int i = azArg[2][0] - '0'; if( i<0 || i>=ArraySize(p->aAuxDb) ){ /* No-op */ }else if( p->pAuxDb == &p->aAuxDb[i] ){ @@ -8441,11 +8154,11 @@ raw_printf(stderr, "Usage: .connection [close] [CONNECTION-NUMBER]\n"); rc = 1; } }else - if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 ){ + if( c=='d' && n>1 && cli_strncmp(azArg[0], "databases", n)==0 ){ char **azName = 0; int nName = 0; sqlite3_stmt *pStmt; int i; open_db(p, 0); @@ -8480,11 +8193,11 @@ free(azName[i*2+1]); } sqlite3_free(azName); }else - if( c=='d' && n>=3 && strncmp(azArg[0], "dbconfig", n)==0 ){ + if( c=='d' && n>=3 && cli_strncmp(azArg[0], "dbconfig", n)==0 ){ static const struct DbConfigChoices { const char *zName; int op; } aDbConfig[] = { { "defensive", SQLITE_DBCONFIG_DEFENSIVE }, @@ -8505,11 +8218,11 @@ { "writable_schema", SQLITE_DBCONFIG_WRITABLE_SCHEMA }, }; int ii, v; open_db(p, 0); for(ii=0; ii1 && strcmp(azArg[1], aDbConfig[ii].zName)!=0 ) continue; + if( nArg>1 && cli_strcmp(azArg[1], aDbConfig[ii].zName)!=0 ) continue; if( nArg>=3 ){ sqlite3_db_config(p->db, aDbConfig[ii].op, booleanValue(azArg[2]), 0); } sqlite3_db_config(p->db, aDbConfig[ii].op, -1, &v); utf8_printf(p->out, "%19s %s\n", aDbConfig[ii].zName, v ? 
"on" : "off"); @@ -8519,22 +8232,22 @@ utf8_printf(stderr, "Error: unknown dbconfig \"%s\"\n", azArg[1]); utf8_printf(stderr, "Enter \".dbconfig\" with no arguments for a list\n"); } }else -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) - if( c=='d' && n>=3 && strncmp(azArg[0], "dbinfo", n)==0 ){ +#if SQLITE_SHELL_HAVE_RECOVER + if( c=='d' && n>=3 && cli_strncmp(azArg[0], "dbinfo", n)==0 ){ rc = shell_dbinfo_command(p, nArg, azArg); }else - if( c=='r' && strncmp(azArg[0], "recover", n)==0 ){ + if( c=='r' && cli_strncmp(azArg[0], "recover", n)==0 ){ open_db(p, 0); rc = recoverDatabaseCmd(p, nArg, azArg); }else -#endif /* !(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_DBPAGE_VTAB) */ +#endif /* SQLITE_SHELL_HAVE_RECOVER */ - if( c=='d' && strncmp(azArg[0], "dump", n)==0 ){ + if( c=='d' && cli_strncmp(azArg[0], "dump", n)==0 ){ char *zLike = 0; char *zSql; int i; int savedShowHeader = p->showHeader; int savedShellFlags = p->shellFlgs; @@ -8543,11 +8256,11 @@ |SHFLG_DumpDataOnly|SHFLG_DumpNoSys); for(i=1; ishowHeader = savedShowHeader; p->shellFlgs = savedShellFlags; }else - if( c=='e' && strncmp(azArg[0], "echo", n)==0 ){ + if( c=='e' && cli_strncmp(azArg[0], "echo", n)==0 ){ if( nArg==2 ){ setOrClearFlag(p, SHFLG_Echo, azArg[1]); }else{ raw_printf(stderr, "Usage: .echo on|off\n"); rc = 1; } }else - if( c=='e' && strncmp(azArg[0], "eqp", n)==0 ){ + if( c=='e' && cli_strncmp(azArg[0], "eqp", n)==0 ){ if( nArg==2 ){ p->autoEQPtest = 0; if( p->autoEQPtrace ){ if( p->db ) sqlite3_exec(p->db, "PRAGMA vdbe_trace=OFF;", 0, 0, 0); p->autoEQPtrace = 0; } - if( strcmp(azArg[1],"full")==0 ){ + if( cli_strcmp(azArg[1],"full")==0 ){ p->autoEQP = AUTOEQP_full; - }else if( strcmp(azArg[1],"trigger")==0 ){ + }else if( cli_strcmp(azArg[1],"trigger")==0 ){ p->autoEQP = AUTOEQP_trigger; #ifdef SQLITE_DEBUG - }else if( strcmp(azArg[1],"test")==0 ){ + }else if( cli_strcmp(azArg[1],"test")==0 ){ p->autoEQP = AUTOEQP_on; p->autoEQPtest = 1; - }else if( strcmp(azArg[1],"trace")==0 ){ + }else if( cli_strcmp(azArg[1],"trace")==0 ){ p->autoEQP = AUTOEQP_full; p->autoEQPtrace = 1; open_db(p, 0); sqlite3_exec(p->db, "SELECT name FROM sqlite_schema LIMIT 1", 0, 0, 0); sqlite3_exec(p->db, "PRAGMA vdbe_trace=ON;", 0, 0, 0); @@ -8683,22 +8396,22 @@ rc = 1; } }else #ifndef SQLITE_SHELL_FIDDLE - if( c=='e' && strncmp(azArg[0], "exit", n)==0 ){ + if( c=='e' && cli_strncmp(azArg[0], "exit", n)==0 ){ if( nArg>1 && (rc = (int)integerValue(azArg[1]))!=0 ) exit(rc); rc = 2; }else #endif /* The ".explain" command is automatic now. It is largely pointless. 
It ** retained purely for backwards compatibility */ - if( c=='e' && strncmp(azArg[0], "explain", n)==0 ){ + if( c=='e' && cli_strncmp(azArg[0], "explain", n)==0 ){ int val = 1; if( nArg>=2 ){ - if( strcmp(azArg[1],"auto")==0 ){ + if( cli_strcmp(azArg[1],"auto")==0 ){ val = 99; }else{ val = booleanValue(azArg[1]); } } @@ -8714,11 +8427,11 @@ p->autoExplain = 1; } }else #ifndef SQLITE_OMIT_VIRTUALTABLE - if( c=='e' && strncmp(azArg[0], "expert", n)==0 ){ + if( c=='e' && cli_strncmp(azArg[0], "expert", n)==0 ){ if( p->bSafeMode ){ raw_printf(stderr, "Cannot run experimental commands such as \"%s\" in safe mode\n", azArg[0]); rc = 1; @@ -8727,11 +8440,11 @@ expertDotCommand(p, azArg, nArg); } }else #endif - if( c=='f' && strncmp(azArg[0], "filectrl", n)==0 ){ + if( c=='f' && cli_strncmp(azArg[0], "filectrl", n)==0 ){ static const struct { const char *zCtrlName; /* Name of a test-control option */ int ctrlCode; /* Integer code for that option */ const char *zUsage; /* Usage notes */ } aCtrl[] = { @@ -8757,11 +8470,11 @@ open_db(p, 0); zCmd = nArg>=2 ? azArg[1] : "help"; if( zCmd[0]=='-' - && (strcmp(zCmd,"--schema")==0 || strcmp(zCmd,"-schema")==0) + && (cli_strcmp(zCmd,"--schema")==0 || cli_strcmp(zCmd,"-schema")==0) && nArg>=4 ){ zSchema = azArg[2]; for(i=3; iout, "Available file-controls:\n"); for(i=0; iout, " .filectrl %s %s\n", aCtrl[i].zCtrlName, aCtrl[i].zUsage); } @@ -8787,11 +8500,11 @@ /* convert filectrl text option to value. allow any unique prefix ** of the option name, or a numerical value. */ n2 = strlen30(zCmd); for(i=0; iout, "%s\n", zBuf); } }else - if( c=='f' && strncmp(azArg[0], "fullschema", n)==0 ){ + if( c=='f' && cli_strncmp(azArg[0], "fullschema", n)==0 ){ ShellState data; int doStats = 0; memcpy(&data, p, sizeof(data)); data.showHeader = 0; data.cMode = data.mode = MODE_Semi; @@ -8921,21 +8634,21 @@ shell_exec(&data, "SELECT * FROM sqlite_stat4", 0); raw_printf(p->out, "ANALYZE sqlite_schema;\n"); } }else - if( c=='h' && strncmp(azArg[0], "headers", n)==0 ){ + if( c=='h' && cli_strncmp(azArg[0], "headers", n)==0 ){ if( nArg==2 ){ p->showHeader = booleanValue(azArg[1]); p->shellFlgs |= SHFLG_HeaderSet; }else{ raw_printf(stderr, "Usage: .headers on|off\n"); rc = 1; } }else - if( c=='h' && strncmp(azArg[0], "help", n)==0 ){ + if( c=='h' && cli_strncmp(azArg[0], "help", n)==0 ){ if( nArg>=2 ){ n = showHelp(p->out, azArg[1]); if( n==0 ){ utf8_printf(p->out, "Nothing matches '%s'\n", azArg[1]); } @@ -8943,11 +8656,11 @@ showHelp(p->out, 0); } }else #ifndef SQLITE_SHELL_FIDDLE - if( c=='i' && strncmp(azArg[0], "import", n)==0 ){ + if( c=='i' && cli_strncmp(azArg[0], "import", n)==0 ){ char *zTable = 0; /* Insert data into this table */ char *zSchema = 0; /* within this schema (may default to "main") */ char *zFile = 0; /* Name of file to extra content from */ sqlite3_stmt *pStmt = NULL; /* A statement */ int nCol; /* Number of columns in the table */ @@ -8983,22 +8696,22 @@ }else{ utf8_printf(p->out, "ERROR: extra argument: \"%s\". Usage:\n", z); showHelp(p->out, "import"); goto meta_command_exit; } - }else if( strcmp(z,"-v")==0 ){ + }else if( cli_strcmp(z,"-v")==0 ){ eVerbose++; - }else if( strcmp(z,"-schema")==0 && imode==MODE_Csv && strcmp(p->rowSeparator,SEP_CrLf)==0 ){ + if( nSep==2 && p->mode==MODE_Csv + && cli_strcmp(p->rowSeparator,SEP_CrLf)==0 + ){ /* When importing CSV (only), if the row separator is set to the ** default output row separator, change it to the default input ** row separator. 
This avoids having to maintain different input ** and output row separators. */ sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row); @@ -9236,11 +8951,11 @@ } }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ #ifndef SQLITE_UNTESTABLE - if( c=='i' && strncmp(azArg[0], "imposter", n)==0 ){ + if( c=='i' && cli_strncmp(azArg[0], "imposter", n)==0 ){ char *zSql; char *zCollist = 0; sqlite3_stmt *pStmt; int tnum = 0; int isWO = 0; /* True if making an imposter of a WITHOUT ROWID table */ @@ -9337,17 +9052,17 @@ sqlite3_free(zSql); }else #endif /* !defined(SQLITE_OMIT_TEST_CONTROL) */ #ifdef SQLITE_ENABLE_IOTRACE - if( c=='i' && strncmp(azArg[0], "iotrace", n)==0 ){ + if( c=='i' && cli_strncmp(azArg[0], "iotrace", n)==0 ){ SQLITE_API extern void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...); if( iotrace && iotrace!=stdout ) fclose(iotrace); iotrace = 0; if( nArg<2 ){ sqlite3IoTrace = 0; - }else if( strcmp(azArg[1], "-")==0 ){ + }else if( cli_strcmp(azArg[1], "-")==0 ){ sqlite3IoTrace = iotracePrintf; iotrace = stdout; }else{ iotrace = fopen(azArg[1], "w"); if( iotrace==0 ){ @@ -9359,11 +9074,11 @@ } } }else #endif - if( c=='l' && n>=5 && strncmp(azArg[0], "limits", n)==0 ){ + if( c=='l' && n>=5 && cli_strncmp(azArg[0], "limits", n)==0 ){ static const struct { const char *zLimitName; /* Name of a limit */ int limitCode; /* Integer code for that limit */ } aLimit[] = { { "length", SQLITE_LIMIT_LENGTH }, @@ -9418,17 +9133,17 @@ printf("%20s %d\n", aLimit[iLimit].zLimitName, sqlite3_limit(p->db, aLimit[iLimit].limitCode, -1)); } }else - if( c=='l' && n>2 && strncmp(azArg[0], "lint", n)==0 ){ + if( c=='l' && n>2 && cli_strncmp(azArg[0], "lint", n)==0 ){ open_db(p, 0); lintDotCommand(p, azArg, nArg); }else #if !defined(SQLITE_OMIT_LOAD_EXTENSION) && !defined(SQLITE_SHELL_FIDDLE) - if( c=='l' && strncmp(azArg[0], "load", n)==0 ){ + if( c=='l' && cli_strncmp(azArg[0], "load", n)==0 ){ const char *zFile, *zProc; char *zErrMsg = 0; failIfSafeMode(p, "cannot run .load in safe mode"); if( nArg<2 ){ raw_printf(stderr, "Usage: .load FILE ?ENTRYPOINT?\n"); @@ -9446,11 +9161,11 @@ } }else #endif #ifndef SQLITE_SHELL_FIDDLE - if( c=='l' && strncmp(azArg[0], "log", n)==0 ){ + if( c=='l' && cli_strncmp(azArg[0], "log", n)==0 ){ failIfSafeMode(p, "cannot run .log in safe mode"); if( nArg!=2 ){ raw_printf(stderr, "Usage: .log FILENAME\n"); rc = 1; }else{ @@ -9459,11 +9174,11 @@ p->pLog = output_file_open(zFile, 0); } }else #endif - if( c=='m' && strncmp(azArg[0], "mode", n)==0 ){ + if( c=='m' && cli_strncmp(azArg[0], "mode", n)==0 ){ const char *zMode = 0; const char *zTabname = 0; int i, n2; ColModeOpts cmOpts = ColModeOpts_default; for(i=1; iout, "current output mode: %s\n", modeDescr[p->mode]); } zMode = modeDescr[p->mode]; } n2 = strlen30(zMode); - if( strncmp(zMode,"lines",n2)==0 ){ + if( cli_strncmp(zMode,"lines",n2)==0 ){ p->mode = MODE_Line; sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row); - }else if( strncmp(zMode,"columns",n2)==0 ){ + }else if( cli_strncmp(zMode,"columns",n2)==0 ){ p->mode = MODE_Column; if( (p->shellFlgs & SHFLG_HeaderSet)==0 ){ p->showHeader = 1; } sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row); p->cmOpts = cmOpts; - }else if( strncmp(zMode,"list",n2)==0 ){ + }else if( cli_strncmp(zMode,"list",n2)==0 ){ p->mode = MODE_List; sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Column); sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row); - }else if( strncmp(zMode,"html",n2)==0 ){ + }else if( 
cli_strncmp(zMode,"html",n2)==0 ){ p->mode = MODE_Html; - }else if( strncmp(zMode,"tcl",n2)==0 ){ + }else if( cli_strncmp(zMode,"tcl",n2)==0 ){ p->mode = MODE_Tcl; sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Space); sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row); - }else if( strncmp(zMode,"csv",n2)==0 ){ + }else if( cli_strncmp(zMode,"csv",n2)==0 ){ p->mode = MODE_Csv; sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Comma); sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_CrLf); - }else if( strncmp(zMode,"tabs",n2)==0 ){ + }else if( cli_strncmp(zMode,"tabs",n2)==0 ){ p->mode = MODE_List; sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Tab); - }else if( strncmp(zMode,"insert",n2)==0 ){ + }else if( cli_strncmp(zMode,"insert",n2)==0 ){ p->mode = MODE_Insert; set_table_name(p, zTabname ? zTabname : "table"); - }else if( strncmp(zMode,"quote",n2)==0 ){ + }else if( cli_strncmp(zMode,"quote",n2)==0 ){ p->mode = MODE_Quote; sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Comma); sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row); - }else if( strncmp(zMode,"ascii",n2)==0 ){ + }else if( cli_strncmp(zMode,"ascii",n2)==0 ){ p->mode = MODE_Ascii; sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Unit); sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Record); - }else if( strncmp(zMode,"markdown",n2)==0 ){ + }else if( cli_strncmp(zMode,"markdown",n2)==0 ){ p->mode = MODE_Markdown; p->cmOpts = cmOpts; - }else if( strncmp(zMode,"table",n2)==0 ){ + }else if( cli_strncmp(zMode,"table",n2)==0 ){ p->mode = MODE_Table; p->cmOpts = cmOpts; - }else if( strncmp(zMode,"box",n2)==0 ){ + }else if( cli_strncmp(zMode,"box",n2)==0 ){ p->mode = MODE_Box; p->cmOpts = cmOpts; - }else if( strncmp(zMode,"count",n2)==0 ){ + }else if( cli_strncmp(zMode,"count",n2)==0 ){ p->mode = MODE_Count; - }else if( strncmp(zMode,"off",n2)==0 ){ + }else if( cli_strncmp(zMode,"off",n2)==0 ){ p->mode = MODE_Off; - }else if( strncmp(zMode,"json",n2)==0 ){ + }else if( cli_strncmp(zMode,"json",n2)==0 ){ p->mode = MODE_Json; }else{ raw_printf(stderr, "Error: mode should be one of: " "ascii box column csv html insert json line list markdown " "qbox quote table tabs tcl\n"); @@ -9583,15 +9298,15 @@ } p->cMode = p->mode; }else #ifndef SQLITE_SHELL_FIDDLE - if( c=='n' && strcmp(azArg[0], "nonce")==0 ){ + if( c=='n' && cli_strcmp(azArg[0], "nonce")==0 ){ if( nArg!=2 ){ raw_printf(stderr, "Usage: .nonce NONCE\n"); rc = 1; - }else if( p->zNonce==0 || strcmp(azArg[1],p->zNonce)!=0 ){ + }else if( p->zNonce==0 || cli_strcmp(azArg[1],p->zNonce)!=0 ){ raw_printf(stderr, "line %d: incorrect nonce: \"%s\"\n", p->lineno, azArg[1]); exit(1); }else{ p->bSafeMode = 0; @@ -9599,21 +9314,21 @@ ** at the end of this procedure */ } }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ - if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 ){ + if( c=='n' && cli_strncmp(azArg[0], "nullvalue", n)==0 ){ if( nArg==2 ){ sqlite3_snprintf(sizeof(p->nullValue), p->nullValue, "%.*s", (int)ArraySize(p->nullValue)-1, azArg[1]); }else{ raw_printf(stderr, "Usage: .nullvalue STRING\n"); rc = 1; } }else - if( c=='o' && strncmp(azArg[0], "open", n)==0 && n>=2 ){ + if( c=='o' && cli_strncmp(azArg[0], "open", n)==0 && n>=2 ){ const char *zFN = 0; /* Pointer to constant filename */ char *zNewFilename = 0; /* Name of the database file to open */ int iName = 1; /* Index in azArg[] of the filename */ int newFlag = 0; /* True to delete file before opening 
*/ int openMode = SHELL_OPEN_UNSPEC; @@ -9673,11 +9388,11 @@ if( newFlag && zFN && !p->bSafeMode ) shellDeleteFile(zFN); #ifndef SQLITE_SHELL_FIDDLE if( p->bSafeMode && p->openMode!=SHELL_OPEN_HEXDB && zFN - && strcmp(zFN,":memory:")!=0 + && cli_strcmp(zFN,":memory:")!=0 ){ failIfSafeMode(p, "cannot open disk-based database files in safe mode"); } #else /* WASM mode has its own sandboxed pseudo-filesystem. */ @@ -9704,12 +9419,13 @@ } }else #ifndef SQLITE_SHELL_FIDDLE if( (c=='o' - && (strncmp(azArg[0], "output", n)==0||strncmp(azArg[0], "once", n)==0)) - || (c=='e' && n==5 && strcmp(azArg[0],"excel")==0) + && (cli_strncmp(azArg[0], "output", n)==0 + || cli_strncmp(azArg[0], "once", n)==0)) + || (c=='e' && n==5 && cli_strcmp(azArg[0],"excel")==0) ){ char *zFile = 0; int bTxtMode = 0; int i; int eMode = 0; @@ -9719,25 +9435,25 @@ zBOM[0] = 0; failIfSafeMode(p, "cannot run .%s in safe mode", azArg[0]); if( c=='e' ){ eMode = 'x'; bOnce = 2; - }else if( strncmp(azArg[0],"once",n)==0 ){ + }else if( cli_strncmp(azArg[0],"once",n)==0 ){ bOnce = 1; } for(i=1; iout, "ERROR: unknown option: \"%s\". Usage:\n", azArg[i]); showHelp(p->out, azArg[0]); @@ -9806,11 +9522,11 @@ } #endif }else{ p->out = output_file_open(zFile, bTxtMode); if( p->out==0 ){ - if( strcmp(zFile,"off")!=0 ){ + if( cli_strcmp(zFile,"off")!=0 ){ utf8_printf(stderr,"Error: cannot write to \"%s\"\n", zFile); } p->out = stdout; rc = 1; } else { @@ -9820,26 +9536,26 @@ } sqlite3_free(zFile); }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ - if( c=='p' && n>=3 && strncmp(azArg[0], "parameter", n)==0 ){ + if( c=='p' && n>=3 && cli_strncmp(azArg[0], "parameter", n)==0 ){ open_db(p,0); if( nArg<=1 ) goto parameter_syntax_error; /* .parameter clear ** Clear all bind parameters by dropping the TEMP table that holds them. */ - if( nArg==2 && strcmp(azArg[1],"clear")==0 ){ + if( nArg==2 && cli_strcmp(azArg[1],"clear")==0 ){ sqlite3_exec(p->db, "DROP TABLE IF EXISTS temp.sqlite_parameters;", 0, 0, 0); }else /* .parameter list ** List all bind parameters. */ - if( nArg==2 && strcmp(azArg[1],"list")==0 ){ + if( nArg==2 && cli_strcmp(azArg[1],"list")==0 ){ sqlite3_stmt *pStmt = 0; int rx; int len = 0; rx = sqlite3_prepare_v2(p->db, "SELECT max(length(key)) " @@ -9864,21 +9580,21 @@ /* .parameter init ** Make sure the TEMP table used to hold bind parameters exists. ** Create it if necessary. */ - if( nArg==2 && strcmp(azArg[1],"init")==0 ){ + if( nArg==2 && cli_strcmp(azArg[1],"init")==0 ){ bind_table_init(p); }else /* .parameter set NAME VALUE ** Set or reset a bind parameter. NAME should be the full parameter ** name exactly as it appears in the query. (ex: $abc, @def). The ** VALUE can be in either SQL literal notation, or if not it will be ** understood to be a text string. */ - if( nArg==4 && strcmp(azArg[1],"set")==0 ){ + if( nArg==4 && cli_strcmp(azArg[1],"set")==0 ){ int rx; char *zSql; sqlite3_stmt *pStmt; const char *zKey = azArg[2]; const char *zValue = azArg[3]; @@ -9912,11 +9628,11 @@ /* .parameter unset NAME ** Remove the NAME binding from the parameter binding table, if it ** exists. 
*/ - if( nArg==3 && strcmp(azArg[1],"unset")==0 ){ + if( nArg==3 && cli_strcmp(azArg[1],"unset")==0 ){ char *zSql = sqlite3_mprintf( "DELETE FROM temp.sqlite_parameters WHERE key=%Q", azArg[2]); shell_check_oom(zSql); sqlite3_exec(p->db, zSql, 0, 0, 0); sqlite3_free(zSql); @@ -9924,21 +9640,21 @@ /* If no command name matches, show a syntax error */ parameter_syntax_error: showHelp(p->out, "parameter"); }else - if( c=='p' && n>=3 && strncmp(azArg[0], "print", n)==0 ){ + if( c=='p' && n>=3 && cli_strncmp(azArg[0], "print", n)==0 ){ int i; for(i=1; i1 ) raw_printf(p->out, " "); utf8_printf(p->out, "%s", azArg[i]); } raw_printf(p->out, "\n"); }else #ifndef SQLITE_OMIT_PROGRESS_CALLBACK - if( c=='p' && n>=3 && strncmp(azArg[0], "progress", n)==0 ){ + if( c=='p' && n>=3 && cli_strncmp(azArg[0], "progress", n)==0 ){ int i; int nn = 0; p->flgProgress = 0; p->mxProgress = 0; p->nProgress = 0; @@ -9945,23 +9661,23 @@ for(i=1; iflgProgress |= SHELL_PROGRESS_QUIET; continue; } - if( strcmp(z,"reset")==0 ){ + if( cli_strcmp(z,"reset")==0 ){ p->flgProgress |= SHELL_PROGRESS_RESET; continue; } - if( strcmp(z,"once")==0 ){ + if( cli_strcmp(z,"once")==0 ){ p->flgProgress |= SHELL_PROGRESS_ONCE; continue; } - if( strcmp(z,"limit")==0 ){ + if( cli_strcmp(z,"limit")==0 ){ if( i+1>=nArg ){ utf8_printf(stderr, "Error: missing argument on --limit\n"); rc = 1; goto meta_command_exit; }else{ @@ -9979,27 +9695,27 @@ open_db(p, 0); sqlite3_progress_handler(p->db, nn, progress_handler, p); }else #endif /* SQLITE_OMIT_PROGRESS_CALLBACK */ - if( c=='p' && strncmp(azArg[0], "prompt", n)==0 ){ + if( c=='p' && cli_strncmp(azArg[0], "prompt", n)==0 ){ if( nArg >= 2) { strncpy(mainPrompt,azArg[1],(int)ArraySize(mainPrompt)-1); } if( nArg >= 3) { strncpy(continuePrompt,azArg[2],(int)ArraySize(continuePrompt)-1); } }else #ifndef SQLITE_SHELL_FIDDLE - if( c=='q' && strncmp(azArg[0], "quit", n)==0 ){ + if( c=='q' && cli_strncmp(azArg[0], "quit", n)==0 ){ rc = 2; }else #endif #ifndef SQLITE_SHELL_FIDDLE - if( c=='r' && n>=3 && strncmp(azArg[0], "read", n)==0 ){ + if( c=='r' && n>=3 && cli_strncmp(azArg[0], "read", n)==0 ){ FILE *inSaved = p->in; int savedLineno = p->lineno; failIfSafeMode(p, "cannot run .read in safe mode"); if( nArg!=2 ){ raw_printf(stderr, "Usage: .read FILE\n"); @@ -10032,11 +9748,11 @@ p->lineno = savedLineno; }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ #ifndef SQLITE_SHELL_FIDDLE - if( c=='r' && n>=3 && strncmp(azArg[0], "restore", n)==0 ){ + if( c=='r' && n>=3 && cli_strncmp(azArg[0], "restore", n)==0 ){ const char *zSrcFile; const char *zDb; sqlite3 *pSrc; sqlite3_backup *pBackup; int nTimeout = 0; @@ -10085,11 +9801,11 @@ } close_db(pSrc); }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ - if( c=='s' && strncmp(azArg[0], "scanstats", n)==0 ){ + if( c=='s' && cli_strncmp(azArg[0], "scanstats", n)==0 ){ if( nArg==2 ){ p->scanstatsOn = (u8)booleanValue(azArg[1]); #ifndef SQLITE_ENABLE_STMT_SCANSTATUS raw_printf(stderr, "Warning: .scanstats not available in this build.\n"); #endif @@ -10097,11 +9813,11 @@ raw_printf(stderr, "Usage: .scanstats on|off\n"); rc = 1; } }else - if( c=='s' && strncmp(azArg[0], "schema", n)==0 ){ + if( c=='s' && cli_strncmp(azArg[0], "schema", n)==0 ){ ShellText sSelect; ShellState data; char *zErrMsg = 0; const char *zDiv = "("; const char *zName = 0; @@ -10240,19 +9956,19 @@ }else{ rc = 0; } }else - if( (c=='s' && n==11 && strncmp(azArg[0], "selecttrace", n)==0) - || (c=='t' && n==9 && strncmp(azArg[0], "treetrace", n)==0) + if( (c=='s' && n==11 && cli_strncmp(azArg[0], 
"selecttrace", n)==0) + || (c=='t' && n==9 && cli_strncmp(azArg[0], "treetrace", n)==0) ){ unsigned int x = nArg>=2 ? (unsigned int)integerValue(azArg[1]) : 0xffffffff; sqlite3_test_control(SQLITE_TESTCTRL_TRACEFLAGS, 1, &x); }else #if defined(SQLITE_ENABLE_SESSION) - if( c=='s' && strncmp(azArg[0],"session",n)==0 && n>=3 ){ + if( c=='s' && cli_strncmp(azArg[0],"session",n)==0 && n>=3 ){ struct AuxDb *pAuxDb = p->pAuxDb; OpenSession *pSession = &pAuxDb->aSession[0]; char **azCmd = &azArg[1]; int iSes = 0; int nCmd = nArg - 1; @@ -10259,11 +9975,11 @@ int i; if( nArg<=1 ) goto session_syntax_error; open_db(p, 0); if( nArg>=3 ){ for(iSes=0; iSesnSession; iSes++){ - if( strcmp(pAuxDb->aSession[iSes].zName, azArg[1])==0 ) break; + if( cli_strcmp(pAuxDb->aSession[iSes].zName, azArg[1])==0 ) break; } if( iSesnSession ){ pSession = &pAuxDb->aSession[iSes]; azCmd++; nCmd--; @@ -10275,11 +9991,11 @@ /* .session attach TABLE ** Invoke the sqlite3session_attach() interface to attach a particular ** table so that it is never filtered. */ - if( strcmp(azCmd[0],"attach")==0 ){ + if( cli_strcmp(azCmd[0],"attach")==0 ){ if( nCmd!=2 ) goto session_syntax_error; if( pSession->p==0 ){ session_not_open: raw_printf(stderr, "ERROR: No sessions are open\n"); }else{ @@ -10293,11 +10009,13 @@ /* .session changeset FILE ** .session patchset FILE ** Write a changeset or patchset into a file. The file is overwritten. */ - if( strcmp(azCmd[0],"changeset")==0 || strcmp(azCmd[0],"patchset")==0 ){ + if( cli_strcmp(azCmd[0],"changeset")==0 + || cli_strcmp(azCmd[0],"patchset")==0 + ){ FILE *out = 0; failIfSafeMode(p, "cannot run \".session %s\" in safe mode", azCmd[0]); if( nCmd!=2 ) goto session_syntax_error; if( pSession->p==0 ) goto session_not_open; out = fopen(azCmd[1], "wb"); @@ -10327,11 +10045,11 @@ }else /* .session close ** Close the identified session */ - if( strcmp(azCmd[0], "close")==0 ){ + if( cli_strcmp(azCmd[0], "close")==0 ){ if( nCmd!=1 ) goto session_syntax_error; if( pAuxDb->nSession ){ session_close(pSession); pAuxDb->aSession[iSes] = pAuxDb->aSession[--pAuxDb->nSession]; } @@ -10338,11 +10056,11 @@ }else /* .session enable ?BOOLEAN? ** Query or set the enable flag */ - if( strcmp(azCmd[0], "enable")==0 ){ + if( cli_strcmp(azCmd[0], "enable")==0 ){ int ii; if( nCmd>2 ) goto session_syntax_error; ii = nCmd==1 ? -1 : booleanValue(azCmd[1]); if( pAuxDb->nSession ){ ii = sqlite3session_enable(pSession->p, ii); @@ -10352,11 +10070,11 @@ }else /* .session filter GLOB .... ** Set a list of GLOB patterns of table names to be excluded. */ - if( strcmp(azCmd[0], "filter")==0 ){ + if( cli_strcmp(azCmd[0], "filter")==0 ){ int ii, nByte; if( nCmd<2 ) goto session_syntax_error; if( pAuxDb->nSession ){ for(ii=0; iinFilter; ii++){ sqlite3_free(pSession->azFilter[ii]); @@ -10377,11 +10095,11 @@ }else /* .session indirect ?BOOLEAN? ** Query or set the indirect flag */ - if( strcmp(azCmd[0], "indirect")==0 ){ + if( cli_strcmp(azCmd[0], "indirect")==0 ){ int ii; if( nCmd>2 ) goto session_syntax_error; ii = nCmd==1 ? 
-1 : booleanValue(azCmd[1]); if( pAuxDb->nSession ){ ii = sqlite3session_indirect(pSession->p, ii); @@ -10391,11 +10109,11 @@ }else /* .session isempty ** Determine if the session is empty */ - if( strcmp(azCmd[0], "isempty")==0 ){ + if( cli_strcmp(azCmd[0], "isempty")==0 ){ int ii; if( nCmd!=1 ) goto session_syntax_error; if( pAuxDb->nSession ){ ii = sqlite3session_isempty(pSession->p); utf8_printf(p->out, "session %s isempty flag = %d\n", @@ -10404,27 +10122,27 @@ }else /* .session list ** List all currently open sessions */ - if( strcmp(azCmd[0],"list")==0 ){ + if( cli_strcmp(azCmd[0],"list")==0 ){ for(i=0; inSession; i++){ utf8_printf(p->out, "%d %s\n", i, pAuxDb->aSession[i].zName); } }else /* .session open DB NAME ** Open a new session called NAME on the attached database DB. ** DB is normally "main". */ - if( strcmp(azCmd[0],"open")==0 ){ + if( cli_strcmp(azCmd[0],"open")==0 ){ char *zName; if( nCmd!=3 ) goto session_syntax_error; zName = azCmd[2]; if( zName[0]==0 ) goto session_syntax_error; for(i=0; inSession; i++){ - if( strcmp(pAuxDb->aSession[i].zName,zName)==0 ){ + if( cli_strcmp(pAuxDb->aSession[i].zName,zName)==0 ){ utf8_printf(stderr, "Session \"%s\" already exists\n", zName); goto meta_command_exit; } } if( pAuxDb->nSession>=ArraySize(pAuxDb->aSession) ){ @@ -10451,19 +10169,19 @@ #endif #ifdef SQLITE_DEBUG /* Undocumented commands for internal testing. Subject to change ** without notice. */ - if( c=='s' && n>=10 && strncmp(azArg[0], "selftest-", 9)==0 ){ - if( strncmp(azArg[0]+9, "boolean", n-9)==0 ){ + if( c=='s' && n>=10 && cli_strncmp(azArg[0], "selftest-", 9)==0 ){ + if( cli_strncmp(azArg[0]+9, "boolean", n-9)==0 ){ int i, v; for(i=1; iout, "%s: %d 0x%x\n", azArg[i], v, v); } } - if( strncmp(azArg[0]+9, "integer", n-9)==0 ){ + if( cli_strncmp(azArg[0]+9, "integer", n-9)==0 ){ int i; sqlite3_int64 v; for(i=1; i=4 && strncmp(azArg[0],"selftest",n)==0 ){ + if( c=='s' && n>=4 && cli_strncmp(azArg[0],"selftest",n)==0 ){ int bIsInit = 0; /* True to initialize the SELFTEST table */ int bVerbose = 0; /* Verbose output */ int bSelftestExists; /* True if SELFTEST already exists */ int i, k; /* Loop counters */ int nTest = 0; /* Number of tests runs */ @@ -10485,14 +10203,14 @@ open_db(p,0); for(i=1; i0 ){ printf("%d: %s %s\n", tno, zOp, zSql); } - if( strcmp(zOp,"memo")==0 ){ + if( cli_strcmp(zOp,"memo")==0 ){ utf8_printf(p->out, "%s\n", zSql); }else - if( strcmp(zOp,"run")==0 ){ + if( cli_strcmp(zOp,"run")==0 ){ char *zErrMsg = 0; str.n = 0; str.z[0] = 0; rc = sqlite3_exec(p->db, zSql, captureOutputCallback, &str, &zErrMsg); nTest++; @@ -10558,11 +10276,11 @@ if( rc || zErrMsg ){ nErr++; rc = 1; utf8_printf(p->out, "%d: error-code-%d: %s\n", tno, rc, zErrMsg); sqlite3_free(zErrMsg); - }else if( strcmp(zAns,str.z)!=0 ){ + }else if( cli_strcmp(zAns,str.z)!=0 ){ nErr++; rc = 1; utf8_printf(p->out, "%d: Expected: [%s]\n", tno, zAns); utf8_printf(p->out, "%d: Got: [%s]\n", tno, str.z); } @@ -10578,11 +10296,11 @@ } /* End loop over k */ freeText(&str); utf8_printf(p->out, "%d errors out of %d tests\n", nErr, nTest); }else - if( c=='s' && strncmp(azArg[0], "separator", n)==0 ){ + if( c=='s' && cli_strncmp(azArg[0], "separator", n)==0 ){ if( nArg<2 || nArg>3 ){ raw_printf(stderr, "Usage: .separator COL ?ROW?\n"); rc = 1; } if( nArg>=2 ){ @@ -10593,11 +10311,11 @@ sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, "%.*s", (int)ArraySize(p->rowSeparator)-1, azArg[2]); } }else - if( c=='s' && n>=4 && strncmp(azArg[0],"sha3sum",n)==0 ){ + if( c=='s' && n>=4 && 
cli_strncmp(azArg[0],"sha3sum",n)==0 ){ const char *zLike = 0; /* Which table to checksum. 0 means everything */ int i; /* Loop counter */ int bSchema = 0; /* Also hash the schema */ int bSeparate = 0; /* Hash each table separately */ int iSize = 224; /* Hash algorithm to use */ @@ -10611,19 +10329,19 @@ for(i=1; iout, "\n"); utf8_printf(p->out, "%12.12s: %s\n", "filename", p->pAuxDb->zDbFilename ? p->pAuxDb->zDbFilename : ""); }else - if( c=='s' && strncmp(azArg[0], "stats", n)==0 ){ + if( c=='s' && cli_strncmp(azArg[0], "stats", n)==0 ){ if( nArg==2 ){ - if( strcmp(azArg[1],"stmt")==0 ){ + if( cli_strcmp(azArg[1],"stmt")==0 ){ p->statsOn = 2; - }else if( strcmp(azArg[1],"vmstep")==0 ){ + }else if( cli_strcmp(azArg[1],"vmstep")==0 ){ p->statsOn = 3; }else{ p->statsOn = (u8)booleanValue(azArg[1]); } }else if( nArg==1 ){ @@ -10802,13 +10528,13 @@ raw_printf(stderr, "Usage: .stats ?on|off|stmt|vmstep?\n"); rc = 1; } }else - if( (c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0) - || (c=='i' && (strncmp(azArg[0], "indices", n)==0 - || strncmp(azArg[0], "indexes", n)==0) ) + if( (c=='t' && n>1 && cli_strncmp(azArg[0], "tables", n)==0) + || (c=='i' && (cli_strncmp(azArg[0], "indices", n)==0 + || cli_strncmp(azArg[0], "indexes", n)==0) ) ){ sqlite3_stmt *pStmt; char **azResult; int nRow, nAlloc; int ii; @@ -10912,11 +10638,11 @@ sqlite3_free(azResult); }else #ifndef SQLITE_SHELL_FIDDLE /* Begin redirecting output to the file "testcase-out.txt" */ - if( c=='t' && strcmp(azArg[0],"testcase")==0 ){ + if( c=='t' && cli_strcmp(azArg[0],"testcase")==0 ){ output_reset(p); p->out = output_file_open("testcase-out.txt", 0); if( p->out==0 ){ raw_printf(stderr, "Error: cannot open 'testcase-out.txt'\n"); } @@ -10927,11 +10653,11 @@ } }else #endif /* !defined(SQLITE_SHELL_FIDDLE) */ #ifndef SQLITE_UNTESTABLE - if( c=='t' && n>=8 && strncmp(azArg[0], "testctrl", n)==0 ){ + if( c=='t' && n>=8 && cli_strncmp(azArg[0], "testctrl", n)==0 ){ static const struct { const char *zCtrlName; /* Name of a test-control option */ int ctrlCode; /* Integer code for that option */ int unSafe; /* Not valid for --safe mode */ const char *zUsage; /* Usage notes */ @@ -10974,11 +10700,11 @@ zCmd++; if( zCmd[0]=='-' && zCmd[1] ) zCmd++; } /* --help lists all test-controls */ - if( strcmp(zCmd,"help")==0 ){ + if( cli_strcmp(zCmd,"help")==0 ){ utf8_printf(p->out, "Available test-controls:\n"); for(i=0; iout, " .testctrl %s %s\n", aCtrl[i].zCtrlName, aCtrl[i].zUsage); } @@ -10988,11 +10714,11 @@ /* convert testctrl text option to value. allow any unique prefix ** of the option name, or a numerical value. */ n2 = strlen30(zCmd); for(i=0; iout, "0x%08x\n", rc2); } }else #endif /* !defined(SQLITE_UNTESTABLE) */ - if( c=='t' && n>4 && strncmp(azArg[0], "timeout", n)==0 ){ + if( c=='t' && n>4 && cli_strncmp(azArg[0], "timeout", n)==0 ){ open_db(p, 0); sqlite3_busy_timeout(p->db, nArg>=2 ? 
(int)integerValue(azArg[1]) : 0); }else - if( c=='t' && n>=5 && strncmp(azArg[0], "timer", n)==0 ){ + if( c=='t' && n>=5 && cli_strncmp(azArg[0], "timer", n)==0 ){ if( nArg==2 ){ enableTimer = booleanValue(azArg[1]); if( enableTimer && !HAS_TIMER ){ raw_printf(stderr, "Error: timer not available on this system.\n"); enableTimer = 0; @@ -11179,11 +10905,11 @@ rc = 1; } }else #ifndef SQLITE_OMIT_TRACE - if( c=='t' && strncmp(azArg[0], "trace", n)==0 ){ + if( c=='t' && cli_strncmp(azArg[0], "trace", n)==0 ){ int mType = 0; int jj; open_db(p, 0); for(jj=1; jj=3 && strncmp(zOpt, "-allexcept",lenOpt)==0 ){ + if( lenOpt>=3 && cli_strncmp(zOpt, "-allexcept",lenOpt)==0 ){ assert( azArg[nArg]==0 ); sqlite3_drop_modules(p->db, nArg>2 ? (const char**)(azArg+2) : 0); }else{ for(ii=1; iidb, azArg[ii], 0, 0); @@ -11254,18 +10980,18 @@ } }else #endif #if SQLITE_USER_AUTHENTICATION - if( c=='u' && strncmp(azArg[0], "user", n)==0 ){ + if( c=='u' && cli_strncmp(azArg[0], "user", n)==0 ){ if( nArg<2 ){ raw_printf(stderr, "Usage: .user SUBCOMMAND ...\n"); rc = 1; goto meta_command_exit; } open_db(p, 0); - if( strcmp(azArg[1],"login")==0 ){ + if( cli_strcmp(azArg[1],"login")==0 ){ if( nArg!=4 ){ raw_printf(stderr, "Usage: .user login USER PASSWORD\n"); rc = 1; goto meta_command_exit; } @@ -11273,11 +10999,11 @@ strlen30(azArg[3])); if( rc ){ utf8_printf(stderr, "Authentication failed for user %s\n", azArg[2]); rc = 1; } - }else if( strcmp(azArg[1],"add")==0 ){ + }else if( cli_strcmp(azArg[1],"add")==0 ){ if( nArg!=5 ){ raw_printf(stderr, "Usage: .user add USER PASSWORD ISADMIN\n"); rc = 1; goto meta_command_exit; } @@ -11285,11 +11011,11 @@ booleanValue(azArg[4])); if( rc ){ raw_printf(stderr, "User-Add failed: %d\n", rc); rc = 1; } - }else if( strcmp(azArg[1],"edit")==0 ){ + }else if( cli_strcmp(azArg[1],"edit")==0 ){ if( nArg!=5 ){ raw_printf(stderr, "Usage: .user edit USER PASSWORD ISADMIN\n"); rc = 1; goto meta_command_exit; } @@ -11297,11 +11023,11 @@ booleanValue(azArg[4])); if( rc ){ raw_printf(stderr, "User-Edit failed: %d\n", rc); rc = 1; } - }else if( strcmp(azArg[1],"delete")==0 ){ + }else if( cli_strcmp(azArg[1],"delete")==0 ){ if( nArg!=3 ){ raw_printf(stderr, "Usage: .user delete USER\n"); rc = 1; goto meta_command_exit; } @@ -11316,11 +11042,11 @@ goto meta_command_exit; } }else #endif /* SQLITE_USER_AUTHENTICATION */ - if( c=='v' && strncmp(azArg[0], "version", n)==0 ){ + if( c=='v' && cli_strncmp(azArg[0], "version", n)==0 ){ utf8_printf(p->out, "SQLite %s %s\n" /*extra-version-info*/, sqlite3_libversion(), sqlite3_sourceid()); #if SQLITE_HAVE_ZLIB utf8_printf(p->out, "zlib version %s\n", zlibVersion()); #endif @@ -11335,11 +11061,11 @@ #elif defined(__GNUC__) && defined(__VERSION__) utf8_printf(p->out, "gcc-" __VERSION__ "\n"); #endif }else - if( c=='v' && strncmp(azArg[0], "vfsinfo", n)==0 ){ + if( c=='v' && cli_strncmp(azArg[0], "vfsinfo", n)==0 ){ const char *zDbName = nArg==2 ? 
azArg[1] : "main"; sqlite3_vfs *pVfs = 0; if( p->db ){ sqlite3_file_control(p->db, zDbName, SQLITE_FCNTL_VFS_POINTER, &pVfs); if( pVfs ){ @@ -11349,11 +11075,11 @@ raw_printf(p->out, "vfs.mxPathname = %d\n", pVfs->mxPathname); } } }else - if( c=='v' && strncmp(azArg[0], "vfslist", n)==0 ){ + if( c=='v' && cli_strncmp(azArg[0], "vfslist", n)==0 ){ sqlite3_vfs *pVfs; sqlite3_vfs *pCurrent = 0; if( p->db ){ sqlite3_file_control(p->db, "main", SQLITE_FCNTL_VFS_POINTER, &pCurrent); } @@ -11367,11 +11093,11 @@ raw_printf(p->out, "-----------------------------------\n"); } } }else - if( c=='v' && strncmp(azArg[0], "vfsname", n)==0 ){ + if( c=='v' && cli_strncmp(azArg[0], "vfsname", n)==0 ){ const char *zDbName = nArg==2 ? azArg[1] : "main"; char *zVfsName = 0; if( p->db ){ sqlite3_file_control(p->db, zDbName, SQLITE_FCNTL_VFSNAME, &zVfsName); if( zVfsName ){ @@ -11379,16 +11105,16 @@ sqlite3_free(zVfsName); } } }else - if( c=='w' && strncmp(azArg[0], "wheretrace", n)==0 ){ + if( c=='w' && cli_strncmp(azArg[0], "wheretrace", n)==0 ){ unsigned int x = nArg>=2 ? (unsigned int)integerValue(azArg[1]) : 0xffffffff; sqlite3_test_control(SQLITE_TESTCTRL_TRACEFLAGS, 3, &x); }else - if( c=='w' && strncmp(azArg[0], "width", n)==0 ){ + if( c=='w' && cli_strncmp(azArg[0], "width", n)==0 ){ int j; assert( nArg<=ArraySize(azArg) ); p->nWidth = nArg-1; p->colWidth = realloc(p->colWidth, (p->nWidth+1)*sizeof(int)*2); if( p->colWidth==0 && p->nWidth>0 ) shell_out_of_memory(); @@ -11562,14 +11288,14 @@ const char *zErrorTail; const char *zErrorType; if( zErrMsg==0 ){ zErrorType = "Error"; zErrorTail = sqlite3_errmsg(p->db); - }else if( strncmp(zErrMsg, "in prepare, ",12)==0 ){ + }else if( cli_strncmp(zErrMsg, "in prepare, ",12)==0 ){ zErrorType = "Parse error"; zErrorTail = &zErrMsg[12]; - }else if( strncmp(zErrMsg, "stepping, ", 10)==0 ){ + }else if( cli_strncmp(zErrMsg, "stepping, ", 10)==0 ){ zErrorType = "Runtime error"; zErrorTail = &zErrMsg[10]; }else{ zErrorType = "Error"; zErrorTail = zErrMsg; @@ -11607,11 +11333,11 @@ static char *one_input_line(FILE *in, char *zPrior, int isContinuation){ /* Parse the next line from shellState.wasm.zInput. */ const char *zBegin = shellState.wasm.zPos; const char *z = zBegin; char *zLine = 0; - int nZ = 0; + i64 nZ = 0; UNUSED_PARAMETER(in); UNUSED_PARAMETER(isContinuation); if(!z || !*z){ return 0; @@ -11623,11 +11349,11 @@ --nZ; } shellState.wasm.zPos = z; zLine = realloc(zPrior, nZ+1); shell_check_oom(zLine); - memcpy(zLine, zBegin, (size_t)nZ); + memcpy(zLine, zBegin, nZ); zLine[nZ] = 0; return zLine; } #endif /* SQLITE_SHELL_FIDDLE */ @@ -11641,16 +11367,16 @@ ** Return the number of errors. */ static int process_input(ShellState *p){ char *zLine = 0; /* A single input line */ char *zSql = 0; /* Accumulated SQL text */ - int nLine; /* Length of current line */ - int nSql = 0; /* Bytes of zSql[] used */ - int nAlloc = 0; /* Allocated zSql[] space */ + i64 nLine; /* Length of current line */ + i64 nSql = 0; /* Bytes of zSql[] used */ + i64 nAlloc = 0; /* Allocated zSql[] space */ int rc; /* Error code */ int errCnt = 0; /* Number of errors seen */ - int startline = 0; /* Line number for start of current input */ + i64 startline = 0; /* Line number for start of current input */ QuickScanState qss = QSS_Start; /* Accumulated line status (so far) */ if( p->inputNesting==MAX_INPUT_NESTING ){ /* This will be more informative in a later version. */ utf8_printf(stderr,"Input nesting limit (%d) reached at line %d." 
@@ -11696,19 +11422,19 @@ } qss = QSS_Start; continue; } /* No single-line dispositions remain; accumulate line(s). */ - nLine = strlen30(zLine); + nLine = strlen(zLine); if( nSql+nLine+2>=nAlloc ){ /* Grow buffer by half-again increments when big. */ nAlloc = nSql+(nSql>>1)+nLine+100; zSql = realloc(zSql, nAlloc); shell_check_oom(zSql); } if( nSql==0 ){ - int i; + i64 i; for(i=0; zLine[i] && IsSpace(zLine[i]); i++){} assert( nAlloc>0 && zSql!=0 ); memcpy(zSql, zLine+i, nLine+1-i); startline = p->lineno; nSql = nLine-i; @@ -11804,11 +11530,11 @@ #endif #endif /* !_WIN32_WCE */ if( home_dir ){ - int n = strlen30(home_dir) + 1; + i64 n = strlen(home_dir) + 1; char *z = malloc( n ); if( z ) memcpy(z, home_dir, n); home_dir = z; } @@ -12047,10 +11773,11 @@ setBinaryMode(stdin, 0); setvbuf(stderr, 0, _IONBF, 0); /* Make sure stderr is unbuffered */ #ifdef SQLITE_SHELL_FIDDLE stdin_is_interactive = 0; stdout_is_console = 1; + data.wasm.zDefaultDbName = "/fiddle.sqlite3"; #else stdin_is_interactive = isatty(0); stdout_is_console = isatty(1); #endif @@ -12074,11 +11801,11 @@ } } #endif #if USE_SYSTEM_SQLITE+0!=1 - if( strncmp(sqlite3_sourceid(),SQLITE_SOURCE_ID,60)!=0 ){ + if( cli_strncmp(sqlite3_sourceid(),SQLITE_SOURCE_ID,60)!=0 ){ utf8_printf(stderr, "SQLite header and source version mismatch\n%s\n%s\n", sqlite3_sourceid(), SQLITE_SOURCE_ID); exit(1); } #endif @@ -12096,13 +11823,13 @@ shell_check_oom(argvToFree); argcToFree = argc; argv = argvToFree + argc; for(i=0; i0x7fff0000 ) szHeap = 0x7fff0000; sqlite3_config(SQLITE_CONFIG_HEAP, malloc((int)szHeap), (int)szHeap, 64); #else (void)cmdline_option_value(argc, argv, ++i); #endif - }else if( strcmp(z,"-pagecache")==0 ){ + }else if( cli_strcmp(z,"-pagecache")==0 ){ sqlite3_int64 n, sz; sz = integerValue(cmdline_option_value(argc,argv,++i)); if( sz>70000 ) sz = 70000; if( sz<0 ) sz = 0; n = integerValue(cmdline_option_value(argc,argv,++i)); @@ -12193,28 +11920,28 @@ n = 0xffffffffffffLL/sz; } sqlite3_config(SQLITE_CONFIG_PAGECACHE, (n>0 && sz>0) ? 
malloc(n*sz) : 0, sz, n); data.shellFlgs |= SHFLG_Pagecache; - }else if( strcmp(z,"-lookaside")==0 ){ + }else if( cli_strcmp(z,"-lookaside")==0 ){ int n, sz; sz = (int)integerValue(cmdline_option_value(argc,argv,++i)); if( sz<0 ) sz = 0; n = (int)integerValue(cmdline_option_value(argc,argv,++i)); if( n<0 ) n = 0; sqlite3_config(SQLITE_CONFIG_LOOKASIDE, sz, n); if( sz*n==0 ) data.shellFlgs &= ~SHFLG_Lookaside; - }else if( strcmp(z,"-threadsafe")==0 ){ + }else if( cli_strcmp(z,"-threadsafe")==0 ){ int n; n = (int)integerValue(cmdline_option_value(argc,argv,++i)); switch( n ){ case 0: sqlite3_config(SQLITE_CONFIG_SINGLETHREAD); break; case 2: sqlite3_config(SQLITE_CONFIG_MULTITHREAD); break; default: sqlite3_config(SQLITE_CONFIG_SERIALIZED); break; } #ifdef SQLITE_ENABLE_VFSTRACE - }else if( strcmp(z,"-vfstrace")==0 ){ + }else if( cli_strcmp(z,"-vfstrace")==0 ){ extern int vfstrace_register( const char *zTraceName, const char *zOldVfsName, int (*xOut)(const char*,void*), void *pOutArg, @@ -12221,54 +11948,56 @@ int makeDefault ); vfstrace_register("trace",0,(int(*)(const char*,void*))fputs,stderr,1); #endif #ifdef SQLITE_ENABLE_MULTIPLEX - }else if( strcmp(z,"-multiplex")==0 ){ + }else if( cli_strcmp(z,"-multiplex")==0 ){ extern int sqlite3_multiple_initialize(const char*,int); sqlite3_multiplex_initialize(0, 1); #endif - }else if( strcmp(z,"-mmap")==0 ){ + }else if( cli_strcmp(z,"-mmap")==0 ){ sqlite3_int64 sz = integerValue(cmdline_option_value(argc,argv,++i)); sqlite3_config(SQLITE_CONFIG_MMAP_SIZE, sz, sz); #ifdef SQLITE_ENABLE_SORTER_REFERENCES - }else if( strcmp(z,"-sorterref")==0 ){ + }else if( cli_strcmp(z,"-sorterref")==0 ){ sqlite3_int64 sz = integerValue(cmdline_option_value(argc,argv,++i)); sqlite3_config(SQLITE_CONFIG_SORTERREF_SIZE, (int)sz); #endif - }else if( strcmp(z,"-vfs")==0 ){ + }else if( cli_strcmp(z,"-vfs")==0 ){ zVfs = cmdline_option_value(argc, argv, ++i); #ifdef SQLITE_HAVE_ZLIB - }else if( strcmp(z,"-zip")==0 ){ + }else if( cli_strcmp(z,"-zip")==0 ){ data.openMode = SHELL_OPEN_ZIPFILE; #endif - }else if( strcmp(z,"-append")==0 ){ + }else if( cli_strcmp(z,"-append")==0 ){ data.openMode = SHELL_OPEN_APPENDVFS; #ifndef SQLITE_OMIT_DESERIALIZE - }else if( strcmp(z,"-deserialize")==0 ){ + }else if( cli_strcmp(z,"-deserialize")==0 ){ data.openMode = SHELL_OPEN_DESERIALIZE; - }else if( strcmp(z,"-maxsize")==0 && i+1zDbFilename==0 ){ @@ -12334,131 +12063,133 @@ */ for(i=1; i0 ){ utf8_printf(stderr, "Error: cannot mix regular SQL or dot-commands" " with \"%s\"\n", z); return 1; } @@ -12492,11 +12223,11 @@ arDotCommand(&data, 1, argv+i, argc-i); } readStdin = 0; break; #endif - }else if( strcmp(z,"-safe")==0 ){ + }else if( cli_strcmp(z,"-safe")==0 ){ data.bSafeMode = data.bSafeModePersist = 1; }else{ utf8_printf(stderr,"%s: Error: unknown option: %s\n", Argv0, z); raw_printf(stderr,"Use -help for a list of options.\n"); return 1; @@ -12617,34 +12348,34 @@ #ifdef SQLITE_SHELL_FIDDLE /* Only for emcc experimentation purposes. */ int fiddle_experiment(int a,int b){ - return a + b; -} - -/* Only for emcc experimentation purposes. - - Define this function in JS using: - - emcc ... --js-library somefile.js - - containing: - -mergeInto(LibraryManager.library, { - my_foo: function(){ - console.debug("my_foo()",arguments); - } -}); + return a + b; +} + +/* +** Returns a pointer to the current DB handle. +*/ +sqlite3 * fiddle_db_handle(){ + return globalDb; +} + +/* +** Returns a pointer to the given DB name's VFS. If zDbName is 0 then +** "main" is assumed. 
Returns 0 if no db with the given name is +** open. */ -/*extern void my_foo(sqlite3 *);*/ -/* Only for emcc experimentation purposes. */ -sqlite3 * fiddle_the_db(){ - printf("fiddle_the_db(%p)\n", (const void*)globalDb); - /*my_foo(globalDb);*/ - return globalDb; +sqlite3_vfs * fiddle_db_vfs(const char *zDbName){ + sqlite3_vfs * pVfs = 0; + if(globalDb){ + sqlite3_file_control(globalDb, zDbName ? zDbName : "main", + SQLITE_FCNTL_VFS_POINTER, &pVfs); + } + return pVfs; } + /* Only for emcc experimentation purposes. */ sqlite3 * fiddle_db_arg(sqlite3 *arg){ printf("fiddle_db_arg(%p)\n", (const void*)arg); return arg; } @@ -12654,11 +12385,11 @@ ** SharedWorker() (which manages the wasm module) is performing work ** which should be interrupted. Unfortunately, SharedWorker is not ** portable enough to make real use of. */ void fiddle_interrupt(void){ - if(globalDb) sqlite3_interrupt(globalDb); + if( globalDb ) sqlite3_interrupt(globalDb); } /* ** Returns the filename of the given db name, assuming "main" if ** zDbName is NULL. Returns NULL if globalDb is not opened. @@ -12668,73 +12399,74 @@ ? sqlite3_db_filename(globalDb, zDbName ? zDbName : "main") : NULL; } /* -** Closes, unlinks, and reopens the db using its current filename (or -** the default if the db is currently closed). It is assumed, for -** purposes of the fiddle build, that the file is in a transient -** virtual filesystem within the browser. +** Completely wipes out the contents of the currently-opened database +** but leaves its storage intact for reuse. */ void fiddle_reset_db(void){ - char *zFilename = 0; - if(0==globalDb){ - shellState.pAuxDb->zDbFilename = "/fiddle.sqlite3"; - }else{ - zFilename = - sqlite3_mprintf("%s", sqlite3_db_filename(globalDb, "main")); - shell_check_oom(zFilename); - close_db(globalDb); - shellDeleteFile(zFilename); - shellState.db = 0; - shellState.pAuxDb->zDbFilename = zFilename; - } - open_db(&shellState, 0); - sqlite3_free(zFilename); + if( globalDb ){ + int rc = sqlite3_db_config(globalDb, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0); + if( 0==rc ) rc = sqlite3_exec(globalDb, "VACUUM", 0, 0, 0); + sqlite3_db_config(globalDb, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); + } +} + +/* +** Uses the current database's VFS xRead to stream the db file's +** contents out to the given callback. The callback gets a single +** chunk of size n (its 2nd argument) on each call and must return 0 +** on success, non-0 on error. This function returns 0 on success, +** SQLITE_NOTFOUND if no db is open, or propagates any other non-0 +** code from the callback. Note that this is not thread-friendly: it +** expects that it will be the only thread reading the db file and +** takes no measures to ensure that is the case. +*/ +int fiddle_export_db( int (*xCallback)(unsigned const char *zOut, int n) ){ + sqlite3_int64 nSize = 0; + sqlite3_int64 nPos = 0; + sqlite3_file * pFile = 0; + unsigned char buf[1024 * 8]; + int nBuf = (int)sizeof(buf); + int rc = shellState.db + ? sqlite3_file_control(shellState.db, "main", + SQLITE_FCNTL_FILE_POINTER, &pFile) + : SQLITE_NOTFOUND; + if( rc ) return rc; + rc = pFile->pMethods->xFileSize(pFile, &nSize); + if( rc ) return rc; + if(nSize % nBuf){ + /* DB size is not an even multiple of the buffer size. Reduce + ** buffer size so that we do not unduly inflate the db size when + ** exporting. 
*/ + if(0 == nSize % 4096) nBuf = 4096; + else if(0 == nSize % 2048) nBuf = 2048; + else if(0 == nSize % 1024) nBuf = 1024; + else nBuf = 512; + } + for( ; 0==rc && nPospMethods->xRead(pFile, buf, nBuf, nPos); + if(SQLITE_IOERR_SHORT_READ == rc){ + rc = (nPos + nBuf) < nSize ? rc : 0/*assume EOF*/; + } + if( 0==rc ) rc = xCallback(buf, nBuf); + } + return rc; } /* -** Trivial exportable function for emscripten. Needs to be exported using: -** -** emcc ..flags... -sEXPORTED_FUNCTIONS=_fiddle_exec -sEXPORTED_RUNTIME_METHODS=ccall,cwrap -** -** (Note the underscore before the function name.) It processes zSql -** as if it were input to the sqlite3 shell and redirects all output -** to the wasm binding. +** Trivial exportable function for emscripten. It processes zSql as if +** it were input to the sqlite3 shell and redirects all output to the +** wasm binding. fiddle_main() must have been called before this +** is called, or results are undefined. */ void fiddle_exec(const char * zSql){ - static int once = 0; - int rc = 0; - if(!once){ - /* Simulate an argv array for main() */ - static char * argv[] = {"fiddle", - "-bail", - "-safe"}; - rc = fiddle_main((int)(sizeof(argv)/sizeof(argv[0])), argv); - once = rc ? -1 : 1; - memset(&shellState.wasm, 0, sizeof(shellState.wasm)); - printf( - "SQLite version %s %.19s\n" /*extra-version-info*/, - sqlite3_libversion(), sqlite3_sourceid() - ); - puts("WASM shell"); - puts("Enter \".help\" for usage hints."); - if(once>0){ - fiddle_reset_db(); - } - if(shellState.db){ - printf("Connected to %s.\n", fiddle_db_filename(NULL)); - }else{ - fprintf(stderr,"ERROR initializing db!\n"); - return; - } - } - if(once<0){ - puts("DB init failed. Not executing SQL."); - }else if(zSql && *zSql){ + if(zSql && *zSql){ + if('.'==*zSql) puts(zSql); shellState.wasm.zInput = zSql; shellState.wasm.zPos = zSql; process_input(&shellState); - memset(&shellState.wasm, 0, sizeof(shellState.wasm)); + shellState.wasm.zInput = shellState.wasm.zPos = 0; } } #endif /* SQLITE_SHELL_FIDDLE */ Index: src/sqlite.h.in ================================================================== --- src/sqlite.h.in +++ src/sqlite.h.in @@ -610,13 +610,14 @@ #define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ #define SQLITE_OPEN_NOFOLLOW 0x01000000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_EXRESCODE 0x02000000 /* Extended result codes */ /* Reserved: 0x00F00000 */ +#define SQLITE_OPEN_SHARED_SCHEMA 0x01000000 /* Ok for sqlite3_open_v2() */ + /* Legacy compatibility: */ #define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ - /* ** CAPI3REF: Device Characteristics ** ** The xDeviceCharacteristics method of the [sqlite3_io_methods] @@ -668,17 +669,21 @@ /* ** CAPI3REF: File Locking Levels ** ** SQLite uses one of these integer values as the second ** argument to calls it makes to the xLock() and xUnlock() methods -** of an [sqlite3_io_methods] object. +** of an [sqlite3_io_methods] object. These values are ordered from +** lest restrictive to most restrictive. +** +** The argument to xLock() is always SHARED or higher. The argument to +** xUnlock is either SHARED or NONE. 
*/ -#define SQLITE_LOCK_NONE 0 -#define SQLITE_LOCK_SHARED 1 -#define SQLITE_LOCK_RESERVED 2 -#define SQLITE_LOCK_PENDING 3 -#define SQLITE_LOCK_EXCLUSIVE 4 +#define SQLITE_LOCK_NONE 0 /* xUnlock() only */ +#define SQLITE_LOCK_SHARED 1 /* xLock() or xUnlock() */ +#define SQLITE_LOCK_RESERVED 2 /* xLock() only */ +#define SQLITE_LOCK_PENDING 3 /* xLock() only */ +#define SQLITE_LOCK_EXCLUSIVE 4 /* xLock() only */ /* ** CAPI3REF: Synchronization Type Flags ** ** When SQLite invokes the xSync() method of an @@ -752,11 +757,18 @@ **
  • [SQLITE_LOCK_SHARED], **
  • [SQLITE_LOCK_RESERVED], **
  • [SQLITE_LOCK_PENDING], or **
  • [SQLITE_LOCK_EXCLUSIVE]. ** -** xLock() increases the lock. xUnlock() decreases the lock. +** xLock() upgrades the database file lock. In other words, xLock() moves the +** database file lock in the direction NONE toward EXCLUSIVE. The argument to +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** SQLITE_LOCK_NONE. If the database file lock is already at or above the +** requested lock, then the call to xLock() is a no-op. +** xUnlock() downgrades the database file lock to either SHARED or NONE. +** If the lock is already at or below the requested lock state, then the call +** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, ** PENDING, or EXCLUSIVE lock on the file. It returns true ** if such a lock exists and false otherwise. ** @@ -857,13 +869,12 @@ **
  • [[SQLITE_FCNTL_LOCKSTATE]] ** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This ** opcode causes the xFileControl method to write the current state of ** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], ** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) -** into an integer that the pArg argument points to. This capability -** is used during testing and is only available when the SQLITE_TEST -** compile-time option is used. +** into an integer that the pArg argument points to. +** This capability is only available if SQLite is compiled with [SQLITE_DEBUG]. ** **
  • [[SQLITE_FCNTL_SIZE_HINT]] ** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS ** layer a hint of how large the database file will grow to be during the ** current transaction. This hint is not guaranteed to be accurate but it @@ -1180,10 +1191,16 @@ ** by clients within the current process, only within other processes. ** ** **
  • [[SQLITE_FCNTL_CKSM_FILE]] ** Used by the cksmvfs VFS module only. +** +**
  • [[SQLITE_FCNTL_RESET_CACHE]] +** If there is currently no transaction open on the database, and the +** database is not a temp db, then this file-control purges the contents +** of the in-memory page cache. If there is an open transaction, or if +** the db is a temp-db, it is a no-op, not an error. ** */ #define SQLITE_FCNTL_LOCKSTATE 1 #define SQLITE_FCNTL_GET_LOCKPROXYFILE 2 #define SQLITE_FCNTL_SET_LOCKPROXYFILE 3 @@ -1222,10 +1239,11 @@ #define SQLITE_FCNTL_CKPT_DONE 37 #define SQLITE_FCNTL_RESERVE_BYTES 38 #define SQLITE_FCNTL_CKPT_START 39 #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 +#define SQLITE_FCNTL_RESET_CACHE 42 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE #define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE #define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO @@ -1251,10 +1269,30 @@ ** structure must be typedefed in order to work around compiler warnings ** on some platforms. */ typedef struct sqlite3_api_routines sqlite3_api_routines; +/* +** CAPI3REF: File Name +** +** Type [sqlite3_filename] is used by SQLite to pass filenames to the +** xOpen method of a [VFS]. It may be cast to (const char*) and treated +** as a normal, nul-terminated, UTF-8 buffer containing the filename, but +** may also be passed to special APIs such as: +** +**
      +**
    • sqlite3_filename_database() +**
    • sqlite3_filename_journal() +**
    • sqlite3_filename_wal() +**
    • sqlite3_uri_parameter() +**
    • sqlite3_uri_boolean() +**
    • sqlite3_uri_int64() +**
    • sqlite3_uri_key() +**
    +*/ +typedef const char *sqlite3_filename; + /* ** CAPI3REF: OS Interface Object ** ** An instance of the sqlite3_vfs object defines the interface between ** the SQLite core and the underlying operating system. The "vfs" @@ -1429,11 +1467,11 @@ int szOsFile; /* Size of subclassed sqlite3_file */ int mxPathname; /* Maximum file pathname length */ sqlite3_vfs *pNext; /* Next registered VFS */ const char *zName; /* Name of this virtual file system */ void *pAppData; /* Pointer to application-specific data */ - int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, + int (*xOpen)(sqlite3_vfs*, sqlite3_filename zName, sqlite3_file*, int flags, int *pOutFlags); int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); @@ -2307,10 +2345,11 @@ ** deliberately corrupt the database file are disabled. The disabled ** features include but are not limited to the following: **
      **
    • The [PRAGMA writable_schema=ON] statement. **
    • The [PRAGMA journal_mode=OFF] statement. +**
    • The [PRAGMA schema_version=N] statement. **
    • Writes to the [sqlite_dbpage] virtual table. **
    • Direct writes to [shadow tables]. **
    ** ** @@ -3699,14 +3738,14 @@ ** it has access to all the same query parameters as were found on the ** main database file. ** ** See the [URI filename] documentation for additional information. */ -const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); -int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); -sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); -const char *sqlite3_uri_key(const char *zFilename, int N); +const char *sqlite3_uri_parameter(sqlite3_filename z, const char *zParam); +int sqlite3_uri_boolean(sqlite3_filename z, const char *zParam, int bDefault); +sqlite3_int64 sqlite3_uri_int64(sqlite3_filename, const char*, sqlite3_int64); +const char *sqlite3_uri_key(sqlite3_filename z, int N); /* ** CAPI3REF: Translate filenames ** ** These routines are available to [VFS|custom VFS implementations] for @@ -3731,13 +3770,13 @@ ** In all of the above, if F is not the name of a database, journal or WAL ** filename passed into the VFS from the SQLite core and F is not the ** return value from [sqlite3_db_filename()], then the result is ** undefined and is likely a memory access violation. */ -const char *sqlite3_filename_database(const char*); -const char *sqlite3_filename_journal(const char*); -const char *sqlite3_filename_wal(const char*); +const char *sqlite3_filename_database(sqlite3_filename); +const char *sqlite3_filename_journal(sqlite3_filename); +const char *sqlite3_filename_wal(sqlite3_filename); /* ** CAPI3REF: Database File Corresponding To A Journal ** ** ^If X is the name of a rollback or WAL-mode journal file that is @@ -3799,18 +3838,18 @@ ** used again after sqlite3_free_filename(Y) has been called. This means ** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y, ** then the corresponding [sqlite3_module.xClose() method should also be ** invoked prior to calling sqlite3_free_filename(Y). */ -char *sqlite3_create_filename( +sqlite3_filename sqlite3_create_filename( const char *zDatabase, const char *zJournal, const char *zWal, int nParam, const char **azParam ); -void sqlite3_free_filename(char*); +void sqlite3_free_filename(sqlite3_filename); /* ** CAPI3REF: Error Codes And Messages ** METHOD: sqlite3 ** @@ -5509,10 +5548,20 @@ ** such a conversion is possible without loss of information (in other ** words, if the value is a string that looks like a number) ** then the conversion is performed. Otherwise no conversion occurs. ** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ ** +** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], +** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current encoding +** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) +** returns something other than SQLITE_TEXT, then the return value from +** sqlite3_value_encoding(X) is meaningless. ^Calls to +** sqlite3_value_text(X), sqlite3_value_text16(X), sqlite3_value_text16be(X), +** sqlite3_value_text16le(X), sqlite3_value_bytes(X), or +** sqlite3_value_bytes16(X) might change the encoding of the value X and +** thus change the return from subsequent calls to sqlite3_value_encoding(X). 
+** ** ^Within the [xUpdate] method of a [virtual table], the ** sqlite3_value_nochange(X) interface returns true if and only if ** the column corresponding to X is unchanged by the UPDATE operation ** that the xUpdate method call was invoked to implement and if ** and the prior [xColumn] method call that was invoked to extracted @@ -5573,10 +5622,11 @@ int sqlite3_value_bytes16(sqlite3_value*); int sqlite3_value_type(sqlite3_value*); int sqlite3_value_numeric_type(sqlite3_value*); int sqlite3_value_nochange(sqlite3_value*); int sqlite3_value_frombind(sqlite3_value*); +int sqlite3_value_encoding(sqlite3_value*); /* ** CAPI3REF: Finding The Subtype Of SQL Values ** METHOD: sqlite3_value ** @@ -5626,11 +5676,11 @@ ** In those cases, sqlite3_aggregate_context() might be called for the ** first time from within xFinal().)^ ** ** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer ** when first called if N is less than or equal to zero or if a memory -** allocate error occurs. +** allocation error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is ** determined by the N parameter on first successful call. Changing the ** value of N in any subsequent call to sqlite3_aggregate_context() within ** the same aggregate function instance will not resize the memory @@ -5831,13 +5881,14 @@ ** application-defined function to be a text string in an encoding ** specified by the fifth (and last) parameter, which must be one ** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]. ** ^SQLite takes the text result from the application from ** the 2nd parameter of the sqlite3_result_text* interfaces. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is negative, then SQLite takes result text from the 2nd parameter -** through the first zero character. +** ^If the 3rd parameter to any of the sqlite3_result_text* interfaces +** other than sqlite3_result_text64() is negative, then SQLite computes +** the string length itself by searching the 2nd parameter for the first +** zero character. ** ^If the 3rd parameter to the sqlite3_result_text* interfaces ** is non-negative, then as many bytes (not characters) of the text ** pointed to by the 2nd parameter are taken as the application-defined ** function result. If the 3rd parameter is non-negative, then it ** must be the byte offset into the string where the NUL terminator would @@ -6329,11 +6380,11 @@ **
  • [sqlite3_filename_database()] **
  • [sqlite3_filename_journal()] **
  • [sqlite3_filename_wal()] ** */ -const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); +sqlite3_filename sqlite3_db_filename(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Determine if a database is read-only ** METHOD: sqlite3 ** @@ -10282,35 +10333,10 @@ ** This interface is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SNAPSHOT] option. */ SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); -/* -** CAPI3REF: Wal related information regarding the most recent COMMIT -** EXPERIMENTAL -** -** This function reports on the state of the wal file (if any) for database -** zDb, which should be "main", "temp", or the name of the attached database. -** Its results - the values written to the output parameters - are only -** defined if the most recent SQL command on the connection was a successful -** COMMIT that wrote data to wal-mode database zDb. -** -** Assuming the above conditions are met, output parameter (*pnFrame) is set -** to the total number of frames in the wal file. Parameter (*pnPrior) is -** set to the number of frames that were present in the wal file before the -** most recent transaction was committed. So that the number of frames written -** by the most recent transaction is (*pnFrame)-(*pnPrior). -** -** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code. It -** is not an error if this function is called at a time when the results -** are undefined. -*/ -SQLITE_EXPERIMENTAL int sqlite3_wal_info( - sqlite3 *db, const char *zDb, - unsigned int *pnPrior, unsigned int *pnFrame -); - /* ** CAPI3REF: Serialize a database ** ** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory ** that is a serialization of the S database on [database connection] D. Index: src/sqlite3ext.h ================================================================== --- src/sqlite3ext.h +++ src/sqlite3ext.h @@ -329,13 +329,13 @@ const char *(*uri_key)(const char*,int); const char *(*filename_database)(const char*); const char *(*filename_journal)(const char*); const char *(*filename_wal)(const char*); /* Version 3.32.0 and later */ - char *(*create_filename)(const char*,const char*,const char*, + const char *(*create_filename)(const char*,const char*,const char*, int,const char**); - void (*free_filename)(char*); + void (*free_filename)(const char*); sqlite3_file *(*database_file_object)(const char*); /* Version 3.34.0 and later */ int (*txn_state)(sqlite3*,const char*); /* Version 3.36.1 and later */ sqlite3_int64 (*changes64)(sqlite3*); @@ -355,10 +355,12 @@ int (*deserialize)(sqlite3*,const char*,unsigned char*, sqlite3_int64,sqlite3_int64,unsigned); unsigned char *(*serialize)(sqlite3*,const char *,sqlite3_int64*, unsigned int); const char *(*db_name)(sqlite3*,int); + /* Version 3.40.0 and later */ + int (*value_encoding)(sqlite3_value*); }; /* ** This is the function signature used for all extension entry points. It ** is also defined in the file "loadext.c". 
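Because the value_encoding slot is appended to the end of sqlite3_api_routines ("Version 3.40.0 and later"), a loadable extension that reaches it through the redirection macro should first confirm that the host library is new enough to have populated that slot. A minimal sketch under that assumption follows; the extension name encname and its entry point are hypothetical.

#include "sqlite3ext.h"
SQLITE_EXTENSION_INIT1

/* encname(X): return the numeric encoding code of a TEXT value X, else NULL.
** Inside an extension, sqlite3_value_encoding() resolves to
** sqlite3_api->value_encoding. */
static void encnameFunc(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
  (void)nArg;
  if( sqlite3_value_type(apArg[0])==SQLITE_TEXT ){
    sqlite3_result_int(ctx, sqlite3_value_encoding(apArg[0]));
  }else{
    sqlite3_result_null(ctx);
  }
}

#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_encname_init(
  sqlite3 *db,
  char **pzErrMsg,
  const sqlite3_api_routines *pApi
){
  SQLITE_EXTENSION_INIT2(pApi);
  if( sqlite3_libversion_number()<3040000 ){
    /* An older host never fills in the value_encoding slot; fail the load
    ** rather than call through an uninitialized function pointer. */
    *pzErrMsg = sqlite3_mprintf("encname requires SQLite 3.40.0 or later");
    return SQLITE_ERROR;
  }
  return sqlite3_create_function(db, "encname", 1, SQLITE_UTF8, 0,
                                 encnameFunc, 0, 0);
}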
@@ -679,10 +681,12 @@ #ifndef SQLITE_OMIT_DESERIALIZE #define sqlite3_deserialize sqlite3_api->deserialize #define sqlite3_serialize sqlite3_api->serialize #endif #define sqlite3_db_name sqlite3_api->db_name +/* Version 3.40.0 and later */ +#define sqlite3_value_encoding sqlite3_api->value_encoding #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) /* This case when the file really is being compiled as a loadable ** extension */ Index: src/sqliteInt.h ================================================================== --- src/sqliteInt.h +++ src/sqliteInt.h @@ -206,11 +206,11 @@ /* ** Include the configuration header output by 'configure' if we're using the ** autoconf-based build */ #if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) -#include "config.h" +#include "sqlite_cfg.h" #define SQLITECONFIG_H 1 #endif #include "sqliteLimit.h" @@ -1179,19 +1179,20 @@ typedef struct Cte Cte; typedef struct CteUse CteUse; typedef struct Db Db; typedef struct DbFixer DbFixer; typedef struct Schema Schema; +typedef struct SchemaPool SchemaPool; typedef struct Expr Expr; typedef struct ExprList ExprList; -typedef struct FastPrng FastPrng; typedef struct FKey FKey; typedef struct FuncDestructor FuncDestructor; typedef struct FuncDef FuncDef; typedef struct FuncDefHash FuncDefHash; typedef struct IdList IdList; typedef struct Index Index; +typedef struct IndexedExpr IndexedExpr; typedef struct IndexSample IndexSample; typedef struct KeyClass KeyClass; typedef struct KeyInfo KeyInfo; typedef struct Lookaside Lookaside; typedef struct LookasideSlot LookasideSlot; @@ -1253,10 +1254,11 @@ #define MASKBIT(n) (((Bitmask)1)<<(n)) #define MASKBIT64(n) (((u64)1)<<(n)) #define MASKBIT32(n) (((unsigned int)1)<<(n)) #define SMASKBIT32(n) ((n)<=31?((unsigned int)1)<<(n):0) #define ALLBITS ((Bitmask)-1) +#define TOPBIT (((Bitmask)1)<<(BMS-1)) /* A VList object records a mapping between parameters/variables/wildcards ** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer ** variable number associated with that parameter. See the format description ** on the sqlite3VListAdd() routine for more information. A VList is really @@ -1267,15 +1269,15 @@ /* ** Defer sourcing vdbe.h and btree.h until after the "u8" and ** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque ** pointer types (i.e. FuncDef) defined above. */ +#include "os.h" #include "pager.h" #include "btree.h" #include "vdbe.h" #include "pcache.h" -#include "os.h" #include "mutex.h" /* The SQLITE_EXTRA_DURABLE compile-time option used to set the default ** synchronous setting to EXTRA. It is no longer supported. */ @@ -1305,18 +1307,10 @@ #endif #ifndef SQLITE_DEFAULT_WAL_SYNCHRONOUS # define SQLITE_DEFAULT_WAL_SYNCHRONOUS SQLITE_DEFAULT_SYNCHRONOUS #endif -/* -** State of a simple PRNG used for the per-connection and per-pager -** pseudo-random number generators. -*/ -struct FastPrng { - unsigned int x, y; -}; - /* ** Each database file to be accessed by the system is an instance ** of the following structure. There are normally two of these structures ** in the sqlite.aDb[] array. aDb[0] is the main database file and ** aDb[1] is the database file used to hold temporary tables. Additional @@ -1326,10 +1320,14 @@ char *zDbSName; /* Name of this database. 
(schema name, not filename) */ Btree *pBt; /* The B*Tree structure for this database file */ u8 safety_level; /* How aggressive at syncing data to disk */ u8 bSyncSet; /* True if "PRAGMA synchronous=N" has been run */ Schema *pSchema; /* Pointer to database schema (possibly shared) */ +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + SchemaPool *pSPool; /* For REUSE_SCHEMA mode */ + VTable *pVTable; /* List of all VTable objects (REUSE_SCHEMA mode only) */ +#endif }; /* ** An instance of the following structure stores a database schema. ** @@ -1357,10 +1355,13 @@ Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */ u8 file_format; /* Schema format version for this file */ u8 enc; /* Text encoding used by this database */ u16 schemaFlags; /* Flags associated with this schema */ int cache_size; /* Number of pages to use in the cache */ +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + Schema *pNext; /* Next Schema object SchemaPool (REUSE_SCHEMA) */ +#endif }; /* ** These macros can be used to test, set, or clear bits in the ** Db.pSchema->flags field. @@ -1560,11 +1561,10 @@ int errMask; /* & result codes with this before returning */ int iSysErrno; /* Errno value from last system error */ u32 dbOptFlags; /* Flags to enable/disable optimizations */ u8 enc; /* Text encoding */ u8 autoCommit; /* The auto-commit flag. */ - u8 eConcurrent; /* CONCURRENT_* value */ u8 temp_store; /* 1: file 2: memory 0: default */ u8 mallocFailed; /* True if we have seen a malloc failure */ u8 bBenignMalloc; /* Do not require OOMs if true */ u8 dfltLockMode; /* Default locking-mode for attached dbs */ signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */ @@ -1574,11 +1574,10 @@ u8 mTrace; /* zero or more SQLITE_TRACE flags */ u8 noSharedCache; /* True if no shared-cache backends */ u8 nSqlExec; /* Number of pending OP_SqlExec opcodes */ u8 eOpenState; /* Current condition of the connection */ int nextPagesize; /* Pagesize after VACUUM if >0 */ - FastPrng sPrng; /* State of the per-connection PRNG */ i64 nChange; /* Value returned by sqlite3_changes() */ i64 nTotalChange; /* Value returned by sqlite3_total_changes() */ int aLimit[SQLITE_N_LIMIT]; /* Limits */ int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */ struct sqlite3InitInfo { /* Information used during initialization */ @@ -1684,16 +1683,15 @@ #ifdef SQLITE_USER_AUTHENTICATION sqlite3_userauth auth; /* User authentication information */ #endif }; -/* -** Candidate values for sqlite3.eConcurrent -*/ -#define CONCURRENT_NONE 0 -#define CONCURRENT_OPEN 1 -#define CONCURRENT_SCHEMA 2 +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +# define IsSharedSchema(db) (((db)->openFlags & SQLITE_OPEN_SHARED_SCHEMA)!=0) +#else +# define IsSharedSchema(db) 0 +#endif /* ** A macro to discover the encoding of a database. */ #define SCHEMA_ENC(db) ((db)->aDb[0].pSchema->enc) @@ -1751,11 +1749,10 @@ #define SQLITE_CountRows HI(0x00001) /* Count rows changed by INSERT, */ /* DELETE, or UPDATE and return */ /* the count using a callback. 
*/ #define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */ -#define SQLITE_NoopUpdate 0x01000000 /* UPDATE operations are no-ops */ /* Flags used only if debugging */ #ifdef SQLITE_DEBUG #define SQLITE_SqlTrace HI(0x0100000) /* Debug print SQL as it executes */ #define SQLITE_VdbeListing HI(0x0200000) /* Debug listings of VDBE progs */ #define SQLITE_VdbeTrace HI(0x0400000) /* True to trace VDBE execution */ @@ -1773,10 +1770,13 @@ #define DBFLAG_VacuumInto 0x0008 /* Currently running VACUUM INTO */ #define DBFLAG_SchemaKnownOk 0x0010 /* Schema is known to be valid */ #define DBFLAG_InternalFunc 0x0020 /* Allow use of internal functions */ #define DBFLAG_EncodingFixed 0x0040 /* No longer possible to change enc. */ +#define DBFLAG_SchemaInuse 0x0080 /* Do not release sharable schemas */ +#define DBFLAG_FreeSchema 0x0100 /* Free extra shared schemas on release */ + /* ** Bits of the sqlite3.dbOptFlags field that are used by the ** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to ** selectively disable various optimizations. */ @@ -1805,10 +1805,11 @@ #define SQLITE_BloomPulldown 0x00100000 /* Run Bloom filters early */ #define SQLITE_BalancedMerge 0x00200000 /* Balance multi-way merges */ #define SQLITE_ReleaseReg 0x00400000 /* Use OP_ReleaseReg for testing */ #define SQLITE_FlttnUnionAll 0x00800000 /* Disable the UNION ALL flattener */ /* TH3 expects this value ^^^^^^^^^^ See flatten04.test */ +#define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* ** Macros for testing whether or not optimizations are enabled or disabled. */ @@ -2272,10 +2273,13 @@ int nRef; /* Number of pointers to this structure */ u8 bConstraint; /* True if constraints are supported */ u8 eVtabRisk; /* Riskiness of allowing hacker access */ int iSavepoint; /* Depth of the SAVEPOINT stack */ VTable *pNext; /* Next in linked list (see above) */ +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + char *zName; /* Table name (REUSE_SCHEMA mode) */ +#endif }; /* Allowed values for VTable.eVtabRisk */ #define SQLITE_VTABRISK_Low 0 @@ -2377,11 +2381,11 @@ ** table support is omitted from the build. */ #ifndef SQLITE_OMIT_VIRTUALTABLE # define IsVirtual(X) ((X)->eTabType==TABTYP_VTAB) # define ExprIsVtab(X) \ - ((X)->op==TK_COLUMN && (X)->y.pTab!=0 && (X)->y.pTab->eTabType==TABTYP_VTAB) + ((X)->op==TK_COLUMN && (X)->y.pTab->eTabType==TABTYP_VTAB) #else # define IsVirtual(X) 0 # define ExprIsVtab(X) 0 #endif @@ -2594,13 +2598,25 @@ ** Ex1.aCol[], hence Ex2.aiColumn[1]==0. ** ** The Index.onError field determines whether or not the indexed columns ** must be unique and what to do if they are not. When Index.onError=OE_None, ** it means this is not a unique index. Otherwise it is a unique index -** and the value of Index.onError indicate the which conflict resolution -** algorithm to employ whenever an attempt is made to insert a non-unique +** and the value of Index.onError indicates which conflict resolution +** algorithm to employ when an attempt is made to insert a non-unique ** element. +** +** The colNotIdxed bitmask is used in combination with SrcItem.colUsed +** for a fast test to see if an index can serve as a covering index. +** colNotIdxed has a 1 bit for every column of the original table that +** is *not* available in the index. Thus the expression +** "colUsed & colNotIdxed" will be non-zero if the index is not a +** covering index. 
The most significant bit of of colNotIdxed will always +** be true (note-20221022-a). If a column beyond the 63rd column of the +** table is used, the "colUsed & colNotIdxed" test will always be non-zero +** and we have to assume either that the index is not covering, or use +** an alternative (slower) algorithm to determine whether or not +** the index is covering. ** ** While parsing a CREATE TABLE or CREATE INDEX statement in order to ** generate VDBE code (as opposed to parsing one read from an sqlite_schema ** table as part of parsing an existing database schema), transient instances ** of this structure may be created. In this case the Index.tnum variable is @@ -2633,19 +2649,21 @@ unsigned noSkipScan:1; /* Do not try to use skip-scan if true */ unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */ unsigned bNoQuery:1; /* Do not use this index to optimize queries */ unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */ unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */ + unsigned bHasExpr:1; /* Index contains an expression, either a literal + ** expression, or a reference to a VIRTUAL column */ #ifdef SQLITE_ENABLE_STAT4 int nSample; /* Number of elements in aSample[] */ int nSampleCol; /* Size of IndexSample.anEq[] and so on */ tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */ IndexSample *aSample; /* Samples of the left-most key */ tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */ tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */ #endif - Bitmask colNotIdxed; /* 0 for unindexed columns in pTab */ + Bitmask colNotIdxed; /* Unindexed columns in pTab */ }; /* ** Allowed values for Index.idxType */ @@ -3094,10 +3112,18 @@ /* ** The SrcItem object represents a single term in the FROM clause of a query. ** The SrcList object is mostly an array of SrcItems. ** +** The jointype starts out showing the join type between the current table +** and the next table on the list. The parser builds the list this way. +** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each +** jointype expresses the join between the table and the previous table. +** +** In the colUsed field, the high-order bit (bit 63) is set if the table +** contains more than 63 columns and the 64-th or later column is used. +** ** Union member validity: ** ** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc ** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy ** u2.pIBIndex fg.isIndexedBy && !fg.isCte @@ -3133,18 +3159,18 @@ int iCursor; /* The VDBE cursor number used to access this table */ union { Expr *pOn; /* fg.isUsing==0 => The ON clause of a join */ IdList *pUsing; /* fg.isUsing==1 => The USING clause of a join */ } u3; - Bitmask colUsed; /* Bit N (1<62 */ union { char *zIndexedBy; /* Identifier from "INDEXED BY " clause */ ExprList *pFuncArg; /* Arguments to table-valued-function */ } u1; union { Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ - CteUse *pCteUse; /* CTE Usage info info fg.isCte is true */ + CteUse *pCteUse; /* CTE Usage info when fg.isCte is true */ } u2; }; /* ** The OnOrUsing object represents either an ON clause or a USING clause. @@ -3154,27 +3180,15 @@ Expr *pOn; /* The ON clause of a join */ IdList *pUsing; /* The USING clause of a join */ }; /* -** The following structure describes the FROM clause of a SELECT statement. -** Each table or subquery in the FROM clause is a separate element of -** the SrcList.a[] array. 
-** -** With the addition of multiple database support, the following structure -** can also be used to describe a particular table such as the table that -** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL, -** such a table must be a simple name: ID. But in SQLite, the table can -** now be identified by a database name, a dot, then the table name: ID.ID. -** -** The jointype starts out showing the join type between the current table -** and the next table on the list. The parser builds the list this way. -** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each -** jointype expresses the join between the table and the previous table. -** -** In the colUsed field, the high-order bit (bit 63) is set if the table -** contains more than 63 columns and the 64-th or later column is used. +** This object represents one or more tables that are the source of +** content for an SQL statement. For example, a single SrcList object +** is used to hold the FROM clause of a SELECT statement. SrcList also +** represents the target tables for DELETE, INSERT, and UPDATE statements. +** */ struct SrcList { int nSrc; /* Number of tables or subqueries in the FROM clause */ u32 nAlloc; /* Number of entries allocated in a[] below */ SrcItem a[1]; /* One entry for each identifier on the list */ @@ -3515,11 +3529,11 @@ u8 eDest; /* How to dispose of the results. One of SRT_* above. */ int iSDParm; /* A parameter used by the eDest disposal method */ int iSDParm2; /* A second parameter for the eDest disposal method */ int iSdst; /* Base register where results are written */ int nSdst; /* Number of registers allocated */ - char *zAffSdst; /* Affinity used when eDest==SRT_Set */ + char *zAffSdst; /* Affinity used for SRT_Set, SRT_Table, and similar */ ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */ }; /* ** During code generation of statements that do inserts into AUTOINCREMENT @@ -3580,10 +3594,32 @@ # define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I)) # define DbMaskAllZero(M) (M)==0 # define DbMaskNonZero(M) (M)!=0 #endif +/* +** For each index X that has as one of its arguments either an expression +** or the name of a virtual generated column, and if X is in scope such that +** the value of the expression can simply be read from the index, then +** there is an instance of this object on the Parse.pIdxExpr list. +** +** During code generation, while generating code to evaluate expressions, +** this list is consulted and if a matching expression is found, the value +** is read from the index rather than being recomputed. +*/ +struct IndexedExpr { + Expr *pExpr; /* The expression contained in the index */ + int iDataCur; /* The data cursor associated with the index */ + int iIdxCur; /* The index cursor */ + int iIdxCol; /* The index column that contains value of pExpr */ + u8 bMaybeNullRow; /* True if we need an OP_IfNullRow check */ + IndexedExpr *pIENext; /* Next in a list of all indexed expressions */ +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + const char *zIdxName; /* Name of index, used only for bytecode comments */ +#endif +}; + /* ** An instance of the ParseCleanup object specifies an operation that ** should be performed after parsing to deallocation resources obtained ** during the parse and which are no longer needed. 
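A hedged aside on the IndexedExpr list introduced above (not part of the patch): the sketch below shows how a code generator could consult Parse.pIdxExpr before recomputing an expression. It assumes compilation inside the SQLite core, and the helper name codeValueFromIndex is hypothetical.

#include "sqliteInt.h"

static int codeValueFromIndex(Parse *pParse, Expr *pExpr, int target){
  IndexedExpr *p;                 /* For looping over Parse.pIdxExpr */
  Vdbe *v = pParse->pVdbe;        /* VM under construction */
  for(p=pParse->pIdxExpr; p; p=p->pIENext){
    /* For simplicity, fall back to recomputation whenever an OP_IfNullRow
    ** guard would be required; a real generator emits the guard instead. */
    if( p->bMaybeNullRow ) continue;
    if( sqlite3ExprCompare(pParse, pExpr, p->pExpr, -1)==0 ){
      /* The expression value is already stored in column iIdxCol of the
      ** index cursor, so read it rather than recomputing pExpr. */
      sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target);
      return 1;
    }
  }
  return 0;   /* Not available from any index; caller codes pExpr normally */
}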
*/ @@ -3621,11 +3657,11 @@ u8 isMultiWrite; /* True if statement may modify/insert multiple rows */ u8 mayAbort; /* True if statement may throw an ABORT exception */ u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */ u8 okConstFactor; /* OK to factor out constants */ u8 disableLookaside; /* Number of times lookaside has been disabled */ - u8 disableVtab; /* Disable all virtual tables for this parse */ + u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ #endif int nRangeReg; /* Size of the temporary register block */ @@ -3638,10 +3674,11 @@ ** of the base register during check-constraint eval */ int nLabel; /* The *negative* of the number of labels used */ int nLabelAlloc; /* Number of slots in aLabel */ int *aLabel; /* Space to hold the labels */ ExprList *pConstExpr;/* Constant expressions */ + IndexedExpr *pIdxExpr;/* List of expressions used by active indexes */ Token constraintName;/* Name of the constraint currently being parsed */ yDbMask writeMask; /* Start a write transaction on these databases */ yDbMask cookieMask; /* Bitmask of schema verified databases */ int regRowid; /* Register holding rowid of CREATE TABLE entry */ int regRoot; /* Register holding root page number for new objects */ @@ -3822,10 +3859,13 @@ the is stored here */ Schema *pSchema; /* Schema containing the trigger */ Schema *pTabSchema; /* Schema containing the table */ TriggerStep *step_list; /* Link list of trigger program steps */ Trigger *pNext; /* Next trigger associated with the table */ +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + char *zTabSchema; /* Temp triggers in IsSharedSchema() dbs only */ +#endif }; /* ** A trigger is either a BEFORE or an AFTER trigger. The following constants ** determine which. @@ -3938,10 +3978,11 @@ char **pzErrMsg; /* Error message stored here */ int iDb; /* 0 for main database. 1 for TEMP, 2.. for ATTACHed */ int rc; /* Result code stored here */ u32 mInitFlags; /* Flags controlling error messages */ u32 nInitRow; /* Number of rows processed */ + u64 cksum; /* Schema checksum for REUSE_SCHEMA mode */ Pgno mxPage; /* Maximum page number. 0 for no limit. */ } InitData; /* ** Allowed values for mInitFlags @@ -4073,19 +4114,19 @@ SrcList *pSrcList; /* FROM clause */ struct CCurHint *pCCurHint; /* Used by codeCursorHint() */ struct RefSrcList *pRefSrcList; /* sqlite3ReferencesSrcList() */ int *aiCol; /* array of column indexes */ struct IdxCover *pIdxCover; /* Check for index coverage */ - struct IdxExprTrans *pIdxTrans; /* Convert idxed expr to column */ ExprList *pGroupBy; /* GROUP BY clause */ Select *pSelect; /* HAVING to WHERE clause ctx */ struct WindowRewrite *pRewrite; /* Window rewrite context */ struct WhereConst *pConst; /* WHERE clause constants */ struct RenameCtx *pRename; /* RENAME COLUMN context */ struct Table *pTab; /* Table of generated column */ + struct CoveringIndexCheck *pCovIdxCk; /* Check for covering index */ SrcItem *pSrcItem; /* A single FROM clause item */ - DbFixer *pFix; + DbFixer *pFix; /* See sqlite3FixSelect() */ } u; }; /* ** The following structure contains information used by the sqliteFix... @@ -4408,16 +4449,20 @@ ** The alloca() routine never returns NULL. This will cause code paths ** that deal with sqlite3StackAlloc() failures to be unreachable. 
*/ #ifdef SQLITE_USE_ALLOCA # define sqlite3StackAllocRaw(D,N) alloca(N) +# define sqlite3StackAllocRawNN(D,N) alloca(N) # define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) # define sqlite3StackFree(D,P) +# define sqlite3StackFreeNN(D,P) #else # define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) +# define sqlite3StackAllocRawNN(D,N) sqlite3DbMallocRawNN(D,N) # define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) # define sqlite3StackFree(D,P) sqlite3DbFree(D,P) +# define sqlite3StackFreeNN(D,P) sqlite3DbFreeNN(D,P) #endif /* Do not allow both MEMSYS5 and MEMSYS3 to be defined together. If they ** are, disable MEMSYS3 */ @@ -4773,12 +4818,10 @@ Vdbe *sqlite3GetVdbe(Parse*); #ifndef SQLITE_UNTESTABLE void sqlite3PrngSaveState(void); void sqlite3PrngRestoreState(void); #endif -void sqlite3FastPrngInit(FastPrng*); -void sqlite3FastRandomness(FastPrng*, int N, void *P); void sqlite3RollbackAll(sqlite3*,int); void sqlite3CodeVerifySchema(Parse*, int); void sqlite3CodeVerifyNamedSchema(Parse*, const char *zDb); void sqlite3BeginTransaction(Parse*, int); void sqlite3EndTransaction(Parse*,int); @@ -4960,10 +5003,11 @@ #define getVarint sqlite3GetVarint #define putVarint sqlite3PutVarint const char *sqlite3IndexAffinityStr(sqlite3*, Index*); +char *sqlite3TableAffinityStr(sqlite3*,const Table*); void sqlite3TableAffinity(Vdbe*, Table*, int); char sqlite3CompareAffinity(const Expr *pExpr, char aff2); int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity); char sqlite3TableColumnAffinity(const Table*,int); char sqlite3ExprAffinity(const Expr *pExpr); @@ -5031,11 +5075,10 @@ #ifndef SQLITE_AMALGAMATION extern const unsigned char sqlite3OpcodeProperty[]; extern const char sqlite3StrBINARY[]; extern const unsigned char sqlite3StdTypeLen[]; extern const char sqlite3StdTypeAffinity[]; -extern const char sqlite3StdTypeMap[]; extern const char *sqlite3StdType[]; extern const unsigned char sqlite3UpperToLower[]; extern const unsigned char *sqlite3aLTb; extern const unsigned char *sqlite3aEQb; extern const unsigned char *sqlite3aGTb; @@ -5093,10 +5136,33 @@ void sqlite3DeleteIndexSamples(sqlite3*,Index*); void sqlite3DefaultRowEst(Index*); void sqlite3RegisterLikeFunctions(sqlite3*, int); int sqlite3IsLikeFunction(sqlite3*,Expr*,int*,char*); void sqlite3SchemaClear(void *); +void sqlite3SchemaClearOrDisconnect(sqlite3*, int); + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA +int sqlite3SchemaConnect(sqlite3*, int, u64); +int sqlite3SchemaDisconnect(sqlite3 *, int, int); +Schema *sqlite3SchemaExtract(SchemaPool*); +int sqlite3SchemaLoad(sqlite3*, int, int*, char**); +void sqlite3SchemaReleaseAll(sqlite3*); +void sqlite3SchemaRelease(sqlite3*, int); +void sqlite3SchemaAdjustUsed(sqlite3*, int, int, int*); +void sqlite3SchemaWritable(Parse*, int); +void sqlite3UnlockReusableSchema(sqlite3 *db, int bRelease); +int sqlite3LockReusableSchema(sqlite3 *db); +#else +# define sqlite3SchemaWritable(x,y) +# define sqlite3UnlockReusableSchema(x,y) (void)(y) +# define sqlite3LockReusableSchema(x) 0 +# define sqlite3SchemaDisconnect(x,y,z) SQLITE_OK +# define sqlite3SchemaLoad(w,x,y,z) SQLITE_OK +# define sqlite3SchemaRelease(y,z) +# define sqlite3SchemaConnect(x,y,z) SQLITE_OK +#endif + Schema *sqlite3SchemaGet(sqlite3 *, Btree *); int sqlite3SchemaToIndex(sqlite3 *db, Schema *); KeyInfo *sqlite3KeyInfoAlloc(sqlite3*,int,int); void sqlite3KeyInfoUnref(KeyInfo*); KeyInfo *sqlite3KeyInfoRef(KeyInfo*); @@ -5474,7 +5540,11 @@ void sqlite3VectorErrorMsg(Parse*, Expr*); #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS 
const char **sqlite3CompileOptions(int *pnOpt); #endif + +#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) +int sqlite3KvvfsInit(void); +#endif #endif /* SQLITEINT_H */ Index: src/status.c ================================================================== --- src/status.c +++ src/status.c @@ -286,17 +286,30 @@ ** databases. *pHighwater is set to zero. */ case SQLITE_DBSTATUS_SCHEMA_USED: { int i; /* Used to iterate through schemas */ int nByte = 0; /* Used to accumulate return value */ + int bReleaseSchema; sqlite3BtreeEnterAll(db); + bReleaseSchema = sqlite3LockReusableSchema(db); db->pnBytesFreed = &nByte; assert( db->lookaside.pEnd==db->lookaside.pTrueEnd ); db->lookaside.pEnd = db->lookaside.pStart; for(i=0; inDb; i++){ - Schema *pSchema = db->aDb[i].pSchema; + Schema *pSchema; +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + int bUnload = 0; + int nUsed = nByte; + if( db->aDb[i].pSPool ){ + char *zDummy = 0; + rc = sqlite3SchemaLoad(db, i, &bUnload, &zDummy); + sqlite3_free(zDummy); + if( rc ) break; + } +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ + pSchema = db->aDb[i].pSchema; if( ALWAYS(pSchema!=0) ){ HashElem *p; nByte += sqlite3GlobalConfig.m.xRoundup(sizeof(HashElem)) * ( pSchema->tblHash.count @@ -314,11 +327,18 @@ } for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){ sqlite3DeleteTable(db, (Table *)sqliteHashData(p)); } } +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( db->aDb[i].pSPool ){ + if( bUnload ) sqlite3SchemaRelease(db, i); + sqlite3SchemaAdjustUsed(db, i, nUsed, &nByte); + } +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ } + sqlite3UnlockReusableSchema(db, bReleaseSchema); db->pnBytesFreed = 0; db->lookaside.pEnd = db->lookaside.pTrueEnd; sqlite3BtreeLeaveAll(db); *pHighwater = 0; Index: src/tclsqlite.c ================================================================== --- src/tclsqlite.c +++ src/tclsqlite.c @@ -3703,10 +3703,13 @@ ){ Tcl_WrongNumArgs(interp, 1, objv, "HANDLE ?FILENAME? ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN?" " ?-nofollow BOOLEAN?" " ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?" +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + " ?-shared-schema BOOLEAN?" 
+#endif ); return TCL_ERROR; } /* @@ -3834,10 +3837,20 @@ if( b ){ flags |= SQLITE_OPEN_URI; }else{ flags &= ~SQLITE_OPEN_URI; } +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + }else if( strcmp(zArg, "-shared-schema")==0 ){ + int b; + if( Tcl_GetBooleanFromObj(interp, objv[i], &b) ) return TCL_ERROR; + if( b ){ + flags |= SQLITE_OPEN_SHARED_SCHEMA; + }else{ + flags &= ~SQLITE_OPEN_SHARED_SCHEMA; + } +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ }else if( strcmp(zArg, "-translatefilename")==0 ){ if( Tcl_GetBooleanFromObj(interp, objv[i], &bTranslateFileName) ){ return TCL_ERROR; } }else{ Index: src/test1.c ================================================================== --- src/test1.c +++ src/test1.c @@ -1850,11 +1850,12 @@ {"utf8", SQLITE_UTF8 }, {"utf16", SQLITE_UTF16 }, {"utf16le", SQLITE_UTF16LE }, {"utf16be", SQLITE_UTF16BE }, {"any", SQLITE_ANY }, - {"0", 0 } + {"0", 0 }, + {0, 0 } }; if( objc<5 || (objc%2)==0 ){ Tcl_WrongNumArgs(interp, 1, objv, "DB NAME NARG ENC SWITCHES..."); return TCL_ERROR; @@ -7271,10 +7272,11 @@ } aVerb[] = { { "SQLITE_TESTCTRL_LOCALTIME_FAULT", SQLITE_TESTCTRL_LOCALTIME_FAULT }, { "SQLITE_TESTCTRL_SORTER_MMAP", SQLITE_TESTCTRL_SORTER_MMAP }, { "SQLITE_TESTCTRL_IMPOSTER", SQLITE_TESTCTRL_IMPOSTER }, { "SQLITE_TESTCTRL_INTERNAL_FUNCTIONS", SQLITE_TESTCTRL_INTERNAL_FUNCTIONS}, + { 0, 0 } }; int iVerb; int iFlag; int rc; @@ -7625,11 +7627,16 @@ /* ** optimization_control DB OPT BOOLEAN ** ** Enable or disable query optimizations using the sqlite3_test_control() ** interface. Disable if BOOLEAN is false and enable if BOOLEAN is true. -** OPT is the name of the optimization to be disabled. +** OPT is the name of the optimization to be disabled. OPT can also be a +** list or optimizations names, in which case all optimizations named are +** enabled or disabled. +** +** Each invocation of this control overrides all prior invocations. The +** changes are not cumulative. 
*/ static int SQLITE_TCLAPI optimization_control( void * clientData, Tcl_Interp *interp, int objc, @@ -7638,10 +7645,11 @@ int i; sqlite3 *db; const char *zOpt; int onoff; int mask = 0; + int cnt = 0; static const struct { const char *zOptName; int mask; } aOpt[] = { { "all", SQLITE_AllOpts }, @@ -7656,10 +7664,11 @@ { "omit-noop-join", SQLITE_OmitNoopJoin }, { "stat4", SQLITE_Stat4 }, { "skip-scan", SQLITE_SkipScan }, { "push-down", SQLITE_PushDown }, { "balanced-merge", SQLITE_BalancedMerge }, + { "propagate-const", SQLITE_PropagateConst }, }; if( objc!=4 ){ Tcl_WrongNumArgs(interp, 1, objv, "DB OPT BOOLEAN"); return TCL_ERROR; @@ -7666,25 +7675,26 @@ } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; if( Tcl_GetBooleanFromObj(interp, objv[3], &onoff) ) return TCL_ERROR; zOpt = Tcl_GetString(objv[2]); for(i=0; i=sizeof(aOpt)/sizeof(aOpt[0]) ){ + if( cnt==0 ){ Tcl_AppendResult(interp, "unknown optimization - should be one of:", (char*)0); for(i=0; i2 ){ + Tcl_AppendResult(interp, "cannot create ", argv[1], + "MB file because Windows " + "does not support sparse files", (void*)0); + return TCL_ERROR; + } +#endif pVfs = sqlite3_vfs_find(0); nFile = (int)strlen(argv[2]); zFile = sqlite3_malloc( nFile+2 ); if( zFile==0 ) return TCL_ERROR; Index: src/test_config.c ================================================================== --- src/test_config.c +++ src/test_config.c @@ -679,16 +679,10 @@ Tcl_SetVar2(interp, "sqlite_options", "truncate_opt", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "truncate_opt", "1", TCL_GLOBAL_ONLY); #endif -#ifndef SQLITE_OMIT_CONCURRENT - Tcl_SetVar2(interp, "sqlite_options", "concurrent", "1", TCL_GLOBAL_ONLY); -#else - Tcl_SetVar2(interp, "sqlite_options", "concurrent", "0", TCL_GLOBAL_ONLY); -#endif - #ifdef SQLITE_OMIT_UTF16 Tcl_SetVar2(interp, "sqlite_options", "utf16", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "utf16", "1", TCL_GLOBAL_ONLY); #endif @@ -786,10 +780,16 @@ #ifdef SQLITE_OMIT_WINDOWFUNC Tcl_SetVar2(interp, "sqlite_options", "windowfunc", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "windowfunc", "1", TCL_GLOBAL_ONLY); #endif + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + Tcl_SetVar2(interp, "sqlite_options", "sharedschema", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "sharedschema", "0", TCL_GLOBAL_ONLY); +#endif #define LINKVAR(x) { \ static const int cv_ ## x = SQLITE_ ## x; \ Tcl_LinkVar(interp, "SQLITE_" #x, (char *)&(cv_ ## x), \ TCL_LINK_INT | TCL_LINK_READ_ONLY); } Index: src/test_demovfs.c ================================================================== --- src/test_demovfs.c +++ src/test_demovfs.c @@ -460,25 +460,27 @@ if( rc!=0 && errno==ENOENT ) return SQLITE_OK; if( rc==0 && dirSync ){ int dfd; /* File descriptor open on directory */ int i; /* Iterator variable */ + char *zSlash; char zDir[MAXPATHNAME+1]; /* Name of directory containing file zPath */ /* Figure out the directory name from the path of the file deleted. */ sqlite3_snprintf(MAXPATHNAME, zDir, "%s", zPath); zDir[MAXPATHNAME] = '\0'; - for(i=strlen(zDir); i>1 && zDir[i]!='/'; i++); - zDir[i] = '\0'; - - /* Open a file-descriptor on the directory. Sync. Close. */ - dfd = open(zDir, O_RDONLY, 0); - if( dfd<0 ){ - rc = -1; - }else{ - rc = fsync(dfd); - close(dfd); + zSlash = strrchr(zDir,'/'); + if( zSlash ){ + /* Open a file-descriptor on the directory. Sync. Close. 
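For context (not part of the patch): the Tcl "-shared-schema" switch and the sqlite_options(sharedschema) capability registered above correspond to an SQLITE_OPEN_SHARED_SCHEMA open-flag at the C level. A sketch of the equivalent open call, assuming the shared-schema branch exposes that flag in sqlite3.h:

#include "sqlite3.h"

/* Open zFile, requesting shared-schema mode when the library provides it.
** The flag name comes from the patch; whether it is visible in sqlite3.h
** depends on the branch being built, hence the #ifdef guard. */
static int openSharedSchema(const char *zFile, sqlite3 **ppDb){
  int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
#ifdef SQLITE_OPEN_SHARED_SCHEMA
  flags |= SQLITE_OPEN_SHARED_SCHEMA;  /* reuse parsed schemas across connections */
#endif
  return sqlite3_open_v2(zFile, ppDb, flags, 0);
}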
*/ + zSlash[0] = 0; + dfd = open(zDir, O_RDONLY, 0); + if( dfd<0 ){ + rc = -1; + }else{ + rc = fsync(dfd); + close(dfd); + } } } return (rc==0 ? SQLITE_OK : SQLITE_IOERR_DELETE); } Index: src/test_hexio.c ================================================================== --- src/test_hexio.c +++ src/test_hexio.c @@ -188,11 +188,11 @@ Tcl_SetObjResult(interp, Tcl_NewIntObj(written)); return TCL_OK; } /* -** USAGE: hexio_get_int [-littleendian] HEXDATA +** USAGE: hexio_get_int HEXDATA ** ** Interpret the HEXDATA argument as a big-endian integer. Return ** the value of that integer. HEXDATA can contain between 2 and 8 ** hexadecimal digits. */ @@ -205,24 +205,16 @@ int val; int nIn, nOut; const unsigned char *zIn; unsigned char *aOut; unsigned char aNum[4]; - int bLittle = 0; - - if( objc==3 ){ - int n; - char *z = Tcl_GetStringFromObj(objv[1], &n); - if( n>=2 && n<=13 && memcmp(z, "-littleendian", n)==0 ){ - bLittle = 1; - } - } - if( (objc-bLittle)!=2 ){ - Tcl_WrongNumArgs(interp, 1, objv, "[-littleendian] HEXDATA"); + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "HEXDATA"); return TCL_ERROR; } - zIn = (const unsigned char *)Tcl_GetStringFromObj(objv[1+bLittle], &nIn); + zIn = (const unsigned char *)Tcl_GetStringFromObj(objv[1], &nIn); aOut = sqlite3_malloc( 1 + nIn/2 ); if( aOut==0 ){ return TCL_ERROR; } nOut = sqlite3TestHexToBin(zIn, nIn, aOut); @@ -231,15 +223,11 @@ }else{ memset(aNum, 0, sizeof(aNum)); memcpy(&aNum[4-nOut], aOut, nOut); } sqlite3_free(aOut); - if( bLittle ){ - val = (aNum[3]<<24) | (aNum[2]<<16) | (aNum[1]<<8) | aNum[0]; - }else{ - val = (aNum[0]<<24) | (aNum[1]<<16) | (aNum[2]<<8) | aNum[3]; - } + val = (aNum[0]<<24) | (aNum[1]<<16) | (aNum[2]<<8) | aNum[3]; Tcl_SetObjResult(interp, Tcl_NewIntObj(val)); return TCL_OK; } Index: src/test_multiplex.c ================================================================== --- src/test_multiplex.c +++ src/test_multiplex.c @@ -270,11 +270,11 @@ z = sqlite3_malloc64( n+5 ); if( z==0 ){ return SQLITE_NOMEM; } multiplexFilename(pGroup->zName, pGroup->nName, pGroup->flags, iChunk, z); - pGroup->aReal[iChunk].z = sqlite3_create_filename(z,"","",0,0); + pGroup->aReal[iChunk].z = (char*)sqlite3_create_filename(z,"","",0,0); sqlite3_free(z); if( pGroup->aReal[iChunk].z==0 ) return SQLITE_NOMEM; } return SQLITE_OK; } ADDED src/test_schemapool.c Index: src/test_schemapool.c ================================================================== --- /dev/null +++ src/test_schemapool.c @@ -0,0 +1,282 @@ +/* +** 2006 June 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Code for testing the virtual table interfaces. This code +** is not included in the SQLite library. It is used for automated +** testing of the SQLite library. +*/ + +/* +** None of this works unless we have virtual tables. +*/ +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_TEST) + +#include + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + +#include "sqliteInt.h" + +/* The code in this file defines a sqlite3 virtual-table module with +** the following schema. 
+*/ +#define SCHEMAPOOL_SCHEMA \ +"CREATE TABLE x(" \ +" cksum INTEGER, " \ +" nref INTEGER, " \ +" nschema INTEGER, " \ +" ndelete INTEGER " \ +")" + +#define SCHEMAPOOL_NFIELD 4 + +typedef struct schemapool_vtab schemapool_vtab; +typedef struct schemapool_cursor schemapool_cursor; + +/* A schema table object */ +struct schemapool_vtab { + sqlite3_vtab base; +}; + +/* A schema table cursor object */ +struct schemapool_cursor { + sqlite3_vtab_cursor base; + sqlite3_int64 *aData; + int iRow; + int nRow; +}; + +/* +** Table destructor for the schema module. +*/ +static int schemaPoolDestroy(sqlite3_vtab *pVtab){ + sqlite3_free(pVtab); + return 0; +} + +/* +** Table constructor for the schema module. +*/ +static int schemaPoolCreate( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + int rc = SQLITE_NOMEM; + schemapool_vtab *pVtab = sqlite3_malloc(sizeof(schemapool_vtab)); + if( pVtab ){ + memset(pVtab, 0, sizeof(schemapool_vtab)); + rc = sqlite3_declare_vtab(db, SCHEMAPOOL_SCHEMA); + if( rc!=SQLITE_OK ){ + sqlite3_free(pVtab); + pVtab = 0; + } + } + *ppVtab = (sqlite3_vtab *)pVtab; + return rc; +} + +/* +** Open a new cursor on the schema table. +*/ +static int schemaPoolOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + int rc = SQLITE_NOMEM; + schemapool_cursor *pCur; + pCur = sqlite3_malloc(sizeof(schemapool_cursor)); + if( pCur ){ + memset(pCur, 0, sizeof(schemapool_cursor)); + *ppCursor = (sqlite3_vtab_cursor*)pCur; + rc = SQLITE_OK; + } + return rc; +} + +/* +** Close a schema table cursor. +*/ +static int schemaPoolClose(sqlite3_vtab_cursor *cur){ + schemapool_cursor *pCur = (schemapool_cursor*)cur; + sqlite3_free(pCur->aData); + sqlite3_free(pCur); + return SQLITE_OK; +} + +/* +** Retrieve a column of data. +*/ +static int schemaPoolColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ + schemapool_cursor *pCur = (schemapool_cursor*)cur; + assert( i==0 || i==1 || i==2 || i==3 ); + sqlite3_result_int64(ctx, pCur->aData[pCur->iRow*SCHEMAPOOL_NFIELD + i]); + return SQLITE_OK; +} + +/* +** Retrieve the current rowid. +*/ +static int schemaPoolRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){ + schemapool_cursor *pCur = (schemapool_cursor*)cur; + *pRowid = pCur->iRow + 1; + return SQLITE_OK; +} + +static int schemaPoolEof(sqlite3_vtab_cursor *cur){ + schemapool_cursor *pCur = (schemapool_cursor*)cur; + return pCur->iRow>=pCur->nRow; +} + +/* +** Advance the cursor to the next row. +*/ +static int schemaPoolNext(sqlite3_vtab_cursor *cur){ + schemapool_cursor *pCur = (schemapool_cursor*)cur; + pCur->iRow++; + return SQLITE_OK; +} + +struct SchemaPool { + int nRef; /* Number of pointers to this object */ + int nDelete; /* Schema objects deleted by ReleaseAll() */ + u64 cksum; /* Checksum for this Schema contents */ + Schema *pSchema; /* Linked list of Schema objects */ + Schema sSchema; /* The single dummy schema object */ + SchemaPool *pNext; /* Next element in schemaPoolList */ +}; +extern SchemaPool *sqlite3SchemaPoolList(void); + +/* +** Reset a schemaPool table cursor. 
+*/ +static int schemaPoolFilter( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + SchemaPool *pSPool; + schemapool_cursor *pCur = (schemapool_cursor*)pVtabCursor; + + sqlite3_free(pCur->aData); + pCur->aData = 0; + pCur->nRow = 0; + pCur->iRow = 0; + + for(pSPool = sqlite3SchemaPoolList(); pSPool; pSPool=pSPool->pNext){ + pCur->nRow++; + } + + if( pCur->nRow ){ + int iRow = 0; + int nByte = SCHEMAPOOL_NFIELD * pCur->nRow * sizeof(i64); + pCur->aData = (i64*)sqlite3_malloc(nByte); + if( pCur->aData==0 ) return SQLITE_NOMEM; + for(pSPool = sqlite3SchemaPoolList(); pSPool; pSPool=pSPool->pNext){ + Schema *p; + i64 nSchema = 0; + for(p=pSPool->pSchema; p; p=p->pNext){ + nSchema++; + } + pCur->aData[0 + iRow*SCHEMAPOOL_NFIELD] = pSPool->cksum; + pCur->aData[1 + iRow*SCHEMAPOOL_NFIELD] = (i64)pSPool->nRef; + pCur->aData[2 + iRow*SCHEMAPOOL_NFIELD] = nSchema; + pCur->aData[3 + iRow*SCHEMAPOOL_NFIELD] = (i64)pSPool->nDelete; + iRow++; + } + } + + return SQLITE_OK; +} + +/* +** Analyse the WHERE condition. +*/ +static int schemaPoolBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ + return SQLITE_OK; +} + +/* +** A virtual table module that merely echos method calls into TCL +** variables. +*/ +static sqlite3_module schemaPoolModule = { + 0, /* iVersion */ + schemaPoolCreate, + schemaPoolCreate, + schemaPoolBestIndex, + schemaPoolDestroy, + schemaPoolDestroy, + schemaPoolOpen, /* xOpen - open a cursor */ + schemaPoolClose, /* xClose - close a cursor */ + schemaPoolFilter, /* xFilter - configure scan constraints */ + schemaPoolNext, /* xNext - advance a cursor */ + schemaPoolEof, /* xEof */ + schemaPoolColumn, /* xColumn - read data */ + schemaPoolRowid, /* xRowid - read data */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindMethod */ + 0, /* xRename */ +}; + +/* +** Decode a pointer to an sqlite3 object. +*/ +extern int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb); + +/* +** Register the schema virtual table module. +*/ +static int SQLITE_TCLAPI register_schemapool_module( + ClientData clientData, /* Not used */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + sqlite3 *db; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; +#ifndef SQLITE_OMIT_VIRTUALTABLE + sqlite3_create_module(db, "schemapool", &schemaPoolModule, 0); +#endif + return TCL_OK; +} + +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ +#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_TEST) */ + +/* +** Register commands with the TCL interpreter. +*/ +int Sqlitetestschemapool_Init(Tcl_Interp *interp){ +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + static struct { + char *zName; + Tcl_ObjCmdProc *xProc; + void *clientData; + } aObjCmd[] = { + { "register_schemapool_module", register_schemapool_module, 0 }, + }; + int i; + for(i=0; idb; /* Obtain a pointer to the sqlite3_file object open on the main db file. */ rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, (void *)&fd); if( rc!=SQLITE_OK ) return rc; /* Obtain the "recovery" lock. Normally, this lock is only obtained by ** clients running database recovery. 
*/ - assert( pLock->bRecoveryLocked==0 ); rc = superlockShmLock(fd, 2, 1, pBusy); if( rc!=SQLITE_OK ) return rc; - pLock->bRecoveryLocked = 1; /* Zero the start of the first shared-memory page. This means that any ** clients that open read or write transactions from this point on will ** have to run recovery before proceeding. Since they need the "recovery" ** lock that this process is holding to do that, no new read or write @@ -142,13 +137,11 @@ /* Obtain exclusive locks on all the "read-lock" slots. Once these locks ** are held, it is guaranteed that there are no active reader, writer or ** checkpointer clients. */ - assert( pLock->bReaderLocked==0 ); rc = superlockShmLock(fd, 3, SQLITE_SHM_NLOCK-3, pBusy); - if( rc==SQLITE_OK ) pLock->bReaderLocked = 1; return rc; } /* ** Release a superlock held on a database file. The argument passed to @@ -161,18 +154,12 @@ int rc; /* Return code */ int flags = SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE; sqlite3_file *fd = 0; rc = sqlite3_file_control(p->db, "main", SQLITE_FCNTL_FILE_POINTER, (void *)&fd); if( rc==SQLITE_OK ){ - if( p->bRecoveryLocked ){ - fd->pMethods->xShmLock(fd, 2, 1, flags); - p->bRecoveryLocked = 0; - } - if( p->bReaderLocked ){ - fd->pMethods->xShmLock(fd, 3, SQLITE_SHM_NLOCK-3, flags); - p->bReaderLocked = 0; - } + fd->pMethods->xShmLock(fd, 2, 1, flags); + fd->pMethods->xShmLock(fd, 3, SQLITE_SHM_NLOCK-3, flags); } } sqlite3_close(p->db); sqlite3_free(p); } @@ -243,11 +230,11 @@ */ if( rc==SQLITE_OK ){ if( SQLITE_OK==(rc = superlockIsWal(pLock)) && pLock->bWal ){ rc = sqlite3_exec(pLock->db, "COMMIT", 0, 0, 0); if( rc==SQLITE_OK ){ - rc = superlockWalLock(pLock, &busy); + rc = superlockWalLock(pLock->db, &busy); } } } if( rc!=SQLITE_OK ){ Index: src/test_tclsh.c ================================================================== --- src/test_tclsh.c +++ src/test_tclsh.c @@ -75,10 +75,11 @@ extern int Sqlitetest_hexio_Init(Tcl_Interp*); extern int Sqlitetest_init_Init(Tcl_Interp*); extern int Sqlitetest_malloc_Init(Tcl_Interp*); extern int Sqlitetest_mutex_Init(Tcl_Interp*); extern int Sqlitetestschema_Init(Tcl_Interp*); + extern int Sqlitetestschemapool_Init(Tcl_Interp*); extern int Sqlitetestsse_Init(Tcl_Interp*); extern int Sqlitetesttclvar_Init(Tcl_Interp*); extern int Sqlitetestfs_Init(Tcl_Interp*); extern int SqlitetestThread_Init(Tcl_Interp*); extern int SqlitetestOnefile_Init(); @@ -97,20 +98,20 @@ #endif extern int Md5_Init(Tcl_Interp*); extern int Fts5tcl_Init(Tcl_Interp *); extern int SqliteRbu_Init(Tcl_Interp*); extern int Sqlitetesttcl_Init(Tcl_Interp*); - extern int Bgckpt_Init(Tcl_Interp*); #if defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) extern int Sqlitetestfts3_Init(Tcl_Interp *interp); #endif #ifdef SQLITE_ENABLE_ZIPVFS extern int Zipvfs_Init(Tcl_Interp*); #endif extern int TestExpert_Init(Tcl_Interp*); extern int Sqlitetest_window_Init(Tcl_Interp *); extern int Sqlitetestvdbecov_Init(Tcl_Interp *); + extern int TestRecover_Init(Tcl_Interp*); Tcl_CmdInfo cmdInfo; /* Since the primary use case for this binary is testing of SQLite, ** be sure to generate core files if we crash */ @@ -147,10 +148,11 @@ Sqlitetest_hexio_Init(interp); Sqlitetest_init_Init(interp); Sqlitetest_malloc_Init(interp); Sqlitetest_mutex_Init(interp); Sqlitetestschema_Init(interp); + Sqlitetestschemapool_Init(interp); Sqlitetesttclvar_Init(interp); Sqlitetestfs_Init(interp); SqlitetestThread_Init(interp); SqlitetestOnefile_Init(); SqlitetestOsinst_Init(interp); @@ -167,19 +169,18 @@ TestSession_Init(interp); #endif 
Fts5tcl_Init(interp); SqliteRbu_Init(interp); Sqlitetesttcl_Init(interp); - Bgckpt_Init(interp); - #if defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) Sqlitetestfts3_Init(interp); #endif TestExpert_Init(interp); Sqlitetest_window_Init(interp); Sqlitetestvdbecov_Init(interp); + TestRecover_Init(interp); Tcl_CreateObjCommand( interp, "load_testfixture_extensions", load_testfixture_extensions,0,0 ); return 0; Index: src/trigger.c ================================================================== --- src/trigger.c +++ src/trigger.c @@ -49,28 +49,49 @@ */ Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ Schema *pTmpSchema; /* Schema of the pTab table */ Trigger *pList; /* List of triggers to return */ HashElem *p; /* Loop variable for TEMP triggers */ + +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + char *zSchema = 0; + sqlite3 *db = pParse->db; + if( IsSharedSchema(db) ){ + zSchema = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zDbSName; + } +#endif assert( pParse->disableTriggers==0 ); pTmpSchema = pParse->db->aDb[1].pSchema; p = sqliteHashFirst(&pTmpSchema->trigHash); pList = pTab->pTrigger; while( p ){ Trigger *pTrig = (Trigger *)sqliteHashData(p); - if( pTrig->pTabSchema==pTab->pSchema + + int bSchemaMatch; +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( zSchema ){ + /* Shared-schema */ + bSchemaMatch = (0==sqlite3StrICmp(pTrig->zTabSchema, zSchema)); + }else +#endif + { + /* Non-shared-schema */ + bSchemaMatch = (pTrig->pTabSchema==pTab->pSchema); + } + + if( bSchemaMatch && pTrig->table && 0==sqlite3StrICmp(pTrig->table, pTab->zName) - && pTrig->pTabSchema!=pTmpSchema + && (pTrig->pTabSchema!=pTmpSchema || pTrig->bReturning) ){ pTrig->pNext = pList; pList = pTrig; }else if( pTrig->op==TK_RETURNING ){ #ifndef SQLITE_OMIT_VIRTUALTABLE assert( pParse->db->pVtabCtx==0 ); -#endif +#endif assert( pParse->bReturning ); assert( &(pParse->u1.pReturning->retTrig) == pTrig ); pTrig->table = pTab->zName; pTrig->pTabSchema = pTab->pSchema; pTrig->pNext = pList; @@ -256,10 +277,16 @@ pTrigger = (Trigger*)sqlite3DbMallocZero(db, sizeof(Trigger)); if( pTrigger==0 ) goto trigger_cleanup; pTrigger->zName = zName; zName = 0; pTrigger->table = sqlite3DbStrDup(db, pTableName->a[0].zName); +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( IsSharedSchema(db) && iDb==1 ){ + int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); + pTrigger->zTabSchema = sqlite3DbStrDup(db, db->aDb[iTabDb].zDbSName); + } +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ pTrigger->pSchema = db->aDb[iDb].pSchema; pTrigger->pTabSchema = pTab->pSchema; pTrigger->op = (u8)op; pTrigger->tr_tm = tr_tm==TK_BEFORE ? 
TRIGGER_BEFORE : TRIGGER_AFTER; if( IN_RENAME_OBJECT ){ @@ -379,11 +406,11 @@ " VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", db->aDb[iDb].zDbSName, zName, pTrig->table, z); sqlite3DbFree(db, z); sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddParseSchemaOp(v, iDb, + sqlite3VdbeAddParseSchemaOp(pParse, iDb, sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName), 0); } if( db->init.busy ){ Trigger *pLink = pTrig; @@ -598,10 +625,13 @@ void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){ if( pTrigger==0 || pTrigger->bReturning ) return; sqlite3DeleteTriggerStep(db, pTrigger->step_list); sqlite3DbFree(db, pTrigger->zName); sqlite3DbFree(db, pTrigger->table); +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + sqlite3DbFree(db, pTrigger->zTabSchema); +#endif sqlite3ExprDelete(db, pTrigger->pWhen); sqlite3IdListDelete(db, pTrigger->pColumns); sqlite3DbFree(db, pTrigger); } @@ -669,10 +699,11 @@ sqlite3 *db = pParse->db; int iDb; iDb = sqlite3SchemaToIndex(pParse->db, pTrigger->pSchema); assert( iDb>=0 && iDbnDb ); + sqlite3SchemaWritable(pParse, iDb); pTable = tableOfTrigger(pTrigger); assert( (pTable && pTable->pSchema==pTrigger->pSchema) || iDb==1 ); #ifndef SQLITE_OMIT_AUTHORIZATION if( pTable ){ int code = SQLITE_DROP_TRIGGER; @@ -1161,11 +1192,13 @@ NameContext sNC; /* Name context for sub-vdbe */ SubProgram *pProgram = 0; /* Sub-vdbe for trigger program */ int iEndTrigger = 0; /* Label to jump to if WHEN is false */ Parse sSubParse; /* Parse context for sub-vdbe */ - assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); + assert( pTrigger->zName==0 || IsSharedSchema(pParse->db) + || pTab==tableOfTrigger(pTrigger) + ); assert( pTop->pVdbe ); /* Allocate the TriggerPrg and SubProgram objects. To ensure that they ** are freed if an error occurs, link them into the Parse.pTriggerPrg ** list of the top-level Parse object sooner rather than later. */ @@ -1189,11 +1222,11 @@ sSubParse.pTriggerTab = pTab; sSubParse.pToplevel = pTop; sSubParse.zAuthContext = pTrigger->zName; sSubParse.eTriggerOp = pTrigger->op; sSubParse.nQueryLoop = pParse->nQueryLoop; - sSubParse.disableVtab = pParse->disableVtab; + sSubParse.prepFlags = pParse->prepFlags; v = sqlite3GetVdbe(&sSubParse); if( v ){ VdbeComment((v, "Start: %s.%s (%s %s%s%s ON %s)", pTrigger->zName, onErrorText(orconf), @@ -1268,11 +1301,13 @@ int orconf /* ON CONFLICT algorithm. */ ){ Parse *pRoot = sqlite3ParseToplevel(pParse); TriggerPrg *pPrg; - assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); + assert( pTrigger->zName==0 || IsSharedSchema(pParse->db) + || pTab==tableOfTrigger(pTrigger) + ); /* It may be that this trigger has already been coded (or is in the ** process of being coded). If this is the case, then an entry with ** a matching TriggerPrg.pTrigger field will be present somewhere ** in the Parse.pTriggerPrg list. Search for such an entry. */ Index: src/update.c ================================================================== --- src/update.c +++ src/update.c @@ -57,15 +57,18 @@ ** (not a virtual table) then the value might have been stored as an ** integer. In that case, add an OP_RealAffinity opcode to make sure ** it has been converted into REAL. 
*/ void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){ + Column *pCol; assert( pTab!=0 ); - if( !IsView(pTab) ){ + assert( pTab->nCol>i ); + pCol = &pTab->aCol[i]; + if( pCol->iDflt ){ sqlite3_value *pValue = 0; u8 enc = ENC(sqlite3VdbeDb(v)); - Column *pCol = &pTab->aCol[i]; + assert( !IsView(pTab) ); VdbeComment((v, "%s.%s", pTab->zName, pCol->zCnName)); assert( inCol ); sqlite3ValueFromExpr(sqlite3VdbeDb(v), sqlite3ColumnExpr(pTab,pCol), enc, pCol->affinity, &pValue); @@ -72,11 +75,11 @@ if( pValue ){ sqlite3VdbeAppendP4(v, pValue, P4_MEM); } } #ifndef SQLITE_OMIT_FLOATING_POINT - if( pTab->aCol[i].affinity==SQLITE_AFF_REAL && !IsVirtual(pTab) ){ + if( pCol->affinity==SQLITE_AFF_REAL && !IsVirtual(pTab) ){ sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); } #endif } @@ -459,21 +462,10 @@ ** column to be updated, make sure we have authorization to change ** that column. */ chngRowid = chngPk = 0; for(i=0; inExpr; i++){ -#if defined(SQLITE_ENABLE_NOOP_UPDATE) && !defined(SQLITE_OMIT_FLAG_PRAGMAS) - if( db->flags & SQLITE_NoopUpdate ){ - Token x; - sqlite3ExprDelete(db, pChanges->a[i].pExpr); - x.z = pChanges->a[i].zEName; - x.n = sqlite3Strlen30(x.z); - pChanges->a[i].pExpr = - sqlite3PExpr(pParse, TK_UPLUS, sqlite3ExprAlloc(db, TK_ID, &x, 0), 0); - if( db->mallocFailed ) goto update_cleanup; - } -#endif u8 hCol = sqlite3StrIHash(pChanges->a[i].zEName); /* If this is an UPDATE with a FROM clause, do not resolve expressions ** here. The call to sqlite3Select() below will do that. */ if( nChangeFrom==0 && sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ goto update_cleanup; Index: src/upsert.c ================================================================== --- src/upsert.c +++ src/upsert.c @@ -164,10 +164,11 @@ Expr *pExpr; sCol[0].u.zToken = (char*)pIdx->azColl[ii]; if( pIdx->aiColumn[ii]==XN_EXPR ){ assert( pIdx->aColExpr!=0 ); assert( pIdx->aColExpr->nExpr>ii ); + assert( pIdx->bHasExpr ); pExpr = pIdx->aColExpr->a[ii].pExpr; if( pExpr->op!=TK_COLLATE ){ sCol[0].pLeft = pExpr; pExpr = &sCol[0]; } Index: src/vacuum.c ================================================================== --- src/vacuum.c +++ src/vacuum.c @@ -123,10 +123,11 @@ if( iDb<0 ) iDb = 0; #endif } if( iDb!=1 ){ int iIntoReg = 0; + sqlite3SchemaWritable(pParse, iDb); if( pInto && sqlite3ResolveSelfReference(pParse,0,0,pInto,0)==0 ){ iIntoReg = ++pParse->nMem; sqlite3ExprCode(pParse, pInto, iIntoReg); } sqlite3VdbeAddOp2(v, OP_Vacuum, iDb, iIntoReg); @@ -159,10 +160,11 @@ int isMemDb; /* True if vacuuming a :memory: database */ int nRes; /* Bytes of reserved space at the end of each page */ int nDb; /* Number of attached databases */ const char *zDbMain; /* Schema name of database to vacuum */ const char *zOut; /* Name of output file */ + u32 pgflags = PAGER_SYNCHRONOUS_OFF; /* sync flags for output db */ if( !db->autoCommit ){ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); return SQLITE_ERROR; /* IMP: R-12218-18073 */ } @@ -230,16 +232,21 @@ rc = SQLITE_ERROR; sqlite3SetString(pzErrMsg, db, "output file already exists"); goto end_of_vacuum; } db->mDbFlags |= DBFLAG_VacuumInto; + + /* For a VACUUM INTO, the pager-flags are set to the same values as + ** they are for the database being vacuumed, except that PAGER_CACHESPILL + ** is always set. 
*/ + pgflags = db->aDb[iDb].safety_level | (db->flags & PAGER_FLAGS_MASK); } nRes = sqlite3BtreeGetRequestedReserve(pMain); sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size); sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain,0)); - sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF|PAGER_CACHESPILL); + sqlite3BtreeSetPagerFlags(pTemp, pgflags|PAGER_CACHESPILL); /* Begin a transaction and take an exclusive lock on the main database ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below, ** to ensure that we do not try to change the page-size on a WAL database. */ @@ -388,11 +395,10 @@ ** by manually setting the autoCommit flag to true and detaching the ** vacuum database. The vacuum_db journal file is deleted when the pager ** is closed by the DETACH. */ db->autoCommit = 1; - assert( db->eConcurrent==0 ); if( pDb ){ sqlite3BtreeClose(pDb->pBt); pDb->pBt = 0; pDb->pSchema = 0; Index: src/vdbe.c ================================================================== --- src/vdbe.c +++ src/vdbe.c @@ -2586,23 +2586,94 @@ goto jump_to_p2; } break; } -/* Opcode: IsNullOrType P1 P2 P3 * * -** Synopsis: if typeof(r[P1]) IN (P3,5) goto P2 +/* Opcode: IsType P1 P2 P3 P4 P5 +** Synopsis: if typeof(P1.P3) in P5 goto P2 ** -** Jump to P2 if the value in register P1 is NULL or has a datatype P3. -** P3 is an integer which should be one of SQLITE_INTEGER, SQLITE_FLOAT, -** SQLITE_BLOB, SQLITE_NULL, or SQLITE_TEXT. +** Jump to P2 if the type of a column in a btree is one of the types specified +** by the P5 bitmask. +** +** P1 is normally a cursor on a btree for which the row decode cache is +** valid through at least column P3. In other words, there should have been +** a prior OP_Column for column P3 or greater. If the cursor is not valid, +** then this opcode might give spurious results. +** The the btree row has fewer than P3 columns, then use P4 as the +** datatype. +** +** If P1 is -1, then P3 is a register number and the datatype is taken +** from the value in that register. +** +** P5 is a bitmask of data types. SQLITE_INTEGER is the least significant +** (0x01) bit. SQLITE_FLOAT is the 0x02 bit. SQLITE_TEXT is 0x04. +** SQLITE_BLOB is 0x08. SQLITE_NULL is 0x10. +** +** Take the jump to address P2 if and only if the datatype of the +** value determined by P1 and P3 corresponds to one of the bits in the +** P5 bitmask. 
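An aside on the new opcode documented above (not part of the patch): a caller emitting OP_IsType builds the P5 operand from the per-type bits listed in the comment. A sketch assuming compilation inside the SQLite core; the helper name jumpIfNullOrInt is hypothetical.

#include "sqliteInt.h"

/* Emit an OP_IsType that jumps to dest when column iCol of cursor iCur is
** either NULL (0x10) or an INTEGER (0x01).  P4 supplies the datatype to
** assume if the stored row has fewer than iCol+1 columns, per the opcode
** documentation above. */
static void jumpIfNullOrInt(Vdbe *v, int iCur, int iCol, int dest){
  sqlite3VdbeAddOp4Int(v, OP_IsType, iCur, dest, iCol, SQLITE_INTEGER);
  sqlite3VdbeChangeP5(v, 0x10|0x01);
}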
+** */ -case OP_IsNullOrType: { /* jump, in1 */ - int doTheJump; - pIn1 = &aMem[pOp->p1]; - doTheJump = (pIn1->flags & MEM_Null)!=0 || sqlite3_value_type(pIn1)==pOp->p3; - VdbeBranchTaken( doTheJump, 2); - if( doTheJump ) goto jump_to_p2; +case OP_IsType: { /* jump */ + VdbeCursor *pC; + u16 typeMask; + u32 serialType; + + assert( pOp->p1>=(-1) && pOp->p1nCursor ); + assert( pOp->p1>=0 || (pOp->p3>=0 && pOp->p3<=(p->nMem+1 - p->nCursor)) ); + if( pOp->p1>=0 ){ + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pOp->p3>=0 ); + if( pOp->p3nHdrParsed ){ + serialType = pC->aType[pOp->p3]; + if( serialType>=12 ){ + if( serialType&1 ){ + typeMask = 0x04; /* SQLITE_TEXT */ + }else{ + typeMask = 0x08; /* SQLITE_BLOB */ + } + }else{ + static const unsigned char aMask[] = { + 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x2, + 0x01, 0x01, 0x10, 0x10 + }; + testcase( serialType==0 ); + testcase( serialType==1 ); + testcase( serialType==2 ); + testcase( serialType==3 ); + testcase( serialType==4 ); + testcase( serialType==5 ); + testcase( serialType==6 ); + testcase( serialType==7 ); + testcase( serialType==8 ); + testcase( serialType==9 ); + testcase( serialType==10 ); + testcase( serialType==11 ); + typeMask = aMask[serialType]; + } + }else{ + typeMask = 1 << (pOp->p4.i - 1); + testcase( typeMask==0x01 ); + testcase( typeMask==0x02 ); + testcase( typeMask==0x04 ); + testcase( typeMask==0x08 ); + testcase( typeMask==0x10 ); + } + }else{ + assert( memIsValid(&aMem[pOp->p3]) ); + typeMask = 1 << (sqlite3_value_type((sqlite3_value*)&aMem[pOp->p3])-1); + testcase( typeMask==0x01 ); + testcase( typeMask==0x02 ); + testcase( typeMask==0x04 ); + testcase( typeMask==0x08 ); + testcase( typeMask==0x10 ); + } + VdbeBranchTaken( (typeMask & pOp->p5)!=0, 2); + if( typeMask & pOp->p5 ){ + goto jump_to_p2; + } break; } /* Opcode: ZeroOrNull P1 P2 P3 * * ** Synopsis: r[P2] = 0 OR NULL @@ -2699,23 +2770,25 @@ ** Synopsis: r[P3]=PX cursor P1 column P2 ** ** Interpret the data that cursor P1 points to as a structure built using ** the MakeRecord instruction. (See the MakeRecord opcode for additional ** information about the format of the data.) Extract the P2-th column -** from this record. If there are less that (P2+1) +** from this record. If there are less than (P2+1) ** values in the record, extract a NULL. ** ** The value extracted is stored in register P3. ** ** If the record contains fewer than P2 fields, then extract a NULL. Or, ** if the P4 argument is a P4_MEM use the value of the P4 argument as ** the result. ** -** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 then -** the result is guaranteed to only be used as the argument of a length() -** or typeof() function, respectively. The loading of large blobs can be -** skipped for length() and all content loading can be skipped for typeof(). +** If the OPFLAG_LENGTHARG bit is set in P5 then the result is guaranteed +** to only be used by the length() function or the equivalent. The content +** of large blobs is not loaded, thus saving CPU cycles. If the +** OPFLAG_TYPEOFARG bit is set then the result will only be used by the +** typeof() function or the IS NULL or IS NOT NULL operators or the +** equivalent. In this case, all content loading can be omitted. */ case OP_Column: { u32 p2; /* column number to retrieve */ VdbeCursor *pC; /* The VDBE cursor */ BtCursor *pCrsr; /* The B-Tree cursor corresponding to pC */ @@ -3618,11 +3691,10 @@ /* Determine whether or not this is a transaction savepoint. 
If so, ** and this is a RELEASE command, then the current transaction ** is committed. */ int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint; - assert( db->eConcurrent==0 || db->isTransactionSavepoint==0 ); if( isTransaction && p1==SAVEPOINT_RELEASE ){ if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ goto vdbe_return; } db->autoCommit = 1; @@ -3705,73 +3777,54 @@ goto vdbe_return; } break; } -/* Opcode: AutoCommit P1 P2 P3 * * +/* Opcode: AutoCommit P1 P2 * * * ** ** Set the database auto-commit flag to P1 (1 or 0). If P2 is true, roll ** back any currently active btree transactions. If there are any active ** VMs (apart from this one), then a ROLLBACK fails. A COMMIT fails if ** there are active writing VMs or active VMs that use shared cache. ** -** If P3 is non-zero, then this instruction is being executed as part of -** a "BEGIN CONCURRENT" command. -** ** This instruction causes the VM to halt. */ case OP_AutoCommit: { int desiredAutoCommit; int iRollback; - int bConcurrent; - int hrc; desiredAutoCommit = pOp->p1; iRollback = pOp->p2; - bConcurrent = pOp->p3; assert( desiredAutoCommit==1 || desiredAutoCommit==0 ); assert( desiredAutoCommit==1 || iRollback==0 ); - assert( desiredAutoCommit==0 || bConcurrent==0 ); - assert( db->autoCommit==0 || db->eConcurrent==CONCURRENT_NONE ); assert( db->nVdbeActive>0 ); /* At least this one VM is active */ assert( p->bIsReader ); if( desiredAutoCommit!=db->autoCommit ){ if( iRollback ){ assert( desiredAutoCommit==1 ); sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); db->autoCommit = 1; - db->eConcurrent = CONCURRENT_NONE; - }else if( desiredAutoCommit - && (db->nVdbeWrite>0 || (db->eConcurrent && db->nVdbeActive>1)) ){ - /* A transaction may only be committed if there are no other active - ** writer VMs. If the transaction is CONCURRENT, then it may only be - ** committed if there are no active VMs at all (readers or writers). - ** - ** If this instruction is a COMMIT and the transaction may not be - ** committed due to one of the conditions above, return an error - ** indicating that other VMs must complete before the COMMIT can - ** be processed. */ + }else if( desiredAutoCommit && db->nVdbeWrite>0 ){ + /* If this instruction implements a COMMIT and other VMs are writing + ** return an error indicating that the other VMs must complete first. 
+ */ sqlite3VdbeError(p, "cannot commit transaction - " "SQL statements in progress"); rc = SQLITE_BUSY; goto abort_due_to_error; }else if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ goto vdbe_return; }else{ db->autoCommit = (u8)desiredAutoCommit; } - hrc = sqlite3VdbeHalt(p); - if( (hrc & 0xFF)==SQLITE_BUSY ){ + if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){ p->pc = (int)(pOp - aOp); db->autoCommit = (u8)(1-desiredAutoCommit); - p->rc = hrc; - rc = SQLITE_BUSY; + p->rc = rc = SQLITE_BUSY; goto vdbe_return; } - assert( bConcurrent==CONCURRENT_NONE || bConcurrent==CONCURRENT_OPEN ); - db->eConcurrent = (u8)bConcurrent; sqlite3CloseSavepoints(db); if( p->rc==SQLITE_OK ){ rc = SQLITE_DONE; }else{ rc = SQLITE_ERROR; @@ -3848,17 +3901,11 @@ } pDb = &db->aDb[pOp->p1]; pBt = pDb->pBt; if( pBt ){ - if( p->bSchemaVersion ){ - sqlite3BtreeIsSchemaVersion(pBt, p->aSchemaVersion); - } rc = sqlite3BtreeBeginTrans(pBt, pOp->p2, &iMeta); - if( p->bSchemaVersion ){ - sqlite3BtreeIsSchemaVersion(pBt, 0); - } testcase( rc==SQLITE_BUSY_SNAPSHOT ); testcase( rc==SQLITE_BUSY_RECOVERY ); if( rc!=SQLITE_OK ){ if( (rc&0xff)==SQLITE_BUSY ){ p->pc = (int)(pOp - aOp); @@ -3866,14 +3913,10 @@ goto vdbe_return; } goto abort_due_to_error; } - if( p->bSchemaVersion ){ - p->aSchemaVersion[SCHEMA_VERSION_BEGINTRANSDONE] = sqlite3STimeNow(); - } - if( p->usesStmtJournal && pOp->p2 && (db->autoCommit==0 || db->nVdbeRead>1) ){ assert( sqlite3BtreeTxnState(pBt)==SQLITE_TXN_WRITE ); @@ -3898,17 +3941,10 @@ assert( pOp->p5==0 || pOp->p4type==P4_INT32 ); if( rc==SQLITE_OK && pOp->p5 && (iMeta!=pOp->p3 || pDb->pSchema->iGeneration!=pOp->p4.i) ){ - /* - ** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema - ** version is checked to ensure that the schema has not changed since the - ** SQL statement was prepared. - */ - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed"); /* If the schema-cookie from the database file matches the cookie ** stored with the in-memory representation of the schema, do ** not reload the schema from the database file. ** ** If virtual-tables are in use, this is not just an optimization. @@ -3921,10 +3957,17 @@ ** a v-table method. */ if( db->aDb[pOp->p1].pSchema->schema_cookie!=iMeta ){ sqlite3ResetOneSchema(db, pOp->p1); } + /* + ** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema + ** version is checked to ensure that the schema has not changed since the + ** SQL statement was prepared. + */ + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed"); p->expired = 1; rc = SQLITE_SCHEMA; /* Set changeCntOn to 0 to prevent the value returned by sqlite3_changes() ** from being modified in sqlite3VdbeHalt(). If this statement is @@ -3961,13 +4004,10 @@ assert( DbMaskTest(p->btreeMask, iDb) ); sqlite3BtreeGetMeta(db->aDb[iDb].pBt, iCookie, (u32 *)&iMeta); pOut = out2Prerelease(p, pOp); pOut->u.i = iMeta; - if( p->bSchemaVersion ){ - sqlite3SchemaVersionLog(p); - } break; } /* Opcode: SetCookie P1 P2 P3 * P5 ** @@ -3993,21 +4033,10 @@ assert( DbMaskTest(p->btreeMask, pOp->p1) ); assert( p->readOnly==0 ); pDb = &db->aDb[pOp->p1]; assert( pDb->pBt!=0 ); assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) ); -#ifndef SQLITE_OMIT_CONCURRENT - if( db->eConcurrent - && (pOp->p2==BTREE_USER_VERSION || pOp->p2==BTREE_APPLICATION_ID) - ){ - rc = SQLITE_ERROR; - sqlite3VdbeError(p, "cannot modify %s within CONCURRENT transaction", - pOp->p2==BTREE_USER_VERSION ? 
"user_version" : "application_id" - ); - goto abort_due_to_error; - } -#endif /* See note about index shifting on OP_ReadCookie */ rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, pOp->p3); if( pOp->p2==BTREE_SCHEMA_VERSION ){ /* When the schema cookie changes, record the new cookie internally */ *(u32*)&pDb->pSchema->schema_cookie = *(u32*)&pOp->p3 - pOp->p5; @@ -4153,15 +4182,10 @@ assert( DbMaskTest(p->btreeMask, iDb) ); pDb = &db->aDb[iDb]; pX = pDb->pBt; assert( pX!=0 ); if( pOp->opcode==OP_OpenWrite ){ -#ifndef SQLITE_OMIT_CONCURRENT - if( db->eConcurrent==CONCURRENT_OPEN && p2==1 && iDb!=1 ){ - db->eConcurrent = CONCURRENT_SCHEMA; - } -#endif assert( OPFLAG_FORDELETE==BTREE_FORDELETE ); wrFlag = BTREE_WRCSR | (pOp->p5 & OPFLAG_FORDELETE); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( pDb->pSchema->file_format < p->minWriteFileFormat ){ p->minWriteFileFormat = pDb->pSchema->file_format; @@ -4699,11 +4723,17 @@ assert( oc!=OP_SeekGE || r.default_rc==+1 ); assert( oc!=OP_SeekLT || r.default_rc==+1 ); r.aMem = &aMem[pOp->p3]; #ifdef SQLITE_DEBUG - { int i; for(i=0; i0 ) REGISTER_TRACE(pOp->p3+i, &r.aMem[i]); + } + } #endif r.eqSeen = 0; rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, &r, &res); if( rc!=SQLITE_OK ){ goto abort_due_to_error; @@ -4762,54 +4792,76 @@ } break; } -/* Opcode: SeekScan P1 P2 * * * +/* Opcode: SeekScan P1 P2 * * P5 ** Synopsis: Scan-ahead up to P1 rows ** ** This opcode is a prefix opcode to OP_SeekGE. In other words, this ** opcode must be immediately followed by OP_SeekGE. This constraint is ** checked by assert() statements. ** ** This opcode uses the P1 through P4 operands of the subsequent ** OP_SeekGE. In the text that follows, the operands of the subsequent ** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4. Only -** the P1 and P2 operands of this opcode are also used, and are called -** This.P1 and This.P2. +** the P1, P2 and P5 operands of this opcode are also used, and are called +** This.P1, This.P2 and This.P5. ** ** This opcode helps to optimize IN operators on a multi-column index ** where the IN operator is on the later terms of the index by avoiding ** unnecessary seeks on the btree, substituting steps to the next row ** of the b-tree instead. A correct answer is obtained if this opcode ** is omitted or is a no-op. ** ** The SeekGE.P3 and SeekGE.P4 operands identify an unpacked key which ** is the desired entry that we want the cursor SeekGE.P1 to be pointing -** to. Call this SeekGE.P4/P5 row the "target". +** to. Call this SeekGE.P3/P4 row the "target". ** ** If the SeekGE.P1 cursor is not currently pointing to a valid row, ** then this opcode is a no-op and control passes through into the OP_SeekGE. ** ** If the SeekGE.P1 cursor is pointing to a valid row, then that row ** might be the target row, or it might be near and slightly before the -** target row. This opcode attempts to position the cursor on the target -** row by, perhaps by invoking sqlite3BtreeStep() on the cursor -** between 0 and This.P1 times. -** -** There are three possible outcomes from this opcode:
      -** -**
    1. If after This.P1 steps, the cursor is still pointing to a place that -** is earlier in the btree than the target row, then fall through -** into the subsquence OP_SeekGE opcode. -** -**
    2. If the cursor is successfully moved to the target row by 0 or more -** sqlite3BtreeNext() calls, then jump to This.P2, which will land just -** past the OP_IdxGT or OP_IdxGE opcode that follows the OP_SeekGE. -** -**
    3. If the cursor ends up past the target row (indicating that the target -** row does not exist in the btree) then jump to SeekOP.P2. +** target row, or it might be after the target row. If the cursor is +** currently before the target row, then this opcode attempts to position +** the cursor on or after the target row by invoking sqlite3BtreeStep() +** on the cursor between 1 and This.P1 times. +** +** The This.P5 parameter is a flag that indicates what to do if the +** cursor ends up pointing at a valid row that is past the target +** row. If This.P5 is false (0) then a jump is made to SeekGE.P2. If +** This.P5 is true (non-zero) then a jump is made to This.P2. The P5==0 +** case occurs when there are no inequality constraints to the right of +** the IN constraing. The jump to SeekGE.P2 ends the loop. The P5!=0 case +** occurs when there are inequality constraints to the right of the IN +** operator. In that case, the This.P2 will point either directly to or +** to setup code prior to the OP_IdxGT or OP_IdxGE opcode that checks for +** loop terminate. +** +** Possible outcomes from this opcode:
        +** +**
      1. If the cursor is initally not pointed to any valid row, then +** fall through into the subsequent OP_SeekGE opcode. +** +**
      2. If the cursor is left pointing to a row that is before the target +** row, even after making as many as This.P1 calls to +** sqlite3BtreeNext(), then also fall through into OP_SeekGE. +** +**
      3. If the cursor is left pointing at the target row, either because it +** was at the target row to begin with or because one or more +** sqlite3BtreeNext() calls moved the cursor to the target row, +** then jump to This.P2. +** +**
      4. If the cursor started out before the target row and a call +** to sqlite3BtreeNext() moved the cursor off the end of the index +** (indicating that the target row definitely does not exist in the +** btree) then jump to SeekGE.P2, ending the loop. +** +**
      5. If the cursor ends up on a valid row that is past the target row +** (indicating that the target row does not exist in the btree) then +** jump to SeekOP.P2 if This.P5==0 or to This.P2 if This.P5>0. **
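The outcome list above reduces to a small decision loop. The following standalone C model is an editor's sketch, not the VDBE implementation: cmp() stands in for sqlite3VdbeIdxKeyCompare(), next() for sqlite3BtreeNext(), and outcome 1 (cursor not pointing at any row) is assumed to have been handled before the loop is entered.

typedef enum {
  FALL_THROUGH_TO_SEEKGE,  /* outcome 2 (outcome 1 is checked before the loop) */
  JUMP_THIS_P2,            /* outcome 3, and outcome 5 when This.P5!=0 */
  JUMP_SEEKGE_P2           /* outcome 4, and outcome 5 when This.P5==0 */
} SeekScanOutcome;

/* cmp() returns <0 if the cursor is before the target key, 0 if it is on
** the target and >0 if it is past it.  next() steps the cursor and returns
** non-zero when it runs off the end of the index. */
static SeekScanOutcome seekScanModel(
  void *pCur,              /* Opaque cursor passed through to cmp()/next() */
  int (*cmp)(void*),
  int (*next)(void*),
  int nStep,               /* This.P1: maximum number of steps to take */
  int p5                   /* This.P5: non-zero if inequality terms follow */
){
  while(1){
    int res = cmp(pCur);
    if( res>0 && p5==0 ) return JUMP_SEEKGE_P2;
    if( res>=0 ) return JUMP_THIS_P2;
    if( nStep--<=0 ) return FALL_THROUGH_TO_SEEKGE;
    if( next(pCur) ) return JUMP_SEEKGE_P2;
  }
}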
      */ case OP_SeekScan: { VdbeCursor *pC; int res; @@ -4816,18 +4868,29 @@ int nStep; UnpackedRecord r; assert( pOp[1].opcode==OP_SeekGE ); - /* pOp->p2 points to the first instruction past the OP_IdxGT that - ** follows the OP_SeekGE. */ + /* If pOp->p5 is clear, then pOp->p2 points to the first instruction past the + ** OP_IdxGT that follows the OP_SeekGE. Otherwise, it points to the first + ** opcode past the OP_SeekGE itself. */ assert( pOp->p2>=(int)(pOp-aOp)+2 ); - assert( aOp[pOp->p2-1].opcode==OP_IdxGT || aOp[pOp->p2-1].opcode==OP_IdxGE ); - testcase( aOp[pOp->p2-1].opcode==OP_IdxGE ); - assert( pOp[1].p1==aOp[pOp->p2-1].p1 ); - assert( pOp[1].p2==aOp[pOp->p2-1].p2 ); - assert( pOp[1].p3==aOp[pOp->p2-1].p3 ); +#ifdef SQLITE_DEBUG + if( pOp->p5==0 ){ + /* There are no inequality constraints following the IN constraint. */ + assert( pOp[1].p1==aOp[pOp->p2-1].p1 ); + assert( pOp[1].p2==aOp[pOp->p2-1].p2 ); + assert( pOp[1].p3==aOp[pOp->p2-1].p3 ); + assert( aOp[pOp->p2-1].opcode==OP_IdxGT + || aOp[pOp->p2-1].opcode==OP_IdxGE ); + testcase( aOp[pOp->p2-1].opcode==OP_IdxGE ); + }else{ + /* There are inequality constraints. */ + assert( pOp->p2==(int)(pOp-aOp)+2 ); + assert( aOp[pOp->p2-1].opcode==OP_SeekGE ); + } +#endif assert( pOp->p1>0 ); pC = p->apCsr[pOp[1].p1]; assert( pC!=0 ); assert( pC->eCurType==CURTYPE_BTREE ); @@ -4857,22 +4920,24 @@ #endif res = 0; /* Not needed. Only used to silence a warning. */ while(1){ rc = sqlite3VdbeIdxKeyCompare(db, pC, &r, &res); if( rc ) goto abort_due_to_error; - if( res>0 ){ + if( res>0 && pOp->p5==0 ){ seekscan_search_fail: + /* Jump to SeekGE.P2, ending the loop */ #ifdef SQLITE_DEBUG if( db->flags&SQLITE_VdbeTrace ){ printf("... %d steps and then skip\n", pOp->p1 - nStep); } #endif VdbeBranchTaken(1,3); pOp++; goto jump_to_p2; } - if( res==0 ){ + if( res>=0 ){ + /* Jump to This.P2, bypassing the OP_SeekGE opcode */ #ifdef SQLITE_DEBUG if( db->flags&SQLITE_VdbeTrace ){ printf("... %d steps and then success\n", pOp->p1 - nStep); } #endif @@ -7605,11 +7670,10 @@ || eNew==PAGER_JOURNALMODE_TRUNCATE || eNew==PAGER_JOURNALMODE_PERSIST || eNew==PAGER_JOURNALMODE_OFF || eNew==PAGER_JOURNALMODE_MEMORY || eNew==PAGER_JOURNALMODE_WAL - || eNew==PAGER_JOURNALMODE_WAL2 || eNew==PAGER_JOURNALMODE_QUERY ); assert( pOp->p1>=0 && pOp->p1nDb ); assert( p->readOnly==0 ); @@ -7624,63 +7688,52 @@ zFilename = sqlite3PagerFilename(pPager, 1); /* Do not allow a transition to journal_mode=WAL for a database ** in temporary storage or if the VFS does not support shared memory */ - if( isWalMode(eNew) + if( eNew==PAGER_JOURNALMODE_WAL && (sqlite3Strlen30(zFilename)==0 /* Temp file */ || !sqlite3PagerWalSupported(pPager)) /* No shared-memory support */ ){ eNew = eOld; } - if( eNew!=eOld && (isWalMode(eNew) || isWalMode(eOld)) ){ - - /* Prevent changing directly to wal2 from wal mode. And vice versa. */ - if( isWalMode(eNew) && isWalMode(eOld) ){ - rc = SQLITE_ERROR; - sqlite3VdbeError(p, "cannot change from %s to %s mode", - sqlite3JournalModename(eOld), sqlite3JournalModename(eNew) - ); - goto abort_due_to_error; - } - - /* Prevent switching into or out of wal/wal2 mode mid-transaction */ + if( (eNew!=eOld) + && (eOld==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_WAL) + ){ if( !db->autoCommit || db->nVdbeRead>1 ){ rc = SQLITE_ERROR; sqlite3VdbeError(p, "cannot change %s wal mode from within a transaction", (eNew==PAGER_JOURNALMODE_WAL ? "into" : "out of") ); goto abort_due_to_error; - } - - if( isWalMode(eOld) ){ - /* If leaving WAL mode, close the log file. 
If successful, the call - ** to PagerCloseWal() checkpoints and deletes the write-ahead-log - ** file. An EXCLUSIVE lock may still be held on the database file - ** after a successful return. - */ - rc = sqlite3PagerCloseWal(pPager, db); - if( rc==SQLITE_OK ){ - sqlite3PagerSetJournalMode(pPager, eNew); - } - }else if( eOld==PAGER_JOURNALMODE_MEMORY ){ - /* Cannot transition directly from MEMORY to WAL. Use mode OFF - ** as an intermediate */ - sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_OFF); - } - - /* Open a transaction on the database file. Regardless of the journal - ** mode, this transaction always uses a rollback journal. - */ - assert( sqlite3BtreeTxnState(pBt)!=SQLITE_TXN_WRITE ); - if( rc==SQLITE_OK ){ - /* 1==rollback, 2==wal, 3==wal2 */ - rc = sqlite3BtreeSetVersion(pBt, - 1 + isWalMode(eNew) + (eNew==PAGER_JOURNALMODE_WAL2) - ); + }else{ + + if( eOld==PAGER_JOURNALMODE_WAL ){ + /* If leaving WAL mode, close the log file. If successful, the call + ** to PagerCloseWal() checkpoints and deletes the write-ahead-log + ** file. An EXCLUSIVE lock may still be held on the database file + ** after a successful return. + */ + rc = sqlite3PagerCloseWal(pPager, db); + if( rc==SQLITE_OK ){ + sqlite3PagerSetJournalMode(pPager, eNew); + } + }else if( eOld==PAGER_JOURNALMODE_MEMORY ){ + /* Cannot transition directly from MEMORY to WAL. Use mode OFF + ** as an intermediate */ + sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_OFF); + } + + /* Open a transaction on the database file. Regardless of the journal + ** mode, this transaction always uses a rollback journal. + */ + assert( sqlite3BtreeTxnState(pBt)!=SQLITE_TXN_WRITE ); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeSetVersion(pBt, (eNew==PAGER_JOURNALMODE_WAL ? 2 : 1)); + } } } #endif /* ifndef SQLITE_OMIT_WAL */ if( rc ) eNew = eOld; @@ -7812,15 +7865,10 @@ ** P4 contains a pointer to the name of the table being locked. This is only ** used to generate an error message if the lock cannot be obtained. */ case OP_TableLock: { u8 isWriteLock = (u8)pOp->p3; -#ifndef SQLITE_OMIT_CONCURRENT - if( isWriteLock && db->eConcurrent && pOp->p2==1 && pOp->p1!=1 ){ - db->eConcurrent = CONCURRENT_SCHEMA; - } -#endif if( isWriteLock || 0==(db->flags&SQLITE_ReadUncommit) ){ int p1 = pOp->p1; assert( p1>=0 && p1nDb ); assert( DbMaskTest(p->btreeMask, p1) ); assert( isWriteLock==0 || isWriteLock==1 ); @@ -8519,15 +8567,10 @@ ** using the X argument when X begins with "--" and invoking ** sqlite3_expanded_sql(P) otherwise. 
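The journal-mode transition rules enforced by the OP_JournalMode hunk above can be observed from the public API: the mode may only be switched into or out of WAL while no transaction is open, and the pragma reports the mode actually in effect. A minimal sketch under those assumptions (editor's illustration; "test.db" is a placeholder path and error checking is trimmed):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open("test.db", &db);

  /* Allowed: no transaction is open, so the switch to WAL succeeds. */
  sqlite3_prepare_v2(db, "PRAGMA journal_mode=WAL", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("journal_mode is now %s\n", (const char*)sqlite3_column_text(pStmt,0));
  }
  sqlite3_finalize(pStmt);

  /* Refused: inside an explicit transaction the code above raises
  ** "cannot change out of wal mode from within a transaction". */
  sqlite3_exec(db, "BEGIN", 0, 0, 0);
  if( sqlite3_exec(db, "PRAGMA journal_mode=DELETE", 0, 0, 0)!=SQLITE_OK ){
    printf("refused: %s\n", sqlite3_errmsg(db));
  }
  sqlite3_exec(db, "ROLLBACK", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}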
*/ assert( pOp->p4.z==0 || strncmp(pOp->p4.z, "-" "- ", 3)==0 ); - if( p->bSchemaVersion ){ - memset(p->aSchemaVersion, 0, sizeof(p->aSchemaVersion)); - p->aSchemaVersion[SCHEMA_VERSION_START] = sqlite3STimeNow(); - } - /* OP_Init is always instruction 0 */ assert( pOp==p->aOp || pOp->opcode==OP_Trace ); #ifndef SQLITE_OMIT_TRACE if( (db->mTrace & (SQLITE_TRACE_STMT|SQLITE_TRACE_LEGACY))!=0 Index: src/vdbe.h ================================================================== --- src/vdbe.h +++ src/vdbe.h @@ -220,16 +220,17 @@ #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN) void sqlite3ExplainBreakpoint(const char*,const char*); #else # define sqlite3ExplainBreakpoint(A,B) /*no-op*/ #endif -void sqlite3VdbeAddParseSchemaOp(Vdbe*, int, char*, u16); +void sqlite3VdbeAddParseSchemaOp(Parse*,int,char*,u16); void sqlite3VdbeChangeOpcode(Vdbe*, int addr, u8); void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1); void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2); void sqlite3VdbeChangeP3(Vdbe*, int addr, int P3); void sqlite3VdbeChangeP5(Vdbe*, u16 P5); +void sqlite3VdbeTypeofColumn(Vdbe*, int); void sqlite3VdbeJumpHere(Vdbe*, int addr); void sqlite3VdbeJumpHereOrPopInst(Vdbe*, int addr); int sqlite3VdbeChangeToNoop(Vdbe*, int addr); int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op); #ifdef SQLITE_DEBUG @@ -390,10 +391,6 @@ #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) void sqlite3VdbePrintOp(FILE*, int, VdbeOp*); #endif -void sqlite3VdbeIsSchemaVersion(Vdbe*); -void sqlite3SchemaVersionLog(Vdbe *v); -u64 sqlite3STimeNow(); - #endif /* SQLITE_VDBE_H */ Index: src/vdbeInt.h ================================================================== --- src/vdbeInt.h +++ src/vdbeInt.h @@ -485,26 +485,12 @@ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS i64 *anExec; /* Number of times each op has been executed */ int nScan; /* Entries in aScan[] */ ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */ #endif - int bSchemaVersion; - u64 aSchemaVersion[8]; }; -#define SCHEMA_VERSION_START 0 /* OP_Init */ -#define SCHEMA_VERSION_AFTERWALTBR 1 /* After walTryBeginRead() loop */ -#define SCHEMA_VERSION_AFTEROPENWAL2 2 /* After walOpenWal2() */ -#define SCHEMA_VERSION_AFTERRESET 3 /* After pager_reset() */ -#define SCHEMA_VERSION_AFTERUNFETCH 4 /* After xUnfetch(0) */ -#define SCHEMA_VERSION_AFTERPCACHE 5 /* After setting the bitvec */ -#define SCHEMA_VERSION_AFTERLOCKBTREE 6 /* After lockBtree() */ -#define SCHEMA_VERSION_BEGINTRANSDONE 7 /* After BeginTrans() */ - -/* Call sqlite3_log() if "PRAGMA schema_version" is slower than this (in us) */ -#define SCHEMA_VERSION_TIMEOUT 2 - /* ** The following are allowed values for Vdbe.eVdbeState */ #define VDBE_INIT_STATE 0 /* Prepared statement under construction */ #define VDBE_READY_STATE 1 /* Ready to run but not yet started */ Index: src/vdbeapi.c ================================================================== --- src/vdbeapi.c +++ src/vdbeapi.c @@ -315,10 +315,13 @@ } assert( eType == aType[pVal->flags&MEM_AffMask] ); } #endif return aType[pVal->flags&MEM_AffMask]; +} +int sqlite3_value_encoding(sqlite3_value *pVal){ + return pVal->enc; } /* Return true if a parameter to xUpdate represents an unchanged column */ int sqlite3_value_nochange(sqlite3_value *pVal){ return (pVal->flags&(MEM_Null|MEM_Zero))==(MEM_Null|MEM_Zero); Index: src/vdbeaux.c ================================================================== --- src/vdbeaux.c +++ src/vdbeaux.c @@ -479,12 +479,14 @@ ** as having been used. 
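The sqlite3_value_encoding() interface added in the vdbeapi.c hunk above reports the text encoding currently used by a value object (SQLITE_UTF8, SQLITE_UTF16LE or SQLITE_UTF16BE). A minimal sketch of an application-defined SQL function that surfaces it; the function name enc_of is arbitrary and only for illustration:

#include "sqlite3.h"

/* SELECT enc_of(x) returns 'UTF8', 'UTF16LE' or 'UTF16BE'. */
static void encOfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const char *zEnc = "?";
  (void)argc;
  switch( sqlite3_value_encoding(argv[0]) ){
    case SQLITE_UTF8:    zEnc = "UTF8";    break;
    case SQLITE_UTF16LE: zEnc = "UTF16LE"; break;
    case SQLITE_UTF16BE: zEnc = "UTF16BE"; break;
  }
  sqlite3_result_text(ctx, zEnc, -1, SQLITE_STATIC);
}

static int registerEncOf(sqlite3 *db){
  return sqlite3_create_function(db, "enc_of", 1,
             SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, encOfFunc, 0, 0);
}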
** ** The zWhere string must have been obtained from sqlite3_malloc(). ** This routine will take ownership of the allocated memory. */ -void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere, u16 p5){ +void sqlite3VdbeAddParseSchemaOp(Parse *pParse, int iDb, char *zWhere, u16 p5){ + Vdbe *p = pParse->pVdbe; int j; + sqlite3SchemaWritable(pParse, iDb); sqlite3VdbeAddOp4(p, OP_ParseSchema, iDb, 0, 0, zWhere, P4_DYNAMIC); sqlite3VdbeChangeP5(p, p5); for(j=0; jdb->nDb; j++) sqlite3VdbeUsesBtree(p, j); sqlite3MayAbort(p->pParse); } @@ -1152,10 +1154,22 @@ } void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ assert( p->nOp>0 || p->db->mallocFailed ); if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5; } + +/* +** If the previous opcode is an OP_Column that delivers results +** into register iDest, then add the OPFLAG_TYPEOFARG flag to that +** opcode. +*/ +void sqlite3VdbeTypeofColumn(Vdbe *p, int iDest){ + VdbeOp *pOp = sqlite3VdbeGetLastOp(p); + if( pOp->p3==iDest && pOp->opcode==OP_Column ){ + pOp->p5 |= OPFLAG_TYPEOFARG; + } +} /* ** Change the P2 operand of instruction addr so that it points to ** the address of the next instruction to be coded. */ @@ -1443,11 +1457,11 @@ assert( n!=P4_INT32 && n!=P4_VTAB ); assert( n<=0 ); if( p->db->mallocFailed ){ freeP4(p->db, n, pP4); }else{ - assert( pP4!=0 ); + assert( pP4!=0 || n==P4_DYNAMIC ); assert( p->nOp>0 ); pOp = &p->aOp[p->nOp-1]; assert( pOp->p4type==P4_NOTUSED ); pOp->p4type = n; pOp->p4.p = pP4; @@ -2798,31 +2812,14 @@ && sqlite3PagerIsMemdb(pPager)==0 ){ assert( i!=1 ); nTrans++; } - rc = sqlite3BtreeExclusiveLock(pBt); + rc = sqlite3PagerExclusiveLock(pPager); sqlite3BtreeLeave(pBt); } } - -#ifndef SQLITE_OMIT_CONCURRENT - if( db->eConcurrent && (rc & 0xFF)==SQLITE_BUSY ){ - /* An SQLITE_BUSY or SQLITE_BUSY_SNAPSHOT was encountered while - ** attempting to take the WRITER lock on a wal file. Release the - ** WRITER locks on all wal files and return early. */ - for(i=0; inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( sqlite3BtreeTxnState(pBt)==SQLITE_TXN_WRITE ){ - sqlite3BtreeEnter(pBt); - sqlite3PagerDropExclusiveLock(sqlite3BtreePager(pBt)); - sqlite3BtreeLeave(pBt); - } - } - } -#endif - if( rc!=SQLITE_OK ){ return rc; } /* If there are any write-transactions at all, invoke the commit hook */ @@ -3220,11 +3217,10 @@ ** so, abort any other statements this handle currently has active. */ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); sqlite3CloseSavepoints(db); db->autoCommit = 1; - db->eConcurrent = CONCURRENT_NONE; p->nChange = 0; } } } @@ -3259,13 +3255,13 @@ ** or hit an 'OR FAIL' constraint and there are no deferred foreign ** key constraints to hold up the transaction. This means a commit ** is required. 
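The sqlite3VdbeTypeofColumn() helper added in the vdbeaux.c hunk above is intended to be called by the code generator immediately after coding the OP_Column whose result only needs to be type-checked. A hypothetical call-site sketch (editor's illustration only; v, pParse, iCur and iCol are assumed to exist in the caller, and OPFLAG_TYPEOFARG is taken to mean that only the column's datatype, not its full content, is required):

  int iReg = ++pParse->nMem;                    /* Destination register */
  sqlite3VdbeAddOp3(v, OP_Column, iCur, iCol, iReg);
  sqlite3VdbeTypeofColumn(v, iReg);             /* Sets OPFLAG_TYPEOFARG on the OP_Column */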
*/ rc = vdbeCommit(db, p); } - if( (rc & 0xFF)==SQLITE_BUSY && p->readOnly ){ + if( rc==SQLITE_BUSY && p->readOnly ){ sqlite3VdbeLeave(p); - return rc; + return SQLITE_BUSY; }else if( rc!=SQLITE_OK ){ p->rc = rc; sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0; }else{ @@ -3286,11 +3282,10 @@ eStatementOp = SAVEPOINT_ROLLBACK; }else{ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); sqlite3CloseSavepoints(db); db->autoCommit = 1; - db->eConcurrent = CONCURRENT_NONE; p->nChange = 0; } } /* If eStatementOp is non-zero, then a statement transaction needs to @@ -3308,11 +3303,10 @@ p->zErrMsg = 0; } sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); sqlite3CloseSavepoints(db); db->autoCommit = 1; - db->eConcurrent = CONCURRENT_NONE; p->nChange = 0; } } /* If this was an INSERT, UPDATE or DELETE and no statement transaction @@ -4581,21 +4575,21 @@ assert( pPKey2->pKeyInfo->nAllField>=pPKey2->nField || CORRUPT_DB ); assert( pPKey2->pKeyInfo->aSortFlags!=0 ); assert( pPKey2->pKeyInfo->nKeyField>0 ); assert( idx1<=szHdr1 || CORRUPT_DB ); - do{ + while( 1 /*exit-by-break*/ ){ u32 serial_type; /* RHS is an integer */ if( pRhs->flags & (MEM_Int|MEM_IntReal) ){ testcase( pRhs->flags & MEM_Int ); testcase( pRhs->flags & MEM_IntReal ); serial_type = aKey1[idx1]; testcase( serial_type==12 ); if( serial_type>=10 ){ - rc = +1; + rc = serial_type==10 ? -1 : +1; }else if( serial_type==0 ){ rc = -1; }else if( serial_type==7 ){ sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); rc = -sqlite3IntFloatCompare(pRhs->u.i, mem1.u.r); @@ -4616,11 +4610,11 @@ if( serial_type>=10 ){ /* Serial types 12 or greater are strings and blobs (greater than ** numbers). Types 10 and 11 are currently "reserved for future ** use", so it doesn't really matter what the results of comparing ** them to numberic values are. */ - rc = +1; + rc = serial_type==10 ? -1 : +1; }else if( serial_type==0 ){ rc = -1; }else{ sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); if( serial_type==7 ){ @@ -4697,11 +4691,11 @@ } /* RHS is null */ else{ serial_type = aKey1[idx1]; - rc = (serial_type!=0); + rc = (serial_type!=0 && serial_type!=10); } if( rc!=0 ){ int sortFlags = pPKey2->pKeyInfo->aSortFlags[i]; if( sortFlags ){ @@ -4719,12 +4713,17 @@ i++; if( i==pPKey2->nField ) break; pRhs++; d1 += sqlite3VdbeSerialTypeLen(serial_type); + if( d1>(unsigned)nKey1 ) break; idx1 += sqlite3VarintLen(serial_type); - }while( idx1<(unsigned)szHdr1 && d1<=(unsigned)nKey1 ); + if( idx1>=(unsigned)szHdr1 ){ + pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; + return 0; /* Corrupt index */ + } + } /* No memory allocation is ever used on mem1. Prove this using ** the following assert(). If the assert() fails, it indicates a ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */ assert( mem1.szMalloc==0 ); @@ -5214,47 +5213,10 @@ return 0; } return 1; } -#include -void sqlite3VdbeIsSchemaVersion(Vdbe *v){ - v->bSchemaVersion = 1; -} -void sqlite3SchemaVersionLog(Vdbe *v){ - u64 i1 = v->aSchemaVersion[SCHEMA_VERSION_START]; - if( v->aSchemaVersion[SCHEMA_VERSION_BEGINTRANSDONE]>(i1+SCHEMA_VERSION_TIMEOUT) ){ - sqlite3_log(SQLITE_WARNING, - "slow \"PRAGMA schema_version\" (v=4): (%d, %d, %d, %d, %d, %d, %d)", - (v->aSchemaVersion[SCHEMA_VERSION_AFTERWALTBR]==0) ? 0 : - (int)(v->aSchemaVersion[SCHEMA_VERSION_AFTERWALTBR] - i1), - - (v->aSchemaVersion[SCHEMA_VERSION_AFTEROPENWAL2]==0) ? 0 : - (int)(v->aSchemaVersion[SCHEMA_VERSION_AFTEROPENWAL2] - i1), - - (v->aSchemaVersion[SCHEMA_VERSION_AFTERRESET]==0) ? 
0 : - (int)(v->aSchemaVersion[SCHEMA_VERSION_AFTERRESET] - i1), - - (v->aSchemaVersion[SCHEMA_VERSION_AFTERUNFETCH]==0) ? 0 : - (int)(v->aSchemaVersion[SCHEMA_VERSION_AFTERUNFETCH] - i1), - - (v->aSchemaVersion[SCHEMA_VERSION_AFTERPCACHE]==0) ? 0 : - (int)(v->aSchemaVersion[SCHEMA_VERSION_AFTERPCACHE] - i1), - - (v->aSchemaVersion[SCHEMA_VERSION_AFTERLOCKBTREE]==0) ? 0 : - (int)(v->aSchemaVersion[SCHEMA_VERSION_AFTERLOCKBTREE] - i1), - - (int)(v->aSchemaVersion[SCHEMA_VERSION_BEGINTRANSDONE] - i1) - ); - } -} -u64 sqlite3STimeNow(){ - struct timeval time; - gettimeofday(&time, 0); - return ((u64)time.tv_sec * 1000000 + (u64)time.tv_usec); -} - #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored ** in memory obtained from sqlite3_malloc) into a Vdbe.zErrMsg (text stored ** in memory obtained from sqlite3DbMalloc). Index: src/vdbeblob.c ================================================================== --- src/vdbeblob.c +++ src/vdbeblob.c @@ -133,10 +133,11 @@ int rc = SQLITE_OK; char *zErr = 0; Table *pTab; Incrblob *pBlob = 0; Parse sParse; + int bUnlock; /* True to unlock reusable schemas before returning */ #ifdef SQLITE_ENABLE_API_ARMOR if( ppBlob==0 ){ return SQLITE_MISUSE_BKPT; } @@ -149,10 +150,11 @@ #endif wrFlag = !!wrFlag; /* wrFlag = (wrFlag ? 1 : 0); */ sqlite3_mutex_enter(db->mutex); + bUnlock = sqlite3LockReusableSchema(db); pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob)); while(1){ sqlite3ParseObjectInit(&sParse,db); if( !pBlob ) goto blob_open_out; sqlite3DbFree(db, zErr); @@ -334,10 +336,11 @@ if( (++nAttempt)>=SQLITE_MAX_SCHEMA_RETRY || rc!=SQLITE_SCHEMA ) break; sqlite3ParseObjectReset(&sParse); } blob_open_out: + sqlite3UnlockReusableSchema(db, bUnlock); if( rc==SQLITE_OK && db->mallocFailed==0 ){ *ppBlob = (sqlite3_blob *)pBlob; }else{ if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt); sqlite3DbFree(db, pBlob); Index: src/vdbemem.c ================================================================== --- src/vdbemem.c +++ src/vdbemem.c @@ -830,10 +830,11 @@ assert( MEM_Str==(MEM_Blob>>3) ); pMem->flags |= (pMem->flags&MEM_Blob)>>3; sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding); assert( pMem->flags & MEM_Str || pMem->db->mallocFailed ); pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal|MEM_Blob|MEM_Zero); + if( encoding!=SQLITE_UTF8 ) pMem->n &= ~1; return sqlite3VdbeChangeEncoding(pMem, encoding); } } return SQLITE_OK; } @@ -1964,10 +1965,13 @@ int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){ Mem *p = (Mem*)pVal; assert( (p->flags & MEM_Null)==0 || (p->flags & (MEM_Str|MEM_Blob))==0 ); if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){ return p->n; + } + if( (p->flags & MEM_Str)!=0 && enc!=SQLITE_UTF8 && pVal->enc!=SQLITE_UTF8 ){ + return p->n; } if( (p->flags & MEM_Blob)!=0 ){ if( p->flags & MEM_Zero ){ return p->n + p->u.nZero; }else{ Index: src/vtab.c ================================================================== --- src/vtab.c +++ src/vtab.c @@ -190,10 +190,28 @@ ** this virtual-table, if one has been created, or NULL otherwise. 
*/ VTable *sqlite3GetVTable(sqlite3 *db, Table *pTab){ VTable *pVtab; assert( IsVirtual(pTab) ); +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( IsSharedSchema(db) ){ + int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + if( iDb!=1 ){ + VTable **pp; + for(pp=&db->aDb[iDb].pVTable; *pp; pp=&(*pp)->pNext){ + if( sqlite3StrICmp(pTab->zName, (*pp)->zName)==0 ) break; + } + pVtab = *pp; + if( pVtab && pTab->nCol<=0 ){ + *pp = pVtab->pNext; + sqlite3VtabUnlock(pVtab); + pVtab = 0; + } + return pVtab; + } + } +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ for(pVtab=pTab->u.vtab.p; pVtab && pVtab->db!=db; pVtab=pVtab->pNext); return pVtab; } /* @@ -498,11 +516,11 @@ v = sqlite3GetVdbe(pParse); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddOp0(v, OP_Expire); zWhere = sqlite3MPrintf(db, "name=%Q AND sql=%Q", pTab->zName, zStmt); - sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere, 0); + sqlite3VdbeAddParseSchemaOp(pParse, iDb, zWhere, 0); sqlite3DbFree(db, zStmt); iReg = ++pParse->nMem; sqlite3VdbeLoadString(v, iReg, pTab->zName); sqlite3VdbeAddOp2(v, OP_VCreate, iDb, iReg); @@ -568,10 +586,11 @@ int nArg = pTab->u.vtab.nArg; char *zErr = 0; char *zModuleName; int iDb; VtabCtx *pCtx; + int nByte; /* Bytes of space to allocate */ assert( IsVirtual(pTab) ); azArg = (const char *const*)pTab->u.vtab.azArg; /* Check that the virtual-table is not already being initialized */ @@ -587,18 +606,26 @@ zModuleName = sqlite3DbStrDup(db, pTab->zName); if( !zModuleName ){ return SQLITE_NOMEM_BKPT; } - pVTable = sqlite3MallocZero(sizeof(VTable)); + nByte = sizeof(VTable); +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + nByte += sqlite3Strlen30(pTab->zName) + 1; +#endif + pVTable = (VTable*)sqlite3MallocZero(nByte); if( !pVTable ){ sqlite3OomFault(db); sqlite3DbFree(db, zModuleName); return SQLITE_NOMEM_BKPT; } pVTable->db = db; pVTable->pMod = pMod; +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + pVTable->zName = (char*)&pVTable[1]; + memcpy(pVTable->zName, pTab->zName, nByte-sizeof(VTable)); +#endif pVTable->eVtabRisk = SQLITE_VTABRISK_Normal; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); pTab->u.vtab.azArg[1] = db->aDb[iDb].zDbSName; @@ -637,16 +664,27 @@ rc = SQLITE_ERROR; }else{ int iCol; u16 oooHidden = 0; /* If everything went according to plan, link the new VTable structure - ** into the linked list headed by pTab->u.vtab.p. Then loop through the - ** columns of the table to see if any of them contain the token "hidden". - ** If so, set the Column COLFLAG_HIDDEN flag and remove the token from - ** the type string. */ - pVTable->pNext = pTab->u.vtab.p; - pTab->u.vtab.p = pVTable; + ** into the linked list headed by pTab->u.vtab.p. Or, if this is a + ** reusable schema, into the linked list headed by Db.pVTable. + ** + ** Then loop through the columns of the table to see if any of them + ** contain the token "hidden". If so, set the Column COLFLAG_HIDDEN flag + ** and remove the token from the type string. 
*/ +#ifdef SQLITE_ENABLE_SHARED_SCHEMA + if( IsSharedSchema(db) && iDb!=1 ){ + pVTable->pNext = db->aDb[iDb].pVTable; + db->aDb[iDb].pVTable = pVTable; + }else +#endif /* ifdef SQLITE_ENABLE_SHARED_SCHEMA */ + { + assert( IsVirtual(pTab) ); + pVTable->pNext = pTab->u.vtab.p; + pTab->u.vtab.p = pVTable; + } for(iCol=0; iColnCol; iCol++){ char *zType = sqlite3ColumnType(&pTab->aCol[iCol], ""); int nType; int i = 0; @@ -697,10 +735,11 @@ int rc; assert( pTab ); assert( IsVirtual(pTab) ); if( sqlite3GetVTable(db, pTab) ){ + assert( !IsVirtual(pTab) || pTab->nCol>0 ); return SQLITE_OK; } /* Locate the required virtual table module */ zMod = pTab->u.vtab.azArg[0]; @@ -1139,11 +1178,11 @@ /* Check to see the left operand is a column in a virtual table */ if( NEVER(pExpr==0) ) return pDef; if( pExpr->op!=TK_COLUMN ) return pDef; assert( ExprUseYTab(pExpr) ); pTab = pExpr->y.pTab; - if( pTab==0 ) return pDef; + if( NEVER(pTab==0) ) return pDef; if( !IsVirtual(pTab) ) return pDef; pVtab = sqlite3GetVTable(db, pTab)->pVtab; assert( pVtab!=0 ); assert( pVtab->pModule!=0 ); pMod = (sqlite3_module *)pVtab->pModule; Index: src/wal.c ================================================================== --- src/wal.c +++ src/wal.c @@ -99,11 +99,11 @@ ** ** READER ALGORITHM ** ** To read a page from the database (call it page number P), a reader ** first checks the WAL to see if it contains page P. If so, then the -** last valid instance of page P that is followed by a commit frame +** last valid instance of page P that is a followed by a commit frame ** or is a commit frame itself becomes the value read. If the WAL ** contains no copies of page P that are valid and which are a commit ** frame or are followed by a commit frame, then page P is read from ** the database file. ** @@ -234,11 +234,11 @@ ** reader might be using some value K0 and a second reader that started ** at a later time (after additional transactions were added to the WAL ** and to the wal-index) might be using a different value K1, where K1>K0. ** Both readers can use the same hash table and mapping section to get ** the correct result. There may be entries in the hash table with -** K>K0, but to the first reader those entries will appear to be unused +** K>K0 but to the first reader, those entries will appear to be unused ** slots in the hash table and so the first reader will get an answer as ** if no values greater than K0 had ever been inserted into the hash table ** in the first place - which is what reader one wants. Meanwhile, the ** second reader using K1 will see additional values that were inserted ** later, which is exactly what reader two wants. @@ -245,199 +245,13 @@ ** ** When a rollback occurs, the value of K is decreased. Hash table entries ** that correspond to frames greater than the new K value are removed ** from the hash table at this point. */ - -/* -** WAL2 NOTES -** -** This file also contains the implementation of "wal2" mode - activated -** using "PRAGMA journal_mode = wal2". Wal2 mode is very similar to wal -** mode, except that it uses two wal files instead of one. Under some -** circumstances, wal2 mode provides more concurrency than legacy wal -** mode. -** -** THE PROBLEM WAL2 SOLVES: -** -** In legacy wal mode, if a writer wishes to write to the database while -** a checkpoint is ongoing, it may append frames to the existing wal file. -** This means that after the checkpoint has finished, the wal file consists -** of a large block of checkpointed frames, followed by a block of -** uncheckpointed frames. 
In a deployment that features a high volume of -** write traffic, this may mean that the wal file is never completely -** checkpointed. And so grows indefinitely. -** -** An alternative is to use "PRAGMA wal_checkpoint=RESTART" or similar to -** force a complete checkpoint of the wal file. But this must: -** -** 1) Wait on all existing readers to finish, -** 2) Wait on any existing writer, and then block all new writers, -** 3) Do the checkpoint, -** 4) Wait on any new readers that started during steps 2 and 3. Writers -** are still blocked during this step. -** -** This means that in order to avoid the wal file growing indefinitely -** in a busy system, writers must periodically pause to allow a checkpoint -** to complete. In a system with long running readers, such pauses may be -** for a non-trivial amount of time. -** -** OVERVIEW OF SOLUTION -** -** Wal2 mode uses two wal files. After writers have grown the first wal -** file to a pre-configured size, they begin appending transactions to -** the second wal file. Once all existing readers are reading snapshots -** new enough to include the entire first wal file, a checkpointer can -** checkpoint it. -** -** Meanwhile, writers are writing transactions to the second wal file. -** Once that wal file has grown larger than the pre-configured size, each -** new writer checks if: -** -** * the first wal file has been checkpointed, and if so, if -** * there are no readers still reading from the first wal file (once -** it has been checkpointed, new readers read only from the second -** wal file). -** -** If both these conditions are true, the writer may switch back to the -** first wal file. Eventually, a checkpointer can checkpoint the second -** wal file, and so on. -** -** The wal file that writers are currently appending to (the one they -** don't have to check the above two criteria before writing to) is called -** the "current" wal file. -** -** The first wal file takes the same name as the wal file in legacy wal -** mode systems - "-wal". The second is named "-wal2". - -** -** CHECKPOINTS -** -** The "pre-configured size" mentioned above is the value set by -** "PRAGMA journal_size_limit". Or, if journal_size_limit is not set, -** 1000 pages. -** -** There is only a single type of checkpoint in wal2 mode (no "truncate", -** "restart" etc.), and it always checkpoints the entire contents of a single -** wal file. A wal file cannot be checkpointed until after a writer has written -** the first transaction into the other wal file and all readers are reading a -** snapshot that includes at least one transaction from the other wal file. -** -** The wal-hook, if one is registered, is invoked after a write-transaction -** is committed, just as it is in legacy wal mode. The integer parameter -** passed to the wal-hook is the total number of uncheckpointed frames in both -** wal files. Except, the parameter is set to zero if there is no frames -** that may be checkpointed. This happens in two scenarios: -** -** 1. The "other" wal file (the one that the writer did not just append to) -** is completely empty, or -** -** 2. The "other" wal file (the one that the writer did not just append to) -** has already been checkpointed. -** -** -** WAL FILE FORMAT -** -** The file format used for each wal file in wal2 mode is the same as for -** legacy wal mode. Except, the file format field is set to 3021000 -** instead of 3007000. -** -** WAL-INDEX FORMAT -** -** The wal-index format is also very similar. 
Even though there are two -** wal files, there is still a single wal-index shared-memory area (*-shm -** file with the default unix or win32 VFS). The wal-index header is the -** same size, with the following exceptions it has the same format: -** -** * The version field is set to 3021000 instead of 3007000. -** -** * An unused 32-bit field in the legacy wal-index header is -** now used to store (a) a single bit indicating which of the -** two wal files writers should append to and (b) the number -** of frames in the second wal file (31 bits). -** -** The first hash table in the wal-index contains entries corresponding -** to the first HASHTABLE_NPAGE_ONE frames stored in the first wal file. -** The second hash table in the wal-index contains entries indexing the -** first HASHTABLE_NPAGE frames in the second wal file. The third hash -** table contains the next HASHTABLE_NPAGE frames in the first wal file, -** and so on. -** -** LOCKS -** -** Read-locks are simpler than for legacy wal mode. There are no locking -** slots that contain frame numbers. Instead, there are four distinct -** combinations of read locks a reader may hold: -** -** WAL_LOCK_PART1: "part" lock on first wal, none of second. -** WAL_LOCK_PART1_FULL2: "part" lock on first wal, "full" of second. -** WAL_LOCK_PART2: no lock on first wal, "part" lock on second. -** WAL_LOCK_PART2_FULL1: "full" lock on first wal, "part" lock on second. -** -** When a reader reads the wal-index header as part of opening a read -** transaction, it takes a "part" lock on the current wal file. "Part" -** because the wal file may grow while the read transaction is active, in -** which case the reader would be reading only part of the wal file. -** A part lock prevents a checkpointer from checkpointing the wal file -** on which it is held. -** -** If there is data in the non-current wal file that has not been -** checkpointed, the reader takes a "full" lock on that wal file. A -** "full" lock indicates that the reader is using the entire wal file. -** A full lock prevents a writer from overwriting the wal file on which -** it is held, but does not prevent a checkpointer from checkpointing -** it. -** -** There is still a single WRITER and a single CHECKPOINTER lock. The -** recovery procedure still takes the same exclusive lock on the entire -** range of SQLITE_SHM_NLOCK shm-locks. This works because the read-locks -** above use four of the six read-locking slots used by legacy wal mode. -** -** STARTUP/RECOVERY -** -** The read and write version fields of the database header in a wal2 -** database are set to 0x03, instead of 0x02 as in legacy wal mode. -** -** The wal file format used in wal2 mode is the same as the format used -** in legacy wal mode. However, in order to support recovery, there are two -** differences in the way wal file header fields are populated, as follows: -** -** * When the first wal file is first created, the "nCkpt" field in -** the wal file header is set to 0. Thereafter, each time the writer -** switches wal file, it sets the nCkpt field in the new wal file -** header to ((nCkpt0 + 1) & 0x0F), where nCkpt0 is the value in -** the previous wal file header. This means that the first wal file -** always has an even value in the nCkpt field, and the second wal -** file always has an odd value. -** -** * When a writer switches wal file, it sets the salt values in the -** new wal file to a copy of the checksum for the final frame in -** the previous wal file. -** -** Recovery proceeds as follows: -** -** 1. 
Each wal file is recovered separately. Except, if the first wal -** file does not exist or is zero bytes in size, the second wal file -** is truncated to zero bytes before it is "recovered". -** -** 2. If both wal files contain valid headers, then the nCkpt fields -** are compared to see which of the two wal files is older. If the -** salt keys in the second wal file match the final frame checksum -** in the older wal file, then both wal files are used. Otherwise, -** the newer wal file is ignored. -** -** 3. Or, if only one or neither of the wal files has a valid header, -** then only a single or no wal files are recovered into the -** reconstructed wal-index. -** -** Refer to header comments for walIndexRecover() for further details. -*/ - #ifndef SQLITE_OMIT_WAL #include "wal.h" -#include "vdbeInt.h" /* ** Trace output macros */ #if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) @@ -446,23 +260,24 @@ #else # define WALTRACE(X) #endif /* -** Both the wal-file and the wal-index contain version fields -** indicating the current version of the system. If a client -** reads the header of a wal file (as part of recovery), or the -** wal-index (as part of opening a read transaction) and (a) the -** header checksum is correct but (b) the version field is not -** recognized, the operation fails with SQLITE_CANTOPEN. +** The maximum (and only) versions of the wal and wal-index formats +** that may be interpreted by this version of SQLite. ** -** Currently, clients support both version-1 ("journal_mode=wal") and -** version-2 ("journal_mode=wal2"). Legacy clients may support version-1 -** only. +** If a client begins recovering a WAL file and finds that (a) the checksum +** values in the wal-header are correct and (b) the version field is not +** WAL_MAX_VERSION, recovery fails and SQLite returns SQLITE_CANTOPEN. +** +** Similarly, if a client successfully reads a wal-index header (i.e. the +** checksum test is successful) and finds that the version field is not +** WALINDEX_MAX_VERSION, then no read-transaction is opened and SQLite +** returns SQLITE_CANTOPEN. */ -#define WAL_VERSION1 3007000 /* For "journal_mode=wal" */ -#define WAL_VERSION2 3021000 /* For "journal_mode=wal2" */ +#define WAL_MAX_VERSION 3007000 +#define WALINDEX_MAX_VERSION 3007000 /* ** Index numbers for various locking bytes. WAL_NREADER is the number ** of available reader locks and should be at least 3. The default ** is SQLITE_SHM_NLOCK==8 and WAL_NREADER==5. @@ -481,43 +296,10 @@ #define WAL_CKPT_LOCK 1 #define WAL_RECOVER_LOCK 2 #define WAL_READ_LOCK(I) (3+(I)) #define WAL_NREADER (SQLITE_SHM_NLOCK-3) -/* -** Values that may be stored in Wal.readLock in wal2 mode. -** -** In wal mode, the Wal.readLock member is set to -1 when no read-lock -** is held, or else is the index of the read-mark on which a lock is -** held. -** -** In wal2 mode, a value of -1 still indicates that no read-lock is held. -** And a non-zero value still represents the index of the read-mark on -** which a lock is held. There are two differences: -** -** 1. wal2 mode never uses read-mark 0. -** -** 2. locks on each read-mark have a different interpretation, as -** indicated by the symbolic names below. -*/ -#define WAL_LOCK_NONE -1 -#define WAL_LOCK_PART1 1 -#define WAL_LOCK_PART1_FULL2 2 -#define WAL_LOCK_PART2_FULL1 3 -#define WAL_LOCK_PART2 4 - -/* -** This constant is used in wal2 mode only. 
-** -** In wal2 mode, when committing a transaction, if the current wal file -** is sufficiently large and there are no conflicting locks held, the -** writer writes the new transaction into the start of the other wal -** file. Usually, "sufficiently large" is defined by the value configured -** using "PRAGMA journal_size_limit". However, if no such value has been -** configured, sufficiently large defaults to WAL_DEFAULT_WALSIZE frames. -*/ -#define WAL_DEFAULT_WALSIZE 1000 /* Object declarations */ typedef struct WalIndexHdr WalIndexHdr; typedef struct WalIterator WalIterator; typedef struct WalCkptInfo WalCkptInfo; @@ -533,68 +315,25 @@ ** the total header size is 136 bytes. ** ** The szPage value can be any power of 2 between 512 and 32768, inclusive. ** Or it can be 1 to represent a 65536-byte page. The latter case was ** added in 3.7.1 when support for 64K pages was added. -** -** WAL2 mode notes: Member variable mxFrame2 is only used in wal2 mode -** (when iVersion is set to WAL_VERSION2). The lower 31 bits store -** the maximum frame number in file *-wal2. The most significant bit -** is a flag - set if clients are currently appending to *-wal2, clear -** otherwise. */ struct WalIndexHdr { u32 iVersion; /* Wal-index version */ - u32 mxFrame2; /* See "WAL2 mode notes" above */ + u32 unused; /* Unused (padding) field */ u32 iChange; /* Counter incremented each transaction */ u8 isInit; /* 1 when initialized */ u8 bigEndCksum; /* True if checksums in WAL are big-endian */ u16 szPage; /* Database page size in bytes. 1==64K */ - u32 mxFrame; /* Index of last valid frame in each WAL */ + u32 mxFrame; /* Index of last valid frame in the WAL */ u32 nPage; /* Size of database in pages */ u32 aFrameCksum[2]; /* Checksum of last frame in log */ u32 aSalt[2]; /* Two salt values copied from WAL header */ u32 aCksum[2]; /* Checksum over all prior fields */ }; -/* -** The following macros and functions are get/set methods for the maximum -** frame numbers and current wal file values stored in the WalIndexHdr -** structure. These are helpful because of the unorthodox way in which -** the values are stored in wal2 mode (see above). They are equivalent -** to functions with the following signatures. -** -** u32 walidxGetMxFrame(WalIndexHdr*, int iWal); // get mxFrame -** void walidxSetMxFrame(WalIndexHdr*, int iWal, u32 val); // set mxFrame -** int walidxGetFile(WalIndexHdr*) // get file -** void walidxSetFile(WalIndexHdr*, int val); // set file -*/ -#define walidxGetMxFrame(pHdr, iWal) \ - ((iWal) ? ((pHdr)->mxFrame2 & 0x7FFFFFFF) : (pHdr)->mxFrame) - -static void walidxSetMxFrame(WalIndexHdr *pHdr, int iWal, u32 mxFrame){ - if( iWal ){ - pHdr->mxFrame2 = (pHdr->mxFrame2 & 0x80000000) | mxFrame; - }else{ - pHdr->mxFrame = mxFrame; - } - assert( walidxGetMxFrame(pHdr, iWal)==mxFrame ); -} - -#define walidxGetFile(pHdr) ((pHdr)->mxFrame2 >> 31) - -#define walidxSetFile(pHdr, iWal) ( \ - (pHdr)->mxFrame2 = ((pHdr)->mxFrame2 & 0x7FFFFFFF) | ((iWal)<<31) \ -) - -/* -** Argument is a pointer to a Wal structure. Return true if the current -** cache of the wal-index header indicates "journal_mode=wal2" mode, or -** false otherwise. -*/ -#define isWalMode2(pWal) ((pWal)->hdr.iVersion==WAL_VERSION2) - /* ** A copy of the following object occurs in the wal-index immediately ** following the second copy of the WalIndexHdr. This object stores ** information used by checkpoint. ** @@ -765,11 +504,11 @@ ** following object. 
*/ struct Wal { sqlite3_vfs *pVfs; /* The VFS used to create pDbFd */ sqlite3_file *pDbFd; /* File handle for the database file */ - sqlite3_file *apWalFd[2]; /* File handle for "*-wal" and "*-wal2" */ + sqlite3_file *pWalFd; /* File handle for WAL file */ u32 iCallback; /* Value to pass to log callback (or 0) */ i64 mxWalSize; /* Truncate WAL to this size upon reset */ int nWiData; /* Size of array apWiData */ int szFirstBlock; /* Size of first block written to WAL file */ volatile u32 **apWiData; /* Pointer to wal-index content in memory */ @@ -785,27 +524,21 @@ u8 padToSectorBoundary; /* Pad transactions out to the next sector */ u8 bShmUnreliable; /* SHM content is read-only and unreliable */ WalIndexHdr hdr; /* Wal-index header for current transaction */ u32 minFrame; /* Ignore wal frames before this one */ u32 iReCksum; /* On commit, recalculate checksums from here */ - u32 nPriorFrame; /* For sqlite3WalInfo() */ const char *zWalName; /* Name of WAL file */ - const char *zWalName2; /* Name of second WAL file */ u32 nCkpt; /* Checkpoint sequence counter in the wal-header */ - FastPrng sPrng; /* Random number generator */ #ifdef SQLITE_DEBUG u8 lockError; /* True if a locking error has occurred */ #endif #ifdef SQLITE_ENABLE_SNAPSHOT WalIndexHdr *pSnapshot; /* Start transaction here if not NULL */ #endif - int bClosing; /* Set to true at start of sqlite3WalClose() */ - int bWal2; /* bWal2 flag passed to WalOpen() */ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT sqlite3 *db; #endif - u64 *aSchemaVersion; }; /* ** Candidate values for Wal.exclusiveMode. */ @@ -1063,11 +796,11 @@ volatile WalIndexHdr *aHdr = walIndexHdr(pWal); const int nCksum = offsetof(WalIndexHdr, aCksum); assert( pWal->writeLock ); pWal->hdr.isInit = 1; - assert( pWal->hdr.iVersion==WAL_VERSION1||pWal->hdr.iVersion==WAL_VERSION2 ); + pWal->hdr.iVersion = WALINDEX_MAX_VERSION; walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum); /* Possible TSAN false-positive. See tag-20200519-1 */ memcpy((void*)&aHdr[1], (const void*)&pWal->hdr, sizeof(WalIndexHdr)); walShmBarrier(pWal); memcpy((void*)&aHdr[0], (const void*)&pWal->hdr, sizeof(WalIndexHdr)); @@ -1142,11 +875,11 @@ if( pgno==0 ){ return 0; } /* A frame is only valid if a checksum of the WAL header, - ** all prior frames, the first 16 bytes of this frame-header, + ** all prior frams, the first 16 bytes of this frame-header, ** and the frame-data matches the checksum in the last 8 ** bytes of this frame-header. */ nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN); walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum); @@ -1190,11 +923,11 @@ /* ** Set or release locks on the WAL. Locks are either shared or exclusive. ** A lock cannot be moved directly between shared and exclusive - it must go -** through the concurrent state first. +** through the unlocked state first. ** ** In locking_mode=EXCLUSIVE, all of these routines become no-ops. */ static int walLockShared(Wal *pWal, int lockIdx){ int rc; @@ -1292,42 +1025,10 @@ rc = SQLITE_ERROR; } return rc; } -static u32 walExternalEncode(int iWal, u32 iFrame){ - u32 iRet; - if( iWal ){ - iRet = HASHTABLE_NPAGE_ONE + iFrame; - iRet += ((iFrame-1) / HASHTABLE_NPAGE) * HASHTABLE_NPAGE; - }else{ - iRet = iFrame; - iFrame += HASHTABLE_NPAGE - HASHTABLE_NPAGE_ONE; - iRet += ((iFrame-1) / HASHTABLE_NPAGE) * HASHTABLE_NPAGE; - } - return iRet; -} - -/* -** Parameter iExternal is an external frame identifier. 
This function -** transforms it to a wal file number (0 or 1) and frame number within -** this wal file (reported via output parameter *piRead). -*/ -static int walExternalDecode(u32 iExternal, u32 *piRead){ - int iHash = (iExternal+HASHTABLE_NPAGE-HASHTABLE_NPAGE_ONE-1)/HASHTABLE_NPAGE; - - if( 0==(iHash & 0x01) ){ - /* A frame in wal file 0 */ - *piRead = (iExternal <= HASHTABLE_NPAGE_ONE) ? iExternal : - iExternal - (iHash/2) * HASHTABLE_NPAGE; - return 0; - } - - *piRead = iExternal - HASHTABLE_NPAGE_ONE - ((iHash-1)/2) * HASHTABLE_NPAGE; - return 1; -} - /* ** Return the number of the wal-index page that contains the hash-table ** and page-number array that contain entries corresponding to WAL frame ** iFrame. The wal-index is broken up into 32KB pages. Wal-index pages ** are numbered starting from 0. @@ -1342,26 +1043,10 @@ ); assert( iHash>=0 ); return iHash; } -/* -** Return the index of the hash-table corresponding to frame iFrame of wal -** file iWal. -*/ -static int walFramePage2(int iWal, u32 iFrame){ - int iRet; - assert( iWal==0 || iWal==1 ); - assert( iFrame>0 ); - if( iWal==0 ){ - iRet = 2*((iFrame+HASHTABLE_NPAGE-HASHTABLE_NPAGE_ONE-1)/HASHTABLE_NPAGE); - }else{ - iRet = 1 + 2 * ((iFrame-1) / HASHTABLE_NPAGE); - } - return iRet; -} - /* ** Return the page number associated with frame iFrame in this WAL. */ static u32 walFramePgno(Wal *pWal, u32 iFrame){ int iHash = walFramePage(iFrame); @@ -1369,14 +1054,10 @@ return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1]; } return pWal->apWiData[iHash][(iFrame-1-HASHTABLE_NPAGE_ONE)%HASHTABLE_NPAGE]; } -static u32 walFramePgno2(Wal *pWal, int iWal, u32 iFrame){ - return walFramePgno(pWal, walExternalEncode(iWal, iFrame)); -} - /* ** Remove entries from the hash table that point to WAL slots greater ** than pWal->hdr.mxFrame. ** ** This function is called whenever pWal->hdr.mxFrame is decreased due @@ -1390,50 +1071,41 @@ static void walCleanupHash(Wal *pWal){ WalHashLoc sLoc; /* Hash table location */ int iLimit = 0; /* Zero values greater than this */ int nByte; /* Number of bytes to zero in aPgno[] */ int i; /* Used to iterate through aHash[] */ - int iWal = walidxGetFile(&pWal->hdr); - u32 mxFrame = walidxGetMxFrame(&pWal->hdr, iWal); - - u32 iExternal; - if( isWalMode2(pWal) ){ - iExternal = walExternalEncode(iWal, mxFrame); - }else{ - assert( iWal==0 ); - iExternal = mxFrame; - } assert( pWal->writeLock ); - testcase( mxFrame==HASHTABLE_NPAGE_ONE-1 ); - testcase( mxFrame==HASHTABLE_NPAGE_ONE ); - testcase( mxFrame==HASHTABLE_NPAGE_ONE+1 ); + testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE-1 ); + testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE ); + testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE+1 ); - if( mxFrame==0 ) return; + if( pWal->hdr.mxFrame==0 ) return; /* Obtain pointers to the hash-table and page-number array containing ** the entry that corresponds to frame pWal->hdr.mxFrame. It is guaranteed ** that the page said hash-table and array reside on is already mapped.(1) */ - assert( pWal->nWiData>walFramePage(iExternal) ); - assert( pWal->apWiData[walFramePage(iExternal)] ); - i = walHashGet(pWal, walFramePage(iExternal), &sLoc); + assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) ); + assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] ); + i = walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &sLoc); if( NEVER(i) ) return; /* Defense-in-depth, in case (1) above is wrong */ /* Zero all hash-table entries that correspond to frame numbers greater ** than pWal->hdr.mxFrame. 
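walFramePage(), restored above, maps a frame number to the 32KB wal-index page that holds its hash table: the first page covers frames 1..HASHTABLE_NPAGE_ONE (HASHTABLE_NPAGE less the 34 32-bit words occupied by the wal-index header), and every later page covers HASHTABLE_NPAGE further frames. An editor's sketch with the usual constant values restated for illustration (HASHTABLE_NPAGE==4096 in the standard build; treat both values as assumptions here):

#define DEMO_NPAGE      4096
#define DEMO_NPAGE_ONE  (DEMO_NPAGE - 34)

/* Same arithmetic as walFramePage(): frames 1..DEMO_NPAGE_ONE -> page 0,
** then each further group of DEMO_NPAGE frames gets its own page. */
static int demoFramePage(unsigned int iFrame){
  return (int)((iFrame + DEMO_NPAGE - DEMO_NPAGE_ONE - 1) / DEMO_NPAGE);
}
/* demoFramePage(1)==0, demoFramePage(4062)==0, demoFramePage(4063)==1,
** demoFramePage(8158)==1, demoFramePage(8159)==2, ... */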
*/ - iLimit = iExternal - sLoc.iZero; + iLimit = pWal->hdr.mxFrame - sLoc.iZero; assert( iLimit>0 ); for(i=0; iiLimit ){ sLoc.aHash[i] = 0; } } /* Zero the entries in the aPgno array that correspond to frames with - ** frame numbers greater than pWal->hdr.mxFrame. */ + ** frame numbers greater than pWal->hdr.mxFrame. + */ nByte = (int)((char *)sLoc.aHash - (char *)&sLoc.aPgno[iLimit]); assert( nByte>=0 ); memset((void *)&sLoc.aPgno[iLimit], 0, nByte); #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT @@ -1450,38 +1122,31 @@ assert( sLoc.aHash[iKey]==j+1 ); } } #endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */ } + /* ** Set an entry in the wal-index that will map database page number ** pPage into WAL frame iFrame. */ -static int walIndexAppend(Wal *pWal, int iWal, u32 iFrame, u32 iPage){ +static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){ int rc; /* Return code */ WalHashLoc sLoc; /* Wal-index hash table location */ - u32 iExternal; - - if( isWalMode2(pWal) ){ - iExternal = walExternalEncode(iWal, iFrame); - }else{ - assert( iWal==0 ); - iExternal = iFrame; - } - - rc = walHashGet(pWal, walFramePage(iExternal), &sLoc); + + rc = walHashGet(pWal, walFramePage(iFrame), &sLoc); /* Assuming the wal-index file was successfully mapped, populate the ** page number array and hash table entry. */ if( rc==SQLITE_OK ){ int iKey; /* Hash table key */ int idx; /* Value to write to hash-table slot */ int nCollide; /* Number of hash collisions */ - idx = iExternal - sLoc.iZero; + idx = iFrame - sLoc.iZero; assert( idx <= HASHTABLE_NSLOT/2 + 1 ); /* If this is the first entry to be added to this hash-table, zero the ** entire hash table and aPgno[] array before proceeding. */ @@ -1541,216 +1206,10 @@ } return rc; } -/* -** Recover a single wal file - *-wal if iWal==0, or *-wal2 if iWal==1. -*/ -static int walIndexRecoverOne(Wal *pWal, int iWal, u32 *pnCkpt, int *pbZero){ - i64 nSize; /* Size of log file */ - u32 aFrameCksum[2] = {0, 0}; - int rc; - sqlite3_file *pWalFd = pWal->apWalFd[iWal]; - - assert( iWal==0 || iWal==1 ); - - memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); - sqlite3FastRandomness(&pWal->sPrng, 8, pWal->hdr.aSalt); - - rc = sqlite3OsFileSize(pWalFd, &nSize); - if( rc==SQLITE_OK ){ - if( nSize>WAL_HDRSIZE ){ - u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */ - u32 *aPrivate = 0; /* Heap copy of *-shm pg being populated */ - u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */ - int szFrame; /* Number of bytes in buffer aFrame[] */ - u8 *aData; /* Pointer to data part of aFrame buffer */ - int szPage; /* Page size according to the log */ - u32 magic; /* Magic value read from WAL header */ - u32 version; /* Magic value read from WAL header */ - int isValid; /* True if this frame is valid */ - int iPg; /* Current 32KB wal-index page */ - int iLastFrame; /* Last frame in wal, based on size alone */ - int iLastPg; /* Last shm page used by this wal */ - - /* Read in the WAL header. */ - rc = sqlite3OsRead(pWalFd, aBuf, WAL_HDRSIZE, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - /* If the database page size is not a power of two, or is greater than - ** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid - ** data. Similarly, if the 'magic' value is invalid, ignore the whole - ** WAL file. 
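The header sanity test described above (and repeated in the restored walIndexRecover() further down) can be summarized as a standalone check over the first 32 bytes of the wal file. An editor's sketch: demoGet4() mimics the big-endian sqlite3Get4byte(), 0x377f0682 is taken to be the WAL magic value with the low bit selecting big-endian checksums, and the checksum comparison performed by the real code is intentionally omitted:

#include <stdint.h>

static uint32_t demoGet4(const unsigned char *p){
  return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|(uint32_t)p[3];
}

/* Returns 1 if the 32-byte wal header passes the magic and page-size tests. */
static int demoWalHeaderPlausible(const unsigned char aHdr[32], uint32_t mxPageSize){
  uint32_t magic  = demoGet4(&aHdr[0]);
  uint32_t szPage = demoGet4(&aHdr[8]);
  if( (magic & 0xFFFFFFFE)!=0x377f0682 ) return 0;  /* not a wal header */
  if( szPage & (szPage-1) ) return 0;               /* not a power of two */
  if( szPage<512 || szPage>mxPageSize ) return 0;   /* outside legal range */
  return 1;
}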
- */ - magic = sqlite3Get4byte(&aBuf[0]); - szPage = sqlite3Get4byte(&aBuf[8]); - if( (magic&0xFFFFFFFE)!=WAL_MAGIC - || szPage&(szPage-1) - || szPage>SQLITE_MAX_PAGE_SIZE - || szPage<512 - ){ - return SQLITE_OK; - } - pWal->hdr.bigEndCksum = (u8)(magic&0x00000001); - pWal->szPage = szPage; - - /* Verify that the WAL header checksum is correct */ - walChecksumBytes(pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN, - aBuf, WAL_HDRSIZE-2*4, 0, pWal->hdr.aFrameCksum - ); - if( pWal->hdr.aFrameCksum[0]!=sqlite3Get4byte(&aBuf[24]) - || pWal->hdr.aFrameCksum[1]!=sqlite3Get4byte(&aBuf[28]) - ){ - return SQLITE_OK; - } - - memcpy(&pWal->hdr.aSalt, &aBuf[16], 8); - *pnCkpt = sqlite3Get4byte(&aBuf[12]); - - /* Verify that the version number on the WAL format is one that - ** are able to understand */ - version = sqlite3Get4byte(&aBuf[4]); - if( version!=WAL_VERSION1 && version!=WAL_VERSION2 ){ - return SQLITE_CANTOPEN_BKPT; - } - pWal->hdr.iVersion = version; - - /* Malloc a buffer to read frames into. */ - szFrame = szPage + WAL_FRAME_HDRSIZE; - aFrame = (u8 *)sqlite3_malloc64(szFrame + WALINDEX_PGSZ); - if( !aFrame ){ - return SQLITE_NOMEM_BKPT; - } - aData = &aFrame[WAL_FRAME_HDRSIZE]; - aPrivate = (u32*)&aData[szPage]; - - /* Read all frames from the log file. */ - iLastFrame = (nSize - WAL_HDRSIZE) / szFrame; - if( version==WAL_VERSION2 ){ - iLastPg = walFramePage2(iWal, iLastFrame); - }else{ - iLastPg = walFramePage(iLastFrame); - } - for(iPg=iWal; iPg<=iLastPg; iPg+=(version==WAL_VERSION2 ? 2 : 1)){ - u32 *aShare; - int iFrame; /* Index of last frame read */ - int iLast; - int iFirst; - int nHdr, nHdr32; - - rc = walIndexPage(pWal, iPg, (volatile u32**)&aShare); - assert( aShare!=0 || rc!=SQLITE_OK ); - if( aShare==0 ) break; - pWal->apWiData[iPg] = aPrivate; - - if( iWal ){ - assert( version==WAL_VERSION2 ); - iFirst = 1 + (iPg/2)*HASHTABLE_NPAGE; - iLast = iFirst + HASHTABLE_NPAGE - 1; - }else{ - int i2 = (version==WAL_VERSION2) ? (iPg/2) : iPg; - iLast = HASHTABLE_NPAGE_ONE+i2*HASHTABLE_NPAGE; - iFirst = 1 + (i2==0?0:HASHTABLE_NPAGE_ONE+(i2-1)*HASHTABLE_NPAGE); - } - iLast = MIN(iLast, iLastFrame); - - for(iFrame=iFirst; iFrame<=iLast; iFrame++){ - i64 iOffset = walFrameOffset(iFrame, szPage); - u32 pgno; /* Database page number for frame */ - u32 nTruncate; /* dbsize field from frame header */ - - /* Read and decode the next log frame. */ - rc = sqlite3OsRead(pWalFd, aFrame, szFrame, iOffset); - if( rc!=SQLITE_OK ) break; - isValid = walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame); - if( !isValid ) break; - rc = walIndexAppend(pWal, iWal, iFrame, pgno); - if( NEVER(rc!=SQLITE_OK) ) break; - - /* If nTruncate is non-zero, this is a commit record. */ - if( nTruncate ){ - pWal->hdr.mxFrame = iFrame; - pWal->hdr.nPage = nTruncate; - pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); - testcase( szPage<=32768 ); - testcase( szPage>=65536 ); - aFrameCksum[0] = pWal->hdr.aFrameCksum[0]; - aFrameCksum[1] = pWal->hdr.aFrameCksum[1]; - } - } - pWal->apWiData[iPg] = aShare; - nHdr = (iPg==0 ? WALINDEX_HDR_SIZE : 0); - nHdr32 = nHdr / sizeof(u32); -#ifndef SQLITE_SAFER_WALINDEX_RECOVERY - /* Memcpy() should work fine here, on all reasonable implementations. - ** Technically, memcpy() might change the destination to some - ** intermediate value before setting to the final value, and that might - ** cause a concurrent reader to malfunction. 
Memcpy() is allowed to - ** do that, according to the spec, but no memcpy() implementation that - ** we know of actually does that, which is why we say that memcpy() - ** is safe for this. Memcpy() is certainly a lot faster. - */ - memcpy(&aShare[nHdr32], &aPrivate[nHdr32], WALINDEX_PGSZ-nHdr); -#else - /* In the event that some platform is found for which memcpy() - ** changes the destination to some intermediate value before - ** setting the final value, this alternative copy routine is - ** provided. - */ - { - int i; - for(i=nHdr32; ihdr.aFrameCksum[0] = aFrameCksum[0]; - pWal->hdr.aFrameCksum[1] = aFrameCksum[1]; - - return rc; -} - -static int walOpenWal2(Wal *pWal){ - int rc = SQLITE_OK; - if( !isOpen(pWal->apWalFd[1]) ){ - int f = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL); - rc = sqlite3OsOpen(pWal->pVfs, pWal->zWalName2, pWal->apWalFd[1], f, &f); - } - return rc; -} - -static int walTruncateWal2(Wal *pWal){ - int bIs; - int rc; - assert( !isOpen(pWal->apWalFd[1]) ); - rc = sqlite3OsAccess(pWal->pVfs, pWal->zWalName2, SQLITE_ACCESS_EXISTS, &bIs); - if( rc==SQLITE_OK && bIs ){ - rc = walOpenWal2(pWal); - if( rc==SQLITE_OK ){ - rc = sqlite3OsTruncate(pWal->apWalFd[1], 0); - sqlite3OsClose(pWal->apWalFd[1]); - } - } - return rc; -} /* ** Recover the wal-index by reading the write-ahead log file. ** ** This routine first tries to establish an exclusive lock on the @@ -1760,20 +1219,18 @@ ** that this thread is running recovery. If unable to establish ** the necessary locks, this routine returns SQLITE_BUSY. */ static int walIndexRecover(Wal *pWal){ int rc; /* Return Code */ + i64 nSize; /* Size of log file */ + u32 aFrameCksum[2] = {0, 0}; int iLock; /* Lock offset to lock for checkpoint */ - u32 nCkpt1 = 0xFFFFFFFF; - u32 nCkpt2 = 0xFFFFFFFF; - int bZero = 0; - WalIndexHdr hdr; /* Obtain an exclusive lock on all byte in the locking range not already ** locked by the caller. The caller is guaranteed to have locked the ** WAL_WRITE_LOCK byte, and may have also locked the WAL_CKPT_LOCK byte. - ** If successful, the same bytes that are locked here are concurrent before + ** If successful, the same bytes that are locked here are unlocked before ** this function returns. */ assert( pWal->ckptLock==1 || pWal->ckptLock==0 ); assert( WAL_ALL_BUT_WRITE==WAL_WRITE_LOCK+1 ); assert( WAL_CKPT_LOCK==WAL_ALL_BUT_WRITE ); @@ -1784,150 +1241,208 @@ return rc; } WALTRACE(("WAL%p: recovery begin...\n", pWal)); - /* Recover the *-wal file. If a valid version-1 header is recovered - ** from it, do not open the *-wal2 file. Even if it exists. - ** - ** Otherwise, if the *-wal2 file exists or if the "wal2" flag was - ** specified when sqlite3WalOpen() was called, open and recover - ** the *-wal2 file. Except, if the *-wal file was zero bytes in size, - ** truncate the *-wal2 to zero bytes in size. - ** - ** After this block has run, if the *-wal2 file is open the system - ** starts up in VERSION2 mode. In this case pWal->hdr contains the - ** wal-index header considering only *-wal2. Stack variable hdr - ** contains the wal-index header considering only *-wal. The hash - ** tables are populated for both. - ** - ** Or, if the *-wal2 file is not open, start up in VERSION1 mode. - ** pWal->hdr is already populated. 
- */ - rc = walIndexRecoverOne(pWal, 0, &nCkpt1, &bZero); - assert( pWal->hdr.iVersion==0 - || pWal->hdr.iVersion==WAL_VERSION1 - || pWal->hdr.iVersion==WAL_VERSION2 - ); - if( rc==SQLITE_OK && bZero ){ - rc = walTruncateWal2(pWal); - } - if( rc==SQLITE_OK && pWal->hdr.iVersion!=WAL_VERSION1 ){ - int bOpen = 1; - sqlite3_vfs *pVfs = pWal->pVfs; - if( pWal->hdr.iVersion==0 && pWal->bWal2==0 ){ - rc = sqlite3OsAccess(pVfs, pWal->zWalName2, SQLITE_ACCESS_EXISTS, &bOpen); - } - if( rc==SQLITE_OK && bOpen ){ - rc = walOpenWal2(pWal); - if( rc==SQLITE_OK ){ - hdr = pWal->hdr; - rc = walIndexRecoverOne(pWal, 1, &nCkpt2, 0); - } - } - } - + memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); + + rc = sqlite3OsFileSize(pWal->pWalFd, &nSize); + if( rc!=SQLITE_OK ){ + goto recovery_error; + } + + if( nSize>WAL_HDRSIZE ){ + u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */ + u32 *aPrivate = 0; /* Heap copy of *-shm hash being populated */ + u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */ + int szFrame; /* Number of bytes in buffer aFrame[] */ + u8 *aData; /* Pointer to data part of aFrame buffer */ + int szPage; /* Page size according to the log */ + u32 magic; /* Magic value read from WAL header */ + u32 version; /* Magic value read from WAL header */ + int isValid; /* True if this frame is valid */ + u32 iPg; /* Current 32KB wal-index page */ + u32 iLastFrame; /* Last frame in wal, based on nSize alone */ + + /* Read in the WAL header. */ + rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0); + if( rc!=SQLITE_OK ){ + goto recovery_error; + } + + /* If the database page size is not a power of two, or is greater than + ** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid + ** data. Similarly, if the 'magic' value is invalid, ignore the whole + ** WAL file. + */ + magic = sqlite3Get4byte(&aBuf[0]); + szPage = sqlite3Get4byte(&aBuf[8]); + if( (magic&0xFFFFFFFE)!=WAL_MAGIC + || szPage&(szPage-1) + || szPage>SQLITE_MAX_PAGE_SIZE + || szPage<512 + ){ + goto finished; + } + pWal->hdr.bigEndCksum = (u8)(magic&0x00000001); + pWal->szPage = szPage; + pWal->nCkpt = sqlite3Get4byte(&aBuf[12]); + memcpy(&pWal->hdr.aSalt, &aBuf[16], 8); + + /* Verify that the WAL header checksum is correct */ + walChecksumBytes(pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN, + aBuf, WAL_HDRSIZE-2*4, 0, pWal->hdr.aFrameCksum + ); + if( pWal->hdr.aFrameCksum[0]!=sqlite3Get4byte(&aBuf[24]) + || pWal->hdr.aFrameCksum[1]!=sqlite3Get4byte(&aBuf[28]) + ){ + goto finished; + } + + /* Verify that the version number on the WAL format is one that + ** are able to understand */ + version = sqlite3Get4byte(&aBuf[4]); + if( version!=WAL_MAX_VERSION ){ + rc = SQLITE_CANTOPEN_BKPT; + goto finished; + } + + /* Malloc a buffer to read frames into. */ + szFrame = szPage + WAL_FRAME_HDRSIZE; + aFrame = (u8 *)sqlite3_malloc64(szFrame + WALINDEX_PGSZ); + if( !aFrame ){ + rc = SQLITE_NOMEM_BKPT; + goto recovery_error; + } + aData = &aFrame[WAL_FRAME_HDRSIZE]; + aPrivate = (u32*)&aData[szPage]; + + /* Read all frames from the log file. 
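The header checks above reject the WAL unless the page size recorded at byte offset 8 is a power of two between 512 and SQLITE_MAX_PAGE_SIZE, and unless the magic value matches once its low bit (which only records the checksum byte order) is masked off. A minimal standalone restatement of the page-size test, with 65536 assumed as the upper bound since SQLITE_MAX_PAGE_SIZE is defined outside the lines shown here:

#include <assert.h>
#include <stdint.h>

/* Assumed upper bound; SQLITE_MAX_PAGE_SIZE is defined elsewhere in the tree. */
#define MAX_PAGE_SIZE 65536

/* A page size is acceptable only if it is a power of two in [512, MAX_PAGE_SIZE].
** (x & (x-1))==0 is the usual power-of-two test for non-zero x. */
static int walPageSizeOk(uint32_t szPage){
  if( szPage<512 || szPage>MAX_PAGE_SIZE ) return 0;
  return (szPage & (szPage-1))==0;
}

int main(void){
  assert(  walPageSizeOk(512) );
  assert(  walPageSizeOk(4096) );
  assert(  walPageSizeOk(65536) );
  assert( !walPageSizeOk(1000) );   /* not a power of two */
  assert( !walPageSizeOk(256) );    /* below the minimum  */
  return 0;
}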
*/ + iLastFrame = (nSize - WAL_HDRSIZE) / szFrame; + for(iPg=0; iPg<=(u32)walFramePage(iLastFrame); iPg++){ + u32 *aShare; + u32 iFrame; /* Index of last frame read */ + u32 iLast = MIN(iLastFrame, HASHTABLE_NPAGE_ONE+iPg*HASHTABLE_NPAGE); + u32 iFirst = 1 + (iPg==0?0:HASHTABLE_NPAGE_ONE+(iPg-1)*HASHTABLE_NPAGE); + u32 nHdr, nHdr32; + rc = walIndexPage(pWal, iPg, (volatile u32**)&aShare); + assert( aShare!=0 || rc!=SQLITE_OK ); + if( aShare==0 ) break; + pWal->apWiData[iPg] = aPrivate; + + for(iFrame=iFirst; iFrame<=iLast; iFrame++){ + i64 iOffset = walFrameOffset(iFrame, szPage); + u32 pgno; /* Database page number for frame */ + u32 nTruncate; /* dbsize field from frame header */ + + /* Read and decode the next log frame. */ + rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset); + if( rc!=SQLITE_OK ) break; + isValid = walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame); + if( !isValid ) break; + rc = walIndexAppend(pWal, iFrame, pgno); + if( NEVER(rc!=SQLITE_OK) ) break; + + /* If nTruncate is non-zero, this is a commit record. */ + if( nTruncate ){ + pWal->hdr.mxFrame = iFrame; + pWal->hdr.nPage = nTruncate; + pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); + testcase( szPage<=32768 ); + testcase( szPage>=65536 ); + aFrameCksum[0] = pWal->hdr.aFrameCksum[0]; + aFrameCksum[1] = pWal->hdr.aFrameCksum[1]; + } + } + pWal->apWiData[iPg] = aShare; + nHdr = (iPg==0 ? WALINDEX_HDR_SIZE : 0); + nHdr32 = nHdr / sizeof(u32); +#ifndef SQLITE_SAFER_WALINDEX_RECOVERY + /* Memcpy() should work fine here, on all reasonable implementations. + ** Technically, memcpy() might change the destination to some + ** intermediate value before setting to the final value, and that might + ** cause a concurrent reader to malfunction. Memcpy() is allowed to + ** do that, according to the spec, but no memcpy() implementation that + ** we know of actually does that, which is why we say that memcpy() + ** is safe for this. Memcpy() is certainly a lot faster. + */ + memcpy(&aShare[nHdr32], &aPrivate[nHdr32], WALINDEX_PGSZ-nHdr); +#else + /* In the event that some platform is found for which memcpy() + ** changes the destination to some intermediate value before + ** setting the final value, this alternative copy routine is + ** provided. 
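The iLastFrame calculation above implies the on-disk layout that the recovery loop depends on: a fixed-size file header followed by frames of (frame header + one page image) each, with frames numbered from 1. The sketch below restates walFrameOffset() under that assumption; the 32- and 24-byte header sizes are the customary values but are defined outside the lines shown in this patch.

#include <stdio.h>
#include <stdint.h>

#define WAL_HDRSIZE        32   /* assumed size of the WAL file header */
#define WAL_FRAME_HDRSIZE  24   /* assumed size of each frame header   */

/* Byte offset of frame iFrame (frames are numbered starting at 1). */
static int64_t frameOffset(uint32_t iFrame, int szPage){
  return WAL_HDRSIZE + (int64_t)(iFrame-1)*(szPage + WAL_FRAME_HDRSIZE);
}

/* Number of complete frames in a WAL file of nSize bytes. */
static uint32_t lastFrame(int64_t nSize, int szPage){
  int szFrame = szPage + WAL_FRAME_HDRSIZE;
  return nSize<=WAL_HDRSIZE ? 0 : (uint32_t)((nSize - WAL_HDRSIZE)/szFrame);
}

int main(void){
  int szPage = 4096;
  printf("frame 1 starts at %lld\n", (long long)frameOffset(1, szPage));
  printf("frame 3 starts at %lld\n", (long long)frameOffset(3, szPage));
  printf("a %d byte wal holds %u frames\n",
         32 + 5*(4096+24), lastFrame(32 + 5*(4096+24), szPage));
  return 0;
}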
+ */ + { + int i; + for(i=nHdr32; iapWalFd[1]) ){ - /* The case where *-wal2 may follow *-wal */ - if( nCkpt2<=0x0F && nCkpt2==nCkpt1+1 ){ - if( pWal->hdr.mxFrame - && sqlite3Get4byte((u8*)(&pWal->hdr.aSalt[0]))==hdr.aFrameCksum[0] - && sqlite3Get4byte((u8*)(&pWal->hdr.aSalt[1]))==hdr.aFrameCksum[1] - ){ - walidxSetFile(&pWal->hdr, 1); - walidxSetMxFrame(&pWal->hdr, 1, pWal->hdr.mxFrame); - walidxSetMxFrame(&pWal->hdr, 0, hdr.mxFrame); - }else{ - pWal->hdr = hdr; - } - }else - - /* When *-wal may follow *-wal2 */ - if( (nCkpt2==0x0F && nCkpt1==0) || (nCkpt2<0x0F && nCkpt2==nCkpt1-1) ){ - if( hdr.mxFrame - && sqlite3Get4byte((u8*)(&hdr.aSalt[0]))==pWal->hdr.aFrameCksum[0] - && sqlite3Get4byte((u8*)(&hdr.aSalt[1]))==pWal->hdr.aFrameCksum[1] - ){ - SWAP(WalIndexHdr, pWal->hdr, hdr); - walidxSetMxFrame(&pWal->hdr, 1, hdr.mxFrame); - }else{ - walidxSetFile(&pWal->hdr, 1); - walidxSetMxFrame(&pWal->hdr, 1, pWal->hdr.mxFrame); - walidxSetMxFrame(&pWal->hdr, 0, 0); - } - }else - - /* Fallback */ - if( nCkpt1<=nCkpt2 ){ - pWal->hdr = hdr; - }else{ - walidxSetFile(&pWal->hdr, 1); - walidxSetMxFrame(&pWal->hdr, 1, pWal->hdr.mxFrame); - walidxSetMxFrame(&pWal->hdr, 0, 0); - } - pWal->hdr.iVersion = WAL_VERSION2; - }else{ - pWal->hdr.iVersion = WAL_VERSION1; - } - + int i; + pWal->hdr.aFrameCksum[0] = aFrameCksum[0]; + pWal->hdr.aFrameCksum[1] = aFrameCksum[1]; walIndexWriteHdr(pWal); /* Reset the checkpoint-header. This is safe because this thread is ** currently holding locks that exclude all other writers and ** checkpointers. Then set the values of read-mark slots 1 through N. */ pInfo = walCkptInfo(pWal); - memset((void*)pInfo, 0, sizeof(WalCkptInfo)); - if( 0==isWalMode2(pWal) ){ - int i; - pInfo->nBackfillAttempted = pWal->hdr.mxFrame; - pInfo->aReadMark[0] = 0; - for(i=1; ihdr.mxFrame ){ - pInfo->aReadMark[i] = pWal->hdr.mxFrame; - }else{ - pInfo->aReadMark[i] = READMARK_NOT_USED; - } - walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); - }else if( rc!=SQLITE_BUSY ){ - break; - } + pInfo->nBackfill = 0; + pInfo->nBackfillAttempted = pWal->hdr.mxFrame; + pInfo->aReadMark[0] = 0; + for(i=1; ihdr.mxFrame ){ + pInfo->aReadMark[i] = pWal->hdr.mxFrame; + }else{ + pInfo->aReadMark[i] = READMARK_NOT_USED; + } + walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); + }else if( rc!=SQLITE_BUSY ){ + goto recovery_error; } } /* If more than one frame was recovered from the log file, report an ** event via sqlite3_log(). This is to help with identifying performance ** problems caused by applications routinely shutting down without - ** checkpointing the log file. */ + ** checkpointing the log file. + */ if( pWal->hdr.nPage ){ - if( isWalMode2(pWal) ){ - sqlite3_log(SQLITE_NOTICE_RECOVER_WAL, - "recovered (%d,%d) frames from WAL files %s[2] (wal2 mode)", - walidxGetMxFrame(&pWal->hdr, 0), walidxGetMxFrame(&pWal->hdr, 1), - pWal->zWalName - ); - }else{ - sqlite3_log(SQLITE_NOTICE_RECOVER_WAL, - "recovered %d frames from WAL file %s", - pWal->hdr.mxFrame, pWal->zWalName - ); - } + sqlite3_log(SQLITE_NOTICE_RECOVER_WAL, + "recovered %d frames from WAL file %s", + pWal->hdr.mxFrame, pWal->zWalName + ); } } +recovery_error: WALTRACE(("WAL%p: recovery %s\n", pWal, rc ? "failed" : "ok")); walUnlockExclusive(pWal, iLock, WAL_READ_LOCK(0)-iLock); return rc; } /* -** Close an open wal-index and wal files. +** Close an open wal-index. 
*/ static void walIndexClose(Wal *pWal, int isDelete){ if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE || pWal->bShmUnreliable ){ int i; for(i=0; inWiData; i++){ @@ -1936,12 +1451,10 @@ } } if( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE ){ sqlite3OsShmUnmap(pWal->pDbFd, isDelete); } - sqlite3OsClose(pWal->apWalFd[0]); - sqlite3OsClose(pWal->apWalFd[1]); } /* ** Open a connection to the WAL file zWalName. The database file must ** already be opened on connection pDbFd. The buffer that zWalName points @@ -1961,17 +1474,15 @@ sqlite3_vfs *pVfs, /* vfs module to open wal and wal-index */ sqlite3_file *pDbFd, /* The open database file */ const char *zWalName, /* Name of the WAL file */ int bNoShm, /* True to run in heap-memory mode */ i64 mxWalSize, /* Truncate WAL to this size on reset */ - int bWal2, /* True to open in wal2 mode */ Wal **ppWal /* OUT: Allocated Wal handle */ ){ int rc; /* Return Code */ Wal *pRet; /* Object to allocate and return */ int flags; /* Flags passed to OsOpen() */ - int nByte; /* Bytes of space to allocate */ assert( zWalName && zWalName[0] ); assert( pDbFd ); /* Verify the values of various constants. Any changes to the values @@ -2016,42 +1527,38 @@ #endif #ifdef UNIX_SHM_BASE assert( UNIX_SHM_BASE==WALINDEX_LOCK_OFFSET ); #endif - nByte = sizeof(Wal) + pVfs->szOsFile*2; /* Allocate an instance of struct Wal to return. */ *ppWal = 0; - pRet = (Wal*)sqlite3MallocZero(nByte); + pRet = (Wal*)sqlite3MallocZero(sizeof(Wal) + pVfs->szOsFile); if( !pRet ){ return SQLITE_NOMEM_BKPT; } pRet->pVfs = pVfs; - pRet->apWalFd[0] = (sqlite3_file*)((char*)pRet+sizeof(Wal)); - pRet->apWalFd[1] = (sqlite3_file*)((char*)pRet+sizeof(Wal)+pVfs->szOsFile); + pRet->pWalFd = (sqlite3_file *)&pRet[1]; pRet->pDbFd = pDbFd; - pRet->readLock = WAL_LOCK_NONE; + pRet->readLock = -1; pRet->mxWalSize = mxWalSize; pRet->zWalName = zWalName; pRet->syncHeader = 1; pRet->padToSectorBoundary = 1; pRet->exclusiveMode = (bNoShm ? WAL_HEAPMEMORY_MODE: WAL_NORMAL_MODE); - sqlite3FastPrngInit(&pRet->sPrng); - pRet->bWal2 = bWal2; - pRet->zWalName2 = &zWalName[sqlite3Strlen30(zWalName)+1]; - /* Open a file handle on the first write-ahead log file. */ + /* Open file handle on the write-ahead log file. */ flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL); - rc = sqlite3OsOpen(pVfs, zWalName, pRet->apWalFd[0], flags, &flags); + rc = sqlite3OsOpen(pVfs, zWalName, pRet->pWalFd, flags, &flags); if( rc==SQLITE_OK && flags&SQLITE_OPEN_READONLY ){ pRet->readOnly = WAL_RDONLY; } if( rc!=SQLITE_OK ){ walIndexClose(pRet, 0); + sqlite3OsClose(pRet->pWalFd); sqlite3_free(pRet); }else{ int iDC = sqlite3OsDeviceCharacteristics(pDbFd); if( iDC & SQLITE_IOCAP_SEQUENTIAL ){ pRet->syncHeader = 0; } if( iDC & SQLITE_IOCAP_POWERSAFE_OVERWRITE ){ @@ -2257,54 +1764,38 @@ sqlite3_free(p); } /* ** Construct a WalInterator object that can be used to loop over all -** pages in wal file iWal following frame nBackfill in ascending order. Frames +** pages in the WAL following frame nBackfill in ascending order. Frames ** nBackfill or earlier may be included - excluding them is an optimization ** only. The caller must hold the checkpoint lock. ** -** On success, make *pp point to the newly allocated WalIterator object -** and return SQLITE_OK. Otherwise, return an error code. If this routine -** returns an error, the final value of *pp is undefined. +** On success, make *pp point to the newly allocated WalInterator object +** return SQLITE_OK. Otherwise, return an error code. 
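The revised sqlite3WalOpen() above makes one zeroed allocation large enough for the Wal object plus the VFS's file object, then points pWalFd just past the struct, so a single free releases both. A generic, self-contained sketch of that single-allocation pattern; the struct names here are invented for illustration and are not part of the patch.

#include <stdlib.h>
#include <string.h>

typedef struct FileHandle FileHandle;   /* stand-in for sqlite3_file   */
typedef struct Log Log;                 /* stand-in for the Wal object */
struct Log {
  FileHandle *pFd;                      /* points into the same allocation */
  int nFrame;
};

/* Allocate a Log and its file handle in a single zeroed block.
** szOsFile plays the role of pVfs->szOsFile. */
static Log *logOpen(size_t szOsFile){
  Log *p = (Log*)malloc(sizeof(Log) + szOsFile);
  if( p==0 ) return 0;
  memset(p, 0, sizeof(Log) + szOsFile);
  p->pFd = (FileHandle*)&p[1];          /* file object lives just past the struct */
  return p;
}

int main(void){
  Log *p = logOpen(128);
  int ok = (p!=0);
  if( p ) free(p);                      /* one call frees struct and file object */
  return ok ? 0 : 1;
}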
If this routine +** returns an error, the value of *pp is undefined. ** ** The calling routine should invoke walIteratorFree() to destroy the ** WalIterator object when it has finished with it. */ -static int walIteratorInit( - Wal *pWal, - int iWal, - u32 nBackfill, - WalIterator **pp -){ +static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ WalIterator *p; /* Return value */ int nSegment; /* Number of segments to merge */ u32 iLast; /* Last frame in log */ sqlite3_int64 nByte; /* Number of bytes to allocate */ int i; /* Iterator variable */ - int iLastSeg; /* Last hash table to iterate though */ ht_slot *aTmp; /* Temp space used by merge-sort */ int rc = SQLITE_OK; /* Return Code */ - int iMode = isWalMode2(pWal) ? 2 : 1; - - assert( isWalMode2(pWal) || iWal==0 ); - assert( 0==isWalMode2(pWal) || nBackfill==0 ); /* This routine only runs while holding the checkpoint lock. And ** it only runs if there is actually content in the log (mxFrame>0). */ - iLast = walidxGetMxFrame(&pWal->hdr, iWal); - assert( pWal->ckptLock && iLast>0 ); - - if( iMode==2 ){ - iLastSeg = walFramePage2(iWal, iLast); - }else{ - iLastSeg = walFramePage(iLast); - } - nSegment = 1 + (iLastSeg/iMode); + assert( pWal->ckptLock && pWal->hdr.mxFrame>0 ); + iLast = pWal->hdr.mxFrame; /* Allocate space for the WalIterator object. */ + nSegment = walFramePage(iLast) + 1; nByte = sizeof(WalIterator) + (nSegment-1)*sizeof(struct WalSegment) + iLast*sizeof(ht_slot); p = (WalIterator *)sqlite3_malloc64(nByte); if( !p ){ @@ -2321,45 +1812,35 @@ ); if( !aTmp ){ rc = SQLITE_NOMEM_BKPT; } - i = iMode==2 ? iWal : walFramePage(nBackfill+1); - for(; rc==SQLITE_OK && i<=iLastSeg; i+=iMode){ + for(i=walFramePage(nBackfill+1); rc==SQLITE_OK && i=2 ); - }else{ - iZero = sLoc.iZero; - } - - if( i==iLastSeg ){ - nEntry = (int)(iLast - iZero); + + if( (i+1)==nSegment ){ + nEntry = (int)(iLast - sLoc.iZero); }else{ nEntry = (int)((u32*)sLoc.aHash - (u32*)sLoc.aPgno); } - aIndex = &((ht_slot *)&p->aSegment[p->nSegment])[iZero]; - iZero++; + aIndex = &((ht_slot *)&p->aSegment[p->nSegment])[sLoc.iZero]; + sLoc.iZero++; for(j=0; jaSegment[i/iMode].iZero = iZero; - p->aSegment[i/iMode].nEntry = nEntry; - p->aSegment[i/iMode].aIndex = aIndex; - p->aSegment[i/iMode].aPgno = (u32*)sLoc.aPgno; + walMergesort((u32 *)sLoc.aPgno, aTmp, aIndex, &nEntry); + p->aSegment[i].iZero = sLoc.iZero; + p->aSegment[i].nEntry = nEntry; + p->aSegment[i].aIndex = aIndex; + p->aSegment[i].aPgno = (u32 *)sLoc.aPgno; } } sqlite3_free(aTmp); if( rc!=SQLITE_OK ){ @@ -2506,11 +1987,10 @@ */ static void walRestartHdr(Wal *pWal, u32 salt1){ volatile WalCkptInfo *pInfo = walCkptInfo(pWal); int i; /* Loop counter */ u32 *aSalt = pWal->hdr.aSalt; /* Big-endian salt values */ - assert( isWalMode2(pWal)==0 ); pWal->nCkpt++; pWal->hdr.mxFrame = 0; sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0])); memcpy(&pWal->hdr.aSalt[1], &salt1, 4); walIndexWriteHdr(pWal); @@ -2519,72 +1999,10 @@ pInfo->aReadMark[1] = 0; for(i=2; iaReadMark[i] = READMARK_NOT_USED; assert( pInfo->aReadMark[0]==0 ); } -/* -** This function is used in wal2 mode. -** -** This function is called when writer pWal is just about to start -** writing out frames. Parameter iApp is the current wal file. The "other" wal -** file (wal file !iApp) has been fully checkpointed. This function returns -** SQLITE_OK if there are no readers preventing the writer from switching to -** the other wal file. Or SQLITE_BUSY if there are. 
-*/ -static int wal2RestartOk(Wal *pWal, int iApp){ - /* The other wal file (wal file !iApp) can be overwritten if there - ** are no readers reading from it - no "full" or "partial" locks. - ** Technically speaking it is not possible for any reader to hold - ** a "part" lock, as this would have prevented the file from being - ** checkpointed. But checking anyway doesn't hurt. The following - ** is equivalent to: - ** - ** if( iApp==0 ) eLock = WAL_LOCK_PART1_FULL2; - ** if( iApp==1 ) eLock = WAL_LOCK_PART1; - */ - int eLock = 1 + (iApp==0); - - assert( WAL_LOCK_PART1==1 ); - assert( WAL_LOCK_PART1_FULL2==2 ); - assert( WAL_LOCK_PART2_FULL1==3 ); - assert( WAL_LOCK_PART2==4 ); - - assert( iApp!=0 || eLock==WAL_LOCK_PART1_FULL2 ); - assert( iApp!=1 || eLock==WAL_LOCK_PART1 ); - - return walLockExclusive(pWal, WAL_READ_LOCK(eLock), 3); -} -static void wal2RestartFinished(Wal *pWal, int iApp){ - walUnlockExclusive(pWal, WAL_READ_LOCK(1 + (iApp==0)), 3); -} - -/* -** This function is used in wal2 mode. -** -** This function is called when a checkpointer wishes to checkpoint wal -** file iCkpt. It takes the required lock and, if successful, returns -** SQLITE_OK. Otherwise, an SQLite error code (e.g. SQLITE_BUSY). If this -** function returns SQLITE_OK, it is the responsibility of the caller -** to invoke wal2CheckpointFinished() to release the lock. -*/ -static int wal2CheckpointOk(Wal *pWal, int iCkpt){ - int eLock = 1 + (iCkpt*2); - - assert( WAL_LOCK_PART1==1 ); - assert( WAL_LOCK_PART1_FULL2==2 ); - assert( WAL_LOCK_PART2_FULL1==3 ); - assert( WAL_LOCK_PART2==4 ); - - assert( iCkpt!=0 || eLock==WAL_LOCK_PART1 ); - assert( iCkpt!=1 || eLock==WAL_LOCK_PART2_FULL1 ); - - return walLockExclusive(pWal, WAL_READ_LOCK(eLock), 2); -} -static void wal2CheckpointFinished(Wal *pWal, int iCkpt){ - walUnlockExclusive(pWal, WAL_READ_LOCK(1 + (iCkpt*2)), 2); -} - /* ** Copy as much content as we can from the WAL back into the database file ** in response to an sqlite3_wal_checkpoint() request or the equivalent. ** ** The amount of information copies from WAL to database might be limited @@ -2630,174 +2048,144 @@ u32 iFrame = 0; /* Wal frame containing data for iDbpage */ u32 mxSafeFrame; /* Max frame that can be backfilled */ u32 mxPage; /* Max database page to write */ int i; /* Loop counter */ volatile WalCkptInfo *pInfo; /* The checkpoint status information */ - int bWal2 = isWalMode2(pWal); /* True for wal2 connections */ - int iCkpt = bWal2 ? !walidxGetFile(&pWal->hdr) : 0; - mxSafeFrame = walidxGetMxFrame(&pWal->hdr, iCkpt); szPage = walPagesize(pWal); testcase( szPage<=32768 ); testcase( szPage>=65536 ); pInfo = walCkptInfo(pWal); - if( (bWal2==1 && pInfo->nBackfill==0 && mxSafeFrame) - || (bWal2==0 && pInfo->nBackfillapWalFd[iCkpt]; - mxPage = pWal->hdr.nPage; - - /* If this is a wal2 system, check for a reader holding a lock - ** preventing this checkpoint operation. If one is found, return - ** early. */ - if( bWal2 ){ - rc = wal2CheckpointOk(pWal, iCkpt); - if( rc!=SQLITE_OK ) return rc; - } + if( pInfo->nBackfillhdr.mxFrame ){ /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked ** in the SQLITE_CHECKPOINT_PASSIVE mode. */ assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); - /* If this is a wal system (not wal2), compute in mxSafeFrame the index - ** of the last frame of the WAL that is safe to write into the database. - ** Frames beyond mxSafeFrame might overwrite database pages that are in - ** use by active readers and thus cannot be backfilled from the WAL. 
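The comment above, and the loop that follows it in the rewritten wal-mode-only code, describe how mxSafeFrame is clamped to the oldest frame still needed by an active reader: whenever a read-mark slot cannot be locked exclusively, a reader is using it, so no frame past that mark may be backfilled. A simplified standalone model of that clamping, where the aBusy[] array stands in for failing to obtain WAL_READ_LOCK(i):

#include <stdio.h>
#include <stdint.h>

#define NREADER 5

/* Return the highest frame that may safely be copied into the database, given
** each slot's read mark and whether the slot's lock is held by a reader. */
static uint32_t safeFrame(uint32_t mxFrame, const uint32_t aReadMark[NREADER],
                          const int aBusy[NREADER]){
  uint32_t mxSafe = mxFrame;
  for(int i=1; i<NREADER; i++){
    uint32_t y = aReadMark[i];
    if( mxSafe>y && aBusy[i] ){
      /* An active reader may still need frames y+1..mxSafe: back off. */
      mxSafe = y;
    }
    /* When the slot is not busy the real code instead takes the lock and
    ** advances the mark, so that slot does not constrain mxSafe. */
  }
  return mxSafe;
}

int main(void){
  uint32_t aMark[NREADER] = {0, 90, 40, 100, 0};
  int aBusy[NREADER]      = {0,  0,  1,   1, 0};
  printf("mxSafeFrame = %u\n", safeFrame(100, aMark, aBusy));  /* prints 40 */
  return 0;
}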
+ /* Compute in mxSafeFrame the index of the last frame of the WAL that is + ** safe to write into the database. Frames beyond mxSafeFrame might + ** overwrite database pages that are in use by active readers and thus + ** cannot be backfilled from the WAL. */ - if( bWal2==0 ){ - mxSafeFrame = pWal->hdr.mxFrame; - mxPage = pWal->hdr.nPage; - for(i=1; iaReadMark+i); - if( mxSafeFrame>y ){ - assert( y<=pWal->hdr.mxFrame ); - rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1); - if( rc==SQLITE_OK ){ - u32 iMark = (i==1 ? mxSafeFrame : READMARK_NOT_USED); - AtomicStore(pInfo->aReadMark+i, iMark); - walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); - }else if( rc==SQLITE_BUSY ){ - mxSafeFrame = y; - xBusy = 0; - }else{ - goto walcheckpoint_out; - } + mxSafeFrame = pWal->hdr.mxFrame; + mxPage = pWal->hdr.nPage; + for(i=1; iaReadMark+i); + if( mxSafeFrame>y ){ + assert( y<=pWal->hdr.mxFrame ); + rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1); + if( rc==SQLITE_OK ){ + u32 iMark = (i==1 ? mxSafeFrame : READMARK_NOT_USED); + AtomicStore(pInfo->aReadMark+i, iMark); + walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); + }else if( rc==SQLITE_BUSY ){ + mxSafeFrame = y; + xBusy = 0; + }else{ + goto walcheckpoint_out; } } } /* Allocate the iterator */ - if( bWal2 || pInfo->nBackfillnBackfill==0 ); - rc = walIteratorInit(pWal, iCkpt, pInfo->nBackfill, &pIter); + if( pInfo->nBackfillnBackfill, &pIter); assert( rc==SQLITE_OK || pIter==0 ); } - if( pIter && (bWal2 - || (rc = walBusyLock(pWal, xBusy, pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK - )){ + if( pIter + && (rc = walBusyLock(pWal,xBusy,pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK + ){ u32 nBackfill = pInfo->nBackfill; - assert( bWal2==0 || nBackfill==0 ); pInfo->nBackfillAttempted = mxSafeFrame; - /* Sync the wal file being checkpointed to disk */ - rc = sqlite3OsSync(pWalFd, CKPT_SYNC_FLAGS(sync_flags)); + /* Sync the WAL to disk */ + rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags)); /* If the database may grow as a result of this checkpoint, hint - ** about the eventual size of the db file to the VFS layer. */ + ** about the eventual size of the db file to the VFS layer. + */ if( rc==SQLITE_OK ){ i64 nReq = ((i64)mxPage * szPage); i64 nSize; /* Current size of database file */ sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_START, 0); rc = sqlite3OsFileSize(pWal->pDbFd, &nSize); if( rc==SQLITE_OK && nSizehdr.mxFrame + (bWal2?walidxGetMxFrame(&pWal->hdr,1):0); - if( (nSize+65536+mx*szPage)hdr.mxFrame*szPage)pDbFd, SQLITE_FCNTL_SIZE_HINT,&nReq); } } + } /* Iterate through the contents of the WAL, copying data to the db file */ while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ i64 iOffset; - - assert( bWal2==1 || walFramePgno(pWal, iFrame)==iDbpage ); - assert( bWal2==0 || walFramePgno2(pWal, iCkpt, iFrame)==iDbpage ); - + assert( walFramePgno(pWal, iFrame)==iDbpage ); if( AtomicLoad(&db->u1.isInterrupted) ){ rc = db->mallocFailed ? 
SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; break; } if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){ - assert( bWal2==0 || iDbpage>mxPage ); continue; } iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE; - WALTRACE(("WAL%p: checkpoint frame %d of wal %d to db page %d\n", - pWal, (int)iFrame, iCkpt, (int)iDbpage - )); /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */ - rc = sqlite3OsRead(pWalFd, zBuf, szPage, iOffset); + rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset); if( rc!=SQLITE_OK ) break; iOffset = (iDbpage-1)*(i64)szPage; testcase( IS_BIG_INT(iOffset) ); rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset); if( rc!=SQLITE_OK ) break; } sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_DONE, 0); - /* If work was actually accomplished, truncate the db file, sync the wal - ** file and set WalCkptInfo.nBackfill to indicate so. */ - if( rc==SQLITE_OK && (bWal2 || mxSafeFrame==walIndexHdr(pWal)->mxFrame) ){ - if( !bWal2 ){ + /* If work was actually accomplished... */ + if( rc==SQLITE_OK ){ + if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){ i64 szDb = pWal->hdr.nPage*(i64)szPage; testcase( IS_BIG_INT(szDb) ); rc = sqlite3OsTruncate(pWal->pDbFd, szDb); + if( rc==SQLITE_OK ){ + rc = sqlite3OsSync(pWal->pDbFd, CKPT_SYNC_FLAGS(sync_flags)); + } } if( rc==SQLITE_OK ){ - rc = sqlite3OsSync(pWal->pDbFd, CKPT_SYNC_FLAGS(sync_flags)); + AtomicStore(&pInfo->nBackfill, mxSafeFrame); } } - if( rc==SQLITE_OK ){ - AtomicStore(&pInfo->nBackfill, (bWal2 ? 1 : mxSafeFrame)); - } /* Release the reader lock held while backfilling */ - if( bWal2==0 ){ - walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1); - } + walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1); } if( rc==SQLITE_BUSY ){ /* Reset the return code so as not to report a checkpoint failure ** just because there are active readers. */ rc = SQLITE_OK; } - if( bWal2 ) wal2CheckpointFinished(pWal, iCkpt); } /* If this is an SQLITE_CHECKPOINT_RESTART or TRUNCATE operation, and the ** entire wal file has been copied into the database file, then block ** until all readers have finished using the wal file. This ensures that ** the next process to write to the database restarts the wal file. */ - if( bWal2==0 && rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){ + if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){ assert( pWal->writeLock ); if( pInfo->nBackfillhdr.mxFrame ){ rc = SQLITE_BUSY; }else if( eMode>=SQLITE_CHECKPOINT_RESTART ){ u32 salt1; - sqlite3FastRandomness(&pWal->sPrng, 4, &salt1); + sqlite3_randomness(4, &salt1); assert( pInfo->nBackfill==pWal->hdr.mxFrame ); rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(1), WAL_NREADER-1); if( rc==SQLITE_OK ){ if( eMode==SQLITE_CHECKPOINT_TRUNCATE ){ /* IMPLEMENTATION-OF: R-44699-57140 This mode works the same way as @@ -2812,11 +2200,11 @@ ** as it would leave the system in a state where the contents of ** the wal-index header do not match the contents of the ** file-system. To avoid this, update the wal-index header to ** indicate that the log file contains zero valid frames. */ walRestartHdr(pWal, salt1); - rc = sqlite3OsTruncate(pWal->apWalFd[0], 0); + rc = sqlite3OsTruncate(pWal->pWalFd, 0); } walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); } } } @@ -2829,22 +2217,20 @@ /* ** If the WAL file is currently larger than nMax bytes in size, truncate ** it to exactly nMax bytes. If an error occurs while doing so, ignore it. 
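The backfill loop above reads each frame's page image from just past the frame header in the WAL and writes it to the page's natural position in the database file. The two offset computations, restated with the same assumed header sizes as the earlier frame-offset sketch:

#include <stdio.h>
#include <stdint.h>

#define WAL_HDRSIZE        32   /* assumed */
#define WAL_FRAME_HDRSIZE  24   /* assumed */

int main(void){
  int      szPage  = 4096;
  uint32_t iFrame  = 3;         /* frame being backfilled            */
  uint32_t iDbpage = 7;         /* database page stored in the frame */

  /* Source: page image inside the WAL, just after the frame header. */
  int64_t iWalOff = WAL_HDRSIZE
                  + (int64_t)(iFrame-1)*(szPage + WAL_FRAME_HDRSIZE)
                  + WAL_FRAME_HDRSIZE;

  /* Destination: pages are stored back to back in the database file. */
  int64_t iDbOff = (int64_t)(iDbpage-1)*szPage;

  printf("copy %d bytes from wal offset %lld to db offset %lld\n",
         szPage, (long long)iWalOff, (long long)iDbOff);
  return 0;
}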
*/ static void walLimitSize(Wal *pWal, i64 nMax){ - if( isWalMode2(pWal)==0 ){ - i64 sz; - int rx; - sqlite3BeginBenignMalloc(); - rx = sqlite3OsFileSize(pWal->apWalFd[0], &sz); - if( rx==SQLITE_OK && (sz > nMax ) ){ - rx = sqlite3OsTruncate(pWal->apWalFd[0], nMax); - } - sqlite3EndBenignMalloc(); - if( rx ){ - sqlite3_log(rx, "cannot limit WAL size: %s", pWal->zWalName); - } + i64 sz; + int rx; + sqlite3BeginBenignMalloc(); + rx = sqlite3OsFileSize(pWal->pWalFd, &sz); + if( rx==SQLITE_OK && (sz > nMax ) ){ + rx = sqlite3OsTruncate(pWal->pWalFd, nMax); + } + sqlite3EndBenignMalloc(); + if( rx ){ + sqlite3_log(rx, "cannot limit WAL size: %s", pWal->zWalName); } } /* ** Close a connection to a log file. @@ -2857,11 +2243,10 @@ u8 *zBuf /* Buffer of at least nBuf bytes */ ){ int rc = SQLITE_OK; if( pWal ){ int isDelete = 0; /* True to unlink wal and wal-index files */ - pWal->bClosing = 1; /* If an EXCLUSIVE lock can be obtained on the database file (using the ** ordinary, rollback-mode locking methods, this guarantees that the ** connection associated with this log file is the only connection to ** the database. In this case checkpoint the database and unlink both @@ -2870,106 +2255,52 @@ ** The EXCLUSIVE lock is not released before returning. */ if( zBuf!=0 && SQLITE_OK==(rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE)) ){ - int i; if( pWal->exclusiveMode==WAL_NORMAL_MODE ){ pWal->exclusiveMode = WAL_EXCLUSIVE_MODE; } - for(i=0; rc==SQLITE_OK && i<2; i++){ - rc = sqlite3WalCheckpoint(pWal, db, - SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0 - ); - if( rc==SQLITE_OK ){ - int bPersist = -1; - sqlite3OsFileControlHint( - pWal->pDbFd, SQLITE_FCNTL_PERSIST_WAL, &bPersist - ); - if( bPersist!=1 ){ - /* Try to delete the WAL file if the checkpoint completed and - ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal - ** mode (!bPersist) */ - isDelete = 1; - }else if( pWal->mxWalSize>=0 ){ - /* Try to truncate the WAL file to zero bytes if the checkpoint - ** completed and fsynced (rc==SQLITE_OK) and we are in persistent - ** WAL mode (bPersist) and if the PRAGMA journal_size_limit is a - ** non-negative value (pWal->mxWalSize>=0). Note that we truncate - ** to zero bytes as truncating to the journal_size_limit might - ** leave a corrupt WAL file on disk. */ - walLimitSize(pWal, 0); - } - } - - if( isWalMode2(pWal)==0 ) break; - - walCkptInfo(pWal)->nBackfill = 0; - walidxSetFile(&pWal->hdr, !walidxGetFile(&pWal->hdr)); - pWal->writeLock = 1; - walIndexWriteHdr(pWal); - pWal->writeLock = 0; + rc = sqlite3WalCheckpoint(pWal, db, + SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0 + ); + if( rc==SQLITE_OK ){ + int bPersist = -1; + sqlite3OsFileControlHint( + pWal->pDbFd, SQLITE_FCNTL_PERSIST_WAL, &bPersist + ); + if( bPersist!=1 ){ + /* Try to delete the WAL file if the checkpoint completed and + ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal + ** mode (!bPersist) */ + isDelete = 1; + }else if( pWal->mxWalSize>=0 ){ + /* Try to truncate the WAL file to zero bytes if the checkpoint + ** completed and fsynced (rc==SQLITE_OK) and we are in persistent + ** WAL mode (bPersist) and if the PRAGMA journal_size_limit is a + ** non-negative value (pWal->mxWalSize>=0). Note that we truncate + ** to zero bytes as truncating to the journal_size_limit might + ** leave a corrupt WAL file on disk. 
*/ + walLimitSize(pWal, 0); + } } } walIndexClose(pWal, isDelete); + sqlite3OsClose(pWal->pWalFd); if( isDelete ){ sqlite3BeginBenignMalloc(); sqlite3OsDelete(pWal->pVfs, pWal->zWalName, 0); - sqlite3OsDelete(pWal->pVfs, pWal->zWalName2, 0); sqlite3EndBenignMalloc(); } WALTRACE(("WAL%p: closed\n", pWal)); sqlite3_free((void *)pWal->apWiData); sqlite3_free(pWal); } return rc; } -/* -** Try to copy the wal-index header from shared-memory into (*pHdr). Return -** zero if successful or non-zero otherwise. If the header is corrupted -** (either because the two copies are inconsistent or because the checksum -** values are incorrect), the read fails and non-zero is returned. -*/ -static int walIndexLoadHdr(Wal *pWal, WalIndexHdr *pHdr){ - u32 aCksum[2]; /* Checksum on the header content */ - WalIndexHdr h2; /* Second copy of the header content */ - WalIndexHdr volatile *aHdr; /* Header in shared memory */ - - /* The first page of the wal-index must be mapped at this point. */ - assert( pWal->nWiData>0 && pWal->apWiData[0] ); - - /* Read the header. This might happen concurrently with a write to the - ** same area of shared memory on a different CPU in a SMP, - ** meaning it is possible that an inconsistent snapshot is read - ** from the file. If this happens, return non-zero. - ** - ** There are two copies of the header at the beginning of the wal-index. - ** When reading, read [0] first then [1]. Writes are in the reverse order. - ** Memory barriers are used to prevent the compiler or the hardware from - ** reordering the reads and writes. - */ - aHdr = walIndexHdr(pWal); - memcpy(pHdr, (void *)&aHdr[0], sizeof(h2)); - walShmBarrier(pWal); - memcpy(&h2, (void *)&aHdr[1], sizeof(h2)); - - if( memcmp(&h2, pHdr, sizeof(h2))!=0 ){ - return 1; /* Dirty read */ - } - if( h2.isInit==0 ){ - return 1; /* Malformed header - probably all zeros */ - } - walChecksumBytes(1, (u8*)&h2, sizeof(h2)-sizeof(h2.aCksum), 0, aCksum); - if( aCksum[0]!=h2.aCksum[0] || aCksum[1]!=h2.aCksum[1] ){ - return 1; /* Checksum does not match */ - } - - return 0; -} - /* ** Try to read the wal-index header. Return 0 on success and 1 if ** there is a problem. ** ** The wal-index is in shared memory. Another thread or process might @@ -2984,14 +2315,47 @@ ** ** If the checksum cannot be verified return non-zero. If the header ** is read successfully and the checksum verified, return zero. */ static SQLITE_NO_TSAN int walIndexTryHdr(Wal *pWal, int *pChanged){ - WalIndexHdr h1; /* Copy of the header content */ + u32 aCksum[2]; /* Checksum on the header content */ + WalIndexHdr h1, h2; /* Two copies of the header content */ + WalIndexHdr volatile *aHdr; /* Header in shared memory */ - if( walIndexLoadHdr(pWal, &h1) ){ - return 1; + /* The first page of the wal-index must be mapped at this point. */ + assert( pWal->nWiData>0 && pWal->apWiData[0] ); + + /* Read the header. This might happen concurrently with a write to the + ** same area of shared memory on a different CPU in a SMP, + ** meaning it is possible that an inconsistent snapshot is read + ** from the file. If this happens, return non-zero. + ** + ** tag-20200519-1: + ** There are two copies of the header at the beginning of the wal-index. + ** When reading, read [0] first then [1]. Writes are in the reverse order. + ** Memory barriers are used to prevent the compiler or the hardware from + ** reordering the reads and writes. 
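The close path above chooses between unlinking the WAL and truncating it to zero bytes, based on the SQLITE_FCNTL_PERSIST_WAL hint and the journal size limit, and only after the final passive checkpoint has succeeded. That decision, reduced to a small helper; the enum names are invented for this sketch.

#include <stdio.h>

typedef enum { WAL_KEEP, WAL_DELETE, WAL_TRUNCATE_ZERO } WalCloseAction;

/* bPersist: value reported by the PERSIST_WAL hint, or -1 if unknown.
** mxWalSize: the journal_size_limit; negative means "no limit configured". */
static WalCloseAction walCloseAction(int bPersist, long long mxWalSize){
  if( bPersist!=1 ) return WAL_DELETE;          /* not persistent: unlink the file */
  if( mxWalSize>=0 ) return WAL_TRUNCATE_ZERO;  /* persistent + limit: shrink to 0 */
  return WAL_KEEP;                              /* persistent, no limit: leave it  */
}

int main(void){
  printf("%d %d %d\n",
    walCloseAction(0, -1),    /* WAL_DELETE        */
    walCloseAction(1, 1000),  /* WAL_TRUNCATE_ZERO */
    walCloseAction(1, -1));   /* WAL_KEEP          */
  return 0;
}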
TSAN and similar tools can sometimes + ** give false-positive warnings about these accesses because the tools do not + ** account for the double-read and the memory barrier. The use of mutexes + ** here would be problematic as the memory being accessed is potentially + ** shared among multiple processes and not all mutex implementions work + ** reliably in that environment. + */ + aHdr = walIndexHdr(pWal); + memcpy(&h1, (void *)&aHdr[0], sizeof(h1)); /* Possible TSAN false-positive */ + walShmBarrier(pWal); + memcpy(&h2, (void *)&aHdr[1], sizeof(h2)); + + if( memcmp(&h1, &h2, sizeof(h1))!=0 ){ + return 1; /* Dirty read */ + } + if( h1.isInit==0 ){ + return 1; /* Malformed header - probably all zeros */ + } + walChecksumBytes(1, (u8*)&h1, sizeof(h1)-sizeof(h1.aCksum), 0, aCksum); + if( aCksum[0]!=h1.aCksum[0] || aCksum[1]!=h1.aCksum[1] ){ + return 1; /* Checksum does not match */ } if( memcmp(&pWal->hdr, &h1, sizeof(WalIndexHdr)) ){ *pChanged = 1; memcpy(&pWal->hdr, &h1, sizeof(WalIndexHdr)); @@ -3098,13 +2462,11 @@ /* If the header is read successfully, check the version number to make ** sure the wal-index was not constructed with some future format that ** this version of SQLite cannot understand. */ - if( badHdr==0 - && pWal->hdr.iVersion!=WAL_VERSION1 && pWal->hdr.iVersion!=WAL_VERSION2 - ){ + if( badHdr==0 && pWal->hdr.iVersion!=WALINDEX_MAX_VERSION ){ rc = SQLITE_CANTOPEN_BKPT; } if( pWal->bShmUnreliable ){ if( rc!=SQLITE_OK ){ walIndexClose(pWal, 0); @@ -3192,11 +2554,11 @@ ** returned any SQLITE_READONLY value, it must return only SQLITE_READONLY ** or SQLITE_READONLY_CANTINIT or some error for all subsequent invocations, ** even if some external agent does a "chmod" to make the shared-memory ** writable by us, until sqlite3OsShmUnmap() has been called. ** This is a requirement on the VFS implementation. - */ + */ rc = sqlite3OsShmMap(pWal->pDbFd, 0, WALINDEX_PGSZ, 0, &pDummy); assert( rc!=SQLITE_OK ); /* SQLITE_OK not possible for read-only connection */ if( rc!=SQLITE_READONLY_CANTINIT ){ rc = (rc==SQLITE_READONLY ? WAL_RETRY : rc); goto begin_unreliable_shm_out; @@ -3209,11 +2571,11 @@ memcpy(&pWal->hdr, (void*)walIndexHdr(pWal), sizeof(WalIndexHdr)); /* Make sure some writer hasn't come in and changed the WAL file out ** from under us, then disconnected, while we were not looking. */ - rc = sqlite3OsFileSize(pWal->apWalFd[0], &szWal); + rc = sqlite3OsFileSize(pWal->pWalFd, &szWal); if( rc!=SQLITE_OK ){ goto begin_unreliable_shm_out; } if( szWalhdr.mxFrame==0 ? SQLITE_OK : WAL_RETRY); goto begin_unreliable_shm_out; } /* Check the salt keys at the start of the wal file still match. */ - rc = sqlite3OsRead(pWal->apWalFd[0], aBuf, WAL_HDRSIZE, 0); + rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0); if( rc!=SQLITE_OK ){ goto begin_unreliable_shm_out; } if( memcmp(&pWal->hdr.aSalt, &aBuf[16], 8) ){ /* Some writer has wrapped the WAL file while we were not looking. @@ -3263,11 +2625,11 @@ ){ u32 pgno; /* Database page number for frame */ u32 nTruncate; /* dbsize field from frame header */ /* Read and decode the next log frame. */ - rc = sqlite3OsRead(pWal->apWalFd[0], aFrame, szFrame, iOffset); + rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset); if( rc!=SQLITE_OK ) break; if( !walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame) ) break; /* If nTruncate is non-zero, then a complete transaction has been ** appended to this wal file. 
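walIndexTryHdr() above guards against torn reads by reading both copies of the shared header around a memory barrier, then rejecting the result if the copies differ, if isInit is clear, or if the checksum over the header body does not match. A compressed model of that pattern, with a trivial additive checksum standing in for walChecksumBytes() and C11 atomic_thread_fence() standing in for walShmBarrier():

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct Hdr {
  uint32_t isInit;
  uint32_t mxFrame;
  uint32_t aCksum[2];
} Hdr;

/* Toy checksum over everything before aCksum; the real code uses walChecksumBytes(). */
static void hdrCksum(const Hdr *p, uint32_t aOut[2]){
  const uint8_t *a = (const uint8_t*)p;
  uint32_t s0 = 0, s1 = 0;
  for(size_t i=0; i<offsetof(Hdr, aCksum); i++){ s0 += a[i]; s1 += s0; }
  aOut[0] = s0; aOut[1] = s1;
}

/* Read the two shared copies of the header. Return 0 and fill *pOut on a clean
** read; return 1 if the read may be torn or the header is not yet valid. */
static int tryReadHdr(const volatile Hdr aShared[2], Hdr *pOut){
  Hdr h1, h2;
  uint32_t aCk[2];
  memcpy(&h1, (const void*)&aShared[0], sizeof(h1));
  atomic_thread_fence(memory_order_acquire);        /* stand-in for walShmBarrier() */
  memcpy(&h2, (const void*)&aShared[1], sizeof(h2));
  if( memcmp(&h1, &h2, sizeof(h1))!=0 ) return 1;   /* copies differ: dirty read */
  if( h1.isInit==0 ) return 1;                      /* header never written      */
  hdrCksum(&h1, aCk);
  if( aCk[0]!=h1.aCksum[0] || aCk[1]!=h1.aCksum[1] ) return 1;
  *pOut = h1;
  return 0;
}

int main(void){
  volatile Hdr aShared[2];
  Hdr h = { 1, 42, {0,0} };
  Hdr out;
  hdrCksum(&h, h.aCksum);
  memcpy((void*)&aShared[1], &h, sizeof(h));   /* writers update copy [1] first */
  memcpy((void*)&aShared[0], &h, sizeof(h));
  return tryReadHdr(aShared, &out);            /* returns 0: clean read */
}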
Set rc to WAL_RETRY and break out of @@ -3345,13 +2707,17 @@ ** so it takes care to hold an exclusive lock on the corresponding ** WAL_READ_LOCK() while changing values. */ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ + u32 mxReadMark; /* Largest aReadMark[] value */ + int mxI; /* Index of largest aReadMark[] value */ + int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ + u32 mxFrame; /* Wal frame to lock to */ - assert( pWal->readLock==WAL_LOCK_NONE ); /* Not currently locked */ + assert( pWal->readLock<0 ); /* Not currently locked */ /* useWal may only be set for read/write connections */ assert( (pWal->readOnly & WAL_SHM_RDONLY)==0 || useWal==0 ); /* Take steps to avoid spinning forever if there is a protocol error. @@ -3420,168 +2786,135 @@ } assert( pWal->nWiData>0 ); assert( pWal->apWiData[0]!=0 ); pInfo = walCkptInfo(pWal); - if( isWalMode2(pWal) ){ - /* This connection needs a "part" lock on the current wal file and, - ** unless pInfo->nBackfill is set to indicate that it has already been - ** checkpointed, a "full" lock on the other wal file. */ - int iWal = walidxGetFile(&pWal->hdr); - int nBackfill = pInfo->nBackfill || walidxGetMxFrame(&pWal->hdr, !iWal)==0; - int eLock = 1 + (iWal*2) + (nBackfill==iWal); - - assert( nBackfill==0 || nBackfill==1 ); - assert( iWal==0 || iWal==1 ); - assert( iWal!=0 || nBackfill!=1 || eLock==WAL_LOCK_PART1 ); - assert( iWal!=0 || nBackfill!=0 || eLock==WAL_LOCK_PART1_FULL2 ); - assert( iWal!=1 || nBackfill!=1 || eLock==WAL_LOCK_PART2 ); - assert( iWal!=1 || nBackfill!=0 || eLock==WAL_LOCK_PART2_FULL1 ); - - rc = walLockShared(pWal, WAL_READ_LOCK(eLock)); - if( rc!=SQLITE_OK ){ - return (rc==SQLITE_BUSY ? WAL_RETRY : rc); - } - walShmBarrier(pWal); - if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ - walUnlockShared(pWal, WAL_READ_LOCK(eLock)); - return WAL_RETRY; - }else{ - pWal->readLock = eLock; - } - assert( pWal->minFrame==0 && walFramePage(pWal->minFrame)==0 ); - }else{ - u32 mxReadMark; /* Largest aReadMark[] value */ - int mxI; /* Index of largest aReadMark[] value */ - int i; /* Loop counter */ - u32 mxFrame; /* Wal frame to lock to */ - if( !useWal && pInfo->nBackfill==pWal->hdr.mxFrame - #ifdef SQLITE_ENABLE_SNAPSHOT - && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0) - #endif - ){ - /* The WAL has been completely backfilled (or it is empty). - ** and can be safely ignored. - */ - rc = walLockShared(pWal, WAL_READ_LOCK(0)); - walShmBarrier(pWal); - if( rc==SQLITE_OK ){ - if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr,sizeof(WalIndexHdr)) ){ - /* It is not safe to allow the reader to continue here if frames - ** may have been appended to the log before READ_LOCK(0) was obtained. - ** When holding READ_LOCK(0), the reader ignores the entire log file, - ** which implies that the database file contains a trustworthy - ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from - ** happening, this is usually correct. - ** - ** However, if frames have been appended to the log (or if the log - ** is wrapped and written for that matter) before the READ_LOCK(0) - ** is obtained, that is not necessarily true. A checkpointer may - ** have started to backfill the appended frames but crashed before - ** it finished. Leaving a corrupt image in the database file. 
- */ - walUnlockShared(pWal, WAL_READ_LOCK(0)); - return WAL_RETRY; - } - pWal->readLock = 0; - return SQLITE_OK; - }else if( rc!=SQLITE_BUSY ){ - return rc; - } - } - - /* If we get this far, it means that the reader will want to use - ** the WAL to get at content from recent commits. The job now is - ** to select one of the aReadMark[] entries that is closest to - ** but not exceeding pWal->hdr.mxFrame and lock that entry. - */ - mxReadMark = 0; - mxI = 0; - mxFrame = pWal->hdr.mxFrame; - #ifdef SQLITE_ENABLE_SNAPSHOT - if( pWal->pSnapshot && pWal->pSnapshot->mxFramepSnapshot->mxFrame; - } - #endif - for(i=1; iaReadMark+i); - if( mxReadMark<=thisMark && thisMark<=mxFrame ){ - assert( thisMark!=READMARK_NOT_USED ); - mxReadMark = thisMark; - mxI = i; - } - } - if( (pWal->readOnly & WAL_SHM_RDONLY)==0 - && (mxReadMarkaReadMark+i,mxFrame); - mxReadMark = mxFrame; - mxI = i; - walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); - break; - }else if( rc!=SQLITE_BUSY ){ - return rc; - } - } - } - if( mxI==0 ){ - assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); - return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; - } - - rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); - if( rc ){ - return rc==SQLITE_BUSY ? WAL_RETRY : rc; - } - /* Now that the read-lock has been obtained, check that neither the - ** value in the aReadMark[] array or the contents of the wal-index - ** header have changed. - ** - ** It is necessary to check that the wal-index header did not change - ** between the time it was read and when the shared-lock was obtained - ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility - ** that the log file may have been wrapped by a writer, or that frames - ** that occur later in the log than pWal->hdr.mxFrame may have been - ** copied into the database by a checkpointer. If either of these things - ** happened, then reading the database with the current value of - ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry - ** instead. - ** - ** Before checking that the live wal-index header has not changed - ** since it was read, set Wal.minFrame to the first frame in the wal - ** file that has not yet been checkpointed. This client will not need - ** to read any frames earlier than minFrame from the wal file - they - ** can be safely read directly from the database file. - ** - ** Because a ShmBarrier() call is made between taking the copy of - ** nBackfill and checking that the wal-header in shared-memory still - ** matches the one cached in pWal->hdr, it is guaranteed that the - ** checkpointer that set nBackfill was not working with a wal-index - ** header newer than that cached in pWal->hdr. If it were, that could - ** cause a problem. The checkpointer could omit to checkpoint - ** a version of page X that lies before pWal->minFrame (call that version - ** A) on the basis that there is a newer version (version B) of the same - ** page later in the wal file. But if version B happens to like past - ** frame pWal->hdr.mxFrame - then the client would incorrectly assume - ** that it can read version A from the database file. However, since - ** we can guarantee that the checkpointer that set nBackfill could not - ** see any pages past pWal->hdr.mxFrame, this problem does not come up. 
- */ - pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; - walShmBarrier(pWal); - if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark - || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) - ){ - walUnlockShared(pWal, WAL_READ_LOCK(mxI)); - return WAL_RETRY; - }else{ - assert( mxReadMark<=pWal->hdr.mxFrame ); - pWal->readLock = (i16)mxI; - } + if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame +#ifdef SQLITE_ENABLE_SNAPSHOT + && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0) +#endif + ){ + /* The WAL has been completely backfilled (or it is empty). + ** and can be safely ignored. + */ + rc = walLockShared(pWal, WAL_READ_LOCK(0)); + walShmBarrier(pWal); + if( rc==SQLITE_OK ){ + if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ + /* It is not safe to allow the reader to continue here if frames + ** may have been appended to the log before READ_LOCK(0) was obtained. + ** When holding READ_LOCK(0), the reader ignores the entire log file, + ** which implies that the database file contains a trustworthy + ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from + ** happening, this is usually correct. + ** + ** However, if frames have been appended to the log (or if the log + ** is wrapped and written for that matter) before the READ_LOCK(0) + ** is obtained, that is not necessarily true. A checkpointer may + ** have started to backfill the appended frames but crashed before + ** it finished. Leaving a corrupt image in the database file. + */ + walUnlockShared(pWal, WAL_READ_LOCK(0)); + return WAL_RETRY; + } + pWal->readLock = 0; + return SQLITE_OK; + }else if( rc!=SQLITE_BUSY ){ + return rc; + } + } + + /* If we get this far, it means that the reader will want to use + ** the WAL to get at content from recent commits. The job now is + ** to select one of the aReadMark[] entries that is closest to + ** but not exceeding pWal->hdr.mxFrame and lock that entry. + */ + mxReadMark = 0; + mxI = 0; + mxFrame = pWal->hdr.mxFrame; +#ifdef SQLITE_ENABLE_SNAPSHOT + if( pWal->pSnapshot && pWal->pSnapshot->mxFramepSnapshot->mxFrame; + } +#endif + for(i=1; iaReadMark+i); + if( mxReadMark<=thisMark && thisMark<=mxFrame ){ + assert( thisMark!=READMARK_NOT_USED ); + mxReadMark = thisMark; + mxI = i; + } + } + if( (pWal->readOnly & WAL_SHM_RDONLY)==0 + && (mxReadMarkaReadMark+i,mxFrame); + mxReadMark = mxFrame; + mxI = i; + walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); + break; + }else if( rc!=SQLITE_BUSY ){ + return rc; + } + } + } + if( mxI==0 ){ + assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); + return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; + } + + rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); + if( rc ){ + return rc==SQLITE_BUSY ? WAL_RETRY : rc; + } + /* Now that the read-lock has been obtained, check that neither the + ** value in the aReadMark[] array or the contents of the wal-index + ** header have changed. + ** + ** It is necessary to check that the wal-index header did not change + ** between the time it was read and when the shared-lock was obtained + ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility + ** that the log file may have been wrapped by a writer, or that frames + ** that occur later in the log than pWal->hdr.mxFrame may have been + ** copied into the database by a checkpointer. If either of these things + ** happened, then reading the database with the current value of + ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry + ** instead. 
+ ** + ** Before checking that the live wal-index header has not changed + ** since it was read, set Wal.minFrame to the first frame in the wal + ** file that has not yet been checkpointed. This client will not need + ** to read any frames earlier than minFrame from the wal file - they + ** can be safely read directly from the database file. + ** + ** Because a ShmBarrier() call is made between taking the copy of + ** nBackfill and checking that the wal-header in shared-memory still + ** matches the one cached in pWal->hdr, it is guaranteed that the + ** checkpointer that set nBackfill was not working with a wal-index + ** header newer than that cached in pWal->hdr. If it were, that could + ** cause a problem. The checkpointer could omit to checkpoint + ** a version of page X that lies before pWal->minFrame (call that version + ** A) on the basis that there is a newer version (version B) of the same + ** page later in the wal file. But if version B happens to like past + ** frame pWal->hdr.mxFrame - then the client would incorrectly assume + ** that it can read version A from the database file. However, since + ** we can guarantee that the checkpointer that set nBackfill could not + ** see any pages past pWal->hdr.mxFrame, this problem does not come up. + */ + pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; + walShmBarrier(pWal); + if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark + || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) + ){ + walUnlockShared(pWal, WAL_READ_LOCK(mxI)); + return WAL_RETRY; + }else{ + assert( mxReadMark<=pWal->hdr.mxFrame ); + pWal->readLock = (i16)mxI; } return rc; } #ifdef SQLITE_ENABLE_SNAPSHOT @@ -3605,13 +2938,10 @@ ** decreased at all. */ int sqlite3WalSnapshotRecover(Wal *pWal){ int rc; - /* Snapshots may not be used with wal2 mode databases. */ - if( isWalMode2(pWal) ) return SQLITE_ERROR; - assert( pWal->readLock>=0 ); rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); if( rc==SQLITE_OK ){ volatile WalCkptInfo *pInfo = walCkptInfo(pWal); int szPage = (int)pWal->szPage; @@ -3637,11 +2967,11 @@ pgno = sLoc.aPgno[i-sLoc.iZero-1]; iDbOff = (i64)(pgno-1) * szPage; if( iDbOff+szPage<=szDb ){ iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE; - rc = sqlite3OsRead(pWal->apWalFd[0], pBuf1, szPage, iWalOff); + rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff); if( rc==SQLITE_OK ){ rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff); } @@ -3688,11 +3018,10 @@ assert( pWal->ckptLock==0 ); #ifdef SQLITE_ENABLE_SNAPSHOT if( pSnapshot ){ - if( isWalMode2(pWal) ) return SQLITE_ERROR; if( memcmp(pSnapshot, &pWal->hdr, sizeof(WalIndexHdr))!=0 ){ bChanged = 1; } /* It is possible that there is a checkpointer thread running @@ -3720,23 +3049,10 @@ testcase( (rc&0xff)==SQLITE_BUSY ); testcase( (rc&0xff)==SQLITE_IOERR ); testcase( rc==SQLITE_PROTOCOL ); testcase( rc==SQLITE_OK ); - if( pWal->aSchemaVersion ){ - pWal->aSchemaVersion[SCHEMA_VERSION_AFTERWALTBR] = sqlite3STimeNow(); - } - - if( rc==SQLITE_OK && pWal->hdr.iVersion==WAL_VERSION2 ){ - rc = walOpenWal2(pWal); - } - - if( pWal->aSchemaVersion ){ - pWal->aSchemaVersion[SCHEMA_VERSION_AFTEROPENWAL2] = sqlite3STimeNow(); - } - - pWal->nPriorFrame = pWal->hdr.mxFrame; #ifdef SQLITE_ENABLE_SNAPSHOT if( rc==SQLITE_OK ){ if( pSnapshot && memcmp(pSnapshot, &pWal->hdr, sizeof(WalIndexHdr))!=0 ){ /* At this point the client has a lock on an aReadMark[] slot holding ** a value equal to or smaller than pSnapshot->mxFrame, but pWal->hdr @@ -3800,98 +3116,14 @@ ** Finish with a read transaction. 
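The selection logic above picks the aReadMark[] slot whose value is the largest one not exceeding pWal->hdr.mxFrame, upgrading a slot to mxFrame when possible so the reader sees the newest committed frames. The core selection step, isolated below; READMARK_NOT_USED is modelled as a large sentinel because its definition lies outside the lines shown in this patch.

#include <stdio.h>
#include <stdint.h>

#define NREADER            5
#define READMARK_NOT_USED  0xffffffffu   /* assumed sentinel value */

/* Return the index of the slot holding the largest mark <= mxFrame, or 0 if
** no usable slot exists (slot 0 is reserved for "ignore the WAL entirely"). */
static int pickReadSlot(const uint32_t aReadMark[NREADER], uint32_t mxFrame){
  uint32_t mxReadMark = 0;
  int mxI = 0;
  for(int i=1; i<NREADER; i++){
    uint32_t thisMark = aReadMark[i];
    if( thisMark!=READMARK_NOT_USED && mxReadMark<=thisMark && thisMark<=mxFrame ){
      mxReadMark = thisMark;
      mxI = i;
    }
  }
  return mxI;
}

int main(void){
  uint32_t aMark[NREADER] = {0, 70, READMARK_NOT_USED, 100, 90};
  printf("slot for mxFrame=95:  %d\n", pickReadSlot(aMark, 95));   /* slot 4 (mark 90)  */
  printf("slot for mxFrame=100: %d\n", pickReadSlot(aMark, 100));  /* slot 3 (mark 100) */
  return 0;
}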
All this does is release the ** read-lock. */ void sqlite3WalEndReadTransaction(Wal *pWal){ sqlite3WalEndWriteTransaction(pWal); - if( pWal->readLock!=WAL_LOCK_NONE ){ + if( pWal->readLock>=0 ){ walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); - pWal->readLock = WAL_LOCK_NONE; - } -} - -/* Search hash table iHash for an entry matching page number -** pgno. Each call to this function searches a single hash table -** (each hash table indexes up to HASHTABLE_NPAGE frames). -** -** This code might run concurrently to the code in walIndexAppend() -** that adds entries to the wal-index (and possibly to this hash -** table). This means the value just read from the hash -** slot (aHash[iKey]) may have been added before or after the -** current read transaction was opened. Values added after the -** read transaction was opened may have been written incorrectly - -** i.e. these slots may contain garbage data. However, we assume -** that any slots written before the current read transaction was -** opened remain unmodified. -** -** For the reasons above, the if(...) condition featured in the inner -** loop of the following block is more stringent that would be required -** if we had exclusive access to the hash-table: -** -** (aPgno[iFrame]==pgno): -** This condition filters out normal hash-table collisions. -** -** (iFrame<=iLast): -** This condition filters out entries that were added to the hash -** table after the current read-transaction had started. -*/ -static int walSearchHash( - Wal *pWal, - u32 iLast, - int iHash, - Pgno pgno, - u32 *piRead -){ - WalHashLoc sLoc; /* Hash table location */ - int iKey; /* Hash slot index */ - int nCollide; /* Number of hash collisions remaining */ - int rc; /* Error code */ - - rc = walHashGet(pWal, iHash, &sLoc); - if( rc!=SQLITE_OK ){ - return rc; - } - nCollide = HASHTABLE_NSLOT; - for(iKey=walHash(pgno); sLoc.aHash[iKey]; iKey=walNextHash(iKey)){ - u32 iFrame = sLoc.aHash[iKey] + sLoc.iZero; - if( iFrame<=iLast - && iFrame>=pWal->minFrame - && sLoc.aPgno[sLoc.aHash[iKey]-1]==pgno - ){ - assert( iFrame>*piRead || CORRUPT_DB ); - *piRead = iFrame; - } - if( (nCollide--)==0 ){ - return SQLITE_CORRUPT_BKPT; - } - } - - return SQLITE_OK; -} - -static int walSearchWal( - Wal *pWal, - int iWal, - Pgno pgno, - u32 *piRead -){ - int rc = SQLITE_OK; - int bWal2 = isWalMode2(pWal); - u32 iLast = walidxGetMxFrame(&pWal->hdr, iWal); - if( iLast ){ - int iHash; - int iMinHash = walFramePage(pWal->minFrame); - u32 iExternal = bWal2 ? walExternalEncode(iWal, iLast) : iLast; - assert( bWal2==0 || pWal->minFrame==0 ); - for(iHash=walFramePage(iExternal); - iHash>=iMinHash && *piRead==0; - iHash-=(1+bWal2) - ){ - rc = walSearchHash(pWal, iExternal, iHash, pgno, piRead); - if( rc!=SQLITE_OK ) break; - } - } - return rc; + pWal->readLock = -1; + } } /* ** Search the wal file for page pgno. If found, set *piRead to the frame that ** contains the page. Otherwise, if pgno is not in the wal file, set *piRead @@ -3903,80 +3135,87 @@ int sqlite3WalFindFrame( Wal *pWal, /* WAL handle */ Pgno pgno, /* Database page number to read data for */ u32 *piRead /* OUT: Frame number (or zero) */ ){ - int bWal2 = isWalMode2(pWal); - int iApp = walidxGetFile(&pWal->hdr); - int rc = SQLITE_OK; u32 iRead = 0; /* If !=0, WAL frame to return data from */ - - /* This routine is only be called from within a read transaction. Or, - ** sometimes, as part of a rollback that occurs after an error reaquiring - ** a read-lock in walRestartLog(). 
*/ - assert( pWal->readLock!=WAL_LOCK_NONE || pWal->writeLock ); - - /* If this is a regular wal system, then iApp must be set to 0 (there is - ** only one wal file, after all). Or, if this is a wal2 system and the - ** write-lock is not held, the client must have a partial-wal lock on wal - ** file iApp. This is not always true if the write-lock is held and this - ** function is being called after WalLockForCommit() as part of committing - ** a CONCURRENT transaction. */ -#ifdef SQLITE_DEBUG - if( bWal2 ){ - if( pWal->writeLock==0 ){ - int l = pWal->readLock; - assert( iApp==1 || l==WAL_LOCK_PART1 || l==WAL_LOCK_PART1_FULL2 ); - assert( iApp==0 || l==WAL_LOCK_PART2 || l==WAL_LOCK_PART2_FULL1 ); - } - }else{ - assert( iApp==0 ); - } -#endif - - /* Return early if read-lock 0 is held. */ - if( (pWal->readLock==0 && pWal->bShmUnreliable==0) ){ - assert( !bWal2 ); + u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */ + int iHash; /* Used to loop through N hash tables */ + int iMinHash; + + /* This routine is only be called from within a read transaction. */ + assert( pWal->readLock>=0 || pWal->lockError ); + + /* If the "last page" field of the wal-index header snapshot is 0, then + ** no data will be read from the wal under any circumstances. Return early + ** in this case as an optimization. Likewise, if pWal->readLock==0, + ** then the WAL is ignored by the reader so return early, as if the + ** WAL were empty. + */ + if( iLast==0 || (pWal->readLock==0 && pWal->bShmUnreliable==0) ){ *piRead = 0; return SQLITE_OK; } - /* Search the wal file that the client holds a partial lock on first. */ - rc = walSearchWal(pWal, iApp, pgno, &iRead); - - /* If the requested page was not found, no error has occured, and - ** the client holds a full-wal lock on the other wal file, search it - ** too. */ - if( rc==SQLITE_OK && bWal2 && iRead==0 && ( - pWal->readLock==WAL_LOCK_PART1_FULL2 - || pWal->readLock==WAL_LOCK_PART2_FULL1 -#ifndef SQLITE_OMIT_CONCURRENT - || (pWal->readLock==WAL_LOCK_PART1 && iApp==1) - || (pWal->readLock==WAL_LOCK_PART2 && iApp==0) -#endif - )){ - rc = walSearchWal(pWal, !iApp, pgno, &iRead); - } - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) - if( iRead ){ - u32 iFrame; - int iWal = walExternalDecode(iRead, &iFrame); - WALTRACE(("WAL%p: page %d @ frame %d wal %d\n",pWal,(int)pgno,iFrame,iWal)); - }else{ - WALTRACE(("WAL%p: page %d not found\n", pWal, (int)pgno)); - } -#endif + /* Search the hash table or tables for an entry matching page number + ** pgno. Each iteration of the following for() loop searches one + ** hash table (each hash table indexes up to HASHTABLE_NPAGE frames). + ** + ** This code might run concurrently to the code in walIndexAppend() + ** that adds entries to the wal-index (and possibly to this hash + ** table). This means the value just read from the hash + ** slot (aHash[iKey]) may have been added before or after the + ** current read transaction was opened. Values added after the + ** read transaction was opened may have been written incorrectly - + ** i.e. these slots may contain garbage data. However, we assume + ** that any slots written before the current read transaction was + ** opened remain unmodified. + ** + ** For the reasons above, the if(...) condition featured in the inner + ** loop of the following block is more stringent that would be required + ** if we had exclusive access to the hash-table: + ** + ** (aPgno[iFrame]==pgno): + ** This condition filters out normal hash-table collisions. 
+ ** + ** (iFrame<=iLast): + ** This condition filters out entries that were added to the hash + ** table after the current read-transaction had started. + */ + iMinHash = walFramePage(pWal->minFrame); + for(iHash=walFramePage(iLast); iHash>=iMinHash; iHash--){ + WalHashLoc sLoc; /* Hash table location */ + int iKey; /* Hash slot index */ + int nCollide; /* Number of hash collisions remaining */ + int rc; /* Error code */ + u32 iH; + + rc = walHashGet(pWal, iHash, &sLoc); + if( rc!=SQLITE_OK ){ + return rc; + } + nCollide = HASHTABLE_NSLOT; + iKey = walHash(pgno); + while( (iH = AtomicLoad(&sLoc.aHash[iKey]))!=0 ){ + u32 iFrame = iH + sLoc.iZero; + if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH-1]==pgno ){ + assert( iFrame>iRead || CORRUPT_DB ); + iRead = iFrame; + } + if( (nCollide--)==0 ){ + return SQLITE_CORRUPT_BKPT; + } + iKey = walNextHash(iKey); + } + if( iRead ) break; + } #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT /* If expensive assert() statements are available, do a linear search ** of the wal-index file content. Make sure the results agree with the - ** result obtained using the hash indexes above. - ** - ** TODO: This is broken for wal2. - */ - if( rc==SQLITE_OK ){ + ** result obtained using the hash indexes above. */ + { u32 iRead2 = 0; u32 iTest; assert( pWal->bShmUnreliable || pWal->minFrame>0 ); for(iTest=iLast; iTest>=pWal->minFrame && iTest>0; iTest--){ if( walFramePgno(pWal, iTest)==pgno ){ @@ -3997,78 +3236,35 @@ ** (which is nOut bytes in size). Return SQLITE_OK if successful, or an ** error code otherwise. */ int sqlite3WalReadFrame( Wal *pWal, /* WAL handle */ - u32 iExternal, /* Frame to read */ + u32 iRead, /* Frame to read */ int nOut, /* Size of buffer pOut in bytes */ u8 *pOut /* Buffer to write page data to */ ){ int sz; - int iWal = 0; - u32 iRead; i64 iOffset; - - /* Figure out the page size */ sz = pWal->hdr.szPage; sz = (sz&0xfe00) + ((sz&0x0001)<<16); testcase( sz<=32768 ); testcase( sz>=65536 ); - - if( isWalMode2(pWal) ){ - /* Figure out which of the two wal files, and the frame within, that - ** iExternal refers to. */ - iWal = walExternalDecode(iExternal, &iRead); - }else{ - iRead = iExternal; - } - - WALTRACE(("WAL%p: reading frame %d wal %d\n", pWal, iRead, iWal)); iOffset = walFrameOffset(iRead, sz) + WAL_FRAME_HDRSIZE; /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */ - return sqlite3OsRead(pWal->apWalFd[iWal], pOut, (nOut>sz?sz:nOut), iOffset); + return sqlite3OsRead(pWal->pWalFd, pOut, (nOut>sz ? sz : nOut), iOffset); } /* ** Return the size of the database in pages (or zero, if unknown). */ Pgno sqlite3WalDbsize(Wal *pWal){ - if( pWal && ALWAYS(pWal->readLock!=WAL_LOCK_NONE) ){ + if( pWal && ALWAYS(pWal->readLock>=0) ){ return pWal->hdr.nPage; } return 0; } -/* -** Take the WRITER lock on the WAL file. Return SQLITE_OK if successful, -** or an SQLite error code otherwise. This routine does not invoke any -** busy-handler callbacks, that is done at a higher level. -*/ -static int walWriteLock(Wal *pWal){ - int rc; - - /* Cannot start a write transaction without first holding a read lock */ - assert( pWal->readLock>=0 ); - assert( pWal->writeLock==0 ); - assert( pWal->iReCksum==0 ); - - /* If this is a read-only connection, obtaining a write-lock is not - ** possible. In this case return SQLITE_READONLY. Otherwise, attempt - ** to grab the WRITER lock. Set Wal.writeLock to true and return - ** SQLITE_OK if successful, or leave Wal.writeLock clear and return - ** an SQLite error code (possibly SQLITE_BUSY) otherwise. 
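The lookup loop above probes the hash table starting at walHash(pgno), keeps the last qualifying match so that the newest frame for the page wins, and reports corruption if it probes more than HASHTABLE_NSLOT times. A self-contained model of that bounded probe; the hash and next-probe functions here are simple stand-ins, not the ones used by wal.c.

#include <stdio.h>
#include <stdint.h>

#define NSLOT 16                       /* stand-in for HASHTABLE_NSLOT */

static unsigned h0(uint32_t pgno){ return (pgno*383u) % NSLOT; }  /* stand-in hash  */
static unsigned hNext(unsigned k){ return (k+1) % NSLOT; }        /* stand-in probe */

/* aHash[k] is 0 for an empty slot, else a 1-based index into aPgno[].
** Return the largest frame index <= iLast whose page number is pgno, or 0. */
static uint32_t findFrame(const uint16_t aHash[NSLOT], const uint32_t aPgno[],
                          uint32_t iZero, uint32_t iLast, uint32_t pgno){
  uint32_t iRead = 0;
  int nCollide = NSLOT;                /* hard bound on probes */
  for(unsigned k=h0(pgno); aHash[k]; k=hNext(k)){
    uint32_t iFrame = aHash[k] + iZero;
    if( iFrame<=iLast && aPgno[aHash[k]-1]==pgno ) iRead = iFrame;
    if( nCollide--==0 ) return 0;      /* real code reports SQLITE_CORRUPT here */
  }
  return iRead;
}

int main(void){
  uint16_t aHash[NSLOT] = {0};
  uint32_t aPgno[4] = {5, 9, 5, 7};    /* frames 1..4 hold these page numbers */
  for(uint16_t i=1; i<=4; i++){        /* insert frames in order */
    unsigned k = h0(aPgno[i-1]);
    while( aHash[k] ) k = hNext(k);
    aHash[k] = i;
  }
  printf("page 5 -> frame %u\n", findFrame(aHash, aPgno, 0, 4, 5));  /* 3: newest copy     */
  printf("page 5 -> frame %u\n", findFrame(aHash, aPgno, 0, 2, 5));  /* 1: frame 3 too new */
  return 0;
}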
*/ - if( pWal->readOnly ){ - rc = SQLITE_READONLY; - }else{ - rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); - if( rc==SQLITE_OK ){ - pWal->writeLock = 1; - } - } - - return rc; -} /* ** This function starts a write transaction on the WAL. ** ** A read transaction must have already been started by a prior call @@ -4091,256 +3287,41 @@ if( pWal->writeLock ){ assert( !memcmp(&pWal->hdr,(void *)walIndexHdr(pWal),sizeof(WalIndexHdr)) ); return SQLITE_OK; } #endif - - rc = walWriteLock(pWal); - if( rc==SQLITE_OK ){ - /* If another connection has written to the database file since the - ** time the read transaction on this connection was started, then - ** the write is disallowed. Release the WRITER lock and return - ** SQLITE_BUSY_SNAPSHOT in this case. */ - if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ - walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); - pWal->writeLock = 0; - rc = SQLITE_BUSY_SNAPSHOT; - } - } - return rc; -} - -/* -** This function is called by a writer that has a read-lock on aReadmark[0] -** (pWal->readLock==0). This function relinquishes that lock and takes a -** lock on a different aReadmark[] slot. -** -** SQLITE_OK is returned if successful, or an SQLite error code otherwise. -*/ -static int walUpgradeReadlock(Wal *pWal){ - int cnt; - int rc; - assert( pWal->writeLock && pWal->readLock==0 ); - assert( isWalMode2(pWal)==0 ); - walUnlockShared(pWal, WAL_READ_LOCK(0)); - pWal->readLock = -1; - cnt = 0; - do{ - int notUsed; - rc = walTryBeginRead(pWal, ¬Used, 1, ++cnt); - }while( rc==WAL_RETRY ); - assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */ - testcase( (rc&0xff)==SQLITE_IOERR ); - testcase( rc==SQLITE_PROTOCOL ); - testcase( rc==SQLITE_OK ); - return rc; -} - - -#ifndef SQLITE_OMIT_CONCURRENT -/* -** This function is only ever called when committing a "BEGIN CONCURRENT" -** transaction. It may be assumed that no frames have been written to -** the wal file. The second parameter is a pointer to the in-memory -** representation of page 1 of the database (which may or may not be -** dirty). The third is a bitvec with a bit set for each page in the -** database file that was read by the current concurrent transaction. -** -** This function performs three tasks: -** -** 1) It obtains the WRITER lock on the wal file, -** -** 2) It checks that there are no conflicts between the current -** transaction and any transactions committed to the wal file since -** it was opened, and -** -** 3) It ejects any non-dirty pages from the page-cache that have been -** written by another client since the CONCURRENT transaction was started -** (so as to avoid ending up with an inconsistent cache after the -** current transaction is committed). -** -** If no error occurs and the caller may proceed with committing the -** transaction, SQLITE_OK is returned. SQLITE_BUSY is returned if the WRITER -** lock cannot be obtained. Or, if the WRITER lock can be obtained but there -** are conflicts with a committed transaction, SQLITE_BUSY_SNAPSHOT. Finally, -** if an error (i.e. an OOM condition or IO error), an SQLite error code -** is returned. -*/ -int sqlite3WalLockForCommit( - Wal *pWal, - PgHdr *pPg1, - Bitvec *pAllRead, - Pgno *piConflict -){ - int rc = walWriteLock(pWal); - - /* If the database has been modified since this transaction was started, - ** check if it is still possible to commit. 
The transaction can be - ** committed if: - ** - ** a) None of the pages in pList have been modified since the - ** transaction opened, and - ** - ** b) The database schema cookie has not been modified since the - ** transaction was started. - */ - if( rc==SQLITE_OK ){ - WalIndexHdr head; - - if( walIndexLoadHdr(pWal, &head) ){ - /* This branch is taken if the wal-index header is corrupted. This - ** occurs if some other writer has crashed while committing a - ** transaction to this database since the current concurrent transaction - ** was opened. */ - rc = SQLITE_BUSY_SNAPSHOT; - }else if( memcmp(&pWal->hdr, (void*)&head, sizeof(WalIndexHdr))!=0 ){ - int bWal2 = isWalMode2(pWal); - int iHash; - int nLoop = 1+(bWal2 && walidxGetFile(&head)!=walidxGetFile(&pWal->hdr)); - int iLoop; - - if( pPg1==0 ){ - /* If pPg1==0, then the current transaction modified the database - ** schema. This means it conflicts with all other transactions. */ - *piConflict = 1; - rc = SQLITE_BUSY_SNAPSHOT; - } - - assert( nLoop==1 || nLoop==2 ); - for(iLoop=0; rc==SQLITE_OK && iLoophdr.mxFrame (which will be - ** set to the size of the old, now overwritten, wal file). This - ** doesn't come up in wal2 mode, as in wal2 mode the client always - ** has a PART lock on one of the wal files, preventing it from being - ** checkpointed or overwritten. */ - iFirst = pWal->hdr.mxFrame+1; - if( memcmp(pWal->hdr.aSalt, (u32*)head.aSalt, sizeof(u32)*2) ){ - assert( pWal->readLock==0 ); - iFirst = 1; - } - mxFrame = head.mxFrame; - }else{ - int iA = walidxGetFile(&pWal->hdr); - if( iLoop==0 ){ - iFirst = walExternalEncode(iA, 1+walidxGetMxFrame(&pWal->hdr, iA)); - mxFrame = walExternalEncode(iA, walidxGetMxFrame(&head, iA)); - }else{ - iFirst = walExternalEncode(!iA, 1); - mxFrame = walExternalEncode(!iA, walidxGetMxFrame(&head, !iA)); - } - } - iLastHash = walFramePage(mxFrame); - - for(iHash=walFramePage(iFirst); iHash<=iLastHash; iHash += (1+bWal2)){ - WalHashLoc sLoc; - - rc = walHashGet(pWal, iHash, &sLoc); - if( rc==SQLITE_OK ){ - u32 i, iMin, iMax; - assert( mxFrame>=sLoc.iZero ); - iMin = (sLoc.iZero >= iFirst) ? 1 : (iFirst - sLoc.iZero); - iMax = (iHash==0) ? HASHTABLE_NPAGE_ONE : HASHTABLE_NPAGE; - if( iMax>(mxFrame-sLoc.iZero) ) iMax = (mxFrame-sLoc.iZero); - for(i=iMin; rc==SQLITE_OK && i<=iMax; i++){ - PgHdr *pPg; - if( sLoc.aPgno[i-1]==1 ){ - /* Check that the schema cookie has not been modified. If - ** it has not, the commit can proceed. */ - u8 aNew[4]; - u8 *aOld = &((u8*)pPg1->pData)[40]; - int sz; - i64 iOff; - u32 iFrame = sLoc.iZero + i; - int iWal = 0; - if( bWal2 ){ - iWal = walExternalDecode(iFrame, &iFrame); - } - sz = pWal->hdr.szPage; - sz = (sz&0xfe00) + ((sz&0x0001)<<16); - iOff = walFrameOffset(iFrame, sz) + WAL_FRAME_HDRSIZE + 40; - rc = sqlite3OsRead(pWal->apWalFd[iWal],aNew,sizeof(aNew),iOff); - if( rc==SQLITE_OK && memcmp(aOld, aNew, sizeof(aNew)) ){ - rc = SQLITE_BUSY_SNAPSHOT; - } - }else if( sqlite3BitvecTestNotNull(pAllRead, sLoc.aPgno[i-1]) ){ - *piConflict = sLoc.aPgno[i-1]; - rc = SQLITE_BUSY_SNAPSHOT; - }else - if( (pPg = sqlite3PagerLookup(pPg1->pPager, sLoc.aPgno[i-1])) ){ - /* Page aPgno[i], which is present in the pager cache, has been - ** modified since the current CONCURRENT transaction was - ** started. However it was not read by the current - ** transaction, so is not a conflict. There are two - ** possibilities: (a) the page was allocated at the of the file - ** by the current transaction or (b) was present in the cache - ** at the start of the transaction. 
- ** - ** For case (a), do nothing. This page will be moved within the - ** database file by the commit code to avoid the conflict. The - ** call to PagerUnref() is to release the reference grabbed by - ** the sqlite3PagerLookup() above. - ** - ** In case (b), drop the page from the cache - otherwise - ** following the snapshot upgrade the cache would be - ** inconsistent with the database as stored on disk. */ - if( sqlite3PagerIswriteable(pPg) ){ - sqlite3PagerUnref(pPg); - }else{ - sqlite3PcacheDrop(pPg); - } - } - } - } - if( rc!=SQLITE_OK ) break; - } - } - } - } - - pWal->nPriorFrame = pWal->hdr.mxFrame; - return rc; -} - -/* !defined(SQLITE_OMIT_CONCURRENT) -** -** This function is called as part of committing an CONCURRENT transaction. -** It is assumed that sqlite3WalLockForCommit() has already been successfully -** called and so (a) the WRITER lock is held and (b) it is known that the -** wal-index-header stored in shared memory is not corrupt. -** -** Before returning, this function upgrades the client so that it is -** operating on the database snapshot currently at the head of the wal file -** (even if the CONCURRENT transaction ran against an older snapshot). -** -** SQLITE_OK is returned if successful, or an SQLite error code otherwise. -*/ -int sqlite3WalUpgradeSnapshot(Wal *pWal){ - int rc = SQLITE_OK; - assert( pWal->writeLock ); - memcpy(&pWal->hdr, (void*)walIndexHdr(pWal), sizeof(WalIndexHdr)); - - /* If this client has its read-lock on slot aReadmark[0] and the entire - ** wal has not been checkpointed, switch it to a different slot. Otherwise - ** any reads performed between now and committing the transaction will - ** read from the old snapshot - not the one just upgraded to. */ - if( pWal->readLock==0 && pWal->hdr.mxFrame!=walCkptInfo(pWal)->nBackfill ){ - assert( isWalMode2(pWal)==0 ); - rc = walUpgradeReadlock(pWal); - } - return rc; -} -#endif /* SQLITE_OMIT_CONCURRENT */ + + /* Cannot start a write transaction without first holding a read + ** transaction. */ + assert( pWal->readLock>=0 ); + assert( pWal->writeLock==0 && pWal->iReCksum==0 ); + + if( pWal->readOnly ){ + return SQLITE_READONLY; + } + + /* Only one writer allowed at a time. Get the write lock. Return + ** SQLITE_BUSY if unable. + */ + rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); + if( rc ){ + return rc; + } + pWal->writeLock = 1; + + /* If another connection has written to the database file since the + ** time the read transaction on this connection was started, then + ** the write is disallowed. + */ + if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ + walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); + pWal->writeLock = 0; + rc = SQLITE_BUSY_SNAPSHOT; + } + + return rc; +} /* ** End a write transaction. The commit has already been done. This ** routine merely releases the lock. */ @@ -4364,57 +3345,25 @@ ** returned to the caller. ** ** Otherwise, if the callback function does not return an error, this ** function returns SQLITE_OK. 
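** (The xUndo callback is invoked once for each frame appended by the
** rolled-back transaction, in the order the frames were written, and is
** passed pUndoCtx together with the page number stored in that frame.)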
*/ -int sqlite3WalUndo( - Wal *pWal, - int (*xUndo)(void *, Pgno), - void *pUndoCtx, - int bConcurrent /* True if this is a CONCURRENT transaction */ -){ +int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx){ int rc = SQLITE_OK; - if( pWal->writeLock ){ - int iWal = walidxGetFile(&pWal->hdr); - Pgno iMax = walidxGetMxFrame(&pWal->hdr, iWal); - Pgno iNew; + if( ALWAYS(pWal->writeLock) ){ + Pgno iMax = pWal->hdr.mxFrame; Pgno iFrame; - - assert( isWalMode2(pWal) || iWal==0 ); - + /* Restore the clients cache of the wal-index header to the state it ** was in before the client began writing to the database. */ memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); - iNew = walidxGetMxFrame(&pWal->hdr, walidxGetFile(&pWal->hdr)); - - /* BEGIN CONCURRENT transactions are different, as the header just - ** memcpy()d into pWal->hdr may not be the same as the current header - ** when the transaction was started. Instead, pWal->hdr now contains - ** the header written by the most recent successful COMMIT. Because - ** Wal.writeLock is set, if this is a BEGIN CONCURRENT transaction, - ** the rollback must be taking place because an error occurred during - ** a COMMIT. - ** - ** The code below is still valid. All frames between (iNew+1) and iMax - ** must have been written by this transaction before the error occurred. - ** The exception is in wal2 mode - if the current wal file at the time - ** of the last COMMIT is not wal file iWal, then the error must have - ** occurred in WalLockForCommit(), before any pages were written - ** to the database file. In this case return early. */ -#ifndef SQLITE_OMIT_CONCURRENT - if( bConcurrent ){ - pWal->hdr.aCksum[0]++; - } - if( walidxGetFile(&pWal->hdr)!=iWal ){ - assert( bConcurrent && isWalMode2(pWal) ); - return SQLITE_OK; - } -#endif - assert( walidxGetFile(&pWal->hdr)==iWal ); - - for(iFrame=iNew+1; ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; iFrame++){ + + for(iFrame=pWal->hdr.mxFrame+1; + ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; + iFrame++ + ){ /* This call cannot fail. Unless the page for which the page number ** is passed as the second argument is (a) in the cache and ** (b) has an outstanding reference, then xUndo is either a no-op ** (if (a) is false) or simply expels the page from the cache (if (b) ** is false). @@ -4422,20 +3371,14 @@ ** If the upper layer is doing a rollback, it is guaranteed that there ** are no outstanding references to any page other than page 1. And ** page 1 is never written to the log until the transaction is ** committed. As a result, the call to xUndo may not fail. */ - Pgno pgno; - if( isWalMode2(pWal) ){ - pgno = walFramePgno2(pWal, iWal, iFrame); - }else{ - pgno = walFramePgno(pWal, iFrame); - } - assert( pgno!=1 ); - rc = xUndo(pUndoCtx, pgno); - } - if( iMax!=iNew ) walCleanupHash(pWal); + assert( walFramePgno(pWal, iFrame)!=1 ); + rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); + } + if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); } return rc; } /* @@ -4443,16 +3386,15 @@ ** values. This function populates the array with values required to ** "rollback" the write position of the WAL handle back to the current ** point in the event of a savepoint rollback (via WalSavepointUndo()). 
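 ** (As used by the two routines below, aWalData[0] records the current
 ** write position of the log, aWalData[1] and aWalData[2] the running
 ** frame checksum, and aWalData[3] a value used to detect that the log
 ** has been restarted - or, in wal2 mode, switched - since the savepoint
 ** was opened.)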
 */
 void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData){
-  int iWal = walidxGetFile(&pWal->hdr);
-  assert( isWalMode2(pWal) || iWal==0 );
-  aWalData[0] = walidxGetMxFrame(&pWal->hdr, iWal);
+  assert( pWal->writeLock );
+  aWalData[0] = pWal->hdr.mxFrame;
   aWalData[1] = pWal->hdr.aFrameCksum[0];
   aWalData[2] = pWal->hdr.aFrameCksum[1];
-  aWalData[3] = isWalMode2(pWal) ? iWal : pWal->nCkpt;
+  aWalData[3] = pWal->nCkpt;
 }
 /*
 ** Move the write position of the WAL back to the point identified by
 ** the values in the aWalData[] array. aWalData must point to an array
@@ -4459,28 +3401,25 @@
 ** of WAL_SAVEPOINT_NDATA u32 values that has been previously populated
 ** by a call to WalSavepoint().
 */
 int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
   int rc = SQLITE_OK;
-  int iWal = walidxGetFile(&pWal->hdr);
-  int iCmp = isWalMode2(pWal) ? iWal : pWal->nCkpt;
-
-  assert( pWal->writeLock || aWalData[0]==pWal->hdr.mxFrame );
-  assert( isWalMode2(pWal) || iWal==0 );
-  assert( aWalData[3]!=iCmp || aWalData[0]<=walidxGetMxFrame(&pWal->hdr,iWal) );
-
-  if( aWalData[3]!=iCmp ){
+
+  assert( pWal->writeLock );
+  assert( aWalData[3]!=pWal->nCkpt || aWalData[0]<=pWal->hdr.mxFrame );
+
+  if( aWalData[3]!=pWal->nCkpt ){
     /* This savepoint was opened immediately after the write-transaction
     ** was started. Right after that, the writer decided to wrap around
     ** to the start of the log. Update the savepoint values to match.
     */
     aWalData[0] = 0;
-    aWalData[3] = iCmp;
+    aWalData[3] = pWal->nCkpt;
   }
-  if( aWalData[0]<walidxGetMxFrame(&pWal->hdr, iWal) ){
-    walidxSetMxFrame(&pWal->hdr, iWal, aWalData[0]);
+  if( aWalData[0]<pWal->hdr.mxFrame ){
+    pWal->hdr.mxFrame = aWalData[0];
     pWal->hdr.aFrameCksum[0] = aWalData[1];
     pWal->hdr.aFrameCksum[1] = aWalData[2];
     walCleanupHash(pWal);
   }
@@ -4488,65 +3427,29 @@
 }
 /*
 ** This function is called just before writing a set of frames to the log
 ** file (see sqlite3WalFrames()). It checks to see if, instead of appending
-** to the current log file, it is possible and desirable to switch to the
-** other log file and write the new transaction to the start of it.
-** If so, the wal-index header is updated accordingly - both in heap memory
-** and in the *-shm file.
+** to the current log file, it is possible to overwrite the start of the
+** existing log file with the new frames (i.e. "reset" the log). If so,
+** it sets pWal->hdr.mxFrame to 0. Otherwise, pWal->hdr.mxFrame is left
+** unchanged.
 **
 ** SQLITE_OK is returned if no error is encountered (regardless of whether
-** or not the wal-index header is modified). An SQLite error code is returned
+** or not pWal->hdr.mxFrame is modified). An SQLite error code is returned
 ** if an error occurs.
*/ static int walRestartLog(Wal *pWal){ int rc = SQLITE_OK; - - if( isWalMode2(pWal) ){ - int iApp = walidxGetFile(&pWal->hdr); - int nWalSize = WAL_DEFAULT_WALSIZE; - if( pWal->mxWalSize>0 ){ - nWalSize = (pWal->mxWalSize-WAL_HDRSIZE+pWal->szPage+WAL_FRAME_HDRSIZE-1) - / (pWal->szPage+WAL_FRAME_HDRSIZE); - nWalSize = MAX(nWalSize, 1); - } - - assert( 1==WAL_LOCK_PART1 ); - assert( 4==WAL_LOCK_PART2 ); - assert( 1+(iApp*3)==WAL_LOCK_PART1 || 1+(iApp*3)==WAL_LOCK_PART2 ); - if( pWal->readLock==1+(iApp*3) - && walidxGetMxFrame(&pWal->hdr, iApp)>=nWalSize - ){ - volatile WalCkptInfo *pInfo = walCkptInfo(pWal); - u32 mxFrame = walidxGetMxFrame(&pWal->hdr, !iApp); - if( mxFrame==0 || pInfo->nBackfill ){ - rc = wal2RestartOk(pWal, iApp); - if( rc==SQLITE_OK ){ - int iNew = !iApp; - pWal->nCkpt++; - walidxSetFile(&pWal->hdr, iNew); - walidxSetMxFrame(&pWal->hdr, iNew, 0); - sqlite3Put4byte((u8*)&pWal->hdr.aSalt[0], pWal->hdr.aFrameCksum[0]); - sqlite3Put4byte((u8*)&pWal->hdr.aSalt[1], pWal->hdr.aFrameCksum[1]); - walIndexWriteHdr(pWal); - pInfo->nBackfill = 0; - wal2RestartFinished(pWal, iApp); - walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); - pWal->readLock = iNew ? WAL_LOCK_PART2_FULL1 : WAL_LOCK_PART1_FULL2; - rc = walLockShared(pWal, WAL_READ_LOCK(pWal->readLock)); - }else if( rc==SQLITE_BUSY ){ - rc = SQLITE_OK; - } - } - } - }else if( pWal->readLock==0 ){ + int cnt; + + if( pWal->readLock==0 ){ volatile WalCkptInfo *pInfo = walCkptInfo(pWal); assert( pInfo->nBackfill==pWal->hdr.mxFrame ); if( pInfo->nBackfill>0 ){ u32 salt1; - sqlite3FastRandomness(&pWal->sPrng, 4, &salt1); + sqlite3_randomness(4, &salt1); rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); if( rc==SQLITE_OK ){ /* If all readers are using WAL_READ_LOCK(0) (in other words if no ** readers are currently using the WAL), then the transactions ** frames will overwrite the start of the existing log. Update the @@ -4556,27 +3459,26 @@ ** at this point. But updating the actual wal-index header is also ** safe and means there is no special case for sqlite3WalUndo() ** to handle if this transaction is rolled back. */ walRestartHdr(pWal, salt1); walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); - pWal->nPriorFrame = 0; }else if( rc!=SQLITE_BUSY ){ return rc; } } - - /* Regardless of whether or not the wal file was restarted, change the - ** read-lock held by this client to a slot other than aReadmark[0]. - ** Clients with a lock on aReadmark[0] read from the database file - ** only - never from the wal file. This means that if a writer holding - ** a lock on aReadmark[0] were to commit a transaction but not close the - ** read-transaction, subsequent read operations would read directly from - ** the database file - ignoring the new pages just appended - ** to the wal file. 
*/ - rc = walUpgradeReadlock(pWal); - } - + walUnlockShared(pWal, WAL_READ_LOCK(0)); + pWal->readLock = -1; + cnt = 0; + do{ + int notUsed; + rc = walTryBeginRead(pWal, ¬Used, 1, ++cnt); + }while( rc==WAL_RETRY ); + assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */ + testcase( (rc&0xff)==SQLITE_IOERR ); + testcase( rc==SQLITE_PROTOCOL ); + testcase( rc==SQLITE_OK ); + } return rc; } /* ** Information about the current state of the WAL file and where @@ -4631,22 +3533,10 @@ sqlite3_int64 iOffset /* Byte offset at which to write */ ){ int rc; /* Result code from subfunctions */ void *pData; /* Data actually written */ u8 aFrame[WAL_FRAME_HDRSIZE]; /* Buffer to assemble frame-header in */ - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) - { - int iWal = walidxGetFile(&p->pWal->hdr); - int iFrame = 1 + (iOffset / (WAL_FRAME_HDRSIZE + p->pWal->szPage)); - assert( p->pWal->apWalFd[iWal]==p->pFd ); - WALTRACE(("WAL%p: page %d written to frame %d of wal %d\n", - p->pWal, (int)pPage->pgno, iFrame, iWal - )); - } -#endif - pData = pPage->pData; walEncodeFrame(p->pWal, pPage->pgno, nTruncate, pData, aFrame); rc = walWriteToLog(p, aFrame, sizeof(aFrame), iOffset); if( rc ) return rc; /* Write the page data */ @@ -4661,17 +3551,16 @@ ** with the earliest to have been overwritten. ** ** SQLITE_OK is returned if successful, or an SQLite error code otherwise. */ static int walRewriteChecksums(Wal *pWal, u32 iLast){ - int rc = SQLITE_OK; /* Return code */ const int szPage = pWal->szPage;/* Database page size */ + int rc = SQLITE_OK; /* Return code */ u8 *aBuf; /* Buffer to load data from wal file into */ u8 aFrame[WAL_FRAME_HDRSIZE]; /* Buffer to assemble frame-headers in */ u32 iRead; /* Next frame to read from wal file */ i64 iCksumOff; - sqlite3_file *pWalFd = pWal->apWalFd[walidxGetFile(&pWal->hdr)]; aBuf = sqlite3_malloc(szPage + WAL_FRAME_HDRSIZE); if( aBuf==0 ) return SQLITE_NOMEM_BKPT; /* Find the checksum values to use as input for the recalculating the @@ -4683,26 +3572,26 @@ if( pWal->iReCksum==1 ){ iCksumOff = 24; }else{ iCksumOff = walFrameOffset(pWal->iReCksum-1, szPage) + 16; } - rc = sqlite3OsRead(pWalFd, aBuf, sizeof(u32)*2, iCksumOff); + rc = sqlite3OsRead(pWal->pWalFd, aBuf, sizeof(u32)*2, iCksumOff); pWal->hdr.aFrameCksum[0] = sqlite3Get4byte(aBuf); pWal->hdr.aFrameCksum[1] = sqlite3Get4byte(&aBuf[sizeof(u32)]); iRead = pWal->iReCksum; pWal->iReCksum = 0; for(; rc==SQLITE_OK && iRead<=iLast; iRead++){ i64 iOff = walFrameOffset(iRead, szPage); - rc = sqlite3OsRead(pWalFd, aBuf, szPage+WAL_FRAME_HDRSIZE, iOff); + rc = sqlite3OsRead(pWal->pWalFd, aBuf, szPage+WAL_FRAME_HDRSIZE, iOff); if( rc==SQLITE_OK ){ u32 iPgno, nDbSize; iPgno = sqlite3Get4byte(aBuf); nDbSize = sqlite3Get4byte(&aBuf[4]); walEncodeFrame(pWal, iPgno, nDbSize, &aBuf[WAL_FRAME_HDRSIZE], aFrame); - rc = sqlite3OsWrite(pWalFd, aFrame, sizeof(aFrame), iOff); + rc = sqlite3OsWrite(pWal->pWalFd, aFrame, sizeof(aFrame), iOff); } } sqlite3_free(aBuf); return rc; @@ -4728,81 +3617,63 @@ int szFrame; /* The size of a single frame */ i64 iOffset; /* Next byte to write in WAL file */ WalWriter w; /* The writer */ u32 iFirst = 0; /* First frame that may be overwritten */ WalIndexHdr *pLive; /* Pointer to shared header */ - int iApp; - int bWal2 = isWalMode2(pWal); assert( pList ); assert( pWal->writeLock ); /* If this frame set completes a transaction, then nTruncate>0. If ** nTruncate==0 then this frame set does not complete the transaction. 
*/ assert( (isCommit!=0)==(nTruncate!=0) ); + +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) + { int cnt; for(cnt=0, p=pList; p; p=p->pDirty, cnt++){} + WALTRACE(("WAL%p: frame write begin. %d frames. mxFrame=%d. %s\n", + pWal, cnt, pWal->hdr.mxFrame, isCommit ? "Commit" : "Spill")); + } +#endif pLive = (WalIndexHdr*)walIndexHdr(pWal); if( memcmp(&pWal->hdr, (void *)pLive, sizeof(WalIndexHdr))!=0 ){ - /* if( isWalMode2(pWal)==0 ) */ - iFirst = walidxGetMxFrame(pLive, walidxGetFile(pLive))+1; + iFirst = pLive->mxFrame+1; } /* See if it is possible to write these frames into the start of the ** log file, instead of appending to it at pWal->hdr.mxFrame. */ - else if( SQLITE_OK!=(rc = walRestartLog(pWal)) ){ + if( SQLITE_OK!=(rc = walRestartLog(pWal)) ){ return rc; } /* If this is the first frame written into the log, write the WAL ** header to the start of the WAL file. See comments at the top of ** this source file for a description of the WAL header format. */ - iApp = walidxGetFile(&pWal->hdr); - iFrame = walidxGetMxFrame(&pWal->hdr, iApp); - assert( iApp==0 || bWal2 ); - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) - { int cnt; for(cnt=0, p=pList; p; p=p->pDirty, cnt++){} - WALTRACE(("WAL%p: frame write begin. %d frames. iWal=%d. mxFrame=%d. %s\n", - pWal, cnt, iApp, iFrame, isCommit ? "Commit" : "Spill")); - } -#endif - + iFrame = pWal->hdr.mxFrame; if( iFrame==0 ){ - u32 iCkpt = 0; u8 aWalHdr[WAL_HDRSIZE]; /* Buffer to assemble wal-header in */ u32 aCksum[2]; /* Checksum for wal-header */ sqlite3Put4byte(&aWalHdr[0], (WAL_MAGIC | SQLITE_BIGENDIAN)); - sqlite3Put4byte(&aWalHdr[4], pWal->hdr.iVersion); + sqlite3Put4byte(&aWalHdr[4], WAL_MAX_VERSION); sqlite3Put4byte(&aWalHdr[8], szPage); - if( bWal2 ){ - if( walidxGetMxFrame(&pWal->hdr, !iApp)>0 ){ - u8 aPrev[4]; - rc = sqlite3OsRead(pWal->apWalFd[!iApp], aPrev, 4, 12); - if( rc!=SQLITE_OK ){ - return rc; - } - iCkpt = (sqlite3Get4byte(aPrev) + 1) & 0x0F; - } - }else{ - iCkpt = pWal->nCkpt; - } - sqlite3Put4byte(&aWalHdr[12], iCkpt); + sqlite3Put4byte(&aWalHdr[12], pWal->nCkpt); + if( pWal->nCkpt==0 ) sqlite3_randomness(8, pWal->hdr.aSalt); memcpy(&aWalHdr[16], pWal->hdr.aSalt, 8); walChecksumBytes(1, aWalHdr, WAL_HDRSIZE-2*4, 0, aCksum); sqlite3Put4byte(&aWalHdr[24], aCksum[0]); sqlite3Put4byte(&aWalHdr[28], aCksum[1]); - + pWal->szPage = szPage; pWal->hdr.bigEndCksum = SQLITE_BIGENDIAN; pWal->hdr.aFrameCksum[0] = aCksum[0]; pWal->hdr.aFrameCksum[1] = aCksum[1]; pWal->truncateOnCommit = 1; - rc = sqlite3OsWrite(pWal->apWalFd[iApp], aWalHdr, sizeof(aWalHdr), 0); + rc = sqlite3OsWrite(pWal->pWalFd, aWalHdr, sizeof(aWalHdr), 0); WALTRACE(("WAL%p: wal-header write %s\n", pWal, rc ? "failed" : "ok")); if( rc!=SQLITE_OK ){ return rc; } @@ -4812,19 +3683,19 @@ ** database corruption. See the ticket: ** ** https://sqlite.org/src/info/ff5be73dee */ if( pWal->syncHeader ){ - rc = sqlite3OsSync(pWal->apWalFd[iApp], CKPT_SYNC_FLAGS(sync_flags)); + rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags)); if( rc ) return rc; } } assert( (int)pWal->szPage==szPage ); /* Setup information needed to write frames into the WAL */ w.pWal = pWal; - w.pFd = pWal->apWalFd[iApp]; + w.pFd = pWal->pWalFd; w.iSyncPoint = 0; w.syncFlags = sync_flags; w.szPage = szPage; iOffset = walFrameOffset(iFrame+1, szPage); szFrame = szPage + WAL_FRAME_HDRSIZE; @@ -4837,23 +3708,20 @@ ** the current transaction. 
If so, overwrite the existing frame and ** set Wal.writeLock to WAL_WRITELOCK_RECKSUM - indicating that ** checksums must be recomputed when the transaction is committed. */ if( iFirst && (p->pDirty || isCommit==0) ){ u32 iWrite = 0; - VVA_ONLY(rc =) walSearchWal(pWal, iApp, p->pgno, &iWrite); + VVA_ONLY(rc =) sqlite3WalFindFrame(pWal, p->pgno, &iWrite); assert( rc==SQLITE_OK || iWrite==0 ); - if( iWrite && bWal2 ){ - walExternalDecode(iWrite, &iWrite); - } if( iWrite>=iFirst ){ i64 iOff = walFrameOffset(iWrite, szPage) + WAL_FRAME_HDRSIZE; void *pData; if( pWal->iReCksum==0 || iWriteiReCksum ){ pWal->iReCksum = iWrite; } pData = p->pData; - rc = sqlite3OsWrite(pWal->apWalFd[iApp], pData, szPage, iOff); + rc = sqlite3OsWrite(pWal->pWalFd, pData, szPage, iOff); if( rc ) return rc; p->flags &= ~PGHDR_WAL_APPEND; continue; } } @@ -4865,11 +3733,10 @@ if( rc ) return rc; pLast = p; iOffset += szFrame; p->flags |= PGHDR_WAL_APPEND; } - /* Recalculate checksums within the wal file if required. */ if( isCommit && pWal->iReCksum ){ rc = walRewriteChecksums(pWal, iFrame); if( rc ) return rc; @@ -4890,11 +3757,11 @@ ** past the sector boundary is written after the sync. */ if( isCommit && WAL_SYNC_FLAGS(sync_flags)!=0 ){ int bSync = 1; if( pWal->padToSectorBoundary ){ - int sectorSize = sqlite3SectorSize(w.pFd); + int sectorSize = sqlite3SectorSize(pWal->pWalFd); w.iSyncPoint = ((iOffset+sectorSize-1)/sectorSize)*sectorSize; bSync = (w.iSyncPoint==iOffset); testcase( bSync ); while( iOffsethdr, iApp); + iFrame = pWal->hdr.mxFrame; for(p=pList; p && rc==SQLITE_OK; p=p->pDirty){ if( (p->flags & PGHDR_WAL_APPEND)==0 ) continue; iFrame++; - rc = walIndexAppend(pWal, iApp, iFrame, p->pgno); + rc = walIndexAppend(pWal, iFrame, p->pgno); } assert( pLast!=0 || nExtra==0 ); while( rc==SQLITE_OK && nExtra>0 ){ iFrame++; nExtra--; - rc = walIndexAppend(pWal, iApp, iFrame, pLast->pgno); + rc = walIndexAppend(pWal, iFrame, pLast->pgno); } if( rc==SQLITE_OK ){ /* Update the private copy of the header. */ pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); testcase( szPage<=32768 ); testcase( szPage>=65536 ); - walidxSetMxFrame(&pWal->hdr, iApp, iFrame); + pWal->hdr.mxFrame = iFrame; if( isCommit ){ pWal->hdr.iChange++; pWal->hdr.nPage = nTruncate; } /* If this is a commit, update the wal-index header too. */ if( isCommit ){ walIndexWriteHdr(pWal); - if( bWal2 ){ - int iOther = !walidxGetFile(&pWal->hdr); - if( walidxGetMxFrame(&pWal->hdr, iOther) - && !walCkptInfo(pWal)->nBackfill - ){ - pWal->iCallback = walidxGetMxFrame(&pWal->hdr, 0); - pWal->iCallback += walidxGetMxFrame(&pWal->hdr, 1); - } - }else{ - pWal->iCallback = iFrame; - } + pWal->iCallback = iFrame; } } WALTRACE(("WAL%p: frame write %s\n", pWal, rc ? "failed" : "ok")); return rc; @@ -5060,48 +3917,31 @@ } } /* Copy data from the log to the database file. */ if( rc==SQLITE_OK ){ - if( (walPagesize(pWal)!=nBuf) - && ((pWal->hdr.mxFrame2 & 0x7FFFFFFF) || pWal->hdr.mxFrame) - ){ + + if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ rc = SQLITE_CORRUPT_BKPT; }else{ rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf); } /* If no error occurred, set the output variables. 
*/ if( rc==SQLITE_OK || rc==SQLITE_BUSY ){ - if( pnLog ){ - *pnLog = walidxGetMxFrame(&pWal->hdr,0)+walidxGetMxFrame(&pWal->hdr,1); - } - if( pnCkpt ){ - if( isWalMode2(pWal) ){ - if( (int)(walCkptInfo(pWal)->nBackfill) ){ - *pnCkpt = walidxGetMxFrame(&pWal->hdr, !walidxGetFile(&pWal->hdr)); - }else{ - *pnCkpt = 0; - } - }else{ - *pnCkpt = walCkptInfo(pWal)->nBackfill; - } - } + if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame; + if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill); } } - if( isChanged && pWal->bClosing==0 ){ + if( isChanged ){ /* If a new wal-index header was loaded before the checkpoint was ** performed, then the pager-cache associated with pWal is now ** out of date. So zero the cached wal-index header to ensure that ** next time the pager opens a snapshot on this database it knows that ** the cache needs to be reset. - ** - ** Except, do not do this if the wal is being closed. In this case - ** the caller needs the wal-index header to check if the database is - ** in wal2 mode and the "other" wal file also needs to be checkpointed. - ** Besides, the pager cache will not be used again in this case. */ + */ memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); } walDisableBlocking(pWal); sqlite3WalDb(pWal, 0); @@ -5157,28 +3997,26 @@ ** should acquire the database exclusive lock prior to invoking ** the op==1 case. */ int sqlite3WalExclusiveMode(Wal *pWal, int op){ int rc; - assert( pWal->writeLock==0 ); assert( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE || op==-1 ); /* pWal->readLock is usually set, but might be -1 if there was a ** prior error while attempting to acquire are read-lock. This cannot ** happen if the connection is actually in exclusive mode (as no xShmLock ** locks are taken in this case). Nor should the pager attempt to ** upgrade to exclusive-mode following such an error. */ - assert( pWal->readLock!=WAL_LOCK_NONE || pWal->lockError ); - assert( pWal->readLock!=WAL_LOCK_NONE || (op<=0 && pWal->exclusiveMode==0) ); + assert( pWal->readLock>=0 || pWal->lockError ); + assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) ); if( op==0 ){ - if( pWal->exclusiveMode ){ + if( pWal->exclusiveMode!=WAL_NORMAL_MODE ){ pWal->exclusiveMode = WAL_NORMAL_MODE; - rc = walLockShared(pWal, WAL_READ_LOCK(pWal->readLock)); - if( rc!=SQLITE_OK ){ + if( walLockShared(pWal, WAL_READ_LOCK(pWal->readLock))!=SQLITE_OK ){ pWal->exclusiveMode = WAL_EXCLUSIVE_MODE; } rc = pWal->exclusiveMode==WAL_NORMAL_MODE; }else{ /* Already in locking_mode=NORMAL */ @@ -5213,16 +4051,13 @@ int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot){ int rc = SQLITE_OK; WalIndexHdr *pRet; static const u32 aZero[4] = { 0, 0, 0, 0 }; - /* Snapshots may not be used with wal2 mode databases. */ - if( isWalMode2(pWal) ) return SQLITE_ERROR; - assert( pWal->readLock>=0 && pWal->writeLock==0 ); - if( memcmp(&pWal->hdr.aFrameCksum[0],aZero,8)==0 ){ + if( memcmp(&pWal->hdr.aFrameCksum[0],aZero,16)==0 ){ *ppSnapshot = 0; return SQLITE_ERROR; } pRet = (WalIndexHdr*)sqlite3_malloc(sizeof(WalIndexHdr)); if( pRet==0 ){ @@ -5272,14 +4107,10 @@ ** occurs (any value other than SQLITE_OK is returned), the CHECKPOINTER ** lock is released before returning. */ int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot){ int rc; - - /* Snapshots may not be used with wal2 mode databases. 
*/ - if( isWalMode2(pWal) ) return SQLITE_ERROR; - rc = walLockShared(pWal, WAL_CKPT_LOCK); if( rc==SQLITE_OK ){ WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot; if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt)) || pNew->mxFramenBackfillAttempted @@ -5316,35 +4147,9 @@ #endif /* Return the sqlite3_file object for the WAL file */ sqlite3_file *sqlite3WalFile(Wal *pWal){ - return pWal->apWalFd[0]; -} - -/* -** Return the values required by sqlite3_wal_info(). -*/ -int sqlite3WalInfo(Wal *pWal, u32 *pnPrior, u32 *pnFrame){ - int rc = SQLITE_OK; - if( pWal ){ - *pnPrior = pWal->nPriorFrame; - *pnFrame = walidxGetMxFrame(&pWal->hdr, walidxGetFile(&pWal->hdr)); - } - return rc; -} - -/* -** Return the journal mode used by this Wal object. -*/ -int sqlite3WalJournalMode(Wal *pWal){ - assert( pWal ); - return (isWalMode2(pWal) ? PAGER_JOURNALMODE_WAL2 : PAGER_JOURNALMODE_WAL); -} - -void sqlite3WalIsSchemaVersion(Wal *pWal, u64 *a){ - if( pWal ){ - pWal->aSchemaVersion = a; - } + return pWal->pWalFd; } #endif /* #ifndef SQLITE_OMIT_WAL */ Index: src/wal.h ================================================================== --- src/wal.h +++ src/wal.h @@ -24,19 +24,19 @@ */ #define WAL_SYNC_FLAGS(X) ((X)&0x03) #define CKPT_SYNC_FLAGS(X) (((X)>>2)&0x03) #ifdef SQLITE_OMIT_WAL -# define sqlite3WalOpen(w,x,y,z) 0 +# define sqlite3WalOpen(x,y,z) 0 # define sqlite3WalLimit(x,y) # define sqlite3WalClose(v,w,x,y,z) 0 # define sqlite3WalBeginReadTransaction(y,z) 0 # define sqlite3WalEndReadTransaction(z) # define sqlite3WalDbsize(y) 0 # define sqlite3WalBeginWriteTransaction(y) 0 # define sqlite3WalEndWriteTransaction(x) 0 -# define sqlite3WalUndo(w,x,y,z) 0 +# define sqlite3WalUndo(x,y,z) 0 # define sqlite3WalSavepoint(y,z) # define sqlite3WalSavepointUndo(y,z) 0 # define sqlite3WalFrames(u,v,w,x,y,z) 0 # define sqlite3WalCheckpoint(q,r,s,t,u,v,w,x,y,z) 0 # define sqlite3WalCallback(z) 0 @@ -43,11 +43,10 @@ # define sqlite3WalExclusiveMode(y,z) 0 # define sqlite3WalHeapMemory(z) 0 # define sqlite3WalFramesize(z) 0 # define sqlite3WalFindFrame(x,y,z) 0 # define sqlite3WalFile(x) 0 -# define sqlite3WalJournalMode(x) 0 #else #define WAL_SAVEPOINT_NDATA 4 /* Connection to a write-ahead log (WAL) file. @@ -54,11 +53,11 @@ ** There is one object of this type for each pager. */ typedef struct Wal Wal; /* Open and close a connection to a write-ahead log. */ -int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *,int,i64,int,Wal**); +int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *, int, i64, Wal**); int sqlite3WalClose(Wal *pWal, sqlite3*, int sync_flags, int, u8 *); /* Set the limiting size of a WAL file. */ void sqlite3WalLimit(Wal*, i64); @@ -82,11 +81,11 @@ /* Obtain or release the WRITER lock. 
*/ int sqlite3WalBeginWriteTransaction(Wal *pWal); int sqlite3WalEndWriteTransaction(Wal *pWal); /* Undo any frames written (but not committed) to the log */ -int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx, int); +int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx); /* Return an integer that records the current (uncommitted) write ** position in the WAL */ void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData); @@ -135,19 +134,10 @@ int sqlite3WalSnapshotRecover(Wal *pWal); int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot); void sqlite3WalSnapshotUnlock(Wal *pWal); #endif -#ifndef SQLITE_OMIT_CONCURRENT -/* Tell the wal layer that we want to commit a concurrent transaction */ -int sqlite3WalLockForCommit(Wal *pWal, PgHdr *pPg, Bitvec *pRead, Pgno*); - -/* Upgrade the state of the client to take into account changes written -** by other connections */ -int sqlite3WalUpgradeSnapshot(Wal *pWal); -#endif /* SQLITE_OMIT_CONCURRENT */ - #ifdef SQLITE_ENABLE_ZIPVFS /* If the WAL file is not empty, return the number of bytes of content ** stored in each frame (i.e. the db page-size when the WAL was created). */ int sqlite3WalFramesize(Wal *pWal); @@ -154,23 +144,12 @@ #endif /* Return the sqlite3_file object for the WAL file */ sqlite3_file *sqlite3WalFile(Wal *pWal); -/* Return the journal mode (WAL or WAL2) used by this Wal object. */ -int sqlite3WalJournalMode(Wal *pWal); - #ifdef SQLITE_ENABLE_SETLK_TIMEOUT int sqlite3WalWriteLock(Wal *pWal, int bLock); void sqlite3WalDb(Wal *pWal, sqlite3 *db); #endif -/* sqlite3_wal_info() data */ -int sqlite3WalInfo(Wal *pWal, u32 *pnPrior, u32 *pnFrame); - -/* sqlite3_wal_info() data */ -int sqlite3WalInfo(Wal *pWal, u32 *pnPrior, u32 *pnFrame); - -void sqlite3WalIsSchemaVersion(Wal *pWal, u64 *a); - #endif /* ifndef SQLITE_OMIT_WAL */ #endif /* SQLITE_WAL_H */ Index: src/where.c ================================================================== --- src/where.c +++ src/where.c @@ -65,11 +65,11 @@ ** terms means that no sorting is needed at all. A return that ** is positive but less than the number of ORDER BY terms means that ** block sorting is required. */ int sqlite3WhereIsOrdered(WhereInfo *pWInfo){ - return pWInfo->nOBSat; + return pWInfo->nOBSat<0 ? 0 : pWInfo->nOBSat; } /* ** In the ORDER BY LIMIT optimization, if the inner-most loop is known ** to emit rows in increasing order, and if the last row emitted by the @@ -2285,30 +2285,18 @@ while( pWInfo->pLoops ){ WhereLoop *p = pWInfo->pLoops; pWInfo->pLoops = p->pNextLoop; whereLoopDelete(db, p); } - assert( pWInfo->pExprMods==0 ); while( pWInfo->pMemToFree ){ WhereMemBlock *pNext = pWInfo->pMemToFree->pNext; sqlite3DbNNFreeNN(db, pWInfo->pMemToFree); pWInfo->pMemToFree = pNext; } sqlite3DbNNFreeNN(db, pWInfo); } -/* Undo all Expr node modifications -*/ -static void whereUndoExprMods(WhereInfo *pWInfo){ - while( pWInfo->pExprMods ){ - WhereExprMod *p = pWInfo->pExprMods; - pWInfo->pExprMods = p->pNext; - memcpy(p->pExpr, &p->orig, sizeof(p->orig)); - sqlite3DbFree(pWInfo->pParse->db, p); - } -} - /* ** Return TRUE if all of the following are true: ** ** (1) X has the same or lower cost, or returns the same or fewer rows, ** than Y. @@ -3256,10 +3244,98 @@ return 1; } } return 0; } + +/* +** Structure passed to the whereIsCoveringIndex Walker callback. +*/ +struct CoveringIndexCheck { + Index *pIdx; /* The index */ + int iTabCur; /* Cursor number for the corresponding table */ +}; + +/* +** Information passed in is pWalk->u.pCovIdxCk. 
Call it pCk.
+**
+** If the Expr node references the table with cursor pCk->iTabCur, then
+** make sure that column is covered by the index pCk->pIdx. We know that
+** all columns less than 63 (really BMS-1) are covered, so we don't need
+** to check them. But we do need to check any column at 63 or greater.
+**
+** If the index does not cover the column, then set pWalk->eCode to
+** non-zero and return WRC_Abort to stop the search.
+**
+** If this node does not disprove that the index can be a covering index,
+** then just return WRC_Continue, to continue the search.
+*/
+static int whereIsCoveringIndexWalkCallback(Walker *pWalk, Expr *pExpr){
+  int i;               /* Loop counter */
+  const Index *pIdx;   /* The index of interest */
+  const i16 *aiColumn; /* Columns contained in the index */
+  u16 nColumn;         /* Number of columns in the index */
+  if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_AGG_COLUMN ) return WRC_Continue;
+  if( pExpr->iColumn<(BMS-1) ) return WRC_Continue;
+  if( pExpr->iTable!=pWalk->u.pCovIdxCk->iTabCur ) return WRC_Continue;
+  pIdx = pWalk->u.pCovIdxCk->pIdx;
+  aiColumn = pIdx->aiColumn;
+  nColumn = pIdx->nColumn;
+  for(i=0; i<nColumn; i++){
+    if( aiColumn[i]==pExpr->iColumn ) return WRC_Continue;
+  }
+  pWalk->eCode = 1;
+  return WRC_Abort;
+}
+
+
+/*
+** pIdx is an index that covers all of the low-number columns used by
+** pWInfo->pSelect (columns from 0 through 62). But there are columns
+** in pWInfo->pSelect beyond 62. This routine tries to answer the question
+** of whether pIdx covers *all* columns in the query.
+**
+** Return 0 if pIdx is a covering index. Return non-zero if pIdx is
+** not a covering index or if we are unable to determine if pIdx is a
+** covering index.
+**
+** This routine is an optimization. It is always safe to return non-zero.
+** But returning zero when non-zero should have been returned can lead to
+** incorrect bytecode and assertion faults.
+*/
+static SQLITE_NOINLINE u32 whereIsCoveringIndex(
+  WhereInfo *pWInfo,     /* The WHERE clause context */
+  Index *pIdx,           /* Index that is being tested */
+  int iTabCur            /* Cursor for the table being indexed */
+){
+  int i;
+  struct CoveringIndexCheck ck;
+  Walker w;
+  if( pWInfo->pSelect==0 ){
+    /* We don't have access to the full query, so we cannot check to see
+    ** if pIdx is covering. Assume it is not. */
+    return 1;
+  }
+  for(i=0; i<pIdx->nColumn; i++){
+    if( pIdx->aiColumn[i]>=BMS-1 ) break;
+  }
+  if( i>=pIdx->nColumn ){
+    /* pIdx does not index any columns greater than 62, but we know from
+    ** colMask that columns greater than 62 are used, so this is not a
+    ** covering index */
+    return 1;
+  }
+  ck.pIdx = pIdx;
+  ck.iTabCur = iTabCur;
+  memset(&w, 0, sizeof(w));
+  w.xExprCallback = whereIsCoveringIndexWalkCallback;
+  w.xSelectCallback = sqlite3SelectWalkNoop;
+  w.u.pCovIdxCk = &ck;
+  w.eCode = 0;
+  sqlite3WalkSelect(&w, pWInfo->pSelect);
+  return w.eCode;
+}
 /*
 ** Add all WhereLoop objects for a single table of the join where the table
 ** is identified by pBuilder->pNew->iTab. That table is guaranteed to be
 ** a b-tree table, not a virtual table.
@@ -3474,10 +3550,13 @@
     if( pProbe->isCovering ){
       pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
       m = 0;
     }else{
       m = pSrc->colUsed & pProbe->colNotIdxed;
+      if( m==TOPBIT ){
+        m = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor);
+      }
       pNew->wsFlags = (m==0) ?
(WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED; } /* Full scan via index */ if( b @@ -4699,11 +4778,10 @@ */ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ int mxChoice; /* Maximum number of simultaneous paths tracked */ int nLoop; /* Number of terms in the join */ Parse *pParse; /* Parsing context */ - sqlite3 *db; /* The database connection */ int iLoop; /* Loop counter over the terms of the join */ int ii, jj; /* Loop counters */ int mxI = 0; /* Index of next entry to replace */ int nOrderBy; /* Number of ORDER BY clause terms */ LogEst mxCost = 0; /* Maximum cost of a set of paths */ @@ -4718,11 +4796,10 @@ LogEst *aSortCost = 0; /* Sorting and partial sorting costs */ char *pSpace; /* Temporary memory used by this routine */ int nSpace; /* Bytes of space allocated at pSpace */ pParse = pWInfo->pParse; - db = pParse->db; nLoop = pWInfo->nLevel; /* TUNING: For simple queries, only the best path is tracked. ** For 2-way joins, the 5 best paths are followed. ** For joins of 3 or more tables, track the 10 best paths */ mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); @@ -4741,11 +4818,11 @@ } /* Allocate and initialize space for aTo, aFrom and aSortCost[] */ nSpace = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2; nSpace += sizeof(LogEst) * nOrderBy; - pSpace = sqlite3DbMallocRawNN(db, nSpace); + pSpace = sqlite3StackAllocRawNN(pParse->db, nSpace); if( pSpace==0 ) return SQLITE_NOMEM_BKPT; aTo = (WherePath*)pSpace; aFrom = aTo+mxChoice; memset(aFrom, 0, sizeof(aFrom[0])); pX = (WhereLoop**)(aFrom+mxChoice); @@ -4999,11 +5076,11 @@ nFrom = nTo; } if( nFrom==0 ){ sqlite3ErrorMsg(pParse, "no query solution"); - sqlite3DbFreeNN(db, pSpace); + sqlite3StackFreeNN(pParse->db, pSpace); return SQLITE_ERROR; } /* Find the lowest cost path. pFrom will be left pointing to that path */ pFrom = aFrom; @@ -5081,12 +5158,11 @@ pWInfo->nRowOut = pFrom->nRow; /* Free temporary memory and return success */ - assert( db!=0 ); - sqlite3DbNNFreeNN(db, pSpace); + sqlite3StackFreeNN(pParse->db, pSpace); return SQLITE_OK; } /* ** Most queries use only a single table (they are not joins) and have @@ -5380,10 +5456,81 @@ } } nSearch += pLoop->nOut; } } + +/* +** This is an sqlite3ParserAddCleanup() callback that is invoked to +** free the Parse->pIdxExpr list when the Parse object is destroyed. +*/ +static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){ + Parse *pParse = (Parse*)pObject; + while( pParse->pIdxExpr!=0 ){ + IndexedExpr *p = pParse->pIdxExpr; + pParse->pIdxExpr = p->pIENext; + sqlite3ExprDelete(db, p->pExpr); + sqlite3DbFreeNN(db, p); + } +} + +/* +** The index pIdx is used by a query and contains one or more expressions. +** In other words pIdx is an index on an expression. iIdxCur is the cursor +** number for the index and iDataCur is the cursor number for the corresponding +** table. +** +** This routine adds IndexedExpr entries to the Parse->pIdxExpr field for +** each of the expressions in the index so that the expression code generator +** will know to replace occurrences of the indexed expression with +** references to the corresponding column of the index. 
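+**
+** For example, given CREATE INDEX i1 ON t1(a+b), an occurrence of the
+** expression "a+b" in the query can then be coded as a reference to the
+** corresponding column of index i1 instead of being recomputed from the
+** columns of table t1.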
+*/ +static SQLITE_NOINLINE void whereAddIndexedExpr( + Parse *pParse, /* Add IndexedExpr entries to pParse->pIdxExpr */ + Index *pIdx, /* The index-on-expression that contains the expressions */ + int iIdxCur, /* Cursor number for pIdx */ + SrcItem *pTabItem /* The FROM clause entry for the table */ +){ + int i; + IndexedExpr *p; + Table *pTab; + assert( pIdx->bHasExpr ); + pTab = pIdx->pTable; + for(i=0; inColumn; i++){ + Expr *pExpr; + int j = pIdx->aiColumn[i]; + int bMaybeNullRow; + if( j==XN_EXPR ){ + pExpr = pIdx->aColExpr->a[i].pExpr; + testcase( pTabItem->fg.jointype & JT_LEFT ); + testcase( pTabItem->fg.jointype & JT_RIGHT ); + testcase( pTabItem->fg.jointype & JT_LTORJ ); + bMaybeNullRow = (pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0; + }else if( j>=0 && (pTab->aCol[j].colFlags & COLFLAG_VIRTUAL)!=0 ){ + pExpr = sqlite3ColumnExpr(pTab, &pTab->aCol[j]); + bMaybeNullRow = 0; + }else{ + continue; + } + if( sqlite3ExprIsConstant(pExpr) ) continue; + p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); + if( p==0 ) break; + p->pIENext = pParse->pIdxExpr; + p->pExpr = sqlite3ExprDup(pParse->db, pExpr, 0); + p->iDataCur = pTabItem->iCursor; + p->iIdxCur = iIdxCur; + p->iIdxCol = i; + p->bMaybeNullRow = bMaybeNullRow; +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + p->zIdxName = pIdx->zName; +#endif + pParse->pIdxExpr = p; + if( p->pIENext==0 ){ + sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pParse); + } + } +} /* ** Generate the beginning of the loop used for WHERE clause processing. ** The return value is a pointer to an opaque structure that contains ** information needed to terminate the loop. Later, the calling routine @@ -5475,11 +5622,11 @@ Parse *pParse, /* The parser context */ SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */ Expr *pWhere, /* The WHERE clause */ ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ ExprList *pResultSet, /* Query result set. Req'd for DISTINCT */ - Select *pLimit, /* Use this LIMIT/OFFSET clause, if any */ + Select *pSelect, /* The entire SELECT statement */ u16 wctrlFlags, /* The WHERE_* flags defined in sqliteInt.h */ int iAuxArg /* If WHERE_OR_SUBCLAUSE is set, index cursor number ** If WHERE_USE_LIMIT, then the limit amount */ ){ int nByteWInfo; /* Num. bytes allocated for WhereInfo struct */ @@ -5544,21 +5691,21 @@ goto whereBeginError; } pWInfo->pParse = pParse; pWInfo->pTabList = pTabList; pWInfo->pOrderBy = pOrderBy; +#if WHERETRACE_ENABLED pWInfo->pWhere = pWhere; +#endif pWInfo->pResultSet = pResultSet; pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1; pWInfo->nLevel = nTabList; pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(pParse); pWInfo->wctrlFlags = wctrlFlags; pWInfo->iLimit = iAuxArg; pWInfo->savedNQueryLoop = pParse->nQueryLoop; -#ifndef SQLITE_OMIT_VIRTUALTABLE - pWInfo->pLimit = pLimit; -#endif + pWInfo->pSelect = pSelect; memset(&pWInfo->nOBSat, 0, offsetof(WhereInfo,sWC) - offsetof(WhereInfo,nOBSat)); memset(&pWInfo->a[0], 0, sizeof(WhereLoop)+nTabList*sizeof(WhereLevel)); assert( pWInfo->eOnePass==ONEPASS_OFF ); /* ONEPASS defaults to OFF */ pMaskSet = &pWInfo->sMaskSet; @@ -5623,11 +5770,13 @@ #endif } /* Analyze all of the subexpressions. 
*/ sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC); - sqlite3WhereAddLimit(&pWInfo->sWC, pLimit); + if( pSelect && pSelect->pLimit ){ + sqlite3WhereAddLimit(&pWInfo->sWC, pSelect); + } if( pParse->nErr ) goto whereBeginError; /* Special case: WHERE terms that do not refer to any tables in the join ** (constant expressions). Evaluate each such term, and jump over all the ** generated code if the result is not true. @@ -5926,10 +6075,13 @@ }else if( iAuxArg && (wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 ){ iIndexCur = iAuxArg; op = OP_ReopenIdx; }else{ iIndexCur = pParse->nTab++; + if( pIx->bHasExpr && OptimizationEnabled(db, SQLITE_IndexedExpr) ){ + whereAddIndexedExpr(pParse, pIx, iIndexCur, pTabItem); + } } pLevel->iIdxCur = iIndexCur; assert( pIx!=0 ); assert( pIx->pSchema==pTab->pSchema ); assert( iIndexCur>=0 ); @@ -6048,12 +6200,10 @@ return pWInfo; /* Jump here if malloc fails */ whereBeginError: if( pWInfo ){ - testcase( pWInfo->pExprMods!=0 ); - whereUndoExprMods(pWInfo); pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); } return 0; } @@ -6268,11 +6418,10 @@ VdbeModuleComment((v, "End WHERE-loop%d: %s", i, pWInfo->pTabList->a[pLevel->iFrom].pTab->zName)); } assert( pWInfo->nLevel<=pTabList->nSrc ); - if( pWInfo->pExprMods ) whereUndoExprMods(pWInfo); for(i=0, pLevel=pWInfo->a; inLevel; i++, pLevel++){ int k, last; VdbeOp *pOp, *pLastOp; Index *pIdx = 0; SrcItem *pTabItem = &pTabList->a[pLevel->iFrom]; @@ -6321,10 +6470,20 @@ ){ if( pWInfo->eOnePass==ONEPASS_OFF || !HasRowid(pIdx->pTable) ){ last = iEnd; }else{ last = pWInfo->iEndWhere; + } + if( pIdx->bHasExpr ){ + IndexedExpr *p = pParse->pIdxExpr; + while( p ){ + if( p->iIdxCur==pLevel->iIdxCur ){ + p->iDataCur = -1; + p->iIdxCur = -1; + } + p = p->pIENext; + } } k = pLevel->addrBody + 1; #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeAddopTrace ){ printf("TRANSLATE opcodes in range %d..%d\n", k, last-1); Index: src/whereInt.h ================================================================== --- src/whereInt.h +++ src/whereInt.h @@ -376,11 +376,11 @@ /* ** An instance of the following structure keeps track of a mapping ** between VDBE cursor numbers and bits of the bitmasks in WhereTerm. ** ** The VDBE cursor numbers are small integers contained in -** SrcList_item.iCursor and Expr.iTable fields. For any given WHERE +** SrcItem.iCursor and Expr.iTable fields. For any given WHERE ** clause, the cursor numbers might not begin with 0 and they might ** contain gaps in the numbering sequence. But we want to make maximum ** use of the bits in our bitmasks. This structure provides a mapping ** from the sparse cursor numbers into consecutive integers beginning ** with 0. @@ -447,24 +447,10 @@ #endif #ifndef SQLITE_QUERY_PLANNER_LIMIT_INCR # define SQLITE_QUERY_PLANNER_LIMIT_INCR 1000 #endif -/* -** Each instance of this object records a change to a single node -** in an expression tree to cause that node to point to a column -** of an index rather than an expression or a virtual column. All -** such transformations need to be undone at the end of WHERE clause -** processing. -*/ -typedef struct WhereExprMod WhereExprMod; -struct WhereExprMod { - WhereExprMod *pNext; /* Next translation on a list of them all */ - Expr *pExpr; /* The Expr node that was transformed */ - Expr orig; /* Original value of the Expr node */ -}; - /* ** The WHERE clause processing routine has two halves. The ** first part does the start of the WHERE loop and the second ** half does the tail of the WHERE loop. 
An instance of ** this structure is returned by the first half and passed @@ -476,14 +462,14 @@ struct WhereInfo { Parse *pParse; /* Parsing and code generating context */ SrcList *pTabList; /* List of tables in the join */ ExprList *pOrderBy; /* The ORDER BY clause or NULL */ ExprList *pResultSet; /* Result set of the query */ +#if WHERETRACE_ENABLED Expr *pWhere; /* The complete WHERE clause */ -#ifndef SQLITE_OMIT_VIRTUALTABLE - Select *pLimit; /* Used to access LIMIT expr/registers for vtabs */ #endif + Select *pSelect; /* The entire SELECT statement containing WHERE */ int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */ int iContinue; /* Jump here to continue with next record */ int iBreak; /* Jump here to break out of the loop */ int savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */ u16 wctrlFlags; /* Flags originally passed to sqlite3WhereBegin() */ @@ -498,11 +484,10 @@ unsigned sorted :1; /* True if really sorted (not just grouped) */ LogEst nRowOut; /* Estimated number of output rows */ int iTop; /* The very beginning of the WHERE loop */ int iEndWhere; /* End of the WHERE clause itself */ WhereLoop *pLoops; /* List of all WhereLoop objects */ - WhereExprMod *pExprMods; /* Expression modifications */ WhereMemBlock *pMemToFree;/* Memory to free when this object destroyed */ Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ WhereClause sWC; /* Decomposition of the WHERE clause */ WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */ WhereLevel a[1]; /* Information about each nest loop in WHERE */ Index: src/wherecode.c ================================================================== --- src/wherecode.c +++ src/wherecode.c @@ -1215,147 +1215,10 @@ assert( nReg==1 || pParse->nErr ); sqlite3ExprCode(pParse, p, iReg); } } -/* An instance of the IdxExprTrans object carries information about a -** mapping from an expression on table columns into a column in an index -** down through the Walker. -*/ -typedef struct IdxExprTrans { - Expr *pIdxExpr; /* The index expression */ - int iTabCur; /* The cursor of the corresponding table */ - int iIdxCur; /* The cursor for the index */ - int iIdxCol; /* The column for the index */ - int iTabCol; /* The column for the table */ - WhereInfo *pWInfo; /* Complete WHERE clause information */ - sqlite3 *db; /* Database connection (for malloc()) */ -} IdxExprTrans; - -/* -** Preserve pExpr on the WhereETrans list of the WhereInfo. -*/ -static void preserveExpr(IdxExprTrans *pTrans, Expr *pExpr){ - WhereExprMod *pNew; - pNew = sqlite3DbMallocRaw(pTrans->db, sizeof(*pNew)); - if( pNew==0 ) return; - pNew->pNext = pTrans->pWInfo->pExprMods; - pTrans->pWInfo->pExprMods = pNew; - pNew->pExpr = pExpr; - memcpy(&pNew->orig, pExpr, sizeof(*pExpr)); -} - -/* The walker node callback used to transform matching expressions into -** a reference to an index column for an index on an expression. -** -** If pExpr matches, then transform it into a reference to the index column -** that contains the value of pExpr. 
-*/ -static int whereIndexExprTransNode(Walker *p, Expr *pExpr){ - IdxExprTrans *pX = p->u.pIdxTrans; - if( sqlite3ExprCompare(0, pExpr, pX->pIdxExpr, pX->iTabCur)==0 ){ - pExpr = sqlite3ExprSkipCollate(pExpr); - preserveExpr(pX, pExpr); - pExpr->affExpr = sqlite3ExprAffinity(pExpr); - pExpr->op = TK_COLUMN; - pExpr->iTable = pX->iIdxCur; - pExpr->iColumn = pX->iIdxCol; - testcase( ExprHasProperty(pExpr, EP_Unlikely) ); - ExprClearProperty(pExpr, EP_Skip|EP_Unlikely|EP_WinFunc|EP_Subrtn); - pExpr->y.pTab = 0; - return WRC_Prune; - }else{ - return WRC_Continue; - } -} - -#ifndef SQLITE_OMIT_GENERATED_COLUMNS -/* A walker node callback that translates a column reference to a table -** into a corresponding column reference of an index. -*/ -static int whereIndexExprTransColumn(Walker *p, Expr *pExpr){ - if( pExpr->op==TK_COLUMN ){ - IdxExprTrans *pX = p->u.pIdxTrans; - if( pExpr->iTable==pX->iTabCur && pExpr->iColumn==pX->iTabCol ){ - assert( ExprUseYTab(pExpr) && pExpr->y.pTab!=0 ); - preserveExpr(pX, pExpr); - pExpr->affExpr = sqlite3TableColumnAffinity(pExpr->y.pTab,pExpr->iColumn); - pExpr->iTable = pX->iIdxCur; - pExpr->iColumn = pX->iIdxCol; - pExpr->y.pTab = 0; - } - } - return WRC_Continue; -} -#endif /* SQLITE_OMIT_GENERATED_COLUMNS */ - -/* -** For an indexes on expression X, locate every instance of expression X -** in pExpr and change that subexpression into a reference to the appropriate -** column of the index. -** -** 2019-10-24: Updated to also translate references to a VIRTUAL column in -** the table into references to the corresponding (stored) column of the -** index. -*/ -static void whereIndexExprTrans( - Index *pIdx, /* The Index */ - int iTabCur, /* Cursor of the table that is being indexed */ - int iIdxCur, /* Cursor of the index itself */ - WhereInfo *pWInfo /* Transform expressions in this WHERE clause */ -){ - int iIdxCol; /* Column number of the index */ - ExprList *aColExpr; /* Expressions that are indexed */ - Table *pTab; - Walker w; - IdxExprTrans x; - aColExpr = pIdx->aColExpr; - if( aColExpr==0 && !pIdx->bHasVCol ){ - /* The index does not reference any expressions or virtual columns - ** so no translations are needed. */ - return; - } - pTab = pIdx->pTable; - memset(&w, 0, sizeof(w)); - w.u.pIdxTrans = &x; - x.iTabCur = iTabCur; - x.iIdxCur = iIdxCur; - x.pWInfo = pWInfo; - x.db = pWInfo->pParse->db; - for(iIdxCol=0; iIdxColnColumn; iIdxCol++){ - i16 iRef = pIdx->aiColumn[iIdxCol]; - if( iRef==XN_EXPR ){ - assert( aColExpr!=0 && aColExpr->a[iIdxCol].pExpr!=0 ); - x.pIdxExpr = aColExpr->a[iIdxCol].pExpr; - if( sqlite3ExprIsConstant(x.pIdxExpr) ) continue; - w.xExprCallback = whereIndexExprTransNode; -#ifndef SQLITE_OMIT_GENERATED_COLUMNS - }else if( iRef>=0 - && (pTab->aCol[iRef].colFlags & COLFLAG_VIRTUAL)!=0 - && ((pTab->aCol[iRef].colFlags & COLFLAG_HASCOLL)==0 - || sqlite3StrICmp(sqlite3ColumnColl(&pTab->aCol[iRef]), - sqlite3StrBINARY)==0) - ){ - /* Check to see if there are direct references to generated columns - ** that are contained in the index. Pulling the generated column - ** out of the index is an optimization only - the main table is always - ** available if the index cannot be used. 
To avoid unnecessary - ** complication, omit this optimization if the collating sequence for - ** the column is non-standard */ - x.iTabCol = iRef; - w.xExprCallback = whereIndexExprTransColumn; -#endif /* SQLITE_OMIT_GENERATED_COLUMNS */ - }else{ - continue; - } - x.iIdxCol = iIdxCol; - sqlite3WalkExpr(&w, pWInfo->pWhere); - sqlite3WalkExprList(&w, pWInfo->pOrderBy); - sqlite3WalkExprList(&w, pWInfo->pResultSet); - } -} - /* ** The pTruth expression is always true because it is the WHERE clause ** a partial index that is driving a query loop. Look through all of the ** WHERE clause terms on the query, and if any of those terms must be ** true because pTruth is true, then mark those WHERE clause terms as @@ -1420,10 +1283,12 @@ assert( pTerm!=0 ); assert( pTerm->pExpr!=0 ); testcase( pTerm->wtFlags & TERM_VIRTUAL ); regRowid = sqlite3GetTempReg(pParse); regRowid = codeEqualityTerm(pParse, pTerm, pLevel, 0, 0, regRowid); + sqlite3VdbeAddOp2(pParse->pVdbe, OP_MustBeInt, regRowid, addrNxt); + VdbeCoverage(pParse->pVdbe); sqlite3VdbeAddOp4Int(pParse->pVdbe, OP_Filter, pLevel->regFilter, addrNxt, regRowid, 1); VdbeCoverage(pParse->pVdbe); }else{ u16 nEq = pLoop->u.btree.nEq; @@ -1571,13 +1436,13 @@ codeExprOrVector(pParse, pRight, iTarget, 1); if( pTerm->eMatchOp==SQLITE_INDEX_CONSTRAINT_OFFSET && pLoop->u.vtab.bOmitOffset ){ assert( pTerm->eOperator==WO_AUX ); - assert( pWInfo->pLimit!=0 ); - assert( pWInfo->pLimit->iOffset>0 ); - sqlite3VdbeAddOp2(v, OP_Integer, 0, pWInfo->pLimit->iOffset); + assert( pWInfo->pSelect!=0 ); + assert( pWInfo->pSelect->iOffset>0 ); + sqlite3VdbeAddOp2(v, OP_Integer, 0, pWInfo->pSelect->iOffset); VdbeComment((v,"Zero OFFSET counter")); } } } sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg); @@ -1681,10 +1546,12 @@ iReleaseReg = ++pParse->nMem; iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg); if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg); addrNxt = pLevel->addrNxt; if( pLevel->regFilter ){ + sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt); + VdbeCoverage(v); sqlite3VdbeAddOp4Int(v, OP_Filter, pLevel->regFilter, addrNxt, iRowidReg, 1); VdbeCoverage(v); filterPullDown(pParse, pWInfo, iLevel, addrNxt, notReady); } @@ -2032,10 +1899,15 @@ ** of entries in the tree, so basing the number of steps to try ** on the estimated number of rows in the btree seems like a good ** guess. */ addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan, (pIdx->aiRowLogEst[0]+9)/10); + if( pRangeStart ){ + sqlite3VdbeChangeP5(v, 1); + sqlite3VdbeChangeP2(v, addrSeekScan, sqlite3VdbeCurrentAddr(v)+1); + addrSeekScan = 0; + } VdbeCoverage(v); } sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint); VdbeCoverage(v); VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind ); @@ -2170,31 +2042,10 @@ sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont, iRowidReg, pPk->nKeyCol); VdbeCoverage(v); } if( pLevel->iLeftJoin==0 ){ - /* If pIdx is an index on one or more expressions, then look through - ** all the expressions in pWInfo and try to transform matching expressions - ** into reference to index columns. Also attempt to translate references - ** to virtual columns in the table into references to (stored) columns - ** of the index. - ** - ** Do not do this for the RHS of a LEFT JOIN. This is because the - ** expression may be evaluated after OP_NullRow has been executed on - ** the cursor. 
In this case it is important to do the full evaluation, - ** as the result of the expression may not be NULL, even if all table - ** column values are. https://www.sqlite.org/src/info/7fa8049685b50b5a - ** - ** Also, do not do this when processing one index an a multi-index - ** OR clause, since the transformation will become invalid once we - ** move forward to the next index. - ** https://sqlite.org/src/info/4e8e4857d32d401f - */ - if( (pWInfo->wctrlFlags & (WHERE_OR_SUBCLAUSE|WHERE_RIGHT_JOIN))==0 ){ - whereIndexExprTrans(pIdx, iCur, iIdxCur, pWInfo); - } - /* If a partial index is driving the loop, try to eliminate WHERE clause ** terms from the query that must be true due to the WHERE clause of ** the partial index. ** ** 2019-11-02 ticket 623eff57e76d45f6: This optimization does not work @@ -2303,11 +2154,11 @@ */ if( pWInfo->nLevel>1 ){ int nNotReady; /* The number of notReady tables */ SrcItem *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; - pOrTab = sqlite3StackAllocRaw(db, + pOrTab = sqlite3DbMallocRawNN(db, sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); if( pOrTab==0 ) return notReady; pOrTab->nAlloc = (u8)(nNotReady + 1); pOrTab->nSrc = pOrTab->nAlloc; memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem)); @@ -2556,11 +2407,11 @@ ** indent everything in between the this point and the final OP_Return. ** See tag-20220407a in vdbe.c and shell.c */ assert( pLevel->op==OP_Return ); pLevel->p2 = sqlite3VdbeCurrentAddr(v); - if( pWInfo->nLevel>1 ){ sqlite3StackFree(db, pOrTab); } + if( pWInfo->nLevel>1 ){ sqlite3DbFreeNN(db, pOrTab); } if( !untestedTerms ) disableTerm(pLevel, pTerm); }else #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ { Index: src/whereexpr.c ================================================================== --- src/whereexpr.c +++ src/whereexpr.c @@ -264,11 +264,11 @@ ** 2019-09-03 https://sqlite.org/src/info/0f0428096f17252a */ if( pLeft->op!=TK_COLUMN || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT || (ALWAYS( ExprUseYTab(pLeft) ) - && pLeft->y.pTab + && ALWAYS(pLeft->y.pTab) && IsVirtual(pLeft->y.pTab)) /* Might be numeric */ ){ int isNum; double rDummy; isNum = sqlite3AtoF(zNew, &rDummy, iTo, SQLITE_UTF8); @@ -381,12 +381,11 @@ ** ** vtab_column MATCH expression ** MATCH(expression,vtab_column) */ pCol = pList->a[1].pExpr; - assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) ); - testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); + assert( pCol->op!=TK_COLUMN || (ExprUseYTab(pCol) && pCol->y.pTab!=0) ); if( ExprIsVtab(pCol) ){ for(i=0; iu.zToken, aOp[i].zOp)==0 ){ *peOp2 = aOp[i].eOp2; @@ -407,11 +406,11 @@ ** names. But for this use case, xFindFunction is expected to deal ** with function names in an arbitrary case. 
*/ pCol = pList->a[0].pExpr; assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) ); - testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); + assert( pCol->op!=TK_COLUMN || (ExprUseYTab(pCol) && pCol->y.pTab!=0) ); if( ExprIsVtab(pCol) ){ sqlite3_vtab *pVtab; sqlite3_module *pMod; void (*xNotUsed)(sqlite3_context*,int,sqlite3_value**); void *pNotUsed; @@ -432,17 +431,16 @@ } }else if( pExpr->op==TK_NE || pExpr->op==TK_ISNOT || pExpr->op==TK_NOTNULL ){ int res = 0; Expr *pLeft = pExpr->pLeft; Expr *pRight = pExpr->pRight; - assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) ); - testcase( pLeft->op==TK_COLUMN && pLeft->y.pTab==0 ); + assert( pLeft->op!=TK_COLUMN || (ExprUseYTab(pLeft) && pLeft->y.pTab!=0) ); if( ExprIsVtab(pLeft) ){ res++; } - assert( pRight==0 || pRight->op!=TK_COLUMN || ExprUseYTab(pRight) ); - testcase( pRight && pRight->op==TK_COLUMN && pRight->y.pTab==0 ); + assert( pRight==0 || pRight->op!=TK_COLUMN + || (ExprUseYTab(pRight) && pRight->y.pTab!=0) ); if( pRight && ExprIsVtab(pRight) ){ res++; SWAP(Expr*, pLeft, pRight); } *ppLeft = pLeft; @@ -987,10 +985,11 @@ iCur = pFrom->a[i].iCursor; for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->aColExpr==0 ) continue; for(i=0; inKeyCol; i++){ if( pIdx->aiColumn[i]!=XN_EXPR ) continue; + assert( pIdx->bHasExpr ); if( sqlite3ExprCompareSkip(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){ aiCurCol[0] = iCur; aiCurCol[1] = XN_EXPR; return 1; } @@ -1600,13 +1599,13 @@ ** ** LIMIT and OFFSET terms are ignored by most of the planner code. They ** exist only so that they may be passed to the xBestIndex method of the ** single virtual table in the FROM clause of the SELECT. */ -void sqlite3WhereAddLimit(WhereClause *pWC, Select *p){ - assert( p==0 || (p->pGroupBy==0 && (p->selFlags & SF_Aggregate)==0) ); - if( (p && p->pLimit) /* 1 */ +void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Select *p){ + assert( p!=0 && p->pLimit!=0 ); /* 1 -- checked by caller */ + if( p->pGroupBy==0 && (p->selFlags & (SF_Distinct|SF_Aggregate))==0 /* 2 */ && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pTab)) /* 3 */ ){ ExprList *pOrderBy = p->pOrderBy; int iCsr = p->pSrc->a[0].iCursor; DELETED test/bc_test1.c Index: test/bc_test1.c ================================================================== --- test/bc_test1.c +++ /dev/null @@ -1,556 +0,0 @@ -/* -** 2016-05-07 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
-** -************************************************************************* -*/ - - -#include -#include -#include -#include "tt3_core.c" - -#ifdef USE_OSINST -# include "../src/test_osinst.c" -#else -# define vfslog_time() 0 -#endif - -typedef struct Config Config; -typedef struct ThreadCtx ThreadCtx; - -#define THREAD_TIME_INSERT 0 -#define THREAD_TIME_COMMIT 1 -#define THREAD_TIME_ROLLBACK 2 -#define THREAD_TIME_WRITER 3 -#define THREAD_TIME_CKPT 4 - -struct ThreadCtx { - Config *pConfig; - Sqlite *pDb; - Error *pErr; - sqlite3_int64 aTime[5]; -}; - -struct Config { - int nIPT; /* --inserts-per-transaction */ - int nThread; /* --threads */ - int nSecond; /* --seconds */ - int bMutex; /* --mutex */ - int nAutoCkpt; /* --autockpt */ - int bRm; /* --rm */ - int bClearCache; /* --clear-cache */ - int nMmap; /* mmap limit in MB */ - char *zFile; - int bOsinst; /* True to use osinst */ - - ThreadCtx *aCtx; /* Array of size nThread */ - - pthread_cond_t cond; - pthread_mutex_t mutex; - int nCondWait; /* Number of threads waiting on hCond */ - sqlite3_vfs *pVfs; -}; - - -typedef struct VfsWrapperFd VfsWrapperFd; -struct VfsWrapperFd { - sqlite3_file base; /* Base class */ - int bWriter; /* True if holding shm WRITER lock */ - int iTid; - Config *pConfig; - sqlite3_file *pFd; /* Underlying file descriptor */ -}; - -/* Methods of the wrapper VFS */ -static int vfsWrapOpen(sqlite3_vfs*, const char*, sqlite3_file*, int, int*); -static int vfsWrapDelete(sqlite3_vfs*, const char*, int); -static int vfsWrapAccess(sqlite3_vfs*, const char*, int, int*); -static int vfsWrapFullPathname(sqlite3_vfs*, const char *, int, char*); -static void *vfsWrapDlOpen(sqlite3_vfs*, const char*); -static void vfsWrapDlError(sqlite3_vfs*, int, char*); -static void (*vfsWrapDlSym(sqlite3_vfs*,void*, const char*))(void); -static void vfsWrapDlClose(sqlite3_vfs*, void*); -static int vfsWrapRandomness(sqlite3_vfs*, int, char*); -static int vfsWrapSleep(sqlite3_vfs*, int); -static int vfsWrapCurrentTime(sqlite3_vfs*, double*); -static int vfsWrapGetLastError(sqlite3_vfs*, int, char*); -static int vfsWrapCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*); -static int vfsWrapSetSystemCall(sqlite3_vfs*, const char*, sqlite3_syscall_ptr); -static sqlite3_syscall_ptr vfsWrapGetSystemCall(sqlite3_vfs*, const char*); -static const char *vfsWrapNextSystemCall(sqlite3_vfs*, const char*); - -/* Methods of wrapper sqlite3_io_methods object (see vfsWrapOpen()) */ -static int vfsWrapClose(sqlite3_file*); -static int vfsWrapRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); -static int vfsWrapWrite(sqlite3_file*, const void*, int iAmt, sqlite3_int64); -static int vfsWrapTruncate(sqlite3_file*, sqlite3_int64 size); -static int vfsWrapSync(sqlite3_file*, int flags); -static int vfsWrapFileSize(sqlite3_file*, sqlite3_int64 *pSize); -static int vfsWrapLock(sqlite3_file*, int); -static int vfsWrapUnlock(sqlite3_file*, int); -static int vfsWrapCheckReservedLock(sqlite3_file*, int *pResOut); -static int vfsWrapFileControl(sqlite3_file*, int op, void *pArg); -static int vfsWrapSectorSize(sqlite3_file*); -static int vfsWrapDeviceCharacteristics(sqlite3_file*); -static int vfsWrapShmMap(sqlite3_file*, int iPg, int, int, void volatile**); -static int vfsWrapShmLock(sqlite3_file*, int offset, int n, int flags); -static void vfsWrapShmBarrier(sqlite3_file*); -static int vfsWrapShmUnmap(sqlite3_file*, int deleteFlag); -static int vfsWrapFetch(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **); -static int vfsWrapUnfetch(sqlite3_file*, 
sqlite3_int64 iOfst, void *p); - -static int vfsWrapOpen( - sqlite3_vfs *pVfs, - const char *zName, - sqlite3_file *pFd, - int flags, - int *fout -){ - static sqlite3_io_methods methods = { - 3, - vfsWrapClose, vfsWrapRead, vfsWrapWrite, - vfsWrapTruncate, vfsWrapSync, vfsWrapFileSize, - vfsWrapLock, vfsWrapUnlock, vfsWrapCheckReservedLock, - vfsWrapFileControl, vfsWrapSectorSize, vfsWrapDeviceCharacteristics, - vfsWrapShmMap, vfsWrapShmLock, vfsWrapShmBarrier, - vfsWrapShmUnmap, vfsWrapFetch, vfsWrapUnfetch - }; - - Config *pConfig = (Config*)pVfs->pAppData; - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - int rc; - - memset(pWrapper, 0, sizeof(VfsWrapperFd)); - if( flags & SQLITE_OPEN_MAIN_DB ){ - pWrapper->iTid = (int)sqlite3_uri_int64(zName, "tid", 0); - } - - pWrapper->pFd = (sqlite3_file*)&pWrapper[1]; - pWrapper->pConfig = pConfig; - rc = pConfig->pVfs->xOpen(pConfig->pVfs, zName, pWrapper->pFd, flags, fout); - if( rc==SQLITE_OK ){ - pWrapper->base.pMethods = &methods; - } - return rc; -} - -static int vfsWrapDelete(sqlite3_vfs *pVfs, const char *a, int b){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xDelete(pConfig->pVfs, a, b); -} -static int vfsWrapAccess(sqlite3_vfs *pVfs, const char *a, int b, int *c){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xAccess(pConfig->pVfs, a, b, c); -} -static int vfsWrapFullPathname(sqlite3_vfs *pVfs, const char *a, int b, char*c){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xFullPathname(pConfig->pVfs, a, b, c); -} -static void *vfsWrapDlOpen(sqlite3_vfs *pVfs, const char *a){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xDlOpen(pConfig->pVfs, a); -} -static void vfsWrapDlError(sqlite3_vfs *pVfs, int a, char *b){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xDlError(pConfig->pVfs, a, b); -} -static void (*vfsWrapDlSym(sqlite3_vfs *pVfs, void *a, const char *b))(void){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xDlSym(pConfig->pVfs, a, b); -} -static void vfsWrapDlClose(sqlite3_vfs *pVfs, void *a){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xDlClose(pConfig->pVfs, a); -} -static int vfsWrapRandomness(sqlite3_vfs *pVfs, int a, char *b){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xRandomness(pConfig->pVfs, a, b); -} -static int vfsWrapSleep(sqlite3_vfs *pVfs, int a){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xSleep(pConfig->pVfs, a); -} -static int vfsWrapCurrentTime(sqlite3_vfs *pVfs, double *a){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xCurrentTime(pConfig->pVfs, a); -} -static int vfsWrapGetLastError(sqlite3_vfs *pVfs, int a, char *b){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xGetLastError(pConfig->pVfs, a, b); -} -static int vfsWrapCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *a){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xCurrentTimeInt64(pConfig->pVfs, a); -} -static int vfsWrapSetSystemCall( - sqlite3_vfs *pVfs, - const char *a, - sqlite3_syscall_ptr b -){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xSetSystemCall(pConfig->pVfs, a, b); -} -static sqlite3_syscall_ptr vfsWrapGetSystemCall( - sqlite3_vfs *pVfs, - const char *a -){ - Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xGetSystemCall(pConfig->pVfs, a); -} -static const char *vfsWrapNextSystemCall(sqlite3_vfs *pVfs, const char *a){ - 
Config *pConfig = (Config*)pVfs->pAppData; - return pConfig->pVfs->xNextSystemCall(pConfig->pVfs, a); -} - -static int vfsWrapClose(sqlite3_file *pFd){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - pWrapper->pFd->pMethods->xClose(pWrapper->pFd); - pWrapper->pFd = 0; - return SQLITE_OK; -} -static int vfsWrapRead(sqlite3_file *pFd, void *a, int b, sqlite3_int64 c){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xRead(pWrapper->pFd, a, b, c); -} -static int vfsWrapWrite( - sqlite3_file *pFd, - const void *a, int b, - sqlite3_int64 c -){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xWrite(pWrapper->pFd, a, b, c); -} -static int vfsWrapTruncate(sqlite3_file *pFd, sqlite3_int64 a){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xTruncate(pWrapper->pFd, a); -} -static int vfsWrapSync(sqlite3_file *pFd, int a){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xSync(pWrapper->pFd, a); -} -static int vfsWrapFileSize(sqlite3_file *pFd, sqlite3_int64 *a){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xFileSize(pWrapper->pFd, a); -} -static int vfsWrapLock(sqlite3_file *pFd, int a){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xLock(pWrapper->pFd, a); -} -static int vfsWrapUnlock(sqlite3_file *pFd, int a){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xUnlock(pWrapper->pFd, a); -} -static int vfsWrapCheckReservedLock(sqlite3_file *pFd, int *a){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xCheckReservedLock(pWrapper->pFd, a); -} -static int vfsWrapFileControl(sqlite3_file *pFd, int a, void *b){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xFileControl(pWrapper->pFd, a, b); -} -static int vfsWrapSectorSize(sqlite3_file *pFd){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xSectorSize(pWrapper->pFd); -} -static int vfsWrapDeviceCharacteristics(sqlite3_file *pFd){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xDeviceCharacteristics(pWrapper->pFd); -} -static int vfsWrapShmMap( - sqlite3_file *pFd, - int a, int b, int c, - void volatile **d -){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xShmMap(pWrapper->pFd, a, b, c, d); -} -static int vfsWrapShmLock(sqlite3_file *pFd, int offset, int n, int flags){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - Config *pConfig = pWrapper->pConfig; - int bMutex = 0; - int rc; - - if( (offset==0 && n==1) - && (flags & SQLITE_SHM_LOCK) && (flags & SQLITE_SHM_EXCLUSIVE) - ){ - pthread_mutex_lock(&pConfig->mutex); - pWrapper->bWriter = 1; - bMutex = 1; - if( pWrapper->iTid ){ - sqlite3_int64 t = vfslog_time(); - pConfig->aCtx[pWrapper->iTid-1].aTime[THREAD_TIME_WRITER] -= t; - } - } - - rc = pWrapper->pFd->pMethods->xShmLock(pWrapper->pFd, offset, n, flags); - - if( (rc!=SQLITE_OK && bMutex) - || (offset==0 && (flags & SQLITE_SHM_UNLOCK) && pWrapper->bWriter) - ){ - assert( pWrapper->bWriter ); - pthread_mutex_unlock(&pConfig->mutex); - pWrapper->bWriter = 0; - if( pWrapper->iTid ){ - sqlite3_int64 t = vfslog_time(); - pConfig->aCtx[pWrapper->iTid-1].aTime[THREAD_TIME_WRITER] += t; - } - } - - return rc; -} -static void vfsWrapShmBarrier(sqlite3_file *pFd){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return 
pWrapper->pFd->pMethods->xShmBarrier(pWrapper->pFd); -} -static int vfsWrapShmUnmap(sqlite3_file *pFd, int a){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xShmUnmap(pWrapper->pFd, a); -} -static int vfsWrapFetch(sqlite3_file *pFd, sqlite3_int64 a, int b, void **c){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xFetch(pWrapper->pFd, a, b, c); -} -static int vfsWrapUnfetch(sqlite3_file *pFd, sqlite3_int64 a, void *b){ - VfsWrapperFd *pWrapper = (VfsWrapperFd*)pFd; - return pWrapper->pFd->pMethods->xUnfetch(pWrapper->pFd, a, b); -} - -static void create_vfs(Config *pConfig){ - static sqlite3_vfs vfs = { - 3, 0, 0, 0, "wrapper", 0, - vfsWrapOpen, vfsWrapDelete, vfsWrapAccess, - vfsWrapFullPathname, vfsWrapDlOpen, vfsWrapDlError, - vfsWrapDlSym, vfsWrapDlClose, vfsWrapRandomness, - vfsWrapSleep, vfsWrapCurrentTime, vfsWrapGetLastError, - vfsWrapCurrentTimeInt64, vfsWrapSetSystemCall, vfsWrapGetSystemCall, - vfsWrapNextSystemCall - }; - sqlite3_vfs *pVfs; - - pVfs = sqlite3_vfs_find(0); - vfs.mxPathname = pVfs->mxPathname; - vfs.szOsFile = pVfs->szOsFile + sizeof(VfsWrapperFd); - vfs.pAppData = (void*)pConfig; - pConfig->pVfs = pVfs; - - sqlite3_vfs_register(&vfs, 1); -} - - -/* -** Wal hook used by connections in thread_main(). -*/ -static int thread_wal_hook( - void *pArg, /* Pointer to ThreadCtx object */ - sqlite3 *db, - const char *zDb, - int nFrame -){ - ThreadCtx *pCtx = (ThreadCtx*)pArg; - Config *pConfig = pCtx->pConfig; - - if( pConfig->nAutoCkpt && nFrame>=pConfig->nAutoCkpt ){ - pCtx->aTime[THREAD_TIME_CKPT] -= vfslog_time(); - pthread_mutex_lock(&pConfig->mutex); - if( pConfig->nCondWait>=0 ){ - pConfig->nCondWait++; - if( pConfig->nCondWait==pConfig->nThread ){ - execsql(pCtx->pErr, pCtx->pDb, "PRAGMA wal_checkpoint"); - pthread_cond_broadcast(&pConfig->cond); - }else{ - pthread_cond_wait(&pConfig->cond, &pConfig->mutex); - } - pConfig->nCondWait--; - } - pthread_mutex_unlock(&pConfig->mutex); - pCtx->aTime[THREAD_TIME_CKPT] += vfslog_time(); - } - - return SQLITE_OK; -} - - -static char *thread_main(int iTid, void *pArg){ - Config *pConfig = (Config*)pArg; - Error err = {0}; /* Error code and message */ - Sqlite db = {0}; /* SQLite database connection */ - int nAttempt = 0; /* Attempted transactions */ - int nCommit = 0; /* Successful transactions */ - int j; - ThreadCtx *pCtx = &pConfig->aCtx[iTid-1]; - char *zUri = 0; - -#ifdef USE_OSINST - char *zOsinstName = 0; - char *zLogName = 0; - if( pConfig->bOsinst ){ - zOsinstName = sqlite3_mprintf("osinst%d", iTid); - zLogName = sqlite3_mprintf("bc_test1.log.%d.%d", (int)getpid(), iTid); - zUri = sqlite3_mprintf( - "file:%s?vfs=%s&tid=%d", pConfig->zFile, zOsinstName, iTid - ); - sqlite3_vfslog_new(zOsinstName, 0, zLogName); - opendb(&err, &db, zUri, 0); - }else -#endif - { - zUri = sqlite3_mprintf("file:%s?tid=%d", pConfig->zFile, iTid); - opendb(&err, &db, zUri, 0); - } - - sqlite3_busy_handler(db.db, 0, 0); - sql_script_printf(&err, &db, - "PRAGMA wal_autocheckpoint = 0;" - "PRAGMA synchronous = 0;" - "PRAGMA mmap_size = %lld;", - (i64)(pConfig->nMmap) * 1024 * 1024 - ); - - pCtx->pConfig = pConfig; - pCtx->pErr = &err; - pCtx->pDb = &db; - sqlite3_wal_hook(db.db, thread_wal_hook, (void*)pCtx); - - while( !timetostop(&err) ){ - execsql(&err, &db, "BEGIN CONCURRENT"); - - pCtx->aTime[THREAD_TIME_INSERT] -= vfslog_time(); - for(j=0; jnIPT; j++){ - execsql(&err, &db, - "INSERT INTO t1 VALUES" - "(randomblob(10), randomblob(20), randomblob(30), randomblob(200))" - 
); - } - pCtx->aTime[THREAD_TIME_INSERT] += vfslog_time(); - - pCtx->aTime[THREAD_TIME_COMMIT] -= vfslog_time(); - execsql(&err, &db, "COMMIT"); - pCtx->aTime[THREAD_TIME_COMMIT] += vfslog_time(); - - pCtx->aTime[THREAD_TIME_ROLLBACK] -= vfslog_time(); - nAttempt++; - if( err.rc==SQLITE_OK ){ - nCommit++; - }else{ - clear_error(&err, SQLITE_BUSY); - execsql(&err, &db, "ROLLBACK"); - } - pCtx->aTime[THREAD_TIME_ROLLBACK] += vfslog_time(); - - if( pConfig->bClearCache ){ - sqlite3_db_release_memory(db.db); - } - } - - closedb(&err, &db); - -#ifdef USE_OSINST - if( pConfig->bOsinst ){ - sqlite3_vfslog_finalize(zOsinstName); - sqlite3_free(zOsinstName); - sqlite3_free(zLogName); - } -#endif - sqlite3_free(zUri); - - pthread_mutex_lock(&pConfig->mutex); - pConfig->nCondWait = -1; - pthread_cond_broadcast(&pConfig->cond); - pthread_mutex_unlock(&pConfig->mutex); - - return sqlite3_mprintf("commits: %d/%d insert: %dms" - " commit: %dms" - " rollback: %dms" - " writer: %dms" - " checkpoint: %dms", - nCommit, nAttempt, - (int)(pCtx->aTime[THREAD_TIME_INSERT]/1000), - (int)(pCtx->aTime[THREAD_TIME_COMMIT]/1000), - (int)(pCtx->aTime[THREAD_TIME_ROLLBACK]/1000), - (int)(pCtx->aTime[THREAD_TIME_WRITER]/1000), - (int)(pCtx->aTime[THREAD_TIME_CKPT]/1000) - ); -} - -int main(int argc, const char **argv){ - Error err = {0}; /* Error code and message */ - Sqlite db = {0}; /* SQLite database connection */ - Threadset threads = {0}; /* Test threads */ - Config conf = {5, 3, 5}; - int i; - - CmdlineArg apArg[] = { - { "-seconds", CMDLINE_INT, offsetof(Config, nSecond) }, - { "-inserts", CMDLINE_INT, offsetof(Config, nIPT) }, - { "-threads", CMDLINE_INT, offsetof(Config, nThread) }, - { "-mutex", CMDLINE_BOOL, offsetof(Config, bMutex) }, - { "-rm", CMDLINE_BOOL, offsetof(Config, bRm) }, - { "-autockpt",CMDLINE_INT, offsetof(Config, nAutoCkpt) }, - { "-mmap", CMDLINE_INT, offsetof(Config, nMmap) }, - { "-clear-cache", CMDLINE_BOOL, offsetof(Config, bClearCache) }, - { "-file", CMDLINE_STRING, offsetof(Config, zFile) }, - { "-osinst", CMDLINE_BOOL, offsetof(Config, bOsinst) }, - { 0, 0, 0 } - }; - - conf.nAutoCkpt = 1000; - cmdline_process(apArg, argc, argv, (void*)&conf); - if( err.rc==SQLITE_OK ){ - char *z = cmdline_construct(apArg, (void*)&conf); - printf("With: %s\n", z); - sqlite3_free(z); - } - if( conf.zFile==0 ){ - conf.zFile = "xyz.db"; - } - - /* Create the special VFS - "wrapper". And the mutex and condition - ** variable. */ - create_vfs(&conf); - pthread_mutex_init(&conf.mutex, 0); - pthread_cond_init(&conf.cond, 0); - - conf.aCtx = sqlite3_malloc(sizeof(ThreadCtx) * conf.nThread); - memset(conf.aCtx, 0, sizeof(ThreadCtx) * conf.nThread); - - /* Ensure the schema has been created */ - opendb(&err, &db, conf.zFile, conf.bRm); - sql_script(&err, &db, - "PRAGMA journal_mode = wal;" - "CREATE TABLE IF NOT EXISTS t1(a PRIMARY KEY, b, c, d) WITHOUT ROWID;" - "CREATE INDEX IF NOT EXISTS t1b ON t1(b);" - "CREATE INDEX IF NOT EXISTS t1c ON t1(c);" - ); - - setstoptime(&err, conf.nSecond*1000); - if( conf.nThread==1 ){ - char *z = thread_main(1, (void*)&conf); - printf("Thread 0 says: %s\n", (z==0 ? "..." : z)); - fflush(stdout); - }else{ - for(i=0; i = ?) expression must be coerced +# to an integer before the comparison made. 
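A minimal public-API sketch of the coercion rule described in the test header above (standard SQLite build assumed; the program and file names are illustrative): because t2.c is an INTEGER PRIMARY KEY, the text literal '14' is converted to an integer before the comparison, so the row still matches. The same coercion requirement appears to be what the OP_MustBeInt opcodes added ahead of the OP_Filter probes in the wherecode.c hunks earlier in this diff guard against.

/* Minimal illustration of INTEGER affinity coercion, public API only.
** Build command (assumed): cc demo_affinity.c -lsqlite3
*/
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;
  int rc;

  rc = sqlite3_open(":memory:", &db);
  if( rc!=SQLITE_OK ) return 1;
  rc = sqlite3_exec(db,
      "CREATE TABLE t2(c INTEGER PRIMARY KEY, d);"
      "INSERT INTO t2 VALUES(14, 'fourteen');", 0, 0, 0);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "setup failed: %s\n", sqlite3_errmsg(db));
    return 1;
  }

  /* The RHS is a TEXT literal, but t2.c has INTEGER affinity, so '14'
  ** is converted to the integer 14 before the comparison is made and
  ** the row matches. */
  rc = sqlite3_prepare_v2(db, "SELECT d FROM t2 WHERE c = '14'", -1, &pStmt, 0);
  if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("matched: %s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }else{
    printf("no match\n");
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}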
+# +do_execsql_test 1.0 { + CREATE TABLE t1(a, b); + CREATE TABLE t2(c INTEGER PRIMARY KEY, d); +} + +do_execsql_test 1.1 { + INSERT INTO t1 VALUES('hello', 'world'); + INSERT INTO t2 VALUES(14, 'fourteen'); +} + +do_execsql_test 1.2 { + ANALYZE sqlite_schema; + INSERT INTO sqlite_stat1 VALUES('t2','idx1','6 6'); + ANALYZE sqlite_schema; +} + +do_execsql_test 1.3 { + SELECT 'affinity!' FROM t1 CROSS JOIN t2 WHERE t2.c = '14'; +} {affinity!} + + +reset_db +do_execsql_test 1.4 { + CREATE TABLE t1(a, b TEXT); + CREATE TABLE t2(c INTEGER PRIMARY KEY, d); + CREATE TABLE t3(e INTEGER PRIMARY KEY, f); + + ANALYZE sqlite_schema; + INSERT INTO sqlite_stat1 VALUES('t1','idx1','600 6'); + INSERT INTO sqlite_stat1 VALUES('t2','idx1','6 6'); + INSERT INTO sqlite_stat1 VALUES('t3','idx2','6 6'); + ANALYZE sqlite_schema; + + INSERT INTO t1 VALUES(1, '123'); + INSERT INTO t2 VALUES(123, 'one'); + INSERT INTO t3 VALUES(123, 'two'); +} + +do_execsql_test 1.5 { + SELECT 'result' FROM t1, t2, t3 + WHERE t2.c=t1.b AND t2.d!='silly' + AND t3.e=t1.b AND t3.f!='silly' +} {result} + +finish_test + Index: test/cast.test ================================================================== --- test/cast.test +++ test/cast.test @@ -479,9 +479,9 @@ CREATE TABLE t0(c0); INSERT INTO t0(c0) VALUES (0); CREATE VIEW v1(c0, c1) AS SELECT CAST(0.0 AS NUMERIC), COUNT(*) OVER () FROM t0; SELECT v1.c0 FROM v1, t0 WHERE v1.c0=0; -} {0.0} +} {0} finish_test Index: test/collate5.test ================================================================== --- test/collate5.test +++ test/collate5.test @@ -17,17 +17,20 @@ # $Id: collate5.test,v 1.7 2008/09/16 11:58:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +set testprefix collate5 + # # Tests are organised as follows: # collate5-1.* - DISTINCT # collate5-2.* - Compound SELECT # collate5-3.* - ORDER BY on compound SELECT # collate5-4.* - GROUP BY +# collate5-5.* - Collation sequence cases # Create the collation sequence 'TEXT', purely for asthetic reasons. The # test cases in this script could just as easily use BINARY. db collate TEXT [list string compare] @@ -286,7 +289,40 @@ do_test collate5-4.3 { execsql { DROP TABLE collate5t1; } } {} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 5.0 { + CREATE TABLE t1(a, b COLLATE nocase); + CREATE TABLE t2(c, d); + INSERT INTO t2 VALUES(1, 'bbb'); +} +do_execsql_test 5.1 { + SELECT * FROM ( + SELECT a, b FROM t1 UNION ALL SELECT c, d FROM t2 + ) WHERE b='BbB'; +} {1 bbb} + +reset_db +do_execsql_test 5.2 { + CREATE TABLE t1(a,b,c COLLATE NOCASE); + INSERT INTO t1 VALUES(NULL,'C','c'); + CREATE VIEW v2 AS + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,b FROM t1 + WHERE 'eT"3qRkL+oJMJjQ9z0'>=b + ORDER BY a,b,c; +} + +do_execsql_test 5.3 { + SELECT * FROM v2; +} { {} C c } + +do_execsql_test 5.4 { + SELECT * FROM v2 WHERE c='c'; +} { {} C c } + finish_test DELETED test/concfault.test Index: test/concfault.test ================================================================== --- test/concfault.test +++ /dev/null @@ -1,86 +0,0 @@ -# 2015 Aug 25 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. 
-# -#*********************************************************************** -# -# This file contains fault injection tests designed to test the concurrent -# transactions feature. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/malloc_common.tcl -set testprefix concfault - -# This test will not work with an in-memory journal, as the database will -# become corrupt if an error is injected into a transaction after it starts -# writing data out to the db file. -ifcapable !concurrent { - finish_test - return -} - -do_test 1-pre1 { - execsql { - PRAGMA journal_mode = wal; - CREATE TABLE t1(a PRIMARY KEY, b); - INSERT INTO t1 VALUES(randomblob(1000), randomblob(100)); - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - DELETE FROM t1 WHERE rowid%2; - } - faultsim_save_and_close -} {} - -do_faultsim_test 1.1 -prep { - faultsim_restore_and_reopen -} -body { - execsql { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(randomblob(1000), randomblob(100)); - COMMIT; - } -} -test { - faultsim_test_result {0 {}} - catchsql { ROLLBACK } - faultsim_integrity_check -} - -do_faultsim_test 1.2 -prep { - faultsim_restore_and_reopen -} -body { - execsql { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(randomblob(1000), randomblob(100)); - ROLLBACK; - } -} -test { - faultsim_test_result {0 {}} - catchsql { ROLLBACK } - faultsim_integrity_check -} - -do_faultsim_test 1.3 -prep { - faultsim_restore_and_reopen -} -body { - execsql { - BEGIN CONCURRENT; - DELETE FROM t1; - COMMIT; - } -} -test { - faultsim_test_result {0 {}} - catchsql { ROLLBACK } - faultsim_integrity_check -} - -finish_test - DELETED test/concfault2.test Index: test/concfault2.test ================================================================== --- test/concfault2.test +++ /dev/null @@ -1,69 +0,0 @@ -# 2018 Dec 28 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# -# This file contains fault injection tests designed to test the concurrent -# transactions feature. 
-# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/malloc_common.tcl -set testprefix concfault2 - -ifcapable !concurrent { - finish_test - return -} - -do_execsql_test 1.0 { - PRAGMA auto_vacuum = 0; - PRAGMA journal_mode = wal2; - CREATE TABLE t1(a PRIMARY KEY, b); - CREATE TABLE t2(a PRIMARY KEY, b); - INSERT INTO t1 VALUES(randomblob(1000), randomblob(100)); - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1; - DELETE FROM t1 WHERE rowid%2; -} {wal2} - -do_test 1.1 { - list [expr [file size test.db-wal]>75000] [file size test.db-shm] -} {1 32768} - -faultsim_save_and_close - -do_faultsim_test 1 -prep { - faultsim_restore_and_reopen - execsql { - SELECT * FROM t1; - BEGIN CONCURRENT; - INSERT INTO t2 VALUES(1, 2); - } - sqlite3 db2 test.db - execsql { - PRAGMA journal_size_limit = 10000; - INSERT INTO t1 VALUES(randomblob(1000), randomblob(1000)); - } db2 - db2 close -} -body { - execsql { COMMIT } -} -test { - faultsim_test_result {0 {}} - catchsql { ROLLBACK } - set res [catchsql { SELECT count(*) FROM t1 }] - if {$res!="0 9"} { error "expected {0 9} got {$res}" } - faultsim_integrity_check -} - -finish_test - DELETED test/concurrent.test Index: test/concurrent.test ================================================================== --- test/concurrent.test +++ /dev/null @@ -1,687 +0,0 @@ -# 2015 July 26 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -set ::testprefix concurrent - -ifcapable !concurrent { - finish_test - return -} - -do_execsql_test 1.0 { - PRAGMA journal_mode = wal; -} {wal} - -do_execsql_test 1.1 { - CREATE TABLE t1(k INTEGER PRIMARY KEY, v); - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(1, 'abcd'); - COMMIT; -} - -do_execsql_test 1.2 { - SELECT * FROM t1; -} {1 abcd} - -do_execsql_test 1.3 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(2, 'efgh'); - ROLLBACK; -} - -do_execsql_test 1.4 { - SELECT * FROM t1; -} {1 abcd} - - -#------------------------------------------------------------------------- -# CONCURRENT transactions cannot do cache spills. -# -foreach {tn trans spill} { - 1 {BEGIN CONCURRENT} 0 - 2 {BEGIN} 1 -} { - do_test 1.5.$tn { - sqlite3 db2 test.db - set walsz [file size test.db-wal] - - execsql { PRAGMA cache_size = 10 } db2 - execsql $trans db2 - execsql { - WITH cnt(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM cnt WHERE i<50) - INSERT INTO t1(v) SELECT randomblob(900) FROM cnt; - } db2 - - expr {[file size test.db-wal]==$walsz} - } [expr !$spill] - - execsql ROLLBACK db2 - db2 close -} - -#------------------------------------------------------------------------- -# CONCURRENT transactions man not be committed while there are active -# readers. 
-do_execsql_test 1.6.setup { - DROP TABLE t1; - CREATE TABLE t1(a, b); - INSERT INTO t1 VALUES(1, 2); - INSERT INTO t1 VALUES(3, 4); - INSERT INTO t1 VALUES(5, 6); -} -foreach {tn trans commit_ok} { - 1 {BEGIN CONCURRENT} 0 - 2 {BEGIN} 1 -} { - do_test 1.6.$tn.1 { - set stmt [sqlite3_prepare db "SELECT * FROM t1" -1 dummy] - sqlite3_step $stmt - } SQLITE_ROW - do_test 1.6.$tn.2 { - execsql $trans - execsql { INSERT INTO t1 VALUES(7, 8) } - } {} - - if { $commit_ok } { - do_test 1.6.$tn.3 { catchsql COMMIT } {0 {}} - } else { - do_test 1.6.$tn.4 { catchsql COMMIT } {/1 {cannot commit transaction .*}/} - } - - sqlite3_finalize $stmt - catchsql ROLLBACK -} - -#------------------------------------------------------------------------- -# CONCURRENT transactions may not modify the db schema. -# -sqlite3 db2 test.db -foreach {tn sql} { - 1 { CREATE TABLE xx(a, b) } - 2 { DROP TABLE t1 } - 3 { CREATE INDEX i1 ON t1(a) } - 4 { CREATE VIEW v1 AS SELECT * FROM t1 } -} { - do_catchsql_test 1.7.0.$tn.1 " - BEGIN CONCURRENT; - $sql - " {0 {}} - - db2 eval {INSERT INTO t1 DEFAULT VALUES} - - do_catchsql_test 1.7.0.$tn.2 { - COMMIT - } {1 {database is locked}} - - do_execsql_test 1.7.0.$tn.2 ROLLBACK - - do_execsql_test 1.7.0.$tn.3 { - SELECT sql FROM sqlite_master; - SELECT sql FROM sqlite_temp_master; - } {{CREATE TABLE t1(a, b)}} - - #do_execsql_test 1.7.0.$tn.3 COMMIT -} - -# Except the temp db schema. -foreach {tn sql} { - 1 { CREATE TEMP TABLE xx(a, b) } - 2 { DROP TABLE xx } - 3 { CREATE TEMP TABLE yy(a, b) } - 4 { CREATE VIEW temp.v1 AS SELECT * FROM t1 } - 5 { CREATE INDEX yyi1 ON yy(a); } - 6 { CREATE TABLE temp.zz(a, b) } -} { - do_catchsql_test 1.7.1.$tn.1 " - BEGIN CONCURRENT; - $sql - " {0 {}} - - do_execsql_test 1.7.1.$tn.2 COMMIT -} - - -do_execsql_test 1.7.1.x { - SELECT sql FROM sqlite_master; - SELECT sql FROM sqlite_temp_master; -} { - {CREATE TABLE t1(a, b)} - {CREATE TABLE yy(a, b)} - {CREATE VIEW v1 AS SELECT * FROM t1} - {CREATE INDEX yyi1 ON yy(a)} - {CREATE TABLE zz(a, b)} -} - -#------------------------------------------------------------------------- -# If an auto-vacuum database is written within an CONCURRENT transaction, it -# is handled in the same way as for a non-CONCURRENT transaction. -# -reset_db -do_execsql_test 1.8.1 { - PRAGMA auto_vacuum = 1; - PRAGMA journal_mode = wal; - CREATE TABLE t1(x, y); - INSERT INTO t1 VALUES('x', 'y'); -} {wal} - -do_execsql_test 1.8.2 { - BEGIN CONCURRENT; - SELECT * FROM t1; - COMMIT; -} {x y} - -do_catchsql_test 1.8.3 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES('a', 'b'); -} {0 {}} - -do_test 1.8.4 { - sqlite3 db2 test.db - catchsql { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES('c', 'd'); - } db2 -} {1 {database is locked}} - -do_test 1.8.5 { - db eval COMMIT - db2 eval COMMIT -} {} -db close -db2 close - -do_multiclient_test tn { - - #----------------------------------------------------------------------- - # 1. Start an CONCURRENT transaction using [db1]. - # - # 2. Start and then rollback a regular transaction using [db2]. This - # can be done as the ongoing [db1] transaction is CONCURRENT. - # - # 3. The [db1] transaction can now be committed, as [db2] has relinquished - # the write lock. 
- # - do_test 2.$tn.1.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(k INTEGER PRIMARY KEY, v); - INSERT INTO t1 VALUES(1, 'one'); - } - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(2, 'two'); - } - code1 { sqlite3_get_autocommit db } - } 0 - - do_test 2.$tn.1.2 { - sql2 { - BEGIN; - INSERT INTO t1 VALUES(3, 'three'); - ROLLBACK; - } - } {} - - do_test 2.$tn.1.3 { - sql1 COMMIT - sql2 { SELECT * FROM t1 } - } {1 one 2 two} - - #----------------------------------------------------------------------- - # 1. Start an CONCURRENT transaction using [db1]. - # - # 2. Commit a transaction using [db2]. - # - # 3. Try to commit with [db1]. Check that SQLITE_BUSY_SNAPSHOT is returned, - # and the transaction is not rolled back. - # - do_test 2.$tn.2.1 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(-1, 'hello world'); - } - } {} - - do_test 2.$tn.2.2 { - sql2 { - INSERT INTO t1 VALUES(3, 'three'); - } - } {} - - do_test 2.$tn.2.3.1 { - set rc [catch { sql1 COMMIT } msg] - list $rc $msg - } {1 {database is locked}} - - do_test 2.$tn.2.3.2 { - code1 { list [sqlite3_extended_errcode db] [sqlite3_get_autocommit db] } - } {SQLITE_BUSY_SNAPSHOT 0} - - do_test 2.$tn.2.3.3 { - sql1 { - SELECT * FROM t1; - ROLLBACK; - } - } {-1 {hello world} 1 one 2 two} - - #----------------------------------------------------------------------- - # 1. Start an CONCURRENT transaction using [db1]. - # - # 2. Open a transaction using [db2]. - # - # 3. Try to commit with [db1]. Check that SQLITE_BUSY is returned, - # and the transaction is not rolled back. - # - # 4. Have [db2] roll its transaction back. Then check that [db1] can - # commit. - # - do_test 2.$tn.3.1 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(4, 'four'); - } - } {} - - do_test 2.$tn.3.2 { - sql2 { - BEGIN; - INSERT INTO t1 VALUES(-1, 'xyz'); - } - } {} - - do_test 2.$tn.3.3.1 { - set rc [catch { sql1 COMMIT } msg] - list $rc $msg - } {1 {database is locked}} - - do_test 2.$tn.3.3.2 { - code1 { list [sqlite3_extended_errcode db] [sqlite3_get_autocommit db] } - } {SQLITE_BUSY 0} - - do_test 2.$tn.3.3.3 { - sql1 { SELECT * FROM t1; } - } {1 one 2 two 3 three 4 four} - - do_test 2.$tn.3.4 { - sql2 ROLLBACK - sql1 COMMIT - sql1 { SELECT * FROM t1; } - } {1 one 2 two 3 three 4 four} - - #----------------------------------------------------------------------- - # 1. Create a second table - t2. - # - # 2. Write to t1 with [db] and t2 with [db2]. - # - # 3. See if it worked. - # - do_test 2.$tn.4.1 { - sql1 { CREATE TABLE t2(a, b) } - } {} - do_test 2.$tn.4.2 { - sql2 { - BEGIN CONCURRENT; - INSERT INTO t2 VALUES('i', 'n'); - } - - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(5, 'five'); - COMMIT; - } - - sql2 COMMIT - } {} - - do_test 2.$tn.4.3.1 { - sql2 {SELECT * FROM t1} - } {1 one 2 two 3 three 4 four 5 five} - do_test 2.$tn.4.3.2 { - sql1 {SELECT * FROM t1} - } {1 one 2 two 3 three 4 four 5 five} - - do_test 2.$tn.4.3.3 { sql2 {SELECT * FROM t2} } {i n} - do_test 2.$tn.4.3.4 { sql1 {SELECT * FROM t2} } {i n} - - #----------------------------------------------------------------------- - # The "schema cookie" issue. - # - # 1. Begin and CONCURRENT write to "t1" using [db] - # - # 2. Create an index on t1 using [db2]. - # - # 3. Attempt to commit the CONCURRENT write. This is an SQLITE_BUSY_SNAPSHOT, - # even though there is no page collision. 
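The SQLITE_BUSY_SNAPSHOT outcome that the deleted tests check for BEGIN CONCURRENT also arises in stock WAL mode whenever a deferred read transaction tries to upgrade to a write after another connection has committed. The following sketch reproduces that baseline flavour of the error using only the public API (the database file name is an assumption and error handling is kept minimal):

/* Two connections to one WAL-mode database: db1 fixes a read snapshot,
** db2 commits a write, and db1's attempt to upgrade to a writer then
** fails with extended error code SQLITE_BUSY_SNAPSHOT.
** Build command (assumed): cc demo_snapshot.c -lsqlite3
*/
#include <stdio.h>
#include <sqlite3.h>

static void run(sqlite3 *db, const char *zSql){
  char *zErr = 0;
  if( sqlite3_exec(db, zSql, 0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr);
    sqlite3_free(zErr);
  }
}

int main(void){
  sqlite3 *db1 = 0, *db2 = 0;
  int rc;

  sqlite3_open("busy_snapshot_demo.db", &db1);
  sqlite3_open("busy_snapshot_demo.db", &db2);
  run(db1, "PRAGMA journal_mode=wal;"
           "CREATE TABLE IF NOT EXISTS t1(a INTEGER PRIMARY KEY, b);");

  /* db1 opens a read transaction; the SELECT fixes its snapshot. */
  run(db1, "BEGIN; SELECT count(*) FROM t1;");

  /* db2 commits a write, so db1's snapshot is no longer current. */
  run(db2, "INSERT INTO t1(b) VALUES('x');");

  /* db1 now tries to write from within the stale snapshot. */
  rc = sqlite3_exec(db1, "INSERT INTO t1(b) VALUES('y');", 0, 0, 0);
  printf("rc=%d extended=%d (SQLITE_BUSY_SNAPSHOT=%d)\n",
         rc, sqlite3_extended_errcode(db1), SQLITE_BUSY_SNAPSHOT);

  run(db1, "ROLLBACK;");
  sqlite3_close(db1);
  sqlite3_close(db2);
  return 0;
}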
- # - do_test 2.$tn.5.1 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(6, 'six'); - } - } {} - - do_test 2.$tn.5.2 { - sql2 { CREATE INDEX i1 ON t1(v); } - } {} - - do_test 2.$tn.5.3 { - list [catch { sql1 { COMMIT } } msg] $msg [sqlite3_errcode db] - } {1 {database is locked} SQLITE_BUSY_SNAPSHOT} - - do_test 2.$tn.5.4 { - sql2 { PRAGMA integrity_check } - } {ok} - catch { sql1 ROLLBACK } - - #----------------------------------------------------------------------- - # - # 1. Begin an CONCURRENT write to "t1" using [db] - # - # 2. Lots of inserts into t2. Enough to grow the db file and modify page 1. - # - # 3. Check that the CONCURRENT transaction can not be committed. - # - do_test 2.$tn.6.1 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(6, 'six'); - } - } {} - - do_test 2.$tn.6.2 { - sql2 { - WITH src(a,b) AS ( - VALUES(1,1) UNION ALL SELECT a+1,b+1 FROM src WHERE a<10000 - ) INSERT INTO t2 SELECT * FROM src; - } - } {} - - do_test 2.$tn.6.3 { - sql1 { SELECT count(*) FROM t2 } - list [catch { sql1 { COMMIT } } msg] $msg [sqlite3_errcode db] - } {1 {database is locked} SQLITE_BUSY_SNAPSHOT} - sql1 ROLLBACK - - do_test 2.$tn.6.4 { - sql1 { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - } - } {5 10001} - - #----------------------------------------------------------------------- - # - # 1. Begin an big CONCURRENT write to "t1" using [db] - large enough to - # grow the db file. - # - # 2. Lots of inserts into t2. Also enough to grow the db file. - # - # 3. Check that the CONCURRENT transaction cannot be committed (due to a clash - # on page 1 - the db size field). - # - do_test 2.$tn.7.1 { - sql1 { - BEGIN CONCURRENT; - WITH src(a,b) AS ( - VALUES(10000,10000) UNION ALL SELECT a+1,b+1 FROM src WHERE a<20000 - ) INSERT INTO t1 SELECT * FROM src; - } - } {} - - do_test 2.$tn.7.2 { - sql2 { - WITH src(a,b) AS ( - VALUES(1,1) UNION ALL SELECT a+1,b+1 FROM src WHERE a<10000 - ) INSERT INTO t2 SELECT * FROM src; - } - } {} - - do_test 2.$tn.7.3 { - list [catch { sql1 { COMMIT } } msg] $msg [sqlite3_errcode db] - } {0 {} SQLITE_OK} - - do_test 2.$tn.7.4 { sql3 { PRAGMA integrity_check } } ok -} - -#------------------------------------------------------------------------- -# Concurrent transactions may not modify the user_version or application_id. -# -reset_db -do_execsql_test 3.0 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(x, y); - INSERT INTO t1 VALUES('a', 'b'); - PRAGMA user_version = 10; -} {wal} -do_execsql_test 3.1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES('c', 'd'); - SELECT * FROM t1; -} {a b c d} -do_catchsql_test 3.2 { - PRAGMA user_version = 11; -} {1 {cannot modify user_version within CONCURRENT transaction}} -do_execsql_test 3.3 { - PRAGMA user_version; - SELECT * FROM t1; -} {10 a b c d} -do_catchsql_test 3.4 { - PRAGMA application_id = 11; -} {1 {cannot modify application_id within CONCURRENT transaction}} -do_execsql_test 3.5 { - COMMIT; - PRAGMA user_version; - PRAGMA application_id; - SELECT * FROM t1; -} {10 0 a b c d} - -#------------------------------------------------------------------------- -# However, another transaction modifying the user_version or application_id -# should not cause a conflict. And committing a concurrent transaction does not -# clobber the modification - even if the concurrent transaction allocates or -# frees database pages. 
-# -do_multiclient_test tn { - do_test 4.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE ttt(y UNIQUE, z UNIQUE); - PRAGMA user_version = 14; - BEGIN CONCURRENT; - INSERT INTO ttt VALUES('y', 'z'); - } - } {wal} - do_test 4.$tn.2 { - sql2 { PRAGMA user_version = 16 } - sql1 COMMIT - sql1 { PRAGMA user_version } - } {16} - - do_test 4.$tn.3 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO ttt VALUES(randomblob(10000), randomblob(4)); - PRAGMA user_version; - } - } {16} - do_test 4.$tn.4 { - sql2 { PRAGMA user_version = 1234 } - sql1 { - PRAGMA user_version; - COMMIT; - PRAGMA user_version; - PRAGMA integrity_check; - } - } {16 1234 ok} - - do_test 4.$tn.5 { - sql1 { - BEGIN CONCURRENT; - DELETE FROM ttt; - PRAGMA user_version; - } - } {1234} - do_test 4.$tn.4 { - sql2 { PRAGMA user_version = 5678 } - sql1 { - PRAGMA user_version; - COMMIT; - PRAGMA user_version; - PRAGMA integrity_check; - } - } {1234 5678 ok} -} - -do_multiclient_test tn { - do_test 5.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE tt(a INTEGER PRIMARY KEY, b); - CREATE TABLE t2(a INTEGER PRIMARY KEY, b); - INSERT INTO tt VALUES(1, randomblob(400)); - BEGIN CONCURRENT; - } - } {wal} - - do_test 5.$tn.2 { - sql1 { UPDATE t2 SET b=5 WHERE a=3 } - sql2 { INSERT INTO tt VALUES(2, randomblob(6000)) } - } {} - - do_test 5.$tn.3 { - sql1 { COMMIT } - } {} -} - -do_multiclient_test tn { - do_test 6.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(a INTEGER PRIMARY KEY, b); - CREATE TABLE t2(a INTEGER PRIMARY KEY, b); - INSERT INTO t1 VALUES(1, 'one'); - INSERT INTO t2 VALUES(2, 'two'); - } - } {wal} - - do_test 6.$tn.2 { - sql2 { - BEGIN CONCURRENT; - SELECT * FROM t2; - INSERT INTO t1 VALUES(3, 'three'); - } - } {2 two} - - do_test 6.$tn.3 { - sql1 { - INSERT INTO t2 VALUES(3, 'three'); - } - } {} - - do_test 6.$tn.2 { - list [catch { sql2 { COMMIT } } msg] $msg - } {1 {database is locked}} -} - -do_multiclient_test tn { - do_test 7.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(a INTEGER PRIMARY KEY, b); - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<100) - INSERT INTO t1 SELECT NULL, randomblob(400) FROM s; - - CREATE TABLE t2(a INTEGER PRIMARY KEY, b); - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<50000) - INSERT INTO t2 SELECT NULL, randomblob(400) FROM s; - - CREATE TABLE t3(a INTEGER PRIMARY KEY, b); - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<100) - INSERT INTO t3 SELECT NULL, randomblob(400) FROM s; - - CREATE TABLE t4(a INTEGER PRIMARY KEY, b); - - } - set {} {} - } {} - - do_test 7.$tn.2 { - sql2 { - BEGIN CONCURRENT; - SELECT * FROM t1; - INSERT INTO t4 VALUES(1, 2); - } - set {} {} - } {} - - do_test 7.$tn.3 { - sql3 { - BEGIN CONCURRENT; - SELECT * FROM t3; - INSERT INTO t4 VALUES(1, 2); - } - set {} {} - } {} - - do_test 7.$tn.4 { - sql1 { - UPDATE t1 SET b=randomblob(400); - UPDATE t2 SET b=randomblob(400); - UPDATE t3 SET b=randomblob(400); - } - } {} - - do_test 7.$tn.5 { - csql2 { COMMIT } - } {1 {database is locked}} - - do_test 7.$tn.6 { - csql3 { COMMIT } - } {1 {database is locked}} - - - csql2 ROLLBACK - csql3 ROLLBACK - - # The following test works with $tn==1 (sql2 and sql3 use separate - # processes), but is quite slow. So only run it with $tn==2 (all - # connections in the same process). 
- # - if {$tn==2} { - do_test 7.$tn.7 { - for {set i 1} {$i < 10000} {incr i} { - sql3 { - PRAGMA wal_checkpoint; - BEGIN CONCURRENT; - SELECT * FROM t3; - INSERT INTO t4 VALUES(1, 2); - } - - sql1 { - UPDATE t2 SET b = randomblob(400) WHERE rowid <= $i; - UPDATE t3 SET b = randomblob(400) WHERE rowid = 1; - } - - if {[csql3 COMMIT]!={1 {database is locked}}} { - error "Failed at i=$i" - } - csql3 ROLLBACK - } - } {} - } - -} - -finish_test DELETED test/concurrent2.test Index: test/concurrent2.test ================================================================== --- test/concurrent2.test +++ /dev/null @@ -1,630 +0,0 @@ -# 2015 July 26 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# -# Miscellaneous tests for transactions started with BEGIN CONCURRENT. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/wal_common.tcl -set ::testprefix concurrent2 - -ifcapable !concurrent { - finish_test - return -} - -do_test 0.1 { - llength [sqlite3_wal_info db main] -} {2} - -do_multiclient_test tn { - - do_test 1.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(x); - CREATE TABLE t2(y); - } - } {wal} - do_test 1.$tn.5 { sql3 { PRAGMA integrity_check } } {ok} - - # Test that an CONCURRENT transaction that allocates/frees no pages does - # not conflict with a transaction that does allocate pages. - do_test 1.$tn.2 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(4); - } - sql2 { - INSERT INTO t2 VALUES(randomblob(1500)); - } - sql1 { - COMMIT; - } - } {} - do_test 1.$tn.5 { sql3 { PRAGMA integrity_check } } {ok} - - # But that an CONCURRENT transaction does conflict with a transaction - # that modifies the db schema. - do_test 1.$tn.3 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(5); - } - sql2 { - CREATE TABLE t3(z); - } - list [catch { sql1 COMMIT } msg] $msg - } {1 {database is locked}} - do_test 1.$tn.5 { sql3 { PRAGMA integrity_check } } {ok} - - # Test that an CONCURRENT transaction that allocates at least one page - # does not conflict with a transaction that allocates no pages. 
- do_test 1.$tn.4 { - sql1 { - ROLLBACK; - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(randomblob(1500)); - } - sql2 { - INSERT INTO t2 VALUES(8); - } - sql1 { - COMMIT; - } - } {} - - do_test 1.$tn.5 { sql3 { PRAGMA integrity_check } } {ok} -} - -do_multiclient_test tn { - do_test 2.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(x UNIQUE); - CREATE TABLE t2(y UNIQUE); - } - } {wal} - - do_test 2.$tn.2 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(randomblob(1500)); - } - sql2 { - INSERT INTO t2 VALUES(randomblob(1500)); - } - sql1 COMMIT - } {} - - do_test 2.$tn.3 { sql3 { PRAGMA integrity_check } } {ok} - - do_test 2.$tn.4 { - sql1 { - BEGIN CONCURRENT; - DELETE FROM t1; - } - sql2 { - DELETE FROM t2; - } - sql1 COMMIT - } {} - - do_test 2.$tn.5 { sql3 { PRAGMA integrity_check } } {ok} - - do_test 2.$tn.6 { - sql1 { - INSERT INTO t1 VALUES(randomblob(1500)); - INSERT INTO t1 VALUES(randomblob(1500)); - INSERT INTO t2 VALUES(randomblob(1500)); - DELETE FROM t1 WHERE rowid=1; - } - - sql1 { - BEGIN CONCURRENT; - DELETE FROM t1 WHERE rowid=2; - } - - sql2 { - DELETE FROM t2; - } - - sql1 COMMIT - } {} - - do_test 2.$tn.7 { sql3 { PRAGMA integrity_check } } {ok} -} - -#------------------------------------------------------------------------- -# When an CONCURRENT transaction is opened on a database, the nFree and -# iTrunk header fields of the cached version of page 1 are both set -# to 0. This allows an CONCURRENT transaction to use its own private -# free-page-list, which is merged with the main database free-list when -# the transaction is committed. -# -# The following tests check that nFree/iTrunk are correctly restored if -# an CONCURRENT transaction is rolled back, and that savepoint rollbacks -# that occur within CONCURRENT transactions do not incorrectly restore -# these fields to their on-disk values. -# -reset_db -do_execsql_test 3.0 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(x, y); - INSERT INTO t1 VALUES(randomblob(1500), randomblob(1500)); - DELETE FROM t1; -} {wal} - -do_execsql_test 3.1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(1, 2); - ROLLBACK; -} - -do_execsql_test 3.2 { PRAGMA integrity_check } {ok} -do_execsql_test 3.3 { PRAGMA freelist_count } {2} - -do_execsql_test 3.4.1 { - BEGIN CONCURRENT; - PRAGMA freelist_count; -} {2} -do_execsql_test 3.4.2 { - SAVEPOINT xyz; - INSERT INTO t1 VALUES(randomblob(1500), NULL); - PRAGMA freelist_count; -} {0} -do_execsql_test 3.4.3 { - ROLLBACK TO xyz; -} {} -do_execsql_test 3.4.4 { PRAGMA freelist_count } {0} -do_execsql_test 3.4.5 { COMMIT; PRAGMA freelist_count } {2} -do_execsql_test 3.4.6 { PRAGMA integrity_check } {ok} - -do_execsql_test 3.5.1 { - BEGIN CONCURRENT; - UPDATE t1 SET x=randomblob(10) WHERE y=555; - PRAGMA freelist_count; -} {0} -do_execsql_test 3.5.2 { - ROLLBACK; - PRAGMA freelist_count; -} {2} -do_execsql_test 3.5.3 { PRAGMA integrity_check } {ok} - -#------------------------------------------------------------------------- -# Test that nothing goes wrong if an CONCURRENT transaction allocates a -# page at the end of the file, frees it within the same transaction, and -# then has to move the same page to avoid a conflict on COMMIT. 
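The free-list and page-count figures that the tests above compare are ordinary pragmas and can be read the same way from C. In the sketch below, db_pragma_int() is a hypothetical helper written for illustration, not an SQLite API; the schema mirrors the one used by the deleted test so that the DELETE leaves free pages behind.

/* Read a single integer-valued PRAGMA such as freelist_count or
** page_count.  db_pragma_int() is a hypothetical helper for this sketch.
** Build command (assumed): cc demo_freelist.c -lsqlite3
*/
#include <stdio.h>
#include <sqlite3.h>

static int db_pragma_int(sqlite3 *db, const char *zPragma, int *pVal){
  sqlite3_stmt *pStmt = 0;
  char *zSql = sqlite3_mprintf("PRAGMA %s", zPragma);
  int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  sqlite3_free(zSql);
  if( rc==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      *pVal = sqlite3_column_int(pStmt, 0);
    }
    rc = sqlite3_finalize(pStmt);
  }
  return rc;
}

int main(void){
  sqlite3 *db = 0;
  int nFree = 0, nPage = 0;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE t1(x, y);"
      "INSERT INTO t1 VALUES(randomblob(1500), randomblob(1500));"
      "DELETE FROM t1;", 0, 0, 0);
  db_pragma_int(db, "freelist_count", &nFree);
  db_pragma_int(db, "page_count", &nPage);
  printf("freelist_count=%d page_count=%d\n", nFree, nPage);
  sqlite3_close(db);
  return 0;
}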
-# -do_multiclient_test tn { - do_test 4.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(x); - CREATE TABLE t2(x); - } - } {wal} - - do_test 4.$tn.2 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(randomblob(1500)); - INSERT INTO t1 VALUES(randomblob(1500)); - DELETE FROM t1 WHERE rowid = 1; - } - - sql2 { - INSERT INTO t2 VALUES(randomblob(1500)); - INSERT INTO t2 VALUES(randomblob(1500)); - INSERT INTO t2 VALUES(randomblob(1500)); - INSERT INTO t2 VALUES(randomblob(1500)); - DELETE FROM t2 WHERE rowid IN (1, 2); - } - - sql1 COMMIT - } {} -} - -#------------------------------------------------------------------------- -# -do_multiclient_test tn { - do_test 5.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(x); - CREATE TABLE t2(x); - INSERT INTO t1 VALUES(randomblob(1500)); - PRAGMA page_count; - } - } {wal 4} - - do_test 5.$tn.2 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t2 VALUES(randomblob(1500)); - PRAGMA page_count; - } - } {5} - - do_test 5.$tn.3 { - sql2 { - DELETE FROM t1; - PRAGMA freelist_count; - PRAGMA page_count; - } - } {1 4} - - do_test 5.$tn.4 { sql1 COMMIT } {} - do_test 5.$tn.5 { sql3 { PRAGMA integrity_check } } {ok} -} - -#------------------------------------------------------------------------- -# -do_multiclient_test tn { - do_test 6.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(x); - INSERT INTO t1 VALUES(randomblob(1500)); - PRAGMA wal_checkpoint; - } - } {wal 0 5 5} - - do_test 6.$tn.2 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(randomblob(1500)); - INSERT INTO t1 VALUES(randomblob(1500)); - } - } {} - - do_test 6.$tn.3 { - sql2 { - BEGIN; - INSERT INTO t1 VALUES(randomblob(1500)); - INSERT INTO t1 VALUES(randomblob(1500)); - COMMIT; - } - } {} - - do_test 6.$tn.4 { - list [catch { sql1 COMMIT } msg] $msg - } {1 {database is locked}} - do_test 6.$tn.5 { sql3 { PRAGMA integrity_check } } {ok} - do_test 6.$tn.5 { sql3 { SELECT count(*) from t1 } } {3} -} - -#------------------------------------------------------------------------- -# Test that if a corrupt wal-index-header is encountered when attempting -# to commit a CONCURRENT transaction, the transaction is not committed -# (or rolled back) and that SQLITE_BUSY_SNAPSHOT is returned to the user. -# -catch { db close } -forcedelete test.db -testvfs tvfs -sqlite3 db test.db -vfs tvfs -do_execsql_test 7.1 { - PRAGMA journal_mode = wal; - BEGIN; - CREATE TABLE t1(a, b, PRIMARY KEY(a)); - INSERT INTO t1 VALUES(1, 2); - INSERT INTO t1 VALUES(3, 4); - COMMIT; - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(5, 6); - INSERT INTO t1 VALUES(7, 8); - SELECT * FROM t1; -} {wal 1 2 3 4 5 6 7 8} - -# Corrupt the wal-index header -incr_tvfs_hdr test.db 11 1 - -do_catchsql_test 7.2.1 { COMMIT } {1 {database is locked}} -do_test 7.2.2 { sqlite3_extended_errcode db } SQLITE_BUSY_SNAPSHOT - -do_execsql_test 7.3.1 { - SELECT * FROM t1; - ROLLBACK; -} {1 2 3 4 5 6 7 8} -do_execsql_test 7.3.2 { - SELECT * FROM t1; -} {1 2 3 4} - -#------------------------------------------------------------------------- -# Test that "PRAGMA integrity_check" works within a concurrent -# transaction. Within a concurrent transaction, "PRAGMA integrity_check" -# is unable to detect unused database pages, but can detect other types -# of corruption. 
-# -reset_db -do_test 8.1 { - execsql { - PRAGMA journal_mode = wal; - CREATE TABLE kv(k INTEGER PRIMARY KEY, v UNIQUE); - INSERT INTO kv VALUES(NULL, randomblob(750)); - INSERT INTO kv SELECT NULL, randomblob(750) FROM kv; - INSERT INTO kv SELECT NULL, randomblob(750) FROM kv; - INSERT INTO kv SELECT NULL, randomblob(750) FROM kv; - INSERT INTO kv SELECT NULL, randomblob(750) FROM kv; - INSERT INTO kv SELECT NULL, randomblob(750) FROM kv; - DELETE FROM kv WHERE rowid%2; - } - set v [db one {PRAGMA freelist_count}] - expr $v==33 || $v==34 -} {1} -do_execsql_test 8.2 { PRAGMA integrity_check } ok -do_execsql_test 8.3 { - BEGIN CONCURRENT; - PRAGMA integrity_check; -} {ok} -do_execsql_test 8.4 { - INSERT INTO kv VALUES(1100, 1100); - PRAGMA integrity_check; -} {ok} -do_execsql_test 8.5 { - COMMIT; - PRAGMA integrity_check; -} {ok} - -#------------------------------------------------------------------------- -# Test that concurrent transactions do not allow foreign-key constraints -# to be bypassed. -# -do_multiclient_test tn { - do_test 9.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE pp(i INTEGER PRIMARY KEY, j); - CREATE TABLE cc(a, b REFERENCES pp); - - WITH seq(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM seq WHERE i<100) - INSERT INTO pp SELECT i, randomblob(1000) FROM seq; - - PRAGMA foreign_keys = 1; - } - } {wal} - - - do_test 9.$tn.2.1 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO cc VALUES(42, 42); - } - } {} - do_test 9.$tn.2.2 { - sql2 { DELETE FROM pp WHERE i=42 } - list [catch { sql1 COMMIT } msg] $msg - } {1 {database is locked}} - do_test 9.$tn.2.3 { - sql1 ROLLBACK - } {} - - do_test 9.$tn.3.1 { - sql1 { - PRAGMA foreign_keys = 0; - BEGIN CONCURRENT; - INSERT INTO cc VALUES(43, 43); - } - } {} - do_test 9.$tn.3.2 { - sql2 { DELETE FROM pp WHERE i=43 } - list [catch { sql1 COMMIT } msg] $msg - } {0 {}} - - do_test 9.$tn.4.1 { - sql1 { - PRAGMA foreign_keys = on; - BEGIN CONCURRENT; - INSERT INTO cc VALUES(44, 44); - } - } {} - do_test 9.$tn.4.2 { - sql2 { DELETE FROM pp WHERE i=1 } - list [catch { sql1 COMMIT } msg] $msg - } {0 {}} -} - -#------------------------------------------------------------------------- -# Test that even if a SELECT statement appears before all writes within -# a CONCURRENT transaction, the pages it reads are still considered when -# considering whether or not the transaction may be committed. -# -do_multiclient_test tn { - do_test 10.$tn.1.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(a); - CREATE TABLE t2(b); - CREATE TABLE t3(c); - INSERT INTO t1 VALUES(1), (2), (3); - INSERT INTO t2 VALUES(1), (2), (3); - INSERT INTO t3 VALUES(1), (2), (3); - } - } {wal} - - do_test 10.$tn.1.2 { - sql1 { - BEGIN CONCURRENT; - SELECT * FROM t1; - INSERT INTO t2 VALUES(4); - } - } {1 2 3} - - do_test 10.$tn.1.3 { - sql2 { INSERT INTO t1 VALUES(4) } - list [catch {sql1 COMMIT} msg] $msg - } {1 {database is locked}} - sql1 ROLLBACK - - # In this case, because the "SELECT * FROM t1" is first stepped before - # the "BEGIN CONCURRENT", the pages it reads are not recorded by the - # pager object. And so the transaction can be committed. Technically - # this behaviour (the effect of an ongoing SELECT on a BEGIN CONCURRENT - # transacation) is undefined. 
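Before the prepared-statement cases that follow, it may be worth spelling out how a caller typically reacts to the failed COMMITs seen in tests 7 and 10.1 above: the CONCURRENT transaction is left open, so the caller rolls back and reruns the whole transaction against the newer snapshot. The proc below is only a sketch of that retry loop, not part of the patch; the proc name and retry limit are invented, and it assumes a begin-concurrent build with a tester.tcl "db" handle.

# Run a write transaction under BEGIN CONCURRENT, rolling back and retrying
# if the COMMIT loses the page-level conflict check (SQLITE_BUSY_SNAPSHOT,
# reported as "database is locked" at the Tcl level).
proc run_concurrent {body {nRetry 5}} {
  for {set i 0} {$i < $nRetry} {incr i} {
    db eval { BEGIN CONCURRENT }
    if {[catch { uplevel $body } err]} {
      db eval { ROLLBACK }
      error $err
    }
    if {[catch { db eval COMMIT } err]==0} { return }
    # The failed COMMIT leaves the transaction open, exactly as test 7.3
    # shows, so roll it back and run the body again.
    db eval { ROLLBACK }
  }
  error "concurrent transaction failed after $nRetry attempts: $err"
}

# Example use, against the t1/t2 schema created by the tests above:
#   run_concurrent { db eval { INSERT INTO t2 VALUES(4) } }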
- # - do_test 10.$tn.2.1 { - code1 { - set ::stmt [sqlite3_prepare db "SELECT * FROM t1" -1 dummy] - sqlite3_step $::stmt - } - } {SQLITE_ROW} - do_test 10.$tn.2.2 { - sql1 { - BEGIN CONCURRENT; - INSERT INTO t2 VALUES(4); - } - code1 { - set res [list] - lappend res [sqlite3_column_int $::stmt 0] - while {[sqlite3_step $::stmt]=="SQLITE_ROW"} { - lappend res [sqlite3_column_int $::stmt 0] - } - sqlite3_finalize $::stmt - set res - } - } {1 2 3 4} - do_test 10.$tn.2.3 { - sql2 { INSERT INTO t1 VALUES(5) } - sql1 COMMIT - } {} - - # More tests surrounding long-lived prepared statements and concurrent - # transactions. - do_test 10.$tn.3.1 { - sql1 { - BEGIN CONCURRENT; - SELECT * FROM t1; - COMMIT; - } - sql1 { - BEGIN CONCURRENT; - INSERT INTO t2 VALUES(5); - } - sql2 { - INSERT INTO t1 VALUES(5); - } - sql1 COMMIT - sql3 { - SELECT * FROM t2; - } - } {1 2 3 4 5} - do_test 10.$tn.3.2 { - sql1 { - BEGIN CONCURRENT; - SELECT * FROM t1; - ROLLBACK; - } - sql1 { - BEGIN CONCURRENT; - INSERT INTO t2 VALUES(6); - } - sql2 { - INSERT INTO t1 VALUES(6); - } - sql1 COMMIT - sql3 { SELECT * FROM t2 } - } {1 2 3 4 5 6} - do_test 10.$tn.3.3 { - sql1 { BEGIN CONCURRENT } - code1 { - set ::stmt [sqlite3_prepare db "SELECT * FROM t1" -1 dummy] - sqlite3_step $::stmt - } - sql1 { - INSERT INTO t2 VALUES(7); - SELECT * FROM t3; - ROLLBACK; - BEGIN CONCURRENT; - } - sql2 { INSERT INTO t3 VALUES(5) } - code1 { sqlite3_finalize $::stmt } - sql1 { - INSERT INTO t2 VALUES(8); - COMMIT; - } - } {} -} - -do_multiclient_test tn { - do_test 11.$tn.1 { - sql1 { - PRAGMA journal_mode = wal; - CREATE TABLE t1(a); - } - } {wal} - - do_test 11.$tn.2 { - code1 { sqlite3_wal_info db main } - } {0 2} - - do_test 11.$tn.3 { - sql1 { INSERT INTO t1 VALUES(1) } - code1 { sqlite3_wal_info db main } - } {2 3} - - do_test 11.$tn.4 { - sql2 { INSERT INTO t1 VALUES(2) } - code2 { sqlite3_wal_info db2 main } - } {3 4} - - do_test 11.$tn.5 { - sql1 { PRAGMA wal_checkpoint } - sql2 { INSERT INTO t1 VALUES(3) } - code2 { sqlite3_wal_info db2 main } - } {0 1} -} - -reset_db -do_execsql_test 12.0 { - PRAGMA journal_mode = wal; - CREATE TABLE tx(a INTEGER PRIMARY KEY, b); -} {wal} -do_test 12.1 { - for {set i 0} {$i < 50} {incr i} { - execsql { - BEGIN CONCURRENT; - INSERT INTO tx(b) VALUES( randomblob( 1200 ) ); - COMMIT; - } - } - execsql { PRAGMA page_size } -} {1024} -do_execsql_test 12.2 { - DELETE FROM tx; -} -do_test 12.3 { - for {set i 0} {$i < 50} {incr i} { - execsql { - BEGIN CONCURRENT; - INSERT INTO tx(b) VALUES( randomblob( 1200 ) ); - COMMIT; - } - } - execsql { PRAGMA page_size } -} {1024} -do_execsql_test 12.4 { - DELETE FROM tx; -} -do_test 12.5 { - execsql { BEGIN CONCURRENT } - for {set i 0} {$i < 5000} {incr i} { - execsql { - INSERT INTO tx(b) VALUES( randomblob( 1200 ) ); - } - } - execsql { COMMIT } - execsql { PRAGMA page_size } -} {1024} - - -finish_test DELETED test/concurrent3.test Index: test/concurrent3.test ================================================================== --- test/concurrent3.test +++ /dev/null @@ -1,231 +0,0 @@ -# 2015 July 26 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# -# Tests for transactions started with BEGIN CONCURRENT. 
The tests in this -# file focus on testing that deferred page allocation works properly. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -set ::testprefix concurrent3 - -if {$AUTOVACUUM} { finish_test ; return } -ifcapable !concurrent { - finish_test - return -} - -db close -sqlite3_shutdown -test_sqlite3_log xLog -proc xLog {error_code msg} { - # puts "$error_code: $msg" - # Enable the previous for debugging -} -reset_db - -proc create_schema {} { - db eval { - PRAGMA journal_mode = wal; - - CREATE TABLE t1(x, y); - CREATE TABLE t2(x, y); - CREATE TABLE t3(x, y); - CREATE TABLE t4(x, y); - - CREATE INDEX i1 ON t1(y, x); - CREATE INDEX i2 ON t2(y, x); - CREATE INDEX i3 ON t3(y, x); - CREATE INDEX i4 ON t4(y, x); - } -} - -proc do_sql_op {iTbl iOp} { - set db "db$iTbl" - - switch $iOp { - "i" { - set sql " - WITH cnt(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM cnt WHERE i<10) - INSERT INTO t$iTbl SELECT randomblob(800), randomblob(800) FROM cnt; - " - } - - "d" { - set sql " - DELETE FROM t$iTbl WHERE rowid IN ( - SELECT rowid FROM t$iTbl ORDER BY 1 ASC LIMIT 10 - ) - " - } - - "D" { - set sql " - DELETE FROM t$iTbl WHERE rowid IN ( - SELECT rowid FROM t$iTbl o WHERE ( - SELECT count(*) FROM t$iTbl i WHERE i.rowid$nPg - } {1} - - do_test 1.$tn.7 { - sql2 { PRAGMA integrity_check } - } {ok} - - do_test 1.$tn.8 { - sql1 { - BEGIN CONCURRENT; - CREATE TABLE t4(a, b); - } - sql2 { - INSERT INTO t1 VALUES(2, 2); - } - list [catch { sql1 COMMIT } msg] $msg - } {1 {database is locked}} - sql1 ROLLBACK - - do_test 1.$tn.9 { - sql1 { - BEGIN CONCURRENT; - CREATE TEMP TABLE t5(a, b); - INSERT INTO t2 VALUES('x', 'x'); - } - sql2 { - INSERT INTO t1 VALUES(3, 3); - CREATE TEMP TABLE t1(x, y); - } - sql1 COMMIT - } {} -} - - - -finish_test - - Index: test/corruptA.test ================================================================== --- test/corruptA.test +++ test/corruptA.test @@ -45,11 +45,11 @@ # db close forcecopy test.db test.db-template set unreadable_version 02 -ifcapable wal { set unreadable_version 04 } +ifcapable wal { set unreadable_version 03 } do_test corruptA-2.1 { forcecopy test.db-template test.db hexio_write test.db 19 $unreadable_version ;# the read format number sqlite3 db test.db catchsql {SELECT * FROM t1} Index: test/corruptL.test ================================================================== --- test/corruptL.test +++ test/corruptL.test @@ -1477,15 +1477,28 @@ }]} {} do_execsql_test 19.1 { PRAGMA writable_schema=ON; } - -set err "UNIQUE constraint failed: index 'a'" -ifcapable oversize_cell_check { - set err "database disk image is malformed" -} do_catchsql_test 19.2 { UPDATE t1 SET a=1; -} [list 1 $err] +} {1 {database disk image is malformed}} + +reset_db +do_execsql_test 19.3 { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT, c INTEGER, d TEXT); + CREATE INDEX i1 ON t1((NULL)); + INSERT INTO t1 VALUES(1, NULL, 1, 'text value'); + PRAGMA writable_schema = on; + UPDATE sqlite_schema SET + sql = 'CREATE INDEX i1 ON t1(b, c, d)', + tbl_name = 't1', + type='index' + WHERE name='i1'; +} +db close +sqlite3 db test.db +do_catchsql_test 19.4 { + PRAGMA integrity_check; +} {1 {database disk image is malformed}} finish_test Index: test/corruptN.test ================================================================== --- test/corruptN.test +++ test/corruptN.test @@ -138,10 +138,20 @@ | page 4 offset 12288 | 0: 0a 00 00 00 02 0f f5 00 0f fb 0f f5 00 00 00 00 ................ 
| 4080: 00 00 00 00 00 05 03 01 01 0d 02 04 03 00 00 00 ................ | end c-b92b.txt.db }]} {} + +# This test only works with the legacy RC4 PRNG +if 0 { + prng_seed 0 db + do_catchsql_test 2.1 { + SELECT count(*) FROM sqlite_schema; + WITH RECURSIVE c(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM c WHERE x<1000) + INSERT INTO t1(a) SELECT randomblob(null) FROM c; + } {1 {database disk image is malformed}} +} reset_db if {![info exists ::G(perm:presql)]} { do_execsql_test 3.0 { CREATE TABLE t1(x INTEGER PRIMARY KEY AUTOINCREMENT, y); Index: test/dbpagefault.test ================================================================== --- test/dbpagefault.test +++ test/dbpagefault.test @@ -17,10 +17,15 @@ if {[permutation] == "inmemory_journal"} { finish_test return } + +ifcapable !vtab { + finish_test + return +} set testprefix dbpagefault faultsim_save_and_close do_faultsim_test 1 -prep { @@ -55,19 +60,29 @@ DELETE FROM sqlite_dbpage WHERE pgno=100; UPDATE sqlite_dbpage SET data=null WHERE pgno=100; END; } -do_faultsim_test 3 -prep { - catch { db close } - sqlite3 db test.db - execsql { PRAGMA trusted_schema = true } -} -body { - execsql { INSERT INTO x1 DEFAULT VALUES; } -} -test { - faultsim_test_result {0 {}} -} +# This test case no longer works, as it is no longer possible to use +# virtual table sqlite_dbpage from within a trigger. +# +do_execsql_test 3.1 { + PRAGMA trusted_schema = 1; +} +do_catchsql_test 3.2 { + PRAGMA trusted_schema = 1; + INSERT INTO x1 DEFAULT VALUES; +} {1 {unsafe use of virtual table "sqlite_dbpage"}} +#do_faultsim_test 3 -prep { +# catch { db close } +# sqlite3 db test.db +# execsql { PRAGMA trusted_schema = 1 } +#} -body { +# execsql { INSERT INTO x1 DEFAULT VALUES; } +#} -test { +# faultsim_test_result {0 {}} +#} finish_test Index: test/e_wal.test ================================================================== --- test/e_wal.test +++ test/e_wal.test @@ -13,10 +13,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix e_wal db close +forcedelete test.db-shm testvfs oldvfs -iversion 1 # EVIDENCE-OF: R-58297-14483 WAL databases can be created, read, and # written even if shared memory is unavailable as long as the Index: test/fuzzcheck.c ================================================================== --- test/fuzzcheck.c +++ test/fuzzcheck.c @@ -83,10 +83,11 @@ #include #include #include #include #include "sqlite3.h" +#include "sqlite3recover.h" #define ISSPACE(X) isspace((unsigned char)(X)) #define ISDIGIT(X) isdigit((unsigned char)(X)) #ifdef __unix__ @@ -156,16 +157,14 @@ unsigned int nInvariant; /* Number of invariant checks run */ char zTestName[100]; /* Name of current test */ } g; /* -** Include the external vt02.c module, if requested by compile-time -** options. +** Include the external vt02.c module. */ -#ifdef VT02_SOURCES -# include "vt02.c" -#endif +extern int sqlite3_vt02_init(sqlite3*,char***,void*); + /* ** Print an error message and quit. */ static void fatalError(const char *zFormat, ...){ @@ -627,10 +626,13 @@ /* OOM simulation parameters */ static unsigned int oomCounter = 0; /* Simulate OOM when equals 1 */ static unsigned int oomRepeat = 0; /* Number of OOMs in a row */ static void*(*defaultMalloc)(int) = 0; /* The low-level malloc routine */ +/* Enable recovery */ +static int bNoRecover = 0; + /* This routine is called when a simulated OOM occurs. 
It is broken ** out as a separate routine to make it easy to set a breakpoint on ** the OOM */ void oomFault(void){ @@ -967,19 +969,63 @@ } return SQLITE_OK; } /* Implementation found in fuzzinvariant.c */ -int fuzz_invariant( +extern int fuzz_invariant( sqlite3 *db, /* The database connection */ sqlite3_stmt *pStmt, /* Test statement stopped on an SQLITE_ROW */ int iCnt, /* Invariant sequence number, starting at 0 */ int iRow, /* The row number for pStmt */ int nRow, /* Total number of output rows */ int *pbCorrupt, /* IN/OUT: Flag indicating a corrupt database file */ int eVerbosity /* How much debugging output */ ); + +/* Implementation of sqlite_dbdata and sqlite_dbptr */ +extern int sqlite3_dbdata_init(sqlite3*,const char**,void*); + + +/* +** This function is used as a callback by the recover extension. Simply +** print the supplied SQL statement to stdout. +*/ +static int recoverSqlCb(void *pCtx, const char *zSql){ + if( eVerbosity>=2 ){ + printf("%s\n", zSql); + } + return SQLITE_OK; +} + +/* +** This function is called to recover data from the database. +*/ +static int recoverDatabase(sqlite3 *db){ + int rc; /* Return code from this routine */ + const char *zLAF = "lost_and_found"; /* Name of "lost_and_found" table */ + int bFreelist = 1; /* True to scan the freelist */ + int bRowids = 1; /* True to restore ROWID values */ + sqlite3_recover *p; /* The recovery object */ + + p = sqlite3_recover_init_sql(db, "main", recoverSqlCb, 0); + sqlite3_recover_config(p, SQLITE_RECOVER_LOST_AND_FOUND, (void*)zLAF); + sqlite3_recover_config(p, SQLITE_RECOVER_ROWIDS, (void*)&bRowids); + sqlite3_recover_config(p, SQLITE_RECOVER_FREELIST_CORRUPT,(void*)&bFreelist); + sqlite3_recover_run(p); + if( sqlite3_recover_errcode(p)!=SQLITE_OK ){ + const char *zErr = sqlite3_recover_errmsg(p); + int errCode = sqlite3_recover_errcode(p); + if( eVerbosity>0 ){ + printf("recovery error: %s (%d)\n", zErr, errCode); + } + } + rc = sqlite3_recover_finish(p); + if( eVerbosity>0 && rc ){ + printf("recovery returns error code %d\n", rc); + } + return rc; +} /* ** Run the SQL text */ static int runDbSql(sqlite3 *db, const char *zSql, unsigned int *pBtsFlags){ @@ -1187,21 +1233,30 @@ /* Block debug pragmas and ATTACH/DETACH. But wait until after ** deserialize to do this because deserialize depends on ATTACH */ sqlite3_set_authorizer(cx.db, block_troublesome_sql, &btsFlags); -#ifdef VT02_SOURCES + /* Add the vt02 virtual table */ sqlite3_vt02_init(cx.db, 0, 0); -#endif + + /* Add support for sqlite_dbdata and sqlite_dbptr virtual tables used + ** by the recovery API */ + sqlite3_dbdata_init(cx.db, 0, 0); /* Consistent PRNG seed */ #ifdef SQLITE_TESTCTRL_PRNG_SEED sqlite3_table_column_metadata(cx.db, 0, "x", 0, 0, 0, 0, 0, 0); sqlite3_test_control(SQLITE_TESTCTRL_PRNG_SEED, 1, cx.db); #else sqlite3_randomness(0,0); #endif + + /* Run recovery on the initial database, just to make sure recovery + ** works. */ + if( !bNoRecover ){ + recoverDatabase(cx.db); + } zSql = sqlite3_malloc( nSql + 1 ); if( zSql==0 ){ fprintf(stderr, "Out of memory!\n"); }else{ @@ -1698,10 +1753,11 @@ " --load-dbsql FILE.. 
Load dbsqlfuzz outputs into the xsql table\n" " ^^^^------ Use \"-\" for FILE to read filenames from stdin\n" " -m TEXT Add a description to the database\n" " --native-vfs Use the native VFS for initially empty database files\n" " --native-malloc Turn off MEMSYS3/5 and Lookaside\n" +" --no-recover Do not run recovery on dbsqlfuzz databases\n" " --oss-fuzz Enable OSS-FUZZ testing\n" " --prng-seed N Seed value for the PRGN inside of SQLite\n" " -q|--quiet Reduced output\n" " --rebuild Rebuild and vacuum the database file\n" " --result-trace Show the results of each SQL command\n" @@ -1848,10 +1904,13 @@ if( strcmp(z,"native-malloc")==0 ){ nativeMalloc = 1; }else if( strcmp(z,"native-vfs")==0 ){ nativeFlag = 1; + }else + if( strcmp(z,"no-recover")==0 ){ + bNoRecover = 1; }else if( strcmp(z,"oss-fuzz")==0 ){ ossFuzz = 1; }else if( strcmp(z,"prng-seed")==0 ){ Index: test/fuzzdata8.db ================================================================== --- test/fuzzdata8.db +++ test/fuzzdata8.db cannot compute difference between binary files Index: test/fuzzinvariants.c ================================================================== --- test/fuzzinvariants.c +++ test/fuzzinvariants.c @@ -27,11 +27,11 @@ #include #include /* Forward references */ static char *fuzz_invariant_sql(sqlite3_stmt*, int); -static int sameValue(sqlite3_stmt*,int,sqlite3_stmt*,int); +static int sameValue(sqlite3_stmt*,int,sqlite3_stmt*,int,sqlite3_stmt*); static void reportInvariantFailed(sqlite3_stmt*,sqlite3_stmt*,int); /* ** Do an invariant check on pStmt. iCnt determines which invariant check to ** perform. The first check is iCnt==0. @@ -44,15 +44,18 @@ ** ** Return values: ** ** SQLITE_OK This check was successful. ** -** SQLITE_DONE iCnt is out of range. +** SQLITE_DONE iCnt is out of range. The caller typically sets +** up a loop on iCnt starting with zero, and increments +** iCnt until this code is returned. ** ** SQLITE_CORRUPT The invariant failed, but the underlying database ** file is indicating that it is corrupt, which might -** be the cause of the malfunction. +** be the cause of the malfunction. The *pCorrupt +** value will also be set. ** ** SQLITE_INTERNAL The invariant failed, and the database file is not ** corrupt. (This never happens because this function ** will call abort() following an invariant failure.) ** @@ -103,17 +106,20 @@ printf("invariant-sql #%d:\n%s\n", iCnt, zSql); sqlite3_free(zSql); } while( (rc = sqlite3_step(pTestStmt))==SQLITE_ROW ){ for(i=0; i=nCol ) break; } if( rc==SQLITE_DONE ){ /* No matching output row found */ sqlite3_stmt *pCk = 0; + + /* This is not a fault if the database file is corrupt, because anything + ** can happen with a corrupt database file */ rc = sqlite3_prepare_v2(db, "PRAGMA integrity_check", -1, &pCk, 0); if( rc ){ sqlite3_finalize(pCk); sqlite3_finalize(pTestStmt); return rc; @@ -127,10 +133,11 @@ sqlite3_finalize(pCk); sqlite3_finalize(pTestStmt); return SQLITE_CORRUPT; } sqlite3_finalize(pCk); + if( sqlite3_strlike("%group%by%order%by%desc%",sqlite3_sql(pStmt),0)==0 ){ /* dbsqlfuzz crash-647c162051c9b23ce091b7bbbe5125ce5f00e922 ** Original statement is: ** ** SELECT a,c,d,b,'' FROM t1 GROUP BY 1 HAVING d<>345 ORDER BY a DESC; @@ -140,10 +147,50 @@ ** ** SELECT * FROM (...) 
WHERE "a"==0 */ goto not_a_fault; } + + if( sqlite3_strlike("%limit%)%order%by%", sqlite3_sql(pTestStmt),0)==0 ){ + /* crash-89bd6a6f8c6166e9a4c5f47b3e70b225f69b76c6 + ** Original statement is: + ** + ** SELECT a,b,c* FROM t1 LIMIT 1%5<4 + ** + ** When running: + ** + ** SELECT * FROM (...) ORDER BY 1 + ** + ** A different subset of the rows come out + */ + goto not_a_fault; + } + + /* The original sameValue() comparison assumed a collating sequence + ** of "binary". It can sometimes get an incorrect result for different + ** collating sequences. So rerun the test with no assumptions about + ** collations. + */ + rc = sqlite3_prepare_v2(db, + "SELECT ?1=?2 OR ?1=?2 COLLATE nocase OR ?1=?2 COLLATE rtrim", + -1, &pCk, 0); + if( rc==SQLITE_OK ){ + sqlite3_reset(pTestStmt); + while( (rc = sqlite3_step(pTestStmt))==SQLITE_ROW ){ + for(i=0; i=nCol ){ + sqlite3_finalize(pCk); + goto not_a_fault; + } + } + } + sqlite3_finalize(pCk); + + /* Invariants do not necessarily work if there are virtual tables + ** involved in the query */ rc = sqlite3_prepare_v2(db, "SELECT 1 FROM bytecode(?1) WHERE opcode='VOpen'", -1, &pCk, 0); if( rc==SQLITE_OK ){ sqlite3_bind_pointer(pCk, 1, pStmt, "stmt-pointer", 0); rc = sqlite3_step(pCk); @@ -164,10 +211,28 @@ /* ** Generate SQL used to test a statement invariant. ** ** Return 0 if the iCnt is out of range. +** +** iCnt meanings: +** +** 0 SELECT * FROM () +** 1 SELECT DISTINCT * FROM () +** 2 SELECT * FROM () WHERE ORDER BY 1 +** 3 SELECT DISTINCT * FROM () ORDER BY 1 +** 4 SELECT * FROM () WHERE = +** 5 SELECT DISTINCT * FROM () WHERE ) WHERE = ORDER BY 1 +** 7 SELECT DISTINCT * FROM () WHERE = +** ORDER BY 1 +** N+0 SELECT * FROM () WHERE = +** N+1 SELECT DISTINCT * FROM () WHERE = +** N+2 SELECT * FROM () WHERE = ORDER BY 1 +** N+3 SELECT DISTINCT * FROM () WHERE = +** ORDER BY N +** */ static char *fuzz_invariant_sql(sqlite3_stmt *pStmt, int iCnt){ const char *zIn; size_t nIn; const char *zAnd = "WHERE"; @@ -180,11 +245,10 @@ int mxCnt; int bDistinct = 0; int bOrderBy = 0; int nParam = sqlite3_bind_parameter_count(pStmt); - iCnt++; switch( iCnt % 4 ){ case 1: bDistinct = 1; break; case 2: bOrderBy = 1; break; case 3: bDistinct = bOrderBy = 1; break; } @@ -195,13 +259,14 @@ if( zIn==0 ) return 0; nIn = strlen(zIn); while( nIn>0 && (isspace(zIn[nIn-1]) || zIn[nIn-1]==';') ) nIn--; if( strchr(zIn, '?') ) return 0; pTest = sqlite3_str_new(0); - sqlite3_str_appendf(pTest, "SELECT %s* FROM (%s", - bDistinct ? "DISTINCT " : "", zIn); - sqlite3_str_appendf(pTest, ")"); + sqlite3_str_appendf(pTest, "SELECT %s* FROM (", + bDistinct ? "DISTINCT " : ""); + sqlite3_str_append(pTest, zIn, (int)nIn); + sqlite3_str_append(pTest, ")", 1); rc = sqlite3_prepare_v2(db, sqlite3_str_value(pTest), -1, &pBase, 0); if( rc ){ sqlite3_finalize(pBase); pBase = pStmt; } @@ -214,11 +279,12 @@ ){ /* This is a randomized column name and so cannot be used in the ** WHERE clause. */ continue; } - if( i+1!=iCnt ) continue; + if( iCnt==0 ) continue; + if( iCnt>1 && i+2!=iCnt ) continue; if( zColName==0 ) continue; if( sqlite3_column_type(pStmt, i)==SQLITE_NULL ){ sqlite3_str_appendf(pTest, " %s \"%w\" ISNULL", zAnd, zColName); }else{ sqlite3_str_appendf(pTest, " %s \"%w\"=?%d", zAnd, zColName, @@ -226,19 +292,23 @@ } zAnd = "AND"; } if( pBase!=pStmt ) sqlite3_finalize(pBase); if( bOrderBy ){ - sqlite3_str_appendf(pTest, " ORDER BY 1"); + sqlite3_str_appendf(pTest, " ORDER BY %d", iCnt>2 ? 
iCnt-1 : 1); } return sqlite3_str_finish(pTest); } /* ** Return true if and only if v1 and is the same as v2. */ -static int sameValue(sqlite3_stmt *pS1, int i1, sqlite3_stmt *pS2, int i2){ +static int sameValue( + sqlite3_stmt *pS1, int i1, /* Value to text on the left */ + sqlite3_stmt *pS2, int i2, /* Value to test on the right */ + sqlite3_stmt *pTestCompare /* COLLATE comparison statement or NULL */ +){ int x = 1; int t1 = sqlite3_column_type(pS1,i1); int t2 = sqlite3_column_type(pS2,i2); if( t1!=t2 ){ if( (t1==SQLITE_INTEGER && t2==SQLITE_FLOAT) @@ -257,14 +327,42 @@ case SQLITE_FLOAT: { x = sqlite3_column_double(pS1,i1)==sqlite3_column_double(pS2,i2); break; } case SQLITE_TEXT: { - const char *z1 = (const char*)sqlite3_column_text(pS1,i1); - const char *z2 = (const char*)sqlite3_column_text(pS2,i2); - x = ((z1==0 && z2==0) || (z1!=0 && z2!=0 && strcmp(z1,z1)==0)); - break; + int e1 = sqlite3_value_encoding(sqlite3_column_value(pS1,i1)); + int e2 = sqlite3_value_encoding(sqlite3_column_value(pS2,i2)); + if( e1!=e2 ){ + const char *z1 = (const char*)sqlite3_column_text(pS1,i1); + const char *z2 = (const char*)sqlite3_column_text(pS2,i2); + x = ((z1==0 && z2==0) || (z1!=0 && z2!=0 && strcmp(z1,z1)==0)); + printf("Encodings differ. %d on left and %d on right\n", e1, e2); + abort(); + } + if( pTestCompare ){ + sqlite3_bind_value(pTestCompare, 1, sqlite3_column_value(pS1,i1)); + sqlite3_bind_value(pTestCompare, 2, sqlite3_column_value(pS2,i2)); + x = sqlite3_step(pTestCompare)==SQLITE_ROW + && sqlite3_column_int(pTestCompare,0)!=0; + sqlite3_reset(pTestCompare); + break; + } + if( e1!=SQLITE_UTF8 ){ + int len1 = sqlite3_column_bytes16(pS1,i1); + const unsigned char *b1 = sqlite3_column_blob(pS1,i1); + int len2 = sqlite3_column_bytes16(pS2,i2); + const unsigned char *b2 = sqlite3_column_blob(pS2,i2); + if( len1!=len2 ){ + x = 0; + }else if( len1==0 ){ + x = 1; + }else{ + x = (b1!=0 && b2!=0 && memcmp(b1,b2,len1)==0); + } + break; + } + /* Fall through into the SQLITE_BLOB case */ } case SQLITE_BLOB: { int len1 = sqlite3_column_bytes(pS1,i1); const unsigned char *b1 = sqlite3_column_blob(pS1,i1); int len2 = sqlite3_column_bytes(pS2,i2); @@ -279,16 +377,28 @@ break; } } return x; } + +/* +** Print binary data as hex +*/ +static void printHex(const unsigned char *a, int n, int mx){ + int j; + for(j=0; j($N/2) && $N2<($N/2)+400 +} 1 + +sqlite3 db3 test.db -shared-schema 1 +sqlite3 db4 test.db -shared-schema 1 +do_test 3.3 { + execsql { SELECT * FROM x1 } db3 + execsql { SELECT * FROM x1 } db4 + set N4 [lindex [sqlite3_db_status db2 SCHEMA_USED 0] 1] + set M [expr 2*($N-$N2)] + set {} {} +} {} +do_test 3.3.1 { expr {(($M / 4) + $N-$M)} } "#/$N4/" + +catch { db1 close } +catch { db2 close } +catch { db3 close } +catch { db4 close } + +#------------------------------------------------------------------------- +# 4.1 Test the REINDEX command. +# 4.2 Test CREATE TEMP ... commands. 
+# +reset_db +do_execsql_test 4.1.0 { + CREATE TABLE x1(a, b, c); + CREATE INDEX x1a ON x1(a); + CREATE INDEX x1b ON x1(b); + CREATE INDEX x1c ON x1(c); +} +db close +sqlite3 db test.db -shared-schema 1 + +do_execsql_test 4.1.1 { + REINDEX x1; + REINDEX x1a; + REINDEX x1b; + REINDEX x1c; + REINDEX; +} + +do_test 4.1.2 { + for {set i 1} {$i < 5} {incr i} { + forcedelete test.db${i} test.db${i}-wal test.db${i}-journal + forcecopy test.db test.db${i} + execsql "ATTACH 'test.db${i}' AS db${i}" + } + register_schemapool_module db + set {} {} + execsql { + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool + } +} {nref=5 nschema=1 ndelete=0} + +do_execsql_test 4.1.3 { + REINDEX x1; + REINDEX x1a; + REINDEX x1b; + REINDEX x1c; + REINDEX db1.x1a; + REINDEX db2.x1b; + REINDEX db3.x1c; +} + +do_execsql_test 4.1.4 { + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool +} {nref=5 nschema=1 ndelete=28} + +#------------------------------------------------------------------------- +db close +sqlite3 db test.db -shared-schema 1 +register_schemapool_module db +do_execsql_test 4.2.0 { + ATTACH 'test.db1' AS db1; + ATTACH 'test.db2' AS db2; + ATTACH 'test.db3' AS db3; + ATTACH 'test.db4' AS db4; + + SELECT * FROM db1.x1; + SELECT * FROM db2.x1; + SELECT * FROM db3.x1; + SELECT * FROM db4.x1; +} + +do_execsql_test 4.2.1 { + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool; +} {nref=5 nschema=1 ndelete=0} + +do_execsql_test 4.2.2 { + CREATE TEMP TABLE t1(a, b, c); + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool; +} {nref=5 nschema=1 ndelete=0} + +do_execsql_test 4.2.3 { + CREATE INDEX t1a ON t1(a); + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool; +} {nref=5 nschema=1 ndelete=0} + +do_execsql_test 4.2.4 { + CREATE TRIGGER tr1 AFTER INSERT ON t1 BEGIN + SELECT 1,2,3,4; + END; + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool; +} {nref=5 nschema=1 ndelete=0} + +do_execsql_test 4.2.5 { + DROP TABLE t1; + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool; +} {nref=5 nschema=1 ndelete=0} + +do_execsql_test 4.2.6 { + CREATE TEMP TRIGGER tr1 AFTER INSERT ON db2.x1 BEGIN + SELECT 1,2,3,4; + END; + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool; +} {nref=5 nschema=1 ndelete=0} + +do_execsql_test 4.2.7 { + DROP TRIGGER tr1; + SELECT 'nref=' || nRef, 'nschema=' || nSchema, 'ndelete=' || nDelete + FROM schemapool; +} {nref=5 nschema=1 ndelete=4} + +#-------------------------------------------------------------------------- +reset_db +do_execsql_test 5.0 { + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + CREATE TABLE t3(a, b); +} + +sqlite3 db2 test.db -shared-schema 1 +register_schemapool_module db2 + +do_execsql_test 5.1 { + PRAGMA writable_schema = 1; + UPDATE sqlite_master SET sql='CREATE TABLE t3 a,b' WHERE name = 't3'; +} + +do_test 5.2 { + catchsql { SELECT * FROM t1 } db2 +} {1 {malformed database schema (t3) - near "a": syntax error}} + +do_test 5.3 { + catchsql { SELECT nref,nschema FROM schemapool } db2 +} {1 {vtable constructor failed: schemapool}} + +do_execsql_test 5.4 { + PRAGMA writable_schema = 1; + UPDATE sqlite_master SET sql='CREATE TABLE t3(a,b)' WHERE name = 't3'; +} + +do_test 5.5 { + catchsql { SELECT nref,nschema FROM schemapool } db2 +} {0 {1 1}} + +db2 close +db close +do_test 5.6.1 { + 
forcedelete test.db2 test.db2-wal test.db2-journal + forcecopy test.db test.db2 + sqlite3 db test.db + sqlite3 db2 test.db -shared-schema 1 + sqlite3 db3 test.db2 -shared-schema 1 + register_schemapool_module db +} {} + +do_execsql_test -db db2 5.6.2 { SELECT * FROM t1 } +do_execsql_test -db db3 5.6.3 { SELECT * FROM t1 } +do_execsql_test 5.6.4 { + SELECT 'nref=' || nRef, 'nschema=' || nSchema FROM schemapool; + CREATE TABLE t4(x); + DROP TABLE t4; +} {nref=2 nschema=1} +do_execsql_test -db db2 5.6.5 { SELECT * FROM t1 } +do_execsql_test -db db3 5.6.6 { SELECT * FROM t1 } +do_execsql_test 5.6.7 { + SELECT 'nref=' || nRef, 'nschema=' || nSchema FROM schemapool; + ATTACH 'test.db2' AS db2; + CREATE TABLE db2.t4(x); + DROP TABLE db2.t4; +} {nref=1 nschema=1 nref=1 nschema=1} +do_execsql_test -db db2 5.6.8 { SELECT * FROM t1 } +do_execsql_test -db db3 5.6.9 { SELECT * FROM t1 } +do_execsql_test 5.6.10 { + SELECT 'nref=' || nRef, 'nschema=' || nSchema FROM schemapool; +} {nref=2 nschema=1} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 6.0 { + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + CREATE TABLE t3(a, b); +} + +do_test 6.1 { + db close + sqlite3 db test.db -shared-schema 1 + for {set i 1} {$i < 5} {incr i} { + set base "test.db$i" + set nm "aux$i" + forcedelete $base $base-wal $base-journal + forcecopy test.db $base + execsql "ATTACH '$base' AS $nm" + } +} {} + +do_test 6.2 { + set N1 [lindex [sqlite3_db_status db SCHEMA_USED 0] 1] + set N2 [lindex [sqlite3_db_status db SCHEMA_USED 0] 1] + expr ($N1==0 && $N2==0) +} {1} + +do_test 6.3 { + execsql { SELECT * FROM main.t1 } + set N1 [lindex [sqlite3_db_status db SCHEMA_USED 0] 1] + set N2 [lindex [sqlite3_db_status db SCHEMA_USED 0] 1] + expr {$N1>0 && $N2>0 && $N1==$N2} +} {1} + +do_test 6.4 { + execsql { SELECT * FROM aux1.t1 } + set N3 [lindex [sqlite3_db_status db SCHEMA_USED 0] 1] + set N4 [lindex [sqlite3_db_status db SCHEMA_USED 0] 1] + list $N3 $N4 +} "#/$N1 $N1/" + +finish_test + ADDED test/reuse4.test Index: test/reuse4.test ================================================================== --- /dev/null +++ test/reuse4.test @@ -0,0 +1,173 @@ +# 2019 February 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix reuse4 + +ifcapable !sharedschema { + finish_test + return +} + +foreach {tn sharedschema} { + 1 0 + 2 1 +} { + reset_db + + do_execsql_test 1.$tn.0 { + CREATE TABLE x1(a, b); + CREATE INDEX x1a ON x1(a); + CREATE INDEX x1b ON x1(b); + CREATE TABLE x2(a, b); + } + db close + + do_test 1.$tn.1 { + for {set i 1} {$i<4} {incr i} { + forcedelete test.db$i test.db$i-journal test.db$i-wal + forcecopy test.db test.db$i + } + + sqlite3 db test.db -shared-schema $sharedschema + for {set i 1} {$i<4} {incr i} { + execsql " ATTACH 'test.db$i' AS db$i " + } + } {} + + do_execsql_test 1.$tn.2 { + WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<10 ) + INSERT INTO x1 SELECT i, i FROM s; + + INSERT INTO db3.x2 SELECT * FROM x1; + INSERT INTO db2.x1 SELECT * FROM db3.x2; + CREATE TEMP TRIGGER tr1 AFTER INSERT ON db2.x2 BEGIN + INSERT INTO x1 VALUES(new.a, new.b); + END; + INSERT INTO db2.x2 SELECT * FROM x1 WHERE a%2; + DELETE FROM x1 WHERE a<3; + INSERT INTO db3.x1 SELECT * FROM db2.x2; + + DETACH db3; + ATTACH 'test.db3' AS db3; + + UPDATE db3.x1 SET a=a-10 WHERE b NOT IN (SELECT b FROM db2.x2); + + CREATE TEMP TABLE x1(a, b); + INSERT INTO db2.x2 VALUES(50, 60), (60, 70), (80, 90); + ALTER TABLE x1 RENAME TO x2; + ALTER TABLE x2 ADD COLUMN c; + ALTER TABLE x2 RENAME a TO aaa; + DELETE FROM x1 WHERE b>8; + UPDATE db3.x2 SET b=b*10; + + BEGIN; + CREATE TEMP TABLE x5(x); + INSERT INTO x5 VALUES(1); + ROLLBACK; + + INSERT INTO main.x2 VALUES(123, 456); + } + + integrity_check 1.$tn.3 + + do_execsql_test 1.$tn.4 { + SELECT * FROM main.x1; SELECT 'xxx'; + SELECT * FROM main.x2; SELECT 'xxx'; + SELECT * FROM temp.x2; SELECT 'xxx'; + + SELECT * FROM db1.x1; SELECT 'xxx'; + SELECT * FROM db1.x2; SELECT 'xxx'; + SELECT * FROM db2.x1; SELECT 'xxx'; + SELECT * FROM db2.x2; SELECT 'xxx'; + SELECT * FROM db3.x1; SELECT 'xxx'; + SELECT * FROM db3.x2; SELECT 'xxx'; + } { + 3 3 4 4 5 5 6 6 7 7 8 8 3 3 5 5 7 7 xxx + 123 456 xxx + 50 60 {} 60 70 {} 80 90 {} xxx + xxx + xxx + 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 xxx + 1 1 3 3 5 5 7 7 9 9 50 60 60 70 80 90 xxx + 1 1 3 3 5 5 7 7 9 9 xxx + 1 10 2 20 3 30 4 40 5 50 6 60 7 70 8 80 9 90 10 100 xxx + } + + do_test 1.$tn.5.1 { + sqlite3 db2 test.db + db2 eval { CREATE TABLE x3(x) } + } {} + do_execsql_test 1.$tn.5.2 { + SELECT * FROM main.x1; SELECT 'xxx'; + SELECT * FROM main.x2; SELECT 'xxx'; + SELECT * FROM main.x3; SELECT 'xxx'; + } { + 3 3 4 4 5 5 6 6 7 7 8 8 3 3 5 5 7 7 xxx + 123 456 xxx + xxx + } +} + +#------------------------------------------------------------------------- +# Test some PRAGMA statements with shared-schema connections. 
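Before the PRAGMA cases below, here is the basic pattern that all of these reuse tests build on, reduced to a sketch: create the schema once, make byte-identical copies of the database file, then open a connection with the Tcl option -shared-schema 1 and attach the copies so they can share a single parsed schema. Only the -shared-schema option and the forcecopy/ATTACH idiom come from the patch; the sketch-ss.* names, the table and its values are invented here, and an SQLITE_ENABLE_SHARED_SCHEMA build (ifcapable sharedschema) is assumed.

reset_db
do_execsql_test sketch-ss.0 {
  CREATE TABLE t1(a, b);
  INSERT INTO t1 VALUES(1, 2);
}
db close

# Make three byte-identical copies of test.db.
for {set i 1} {$i < 4} {incr i} {
  forcedelete test.db$i test.db$i-journal test.db$i-wal
  forcecopy test.db test.db$i
}

# Reopen with shared-schema enabled and attach the copies.  Queries behave
# as on ordinary connections; only the in-memory schema objects are shared
# between the attached databases.
sqlite3 db test.db -shared-schema 1
do_test sketch-ss.1 {
  for {set i 1} {$i < 4} {incr i} {
    execsql "ATTACH 'test.db$i' AS aux$i"
  }
  execsql { SELECT * FROM main.t1 UNION ALL SELECT * FROM aux3.t1 }
} {1 2 1 2}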
+# +reset_db +do_execsql_test 2.0 { + CREATE TABLE t1(a, b, c); + CREATE INDEX t1abc ON t1(a, b, c); +} + +foreach {tn pragma nSchema nDelete} { + 1 "PRAGMA synchronous = OFF" 1 0 + 2 "PRAGMA cache_size = 200" 1 0 + 3 "PRAGMA aux2.integrity_check" 1 0 + 4 "PRAGMA integrity_check" 1 5 + 5 "PRAGMA index_info=t1abc" 1 5 + 6 "PRAGMA aux3.index_info=t1abc" 1 0 + 7 "PRAGMA journal_mode" 1 0 + 8 "PRAGMA aux2.wal_checkpoint" 1 0 + 9 "PRAGMA wal_checkpoint" 1 0 +} { + do_test 2.$tn.1 { + catch { db close } + catch { db2 close } + for {set i 1} {$i < 6} {incr i} { + forcedelete "test.db$i" "test.db${i}-wal" "test.db${i}-journal" + forcecopy test.db test.db$i + } + sqlite3 db2 test.db -shared-schema 1 + for {set i 1} {$i < 6} {incr i} { + execsql "ATTACH 'test.db$i' AS aux$i" db2 + } + } {} + + sqlite3 db test.db + register_schemapool_module db + + do_test 2.$tn.2 { + execsql $pragma db2 + execsql { SELECT 'nschema='||nschema, 'ndelete='||nDelete FROM schemapool } + } "nschema=$nSchema ndelete=$nDelete" + + do_test 2.$tn.3 { + execsql { + SELECT * FROM main.t1,aux1.t1,aux2.t1,aux3.t1,aux4.t1,aux5.t1 + } db2 + execsql { SELECT 'nschema=' || nschema, 'nref=' || nref FROM schemapool } + } "nschema=6 nref=6" +} + +finish_test + ADDED test/reuse5.test Index: test/reuse5.test ================================================================== --- /dev/null +++ test/reuse5.test @@ -0,0 +1,125 @@ +# 2019 February 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix reuse5 +set CLI [test_find_cli] + +ifcapable !sharedschema { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1x ON t1(x); + CREATE INDEX t1y ON t1(y); + CREATE VIEW v1 AS SELECT * FROM t2; +} + +foreach {tn sql out1 out2} { + 1 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1x ON t1(x); + CREATE INDEX t1y ON t1(y); + CREATE VIEW v1 AS SELECT * FROM t2; + } { + test.db2 is compatible + } {} + + 2 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1x ON t1(x); + CREATE INDEX t1y ON t1(y); + CREATE VIEW v1 AS SELECT * FROM t2; + CREATE TABLE x1(x); + DROP TABLE x1; + } { + test.db2 is NOT compatible (schema cookie) + } { + Fixing test.db2... test.db2 is compatible + } + + 3 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1y ON t1(y); + CREATE VIEW v1 AS SELECT * FROM t2; + } { + test.db2 is NOT compatible (objects) + } {} + + 4 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1x ON t1(X); + CREATE INDEX t1y ON t1(y); + CREATE VIEW v1 AS SELECT * FROM t2; + } { + test.db2 is NOT compatible (SQL) + } {} + + 5 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1y ON t1(y); + CREATE INDEX t1x ON t1(x); + CREATE VIEW v1 AS SELECT * FROM t2; + } { + test.db2 is NOT compatible (root pages) + } { + Fixing test.db2... 
test.db2 is compatible + } + + 6 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1x ON t1(x); + CREATE INDEX t1y ON t1(y); + CREATE VIEW v1 AS SELECT * FROM t2; + DROP INDEX t1x; + CREATE INDEX t1x ON t1(x); + } { + test.db2 is NOT compatible (order of sqlite_master rows) + } { + Fixing test.db2... test.db2 is compatible + } + +} { + forcedelete test.db2 + sqlite3 db2 test.db2 + db2 eval $sql + db2 close + + if {$out2==""} {set out2 $out1} + + do_test 1.$tn.1 { + catchcmd test.db ".shared-schema check test.db2" + } [list 0 [string trim $out1]] + + do_test 1.$tn.2 { + catchcmd test.db ".shared-schema fix test.db2" + } [list 0 [string trim $out2]] + + do_test 1.$tn.3 { + catchcmd test.db2 "PRAGMA integrity_check" + } [list 0 ok] +} + + +finish_test + ADDED test/reuse6.test Index: test/reuse6.test ================================================================== --- /dev/null +++ test/reuse6.test @@ -0,0 +1,143 @@ +# 2019 February 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix reuse6 + +ifcapable !sharedschema { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b, c); + CREATE INDEX t1x ON t1(x); + CREATE INDEX t1y ON t1(y); + CREATE VIEW v1 AS SELECT * FROM t2; + + INSERT INTO t1 VALUES(1, 2), (3, 4), (5, 6); + INSERT INTO t2 VALUES('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i'); + + ATTACH 'test.db2' AS aux; + CREATE TABLE t3(i, ii); + INSERT INTO t3 VALUES(10, 20); +} + +sqlite3 db1 test.db -shared-schema 1 +sqlite3 db2 test.db -shared-schema 1 + +do_execsql_test -db db1 1.1 { + ATTACH 'test.db2' AS aux; +} + +do_test 1.2 { + execsql {SELECT * FROM t3} db1 +} {10 20} + +do_execsql_test -db db2 1.3 { + ATTACH 'test.db2' AS aux; +} + +do_test 1.3 { + execsql {SELECT * FROM t3} db1 +} {10 20} + +do_execsql_test -db db2 1.5 { + SELECT * FROM t3; +} {10 20} + +do_test 1.6 { + execsql {SELECT * FROM t3} db1 +} {10 20} + +db1 close +db2 close + +#------------------------------------------------------------------------- +reset_db +forcedelete test.db2 +forcedelete test.db3 +do_execsql_test 2.0 { + CREATE TABLE t1(x, y); + ATTACH 'test.db2' AS aux2; + CREATE TABLE aux2.t2(x, y); + ATTACH 'test.db3' AS aux3; + CREATE TABLE aux3.t3(x, y); +} + +sqlite3 db1 test.db -shared-schema 1 +do_execsql_test -db db1 2.1 { + ATTACH 'test.db2' AS aux2; + ATTACH 'test.db3' AS aux3; +} + +do_test 2.2.1 { + catchsql { SELECT * FROM aux2.nosuchtable } db1 +} {1 {no such table: aux2.nosuchtable}} +do_test 2.2.2 { + sqlite3_errcode db1 +} {SQLITE_ERROR} +db1 close + +#------------------------------------------------------------------------- +reset_db +forcedelete test.db2 +ifcapable fts5 { + do_execsql_test 3.0 { + CREATE VIRTUAL TABLE ft USING fts5(a, b); + ATTACH 'test.db2' AS aux; + CREATE TABLE aux.t1(x, y, z); + } + + sqlite3 db1 test.db -shared-schema 1 + do_execsql_test -db db1 3.1 { + ATTACH 'test.db2' AS aux; + } + + do_execsql_test -db db1 3.2 { + SELECT * FROM main.ft, aux.t1; + } + db1 close +} + +#------------------------------------------------------------------------- +reset_db +forcedelete test.db2 +ifcapable fts5 { + do_execsql_test 4.0 { + CREATE 
VIRTUAL TABLE ft USING fts5(a, b); + } + forcecopy test.db test.db2 + + sqlite3 db1 test.db -shared-schema 1 + do_execsql_test -db db1 4.1 { + ATTACH 'test.db2' AS aux; + SELECT * FROM main.ft; + SELECT * FROM aux.ft; + } + + do_execsql_test -db db1 4.2 { + SELECT * FROM main.ft, aux.ft + } +} + + + + + + + +finish_test + ADDED test/reusefault.test Index: test/reusefault.test ================================================================== --- /dev/null +++ test/reusefault.test @@ -0,0 +1,54 @@ +# 2019 February 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix reusefault + +ifcapable !sharedschema { + finish_test + return +} + +do_execsql_test 1.0 { + PRAGMA cache_size = 10; + CREATE TABLE t1(a UNIQUE, b UNIQUE); + INSERT INTO t1 VALUES(1, 2), (3, 4); +} +faultsim_save_and_close + +do_faultsim_test 1.1 -prep { + faultsim_restore + sqlite3 db test.db -shared-schema 1 +} -body { + execsql { SELECT * FROM t1 } +} -test { + faultsim_test_result {0 {1 2 3 4}} +} + +do_faultsim_test 1.2 -prep { + faultsim_restore + sqlite3 db test.db -shared-schema 1 + execsql { SELECT * FROM t1 } + sqlite3 db2 test.db + db2 eval {CREATE TABLE a(a)} + db2 close +} -body { + execsql { SELECT * FROM t1 } +} -test { + faultsim_test_result {0 {1 2 3 4}} +} + + +finish_test + Index: test/savepoint.test ================================================================== --- test/savepoint.test +++ test/savepoint.test @@ -13,10 +13,12 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/lock_common.tcl source $testdir/malloc_common.tcl + +forcedelete test2.db #---------------------------------------------------------------------- # The following tests - savepoint-1.* - test that the SAVEPOINT, RELEASE # and ROLLBACK TO comands are correctly parsed, and that the auto-commit # flag is correctly set and unset as a result. 
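The comment above summarises what the savepoint-1.* cases check; as a compact illustration of that rule, the sketch below nests two savepoints, rolls back the inner one and releases the outer one, then confirms that the auto-commit flag is back on (a fresh BEGIN succeeds) and that the rolled-back CREATE TABLE left no trace. This is illustrative only: the sp_demo table and sketch-sp test name are invented, and a tester.tcl "db" handle is assumed.

do_test sketch-sp.1 {
  execsql {
    SAVEPOINT one;         -- no transaction was active, so this opens one
    SAVEPOINT two;
    CREATE TABLE sp_demo(x);
    ROLLBACK TO two;       -- undoes the CREATE but keeps the transaction open
    RELEASE one;           -- releasing the outermost savepoint commits
  }
  # Auto-commit is restored, so a plain BEGIN is legal again, and the
  # rolled-back CREATE TABLE is not in the schema.
  list [catchsql { BEGIN; COMMIT }] \
       [db one {SELECT count(*) FROM sqlite_master WHERE name='sp_demo'}]
} {{0 {}} 0}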
@@ -26,11 +28,10 @@ execsql { SAVEPOINT sp1; RELEASE sp1; } } {} -wal_check_journal_mode savepoint-1.1 do_test savepoint-1.2 { execsql { SAVEPOINT sp1; ROLLBACK TO sp1; } @@ -804,12 +805,11 @@ } } {} integrity_check savepoint-11.7 do_test savepoint-11.8 { execsql { ROLLBACK } - db close - sqlite3 db test.db + execsql { PRAGMA wal_checkpoint } file size test.db } {8192} do_test savepoint-11.9 { execsql { Index: test/savepoint6.test ================================================================== --- test/savepoint6.test +++ test/savepoint6.test @@ -13,14 +13,10 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl proc sql {zSql} { - if {0 && $::debug_op} { - puts stderr "$zSql ;" - flush stderr - } uplevel db eval [list $zSql] #puts stderr "$zSql ;" } set DATABASE_SCHEMA { @@ -69,17 +65,15 @@ # # insert_rows XVALUES # delete_rows XVALUES # proc savepoint {zName} { - if {$::debug_op} { puts stderr "savepoint $zName" ; flush stderr } catch { sql "SAVEPOINT $zName" } lappend ::lSavepoint [list $zName [array get ::aEntry]] } proc rollback {zName} { - if {$::debug_op} { puts stderr "rollback $zName" ; flush stderr } catch { sql "ROLLBACK TO $zName" } for {set i [expr {[llength $::lSavepoint]-1}]} {$i>=0} {incr i -1} { set zSavepoint [lindex $::lSavepoint $i 0] if {$zSavepoint eq $zName} { unset -nocomplain ::aEntry @@ -93,11 +87,10 @@ } } } proc release {zName} { - if {$::debug_op} { puts stderr "release $zName" ; flush stderr } catch { sql "RELEASE $zName" } for {set i [expr {[llength $::lSavepoint]-1}]} {$i>=0} {incr i -1} { set zSavepoint [lindex $::lSavepoint $i 0] if {$zSavepoint eq $zName} { set ::lSavepoint [lreplace $::lSavepoint $i end] @@ -109,11 +102,10 @@ #puts stderr "-- End of transaction!!!!!!!!!!!!!" } } proc insert_rows {lX} { - if {$::debug_op} { puts stderr "insert_rows $lX" ; flush stderr } foreach x $lX { set y [x_to_y $x] # Update database [db] sql "INSERT OR REPLACE INTO t1 VALUES($x, '$y')" @@ -122,11 +114,10 @@ set ::aEntry($x) $y } } proc delete_rows {lX} { - if {$::debug_op} { puts stderr "delete_rows $lX" ; flush stderr } foreach x $lX { # Update database [db] sql "DELETE FROM t1 WHERE x = $x" # Update the Tcl database. @@ -171,15 +162,10 @@ } return $ret } #------------------------------------------------------------------------- -set ::debug_op 0 -proc debug_ops {} { - set ::debug_op 1 -} - proc database_op {} { set i [expr int(rand()*2)] if {$i==0} { insert_rows [random_integers 100 1000] } @@ -196,10 +182,13 @@ set names {one two three four five} set cmds {savepoint savepoint savepoint savepoint release rollback} set C [lindex $cmds [expr int(rand()*6)]] set N [lindex $names [expr int(rand()*5)]] + + #puts stderr " $C $N ; " + #flush stderr $C $N return ok } ADDED test/seekscan1.test Index: test/seekscan1.test ================================================================== --- /dev/null +++ test/seekscan1.test @@ -0,0 +1,63 @@ +# 2022 October 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix seekscan1 + +do_execsql_test 1.0 { + CREATE TABLE t1(a TEXT, b INT, c INT NOT NULL, PRIMARY KEY(a,b,c)); + WITH RECURSIVE c(x) AS (VALUES(0) UNION ALL SELECT x+1 FROM c WHERE x<1997) + INSERT INTO t1(a,b,c) SELECT printf('xyz%d',x/10),x/6,x FROM c; + INSERT INTO t1 VALUES('abc',234,6); + INSERT INTO t1 VALUES('abc',345,7); + ANALYZE; +} + +do_execsql_test 1.1 { + SELECT a,b,c FROM t1 + WHERE b IN (234, 345) AND c BETWEEN 6 AND 6.5 AND a='abc' + ORDER BY a, b; +} { + abc 234 6 +} + +do_execsql_test 1.2 { + SELECT a,b,c FROM t1 + WHERE b IN (234, 345) AND c BETWEEN 6 AND 7 AND a='abc' + ORDER BY a, b; +} { + abc 234 6 + abc 345 7 +} + +do_execsql_test 1.3 { + SELECT a,b,c FROM t1 + WHERE b IN (234, 345) AND c >=6 AND a='abc' + ORDER BY a, b; +} { + abc 234 6 + abc 345 7 +} + +do_execsql_test 1.4 { + SELECT a,b,c FROM t1 + WHERE b IN (234, 345) AND c<=7 AND a='abc' + ORDER BY a, b; +} { + abc 234 6 + abc 345 7 +} + + +finish_test Index: test/selectA.test ================================================================== --- test/selectA.test +++ test/selectA.test @@ -1480,7 +1480,31 @@ do_execsql_test 8.1 { SELECT 'ABCD' FROM t1 WHERE (a=? OR b=?) AND (0 OR (SELECT 'xyz' INTERSECT SELECT a ORDER BY 1)) } {} + +#------------------------------------------------------------------------- +# dbsqlfuzz a34f455c91ad75a0cf8cd9476841903f42930a7a +# +reset_db +do_execsql_test 9.0 { + CREATE TABLE t1(a COLLATE nocase); + CREATE TABLE t2(b COLLATE nocase); + + INSERT INTO t1 VALUES('ABC'); + INSERT INTO t2 VALUES('abc'); +} + +do_execsql_test 9.1 { + SELECT a FROM t1 INTERSECT SELECT b FROM t2; +} {ABC} + +do_execsql_test 9.2 { + SELECT * FROM ( + SELECT a FROM t1 INTERSECT SELECT b FROM t2 + ) WHERE a||'' = 'ABC'; +} {ABC} + + finish_test Index: test/shell2.test ================================================================== --- test/shell2.test +++ test/shell2.test @@ -188,7 +188,19 @@ catchcmd ":memory:" { SELECT 'unclosed;} } {1 {Parse error near line 2: unrecognized token: "'unclosed;" SELECT 'unclosed; ^--- error here}} + +# Verify that safe mode rejects certain UDFs +# Reported at https://sqlite.org/forum/forumpost/07beac8056151b2f +do_test shell2-1.4.8 { + catchcmd "-safe :memory:" { + SELECT edit('DoNotCare');} +} {1 {line 2: cannot use the edit() function in safe mode}} +do_test shell2-1.4.9 { + catchcmd "-safe :memory:" { + SELECT writefile('DoNotCare', x'');} +} {1 {line 2: cannot use the writefile() function in safe mode}} + finish_test Index: test/speedtest1.c ================================================================== --- test/speedtest1.c +++ test/speedtest1.c @@ -5,11 +5,12 @@ */ static const char zHelp[] = "Usage: %s [--options] DATABASE\n" "Options:\n" " --autovacuum Enable AUTOVACUUM mode\n" - " --cachesize N Set the cache size to N\n" + " --big-transactions Add BEGIN/END around all large tests\n" + " --cachesize N Set PRAGMA cache_size=N. 
Note: N is pages, not bytes\n" " --checkpoint Run PRAGMA wal_checkpoint after each test case\n" " --exclusive Enable locking_mode=EXCLUSIVE\n" " --explain Like --sqlonly but with added EXPLAIN keywords\n" " --heap SZ MIN Memory allocator uses SZ bytes & min allocation MIN\n" " --incrvacuum Enable incremenatal vacuum mode\n" @@ -18,10 +19,11 @@ " --lookaside N SZ Configure lookaside for N slots of SZ bytes each\n" " --memdb Use an in-memory database\n" " --mmap SZ MMAP the first SZ bytes of the database file\n" " --multithread Set multithreaded mode\n" " --nomemstat Disable memory statistics\n" + " --nomutex Open db with SQLITE_OPEN_NOMUTEX\n" " --nosync Set PRAGMA synchronous=OFF\n" " --notnull Add NOT NULL constraints to table columns\n" " --output FILE Store SQL output in FILE\n" " --pagesize N Set the page size to N\n" " --pcache N SZ Configure N pages of pagecache each of size SZ bytes\n" @@ -41,11 +43,12 @@ " --testset T Run test-set T (main, cte, rtree, orm, fp, debug)\n" " --trace Turn on SQL tracing\n" " --threads N Use up to N threads for sorting\n" " --utf16be Set text encoding to UTF-16BE\n" " --utf16le Set text encoding to UTF-16LE\n" - " --verify Run additional verification steps.\n" + " --verify Run additional verification steps\n" + " --vfs NAME Use the given (preinstalled) VFS\n" " --without-rowid Use WITHOUT ROWID where appropriate\n" ; #include "sqlite3.h" #include @@ -95,10 +98,11 @@ int eTemp; /* 0: no TEMP. 9: always TEMP. */ int szTest; /* Scale factor for test iterations */ int nRepeat; /* Repeat selects this many times */ int doCheckpoint; /* Run PRAGMA wal_checkpoint after each trans */ int nReserve; /* Reserve bytes */ + int doBigTransactions; /* Enable transactions on tests 410 and 510 */ const char *zWR; /* Might be WITHOUT ROWID */ const char *zNN; /* Might be NOT NULL */ const char *zPK; /* Might be UNIQUE or PRIMARY KEY */ unsigned int x, y; /* Pseudo-random number generator state */ u64 nResByte; /* Total number of result bytes */ @@ -370,22 +374,29 @@ /* Start a new test case */ #define NAMEWIDTH 60 static const char zDots[] = "......................................................................."; +static int iTestNumber = 0; /* Current test # for begin/end_test(). */ void speedtest1_begin_test(int iTestNum, const char *zTestName, ...){ int n = (int)strlen(zTestName); char *zName; va_list ap; + iTestNumber = iTestNum; va_start(ap, zTestName); zName = sqlite3_vmprintf(zTestName, ap); va_end(ap); n = (int)strlen(zName); if( n>NAMEWIDTH ){ zName[NAMEWIDTH] = 0; n = NAMEWIDTH; } + if( g.pScript ){ + fprintf(g.pScript,"-- begin test %d %.*s\n", iTestNumber, n, zName) + /* maintenance reminder: ^^^ code in ext/wasm expects %d to be + ** field #4 (as in: cut -d' ' -f4). 
*/; + } if( g.bSqlOnly ){ printf("/* %4d - %s%.*s */\n", iTestNum, zName, NAMEWIDTH-n, zDots); }else{ printf("%4d - %s%.*s ", iTestNum, zName, NAMEWIDTH-n, zDots); fflush(stdout); @@ -402,18 +413,23 @@ /* Complete a test case */ void speedtest1_end_test(void){ sqlite3_int64 iElapseTime = speedtest1_timestamp() - g.iStart; if( g.doCheckpoint ) speedtest1_exec("PRAGMA wal_checkpoint;"); + assert( iTestNumber > 0 ); + if( g.pScript ){ + fprintf(g.pScript,"-- end test %d\n", iTestNumber); + } if( !g.bSqlOnly ){ g.iTotal += iElapseTime; printf("%4d.%03ds\n", (int)(iElapseTime/1000), (int)(iElapseTime%1000)); } if( g.pStmt ){ sqlite3_finalize(g.pStmt); g.pStmt = 0; } + iTestNumber = 0; } /* Report end of testing */ void speedtest1_final(void){ if( !g.bSqlOnly ){ @@ -1103,16 +1119,28 @@ speedtest1_run(); } speedtest1_exec("COMMIT"); speedtest1_end_test(); speedtest1_begin_test(410, "%d SELECTS on an IPK", n); + if( g.doBigTransactions ){ + /* Historical note: tests 410 and 510 have historically not used + ** explicit transactions. The --big-transactions flag was added + ** 2022-09-08 to support the WASM/OPFS build, as the run-times + ** approach 1 minute for each of these tests if they're not in an + ** explicit transaction. The run-time effect of --big-transaciions + ** on native builds is negligible. */ + speedtest1_exec("BEGIN"); + } speedtest1_prepare("SELECT b FROM t5 WHERE a=?1; -- %d times",n); for(i=1; i<=n; i++){ x1 = swizzle(i,maxb); sqlite3_bind_int(g.pStmt, 1, (sqlite3_int64)x1); speedtest1_run(); } + if( g.doBigTransactions ){ + speedtest1_exec("COMMIT"); + } speedtest1_end_test(); sz = n = g.szTest*700; zNum[0] = 0; maxb = roundup_allones(sz/3); @@ -1130,17 +1158,24 @@ speedtest1_run(); } speedtest1_exec("COMMIT"); speedtest1_end_test(); speedtest1_begin_test(510, "%d SELECTS on a TEXT PK", n); + if( g.doBigTransactions ){ + /* See notes for test 410. */ + speedtest1_exec("BEGIN"); + } speedtest1_prepare("SELECT b FROM t6 WHERE a=?1; -- %d times",n); for(i=1; i<=n; i++){ x1 = swizzle(i,maxb); speedtest1_numbername(x1, zNum, sizeof(zNum)); sqlite3_bind_text(g.pStmt, 1, zNum, -1, SQLITE_STATIC); speedtest1_run(); } + if( g.doBigTransactions ){ + speedtest1_exec("COMMIT"); + } speedtest1_end_test(); speedtest1_begin_test(520, "%d SELECT DISTINCT", n); speedtest1_exec("SELECT DISTINCT b FROM t5;"); speedtest1_exec("SELECT DISTINCT b FROM t6;"); speedtest1_end_test(); @@ -2160,11 +2195,10 @@ static int xCompileOptions(void *pCtx, int nVal, char **azVal, char **azCol){ printf("-- Compile option: %s\n", azVal[0]); return SQLITE_OK; } - int main(int argc, char **argv){ int doAutovac = 0; /* True for --autovacuum */ int cacheSize = 0; /* Desired cache size. 0 means default */ int doExclusive = 0; /* True for --exclusive */ int nHeap = 0, mnHeap = 0; /* Heap size from --heap */ @@ -2178,11 +2212,14 @@ int doPCache = 0; /* True if --pcache is seen */ int showStats = 0; /* True for --stats */ int nThread = 0; /* --threads value */ int mmapSize = 0; /* How big of a memory map to use */ int memDb = 0; /* --memdb. Use an in-memory database */ + int openFlags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE + ; /* SQLITE_OPEN_xxx flags. 
*/ char *zTSet = "main"; /* Which --testset torun */ + const char * zVfs = 0; /* --vfs NAME */ int doTrace = 0; /* True for --trace */ const char *zEncoding = 0; /* --utf16be or --utf16le */ const char *zDbName = 0; /* Name of the test database */ void *pHeap = 0; /* Allocated heap space */ @@ -2190,14 +2227,23 @@ void *pPCache = 0; /* Allocated storage for pcache */ int iCur, iHi; /* Stats values, current and "highwater" */ int i; /* Loop counter */ int rc; /* API return code */ +#ifdef SQLITE_SPEEDTEST1_WASM + /* Resetting all state is important for the WASM build, which may + ** call main() multiple times. */ + memset(&g, 0, sizeof(g)); + iTestNumber = 0; +#endif #ifdef SQLITE_CKSUMVFS_STATIC sqlite3_register_cksumvfs(0); #endif - + /* + ** Confirms that argc has at least N arguments following argv[i]. */ +#define ARGC_VALUE_CHECK(N) \ + if( i>=argc-(N) ) fatal_error("missing argument on %s\n", argv[i]) /* Display the version of SQLite being tested */ printf("-- Speedtest1 for SQLite %s %.48s\n", sqlite3_libversion(), sqlite3_sourceid()); /* Process command-line arguments */ @@ -2210,36 +2256,37 @@ const char *z = argv[i]; if( z[0]=='-' ){ do{ z++; }while( z[0]=='-' ); if( strcmp(z,"autovacuum")==0 ){ doAutovac = 1; + }else if( strcmp(z,"big-transactions")==0 ){ + g.doBigTransactions = 1; }else if( strcmp(z,"cachesize")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); - i++; - cacheSize = integerValue(argv[i]); + ARGC_VALUE_CHECK(1); + cacheSize = integerValue(argv[++i]); }else if( strcmp(z,"exclusive")==0 ){ doExclusive = 1; }else if( strcmp(z,"checkpoint")==0 ){ g.doCheckpoint = 1; }else if( strcmp(z,"explain")==0 ){ g.bSqlOnly = 1; g.bExplain = 1; }else if( strcmp(z,"heap")==0 ){ - if( i>=argc-2 ) fatal_error("missing arguments on %s\n", argv[i]); + ARGC_VALUE_CHECK(2); nHeap = integerValue(argv[i+1]); mnHeap = integerValue(argv[i+2]); i += 2; }else if( strcmp(z,"incrvacuum")==0 ){ doIncrvac = 1; }else if( strcmp(z,"journal")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); zJMode = argv[++i]; }else if( strcmp(z,"key")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); zKey = argv[++i]; }else if( strcmp(z,"lookaside")==0 ){ - if( i>=argc-2 ) fatal_error("missing arguments on %s\n", argv[i]); + ARGC_VALUE_CHECK(2); nLook = integerValue(argv[i+1]); szLook = integerValue(argv[i+2]); i += 2; }else if( strcmp(z,"memdb")==0 ){ memDb = 1; @@ -2249,23 +2296,25 @@ }else if( strcmp(z,"nomemstat")==0 ){ sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0); #endif #if SQLITE_VERSION_NUMBER>=3007017 }else if( strcmp(z, "mmap")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); mmapSize = integerValue(argv[++i]); #endif + }else if( strcmp(z,"nomutex")==0 ){ + openFlags |= SQLITE_OPEN_NOMUTEX; }else if( strcmp(z,"nosync")==0 ){ noSync = 1; }else if( strcmp(z,"notnull")==0 ){ g.zNN = "NOT NULL"; }else if( strcmp(z,"output")==0 ){ #ifdef SPEEDTEST_OMIT_HASH fatal_error("The --output option is not supported with" " -DSPEEDTEST_OMIT_HASH\n"); #else - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); i++; if( strcmp(argv[i],"-")==0 ){ g.hashFile = stdout; }else{ g.hashFile = fopen(argv[i], "wb"); @@ -2273,34 +2322,33 @@ fatal_error("cannot open \"%s\" for writing\n", argv[i]); } } #endif }else if( strcmp(z,"pagesize")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); 
pageSize = integerValue(argv[++i]); }else if( strcmp(z,"pcache")==0 ){ - if( i>=argc-2 ) fatal_error("missing arguments on %s\n", argv[i]); + ARGC_VALUE_CHECK(2); nPCache = integerValue(argv[i+1]); szPCache = integerValue(argv[i+2]); doPCache = 1; i += 2; }else if( strcmp(z,"primarykey")==0 ){ g.zPK = "PRIMARY KEY"; }else if( strcmp(z,"repeat")==0 ){ - if( i>=argc-1 ) fatal_error("missing arguments on %s\n", argv[i]); - g.nRepeat = integerValue(argv[i+1]); - i += 1; + ARGC_VALUE_CHECK(1); + g.nRepeat = integerValue(argv[++i]); }else if( strcmp(z,"reprepare")==0 ){ g.bReprepare = 1; #if SQLITE_VERSION_NUMBER>=3006000 }else if( strcmp(z,"serialized")==0 ){ sqlite3_config(SQLITE_CONFIG_SERIALIZED); }else if( strcmp(z,"singlethread")==0 ){ sqlite3_config(SQLITE_CONFIG_SINGLETHREAD); #endif }else if( strcmp(z,"script")==0 ){ - if( i>=argc-1 ) fatal_error("missing arguments on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); if( g.pScript ) fclose(g.pScript); g.pScript = fopen(argv[++i], "wb"); if( g.pScript==0 ){ fatal_error("unable to open output file \"%s\"\n", argv[i]); } @@ -2307,28 +2355,28 @@ }else if( strcmp(z,"sqlonly")==0 ){ g.bSqlOnly = 1; }else if( strcmp(z,"shrink-memory")==0 ){ g.bMemShrink = 1; }else if( strcmp(z,"size")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); g.szTest = integerValue(argv[++i]); }else if( strcmp(z,"stats")==0 ){ showStats = 1; }else if( strcmp(z,"temp")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); i++; if( argv[i][0]<'0' || argv[i][0]>'9' || argv[i][1]!=0 ){ fatal_error("argument to --temp should be integer between 0 and 9"); } g.eTemp = argv[i][0] - '0'; }else if( strcmp(z,"testset")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); zTSet = argv[++i]; }else if( strcmp(z,"trace")==0 ){ doTrace = 1; }else if( strcmp(z,"threads")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); nThread = integerValue(argv[++i]); }else if( strcmp(z,"utf16le")==0 ){ zEncoding = "utf16le"; }else if( strcmp(z,"utf16be")==0 ){ zEncoding = "utf16be"; @@ -2335,12 +2383,15 @@ }else if( strcmp(z,"verify")==0 ){ g.bVerify = 1; #ifndef SPEEDTEST_OMIT_HASH HashInit(); #endif + }else if( strcmp(z,"vfs")==0 ){ + ARGC_VALUE_CHECK(1); + zVfs = argv[++i]; }else if( strcmp(z,"reserve")==0 ){ - if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]); + ARGC_VALUE_CHECK(1); g.nReserve = atoi(argv[++i]); }else if( strcmp(z,"without-rowid")==0 ){ if( strstr(g.zWR,"WITHOUT")!=0 ){ /* no-op */ }else if( strstr(g.zWR,"STRICT")!=0 ){ @@ -2369,11 +2420,11 @@ }else{ fatal_error("surplus argument: %s\nUse \"%s -?\" for help\n", argv[i], argv[0]); } } - if( zDbName!=0 ) unlink(zDbName); +#undef ARGC_VALUE_CHECK #if SQLITE_VERSION_NUMBER>=3006001 if( nHeap>0 ){ pHeap = malloc( nHeap ); if( pHeap==0 ) fatal_error("cannot allocate %d-byte heap\n", nHeap); rc = sqlite3_config(SQLITE_CONFIG_HEAP, pHeap, nHeap, mnHeap); @@ -2391,13 +2442,28 @@ if( nLook>=0 ){ sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 0, 0); } #endif sqlite3_initialize(); + + if( zDbName!=0 ){ + sqlite3_vfs *pVfs = sqlite3_vfs_find(zVfs); + /* For some VFSes, e.g. opfs, unlink() is not sufficient. Use the + ** selected (or default) VFS's xDelete method to delete the + ** database. This is specifically important for the "opfs" VFS + ** when running from a WASM build of speedtest1, so that the db + ** can be cleaned up properly. 
For historical compatibility, we'll + ** also simply unlink(). */ + if( pVfs!=0 ){ + pVfs->xDelete(pVfs, zDbName, 1); + } + unlink(zDbName); + } /* Open the database and the input file */ - if( sqlite3_open(memDb ? ":memory:" : zDbName, &g.db) ){ + if( sqlite3_open_v2(memDb ? ":memory:" : zDbName, &g.db, + openFlags, zVfs) ){ fatal_error("Cannot open database file: %s\n", zDbName); } #if SQLITE_VERSION_NUMBER>=3006001 if( nLook>0 && szLook>0 ){ pLook = malloc( nLook*szLook ); @@ -2578,5 +2644,15 @@ free( pLook ); free( pPCache ); free( pHeap ); return 0; } + +#ifdef SQLITE_SPEEDTEST1_WASM +/* +** A workaround for some inconsistent behaviour with how +** main() does (or does not) get exported to WASM. +*/ +int wasm_main(int argc, char **argv){ + return main(argc, argv); +} +#endif Index: test/tclsqlite.test ================================================================== --- test/tclsqlite.test +++ test/tclsqlite.test @@ -24,10 +24,13 @@ set testprefix tcl # Check the error messages generated by tclsqlite # set r "sqlite_orig HANDLE ?FILENAME? ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nofollow BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?" +ifcapable sharedschema { + append r " ?-shared-schema BOOLEAN?" +} if {[sqlite3 -has-codec]} { append r " ?-key CODECKEY?" } do_test tcl-1.1 { set v [catch {sqlite3 -bogus} msg] Index: test/tester.tcl ================================================================== --- test/tester.tcl +++ test/tester.tcl @@ -610,11 +610,10 @@ proc reset_db {} { catch {db close} forcedelete test.db forcedelete test.db-journal forcedelete test.db-wal - forcedelete test.db-wal2 sqlite3 db ./test.db set ::DB [sqlite3_connection_pointer db] if {[info exists ::SETUP_SQL]} { db eval $::SETUP_SQL } @@ -1546,10 +1545,51 @@ $addr $I $col $opcode $D $p1 $p2 $p3 $p4 $p5 $comment ] } output2 "---- ------------ ------ ------ ------ ---------------- -- -" } + +proc execsql_pp {sql {db db}} { + set nCol 0 + $db eval $sql A { + if {$nCol==0} { + set nCol [llength $A(*)] + foreach c $A(*) { + set aWidth($c) [string length $c] + lappend data $c + } + } + foreach c $A(*) { + set n [string length $A($c)] + if {$n > $aWidth($c)} { + set aWidth($c) $n + } + lappend data $A($c) + } + } + if {$nCol>0} { + set nTotal 0 + foreach e [array names aWidth] { incr nTotal $aWidth($e) } + incr nTotal [expr ($nCol-1) * 3] + incr nTotal 4 + + set fmt "" + foreach c $A(*) { + lappend fmt "% -$aWidth($c)s" + } + set fmt "| [join $fmt { | }] |" + + puts [string repeat - $nTotal] + for {set i 0} {$i < [llength $data]} {incr i $nCol} { + set vals [lrange $data $i [expr $i+$nCol-1]] + puts [format $fmt {*}$vals] + if {$i==0} { puts [string repeat - $nTotal] } + } + puts [string repeat - $nTotal] + } +} + # Show the VDBE program for an SQL statement but omit the Trace # opcode at the beginning. This procedure can be used to prove # that different SQL statements generate exactly the same VDBE code. # @@ -2240,36 +2280,21 @@ # wal_is_wal_mode # # Returns true if this test should be run in WAL mode. False otherwise. 
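Returning to the speedtest1.c hunks earlier in this patch: before the database is opened, the old file is now removed through the xDelete method of the selected VFS (plain unlink() is not enough for VFSes such as "opfs" in the WASM build), and the connection is opened with sqlite3_open_v2() so that an explicit VFS name and open flags can be passed. The following is a minimal sketch of that pattern using only the public SQLite API; the reset_and_open() helper and the file name are illustrative and not part of the patch.

#include <stdio.h>
#include <unistd.h>
#include "sqlite3.h"

/* Delete zDbName through the VFS named zVfs (or the default VFS if
** zVfs is NULL), then reopen it on that same VFS. */
static int reset_and_open(const char *zDbName, const char *zVfs, sqlite3 **pDb){
  sqlite3_vfs *pVfs = sqlite3_vfs_find(zVfs);
  if( pVfs ){
    /* For some VFSes (e.g. "opfs"), unlink() alone does not remove the db */
    pVfs->xDelete(pVfs, zDbName, 1);
  }
  unlink(zDbName);   /* historical fallback for ordinary filesystems */
  return sqlite3_open_v2(zDbName, pDb,
                         SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, zVfs);
}

int main(void){
  sqlite3 *db = 0;
  if( reset_and_open("scratch.db", 0, &db)!=SQLITE_OK ){
    fprintf(stderr, "cannot open database\n");
    return 1;
  }
  sqlite3_close(db);
  return 0;
}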
# proc wal_is_wal_mode {} { - if {[permutation] eq "wal"} { return 1 } - if {[permutation] eq "wal2"} { return 2 } - return 0 + expr {[permutation] eq "wal"} } proc wal_set_journal_mode {{db db}} { - switch -- [wal_is_wal_mode] { - 0 { - } - - 1 { - $db eval "PRAGMA journal_mode = WAL" - } - - 2 { - $db eval "PRAGMA journal_mode = WAL2" - } + if { [wal_is_wal_mode] } { + $db eval "PRAGMA journal_mode = WAL" } } proc wal_check_journal_mode {testname {db db}} { if { [wal_is_wal_mode] } { $db eval { SELECT * FROM sqlite_master } - set expected "wal" - if {[wal_is_wal_mode]==2} { - set expected "wal2" - } - do_test $testname [list $db eval "PRAGMA main.journal_mode"] $expected + do_test $testname [list $db eval "PRAGMA main.journal_mode"] {wal} } } proc wal_is_capable {} { ifcapable !wal { return 0 } Index: test/threadtest3.c ================================================================== --- test/threadtest3.c +++ test/threadtest3.c @@ -36,11 +36,11 @@ ** The "Set Error Line" macro. */ #define SEL(e) ((e)->iLine = ((e)->rc ? (e)->iLine : __LINE__)) /* Database functions */ -#define opendb(w,x,y,z) (SEL(w), opendb_x(w,x,y,z)) +#define opendb(w,x,y,z,f) (SEL(w), opendb_x(w,x,y,z,f)) #define closedb(y,z) (SEL(y), closedb_x(y,z)) /* Functions to execute SQL */ #define sql_script(x,y,z) (SEL(x), sql_script_x(x,y,z)) #define integrity_check(x,y) (SEL(x), integrity_check_x(x,y)) @@ -543,15 +543,18 @@ static void opendb_x( Error *pErr, /* IN/OUT: Error code */ Sqlite *pDb, /* OUT: Database handle */ const char *zFile, /* Database file name */ - int bDelete /* True to delete db file before opening */ + int bDelete, /* True to delete db file before opening */ + int flags ){ if( pErr->rc==SQLITE_OK ){ int rc; - int flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_READWRITE | SQLITE_OPEN_URI; + if( flags==0 ){ + flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_READWRITE | SQLITE_OPEN_URI; + } if( bDelete ) unlink(zFile); rc = sqlite3_open_v2(zFile, &pDb->db, flags, 0); if( rc ){ sqlite_error(pErr, pDb, "open"); sqlite3_close(pDb->db); @@ -983,11 +986,11 @@ static char *walthread1_thread(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ int nIter = 0; /* Iterations so far */ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ const char *azSql[] = { "SELECT md5sum(x) FROM t1 WHERE rowid != (SELECT max(rowid) FROM t1)", "SELECT x FROM t1 WHERE rowid = (SELECT max(rowid) FROM t1)", }; @@ -1022,11 +1025,11 @@ static char *walthread1_ckpt_thread(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ int nCkpt = 0; /* Checkpoints so far */ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ sqlite3_sleep(500); execsql(&err, &db, "PRAGMA wal_checkpoint"); if( err.rc==SQLITE_OK ) nCkpt++; clear_error(&err, SQLITE_BUSY); @@ -1041,11 +1044,11 @@ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ Threadset threads = {0}; /* Test threads */ int i; /* Iterator variable */ - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "PRAGMA journal_mode = WAL;" "CREATE TABLE t1(x PRIMARY KEY);" "INSERT INTO t1 VALUES(randomblob(100));" "INSERT INTO t1 VALUES(randomblob(100));" @@ -1074,11 +1077,11 @@ while( !timetostop(&err) ){ int journal_exists = 0; int wal_exists = 0; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, 
"test.db", 0, 0); sql_script(&err, &db, zJournal); clear_error(&err, SQLITE_BUSY); sql_script(&err, &db, "BEGIN"); sql_script(&err, &db, "INSERT INTO t1 VALUES(NULL, randomblob(100))"); @@ -1104,11 +1107,11 @@ static void walthread2(int nMs){ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "CREATE TABLE t1(x INTEGER PRIMARY KEY, y UNIQUE)"); closedb(&err, &db); setstoptime(&err, nMs); launch_thread(&err, &threads, walthread2_thread, 0); @@ -1124,11 +1127,11 @@ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ i64 iNextWrite; /* Next value this thread will write */ int iArg = PTR2INT(pArg); - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); sql_script(&err, &db, "PRAGMA wal_autocheckpoint = 10"); iNextWrite = iArg+1; while( 1 ){ i64 sum1; @@ -1161,11 +1164,11 @@ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; int i; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "PRAGMA journal_mode = WAL;" "CREATE TABLE t1(cnt PRIMARY KEY, sum1, sum2);" "CREATE INDEX i1 ON t1(sum1);" "CREATE INDEX i2 ON t1(sum2);" @@ -1184,11 +1187,11 @@ static char *walthread4_reader_thread(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ integrity_check(&err, &db); } closedb(&err, &db); @@ -1199,11 +1202,11 @@ static char *walthread4_writer_thread(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ i64 iRow = 1; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); sql_script(&err, &db, "PRAGMA wal_autocheckpoint = 15;"); while( !timetostop(&err) ){ execsql_i64( &err, &db, "REPLACE INTO t1 VALUES(:iRow, randomblob(300))", &iRow ); @@ -1219,11 +1222,11 @@ static void walthread4(int nMs){ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "PRAGMA journal_mode = WAL;" "CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE);" ); closedb(&err, &db); @@ -1239,11 +1242,11 @@ static char *walthread5_thread(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ i64 nRow; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); nRow = execsql_i64(&err, &db, "SELECT count(*) FROM t1"); closedb(&err, &db); if( nRow!=65536 ) test_error(&err, "Bad row count: %d", (int)nRow); print_and_free_err(&err); @@ -1252,11 +1255,11 @@ static void walthread5(int nMs){ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "PRAGMA wal_autocheckpoint = 0;" "PRAGMA page_size = 1024;" "PRAGMA journal_mode = WAL;" "CREATE TABLE t1(x);" @@ -1343,11 +1346,11 @@ static void cgt_pager_1(int nMs){ void (*xSub)(Error *, Sqlite *); Error err = {0}; Sqlite db = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "PRAGMA cache_size = 2000;" "PRAGMA page_size = 1024;" "CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB);" ); @@ -1372,11 +1375,11 @@ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ int nDrop = 0; int 
nCreate = 0; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ int i; for(i=1; i<9; i++){ char *zSql = sqlite3_mprintf( @@ -1425,11 +1428,11 @@ Sqlite db = {0}; /* SQLite database connection */ i64 iVal = 0; int nInsert = 0; int nDelete = 0; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ do { iVal = (iVal+1)%100; execsql(&err, &db, "INSERT INTO t1 VALUES(:iX, :iY+1)", &iVal, &iVal); nInsert++; @@ -1450,11 +1453,11 @@ static void dynamic_triggers(int nMs){ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "PRAGMA page_size = 1024;" "PRAGMA journal_mode = WAL;" "CREATE TABLE t1(x, y);" "CREATE TABLE t2(x, y);" @@ -1490,14 +1493,13 @@ #include "tt3_checkpoint.c" #include "tt3_index.c" #include "tt3_lookaside1.c" #include "tt3_vacuum.c" #include "tt3_stress.c" +#include "tt3_reuseschema.c" #include "tt3_shared.c" -#include "tt3_bcwal2.c" - int main(int argc, char **argv){ struct ThreadTest { void (*xTest)(int); /* Routine for running this test */ const char *zTest; /* Name of this test */ int nMs; /* How long to run this test, in milliseconds */ @@ -1517,13 +1519,12 @@ { create_drop_index_1, "create_drop_index_1", 10000 }, { lookaside1, "lookaside1", 10000 }, { vacuum1, "vacuum1", 10000 }, { stress1, "stress1", 10000 }, { stress2, "stress2", 60000 }, + { reuse_schema_1, "reuse_schema_1", 20000 }, { shared1, "shared1", 10000 }, - - { bcwal2_1, "bcwal2_1", 100000 }, }; static char *substArgv[] = { 0, "*", 0 }; int i, iArg; int nTestfound = 0; Index: test/tkt-80e031a00f.test ================================================================== --- test/tkt-80e031a00f.test +++ test/tkt-80e031a00f.test @@ -22,15 +22,14 @@ # EVIDENCE-OF: R-52275-55503 When the right operand is an empty set, the # result of IN is false and the result of NOT IN is true, regardless of # the left operand and even if the left operand is NULL. # -# EVIDENCE-OF: R-13595-45863 Note that SQLite allows the parenthesized +# EVIDENCE-OF: R-64309-54027 Note that SQLite allows the parenthesized # list of scalar values on the right-hand side of an IN or NOT IN -# operator to be an empty list but most other SQL database database -# engines and the SQL92 standard require the list to contain at least -# one element. +# operator to be an empty list but most other SQL database engines and +# the SQL92 standard require the list to contain at least one element. # do_execsql_test tkt-80e031a00f.1 {SELECT 1 IN ()} 0 do_execsql_test tkt-80e031a00f.1b {SELECT 1 IN (2)} 0 do_execsql_test tkt-80e031a00f.1c {SELECT 1 IN (2,3,4,5,6,7,8,9)} 0 do_execsql_test tkt-80e031a00f.2 {SELECT 1 NOT IN ()} 1 DELETED test/tt3_bcwal2.c Index: test/tt3_bcwal2.c ================================================================== --- test/tt3_bcwal2.c +++ /dev/null @@ -1,122 +0,0 @@ -/* -** 2011-02-02 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file is part of the test program "threadtest3". Despite being a C -** file it is not compiled separately, but included by threadtest3.c using -** the #include directive normally used with header files. 
-** -** This file contains the implementation of test cases: -** -** bcwal2_1 -*/ - -static char *bcwal2_1_checkpointer(int iTid, void *pArg){ - Error err = {0}; /* Error code and message */ - Sqlite db = {0}; /* SQLite database connection */ - int nIter = 0; - - opendb(&err, &db, "test.db", 0); - while( !timetostop(&err) ){ - sql_script(&err, &db, "PRAGMA wal_checkpoint;"); - nIter++; - } - closedb(&err, &db); - - print_and_free_err(&err); - return sqlite3_mprintf("%d iterations", nIter); -} - -static char *bcwal2_1_integrity(int iTid, void *pArg){ - Error err = {0}; /* Error code and message */ - Sqlite db = {0}; /* SQLite database connection */ - int nIter = 0; - - opendb(&err, &db, "test.db", 0); - while( !timetostop(&err) ){ - // integrity_check(&err, &db); - sql_script(&err, &db, "SELECT * FROM t1;"); - nIter++; - } - closedb(&err, &db); - - print_and_free_err(&err); - return sqlite3_mprintf("%d integrity-checks", nIter); -} - -static char *bcwal2_1_writer(int iTid, void *pArg){ - Error err = {0}; /* Error code and message */ - Sqlite db = {0}; /* SQLite database connection */ - int nWrite = 0; /* Writes so far */ - int nBusy = 0; /* Busy errors so far */ - sqlite3_mutex *pMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_APP1); - - opendb(&err, &db, "test.db", 0); - while( !timetostop(&err) ){ - - sql_script(&err, &db, - "PRAGMA wal_autocheckpoint = 0;" - "BEGIN CONCURRENT;" - " REPLACE INTO t1 VALUES( abs(random() % 100000), " - " hex(randomblob( abs( random() % 200 ) + 50 ))" - " );" - ); - - if( err.rc==SQLITE_OK ){ - sqlite3_mutex_enter(pMutex); - sql_script(&err, &db, "COMMIT"); - sqlite3_mutex_leave(pMutex); - if( err.rc==SQLITE_OK ){ - nWrite++; - }else{ - clear_error(&err, SQLITE_BUSY); - sql_script(&err, &db, "ROLLBACK"); - nBusy++; - } - - assert( err.rc!=SQLITE_OK || sqlite3_get_autocommit(db.db)==1 ); - } - } - closedb(&err, &db); - - print_and_free_err(&err); - return sqlite3_mprintf("%d successful writes, %d busy", nWrite, nBusy); -} - -static void bcwal2_1(int nMs){ - Error err = {0}; - Sqlite db = {0}; - Threadset threads = {0}; - - opendb(&err, &db, "test.db", 1); - sql_script(&err, &db, - "PRAGMA page_size = 1024;" - "PRAGMA journal_mode = wal2;" - "CREATE TABLE t1(ii INTEGER PRIMARY KEY, tt TEXT);" - "CREATE INDEX t1tt ON t1(tt);" - ); - - setstoptime(&err, nMs); - - launch_thread(&err, &threads, bcwal2_1_writer, 0); - launch_thread(&err, &threads, bcwal2_1_writer, 0); - launch_thread(&err, &threads, bcwal2_1_writer, 0); - launch_thread(&err, &threads, bcwal2_1_integrity, 0); - launch_thread(&err, &threads, bcwal2_1_checkpointer, 0); - - join_all_threads(&err, &threads); - - /* Do a final integrity-check on the db */ - integrity_check(&err, &db); - closedb(&err, &db); - - print_and_free_err(&err); -} - Index: test/tt3_checkpoint.c ================================================================== --- test/tt3_checkpoint.c +++ test/tt3_checkpoint.c @@ -68,11 +68,11 @@ static char *checkpoint_starvation_reader(int iTid, void *pArg){ Error err = {0}; Sqlite db = {0}; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ i64 iCount1, iCount2; sql_script(&err, &db, "BEGIN"); iCount1 = execsql_i64(&err, &db, "SELECT count(x) FROM t1"); sqlite3_sleep(CHECKPOINT_STARVATION_READMS); @@ -94,11 +94,11 @@ Sqlite db = {0}; Threadset threads = {0}; int nInsert = 0; int i; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "PRAGMA page_size = 1024;" "PRAGMA journal_mode = WAL;" "CREATE TABLE 
t1(x);" ); DELETED test/tt3_core.c Index: test/tt3_core.c ================================================================== --- test/tt3_core.c +++ /dev/null @@ -1,1035 +0,0 @@ -/* -** 2016-05-07 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -*/ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* -** The "Set Error Line" macro. -*/ -#define SEL(e) ((e)->iLine = ((e)->rc ? (e)->iLine : __LINE__)) - -/* Database functions */ -#define opendb(w,x,y,z) (SEL(w), opendb_x(w,x,y,z)) -#define closedb(y,z) (SEL(y), closedb_x(y,z)) - -/* Functions to execute SQL */ -#define sql_script(x,y,z) (SEL(x), sql_script_x(x,y,z)) -#define integrity_check(x,y) (SEL(x), integrity_check_x(x,y)) -#define execsql_i64(x,y,...) (SEL(x), execsql_i64_x(x,y,__VA_ARGS__)) -#define execsql_text(x,y,z,...) (SEL(x), execsql_text_x(x,y,z,__VA_ARGS__)) -#define execsql(x,y,...) (SEL(x), (void)execsql_i64_x(x,y,__VA_ARGS__)) -#define sql_script_printf(x,y,z,...) ( \ - SEL(x), sql_script_printf_x(x,y,z,__VA_ARGS__) \ -) - -/* Thread functions */ -#define launch_thread(w,x,y,z) (SEL(w), launch_thread_x(w,x,y,z)) -#define join_all_threads(y,z) (SEL(y), join_all_threads_x(y,z)) - -/* Timer functions */ -#define setstoptime(y,z) (SEL(y), setstoptime_x(y,z)) -#define timetostop(z) (SEL(z), timetostop_x(z)) - -/* Report/clear errors. */ -#define test_error(z, ...) test_error_x(z, sqlite3_mprintf(__VA_ARGS__)) -#define clear_error(y,z) clear_error_x(y, z) - -/* File-system operations */ -#define filesize(y,z) (SEL(y), filesize_x(y,z)) -#define filecopy(x,y,z) (SEL(x), filecopy_x(x,y,z)) - -#define PTR2INT(x) ((int)((intptr_t)x)) -#define INT2PTR(x) ((void*)((intptr_t)x)) - -/* -** End of test code/infrastructure interface macros. -*************************************************************************/ - - -/************************************************************************ -** Start of command line processing utilities. -*/ -#define CMDLINE_INT 1 -#define CMDLINE_BOOL 2 -#define CMDLINE_STRING 3 - -typedef struct CmdlineArg CmdlineArg; -struct CmdlineArg { - const char *zSwitch; - int eType; - int iOffset; -}; - -static void cmdline_error(const char *zFmt, ...){ - va_list ap; /* ... arguments */ - char *zMsg = 0; - va_start(ap, zFmt); - zMsg = sqlite3_vmprintf(zFmt, ap); - fprintf(stderr, "%s\n", zMsg); - sqlite3_free(zMsg); - va_end(ap); - exit(-1); -} - -static void cmdline_usage(const char *zPrg, CmdlineArg *apArg){ - int i; - fprintf(stderr, "Usage: %s SWITCHES\n", zPrg); - fprintf(stderr, "\n"); - fprintf(stderr, "where switches are\n"); - for(i=0; apArg[i].zSwitch; i++){ - const char *zExtra = ""; - switch( apArg[i].eType ){ - case CMDLINE_STRING: zExtra = "STRING"; break; - case CMDLINE_INT: zExtra = "N"; break; - case CMDLINE_BOOL: zExtra = ""; break; - default: - zExtra = "???"; - break; - } - fprintf(stderr, " %s %s\n", apArg[i].zSwitch, zExtra); - } - fprintf(stderr, "\n"); - exit(-2); -} - -static char *cmdline_construct(CmdlineArg *apArg, void *pObj){ - unsigned char *p = (unsigned char*)pObj; - char *zRet = 0; - int iArg; - - for(iArg=0; apArg[iArg].zSwitch; iArg++){ - const char *zSpace = (zRet ? 
" " : ""); - CmdlineArg *pArg = &apArg[iArg]; - - switch( pArg->eType ){ - case CMDLINE_STRING: { - char *zVal = *(char**)(p + pArg->iOffset); - if( zVal ){ - zRet = sqlite3_mprintf("%z%s%s %s", zRet, zSpace, pArg->zSwitch,zVal); - } - break; - }; - - case CMDLINE_INT: { - zRet = sqlite3_mprintf("%z%s%s %d", zRet, zSpace, pArg->zSwitch, - *(int*)(p + pArg->iOffset) - ); - break; - }; - - case CMDLINE_BOOL: - if( *(int*)(p + pArg->iOffset) ){ - zRet = sqlite3_mprintf("%z%s%s", zRet, zSpace, pArg->zSwitch); - } - break; - - default: - zRet = sqlite3_mprintf("%z%s%s ???", zRet, zSpace, pArg->zSwitch); - } - } - - return zRet; -} - -static void cmdline_process( - CmdlineArg *apArg, - int argc, - const char **argv, - void *pObj -){ - int i; - int iArg; - unsigned char *p = (unsigned char*)pObj; - - for(i=1; i=0 ){ - cmdline_error("ambiguous switch: %s", z); - } - iOpt = iArg; - switch( apArg[iArg].eType ){ - case CMDLINE_INT: - i++; - if( i==argc ){ - cmdline_error("option requires an argument: %s", z); - } - *(int*)(p + apArg[iArg].iOffset) = atoi(argv[i]); - break; - - case CMDLINE_STRING: - i++; - if( i==argc ){ - cmdline_error("option requires an argument: %s", z); - } - *(char**)(p + apArg[iArg].iOffset) = sqlite3_mprintf("%s", argv[i]); - break; - - case CMDLINE_BOOL: - *(int*)(p + apArg[iArg].iOffset) = 1; - break; - - default: - assert( 0 ); - cmdline_error("internal error"); - return; - } - } - } - - if( iOpt<0 ){ - cmdline_usage(argv[0], apArg); - } - } -} - -/* -** End of command line processing utilities. -*************************************************************************/ - - -/* - * This code implements the MD5 message-digest algorithm. - * The algorithm is due to Ron Rivest. This code was - * written by Colin Plumb in 1993, no copyright is claimed. - * This code is in the public domain; do with it what you wish. - * - * Equivalent code is available from RSA Data Security, Inc. - * This code has been tested against that, and is equivalent, - * except that you don't need to include two pages of legalese - * with every copy. - * - * To compute the message digest of a chunk of bytes, declare an - * MD5Context structure, pass it to MD5Init, call MD5Update as - * needed on buffers full of bytes, and then call MD5Final, which - * will fill a supplied 16-byte array with the digest. - */ - -/* - * If compiled on a machine that doesn't have a 32-bit integer, - * you just set "uint32" to the appropriate datatype for an - * unsigned 32-bit integer. For example: - * - * cc -Duint32='unsigned long' md5.c - * - */ -#ifndef uint32 -# define uint32 unsigned int -#endif - -struct MD5Context { - int isInit; - uint32 buf[4]; - uint32 bits[2]; - union { - unsigned char in[64]; - uint32 in32[16]; - } u; -}; -typedef struct MD5Context MD5Context; - -/* - * Note: this code is harmless on little-endian machines. - */ -static void byteReverse (unsigned char *buf, unsigned longs){ - uint32 t; - do { - t = (uint32)((unsigned)buf[3]<<8 | buf[2]) << 16 | - ((unsigned)buf[1]<<8 | buf[0]); - *(uint32 *)buf = t; - buf += 4; - } while (--longs); -} -/* The four core functions - F1 is optimized somewhat */ - -/* #define F1(x, y, z) (x & y | ~x & z) */ -#define F1(x, y, z) (z ^ (x & (y ^ z))) -#define F2(x, y, z) F1(z, x, y) -#define F3(x, y, z) (x ^ y ^ z) -#define F4(x, y, z) (y ^ (x | ~z)) - -/* This is the central step in the MD5 algorithm. 
*/ -#define MD5STEP(f, w, x, y, z, data, s) \ - ( w += f(x, y, z) + data, w = w<>(32-s), w += x ) - -/* - * The core of the MD5 algorithm, this alters an existing MD5 hash to - * reflect the addition of 16 longwords of new data. MD5Update blocks - * the data and converts bytes into longwords for this routine. - */ -static void MD5Transform(uint32 buf[4], const uint32 in[16]){ - register uint32 a, b, c, d; - - a = buf[0]; - b = buf[1]; - c = buf[2]; - d = buf[3]; - - MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478, 7); - MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12); - MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17); - MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22); - MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf, 7); - MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12); - MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17); - MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22); - MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8, 7); - MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12); - MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17); - MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22); - MD5STEP(F1, a, b, c, d, in[12]+0x6b901122, 7); - MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12); - MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17); - MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22); - - MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562, 5); - MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340, 9); - MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14); - MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20); - MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d, 5); - MD5STEP(F2, d, a, b, c, in[10]+0x02441453, 9); - MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14); - MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20); - MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6, 5); - MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6, 9); - MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14); - MD5STEP(F2, b, c, d, a, in[ 8]+0x455a14ed, 20); - MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905, 5); - MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8, 9); - MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14); - MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20); - - MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942, 4); - MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11); - MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16); - MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23); - MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44, 4); - MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11); - MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16); - MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23); - MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6, 4); - MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11); - MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16); - MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23); - MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039, 4); - MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11); - MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16); - MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23); - - MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244, 6); - MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10); - MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15); - MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21); - MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3, 6); - MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10); - MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15); - MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21); - MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f, 6); - MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10); - MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15); - MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21); - MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82, 6); - MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10); 
- MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15); - MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21); - - buf[0] += a; - buf[1] += b; - buf[2] += c; - buf[3] += d; -} - -/* - * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious - * initialization constants. - */ -static void MD5Init(MD5Context *ctx){ - ctx->isInit = 1; - ctx->buf[0] = 0x67452301; - ctx->buf[1] = 0xefcdab89; - ctx->buf[2] = 0x98badcfe; - ctx->buf[3] = 0x10325476; - ctx->bits[0] = 0; - ctx->bits[1] = 0; -} - -/* - * Update context to reflect the concatenation of another buffer full - * of bytes. - */ -static -void MD5Update(MD5Context *ctx, const unsigned char *buf, unsigned int len){ - uint32 t; - - /* Update bitcount */ - - t = ctx->bits[0]; - if ((ctx->bits[0] = t + ((uint32)len << 3)) < t) - ctx->bits[1]++; /* Carry from low to high */ - ctx->bits[1] += len >> 29; - - t = (t >> 3) & 0x3f; /* Bytes already in shsInfo->data */ - - /* Handle any leading odd-sized chunks */ - - if ( t ) { - unsigned char *p = (unsigned char *)ctx->u.in + t; - - t = 64-t; - if (len < t) { - memcpy(p, buf, len); - return; - } - memcpy(p, buf, t); - byteReverse(ctx->u.in, 16); - MD5Transform(ctx->buf, (uint32 *)ctx->u.in); - buf += t; - len -= t; - } - - /* Process data in 64-byte chunks */ - - while (len >= 64) { - memcpy(ctx->u.in, buf, 64); - byteReverse(ctx->u.in, 16); - MD5Transform(ctx->buf, (uint32 *)ctx->u.in); - buf += 64; - len -= 64; - } - - /* Handle any remaining bytes of data. */ - - memcpy(ctx->u.in, buf, len); -} - -/* - * Final wrapup - pad to 64-byte boundary with the bit pattern - * 1 0* (64-bit count of bits processed, MSB-first) - */ -static void MD5Final(unsigned char digest[16], MD5Context *ctx){ - unsigned count; - unsigned char *p; - - /* Compute number of bytes mod 64 */ - count = (ctx->bits[0] >> 3) & 0x3F; - - /* Set the first char of padding to 0x80. This is safe since there is - always at least one byte free */ - p = ctx->u.in + count; - *p++ = 0x80; - - /* Bytes of padding needed to make 64 bytes */ - count = 64 - 1 - count; - - /* Pad out to 56 mod 64 */ - if (count < 8) { - /* Two lots of padding: Pad the first block to 64 bytes */ - memset(p, 0, count); - byteReverse(ctx->u.in, 16); - MD5Transform(ctx->buf, (uint32 *)ctx->u.in); - - /* Now fill the next block with 56 bytes */ - memset(ctx->u.in, 0, 56); - } else { - /* Pad block to 56 bytes */ - memset(p, 0, count-8); - } - byteReverse(ctx->u.in, 14); - - /* Append length in bits and transform */ - ctx->u.in32[14] = ctx->bits[0]; - ctx->u.in32[15] = ctx->bits[1]; - - MD5Transform(ctx->buf, (uint32 *)ctx->u.in); - byteReverse((unsigned char *)ctx->buf, 4); - memcpy(digest, ctx->buf, 16); - memset(ctx, 0, sizeof(*ctx)); /* In case it is sensitive */ -} - -/* -** Convert a 128-bit MD5 digest into a 32-digit base-16 number. -*/ -static void MD5DigestToBase16(unsigned char *digest, char *zBuf){ - static char const zEncode[] = "0123456789abcdef"; - int i, j; - - for(j=i=0; i<16; i++){ - int a = digest[i]; - zBuf[j++] = zEncode[(a>>4)&0xf]; - zBuf[j++] = zEncode[a & 0xf]; - } - zBuf[j] = 0; -} - -/* -** During testing, the special md5sum() aggregate function is available. -** inside SQLite. The following routines implement that function. 
-*/ -static void md5step(sqlite3_context *context, int argc, sqlite3_value **argv){ - MD5Context *p; - int i; - if( argc<1 ) return; - p = sqlite3_aggregate_context(context, sizeof(*p)); - if( p==0 ) return; - if( !p->isInit ){ - MD5Init(p); - } - for(i=0; izErr); - p->zErr = 0; - p->rc = 0; -} - -static void print_err(Error *p){ - if( p->rc!=SQLITE_OK ){ - int isWarn = 0; - if( p->rc==SQLITE_SCHEMA ) isWarn = 1; - if( sqlite3_strglob("* - no such table: *",p->zErr)==0 ) isWarn = 1; - printf("%s: (%d) \"%s\" at line %d\n", isWarn ? "Warning" : "Error", - p->rc, p->zErr, p->iLine); - if( !isWarn ) nGlobalErr++; - fflush(stdout); - } -} - -static void print_and_free_err(Error *p){ - print_err(p); - free_err(p); -} - -static void system_error(Error *pErr, int iSys){ - pErr->rc = iSys; - pErr->zErr = (char *)sqlite3_malloc(512); - strerror_r(iSys, pErr->zErr, 512); - pErr->zErr[511] = '\0'; -} - -static void sqlite_error( - Error *pErr, - Sqlite *pDb, - const char *zFunc -){ - pErr->rc = sqlite3_errcode(pDb->db); - pErr->zErr = sqlite3_mprintf( - "sqlite3_%s() - %s (%d)", zFunc, sqlite3_errmsg(pDb->db), - sqlite3_extended_errcode(pDb->db) - ); -} - -static void test_error_x( - Error *pErr, - char *zErr -){ - if( pErr->rc==SQLITE_OK ){ - pErr->rc = 1; - pErr->zErr = zErr; - }else{ - sqlite3_free(zErr); - } -} - -static void clear_error_x( - Error *pErr, - int rc -){ - if( pErr->rc==rc ){ - pErr->rc = SQLITE_OK; - sqlite3_free(pErr->zErr); - pErr->zErr = 0; - } -} - -static int busyhandler(void *pArg, int n){ - usleep(10*1000); - return 1; -} - -static void opendb_x( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb, /* OUT: Database handle */ - const char *zFile, /* Database file name */ - int bDelete /* True to delete db file before opening */ -){ - if( pErr->rc==SQLITE_OK ){ - int rc; - int flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_READWRITE | SQLITE_OPEN_URI; - if( bDelete ) unlink(zFile); - rc = sqlite3_open_v2(zFile, &pDb->db, flags, 0); - if( rc ){ - sqlite_error(pErr, pDb, "open"); - sqlite3_close(pDb->db); - pDb->db = 0; - }else{ - sqlite3_create_function( - pDb->db, "md5sum", -1, SQLITE_UTF8, 0, 0, md5step, md5finalize - ); - sqlite3_busy_handler(pDb->db, busyhandler, 0); - sqlite3_exec(pDb->db, "PRAGMA synchronous=OFF", 0, 0, 0); - } - } -} - -static void closedb_x( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb /* OUT: Database handle */ -){ - int rc; - int i; - Statement *pIter; - Statement *pNext; - for(pIter=pDb->pCache; pIter; pIter=pNext){ - pNext = pIter->pNext; - sqlite3_finalize(pIter->pStmt); - sqlite3_free(pIter); - } - for(i=0; inText; i++){ - sqlite3_free(pDb->aText[i]); - } - sqlite3_free(pDb->aText); - rc = sqlite3_close(pDb->db); - if( rc && pErr->rc==SQLITE_OK ){ - pErr->zErr = sqlite3_mprintf("%s", sqlite3_errmsg(pDb->db)); - } - memset(pDb, 0, sizeof(Sqlite)); -} - -static void sql_script_x( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb, /* Database handle */ - const char *zSql /* SQL script to execute */ -){ - if( pErr->rc==SQLITE_OK ){ - pErr->rc = sqlite3_exec(pDb->db, zSql, 0, 0, &pErr->zErr); - } -} - -static void sql_script_printf_x( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb, /* Database handle */ - const char *zFormat, /* SQL printf format string */ - ... /* Printf args */ -){ - va_list ap; /* ... 
printf arguments */ - va_start(ap, zFormat); - if( pErr->rc==SQLITE_OK ){ - char *zSql = sqlite3_vmprintf(zFormat, ap); - pErr->rc = sqlite3_exec(pDb->db, zSql, 0, 0, &pErr->zErr); - sqlite3_free(zSql); - } - va_end(ap); -} - -static Statement *getSqlStatement( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb, /* Database handle */ - const char *zSql /* SQL statement */ -){ - Statement *pRet; - int rc; - - for(pRet=pDb->pCache; pRet; pRet=pRet->pNext){ - if( 0==strcmp(sqlite3_sql(pRet->pStmt), zSql) ){ - return pRet; - } - } - - pRet = sqlite3_malloc(sizeof(Statement)); - rc = sqlite3_prepare_v2(pDb->db, zSql, -1, &pRet->pStmt, 0); - if( rc!=SQLITE_OK ){ - sqlite_error(pErr, pDb, "prepare_v2"); - return 0; - } - assert( 0==strcmp(sqlite3_sql(pRet->pStmt), zSql) ); - - pRet->pNext = pDb->pCache; - pDb->pCache = pRet; - return pRet; -} - -static sqlite3_stmt *getAndBindSqlStatement( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb, /* Database handle */ - va_list ap /* SQL followed by parameters */ -){ - Statement *pStatement; /* The SQLite statement wrapper */ - sqlite3_stmt *pStmt; /* The SQLite statement to return */ - int i; /* Used to iterate through parameters */ - - pStatement = getSqlStatement(pErr, pDb, va_arg(ap, const char *)); - if( !pStatement ) return 0; - pStmt = pStatement->pStmt; - for(i=1; i<=sqlite3_bind_parameter_count(pStmt); i++){ - const char *zName = sqlite3_bind_parameter_name(pStmt, i); - void * pArg = va_arg(ap, void*); - - switch( zName[1] ){ - case 'i': - sqlite3_bind_int64(pStmt, i, *(i64 *)pArg); - break; - - default: - pErr->rc = 1; - pErr->zErr = sqlite3_mprintf("Cannot discern type: \"%s\"", zName); - pStmt = 0; - break; - } - } - - return pStmt; -} - -static i64 execsql_i64_x( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb, /* Database handle */ - ... /* SQL and pointers to parameter values */ -){ - i64 iRet = 0; - if( pErr->rc==SQLITE_OK ){ - sqlite3_stmt *pStmt; /* SQL statement to execute */ - va_list ap; /* ... arguments */ - va_start(ap, pDb); - pStmt = getAndBindSqlStatement(pErr, pDb, ap); - if( pStmt ){ - int first = 1; - while( SQLITE_ROW==sqlite3_step(pStmt) ){ - if( first && sqlite3_column_count(pStmt)>0 ){ - iRet = sqlite3_column_int64(pStmt, 0); - } - first = 0; - } - if( SQLITE_OK!=sqlite3_reset(pStmt) ){ - sqlite_error(pErr, pDb, "reset"); - } - } - va_end(ap); - } - return iRet; -} - -static char * execsql_text_x( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb, /* Database handle */ - int iSlot, /* Db handle slot to store text in */ - ... /* SQL and pointers to parameter values */ -){ - char *zRet = 0; - - if( iSlot>=pDb->nText ){ - int nByte = sizeof(char *)*(iSlot+1); - pDb->aText = (char **)sqlite3_realloc(pDb->aText, nByte); - memset(&pDb->aText[pDb->nText], 0, sizeof(char*)*(iSlot+1-pDb->nText)); - pDb->nText = iSlot+1; - } - - if( pErr->rc==SQLITE_OK ){ - sqlite3_stmt *pStmt; /* SQL statement to execute */ - va_list ap; /* ... 
arguments */ - va_start(ap, iSlot); - pStmt = getAndBindSqlStatement(pErr, pDb, ap); - if( pStmt ){ - int first = 1; - while( SQLITE_ROW==sqlite3_step(pStmt) ){ - if( first && sqlite3_column_count(pStmt)>0 ){ - zRet = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 0)); - sqlite3_free(pDb->aText[iSlot]); - pDb->aText[iSlot] = zRet; - } - first = 0; - } - if( SQLITE_OK!=sqlite3_reset(pStmt) ){ - sqlite_error(pErr, pDb, "reset"); - } - } - va_end(ap); - } - - return zRet; -} - -static void integrity_check_x( - Error *pErr, /* IN/OUT: Error code */ - Sqlite *pDb /* Database handle */ -){ - if( pErr->rc==SQLITE_OK ){ - Statement *pStatement; /* Statement to execute */ - char *zErr = 0; /* Integrity check error */ - - pStatement = getSqlStatement(pErr, pDb, "PRAGMA integrity_check"); - if( pStatement ){ - sqlite3_stmt *pStmt = pStatement->pStmt; - while( SQLITE_ROW==sqlite3_step(pStmt) ){ - const char *z = (const char*)sqlite3_column_text(pStmt, 0); - if( strcmp(z, "ok") ){ - if( zErr==0 ){ - zErr = sqlite3_mprintf("%s", z); - }else{ - zErr = sqlite3_mprintf("%z\n%s", zErr, z); - } - } - } - sqlite3_reset(pStmt); - - if( zErr ){ - pErr->zErr = zErr; - pErr->rc = 1; - } - } - } -} - -static void *launch_thread_main(void *pArg){ - Thread *p = (Thread *)pArg; - return (void *)p->xProc(p->iTid, p->pArg); -} - -static void launch_thread_x( - Error *pErr, /* IN/OUT: Error code */ - Threadset *pThreads, /* Thread set */ - char *(*xProc)(int, void*), /* Proc to run */ - void *pArg /* Argument passed to thread proc */ -){ - if( pErr->rc==SQLITE_OK ){ - int iTid = ++pThreads->iMaxTid; - Thread *p; - int rc; - - p = (Thread *)sqlite3_malloc(sizeof(Thread)); - memset(p, 0, sizeof(Thread)); - p->iTid = iTid; - p->pArg = pArg; - p->xProc = xProc; - - rc = pthread_create(&p->tid, NULL, launch_thread_main, (void *)p); - if( rc!=0 ){ - system_error(pErr, rc); - sqlite3_free(p); - }else{ - p->pNext = pThreads->pThread; - pThreads->pThread = p; - } - } -} - -static void join_all_threads_x( - Error *pErr, /* IN/OUT: Error code */ - Threadset *pThreads /* Thread set */ -){ - Thread *p; - Thread *pNext; - for(p=pThreads->pThread; p; p=pNext){ - void *ret; - pNext = p->pNext; - int rc; - rc = pthread_join(p->tid, &ret); - if( rc!=0 ){ - if( pErr->rc==SQLITE_OK ) system_error(pErr, rc); - }else{ - printf("Thread %d says: %s\n", p->iTid, (ret==0 ? "..." : (char *)ret)); - fflush(stdout); - } - sqlite3_free(p); - } - pThreads->pThread = 0; -} - -static i64 filesize_x( - Error *pErr, - const char *zFile -){ - i64 iRet = 0; - if( pErr->rc==SQLITE_OK ){ - struct stat sStat; - if( stat(zFile, &sStat) ){ - iRet = -1; - }else{ - iRet = sStat.st_size; - } - } - return iRet; -} - -static void filecopy_x( - Error *pErr, - const char *zFrom, - const char *zTo -){ - if( pErr->rc==SQLITE_OK ){ - i64 nByte = filesize_x(pErr, zFrom); - if( nByte<0 ){ - test_error_x(pErr, sqlite3_mprintf("no such file: %s", zFrom)); - }else{ - i64 iOff; - char aBuf[1024]; - int fd1; - int fd2; - unlink(zTo); - - fd1 = open(zFrom, O_RDONLY); - if( fd1<0 ){ - system_error(pErr, errno); - return; - } - fd2 = open(zTo, O_RDWR|O_CREAT|O_EXCL, 0644); - if( fd2<0 ){ - system_error(pErr, errno); - close(fd1); - return; - } - - iOff = 0; - while( iOffnByte ){ - nCopy = nByte - iOff; - } - if( nCopy!=read(fd1, aBuf, nCopy) ){ - system_error(pErr, errno); - break; - } - if( nCopy!=write(fd2, aBuf, nCopy) ){ - system_error(pErr, errno); - break; - } - iOff += nCopy; - } - - close(fd1); - close(fd2); - } - } -} - -/* -** Used by setstoptime() and timetostop(). 
-*/ -static double timelimit = 0.0; - -static double currentTime(void){ - double t; - static sqlite3_vfs *pTimelimitVfs = 0; - if( pTimelimitVfs==0 ) pTimelimitVfs = sqlite3_vfs_find(0); - if( pTimelimitVfs->iVersion>=2 && pTimelimitVfs->xCurrentTimeInt64!=0 ){ - sqlite3_int64 tm; - pTimelimitVfs->xCurrentTimeInt64(pTimelimitVfs, &tm); - t = tm/86400000.0; - }else{ - pTimelimitVfs->xCurrentTime(pTimelimitVfs, &t); - } - return t; -} - -static void setstoptime_x( - Error *pErr, /* IN/OUT: Error code */ - int nMs /* Milliseconds until "stop time" */ -){ - if( pErr->rc==SQLITE_OK ){ - double t = currentTime(); - timelimit = t + ((double)nMs)/(1000.0*60.0*60.0*24.0); - } -} - -static int timetostop_x( - Error *pErr /* IN/OUT: Error code */ -){ - int ret = 1; - if( pErr->rc==SQLITE_OK ){ - double t = currentTime(); - ret = (t >= timelimit); - } - return ret; -} - Index: test/tt3_index.c ================================================================== --- test/tt3_index.c +++ test/tt3_index.c @@ -17,11 +17,11 @@ static char *create_drop_index_thread(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ while( !timetostop(&err) ){ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); sql_script(&err, &db, "DROP INDEX IF EXISTS i1;" "DROP INDEX IF EXISTS i2;" "DROP INDEX IF EXISTS i3;" @@ -49,11 +49,11 @@ static void create_drop_index_1(int nMs){ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "CREATE TABLE t11(a, b, c, d);" "WITH data(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM data WHERE x<100) " "INSERT INTO t11 SELECT x,x,x,x FROM data;" ); Index: test/tt3_lookaside1.c ================================================================== --- test/tt3_lookaside1.c +++ test/tt3_lookaside1.c @@ -20,11 +20,11 @@ static char *lookaside1_thread_reader(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ sqlite3_stmt *pStmt = 0; int rc; @@ -45,11 +45,11 @@ static char *lookaside1_thread_writer(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); do{ sql_script(&err, &db, "BEGIN;" "UPDATE t3 SET i=i+1 WHERE x=1;" @@ -66,11 +66,11 @@ static void lookaside1(int nMs){ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "CREATE TABLE t1(x PRIMARY KEY) WITHOUT ROWID;" "WITH data(x,y) AS (" " SELECT 1, quote(randomblob(750)) UNION ALL " " SELECT x*2, y||y FROM data WHERE x<5) " ADDED test/tt3_reuseschema.c Index: test/tt3_reuseschema.c ================================================================== --- /dev/null +++ test/tt3_reuseschema.c @@ -0,0 +1,73 @@ +/* +** 2014 December 9 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** +** reuse_schema_1 +*/ + + +static char *reuse_schema_thread(int iTid, void *pArg){ + Error err = {0}; /* Error code and message */ + Sqlite db = {0}; /* SQLite database connection */ + int iRep = 0; + + while( !timetostop(&err) ){ + int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_SHARED_SCHEMA; + opendb(&err, &db, "test.db", 0, f); + + execsql_i64(&err, &db, "SELECT count(*) FROM t1"); + sql_script(&err, &db, "ATTACH 'test.db2' AS aux"); + execsql_i64(&err, &db, "SELECT count(*) FROM t1"); + + closedb(&err, &db); + iRep++; + } + + print_and_free_err(&err); + return sqlite3_mprintf("%d", iRep); +} + +static void reuse_schema_1(int nMs){ + Error err = {0}; + Sqlite db = {0}; + Threadset threads = {0}; + + opendb(&err, &db, "test.db", 1, 0); + sql_script(&err, &db, + "CREATE TABLE t1(a, b, c, d);" + "WITH data(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM data WHERE x<100) " + "INSERT INTO t1 SELECT x,x,x,x FROM data;" + ); + closedb(&err, &db); + opendb(&err, &db, "test.db2", 1, 0); + sql_script(&err, &db, +#ifdef SQLITE_ENABLE_FTS5 + "CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, d);" +#else + "CREATE TABLE t2(a, b, c, d);" +#endif + "WITH data(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM data WHERE x<100) " + "INSERT INTO t2 SELECT x*2,x*2,x*2,x*2 FROM data;" + ); + closedb(&err, &db); + + setstoptime(&err, nMs); + + launch_thread(&err, &threads, reuse_schema_thread, 0); + launch_thread(&err, &threads, reuse_schema_thread, 0); + launch_thread(&err, &threads, reuse_schema_thread, 0); + launch_thread(&err, &threads, reuse_schema_thread, 0); + launch_thread(&err, &threads, reuse_schema_thread, 0); + + join_all_threads(&err, &threads); + sqlite3_enable_shared_cache(0); + print_and_free_err(&err); +} Index: test/tt3_stress.c ================================================================== --- test/tt3_stress.c +++ test/tt3_stress.c @@ -19,11 +19,11 @@ */ static char *stress_thread_1(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ sql_script(&err, &db, "CREATE TABLE IF NOT EXISTS t1(a PRIMARY KEY, b)"); clear_error(&err, SQLITE_LOCKED); sql_script(&err, &db, "DROP TABLE IF EXISTS t1"); clear_error(&err, SQLITE_LOCKED); @@ -38,11 +38,11 @@ */ static char *stress_thread_2(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ while( !timetostop(&err) ){ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); sql_script(&err, &db, "SELECT * FROM sqlite_schema;"); clear_error(&err, SQLITE_LOCKED); closedb(&err, &db); } print_and_free_err(&err); @@ -57,11 +57,11 @@ Sqlite db = {0}; /* SQLite database connection */ int i1 = 0; int i2 = 0; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ sql_script(&err, &db, "SELECT * FROM t1 ORDER BY a;"); i1++; if( err.rc ) i2++; clear_error(&err, SQLITE_LOCKED); @@ -80,15 +80,15 @@ Sqlite db = {0}; /* SQLite database connection */ int i1 = 0; int i2 = 0; int iArg = PTR2INT(pArg); - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ if( iArg ){ closedb(&err, &db); - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); } sql_script(&err, &db, "WITH loop(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM loop LIMIT 200) " "INSERT INTO t1 
VALUES(randomblob(60), randomblob(60));" ); @@ -111,16 +111,16 @@ int iArg = PTR2INT(pArg); int i1 = 0; int i2 = 0; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ i64 i = (i1 % 4); if( iArg ){ closedb(&err, &db); - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); } execsql(&err, &db, "DELETE FROM t1 WHERE (rowid % 4)==:i", &i); i1++; if( err.rc ) i2++; clear_error(&err, SQLITE_LOCKED); @@ -263,11 +263,11 @@ static char *stress2_workload19(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ const char *zDb = (const char*)pArg; while( !timetostop(&err) ){ - opendb(&err, &db, zDb, 0); + opendb(&err, &db, zDb, 0, 0); sql_script(&err, &db, "SELECT * FROM sqlite_schema;"); clear_error(&err, SQLITE_LOCKED); closedb(&err, &db); } print_and_free_err(&err); @@ -288,11 +288,11 @@ int i1 = 0; int i2 = 0; while( !timetostop(&err) ){ int cnt; - opendb(&err, &db, pCtx->zDb, 0); + opendb(&err, &db, pCtx->zDb, 0, 0); for(cnt=0; err.rc==SQLITE_OK && cntxProc(&err, &db, i1); i2 += (err.rc==SQLITE_OK); clear_error(&err, SQLITE_LOCKED); i1++; @@ -340,11 +340,11 @@ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; /* To make sure the db file is empty before commencing */ - opendb(&err, &db, zDb, 1); + opendb(&err, &db, zDb, 1, 0); sql_script(&err, &db, "CREATE TABLE IF NOT EXISTS t0(x PRIMARY KEY, y, z);" "CREATE INDEX IF NOT EXISTS i0 ON t0(y);" ); closedb(&err, &db); Index: test/tt3_vacuum.c ================================================================== --- test/tt3_vacuum.c +++ test/tt3_vacuum.c @@ -23,11 +23,11 @@ static char *vacuum1_thread_writer(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ i64 i = 0; - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); while( !timetostop(&err) ){ i++; /* Insert lots of rows. Then delete some. */ execsql(&err, &db, @@ -50,11 +50,11 @@ } static char *vacuum1_thread_vacuumer(int iTid, void *pArg){ Error err = {0}; /* Error code and message */ Sqlite db = {0}; /* SQLite database connection */ - opendb(&err, &db, "test.db", 0); + opendb(&err, &db, "test.db", 0, 0); do{ sql_script(&err, &db, "VACUUM"); clear_error(&err, SQLITE_LOCKED); }while( !timetostop(&err) ); @@ -67,11 +67,11 @@ static void vacuum1(int nMs){ Error err = {0}; Sqlite db = {0}; Threadset threads = {0}; - opendb(&err, &db, "test.db", 1); + opendb(&err, &db, "test.db", 1, 0); sql_script(&err, &db, "CREATE TABLE t1(x PRIMARY KEY, y BLOB);" "CREATE INDEX i1 ON t1(y);" ); closedb(&err, &db); Index: test/unionall.test ================================================================== --- test/unionall.test +++ test/unionall.test @@ -52,10 +52,31 @@ do_execsql_test 1.3 { SELECT a, b FROM i1, t1 WHERE a=x ORDER BY a } {1 one 2 two 5 five 6 six} + +# 2022-10-31 part of ticket 57c47526c34f01e8 +# The queries below were causing an assertion fault in +# the comparison operators of the VDBE. 
+# +reset_db +database_never_corrupt +optimization_control db all 0 +do_execsql_test 1.10 { + CREATE TABLE t0(c0 INT); + INSERT INTO t0 VALUES(0); + CREATE TABLE t1_a(a INTEGER PRIMARY KEY, b TEXT); + INSERT INTO t1_a VALUES(1,'one'); + CREATE TABLE t1_b(c INTEGER PRIMARY KEY, d TEXT); + INSERT INTO t1_b VALUES(2,'two'); + CREATE VIEW t1 AS SELECT a, b FROM t1_a UNION ALL SELECT c, c FROM t1_b; + SELECT * FROM (SELECT t1.a, t1.b AS b, t0.c0 FROM t0, t1); +} {1 one 0 2 2 0} +do_execsql_test 1.11 { + SELECT * FROM (SELECT t1.a, t1.b AS b, t0.c0 FROM t0, t1) WHERE b=2; +} {2 2 0} #------------------------------------------------------------------------- reset_db do_execsql_test 2.1.0 { @@ -361,7 +382,63 @@ WITH c1(x) AS (VALUES(0) UNION ALL SELECT 100+x FROM c1 WHERE x<100 UNION ALL SELECT 1+x FROM c1 WHERE x<1) SELECT x, y, '|' FROM c1 AS x1, (SELECT x+1 AS y FROM c1 WHERE x<1 UNION ALL SELECT 1+x FROM c1 WHERE 1'0',t1) WHERE b=2; +} {2 2 0 {}} +do_execsql_test 8.3 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b=2.0; +} {} +do_execsql_test 8.4 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b='2'; +} {2 2 0 {}} +optimization_control db query-flattener,push-down 0 +do_execsql_test 8.5 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b=2; +} {2 2 0 {}} +do_execsql_test 8.6 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b=2.0; +} {} +do_execsql_test 8.7 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b='2'; +} {2 2 0 {}} +optimization_control db all 0 +do_execsql_test 8.8 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b=2; +} {2 2 0 {}} +do_execsql_test 8.9 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b=2.0; +} {} +do_execsql_test 8.10 { + SELECT * FROM (SELECT t1.a, t1.b, t0.c0 AS c, v0.c0 AS d FROM t0 LEFT JOIN v0 ON v0.c0>'0',t1) WHERE b='2'; +} {2 2 0 {}} + finish_test Index: test/uri.test ================================================================== --- test/uri.test +++ test/uri.test @@ -280,15 +280,15 @@ PRAGMA aux.journal_mode = WAL; INSERT INTO t1 VALUES('x', 'y'); INSERT INTO t2 VALUES('x', 'y'); } lsort [array names ::T1] - } {test.db1 test.db1-journal test.db1-wal test.db1-wal2} + } {test.db1 test.db1-journal test.db1-wal} do_test 5.1.2 { lsort [array names ::T2] - } {test.db2 test.db2-journal test.db2-wal test.db2-wal2} + } {test.db2 test.db2-journal test.db2-wal} db close tvfs1 delete tvfs2 delete } Index: test/vacuum-into.test ================================================================== --- test/vacuum-into.test +++ test/vacuum-into.test @@ -130,7 +130,61 @@ PRAGMA page_size; PRAGMA integrity_check; } } {1024 ok} } + +#------------------------------------------------------------------------- + +testvfs tvfs -default 1 +tvfs filter xSync +tvfs script xSyncCb +proc xSyncCb {method file fileid flags} { + incr ::sync($flags) +} + +reset_db + +do_execsql_test vacuum-into-700 { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); +} + +foreach {tn pragma res} { + 710 { + PRAGMA synchronous = normal + } {normal 2} + 720 { + PRAGMA synchronous = full + } {normal 3} + 730 { + PRAGMA synchronous = off + } {} + 740 { + PRAGMA synchronous = extra; + } {normal 3} + 750 { + PRAGMA 
fullfsync = 1; + PRAGMA synchronous = full; + } {full|dataonly 1 full 2} +} { + + forcedelete test.db2 + array unset ::sync + do_execsql_test vacuum-into-$tn.1 " + $pragma ; + VACUUM INTO 'test.db2' + " + + do_test vacuum-into-$tn.2 { + array get ::sync + } $res +} + +db close +tvfs delete + finish_test + + + ADDED test/vt02.c Index: test/vt02.c ================================================================== --- /dev/null +++ test/vt02.c @@ -0,0 +1,1019 @@ +/* +** This file implements an eponymous, read-only table-valued function +** (a virtual table) designed to be used for testing. We are not aware +** of any practical real-world use case for the virtual table. +** +** This virtual table originated in the TH3 test suite. It is still used +** there, but has now been copied into the public SQLite source tree and +** reused for a variety of testing purpose. The name "vt02" comes from the +** fact that there are many different testing virtual tables in TH3, of which +** this one is the second. +** +** ## SUBJECT TO CHANGE +** +** Because this virtual table is intended for testing, its interface is not +** guaranteed to be stable across releases. Future releases may contain +** changes in the vt02 design and interface. +** +** ## OVERVIEW +** +** The vt02 table-valued function has 10000 rows with 5 data columns. +** Column X contains all integer values between 0 and 9999 inclusive. +** Columns A, B, C, and D contain the individual base-10 digits associated +** with each X value: +** +** X A B C D +** ---- - - - - +** 0 0 0 0 0 +** 1 0 0 0 1 +** 2 0 0 0 2 +** ... +** 4998 4 9 9 8 +** 4999 4 9 9 9 +** 5000 5 0 0 0 +** ... +** 9995 9 9 9 5 +** 9996 9 9 9 6 +** 9997 9 9 9 7 +** +** The xBestIndex method recognizes a variety of equality constraints +** and attempts to optimize its output accordingly. +** +** x=... +** a=... +** a=... AND b=... +** a=... AND b=... AND c=... +** a=... AND b=... AND c=... AND d=... +** +** Various ORDER BY constraints are also recognized and consumed. The +** OFFSET constraint is recognized and consumed. +** +** ## TABLE-VALUED FUNCTION +** +** The vt02 virtual table is eponymous and has two hidden columns, meaning +** that it can functions a table-valued function. The two hidden columns +** are "flags" and "logtab", in that order. The "flags" column can be set +** to an integer where various bits enable or disable behaviors of the +** virtual table. The "logtab" can set to the name of an ordinary SQLite +** table into which is written information about each call to xBestIndex. +** +** The bits of "flags" are as follows: +** +** 0x01 Ignore the aConstraint[].usable flag. This might +** result in the xBestIndex method incorrectly using +** unusable entries in the aConstraint[] array, which +** should result in the SQLite core detecting and +** reporting that the virtual table is not behaving +** to spec. +** +** 0x02 Do not set the orderByConsumed flag, even if it +** could be set. +** +** 0x04 Do not consume the OFFSET constraint, if there is +** one. Instead, let the generated byte-code visit +** and ignore the first few columns of output. +** +** 0x08 Use sqlite3_mprintf() to allocate an idxStr string. +** The string is never used, but allocating it does +** test the idxStr deallocation logic inside of the +** SQLite core. +** +** 0x10 Cause the xBestIndex method to generate an idxNum +** that xFilter does not understand, thus causing +** the OP_VFilter opcode to raise an error. 
+** +** 0x20 Set the omit flag for all equality constraints on +** columns X, A, B, C, and D that are used to limit +** the search. +** +** 0x40 Add all constraints against X,A,B,C,D to the +** vector of results sent to xFilter. Only the first +** few are used, as required by idxNum. +** +** Because these flags take effect during xBestIndex, the RHS of the +** flag= constraint must be accessible. In other words, the RHS of flag= +** needs to be an integer literal, not another column of a join or a +** bound parameter. +** +** ## LOGGING OUTPUT +** +** If the "logtab" columns is set, then each call to the xBestIndex method +** inserts multiple rows into the table identified by "logtab". These +** rows collectively show the content of the sqlite3_index_info object and +** other context associated with the xBestIndex call. +** +** If the table named by "logtab" does not previously exist, it is created +** automatically. The schema for the logtab table is like this: +** +** CREATE TEMP TABLE vt02_log( +** bi INT, -- BestIndex call counter +** vn TEXT, -- Variable Name +** ix INT, -- Index or value +** cn TEXT, -- Column Name +** op INT, -- Opcode or "DESC" value +** ux INT, -- "Usable" flag +** ra BOOLEAN, -- Right-hand side Available. +** rhs ANY, -- Right-Hand Side value +** cs TEXT -- Collating Sequence for this constraint +** ); +** +** Because logging happens during xBestIindex, the RHS value of "logtab" must +** be known to xBestIndex, which means it must be a string literal, not a +** column in a join, or a bound parameter. +** +** ## VIRTUAL TABLE SCHEMA +** +** CREATE TABLE vt02( +** x INT, -- integer between 0 and 9999 inclusive +** a INT, -- The 1000s digit +** b INT, -- The 100s digit +** c INT, -- The 10s digit +** d INT, -- The 1s digit +** flags INT HIDDEN, -- Option flags +** logtab TEXT HIDDEN, -- Name of table into which to log xBestIndex +** ); +** +** ## COMPILING AND RUNNING +** +** This file can also be compiled separately as a loadable extension +** for SQLite (as long as the -DTH3_VERSION is not defined). To compile as a +** loadable extension do his: +** +** gcc -Wall -g -shared -fPIC -I. -DSQLITE_DEBUG vt02.c -o vt02.so +** +** Or on Windows: +** +** cl vt02.c -link -dll -out:vt02.dll +** +** Then load into the CLI using: +** +** .load ./vt02 sqlite3_vt02_init +** +** ## IDXNUM SUMMARY +** +** The xBestIndex method communicates the query plan to xFilter using +** the idxNum value, as follows: +** +** 0 unconstrained +** 1 X=argv[0] +** 2 A=argv[0] +** 3 A=argv[0], B=argv[1] +** 4 A=argv[0], B=argv[1], C=argv[2] +** 5 A=argv[0], B=argv[1], C=argv[2], D=argv[3] +** 6 A=argv[0], D IN argv[2] +** 7 A=argv[0], B=argv[2], D IN argv[3] +** 8 A=argv[0], B=argv[2], C=argv[3], D IN argv[4] +** 1x increment by 10 +** 2x increment by 100 +** 3x increment by 1000 +** 1xx Use offset provided by argv[N] +*/ +#ifndef TH3_VERSION + /* These bits for separate compilation as a loadable extension, only */ + #include "sqlite3ext.h" + SQLITE_EXTENSION_INIT1 + #include + #include + #include +#endif + +/* Forward declarations */ +typedef struct vt02_vtab vt02_vtab; +typedef struct vt02_cur vt02_cur; + +/* +** The complete virtual table +*/ +struct vt02_vtab { + sqlite3_vtab parent; /* Base clase. Must be first. 
*/ + sqlite3 *db; /* Database connection */ + int busy; /* Currently running xBestIndex */ +}; + +#define VT02_IGNORE_USABLE 0x0001 /* Ignore usable flags */ +#define VT02_NO_SORT_OPT 0x0002 /* Do not do any sorting optimizations */ +#define VT02_NO_OFFSET 0x0004 /* Omit the offset optimization */ +#define VT02_ALLOC_IDXSTR 0x0008 /* Alloate an idxStr */ +#define VT02_BAD_IDXNUM 0x0010 /* Generate an invalid idxNum */ + +/* +** A cursor +*/ +struct vt02_cur { + sqlite3_vtab_cursor parent; /* Base class. Must be first */ + sqlite3_int64 i; /* Current entry */ + sqlite3_int64 iEof; /* Indicate EOF when reaching this value */ + int iIncr; /* Amount by which to increment */ + unsigned int mD; /* Mask of allowed D-column values */ +}; + +/* The xConnect method */ +int vt02Connect( + sqlite3 *db, /* The database connection */ + void *pAux, /* Pointer to an alternative schema */ + int argc, /* Number of arguments */ + const char *const*argv, /* Text of the arguments */ + sqlite3_vtab **ppVTab, /* Write the new vtab here */ + char **pzErr /* Error message written here */ +){ + vt02_vtab *pVtab; + int rc; + const char *zSchema = (const char*)pAux; + static const char zDefaultSchema[] = + "CREATE TABLE x(x INT, a INT, b INT, c INT, d INT," + " flags INT HIDDEN, logtab TEXT HIDDEN);"; +#define VT02_COL_X 0 +#define VT02_COL_A 1 +#define VT02_COL_B 2 +#define VT02_COL_C 3 +#define VT02_COL_D 4 +#define VT02_COL_FLAGS 5 +#define VT02_COL_LOGTAB 6 +#define VT02_COL_NONE 7 + + pVtab = sqlite3_malloc( sizeof(*pVtab) ); + if( pVtab==0 ){ + *pzErr = sqlite3_mprintf("out of memory"); + return SQLITE_NOMEM; + } + memset(pVtab, 0, sizeof(*pVtab)); + pVtab->db = db; + rc = sqlite3_declare_vtab(db, zSchema ? zSchema : zDefaultSchema); + if( rc ){ + sqlite3_free(pVtab); + }else{ + *ppVTab = &pVtab->parent; + } + return rc; +} + +/* the xDisconnect method +*/ +int vt02Disconnect(sqlite3_vtab *pVTab){ + sqlite3_free(pVTab); + return SQLITE_OK; +} + +/* Put an error message into the zErrMsg string of the virtual table. +*/ +static void vt02ErrMsg(sqlite3_vtab *pVtab, const char *zFormat, ...){ + va_list ap; + sqlite3_free(pVtab->zErrMsg); + va_start(ap, zFormat); + pVtab->zErrMsg = sqlite3_vmprintf(zFormat, ap); + va_end(ap); +} + + +/* Open a cursor for scanning +*/ +static int vt02Open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + vt02_cur *pCur; + pCur = sqlite3_malloc( sizeof(*pCur) ); + if( pCur==0 ){ + vt02ErrMsg(pVTab, "out of memory"); + return SQLITE_NOMEM; + } + *ppCursor = &pCur->parent; + pCur->i = -1; + return SQLITE_OK; +} + +/* Close a cursor +*/ +static int vt02Close(sqlite3_vtab_cursor *pCursor){ + vt02_cur *pCur = (vt02_cur*)pCursor; + sqlite3_free(pCur); + return SQLITE_OK; +} + +/* Return TRUE if we are at the end of the BVS and there are +** no more entries. +*/ +static int vt02Eof(sqlite3_vtab_cursor *pCursor){ + vt02_cur *pCur = (vt02_cur*)pCursor; + return pCur->i<0 || pCur->i>=pCur->iEof; +} + +/* Advance the cursor to the next row in the table +*/ +static int vt02Next(sqlite3_vtab_cursor *pCursor){ + vt02_cur *pCur = (vt02_cur*)pCursor; + do{ + pCur->i += pCur->iIncr; + if( pCur->i<0 ) pCur->i = pCur->iEof; + }while( (pCur->mD & (1<<(pCur->i%10)))==0 && pCur->iiEof ); + return SQLITE_OK; +} + +/* Rewind a cursor back to the beginning of its scan. +** +** Scanning is always increasing. 
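The idxNum values tabulated below are a positional encoding: the ones digit selects which equality/IN constraints were handed to xFilter, each added multiple of 10 scales the cursor step by another factor of ten, and adding 100 means an OFFSET value is appended to argv[]. A minimal sketch of that decoding (illustrative only; the real decoding is inlined in vt02Filter):

  /* Illustrative decoder for the vt02 idxNum encoding described below. */
  static void vt02DecodeIdxNum(
    int idxNum,        /* Encoded plan chosen by xBestIndex */
    int *pCombo,       /* OUT: 0..8, which constraint combination argv[] holds */
    int *pIncr,        /* OUT: cursor step size: 1, 10, 100 or 1000 */
    int *pUseOffset    /* OUT: true if the final argv[] entry is an OFFSET */
  ){
    *pUseOffset = idxNum>=100;
    if( *pUseOffset ) idxNum -= 100;
    *pIncr = 1;
    while( idxNum>=10 ){ *pIncr *= 10; idxNum -= 10; }
    *pCombo = idxNum;  /* e.g. 123 decodes to combo 3 (A=,B=), step 100, OFFSET */
  }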
+** +** idxNum +** 0 unconstrained +** 1 X=argv[0] +** 2 A=argv[0] +** 3 A=argv[0], B=argv[1] +** 4 A=argv[0], B=argv[1], C=argv[2] +** 5 A=argv[0], B=argv[1], C=argv[2], D=argv[3] +** 6 A=argv[0], D IN argv[2] +** 7 A=argv[0], B=argv[2], D IN argv[3] +** 8 A=argv[0], B=argv[2], C=argv[3], D IN argv[4] +** 1x increment by 10 +** 2x increment by 100 +** 3x increment by 1000 +** 1xx Use offset provided by argv[N] +*/ +static int vt02Filter( + sqlite3_vtab_cursor *pCursor, /* The cursor to rewind */ + int idxNum, /* Search strategy */ + const char *idxStr, /* Not used */ + int argc, /* Not used */ + sqlite3_value **argv /* Not used */ +){ + vt02_cur *pCur = (vt02_cur*)pCursor; /* The vt02 cursor */ + int bUseOffset = 0; /* True to use OFFSET value */ + int iArg = 0; /* argv[] values used so far */ + int iOrigIdxNum = idxNum; /* Original value for idxNum */ + + pCur->iIncr = 1; + pCur->mD = 0x3ff; + if( idxNum>=100 ){ + bUseOffset = 1; + idxNum -= 100; + } + if( idxNum<0 || idxNum>38 ) goto vt02_bad_idxnum; + while( idxNum>=10 ){ + pCur->iIncr *= 10; + idxNum -= 10; + } + if( idxNum==0 ){ + pCur->i = 0; + pCur->iEof = 10000; + }else if( idxNum==1 ){ + pCur->i = sqlite3_value_int64(argv[0]); + if( pCur->i<0 ) pCur->i = -1; + if( pCur->i>9999 ) pCur->i = 10000; + pCur->iEof = pCur->i+1; + if( pCur->i<0 || pCur->i>9999 ) pCur->i = pCur->iEof; + }else if( idxNum>=2 && idxNum<=5 ){ + int i, e, m; + e = idxNum - 2; + assert( e<=argc-1 ); + pCur->i = 0; + for(m=1000, i=0; i<=e; i++, m /= 10){ + sqlite3_int64 v = sqlite3_value_int64(argv[iArg++]); + if( v<0 ) v = 0; + if( v>9 ) v = 9; + pCur->i += m*v; + pCur->iEof = pCur->i+m; + } + }else if( idxNum>=6 && idxNum<=8 ){ + int i, e, m, rc; + sqlite3_value *pIn, *pVal; + e = idxNum - 6; + assert( e<=argc-2 ); + pCur->i = 0; + for(m=1000, i=0; i<=e; i++, m /= 10){ + sqlite3_int64 v; + pVal = 0; + if( sqlite3_vtab_in_first(0, &pVal)!=SQLITE_MISUSE + || sqlite3_vtab_in_first(argv[iArg], &pVal)!=SQLITE_MISUSE + ){ + vt02ErrMsg(pCursor->pVtab, + "unexpected success from sqlite3_vtab_in_first()"); + return SQLITE_ERROR; + } + v = sqlite3_value_int64(argv[iArg++]); + if( v<0 ) v = 0; + if( v>9 ) v = 9; + pCur->i += m*v; + pCur->iEof = pCur->i+m; + } + pCur->mD = 0; + pIn = argv[iArg++]; + assert( sqlite3_value_type(pIn)==SQLITE_NULL ); + for( rc = sqlite3_vtab_in_first(pIn, &pVal); + rc==SQLITE_OK && pVal!=0; + rc = sqlite3_vtab_in_next(pIn, &pVal) + ){ + int eType = sqlite3_value_numeric_type(pVal); + if( eType==SQLITE_FLOAT ){ + double r = sqlite3_value_double(pVal); + if( r<0.0 || r>9.0 || r!=(int)r ) continue; + }else if( eType!=SQLITE_INTEGER ){ + continue; + } + i = sqlite3_value_int(pVal); + if( i<0 || i>9 ) continue; + pCur->mD |= 1<pVtab, "Error from sqlite3_vtab_in_first/next()"); + return rc; + } + }else{ + goto vt02_bad_idxnum; + } + if( bUseOffset ){ + int nSkip = sqlite3_value_int(argv[iArg]); + while( nSkip-- > 0 ) vt02Next(pCursor); + } + return SQLITE_OK; + +vt02_bad_idxnum: + vt02ErrMsg(pCursor->pVtab, "invalid idxNum for vt02: %d", iOrigIdxNum); + return SQLITE_ERROR; +} + +/* Return the Nth column of the current row. 
+*/ +static int vt02Column( + sqlite3_vtab_cursor *pCursor, + sqlite3_context *context, + int N +){ + vt02_cur *pCur = (vt02_cur*)pCursor; + int v = pCur->i; + if( N==VT02_COL_X ){ + sqlite3_result_int(context, v); + }else if( N>=VT02_COL_A && N<=VT02_COL_D ){ + static const int iDivisor[] = { 1, 1000, 100, 10, 1 }; + v = (v/iDivisor[N])%10; + sqlite3_result_int(context, v); + } + return SQLITE_OK; +} + +/* Return the rowid of the current row +*/ +static int vt02Rowid(sqlite3_vtab_cursor *pCursor, sqlite3_int64 *pRowid){ + vt02_cur *pCur = (vt02_cur*)pCursor; + *pRowid = pCur->i+1; + return SQLITE_OK; +} + +/************************************************************************* +** Logging Subsystem +** +** The sqlite3BestIndexLog() routine implements a logging system for +** xBestIndex calls. This code is portable to any virtual table. +** +** sqlite3BestIndexLog() is the main routine, sqlite3RunSql() is a +** helper routine used for running various SQL statements as part of +** creating the log. +** +** These two routines should be portable to other virtual tables. Simply +** extract this code and call sqlite3BestIndexLog() near the end of the +** xBestIndex method in cases where logging is desired. +*/ +/* +** Run SQL on behalf of sqlite3BestIndexLog. +** +** Construct the SQL using the zFormat string and subsequent arguments. +** Or if zFormat is NULL, take the SQL as the first argument after the +** zFormat. In either case, the dynamically allocated SQL string is +** freed after it has been run. If something goes wrong with the SQL, +** then an error is left in pVTab->zErrMsg. +*/ +static void sqlite3RunSql( + sqlite3 *db, /* Run the SQL on this database connection */ + sqlite3_vtab *pVTab, /* Report errors to this virtual table */ + const char *zFormat, /* Format string for SQL, or NULL */ + ... /* Arguments, according to the format string */ +){ + char *zSql; + + va_list ap; + va_start(ap, zFormat); + if( zFormat==0 ){ + zSql = va_arg(ap, char*); + }else{ + zSql = sqlite3_vmprintf(zFormat, ap); + } + va_end(ap); + if( zSql ){ + char *zErrMsg = 0; + (void)sqlite3_exec(db, zSql, 0, 0, &zErrMsg); + if( zErrMsg ){ + if( pVTab->zErrMsg==0 ){ + pVTab->zErrMsg = sqlite3_mprintf("%s in [%s]", zErrMsg, zSql); + } + sqlite3_free(zErrMsg); + } + sqlite3_free(zSql); + } +} + +/* +** Record information about each xBestIndex method call in a separate +** table: +** +** CREATE TEMP TABLE [log-table-name] ( +** bi INT, -- BestIndex call number +** vn TEXT, -- Variable Name +** ix INT, -- Index or value +** cn TEXT, -- Column Name +** op INT, -- Opcode or argvIndex +** ux INT, -- "usable" or "omit" flag +** rx BOOLEAN, -- True if has a RHS value +** rhs ANY, -- The RHS value +** cs TEXT, -- Collating Sequence +** inop BOOLEAN -- True if this is a batchable IN operator +** ); +** +** If an error occurs, leave an error message in pVTab->zErrMsg. +*/ +static void sqlite3BestIndexLog( + sqlite3_index_info *pInfo, /* The sqlite3_index_info object */ + const char *zLogTab, /* Log into this table */ + sqlite3 *db, /* Database connection containing zLogTab */ + const char **azColname, /* Names of columns in the virtual table */ + sqlite3_vtab *pVTab /* Record errors into this object */ +){ + int i, rc; + sqlite3_str *pStr; + int iBI; + + if( sqlite3_table_column_metadata(db,0,zLogTab,0,0,0,0,0,0) ){ + /* The log table does not previously exist. Create it. 
*/ + sqlite3RunSql(db,pVTab, + "CREATE TABLE IF NOT EXISTS temp.\"%w\"(\n" + " bi INT, -- BestIndex call number\n" + " vn TEXT, -- Variable Name\n" + " ix INT, -- Index or value\n" + " cn TEXT, -- Column Name\n" + " op INT, -- Opcode or argvIndex\n" + " ux INT, -- usable for omit flag\n" + " rx BOOLEAN, -- Right-hand side value is available\n" + " rhs ANY, -- RHS value\n" + " cs TEXT, -- Collating Sequence\n" + " inop BOOLEAN -- IN operator capable of batch reads\n" + ");", zLogTab + ); + iBI = 1; + }else{ + /* The log table does already exist. We assume that it has the + ** correct schema and proceed to find the largest prior "bi" value. + ** If the schema is wrong, errors might result. The code is able + ** to deal with this. */ + sqlite3_stmt *pStmt; + char *zSql; + zSql = sqlite3_mprintf("SELECT max(bi) FROM temp.\"%w\"",zLogTab); + if( zSql==0 ){ + sqlite3_free(pVTab->zErrMsg); + pVTab->zErrMsg = sqlite3_mprintf("out of memory"); + return; + } + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + sqlite3_free(zSql); + if( rc ){ + sqlite3_free(pVTab->zErrMsg); + pVTab->zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + iBI = 0; + }else if( sqlite3_step(pStmt)==SQLITE_ROW ){ + iBI = sqlite3_column_int(pStmt, 0)+1; + }else{ + iBI = 1; + } + sqlite3_finalize(pStmt); + } + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix) VALUES(%d,'nConstraint',%d)", + zLogTab, iBI, pInfo->nConstraint + ); + for(i=0; inConstraint; i++){ + sqlite3_value *pVal; + char *zSql; + int iCol = pInfo->aConstraint[i].iColumn; + int op = pInfo->aConstraint[i].op; + const char *zCol; + if( op==SQLITE_INDEX_CONSTRAINT_LIMIT + || op==SQLITE_INDEX_CONSTRAINT_OFFSET + ){ + zCol = ""; + }else if( iCol<0 ){ + zCol = "rowid"; + }else{ + zCol = azColname[iCol]; + } + pStr = sqlite3_str_new(0); + sqlite3_str_appendf(pStr, + "INSERT INTO temp.\"%w\"(bi,vn,ix,cn,op,ux,rx,rhs,cs,inop)" + "VALUES(%d,'aConstraint',%d,%Q,%d,%d", + zLogTab, iBI, + i, + zCol, + op, + pInfo->aConstraint[i].usable); + pVal = 0; + rc = sqlite3_vtab_rhs_value(pInfo, i, &pVal); + assert( pVal!=0 || rc!=SQLITE_OK ); + if( rc==SQLITE_OK ){ + sqlite3_str_appendf(pStr,",1,?1"); + }else{ + sqlite3_str_appendf(pStr,",0,NULL"); + } + sqlite3_str_appendf(pStr,",%Q,%d)", + sqlite3_vtab_collation(pInfo,i), + sqlite3_vtab_in(pInfo,i,-1)); + zSql = sqlite3_str_finish(pStr); + if( zSql==0 ){ + if( pVTab->zErrMsg==0 ) pVTab->zErrMsg = sqlite3_mprintf("out of memory"); + }else{ + sqlite3_stmt *pStmt = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc ){ + if( pVTab->zErrMsg==0 ){ + pVTab->zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + } + }else{ + if( pVal ) sqlite3_bind_value(pStmt, 1, pVal); + sqlite3_step(pStmt); + rc = sqlite3_reset(pStmt); + if( rc && pVTab->zErrMsg==0 ){ + pVTab->zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + } + } + sqlite3_finalize(pStmt); + sqlite3_free(zSql); + } + } + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix) VALUES(%d,'nOrderBy',%d)", + zLogTab, iBI, pInfo->nOrderBy + ); + for(i=0; inOrderBy; i++){ + int iCol = pInfo->aOrderBy[i].iColumn; + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix,cn,op)VALUES(%d,'aOrderBy',%d,%Q,%d)", + zLogTab, iBI, + i, + iCol>=0 ? 
azColname[iCol] : "rowid", + pInfo->aOrderBy[i].desc + ); + } + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix) VALUES(%d,'sqlite3_vtab_distinct',%d)", + zLogTab, iBI, sqlite3_vtab_distinct(pInfo) + ); + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix) VALUES(%d,'colUsed',%lld)", + zLogTab, iBI, pInfo->colUsed + ); + for(i=0; inConstraint; i++){ + int iCol = pInfo->aConstraint[i].iColumn; + int op = pInfo->aConstraint[i].op; + const char *zCol; + if( op==SQLITE_INDEX_CONSTRAINT_LIMIT + || op==SQLITE_INDEX_CONSTRAINT_OFFSET + ){ + zCol = ""; + }else if( iCol<0 ){ + zCol = "rowid"; + }else{ + zCol = azColname[iCol]; + } + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix,cn,op,ux)" + "VALUES(%d,'aConstraintUsage',%d,%Q,%d,%d)", + zLogTab, iBI, + i, + zCol, + pInfo->aConstraintUsage[i].argvIndex, + pInfo->aConstraintUsage[i].omit + ); + } + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix)VALUES(%d,'idxNum',%d)", + zLogTab, iBI, pInfo->idxNum + ); + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix)VALUES(%d,'estimatedCost',%f)", + zLogTab, iBI, pInfo->estimatedCost + ); + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix)VALUES(%d,'estimatedRows',%lld)", + zLogTab, iBI, pInfo->estimatedRows + ); + if( pInfo->idxStr ){ + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix)VALUES(%d,'idxStr',%Q)", + zLogTab, iBI, pInfo->idxStr + ); + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix)VALUES(%d,'needToFreeIdxStr',%d)", + zLogTab, iBI, pInfo->needToFreeIdxStr + ); + } + if( pInfo->nOrderBy ){ + sqlite3RunSql(db,pVTab, + "INSERT INTO temp.\"%w\"(bi,vn,ix)VALUES(%d,'orderByConsumed',%d)", + zLogTab, iBI, pInfo->orderByConsumed + ); + } +} +/* +** End of Logging Subsystem +*****************************************************************************/ + + +/* Find an estimated cost of running a query against vt02. 
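With the logging subsystem above in hand, the whole path can be exercised from SQL once the module is registered: pass a literal table name through the second hidden column and then read the log back. A sketch, assuming the extension has been loaded as described earlier ("bilog" and the function names are arbitrary examples):

  /* Illustrative: run one vt02 query with xBestIndex logging enabled,
  ** then dump what was recorded. */
  #include <stdio.h>
  #include <sqlite3.h>

  static int vt02LogDump(void *pUnused, int nCol, char **azVal, char **azCol){
    int i;
    (void)pUnused;
    for(i=0; i<nCol; i++) printf("%s=%s ", azCol[i], azVal[i] ? azVal[i] : "NULL");
    printf("\n");
    return 0;
  }

  static void vt02LogDemo(sqlite3 *db){
    char *zErr = 0;
    /* Hidden columns are (flags, logtab), in that order.  The logtab value
    ** must be a string literal so that xBestIndex can see it; the log table
    ** is created in "temp" on first use. */
    sqlite3_exec(db,
       "SELECT x FROM vt02(0,'bilog') WHERE a=3 AND b=1 ORDER BY x;",
       0, 0, &zErr);
    sqlite3_exec(db,
       "SELECT bi, vn, ix, cn, op FROM temp.bilog;", vt02LogDump, 0, &zErr);
    sqlite3_free(zErr);
  }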
+*/ +static int vt02BestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + int i; /* Loop counter */ + int isEq[5]; /* Equality constraints on X, A, B, C, and D */ + int isUsed[5]; /* Other non-== cosntraints X, A, B, C, and D */ + int argvIndex = 0; /* Next available argv[] slot */ + int iOffset = -1; /* Constraint for OFFSET */ + void *pX = 0; /* idxStr value */ + int flags = 0; /* RHS value for flags= */ + const char *zLogTab = 0; /* RHS value for logtab= */ + int iFlagTerm = -1; /* Constraint term for flags= */ + int iLogTerm = -1; /* Constraint term for logtab= */ + int iIn = -1; /* Index of the IN constraint */ + vt02_vtab *pSelf; /* This virtual table */ + + pSelf = (vt02_vtab*)pVTab; + if( pSelf->busy ){ + vt02ErrMsg(pVTab, "recursive use of vt02 prohibited"); + return SQLITE_CONSTRAINT; + } + pSelf->busy++; + + + /* Do an initial scan for flags=N and logtab=TAB constraints with + ** usable RHS values */ + for(i=0; inConstraint; i++){ + sqlite3_value *pVal; + if( !pInfo->aConstraint[i].usable ) continue; + if( pInfo->aConstraint[i].op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; + switch( pInfo->aConstraint[i].iColumn ){ + case VT02_COL_FLAGS: + if( sqlite3_vtab_rhs_value(pInfo, i, &pVal)==SQLITE_OK + && sqlite3_value_type(pVal)==SQLITE_INTEGER + ){ + flags = sqlite3_value_int(pVal); + } + iFlagTerm = i; + break; + case VT02_COL_LOGTAB: + if( sqlite3_vtab_rhs_value(pInfo, i, &pVal)==SQLITE_OK + && sqlite3_value_type(pVal)==SQLITE_TEXT + ){ + zLogTab = (const char*)sqlite3_value_text(pVal); + } + iLogTerm = i; + break; + } + } + + /* Do a second scan to actually analyze the index information */ + memset(isEq, 0xff, sizeof(isEq)); + memset(isUsed, 0xff, sizeof(isUsed)); + for(i=0; inConstraint; i++){ + int j = pInfo->aConstraint[i].iColumn; + if( j>=VT02_COL_FLAGS ) continue; + if( pInfo->aConstraint[i].usable==0 + && (flags & VT02_IGNORE_USABLE)==0 ) continue; + if( j<0 ) j = VT02_COL_X; + switch( pInfo->aConstraint[i].op ){ + case SQLITE_INDEX_CONSTRAINT_FUNCTION: + case SQLITE_INDEX_CONSTRAINT_EQ: + isEq[j] = i; + break; + case SQLITE_INDEX_CONSTRAINT_LT: + case SQLITE_INDEX_CONSTRAINT_LE: + case SQLITE_INDEX_CONSTRAINT_GT: + case SQLITE_INDEX_CONSTRAINT_GE: + isUsed[j] = i; + break; + case SQLITE_INDEX_CONSTRAINT_OFFSET: + iOffset = i; + break; + } + } + + /* Use the analysis to find an appropriate query plan */ + if( isEq[0]>=0 ){ + /* A constraint of X= takes priority */ + pInfo->estimatedCost = 1; + pInfo->aConstraintUsage[isEq[0]].argvIndex = ++argvIndex; + if( flags & 0x20 ) pInfo->aConstraintUsage[isEq[0]].omit = 1; + pInfo->idxNum = 1; + }else if( isEq[1]<0 ){ + /* If there is no X= nor A= then we have to do a full scan */ + pInfo->idxNum = 0; + pInfo->estimatedCost = 10000; + }else{ + int v = 1000; + pInfo->aConstraintUsage[isEq[1]].argvIndex = ++argvIndex; + if( flags & 0x20 ) pInfo->aConstraintUsage[isEq[1]].omit = 1; + for(i=2; i<=4 && isEq[i]>=0; i++){ + if( i==4 && sqlite3_vtab_in(pInfo, isEq[4], 0) ) break; + pInfo->aConstraintUsage[isEq[i]].argvIndex = ++argvIndex; + if( flags & 0x20 ) pInfo->aConstraintUsage[isEq[i]].omit = 1; + v /= 10; + } + pInfo->idxNum = i; + if( isEq[4]>=0 && sqlite3_vtab_in(pInfo,isEq[4],1) ){ + iIn = isEq[4]; + pInfo->aConstraintUsage[iIn].argvIndex = ++argvIndex; + if( flags & 0x20 ) pInfo->aConstraintUsage[iIn].omit = 1; + v /= 5; + i++; + pInfo->idxNum += 4; + } + pInfo->estimatedCost = v; + } + pInfo->estimatedRows = (sqlite3_int64)pInfo->estimatedCost; + + /* Attempt to consume the ORDER BY clause. 
Except, always leave + ** orderByConsumed set to 0 for vt02_no_sort_opt. In this way, + ** we can compare vt02 and vt02_no_sort_opt to ensure they get + ** the same answer. + */ + if( pInfo->nOrderBy>0 && (flags & VT02_NO_SORT_OPT)==0 ){ + if( pInfo->idxNum==1 ){ + /* There will only be one row of output. So it is always sorted. */ + pInfo->orderByConsumed = 1; + }else + if( pInfo->aOrderBy[0].iColumn<=0 + && pInfo->aOrderBy[0].desc==0 + ){ + /* First column of order by is X ascending */ + pInfo->orderByConsumed = 1; + }else + if( sqlite3_vtab_distinct(pInfo)>=1 ){ + unsigned int x = 0; + for(i=0; inOrderBy; i++){ + int iCol = pInfo->aOrderBy[i].iColumn; + if( iCol<0 ) iCol = 0; + x |= 1<idxNum += 30; + pInfo->orderByConsumed = 1; + }else if( x==0x06 ){ + /* DISTINCT A,B */ + pInfo->idxNum += 20; + pInfo->orderByConsumed = 1; + }else if( x==0x0e ){ + /* DISTINCT A,B,C */ + pInfo->idxNum += 10; + pInfo->orderByConsumed = 1; + }else if( x & 0x01 ){ + /* DISTINCT X */ + pInfo->orderByConsumed = 1; + }else if( x==0x1e ){ + /* DISTINCT A,B,C,D */ + pInfo->orderByConsumed = 1; + } + }else{ + if( x==0x02 ){ + /* GROUP BY A */ + pInfo->orderByConsumed = 1; + }else if( x==0x06 ){ + /* GROUP BY A,B */ + pInfo->orderByConsumed = 1; + }else if( x==0x0e ){ + /* GROUP BY A,B,C */ + pInfo->orderByConsumed = 1; + }else if( x & 0x01 ){ + /* GROUP BY X */ + pInfo->orderByConsumed = 1; + }else if( x==0x1e ){ + /* GROUP BY A,B,C,D */ + pInfo->orderByConsumed = 1; + } + } + } + } + + if( flags & VT02_ALLOC_IDXSTR ){ + pInfo->idxStr = sqlite3_mprintf("test"); + pInfo->needToFreeIdxStr = 1; + } + if( flags & VT02_BAD_IDXNUM ){ + pInfo->idxNum += 1000; + } + + if( iOffset>=0 ){ + pInfo->aConstraintUsage[iOffset].argvIndex = ++argvIndex; + if( (flags & VT02_NO_OFFSET)==0 + && (pInfo->nOrderBy==0 || pInfo->orderByConsumed) + ){ + pInfo->aConstraintUsage[iOffset].omit = 1; + pInfo->idxNum += 100; + } + } + + + /* Always omit flags= and logtab= constraints to prevent them from + ** interfering with the bytecode. Put them at the end of the argv[] + ** array to keep them out of the way. + */ + if( iFlagTerm>=0 ){ + pInfo->aConstraintUsage[iFlagTerm].omit = 1; + pInfo->aConstraintUsage[iFlagTerm].argvIndex = ++argvIndex; + } + if( iLogTerm>=0 ){ + pInfo->aConstraintUsage[iLogTerm].omit = 1; + pInfo->aConstraintUsage[iLogTerm].argvIndex = ++argvIndex; + } + + /* The 0x40 flag means add all usable constraints to the output set */ + if( flags & 0x40 ){ + for(i=0; inConstraint; i++){ + if( pInfo->aConstraint[i].usable + && pInfo->aConstraintUsage[i].argvIndex==0 + ){ + pInfo->aConstraintUsage[i].argvIndex = ++argvIndex; + if( flags & 0x20 ) pInfo->aConstraintUsage[i].omit = 1; + } + } + } + + + /* Generate the log if requested */ + if( zLogTab ){ + static const char *azColname[] = { + "x", "a", "b", "c", "d", "flags", "logtab" + }; + sqlite3 *db = ((vt02_vtab*)pVTab)->db; + sqlite3BestIndexLog(pInfo, zLogTab, db, azColname, pVTab); + } + pSelf->busy--; + + /* Try to do a memory allocation solely for the purpose of causing + ** an error under OOM testing loops */ + pX = sqlite3_malloc(800); + if( pX==0 ) return SQLITE_NOMEM; + sqlite3_free(pX); + + return pVTab->zErrMsg!=0 ? SQLITE_ERROR : SQLITE_OK; +} + +/* This is the sqlite3_module definition for the the virtual table defined +** by this include file. 
+*/ +const sqlite3_module vt02Module = { + /* iVersion */ 2, + /* xCreate */ 0, /* This is an eponymous table */ + /* xConnect */ vt02Connect, + /* xBestIndex */ vt02BestIndex, + /* xDisconnect */ vt02Disconnect, + /* xDestroy */ vt02Disconnect, + /* xOpen */ vt02Open, + /* xClose */ vt02Close, + /* xFilter */ vt02Filter, + /* xNext */ vt02Next, + /* xEof */ vt02Eof, + /* xColumn */ vt02Column, + /* xRowid */ vt02Rowid, + /* xUpdate */ 0, + /* xBegin */ 0, + /* xSync */ 0, + /* xCommit */ 0, + /* xRollback */ 0, + /* xFindFunction */ 0, + /* xRename */ 0, + /* xSavepoint */ 0, + /* xRelease */ 0, + /* xRollbackTo */ 0 +}; + +static void vt02CoreInit(sqlite3 *db){ + static const char zPkXSchema[] = + "CREATE TABLE x(x INT NOT NULL PRIMARY KEY, a INT, b INT, c INT, d INT," + " flags INT HIDDEN, logtab TEXT HIDDEN);"; + static const char zPkABCDSchema[] = + "CREATE TABLE x(x INT, a INT NOT NULL, b INT NOT NULL, c INT NOT NULL, " + "d INT NOT NULL, flags INT HIDDEN, logtab TEXT HIDDEN, " + "PRIMARY KEY(a,b,c,d));"; + sqlite3_create_module(db, "vt02", &vt02Module, 0); + sqlite3_create_module(db, "vt02pkx", &vt02Module, (void*)zPkXSchema); + sqlite3_create_module(db, "vt02pkabcd", &vt02Module, (void*)zPkABCDSchema); +} + +#ifdef TH3_VERSION +static void vt02_init(th3state *p, int iDb, char *zArg){ + vt02CoreInit(th3dbPointer(p, iDb)); +} +#else +#ifdef _WIN32 +__declspec(dllexport) +#endif +int sqlite3_vt02_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi); + vt02CoreInit(db); + return SQLITE_OK; +} +#endif /* TH3_VERSION */ Index: test/wal.test ================================================================== --- test/wal.test +++ test/wal.test @@ -1173,28 +1173,28 @@ 7 8192 1 8 16384 1 9 32768 1 10 65536 1 11 131072 0 - 12 1016 0 + 11 1016 0 } { if {$::SQLITE_MAX_PAGE_SIZE < $pgsz} { set works 0 } for {set pg 1} {$pg <= 3} {incr pg} { forcecopy testX.db test.db forcedelete test.db-wal - + # Check that the database now exists and consists of three pages. And # that there is no associated wal file. # do_test wal-18.2.$tn.$pg.1 { file exists test.db-wal } 0 do_test wal-18.2.$tn.$pg.2 { file exists test.db } 1 do_test wal-18.2.$tn.$pg.3 { file size test.db } [expr 1024*3] - + do_test wal-18.2.$tn.$pg.4 { # Create a wal file that contains a single frame (database page # number $pg) with the commit flag set. The frame checksum is # correct, but the contents of the database page are corrupt. @@ -1222,20 +1222,20 @@ fconfigure $fd -encoding binary -translation binary puts -nonewline $fd $walhdr puts -nonewline $fd $framehdr puts -nonewline $fd $framebody close $fd - + file size test.db-wal } [wal_file_size 1 $pgsz] - + do_test wal-18.2.$tn.$pg.5 { sqlite3 db test.db set rc [catch { db one {PRAGMA integrity_check} } msg] expr { $rc!=0 || $msg!="ok" } } $works - + db close } } #------------------------------------------------------------------------- Index: test/wal2.test ================================================================== --- test/wal2.test +++ test/wal2.test @@ -32,10 +32,47 @@ ifcapable !dirsync { incr sqlite_sync_count $adj } } } + +proc set_tvfs_hdr {file args} { + + # Set $nHdr to the number of bytes in the wal-index header: + set nHdr 48 + set nInt [expr {$nHdr/4}] + + if {[llength $args]>2} { + error {wrong # args: should be "set_tvfs_hdr fileName ?val1? 
?val2?"} + } + + set blob [tvfs shm $file] + if {$::tcl_platform(byteOrder)=="bigEndian"} {set fmt I} {set fmt i} + + if {[llength $args]} { + set ia [lindex $args 0] + set ib $ia + if {[llength $args]==2} { + set ib [lindex $args 1] + } + binary scan $blob a[expr $nHdr*2]a* dummy tail + set blob [binary format ${fmt}${nInt}${fmt}${nInt}a* $ia $ib $tail] + tvfs shm $file $blob + } + + binary scan $blob ${fmt}${nInt} ints + return $ints +} + +proc incr_tvfs_hdr {file idx incrval} { + set ints [set_tvfs_hdr $file] + set v [lindex $ints $idx] + incr v $incrval + lset ints $idx $v + set_tvfs_hdr $file $ints +} + #------------------------------------------------------------------------- # Test case wal2-1.*: # # Set up a small database containing a single table. The database is not DELETED test/wal2big.test Index: test/wal2big.test ================================================================== --- test/wal2big.test +++ /dev/null @@ -1,71 +0,0 @@ -# 2017 September 19 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2big -ifcapable !wal {finish_test ; return } - -do_execsql_test 1.0 { - CREATE TABLE t1(a, b, c); - CREATE INDEX t1a ON t1(a); - CREATE INDEX t1b ON t1(b); - CREATE INDEX t1c ON t1(c); - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 10000000; - - WITH s(i) AS ( - SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<200000 - ) - INSERT INTO t1 SELECT random(), random(), random() FROM s; -} {wal2 10000000} - -do_execsql_test 1.1 { - WITH s(i) AS ( - SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<200000 - ) - INSERT INTO t1 SELECT random(), random(), random() FROM s; -} - -do_test 1.2 { - list [expr [file size test.db-wal]>10000000] \ - [expr [file size test.db-wal2]>10000000] -} {1 1} - -do_test 1.3 { - sqlite3 db2 test.db - execsql { - SELECT count(*) FROM t1; - PRAGMA integrity_check; - } db2 -} {400000 ok} - -do_test 1.4 { - db2 close - forcecopy test.db test.db2 - forcecopy test.db-wal test.db2-wal - forcecopy test.db-wal2 test.db2-wal2 - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - PRAGMA integrity_check; - } -} {400000 ok} - -finish_test DELETED test/wal2concurrent.test Index: test/wal2concurrent.test ================================================================== --- test/wal2concurrent.test +++ /dev/null @@ -1,164 +0,0 @@ -# 2018 December 6 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. 
-# -#*********************************************************************** -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -set ::testprefix wal2concurrent - -ifcapable !concurrent { - finish_test - return -} - - -#------------------------------------------------------------------------- -# Warm-body test. -# -foreach tn {1 2} { - reset_db - sqlite3 db2 test.db - do_execsql_test 1.0 { - PRAGMA page_size = 1024; - CREATE TABLE t1(x); - CREATE TABLE t2(y); - PRAGMA journal_size_limit = 5000; - PRAGMA journal_mode = wal2; - } {5000 wal2} - - do_execsql_test 1.1 { - INSERT INTO t1 VALUES(1); - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(2); - } {} - - do_test 1.2 { - execsql { - PRAGMA journal_size_limit = 5000; - INSERT INTO t1 VALUES(3) - } db2 - catchsql { COMMIT } - } {1 {database is locked}} - - do_catchsql_test 1.3 { COMMIT } {1 {database is locked}} - do_catchsql_test 1.4 { ROLLBACK } {0 {}} - - do_test 1.5 { - list [file size test.db-wal] [file size test.db-wal2] - } {2128 0} - - do_execsql_test 1.6 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(2); - } {} - - do_test 1.7 { - execsql { INSERT INTO t2 VALUES(randomblob(4000)) } db2 - list [file size test.db-wal] [file size test.db-wal2] - } {7368 0} - - if {$tn==1} { - do_test 1.8 { - execsql { - INSERT INTO t2 VALUES(1); - INSERT INTO t1 VALUES(5); - } db2 - list [file size test.db-wal] [file size test.db-wal2] - } {7368 2128} - - do_catchsql_test 1.9 { COMMIT } {1 {database is locked}} - do_catchsql_test 1.10 { ROLLBACK } {0 {}} - db close - sqlite3 db test.db - do_execsql_test 1.11 { SELECT * FROM t1 } {1 3 5} - do_execsql_test 1.12 { SELECT count(*) FROM t2 } {2} - } else { - do_test 1.8 { - execsql { - INSERT INTO t2 VALUES(1); - } db2 - list [file size test.db-wal] [file size test.db-wal2] - } {7368 1080} - - do_catchsql_test 1.9 { COMMIT } {0 {}} - db close - sqlite3 db test.db - do_execsql_test 1.11 { SELECT * FROM t1 } {1 3 2} - do_execsql_test 1.12 { SELECT count(*) FROM t2 } {2} - - do_test 1.13 { - list [file size test.db-wal] [file size test.db-wal2] - } {7368 2128} - } -} - -do_multiclient_test tn { - do_test 2.$tn.1 { - sql1 { - PRAGMA auto_vacuum = OFF; - CREATE TABLE t1(x UNIQUE); - CREATE TABLE t2(x UNIQUE); - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 15000; - } - } {wal2 15000} - - do_test 2.$tn.2 { - sql1 { - WITH s(i) AS ( - SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<=10 - ) - INSERT INTO t1 SELECT randomblob(800) FROM s; - } - } {} - - do_test 2.$tn.3 { - sql1 { DELETE FROM t1 WHERE (rowid%4)==0 } - list [expr [file size test.db-wal]>15000] \ - [expr [file size test.db-wal2]>15000] - } {1 0} - - do_test 2.$tn.4 { - sql1 { PRAGMA wal_checkpoint; } - sql1 { - BEGIN CONCURRENT; - INSERT INTO t1 VALUES(randomblob(800)); - } - } {} - - do_test 2.$tn.5 { - sql2 { - PRAGMA journal_size_limit = 15000; - INSERT INTO t2 VALUES(randomblob(800)); - INSERT INTO t2 VALUES(randomblob(800)); - INSERT INTO t2 VALUES(randomblob(800)); - INSERT INTO t2 VALUES(randomblob(800)); - INSERT INTO t2 VALUES(randomblob(800)); - DELETE FROM t2; - } - list [expr [file size test.db-wal]>15000] \ - [expr [file size test.db-wal2]>15000] - } {1 1} - - do_test 2.$tn.6 { - sql1 { - INSERT INTO t1 VALUES(randomblob(800)); - COMMIT; - PRAGMA integrity_check; - } - } {ok} -} - - - -finish_test - DELETED test/wal2fault.test Index: test/wal2fault.test ================================================================== --- test/wal2fault.test +++ /dev/null @@ -1,52 +0,0 @@ -# 2010 May 03 
-# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/malloc_common.tcl -source $testdir/lock_common.tcl - -ifcapable !wal {finish_test ; return } -set testprefix wal2fault - -do_execsql_test 1.0 { - CREATE TABLE t1(x,y); - PRAGMA journal_mode = wal2; - WITH s(i) AS ( SELECT 100 UNION ALL SELECT i-1 FROM s WHERE (i-1)>0 ) - INSERT INTO t1 SELECT i, randomblob(i) FROM s; - WITH s(i) AS ( SELECT 100 UNION ALL SELECT i-1 FROM s WHERE (i-1)>0 ) - INSERT INTO t1 SELECT i, randomblob(i) FROM s; -} {wal2} - -do_test 1.1 { - expr [file size test.db-wal]>10000 -} {1} -faultsim_save_and_close - -do_faultsim_test 1 -prep { - faultsim_restore_and_reopen - execsql { - PRAGMA journal_size_limit = 10000; - SELECT count(*) FROM sqlite_master; - } -} -body { - execsql { - INSERT INTO t1 VALUES(1, 2); - } -} -test { - faultsim_test_result {0 {}} -} - -finish_test DELETED test/wal2lock.test Index: test/wal2lock.test ================================================================== --- test/wal2lock.test +++ /dev/null @@ -1,106 +0,0 @@ -# 2018 December 15 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2lock -ifcapable !wal {finish_test ; return } - -db close -testvfs tvfs -sqlite3 db test.db -vfs tvfs - -do_execsql_test 1.0 { - PRAGMA journal_mode = wal2; - CREATE TABLE y1(y, yy); - CREATE INDEX y1y ON y1(y); - CREATE INDEX y1yy ON y1(yy); - INSERT INTO y1 VALUES(1, 2), (3, 4), (5, 6); -} {wal2} - -tvfs script vfs_callback -tvfs filter xShmLock - -set ::lock [list] -proc vfs_callback {func file name lock} { - lappend ::lock $lock - return SQLITE_OK -} - -do_execsql_test 1.1.1 { - SELECT * FROM y1 -} {1 2 3 4 5 6} -do_test 1.1.2 { - set ::lock -} {{4 1 lock shared} {4 1 unlock shared}} - -set ::bFirst 1 -proc vfs_callback {func file name lock} { - if {$::bFirst} { - set ::bFirst 0 - return SQLITE_BUSY - } - return SQLITE_OK -} -do_execsql_test 1.2 { - SELECT * FROM y1 -} {1 2 3 4 5 6} - -set ::bFirst 1 -proc vfs_callback {func file name lock} { - if {$::bFirst} { - set ::bFirst 0 - return SQLITE_IOERR - } - return SQLITE_OK -} -do_catchsql_test 1.3 { - SELECT * FROM y1 -} {1 {disk I/O error}} - -puts "# Warning: This next test case causes SQLite to call xSleep(1) 100 times." -puts "# Normally this equates to a delay of roughly 10 seconds, but if SQLite" -puts "# is built on unix without HAVE_USLEEP defined, it may be much longer." 
-proc vfs_callback {func file name lock} { return SQLITE_BUSY } -do_catchsql_test 1.4 { - SELECT * FROM y1 -} {1 {locking protocol}} -proc vfs_callback {func file name lock} { return SQLITE_OK } - -sqlite3 db2 test.db -vfs tvfs -set ::bFirst 1 - -proc vfs_callback {func file name lock} { - if {$::bFirst} { - set ::bFirst 0 - db2 eval { INSERT INTO y1 VALUES(7, 8) } - } -} - -do_execsql_test 1.5.1 { - SELECT * FROM y1 -} {1 2 3 4 5 6 7 8} -do_execsql_test 1.5.2 { - SELECT * FROM y1 -} {1 2 3 4 5 6 7 8} - -db close -db2 close -tvfs delete -finish_test DELETED test/wal2openclose.test Index: test/wal2openclose.test ================================================================== --- test/wal2openclose.test +++ /dev/null @@ -1,81 +0,0 @@ -# 2017 September 19 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2openclose -ifcapable !wal {finish_test ; return } - -do_execsql_test 1.0 { - CREATE TABLE t1(a, b, c); - PRAGMA journal_mode = wal2; - PRAGMA wal_autocheckpoint = 0; - PRAGMA journal_size_limit = 75000; -} {wal2 0 75000} - -do_test 1.1 { - for {set ii 1} {$ii <= 200} {incr ii} { - execsql { - INSERT INTO t1 VALUES($ii, $ii, $ii); - } - } - expr ([file size test.db-wal2] - 75000) > 30000 -} {1} - -do_test 1.2 { - db close - list [file exists test.db-wal] [file exists test.db-wal2] -} {0 0} - -sqlite3 db test.db -do_execsql_test 1.3 { - SELECT sum(c) FROM t1 -} {20100} -db close - -#------------------------------------------------------------------------- -reset_db -do_execsql_test 2.0 { - CREATE TABLE t1(a, b, c); - PRAGMA journal_mode = wal2; - INSERT INTO t1 VALUES(1, 2, 3); -} {wal2} -db_save_and_close - -db_restore_and_reopen -do_execsql_test 2.1 { - SELECT * FROM t1; -} {1 2 3} - -do_test 2.2 { - sqlite3 db2 test.db - db2 eval {INSERT INTO t1 VALUES(4, 5, 6)} - db2 close -} {} - -breakpoint -db close -sqlite3 db test.db -do_execsql_test 2.2 { - SELECT * FROM t1; -} {1 2 3 4 5 6} - - - -finish_test DELETED test/wal2recover.test Index: test/wal2recover.test ================================================================== --- test/wal2recover.test +++ /dev/null @@ -1,265 +0,0 @@ -# 2018 December 13 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. 
-# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2recover -ifcapable !wal {finish_test ; return } - -proc db_copy {from to} { - forcecopy $from $to - forcecopy ${from}-wal ${to}-wal - forcecopy ${from}-wal2 ${to}-wal2 -} - -do_execsql_test 1.0 { - CREATE TABLE t1(a, b, c); - CREATE INDEX t1a ON t1(a); - CREATE INDEX t1b ON t1(b); - CREATE INDEX t1c ON t1(c); - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 15000; - PRAGMA wal_autocheckpoint = 0; -} {wal2 15000 0} - -do_test 1.1 { - for {set i 1} {$i <= 1000} {incr i} { - execsql { INSERT INTO t1 VALUES(random(), random(), random()) } - db_copy test.db test.db2 - sqlite3 db2 test.db - set res [execsql { - SELECT count(*) FROM t1; - PRAGMA integrity_check; - } db2] - db2 close - if {$res != [list $i ok]} { - error "failure on iteration $i" - } - } - set {} {} -} {} - -#-------------------------------------------------------------------------- -reset_db -do_execsql_test 2.0 { - CREATE TABLE t1(x UNIQUE); - CREATE TABLE t2(x UNIQUE); - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 10000; - PRAGMA wal_autocheckpoint = 0; - BEGIN; - INSERT INTO t1 VALUES(randomblob(4000)); - INSERT INTO t1 VALUES(randomblob(4000)); - INSERT INTO t1 VALUES(randomblob(4000)); - COMMIT; - BEGIN; - INSERT INTO t2 VALUES(randomblob(4000)); - INSERT INTO t2 VALUES(randomblob(4000)); - INSERT INTO t2 VALUES(randomblob(4000)); - COMMIT; -} {wal2 10000 0} -do_test 2.0.1 { - list [file size test.db] [file size test.db-wal] [file size test.db-wal2] -} {5120 28328 28328} - -# Test recovery with both wal files intact. -# -do_test 2.1 { - db_copy test.db test.db2 - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - PRAGMA integrity_check; - } db2 -} {3 3 ok} - -do_test 2.2 { - db2 close - db_copy test.db test.db2 - hexio_write test.db2-wal 16 12345678 - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - } db2 -} {0 3} - -do_test 2.3 { - db2 close - db_copy test.db test.db2 - hexio_write test.db2-wal2 16 12345678 - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - PRAGMA integrity_check; - } db2 -} {3 0 ok} - -do_test 2.4 { - db2 close - db_copy test.db test.db2 - forcecopy test.db-wal test.db2-wal2 - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - PRAGMA integrity_check; - } db2 -} {3 0 ok} - -do_test 2.5 { - db2 close - db_copy test.db test.db2 - forcecopy test.db-wal test.db2-wal2 - forcecopy test.db-wal2 test.db2-wal - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - PRAGMA integrity_check; - } db2 -} {3 3 ok} - -do_test 2.6 { - db2 close - db_copy test.db test.db2 - forcecopy test.db-wal test.db2-wal2 - close [open test.db-wal w] - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - PRAGMA integrity_check; - } db2 -} {3 0 ok} - -do_test 2.7 { - db2 close - db_copy test.db test.db2 - forcedelete test.db2-wal - sqlite3 db2 test.db2 - execsql { - SELECT count(*) FROM t1; - SELECT count(*) FROM t2; - PRAGMA integrity_check; - } db2 -} {0 0 ok} - -#------------------------------------------------------------------------- -# -reset_db -do_execsql_test 3.0 { - CREATE TABLE t1(a TEXT, b TEXT, c TEXT); - CREATE INDEX t1a ON t1(a); - CREATE INDEX t1b ON t1(b); - CREATE INDEX t1c ON t1(c); - 
PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 10000; - PRAGMA wal_autocheckpoint = 0; - PRAGMA cache_size = 5; -} {wal2 10000 0} - -do_execsql_test 3.1 { - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 200) - INSERT INTO t1 SELECT i, i, i FROM s; - - INSERT INTO t1 VALUES(201, 201, 201); -} {} - -do_test 3.2 { - list [file size test.db] [file size test.db-wal] [file size test.db-wal2] -} {5120 15752 4224} - -do_test 3.3 { - forcecopy test.db test.db2 - forcecopy test.db-wal test.db2-wal - forcecopy test.db-wal2 test.db2-wal2 - sqlite3 db2 test.db2 - execsql { - PRAGMA journal_size_limit = 10000; - PRAGMA wal_autocheckpoint = 0; - PRAGMA cache_size = 5; - BEGIN; - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 200) - INSERT INTO t1 SELECT i, i, i FROM s; - } db2 - list [file size test.db2] [file size test.db2-wal] [file size test.db2-wal2] -} {5120 15752 23088} - -do_test 3.4 { - set fd [open test.db2-shm] - fconfigure $fd -encoding binary -translation binary - set data [read $fd] - close $fd - - set fd [open test.db-shm w] - fconfigure $fd -encoding binary -translation binary - puts -nonewline $fd $data - close $fd - - execsql { - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 10) - INSERT INTO t1 SELECT i, i, i FROM s; - SELECT count(*) FROM t1; - PRAGMA integrity_check; - } -} {211 ok} - -do_test 3.5 { - list [file size test.db] [file size test.db-wal] [file size test.db-wal2] -} {5120 15752 18896} - -#------------------------------------------------------------------------- -# -reset_db -do_execsql_test 4.0 { - PRAGMA journal_mode = wal2; - CREATE TABLE xyz(x, y, z); - INSERT INTO xyz VALUES('x', 'y', 'z'); -} {wal2} -db close -do_test 4.1 { - close [open test.db-wal w] - file mkdir test.db-wal2 - sqlite3 db test.db - catchsql { SELECT * FROM xyz } -} {1 {unable to open database file}} -db close -file delete test.db-wal2 - -do_test 4.2 { - sqlite3 db test.db - execsql { - INSERT INTO xyz VALUES('a', 'b', 'c'); - } - forcecopy test.db test.db2 - forcecopy test.db-wal test.db2-wal - forcedelete test.db2-wal2 - file mkdir test.db2-wal2 - sqlite3 db2 test.db2 - catchsql { SELECT * FROM xyz } db2 -} {1 {unable to open database file}} -db2 close -file delete test.db2-wal2 - - -finish_test - DELETED test/wal2recover2.test Index: test/wal2recover2.test ================================================================== --- test/wal2recover2.test +++ /dev/null @@ -1,312 +0,0 @@ -# 2018 December 13 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. 
-# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2recover2 -ifcapable !wal {finish_test ; return } - -do_execsql_test 1.0 { - CREATE TABLE t1(x); - CREATE TABLE t2(x); - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1500 ) - INSERT INTO t1 SELECT i FROM s; - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1500 ) - INSERT INTO t2 SELECT i FROM s; - - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 10000; -} {wal2 10000} - -set ::L 1125750 -set ::M 1126500 -set ::H 1127250 - -do_execsql_test 1.1 { - UPDATE t1 SET x=x+1; - UPDATE t2 SET x=x+1 WHERE rowid<=750; - - SELECT sum(x) FROM t1; - SELECT sum(x) FROM t2; -} [list $H $M] - -do_test 1.2 { - list [file size test.db] [file size test.db-wal] [file size test.db-wal2] -} {31744 14704 7368} - -proc cksum {zIn data} { - if {[string length $zIn]==0} { - set s0 0 - set s1 0 - } else { - set s0 [hexio_get_int [string range $zIn 0 7]] - set s1 [hexio_get_int [string range $zIn 8 15]] - } - set n [expr [string length $data] / 8] - - for {set i 0} {$i < $n} {incr i 2} { - set x0 [hexio_get_int -l [string range $data [expr $i*8] [expr $i*8+7]]] - set x1 [hexio_get_int -l [string range $data [expr $i*8+8] [expr $i*8+8+7]]] - - set s0 [expr ($s0 + $x0 + $s1) & 0xFFFFFFFF] - set s1 [expr ($s1 + $x1 + $s0) & 0xFFFFFFFF] - } - - return "[hexio_render_int32 $s0][hexio_render_int32 $s1]" -} - -proc fix_wal_cksums {file} { - # Fix the checksum on the wal header. - set data [hexio_read $file 0 32] - set cksum [cksum {} [string range $data 0 47]] - set salt [hexio_read $file 16 8] - hexio_write $file 24 $cksum - - # Fix the checksums for all pages in the wal file. - set pgsz [hexio_get_int [hexio_read $file 8 4]] - set sz [file size $file] - for {set off 32} {$off < $sz} {incr off [expr $pgsz+24]} { - set e [hexio_read $file $off 8] - set cksum [cksum $cksum $e] - - set p [hexio_read $file [expr $off+24] $pgsz] - set cksum [cksum $cksum $p] - - hexio_write $file [expr $off+8] $salt - hexio_write $file [expr $off+16] $cksum - } -} - -proc wal_incr_hdrfield {file field} { - switch -- $field { - nCkpt { set offset 12 } - salt0 { set offset 16 } - salt1 { set offset 20 } - default { - error "unknown field $field - should be \"nCkpt\", \"salt0\" or \"salt1\"" - } - } - - # Increment the value in the wal header. - set v [hexio_get_int [hexio_read $file $offset 4]] - incr v - hexio_write $file $offset [hexio_render_int32 $v] - - # Fix various checksums - fix_wal_cksums $file -} - -proc wal_set_nckpt {file val} { - # Increment the value in the wal header. 
- hexio_write $file 12 [hexio_render_int32 $val] - - # Fix various checksums - fix_wal_cksums $file -} - -proc wal_set_follow {file prevfile} { - set pgsz [hexio_get_int [hexio_read $prevfile 8 4]] - set sz [file size $prevfile] - set cksum [hexio_read $prevfile [expr $sz-$pgsz-8] 8] - - hexio_write $file 16 $cksum - fix_wal_cksums $file -} - -foreach {tn file field} { - 1 test.db2-wal salt0 - 2 test.db2-wal salt1 - 3 test.db2-wal nCkpt - 4 test.db2-wal2 salt0 - 5 test.db2-wal2 salt1 - 6 test.db2-wal2 nCkpt -} { - do_test 1.3.$tn { - forcecopy test.db test.db2 - forcecopy test.db-wal test.db2-wal - forcecopy test.db-wal2 test.db2-wal2 - wal_incr_hdrfield $file $field - sqlite3 db2 test.db2 - execsql { - SELECT sum(x) FROM t1; - SELECT sum(x) FROM t2; - } db2 - } [list $H $L] - db2 close -} - -do_test 1.4 { - forcecopy test.db test.db2 - forcecopy test.db-wal2 test.db2-wal - forcedelete test.db2-wal2 - sqlite3 db2 test.db2 - execsql { - SELECT sum(x) FROM t1; - SELECT sum(x) FROM t2; - } db2 -} [list $L $M] - -do_test 1.5 { - forcecopy test.db test.db2 - forcecopy test.db-wal2 test.db2-wal - forcecopy test.db-wal test.db2-wal2 - sqlite3 db2 test.db2 - execsql { - SELECT sum(x) FROM t1; - SELECT sum(x) FROM t2; - } db2 -} [list $H $M] - -foreach {tn file field} { - 1 test.db2-wal salt0 - 2 test.db2-wal salt1 - 3 test.db2-wal2 salt0 - 4 test.db2-wal2 salt1 -} { - do_test 1.6.$tn { - forcecopy test.db test.db2 - forcecopy test.db-wal2 test.db2-wal - forcecopy test.db-wal test.db2-wal2 - wal_incr_hdrfield $file $field - sqlite3 db2 test.db2 - execsql { - SELECT sum(x) FROM t1; - SELECT sum(x) FROM t2; - } db2 - } [list $H $L] - db2 close -} - -foreach {tn nCkpt1 nCkpt2 res} [list \ - 1 2 1 "$H $M" \ - 2 2 2 "$L $M" \ - 3 3 1 "$H $L" \ - 4 15 14 "$H $M" \ - 5 0 15 "$H $M" \ - 6 1 15 "$L $M" \ -] { - do_test 1.7.$tn { - forcecopy test.db test.db2 - forcecopy test.db-wal2 test.db2-wal - forcecopy test.db-wal test.db2-wal2 - - wal_set_nckpt test.db2-wal2 $nCkpt2 - wal_set_nckpt test.db2-wal $nCkpt1 - wal_set_follow test.db2-wal test.db2-wal2 - - - sqlite3 db2 test.db2 - execsql { - SELECT sum(x) FROM t1; - SELECT sum(x) FROM t2; - } db2 - } $res - db2 close -} - - -#------------------------------------------------------------------------- -reset_db -do_execsql_test 1.8.1 { - PRAGMA autovacuum = 0; - PRAGMA page_size = 4096; - CREATE TABLE t1(x); - CREATE TABLE t2(x); - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1500 ) - INSERT INTO t1 SELECT i FROM s; - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1500 ) - INSERT INTO t2 SELECT i FROM s; - - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 10000; - - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1500 ) - INSERT INTO t2 SELECT i FROM s; -} {wal2 10000} - -do_test 1.8.2 { - list [file size test.db-wal] [file size test.db-wal2] -} {24752 0} - -do_execsql_test 1.8.3 { PRAGMA user_version = 123 } -do_test 1.8.4 { - list [file size test.db-wal] [file size test.db-wal2] -} {24752 4152} - -do_test 1.8.5 { - hexio_write test.db-wal2 [expr 56+16] 0400 - fix_wal_cksums test.db-wal2 -} {} - -do_test 1.8.6 { - forcecopy test.db test.db2 - forcecopy test.db-wal test.db2-wal - forcecopy test.db-wal2 test.db2-wal2 - sqlite3 db2 test.db2 - catchsql { SELECT * FROM sqlite_master } db2 -} {1 {malformed database schema (?)}} -db2 close - -#------------------------------------------------------------------------- -reset_db -do_execsql_test 1.0 { - CREATE TABLE t1(a, b, c); - CREATE INDEX t1a ON t1(a); - CREATE 
INDEX t1b ON t1(b); - CREATE INDEX t1c ON t1(c); - PRAGMA journal_mode = wal2; - - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - PRAGMA journal_size_limit = 5000; - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); -} {wal2 5000} - -do_test 2.1 { - forcecopy test.db test.db2 - forcecopy test.db-wal2 test.db2-wal - forcecopy test.db-wal test.db2-wal2 - - hexio_write test.db2-wal 5000 1234567890 -} {5} - -do_test 2.2 { - sqlite3 db2 test.db2 - breakpoint - execsql { - SELECT count(*) FROM t1; - PRAGMA integrity_check - } db2 -} {4 ok} - -do_test 2.3 { - execsql { - INSERT INTO t1 VALUES(randomblob(50), randomblob(50), randomblob(50)); - SELECT count(*) FROM t1; - PRAGMA integrity_check - } db2 -} {5 ok} - - -finish_test DELETED test/wal2recover3.test Index: test/wal2recover3.test ================================================================== --- test/wal2recover3.test +++ /dev/null @@ -1,52 +0,0 @@ -# 2022 June 28 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2recover3 -ifcapable !wal {finish_test ; return } - -do_execsql_test 1.0 { - CREATE TABLE t1(x); - CREATE TABLE t2(x); - PRAGMA journal_mode = wal2; - PRAGMA wal_autocheckpoint = 0; - PRAGMA journal_size_limit = 10000; -} {wal2 0 10000} - -do_execsql_test 1.1 { - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1500 ) - INSERT INTO t1 SELECT i FROM s; - WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1500 ) - INSERT INTO t2 SELECT i FROM s; -} - -db_save_and_close -set fd [open sv_test.db-wal2 r+] -seek $fd 4000 -puts -nonewline $fd 0 -close $fd - -db_restore_and_reopen -do_execsql_test 1.2 { - SELECT sql FROM sqlite_schema; -} {{CREATE TABLE t1(x)} {CREATE TABLE t2(x)}} - -finish_test - DELETED test/wal2rewrite.test Index: test/wal2rewrite.test ================================================================== --- test/wal2rewrite.test +++ /dev/null @@ -1,92 +0,0 @@ -# 2017 September 19 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. 
-# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2rewrite -ifcapable !wal {finish_test ; return } - -proc filesize {filename} { - if {[file exists $filename]} { - return [file size $filename] - } - return 0 -} - -foreach {tn jrnlmode} { - 1 wal - 2 wal2 -} { - reset_db - execsql "PRAGMA journal_mode = $jrnlmode" - do_execsql_test $tn.1 { - PRAGMA journal_size_limit = 10000; - PRAGMA cache_size = 5; - PRAGMA wal_autocheckpoint = 10; - - CREATE TABLE t1(a INTEGER PRIMARY KEY, b INTEGER, c BLOB); - CREATE INDEX t1b ON t1(b); - CREATE INDEX t1c ON t1(c); - - WITH s(i) AS ( - SELECT 1 UNION SELECT i+1 FROM s WHERE i<10 - ) - INSERT INTO t1 SELECT i, i, randomblob(800) FROM s; - } {10000 10} - - for {set i 0} {$i < 4} {incr i} { - do_execsql_test $tn.$i.1 { - UPDATE t1 SET c=randomblob(800) WHERE (b%10)==5 AND ($i%2) - } - do_execsql_test $tn.$i.2 { - BEGIN; - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - UPDATE t1 SET b=b+10, c=randomblob(800); - } - execsql COMMIT - - do_test $tn.$i.3 { expr [filesize test.db-wal] < 100000 } 1 - do_test $tn.$i.4 { expr [filesize test.db-wal2] < 100000 } 1 - - set sum [db eval {SELECT sum(b), md5sum(c) FROM t1}] - - do_test $tn.$i.5 { - foreach f [glob -nocomplain test.db2*] {forcedelete $f} - foreach f [glob -nocomplain test.db*] { - forcecopy $f [string map {test.db test.db2} $f] - } - - sqlite3 db2 test.db2 - db2 eval {SELECT sum(b), md5sum(c) FROM t1} - } $sum - db2 close - } -} - - - -finish_test DELETED test/wal2rollback.test Index: test/wal2rollback.test ================================================================== --- test/wal2rollback.test +++ /dev/null @@ -1,62 +0,0 @@ -# 2017 September 19 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. 
-# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2rollback -ifcapable !wal {finish_test ; return } - -do_execsql_test 1.0 { - CREATE TABLE t1(a, b, c); - CREATE TABLE t2(a, b, c); - CREATE INDEX i1 ON t1(a); - CREATE INDEX i2 ON t1(b); - PRAGMA journal_mode = wal2; - PRAGMA cache_size = 5; - PRAGMA journal_size_limit = 10000; - WITH s(i) AS ( - SELECT 1 UNION ALL SELECT i+1 FROM s LIMIT 1000 - ) - INSERT INTO t1 SELECT i, i, randomblob(200) FROM s; -} {wal2 10000} - -do_test 1.1 { - expr [file size test.db-wal] > 10000 -} 1 - -do_test 1.2 { - execsql { - BEGIN; - UPDATE t1 SET b=b+1; - INSERT INTO t2 VALUES(1,2,3); - } - expr [file size test.db-wal2] > 10000 -} {1} - -breakpoint -do_execsql_test 1.3 { - ROLLBACK; - SELECT * FROM t2; - SELECT count(*) FROM t1 WHERE a=b; - PRAGMA integrity_check; -} {1000 ok} - - - -finish_test DELETED test/wal2savepoint.test Index: test/wal2savepoint.test ================================================================== --- test/wal2savepoint.test +++ /dev/null @@ -1,74 +0,0 @@ -# 2018 December 13 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2savepoint -ifcapable !wal {finish_test ; return } - -do_execsql_test 1.0 { - CREATE TABLE t1(a, b, c); - CREATE INDEX t1a ON t1(a); - CREATE INDEX t1b ON t1(b); - CREATE INDEX t1c ON t1(c); - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 15000; - PRAGMA wal_autocheckpoint = 0; - PRAGMA cache_size = 5; -} {wal2 15000 0} - -do_execsql_test 1.1 { - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 200) - INSERT INTO t1 SELECT random(), random(), random() FROM s; -} {} - -do_test 1.2 { - list [file size test.db] [file size test.db-wal2] \ - [expr [file size test.db-wal]>20000] -} {5120 0 1} - -do_execsql_test 1.3 { - BEGIN; - SAVEPOINT abc; - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 100) - INSERT INTO t1 SELECT random(), random(), random() FROM s; - ROLLBACK TO abc; - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 10) - INSERT INTO t1 SELECT random(), random(), random() FROM s; - COMMIT; - SELECT count(*) FROM t1; - PRAGMA integrity_check; -} {210 ok} - -do_execsql_test 1.4 { - BEGIN; - SAVEPOINT abc; - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 100) - INSERT INTO t1 SELECT random(), random(), random() FROM s; - ROLLBACK TO abc; - WITH s(i) AS ( SELECT 1 UNION ALL SELECT i+1 FROM s where i < 10) - INSERT INTO t1 SELECT random(), random(), random() FROM s; - COMMIT; - SELECT count(*) FROM t1; - PRAGMA integrity_check; -} {220 ok} - - -finish_test - DELETED test/wal2simple.test Index: test/wal2simple.test ================================================================== --- test/wal2simple.test +++ /dev/null @@ -1,475 +0,0 @@ -# 2017 September 19 -# -# The author 
disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -source $testdir/lock_common.tcl -source $testdir/malloc_common.tcl -source $testdir/wal_common.tcl - -set testprefix wal2simple -ifcapable !wal {finish_test ; return } - -#------------------------------------------------------------------------- -# The following tests verify that a client can switch in and out of wal -# and wal2 mode. But that it is not possible to change directly from wal -# to wal2, or from wal2 to wal mode. -# -do_execsql_test 1.1.0 { - PRAGMA journal_mode = wal2 -} {wal2} -execsql { SELECT * FROM sqlite_master} -do_execsql_test 1.x { - PRAGMA journal_mode; - PRAGMA main.journal_mode; -} {wal2 wal2} -db close -do_test 1.1.1 { file size test.db } {1024} -do_test 1.1.2 { hexio_read test.db 18 2 } 0303 - -sqlite3 db test.db -do_execsql_test 1.2.0 { - SELECT * FROM sqlite_master; - PRAGMA journal_mode = delete; -} {delete} -db close -do_test 1.2.1 { file size test.db } {1024} -do_test 1.2.2 { hexio_read test.db 18 2 } 0101 - -sqlite3 db test.db -do_execsql_test 1.3.0 { - SELECT * FROM sqlite_master; - PRAGMA journal_mode = wal; -} {wal} -db close -do_test 1.3.1 { file size test.db } {1024} -do_test 1.3.2 { hexio_read test.db 18 2 } 0202 - -sqlite3 db test.db -do_catchsql_test 1.4.0 { - PRAGMA journal_mode = wal2; -} {1 {cannot change from wal to wal2 mode}} -do_execsql_test 1.4.1 { - PRAGMA journal_mode = wal; - PRAGMA journal_mode = delete; - PRAGMA journal_mode = wal2; - PRAGMA journal_mode = wal2; -} {wal delete wal2 wal2} -do_catchsql_test 1.4.2 { - PRAGMA journal_mode = wal; -} {1 {cannot change from wal2 to wal mode}} -db close -do_test 1.4.3 { hexio_read test.db 18 2 } 0303 - -#------------------------------------------------------------------------- -# Test that recovery in wal2 mode works. 
-# -forcedelete test.db test.db-wal test.db-wal2 -reset_db -do_execsql_test 2.0 { - CREATE TABLE t1(a INTEGER PRIMARY KEY, b); - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 5000; -} {wal2 5000} - -proc wal_hook {DB nm nFrame} { $DB eval { PRAGMA wal_checkpoint } } -db wal_hook {wal_hook db} - -for {set i 1} {$i <= 200} {incr i} { - execsql { INSERT INTO t1 VALUES(NULL, randomblob(100)) } - set res [db eval { SELECT sum(a), md5sum(b) FROM t1 }] - - do_test 2.1.$i { - foreach f [glob -nocomplain test.db2*] { forcedelete $f } - forcecopy test.db test.db2 - forcecopy test.db-wal test.db2-wal - forcecopy test.db-wal2 test.db2-wal2 - - sqlite3 db2 test.db2 - db2 eval { SELECT sum(a), md5sum(b) FROM t1 } - } $res - - db2 close -} - -#------------------------------------------------------------------------- - -reset_db -do_execsql_test 3.0 { - CREATE TABLE t1(x BLOB, y INTEGER PRIMARY KEY); - CREATE INDEX i1 ON t1(x); - PRAGMA cache_size = 5; - PRAGMA journal_mode = wal2; -} {wal2} - -do_test 3.1 { - execsql BEGIN - for {set i 1} {$i < 1000} {incr i} { - execsql { INSERT INTO t1 VALUES(randomblob(800), $i) } - } - execsql COMMIT -} {} - -do_execsql_test 3.2 { - PRAGMA integrity_check; -} {ok} - -#------------------------------------------------------------------------- -catch { db close } -foreach f [glob -nocomplain test.db*] { forcedelete $f } -reset_db -do_execsql_test 4.0 { - CREATE TABLE t1(x, y); - PRAGMA journal_mode = wal2; -} {wal2} - -do_execsql_test 4.1 { - SELECT * FROM t1; -} {} - -do_execsql_test 4.2 { - INSERT INTO t1 VALUES(1, 2); -} {} - -do_execsql_test 4.3 { - SELECT * FROM t1; -} {1 2} - -do_test 4.4 { - sqlite3 db2 test.db - execsql { SELECT * FROM t1 } db2 -} {1 2} - -do_test 4.5 { - lsort [glob test.db*] -} {test.db test.db-shm test.db-wal test.db-wal2} - -do_test 4.6 { - db close - db2 close - sqlite3 db test.db - execsql { SELECT * FROM t1 } -} {1 2} - -do_execsql_test 4.7 { - PRAGMA journal_size_limit = 4000; - INSERT INTO t1 VALUES(3, 4); - INSERT INTO t1 VALUES(5, 6); - INSERT INTO t1 VALUES(7, 8); - INSERT INTO t1 VALUES(9, 10); - INSERT INTO t1 VALUES(11, 12); - INSERT INTO t1 VALUES(13, 14); - INSERT INTO t1 VALUES(15, 16); - INSERT INTO t1 VALUES(17, 18); - SELECT * FROM t1; -} {4000 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18} - -do_test 4.8 { - sqlite3 db2 test.db - execsql { SELECT * FROM t1 } db2 -} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18} - -do_test 4.9 { - db close - db2 close - lsort [glob test.db*] -} {test.db} - -#------------------------------------------------------------------------- -reset_db -do_execsql_test 5.0 { - CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); - CREATE INDEX i1 ON t1(b, c); - PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 4000; -} {wal2 4000} - -proc wal_hook {DB nm nFrame} { - $DB eval { PRAGMA wal_checkpoint } -} -db wal_hook [list wal_hook db] - - -foreach js {4000 8000 12000} { - foreach NROW [list 100 200 300 400 500 600 1000] { - do_test 5.$js.$NROW.1 { - db eval "DELETE FROM t1" - db eval "PRAGMA journal_size_limit = $js" - set nTotal 0 - for {set i 0} {$i < $NROW} {incr i} { - db eval { INSERT INTO t1 VALUES($i, $i, randomblob(abs(random()%50))) } - incr nTotal $i - } - set {} {} - } {} - - do_test 5.$js.$NROW.2 { - sqlite3 db2 test.db - db2 eval { - PRAGMA integrity_check; - SELECT count(*), sum(b) FROM t1; - } - } [list ok $NROW $nTotal] - - db2 close - } -} - - -#------------------------------------------------------------------------- -reset_db -do_execsql_test 6.0 { - CREATE TABLE tx(x); - 
PRAGMA journal_mode = wal2; - PRAGMA journal_size_limit = 3500; -} {wal2 3500} - -do_test 6.1 { - for {set i 0} {$i < 10} {incr i} { - execsql "CREATE TABLE t$i (x);" - } -} {} - -do_test 6.2.1 { - foreach f [glob -nocomplain test.db2*] { forcedelete $f } - forcecopy test.db-wal2 test.db2-wal2 - sqlite3 db2 test.db2 - db2 eval { SELECT * FROM sqlite_master } -} {} -do_test 6.2.2 { - db2 eval { - PRAGMA journal_mode = wal2; - SELECT * FROM sqlite_master; - } -} {wal2} - -do_test 6.3.1 { - db2 close - foreach f [glob -nocomplain test.db2*] { forcedelete $f } - forcecopy test.db-wal2 test.db2-wal2 - forcecopy test.db test.db2 - sqlite3 db2 test.db2 - db2 eval { SELECT * FROM sqlite_master } -} {table tx tx 2 {CREATE TABLE tx(x)}} -do_test 6.3.2 { - db2 eval { - PRAGMA journal_mode = wal2; - SELECT * FROM sqlite_master; - } -} {wal2 table tx tx 2 {CREATE TABLE tx(x)}} - -do_test 6.4.1 { - db2 close - foreach f [glob -nocomplain test.db2*] { forcedelete $f } - forcecopy test.db-wal2 test.db2-wal2 - forcecopy test.db-wal test.db2-wal - sqlite3 db2 test.db2 - db2 eval { SELECT * FROM sqlite_master } -} {} -do_test 6.4.2 { - db2 eval { - PRAGMA journal_mode = wal2; - SELECT * FROM sqlite_master; - } -} {wal2} -db2 close - -#------------------------------------------------------------------------- -reset_db -sqlite3 db2 test.db -do_execsql_test 7.0 { - PRAGMA journal_size_limit = 10000; - PRAGMA journal_mode = wal2; - PRAGMA wal_autocheckpoint = 0; - BEGIN; - CREATE TABLE t1(a); - INSERT INTO t1 VALUES( randomblob(8000) ); - COMMIT; -} {10000 wal2 0} - -do_test 7.1 { - list [file size test.db-wal] [file size test.db-wal2] -} {9464 0} - -# Connection db2 is holding a PART1 lock. -# -# 7.2.2: Test that the PART1 does not prevent db from switching to the -# other wal file. -# -# 7.2.3: Test that the PART1 does prevent a checkpoint of test.db-wal. -# -# 7.2.4: Test that after the PART1 is released the checkpoint is possible. -# -do_test 7.2.1 { - execsql { - BEGIN; - SELECT count(*) FROM t1; - } db2 -} {1} -do_test 7.2.2 { - execsql { - INSERT INTO t1 VALUES( randomblob(800) ); - INSERT INTO t1 VALUES( randomblob(800) ); - } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {13656 3176 1024} -do_test 7.2.3 { - execsql { PRAGMA wal_checkpoint } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {13656 3176 1024} -do_test 7.2.4 { - execsql { END } db2 - execsql { PRAGMA wal_checkpoint } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {13656 3176 11264} - -# Connection db2 is holding a PART2_FULL1 lock. -# -# 7.3.2: Test that the lock does not prevent checkpointing. -# -# 7.3.3: Test that the lock does prevent the writer from overwriting -# test.db-wal. 
-# -# 7.3.4: Test that after the PART2_FULL1 is released the writer can -# switch wal files and overwrite test.db-wal -# -db close -db2 close -sqlite3 db test.db -sqlite3 db2 test.db -do_test 7.3.1 { - execsql { - PRAGMA wal_autocheckpoint = 0; - PRAGMA journal_size_limit = 10000; - INSERT INTO t1 VALUES(randomblob(10000)); - INSERT INTO t1 VALUES(randomblob(500)); - } - execsql { - BEGIN; - SELECT count(*) FROM t1; - } db2 - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 3176 12288} -do_test 7.3.2 { - execsql { PRAGMA wal_checkpoint } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 3176 22528} -do_test 7.3.3 { - execsql { - INSERT INTO t1 VALUES(randomblob(10000)); - INSERT INTO t1 VALUES(randomblob(500)); - } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 18896 22528} -do_test 7.3.4 { - execsql END db2 - execsql { INSERT INTO t1 VALUES(randomblob(5000)); } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 18896 22528} - -# Connection db2 is holding a PART2 lock. -# -# 7.4.2: Test that the lock does not prevent writer switching to test.db-wal. -# -# 7.3.3: Test that the lock does prevent checkpointing of test.db-wal2. -# -# 7.3.4: Test that after the PART2 is released test.db-wal2 can be -# checkpointed. -# -db close -db2 close -breakpoint -sqlite3 db test.db -sqlite3 db2 test.db -do_test 7.4.1 { - execsql { - PRAGMA wal_autocheckpoint = 0; - PRAGMA journal_size_limit = 10000; - INSERT INTO t1 VALUES(randomblob(10000)); - INSERT INTO t1 VALUES(randomblob(10000)); - PRAGMA wal_checkpoint; - } - execsql { - BEGIN; - SELECT count(*) FROM t1; - } db2 - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 12608 50176} -do_test 7.4.2 { - execsql { - INSERT INTO t1 VALUES(randomblob(5000)); - } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 12608 50176} -do_test 7.4.3 { - execsql { PRAGMA wal_checkpoint } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 12608 50176} -do_test 7.4.4 { - execsql END db2 - execsql { PRAGMA wal_checkpoint } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 12608 60416} - -# Connection db2 is holding a PART1_FULL2 lock. -# -# 7.5.2: Test that the lock does not prevent a checkpoint of test.db-wal2. -# -# 7.5.3: Test that the lock does prevent the writer from overwriting -# test.db-wal2. -# -# 7.5.4: Test that after the PART1_FULL2 lock is released, the writer -# can switch to test.db-wal2. 
-# -db close -db2 close -sqlite3 db test.db -sqlite3 db2 test.db -do_test 7.5.1 { - execsql { - PRAGMA wal_autocheckpoint = 0; - PRAGMA journal_size_limit = 10000; - INSERT INTO t1 VALUES(randomblob(10000)); - INSERT INTO t1 VALUES(randomblob(10000)); - PRAGMA wal_checkpoint; - INSERT INTO t1 VALUES(randomblob(5000)); - } - execsql { - BEGIN; - SELECT count(*) FROM t1; - } db2 - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 12608 76800} -do_test 7.5.2 { - execsql { PRAGMA wal_checkpoint } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {12608 12608 87040} -do_test 7.5.3.1 { - execsql { INSERT INTO t1 VALUES(randomblob(5000)) } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {14704 12608 87040} -do_test 7.5.3.2 { - execsql { INSERT INTO t1 VALUES(randomblob(5000)) } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {22040 12608 87040} -do_test 7.5.4 { - execsql END db2 - execsql { INSERT INTO t1 VALUES(randomblob(5000)) } - list [file size test.db-wal] [file size test.db-wal2] [file size test.db] -} {22040 12608 87040} - - -finish_test - DELETED test/wal2snapshot.test Index: test/wal2snapshot.test ================================================================== --- test/wal2snapshot.test +++ /dev/null @@ -1,94 +0,0 @@ -# 2018 December 5 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this file is testing the operation of the library in -# "PRAGMA journal_mode=WAL2" mode. -# - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -set testprefix wal2snapshot -ifcapable !wal {finish_test ; return } -ifcapable !snapshot {finish_test; return} - -foreach {tn mode} {1 wal 2 wal2} { - reset_db - do_execsql_test $tn.1 "PRAGMA journal_mode = $mode" $mode - - do_execsql_test $tn.2 { - CREATE TABLE t1(a, b); - INSERT INTO t1 VALUES(1, 2); - INSERT INTO t1 VALUES(3, 4); - BEGIN; - } - - # Check that sqlite3_snapshot_get() is an error for a wal2 db. - # - if {$tn==1} { - do_test 1.3 { - set S [sqlite3_snapshot_get db main] - sqlite3_snapshot_free $S - } {} - } else { - do_test 2.3 { - list [catch { sqlite3_snapshot_get db main } msg] $msg - } {1 SQLITE_ERROR} - } - - # Check that sqlite3_snapshot_recover() is an error for a wal2 db. - # - do_execsql_test $tn.4 COMMIT - if {$tn==1} { - do_test 1.5 { - sqlite3_snapshot_recover db main - } {} - } else { - do_test 2.5 { - list [catch { sqlite3_snapshot_recover db main } msg] $msg - } {1 SQLITE_ERROR} - } - - # Check that sqlite3_snapshot_open() is an error for a wal2 db. 
- # - if {$tn==1} { - do_test 1.6 { - execsql BEGIN - set SNAPSHOT [sqlite3_snapshot_get_blob db main] - sqlite3_snapshot_open_blob db main $SNAPSHOT - execsql COMMIT - } {} - } else { - - do_test 2.6.1 { - execsql BEGIN - set res [ - list [catch { sqlite3_snapshot_open_blob db main $SNAPSHOT } msg] $msg - ] - execsql COMMIT - set res - } {1 SQLITE_ERROR} - do_test 2.6.2 { - execsql BEGIN - execsql {SELECT * FROM sqlite_master} - set res [ - list [catch { sqlite3_snapshot_open_blob db main $SNAPSHOT } msg] $msg - ] - execsql COMMIT - set res - } {1 SQLITE_ERROR} - } -} - - -finish_test - - Index: test/wal_common.tcl ================================================================== --- test/wal_common.tcl +++ test/wal_common.tcl @@ -88,46 +88,6 @@ wal_cksum_intlist c1 c2 [lrange $hdr 0 9] lset hdr 10 $c1 lset hdr 11 $c2 } -# This command assumes that $file is the name of a database file opened -# in wal mode using a [testvfs] VFS. It returns a list of the 12 32-bit -# integers that make up the wal-index-header for the named file. -# -proc set_tvfs_hdr {file args} { - - # Set $nHdr to the number of bytes in the wal-index header: - set nHdr 48 - set nInt [expr {$nHdr/4}] - - if {[llength $args]>2} { - error {wrong # args: should be "set_tvfs_hdr fileName ?val1? ?val2?"} - } - - set blob [tvfs shm $file] - if {$::tcl_platform(byteOrder)=="bigEndian"} {set fmt I} {set fmt i} - - if {[llength $args]} { - set ia [lindex $args 0] - set ib $ia - if {[llength $args]==2} { - set ib [lindex $args 1] - } - binary scan $blob a[expr $nHdr*2]a* dummy tail - set blob [binary format ${fmt}${nInt}${fmt}${nInt}a* $ia $ib $tail] - tvfs shm $file $blob - } - - binary scan $blob ${fmt}${nInt} ints - return $ints -} - -proc incr_tvfs_hdr {file idx incrval} { - set ints [set_tvfs_hdr $file] - set v [lindex $ints $idx] - incr v $incrval - lset ints $idx $v - set_tvfs_hdr $file $ints -} - Index: test/walprotocol2.test ================================================================== --- test/walprotocol2.test +++ test/walprotocol2.test @@ -83,11 +83,11 @@ if {$lock=="0 1 lock exclusive"} { proc lock_callback {method filename handle lock} {} db2 eval { INSERT INTO x VALUES('x') } } } -db timeout 1100 +db timeout 10 do_catchsql_test 2.4 { BEGIN EXCLUSIVE; } {0 {}} do_execsql_test 2.5 { SELECT * FROM x; Index: test/walrofault.test ================================================================== --- test/walrofault.test +++ test/walrofault.test @@ -52,7 +52,9 @@ } -body { execsql { SELECT * FROM t1 } } -test { faultsim_test_result {0 {hello world ! world hello}} } + + finish_test Index: test/wapptest.tcl ================================================================== --- test/wapptest.tcl +++ test/wapptest.tcl @@ -474,11 +474,11 @@ # Build the "test" select widget. set lOpt [list Normal Veryquick Smoketest Build-Only] generate_select_widget Test control_test $lOpt $G(test) # Build the "jobs" select widget. Options are 1 to 8. - generate_select_widget Jobs control_jobs {1 2 3 4 5 6 7 8} $G(jobs) + generate_select_widget Jobs control_jobs {1 2 3 4 5 6 7 8 12 16} $G(jobs) switch $G(state) { config { set txt "Run Tests!" 
set id control_run Index: test/where.test ================================================================== --- test/where.test +++ test/where.test @@ -1614,7 +1614,24 @@ } { 1 1 15 999 19 5 } + +# 2022-12-07 Yong Heng [https://sqlite.org/forum/forumpost/dfe8084751] +# +ifcapable vtab { + do_execsql_test where-29.1 { + SELECT DISTINCT 'xyz' FROM pragma_cache_size + WHERE rowid OR abs(0) + ORDER BY + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1; + } {xyz} +} finish_test ADDED test/widetab1.test Index: test/widetab1.test ================================================================== --- /dev/null +++ test/widetab1.test @@ -0,0 +1,156 @@ +# 2022-10-24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements test cases for wide table (tables with more than +# 64 columns) and indexes that reference columns beyond the 63rd or 64th +# column. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix widetab1 + + +# In order to pick the better index in the following query, SQLite needs to +# be able to detect when an index that references later columns in a wide +# table is a covering index. +# +do_execsql_test 100 { + CREATE TABLE a( + a00, a01, a02, a03, a04, a05, a06, a07, a08, a09, + a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, + a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, + a30, a31, a32, a33, a34, a35, a36, a37, a38, a39, + a40, a41, a42, a43, a44, a45, a46, a47, a48, a49, + a50, a51, a52, a53, a54, a55, a56, a57, a58, a59, + pd, bn, vb, bc, cn, ie, qm); + CREATE INDEX a1 on a(pd, bn, vb, bc, cn); -- preferred index + CREATE INDEX a2 on a(pd, bc, ie, qm); -- suboptimal index + CREATE TABLE b(bg, bc, bn, iv, ln, mg); + CREATE INDEX b1 on b(bn, iv, bg); +} +do_eqp_test 110 { + SELECT dc, count(cn) + FROM (SELECT coalesce(b.bg, a.bc) as dc, cn + FROM a LEFT JOIN b + ON a.bn = b.bn + AND CASE WHEN a.vb IS NOT NULL THEN 1 ELSE 0 END = b.iv + WHERE pd BETWEEN 0 AND 10) + GROUP BY dc; +} { + QUERY PLAN + |--SEARCH a USING COVERING INDEX a1 (pd>? AND pd IF DEFINED __ECHO SET OVERWRITE=^^^> Index: tool/build-all-msvc.bat ================================================================== --- tool/build-all-msvc.bat +++ tool/build-all-msvc.bat @@ -127,10 +127,12 @@ REM SET __ECHO=ECHO REM SET __ECHO2=ECHO REM SET __ECHO3=ECHO IF NOT DEFINED _AECHO (SET _AECHO=REM) IF NOT DEFINED _CECHO (SET _CECHO=REM) +IF NOT DEFINED _CECHO2 (SET _CECHO2=REM) +IF NOT DEFINED _CECHO3 (SET _CECHO3=REM) IF NOT DEFINED _VECHO (SET _VECHO=REM) SET REDIRECT=^> IF DEFINED __ECHO SET REDIRECT=^^^> @@ -175,10 +177,11 @@ REM REM NOTE: Change the current directory to the root of the source tree, saving REM the current directory on the directory stack. REM +%_CECHO2% PUSHD "%ROOT%" %__ECHO2% PUSHD "%ROOT%" IF ERRORLEVEL 1 ( ECHO Could not change directory to "%ROOT%". GOTO errors @@ -522,10 +525,11 @@ REM "%ComSpec%" /C ( REM REM NOTE: Attempt to setup the MSVC environment for this platform. REM + %_CECHO3% CALL "%VCVARSALL%" %%P %__ECHO3% CALL "%VCVARSALL%" %%P IF ERRORLEVEL 1 ( ECHO Failed to call "%VCVARSALL%" for platform %%P. 
GOTO errors @@ -747,10 +751,11 @@ ) REM REM NOTE: Restore the saved current directory from the directory stack. REM +%_CECHO2% POPD %__ECHO2% POPD IF ERRORLEVEL 1 ( ECHO Could not restore directory. GOTO errors Index: tool/mkctimec.tcl ================================================================== --- tool/mkctimec.tcl +++ tool/mkctimec.tcl @@ -41,11 +41,11 @@ /* ** Include the configuration header output by 'configure' if we're using the ** autoconf-based build */ #if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) -#include \"config.h\" +#include \"sqlite_cfg.h\" #define SQLITECONFIG_H 1 #endif /* These macros are provided to \"stringify\" the value of the define ** for those options in which the value is meaningful. */ @@ -302,10 +302,11 @@ SQLITE_DEFAULT_SECTOR_SIZE SQLITE_DEFAULT_SYNCHRONOUS SQLITE_DEFAULT_WAL_AUTOCHECKPOINT SQLITE_DEFAULT_WAL_SYNCHRONOUS SQLITE_DEFAULT_WORKER_THREADS + SQLITE_DQS SQLITE_ENABLE_8_3_NAMES SQLITE_ENABLE_CEROD SQLITE_ENABLE_LOCKING_STYLE SQLITE_EXTRA_INIT SQLITE_EXTRA_SHUTDOWN Index: tool/mkpragmatab.tcl ================================================================== --- tool/mkpragmatab.tcl +++ tool/mkpragmatab.tcl @@ -10,11 +10,11 @@ # new pragma in ../src/pragma.c. # # Flag meanings: set flagMeaning(NeedSchema) {Force schema load before running} -set flagMeaning(ReadOnly) {Read-only HEADER_VALUE} +set flagMeaning(OneSchema) {Only a single schema required} set flagMeaning(Result0) {Acts as query when no argument} set flagMeaning(Result1) {Acts as query when has one argument} set flagMeaning(SchemaReq) {Schema required - "main" is default} set flagMeaning(SchemaOpt) {Schema restricts name search if present} set flagMeaning(NoColumns) {OP_ResultRow called with zero columns} @@ -105,16 +105,10 @@ TYPE: FLAG ARG: SQLITE_VdbeEQP IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS) IF: defined(SQLITE_DEBUG) - NAME: noop_update - TYPE: FLAG - ARG: SQLITE_NoopUpdate - IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS) - IF: defined(SQLITE_ENABLE_NOOP_UPDATE) - NAME: ignore_check_constraints TYPE: FLAG ARG: SQLITE_IgnoreChecks IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS) IF: !defined(SQLITE_OMIT_CHECK) @@ -154,11 +148,11 @@ NAME: cell_size_check TYPE: FLAG ARG: SQLITE_CellSizeCk NAME: default_cache_size - FLAG: NeedSchema Result0 SchemaReq NoColumns1 + FLAG: NeedSchema Result0 SchemaReq NoColumns1 OneSchema COLS: cache_size IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) NAME: page_size FLAG: Result0 SchemaReq NoColumns1 @@ -167,43 +161,43 @@ NAME: secure_delete FLAG: Result0 IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: page_count - FLAG: NeedSchema Result0 SchemaReq + FLAG: NeedSchema Result0 SchemaReq OneSchema IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: max_page_count TYPE: PAGE_COUNT - FLAG: NeedSchema Result0 SchemaReq + FLAG: NeedSchema Result0 SchemaReq OneSchema IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: locking_mode FLAG: Result0 SchemaReq IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: journal_mode - FLAG: NeedSchema Result0 SchemaReq + FLAG: NeedSchema Result0 SchemaReq OneSchema IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: journal_size_limit FLAG: Result0 SchemaReq IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: cache_size - FLAG: NeedSchema Result0 SchemaReq NoColumns1 + FLAG: NeedSchema Result0 SchemaReq NoColumns1 OneSchema IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: mmap_size IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: auto_vacuum - FLAG: NeedSchema Result0 SchemaReq NoColumns1 + FLAG: NeedSchema Result0 SchemaReq NoColumns1 
OneSchema IF: !defined(SQLITE_OMIT_AUTOVACUUM) NAME: incremental_vacuum - FLAG: NeedSchema NoColumns + FLAG: NeedSchema NoColumns OneSchema IF: !defined(SQLITE_OMIT_AUTOVACUUM) NAME: temp_store FLAG: Result0 NoColumns1 IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) @@ -219,11 +213,11 @@ NAME: lock_proxy_file FLAG: NoColumns1 IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE NAME: synchronous - FLAG: NeedSchema Result0 SchemaReq NoColumns1 + FLAG: NeedSchema Result0 SchemaReq NoColumns1 OneSchema IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) NAME: table_info FLAG: NeedSchema Result1 SchemaOpt ARG: 0 @@ -242,11 +236,11 @@ FLAG: NeedSchema Result1 COLS: schema name type ncol wr strict IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) NAME: stats - FLAG: NeedSchema Result0 SchemaReq + FLAG: NeedSchema Result0 SchemaReq OneSchema COLS: tbl idx wdth hght flgs IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) && defined(SQLITE_DEBUG) NAME: index_info TYPE: INDEX_INFO @@ -294,11 +288,11 @@ FLAG: Result0 COLS: seq name IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) NAME: foreign_key_list - FLAG: NeedSchema Result1 SchemaOpt + FLAG: NeedSchema Result1 SchemaOpt OneSchema COLS: id seq table from to on_update on_delete match IF: !defined(SQLITE_OMIT_FOREIGN_KEY) NAME: foreign_key_check FLAG: NeedSchema Result0 Result1 SchemaOpt @@ -340,18 +334,18 @@ FLAG: NoColumns1 Result0 IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) NAME: data_version TYPE: HEADER_VALUE - ARG: BTREE_DATA_VERSION - FLAG: ReadOnly Result0 + ARG: BTREE_DATA_VERSION|PRAGMA_HEADER_VALUE_READONLY + FLAG: Result0 IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) NAME: freelist_count TYPE: HEADER_VALUE - ARG: BTREE_FREE_PAGE_COUNT - FLAG: ReadOnly Result0 + ARG: BTREE_FREE_PAGE_COUNT|PRAGMA_HEADER_VALUE_READONLY + FLAG: Result0 IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) NAME: application_id TYPE: HEADER_VALUE ARG: BTREE_APPLICATION_ID @@ -361,11 +355,11 @@ NAME: compile_options FLAG: Result0 IF: !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) NAME: wal_checkpoint - FLAG: NeedSchema + FLAG: NeedSchema OneSchema COLS: busy log checkpointed IF: !defined(SQLITE_OMIT_WAL) NAME: wal_autocheckpoint IF: !defined(SQLITE_OMIT_WAL) @@ -519,10 +513,16 @@ foreach f [lsort [array names allflags]] { puts $fd [format {#define PragFlg_%-10s 0x%02x /* %s */} \ $f $fv $flagMeaning($f)] set fv [expr {$fv*2}] } + +puts $fd "\n/* For PragTyp_HEADER_VALUE pragmas the Pragma.iArg value is set" +puts $fd "** to the index of the header field to access (always 10 or less)." +puts $fd "** Ored with HEADER_VALUE_READONLY if the field is read only. 
*/" +puts $fd "#define PRAGMA_HEADER_VALUE_READONLY 0x0100" +puts $fd "#define PRAGMA_HEADER_VALUE_MASK 0x00FF\n" # Sort the column lists so that longer column lists occur first # proc colscmp {a b} { return [expr {[llength $b] - [llength $a]}] Index: tool/mkshellc.tcl ================================================================== --- tool/mkshellc.tcl +++ tool/mkshellc.tcl @@ -32,15 +32,15 @@ */} set in [open $topdir/src/shell.c.in] fconfigure $in -translation binary proc omit_redundant_typedefs {line} { global typedef_seen - if {[regexp {^typedef .*;} $line]} { - if {[info exists typedef_seen($line)]} { - return "/* $line */" + if {[regexp {^typedef .*\y([a-zA-Z0-9_]+);} $line all typename]} { + if {[info exists typedef_seen($typename)]} { + return "/* [string map {/* // */ //} $line] */" } - set typedef_seen($line) 1 + set typedef_seen($typename) 1 } return $line } set iLine 0 while {1} { Index: tool/mksqlite3c.tcl ================================================================== --- tool/mksqlite3c.tcl +++ tool/mksqlite3c.tcl @@ -353,10 +353,11 @@ utf.c util.c hash.c opcodes.c + os_kv.c os_unix.c os_win.c memdb.c bitvec.c Index: tool/speed-check.sh ================================================================== --- tool/speed-check.sh +++ tool/speed-check.sh @@ -1,13 +1,13 @@ #!/bin/bash # # This is a template for a script used for day-to-day size and # performance monitoring of SQLite. Typical usage: # -# sh run-speed-test.sh trunk # Baseline measurement of trunk -# sh run-speed-test.sh x1 # Measure some experimental change -# fossil test-diff --tk cout-trunk.txt cout-x1.txt # View chanages +# sh speed-check.sh trunk # Baseline measurement of trunk +# sh speed-check.sh x1 # Measure some experimental change +# fossil xdiff --tk cout-trunk.txt cout-x1.txt # View chanages # # There are multiple output files, all with a base name given by # the first argument: # # summary-$BASE.txt # Copy of standard output ADDED tool/stripccomments.c Index: tool/stripccomments.c ================================================================== --- /dev/null +++ tool/stripccomments.c @@ -0,0 +1,228 @@ +/** + Strips C- and C++-style comments from stdin, sending the results to + stdout. It assumes that its input is legal C-like code, and does + only little error handling. + + It treats string literals as anything starting and ending with + matching double OR single quotes OR backticks (for use with + scripting languages which use those). It assumes that a quote + character within a string which uses the same quote type is escaped + by a backslash. It should not be used on any code which might + contain C/C++ comments inside heredocs, and similar constructs, as + it will strip those out. + + Usage: $0 [--keep-first|-k] < input > output + + The --keep-first (-k) flag tells it to retain the first comment in the + input stream (which is often a license or attribution block). It + may be given repeatedly, each one incrementing the number of + retained comments by one. 
+ + License: Public Domain + Author: Stephan Beal (stephan@wanderinghorse.net) +*/ +#include +#include +#include + +#if 1 +#define MARKER(pfexp) \ + do{ printf("MARKER: %s:%d:\t",__FILE__,__LINE__); \ + printf pfexp; \ + } while(0) +#else +#define MARKER(exp) if(0) printf +#endif + +struct { + FILE * input; + FILE * output; + int rc; + int keepFirst; +} App = { + 0/*input*/, + 0/*output*/, + 0/*rc*/, + 0/*keepFirst*/ +}; + +void do_it_all(void){ + enum states { + S_NONE = 0 /* not in comment */, + S_SLASH1 = 1 /* slash - possibly comment prefix */, + S_CPP = 2 /* in C++ comment */, + S_C = 3 /* in C comment */ + }; + int ch, prev = EOF; + FILE * out = App.output; + int const slash = '/'; + int const star = '*'; + int line = 1; + int col = 0; + enum states state = S_NONE /* current state */; + int elide = 0 /* true if currently eliding output */; + int state3Col = -99 + /* huge kludge for odd corner case: */ + /*/ <--- here. state3Col marks the source column in which a C-style + comment starts, so that it can tell if star-slash inside a + C-style comment is the end of the comment or is the weird corner + case marked at the start of _this_ comment block. */; + for( ; EOF != (ch = fgetc(App.input)); prev = ch, + ++col){ + switch(state){ + case S_NONE: + if('\''==ch || '"'==ch || '`'==ch){ + /* Read string literal... + needed to properly catch comments in strings. */ + int const quote = ch, + startLine = line, startCol = col; + int ch2, escaped = 0, endOfString = 0; + fputc(ch, out); + for( ++col; !endOfString && EOF != (ch2 = fgetc(App.input)); + ++col ){ + switch(ch2){ + case '\\': escaped = !escaped; + break; + case '`': + case '\'': + case '"': + if(!escaped && quote == ch2) endOfString = 1; + escaped = 0; + break; + default: + escaped = 0; + break; + } + if('\n'==ch2){ + ++line; + col = 0; + } + fputc(ch2, out); + } + if(EOF == ch2){ + fprintf(stderr, "Unexpected EOF while reading %s literal " + "on line %d column %d.\n", + ('\''==ch) ? "char" : "string", + startLine, startCol); + App.rc = 1; + return; + } + break; + } + else if(slash == ch){ + /* MARKER(("state 0 ==> 1 @ %d:%d\n", line, col)); */ + state = S_SLASH1; + break; + } + fputc(ch, out); + break; + case S_SLASH1: /* 1 slash */ + /* MARKER(("SLASH1 @ %d:%d App.keepFirst=%d\n", + line, col, App.keepFirst)); */ + switch(ch){ + case '*': + /* Enter C comment */ + if(App.keepFirst>0){ + elide = 0; + --App.keepFirst; + }else{ + elide = 1; + } + /*MARKER(("state 1 ==> 3 @ %d:%d\n", line, col));*/ + state = S_C; + state3Col = col-1; + if(!elide){ + fputc(prev, out); + fputc(ch, out); + } + break; + case '/': + /* Enter C++ comment */ + if(App.keepFirst>0){ + elide = 0; + --App.keepFirst; + }else{ + elide = 1; + } + /*MARKER(("state 1 ==> 2 @ %d:%d\n", line, col));*/ + state = S_CPP; + if(!elide){ + fputc(prev, out); + fputc(ch, out); + } + break; + default: + /* It wasn't a comment after all. */ + state = S_NONE; + if(!elide){ + fputc(prev, out); + fputc(ch, out); + } + } + break; + case S_CPP: /* C++ comment */ + if('\n' == ch){ + /* MARKER(("state 2 ==> 0 @ %d:%d\n", line, col)); */ + state = S_NONE; + elide = 0; + } + if(!elide){ + fputc(ch, out); + } + break; + case S_C: /* C comment */ + if(!elide){ + fputc(ch, out); + } + if(slash == ch){ + if(star == prev){ + /* MARKER(("state 3 ==> 0 @ %d:%d\n", line, col)); */ + /* Corner case which breaks this: */ + /*/ <-- slash there */ + /* That shows up twice in a piece of 3rd-party + code i use. 
*/ + /* And thus state3Col was introduced :/ */ + if(col!=state3Col+2){ + state = S_NONE; + elide = 0; + state3Col = -99; + } + } + } + break; + default: + assert(!"impossible!"); + break; + } + if('\n' == ch){ + ++line; + col = 0; + state3Col = -99; + } + } +} + +static void usage(char const *zAppName){ + fprintf(stderr, "Strips C- and C++-style comments from stdin and sends " + "the results to stdout.\n"); + fprintf(stderr, "Usage: %s [--keep-first|-k] < input > output\n", zAppName); +} + +int main( int argc, char const * const * argv ){ + int i; + for(i = 1; i < argc; ++i){ + char const * zArg = argv[i]; + while( '-'==*zArg ) ++zArg; + if( 0==strcmp(zArg,"k") + || 0==strcmp(zArg,"keep-first") ){ + ++App.keepFirst; + }else{ + usage(argv[0]); + return 1; + } + } + App.input = stdin; + App.output = stdout; + do_it_all(); + return App.rc ? 1 : 0; +} DELETED tool/tserver.c Index: tool/tserver.c ================================================================== --- tool/tserver.c +++ /dev/null @@ -1,643 +0,0 @@ -/* -** 2017 June 7 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** Simple multi-threaded server used for informal testing of concurrency -** between connections in different threads. Listens for tcp/ip connections -** on port 9999 of the 127.0.0.1 interface only. To build: -** -** gcc -g $(TOP)/tool/tserver.c sqlite3.o -lpthread -o tserver -** -** To run using "x.db" as the db file: -** -** ./tserver x.db -** -** To connect, open a client socket on port 9999 and start sending commands. -** Commands are either SQL - which must be terminated by a semi-colon, or -** dot-commands, which must be terminated by a newline. If an SQL statement -** is seen, it is prepared and added to an internal list. -** -** Dot-commands are: -** -** .list Display all SQL statements in the list. -** .quit Disconnect. -** .run Run all SQL statements in the list. -** .repeats N Configure the number of repeats per ".run". -** .seconds N Configure the number of seconds to ".run" for. -** .mutex_commit Add a "COMMIT" protected by a g.commit_mutex -** to the current SQL. -** .stop Stop the tserver process - exit(0). -** .checkpoint N -** .integrity_check -** -** Example input: -** -** BEGIN; -** INSERT INTO t1 VALUES(randomblob(10), randomblob(100)); -** INSERT INTO t1 VALUES(randomblob(10), randomblob(100)); -** INSERT INTO t1 VALUES(randomblob(10), randomblob(100)); -** COMMIT; -** .repeats 100000 -** .run -** -*/ -#define TSERVER_PORTNUMBER 9999 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sqlite3.h" - -#define TSERVER_DEFAULT_CHECKPOINT_THRESHOLD 3900 - -/* Global variables */ -struct TserverGlobal { - char *zDatabaseName; /* Database used by this server */ - char *zVfs; - sqlite3_mutex *commit_mutex; - sqlite3 *db; /* Global db handle */ - - /* The following use native pthreads instead of a portable interface. This - ** is because a condition variable, as well as a mutex, is required. 
*/ - pthread_mutex_t ckpt_mutex; - pthread_cond_t ckpt_cond; - int nThreshold; /* Checkpoint when wal is this large */ - int bCkptRequired; /* True if wal checkpoint is required */ - int nRun; /* Number of clients in ".run" */ - int nWait; /* Number of clients waiting on ckpt_cond */ -}; - -static struct TserverGlobal g = {0}; - -typedef struct ClientSql ClientSql; -struct ClientSql { - sqlite3_stmt *pStmt; - int flags; -}; - -#define TSERVER_CLIENTSQL_MUTEX 0x0001 -#define TSERVER_CLIENTSQL_INTEGRITY 0x0002 - -typedef struct ClientCtx ClientCtx; -struct ClientCtx { - sqlite3 *db; /* Database handle for this client */ - int fd; /* Client fd */ - int nRepeat; /* Number of times to repeat SQL */ - int nSecond; /* Number of seconds to run for */ - ClientSql *aPrepare; /* Array of prepared statements */ - int nPrepare; /* Valid size of apPrepare[] */ - int nAlloc; /* Allocated size of apPrepare[] */ - - int nClientThreshold; /* Threshold for checkpointing */ - int bClientCkptRequired; /* True to do a checkpoint */ -}; - -static int is_eol(int i){ - return (i=='\n' || i=='\r'); -} -static int is_whitespace(int i){ - return (i==' ' || i=='\t' || is_eol(i)); -} - -/* -** Implementation of SQL scalar function usleep(). -*/ -static void usleepFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int nUs; - sqlite3_vfs *pVfs = (sqlite3_vfs*)sqlite3_user_data(context); - assert( argc==1 ); - nUs = sqlite3_value_int64(argv[0]); - pVfs->xSleep(pVfs, nUs); -} - -static void trim_string(const char **pzStr, int *pnStr){ - const char *zStr = *pzStr; - int nStr = *pnStr; - - while( nStr>0 && is_whitespace(zStr[0]) ){ - zStr++; - nStr--; - } - while( nStr>0 && is_whitespace(zStr[nStr-1]) ){ - nStr--; - } - - *pzStr = zStr; - *pnStr = nStr; -} - -static int send_message(ClientCtx *p, const char *zFmt, ...){ - char *zMsg; - va_list ap; /* Vararg list */ - va_start(ap, zFmt); - int res = -1; - - zMsg = sqlite3_vmprintf(zFmt, ap); - if( zMsg ){ - res = write(p->fd, zMsg, strlen(zMsg)); - } - sqlite3_free(zMsg); - va_end(ap); - - return (res<0); -} - -static int handle_some_sql(ClientCtx *p, const char *zSql, int nSql){ - const char *zTail = zSql; - int nTail = nSql; - int rc = SQLITE_OK; - - while( rc==SQLITE_OK ){ - if( p->nPrepare>=p->nAlloc ){ - int nByte = (p->nPrepare+32) * sizeof(ClientSql); - ClientSql *aNew = sqlite3_realloc(p->aPrepare, nByte); - if( aNew ){ - memset(&aNew[p->nPrepare], 0, sizeof(ClientSql)*32); - p->aPrepare = aNew; - p->nAlloc = p->nPrepare+32; - }else{ - rc = SQLITE_NOMEM; - break; - } - } - rc = sqlite3_prepare_v2( - p->db, zTail, nTail, &p->aPrepare[p->nPrepare].pStmt, &zTail - ); - if( rc!=SQLITE_OK ){ - send_message(p, "error - %s (eec=%d)\n", sqlite3_errmsg(p->db), - sqlite3_extended_errcode(p->db) - ); - rc = 1; - break; - } - if( p->aPrepare[p->nPrepare].pStmt==0 ){ - break; - } - p->nPrepare++; - nTail = nSql - (zTail-zSql); - rc = send_message(p, "ok (%d SQL statements)\n", p->nPrepare); - } - - return rc; -} - -/* -** Return a micro-seconds resolution timer. -*/ -static sqlite3_int64 get_timer(void){ - struct timeval t; - gettimeofday(&t, 0); - return (sqlite3_int64)t.tv_usec + ((sqlite3_int64)t.tv_sec * 1000000); -} - -static void clear_sql(ClientCtx *p){ - int j; - for(j=0; jnPrepare; j++){ - sqlite3_finalize(p->aPrepare[j].pStmt); - } - p->nPrepare = 0; -} - -/* -** The sqlite3_wal_hook() callback used by all client database connections. 
-*/ -static int clientWalHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){ - if( g.nThreshold>0 ){ - if( nFrame>=g.nThreshold ){ - g.bCkptRequired = 1; - } - }else{ - ClientCtx *pCtx = (ClientCtx*)pArg; - if( pCtx->nClientThreshold && nFrame>=pCtx->nClientThreshold ){ - pCtx->bClientCkptRequired = 1; - } - } - return SQLITE_OK; -} - -static int handle_run_command(ClientCtx *p){ - int i, j; - int nBusy = 0; - sqlite3_int64 t0 = get_timer(); - sqlite3_int64 t1 = t0; - sqlite3_int64 tCommit = 0; - int nT1 = 0; - int nTBusy1 = 0; - int rc = SQLITE_OK; - - pthread_mutex_lock(&g.ckpt_mutex); - g.nRun++; - pthread_mutex_unlock(&g.ckpt_mutex); - - for(j=0; (p->nRepeat<=0 || jnRepeat) && rc==SQLITE_OK; j++){ - sqlite3_int64 t2; - - for(i=0; inPrepare && rc==SQLITE_OK; i++){ - sqlite3_stmt *pStmt = p->aPrepare[i].pStmt; - - /* If the MUTEX flag is set, grab g.commit_mutex before executing - ** the SQL statement (which is always "COMMIT" in this case). */ - if( p->aPrepare[i].flags & TSERVER_CLIENTSQL_MUTEX ){ - sqlite3_mutex_enter(g.commit_mutex); - tCommit -= get_timer(); - } - - /* Execute the statement */ - if( p->aPrepare[i].flags & TSERVER_CLIENTSQL_INTEGRITY ){ - sqlite3_step(pStmt); - if( sqlite3_stricmp("ok", (const char*)sqlite3_column_text(pStmt, 0)) ){ - send_message(p, "error - integrity_check failed: %s\n", - sqlite3_column_text(pStmt, 0) - ); - } - sqlite3_reset(pStmt); - } - while( sqlite3_step(pStmt)==SQLITE_ROW ); - rc = sqlite3_reset(pStmt); - - /* Relinquish the g.commit_mutex mutex if required. */ - if( p->aPrepare[i].flags & TSERVER_CLIENTSQL_MUTEX ){ - tCommit += get_timer(); - sqlite3_mutex_leave(g.commit_mutex); - } - - if( (rc & 0xFF)==SQLITE_BUSY ){ - if( sqlite3_get_autocommit(p->db)==0 ){ - sqlite3_exec(p->db, "ROLLBACK", 0, 0, 0); - } - nBusy++; - rc = SQLITE_OK; - break; - } - else if( rc!=SQLITE_OK ){ - send_message(p, "error - %s (eec=%d)\n", sqlite3_errmsg(p->db), - sqlite3_extended_errcode(p->db) - ); - } - } - - t2 = get_timer(); - if( t2>=(t1+1000000) ){ - sqlite3_int64 nUs = (t2 - t1); - sqlite3_int64 nDone = (j+1 - nBusy - nT1); - - rc = send_message( - p, "(%d done @ %lld per second, %d busy)\n", - (int)nDone, (1000000*nDone + nUs/2) / nUs, nBusy - nTBusy1 - ); - t1 = t2; - nT1 = j+1 - nBusy; - nTBusy1 = nBusy; - if( p->nSecond>0 && ((sqlite3_int64)p->nSecond*1000000)<=t1-t0 ) break; - } - - /* Global checkpoint handling. */ - if( g.nThreshold>0 ){ - pthread_mutex_lock(&g.ckpt_mutex); - if( rc==SQLITE_OK && g.bCkptRequired ){ - if( g.nWait==g.nRun-1 ){ - /* All other clients are already waiting on the condition variable. - ** Run the checkpoint, signal the condition and move on. */ - rc = sqlite3_wal_checkpoint(p->db, "main"); - g.bCkptRequired = 0; - pthread_cond_broadcast(&g.ckpt_cond); - }else{ - assert( g.nWaitbClientCkptRequired ){ - rc = sqlite3_wal_checkpoint(p->db, "main"); - if( rc==SQLITE_BUSY ) rc = SQLITE_OK; - assert( rc==SQLITE_OK ); - p->bClientCkptRequired = 0; - } - } - - if( rc==SQLITE_OK ){ - int nMs = (get_timer() - t0) / 1000; - send_message(p, "ok (%d/%d SQLITE_BUSY)\n", nBusy, j); - if( p->nRepeat<=0 ){ - send_message(p, "### ok %d busy %d ms %d commit-ms %d\n", - j-nBusy, nBusy, nMs, (int)(tCommit / 1000) - ); - } - } - clear_sql(p); - - pthread_mutex_lock(&g.ckpt_mutex); - g.nRun--; - pthread_mutex_unlock(&g.ckpt_mutex); - - return rc; -} - -static int handle_dot_command(ClientCtx *p, const char *zCmd, int nCmd){ - int n; - int rc = 0; - const char *z = &zCmd[1]; - const char *zArg; - int nArg; - - assert( zCmd[0]=='.' 
); - for(n=0; n<(nCmd-1); n++){ - if( is_whitespace(z[n]) ) break; - } - - zArg = &z[n]; - nArg = nCmd-n; - trim_string(&zArg, &nArg); - - if( n>=1 && n<=4 && 0==strncmp(z, "list", n) ){ - int i; - for(i=0; rc==0 && inPrepare; i++){ - const char *zSql = sqlite3_sql(p->aPrepare[i].pStmt); - int nSql = strlen(zSql); - trim_string(&zSql, &nSql); - rc = send_message(p, "%d: %.*s\n", i, nSql, zSql); - } - } - - else if( n>=1 && n<=4 && 0==strncmp(z, "quit", n) ){ - rc = -1; - } - - else if( n>=2 && n<=7 && 0==strncmp(z, "repeats", n) ){ - if( nArg ){ - p->nRepeat = strtol(zArg, 0, 0); - if( p->nRepeat>0 ) p->nSecond = 0; - } - rc = send_message(p, "ok (repeat=%d)\n", p->nRepeat); - } - - else if( n>=2 && n<=3 && 0==strncmp(z, "run", n) ){ - rc = handle_run_command(p); - } - - else if( n>=2 && n<=7 && 0==strncmp(z, "seconds", n) ){ - if( nArg ){ - p->nSecond = strtol(zArg, 0, 0); - if( p->nSecond>0 ) p->nRepeat = 0; - } - rc = send_message(p, "ok (seconds=%d)\n", p->nSecond); - } - - else if( n>=1 && n<=12 && 0==strncmp(z, "mutex_commit", n) ){ - rc = handle_some_sql(p, "COMMIT;", 7); - if( rc==SQLITE_OK ){ - p->aPrepare[p->nPrepare-1].flags |= TSERVER_CLIENTSQL_MUTEX; - } - } - - else if( n>=1 && n<=10 && 0==strncmp(z, "checkpoint", n) ){ - if( nArg ){ - p->nClientThreshold = strtol(zArg, 0, 0); - } - rc = send_message(p, "ok (checkpoint=%d)\n", p->nClientThreshold); - } - - else if( n>=2 && n<=4 && 0==strncmp(z, "stop", n) ){ - sqlite3_close(g.db); - exit(0); - } - - else if( n>=2 && n<=15 && 0==strncmp(z, "integrity_check", n) ){ - rc = handle_some_sql(p, "PRAGMA integrity_check;", 23); - if( rc==SQLITE_OK ){ - p->aPrepare[p->nPrepare-1].flags |= TSERVER_CLIENTSQL_INTEGRITY; - } - } - - else{ - send_message(p, - "unrecognized dot command: %.*s\n" - "should be \"list\", \"run\", \"repeats\", \"mutex_commit\", " - "\"checkpoint\", \"integrity_check\" or \"seconds\"\n", n, z - ); - rc = 1; - } - - return rc; -} - -static void *handle_client(void *pArg){ - char zCmd[32*1024]; /* Read buffer */ - int nCmd = 0; /* Valid bytes in zCmd[] */ - int res; /* Result of read() call */ - int rc = SQLITE_OK; - - ClientCtx ctx; - memset(&ctx, 0, sizeof(ClientCtx)); - - ctx.fd = (int)(intptr_t)pArg; - ctx.nRepeat = 1; - rc = sqlite3_open_v2(g.zDatabaseName, &ctx.db, - SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, g.zVfs - ); - if( rc!=SQLITE_OK ){ - fprintf(stderr, "sqlite3_open(): %s\n", sqlite3_errmsg(ctx.db)); - return 0; - } - sqlite3_create_function( - ctx.db, "usleep", 1, SQLITE_UTF8, (void*)sqlite3_vfs_find(0), - usleepFunc, 0, 0 - ); - - /* Register the wal-hook with the new client connection */ - sqlite3_wal_hook(ctx.db, clientWalHook, (void*)&ctx); - - while( rc==SQLITE_OK ){ - int i; - int iStart; - int nConsume; - res = read(ctx.fd, &zCmd[nCmd], sizeof(zCmd)-nCmd-1); - if( res<=0 ) break; - nCmd += res; - if( nCmd>=sizeof(zCmd)-1 ){ - fprintf(stderr, "oversized (>32KiB) message\n"); - res = 0; - break; - } - zCmd[nCmd] = '\0'; - - do { - nConsume = 0; - - /* Gobble up any whitespace */ - iStart = 0; - while( is_whitespace(zCmd[iStart]) ) iStart++; - - if( zCmd[iStart]=='.' ){ - /* This is a dot-command. Search for end-of-line. 
*/ - for(i=iStart; i0 ){ - nCmd = nCmd-nConsume; - if( nCmd>0 ){ - memmove(zCmd, &zCmd[nConsume], nCmd); - } - } - }while( rc==SQLITE_OK && nConsume>0 ); - } - - fprintf(stdout, "Client %d disconnects (rc=%d)\n", ctx.fd, rc); - fflush(stdout); - close(ctx.fd); - clear_sql(&ctx); - sqlite3_free(ctx.aPrepare); - sqlite3_close(ctx.db); - return 0; -} - -static void usage(const char *zExec){ - fprintf(stderr, "Usage: %s ?-vfs VFS? DATABASE\n", zExec); - exit(1); -} - -int main(int argc, char *argv[]) { - int sfd; - int rc; - int yes = 1; - struct sockaddr_in server; - int i; - - /* Ignore SIGPIPE. Otherwise the server exits if a client disconnects - ** abruptly. */ - signal(SIGPIPE, SIG_IGN); - - g.nThreshold = TSERVER_DEFAULT_CHECKPOINT_THRESHOLD; - if( (argc%2) ) usage(argv[0]); - for(i=1; i<(argc-1); i+=2){ - int n = strlen(argv[i]); - if( n>=2 && 0==sqlite3_strnicmp("-walautocheckpoint", argv[i], n) ){ - g.nThreshold = strtol(argv[i+1], 0, 0); - }else - if( n>=2 && 0==sqlite3_strnicmp("-vfs", argv[i], n) ){ - g.zVfs = argv[i+1]; - } - } - g.zDatabaseName = argv[argc-1]; - - g.commit_mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - pthread_mutex_init(&g.ckpt_mutex, 0); - pthread_cond_init(&g.ckpt_cond, 0); - - rc = sqlite3_open_v2(g.zDatabaseName, &g.db, - SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, g.zVfs - ); - if( rc!=SQLITE_OK ){ - fprintf(stderr, "sqlite3_open(): %s\n", sqlite3_errmsg(g.db)); - return 1; - } - - rc = sqlite3_exec(g.db, "SELECT * FROM sqlite_master", 0, 0, 0); - if( rc!=SQLITE_OK ){ - fprintf(stderr, "sqlite3_exec(): %s\n", sqlite3_errmsg(g.db)); - return 1; - } - - sfd = socket(AF_INET, SOCK_STREAM, 0); - if( sfd<0 ){ - fprintf(stderr, "socket() failed\n"); - return 1; - } - - rc = setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)); - if( rc<0 ){ - perror("setsockopt"); - return 1; - } - - memset(&server, 0, sizeof(server)); - server.sin_family = AF_INET; - server.sin_addr.s_addr = inet_addr("127.0.0.1"); - server.sin_port = htons(TSERVER_PORTNUMBER); - - rc = bind(sfd, (struct sockaddr *)&server, sizeof(struct sockaddr)); - if( rc<0 ){ - fprintf(stderr, "bind() failed\n"); - return 1; - } - - rc = listen(sfd, 8); - if( rc<0 ){ - fprintf(stderr, "listen() failed\n"); - return 1; - } - - while( 1 ){ - pthread_t tid; - int cfd = accept(sfd, NULL, NULL); - if( cfd<0 ){ - perror("accept()"); - return 1; - } - - fprintf(stdout, "Client %d connects\n", cfd); - fflush(stdout); - rc = pthread_create(&tid, NULL, handle_client, (void*)(intptr_t)cfd); - if( rc!=0 ){ - perror("pthread_create()"); - return 1; - } - - pthread_detach(tid); - } - - return 0; -} DELETED tool/tserver_test.tcl Index: tool/tserver_test.tcl ================================================================== --- tool/tserver_test.tcl +++ /dev/null @@ -1,304 +0,0 @@ -#!/usr/bin/tclsh -# -# This script is used to run the performance test cases described in -# README-server-edition.html. -# - - -package require sqlite3 - -# Default values for command line switches: -set O(-database) "" -set O(-rows) [expr 5000000] -set O(-mode) wal2 -set O(-tserver) "./tserver" -set O(-seconds) 20 -set O(-writers) 1 -set O(-readers) 0 -set O(-integrity) 0 -set O(-verbose) 0 -set O(-external) 0 - - -proc error_out {err} { - puts stderr $err - exit -1 -} - -proc usage {} { - puts stderr "Usage: $::argv0 ?OPTIONS?" 
- puts stderr "" - puts stderr "Where OPTIONS are:" - puts stderr " -database (default: test.db)" - puts stderr " -mode (default: wal2)" - puts stderr " -rows (default: 5000000)" - puts stderr " -tserver (default: ./tserver)" - puts stderr " -seconds