Index: Makefile.in
==================================================================
--- Makefile.in
+++ Makefile.in
@@ -445,10 +445,11 @@
$(TOP)/ext/misc/nextchar.c \
$(TOP)/ext/misc/normalize.c \
$(TOP)/ext/misc/percentile.c \
$(TOP)/ext/misc/prefixes.c \
$(TOP)/ext/misc/qpvtab.c \
+ $(TOP)/ext/misc/randomjson.c \
$(TOP)/ext/misc/regexp.c \
$(TOP)/ext/misc/remember.c \
$(TOP)/ext/misc/series.c \
$(TOP)/ext/misc/spellfix.c \
$(TOP)/ext/misc/totype.c \
@@ -598,10 +599,11 @@
SHELL_OPT += -DSQLITE_ENABLE_STMTVTAB
SHELL_OPT += -DSQLITE_ENABLE_DBPAGE_VTAB
SHELL_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB
SHELL_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB
SHELL_OPT += -DSQLITE_ENABLE_OFFSET_SQL_FUNC
+SHELL_OPT += -DSQLITE_STRICT_SUBTYPE=1
FUZZERSHELL_OPT =
FUZZCHECK_OPT += -I$(TOP)/test
FUZZCHECK_OPT += -I$(TOP)/ext/recover
FUZZCHECK_OPT += \
-DSQLITE_OSS_FUZZ \
@@ -628,18 +630,21 @@
-DSQLITE_ENABLE_STMT_SCANSTATUS \
-DSQLITE_MAX_MEMORY=50000000 \
-DSQLITE_MAX_MMAP_SIZE=0 \
-DSQLITE_OMIT_LOAD_EXTENSION \
-DSQLITE_PRINTF_PRECISION_LIMIT=1000 \
- -DSQLITE_PRIVATE=""
+ -DSQLITE_PRIVATE="" \
+ -DSQLITE_STRICT_SUBTYPE=1 \
+ -DSQLITE_STATIC_RANDOMJSON
FUZZCHECK_SRC += $(TOP)/test/fuzzcheck.c
FUZZCHECK_SRC += $(TOP)/test/ossfuzz.c
FUZZCHECK_SRC += $(TOP)/test/fuzzinvariants.c
FUZZCHECK_SRC += $(TOP)/ext/recover/dbdata.c
FUZZCHECK_SRC += $(TOP)/ext/recover/sqlite3recover.c
FUZZCHECK_SRC += $(TOP)/test/vt02.c
+FUZZCHECK_SRC += $(TOP)/ext/misc/randomjson.c
DBFUZZ_OPT =
ST_OPT = -DSQLITE_OS_KV_OPTIONAL
# In wasi-sdk builds, disable the CLI shell build in the "all" target.
@@ -707,10 +712,25 @@
fuzzcheck-asan$(TEXE): $(FUZZCHECK_SRC) sqlite3.c sqlite3.h $(FUZZCHECK_DEP)
$(LTLINK) -o $@ -fsanitize=address $(FUZZCHECK_OPT) $(FUZZCHECK_SRC) sqlite3.c $(TLIBS)
fuzzcheck-ubsan$(TEXE): $(FUZZCHECK_SRC) sqlite3.c sqlite3.h $(FUZZCHECK_DEP)
$(LTLINK) -o $@ -fsanitize=undefined $(FUZZCHECK_OPT) $(FUZZCHECK_SRC) sqlite3.c $(TLIBS)
+
+# Usage: FUZZDB=filename make run-fuzzcheck
+#
+# Where filename is a fuzzcheck database, this target builds and runs
+# fuzzcheck, fuzzcheck-asan, and fuzzcheck-ubsan on that database.
+#
+# FUZZDB can be a glob pattern of two or more databases. Example:
+#
+# FUZZDB=test/fuzzdata*.db make run-fuzzcheck
+#
+run-fuzzcheck: fuzzcheck$(TEXE) fuzzcheck-asan$(TEXE) fuzzcheck-ubsan$(TEXE)
+ @if test "$(FUZZDB)" = ""; then echo 'ERROR: No FUZZDB specified. Rerun with FUZZDB=filename'; exit 1; fi
+ ./fuzzcheck$(TEXE) --spinner $(FUZZDB)
+ ./fuzzcheck-asan$(TEXE) --spinner $(FUZZDB)
+ ./fuzzcheck-ubsan$(TEXE) --spinner $(FUZZDB)
ossshell$(TEXE): $(TOP)/test/ossfuzz.c $(TOP)/test/ossshell.c sqlite3.c sqlite3.h
$(LTLINK) -o $@ $(FUZZCHECK_OPT) $(TOP)/test/ossshell.c \
$(TOP)/test/ossfuzz.c sqlite3.c $(TLIBS)
@@ -1133,34 +1153,34 @@
./mkkeywordhash$(BEXE) >keywordhash.h
# Source files that go into making shell.c
SHELL_SRC = \
$(TOP)/src/shell.c.in \
- $(TOP)/ext/misc/appendvfs.c \
+ $(TOP)/ext/consio/console_io.c \
+ $(TOP)/ext/consio/console_io.h \
+ $(TOP)/ext/misc/appendvfs.c \
$(TOP)/ext/misc/completion.c \
- $(TOP)/ext/consio/console_io.c \
- $(TOP)/ext/consio/console_io.h \
- $(TOP)/ext/misc/decimal.c \
- $(TOP)/ext/misc/basexx.c \
- $(TOP)/ext/misc/base64.c \
- $(TOP)/ext/misc/base85.c \
+ $(TOP)/ext/misc/decimal.c \
+ $(TOP)/ext/misc/basexx.c \
+ $(TOP)/ext/misc/base64.c \
+ $(TOP)/ext/misc/base85.c \
$(TOP)/ext/misc/fileio.c \
- $(TOP)/ext/misc/ieee754.c \
- $(TOP)/ext/misc/regexp.c \
- $(TOP)/ext/misc/series.c \
+ $(TOP)/ext/misc/ieee754.c \
+ $(TOP)/ext/misc/regexp.c \
+ $(TOP)/ext/misc/series.c \
$(TOP)/ext/misc/shathree.c \
$(TOP)/ext/misc/sqlar.c \
- $(TOP)/ext/misc/uint.c \
+ $(TOP)/ext/misc/uint.c \
$(TOP)/ext/expert/sqlite3expert.c \
$(TOP)/ext/expert/sqlite3expert.h \
$(TOP)/ext/misc/zipfile.c \
$(TOP)/ext/misc/memtrace.c \
$(TOP)/ext/misc/pcachetrace.c \
$(TOP)/ext/recover/dbdata.c \
$(TOP)/ext/recover/sqlite3recover.c \
$(TOP)/ext/recover/sqlite3recover.h \
- $(TOP)/src/test_windirent.c
+ $(TOP)/src/test_windirent.c
shell.c: $(SHELL_SRC) $(TOP)/tool/mkshellc.tcl has_tclsh84
$(TCLSH_CMD) $(TOP)/tool/mkshellc.tcl >shell.c
@@ -1274,10 +1294,12 @@
TESTFIXTURE_FLAGS += -DSQLITE_DEFAULT_PAGE_SIZE=1024
TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_STMTVTAB
TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_DBPAGE_VTAB
TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_BYTECODE_VTAB
TESTFIXTURE_FLAGS += -DSQLITE_CKSUMVFS_STATIC
+TESTFIXTURE_FLAGS += -DSQLITE_STATIC_RANDOMJSON
+TESTFIXTURE_FLAGS += -DSQLITE_STRICT_SUBTYPE=1
TESTFIXTURE_SRC0 = $(TESTSRC2) libsqlite3.la
TESTFIXTURE_SRC1 = sqlite3.c
TESTFIXTURE_SRC = $(TESTSRC) $(TOP)/src/tclsqlite.c
TESTFIXTURE_SRC += $(TESTFIXTURE_SRC$(USE_AMALGAMATION))
Index: Makefile.msc
==================================================================
--- Makefile.msc
+++ Makefile.msc
@@ -1582,10 +1582,11 @@
$(TOP)\ext\misc\nextchar.c \
$(TOP)\ext\misc\normalize.c \
$(TOP)\ext\misc\percentile.c \
$(TOP)\ext\misc\prefixes.c \
$(TOP)\ext\misc\qpvtab.c \
+ $(TOP)\ext\misc\randomjson.c \
$(TOP)\ext\misc\regexp.c \
$(TOP)\ext\misc\remember.c \
$(TOP)\ext\misc\series.c \
$(TOP)\ext\misc\spellfix.c \
$(TOP)\ext\misc\totype.c \
@@ -1690,10 +1691,11 @@
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_FTS4=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_OFFSET_SQL_FUNC=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_STMT_SCANSTATUS=1
+SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_STRICT_SUBTYPE=1
!ENDIF
# <>
# Extra compiler options for various test tools.
#
@@ -1726,10 +1728,12 @@
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_MAX_MEMORY=50000000
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_MAX_MMAP_SIZE=0
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_OMIT_LOAD_EXTENSION
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_PRINTF_PRECISION_LIMIT=1000
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_PRIVATE=""
+FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_STRICT_SUBTYPE=1
+FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_STATIC_RANDOMJSON
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_MAX_MEMORY=50000000
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_PRINTF_PRECISION_LIMIT=1000
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_OMIT_LOAD_EXTENSION
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS4
@@ -1742,10 +1746,11 @@
FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\ossfuzz.c
FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\fuzzinvariants.c
FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\vt02.c
FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\recover\dbdata.c
FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\recover\sqlite3recover.c
+FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\misc\randomjson.c
OSSSHELL_SRC = $(TOP)\test\ossshell.c $(TOP)\test\ossfuzz.c
DBFUZZ_COMPILE_OPTS = -DSQLITE_THREADSAFE=0 -DSQLITE_OMIT_LOAD_EXTENSION
KV_COMPILE_OPTS = -DSQLITE_THREADSAFE=0 -DSQLITE_DIRECT_OVERFLOW_READ
ST_COMPILE_OPTS = -DSQLITE_THREADSAFE=0
@@ -1821,12 +1826,12 @@
$(SQLITE3EXE): shell.c $(SHELL_CORE_DEP) $(LIBRESOBJS) $(SHELL_CORE_SRC) $(SQLITE3H)
$(LTLINK) $(SHELL_COMPILE_OPTS) $(READLINE_FLAGS) shell.c $(SHELL_CORE_SRC) \
/link $(SQLITE3EXEPDB) $(LDFLAGS) $(LTLINKOPTS) $(SHELL_LINK_OPTS) $(LTLIBPATHS) $(LIBRESOBJS) $(LIBREADLINE) $(LTLIBS) $(TLIBS)
# <>
-sqldiff.exe: $(TOP)\tool\sqldiff.c $(SQLITE3C) $(SQLITE3H) $(LIBRESOBJS)
- $(LTLINK) $(NO_WARN) $(TOP)\tool\sqldiff.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) $(LIBRESOBJS)
+sqldiff.exe: $(TOP)\tool\sqldiff.c $(TOP)\ext\consio\console_io.h $(TOP)\ext\consio\console_io.c $(SQLITE3C) $(SQLITE3H) $(LIBRESOBJS)
+ $(LTLINK) $(NO_WARN) -I$(TOP)\ext\consio $(TOP)\tool\sqldiff.c $(TOP)\ext\consio\console_io.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) $(LIBRESOBJS)
dbhash.exe: $(TOP)\tool\dbhash.c $(SQLITE3C) $(SQLITE3H)
$(LTLINK) $(NO_WARN) $(TOP)\tool\dbhash.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS)
scrub.exe: $(TOP)\ext\misc\scrub.c $(SQLITE3C) $(SQLITE3H)
@@ -2431,10 +2436,12 @@
TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_STMTVTAB=1
TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_DBPAGE_VTAB=1
TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_BYTECODE_VTAB=1
TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_CKSUMVFS_STATIC=1
TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) $(TEST_CCONV_OPTS)
+TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_STATIC_RANDOMJSON
+TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_STRICT_SUBTYPE=1
TESTFIXTURE_SRC0 = $(TESTEXT) $(TESTSRC2)
TESTFIXTURE_SRC1 = $(TESTEXT) $(SQLITE3C)
!IF $(USE_AMALGAMATION)==0
TESTFIXTURE_SRC = $(TESTSRC) $(TOP)\src\tclsqlite.c $(TESTFIXTURE_SRC0)
@@ -2541,11 +2548,11 @@
.\testfixture.exe $(TOP)\test\main.test $(TESTOPTS)
shelltest: $(TESTPROGS)
.\testfixture.exe $(TOP)\test\permutations.test shell
-sqlite3_analyzer.c: $(SQLITE3C) $(SQLITE3H) $(TOP)\src\tclsqlite.c $(TOP)\tool\spaceanal.tcl $(TOP)\tool\mkccode.tcl $(TOP)\tool\sqlite3_analyzer.c.in $(SQLITE_TCL_DEP)
+sqlite3_analyzer.c: $(SQLITE3C) $(SQLITE3H) $(TOP)\src\tclsqlite.c $(TOP)\tool\spaceanal.tcl $(TOP)\tool\mkccode.tcl $(TOP)\tool\sqlite3_analyzer.c.in $(TOP)\ext\consio\console_io.h $(TOP)\ext\consio\console_io.c $(SQLITE_TCL_DEP)
$(TCLSH_CMD) $(TOP)\tool\mkccode.tcl $(TOP)\tool\sqlite3_analyzer.c.in > $@
sqlite3_analyzer.exe: sqlite3_analyzer.c $(LIBRESOBJS)
$(LTLINK) $(NO_WARN) -DBUILD_sqlite -I$(TCLINCDIR) sqlite3_analyzer.c \
/link $(LDFLAGS) $(LTLINKOPTS) $(TCLLIBPATHS) $(LTLIBPATHS) $(LIBRESOBJS) $(TCLLIBS) $(LTLIBS) $(TLIBS)
Index: README.md
==================================================================
--- README.md
+++ README.md
@@ -157,11 +157,11 @@
(Historical note: SQLite began as a Tcl
extension and only later escaped to the wild as an independent library.)
Test scripts and programs are found in the **test/** subdirectory.
Additional test code is found in other source repositories.
-See [How SQLite Is Tested](http://www.sqlite.org/testing.html) for
+See [How SQLite Is Tested](https://www.sqlite.org/testing.html) for
additional information.
The **ext/** subdirectory contains code for extensions. The
Full-text search engine is in **ext/fts3**. The R-Tree engine is in
**ext/rtree**. The **ext/misc** subdirectory contains a number of
@@ -181,11 +181,11 @@
fill it with all the source files needed to build SQLite, both
manually-edited files and automatically-generated files.
The SQLite interface is defined by the **sqlite3.h** header file, which is
generated from src/sqlite.h.in, ./manifest.uuid, and ./VERSION. The
-[Tcl script](http://www.tcl.tk) at tool/mksqlite3h.tcl does the conversion.
+[Tcl script](https://www.tcl.tk) at tool/mksqlite3h.tcl does the conversion.
The manifest.uuid file contains the SHA3 hash of the particular check-in
and is used to generate the SQLITE\_SOURCE\_ID macro. The VERSION file
contains the current SQLite version number. The sqlite3.h header is really
just a copy of src/sqlite.h.in with the source-id and version number inserted
at just the right spots. Note that comment text in the sqlite3.h file is
@@ -248,18 +248,18 @@
individual source file exceeds 32K lines in length.
## How It All Fits Together
SQLite is modular in design.
-See the [architectural description](http://www.sqlite.org/arch.html)
+See the [architectural description](https://www.sqlite.org/arch.html)
for details. Other documents that are useful in
helping to understand how SQLite works include the
-[file format](http://www.sqlite.org/fileformat2.html) description,
-the [virtual machine](http://www.sqlite.org/opcode.html) that runs
+[file format](https://www.sqlite.org/fileformat2.html) description,
+the [virtual machine](https://www.sqlite.org/opcode.html) that runs
prepared statements, the description of
-[how transactions work](http://www.sqlite.org/atomiccommit.html), and
-the [overview of the query planner](http://www.sqlite.org/optoverview.html).
+[how transactions work](https://www.sqlite.org/atomiccommit.html), and
+the [overview of the query planner](https://www.sqlite.org/optoverview.html).
Years of effort have gone into optimizing SQLite, both
for small size and high performance. And optimizations tend to result in
complex code. So there is a lot of complexity in the current SQLite
implementation. It will not be the easiest library in the world to hack.
@@ -351,9 +351,9 @@
accidental changes to the source tree, but malicious changes could be
hidden by also modifying the makefiles.
## Contacts
-The main SQLite website is [http:/sqlite.org/](http://sqlite.org/)
+The main SQLite website is [https://sqlite.org/](https://sqlite.org/)
with geographically distributed backups at
-[http://www2.sqlite.org/](http://www2.sqlite.org) and
-[http://www3.sqlite.org/](http://www3.sqlite.org).
+[https://www2.sqlite.org/](https://www2.sqlite.org) and
+[https://www3.sqlite.org/](https://www3.sqlite.org).
Index: VERSION
==================================================================
--- VERSION
+++ VERSION
@@ -1,1 +1,1 @@
-3.44.3
+3.46.0
Index: autoconf/Makefile.msc
==================================================================
--- autoconf/Makefile.msc
+++ autoconf/Makefile.msc
@@ -988,10 +988,11 @@
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_FTS4=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_OFFSET_SQL_FUNC=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION=1
SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_STMT_SCANSTATUS=1
+SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_STRICT_SUBTYPE=1
!ENDIF
# This is the default Makefile target. The objects listed here
# are what get build when you type just "make" with no arguments.
Index: autoconf/tea/configure.ac
==================================================================
--- autoconf/tea/configure.ac
+++ autoconf/tea/configure.ac
@@ -17,11 +17,11 @@
# so you can encode the package version directly into the source files.
# This will also define a special symbol for Windows (BUILD_
# so that we create the export library with the dll.
#-----------------------------------------------------------------------
-AC_INIT([sqlite],[3.44.3])
+AC_INIT([sqlite],[3.46.0])
#--------------------------------------------------------------------
# Call TEA_INIT as the first TEA_ macro to set up initial vars.
# This will define a ${TEA_PLATFORM} variable == "unix" or "windows"
# as well as PKG_LIB_FILE and PKG_STUB_LIB_FILE.
Index: configure
==================================================================
--- configure
+++ configure
@@ -1,8 +1,8 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for sqlite 3.44.3.
+# Generated by GNU Autoconf 2.69 for sqlite 3.46.0.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
#
#
@@ -724,12 +724,12 @@
MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='sqlite'
PACKAGE_TARNAME='sqlite'
-PACKAGE_VERSION='3.44.3'
-PACKAGE_STRING='sqlite 3.44.3'
+PACKAGE_VERSION='3.46.0'
+PACKAGE_STRING='sqlite 3.46.0'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
# Factoring default headers for most tests.
ac_includes_default="\
@@ -1470,11 +1470,11 @@
#
if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures sqlite 3.44.3 to adapt to many kinds of systems.
+\`configure' configures sqlite 3.46.0 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
To assign environment variables (e.g., CC, CFLAGS...), specify them as
VAR=VALUE. See below for descriptions of some of the useful variables.
@@ -1535,11 +1535,11 @@
_ACEOF
fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of sqlite 3.44.3:";;
+ short | recursive ) echo "Configuration of sqlite 3.46.0:";;
esac
cat <<\_ACEOF
Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
@@ -1666,11 +1666,11 @@
fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-sqlite configure 3.44.3
+sqlite configure 3.46.0
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
@@ -2085,11 +2085,11 @@
} # ac_fn_c_check_header_mongrel
cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by sqlite $as_me 3.44.3, which was
+It was created by sqlite $as_me 3.46.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
_ACEOF
@@ -12479,11 +12479,11 @@
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# Save the log message, to keep $0 and so on meaningful, and to
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by sqlite $as_me 3.44.3, which was
+This file was extended by sqlite $as_me 3.46.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
CONFIG_LINKS = $CONFIG_LINKS
@@ -12545,11 +12545,11 @@
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-sqlite config.status 3.44.3
+sqlite config.status 3.46.0
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
Copyright (C) 2012 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
Index: configure.ac
==================================================================
--- configure.ac
+++ configure.ac
@@ -72,11 +72,11 @@
# This configure.in file is easy to reuse on other projects. Just
# change the argument to AC_INIT. And disable any features that
# you don't need (for example BLT) by erasing or commenting out
# the corresponding code.
#
-AC_INIT([sqlite],[m4_esyscmd(cat VERSION | tr -d '\n')])
+AC_INIT([sqlite],m4_esyscmd(cat VERSION | tr -d '\n'))
dnl Make sure the local VERSION file matches this configure script
sqlite_version_sanity_check=`cat $srcdir/VERSION | tr -d '\n'`
if test "$PACKAGE_VERSION" != "$sqlite_version_sanity_check" ; then
AC_MSG_ERROR([configure script is out of date:
Index: doc/compile-for-windows.md
==================================================================
--- doc/compile-for-windows.md
+++ doc/compile-for-windows.md
@@ -1,9 +1,9 @@
# Notes On Compiling SQLite On Windows 11
Here are step-by-step instructions on how to build SQLite from
-canonical source on a new Windows 11 PC, as of 2023-08-16:
+canonical source on a new Windows 11 PC, as of 2023-11-01:
1. Install Microsoft Visual Studio. The free "community edition"
will work fine. Do a standard install for C++ development.
SQLite only needs the
"cl" compiler and the "nmake" build tool.
@@ -82,10 +82,22 @@
a command like:
`set PATH=c:\tcl32\bin;%PATH%`
+## Building a DLL
+
+The command the developers use for building the deliverable DLL on the
+[download page](https://sqlite.org/download.html) is as follows:
+
+> ~~~~
+nmake /f Makefile.msc sqlite3.dll USE_NATIVE_LIBPATHS=1 "OPTS=-DSQLITE_ENABLE_FTS3=1 -DSQLITE_ENABLE_FTS4=1 -DSQLITE_ENABLE_FTS5=1 -DSQLITE_ENABLE_RTREE=1 -DSQLITE_ENABLE_JSON1=1 -DSQLITE_ENABLE_GEOPOLY=1 -DSQLITE_ENABLE_SESSION=1 -DSQLITE_ENABLE_PREUPDATE_HOOK=1 -DSQLITE_ENABLE_SERIALIZE=1 -DSQLITE_ENABLE_MATH_FUNCTIONS=1"
+~~~~
+
+That command generates both the sqlite3.dll and sqlite3.def files. The same
+command works for both 32-bit and 64-bit builds.
+
## Statically Linking The TCL Library
Some utility programs associated with SQLite need to be linked
with TCL in order to function. The [sqlite3_analyzer.exe program](https://sqlite.org/sqlanalyze.html)
is an example. You can build as described above, and then
ADDED doc/jsonb.md
Index: doc/jsonb.md
==================================================================
--- /dev/null
+++ doc/jsonb.md
@@ -0,0 +1,290 @@
+# The JSONB Format
+
+This document describes SQLite's JSONB binary encoding of
+JSON.
+
+## 1.0 What Is JSONB?
+
+Beginning with version 3.45.0 (circa 2024-01-01), SQLite supports an
+alternative binary encoding of JSON which we call "JSONB". JSONB is
+a binary format that is stored as a BLOB.
+
+The advantage of JSONB over ordinary text RFC 8259 JSON is that JSONB
+is both slightly smaller (by between 5% and 10% in most cases) and
+can be processed in less than half the number of CPU cycles. The built-in
+[JSON SQL functions] of SQLite can accept either ordinary text JSON
+or the binary JSONB encoding for any of their JSON inputs.
+
+The "JSONB" name is inspired by [PostgreSQL](https://postgresql.org), but the
+on-disk format for SQLite's JSONB is not the same as PostgreSQL's.
+The two formats have the same name, but they have wildly different internal
+representations and are not in any way binary compatible.
+
+The central idea behind this JSONB specification is that each element
+begins with a header that includes the size and type of that element.
+The header takes the place of punctuation such as double-quotes,
+curly-brackets, square-brackets, commas, and colons. Since the size
+and type of each element is contained in its header, the element can
+be read faster since it is no longer necessary to carefully scan forward
+looking for the closing delimiter. The payload of JSONB is the same
+as for corresponding text JSON. The same payload bytes occur in the
+same order. The only real difference between JSONB and ordinary text
+JSON is that JSONB includes a binary header on
+each element and omits delimiter and separator punctuation.
+
+### 1.1 Internal Use Only
+
+The details of the JSONB format are not intended to be visible to application
+developers. Application developers should look at JSONB as an opaque BLOB
+used internally by SQLite. Nevertheless, we want the format to be backwards
+compatible across all future versions of SQLite. To that end, the format
+is documented by this file in the source tree. But this file should be
+used only by SQLite core developers, not by developers of applications
+that only use SQLite.
+
+## 2.0 The Purpose Of This Document
+
+JSONB is not intended as an external format to be used by
+applications. JSONB is designed for internal use by SQLite only.
+Programmers do not need to understand the JSONB format in order to
+use it effectively.
+Applications should access JSONB only through the [JSON SQL functions],
+not by looking at individual bytes of the BLOB.
+
+However, JSONB is intended to be portable and backwards compatible
+for all future versions of SQLite. In other words, you should not have
+to export and reimport your SQLite database files when you upgrade to
+a newer SQLite version. For that reason, the JSONB format needs to
+be well-defined.
+
+This document is therefore similar in purpose to the
+[SQLite database file format] document that describes the on-disk
+format of an SQLite database file. Applications are not expected
+to directly read and write the bits and bytes of SQLite database files.
+The SQLite database file format is carefully documented so that it
+can be stable and enduring. In the same way, the JSONB representation
+of JSON is documented here so that it too can be stable and enduring,
+not so that applications can read or write individual bytes.
+
+## 3.0 Encoding
+
+JSONB is a direct translation of the underlying text JSON. The difference
+is that JSONB uses a binary encoding that is faster to parse compared to
+the detailed syntax of text JSON.
+
+Each JSON element is encoded as a header and a payload. The header
+determines the type of the element (string, numeric, boolean, null, object, or
+array) and the size of the payload. The header can be between 1 and
+9 bytes in size. The payload can be any size from zero bytes up to the
+maximum allowed BLOB size.
+
+### 3.1 Payload Size
+
+The upper four bits of the first byte of the header determine the size of the
+header and possibly also the size of the payload.
+If the upper four bits have a value between 0 and 11, then the header is
+exactly one byte in size and the payload size is determined by those
+upper four bits. If the upper four bits have a value between 12 and 15,
+that means that the total header size is 2, 3, 5, or 9 bytes and the
+payload size is an unsigned big-endian integer that is contained in the
+subsequent bytes. The size integer is the one byte that follows the
+initial header byte if the upper four bits
+are 12, two bytes if the upper bits are 13, four bytes if the upper bits
+are 14, and eight bytes if the upper bits are 15. The current design
+of SQLite does not support BLOB values larger than 2GiB, so the eight-byte
+variant of the payload size integer will never be used by the current code.
+The eight-byte payload size integer is included in the specification
+to allow for future expansion.
+
+The header for an element does *not* need to be in its simplest
+form. For example, consider the JSON numeric value "`1`".
+That element can be encoded in five different ways:
+
+ * `0x13 0x31`
+ * `0xc3 0x01 0x31`
+ * `0xd3 0x00 0x01 0x31`
+ * `0xe3 0x00 0x00 0x00 0x01 0x31`
+ * `0xf3 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x01 0x31`
+
+The shortest encoding is preferred, of course, and usually happens with
+primitive elements such as numbers. However, the total size of an array
+or object might not be known exactly when the header of the element is
+first generated. It is convenient to reserve space for the largest
+possible header and then go back and fill in the correct payload size
+at the end. This technique can result in array or object headers that
+are larger than absolutely necessary.
+
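+To make the header rules above concrete, here is a minimal decoding sketch
+in C. It is an illustration only, not code from SQLite; the helper name
+jsonbDecodeHeader() is hypothetical.
+
+```c
+#include <stddef.h>
+
+/* Decode the header of the JSONB element starting at a[0].  On success,
+** write the element type (lower four bits of the first byte) into *pType
+** and the payload size into *pnPayload, and return the number of header
+** bytes.  Return 0 if the blob is too short to hold the header it claims.
+*/
+static int jsonbDecodeHeader(
+  const unsigned char *a,        /* The JSONB blob */
+  size_t n,                      /* Number of bytes in a[] */
+  int *pType,                    /* OUT: element type 0..15 */
+  unsigned long long *pnPayload  /* OUT: payload size in bytes */
+){
+  unsigned char szBits;
+  size_t nHdr, i;
+  if( n<1 ) return 0;
+  *pType = a[0] & 0x0f;
+  szBits = (unsigned char)(a[0]>>4);
+  if( szBits<=11 ){
+    *pnPayload = szBits;          /* payload sizes 0..11 fit in the first byte */
+    return 1;
+  }
+  nHdr = 1 + ((size_t)1 << (szBits-12));  /* 2, 3, 5, or 9 header bytes */
+  if( n<nHdr ) return 0;
+  *pnPayload = 0;
+  for(i=1; i<nHdr; i++){
+    *pnPayload = (*pnPayload<<8) | a[i];  /* big-endian size integer */
+  }
+  return (int)nHdr;
+}
+```
+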
+### 3.2 Element Type
+
+The least-significant four bits of the first byte of the header (the first
+byte masked against 0x0f) determine the element type.  The following codes
+are used:
+
+ * **0 (NULL)**:
+   The element is a JSON "null".  The payload size for a true JSON NULL
+   must be zero.  Future versions of SQLite might extend the JSONB format
+   with elements that have a zero element type but a non-zero size.  In
+   that way, legacy versions of SQLite will interpret the element as a NULL
+   for backwards compatibility while newer versions will interpret the
+   element in some other way.
+
+ * **1 (TRUE)**:
+   The element is a JSON "true".  The payload size must be zero for an
+   actual "true" value.  Elements with type 1 and a non-zero payload size
+   are reserved for future expansion.  Legacy implementations that see an
+   element type of 1 with a non-zero payload size should continue to
+   interpret that element as "true" for compatibility.
+
+ * **2 (FALSE)**:
+   The element is a JSON "false".  The payload size must be zero for an
+   actual "false" value.  Elements with type 2 and a non-zero payload size
+   are reserved for future expansion.  Legacy implementations that see an
+   element type of 2 with a non-zero payload size should continue to
+   interpret that element as "false" for compatibility.
+
+ * **3 (INT)**:
+   The element is a JSON integer value in the canonical
+   RFC 8259 format, without extensions.  The payload is the ASCII
+   text representation of that numeric value.
+
+ * **4 (INT5)**:
+   The element is a JSON integer value that is not in the
+   canonical format.  The payload is the ASCII
+   text representation of that numeric value.  Because the payload is in a
+   non-standard format, it will need to be translated when the JSONB is
+   converted into RFC 8259 text JSON.
+
+ * **5 (FLOAT)**:
+   The element is a JSON floating-point value in the canonical
+   RFC 8259 format, without extensions.  The payload is the ASCII
+   text representation of that numeric value.
+
+ * **6 (FLOAT5)**:
+   The element is a JSON floating-point value that is not in the
+   canonical format.  The payload is the ASCII
+   text representation of that numeric value.  Because the payload is in a
+   non-standard format, it will need to be translated when the JSONB is
+   converted into RFC 8259 text JSON.
+
+ * **7 (TEXT)**:
+   The element is a JSON string value that does not contain
+   any escapes nor any characters that need to be escaped for either SQL or
+   JSON.  The payload is the UTF8 text representation of the string value.
+   The payload does not include string delimiters.
+
+ * **8 (TEXTJ)**:
+   The element is a JSON string value that contains
+   RFC 8259 character escapes (such as "\n" or "\u0020").
+   Those escapes will need to be translated into actual UTF8 if this element
+   is [json_extract|extracted] into SQL.
+   The payload is the UTF8 text representation of the escaped string value.
+   The payload does not include string delimiters.
+
+ * **9 (TEXT5)**:
+   The element is a JSON string value that contains
+   character escapes, including some character escapes that are part of JSON5
+   and which are not found in the canonical RFC 8259 spec.
+   Those escapes will need to be translated into standard JSON prior to
+   rendering the JSON as text, or into their actual UTF8 characters if this
+   element is [json_extract|extracted] into SQL.
+   The payload is the UTF8 text representation of the escaped string value.
+   The payload does not include string delimiters.
+
+ * **10 (TEXTRAW)**:
+   The element is a JSON string value that contains
+   UTF8 characters that need to be escaped if this string is rendered into
+   standard JSON text.
+   The payload does not include string delimiters.
+
+ * **11 (ARRAY)**:
+   The element is a JSON array.  The payload contains
+   JSONB elements that comprise the values contained within the array.
+
+ * **12 (OBJECT)**:
+   The element is a JSON object.  The payload contains
+   pairs of JSONB elements that comprise entries for the JSON object.
+   The first element in each pair must be a string (types 7 through 10).
+   The second element of each pair may be of any type, including nested
+   arrays or objects.
+
+ * **13, 14, 15 (RESERVED)**:
+   Reserved for future expansion.  Legacy implementations that encounter
+   these element types should raise an error.
+
+Element types outside the range of 0 to 12 are reserved for future
+expansion. The current implementation raises an error if it sees an element
+type other than those listed above. However, future versions of SQLite might
+make use of the three remaining element types to implement indexing or similar
+optimizations, to speed up lookup against large JSON arrays and/or objects.
+
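+As a worked illustration of the headers and type codes above (derived from
+the rules in this document, not output captured from SQLite), the JSON
+document `{"x":1}` could be encoded, using shortest-form headers, as the
+five-byte BLOB `0x4c 0x17 0x78 0x13 0x31`:
+
+ * `0x4c`: OBJECT (type 12) with a 4-byte payload
+ * `0x17 0x78`: TEXT (type 7), 1-byte payload holding the key `x`
+ * `0x13 0x31`: INT (type 3), 1-byte payload holding the value `1`
+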
+### 3.3 Design Rationale For Element Types
+
+A key goal of JSONB is that it should be quick to translate
+to and from text JSON and/or be constructed from SQL values.
+When converting from text into JSONB, we do not want the
+converter subroutine to burn CPU cycles converting elements
+values into some standard format which might never be used.
+Format conversion is "lazy" - it is deferred until actually
+needed. This has implications for the JSONB format design:
+
+ 1. Numeric values are stored as text, not as numbers. The values are
+ a direct copy of the text JSON values from which they are derived.
+
+ 2. There are multiple element types depending on the details of value
+ formats. For example, INT is used for pure RFC-8259 integer
+ literals and INT5 exists for JSON5 extensions such as hexadecimal
+ notation. FLOAT is used for pure RFC-8259 floating point literals
+ and FLOAT5 is used for JSON5 extensions. There are four different
+ representations of strings, depending on where the string came from
+ and how special characters within the string are escaped.
+
+A second goal of JSONB is that it should be capable of serving as the
+"parse tree" for JSON when a JSON value is being processed by the
+various [JSON SQL functions] built into SQLite. Before JSONB was
+developed, operations such as [json_replace()] and [json_patch()]
+and similar worked in three stages:
+
+
+ 1. Translate the text JSON into an internal format that is
+ easier to scan and edit.
+ 2. Perform the requested operation on the JSON.
+ 3. Translate the internal format back into text.
+
+JSONB seeks to serve as the internal format directly - bypassing
+the first and third stages of that process. Since most of the CPU
+cycles are spent on the first and third stages, that suggests that
+JSONB processing will be much faster than text JSON processing.
+
+So when processing JSONB, only the second stage of the three-stage
+process is required. But when processing text JSON, it is still necessary
+to do stages one and three. If JSONB is to be used as the internal
+binary representation, this is yet another reason to store numeric
+values as text. Storing numbers as text minimizes the amount of
+conversion work needed for stages one and three. This is also why
+there are four different representations of text in JSONB. Different
+text representations are used for text coming from different sources
+(RFC-8259 JSON, JSON5, or SQL string values) and conversions only
+happen if and when they are actually needed.
+
+### 3.4 Valid JSONB BLOBs
+
+A valid JSONB BLOB consists of a single JSON element. The element must
+exactly fill the BLOB. This one element is often a JSON object or array
+and those usually contain additional elements as their payload, but the
+element can be a primitive value such as a string, number, boolean, or null.
+
+When the built-in JSON functions are attempting to determine if a BLOB
+argument is a JSONB or just a random BLOB, they look at the header of
+the outer element to see that it is well-formed and that the element
+completely fills the BLOB. If these conditions are met, then the BLOB
+is accepted as a JSONB value.
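+
+Reusing the hypothetical jsonbDecodeHeader() helper sketched in section 3.1,
+the acceptance test described above can be illustrated as follows.  This is
+a sketch of the rule only, not SQLite's actual implementation:
+
+```c
+/* Return true if the first n bytes of a[] could be accepted as a JSONB
+** value: the outermost element has a well-formed header, uses a defined
+** element type, and its payload exactly fills the rest of the BLOB. */
+static int jsonbElementFillsBlob(const unsigned char *a, size_t n){
+  int eType;
+  unsigned long long nPayload;
+  int nHdr = jsonbDecodeHeader(a, n, &eType, &nPayload);
+  if( nHdr==0 ) return 0;        /* header runs past the end of the BLOB */
+  if( eType>12 ) return 0;       /* reserved element types 13..15 */
+  return (unsigned long long)nHdr + nPayload == (unsigned long long)n;
+}
+```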
Index: doc/lemon.html
==================================================================
--- doc/lemon.html
+++ doc/lemon.html
@@ -681,10 +681,11 @@
When the generated parser has the choice of matching an input against
the wildcard token and some other token, the other token is always used.
The wildcard token is only matched if there are no alternatives.
+
+4.4.26 The %realloc and %free directives
+
+The %realloc and %free directives define functions
+that allocate and free heap memory. The signatures of these functions
+should be the same as the realloc() and free() functions from the standard
+C library.
+
+If both of these functions are defined
+then these functions are used to allocate and free
+memory for supplemental parser stack space, if the initial
+parse stack space is exceeded. The initial parser stack size
+is specified by either %stack_size or the
+-DYYSTACKDEPTH compile-time flag.
+
5.0 Error Processing
After extensive experimentation over several years, it has been
discovered that the error recovery strategy used by yacc is about
@@ -1221,10 +1238,11 @@
%parse_failure routine
is invoked and the parser resets itself to its start state, ready
to begin parsing a new file. This is what will happen at the very
first syntax error, of course, if there are no instances of the
"error" non-terminal in your grammar.
+
6.0 History of Lemon
Lemon was originally written by Richard Hipp sometime in the late
Index: doc/testrunner.md
==================================================================
--- doc/testrunner.md
+++ doc/testrunner.md
@@ -1,9 +1,29 @@
# The testrunner.tcl Script
+
+
+
# 1. Overview
testrunner.tcl is a Tcl script used to run multiple SQLite tests using
multiple jobs. It supports the following types of tests:
@@ -42,10 +62,11 @@
Sometimes testrunner.tcl uses the [testfixture] binary that it is run with
to run tests (see "Binary Tests" below). Sometimes it builds testfixture and
other binaries in specific configurations to test (see "Source Tests").
+
# 2. Binary Tests
The commands described in this section all run various combinations of the Tcl
test scripts using the [testfixture] binary used to run the testrunner.tcl
script (i.e. they do not invoke the compiler to build new binaries, or the
@@ -59,10 +80,11 @@
perhaps with various options.
The following sub-sections describe the various options that can be
passed to testrunner.tcl to test binary testfixture builds.
+
## 2.1. Organization of Tcl Tests
Tcl tests are stored in files that match the pattern *\*.test*. They are
found in both the $TOP/test/ directory, and in the various sub-directories
of the $TOP/ext/ directory of the source tree. Not all *\*.test* files
@@ -89,10 +111,11 @@
Running **all** tests is to run all tests in the full test set, plus a dozen
or so permutations. The specific permutations that are run as part of "all"
are defined in file *testrunner_data.tcl*.
+
## 2.2. Commands to Run Tests
To run the "veryquick" test set, use either of the following:
```
@@ -111,10 +134,16 @@
```
./testfixture $TESTDIR/testrunner.tcl fts5%
./testfixture $TESTDIR/testrunner.tcl 'fts5*'
```
+
+Strictly speaking, for a test to be run the pattern must match the script
+filename, not including the directory, using the rules of Tcl's
+\[string match\] command, except that before the matching is done, any "%"
+characters specified as part of the pattern are transformed to "\*".
+
To run "all" tests (full + permutations):
```
./testfixture $TESTDIR/testrunner.tcl all
@@ -139,10 +168,11 @@
./testfixture $TESTDIR/testrunner.tcl $PERMUTATION $PATH_TO_SCRIPT
```
TODO: An example instead of "$PERMUTATION" and $PATH\_TO\_SCRIPT?
+
# 3. Source Code Tests
The commands described in this section invoke the C compiler to build
binaries from the source tree, then use those binaries to run Tcl and
other tests. The advantages of this are that:
@@ -157,11 +187,12 @@
either a *testfixture* (or testfixture.exe) build, or with any other Tcl
shell that supports SQLite 3.31.1 or newer via "package require sqlite3".
TODO: ./configure + Makefile.msc build systems.
-## Commands to Run SQLite Tests
+
+## 3.1. Commands to Run SQLite Tests
The **mdevtest** command is equivalent to running the veryquick tests and
the [make fuzztest] target once for each of two --enable-all builds - one
with debugging enabled and one without:
@@ -199,11 +230,22 @@
```
tclsh $TESTDIR/testrunner.tcl release
```
-## Running ZipVFS Tests
+As with source code tests, one or more patterns
+may be appended to any of the above commands (mdevtest, sdevtest or release).
+In that case only Tcl tests (no fuzz or other tests) that match the specified
+pattern are run. For example, to run just the Tcl rtree tests in all
+builds and configurations supported by "release":
+
+```
+ tclsh $TESTDIR/testrunner.tcl release rtree%
+```
+
+
+## 3.2. Running ZipVFS Tests
testrunner.tcl can build a zipvfs-enabled testfixture and use it to run
tests from the Zipvfs project with the following command:
```
@@ -215,11 +257,12 @@
```
tclsh $TESTDIR/testrunner.tcl --zipvfs $PATH_TO_ZIPVFS mdevtest
```
-## Investigating Source Code Test Failures
+
+## 3.3. Investigating Source Code Test Failures
Investigating a test failure that occurs during source code testing is a
two step process:
1. Recreating the build configuration in which the test failed, and
@@ -242,13 +285,35 @@
The generated bash or \*.bat file script accepts a single argument - a makefile
target to build. This may be used either to run a [make] command test directly,
or else to build a testfixture (or testfixture.exe) binary with which to
run a Tcl test script, as described above.
+
+# 4. Extra testrunner.tcl Options
+
+The testrunner.tcl script options in this section may be used with both source
+code and binary tests.
+
+The **--buildonly** option instructs testrunner.tcl just to build the binaries
+required by a test, not to run any actual tests. For example:
+
+```
+ # Build binaries required by release test.
+ tclsh $TESTDIR/testrunner.tcl --buildonly release
+```
+
+The **--dryrun** option prevents testrunner.tcl from building any binaries
+or running any tests. Instead, it just writes the shell commands that it
+would normally execute into the testrunner.log file. Example:
+```
+ # Log the shell commands that make up the mdevtest test.
+ tclsh $TESTDIR/testrunner.tcl --dryrun mdevtest
+```
-# 4. Controlling CPU Core Utilization
+
+# 5. Controlling CPU Core Utilization
When running either binary or source code tests, testrunner.tcl reports the
number of jobs it intends to use to stdout. e.g.
```
@@ -272,13 +337,8 @@
testrunner.log and testrunner.db files:
```
$ ./testfixture $TESTDIR/testrunner.tcl njob $NEW_NUMBER_OF_JOBS
```
-
-
-
-
-
Index: ext/consio/console_io.c
==================================================================
--- ext/consio/console_io.c
+++ ext/consio/console_io.c
@@ -22,13 +22,18 @@
# include
# include
# include
# include
# include
-# include "console_io.h"
# include "sqlite3.h"
#endif
+#ifndef HAVE_CONSOLE_IO_H
+# include "console_io.h"
+#endif
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+#endif
#ifndef SQLITE_CIO_NO_TRANSLATE
# if (defined(_WIN32) || defined(WIN32)) && !SQLITE_OS_WINRT
# ifndef SHELL_NO_SYSINC
# include
@@ -122,10 +127,14 @@
ppst->pf = pf;
ppst->reachesConsole = ( (short)isatty(fileno(pf)) );
return ppst->reachesConsole;
# endif
}
+
+# ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+# define ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4)
+# endif
# if CIO_WIN_WC_XLATE
/* Define console modes for use with the Windows Console API. */
# define SHELL_CONI_MODE \
(ENABLE_ECHO_INPUT | ENABLE_INSERT_MODE | ENABLE_LINE_INPUT | 0x80 \
@@ -547,32 +556,31 @@
return z;
}
#endif /*!(defined(SQLITE_CIO_NO_UTF8SCAN)&&defined(SQLITE_CIO_NO_TRANSLATE))*/
#ifndef SQLITE_CIO_NO_TRANSLATE
-
-#ifdef CONSIO_SPUTB
+# ifdef CONSIO_SPUTB
SQLITE_INTERNAL_LINKAGE int
fPutbUtf8(FILE *pfO, const char *cBuf, int nAccept){
assert(pfO!=0);
-# if CIO_WIN_WC_XLATE
+# if CIO_WIN_WC_XLATE
PerStreamTags pst = PST_INITIALIZER; /* for unknown streams */
PerStreamTags *ppst = getEmitStreamInfo(0, &pst, &pfO);
if( pstReachesConsole(ppst) ){
int rv;
maybeSetupAsConsole(ppst, 1);
rv = conZstrEmit(ppst, cBuf, nAccept);
if( 0 == isKnownWritable(ppst->pf) ) restoreConsoleArb(ppst);
return rv;
}else {
-# endif
+# endif
return (int)fwrite(cBuf, 1, nAccept, pfO);
-# if CIO_WIN_WC_XLATE
+# if CIO_WIN_WC_XLATE
}
-# endif
+# endif
}
-#endif /* defined(CONSIO_SPUTB) */
+# endif
SQLITE_INTERNAL_LINKAGE int
oPutbUtf8(const char *cBuf, int nAccept){
FILE *pfOut;
PerStreamTags pst = PST_INITIALIZER; /* for unknown streams */
@@ -673,7 +681,11 @@
# if CIO_WIN_WC_XLATE
}
# endif
}
#endif /* !defined(SQLITE_CIO_NO_TRANSLATE) */
+
+#if defined(_MSC_VER)
+# pragma warning(default : 4204)
+#endif
#undef SHELL_INVALID_FILE_PTR
Index: ext/consio/console_io.h
==================================================================
--- ext/consio/console_io.h
+++ ext/consio/console_io.h
@@ -26,11 +26,11 @@
** When this .h file and its companion .c are directly incorporated into
** a source conglomeration (such as shell.c), the preprocessor symbol
** CIO_WIN_WC_XLATE is defined as 0 or 1, reflecting whether console I/O
** translation for Windows is effected for the build.
*/
-
+#define HAVE_CONSOLE_IO_H 1
#ifndef SQLITE_INTERNAL_LINKAGE
# define SQLITE_INTERNAL_LINKAGE extern /* external to translation unit */
# include
#else
# define SHELL_NO_SYSINC /* Better yet, modify mkshellc.tcl for this. */
@@ -164,12 +164,12 @@
** Returns the number of accepted char values.
*/
#ifdef CONSIO_SPUTB
SQLITE_INTERNAL_LINKAGE int
fPutbUtf8(FILE *pfOut, const char *cBuf, int nAccept);
-#endif
/* Like fPutbUtf8 except stream is always the designated output. */
+#endif
SQLITE_INTERNAL_LINKAGE int
oPutbUtf8(const char *cBuf, int nAccept);
/* Like fPutbUtf8 except stream is always the designated error. */
#ifdef CONSIO_EPUTB
SQLITE_INTERNAL_LINKAGE int
Index: ext/fts3/fts3.c
==================================================================
--- ext/fts3/fts3.c
+++ ext/fts3/fts3.c
@@ -4004,43 +4004,36 @@
/*
** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual
** table.
*/
-static int fts3Integrity(
+static int fts3IntegrityMethod(
sqlite3_vtab *pVtab, /* The virtual table to be checked */
const char *zSchema, /* Name of schema in which pVtab lives */
const char *zTabname, /* Name of the pVTab table */
int isQuick, /* True if this is a quick_check */
char **pzErr /* Write error message here */
){
Fts3Table *p = (Fts3Table*)pVtab;
- char *zSql;
- int rc;
- char *zErr = 0;
+ int rc = SQLITE_OK;
+ int bOk = 0;
- assert( pzErr!=0 );
- assert( *pzErr==0 );
UNUSED_PARAMETER(isQuick);
- zSql = sqlite3_mprintf(
- "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');",
- zSchema, zTabname, zTabname);
- if( zSql==0 ){
- return SQLITE_NOMEM;
- }
- rc = sqlite3_exec(p->db, zSql, 0, 0, &zErr);
- sqlite3_free(zSql);
- if( (rc&0xff)==SQLITE_CORRUPT ){
+ rc = sqlite3Fts3IntegrityCheck(p, &bOk);
+ assert( rc!=SQLITE_CORRUPT_VTAB );
+ if( rc==SQLITE_ERROR || (rc&0xFF)==SQLITE_CORRUPT ){
+ *pzErr = sqlite3_mprintf("unable to validate the inverted index for"
+ " FTS%d table %s.%s: %s",
+ p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc));
+ if( *pzErr ) rc = SQLITE_OK;
+ }else if( rc==SQLITE_OK && bOk==0 ){
*pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s",
p->bFts4 ? 4 : 3, zSchema, zTabname);
- }else if( rc!=SQLITE_OK ){
- *pzErr = sqlite3_mprintf("unable to validate the inverted index for"
- " FTS%d table %s.%s: %s",
- p->bFts4 ? 4 : 3, zSchema, zTabname, zErr);
+ if( *pzErr==0 ) rc = SQLITE_NOMEM;
}
- sqlite3_free(zErr);
- return SQLITE_OK;
+ sqlite3Fts3SegmentsClose(p);
+ return rc;
}
static const sqlite3_module fts3Module = {
@@ -4066,11 +4059,11 @@
/* xRename */ fts3RenameMethod,
/* xSavepoint */ fts3SavepointMethod,
/* xRelease */ fts3ReleaseMethod,
/* xRollbackTo */ fts3RollbackToMethod,
/* xShadowName */ fts3ShadowName,
- /* xIntegrity */ fts3Integrity,
+ /* xIntegrity */ fts3IntegrityMethod,
};
/*
** This function is registered as the module destructor (called when an
** FTS3 enabled database connection is closed). It frees the memory
Index: ext/fts3/fts3Int.h
==================================================================
--- ext/fts3/fts3Int.h
+++ ext/fts3/fts3Int.h
@@ -650,8 +650,10 @@
int sqlite3FtsUnicodeIsalnum(int);
int sqlite3FtsUnicodeIsdiacritic(int);
#endif
int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*);
+
+int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk);
#endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */
#endif /* _FTSINT_H */
Index: ext/fts3/fts3_write.c
==================================================================
--- ext/fts3/fts3_write.c
+++ ext/fts3/fts3_write.c
@@ -5292,11 +5292,11 @@
** to false before returning.
**
** If an error occurs (e.g. an OOM or IO error), return an SQLite error
** code. The final value of *pbOk is undefined in this case.
*/
-static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
+int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){
int rc = SQLITE_OK; /* Return code */
u64 cksum1 = 0; /* Checksum based on FTS index contents */
u64 cksum2 = 0; /* Checksum based on %_content contents */
sqlite3_stmt *pAllLangid = 0; /* Statement to return all language-ids */
@@ -5370,11 +5370,16 @@
}
sqlite3_finalize(pStmt);
}
- *pbOk = (cksum1==cksum2);
+ if( rc==SQLITE_CORRUPT_VTAB ){
+ rc = SQLITE_OK;
+ *pbOk = 0;
+ }else{
+ *pbOk = (rc==SQLITE_OK && cksum1==cksum2);
+ }
return rc;
}
/*
** Run the integrity-check. If no error occurs and the current contents of
@@ -5410,11 +5415,11 @@
static int fts3DoIntegrityCheck(
Fts3Table *p /* FTS3 table handle */
){
int rc;
int bOk = 0;
- rc = fts3IntegrityCheck(p, &bOk);
+ rc = sqlite3Fts3IntegrityCheck(p, &bOk);
if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB;
return rc;
}
/*
Index: ext/fts5/extract_api_docs.tcl
==================================================================
--- ext/fts5/extract_api_docs.tcl
+++ ext/fts5/extract_api_docs.tcl
@@ -221,14 +221,16 @@
}
Fts5ExtensionApi {
set struct [get_fts5_struct $data "^struct Fts5ExtensionApi" "^.;"]
set map [list]
+ set lKey [list]
foreach {k v} [get_struct_members $data] {
if {[string match x* $k]==0} continue
- lappend map $k "$k"
+ lappend lKey $k
}
+ foreach k [lsort -decr $lKey] { lappend map $k "$k" }
output [string map $map $struct]
}
api {
get_api_docs $data
Index: ext/fts5/fts5.h
==================================================================
--- ext/fts5/fts5.h
+++ ext/fts5/fts5.h
@@ -86,23 +86,28 @@
**
** This function may be quite inefficient if used with an FTS5 table
** created with the "columnsize=0" option.
**
** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of columns in the table, SQLITE_RANGE is returned.
+**
+** Otherwise, this function attempts to retrieve the text of column iCol of
+** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
** of (*pz) and (*pn) are undefined.
**
** xPhraseCount:
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
+** If parameter iPhrase is less than zero, or greater than or equal to the
+** number of phrases in the current query, as returned by xPhraseCount,
+** 0 is returned. Otherwise, this function returns the number of tokens in
+** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
** the query within the current row. Return SQLITE_OK if successful, or
** an error code (i.e. SQLITE_NOMEM) if an error occurs.
@@ -114,16 +119,17 @@
**
** xInst:
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
+** output by xInstCount(). If iIdx is less than zero or greater than
+** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. Returns SQLITE_OK if successful, or an error
-** code (i.e. SQLITE_NOMEM) if an error occurs.
+** first token of the phrase. SQLITE_OK is returned if successful, or an
+** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
**
** xRowid:
@@ -144,10 +150,14 @@
** row visited, the callback function passed as the fourth argument
** is invoked. The context and API objects passed to the callback
** function may be used to access the properties of each matched row.
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
+**
+** If parameter iPhrase is less than zero, or greater than or equal to
+** the number of phrases in the query, as returned by xPhraseCount(),
+** this function returns SQLITE_RANGE.
**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
** Otherwise, the error code is propagated upwards.
@@ -259,13 +269,46 @@
** significantly more efficient than those alternatives when used with
** "detail=column" tables.
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
+**
+** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase iPhrase of the current
+** query. Before returning, output parameter *ppToken is set to point
+** to a buffer containing the requested token, and *pnToken to the
+** size of this buffer in bytes.
+**
+** If iPhrase or iToken are less than zero, or if iPhrase is greater than
+** or equal to the number of phrases in the query as reported by
+** xPhraseCount(), or if iToken is equal to or greater than the number of
+** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+** are both zeroed.
+**
+** The output text is not a copy of the query text that specified the
+** token. It is the output of the tokenizer module. For tokendata=1
+** tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase hit iIdx within the
+** current row. If iIdx is less than zero or greater than or equal to the
+** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
+** output variable (*ppToken) is set to point to a buffer containing the
+** matching document token, and (*pnToken) to the size of that buffer in
+** bytes. This API is not available if the specified token matches a
+** prefix query term. In that case both output variables are always set
+** to 0.
+**
+** The output text is not a copy of the document text that was tokenized.
+** It is the output of the tokenizer module. For tokendata=1 tables, this
+** includes any embedded 0x00 and trailing data.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
- int iVersion; /* Currently always set to 2 */
+ int iVersion; /* Currently always set to 3 */
void *(*xUserData)(Fts5Context*);
int (*xColumnCount)(Fts5Context*);
int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
@@ -296,10 +339,17 @@
int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+ /* Below this point are iVersion>=3 only */
+ int (*xQueryToken)(Fts5Context*,
+ int iPhrase, int iToken,
+ const char **ppToken, int *pnToken
+ );
+ int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*
** CUSTOM AUXILIARY FUNCTIONS
*************************************************************************/
Index: ext/fts5/fts5Int.h
==================================================================
--- ext/fts5/fts5Int.h
+++ ext/fts5/fts5Int.h
@@ -194,10 +194,11 @@
int eContent; /* An FTS5_CONTENT value */
int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */
char *zContent; /* content table */
char *zContentRowid; /* "content_rowid=" option value */
int bColumnsize; /* "columnsize=" option value (dflt==1) */
+ int bTokendata; /* "tokendata=" option value (dflt==0) */
int eDetail; /* FTS5_DETAIL_XXX value */
char *zContentExprlist;
Fts5Tokenizer *pTok;
fts5_tokenizer *pTokApi;
int bLock; /* True when table is preparing statement */
@@ -382,21 +383,23 @@
#define sqlite3Fts5IterEof(x) ((x)->bEof)
/*
** Values used as part of the flags argument passed to IndexQuery().
*/
-#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
-#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
-#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
-#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
+#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
+#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
+#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
+#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
/* The following are used internally by the fts5_index.c module. They are
** defined here only to make it easier to avoid clashes with the flags
** above. */
-#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010
-#define FTS5INDEX_QUERY_NOOUTPUT 0x0020
-#define FTS5INDEX_QUERY_SKIPHASH 0x0040
+#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010
+#define FTS5INDEX_QUERY_NOOUTPUT 0x0020
+#define FTS5INDEX_QUERY_SKIPHASH 0x0040
+#define FTS5INDEX_QUERY_NOTOKENDATA 0x0080
+#define FTS5INDEX_QUERY_SCANONETERM 0x0100
/*
** Create/destroy an Fts5Index object.
*/
int sqlite3Fts5IndexOpen(Fts5Config *pConfig, int bCreate, Fts5Index**, char**);
@@ -461,10 +464,14 @@
int sqlite3Fts5IterNextScan(Fts5IndexIter*);
void *sqlite3Fts5StructureRef(Fts5Index*);
void sqlite3Fts5StructureRelease(void*);
int sqlite3Fts5StructureTest(Fts5Index*, void*);
+/*
+** Used by xInstToken():
+*/
+int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*);
/*
** Insert or remove data to or from the index. Each time a document is
** added to or removed from the index, this function is called one or more
** times.
@@ -537,10 +544,17 @@
int sqlite3Fts5IndexLoadConfig(Fts5Index *p);
int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin);
int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid);
+
+void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter*);
+
+/* Used to populate hash tables for xInstToken in detail=none/column mode. */
+int sqlite3Fts5IndexIterWriteTokendata(
+ Fts5IndexIter*, const char*, int, i64 iRowid, int iCol, int iOff
+);
/*
** End of interface to code in fts5_index.c.
**************************************************************************/
@@ -643,10 +657,11 @@
);
void sqlite3Fts5HashScanNext(Fts5Hash*);
int sqlite3Fts5HashScanEof(Fts5Hash*);
void sqlite3Fts5HashScanEntry(Fts5Hash *,
const char **pzTerm, /* OUT: term (nul-terminated) */
+ int *pnTerm, /* OUT: Size of term in bytes */
const u8 **ppDoclist, /* OUT: pointer to doclist */
int *pnDoclist /* OUT: size of doclist in bytes */
);
@@ -768,10 +783,14 @@
void sqlite3Fts5ExprCheckPoslists(Fts5Expr*, i64);
int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**);
int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *);
+
+int sqlite3Fts5ExprQueryToken(Fts5Expr*, int, int, const char**, int*);
+int sqlite3Fts5ExprInstToken(Fts5Expr*, i64, int, int, int, int, const char**, int*);
+void sqlite3Fts5ExprClearTokens(Fts5Expr*);
/*******************************************
** The fts5_expr.c API above this point is used by the other hand-written
** C code in this module. The interfaces below this point are called by
** the parser code in fts5parse.y. */
Index: ext/fts5/fts5_aux.c
==================================================================
--- ext/fts5/fts5_aux.c
+++ ext/fts5/fts5_aux.c
@@ -209,10 +209,18 @@
rc = fts5CInstIterNext(&p->iter);
}
}
if( iPos==p->iRangeEnd ){
+ if( p->bOpen ){
+ if( p->iter.iStart>=0 && iPos>=p->iter.iStart ){
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
+ p->iOff = iEndOff;
+ }
+ fts5HighlightAppend(&rc, p, p->zClose, -1);
+ p->bOpen = 0;
+ }
fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
p->iOff = iEndOff;
}
return rc;
@@ -242,12 +250,14 @@
memset(&ctx, 0, sizeof(HighlightContext));
ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]);
ctx.zClose = (const char*)sqlite3_value_text(apVal[2]);
ctx.iRangeEnd = -1;
rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn);
-
- if( ctx.zIn ){
+ if( rc==SQLITE_RANGE ){
+ sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC);
+ rc = SQLITE_OK;
+ }else if( ctx.zIn ){
if( rc==SQLITE_OK ){
rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter);
}
if( rc==SQLITE_OK ){
Index: ext/fts5/fts5_buffer.c
==================================================================
--- ext/fts5/fts5_buffer.c
+++ ext/fts5/fts5_buffer.c
@@ -66,10 +66,11 @@
u32 nData,
const u8 *pData
){
if( nData ){
if( fts5BufferGrow(pRc, pBuf, nData) ) return;
+ assert( pBuf->p!=0 );
memcpy(&pBuf->p[pBuf->n], pData, nData);
pBuf->n += nData;
}
}
@@ -167,17 +168,19 @@
const u8 *a, int n, /* Buffer containing poslist */
int *pi, /* IN/OUT: Offset within a[] */
i64 *piOff /* IN/OUT: Current offset */
){
int i = *pi;
+ assert( a!=0 || i==0 );
if( i>=n ){
/* EOF */
*piOff = -1;
return 1;
}else{
i64 iOff = *piOff;
u32 iVal;
+ assert( a!=0 );
fts5FastGetVarint32(a, i, iVal);
if( iVal<=1 ){
if( iVal==0 ){
*pi = i;
return 0;
Index: ext/fts5/fts5_config.c
==================================================================
--- ext/fts5/fts5_config.c
+++ ext/fts5/fts5_config.c
@@ -395,10 +395,20 @@
if( (rc = fts5ConfigSetEnum(aDetail, zArg, &pConfig->eDetail)) ){
*pzErr = sqlite3_mprintf("malformed detail=... directive");
}
return rc;
}
+
+ if( sqlite3_strnicmp("tokendata", zCmd, nCmd)==0 ){
+ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){
+ *pzErr = sqlite3_mprintf("malformed tokendata=... directive");
+ rc = SQLITE_ERROR;
+ }else{
+ pConfig->bTokendata = (zArg[0]=='1');
+ }
+ return rc;
+ }
*pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd);
return SQLITE_ERROR;
}
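For reference, the directive added above accepts only a literal '0' or '1'; anything else fails with the "malformed tokendata=..." error. A hedged usage sketch follows; the table and column names are examples only, not taken from the change.

#include <sqlite3.h>

/* Sketch: create an FTS5 table that retains full token data, enabling the
** xQueryToken()/xInstToken() extension APIs for queries against it. */
static int createTokendataTable(sqlite3 *db){
  return sqlite3_exec(db,
      "CREATE VIRTUAL TABLE ft USING fts5(doc, tokendata=1)",
      0, 0, 0
  );
}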
Index: ext/fts5/fts5_expr.c
==================================================================
--- ext/fts5/fts5_expr.c
+++ ext/fts5/fts5_expr.c
@@ -98,11 +98,13 @@
** or term prefix.
*/
struct Fts5ExprTerm {
u8 bPrefix; /* True for a prefix term */
u8 bFirst; /* True if token must be first in column */
- char *zTerm; /* nul-terminated term */
+ char *pTerm; /* Term data */
+ int nQueryTerm; /* Effective size of term in bytes */
+ int nFullTerm; /* Size of term in bytes incl. tokendata */
Fts5IndexIter *pIter; /* Iterator for this term */
Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */
};
/*
@@ -965,11 +967,11 @@
if( p->pIter ){
sqlite3Fts5IterClose(p->pIter);
p->pIter = 0;
}
rc = sqlite3Fts5IndexQuery(
- pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm),
+ pExpr->pIndex, p->pTerm, p->nQueryTerm,
(pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) |
(pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0),
pNear->pColset,
&p->pIter
);
@@ -1602,11 +1604,11 @@
int i;
for(i=0; i<pPhrase->nTerm; i++){
Fts5ExprTerm *pSyn;
Fts5ExprTerm *pNext;
Fts5ExprTerm *pTerm = &pPhrase->aTerm[i];
- sqlite3_free(pTerm->zTerm);
+ sqlite3_free(pTerm->pTerm);
sqlite3Fts5IterClose(pTerm->pIter);
for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){
pNext = pSyn->pSynonym;
sqlite3Fts5IterClose(pSyn->pIter);
fts5BufferFree((Fts5Buffer*)&pSyn[1]);
@@ -1700,10 +1702,11 @@
}
typedef struct TokenCtx TokenCtx;
struct TokenCtx {
Fts5ExprPhrase *pPhrase;
+ Fts5Config *pConfig;
int rc;
};
/*
** Callback for tokenizing terms used by ParseTerm().
@@ -1733,12 +1736,16 @@
pSyn = (Fts5ExprTerm*)sqlite3_malloc64(nByte);
if( pSyn==0 ){
rc = SQLITE_NOMEM;
}else{
memset(pSyn, 0, (size_t)nByte);
- pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer);
- memcpy(pSyn->zTerm, pToken, nToken);
+ pSyn->pTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer);
+ memcpy(pSyn->pTerm, pToken, nToken);
+ pSyn->nFullTerm = pSyn->nQueryTerm = nToken;
+ if( pCtx->pConfig->bTokendata ){
+ pSyn->nQueryTerm = (int)strlen(pSyn->pTerm);
+ }
pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym;
pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn;
}
}else{
Fts5ExprTerm *pTerm;
@@ -1759,11 +1766,15 @@
}
if( rc==SQLITE_OK ){
pTerm = &pPhrase->aTerm[pPhrase->nTerm++];
memset(pTerm, 0, sizeof(Fts5ExprTerm));
- pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
+ pTerm->pTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
+ pTerm->nFullTerm = pTerm->nQueryTerm = nToken;
+ if( pCtx->pConfig->bTokendata && rc==SQLITE_OK ){
+ pTerm->nQueryTerm = (int)strlen(pTerm->pTerm);
+ }
}
}
pCtx->rc = rc;
return rc;
@@ -1826,10 +1837,11 @@
int rc; /* Tokenize return code */
char *z = 0;
memset(&sCtx, 0, sizeof(TokenCtx));
sCtx.pPhrase = pAppend;
+ sCtx.pConfig = pConfig;
rc = fts5ParseStringFromToken(pToken, &z);
if( rc==SQLITE_OK ){
int flags = FTS5_TOKENIZE_QUERY | (bPrefix ? FTS5_TOKENIZE_PREFIX : 0);
int n;
@@ -1873,16 +1885,19 @@
Fts5Expr *pExpr,
int iPhrase,
Fts5Expr **ppNew
){
int rc = SQLITE_OK; /* Return code */
- Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */
+ Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */
Fts5Expr *pNew = 0; /* Expression to return via *ppNew */
- TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */
-
- pOrig = pExpr->apExprPhrase[iPhrase];
- pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
+ TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ rc = SQLITE_RANGE;
+ }else{
+ pOrig = pExpr->apExprPhrase[iPhrase];
+ pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
+ }
if( rc==SQLITE_OK ){
pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc,
sizeof(Fts5ExprPhrase*));
}
if( rc==SQLITE_OK ){
@@ -1891,11 +1906,11 @@
}
if( rc==SQLITE_OK ){
pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc,
sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*));
}
- if( rc==SQLITE_OK ){
+ if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){
Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset;
if( pColsetOrig ){
sqlite3_int64 nByte;
Fts5Colset *pColset;
nByte = sizeof(Fts5Colset) + (pColsetOrig->nCol-1) * sizeof(int);
@@ -1905,30 +1920,31 @@
}
pNew->pRoot->pNear->pColset = pColset;
}
}
- if( pOrig->nTerm ){
- int i; /* Used to iterate through phrase terms */
- for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
- int tflags = 0;
- Fts5ExprTerm *p;
- for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
- const char *zTerm = p->zTerm;
- rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm),
- 0, 0);
- tflags = FTS5_TOKEN_COLOCATED;
- }
- if( rc==SQLITE_OK ){
- sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
- sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst;
- }
- }
- }else{
- /* This happens when parsing a token or quoted phrase that contains
- ** no token characters at all. (e.g ... MATCH '""'). */
- sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
+ if( rc==SQLITE_OK ){
+ if( pOrig->nTerm ){
+ int i; /* Used to iterate through phrase terms */
+ sCtx.pConfig = pExpr->pConfig;
+ for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
+ int tflags = 0;
+ Fts5ExprTerm *p;
+ for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
+ rc = fts5ParseTokenize((void*)&sCtx,tflags,p->pTerm,p->nFullTerm,0,0);
+ tflags = FTS5_TOKEN_COLOCATED;
+ }
+ if( rc==SQLITE_OK ){
+ sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
+ sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst;
+ }
+ }
+ }else{
+ /* This happens when parsing a token or quoted phrase that contains
+ ** no token characters at all. (e.g ... MATCH '""'). */
+ sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
+ }
}
if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){
/* All the allocations succeeded. Put the expression object together. */
pNew->pIndex = pExpr->pIndex;
@@ -2294,15 +2310,17 @@
);
if( pPhrase ){
if( parseGrowPhraseArray(pParse) ){
fts5ExprPhraseFree(pPhrase);
}else{
+ Fts5ExprTerm *p = &pNear->apPhrase[0]->aTerm[ii];
+ Fts5ExprTerm *pTo = &pPhrase->aTerm[0];
pParse->apPhrase[pParse->nPhrase++] = pPhrase;
pPhrase->nTerm = 1;
- pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup(
- &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1
- );
+ pTo->pTerm = sqlite3Fts5Strndup(&pParse->rc, p->pTerm, p->nFullTerm);
+ pTo->nQueryTerm = p->nQueryTerm;
+ pTo->nFullTerm = p->nFullTerm;
pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING,
0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase)
);
}
}
@@ -2483,20 +2501,21 @@
Fts5ExprTerm *p;
char *zQuoted;
/* Determine the maximum amount of space required. */
for(p=pTerm; p; p=p->pSynonym){
- nByte += (int)strlen(pTerm->zTerm) * 2 + 3 + 2;
+ nByte += pTerm->nQueryTerm * 2 + 3 + 2;
}
zQuoted = sqlite3_malloc64(nByte);
if( zQuoted ){
int i = 0;
for(p=pTerm; p; p=p->pSynonym){
- char *zIn = p->zTerm;
+ char *zIn = p->pTerm;
+ char *zEnd = &zIn[p->nQueryTerm];
zQuoted[i++] = '"';
- while( *zIn ){
+ while( zIn<zEnd ){
if( *zIn=='"' ) zQuoted[i++] = '"';
zQuoted[i++] = *zIn++;
}
zQuoted[i++] = '"';
if( p->pSynonym ) zQuoted[i++] = '|';
@@ -2570,12 +2589,14 @@
for(i=0; i<pNear->nPhrase; i++){
Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
zRet = fts5PrintfAppend(zRet, " {");
for(iTerm=0; zRet && iTerm<pPhrase->nTerm; iTerm++){
- char *zTerm = pPhrase->aTerm[iTerm].zTerm;
- zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm);
+ Fts5ExprTerm *p = &pPhrase->aTerm[iTerm];
+ zRet = fts5PrintfAppend(zRet, "%s%.*s", iTerm==0?"":" ",
+ p->nQueryTerm, p->pTerm
+ );
if( pPhrase->aTerm[iTerm].bPrefix ){
zRet = fts5PrintfAppend(zRet, "*");
}
}
@@ -2971,10 +2992,21 @@
for(i=0; i<pColset->nCol; i++){
if( pColset->aiCol[i]==iCol ) return 1;
}
return 0;
}
+
+/*
+** pToken is a buffer nToken bytes in size that may or may not contain
+** an embedded 0x00 byte. If it does, return the number of bytes in
+** the buffer before the 0x00. If it does not, return nToken.
+*/
+static int fts5QueryTerm(const char *pToken, int nToken){
+ int ii;
+ for(ii=0; ii<nToken; ii++){
+ if( pToken[ii]==0x00 ) break;
+ }
+ return ii;
+}
Fts5Expr *pExpr = p->pExpr;
int i;
+ int nQuery = nToken;
+ i64 iRowid = pExpr->pRoot->iRowid;
UNUSED_PARAM2(iUnused1, iUnused2);
- if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE;
+ if( nQuery>FTS5_MAX_TOKEN_SIZE ) nQuery = FTS5_MAX_TOKEN_SIZE;
+ if( pExpr->pConfig->bTokendata ){
+ nQuery = fts5QueryTerm(pToken, nQuery);
+ }
if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++;
for(i=0; i<pExpr->nPhrase; i++){
- Fts5ExprTerm *pTerm;
+ Fts5ExprTerm *pT;
if( p->aPopulator[i].bOk==0 ) continue;
- for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){
- int nTerm = (int)strlen(pTerm->zTerm);
- if( (nTerm==nToken || (nTerm<nToken && pTerm->bPrefix))
- && memcmp(pTerm->zTerm, pToken, nTerm)==0
+ for(pT=&pExpr->apExprPhrase[i]->aTerm[0]; pT; pT=pT->pSynonym){
+ if( (pT->nQueryTerm==nQuery || (pT->nQueryTerm<nQuery && pT->bPrefix))
+ && memcmp(pT->pTerm, pToken, pT->nQueryTerm)==0
){
int rc = sqlite3Fts5PoslistWriterAppend(
&pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff
);
+ if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){
+ int iCol = p->iOff>>32;
+ int iTokOff = p->iOff & 0x7FFFFFFF;
+ rc = sqlite3Fts5IndexIterWriteTokendata(
+ pT->pIter, pToken, nToken, iRowid, iCol, iTokOff
+ );
+ }
if( rc ) return rc;
break;
}
}
}
@@ -3133,5 +3176,82 @@
*pnCollist = 0;
}
return rc;
}
+
+/*
+** Does the work of the fts5_api.xQueryToken() API method.
+*/
+int sqlite3Fts5ExprQueryToken(
+ Fts5Expr *pExpr,
+ int iPhrase,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5ExprPhrase *pPhrase = 0;
+
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ return SQLITE_RANGE;
+ }
+ pPhrase = pExpr->apExprPhrase[iPhrase];
+ if( iToken<0 || iToken>=pPhrase->nTerm ){
+ return SQLITE_RANGE;
+ }
+
+ *ppOut = pPhrase->aTerm[iToken].pTerm;
+ *pnOut = pPhrase->aTerm[iToken].nFullTerm;
+ return SQLITE_OK;
+}
+
+/*
+** Does the work of the fts5_api.xInstToken() API method.
+*/
+int sqlite3Fts5ExprInstToken(
+ Fts5Expr *pExpr,
+ i64 iRowid,
+ int iPhrase,
+ int iCol,
+ int iOff,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5ExprPhrase *pPhrase = 0;
+ Fts5ExprTerm *pTerm = 0;
+ int rc = SQLITE_OK;
+
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ return SQLITE_RANGE;
+ }
+ pPhrase = pExpr->apExprPhrase[iPhrase];
+ if( iToken<0 || iToken>=pPhrase->nTerm ){
+ return SQLITE_RANGE;
+ }
+ pTerm = &pPhrase->aTerm[iToken];
+ if( pTerm->bPrefix==0 ){
+ if( pExpr->pConfig->bTokendata ){
+ rc = sqlite3Fts5IterToken(
+ pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut
+ );
+ }else{
+ *ppOut = pTerm->pTerm;
+ *pnOut = pTerm->nFullTerm;
+ }
+ }
+ return rc;
+}
+
+/*
+** Clear the token mappings for all Fts5IndexIter objects managed by
+** the expression passed as the only argument.
+*/
+void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){
+ int ii;
+ for(ii=0; ii<pExpr->nPhrase; ii++){
+ Fts5ExprTerm *pT;
+ for(pT=&pExpr->apExprPhrase[ii]->aTerm[0]; pT; pT=pT->pSynonym){
+ sqlite3Fts5IndexIterClearTokendata(pT->pIter);
+ }
+ }
+}
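The nQueryTerm/nFullTerm split above assumes tokens of the form "query part" + 0x00 + trailing data, as emitted by a tokenizer written for tokendata=1 tables. The sketch below shows only that emission convention; the helper name, fixed buffer size and surrounding tokenizer are illustrative assumptions.

#include <string.h>
#include "sqlite3.h"

/* Sketch: emit a single token of the form "stem" 0x00 "original" so that
** query matching uses only the stem, while xInstToken() can later recover
** the full stored token. xToken is the callback passed to xTokenize(). */
static int emitTokenWithData(
  void *pCtx,                                      /* 1st argument for xToken */
  int (*xToken)(void*, int, const char*, int, int, int),
  const char *zStem, int nStem,                    /* Matchable part of token */
  const char *zData, int nData,                    /* Data stored after 0x00 */
  int iStart, int iEnd                             /* Byte offsets in document */
){
  char aBuf[128];
  if( nStem+1+nData>(int)sizeof(aBuf) ) return SQLITE_TOOBIG;
  memcpy(aBuf, zStem, nStem);
  aBuf[nStem] = '\0';
  memcpy(&aBuf[nStem+1], zData, nData);
  return xToken(pCtx, 0, aBuf, nStem+1+nData, iStart, iEnd);
}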
Index: ext/fts5/fts5_hash.c
==================================================================
--- ext/fts5/fts5_hash.c
+++ ext/fts5/fts5_hash.c
@@ -34,14 +34,19 @@
Fts5HashEntry **aSlot; /* Array of hash slots */
};
/*
** Each entry in the hash table is represented by an object of the
-** following type. Each object, its key (a nul-terminated string) and
-** its current data are stored in a single memory allocation. The
-** key immediately follows the object in memory. The position list
-** data immediately follows the key data in memory.
+** following type. Each object, its key, and its current data are stored
+** in a single memory allocation. The key immediately follows the object
+** in memory. The position list data immediately follows the key data
+** in memory.
+**
+** The key is Fts5HashEntry.nKey bytes in size. It consists of a single
+** byte identifying the index (either the main term index or a prefix-index),
+** followed by the term data. For example: "0token". There is no
+** nul-terminator - in this case nKey=6.
**
** The data that follows the key is in a similar, but not identical format
** to the doclist data stored in the database. It is:
**
** * Rowid, as a varint
@@ -172,12 +177,11 @@
for(i=0; i<pHash->nSlot; i++){
while( apOld[i] ){
unsigned int iHash;
Fts5HashEntry *p = apOld[i];
apOld[i] = p->pHashNext;
- iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p),
- (int)strlen(fts5EntryKey(p)));
+ iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), p->nKey);
p->pHashNext = apNew[iHash];
apNew[iHash] = p;
}
}
@@ -257,11 +261,11 @@
/* Attempt to locate an existing hash entry */
iHash = fts5HashKey2(pHash->nSlot, (u8)bByte, (const u8*)pToken, nToken);
for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
char *zKey = fts5EntryKey(p);
if( zKey[0]==bByte
- && p->nKey==nToken
+ && p->nKey==nToken+1
&& memcmp(&zKey[1], pToken, nToken)==0
){
break;
}
}
@@ -287,13 +291,13 @@
p->nAlloc = (int)nByte;
zKey = fts5EntryKey(p);
zKey[0] = bByte;
memcpy(&zKey[1], pToken, nToken);
assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) );
- p->nKey = nToken;
+ p->nKey = nToken+1;
zKey[nToken+1] = '\0';
- p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry);
+ p->nData = nToken+1 + sizeof(Fts5HashEntry);
p->pHashNext = pHash->aSlot[iHash];
pHash->aSlot[iHash] = p;
pHash->nEntry++;
/* Add the first rowid field to the hash-entry */
@@ -406,16 +410,21 @@
p2 = 0;
}else if( p2==0 ){
*ppOut = p1;
p1 = 0;
}else{
- int i = 0;
char *zKey1 = fts5EntryKey(p1);
char *zKey2 = fts5EntryKey(p2);
- while( zKey1[i]==zKey2[i] ) i++;
+ int nMin = MIN(p1->nKey, p2->nKey);
+
+ int cmp = memcmp(zKey1, zKey2, nMin);
+ if( cmp==0 ){
+ cmp = p1->nKey - p2->nKey;
+ }
+ assert( cmp!=0 );
- if( ((u8)zKey1[i])>((u8)zKey2[i]) ){
+ if( cmp>0 ){
/* p2 is smaller */
*ppOut = p2;
ppOut = &p2->pScanNext;
p2 = p2->pScanNext;
}else{
@@ -453,11 +462,11 @@
for(iSlot=0; iSlot<pHash->nSlot; iSlot++){
Fts5HashEntry *pIter;
for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){
if( pTerm==0
- || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm))
+ || (pIter->nKey>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm))
){
Fts5HashEntry *pEntry = pIter;
pEntry->pScanNext = 0;
for(i=0; ap[i]; i++){
pEntry = fts5HashEntryMerge(pEntry, ap[i]);
@@ -492,16 +501,15 @@
char *zKey = 0;
Fts5HashEntry *p;
for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
zKey = fts5EntryKey(p);
- assert( p->nKey+1==(int)strlen(zKey) );
- if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break;
+ if( nTerm==p->nKey && memcmp(zKey, pTerm, nTerm)==0 ) break;
}
if( p ){
- int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1;
+ int nHashPre = sizeof(Fts5HashEntry) + nTerm;
int nList = p->nData - nHashPre;
u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10));
if( pRet ){
Fts5HashEntry *pFaux = (Fts5HashEntry*)&pRet[nPre-nHashPre];
memcpy(&pRet[nPre], &((u8*)p)[nHashPre], nList);
@@ -558,22 +566,25 @@
}
void sqlite3Fts5HashScanEntry(
Fts5Hash *pHash,
const char **pzTerm, /* OUT: term (nul-terminated) */
+ int *pnTerm, /* OUT: Size of term in bytes */
const u8 **ppDoclist, /* OUT: pointer to doclist */
int *pnDoclist /* OUT: size of doclist in bytes */
){
Fts5HashEntry *p;
if( (p = pHash->pScan) ){
char *zKey = fts5EntryKey(p);
- int nTerm = (int)strlen(zKey);
+ int nTerm = p->nKey;
fts5HashAddPoslistSize(pHash, p, 0);
*pzTerm = zKey;
- *ppDoclist = (const u8*)&zKey[nTerm+1];
- *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1);
+ *pnTerm = nTerm;
+ *ppDoclist = (const u8*)&zKey[nTerm];
+ *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm);
}else{
*pzTerm = 0;
+ *pnTerm = 0;
*ppDoclist = 0;
*pnDoclist = 0;
}
}
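A short worked example of the key accounting changed above, illustrative only: for the term "token" stored in the main index the key is the six bytes '0','t','o','k','e','n' and nKey==6; previously nKey was 5 and the code relied on nul-termination, which cannot represent terms containing embedded 0x00 bytes.

#include <string.h>

/* Sketch: lay out a hash key as fts5_hash.c now stores it. bByte is the
** index byte ('0' for the main term index), pToken/nToken the term, which
** may itself contain 0x00 bytes. Returns the new Fts5HashEntry.nKey value;
** a trailing nul is still written but is no longer counted. */
static int buildHashKey(char *zKey, char bByte, const char *pToken, int nToken){
  zKey[0] = bByte;
  memcpy(&zKey[1], pToken, nToken);
  zKey[nToken+1] = '\0';
  return nToken+1;
}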
Index: ext/fts5/fts5_index.c
==================================================================
--- ext/fts5/fts5_index.c
+++ ext/fts5/fts5_index.c
@@ -321,10 +321,13 @@
typedef struct Fts5DoclistIter Fts5DoclistIter;
typedef struct Fts5SegWriter Fts5SegWriter;
typedef struct Fts5Structure Fts5Structure;
typedef struct Fts5StructureLevel Fts5StructureLevel;
typedef struct Fts5StructureSegment Fts5StructureSegment;
+typedef struct Fts5TokenDataIter Fts5TokenDataIter;
+typedef struct Fts5TokenDataMap Fts5TokenDataMap;
+typedef struct Fts5TombstoneArray Fts5TombstoneArray;
struct Fts5Data {
u8 *p; /* Pointer to buffer containing record */
int nn; /* Size of record in bytes */
int szLeaf; /* Size of leaf without page-index */
@@ -364,10 +367,11 @@
sqlite3_stmt *pWriter; /* "INSERT ... %_data VALUES(?,?)" */
sqlite3_stmt *pDeleter; /* "DELETE FROM %_data ... id>=? AND id<=?" */
sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */
sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */
sqlite3_stmt *pIdxSelect;
+ sqlite3_stmt *pIdxNextSelect;
int nRead; /* Total number of blocks read */
sqlite3_stmt *pDeleteFromIdx;
sqlite3_stmt *pDataVersion;
@@ -517,12 +521,11 @@
int flags; /* Mask of configuration flags */
int iLeafPgno; /* Current leaf page number */
Fts5Data *pLeaf; /* Current leaf data */
Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */
i64 iLeafOffset; /* Byte offset within current leaf */
- Fts5Data **apTombstone; /* Array of tombstone pages */
- int nTombstone;
+ Fts5TombstoneArray *pTombArray; /* Array of tombstone pages */
/* Next method */
void (*xNext)(Fts5Index*, Fts5SegIter*, int*);
/* The page and offset from which the current term was read. The offset
@@ -544,10 +547,19 @@
Fts5Buffer term; /* Current term */
i64 iRowid; /* Current rowid */
int nPos; /* Number of bytes in current position list */
u8 bDel; /* True if the delete flag is set */
};
+
+/*
+** Array of tombstone pages. Reference counted.
+*/
+struct Fts5TombstoneArray {
+ int nRef; /* Number of pointers to this object */
+ int nTombstone;
+ Fts5Data *apTombstone[1]; /* Array of tombstone pages */
+};
/*
** Argument is a pointer to an Fts5Data structure that contains a
** leaf page.
*/
@@ -589,13 +601,20 @@
** the smallest key overall. aFirst[0] is unused.
**
** poslist:
** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered.
** There is no way to tell if this is populated or not.
+**
+** pColset:
+** If not NULL, points to an object containing a set of column indices.
+** Only matches that occur in one of these columns will be returned.
+** The Fts5Iter does not own the Fts5Colset object, and so it is not
+** freed when the iterator is closed - it is owned by the upper layer.
*/
struct Fts5Iter {
Fts5IndexIter base; /* Base class containing output vars */
+ Fts5TokenDataIter *pTokenDataIter;
Fts5Index *pIndex; /* Index that owns this iterator */
Fts5Buffer poslist; /* Buffer containing current poslist */
Fts5Colset *pColset; /* Restrict matches to these columns */
@@ -608,11 +627,10 @@
i64 iSwitchRowid; /* Firstest rowid of other than aFirst[1] */
Fts5CResult *aFirst; /* Current merge state (see above) */
Fts5SegIter aSeg[1]; /* Array of segment iterators */
};
-
/*
** An instance of the following type is used to iterate through the contents
** of a doclist-index record.
**
@@ -1527,13 +1545,13 @@
for(iOff=pLvl->iOff; iOff<pData->nn; iOff++){
if( pData->p[iOff] ) break;
}
if( iOff<pData->nn ){
- i64 iVal;
+ u64 iVal;
pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1;
- iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal);
+ iOff += fts5GetVarint(&pData->p[iOff], &iVal);
pLvl->iRowid += iVal;
pLvl->iOff = iOff;
}else{
pLvl->bEof = 1;
}
@@ -1908,22 +1926,24 @@
pIter->xNext = fts5SegIterNext;
}
}
/*
-** Allocate a tombstone hash page array (pIter->apTombstone) for the
-** iterator passed as the second argument. If an OOM error occurs, leave
-** an error in the Fts5Index object.
+** Allocate a tombstone hash page array object (pIter->pTombArray) for
+** the iterator passed as the second argument. If an OOM error occurs,
+** leave an error in the Fts5Index object.
*/
static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){
const int nTomb = pIter->pSeg->nPgTombstone;
if( nTomb>0 ){
- Fts5Data **apTomb = 0;
- apTomb = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data)*nTomb);
- if( apTomb ){
- pIter->apTombstone = apTomb;
- pIter->nTombstone = nTomb;
+ int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray);
+ Fts5TombstoneArray *pNew;
+ pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte);
+ if( pNew ){
+ pNew->nTombstone = nTomb;
+ pNew->nRef = 1;
+ pIter->pTombArray = pNew;
}
}
}
/*
@@ -2176,19 +2196,20 @@
pIter->iLeafOffset = iOff;
fts5SegIterLoadTerm(p, pIter, nKeep);
}else{
const u8 *pList = 0;
const char *zTerm = 0;
+ int nTerm = 0;
int nList;
sqlite3Fts5HashScanNext(p->pHash);
- sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList);
if( pList==0 ) goto next_none_eof;
pIter->pLeaf->p = (u8*)pList;
pIter->pLeaf->nn = nList;
pIter->pLeaf->szLeaf = nList;
pIter->iEndofDoclist = nList;
- sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm);
+ sqlite3Fts5BufferSet(&p->rc,&pIter->term, nTerm, (u8*)zTerm);
pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
}
if( pbNewTerm ) *pbNewTerm = 1;
}else{
@@ -2250,26 +2271,26 @@
pIter->iLeafOffset = iOff;
}else if( pIter->pSeg==0 ){
const u8 *pList = 0;
const char *zTerm = 0;
+ int nTerm = 0;
int nList = 0;
assert( (pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm );
if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){
sqlite3Fts5HashScanNext(p->pHash);
- sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList);
}
if( pList==0 ){
fts5DataRelease(pIter->pLeaf);
pIter->pLeaf = 0;
}else{
pIter->pLeaf->p = (u8*)pList;
pIter->pLeaf->nn = nList;
pIter->pLeaf->szLeaf = nList;
pIter->iEndofDoclist = nList+1;
- sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm),
- (u8*)zTerm);
+ sqlite3Fts5BufferSet(&p->rc, &pIter->term, nTerm, (u8*)zTerm);
pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
*pbNewTerm = 1;
}
}else{
iOff = 0;
@@ -2651,11 +2672,11 @@
if( pIter->pLeaf ){
fts5LeafSeek(p, bGe, pIter, pTerm, nTerm);
}
- if( p->rc==SQLITE_OK && bGe==0 ){
+ if( p->rc==SQLITE_OK && (bGe==0 || (flags & FTS5INDEX_QUERY_SCANONETERM)) ){
pIter->flags |= FTS5_SEGITER_ONETERM;
if( pIter->pLeaf ){
if( flags & FTS5INDEX_QUERY_DESC ){
pIter->flags |= FTS5_SEGITER_REVERSE;
}
@@ -2667,11 +2688,13 @@
}
}
}
fts5SegIterSetNext(p, pIter);
- fts5SegIterAllocTombstone(p, pIter);
+ if( 0==(flags & FTS5INDEX_QUERY_SCANONETERM) ){
+ fts5SegIterAllocTombstone(p, pIter);
+ }
/* Either:
**
** 1) an error has occurred, or
** 2) the iterator points to EOF, or
@@ -2684,10 +2707,83 @@
|| fts5BufferCompareBlob(&pIter->term, pTerm, nTerm)==0 /* 3 */
|| (bGe && fts5BufferCompareBlob(&pIter->term, pTerm, nTerm)>0) /* 4 */
);
}
+
+/*
+** SQL used by fts5SegIterNextInit() to find the page to open.
+*/
+static sqlite3_stmt *fts5IdxNextStmt(Fts5Index *p){
+ if( p->pIdxNextSelect==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ fts5IndexPrepareStmt(p, &p->pIdxNextSelect, sqlite3_mprintf(
+ "SELECT pgno FROM '%q'.'%q_idx' WHERE "
+ "segid=? AND term>? ORDER BY term ASC LIMIT 1",
+ pConfig->zDb, pConfig->zName
+ ));
+
+ }
+ return p->pIdxNextSelect;
+}
+
+/*
+** This is similar to fts5SegIterSeekInit(), except that it initializes
+** the segment iterator to point to the first term following the page
+** with pToken/nToken on it.
+*/
+static void fts5SegIterNextInit(
+ Fts5Index *p,
+ const char *pTerm, int nTerm,
+ Fts5StructureSegment *pSeg, /* Description of segment */
+ Fts5SegIter *pIter /* Object to populate */
+){
+ int iPg = -1; /* Page of segment to open */
+ int bDlidx = 0;
+ sqlite3_stmt *pSel = 0; /* SELECT to find iPg */
+
+ pSel = fts5IdxNextStmt(p);
+ if( pSel ){
+ assert( p->rc==SQLITE_OK );
+ sqlite3_bind_int(pSel, 1, pSeg->iSegid);
+ sqlite3_bind_blob(pSel, 2, pTerm, nTerm, SQLITE_STATIC);
+
+ if( sqlite3_step(pSel)==SQLITE_ROW ){
+ i64 val = sqlite3_column_int64(pSel, 0);
+ iPg = (int)(val>>1);
+ bDlidx = (val & 0x0001);
+ }
+ p->rc = sqlite3_reset(pSel);
+ sqlite3_bind_null(pSel, 2);
+ if( p->rc ) return;
+ }
+
+ memset(pIter, 0, sizeof(*pIter));
+ pIter->pSeg = pSeg;
+ pIter->flags |= FTS5_SEGITER_ONETERM;
+ if( iPg>=0 ){
+ pIter->iLeafPgno = iPg - 1;
+ fts5SegIterNextPage(p, pIter);
+ fts5SegIterSetNext(p, pIter);
+ }
+ if( pIter->pLeaf ){
+ const u8 *a = pIter->pLeaf->p;
+ int iTermOff = 0;
+
+ pIter->iPgidxOff = pIter->pLeaf->szLeaf;
+ pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], iTermOff);
+ pIter->iLeafOffset = iTermOff;
+ fts5SegIterLoadTerm(p, pIter, 0);
+ fts5SegIterLoadNPos(p, pIter);
+ if( bDlidx ) fts5SegIterLoadDlidx(p, pIter);
+
+ assert( p->rc!=SQLITE_OK ||
+ fts5BufferCompareBlob(&pIter->term, (const u8*)pTerm, nTerm)>0
+ );
+ }
+}
+
/*
** Initialize the object pIter to point to term pTerm/nTerm within the
** in-memory hash table. If there is no such term in the hash-table, the
** iterator is set to EOF.
**
@@ -2710,12 +2806,11 @@
if( pTerm==0 || (flags & FTS5INDEX_QUERY_SCAN) ){
const u8 *pList = 0;
p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm);
- sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList);
- n = (z ? (int)strlen((const char*)z) : 0);
+ sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &n, &pList, &nList);
if( pList ){
pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data));
if( pLeaf ){
pLeaf->p = (u8*)pList;
}
@@ -2769,19 +2864,36 @@
fts5DataRelease(ap[ii]);
}
sqlite3_free(ap);
}
}
+
+/*
+** Decrement the ref-count of the object passed as the only argument. If it
+** reaches 0, free it and its contents.
+*/
+static void fts5TombstoneArrayDelete(Fts5TombstoneArray *p){
+ if( p ){
+ p->nRef--;
+ if( p->nRef<=0 ){
+ int ii;
+ for(ii=0; ii<p->nTombstone; ii++){
+ fts5DataRelease(p->apTombstone[ii]);
+ }
+ sqlite3_free(p);
+ }
+ }
+}
/*
** Zero the iterator passed as the only argument.
*/
static void fts5SegIterClear(Fts5SegIter *pIter){
fts5BufferFree(&pIter->term);
fts5DataRelease(pIter->pLeaf);
fts5DataRelease(pIter->pNextLeaf);
- fts5IndexFreeArray(pIter->apTombstone, pIter->nTombstone);
+ fts5TombstoneArrayDelete(pIter->pTombArray);
fts5DlidxIterFree(pIter->pDlidx);
sqlite3_free(pIter->aRowidOffset);
memset(pIter, 0, sizeof(Fts5SegIter));
}
@@ -3022,11 +3134,10 @@
if( bRev!=0 && pIter->iRowid<=iMatch ) break;
bMove = 1;
}while( p->rc==SQLITE_OK );
}
-
/*
** Free the iterator object passed as the second argument.
*/
static void fts5MultiIterFree(Fts5Iter *pIter){
if( pIter ){
@@ -3167,28 +3278,29 @@
** if there is no tombstone or if the iterator is already at EOF.
*/
static int fts5MultiIterIsDeleted(Fts5Iter *pIter){
int iFirst = pIter->aFirst[1].iFirst;
Fts5SegIter *pSeg = &pIter->aSeg[iFirst];
+ Fts5TombstoneArray *pArray = pSeg->pTombArray;
- if( pSeg->pLeaf && pSeg->nTombstone ){
+ if( pSeg->pLeaf && pArray ){
/* Figure out which page the rowid might be present on. */
- int iPg = ((u64)pSeg->iRowid) % pSeg->nTombstone;
+ int iPg = ((u64)pSeg->iRowid) % pArray->nTombstone;
assert( iPg>=0 );
/* If tombstone hash page iPg has not yet been loaded from the
** database, load it now. */
- if( pSeg->apTombstone[iPg]==0 ){
- pSeg->apTombstone[iPg] = fts5DataRead(pIter->pIndex,
+ if( pArray->apTombstone[iPg]==0 ){
+ pArray->apTombstone[iPg] = fts5DataRead(pIter->pIndex,
FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg)
);
- if( pSeg->apTombstone[iPg]==0 ) return 0;
+ if( pArray->apTombstone[iPg]==0 ) return 0;
}
return fts5IndexTombstoneQuery(
- pSeg->apTombstone[iPg],
- pSeg->nTombstone,
+ pArray->apTombstone[iPg],
+ pArray->nTombstone,
pSeg->iRowid
);
}
return 0;
@@ -3723,10 +3835,36 @@
}
}
}
}
+/*
+** All the component segment-iterators of pIter have been set up. This
+** functions finishes setup for iterator pIter itself.
+*/
+static void fts5MultiIterFinishSetup(Fts5Index *p, Fts5Iter *pIter){
+ int iIter;
+ for(iIter=pIter->nSeg-1; iIter>0; iIter--){
+ int iEq;
+ if( (iEq = fts5MultiIterDoCompare(pIter, iIter)) ){
+ Fts5SegIter *pSeg = &pIter->aSeg[iEq];
+ if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0);
+ fts5MultiIterAdvanced(p, pIter, iEq, iIter);
+ }
+ }
+ fts5MultiIterSetEof(pIter);
+ fts5AssertMultiIterSetup(p, pIter);
+
+ if( (pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter))
+ || fts5MultiIterIsDeleted(pIter)
+ ){
+ fts5MultiIterNext(p, pIter, 0, 0);
+ }else if( pIter->base.bEof==0 ){
+ Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst];
+ pIter->xSetOutputs(pIter, pSeg);
+ }
+}
/*
** Allocate a new Fts5Iter object.
**
** The new object will be used to iterate through data in structure pStruct.
@@ -3804,35 +3942,16 @@
}
}
assert( iIter==nSeg );
}
- /* If the above was successful, each component iterators now points
+ /* If the above was successful, each component iterator now points
** to the first entry in its segment. In this case initialize the
** aFirst[] array. Or, if an error has occurred, free the iterator
** object and set the output variable to NULL. */
if( p->rc==SQLITE_OK ){
- for(iIter=pNew->nSeg-1; iIter>0; iIter--){
- int iEq;
- if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){
- Fts5SegIter *pSeg = &pNew->aSeg[iEq];
- if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0);
- fts5MultiIterAdvanced(p, pNew, iEq, iIter);
- }
- }
- fts5MultiIterSetEof(pNew);
- fts5AssertMultiIterSetup(p, pNew);
-
- if( (pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew))
- || fts5MultiIterIsDeleted(pNew)
- ){
- fts5MultiIterNext(p, pNew, 0, 0);
- }else if( pNew->base.bEof==0 ){
- Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst];
- pNew->xSetOutputs(pNew, pSeg);
- }
-
+ fts5MultiIterFinishSetup(p, pNew);
}else{
fts5MultiIterFree(pNew);
*ppOut = 0;
}
@@ -3853,11 +3972,10 @@
){
Fts5Iter *pNew;
pNew = fts5MultiIterAlloc(p, 2);
if( pNew ){
Fts5SegIter *pIter = &pNew->aSeg[1];
-
pIter->flags = FTS5_SEGITER_ONETERM;
if( pData->szLeaf>0 ){
pIter->pLeaf = pData;
pIter->iLeafOffset = fts5GetVarint(pData->p, (u64*)&pIter->iRowid);
pIter->iEndofDoclist = pData->nn;
@@ -4217,11 +4335,11 @@
}else{
bDone = 1;
}
if( pDlidx->bPrevValid ){
- iVal = iRowid - pDlidx->iPrev;
+ iVal = (u64)iRowid - (u64)pDlidx->iPrev;
}else{
i64 iPgno = (i==0 ? pWriter->writer.pgno : pDlidx[-1].pgno);
assert( pDlidx->buf.n==0 );
sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, !bDone);
sqlite3Fts5BufferAppendVarint(&p->rc, &pDlidx->buf, iPgno);
@@ -5340,14 +5458,14 @@
*/
static void fts5FlushSecureDelete(
Fts5Index *p,
Fts5Structure *pStruct,
const char *zTerm,
+ int nTerm,
i64 iRowid
){
const int f = FTS5INDEX_QUERY_SKIPHASH;
- int nTerm = (int)strlen(zTerm);
Fts5Iter *pIter = 0; /* Used to find term instance */
fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter);
if( fts5MultiIterEof(p, pIter)==0 ){
i64 iThis = fts5MultiIterRowid(pIter);
@@ -5417,12 +5535,11 @@
int nTerm; /* Size of zTerm in bytes */
const u8 *pDoclist; /* Pointer to doclist for this term */
int nDoclist; /* Size of doclist in bytes */
/* Get the term and doclist for this entry. */
- sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
- nTerm = (int)strlen(zTerm);
+ sqlite3Fts5HashScanEntry(pHash, &zTerm, &nTerm, &pDoclist, &nDoclist);
if( bSecureDelete==0 ){
fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
if( p->rc!=SQLITE_OK ) break;
assert( writer.bFirstRowidInPage==0 );
}
@@ -5448,21 +5565,21 @@
** in fact a delete, then edit the existing segments directly
** using fts5FlushSecureDelete(). */
if( bSecureDelete ){
if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
- fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
+ fts5FlushSecureDelete(p, pStruct, zTerm, nTerm, iRowid);
iOff++;
if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
iOff++;
nDoclist = 0;
}else{
continue;
}
}
}else if( (pDoclist[iOff] & 0x01) ){
- fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
+ fts5FlushSecureDelete(p, pStruct, zTerm, nTerm, iRowid);
if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){
iOff++;
continue;
}
}
@@ -6084,11 +6201,11 @@
int bDesc, /* True for "ORDER BY rowid DESC" */
int iIdx, /* Index to scan for data */
u8 *pToken, /* Buffer containing prefix to match */
int nToken, /* Size of buffer pToken in bytes */
Fts5Colset *pColset, /* Restrict matches to these columns */
- Fts5Iter **ppIter /* OUT: New iterator */
+ Fts5Iter **ppIter /* OUT: New iterator */
){
Fts5Structure *pStruct;
Fts5Buffer *aBuf;
int nBuf = 32;
int nMerge = 1;
@@ -6105,12 +6222,13 @@
xAppend = fts5AppendPoslist;
}
aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf);
pStruct = fts5StructureRead(p);
+ assert( p->rc!=SQLITE_OK || (aBuf && pStruct) );
- if( aBuf && pStruct ){
+ if( p->rc==SQLITE_OK ){
const int flags = FTS5INDEX_QUERY_SCAN
| FTS5INDEX_QUERY_SKIPEMPTY
| FTS5INDEX_QUERY_NOOUTPUT;
int i;
i64 iLastRowid = 0;
@@ -6118,10 +6236,16 @@
Fts5Data *pData;
Fts5Buffer doclist;
int bNewTerm = 1;
memset(&doclist, 0, sizeof(doclist));
+
+ /* If iIdx is non-zero, then it is the number of a prefix-index for
+ ** prefixes 1 character longer than the prefix being queried for. That
+ ** index contains all the doclists required, except for the one
+ ** corresponding to the prefix itself. That one is extracted from the
+ ** main term index here. */
if( iIdx!=0 ){
int dummy = 0;
const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT;
pToken[0] = FTS5_MAIN_PREFIX;
fts5MultiIterNew(p, pStruct, f2, pColset, pToken, nToken, -1, 0, &p1);
@@ -6141,10 +6265,11 @@
}
pToken[0] = FTS5_MAIN_PREFIX + iIdx;
fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1);
fts5IterSetOutputCb(&p->rc, p1);
+
for( /* no-op */ ;
fts5MultiIterEof(p, p1)==0;
fts5MultiIterNext2(p, p1, &bNewTerm)
){
Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ];
@@ -6156,11 +6281,10 @@
if( bNewTerm ){
if( nTerm<nToken || memcmp(pToken, pTerm, nToken) ) break;
}

if( p1->base.nData==0 ) continue;
-
if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){
for(i=0; p->rc==SQLITE_OK && doclist.n; i++){
int i1 = i*nMerge;
int iStore;
assert( i1+nMerge<=nBuf );
@@ -6195,11 +6319,11 @@
fts5BufferFree(&aBuf[iFree]);
}
}
fts5MultiIterFree(p1);
- pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING);
+ pData = fts5IdxMalloc(p, sizeof(*pData)+doclist.n+FTS5_DATA_ZERO_PADDING);
if( pData ){
pData->p = (u8*)&pData[1];
pData->nn = pData->szLeaf = doclist.n;
if( doclist.n ) memcpy(pData->p, doclist.p, doclist.n);
fts5MultiIterNew2(p, pData, bDesc, ppIter);
@@ -6338,10 +6462,11 @@
sqlite3_finalize(p->pWriter);
sqlite3_finalize(p->pDeleter);
sqlite3_finalize(p->pIdxWriter);
sqlite3_finalize(p->pIdxDeleter);
sqlite3_finalize(p->pIdxSelect);
+ sqlite3_finalize(p->pIdxNextSelect);
sqlite3_finalize(p->pDataVersion);
sqlite3_finalize(p->pDeleteFromIdx);
sqlite3Fts5HashFree(p->pHash);
sqlite3_free(p->zDataTbl);
sqlite3_free(p);
@@ -6432,10 +6557,458 @@
}
}
return rc;
}
+
+/*
+** pToken points to a buffer of size nToken bytes containing a search
+** term, including the index number at the start, used on a tokendata=1
+** table. This function returns true if the term in buffer pBuf matches
+** token pToken/nToken.
+*/
+static int fts5IsTokendataPrefix(
+ Fts5Buffer *pBuf,
+ const u8 *pToken,
+ int nToken
+){
+ return (
+ pBuf->n>=nToken
+ && 0==memcmp(pBuf->p, pToken, nToken)
+ && (pBuf->n==nToken || pBuf->p[nToken]==0x00)
+ );
+}
+
+/*
+** Ensure the segment-iterator passed as the only argument points to EOF.
+*/
+static void fts5SegIterSetEOF(Fts5SegIter *pSeg){
+ fts5DataRelease(pSeg->pLeaf);
+ pSeg->pLeaf = 0;
+}
+
+/*
+** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an
+** array of these for each row it visits. Or, for an iterator used by an
+** "ORDER BY rank" query, it accumulates an array of these for the entire
+** query.
+**
+** Each instance in the array indicates the iterator (and therefore term)
+** associated with position iPos of rowid iRowid. This is used by the
+** xInstToken() API.
+*/
+struct Fts5TokenDataMap {
+ i64 iRowid; /* Row this token is located in */
+ i64 iPos; /* Position of token */
+ int iIter; /* Iterator token was read from */
+};
+
+/*
+** An object used to supplement Fts5Iter for tokendata=1 iterators.
+*/
+struct Fts5TokenDataIter {
+ int nIter;
+ int nIterAlloc;
+
+ int nMap;
+ int nMapAlloc;
+ Fts5TokenDataMap *aMap;
+
+ Fts5PoslistReader *aPoslistReader;
+ int *aPoslistToIter;
+ Fts5Iter *apIter[1];
+};
+
+/*
+** This function appends iterator pAppend to Fts5TokenDataIter pIn and
+** returns the result.
+*/
+static Fts5TokenDataIter *fts5AppendTokendataIter(
+ Fts5Index *p, /* Index object (for error code) */
+ Fts5TokenDataIter *pIn, /* Current Fts5TokenDataIter struct */
+ Fts5Iter *pAppend /* Append this iterator */
+){
+ Fts5TokenDataIter *pRet = pIn;
+
+ if( p->rc==SQLITE_OK ){
+ if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){
+ int nAlloc = pIn ? pIn->nIterAlloc*2 : 16;
+ int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter);
+ Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte);
+
+ if( pNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ if( pIn==0 ) memset(pNew, 0, nByte);
+ pRet = pNew;
+ pNew->nIterAlloc = nAlloc;
+ }
+ }
+ }
+ if( p->rc ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pAppend);
+ }else{
+ pRet->apIter[pRet->nIter++] = pAppend;
+ }
+ assert( pRet==0 || pRet->nIter<=pRet->nIterAlloc );
+
+ return pRet;
+}
+
+/*
+** Delete an Fts5TokenDataIter structure and its contents.
+*/
+static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){
+ if( pSet ){
+ int ii;
+ for(ii=0; ii<pSet->nIter; ii++){
+ fts5MultiIterFree(pSet->apIter[ii]);
+ }
+ sqlite3_free(pSet->aPoslistReader);
+ sqlite3_free(pSet->aMap);
+ sqlite3_free(pSet);
+ }
+}
+
+/*
+** Append a mapping to the token-map belonging to object pT.
+*/
+static void fts5TokendataIterAppendMap(
+ Fts5Index *p,
+ Fts5TokenDataIter *pT,
+ int iIter,
+ i64 iRowid,
+ i64 iPos
+){
+ if( p->rc==SQLITE_OK ){
+ if( pT->nMap==pT->nMapAlloc ){
+ int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64;
+ int nByte = nNew * sizeof(Fts5TokenDataMap);
+ Fts5TokenDataMap *aNew;
+
+ aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte);
+ if( aNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ return;
+ }
+
+ pT->aMap = aNew;
+ pT->nMapAlloc = nNew;
+ }
+
+ pT->aMap[pT->nMap].iRowid = iRowid;
+ pT->aMap[pT->nMap].iPos = iPos;
+ pT->aMap[pT->nMap].iIter = iIter;
+ pT->nMap++;
+ }
+}
+
+/*
+** The iterator passed as the only argument must be a tokendata=1 iterator
+** (pIter->pTokenDataIter!=0). This function sets the iterator output
+** variables (pIter->base.*) according to the contents of the current
+** row.
+*/
+static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){
+ int ii;
+ int nHit = 0;
+ i64 iRowid = SMALLEST_INT64;
+ int iMin = 0;
+
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+
+ pIter->base.nData = 0;
+ pIter->base.pData = 0;
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( p->base.bEof==0 ){
+ if( nHit==0 || p->base.iRowid<iRowid ){
+ iRowid = p->base.iRowid;
+ nHit = 1;
+ pIter->base.pData = p->base.pData;
+ pIter->base.nData = p->base.nData;
+ iMin = ii;
+ }else if( p->base.iRowid==iRowid ){
+ nHit++;
+ }
+ }
+ }
+
+ if( nHit==0 ){
+ pIter->base.bEof = 1;
+ }else{
+ int eDetail = pIter->pIndex->pConfig->eDetail;
+ pIter->base.bEof = 0;
+ pIter->base.iRowid = iRowid;
+
+ if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){
+ fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1);
+ }else
+ if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){
+ int nReader = 0;
+ int nByte = 0;
+ i64 iPrev = 0;
+
+ /* Allocate array of iterators if they are not already allocated. */
+ if( pT->aPoslistReader==0 ){
+ pT->aPoslistReader = (Fts5PoslistReader*)sqlite3Fts5MallocZero(
+ &pIter->pIndex->rc,
+ pT->nIter * (sizeof(Fts5PoslistReader) + sizeof(int))
+ );
+ if( pT->aPoslistReader==0 ) return;
+ pT->aPoslistToIter = (int*)&pT->aPoslistReader[pT->nIter];
+ }
+
+ /* Populate an iterator for each poslist that will be merged */
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( iRowid==p->base.iRowid ){
+ pT->aPoslistToIter[nReader] = ii;
+ sqlite3Fts5PoslistReaderInit(
+ p->base.pData, p->base.nData, &pT->aPoslistReader[nReader++]
+ );
+ nByte += p->base.nData;
+ }
+ }
+
+ /* Ensure the output buffer is large enough */
+ if( fts5BufferGrow(&pIter->pIndex->rc, &pIter->poslist, nByte+nHit*10) ){
+ return;
+ }
+
+ /* Ensure the token-mapping is large enough */
+ if( eDetail==FTS5_DETAIL_FULL && pT->nMapAlloc<(pT->nMap + nByte) ){
+ int nNew = (pT->nMapAlloc + nByte) * 2;
+ Fts5TokenDataMap *aNew = (Fts5TokenDataMap*)sqlite3_realloc(
+ pT->aMap, nNew*sizeof(Fts5TokenDataMap)
+ );
+ if( aNew==0 ){
+ pIter->pIndex->rc = SQLITE_NOMEM;
+ return;
+ }
+ pT->aMap = aNew;
+ pT->nMapAlloc = nNew;
+ }
+
+ pIter->poslist.n = 0;
+
+ while( 1 ){
+ i64 iMinPos = LARGEST_INT64;
+
+ /* Find smallest position */
+ iMin = 0;
+ for(ii=0; ii<nReader; ii++){
+ Fts5PoslistReader *pReader = &pT->aPoslistReader[ii];
+ if( pReader->bEof==0 ){
+ if( pReader->iPos<iMinPos ){
+ iMinPos = pReader->iPos;
+ iMin = ii;
+ }
+ }
+ }
+
+ /* If all readers were at EOF, break out of the loop. */
+ if( iMinPos==LARGEST_INT64 ) break;
+
+ sqlite3Fts5PoslistSafeAppend(&pIter->poslist, &iPrev, iMinPos);
+ sqlite3Fts5PoslistReaderNext(&pT->aPoslistReader[iMin]);
+
+ if( eDetail==FTS5_DETAIL_FULL ){
+ pT->aMap[pT->nMap].iPos = iMinPos;
+ pT->aMap[pT->nMap].iIter = pT->aPoslistToIter[iMin];
+ pT->aMap[pT->nMap].iRowid = iRowid;
+ pT->nMap++;
+ }
+ }
+
+ pIter->base.pData = pIter->poslist.p;
+ pIter->base.nData = pIter->poslist.n;
+ }
+ }
+}
+
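The first loop in fts5IterSetOutputsTokendata() above is a minimum-rowid scan over the tokendata sub-iterators. The standalone sketch below restates that scan on plain arrays; the function name and array-based representation are illustrative assumptions, not code from the change.

#include "sqlite3.h"

/* Sketch: given the current rowid of each sub-iterator and a per-iterator
** EOF flag, find the smallest current rowid and count how many sub-iterators
** are positioned on it. A return value of 0 means all are at EOF. */
static int smallestRowid(
  const sqlite3_int64 *aRowid,    /* Current rowid of each sub-iterator */
  const int *aEof,                /* True if that sub-iterator is at EOF */
  int nIter,                      /* Number of sub-iterators */
  sqlite3_int64 *piRowid          /* OUT: smallest current rowid */
){
  int ii, nHit = 0;
  for(ii=0; ii<nIter; ii++){
    if( aEof[ii] ) continue;
    if( nHit==0 || aRowid[ii]<*piRowid ){
      *piRowid = aRowid[ii];
      nHit = 1;
    }else if( aRowid[ii]==*piRowid ){
      nHit++;
    }
  }
  return nHit;
}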
+/*
+** The iterator passed as the only argument must be a tokendata=1 iterator
+** (pIter->pTokenDataIter!=0). This function advances the iterator. If
+** argument bFrom is false, then the iterator is advanced to the next
+** entry. Or, if bFrom is true, it is advanced to the first entry with
+** a rowid of iFrom or greater.
+*/
+static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){
+ int ii;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( p->base.bEof==0
+ && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowid<iFrom))
+ ){
+ fts5MultiIterNext(p->pIndex, p, bFrom, iFrom);
+ while( bFrom && p->base.bEof==0
+ && p->base.iRowid<iFrom
+ && p->pIndex->rc==SQLITE_OK
+ ){
+ fts5MultiIterNext(p->pIndex, p, 0, 0);
+ }
+ }
+ }
+
+ fts5IterSetOutputsTokendata(pIter);
+}
+
+/*
+** If the segment-iterator passed as the first argument is at EOF, then
+** set pIter->term to a copy of buffer pTerm.
+*/
+static void fts5TokendataSetTermIfEof(Fts5Iter *pIter, Fts5Buffer *pTerm){
+ if( pIter && pIter->aSeg[0].pLeaf==0 ){
+ fts5BufferSet(&pIter->pIndex->rc, &pIter->aSeg[0].term, pTerm->n, pTerm->p);
+ }
+}
+
+/*
+** This function sets up an iterator to use for a non-prefix query on a
+** tokendata=1 table.
+*/
+static Fts5Iter *fts5SetupTokendataIter(
+ Fts5Index *p, /* FTS index to query */
+ const u8 *pToken, /* Buffer containing query term */
+ int nToken, /* Size of buffer pToken in bytes */
+ Fts5Colset *pColset /* Colset to filter on */
+){
+ Fts5Iter *pRet = 0;
+ Fts5TokenDataIter *pSet = 0;
+ Fts5Structure *pStruct = 0;
+ const int flags = FTS5INDEX_QUERY_SCANONETERM | FTS5INDEX_QUERY_SCAN;
+
+ Fts5Buffer bSeek = {0, 0, 0};
+ Fts5Buffer *pSmall = 0;
+
+ fts5IndexFlush(p);
+ pStruct = fts5StructureRead(p);
+
+ while( p->rc==SQLITE_OK ){
+ Fts5Iter *pPrev = pSet ? pSet->apIter[pSet->nIter-1] : 0;
+ Fts5Iter *pNew = 0;
+ Fts5SegIter *pNewIter = 0;
+ Fts5SegIter *pPrevIter = 0;
+
+ int iLvl, iSeg, ii;
+
+ pNew = fts5MultiIterAlloc(p, pStruct->nSegment);
+ if( pSmall ){
+ fts5BufferSet(&p->rc, &bSeek, pSmall->n, pSmall->p);
+ fts5BufferAppendBlob(&p->rc, &bSeek, 1, (const u8*)"\0");
+ }else{
+ fts5BufferSet(&p->rc, &bSeek, nToken, pToken);
+ }
+ if( p->rc ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pNew);
+ break;
+ }
+
+ pNewIter = &pNew->aSeg[0];
+ pPrevIter = (pPrev ? &pPrev->aSeg[0] : 0);
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){
+ Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg];
+ int bDone = 0;
+
+ if( pPrevIter ){
+ if( fts5BufferCompare(pSmall, &pPrevIter->term) ){
+ memcpy(pNewIter, pPrevIter, sizeof(Fts5SegIter));
+ memset(pPrevIter, 0, sizeof(Fts5SegIter));
+ bDone = 1;
+ }else if( pPrevIter->iEndofDoclist>pPrevIter->pLeaf->szLeaf ){
+ fts5SegIterNextInit(p,(const char*)bSeek.p,bSeek.n-1,pSeg,pNewIter);
+ bDone = 1;
+ }
+ }
+
+ if( bDone==0 ){
+ fts5SegIterSeekInit(p, bSeek.p, bSeek.n, flags, pSeg, pNewIter);
+ }
+
+ if( pPrevIter ){
+ if( pPrevIter->pTombArray ){
+ pNewIter->pTombArray = pPrevIter->pTombArray;
+ pNewIter->pTombArray->nRef++;
+ }
+ }else{
+ fts5SegIterAllocTombstone(p, pNewIter);
+ }
+
+ pNewIter++;
+ if( pPrevIter ) pPrevIter++;
+ if( p->rc ) break;
+ }
+ }
+ fts5TokendataSetTermIfEof(pPrev, pSmall);
+
+ pNew->bSkipEmpty = 1;
+ pNew->pColset = pColset;
+ fts5IterSetOutputCb(&p->rc, pNew);
+
+ /* Loop through all segments in the new iterator. Find the smallest
+ ** term that any segment-iterator points to. Iterator pNew will be
+ ** used for this term. Also, set any iterator that points to a term that
+ ** does not match pToken/nToken to point to EOF */
+ pSmall = 0;
+ for(ii=0; ii<pNew->nSeg; ii++){
+ Fts5SegIter *pII = &pNew->aSeg[ii];
+ if( 0==fts5IsTokendataPrefix(&pII->term, pToken, nToken) ){
+ fts5SegIterSetEOF(pII);
+ }
+ if( pII->pLeaf && (!pSmall || fts5BufferCompare(pSmall, &pII->term)>0) ){
+ pSmall = &pII->term;
+ }
+ }
+
+ /* If pSmall is still NULL at this point, then the new iterator does
+ ** not point to any terms that match the query. So delete it and break
+ ** out of the loop - all required iterators have been collected. */
+ if( pSmall==0 ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pNew);
+ break;
+ }
+
+ /* Append this iterator to the set and continue. */
+ pSet = fts5AppendTokendataIter(p, pSet, pNew);
+ }
+
+ if( p->rc==SQLITE_OK && pSet ){
+ int ii;
+ for(ii=0; ii<pSet->nIter; ii++){
+ Fts5Iter *pIter = pSet->apIter[ii];
+ int iSeg;
+ for(iSeg=0; iSeg<pIter->nSeg; iSeg++){
+ pIter->aSeg[iSeg].flags |= FTS5_SEGITER_ONETERM;
+ }
+ fts5MultiIterFinishSetup(p, pIter);
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ pRet = fts5MultiIterAlloc(p, 0);
+ }
+ if( pRet ){
+ pRet->pTokenDataIter = pSet;
+ if( pSet ){
+ fts5IterSetOutputsTokendata(pRet);
+ }else{
+ pRet->base.bEof = 1;
+ }
+ }else{
+ fts5TokendataIterDelete(pSet);
+ }
+
+ fts5StructureRelease(pStruct);
+ fts5BufferFree(&bSeek);
+ return pRet;
+}
+
/*
** Open a new iterator to iterate though all rowid that match the
** specified token or token prefix.
*/
@@ -6454,11 +7027,16 @@
assert( (flags & FTS5INDEX_QUERY_SCAN)==0 || flags==FTS5INDEX_QUERY_SCAN );
if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){
int iIdx = 0; /* Index to search */
int iPrefixIdx = 0; /* +1 prefix index */
+ int bTokendata = pConfig->bTokendata;
if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken);
+
+ if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){
+ bTokendata = 0;
+ }
/* Figure out which index to search and set iIdx accordingly. If this
** is a prefix query for which there is no prefix index, set iIdx to
** greater than pConfig->nPrefix to indicate that the query will be
** satisfied by scanning multiple terms in the main index.
@@ -6481,11 +7059,14 @@
if( nIdxChar==nChar ) break;
if( nIdxChar==nChar+1 ) iPrefixIdx = iIdx;
}
}
- if( iIdx<=pConfig->nPrefix ){
+ if( bTokendata && iIdx==0 ){
+ buf.p[0] = '0';
+ pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset);
+ }else if( iIdx<=pConfig->nPrefix ){
/* Straight index lookup */
Fts5Structure *pStruct = fts5StructureRead(p);
buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx);
if( pStruct ){
fts5MultiIterNew(p, pStruct, flags | FTS5INDEX_QUERY_SKIPEMPTY,
@@ -6528,11 +7109,15 @@
** Move to the next matching rowid.
*/
int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
assert( pIter->pIndex->rc==SQLITE_OK );
- fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
+ if( pIter->pTokenDataIter ){
+ fts5TokendataIterNext(pIter, 0, 0);
+ }else{
+ fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
+ }
return fts5IndexReturn(pIter->pIndex);
}
/*
** Move to the next matching term/rowid. Used by the fts5vocab module.
@@ -6561,11 +7146,15 @@
** definition of "at or after" depends on whether this iterator iterates
** in ascending or descending rowid order.
*/
int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
- fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
+ if( pIter->pTokenDataIter ){
+ fts5TokendataIterNext(pIter, 1, iMatch);
+ }else{
+ fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
+ }
return fts5IndexReturn(pIter->pIndex);
}
/*
** Return the current term.
@@ -6575,18 +7164,112 @@
const char *z = (const char*)fts5MultiIterTerm((Fts5Iter*)pIndexIter, &n);
assert_nc( z || n<=1 );
*pn = n-1;
return (z ? &z[1] : 0);
}
+
+/*
+** This is used by xInstToken() to access the token at offset iOff, column
+** iCol of row iRowid. The token is returned via output variables *ppOut
+** and *pnOut. The iterator passed as the first argument must be a tokendata=1
+** iterator (pIter->pTokenDataIter!=0).
+*/
+int sqlite3Fts5IterToken(
+ Fts5IndexIter *pIndexIter,
+ i64 iRowid,
+ int iCol,
+ int iOff,
+ const char **ppOut, int *pnOut
+){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+ Fts5TokenDataMap *aMap = pT->aMap;
+ i64 iPos = (((i64)iCol)<<32) + iOff;
+
+ int i1 = 0;
+ int i2 = pT->nMap;
+ int iTest = 0;
+
+ while( i2>i1 ){
+ iTest = (i1 + i2) / 2;
+
+ if( aMap[iTest].iRowid<iRowid ){
+ i1 = iTest+1;
+ }else if( aMap[iTest].iRowid>iRowid ){
+ i2 = iTest;
+ }else{
+ if( aMap[iTest].iPos<iPos ){
+ i1 = iTest+1;
+ }else if( aMap[iTest].iPos>iPos ){
+ i2 = iTest;
+ }else{
+ break;
+ }
+ }
+ }
+
+ if( i2>i1 ){
+ Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter];
+ *ppOut = (const char*)pMap->aSeg[0].term.p+1;
+ *pnOut = pMap->aSeg[0].term.n-1;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Clear any existing entries from the token-map associated with the
+** iterator passed as the only argument.
+*/
+void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ if( pIter && pIter->pTokenDataIter ){
+ pIter->pTokenDataIter->nMap = 0;
+ }
+}
+
+/*
+** Set a token-mapping for the iterator passed as the first argument. This
+** is used in detail=column or detail=none mode when a token is requested
+** using the xInstToken() API. In this case the caller tokenizes the
+** current row and configures the token-mapping via multiple calls to this
+** function.
+*/
+int sqlite3Fts5IndexIterWriteTokendata(
+ Fts5IndexIter *pIndexIter,
+ const char *pToken, int nToken,
+ i64 iRowid, int iCol, int iOff
+){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+ Fts5Index *p = pIter->pIndex;
+ int ii;
+
+ assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL );
+ assert( pIter->pTokenDataIter );
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term;
+ if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break;
+ }
+ if( ii<pT->nIter ){
+ fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff);
+ }
+ return fts5IndexReturn(p);
+}
/*
** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery().
*/
void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){
if( pIndexIter ){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
Fts5Index *pIndex = pIter->pIndex;
+ fts5TokendataIterDelete(pIter->pTokenDataIter);
fts5MultiIterFree(pIter);
sqlite3Fts5IndexCloseReader(pIndex);
}
}
@@ -7090,11 +7773,13 @@
u64 *pCksum /* IN/OUT: Checksum value */
){
int eDetail = p->pConfig->eDetail;
u64 cksum = *pCksum;
Fts5IndexIter *pIter = 0;
- int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter);
+ int rc = sqlite3Fts5IndexQuery(
+ p, z, n, (flags | FTS5INDEX_QUERY_NOTOKENDATA), 0, &pIter
+ );
while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){
i64 rowid = pIter->iRowid;
if( eDetail==FTS5_DETAIL_NONE ){
@@ -7787,10 +8472,28 @@
sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp);
}
}
#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
+
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
+static void fts5BufferAppendTerm(int *pRc, Fts5Buffer *pBuf, Fts5Buffer *pTerm){
+ int ii;
+ fts5BufferGrow(pRc, pBuf, pTerm->n*2 + 1);
+ if( *pRc==SQLITE_OK ){
+ for(ii=0; ii<pTerm->n; ii++){
+ if( pTerm->p[ii]==0x00 ){
+ pBuf->p[pBuf->n++] = '\\';
+ pBuf->p[pBuf->n++] = '0';
+ }else{
+ pBuf->p[pBuf->n++] = pTerm->p[ii];
+ }
+ }
+ pBuf->p[pBuf->n] = 0x00;
+ }
+}
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** The implementation of user-defined scalar function fts5_decode().
*/
@@ -7895,13 +8598,12 @@
/* Read the term data for the next term*/
iOff += fts5GetVarint32(&a[iOff], nAppend);
term.n = nKeep;
fts5BufferAppendBlob(&rc, &term, nAppend, &a[iOff]);
- sqlite3Fts5BufferAppendPrintf(
- &rc, &s, " term=%.*s", term.n, (const char*)term.p
- );
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " term=");
+ fts5BufferAppendTerm(&rc, &s, &term);
iOff += nAppend;
/* Figure out where the doclist for this term ends */
if( iPgidxOff<n ){
Index: ext/fts5/fts5_main.c
==================================================================
--- ext/fts5/fts5_main.c
+++ ext/fts5/fts5_main.c
if( pInfo->nOrderBy==1 ){
int iSort = pInfo->aOrderBy[0].iColumn;
if( iSort==(pConfig->nCol+1) && bSeenMatch ){
idxFlags |= FTS5_BI_ORDER_RANK;
- }else if( iSort==-1 ){
+ }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){
idxFlags |= FTS5_BI_ORDER_ROWID;
}
if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){
pInfo->orderByConsumed = 1;
if( pInfo->aOrderBy[0].desc ){
@@ -910,10 +913,20 @@
assert( (pCsr->ePlan<3)==
(pCsr->ePlan==FTS5_PLAN_MATCH || pCsr->ePlan==FTS5_PLAN_SOURCE)
);
assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) );
+
+ /* If this cursor uses FTS5_PLAN_MATCH and this is a tokendata=1 table,
+ ** clear any token mappings accumulated at the fts5_index.c level. In
+ ** other cases, specifically FTS5_PLAN_SOURCE and FTS5_PLAN_SORTED_MATCH,
+ ** we need to retain the mappings for the entire query. */
+ if( pCsr->ePlan==FTS5_PLAN_MATCH
+ && ((Fts5Table*)pCursor->pVtab)->pConfig->bTokendata
+ ){
+ sqlite3Fts5ExprClearTokens(pCsr->pExpr);
+ }
if( pCsr->ePlan<3 ){
int bSkip = 0;
if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc;
rc = sqlite3Fts5ExprNext(pCsr->pExpr, pCsr->iLastRowid);
@@ -1899,11 +1912,14 @@
const char **pz,
int *pn
){
int rc = SQLITE_OK;
Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab))
+ Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
+ if( iCol<0 || iCol>=pTab->pConfig->nCol ){
+ rc = SQLITE_RANGE;
+ }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab))
|| pCsr->ePlan==FTS5_PLAN_SPECIAL
){
*pz = 0;
*pn = 0;
}else{
@@ -1924,12 +1940,13 @@
){
Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig;
int rc = SQLITE_OK;
int bLive = (pCsr->pSorter==0);
- if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
-
+ if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){
+ rc = SQLITE_RANGE;
+ }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
Fts5PoslistPopulator *aPopulator;
int i;
aPopulator = sqlite3Fts5ExprClearPoslists(pCsr->pExpr, bLive);
if( aPopulator==0 ) rc = SQLITE_NOMEM;
@@ -1949,18 +1966,24 @@
}
}
CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST);
}
- if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){
- Fts5Sorter *pSorter = pCsr->pSorter;
- int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
- *pn = pSorter->aIdx[iPhrase] - i1;
- *pa = &pSorter->aPoslist[i1];
+ if( rc==SQLITE_OK ){
+ if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){
+ Fts5Sorter *pSorter = pCsr->pSorter;
+ int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
+ *pn = pSorter->aIdx[iPhrase] - i1;
+ *pa = &pSorter->aPoslist[i1];
+ }else{
+ *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
+ }
}else{
- *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
+ *pa = 0;
+ *pn = 0;
}
+
return rc;
}
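The sorter branch above slices one phrase's poslist out of a single concatenated buffer, using aIdx[] as running end offsets. A small sketch of that indexing (not part of the patch, hypothetical names), assuming aIdx[i] holds the end offset of poslist i:

#include <assert.h>

/* Return a pointer/size pair for poslist iPhrase inside aData[]. */
static void poslistSlice(
  const int *aIdx,             /* aIdx[i] = end offset of poslist i */
  const unsigned char *aData,  /* all poslists, concatenated */
  int iPhrase,
  const unsigned char **pa, int *pn
){
  int i1 = (iPhrase==0 ? 0 : aIdx[iPhrase-1]);
  *pa = &aData[i1];
  *pn = aIdx[iPhrase] - i1;
}

int main(void){
  const unsigned char data[] = {1,2,3, 4,5, 6,6,6,6};
  const int aIdx[] = {3, 5, 9};
  const unsigned char *p = 0; int n = 0;
  poslistSlice(aIdx, data, 1, &p, &n);
  assert( n==2 && p[0]==4 && p[1]==5 );
  return 0;
}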
/*
@@ -2064,16 +2087,10 @@
if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
|| SQLITE_OK==(rc = fts5CacheInstArray(pCsr))
){
if( iIdx<0 || iIdx>=pCsr->nInstCount ){
rc = SQLITE_RANGE;
-#if 0
- }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){
- *piPhrase = pCsr->aInst[iIdx*3];
- *piCol = pCsr->aInst[iIdx*3 + 2];
- *piOff = -1;
-#endif
}else{
*piPhrase = pCsr->aInst[iIdx*3];
*piCol = pCsr->aInst[iIdx*3 + 1];
*piOff = pCsr->aInst[iIdx*3 + 2];
}
@@ -2324,17 +2341,60 @@
}
return rc;
}
+/*
+** xQueryToken() API implementation.
+*/
+static int fts5ApiQueryToken(
+ Fts5Context* pCtx,
+ int iPhrase,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ return sqlite3Fts5ExprQueryToken(pCsr->pExpr, iPhrase, iToken, ppOut, pnOut);
+}
+
+/*
+** xInstToken() API implementation.
+*/
+static int fts5ApiInstToken(
+ Fts5Context *pCtx,
+ int iIdx,
+ int iToken,
+ const char **ppOut, int *pnOut
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ int rc = SQLITE_OK;
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
+ || SQLITE_OK==(rc = fts5CacheInstArray(pCsr))
+ ){
+ if( iIdx<0 || iIdx>=pCsr->nInstCount ){
+ rc = SQLITE_RANGE;
+ }else{
+ int iPhrase = pCsr->aInst[iIdx*3];
+ int iCol = pCsr->aInst[iIdx*3 + 1];
+ int iOff = pCsr->aInst[iIdx*3 + 2];
+ i64 iRowid = fts5CursorRowid(pCsr);
+ rc = sqlite3Fts5ExprInstToken(
+ pCsr->pExpr, iRowid, iPhrase, iCol, iOff, iToken, ppOut, pnOut
+ );
+ }
+ }
+ return rc;
+}
+
static int fts5ApiQueryPhrase(Fts5Context*, int, void*,
int(*)(const Fts5ExtensionApi*, Fts5Context*, void*)
);
static const Fts5ExtensionApi sFts5Api = {
- 2, /* iVersion */
+ 3, /* iVersion */
fts5ApiUserData,
fts5ApiColumnCount,
fts5ApiRowCount,
fts5ApiColumnTotalSize,
fts5ApiTokenize,
@@ -2350,10 +2410,12 @@
fts5ApiGetAuxdata,
fts5ApiPhraseFirst,
fts5ApiPhraseNext,
fts5ApiPhraseFirstColumn,
fts5ApiPhraseNextColumn,
+ fts5ApiQueryToken,
+ fts5ApiInstToken
};
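Because xQueryToken and xInstToken only exist from iVersion 3 onwards, an auxiliary function should test pApi->iVersion before calling them. A hedged sketch, assuming the fts5.h updated by this change is in scope; the function name is hypothetical.

#include "sqlite3.h"
#include "fts5.h"     /* assumes the iVersion-3 header from this patch */

/* Return the first token of phrase 0 of the current query, or NULL. */
static void auxFirstQueryToken(
  const Fts5ExtensionApi *pApi,   /* API offered by current FTS version */
  Fts5Context *pFts,              /* First arg to pass to pApi functions */
  sqlite3_context *pCtx,          /* Context for returning result/error */
  int nVal,                       /* Number of values in apVal[] array */
  sqlite3_value **apVal           /* Array of trailing arguments */
){
  const char *z = 0;
  int n = 0;
  (void)nVal;
  (void)apVal;
  /* xQueryToken() is only present when iVersion>=3 */
  if( pApi->iVersion>=3
   && pApi->xQueryToken(pFts, 0, 0, &z, &n)==SQLITE_OK
  ){
    sqlite3_result_text(pCtx, z, n, SQLITE_TRANSIENT);
  }else{
    sqlite3_result_null(pCtx);
  }
}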
/*
** Implementation of API function xQueryPhrase().
*/
@@ -2907,32 +2969,27 @@
const char *zTabname, /* Name of the table itself */
int isQuick, /* True if this is a quick-check */
char **pzErr /* Write error message here */
){
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
- Fts5Config *pConfig = pTab->p.pConfig;
- char *zSql;
- char *zErr = 0;
int rc;
+
assert( pzErr!=0 && *pzErr==0 );
UNUSED_PARAM(isQuick);
- zSql = sqlite3_mprintf(
- "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');",
- zSchema, zTabname, pConfig->zName);
- if( zSql==0 ) return SQLITE_NOMEM;
- rc = sqlite3_exec(pConfig->db, zSql, 0, 0, &zErr);
- sqlite3_free(zSql);
+ rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0);
if( (rc&0xff)==SQLITE_CORRUPT ){
*pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s",
zSchema, zTabname);
+ rc = (*pzErr) ? SQLITE_OK : SQLITE_NOMEM;
}else if( rc!=SQLITE_OK ){
*pzErr = sqlite3_mprintf("unable to validate the inverted index for"
" FTS5 table %s.%s: %s",
- zSchema, zTabname, zErr);
+ zSchema, zTabname, sqlite3_errstr(rc));
}
- sqlite3_free(zErr);
- return SQLITE_OK;
+ sqlite3Fts5IndexCloseReader(pTab->p.pIndex);
+
+ return rc;
}
static int fts5Init(sqlite3 *db){
static const sqlite3_module fts5Mod = {
/* iVersion */ 4,
Index: ext/fts5/fts5_storage.c
==================================================================
--- ext/fts5/fts5_storage.c
+++ ext/fts5/fts5_storage.c
@@ -671,11 +671,11 @@
if( rc==SQLITE_OK ){
rc = fts5StorageLoadTotals(p, 1);
}
if( rc==SQLITE_OK ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0);
+ rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, pConfig->pzErrmsg);
}
while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){
i64 iRowid = sqlite3_column_int64(pScan, 0);
Index: ext/fts5/fts5_tcl.c
==================================================================
--- ext/fts5/fts5_tcl.c
+++ ext/fts5/fts5_tcl.c
@@ -242,10 +242,13 @@
{ "xGetAuxdata", 1, "CLEAR" }, /* 13 */
{ "xSetAuxdataInt", 1, "INTEGER" }, /* 14 */
{ "xGetAuxdataInt", 1, "CLEAR" }, /* 15 */
{ "xPhraseForeach", 4, "IPHRASE COLVAR OFFVAR SCRIPT" }, /* 16 */
{ "xPhraseColumnForeach", 3, "IPHRASE COLVAR SCRIPT" }, /* 17 */
+
+ { "xQueryToken", 2, "IPHRASE ITERM" }, /* 18 */
+ { "xInstToken", 2, "IDX ITERM" }, /* 19 */
{ 0, 0, 0}
};
int rc;
int iSub = 0;
@@ -494,10 +497,42 @@
if( rc!=TCL_OK ){
if( rc==TCL_BREAK ) rc = TCL_OK;
break;
}
}
+
+ break;
+ }
+
+ CASE(18, "xQueryToken") {
+ const char *pTerm = 0;
+ int nTerm = 0;
+ int iPhrase = 0;
+ int iTerm = 0;
+
+ if( Tcl_GetIntFromObj(interp, objv[2], &iPhrase) ) return TCL_ERROR;
+ if( Tcl_GetIntFromObj(interp, objv[3], &iTerm) ) return TCL_ERROR;
+ rc = p->pApi->xQueryToken(p->pFts, iPhrase, iTerm, &pTerm, &nTerm);
+ if( rc==SQLITE_OK ){
+ Tcl_SetObjResult(interp, Tcl_NewStringObj(pTerm, nTerm));
+ }
+
+ break;
+ }
+
+ CASE(19, "xInstToken") {
+ const char *pTerm = 0;
+ int nTerm = 0;
+ int iIdx = 0;
+ int iTerm = 0;
+
+ if( Tcl_GetIntFromObj(interp, objv[2], &iIdx) ) return TCL_ERROR;
+ if( Tcl_GetIntFromObj(interp, objv[3], &iTerm) ) return TCL_ERROR;
+ rc = p->pApi->xInstToken(p->pFts, iIdx, iTerm, &pTerm, &nTerm);
+ if( rc==SQLITE_OK ){
+ Tcl_SetObjResult(interp, Tcl_NewStringObj(pTerm, nTerm));
+ }
break;
}
default:
@@ -1114,10 +1149,180 @@
Tcl_SetResult(interp, (char*)sqlite3ErrName(rc), TCL_VOLATILE);
return TCL_ERROR;
}
return TCL_OK;
}
+
+typedef struct OriginTextCtx OriginTextCtx;
+struct OriginTextCtx {
+ sqlite3 *db;
+ fts5_api *pApi;
+};
+
+typedef struct OriginTextTokenizer OriginTextTokenizer;
+struct OriginTextTokenizer {
+ Fts5Tokenizer *pTok; /* Underlying tokenizer object */
+ fts5_tokenizer tokapi; /* API implementation for pTok */
+};
+
+/*
+** Delete the OriginTextCtx object indicated by the only argument.
+*/
+static void f5tOrigintextTokenizerDelete(void *pCtx){
+ OriginTextCtx *p = (OriginTextCtx*)pCtx;
+ ckfree((char*)p);
+}
+
+static int f5tOrigintextCreate(
+ void *pCtx,
+ const char **azArg,
+ int nArg,
+ Fts5Tokenizer **ppOut
+){
+ OriginTextCtx *p = (OriginTextCtx*)pCtx;
+ OriginTextTokenizer *pTok = 0;
+ void *pTokCtx = 0;
+ int rc = SQLITE_OK;
+
+ pTok = (OriginTextTokenizer*)sqlite3_malloc(sizeof(OriginTextTokenizer));
+ if( pTok==0 ){
+ rc = SQLITE_NOMEM;
+ }else if( nArg<1 ){
+ rc = SQLITE_ERROR;
+ }else{
+ /* Locate the underlying tokenizer */
+ rc = p->pApi->xFindTokenizer(p->pApi, azArg[0], &pTokCtx, &pTok->tokapi);
+ }
+
+ /* Create the new tokenizer instance */
+ if( rc==SQLITE_OK ){
+ rc = pTok->tokapi.xCreate(pTokCtx, &azArg[1], nArg-1, &pTok->pTok);
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(pTok);
+ pTok = 0;
+ }
+ *ppOut = (Fts5Tokenizer*)pTok;
+ return rc;
+}
+
+static void f5tOrigintextDelete(Fts5Tokenizer *pTokenizer){
+ OriginTextTokenizer *p = (OriginTextTokenizer*)pTokenizer;
+ if( p->pTok ){
+ p->tokapi.xDelete(p->pTok);
+ }
+ sqlite3_free(p);
+}
+
+typedef struct OriginTextCb OriginTextCb;
+struct OriginTextCb {
+ void *pCtx;
+ const char *pText;
+ int nText;
+ int (*xToken)(void *, int, const char *, int, int, int);
+
+ char *aBuf; /* Buffer to use */
+ int nBuf; /* Allocated size of aBuf[] */
+};
+
+static int xOriginToken(
+ void *pCtx, /* Copy of 2nd argument to xTokenize() */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Pointer to buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iStart, /* Byte offset of token within input text */
+ int iEnd /* Byte offset of end of token within input */
+){
+ OriginTextCb *p = (OriginTextCb*)pCtx;
+ int ret = 0;
+
+ if( nToken==(iEnd-iStart) && 0==memcmp(pToken, &p->pText[iStart], nToken) ){
+ /* Token exactly matches document text. Pass it through as is. */
+ ret = p->xToken(p->pCtx, tflags, pToken, nToken, iStart, iEnd);
+ }else{
+ int nReq = nToken + 1 + (iEnd-iStart);
+ if( nReq>p->nBuf ){
+ sqlite3_free(p->aBuf);
+ p->aBuf = sqlite3_malloc(nReq*2);
+ if( p->aBuf==0 ) return SQLITE_NOMEM;
+ p->nBuf = nReq*2;
+ }
+
+ memcpy(p->aBuf, pToken, nToken);
+ p->aBuf[nToken] = '\0';
+ memcpy(&p->aBuf[nToken+1], &p->pText[iStart], iEnd-iStart);
+ ret = p->xToken(p->pCtx, tflags, p->aBuf, nReq, iStart, iEnd);
+ }
+
+ return ret;
+}
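When the folded token differs from the document text, xOriginToken() above hands fts5 a combined buffer: the folded token, a 0x00 separator, then the original text. A standalone sketch of that layout (not part of the patch; the helper is hypothetical):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Build "folded-token \0 original-text" in a malloc'd buffer. */
static char *buildOriginToken(
  const char *zFolded, int nFolded,    /* token as produced by unicode61 */
  const char *zOrig, int nOrig,        /* original document text */
  int *pnOut
){
  char *z = (char*)malloc((size_t)nFolded + 1 + (size_t)nOrig);
  if( z ){
    memcpy(z, zFolded, (size_t)nFolded);
    z[nFolded] = '\0';
    memcpy(&z[nFolded+1], zOrig, (size_t)nOrig);
  }
  *pnOut = nFolded + 1 + nOrig;
  return z;
}

int main(void){
  int n = 0;
  char *z = buildOriginToken("hello", 5, "Hello", 5, &n);
  assert( z && n==11 && memcmp(z, "hello\0Hello", 11)==0 );
  free(z);
  return 0;
}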
+
+
+static int f5tOrigintextTokenize(
+ Fts5Tokenizer *pTokenizer,
+ void *pCtx,
+ int flags, /* Mask of FTS5_TOKENIZE_* flags */
+ const char *pText, int nText,
+ int (*xToken)(void *, int, const char *, int, int, int)
+){
+ OriginTextTokenizer *p = (OriginTextTokenizer*)pTokenizer;
+ OriginTextCb cb;
+ int ret;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.pCtx = pCtx;
+ cb.pText = pText;
+ cb.nText = nText;
+ cb.xToken = xToken;
+
+ ret = p->tokapi.xTokenize(p->pTok,(void*)&cb,flags,pText,nText,xOriginToken);
+ sqlite3_free(cb.aBuf);
+ return ret;
+}
+
+/*
+** sqlite3_fts5_register_origintext DB
+**
+** Register the "origintext" wrapper tokenizer with database handle DB.
+*/
+static int SQLITE_TCLAPI f5tRegisterOriginText(
+ void * clientData,
+ Tcl_Interp *interp,
+ int objc,
+ Tcl_Obj *CONST objv[]
+){
+ sqlite3 *db = 0;
+ fts5_api *pApi = 0;
+ int rc;
+ fts5_tokenizer tok = {0, 0, 0};
+ OriginTextCtx *pCtx = 0;
+
+ if( objc!=2 ){
+ Tcl_WrongNumArgs(interp, 1, objv, "DB");
+ return TCL_ERROR;
+ }
+ if( f5tDbAndApi(interp, objv[1], &db, &pApi) ) return TCL_ERROR;
+
+ pCtx = (OriginTextCtx*)ckalloc(sizeof(OriginTextCtx));
+ pCtx->db = db;
+ pCtx->pApi = pApi;
+
+ tok.xCreate = f5tOrigintextCreate;
+ tok.xDelete = f5tOrigintextDelete;
+ tok.xTokenize = f5tOrigintextTokenize;
+ rc = pApi->xCreateTokenizer(
+ pApi, "origintext", (void*)pCtx, &tok, f5tOrigintextTokenizerDelete
+ );
+
+ Tcl_ResetResult(interp);
+ if( rc!=SQLITE_OK ){
+ Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), 0);
+ return TCL_ERROR;
+ }
+ return TCL_OK;
+}
/*
** Entry point.
*/
int Fts5tcl_Init(Tcl_Interp *interp){
@@ -1131,11 +1336,12 @@
{ "sqlite3_fts5_tokenize", f5tTokenize, 0 },
{ "sqlite3_fts5_create_function", f5tCreateFunction, 0 },
{ "sqlite3_fts5_may_be_corrupt", f5tMayBeCorrupt, 0 },
{ "sqlite3_fts5_token_hash", f5tTokenHash, 0 },
{ "sqlite3_fts5_register_matchinfo", f5tRegisterMatchinfo, 0 },
- { "sqlite3_fts5_register_fts5tokenize", f5tRegisterTok, 0 }
+ { "sqlite3_fts5_register_fts5tokenize", f5tRegisterTok, 0 },
+ { "sqlite3_fts5_register_origintext",f5tRegisterOriginText, 0 }
};
int i;
F5tTokenizerContext *pContext;
pContext = (F5tTokenizerContext*)ckalloc(sizeof(F5tTokenizerContext));
Index: ext/fts5/fts5_tokenize.c
==================================================================
--- ext/fts5/fts5_tokenize.c
+++ ext/fts5/fts5_tokenize.c
@@ -226,10 +226,16 @@
*zOut++ = 0x80 + (unsigned char)(c & 0x3F); \
} \
}
#endif /* ifndef SQLITE_AMALGAMATION */
+
+#define FTS5_SKIP_UTF8(zIn) { \
+ if( ((unsigned char)(*(zIn++)))>=0xc0 ){ \
+ while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; } \
+ } \
+}
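A quick standalone check (not part of the patch) of what FTS5_SKIP_UTF8 does, assuming well-formed UTF-8 input: it advances one byte for an ASCII character and past the whole sequence when the first byte is a multi-byte lead byte.

#include <assert.h>

#define FTS5_SKIP_UTF8(zIn) {                                   \
  if( ((unsigned char)(*(zIn++)))>=0xc0 ){                      \
    while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; }     \
  }                                                             \
}

int main(void){
  const char *z = "a\xC3\xA9" "b";  /* 'a', U+00E9 (two bytes), 'b' */
  FTS5_SKIP_UTF8(z);                /* past 'a' */
  FTS5_SKIP_UTF8(z);                /* past the two-byte sequence */
  assert( *z=='b' );
  return 0;
}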
typedef struct Unicode61Tokenizer Unicode61Tokenizer;
struct Unicode61Tokenizer {
unsigned char aTokenChar[128]; /* ASCII range token characters */
char *aFold; /* Buffer to fold text into */
@@ -1262,10 +1268,11 @@
** Start of trigram implementation.
*/
typedef struct TrigramTokenizer TrigramTokenizer;
struct TrigramTokenizer {
int bFold; /* True to fold to lower-case */
+ int iFoldParam; /* Parameter to pass to Fts5UnicodeFold() */
};
/*
** Free a trigram tokenizer.
*/
@@ -1288,22 +1295,34 @@
if( pNew==0 ){
rc = SQLITE_NOMEM;
}else{
int i;
pNew->bFold = 1;
+ pNew->iFoldParam = 0;
for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
const char *zArg = azArg[i+1];
if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){
if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){
rc = SQLITE_ERROR;
}else{
pNew->bFold = (zArg[0]=='0');
}
+ }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){
+ if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){
+ rc = SQLITE_ERROR;
+ }else{
+ pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0;
+ }
}else{
rc = SQLITE_ERROR;
}
}
+
+ if( pNew->iFoldParam!=0 && pNew->bFold==0 ){
+ rc = SQLITE_ERROR;
+ }
+
if( rc!=SQLITE_OK ){
fts5TriDelete((Fts5Tokenizer*)pNew);
pNew = 0;
}
}
@@ -1322,44 +1341,66 @@
int (*xToken)(void*, int, const char*, int, int, int)
){
TrigramTokenizer *p = (TrigramTokenizer*)pTok;
int rc = SQLITE_OK;
char aBuf[32];
+ char *zOut = aBuf;
+ int ii;
const unsigned char *zIn = (const unsigned char*)pText;
const unsigned char *zEof = &zIn[nText];
u32 iCode;
+ int aStart[3]; /* Input offset of each character in aBuf[] */
UNUSED_PARAM(unusedFlags);
- while( 1 ){
- char *zOut = aBuf;
- int iStart = zIn - (const unsigned char*)pText;
- const unsigned char *zNext;
-
- READ_UTF8(zIn, zEof, iCode);
- if( iCode==0 ) break;
- zNext = zIn;
- if( zIn<zEof ){
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
- READ_UTF8(zIn, zEof, iCode);
- if( iCode==0 ) break;
- }else{
- break;
- }
- if( zIn<zEof ){
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
- READ_UTF8(zIn, zEof, iCode);
- if( iCode==0 ) break;
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
- }else{
- break;
- }
- rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf);
- if( rc!=SQLITE_OK ) break;
- zIn = zNext;
+
+ /* Populate aBuf[] with the characters for the first trigram. */
+ for(ii=0; ii<3; ii++){
+ do {
+ aStart[ii] = zIn - (const unsigned char*)pText;
+ READ_UTF8(zIn, zEof, iCode);
+ if( iCode==0 ) return SQLITE_OK;
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam);
+ }while( iCode==0 );
+ WRITE_UTF8(zOut, iCode);
+ }
+
+ /* At the start of each iteration of this loop:
+ **
+ ** aBuf: Contains 3 characters. The 3 characters of the next trigram.
+ ** zOut: Points to the byte following the last character in aBuf.
+ ** aStart[3]: Contains the byte offset in the input text corresponding
+ ** to the start of each of the three characters in the buffer.
+ */
+ assert( zIn<=zEof );
+ while( 1 ){
+ int iNext; /* Start of character following current trigram */
+ const char *z1;
+
+ /* Read characters from the input up until the first non-diacritic */
+ do {
+ iNext = zIn - (const unsigned char*)pText;
+ READ_UTF8(zIn, zEof, iCode);
+ if( iCode==0 ) break;
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam);
+ }while( iCode==0 );
+
+ /* Pass the current trigram back to fts5 */
+ rc = xToken(pCtx, 0, aBuf, zOut-aBuf, aStart[0], iNext);
+ if( iCode==0 || rc!=SQLITE_OK ) break;
+
+ /* Remove the first character from buffer aBuf[]. Append the character
+ ** with codepoint iCode. */
+ z1 = aBuf;
+ FTS5_SKIP_UTF8(z1);
+ memmove(aBuf, z1, zOut - z1);
+ zOut -= (z1 - aBuf);
+ WRITE_UTF8(zOut, iCode);
+
+ /* Update the aStart[] array */
+ aStart[0] = aStart[1];
+ aStart[1] = aStart[2];
+ aStart[2] = iNext;
}
return rc;
}
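The rewritten loop above keeps a three-character window in aBuf[] and slides it forward one character per emitted token. As a rough illustration of the windowing only, here is an ASCII-only, byte-oriented sketch (not part of the patch) that omits the case folding, diacritic removal and UTF-8 handling of the real tokenizer:

#include <stdio.h>
#include <string.h>

/* Emit one three-byte token per input position, sliding by one byte. */
static void emitTrigrams(
  const char *z, int n,
  void (*xToken)(const char *p, int nTok, int iStart, int iEnd)
){
  int i;
  for(i=0; i+3<=n; i++){
    xToken(&z[i], 3, i, i+3);
  }
}

static void printToken(const char *p, int nTok, int iStart, int iEnd){
  printf("[%d..%d) %.*s\n", iStart, iEnd, nTok, p);
}

int main(void){
  const char *doc = "abcd";
  emitTrigrams(doc, (int)strlen(doc), printToken);
  /* prints:  [0..3) abc   then   [1..4) bcd  */
  return 0;
}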
@@ -1378,11 +1419,13 @@
int (*xCreate)(void*, const char**, int, Fts5Tokenizer**),
Fts5Tokenizer *pTok
){
if( xCreate==fts5TriCreate ){
TrigramTokenizer *p = (TrigramTokenizer*)pTok;
- return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
+ if( p->iFoldParam==0 ){
+ return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
+ }
}
return FTS5_PATTERN_NONE;
}
/*
Index: ext/fts5/fts5_vocab.c
==================================================================
--- ext/fts5/fts5_vocab.c
+++ ext/fts5/fts5_vocab.c
@@ -627,11 +627,11 @@
if( idxNum & FTS5_VOCAB_TERM_LE ) pLe = apVal[iVal++];
if( pEq ){
zTerm = (const char *)sqlite3_value_text(pEq);
nTerm = sqlite3_value_bytes(pEq);
- f = 0;
+ f = FTS5INDEX_QUERY_NOTOKENDATA;
}else{
if( pGe ){
zTerm = (const char *)sqlite3_value_text(pGe);
nTerm = sqlite3_value_bytes(pGe);
}
Index: ext/fts5/test/fts5_common.tcl
==================================================================
--- ext/fts5/test/fts5_common.tcl
+++ ext/fts5/test/fts5_common.tcl
@@ -58,18 +58,28 @@
$cmd xPhraseColumnForeach $i c { lappend res $i.$c }
}
set res
}
+
+proc fts5_collist {cmd iPhrase} {
+ set res [list]
+ $cmd xPhraseColumnForeach $iPhrase c { lappend res $c }
+ set res
+}
proc fts5_test_columnsize {cmd} {
set res [list]
for {set i 0} {$i < [$cmd xColumnCount]} {incr i} {
lappend res [$cmd xColumnSize $i]
}
set res
}
+
+proc fts5_columntext {cmd iCol} {
+ $cmd xColumnText $iCol
+}
proc fts5_test_columntext {cmd} {
set res [list]
for {set i 0} {$i < [$cmd xColumnCount]} {incr i} {
lappend res [$cmd xColumnText $i]
@@ -122,10 +132,17 @@
$cmd xQueryPhrase $i [list test_queryphrase_cb cnt]
lappend res $cnt
}
set res
}
+
+proc fts5_queryphrase {cmd iPhrase} {
+ set cnt [list]
+ for {set j 0} {$j < [$cmd xColumnCount]} {incr j} { lappend cnt 0 }
+ $cmd xQueryPhrase $iPhrase [list test_queryphrase_cb cnt]
+ set cnt
+}
proc fts5_test_phrasecount {cmd} {
$cmd xPhraseCount
}
@@ -152,10 +169,13 @@
fts5_test_rowcount
fts5_test_all
fts5_test_queryphrase
fts5_test_phrasecount
+ fts5_columntext
+ fts5_queryphrase
+ fts5_collist
} {
sqlite3_fts5_create_function $db $f $f
}
}
@@ -436,10 +456,24 @@
}
proc detail_is_none {} { detail_check ; expr {$::detail == "none"} }
proc detail_is_col {} { detail_check ; expr {$::detail == "col" } }
proc detail_is_full {} { detail_check ; expr {$::detail == "full"} }
+proc foreach_tokenizer_mode {prefix script} {
+ set saved $::testprefix
+ foreach {d mapping} {
+ "" {}
+ "-origintext" {, tokenize="origintext unicode61", tokendata=1}
+ } {
+ set s [string map [list %TOKENIZER% $mapping] $script]
+ set ::testprefix "$prefix$d"
+ reset_db
+ sqlite3_fts5_register_origintext db
+ uplevel $s
+ }
+ set ::testprefix $saved
+}
#-------------------------------------------------------------------------
# Convert a poslist of the type returned by fts5_test_poslist() to a
# collist as returned by fts5_test_collist().
#
Index: ext/fts5/test/fts5aa.test
==================================================================
--- ext/fts5/test/fts5aa.test
+++ ext/fts5/test/fts5aa.test
@@ -20,10 +20,11 @@
finish_test
return
}
foreach_detail_mode $::testprefix {
+foreach_tokenizer_mode $::testprefix {
do_execsql_test 1.0 {
CREATE VIRTUAL TABLE t1 USING fts5(a, b, c);
SELECT name, sql FROM sqlite_master;
} {
@@ -42,11 +43,11 @@
#-------------------------------------------------------------------------
#
do_execsql_test 2.0 {
- CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL% %TOKENIZER%);
}
do_execsql_test 2.1 {
INSERT INTO t1 VALUES('a b c', 'd e f');
}
@@ -71,12 +72,13 @@
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 3.0 {
- CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%);
}
foreach {i x y} {
1 {g f d b f} {h h e i a}
2 {f i g j e} {i j c f f}
3 {e e i f a} {e h f d f}
@@ -95,12 +97,13 @@
}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 4.0 {
- CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t1(t1, rank) VALUES('pgsz', 32);
}
foreach {i x y} {
1 {g f d b f} {h h e i a}
2 {f i g j e} {i j c f f}
@@ -119,12 +122,13 @@
}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 5.0 {
- CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t1(t1, rank) VALUES('pgsz', 32);
}
foreach {i x y} {
1 {dd abc abc abc abcde} {aaa dd ddd ddd aab}
2 {dd aab d aaa b} {abcde c aaa aaa aaa}
@@ -143,12 +147,13 @@
}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 6.0 {
- CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t1(t1, rank) VALUES('pgsz', 32);
}
do_execsql_test 6.1 {
INSERT INTO t1(rowid, x, y) VALUES(22, 'a b c', 'c b a');
@@ -179,10 +184,11 @@
}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
expr srand(0)
do_execsql_test 7.0 {
CREATE VIRTUAL TABLE t1 USING fts5(x,y,z);
INSERT INTO t1(t1, rank) VALUES('pgsz', 32);
}
@@ -220,10 +226,11 @@
}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 8.0 {
CREATE VIRTUAL TABLE t1 USING fts5(x, prefix="1,2,3");
INSERT INTO t1(t1, rank) VALUES('pgsz', 32);
}
@@ -234,10 +241,11 @@
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
expr srand(0)
do_execsql_test 9.0 {
CREATE VIRTUAL TABLE t1 USING fts5(x,y,z, prefix="1,2,3");
@@ -278,12 +286,13 @@
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 10.0 {
- CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%);
}
set d10 {
1 {g f d b f} {h h e i a}
2 {f i g j e} {i j c f f}
3 {e e i f a} {e h f d f}
@@ -312,23 +321,23 @@
do_execsql_test 10.4.2 { INSERT INTO t1(t1) VALUES('integrity-check') }
#-------------------------------------------------------------------------
#
do_catchsql_test 11.1 {
- CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rank, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rank, detail=%DETAIL% %TOKENIZER%);
} {1 {reserved fts5 column name: rank}}
do_catchsql_test 11.2 {
- CREATE VIRTUAL TABLE rank USING fts5(a, b, c, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE rank USING fts5(a, b, c, detail=%DETAIL% %TOKENIZER%);
} {1 {reserved fts5 table name: rank}}
do_catchsql_test 11.3 {
- CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rowid, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rowid, detail=%DETAIL% %TOKENIZER%);
} {1 {reserved fts5 column name: rowid}}
#-------------------------------------------------------------------------
#
do_execsql_test 12.1 {
- CREATE VIRTUAL TABLE t2 USING fts5(x,y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t2 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%);
} {}
do_catchsql_test 12.2 {
SELECT t2 FROM t2 WHERE t2 MATCH '*stuff'
} {1 {unknown special query: stuff}}
@@ -339,12 +348,13 @@
} {1}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 13.1 {
- CREATE VIRTUAL TABLE t1 USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t1(rowid, x) VALUES(1, 'o n e'), (2, 't w o');
} {}
do_execsql_test 13.2 {
SELECT rowid FROM t1 WHERE t1 MATCH 'o';
@@ -363,12 +373,13 @@
} {}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 14.1 {
- CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t1(t1, rank) VALUES('pgsz', 32);
WITH d(x,y) AS (
SELECT NULL, 'xyz xyz xyz xyz xyz xyz'
UNION ALL
SELECT NULL, 'xyz xyz xyz xyz xyz xyz' FROM d
@@ -447,12 +458,13 @@
# {1 {SQL logic error}}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 17.1 {
- CREATE VIRTUAL TABLE b2 USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE b2 USING fts5(x, detail=%DETAIL% %TOKENIZER%);
INSERT INTO b2 VALUES('a');
INSERT INTO b2 VALUES('b');
INSERT INTO b2 VALUES('c');
}
@@ -464,22 +476,24 @@
set res
} {{a b c} {a b c} {a b c}}
if {[string match n* %DETAIL%]==0} {
reset_db
+ sqlite3_fts5_register_origintext db
do_execsql_test 17.3 {
- CREATE VIRTUAL TABLE c2 USING fts5(x, y, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE c2 USING fts5(x, y, detail=%DETAIL% %TOKENIZER%);
INSERT INTO c2 VALUES('x x x', 'x x x');
SELECT rowid FROM c2 WHERE c2 MATCH 'y:x';
} {1}
}
#-------------------------------------------------------------------------
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 17.1 {
- CREATE VIRTUAL TABLE uio USING fts5(ttt, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE uio USING fts5(ttt, detail=%DETAIL% %TOKENIZER%);
INSERT INTO uio VALUES(NULL);
INSERT INTO uio SELECT NULL FROM uio;
INSERT INTO uio SELECT NULL FROM uio;
INSERT INTO uio SELECT NULL FROM uio;
INSERT INTO uio SELECT NULL FROM uio;
@@ -522,12 +536,12 @@
} {-9223372036854775808 9 10}
#--------------------------------------------------------------------
#
do_execsql_test 18.1 {
- CREATE VIRTUAL TABLE t1 USING fts5(a, b, detail=%DETAIL%);
- CREATE VIRTUAL TABLE t2 USING fts5(c, d, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t1 USING fts5(a, b, detail=%DETAIL% %TOKENIZER%);
+ CREATE VIRTUAL TABLE t2 USING fts5(c, d, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t1 VALUES('abc*', NULL);
INSERT INTO t2 VALUES(1, 'abcdefg');
}
do_execsql_test 18.2 {
SELECT t1.rowid, t2.rowid FROM t1, t2 WHERE t2 MATCH t1.a AND t1.rowid = t2.c
@@ -538,23 +552,25 @@
#--------------------------------------------------------------------
# fts5 table in the temp schema.
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 19.0 {
- CREATE VIRTUAL TABLE temp.t1 USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE temp.t1 USING fts5(x, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t1 VALUES('x y z');
INSERT INTO t1 VALUES('w x 1');
SELECT rowid FROM t1 WHERE t1 MATCH 'x';
} {1 2}
#--------------------------------------------------------------------
# Test that 6 and 7 byte varints can be read.
#
reset_db
+sqlite3_fts5_register_origintext db
do_execsql_test 20.0 {
- CREATE VIRTUAL TABLE temp.tmp USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE temp.tmp USING fts5(x, detail=%DETAIL% %TOKENIZER%);
}
set ::ids [list \
0 [expr 1<<36] [expr 2<<36] [expr 1<<43] [expr 2<<43]
]
do_test 20.1 {
@@ -568,11 +584,11 @@
# Test that a DROP TABLE may be executed within a transaction that
# writes to an FTS5 table.
#
do_execsql_test 21.0 {
CREATE TEMP TABLE t8(a, b);
- CREATE VIRTUAL TABLE ft USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE ft USING fts5(x, detail=%DETAIL% %TOKENIZER%);
}
do_execsql_test 21.1 {
BEGIN;
INSERT INTO ft VALUES('a b c');
@@ -579,11 +595,11 @@
DROP TABLE t8;
COMMIT;
}
do_execsql_test 22.0 {
- CREATE VIRTUAL TABLE t9 USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t9 USING fts5(x, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t9(rowid, x) VALUES(2, 'bbb');
BEGIN;
INSERT INTO t9(rowid, x) VALUES(1, 'aaa');
DELETE FROM t9 WHERE rowid = 2;
INSERT INTO t9(rowid, x) VALUES(3, 'bbb');
@@ -594,11 +610,11 @@
SELECT rowid FROM t9('a*')
} {1}
#-------------------------------------------------------------------------
do_execsql_test 23.0 {
- CREATE VIRTUAL TABLE t10 USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t10 USING fts5(x, detail=%DETAIL% %TOKENIZER%);
CREATE TABLE t11(x);
}
do_execsql_test 23.1 {
SELECT * FROM t11, t10 WHERE t11.x = t10.x AND t10.rowid IS NULL;
}
@@ -606,30 +622,33 @@
SELECT * FROM t11, t10 WHERE t10.rowid IS NULL;
}
#-------------------------------------------------------------------------
do_execsql_test 24.0 {
- CREATE VIRTUAL TABLE t12 USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t12 USING fts5(x, detail=%DETAIL% %TOKENIZER%);
INSERT INTO t12 VALUES('aaaa');
}
do_execsql_test 24.1 {
BEGIN;
DELETE FROM t12 WHERE rowid=1;
SELECT * FROM t12('aaaa');
INSERT INTO t12 VALUES('aaaa');
END;
}
+execsql_pp {
+ SELECT rowid, hex(block) FROM t12_data
+}
do_execsql_test 24.2 {
INSERT INTO t12(t12) VALUES('integrity-check');
}
do_execsql_test 24.3 {
SELECT * FROM t12('aaaa');
} {aaaa}
#-------------------------------------------------------------------------
do_execsql_test 25.0 {
- CREATE VIRTUAL TABLE t13 USING fts5(x, detail=%DETAIL%);
+ CREATE VIRTUAL TABLE t13 USING fts5(x, detail=%DETAIL% %TOKENIZER%);
}
do_execsql_test 25.1 {
BEGIN;
INSERT INTO t13 VALUES('AAAA');
SELECT * FROM t13('BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB*');
@@ -636,9 +655,10 @@
END;
}
+}
}
expand_all_sql db
finish_test
Index: ext/fts5/test/fts5aux.test
==================================================================
--- ext/fts5/test/fts5aux.test
+++ ext/fts5/test/fts5aux.test
@@ -331,7 +331,50 @@
}
do_execsql_test 11.2 {
SELECT fts5_hitcount(x1) FROM x1('one') LIMIT 1;
} {5}
+
+#-------------------------------------------------------------------------
+# Test that xColumnText returns SQLITE_RANGE when it should.
+#
+reset_db
+fts5_aux_test_functions db
+do_execsql_test 12.0 {
+ CREATE VIRTUAL TABLE t1 USING fts5(a, b, c);
+ INSERT INTO t1 VALUES('one', 'two', 'three');
+ INSERT INTO t1 VALUES('one', 'one', 'one');
+ INSERT INTO t1 VALUES('two', 'two', 'two');
+ INSERT INTO t1 VALUES('three', 'three', 'three');
+}
+
+do_catchsql_test 12.1.1 {
+ SELECT fts5_columntext(t1, -1) FROM t1('two');
+} {1 SQLITE_RANGE}
+do_catchsql_test 12.1.2 {
+ SELECT fts5_columntext(t1, 3) FROM t1('two');
+} {1 SQLITE_RANGE}
+do_catchsql_test 12.1.3 {
+ SELECT fts5_columntext(t1, 1) FROM t1('one AND two');
+} {0 two}
+
+do_catchsql_test 12.2.1 {
+ SELECT fts5_queryphrase(t1, -1) FROM t1('one AND two');
+} {1 SQLITE_RANGE}
+do_catchsql_test 12.2.2 {
+ SELECT fts5_queryphrase(t1, 2) FROM t1('one AND two');
+} {1 SQLITE_RANGE}
+do_catchsql_test 12.2.3 {
+ SELECT fts5_queryphrase(t1, 1) FROM t1('one AND two');
+} {0 {{1 2 1}}}
+
+do_catchsql_test 12.3.1 {
+ SELECT fts5_collist(t1, -1) FROM t1('one AND two');
+} {1 SQLITE_RANGE}
+do_catchsql_test 12.3.2 {
+ SELECT fts5_collist(t1, 2) FROM t1('one AND two');
+} {1 SQLITE_RANGE}
+do_catchsql_test 12.3.3 {
+ SELECT fts5_collist(t1, 1) FROM t1('one AND two');
+} {0 1}
finish_test
Index: ext/fts5/test/fts5content.test
==================================================================
--- ext/fts5/test/fts5content.test
+++ ext/fts5/test/fts5content.test
@@ -290,7 +290,42 @@
SELECT count(*) FROM t1;
} {1 {recursively defined fts5 content table}}
do_catchsql_test 7.2.5 {
SELECT * FROM t1('abc') ORDER BY rank;
} {1 {recursively defined fts5 content table}}
+
+#---------------------------------------------------------------------------
+# Check that if the content table is a view, and that view contains an
+# error, a reasonable error message is returned if the user tries to
+# read from the view via the fts5 table.
+#
+reset_db
+do_execsql_test 8.1 {
+ CREATE VIEW a1 AS
+ SELECT 1 AS r, text_value(1) AS t
+ UNION ALL
+ SELECT 2 AS r, text_value(2) AS t;
+
+ CREATE VIRTUAL TABLE t1 USING fts5(t, content='a1', content_rowid='r');
+}
+
+foreach {tn sql} {
+ 1 "SELECT * FROM t1"
+ 2 "INSERT INTO t1(t1) VALUES('rebuild')"
+ 3 "SELECT * FROM t1 WHERE rowid=1"
+} {
+ do_catchsql_test 8.2.$tn $sql {1 {no such function: text_value}}
+}
+
+proc text_value {i} {
+ if {$i==1} { return "one" }
+ if {$i==2} { return "two" }
+ return "many"
+}
+db func text_value text_value
+
+do_execsql_test 8.3.1 { SELECT * FROM t1 } {one two}
+do_execsql_test 8.3.2 { INSERT INTO t1(t1) VALUES('rebuild') }
+do_execsql_test 8.3.3 { SELECT * FROM t1 WHERE rowid=1 } {one}
+do_execsql_test 8.3.4 { SELECT rowid FROM t1('two') } {2}
finish_test
Index: ext/fts5/test/fts5corrupt5.test
==================================================================
--- ext/fts5/test/fts5corrupt5.test
+++ ext/fts5/test/fts5corrupt5.test
@@ -966,10 +966,266 @@
UPDATE t1 SET content=randomblob(500) WHERE t1;
} {1 {constraint failed}}
#-------------------------------------------------------------------------
reset_db
+do_test 7.0 {
+ sqlite3 db {}
+ db deserialize [decode_hexdb {
+.open --hexdb
+| size 40960 pagesize 4096 filename crash-d8b4a99207c10b.db
+| page 1 offset 0
+| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3.
+| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 0a .....@ ........
+| 32: 00 00 00 00 00 00 00 00 00 00 00 0d 00 00 00 04 ................
+| 48: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 ................
+| 96: 00 00 00 00 0d 00 00 00 0d 0b 62 00 0f 97 0f 40 ..........b....@
+| 112: 0e d5 0e 75 0e 18 0d c0 0d 66 0d 0f 0c a4 0c 44 ...u.....f.....D
+| 128: 0b ec 0b a7 0b 62 00 00 00 00 00 00 00 00 00 00 .....b..........
+| 2912: 00 00 43 0d 06 17 11 11 08 75 74 61 62 6c 65 74 ..C......utablet
+| 2928: 34 74 34 43 52 45 41 54 45 20 56 49 52 54 55 41 4t4CREATE VIRTUA
+| 2944: 4c 20 54 41 42 4c 45 20 74 34 20 55 53 49 4e 47 L TABLE t4 USING
+| 2960: 20 66 74 73 35 76 6f 63 61 62 28 27 74 32 27 2c fts5vocab('t2',
+| 2976: 20 27 72 6f 77 27 29 43 0c 06 17 11 11 08 75 74 'row')C......ut
+| 2992: 61 62 6c 65 74 33 74 33 43 52 45 41 54 45 20 56 ablet3t3CREATE V
+| 3008: 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 33 20 IRTUAL TABLE t3
+| 3024: 55 53 49 4e 47 20 66 74 73 35 76 6f 63 61 62 28 USING fts5vocab(
+| 3040: 27 74 31 27 2c 20 27 72 6f 77 27 29 56 0b 06 17 't1', 'row')V...
+| 3056: 1f 1f 01 7d 74 61 62 6c 65 74 32 5f 63 6f 6e 66 ....tablet2_conf
+| 3072: 69 67 74 32 5f 63 6f 6e 66 69 67 0a 43 52 45 41 igt2_config.CREA
+| 3088: 54 45 20 54 41 42 4c 45 20 27 74 32 5f 63 6f 6e TE TABLE 't2_con
+| 3104: 66 69 67 27 28 6b 20 50 52 49 4d 41 52 59 20 4b fig'(k PRIMARY K
+| 3120: 45 59 2c 20 76 29 20 57 49 54 48 4f 55 54 20 52 EY, v) WITHOUT R
+| 3136: 4f 57 49 44 5e 0a 07 17 21 21 01 81 07 74 61 62 OWID^...!!...tab
+| 3152: 6c 65 74 32 5f 63 6f 6e 74 65 6e 74 74 32 5f 63 let2_contentt2_c
+| 3168: 6f 6e 74 65 6e 74 09 43 52 45 41 54 45 20 54 41 ontent.CREATE TA
+| 3184: 42 4c 45 20 27 74 32 5f 63 6f 6e 74 65 6e 74 27 BLE 't2_content'
+| 3200: 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 49 4d (id INTEGER PRIM
+| 3216: 41 52 59 20 4b 45 59 2c 20 63 30 2c 20 63 31 2c ARY KEY, c0, c1,
+| 3232: 20 63 32 29 69 09 07 17 19 19 01 81 2d 74 61 62 c2)i.......-tab
+| 3248: 6c 65 74 32 5f 69 64 78 74 32 5f 69 64 78 08 43 let2_idxt2_idx.C
+| 3264: 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 32 5f REATE TABLE 't2_
+| 3280: 69 64 78 27 28 73 65 67 69 64 2c 20 74 65 72 6d idx'(segid, term
+| 3296: 2c 20 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 20 , pgno, PRIMARY
+| 3312: 4b 45 59 28 73 65 67 69 64 2c 20 74 65 72 6d 29 KEY(segid, term)
+| 3328: 29 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 55 ) WITHOUT ROWIDU
+| 3344: 08 07 17 1b 1b 01 81 01 74 61 62 6c 65 74 32 5f ........tablet2_
+| 3360: 64 61 74 61 74 32 5f 64 61 74 61 07 43 52 45 41 datat2_data.CREA
+| 3376: 54 45 20 54 41 42 4c 45 20 27 74 32 5f 64 61 74 TE TABLE 't2_dat
+| 3392: 61 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 a'(id INTEGER PR
+| 3408: 49 4d 41 52 59 20 4b 45 59 2c 20 62 6c 6f 63 6b IMARY KEY, block
+| 3424: 20 42 4c 4f 42 29 58 07 07 17 11 11 08 81 1d 74 BLOB)X........t
+| 3440: 61 62 6c 65 74 32 74 32 43 52 45 41 54 45 20 56 ablet2t2CREATE V
+| 3456: 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 32 20 IRTUAL TABLE t2
+| 3472: 55 53 49 4e 47 20 66 74 73 35 28 27 61 27 2c 5b USING fts5('a',[
+| 3488: 62 5d 2c 22 63 22 2c 64 65 74 61 69 6c 3d 6e 6f b],.c.,detail=no
+| 3504: 6e 65 2c 63 6f 6c 75 6d 6e 73 69 7a 65 3d 30 29 ne,columnsize=0)
+| 3520: 56 06 06 17 1f 1f 01 7d 74 61 62 6c 65 74 31 5f V.......tablet1_
+| 3536: 63 6f 6e 66 69 67 74 31 5f 63 6f 6e 66 69 67 06 configt1_config.
+| 3552: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1
+| 3568: 5f 63 6f 6e 66 69 67 27 28 6b 20 50 52 49 4d 41 _config'(k PRIMA
+| 3584: 52 59 20 4b 45 59 2c 20 76 29 20 57 49 54 48 4f RY KEY, v) WITHO
+| 3600: 55 54 20 52 4f 57 49 44 5b 05 07 17 21 21 01 81 UT ROWID[...!!..
+| 3616: 01 74 61 62 6c 65 74 31 5f 64 6f 63 73 69 7a 65 .tablet1_docsize
+| 3632: 74 31 5f 64 6f 63 73 69 7a 65 05 43 52 45 41 54 t1_docsize.CREAT
+| 3648: 45 20 54 41 42 4c 45 20 27 74 31 5f 64 6f 63 73 E TABLE 't1_docs
+| 3664: 69 7a 65 27 28 69 64 20 49 4e 54 45 47 45 52 20 ize'(id INTEGER
+| 3680: 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 73 7a 20 PRIMARY KEY, sz
+| 3696: 42 4c 4f 42 29 5e 04 07 17 21 21 01 81 07 74 61 BLOB)^...!!...ta
+| 3712: 62 6c 65 74 31 5f 63 6f 6e 74 65 6e 74 74 31 5f blet1_contentt1_
+| 3728: 63 6f 6e 74 65 6e 74 04 43 52 45 41 54 45 20 54 content.CREATE T
+| 3744: 41 42 4c 45 20 27 74 31 5f 63 6f 6e 74 65 6e 74 ABLE 't1_content
+| 3760: 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 49 '(id INTEGER PRI
+| 3776: 4d 41 52 59 20 4b 45 59 2c 20 63 30 2c 20 63 31 MARY KEY, c0, c1
+| 3792: 2c 20 63 32 29 69 03 07 17 19 19 01 81 2d 74 61 , c2)i.......-ta
+| 3808: 62 6c 65 74 31 5f 69 64 78 74 31 5f 69 64 78 03 blet1_idxt1_idx.
+| 3824: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1
+| 3840: 5f 69 64 78 27 28 73 65 67 69 64 2c 20 74 65 72 _idx'(segid, ter
+| 3856: 6d 2c 20 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 m, pgno, PRIMARY
+| 3872: 20 4b 45 59 28 73 65 67 69 64 2c 20 74 65 72 6d KEY(segid, term
+| 3888: 29 29 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 )) WITHOUT ROWID
+| 3904: 55 02 07 17 1b 1b 01 81 01 74 61 62 6c 65 74 31 U........tablet1
+| 3920: 5f 64 61 74 61 74 31 5f 64 61 74 61 02 43 52 45 _datat1_data.CRE
+| 3936: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 64 61 ATE TABLE 't1_da
+| 3952: 74 61 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 ta'(id INTEGER P
+| 3968: 52 49 4d 41 52 59 20 4b 45 59 2c 20 62 6c 6f 63 RIMARY KEY, bloc
+| 3984: 6b 20 42 4c 4f 42 29 67 01 07 17 11 11 08 81 3b k BLOB)g.......;
+| 4000: 74 61 62 6c 65 74 31 74 31 43 52 45 41 54 45 20 tablet1t1CREATE
+| 4016: 56 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 31 VIRTUAL TABLE t1
+| 4032: 20 55 53 49 4e 47 20 66 74 73 35 28 61 2c 62 20 USING fts5(a,b
+| 4048: 75 6e 69 6e 64 65 78 65 64 2c 63 2c 74 6f 6b 65 unindexed,c,toke
+| 4064: 6e 69 7a 65 3d 22 70 6f 72 74 65 72 20 61 73 63 nize=.porter asc
+| 4080: 69 69 22 2c 74 6f 6b 65 6e 64 61 74 61 3d 31 29 ii.,tokendata=1)
+| page 2 offset 4096
+| 0: 0d 0f 68 00 05 0f 13 00 0f e6 0f 13 0f a8 0f 7c ..h............|
+| 16: 0f 2a 00 00 00 00 00 00 00 00 00 00 00 00 00 00 .*..............
+| 3856: 00 00 00 15 0a 03 00 30 00 00 00 00 01 03 03 00 .......0........
+| 3872: 03 01 01 01 02 01 01 03 01 01 37 8c 80 80 80 80 ..........7.....
+| 3888: 01 03 00 74 00 00 00 2e 02 30 61 03 02 02 01 01 ...t.....0a.....
+| 3904: 62 03 02 03 01 01 63 03 02 04 01 01 67 03 06 01 b.....c.....g...
+| 3920: 02 02 01 01 68 03 06 01 02 03 01 01 69 03 06 01 ....h.......i...
+| 3936: 02 04 04 06 06 06 08 08 0f ef 00 14 2a 00 00 00 ............*...
+| 3952: 00 01 02 02 00 02 01 01 01 02 01 01 25 88 80 80 ............%...
+| 3968: 80 80 01 03 00 50 00 00 00 1f 02 30 67 02 08 02 .....P.....0g...
+| 3984: 01 02 02 01 01 68 02 08 03 01 02 03 01 01 69 02 .....h........i.
+| 4000: 08 04 01 02 04 04 09 09 37 84 80 80 80 7f f1 03 ........7.......
+| 4016: 00 74 00 00 00 2e 02 30 61 01 02 02 01 01 62 01 .t.....0a.....b.
+| 4032: 02 03 01 01 63 01 02 04 01 01 67 01 06 01 02 02 ....c.....g.....
+| 4048: 01 01 68 01 06 01 02 03 01 01 69 01 06 01 02 04 ..h.......i.....
+| 4064: 04 06 06 06 08 08 07 01 03 00 14 03 09 00 09 00 ................
+| 4080: 00 00 11 24 00 00 00 00 01 01 01 00 01 01 01 01 ...$............
+| page 3 offset 8192
+| 0: 0a 00 00 00 03 0f ec 00 0f fa 0f f3 0f ec 00 00 ................
+| 4064: 00 00 00 00 00 00 00 00 00 00 00 00 06 04 01 0c ................
+| 4080: 01 03 02 06 04 01 0c 01 02 02 05 04 09 0c 01 02 ................
+| page 4 offset 12288
+| 0: 0d 00 00 00 03 0f be 00 0f ea 0f d4 0f be 00 00 ................
+| 4016: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 14 03 ................
+| 4032: 05 00 17 17 17 61 20 62 20 63 67 20 68 20 69 67 .....a b cg h ig
+| 4048: 20 68 20 69 14 02 05 00 17 17 17 67 20 68 20 69 h i.......g h i
+| 4064: 61 20 62 20 63 67 20 68 20 69 14 01 05 00 17 17 a b cg h i......
+| 4080: 17 61 20 62 20 63 64 20 65 20 66 67 20 68 20 69 .a b cd e fg h i
+| page 5 offset 16384
+| 0: 0d 00 00 00 03 0f e8 00 0f f8 0f f0 0f e8 00 00 ................
+| 4064: 00 00 00 00 00 00 00 00 06 03 03 00 12 03 00 03 ................
+| 4080: 06 02 03 00 12 03 00 03 06 01 03 00 12 03 00 03 ................
+| page 6 offset 20480
+| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................
+| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version.
+| page 7 offset 24576
+| 0: 0d 00 00 00 03 0f 9e 00 0f e6 0f ef 0f 9e 00 00 ................
+| 3984: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 41 84 ..............A.
+| 4000: 80 80 80 80 01 04 00 81 06 00 00 00 34 02 30 61 ............4.0a
+| 4016: 01 01 01 01 01 62 01 01 01 01 01 63 01 01 01 01 .....b.....c....
+| 4032: 01 64 01 01 01 65 01 01 01 66 01 01 01 67 01 01 .d...e...f...g..
+| 4048: 01 01 01 68 01 01 01 01 01 69 01 01 01 04 06 06 ...h.....i......
+| 4064: 06 04 04 04 06 06 07 01 03 00 14 03 09 09 09 0f ................
+| 4080: 0a 03 00 24 00 00 00 00 01 01 01 00 01 01 01 01 ...$............
+| page 8 offset 28672
+| 0: 0a 00 00 00 01 0f fa 00 0f fa 00 00 00 00 00 00 ................
+| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................
+| page 9 offset 32768
+| 0: 0d 00 00 00 03 0f be 00 0f ea 0f d4 0f be 00 00 ................
+| 4016: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 14 03 ................
+| 4032: 05 00 17 17 17 61 20 62 20 63 67 20 68 20 69 67 .....a b cg h ig
+| 4048: 20 68 20 69 14 02 05 00 17 17 17 67 20 68 20 69 h i.......g h i
+| 4064: 61 20 62 20 63 67 20 68 20 69 14 01 05 00 17 17 a b cg h i......
+| 4080: 17 61 20 62 20 63 64 20 65 20 66 67 20 68 20 69 .a b cd e fg h i
+| page 10 offset 36864
+| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................
+| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version.
+| end crash-d8b4a99207c10b.db
+}]} {}
+
+do_catchsql_test 7.1 {
+ SELECT snippet(t1, -1, '.', '..', '[', ']'),
+ highlight(t1, 2, '[', ']')
+ FROM t1('g + h')
+ WHERE rank MATCH 'bm25(1.0, 1.0)' ORDER BY rank;
+} {1 {database disk image is malformed}}
+
+#-------------------------------------------------------------------------
+reset_db
+do_test 8.0 {
+ sqlite3 db {}
+ db deserialize [decode_hexdb {
+.open --hexdb
+| size 20480 pagesize 4096 filename crash-d57c01958e48ab.db
+| page 1 offset 0
+| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3.
+| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 05 .....@ ........
+| 32: 00 00 00 00 00 00 00 00 00 00 00 05 00 00 00 04 ................
+| 48: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 ................
+| 96: 00 00 00 00 0d 00 00 00 05 0e 10 00 0f 97 0f 40 ...............@
+| 112: 0e d5 0e 68 0e 10 01 00 00 00 00 00 00 00 00 00 ...h............
+| 3600: 56 05 06 17 1f 1f 01 7d 74 61 62 6c 65 74 31 5f V.......tablet1_
+| 3616: 63 6f 6e 66 69 67 74 31 5f 63 6f 6e 66 69 67 05 configt1_config.
+| 3632: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1
+| 3648: 5f 63 6f 6e 66 69 67 27 28 6b 20 50 52 49 4d 41 _config'(k PRIMA
+| 3664: 52 59 20 4b 45 59 2c 20 76 29 20 57 49 54 48 4f RY KEY, v) WITHO
+| 3680: 55 54 20 52 4f 57 49 44 6b 04 07 17 21 21 01 81 UT ROWIDk...!!..
+| 3696: 21 74 61 62 6c 65 74 31 5f 64 6f 63 73 69 7a 65 !tablet1_docsize
+| 3712: 74 31 5f 64 6f 63 73 69 7a 65 04 43 52 45 41 54 t1_docsize.CREAT
+| 3728: 45 20 54 41 42 4c 45 20 27 74 31 5f 64 6f 63 73 E TABLE 't1_docs
+| 3744: 69 7a 65 27 28 69 64 20 49 4e 54 45 47 45 52 20 ize'(id INTEGER
+| 3760: 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 73 7a 20 PRIMARY KEY, sz
+| 3776: 42 4c 4f 42 2c 20 6f 72 69 67 69 6e 20 49 4e 54 BLOB, origin INT
+| 3792: 45 47 45 52 29 69 03 07 17 19 19 01 81 2d 74 61 EGER)i.......-ta
+| 3808: 62 6c 65 74 31 5f 69 64 78 74 31 5f 69 64 78 03 blet1_idxt1_idx.
+| 3824: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1
+| 3840: 5f 69 64 78 27 28 73 65 67 69 64 2c 20 74 65 72 _idx'(segid, ter
+| 3856: 6d 2c 20 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 m, pgno, PRIMARY
+| 3872: 20 4b 45 59 28 73 65 67 69 64 2c 20 74 65 72 6d KEY(segid, term
+| 3888: 29 29 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 )) WITHOUT ROWID
+| 3904: 55 02 07 17 1b 1b 01 81 01 74 61 62 6c 65 74 31 U........tablet1
+| 3920: 5f 64 61 74 61 74 31 5f 64 61 74 61 02 43 52 45 _datat1_data.CRE
+| 3936: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 64 61 ATE TABLE 't1_da
+| 3952: 74 61 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 ta'(id INTEGER P
+| 3968: 52 49 4d 41 52 59 20 4b 45 59 2c 20 62 6c 6f 63 RIMARY KEY, bloc
+| 3984: 6b 20 42 4c 4f 42 29 67 01 07 17 11 11 08 81 3b k BLOB)g.......;
+| 4000: 74 61 62 6c 65 74 31 74 31 43 52 45 41 54 45 20 tablet1t1CREATE
+| 4016: 56 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 31 VIRTUAL TABLE t1
+| 4032: 20 55 53 49 4e 47 20 66 74 73 35 28 61 2c 20 62 USING fts5(a, b
+| 4048: 2c 20 63 6f 6e 74 65 6e 74 3d 27 27 2c 20 63 6f , content='', co
+| 4064: 6e 74 65 6e 74 6c 65 73 73 5f 64 65 6c 65 74 65 ntentless_delete
+| 4080: 3d 31 2c 20 74 6f 6b 65 6e 64 61 74 61 3d 31 29 =1, tokendata=1)
+| page 2 offset 4096
+| 0: 0d 0f eb 00 03 0e 17 00 0f e2 0e 17 0e 31 00 00 .............1..
+| 16: 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+| 3600: 00 00 00 00 00 00 00 18 0a 03 00 36 00 00 00 00 ...........6....
+| 3616: ff 00 00 01 01 01 01 00 01 01 01 01 01 01 00 00 ................
+| 3632: 07 83 29 84 80 80 80 80 01 04 00 86 56 00 00 01 ..).........V...
+| 3648: 96 04 30 61 61 61 01 02 02 01 04 02 04 01 08 02 ..0aaa..........
+| 3664: 04 04 04 01 10 02 04 04 04 04 04 04 04 01 20 02 .............. .
+| 3680: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 01 ................
+| 3696: 40 02 04 04 04 04 04 04 04 04 04 04 04 04 04 04 @...............
+| 3712: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................
+| 3728: 04 01 81 00 02 04 04 04 04 04 04 04 04 04 04 04 ................
+| 3744: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................
+| 3760: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................
+| 3776: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................
+| 3792: 04 04 04 04 02 02 62 63 01 06 01 01 02 01 03 62 ......bc.......b
+| 3808: 62 62 02 02 03 01 04 03 06 01 08 03 06 06 06 01 bb..............
+| 3824: 10 03 06 06 06 06 06 06 06 01 20 03 06 06 06 06 .......... .....
+| 3840: 06 06 06 06 06 06 06 06 06 06 06 01 40 03 06 06 ............@...
+| 3856: 06 06 06 06 06 06 06 06 06 06 06 06 06 06 06 06 ................
+| 3872: 06 06 06 06 06 06 06 06 06 06 16 06 06 02 02 63 ...............c
+| 3888: 64 02 06 01 01 02 01 03 63 63 63 03 02 05 01 04 d.......ccc.....
+| 3904: 05 0a 01 08 05 0a 0a 0a 01 10 05 0a 0a 0a 0a 0a ................
+| 3920: 0a 0a 01 20 05 0a 0a 0a 0a 0a 0a 0a 0a 0a 0a 0a ... ............
+| 3936: 0a 0a 0a 0a 02 02 64 65 03 06 01 01 02 01 03 64 ......de.......d
+| 3952: 64 64 04 02 09 01 04 09 12 01 08 09 12 12 12 01 dd..............
+| 3968: 10 09 12 12 12 12 12 12 12 02 02 65 66 04 06 01 ...........ef...
+| 3984: 01 02 01 03 65 65 65 05 02 11 01 04 11 22 01 08 ....eee.........
+| 4000: 11 22 22 22 02 02 66 67 05 06 01 01 02 01 03 66 ......fg.......f
+| 4016: 56 66 06 02 21 01 04 21 42 02 02 67 68 06 06 01 Vf..!..!B..gh...
+| 4032: 01 02 cb 03 67 67 67 07 02 41 02 02 68 69 07 06 ....ggg..A..hi..
+| 4048: 01 01 02 04 81 13 09 50 09 2e 09 1c 09 12 09 0c .......P........
+| 4064: 09 08 07 01 03 00 14 07 81 77 07 00 00 00 15 22 .........w......
+| 4080: 00 00 00 00 ff 00 00 01 00 00 00 00 00 00 05 0c ................
+| page 3 offset 8192
+| 0: 0a 00 00 00 01 0f fa 00 0f fa 00 00 00 00 00 00 ................
+| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................
+| page 4 offset 12288
+| 0: 0d 00 00 00 07 0f c8 00 0f f8 0f f0 0f e8 0f e0 ................
+| 16: 0f d8 0f d0 0f c8 00 00 00 00 00 00 00 00 00 00 ................
+| 4032: 00 00 00 00 00 00 00 00 06 07 04 00 10 09 7f 01 ................
+| 4048: 06 06 04 00 10 09 3f 01 06 05 04 00 10 09 1f 01 ......?.........
+| 4064: 06 04 04 00 10 09 0f 01 06 03 04 00 10 09 07 01 ................
+| 4080: 06 02 04 00 10 09 03 01 06 01 04 00 10 09 01 01 ................
+| page 5 offset 16384
+| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................
+| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version.
+| end crash-d57c01958e48ab.db
+}]} {}
+
+do_catchsql_test 8.1 {
+ SELECT rowid FROM t1('a* NOT ý‘') ;
+} {0 {1 2 3 4 5 6 7}}
+
+#-------------------------------------------------------------------------
+reset_db
do_test 9.0 {
sqlite3 db {}
db deserialize [decode_hexdb {
.open --hexdb
| size 32768 pagesize 4096 filename crash-c76a16c24c8ba6.db
@@ -1195,5 +1451,6 @@
DELETE FROM t1;
} {1 {database disk image is malformed}}
sqlite3_fts5_may_be_corrupt 0
finish_test
+
Index: ext/fts5/test/fts5fault8.test
==================================================================
--- ext/fts5/test/fts5fault8.test
+++ ext/fts5/test/fts5fault8.test
@@ -55,11 +55,10 @@
}
}
} ;# foreach_detail_mode...
-
do_execsql_test 4.0 {
CREATE VIRTUAL TABLE x2 USING fts5(a);
INSERT INTO x2(x2, rank) VALUES('crisismerge', 2);
INSERT INTO x2(x2, rank) VALUES('pgsz', 32);
INSERT INTO x2 VALUES('a b c d');
@@ -77,8 +76,21 @@
} -body {
execsql { INSERT INTO x2(x2) VALUES('optimize') }
} -test {
faultsim_test_result {0 {}} {1 SQLITE_NOMEM}
}
+
+set TMPDBERROR {1 {unable to open a temporary database file for storing temporary tables}}
+
+do_faultsim_test 5 -faults oom-t* -prep {
+ faultsim_restore_and_reopen
+ execsql { PRAGMA temp_store = memory }
+} -body {
+ execsql { PRAGMA integrity_check }
+} -test {
+ if {[string match {*error code=7*} $testresult]==0} {
+ faultsim_test_result {0 ok} {1 SQLITE_NOMEM} $::TMPDBERROR
+ }
+}
finish_test
ADDED ext/fts5/test/fts5faultH.test
Index: ext/fts5/test/fts5faultH.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5faultH.test
@@ -0,0 +1,141 @@
+# 2010 June 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+source $testdir/malloc_common.tcl
+set testprefix fts5faultH
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+set ::testprefix fts5faultH
+
+sqlite3_fts5_register_origintext db
+
+do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE t1 USING fts5(
+ x, tokenize="origintext unicode61", tokendata=1
+ );
+
+ BEGIN;
+ INSERT INTO t1 VALUES('oNe tWo thRee');
+ INSERT INTO t1 VALUES('One Two Three');
+ INSERT INTO t1 VALUES('onE twO threE');
+ COMMIT;
+ BEGIN;
+ INSERT INTO t1 VALUES('one two three');
+ INSERT INTO t1 VALUES('one two three');
+ INSERT INTO t1 VALUES('one two three');
+ COMMIT;
+}
+
+do_faultsim_test 1 -faults oom* -prep {
+} -body {
+ execsql {
+ SELECT rowid FROM t1('three');
+ }
+} -test {
+ faultsim_integrity_check
+ faultsim_test_result {0 {1 2 3 4 5 6}}
+}
+
+
+reset_db
+sqlite3_fts5_register_origintext db
+do_execsql_test 2.0 {
+ CREATE VIRTUAL TABLE t1 USING fts5(
+ x, tokenize="origintext unicode61", tokendata=1
+ );
+ INSERT INTO t1(t1, rank) VALUES('pgsz', 64);
+
+ BEGIN;
+ INSERT INTO t1(rowid, x) VALUES(10, 'aaa bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(12, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(13, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(14, 'bbb BBB bbb');
+ INSERT INTO t1(rowid, x) VALUES(15, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(16, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(17, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(18, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(19, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(20, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(21, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(22, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(23, 'bbb bbb bbb');
+ INSERT INTO t1(rowid, x) VALUES(24, 'aaa bbb BBB');
+ COMMIT;
+}
+
+do_faultsim_test 2 -faults oom* -prep {
+} -body {
+ execsql {
+ SELECT rowid FROM t1('BBB AND AAA');
+ }
+} -test {
+ faultsim_integrity_check
+ faultsim_test_result {0 {10 24}}
+}
+
+reset_db
+sqlite3_fts5_register_origintext db
+do_execsql_test 3.0 {
+ CREATE VIRTUAL TABLE t1 USING fts5(
+ x, tokenize="origintext unicode61", tokendata=1
+ );
+ INSERT INTO t1(t1, rank) VALUES('pgsz', 64);
+
+ INSERT INTO t1(rowid, x) VALUES(9, 'bbb Bbb BBB');
+ BEGIN;
+ INSERT INTO t1(rowid, x) VALUES(10, 'aaa bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(11, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(12, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(13, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(14, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(15, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(16, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(17, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(18, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(19, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(20, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(21, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(22, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(23, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(24, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(25, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(26, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(27, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(28, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(29, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(30, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(31, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(32, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(33, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(34, 'bbb Bbb BBB');
+ INSERT INTO t1(rowid, x) VALUES(35, 'aaa bbb BBB');
+ COMMIT;
+}
+
+do_faultsim_test 3 -faults oom* -prep {
+} -body {
+ execsql {
+ SELECT rowid FROM t1('BBB AND AAA');
+ }
+} -test {
+ faultsim_integrity_check
+ faultsim_test_result {0 {10 35}}
+}
+
+
+finish_test
Index: ext/fts5/test/fts5integrity.test
==================================================================
--- ext/fts5/test/fts5integrity.test
+++ ext/fts5/test/fts5integrity.test
@@ -352,7 +352,33 @@
} {ok}
do_execsql_test 11.4 {
DROP TABLE t1;
PRAGMA integrity_check(t2);
} {ok}
+
+#-------------------------------------------------------------------
+reset_db
+
+do_execsql_test 12.1 {
+ CREATE VIRTUAL TABLE x1 USING fts5(a, b);
+ INSERT INTO x1 VALUES('one', 'two');
+ INSERT INTO x1 VALUES('three', 'four');
+ INSERT INTO x1 VALUES('five', 'six');
+}
+
+do_execsql_test 12.2 {
+ PRAGMA integrity_check
+} {ok}
+
+db close
+sqlite3 db test.db -readonly 1
+
+explain_i {
+ PRAGMA integrity_check
+ }
+do_execsql_test 12.3 {
+ PRAGMA integrity_check
+} {ok}
+
+
finish_test
ADDED ext/fts5/test/fts5origintext.test
Index: ext/fts5/test/fts5origintext.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5origintext.test
@@ -0,0 +1,297 @@
+# 2014 Jan 08
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests focused on the fts5 "origintext" tokenizer wrapper.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5origintext
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+foreach_detail_mode $testprefix {
+
+sqlite3_fts5_register_origintext db
+do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize="origintext unicode61", detail=%DETAIL%
+ );
+ CREATE VIRTUAL TABLE vocab USING fts5vocab(ft, instance);
+}
+
+do_execsql_test 1.1 {
+ INSERT INTO ft VALUES('Hello world');
+}
+
+do_execsql_test 1.2 {
+ INSERT INTO ft(ft) VALUES('integrity-check');
+}
+
+proc b {x} { string map [list "\0" "."] $x }
+db func b b
+
+do_execsql_test 1.3 {
+ select b(term) from vocab;
+} {
+ hello.Hello
+ world
+}
+
+do_execsql_test 1.4 {
+ SELECT rowid FROM ft('Hello');
+} {1}
+
+#-------------------------------------------------------------------------
+reset_db
+
+# Return a random integer between 0 and n-1.
+#
+proc random {n} {
+ expr {abs(int(rand()*$n))}
+}
+
+proc select_one {list} {
+ set n [llength $list]
+ lindex $list [random $n]
+}
+
+proc term {} {
+ set first_letter {
+ a b c d e f g h i j k l m n o p q r s t u v w x y z
+ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
+ }
+
+ set term [select_one $first_letter]
+ append term [random 100]
+}
+
+proc document {} {
+ set nTerm [expr [random 5] + 5]
+ set doc ""
+ for {set ii 0} {$ii < $nTerm} {incr ii} {
+ lappend doc [term]
+ }
+ set doc
+}
+db func document document
+
+sqlite3_fts5_register_origintext db
+do_execsql_test 2.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize="origintext unicode61", detail=%DETAIL%
+ );
+ INSERT INTO ft(ft, rank) VALUES('pgsz', 128);
+ CREATE VIRTUAL TABLE vocab USING fts5vocab(ft, instance);
+}
+
+do_test 2.1 {
+ for {set ii 0} {$ii < 500} {incr ii} {
+ execsql { INSERT INTO ft VALUES( document() ) }
+ }
+} {}
+
+do_execsql_test 2.2 {
+ INSERT INTO ft(ft) VALUES('integrity-check');
+}
+
+do_execsql_test 2.3 {
+ INSERT INTO ft(ft, rank) VALUES('merge', 16);
+}
+
+do_execsql_test 2.4 {
+ INSERT INTO ft(ft) VALUES('integrity-check');
+}
+
+do_execsql_test 2.5 {
+ INSERT INTO ft(ft) VALUES('optimize');
+}
+
+#-------------------------------------------------------------------------
+reset_db
+
+sqlite3_fts5_register_origintext db
+do_execsql_test 3.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize="origintext unicode61", detail=%DETAIL%
+ );
+ CREATE VIRTUAL TABLE vocab USING fts5vocab(ft, instance);
+
+ INSERT INTO ft(rowid, x) VALUES(1, 'hello');
+ INSERT INTO ft(rowid, x) VALUES(2, 'Hello');
+ INSERT INTO ft(rowid, x) VALUES(3, 'HELLO');
+}
+
+#proc b {x} { string map [list "\0" "."] $x }
+#db func b b
+#execsql_pp { SELECT b(term) FROM vocab }
+
+do_execsql_test 3.1.1 { SELECT rowid FROM ft('hello') } 1
+do_execsql_test 3.1.2 { SELECT rowid FROM ft('Hello') } 2
+do_execsql_test 3.1.3 { SELECT rowid FROM ft('HELLO') } 3
+
+do_execsql_test 3.2 {
+ CREATE VIRTUAL TABLE ft2 USING fts5(x,
+ tokenize="origintext unicode61",
+ tokendata=1,
+ detail=%DETAIL%
+ );
+ CREATE VIRTUAL TABLE vocab2 USING fts5vocab(ft2, instance);
+
+ INSERT INTO ft2(rowid, x) VALUES(1, 'hello');
+ INSERT INTO ft2(rowid, x) VALUES(2, 'Hello');
+ INSERT INTO ft2(rowid, x) VALUES(3, 'HELLO');
+
+ INSERT INTO ft2(rowid, x) VALUES(10, 'helloooo');
+}
+
+#proc b {x} { string map [list "\0" "."] $x }
+#db func b b
+#execsql_pp { SELECT b(term) FROM vocab }
+
+do_execsql_test 3.3.1 { SELECT rowid FROM ft2('hello') } {1 2 3}
+do_execsql_test 3.3.2 { SELECT rowid FROM ft2('Hello') } {1 2 3}
+do_execsql_test 3.3.3 { SELECT rowid FROM ft2('HELLO') } {1 2 3}
+
+do_execsql_test 3.3.4 { SELECT rowid FROM ft2('hello*') } {1 2 3 10}
+
+#-------------------------------------------------------------------------
+#
+reset_db
+sqlite3_fts5_register_origintext db
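+# Return token iToken of phrase iPhrase of the current query, as
+# reported by the xQueryToken() API, with any embedded NUL byte
+# rendered as ".".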
+proc querytoken {cmd iPhrase iToken} {
+ set txt [$cmd xQueryToken $iPhrase $iToken]
+ string map [list "\0" "."] $txt
+}
+sqlite3_fts5_create_function db querytoken querytoken
+
+do_execsql_test 4.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize='origintext unicode61', tokendata=1, detail=%DETAIL%
+ );
+ INSERT INTO ft VALUES('one two three four');
+}
+
+do_execsql_test 4.1 {
+ SELECT rowid, querytoken(ft, 0, 0) FROM ft('TwO')
+} {1 two.TwO}
+do_execsql_test 4.2 {
+ SELECT rowid, querytoken(ft, 0, 0) FROM ft('one TWO ThreE')
+} {1 one}
+do_execsql_test 4.3 {
+ SELECT rowid, querytoken(ft, 1, 0) FROM ft('one TWO ThreE')
+} {1 two.TWO}
+
+if {"%DETAIL%"=="full"} {
+ # Phrase queries are only supported for detail=full.
+ #
+ do_execsql_test 4.4 {
+ SELECT rowid, querytoken(ft, 0, 2) FROM ft('"one TWO ThreE"')
+ } {1 three.ThreE}
+ do_catchsql_test 4.5 {
+ SELECT rowid, querytoken(ft, 0, 3) FROM ft('"one TWO ThreE"')
+ } {1 SQLITE_RANGE}
+ do_catchsql_test 4.6 {
+ SELECT rowid, querytoken(ft, 1, 0) FROM ft('"one TWO ThreE"')
+ } {1 SQLITE_RANGE}
+ do_catchsql_test 4.7 {
+ SELECT rowid, querytoken(ft, -1, 0) FROM ft('"one TWO ThreE"')
+ } {1 SQLITE_RANGE}
+}
+
+#-------------------------------------------------------------------------
+#
+reset_db
+sqlite3_fts5_register_origintext db
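+# Return token iToken of phrase instance iIdx within the current row,
+# as reported by the xInstToken() API, with any embedded NUL byte
+# rendered as ".".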
+proc insttoken {cmd iIdx iToken} {
+ set txt [$cmd xInstToken $iIdx $iToken]
+ string map [list "\0" "."] $txt
+}
+sqlite3_fts5_create_function db insttoken insttoken
+fts5_aux_test_functions db
+
+do_execsql_test 5.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize='origintext unicode61', tokendata=1, detail=%DETAIL%
+ );
+ INSERT INTO ft VALUES('one ONE One oNe oNE one');
+}
+
+do_execsql_test 5.1 {
+ SELECT insttoken(ft, 0, 0),
+ insttoken(ft, 1, 0),
+ insttoken(ft, 2, 0),
+ insttoken(ft, 3, 0),
+ insttoken(ft, 4, 0),
+ insttoken(ft, 5, 0)
+ FROM ft('one');
+} {
+ one one.ONE one.One one.oNe one.oNE one
+}
+
+do_execsql_test 5.2 {
+ SELECT insttoken(ft, 1, 0) FROM ft('one');
+} {
+ one.ONE
+}
+
+do_execsql_test 5.3 {
+ SELECT fts5_test_poslist(ft) FROM ft('one');
+} {
+ {0.0.0 0.0.1 0.0.2 0.0.3 0.0.4 0.0.5}
+}
+
+#-------------------------------------------------------------------------
+# Test the xInstToken() API with:
+#
+# * a non tokendata=1 table.
+# * prefix queries.
+#
+reset_db
+sqlite3_fts5_register_origintext db
+do_execsql_test 6.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, y, tokenize='origintext unicode61', detail=%DETAIL%
+ );
+
+ INSERT INTO ft VALUES('One Two', 'Three two');
+ INSERT INTO ft VALUES('three Three', 'one One');
+}
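+
+# Return a list containing one entry for each phrase instance in the
+# current row: the xInstToken() text for that instance, with any
+# embedded NUL byte rendered as ".". An empty entry means no token
+# text was available for that instance.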
+proc tokens {cmd} {
+ set ret [list]
+ for {set iTok 0} {$iTok < [$cmd xInstCount]} {incr iTok} {
+ set txt [$cmd xInstToken $iTok 0]
+ set txt [string map [list "\0" "."] $txt]
+ lappend ret $txt
+ }
+ set ret
+}
+sqlite3_fts5_create_function db tokens tokens
+
+do_execsql_test 6.1 {
+ SELECT rowid, tokens(ft) FROM ft('One');
+} {1 one.One 2 one.One}
+
+do_execsql_test 6.2 {
+ SELECT rowid, tokens(ft) FROM ft('on*');
+} {1 {{}} 2 {{} {}}}
+
+do_execsql_test 6.3 {
+ SELECT rowid, tokens(ft) FROM ft('Three*');
+} {1 {{}} 2 {{}}}
+
+}
+
+finish_test
+
ADDED ext/fts5/test/fts5origintext2.test
Index: ext/fts5/test/fts5origintext2.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5origintext2.test
@@ -0,0 +1,146 @@
+# 2014 Jan 08
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests focused on the "origintext" tokenizer with tokendata=1 tables.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5origintext2
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+sqlite3_fts5_register_origintext db
+do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize="origintext unicode61", tokendata=1
+ );
+}
+
+do_execsql_test 1.1 {
+ BEGIN;
+ INSERT INTO ft VALUES('Hello');
+ INSERT INTO ft VALUES('hello');
+ INSERT INTO ft VALUES('HELLO');
+ INSERT INTO ft VALUES('today');
+ INSERT INTO ft VALUES('today');
+ INSERT INTO ft VALUES('today');
+ INSERT INTO ft VALUES('World');
+ INSERT INTO ft VALUES('world');
+ INSERT INTO ft VALUES('WORLD');
+ COMMIT;
+}
+
+do_execsql_test 1.2 { SELECT rowid FROM ft('hello'); } {1 2 3}
+do_execsql_test 1.3 { SELECT rowid FROM ft('today'); } {4 5 6}
+do_execsql_test 1.4 { SELECT rowid FROM ft('world'); } {7 8 9}
+
+do_execsql_test 1.5 {
+ SELECT count(*) FROM ft_data
+} 3
+
+do_execsql_test 1.6 {
+ DELETE FROM ft;
+ INSERT INTO ft(ft, rank) VALUES('pgsz', 64);
+ BEGIN;
+ WITH s(i) AS (
+ SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<100
+ )
+ INSERT INTO ft SELECT 'Hello Hello Hello Hello Hello Hello Hello' FROM s;
+ INSERT INTO ft VALUES ('hELLO hELLO hELLO');
+ INSERT INTO ft VALUES('today today today today today today today');
+ INSERT INTO ft VALUES('today today today today today today today');
+ INSERT INTO ft VALUES('today today today today today today today');
+ INSERT INTO ft VALUES('today today today today today today today');
+ INSERT INTO ft VALUES('today today today today today today today');
+ INSERT INTO ft VALUES('today today today today today today today');
+ INSERT INTO ft VALUES('World World World World World World World');
+ INSERT INTO ft VALUES('world world world world world world world');
+ INSERT INTO ft VALUES('WORLD WORLD WORLD WORLD WORLD WORLD WORLD');
+ INSERT INTO ft VALUES('World World World World World World World');
+ INSERT INTO ft VALUES('world world world world world world world');
+ INSERT INTO ft VALUES('WORLD WORLD WORLD WORLD WORLD WORLD WORLD');
+ COMMIT;
+}
+
+do_execsql_test 1.7 {
+ SELECT count(*) FROM ft_data;
+} 23
+
+do_execsql_test 1.8 { SELECT rowid FROM ft('hello') WHERE rowid>100; } {101}
+
+do_execsql_test 1.9 {
+ DELETE FROM ft;
+ INSERT INTO ft(ft) VALUES('optimize');
+ SELECT count(*) FROM ft_data;
+} {2}
+do_execsql_test 1.10 {
+ BEGIN;
+ INSERT INTO ft VALUES('Hello');
+ INSERT INTO ft VALUES('hello');
+ INSERT INTO ft VALUES('HELLO');
+ INSERT INTO ft VALUES('today');
+ INSERT INTO ft VALUES('today');
+ INSERT INTO ft VALUES('today');
+ INSERT INTO ft VALUES('World');
+ INSERT INTO ft VALUES('world');
+ INSERT INTO ft VALUES('WORLD');
+}
+
+do_execsql_test 1.11 { SELECT rowid FROM ft('hello'); } {1 2 3}
+do_execsql_test 1.12 { SELECT rowid FROM ft('today'); } {4 5 6}
+do_execsql_test 1.13 { SELECT rowid FROM ft('world'); } {7 8 9}
+do_execsql_test 1.14 { SELECT rowid FROM ft('hello') ORDER BY rank; } {1 2 3}
+
+#------------------------------------------------------------------------
+reset_db
+sqlite3_fts5_register_origintext db
+proc tokens {cmd} {
+ set ret [list]
+ for {set iTok 0} {$iTok < [$cmd xInstCount]} {incr iTok} {
+ set txt [$cmd xInstToken $iTok 0]
+ set txt [string map [list "\0" "."] $txt]
+ lappend ret $txt
+ }
+ set ret
+}
+sqlite3_fts5_create_function db tokens tokens
+
+do_execsql_test 2.0 {
+ CREATE VIRTUAL TABLE x1 USING fts5(
+ v, tokenize="origintext unicode61", tokendata=1, detail=none
+ );
+
+ INSERT INTO x1 VALUES('xxx Xxx XXX yyy YYY yyy');
+ INSERT INTO x1 VALUES('xxx yyy xxx yyy yyy yyy');
+}
+
+do_execsql_test 2.1 {
+ SELECT tokens(x1) FROM x1('xxx');
+} {
+ {xxx xxx.Xxx xxx.XXX} {xxx xxx}
+}
+
+do_execsql_test 2.2 {
+ UPDATE x1_content SET c0 = 'xxx xxX xxx yyy yyy yyy' WHERE id=1;
+}
+
+do_execsql_test 2.3 {
+ SELECT tokens(x1) FROM x1('xxx');
+} {
+ {xxx {} xxx} {xxx xxx}
+}
+
+finish_test
+
ADDED ext/fts5/test/fts5origintext3.test
Index: ext/fts5/test/fts5origintext3.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5origintext3.test
@@ -0,0 +1,101 @@
+# 2023 November 22
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests focused on the "origintext" tokenizer and the xInstToken() API.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5origintext3
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+foreach_detail_mode $testprefix {
+ reset_db
+
+ sqlite3_fts5_register_origintext db
+ fts5_aux_test_functions db
+ proc insttoken {cmd iIdx iToken} {
+ set txt [$cmd xInstToken $iIdx $iToken]
+ string map [list "\0" "."] $txt
+ }
+ sqlite3_fts5_create_function db insttoken insttoken
+
+ do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL%
+ );
+ }
+
+ do_execsql_test 1.1 {
+ INSERT INTO ft VALUES('Hello world HELLO WORLD hello');
+ }
+
+ do_execsql_test 1.2 {
+ SELECT fts5_test_poslist(ft) FROM ft('hello');
+ } {{0.0.0 0.0.2 0.0.4}}
+
+ do_execsql_test 1.3 {
+ SELECT
+ insttoken(ft, 0, 0),
+ insttoken(ft, 1, 0),
+ insttoken(ft, 2, 0)
+ FROM ft('hello');
+ } {hello.Hello hello.HELLO hello}
+
+ do_execsql_test 1.4 {
+ SELECT
+ insttoken(ft, 0, 0),
+ insttoken(ft, 1, 0),
+ insttoken(ft, 2, 0)
+ FROM ft('hello') ORDER BY rank;
+ } {hello.Hello hello.HELLO hello}
+
+ do_execsql_test 1.5 {
+ CREATE VIRTUAL TABLE ft2 USING fts5(
+ x, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL%
+ );
+ INSERT INTO ft2(rowid, x) VALUES(1, 'ONE one two three ONE');
+ INSERT INTO ft2(rowid, x) VALUES(2, 'TWO one two three TWO');
+ INSERT INTO ft2(rowid, x) VALUES(3, 'THREE one two three THREE');
+ }
+
+ do_execsql_test 1.6 {
+ SELECT insttoken(ft2, 0, 0), rowid FROM ft2('three') ORDER BY rank;
+ } {three.THREE 3 three 1 three 2}
+
+ do_execsql_test 1.7 {
+ INSERT INTO ft2(rowid, x) VALUES(10, 'aaa bbb BBB');
+ INSERT INTO ft2(rowid, x) VALUES(12, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(13, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(14, 'bbb BBB bbb');
+ INSERT INTO ft2(rowid, x) VALUES(15, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(16, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(17, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(18, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(19, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(20, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(21, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(22, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(23, 'bbb bbb bbb');
+ INSERT INTO ft2(rowid, x) VALUES(24, 'aaa bbb BBB');
+ }
+
+ do_execsql_test 1.8 { SELECT rowid FROM ft2('aaa AND bbb'); } {10 24}
+ do_execsql_test 1.9 { SELECT rowid FROM ft2('bbb AND aaa'); } {10 24}
+
+}
+
+finish_test
+
ADDED ext/fts5/test/fts5origintext4.test
Index: ext/fts5/test/fts5origintext4.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5origintext4.test
@@ -0,0 +1,80 @@
+# 2023 November 22
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests focused on the "origintext" tokenizer with tokendata=1, and on
+# the use of doclist-indexes to limit the pages loaded by queries.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5origintext4
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+# The tests below verify that a doclist-index is used to limit the number
+# of pages loaded into the cache. It does this by querying sqlite3_db_status()
+# for the amount of memory used by the pager cache.
+#
+# memsubsys1 effectively limits the page-cache to 24 pages. Which masks
+# the effect tested by the tests in this file. And "mmap" prevents the
+# cache from being used, also preventing these tests from working.
+#
+if {[permutation]=="memsubsys1" || [permutation]=="mmap"} {
+ finish_test
+ return
+}
+
+sqlite3_fts5_register_origintext db
+do_execsql_test 1.0 {
+ PRAGMA page_size = 4096;
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize="origintext unicode61", tokendata=1
+ );
+}
+
+do_execsql_test 1.1 {
+ BEGIN;
+ INSERT INTO ft SELECT 'the first thing';
+
+ WITH s(i) AS (
+ SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<90000
+ )
+ INSERT INTO ft SELECT 'The second thing' FROM s;
+
+ INSERT INTO ft SELECT 'the first thing';
+ COMMIT;
+ INSERT INTO ft(ft) VALUES('optimize');
+}
+
+foreach {tn sql expr} {
+ 1 { SELECT rowid FROM ft('the') } {$mem > 250000}
+ 2 { SELECT rowid FROM ft('first') } {$mem < 50000}
+ 3 { SELECT rowid FROM ft('the first') } {$mem < 50000}
+} {
+ db close
+ sqlite3 db test.db
+ sqlite3_fts5_register_origintext db
+
+ execsql $sql
+ do_test 1.2.$tn {
+ set mem [lindex [sqlite3_db_status db CACHE_USED 0] 1]
+ expr $expr
+ } 1
+}
+
+proc b {x} { string map [list "\0" "."] $x }
+db func b b
+# execsql_pp { SELECT segid, b(term), pgno from ft_idx }
+
+finish_test
+
ADDED ext/fts5/test/fts5origintext5.test
Index: ext/fts5/test/fts5origintext5.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5origintext5.test
@@ -0,0 +1,273 @@
+# 2023 Dec 04
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests for tables that use both tokendata=1 and contentless_delete=1.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5origintext5
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+# Return a random integer between 0 and n-1.
+#
+proc random {n} { expr {abs(int(rand()*$n))} }
+
+# Select an element of the list passed as the only argument at random and
+# return it.
+#
+proc select_one {list} {
+ set n [llength $list]
+ lindex $list [random $n]
+}
+
+# Given a term that consists entirely of alphabet characters, return all
+# permutations of the term using upper and lower case characters. e.g.
+#
+# "abc" -> {CBA cBA CbA cbA CBa cBa Cba cba}
+#
+proc casify {term {lRet {{}}}} {
+ if {$term==""} { return $lRet }
+ set t [string range $term 1 end]
+ set f1 [string toupper [string range $term 0 0]]
+ set f2 [string tolower [string range $term 0 0]]
+ set ret [list]
+ foreach x $lRet {
+ lappend ret "$x$f1"
+ lappend ret "$x$f2"
+ }
+ return [casify $t $ret]
+}
+
+proc vocab {} {
+ list abc def ghi jkl mno pqr stu vwx yza
+}
+
+# Return a random 3 letter term.
+#
+proc term {} {
+ if {[info exists ::expanded_vocab]==0} {
+ foreach v [vocab] { lappend ::expanded_vocab {*}[casify $v] }
+ }
+
+ select_one $::expanded_vocab
+}
+
+# Return a document - between 7 and 9 terms.
+#
+proc document {} {
+ set nTerm [expr [random 3] + 7]
+ set doc ""
+ for {set ii 0} {$ii < $nTerm} {incr ii} {
+ lappend doc [term]
+ }
+ set doc
+}
+db func document document
+
+#-------------------------------------------------------------------------
+
+expr srand(6)
+
+set NDOC 200
+set NLOOP 50
+
+sqlite3_fts5_register_origintext db
+
+proc tokens {cmd} {
+ set ret [list]
+ for {set iTok 0} {$iTok < [$cmd xInstCount]} {incr iTok} {
+ set txt [$cmd xInstToken $iTok 0]
+ set txt [string map [list "\0" "."] $txt]
+ lappend ret $txt
+ }
+ set ret
+}
+sqlite3_fts5_create_function db tokens tokens
+
+proc rankfunc {cmd} {
+ $cmd xRowid
+}
+sqlite3_fts5_create_function db rankfunc rankfunc
+
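+# Compute the expected tokens() output for $term against the control
+# documents passed in $args: the bare (lower-case) term where a document
+# stores exactly that form, otherwise "lowercaseterm.OriginalCase".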
+proc ctrl_tokens {term args} {
+ set ret [list]
+ set term [string tolower $term]
+ foreach doc $args {
+ foreach a $doc {
+ if {[string tolower $a]==$term} {
+ if {$a==$term} {
+ lappend ret $a
+ } else {
+ lappend ret [string tolower $a].$a
+ }
+ }
+ }
+ }
+ set ret
+}
+db func ctrl_tokens ctrl_tokens
+
+proc do_all_vocab_test {tn} {
+ foreach ::v [concat [vocab] nnn] {
+ set answer [execsql {
+ SELECT id, ctrl_tokens($::v, x) FROM ctrl WHERE x LIKE '%' || $::v || '%'
+ }]
+ do_execsql_test $tn.$::v.1 {
+ SELECT rowid, tokens(ft) FROM ft($::v)
+ } $answer
+ do_execsql_test $tn.$::v.2 {
+ SELECT rowid, tokens(ft) FROM ft($::v) ORDER BY rank
+ } $answer
+ }
+}
+
+do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE ft USING fts5(
+ x, tokenize="origintext unicode61", content=, contentless_delete=1,
+ tokendata=1
+ );
+
+ CREATE TABLE ctrl(id INTEGER PRIMARY KEY, x TEXT);
+ INSERT INTO ft(ft, rank) VALUES('pgsz', 64);
+ INSERT INTO ft(ft, rank) VALUES('rank', 'rankfunc()');
+}
+do_test 1.1 {
+ for {set ii 0} {$ii < $NDOC} {incr ii} {
+ set doc [document]
+ execsql {
+ INSERT INTO ft(rowid, x) VALUES($ii, $doc);
+ INSERT INTO ctrl(id, x) VALUES($ii, $doc);
+ }
+ }
+} {}
+
+#execsql_pp { SELECT * FROM ctrl }
+#execsql_pp { SELECT * FROM ft }
+#fts5_aux_test_functions db
+#execsql_pp { SELECT rowid, tokens(ft), fts5_test_poslist(ft) FROM ft('ghi'); }
+
+do_all_vocab_test 1.2
+
+for {set ii 0} {$ii < $NLOOP} {incr ii} {
+ set lRowid [execsql { SELECT id FROM ctrl WHERE random() % 2 }]
+ foreach r $lRowid {
+ execsql { DELETE FROM ft WHERE rowid = $r }
+ execsql { DELETE FROM ctrl WHERE rowid = $r }
+
+ set doc [document]
+ execsql { INSERT INTO ft(rowid, x) VALUES($r, $doc) }
+ execsql { INSERT INTO ctrl(id, x) VALUES($r, $doc) }
+ }
+ do_all_vocab_test 1.3.$ii
+}
+
+#-------------------------------------------------------------------------
+
+do_execsql_test 2.0 {
+ CREATE VIRTUAL TABLE ft2 USING fts5(
+ x, y, tokenize="origintext unicode61", content=, contentless_delete=1,
+ tokendata=1
+ );
+
+ CREATE TABLE ctrl2(id INTEGER PRIMARY KEY, x TEXT, y TEXT);
+ INSERT INTO ft2(ft2, rank) VALUES('pgsz', 64);
+ INSERT INTO ft2(ft2, rank) VALUES('rank', 'rankfunc()');
+}
+do_test 2.1 {
+ for {set ii 0} {$ii < $NDOC} {incr ii} {
+ set doc1 [document]
+ set doc2 [document]
+ execsql {
+      INSERT INTO ft2(rowid, x, y) VALUES($ii, $doc1, $doc2);
+      INSERT INTO ctrl2(id, x, y) VALUES($ii, $doc1, $doc2);
+ }
+ }
+} {}
+
+proc do_all_vocab_test2 {tn} {
+ foreach ::v [vocab] {
+ set answer [execsql {
+ SELECT id, ctrl_tokens($::v, x, y) FROM ctrl2
+ WHERE x LIKE '%' || $::v || '%' OR y LIKE '%' || $::v || '%';
+ }]
+ do_execsql_test $tn.$::v.1 {
+ SELECT rowid, tokens(ft2) FROM ft2($::v)
+ } $answer
+ do_execsql_test $tn.$::v.2 {
+ SELECT rowid, tokens(ft2) FROM ft2($::v) ORDER BY rank
+ } $answer
+ }
+}
+
+do_all_vocab_test2 2.2
+
+for {set ii 0} {$ii < $NLOOP} {incr ii} {
+ set lRowid [execsql { SELECT id FROM ctrl2 WHERE random() % 2 }]
+ foreach r $lRowid {
+ execsql { DELETE FROM ft2 WHERE rowid = $r }
+ execsql { DELETE FROM ctrl2 WHERE rowid = $r }
+
+ set doc1 [document]
+ set doc2 [document]
+    execsql { INSERT INTO ft2(rowid, x, y) VALUES($r, $doc1, $doc2) }
+    execsql { INSERT INTO ctrl2(id, x, y) VALUES($r, $doc1, $doc2) }
+ }
+  do_all_vocab_test2 2.3.$ii
+}
+
+#-------------------------------------------------------------------------
+
+unset -nocomplain ::expanded_vocab
+proc vocab {} {
+ list abcde fghij klmno
+}
+
+proc do_all_vocab_test3 {tn} {
+ foreach ::v [concat [vocab] nnn] {
+ set answer [execsql {
+ SELECT rowid, ctrl_tokens($::v, w) FROM ctrl3 WHERE w LIKE '%' || $::v || '%'
+ }]
+ do_execsql_test $tn.$::v.1 {
+ SELECT rowid, tokens(ft3) FROM ft3($::v)
+ } $answer
+ do_execsql_test $tn.$::v.2 {
+ SELECT rowid, tokens(ft3) FROM ft3($::v) ORDER BY rank
+ } $answer
+ }
+}
+
+do_execsql_test 3.0 {
+ CREATE VIRTUAL TABLE ft3 USING fts5(
+ w, tokenize="origintext unicode61", content=, contentless_delete=1,
+ tokendata=1
+ );
+ INSERT INTO ft3(ft3, rank) VALUES('rank', 'rankfunc()');
+ CREATE TABLE ctrl3(w);
+}
+
+do_execsql_test 3.1 {
+ WITH s(i) AS (
+ SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<2
+ )
+ INSERT INTO ctrl3 SELECT document() FROM s;
+ INSERT INTO ft3(rowid, w) SELECT rowid, w FROM ctrl3;
+}
+
+do_all_vocab_test3 3.2
+
+
+finish_test
+
Index: ext/fts5/test/fts5secure3.test
==================================================================
--- ext/fts5/test/fts5secure3.test
+++ ext/fts5/test/fts5secure3.test
@@ -84,75 +84,80 @@
#-------------------------------------------------------------------------
# Tests with large/small rowid values.
#
-reset_db
-
-expr srand(0)
-
-set vocab {
- Popper Poppins Popsicle Porfirio Porrima Porsche
- Porter Portia Portland Portsmouth Portugal Portuguese
- Poseidon Post PostgreSQL Potemkin Potomac Potsdam
- Pottawatomie Potter Potts Pound Poussin Powell
- PowerPC PowerPoint Powers Powhatan Poznan Prada
- Prado Praetorian Prague Praia Prakrit Pratchett
- Pratt Pravda Praxiteles Preakness Precambrian Preminger
- Premyslid Prensa Prentice Pres Presbyterian Presbyterianism
-}
-proc newdoc {} {
- for {set i 0} {$i<8} {incr i} {
- lappend ret [lindex $::vocab [expr int(abs(rand()) * [llength $::vocab])]]
- }
- set ret
-}
-db func newdoc newdoc
-
-do_execsql_test 3.0 {
- CREATE VIRTUAL TABLE fff USING fts5(y);
- INSERT INTO fff(fff, rank) VALUES('pgsz', 64);
-
- WITH s(x) AS ( VALUES(1) UNION ALL SELECT x+1 FROM s WHERE x<1000 )
- INSERT INTO fff(rowid, y) SELECT random() , newdoc() FROM s;
-
- WITH s(x) AS ( VALUES(1) UNION ALL SELECT x+1 FROM s WHERE x<1000 )
- INSERT INTO fff(rowid, y) SELECT random() , newdoc() FROM s;
-
- WITH s(x) AS ( VALUES(1) UNION ALL SELECT x+1 FROM s WHERE x<1000 )
- INSERT INTO fff(rowid, y) SELECT random() , newdoc() FROM s;
-
- INSERT INTO fff(fff, rank) VALUES('secure-delete', 1);
-}
-
-proc lshuffle {in} {
- set out [list]
- while {[llength $in]>0} {
- set idx [expr int(abs(rand()) * [llength $in])]
- lappend out [lindex $in $idx]
- set in [lreplace $in $idx $idx]
- }
- set out
-}
-
-#dump fff
-
-set iTest 1
-foreach ii [lshuffle [db eval {SELECT rowid FROM fff}]] {
- #if {$iTest==1} { dump fff }
- #if {$iTest==1} { breakpoint }
- do_execsql_test 3.1.$iTest.$ii {
- DELETE FROM fff WHERE rowid=$ii;
- }
- #if {$iTest==1} { dump fff }
- if {($iTest % 20)==0} {
- do_execsql_test 3.1.$iTest.$ii.ic {
- INSERT INTO fff(fff) VALUES('integrity-check');
- }
- }
- #if {$iTest==1} { break }
- incr iTest
+foreach {tn cfg} {
+ 1 ""
+ 2 "INSERT INTO fff(fff, rank) VALUES('secure-delete', 1)"
+} {
+ reset_db
+
+ expr srand(0)
+
+ set vocab {
+ Popper Poppins Popsicle Porfirio Porrima Porsche
+ Porter Portia Portland Portsmouth Portugal Portuguese
+ Poseidon Post PostgreSQL Potemkin Potomac Potsdam
+ Pottawatomie Potter Potts Pound Poussin Powell
+ PowerPC PowerPoint Powers Powhatan Poznan Prada
+ Prado Praetorian Prague Praia Prakrit Pratchett
+ Pratt Pravda Praxiteles Preakness Precambrian Preminger
+ Premyslid Prensa Prentice Pres Presbyterian Presbyterianism
+ }
+ proc newdoc {} {
+ for {set i 0} {$i<8} {incr i} {
+ lappend ret [lindex $::vocab [expr int(abs(rand()) * [llength $::vocab])]]
+ }
+ set ret
+ }
+ db func newdoc newdoc
+
+ do_execsql_test 3.$tn.0 {
+ CREATE VIRTUAL TABLE fff USING fts5(y);
+ INSERT INTO fff(fff, rank) VALUES('pgsz', 64);
+
+ WITH s(x) AS ( VALUES(1) UNION ALL SELECT x+1 FROM s WHERE x<1000 )
+ INSERT INTO fff(rowid, y) SELECT random() , newdoc() FROM s;
+
+ WITH s(x) AS ( VALUES(1) UNION ALL SELECT x+1 FROM s WHERE x<1000 )
+ INSERT INTO fff(rowid, y) SELECT random() , newdoc() FROM s;
+
+ WITH s(x) AS ( VALUES(1) UNION ALL SELECT x+1 FROM s WHERE x<1000 )
+ INSERT INTO fff(rowid, y) SELECT random() , newdoc() FROM s;
+ }
+
+ execsql $cfg
+
+ proc lshuffle {in} {
+ set out [list]
+ while {[llength $in]>0} {
+ set idx [expr int(abs(rand()) * [llength $in])]
+ lappend out [lindex $in $idx]
+ set in [lreplace $in $idx $idx]
+ }
+ set out
+ }
+
+ #dump fff
+
+ set iTest 1
+ foreach ii [lshuffle [db eval {SELECT rowid FROM fff}]] {
+ #if {$iTest==1} { dump fff }
+ #if {$iTest==1} { breakpoint }
+ do_execsql_test 3.$tn.1.$iTest.$ii {
+ DELETE FROM fff WHERE rowid=$ii;
+ }
+ #if {$iTest==1} { dump fff }
+ if {($iTest % 20)==0} {
+ do_execsql_test 3.$tn.1.$iTest.$ii.ic {
+ INSERT INTO fff(fff) VALUES('integrity-check');
+ }
+ }
+ #if {$iTest==1} { break }
+ incr iTest
+ }
}
#execsql_pp { SELECT rowid FROM fff('post') ORDER BY rowid ASC }
#breakpoint
#execsql_pp {
Index: ext/fts5/test/fts5simple2.test
==================================================================
--- ext/fts5/test/fts5simple2.test
+++ ext/fts5/test/fts5simple2.test
@@ -341,11 +341,13 @@
INSERT INTO t2 VALUES('a aa aaa', 'b bb bbb');
INSERT INTO t2 VALUES('a aa aaa', 'b bb bbb');
INSERT INTO t2 VALUES('a aa aaa', 'b bb bbb');
COMMIT;
}
-do_execsql_test 17.1 { SELECT * FROM t2('y:a*') WHERE rowid BETWEEN 10 AND 20 }
+do_execsql_test 17.1 {
+ SELECT * FROM t2('y:a*') WHERE rowid BETWEEN 10 AND 20
+}
do_execsql_test 17.2 {
BEGIN;
INSERT INTO t2 VALUES('a aa aaa', 'b bb bbb');
SELECT * FROM t2('y:a*') WHERE rowid BETWEEN 10 AND 20 ;
}
Index: ext/fts5/test/fts5synonym2.test
==================================================================
--- ext/fts5/test/fts5synonym2.test
+++ ext/fts5/test/fts5synonym2.test
@@ -40,11 +40,11 @@
list [sort_poslist $PL] $CL
}
sqlite3_fts5_create_function db fts5_test_bothlist fts5_test_bothlist
-proc fts5_rowid {cmd} { expr [$cmd xColumnText -1] }
+proc fts5_rowid {cmd} { expr [$cmd xRowid] }
sqlite3_fts5_create_function db fts5_rowid fts5_rowid
do_execsql_test 1.$tok.0.1 "
CREATE VIRTUAL TABLE ss USING fts5(a, b,
tokenize='tclnum $tok', detail=%DETAIL%);
ADDED ext/fts5/test/fts5tokenizer2.test
Index: ext/fts5/test/fts5tokenizer2.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5tokenizer2.test
@@ -0,0 +1,89 @@
+# 2023 Nov 03
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests focusing on the built-in fts5 tokenizers.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5tokenizer2
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+sqlite3_fts5_create_tokenizer db tst get_tst_tokenizer
+proc get_tst_tokenizer {args} {
+ return "tst_tokenizer"
+}
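+
+# Split $txt into maximal runs of upper-case and non-upper-case
+# characters. e.g. 'AAdontBBmess' is tokenized as 'AA', 'dont', 'BB'
+# and 'mess', with contiguous byte offsets.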
+proc tst_tokenizer {flags txt} {
+ set token ""
+ set lTok [list]
+
+ foreach c [split $txt {}] {
+ if {$token==""} {
+ append token $c
+ } else {
+ set t1 [string is upper $token]
+ set t2 [string is upper $c]
+
+ if {$t1!=$t2} {
+ lappend lTok $token
+ set token ""
+ }
+ append token $c
+ }
+ }
+ if {$token!=""} { lappend lTok $token }
+
+ set iOff 0
+ foreach t $lTok {
+ set n [string length $t]
+ sqlite3_fts5_token $t $iOff [expr $iOff+$n]
+ incr iOff $n
+ }
+}
+
+do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE t1 USING fts5(t, tokenize=tst);
+}
+
+do_execsql_test 1.1 {
+ INSERT INTO t1 VALUES('AAdontBBmess');
+}
+
+do_execsql_test 1.2 {
+ SELECT snippet(t1, 0, '>', '<', '...', 4) FROM t1('BB');
+} {AAdont>BB<mess}
+
+do_execsql_test 1.3 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('BB');
+} {AAdont>BB<mess}
+
+do_execsql_test 1.4 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('AA');
+} {>AA<dontBBmess}
+
+do_execsql_test 1.5 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('dont');
+} {AA>dont<BBmess}
+
+do_execsql_test 1.6 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('mess');
+} {AAdontBB>mess<}
+
+do_execsql_test 1.7 {
+ SELECT highlight(t1, 0, '>', '<') FROM t1('BB mess');
+} {AAdont>BBmess<}
+
+
+finish_test
ADDED ext/fts5/test/fts5trigram2.test
Index: ext/fts5/test/fts5trigram2.test
==================================================================
--- /dev/null
+++ ext/fts5/test/fts5trigram2.test
@@ -0,0 +1,109 @@
+# 2023 October 24
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+#
+# Tests for the fts5 "trigram" tokenizer.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+ifcapable !fts5 { finish_test ; return }
+set ::testprefix fts5trigram2
+
+do_execsql_test 1.0 "
+ CREATE VIRTUAL TABLE t1 USING fts5(y, tokenize='trigram remove_diacritics 1');
+ INSERT INTO t1 VALUES('abc\u0303defghijklm');
+ INSERT INTO t1 VALUES('a\u0303b\u0303c\u0303defghijklm');
+"
+
+do_execsql_test 1.1 {
+ SELECT highlight(t1, 0, '(', ')') FROM t1('abc');
+} [list \
+ "(abc\u0303)defghijklm" \
+ "(a\u0303b\u0303c\u0303)defghijklm" \
+]
+
+do_execsql_test 1.2 {
+ SELECT highlight(t1, 0, '(', ')') FROM t1('bcde');
+} [list \
+ "a(bc\u0303de)fghijklm" \
+ "a\u0303(b\u0303c\u0303de)fghijklm" \
+]
+
+do_execsql_test 1.3 {
+ SELECT highlight(t1, 0, '(', ')') FROM t1('cdef');
+} [list \
+ "ab(c\u0303def)ghijklm" \
+ "a\u0303b\u0303(c\u0303def)ghijklm" \
+]
+
+do_execsql_test 1.4 {
+ SELECT highlight(t1, 0, '(', ')') FROM t1('def');
+} [list \
+ "abc\u0303(def)ghijklm" \
+ "a\u0303b\u0303c\u0303(def)ghijklm" \
+]
+
+
+#-------------------------------------------------------------------------
+do_catchsql_test 2.0 {
+ CREATE VIRTUAL TABLE t2 USING fts5(
+ z, tokenize='trigram case_sensitive 1 remove_diacritics 1'
+ );
+} {1 {error in tokenizer constructor}}
+
+do_execsql_test 2.1 {
+ CREATE VIRTUAL TABLE t2 USING fts5(
+ z, tokenize='trigram case_sensitive 0 remove_diacritics 1'
+ );
+}
+do_execsql_test 2.2 "
+ INSERT INTO t2 VALUES('\u00E3bcdef');
+ INSERT INTO t2 VALUES('b\u00E3cdef');
+ INSERT INTO t2 VALUES('bc\u00E3def');
+ INSERT INTO t2 VALUES('bcd\u00E3ef');
+"
+
+do_execsql_test 2.3 {
+ SELECT highlight(t2, 0, '(', ')') FROM t2('abc');
+} "(\u00E3bc)def"
+do_execsql_test 2.4 {
+ SELECT highlight(t2, 0, '(', ')') FROM t2('bac');
+} "(b\u00E3c)def"
+do_execsql_test 2.5 {
+ SELECT highlight(t2, 0, '(', ')') FROM t2('bca');
+} "(bc\u00E3)def"
+do_execsql_test 2.6 "
+ SELECT highlight(t2, 0, '(', ')') FROM t2('\u00E3bc');
+" "(\u00E3bc)def"
+
+#-------------------------------------------------------------------------
+do_execsql_test 3.0 {
+ CREATE VIRTUAL TABLE t3 USING fts5(
+ z, tokenize='trigram remove_diacritics 1'
+ );
+} {}
+do_execsql_test 3.1 "
+ INSERT INTO t3 VALUES ('\u0303abc\u0303');
+"
+do_execsql_test 3.2 {
+ SELECT highlight(t3, 0, '(', ')') FROM t3('abc');
+} "\u0303(abc\u0303)"
+
+#-------------------------------------------------------------------------
+do_execsql_test 4.0 {
+ CREATE VIRTUAL TABLE t4 USING fts5(z, tokenize=trigram);
+} {}
+
+breakpoint
+do_execsql_test 4.1 {
+ INSERT INTO t4 VALUES('ABCD');
+} {}
+
+finish_test
Index: ext/fts5/test/fts5vocab2.test
==================================================================
--- ext/fts5/test/fts5vocab2.test
+++ ext/fts5/test/fts5vocab2.test
@@ -278,9 +278,33 @@
do_catchsql_test 5.2 {
DELETE FROM t1 WHERE rowid>100;
INSERT INTO t1 SELECT randomblob(3000) FROM v1
} {1 {query aborted}}
+#-------------------------------------------------------------------------
+reset_db
+sqlite3_fts5_may_be_corrupt 1
+
+do_execsql_test 6.0 {
+ BEGIN TRANSACTION;
+ CREATE VIRTUAL TABLE t1 USING fts5(a,b unindexed,c,tokenize="porter ascii",tokendata=1);
+ REPLACE INTO t1_data VALUES(1,X'03090009');
+ REPLACE INTO t1_data VALUES(10,X'000000000103030003010101020101030101');
+ REPLACE INTO t1_data VALUES(137438953473,X'0000002e023061010202010162010203010163010204010167010601020201016801060102030101690106010204040606060808');
+ REPLACE INTO t1_data VALUES(274877906945,X'0000001f013067020802010202010168020803010203010169020804010204040909');
+ REPLACE INTO t1_data VALUES(412316860417,X'0000002e023061030202010162030203010163030204010167030601020201016803060102030101690306010204040606060808');
+ COMMIT;
+}
+
+do_execsql_test 6.1 {
+ CREATE VIRTUAL TABLE t3 USING fts5vocab('t1', 'row');
+}
+
+do_catchsql_test 6.2 {
+ SELECT * FROM t3;
+} {1 {database disk image is malformed}}
+
+sqlite3_fts5_may_be_corrupt 0
finish_test
Index: ext/jni/GNUmakefile
==================================================================
--- ext/jni/GNUmakefile
+++ ext/jni/GNUmakefile
@@ -33,10 +33,12 @@
$(dir.bld.c):
$(mkdir) $@
javac.flags ?= -Xlint:unchecked -Xlint:deprecation
java.flags ?=
+javac.flags += -encoding utf8
+# -------------^^^^^^^^^^^^^^ required for Windows builds
jnicheck ?= 1
ifeq (1,$(jnicheck))
java.flags += -Xcheck:jni
endif
@@ -77,10 +79,11 @@
$(MAKE) -C $(dir.top) version-info
# Be explicit about which Java files to compile so that we can work on
# in-progress files without requiring them to be in a compilable state.
JAVA_FILES.main := $(patsubst %,$(dir.src.jni)/annotation/%,\
+ Experimental.java \
NotNull.java \
Nullable.java \
) $(patsubst %,$(dir.src.capi)/%,\
AbstractCollationCallback.java \
AggregateFunction.java \
@@ -89,11 +92,11 @@
BusyHandlerCallback.java \
CollationCallback.java \
CollationNeededCallback.java \
CommitHookCallback.java \
ConfigLogCallback.java \
- ConfigSqllogCallback.java \
+ ConfigSqlLogCallback.java \
NativePointerHolder.java \
OutputPointer.java \
PrepareMultiCallback.java \
PreupdateHookCallback.java \
ProgressHandlerCallback.java \
@@ -108,10 +111,11 @@
UpdateHookCallback.java \
ValueHolder.java \
WindowFunction.java \
XDestroyCallback.java \
sqlite3.java \
+ sqlite3_blob.java \
sqlite3_context.java \
sqlite3_stmt.java \
sqlite3_value.java \
) $(patsubst %,$(dir.src.jni)/wrapper1/%,\
AggregateFunction.java \
@@ -118,10 +122,11 @@
ScalarFunction.java \
SqlFunction.java \
Sqlite.java \
SqliteException.java \
ValueHolder.java \
+ WindowFunction.java \
)
JAVA_FILES.unittest := $(patsubst %,$(dir.src.jni)/%,\
capi/Tester1.java \
wrapper1/Tester2.java \
@@ -157,16 +162,17 @@
endif
CLASS_FILES :=
define CLASSFILE_DEPS
all: $(1).class
+$(1).class: $(1).java
CLASS_FILES += $(1).class
endef
$(foreach B,$(basename \
$(JAVA_FILES.main) $(JAVA_FILES.unittest) $(JAVA_FILES.tester)),\
$(eval $(call CLASSFILE_DEPS,$(B))))
-$(CLASS_FILES): $(JAVA_FILES) $(MAKEFILE)
+$(CLASS_FILES): $(MAKEFILE)
$(bin.javac) $(javac.flags) -h $(dir.bld.c) -cp $(classpath) $(JAVA_FILES)
#.PHONY: classfiles
########################################################################
@@ -224,11 +230,12 @@
-DSQLITE_ENABLE_DBSTAT_VTAB \
-DSQLITE_ENABLE_BYTECODE_VTAB \
-DSQLITE_ENABLE_OFFSET_SQL_FUNC \
-DSQLITE_ENABLE_PREUPDATE_HOOK \
-DSQLITE_ENABLE_NORMALIZE \
- -DSQLITE_ENABLE_SQLLOG
+ -DSQLITE_ENABLE_SQLLOG \
+ -DSQLITE_ENABLE_COLUMN_METADATA
endif
ifeq (1,$(opt.debug))
SQLITE_OPT += -DSQLITE_DEBUG -g -DDEBUG -UNDEBUG
else
@@ -314,11 +321,11 @@
test-one: $(test.deps)
$(bin.java) $(test.flags.jvm) org.sqlite.jni.capi.Tester1 $(Tester1.flags)
$(bin.java) $(test.flags.jvm) org.sqlite.jni.wrapper1.Tester2 $(Tester2.flags)
test-sqllog: $(test.deps)
@echo "Testing with -sqllog..."
- $(bin.java) $(test.flags.jvm) -sqllog
+ $(bin.java) $(test.flags.jvm) org.sqlite.jni.capi.Tester1 $(Tester1.flags) -sqllog
test-mt: $(test.deps)
@echo "Testing in multi-threaded mode:";
$(bin.java) $(test.flags.jvm) org.sqlite.jni.capi.Tester1 \
-t 7 -r 50 -shuffle $(Tester1.flags)
$(bin.java) $(test.flags.jvm) org.sqlite.jni.wrapper1.Tester2 \
Index: ext/jni/README.md
==================================================================
--- ext/jni/README.md
+++ ext/jni/README.md
@@ -14,13 +14,12 @@
> **FOREWARNING:** this subproject is very much in development and
subject to any number of changes. Please do not rely on any
information about its API until this disclaimer is removed. The JNI
- bindings released with version 3.43 are a "tech preview" and 3.44
- will be "final," at which point strong backward compatibility
- guarantees will apply.
+ bindings released with version 3.43 are a "tech preview." Once
+ finalized, strong backward compatibility guarantees will apply.
Project goals/requirements:
- A [1-to-1(-ish) mapping of the C API](#1to1ish) to Java via JNI,
insofar as cross-language semantics allow for. A closely-related
@@ -40,16 +39,18 @@
Non-goals:
- Creation of high-level OO wrapper APIs. Clients are free to create
them off of the C-style API.
+
+- Virtual tables are unlikely to be supported due to the amount of
+ glue code needed to fit them into Java.
- Support for mixed-mode operation, where client code accesses SQLite
both via the Java-side API and the C API via their own native
- code. In such cases, proxy functionalities (primarily callback
- handler wrappers of all sorts) may fail because the C-side use of
- the SQLite APIs will bypass those proxies.
+ code. Such cases would be a minefield of potential mis-interactions
+ between this project's JNI bindings and mixed-mode client code.
Hello World
-----------------------------------------------------------------------
@@ -121,19 +122,17 @@
provided which accept or return data in alternative forms or provide
sensible default argument values. In all such cases they are thin
proxies around the corresponding C APIs and do not introduce new
semantics.
-In some very few cases, Java-specific capabilities have been added in
+In a few cases, Java-specific capabilities have been added in
new APIs, all of which have "_java" somewhere in their names.
Examples include:
- `sqlite3_result_java_object()`
- `sqlite3_column_java_object()`
-- `sqlite3_column_java_casted()`
- `sqlite3_value_java_object()`
-- `sqlite3_value_java_casted()`
which, as one might surmise, collectively enable the passing of
arbitrary Java objects from user-defined SQL functions through to the
caller.
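+
+As a rough sketch only (the `ScalarFunction` callback signature and the
+registration call shown here are assumptions; consult `CApi.java` and
+`Tester1.java` for the canonical forms):
+
+```java
+import org.sqlite.jni.capi.*;
+
+// Assumes an already-open `sqlite3 db` handle.
+ScalarFunction f = new ScalarFunction(){
+  @Override public void xFunc(sqlite3_context cx, sqlite3_value[] args){
+    // Any Java object may be handed back as the SQL result value...
+    CApi.sqlite3_result_java_object(cx, new java.util.Date());
+  }
+};
+CApi.sqlite3_create_function(db, "date_obj", 0, CApi.SQLITE_UTF8, f);
+// ...and later retrieved with sqlite3_column_java_object() or, inside
+// another UDF, sqlite3_value_java_object().
+```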
@@ -148,27 +147,31 @@
a "zombie," pending finalization when the library detects that all
pending statements have been closed. Be aware that Java garbage
collection _cannot_ close a database or finalize a prepared statement.
Those things require explicit API calls.
+Classes for which it is sensible support Java's `AutoCloseable`
+interface and so can be used with try-with-resources constructs.
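+
+For example (a minimal sketch; the `open()` factory shown here is an
+assumed signature, so consult `Sqlite.java` in the `wrapper1` package):
+
+```java
+// The handle is closed automatically when the block exits, even if an
+// exception is thrown from inside it.
+try( Sqlite db = Sqlite.open(":memory:") ){
+  // ... use db ...
+}
+```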
+
Golden Rule #2: _Never_ Throw from Callbacks (Unless...)
------------------------------------------------------------------------
All routines in this API, barring explicitly documented exceptions,
retain C-like semantics. For example, they are not permitted to throw
or propagate exceptions and must return error information (if any) via
result codes or `null`. The only cases where the C-style APIs may
throw are through client-side misuse, e.g. passing in a null where it
-shouldn't be used. The APIs clearly mark function parameters which
-should not be null, but does not actively defend itself against such
-misuse. Some C-style APIs explicitly accept `null` as a no-op for
-usability's sake, and some of the JNI APIs deliberately return an
-error code, instead of segfaulting, when passed a `null`.
+may cause a `NullPointerException`. The APIs clearly mark function
+parameters which should not be null, but does not generally actively
+defend itself against such misuse. Some C-style APIs explicitly accept
+`null` as a no-op for usability's sake, and some of the JNI APIs
+deliberately return an error code, instead of segfaulting, when passed
+a `null`.
Client-defined callbacks _must never throw exceptions_ unless _very
-explicitly documented_ as being throw-safe. Exceptions are generally
+explicitly documented_ as being throw-safe. Exceptions are generally
reserved for higher-level bindings which are constructed to
specifically deal with them and ensure that they do not leak C-level
resources. In some cases, callback handlers are permitted to throw, in
which cases they get translated to C-level result codes and/or
messages. If a callback which is not permitted to throw throws, its
@@ -290,18 +293,18 @@
Java? That's as-yet undetermined. If not, it will be removed.
`SQLFunction` is not used directly, but is instead instantiated via
one of its three subclasses:
-- `SQLFunction.Scalar` implements simple scalar functions using but a
+- `ScalarFunction` implements simple scalar functions using but a
single callback.
-- `SQLFunction.Aggregate` implements aggregate functions using two
+- `AggregateFunction` implements aggregate functions using two
callbacks.
-- `SQLFunction.Window` implements window functions using four
+- `WindowFunction` implements window functions using four
callbacks.
-Search [`Tester1.java`](/file/ext/jni/src/org/sqlite/jni/Tester1.java) for
+Search [`Tester1.java`](/file/ext/jni/src/org/sqlite/jni/capi/Tester1.java) for
`SQLFunction` for how it's used.
Reminder: see the disclaimer at the top of this document regarding the
in-flux nature of this API.
Index: ext/jni/src/c/sqlite3-jni.c
==================================================================
--- ext/jni/src/c/sqlite3-jni.c
+++ ext/jni/src/c/sqlite3-jni.c
@@ -13,17 +13,18 @@
** org.sqlite.jni.capi.CApi (from which sqlite3-jni.h is generated).
*/
/*
** If you found this comment by searching the code for
-** CallStaticObjectMethod then you're the victim of an OpenJDK bug:
+** CallStaticObjectMethod because it appears in console output then
+** you're probably the victim of an OpenJDK bug:
**
** https://bugs.openjdk.org/browse/JDK-8130659
**
-** It's known to happen with OpenJDK v8 but not with v19.
-**
-** This code does not use JNI's CallStaticObjectMethod().
+** It's known to happen with OpenJDK v8 but not with v19. It was
+** triggered by this code long before it made any use of
+** CallStaticObjectMethod().
*/
/*
** Define any SQLITE_... config defaults we want if they aren't
** overridden by the builder. Please keep these alphabetized.
@@ -88,16 +89,10 @@
#if !SQLITE_JNI_FATAL_OOM
#undef SQLITE_JNI_FATAL_OOM
#endif
#endif
-/**********************************************************************/
-/* SQLITE_M... */
-#ifndef SQLITE_MAX_ALLOCATION_SIZE
-# define SQLITE_MAX_ALLOCATION_SIZE 0x1fffffff
-#endif
-
/**********************************************************************/
/* SQLITE_O... */
#ifndef SQLITE_OMIT_DEPRECATED
# define SQLITE_OMIT_DEPRECATED 1
#endif
@@ -109,10 +104,16 @@
#endif
#ifdef SQLITE_OMIT_UTF16
/* UTF16 is required for java */
# undef SQLITE_OMIT_UTF16 1
#endif
+
+/**********************************************************************/
+/* SQLITE_S... */
+#ifndef SQLITE_STRICT_SUBTYPE
+# define SQLITE_STRICT_SUBTYPE 1
+#endif
/**********************************************************************/
/* SQLITE_T... */
#ifndef SQLITE_TEMP_STORE
# define SQLITE_TEMP_STORE 2
@@ -189,10 +190,12 @@
** otherwise get complaints that we're casting between different-sized
** int types.
**
** This use of intptr_t is the _only_ reason we require <stdint.h>
** which, in turn, requires building with -std=c99 (or later).
+**
+** See also: the notes for LongPtrGet_T.
*/
#define S3JniCast_L2P(JLongAsPtr) (void*)((intptr_t)(JLongAsPtr))
#define S3JniCast_P2L(PTR) (jlong)((intptr_t)(PTR))
/*
@@ -205,12 +208,12 @@
** only rarely needed in this code), but to be pedantically correct we
** need the proper type in the signature.
**
** https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/design.html#jni_interface_functions_and_pointers
*/
-#define JniArgsEnvObj JNIEnv * const env, jobject jSelf
-#define JniArgsEnvClass JNIEnv * const env, jclass jKlazz
+#define JniArgsEnvObj JNIEnv * env, jobject jSelf
+#define JniArgsEnvClass JNIEnv * env, jclass jKlazz
/*
** Helpers to account for -Xcheck:jni warnings about not having
** checked for exceptions.
*/
#define S3JniIfThrew if( (*env)->ExceptionCheck(env) )
@@ -655,10 +658,25 @@
jclass cString /* global ref to java.lang.String */;
jobject oCharsetUtf8 /* global ref to StandardCharset.UTF_8 */;
jmethodID ctorLong1 /* the Long(long) constructor */;
jmethodID ctorStringBA /* the String(byte[],Charset) constructor */;
jmethodID stringGetBytes /* the String.getBytes(Charset) method */;
+
+ /*
+ ByteBuffer may or may not be supported via JNI on any given
+ platform:
+
+ https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/functions.html#nio_support
+
+ We only store a ref to byteBuffer.klazz if JNI support for
+ ByteBuffer is available (which we determine during static init).
+ */
+ struct {
+ jclass klazz /* global ref to java.nio.ByteBuffer */;
+ jmethodID midAlloc /* ByteBuffer.allocateDirect() */;
+ jmethodID midLimit /* ByteBuffer.limit() */;
+ } byteBuffer;
} g;
/*
** The list of Java-side auto-extensions
** (org.sqlite.jni.capi.AutoExtensionCallback objects).
*/
@@ -861,10 +879,62 @@
#define s3jni_jbyteArray_release(jByteArray,jBytes) \
if( jBytes ) (*env)->ReleaseByteArrayElements(env, jByteArray, jBytes, JNI_ABORT)
#define s3jni_jbyteArray_commit(jByteArray,jBytes) \
if( jBytes ) (*env)->ReleaseByteArrayElements(env, jByteArray, jBytes, JNI_COMMIT)
+/*
+** If jbb is-a java.nio.Buffer object and the JNI environment supports
+** it, *pBuf is set to the buffer's memory and *pN is set to its
+** limit() (as opposed to its capacity()). If jbb is NULL, not a
+** Buffer, or the JNI environment does not support that operation,
+** *pBuf is set to 0 and *pN is set to 0.
+**
+** Note that the length of the buffer can be larger than SQLITE_LIMIT
+** but this function does not know what byte range of the buffer is
+** required so cannot check for that violation. The caller is required
+** to ensure that any to-be-bind()ed range fits within SQLITE_LIMIT.
+**
+** Sidebar: it is unfortunate that we cannot get ByteBuffer.limit()
+** via a JNI method like we can for ByteBuffer.capacity(). We instead
+** have to call back into Java to get the limit(). Depending on how
+** the ByteBuffer is used, the limit and capacity might be the same,
+** but when reusing a buffer, the limit may well change whereas the
+** capacity is fixed. The problem with, e.g., read()ing blob data to a
+** ByteBuffer's memory based on its capacity is that Java-level code
+** is restricted to accessing the range specified in
+** ByteBuffer.limit(). If we were to honor only the capacity, we
+** could end up writing to, or reading from, parts of a ByteBuffer
+** which client code itself cannot access without explicitly modifying
+** the limit. The penalty we pay for this correctness is that we must
+** call into Java to get the limit() of every ByteBuffer we work with.
+**
+** An alternative to having to call into ByteBuffer.limit() from here
+** would be to add private native impls of all ByteBuffer-using
+** methods, each of which adds a jint parameter which _must_ be set to
+** theBuffer.limit() by public Java APIs which use those private impls
+** to do the real work.
+*/
+static void s3jni__get_nio_buffer(JNIEnv * const env, jobject jbb, void **pBuf, jint * pN ){
+ *pBuf = 0;
+ *pN = 0;
+ if( jbb ){
+ *pBuf = (*env)->GetDirectBufferAddress(env, jbb);
+ if( *pBuf ){
+ /*
+ ** Maintenance reminder: do not use
+ ** (*env)->GetDirectBufferCapacity(env,jbb), even though it
+ ** would be much faster, for reasons explained in this
+ ** function's comments.
+ */
+ *pN = (*env)->CallIntMethod(env, jbb, SJG.g.byteBuffer.midLimit);
+ S3JniExceptionIsFatal("Error calling ByteBuffer.limit() method.");
+ }
+ }
+}
+#define s3jni_get_nio_buffer(JOBJ,vpOut,jpOut) \
+ s3jni__get_nio_buffer(env,(JOBJ),(vpOut),(jpOut))
+
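+/*
+** Illustration (Java, informational only) of why limit() rather than
+** capacity() matters once a direct buffer gets reused:
+**
+**   ByteBuffer bb = ByteBuffer.allocateDirect(1024); // capacity()==1024
+**   bb.put(new byte[100]);
+**   bb.flip();            // limit()==100, capacity() still ==1024
+**
+** After the flip(), Java-level code may only access the first 100
+** bytes. Reading or writing based on capacity() would touch the other
+** 924 bytes, which the client cannot legally reach without first
+** adjusting the limit themselves.
+*/
+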
/*
** Returns the current JNIEnv object. Fails fatally if it cannot find
** the object.
*/
static JNIEnv * s3jni_env(void){
@@ -1058,10 +1128,51 @@
? (*env)->NewString(env, (const jchar *)p, (jsize)(nP/2))
: NULL;
s3jni_oom_check( p ? !!rv : 1 );
return rv;
}
+
+/*
+** Creates a new ByteBuffer instance with a capacity of n. assert()s
+** that SJG.g.byteBuffer.klazz is not 0 and n>0.
+*/
+static jobject s3jni__new_ByteBuffer(JNIEnv * const env, int n){
+ jobject rv = 0;
+ assert( SJG.g.byteBuffer.klazz );
+ assert( SJG.g.byteBuffer.midAlloc );
+ assert( n > 0 );
+ rv = (*env)->CallStaticObjectMethod(env, SJG.g.byteBuffer.klazz,
+ SJG.g.byteBuffer.midAlloc, (jint)n);
+ S3JniIfThrew {
+ S3JniExceptionReport;
+ S3JniExceptionClear;
+ }
+ s3jni_oom_check( rv );
+ return rv;
+}
+
+/*
+** If n>0 and sqlite3_jni_supports_nio() is true then this creates a
+** new ByteBuffer object and copies n bytes from p to it. Returns NULL
+** if n is 0, sqlite3_jni_supports_nio() is false, or on allocation
+** error (unless fatal alloc failures are enabled).
+*/
+static jobject s3jni__blob_to_ByteBuffer(JNIEnv * const env,
+ const void * p, int n){
+ jobject rv = NULL;
+ assert( n >= 0 );
+ if( 0==n || !SJG.g.byteBuffer.klazz ){
+ return NULL;
+ }
+ rv = s3jni__new_ByteBuffer(env, n);
+ if( rv ){
+ void * tgt = (*env)->GetDirectBufferAddress(env, rv);
+ memcpy(tgt, p, (size_t)n);
+ }
+ return rv;
+}
+
/*
** Requires jx to be a Throwable. Calls its toString() method and
** returns its value converted to a UTF-8 string. The caller owns the
** returned string and must eventually sqlite3_free() it. Returns 0
@@ -1470,33 +1581,42 @@
**
** will work, despite the incorrect macro name, so long as the
** argument is a Java sqlite3 object, as this operation only has void
** pointers to work with.
*/
-#define PtrGet_T(T,OBJ) (T*)NativePointerHolder_get(OBJ, S3JniNph(T))
-#define PtrGet_sqlite3(OBJ) PtrGet_T(sqlite3, OBJ)
-#define PtrGet_sqlite3_backup(OBJ) PtrGet_T(sqlite3_backup, OBJ)
-#define PtrGet_sqlite3_blob(OBJ) PtrGet_T(sqlite3_blob, OBJ)
-#define PtrGet_sqlite3_context(OBJ) PtrGet_T(sqlite3_context, OBJ)
-#define PtrGet_sqlite3_stmt(OBJ) PtrGet_T(sqlite3_stmt, OBJ)
-#define PtrGet_sqlite3_value(OBJ) PtrGet_T(sqlite3_value, OBJ)
+#define PtrGet_T(T,JOBJ) (T*)NativePointerHolder_get((JOBJ), S3JniNph(T))
+#define PtrGet_sqlite3(JOBJ) PtrGet_T(sqlite3, (JOBJ))
+#define PtrGet_sqlite3_backup(JOBJ) PtrGet_T(sqlite3_backup, (JOBJ))
+#define PtrGet_sqlite3_blob(JOBJ) PtrGet_T(sqlite3_blob, (JOBJ))
+#define PtrGet_sqlite3_context(JOBJ) PtrGet_T(sqlite3_context, (JOBJ))
+#define PtrGet_sqlite3_stmt(JOBJ) PtrGet_T(sqlite3_stmt, (JOBJ))
+#define PtrGet_sqlite3_value(JOBJ) PtrGet_T(sqlite3_value, (JOBJ))
/*
-** S3JniLongPtr_T(X,Y) expects X to be an unqualified sqlite3 struct
+** LongPtrGet_T(X,Y) expects X to be an unqualified sqlite3 struct
** type name and Y to be a native pointer to such an object in the
** form of a jlong value. The jlong is simply cast to (X*). This
** approach is, as of 2023-09-27, supplanting the former approach. We
** now do the native pointer extraction in the Java side, rather than
** the C side, because it's reportedly significantly faster. The
** intptr_t part here is necessary for compatibility with (at least)
** ARM32.
+**
+** 2023-11-09: testing has not revealed any measurable performance
+** difference between the approach of passing type T to C compared to
+** passing pointer-to-T to C, and adding support for the latter
+** everywhere requires significantly more code. As of this writing, the
+** older/simpler approach is being applied except for (A) where the
+** newer approach has already been applied and (B) hot-spot APIs where
+** a difference of microseconds (i.e. below our testing measurement
+** threshold) might add up.
*/
-#define S3JniLongPtr_T(T,JLongAsPtr) (T*)((intptr_t)(JLongAsPtr))
-#define S3JniLongPtr_sqlite3(JLongAsPtr) S3JniLongPtr_T(sqlite3,JLongAsPtr)
-#define S3JniLongPtr_sqlite3_backup(JLongAsPtr) S3JniLongPtr_T(sqlite3_backup,JLongAsPtr)
-#define S3JniLongPtr_sqlite3_blob(JLongAsPtr) S3JniLongPtr_T(sqlite3_blob,JLongAsPtr)
-#define S3JniLongPtr_sqlite3_stmt(JLongAsPtr) S3JniLongPtr_T(sqlite3_stmt,JLongAsPtr)
-#define S3JniLongPtr_sqlite3_value(JLongAsPtr) S3JniLongPtr_T(sqlite3_value,JLongAsPtr)
+#define LongPtrGet_T(T,JLongAsPtr) (T*)((intptr_t)((JLongAsPtr)))
+#define LongPtrGet_sqlite3(JLongAsPtr) LongPtrGet_T(sqlite3,(JLongAsPtr))
+#define LongPtrGet_sqlite3_backup(JLongAsPtr) LongPtrGet_T(sqlite3_backup,(JLongAsPtr))
+#define LongPtrGet_sqlite3_blob(JLongAsPtr) LongPtrGet_T(sqlite3_blob,(JLongAsPtr))
+#define LongPtrGet_sqlite3_stmt(JLongAsPtr) LongPtrGet_T(sqlite3_stmt,(JLongAsPtr))
+#define LongPtrGet_sqlite3_value(JLongAsPtr) LongPtrGet_T(sqlite3_value,(JLongAsPtr))
/*
** Extracts the new S3JniDb instance from the free-list, or allocates
** one if needed, associates it with pDb, and returns. Returns NULL
** on OOM. The returned object MUST, on success of the calling
** operation, subsequently be associated with jDb via
@@ -1551,11 +1671,11 @@
** NULL if pDb is NULL or was not initialized via the JNI interfaces.
*/
#define S3JniDb_from_c(sqlite3Ptr) \
((sqlite3Ptr) ? S3JniDb_from_clientdata(sqlite3Ptr) : 0)
#define S3JniDb_from_jlong(sqlite3PtrAsLong) \
- S3JniDb_from_c(S3JniLongPtr_T(sqlite3,sqlite3PtrAsLong))
+ S3JniDb_from_c(LongPtrGet_T(sqlite3,sqlite3PtrAsLong))
/*
** Unref any Java-side state in (S3JniAutoExtension*) AX and zero out
** AX.
*/
@@ -1668,12 +1788,13 @@
default:
return 0;
}
}
-/* For use with sqlite3_result/value_pointer() */
-static const char * const ResultJavaValuePtrStr = "org.sqlite.jni.capi.ResultJavaVal";
+/* For use with sqlite3_result_pointer(), sqlite3_value_pointer(),
+ sqlite3_bind_java_object(), and sqlite3_column_java_object(). */
+static const char * const s3jni__value_jref_key = "org.sqlite.jni.capi.ResultJavaVal";
/*
** If v is not NULL, it must be a jobject global reference. Its
** reference is relinquished.
*/
@@ -1878,26 +1999,50 @@
return SQLITE_NOMEM;
}
/*
** Requires that jCx and jArgv are sqlite3_context
-** resp. array-of-sqlite3_value values initialized by udf_args(). This
+** resp. array-of-sqlite3_value values initialized by udf_args(). The
+** latter will be 0-and-NULL for UDF types with no arguments. This
** function zeroes out the nativePointer member of jCx and each entry
** in jArgv. This is a safety-net precaution to avoid undefined
-** behavior if a Java-side UDF holds a reference to one of its
-** arguments. This MUST be called from any function which successfully
-** calls udf_args(), after calling the corresponding UDF and checking
-** its exception status. It MUST NOT be called in any other case.
+** behavior if a Java-side UDF holds a reference to its context or one
+** of its arguments. This MUST be called from any function which
+** successfully calls udf_args(), after calling the corresponding UDF
+** and checking its exception status, or which Java-wraps a
+** sqlite3_context for use with a UDF(ish) call. It MUST NOT be called
+** in any other case.
*/
static void udf_unargs(JNIEnv *env, jobject jCx, int argc, jobjectArray jArgv){
int i = 0;
assert(jCx);
NativePointerHolder_set(S3JniNph(sqlite3_context), jCx, 0);
for( ; i < argc; ++i ){
jobject jsv = (*env)->GetObjectArrayElement(env, jArgv, i);
- assert(jsv);
- NativePointerHolder_set(S3JniNph(sqlite3_value), jsv, 0);
+ /*
+ ** There is a potential Java-triggerable case of Undefined
+ ** Behavior here, but it would require intentional misuse of the
+ ** API:
+ **
+ ** If a Java UDF grabs an sqlite3_value from its argv and then
+ ** assigns that element to null, it becomes unreachable to us so
+ ** we cannot clear out its pointer. That Java-side object's
+ ** getNativePointer() will then refer to a stale value, so passing
+ ** it into (e.g.) sqlite3_value_SOMETHING() would invoke UB.
+ **
+ ** High-level wrappers can avoid that possibility if they do not
+ ** expose sqlite3_value directly to clients (as is the case in
+ ** org.sqlite.jni.wrapper1.SqlFunction).
+ **
+ ** One potential (but expensive) workaround for this would be to
+ ** privately store a duplicate argv array in each sqlite3_context
+ ** wrapper object, and clear the native pointers from that copy.
+ */
+ assert(jsv && "Someone illegally modified a UDF argument array.");
+ if( jsv ){
+ NativePointerHolder_set(S3JniNph(sqlite3_value), jsv, 0);
+ }
}
}
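
A hedged Java illustration of the misuse scenario described in the comment inside the loop above. The ScalarFunction callback signature is assumed from the org.sqlite.jni.capi conventions and the class name here is hypothetical.

import org.sqlite.jni.capi.ScalarFunction;
import org.sqlite.jni.capi.sqlite3_context;
import org.sqlite.jni.capi.sqlite3_value;

// Hypothetical misuse -- do not do this in real code.
final class BadUdf extends ScalarFunction {
  private sqlite3_value leaked;
  @Override public void xFunc(sqlite3_context cx, sqlite3_value[] argv){
    leaked = argv[0];   // keep a reference to the argument...
    argv[0] = null;     // ...then hide it from udf_unargs().
    // udf_unargs() can no longer zero leaked's native pointer, so any
    // later use of it would refer to a stale sqlite3_value*.
  }
}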
/*
@@ -1982,10 +2127,11 @@
(*env)->CallVoidMethod(env, s->jObj, xMethodID, jcx);
S3JniIfThrew{
rc = udf_report_exception(env, isFinal, cx, s->zFuncName,
zFuncType);
}
+ udf_unargs(env, jcx, 0, 0);
S3JniUnrefLocal(jcx);
}else{
if( isFinal ) sqlite3_result_error_nomem(cx);
rc = SQLITE_NOMEM;
}
@@ -2055,16 +2201,16 @@
return rv; \
}
/** Create a trivial JNI wrapper for (int CName(sqlite3_stmt*)). */
#define WRAP_INT_STMT(JniNameSuffix,CName) \
JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpStmt){ \
- return (jint)CName(S3JniLongPtr_sqlite3_stmt(jpStmt)); \
+ return (jint)CName(LongPtrGet_sqlite3_stmt(jpStmt)); \
}
/** Create a trivial JNI wrapper for (int CName(sqlite3_stmt*,int)). */
#define WRAP_INT_STMT_INT(JniNameSuffix,CName) \
JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpStmt, jint n){ \
- return (jint)CName(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)n); \
+ return (jint)CName(LongPtrGet_sqlite3_stmt(jpStmt), (int)n); \
}
/** Create a trivial JNI wrapper for (boolean CName(sqlite3_stmt*)). */
#define WRAP_BOOL_STMT(JniNameSuffix,CName) \
JniDecl(jboolean,JniNameSuffix)(JniArgsEnvClass, jobject jStmt){ \
return CName(PtrGet_sqlite3_stmt(jStmt)) ? JNI_TRUE : JNI_FALSE; \
@@ -2071,45 +2217,45 @@
}
/** Create a trivial JNI wrapper for (jstring CName(sqlite3_stmt*,int)). */
#define WRAP_STR_STMT_INT(JniNameSuffix,CName) \
JniDecl(jstring,JniNameSuffix)(JniArgsEnvClass, jlong jpStmt, jint ndx){ \
return s3jni_utf8_to_jstring( \
- CName(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx), \
+ CName(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx), \
-1); \
}
/** Create a trivial JNI wrapper for (boolean CName(sqlite3*)). */
#define WRAP_BOOL_DB(JniNameSuffix,CName) \
JniDecl(jboolean,JniNameSuffix)(JniArgsEnvClass, jlong jpDb){ \
- return CName(S3JniLongPtr_sqlite3(jpDb)) ? JNI_TRUE : JNI_FALSE; \
+ return CName(LongPtrGet_sqlite3(jpDb)) ? JNI_TRUE : JNI_FALSE; \
}
/** Create a trivial JNI wrapper for (int CName(sqlite3*)). */
#define WRAP_INT_DB(JniNameSuffix,CName) \
JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpDb){ \
- return (jint)CName(S3JniLongPtr_sqlite3(jpDb)); \
+ return (jint)CName(LongPtrGet_sqlite3(jpDb)); \
}
/** Create a trivial JNI wrapper for (int64 CName(sqlite3*)). */
#define WRAP_INT64_DB(JniNameSuffix,CName) \
JniDecl(jlong,JniNameSuffix)(JniArgsEnvClass, jlong jpDb){ \
- return (jlong)CName(S3JniLongPtr_sqlite3(jpDb)); \
+ return (jlong)CName(LongPtrGet_sqlite3(jpDb)); \
}
/** Create a trivial JNI wrapper for (jstring CName(sqlite3*,int)). */
#define WRAP_STR_DB_INT(JniNameSuffix,CName) \
JniDecl(jstring,JniNameSuffix)(JniArgsEnvClass, jlong jpDb, jint ndx){ \
return s3jni_utf8_to_jstring( \
- CName(S3JniLongPtr_sqlite3(jpDb), (int)ndx), \
+ CName(LongPtrGet_sqlite3(jpDb), (int)ndx), \
-1); \
}
/** Create a trivial JNI wrapper for (int CName(sqlite3_value*)). */
#define WRAP_INT_SVALUE(JniNameSuffix,CName,DfltOnNull) \
JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpSValue){ \
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSValue); \
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSValue); \
return (jint)(sv ? CName(sv): DfltOnNull); \
}
/** Create a trivial JNI wrapper for (boolean CName(sqlite3_value*)). */
#define WRAP_BOOL_SVALUE(JniNameSuffix,CName,DfltOnNull) \
JniDecl(jboolean,JniNameSuffix)(JniArgsEnvClass, jlong jpSValue){ \
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSValue); \
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSValue); \
return (jint)(sv ? CName(sv) : DfltOnNull) \
? JNI_TRUE : JNI_FALSE; \
}
WRAP_INT_DB(1changes, sqlite3_changes)
@@ -2118,13 +2264,15 @@
WRAP_INT_STMT_INT(1column_1bytes, sqlite3_column_bytes)
WRAP_INT_STMT_INT(1column_1bytes16, sqlite3_column_bytes16)
WRAP_INT_STMT(1column_1count, sqlite3_column_count)
WRAP_STR_STMT_INT(1column_1decltype, sqlite3_column_decltype)
WRAP_STR_STMT_INT(1column_1name, sqlite3_column_name)
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
WRAP_STR_STMT_INT(1column_1database_1name, sqlite3_column_database_name)
WRAP_STR_STMT_INT(1column_1origin_1name, sqlite3_column_origin_name)
WRAP_STR_STMT_INT(1column_1table_1name, sqlite3_column_table_name)
+#endif
WRAP_INT_STMT_INT(1column_1type, sqlite3_column_type)
WRAP_INT_STMT(1data_1count, sqlite3_data_count)
WRAP_STR_DB_INT(1db_1name, sqlite3_db_name)
WRAP_INT_DB(1error_1offset, sqlite3_error_offset)
WRAP_INT_DB(1extended_1errcode, sqlite3_extended_errcode)
@@ -2179,11 +2327,13 @@
: 0))
: 0;
return S3JniCast_P2L(p);
}
-/* Central auto-extension handler. */
+/*
+** Central auto-extension runner for auto-extensions created in Java.
+*/
static int s3jni_run_java_auto_extensions(sqlite3 *pDb, const char **pzErr,
const struct sqlite3_api_routines *ignored){
int rc = 0;
unsigned i, go = 1;
JNIEnv * env = 0;
@@ -2316,21 +2466,21 @@
S3JniApi(sqlite3_backup_finish(),jint,1backup_1finish)(
JniArgsEnvClass, jlong jpBack
){
int rc = 0;
if( jpBack!=0 ){
- rc = sqlite3_backup_finish( S3JniLongPtr_sqlite3_backup(jpBack) );
+ rc = sqlite3_backup_finish( LongPtrGet_sqlite3_backup(jpBack) );
}
return rc;
}
S3JniApi(sqlite3_backup_init(),jobject,1backup_1init)(
JniArgsEnvClass, jlong jpDbDest, jstring jTDest,
jlong jpDbSrc, jstring jTSrc
){
- sqlite3 * const pDest = S3JniLongPtr_sqlite3(jpDbDest);
- sqlite3 * const pSrc = S3JniLongPtr_sqlite3(jpDbSrc);
+ sqlite3 * const pDest = LongPtrGet_sqlite3(jpDbDest);
+ sqlite3 * const pSrc = LongPtrGet_sqlite3(jpDbSrc);
char * const zDest = s3jni_jstring_to_utf8(jTDest, 0);
char * const zSrc = s3jni_jstring_to_utf8(jTSrc, 0);
jobject rv = 0;
if( pDest && pSrc && zDest && zSrc ){
@@ -2349,23 +2499,23 @@
}
S3JniApi(sqlite3_backup_pagecount(),jint,1backup_1pagecount)(
JniArgsEnvClass, jlong jpBack
){
- return sqlite3_backup_pagecount(S3JniLongPtr_sqlite3_backup(jpBack));
+ return sqlite3_backup_pagecount(LongPtrGet_sqlite3_backup(jpBack));
}
S3JniApi(sqlite3_backup_remaining(),jint,1backup_1remaining)(
JniArgsEnvClass, jlong jpBack
){
- return sqlite3_backup_remaining(S3JniLongPtr_sqlite3_backup(jpBack));
+ return sqlite3_backup_remaining(LongPtrGet_sqlite3_backup(jpBack));
}
S3JniApi(sqlite3_backup_step(),jint,1backup_1step)(
JniArgsEnvClass, jlong jpBack, jint nPage
){
- return sqlite3_backup_step(S3JniLongPtr_sqlite3_backup(jpBack), (int)nPage);
+ return sqlite3_backup_step(LongPtrGet_sqlite3_backup(jpBack), (int)nPage);
}
S3JniApi(sqlite3_bind_blob(),jint,1bind_1blob)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jbyteArray baData, jint nMax
){
@@ -2374,53 +2524,159 @@
int rc;
if( pBuf ){
if( nMax>nBA ){
nMax = nBA;
}
- rc = sqlite3_bind_blob(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx,
+ rc = sqlite3_bind_blob(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx,
pBuf, (int)nMax, SQLITE_TRANSIENT);
s3jni_jbyteArray_release(baData, pBuf);
}else{
rc = baData
? SQLITE_NOMEM
- : sqlite3_bind_null( S3JniLongPtr_sqlite3_stmt(jpStmt), ndx );
+ : sqlite3_bind_null( LongPtrGet_sqlite3_stmt(jpStmt), ndx );
}
return (jint)rc;
}
+
+/**
+ Helper for use with s3jni_setup_nio_args().
+*/
+struct S3JniNioArgs {
+ jobject jBuf; /* input - ByteBuffer */
+ jint iOffset; /* input - byte offset */
+ jint iHowMany; /* input - byte count to bind/read/write */
+ jint nBuf; /* output - jBuf's buffer size */
+ void * p; /* output - jBuf's buffer memory */
+ void * pStart; /* output - offset of p to bind/read/write */
+ int nOut; /* output - number of bytes from pStart to bind/read/write */
+};
+typedef struct S3JniNioArgs S3JniNioArgs;
+static const S3JniNioArgs S3JniNioArgs_empty = {
+ 0,0,0,0,0,0,0
+};
+
+/*
+** Internal helper for sqlite3_bind_nio_buffer(),
+** sqlite3_result_nio_buffer(), and similar methods which take a
+** ByteBuffer object as either input or output. Populates pArgs and
+** returns 0 on success, non-0 if the operation should fail. The
+** caller is required to check for SJG.g.byteBuffer.klazz!=0 before calling
+** this and reporting it in a way appropriate for that routine. This
+** function may assert() that SJG.g.byteBuffer.klazz is not 0.
+**
+** The (jBuffer, iOffset, iHowMany) arguments are the (ByteBuffer, offset,
+** length) arguments to the bind/result method.
+**
+** If iHowMany is negative then it's treated as "until the end" and
+** the calculated slice is trimmed to fit if needed. If iHowMany is
+** positive and extends past the end of jBuffer then SQLITE_ERROR is
+** returned.
+**
+** Returns 0 if everything looks to be in order, else some SQLITE_...
+** result code.
+*/
+static int s3jni_setup_nio_args(
+ JNIEnv *env, S3JniNioArgs * pArgs,
+ jobject jBuffer, jint iOffset, jint iHowMany
+){
+ jlong iEnd = 0;
+ const int bAllowTruncate = iHowMany<0;
+ *pArgs = S3JniNioArgs_empty;
+ pArgs->jBuf = jBuffer;
+ pArgs->iOffset = iOffset;
+ pArgs->iHowMany = iHowMany;
+ assert( SJG.g.byteBuffer.klazz );
+ if( pArgs->iOffset<0 ){
+ return SQLITE_ERROR
+ /* SQLITE_MISUSE or SQLITE_RANGE would fit better but we use
+ SQLITE_ERROR for consistency with the code documented for a
+ negative target blob offset in sqlite3_blob_read/write(). */;
+ }
+ s3jni_get_nio_buffer(pArgs->jBuf, &pArgs->p, &pArgs->nBuf);
+ if( !pArgs->p ){
+ return SQLITE_MISUSE;
+ }else if( pArgs->iOffset>=pArgs->nBuf ){
+ pArgs->pStart = 0;
+ pArgs->nOut = 0;
+ return 0;
+ }
+ assert( pArgs->nBuf > 0 );
+ assert( pArgs->iOffset < pArgs->nBuf );
+ iEnd = pArgs->iHowMany<0
+ ? pArgs->nBuf - pArgs->iOffset
+ : pArgs->iOffset + pArgs->iHowMany;
+ if( iEnd>(jlong)pArgs->nBuf ){
+ if( bAllowTruncate ){
+ iEnd = pArgs->nBuf - pArgs->iOffset;
+ }else{
+ return SQLITE_ERROR
+ /* again: for consistency with blob_read/write(), though
+ SQLITE_MISUSE or SQLITE_RANGE would be a better fit. */;
+ }
+ }
+ if( iEnd - pArgs->iOffset > (jlong)SQLITE_MAX_LENGTH ){
+ return SQLITE_TOOBIG;
+ }
+ assert( pArgs->iOffset >= 0 );
+ assert( iEnd > pArgs->iOffset );
+ pArgs->pStart = pArgs->p + pArgs->iOffset;
+ pArgs->nOut = (int)(iEnd - pArgs->iOffset);
+ assert( pArgs->nOut > 0 );
+ assert( (pArgs->pStart + pArgs->nOut) <= (pArgs->p + pArgs->nBuf) );
+ return 0;
+}
+
+S3JniApi(sqlite3_bind_nio_buffer(),jint,1bind_1nio_1buffer)(
+ JniArgsEnvClass, jobject jpStmt, jint ndx, jobject jBuffer,
+ jint iOffset, jint iN
+){
+ sqlite3_stmt * pStmt = PtrGet_sqlite3_stmt(jpStmt);
+ S3JniNioArgs args;
+ int rc;
+ if( !pStmt || !SJG.g.byteBuffer.klazz ) return SQLITE_MISUSE;
+ rc = s3jni_setup_nio_args(env, &args, jBuffer, iOffset, iN);
+ if(rc){
+ return rc;
+ }else if( !args.pStart || !args.nOut ){
+ return sqlite3_bind_null(pStmt, ndx);
+ }
+ return sqlite3_bind_blob( pStmt, (int)ndx, args.pStart,
+ args.nOut, SQLITE_TRANSIENT );
+}
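
A minimal Java-side usage sketch of the slice semantics implemented by s3jni_setup_nio_args(), assuming CApi-level wrappers matching the JNI declarations added to sqlite3-jni.h (sqlite3_jni_supports_nio() and the sqlite3_stmt-taking sqlite3_bind_nio_buffer()).

import java.nio.ByteBuffer;
import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.sqlite3_stmt;

final class NioBindSketch {
  // Binds bytes [10, end) of buf to parameter 1: a negative length means
  // "through the end of the buffer", a positive length which overruns the
  // buffer yields SQLITE_ERROR, and an empty slice binds SQL NULL.
  static int bindTail(sqlite3_stmt stmt, ByteBuffer buf){
    if( !CApi.sqlite3_jni_supports_nio() ) return CApi.SQLITE_MISUSE;
    return CApi.sqlite3_bind_nio_buffer(stmt, 1, buf, 10, -1);
  }
}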
S3JniApi(sqlite3_bind_double(),jint,1bind_1double)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jdouble val
){
- return (jint)sqlite3_bind_double(S3JniLongPtr_sqlite3_stmt(jpStmt),
+ return (jint)sqlite3_bind_double(LongPtrGet_sqlite3_stmt(jpStmt),
(int)ndx, (double)val);
}
S3JniApi(sqlite3_bind_int(),jint,1bind_1int)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jint val
){
- return (jint)sqlite3_bind_int(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx, (int)val);
+ return (jint)sqlite3_bind_int(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx, (int)val);
}
S3JniApi(sqlite3_bind_int64(),jint,1bind_1int64)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jlong val
){
- return (jint)sqlite3_bind_int64(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx, (sqlite3_int64)val);
+ return (jint)sqlite3_bind_int64(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx, (sqlite3_int64)val);
}
/*
** Bind a new global ref to Object `val` using sqlite3_bind_pointer().
*/
S3JniApi(sqlite3_bind_java_object(),jint,1bind_1java_1object)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jobject val
){
- sqlite3_stmt * const pStmt = S3JniLongPtr_sqlite3_stmt(jpStmt);
+ sqlite3_stmt * const pStmt = LongPtrGet_sqlite3_stmt(jpStmt);
int rc = SQLITE_MISUSE;
if(pStmt){
jobject const rv = S3JniRefGlobal(val);
if( rv ){
- rc = sqlite3_bind_pointer(pStmt, ndx, rv, ResultJavaValuePtrStr,
+ rc = sqlite3_bind_pointer(pStmt, ndx, rv, s3jni__value_jref_key,
S3Jni_jobject_finalizer);
}else if(val){
rc = SQLITE_NOMEM;
}else{
rc = sqlite3_bind_null(pStmt, ndx);
@@ -2430,26 +2686,26 @@
}
S3JniApi(sqlite3_bind_null(),jint,1bind_1null)(
JniArgsEnvClass, jlong jpStmt, jint ndx
){
- return (jint)sqlite3_bind_null(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx);
+ return (jint)sqlite3_bind_null(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx);
}
S3JniApi(sqlite3_bind_parameter_count(),jint,1bind_1parameter_1count)(
JniArgsEnvClass, jlong jpStmt
){
- return (jint)sqlite3_bind_parameter_count(S3JniLongPtr_sqlite3_stmt(jpStmt));
+ return (jint)sqlite3_bind_parameter_count(LongPtrGet_sqlite3_stmt(jpStmt));
}
S3JniApi(sqlite3_bind_parameter_index(),jint,1bind_1parameter_1index)(
JniArgsEnvClass, jlong jpStmt, jbyteArray jName
){
int rc = 0;
jbyte * const pBuf = s3jni_jbyteArray_bytes(jName);
if( pBuf ){
- rc = sqlite3_bind_parameter_index(S3JniLongPtr_sqlite3_stmt(jpStmt),
+ rc = sqlite3_bind_parameter_index(LongPtrGet_sqlite3_stmt(jpStmt),
(const char *)pBuf);
s3jni_jbyteArray_release(jName, pBuf);
}
return rc;
}
@@ -2456,11 +2712,11 @@
S3JniApi(sqlite3_bind_parameter_name(),jstring,1bind_1parameter_1name)(
JniArgsEnvClass, jlong jpStmt, jint ndx
){
const char *z =
- sqlite3_bind_parameter_name(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx);
+ sqlite3_bind_parameter_name(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx);
return z ? s3jni_utf8_to_jstring(z, -1) : 0;
}
/*
** Impl of sqlite3_bind_text/text16().
@@ -2478,18 +2734,18 @@
/* Note that we rely on the Java layer having assured that baData
is NUL-terminated if nMax is negative. In order to avoid UB for
such cases, we do not expose the byte-limit arguments in the
public API. */
rc = is16
- ? sqlite3_bind_text16(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx,
+ ? sqlite3_bind_text16(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx,
pBuf, (int)nMax, SQLITE_TRANSIENT)
- : sqlite3_bind_text(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx,
+ : sqlite3_bind_text(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx,
(const char *)pBuf,
(int)nMax, SQLITE_TRANSIENT);
}else{
rc = baData
- ? sqlite3_bind_null(S3JniLongPtr_sqlite3_stmt(jpStmt), (int)ndx)
+ ? sqlite3_bind_null(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx)
: SQLITE_NOMEM;
}
s3jni_jbyteArray_release(baData, pBuf);
return (jint)rc;
@@ -2509,13 +2765,13 @@
S3JniApi(sqlite3_bind_value(),jint,1bind_1value)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jlong jpValue
){
int rc = 0;
- sqlite3_stmt * pStmt = S3JniLongPtr_sqlite3_stmt(jpStmt);
+ sqlite3_stmt * pStmt = LongPtrGet_sqlite3_stmt(jpStmt);
if( pStmt ){
- sqlite3_value *v = S3JniLongPtr_sqlite3_value(jpValue);
+ sqlite3_value *v = LongPtrGet_sqlite3_value(jpValue);
if( v ){
rc = sqlite3_bind_value(pStmt, (int)ndx, v);
}else{
rc = sqlite3_bind_null(pStmt, (int)ndx);
}
@@ -2526,39 +2782,39 @@
}
S3JniApi(sqlite3_bind_zeroblob(),jint,1bind_1zeroblob)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jint n
){
- return (jint)sqlite3_bind_zeroblob(S3JniLongPtr_sqlite3_stmt(jpStmt),
+ return (jint)sqlite3_bind_zeroblob(LongPtrGet_sqlite3_stmt(jpStmt),
(int)ndx, (int)n);
}
S3JniApi(sqlite3_bind_zeroblob64(),jint,1bind_1zeroblob64)(
JniArgsEnvClass, jlong jpStmt, jint ndx, jlong n
){
- return (jint)sqlite3_bind_zeroblob64(S3JniLongPtr_sqlite3_stmt(jpStmt),
+ return (jint)sqlite3_bind_zeroblob64(LongPtrGet_sqlite3_stmt(jpStmt),
(int)ndx, (sqlite3_uint64)n);
}
S3JniApi(sqlite3_blob_bytes(),jint,1blob_1bytes)(
JniArgsEnvClass, jlong jpBlob
){
- return sqlite3_blob_bytes(S3JniLongPtr_sqlite3_blob(jpBlob));
+ return sqlite3_blob_bytes(LongPtrGet_sqlite3_blob(jpBlob));
}
S3JniApi(sqlite3_blob_close(),jint,1blob_1close)(
JniArgsEnvClass, jlong jpBlob
){
- sqlite3_blob * const b = S3JniLongPtr_sqlite3_blob(jpBlob);
+ sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob);
return b ? (jint)sqlite3_blob_close(b) : SQLITE_MISUSE;
}
S3JniApi(sqlite3_blob_open(),jint,1blob_1open)(
JniArgsEnvClass, jlong jpDb, jstring jDbName, jstring jTbl, jstring jCol,
jlong jRowId, jint flags, jobject jOut
){
- sqlite3 * const db = S3JniLongPtr_sqlite3(jpDb);
+ sqlite3 * const db = LongPtrGet_sqlite3(jpDb);
sqlite3_blob * pBlob = 0;
char * zDbName = 0, * zTableName = 0, * zColumnName = 0;
int rc;
if( !db || !jDbName || !jTbl || !jCol ) return SQLITE_MISUSE;
@@ -2588,41 +2844,88 @@
){
jbyte * const pBa = s3jni_jbyteArray_bytes(jTgt);
int rc = jTgt ? (pBa ? SQLITE_MISUSE : SQLITE_NOMEM) : SQLITE_MISUSE;
if( pBa ){
jsize const nTgt = (*env)->GetArrayLength(env, jTgt);
- rc = sqlite3_blob_read(S3JniLongPtr_sqlite3_blob(jpBlob), pBa,
+ rc = sqlite3_blob_read(LongPtrGet_sqlite3_blob(jpBlob), pBa,
(int)nTgt, (int)iOffset);
if( 0==rc ){
s3jni_jbyteArray_commit(jTgt, pBa);
}else{
s3jni_jbyteArray_release(jTgt, pBa);
}
}
return rc;
}
+
+S3JniApi(sqlite3_blob_read_nio_buffer(),jint,1blob_1read_1nio_1buffer)(
+ JniArgsEnvClass, jlong jpBlob, jint iSrcOff, jobject jBB, jint iTgtOff, jint iHowMany
+){
+ sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob);
+ S3JniNioArgs args;
+ int rc;
+ if( !b || !SJG.g.byteBuffer.klazz || iHowMany<0 ){
+ return SQLITE_MISUSE;
+ }else if( iTgtOff<0 || iSrcOff<0 ){
+ return SQLITE_ERROR
+ /* for consistency with underlying sqlite3_blob_read() */;
+ }else if( 0==iHowMany ){
+ return 0;
+ }
+ rc = s3jni_setup_nio_args(env, &args, jBB, iTgtOff, iHowMany);
+ if(rc){
+ return rc;
+ }else if( !args.pStart || !args.nOut ){
+ return 0;
+ }
+ assert( args.iHowMany>0 );
+ return sqlite3_blob_read( b, args.pStart, (int)args.nOut, (int)iSrcOff );
+}
S3JniApi(sqlite3_blob_reopen(),jint,1blob_1reopen)(
JniArgsEnvClass, jlong jpBlob, jlong iNewRowId
){
- return (jint)sqlite3_blob_reopen(S3JniLongPtr_sqlite3_blob(jpBlob),
+ return (jint)sqlite3_blob_reopen(LongPtrGet_sqlite3_blob(jpBlob),
(sqlite3_int64)iNewRowId);
}
S3JniApi(sqlite3_blob_write(),jint,1blob_1write)(
JniArgsEnvClass, jlong jpBlob, jbyteArray jBa, jint iOffset
){
- sqlite3_blob * const b = S3JniLongPtr_sqlite3_blob(jpBlob);
+ sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob);
jbyte * const pBuf = b ? s3jni_jbyteArray_bytes(jBa) : 0;
const jsize nBA = pBuf ? (*env)->GetArrayLength(env, jBa) : 0;
int rc = SQLITE_MISUSE;
if(b && pBuf){
rc = sqlite3_blob_write( b, pBuf, (int)nBA, (int)iOffset );
}
s3jni_jbyteArray_release(jBa, pBuf);
return (jint)rc;
}
+
+S3JniApi(sqlite3_blob_write_nio_buffer(),jint,1blob_1write_1nio_1buffer)(
+ JniArgsEnvClass, jlong jpBlob, jint iTgtOff, jobject jBB, jint iSrcOff, jint iHowMany
+){
+ sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob);
+ S3JniNioArgs args;
+ int rc;
+ if( !b || !SJG.g.byteBuffer.klazz ){
+ return SQLITE_MISUSE;
+ }else if( iTgtOff<0 || iSrcOff<0 ){
+ return SQLITE_ERROR
+ /* for consistency with underlying sqlite3_blob_write() */;
+ }else if( 0==iHowMany ){
+ return 0;
+ }
+ rc = s3jni_setup_nio_args(env, &args, jBB, iSrcOff, iHowMany);
+ if(rc){
+ return rc;
+ }else if( !args.pStart || !args.nOut ){
+ return 0;
+ }
+ return sqlite3_blob_write( b, args.pStart, (int)args.nOut, (int)iTgtOff );
+}
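
A sketch of round-tripping blob bytes through a direct ByteBuffer with the two new bindings. It assumes sqlite3_blob-taking CApi overloads which forward the handle's native pointer to the jlong-taking natives above; those overload shapes are assumptions, not shown in this diff.

import java.nio.ByteBuffer;
import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.sqlite3_blob;

final class BlobNioSketch {
  // Reads nBytes from offset 0 of the open blob handle into buf, then
  // writes them back to offset 0 of the blob.
  static int roundTrip(sqlite3_blob blob, int nBytes){
    ByteBuffer buf = ByteBuffer.allocateDirect(nBytes);
    int rc = CApi.sqlite3_blob_read_nio_buffer(blob, 0, buf, 0, nBytes);
    if( 0==rc ){
      rc = CApi.sqlite3_blob_write_nio_buffer(blob, 0, buf, 0, nBytes);
    }
    return rc;
  }
}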
/* Central C-to-Java busy handler proxy. */
static int s3jni_busy_handler(void* pState, int n){
S3JniDb * const ps = (S3JniDb *)pState;
int rc = 0;
@@ -2818,11 +3121,11 @@
S3JniHook_unref(pHook);
}
}else{
jclass const klazz = (*env)->GetObjectClass(env, jHook);
jmethodID const xCallback = (*env)->GetMethodID(
- env, klazz, "call", "(Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)I"
+ env, klazz, "call", "(Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)V"
);
S3JniUnrefLocal(klazz);
S3JniIfThrew {
rc = s3jni_db_exception(ps->pDb, SQLITE_MISUSE,
"Cannot not find matching call() in "
@@ -2867,10 +3170,45 @@
S3JniApi(sqlite3_column_int64(),jlong,1column_1int64)(
JniArgsEnvClass, jobject jpStmt, jint ndx
){
return (jlong)sqlite3_column_int64(PtrGet_sqlite3_stmt(jpStmt), (int)ndx);
}
+
+S3JniApi(sqlite3_column_java_object(),jobject,1column_1java_1object)(
+ JniArgsEnvClass, jlong jpStmt, jint ndx
+){
+ sqlite3_stmt * const stmt = LongPtrGet_sqlite3_stmt(jpStmt);
+ jobject rv = 0;
+ if( stmt ){
+ sqlite3 * const db = sqlite3_db_handle(stmt);
+ sqlite3_value * sv;
+ sqlite3_mutex_enter(sqlite3_db_mutex(db));
+ sv = sqlite3_column_value(stmt, (int)ndx);
+ if( sv ){
+ rv = S3JniRefLocal(
+ sqlite3_value_pointer(sv, s3jni__value_jref_key)
+ );
+ }
+ sqlite3_mutex_leave(sqlite3_db_mutex(db));
+ }
+ return rv;
+}
+
+S3JniApi(sqlite3_column_nio_buffer(),jobject,1column_1nio_1buffer)(
+ JniArgsEnvClass, jobject jStmt, jint ndx
+){
+ sqlite3_stmt * const stmt = PtrGet_sqlite3_stmt(jStmt);
+ jobject rv = 0;
+ if( stmt ){
+ const void * const p = sqlite3_column_blob(stmt, (int)ndx);
+ if( p ){
+ const int n = sqlite3_column_bytes(stmt, (int)ndx);
+ rv = s3jni__blob_to_ByteBuffer(env, p, n);
+ }
+ }
+ return rv;
+}
S3JniApi(sqlite3_column_text(),jbyteArray,1column_1text)(
JniArgsEnvClass, jobject jpStmt, jint ndx
){
sqlite3_stmt * const stmt = PtrGet_sqlite3_stmt(jpStmt);
@@ -2923,11 +3261,14 @@
if( hook.jObj ){
rc = isCommit
? (int)(*env)->CallIntMethod(env, hook.jObj, hook.midCallback)
: (int)((*env)->CallVoidMethod(env, hook.jObj, hook.midCallback), 0);
S3JniIfThrew{
- rc = s3jni_db_exception(ps->pDb, SQLITE_ERROR, "hook callback threw");
+ rc = s3jni_db_exception(ps->pDb, SQLITE_ERROR,
+ isCommit
+ ? "Commit hook callback threw"
+ : "Rollback hook callback threw");
}
S3JniHook_localundup(hook);
}
return rc;
}
@@ -3026,11 +3367,11 @@
0==sqlite3_compileoption_used(zUtf8) ? JNI_FALSE : JNI_TRUE;
s3jni_mutf8_release(name, zUtf8);
return rc;
}
-S3JniApi(sqlite3_complete(),int,1complete)(
+S3JniApi(sqlite3_complete(),jint,1complete)(
JniArgsEnvClass, jbyteArray jSql
){
jbyte * const pBuf = s3jni_jbyteArray_bytes(jSql);
const jsize nBA = pBuf ? (*env)->GetArrayLength(env, jSql) : 0;
int rc;
@@ -3042,12 +3383,13 @@
: (jSql ? SQLITE_NOMEM : SQLITE_MISUSE);
s3jni_jbyteArray_release(jSql, pBuf);
return rc;
}
-S3JniApi(sqlite3_config() /*for a small subset of options.*/,
- jint,1config__I)(JniArgsEnvClass, jint n){
+S3JniApi(sqlite3_config() /*for a small subset of options.*/
+ sqlite3_config__enable()/* internal name to avoid name-mangling issues*/,
+ jint,1config_1_1enable)(JniArgsEnvClass, jint n){
switch( n ){
case SQLITE_CONFIG_SINGLETHREAD:
case SQLITE_CONFIG_MULTITHREAD:
case SQLITE_CONFIG_SERIALIZED:
return sqlite3_config( n );
@@ -3073,12 +3415,13 @@
S3JniHook_localundup(hook);
S3JniUnrefLocal(jArg1);
}
}
-S3JniApi(sqlite3_config() /* for SQLITE_CONFIG_LOG */,
- jint, 1config__Lorg_sqlite_jni_ConfigLogCallback_2
+S3JniApi(sqlite3_config() /* for SQLITE_CONFIG_LOG */
+ sqlite3_config__config_log() /* internal name */,
+ jint, 1config_1_1CONFIG_1LOG
)(JniArgsEnvClass, jobject jLog){
S3JniHook * const pHook = &SJG.hook.configlog;
int rc = 0;
S3JniGlobal_mutex_enter;
@@ -3148,13 +3491,14 @@
void sqlite3_init_sqllog(void){
sqlite3_config( SQLITE_CONFIG_SQLLOG, s3jni_config_sqllog, 0 );
}
#endif
-S3JniApi(sqlite3_config() /* for SQLITE_CONFIG_SQLLOG */,
- jint, 1config__Lorg_sqlite_jni_ConfigSqllogCallback_2)(
- JniArgsEnvClass, jobject jLog){
+S3JniApi(sqlite3_config() /* for SQLITE_CONFIG_SQLLOG */
+ sqlite3_config__SQLLOG() /*internal name*/,
+ jint, 1config_1_1SQLLOG
+)(JniArgsEnvClass, jobject jLog){
#ifndef SQLITE_ENABLE_SQLLOG
return SQLITE_MISUSE;
#else
S3JniHook * const pHook = &SJG.hook.sqllog;
int rc = 0;
@@ -3410,11 +3754,10 @@
if( 0==rc && jOut ){
OutputPointer_set_Int32(env, jOut, pOut);
}
break;
}
- case 0:
default:
rc = SQLITE_MISUSE;
}
return (jint)rc;
}
@@ -3473,11 +3816,11 @@
rc = sqlite3_db_readonly(ps ? ps->pDb : 0, zDbName);
sqlite3_free(zDbName);
return (jint)rc;
}
-S3JniApi(sqlite3_db_release_memory(),int,1db_1release_1memory)(
+S3JniApi(sqlite3_db_release_memory(),jint,1db_1release_1memory)(
JniArgsEnvClass, jobject jDb
){
sqlite3 * const pDb = PtrGet_sqlite3(jDb);
return pDb ? sqlite3_db_release_memory(pDb) : SQLITE_MISUSE;
}
@@ -3514,13 +3857,20 @@
}
S3JniApi(sqlite3_errstr(),jstring,1errstr)(
JniArgsEnvClass, jint rcCode
){
- jstring const rv = (*env)->NewStringUTF(env, sqlite3_errstr((int)rcCode))
- /* We know these values to be plain ASCII, so pose no MUTF-8
- ** incompatibility */;
+ jstring rv;
+ const char * z = sqlite3_errstr((int)rcCode);
+ if( !z ){
+ /* This hypothetically cannot happen, but we'll behave like the
+ low-level library would in such a case... */
+ z = "unknown error";
+ }
+ rv = (*env)->NewStringUTF(env, z)
+ /* We know these values to be plain ASCII, so they pose no MUTF-8
+ ** incompatibility */;
s3jni_oom_check( rv );
return rv;
}
#ifndef SQLITE_ENABLE_NORMALIZE
@@ -3568,23 +3918,25 @@
#else
return 0;
#endif
}
-S3JniApi(sqlite3_extended_result_codes(),jboolean,1extended_1result_1codes)(
+S3JniApi(sqlite3_extended_result_codes(),jint,1extended_1result_1codes)(
JniArgsEnvClass, jobject jpDb, jboolean onoff
){
sqlite3 * const pDb = PtrGet_sqlite3(jpDb);
- int const rc = pDb ? sqlite3_extended_result_codes(pDb, onoff ? 1 : 0) : 0;
- return rc ? JNI_TRUE : JNI_FALSE;
+ int const rc = pDb
+ ? sqlite3_extended_result_codes(pDb, onoff ? 1 : 0)
+ : SQLITE_MISUSE;
+ return rc;
}
S3JniApi(sqlite3_finalize(),jint,1finalize)(
JniArgsEnvClass, jlong jpStmt
){
return jpStmt
- ? sqlite3_finalize(S3JniLongPtr_sqlite3_stmt(jpStmt))
+ ? sqlite3_finalize(LongPtrGet_sqlite3_stmt(jpStmt))
: 0;
}
S3JniApi(sqlite3_get_auxdata(),jobject,1get_1auxdata)(
JniArgsEnvClass, jobject jCx, jint n
@@ -3621,17 +3973,42 @@
/*
** Uncaches the current JNIEnv from the S3JniGlobal state, clearing
** any resources owned by that cache entry and making that slot
** available for re-use.
*/
-JniDecl(jboolean,1java_1uncache_1thread)(JniArgsEnvClass){
+S3JniApi(sqlite3_java_uncache_thread(), jboolean, 1java_1uncache_1thread)(
+ JniArgsEnvClass
+){
int rc;
S3JniEnv_mutex_enter;
rc = S3JniEnv_uncache(env);
S3JniEnv_mutex_leave;
return rc ? JNI_TRUE : JNI_FALSE;
}
+
+S3JniApi(sqlite3_jni_db_error(), jint, 1jni_1db_1error)(
+ JniArgsEnvClass, jobject jDb, jint jRc, jstring jStr
+){
+ S3JniDb * const ps = S3JniDb_from_java(jDb);
+ int rc = SQLITE_MISUSE;
+ if( ps ){
+ char *zStr;
+ zStr = jStr
+ ? s3jni_jstring_to_utf8( jStr, 0)
+ : NULL;
+ rc = s3jni_db_error( ps->pDb, (int)jRc, zStr );
+ sqlite3_free(zStr);
+ }
+ return rc;
+}
+
+S3JniApi(sqlite3_jni_supports_nio(), jboolean,1jni_1supports_1nio)(
+ JniArgsEnvClass
+){
+ return SJG.g.byteBuffer.klazz ? JNI_TRUE : JNI_FALSE;
+}
+
S3JniApi(sqlite3_keyword_check(),jboolean,1keyword_1check)(
JniArgsEnvClass, jstring jWord
){
int nWord = 0;
@@ -3802,18 +4179,19 @@
sqlite3_free(zVfs);
return (jint)rc;
}
/* Proxy for the sqlite3_prepare[_v2/3]() family. */
-jint sqlite3_jni_prepare_v123( int prepVersion, JNIEnv * const env, jclass self,
- jlong jpDb, jbyteArray baSql,
- jint nMax, jint prepFlags,
- jobject jOutStmt, jobject outTail){
+static jint sqlite3_jni_prepare_v123( int prepVersion, JNIEnv * const env,
+ jclass self,
+ jlong jpDb, jbyteArray baSql,
+ jint nMax, jint prepFlags,
+ jobject jOutStmt, jobject outTail){
sqlite3_stmt * pStmt = 0;
jobject jStmt = 0;
const char * zTail = 0;
- sqlite3 * const pDb = S3JniLongPtr_sqlite3(jpDb);
+ sqlite3 * const pDb = LongPtrGet_sqlite3(jpDb);
jbyte * const pBuf = pDb ? s3jni_jbyteArray_bytes(baSql) : 0;
int rc = SQLITE_ERROR;
assert(prepVersion==1 || prepVersion==2 || prepVersion==3);
if( !pDb || !jOutStmt ){
@@ -3964,15 +4342,15 @@
return s3jni_updatepre_hook_impl(pState, NULL, opId, zDb, zTable, nRowid, 0);
}
#if !defined(SQLITE_ENABLE_PREUPDATE_HOOK)
/* We need no-op impls for preupdate_{count,depth,blobwrite}() */
-S3JniApi(sqlite3_preupdate_blobwrite(),int,1preupdate_1blobwrite)(
+S3JniApi(sqlite3_preupdate_blobwrite(),jint,1preupdate_1blobwrite)(
JniArgsEnvClass, jlong jDb){ return SQLITE_MISUSE; }
-S3JniApi(sqlite3_preupdate_count(),int,1preupdate_1count)(
+S3JniApi(sqlite3_preupdate_count(),jint,1preupdate_1count)(
JniArgsEnvClass, jlong jDb){ return SQLITE_MISUSE; }
-S3JniApi(sqlite3_preupdate_depth(),int,1preupdate_1depth)(
+S3JniApi(sqlite3_preupdate_depth(),jint,1preupdate_1depth)(
JniArgsEnvClass, jlong jDb){ return SQLITE_MISUSE; }
#endif /* !SQLITE_ENABLE_PREUPDATE_HOOK */
/*
** JNI wrapper for both sqlite3_update_hook() and
@@ -4063,11 +4441,11 @@
/* Impl for sqlite3_preupdate_{new,old}(). */
static int s3jni_preupdate_newold(JNIEnv * const env, int isNew, jlong jpDb,
jint iCol, jobject jOut){
#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
- sqlite3 * const pDb = S3JniLongPtr_sqlite3(jpDb);
+ sqlite3 * const pDb = LongPtrGet_sqlite3(jpDb);
int rc = SQLITE_MISUSE;
if( pDb ){
sqlite3_value * pOut = 0;
int (*fOrig)(sqlite3*,int,sqlite3_value**) =
isNew ? sqlite3_preupdate_new : sqlite3_preupdate_old;
@@ -4284,11 +4662,11 @@
){
sqlite3_result_double(PtrGet_sqlite3_context(jpCx), v);
}
S3JniApi(sqlite3_result_error(),void,1result_1error)(
- JniArgsEnvClass, jobject jpCx, jbyteArray baMsg, int eTextRep
+ JniArgsEnvClass, jobject jpCx, jbyteArray baMsg, jint eTextRep
){
const char * zUnspecified = "Unspecified error.";
jsize const baLen = (*env)->GetArrayLength(env, baMsg);
jbyte * const pjBuf = baMsg ? s3jni_jbyteArray_bytes(baMsg) : NULL;
switch( pjBuf ? eTextRep : SQLITE_UTF8 ){
@@ -4349,24 +4727,65 @@
if( !pCx ) return;
else if( v ){
jobject const rjv = S3JniRefGlobal(v);
if( rjv ){
sqlite3_result_pointer(pCx, rjv,
- ResultJavaValuePtrStr, S3Jni_jobject_finalizer);
+ s3jni__value_jref_key, S3Jni_jobject_finalizer);
}else{
sqlite3_result_error_nomem(PtrGet_sqlite3_context(jpCx));
}
}else{
sqlite3_result_null(PtrGet_sqlite3_context(jpCx));
}
}
+
+S3JniApi(sqlite3_result_nio_buffer(),void,1result_1nio_1buffer)(
+ JniArgsEnvClass, jobject jpCtx, jobject jBuffer,
+ jint iOffset, jint iN
+){
+ sqlite3_context * pCx = PtrGet_sqlite3_context(jpCtx);
+ int rc;
+ S3JniNioArgs args;
+ if( !pCx ){
+ return;
+ }else if( !SJG.g.byteBuffer.klazz ){
+ sqlite3_result_error(
+ pCx, "This JVM does not support JNI access to ByteBuffers.", -1
+ );
+ return;
+ }
+ rc = s3jni_setup_nio_args(env, &args, jBuffer, iOffset, iN);
+ if(rc){
+ if( iOffset<0 ){
+ sqlite3_result_error(pCx, "Start index may not be negative.", -1);
+ }else if( SQLITE_TOOBIG==rc ){
+ sqlite3_result_error_toobig(pCx);
+ }else{
+ sqlite3_result_error(
+ pCx, "Invalid arguments to sqlite3_result_nio_buffer().", -1
+ );
+ }
+ }else if( !args.pStart || !args.nOut ){
+ sqlite3_result_null(pCx);
+ }else{
+ sqlite3_result_blob(pCx, args.pStart, args.nOut, SQLITE_TRANSIENT);
+ }
+}
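
A sketch of a UDF handing a direct ByteBuffer slice back as its blob result, assuming the ScalarFunction callback shape from org.sqlite.jni.capi and a CApi-level sqlite3_result_nio_buffer() matching the JNI declaration in sqlite3-jni.h. The class name is hypothetical.

import java.nio.ByteBuffer;
import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.ScalarFunction;
import org.sqlite.jni.capi.sqlite3_context;
import org.sqlite.jni.capi.sqlite3_value;

final class BufferResultUdf extends ScalarFunction {
  private final ByteBuffer buf = ByteBuffer.allocateDirect(64);
  @Override public void xFunc(sqlite3_context cx, sqlite3_value[] argv){
    // Result is bytes [16, end) of buf; a negative length means "through
    // the end of the buffer" and an empty slice results in SQL NULL.
    CApi.sqlite3_result_nio_buffer(cx, buf, 16, -1);
  }
}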
+
S3JniApi(sqlite3_result_null(),void,1result_1null)(
JniArgsEnvClass, jobject jpCx
){
sqlite3_result_null(PtrGet_sqlite3_context(jpCx));
}
+
+S3JniApi(sqlite3_result_subtype(),void,1result_1subtype)(
+ JniArgsEnvClass, jobject jpCx, jint v
+){
+ sqlite3_result_subtype(PtrGet_sqlite3_context(jpCx), (unsigned int)v);
+}
+
S3JniApi(sqlite3_result_text(),void,1result_1text)(
JniArgsEnvClass, jobject jpCx, jbyteArray jBa, jint nMax
){
return result_blob_text(0, SQLITE_UTF8, env,
@@ -4537,24 +4956,12 @@
S3JniEnv_mutex_enter; {
while( SJG.envCache.aHead ){
S3JniEnv_uncache( SJG.envCache.aHead->env );
}
} S3JniEnv_mutex_leave;
-#if 0
- /*
- ** Is automatically closing any still-open dbs a good idea? We will
- ** get rid of the perDb list once sqlite3 gets a per-db client
- ** state, at which point we won't have a central list of databases
- ** to close.
- */
- S3JniDb_mutex_enter;
- while( SJG.perDb.pHead ){
- s3jni_close_db(env, SJG.perDb.pHead->jDb, 2);
- }
- S3JniDb_mutex_leave;
-#endif
- /* Do not clear S3JniGlobal.jvm: it's legal to restart the lib. */
+ /* Do not clear S3JniGlobal.jvm or S3JniGlobal.g: it's legal to
+ ** restart the lib. */
return sqlite3_shutdown();
}
S3JniApi(sqlite3_status(),jint,1status)(
JniArgsEnvClass, jint op, jobject jOutCurrent, jobject jOutHigh,
@@ -4592,11 +4999,11 @@
static int s3jni_strlike_glob(int isLike, JNIEnv *const env,
jbyteArray baG, jbyteArray baT, jint escLike){
int rc = 0;
jbyte * const pG = s3jni_jbyteArray_bytes(baG);
- jbyte * const pT = pG ? s3jni_jbyteArray_bytes(baT) : 0;
+ jbyte * const pT = s3jni_jbyteArray_bytes(baT);
/* Note that we're relying on the byte arrays having been
NUL-terminated on the Java side. */
rc = isLike
? sqlite3_strlike((const char *)pG, (const char *)pT,
@@ -4631,17 +5038,17 @@
}
return rv;
}
S3JniApi(sqlite3_step(),jint,1step)(
- JniArgsEnvClass,jobject jStmt
+ JniArgsEnvClass, jlong jpStmt
){
- sqlite3_stmt * const pStmt = PtrGet_sqlite3_stmt(jStmt);
+ sqlite3_stmt * const pStmt = LongPtrGet_sqlite3_stmt(jpStmt);
return pStmt ? (jint)sqlite3_step(pStmt) : (jint)SQLITE_MISUSE;
}
-S3JniApi(sqlite3_table_column_metadata(),int,1table_1column_1metadata)(
+S3JniApi(sqlite3_table_column_metadata(),jint,1table_1column_1metadata)(
JniArgsEnvClass, jobject jDb, jstring jDbName, jstring jTableName,
jstring jColumnName, jobject jDataType, jobject jCollSeq, jobject jNotNull,
jobject jPrimaryKey, jobject jAutoinc
){
sqlite3 * const db = PtrGet_sqlite3(jDb);
@@ -4813,47 +5220,47 @@
S3JniApi(sqlite3_value_blob(),jbyteArray,1value_1blob)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
const jbyte * pBytes = sv ? sqlite3_value_blob(sv) : 0;
int const nLen = pBytes ? sqlite3_value_bytes(sv) : 0;
s3jni_oom_check( nLen ? !!pBytes : 1 );
return pBytes
? s3jni_new_jbyteArray(pBytes, nLen)
: NULL;
}
-S3JniApi(sqlite3_value_bytes(),int,1value_1bytes)(
+S3JniApi(sqlite3_value_bytes(),jint,1value_1bytes)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
return sv ? sqlite3_value_bytes(sv) : 0;
}
-S3JniApi(sqlite3_value_bytes16(),int,1value_1bytes16)(
+S3JniApi(sqlite3_value_bytes16(),jint,1value_1bytes16)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
return sv ? sqlite3_value_bytes16(sv) : 0;
}
S3JniApi(sqlite3_value_double(),jdouble,1value_1double)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
return (jdouble) (sv ? sqlite3_value_double(sv) : 0.0);
}
S3JniApi(sqlite3_value_dup(),jobject,1value_1dup)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
sqlite3_value * const sd = sv ? sqlite3_value_dup(sv) : 0;
jobject rv = sd ? new_java_sqlite3_value(env, sd) : 0;
if( sd && !rv ) {
/* OOM */
sqlite3_value_free(sd);
@@ -4862,43 +5269,58 @@
}
S3JniApi(sqlite3_value_free(),void,1value_1free)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
if( sv ){
sqlite3_value_free(sv);
}
}
S3JniApi(sqlite3_value_int(),jint,1value_1int)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
return (jint) (sv ? sqlite3_value_int(sv) : 0);
}
S3JniApi(sqlite3_value_int64(),jlong,1value_1int64)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
return (jlong) (sv ? sqlite3_value_int64(sv) : 0LL);
}
S3JniApi(sqlite3_value_java_object(),jobject,1value_1java_1object)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
return sv
- ? sqlite3_value_pointer(sv, ResultJavaValuePtrStr)
+ ? sqlite3_value_pointer(sv, s3jni__value_jref_key)
: 0;
}
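
A pass-through sketch tying together the s3jni__value_jref_key-based APIs (bind_java_object, result_java_object, value_java_object, column_java_object). Per SQLite's pointer-passing interface, an object bound this way is visible only to code receiving that exact sqlite3_value (e.g. a UDF argument); to SQL it looks like NULL. The sqlite3_value-taking CApi overload and the registration name below are assumptions.

import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.ScalarFunction;
import org.sqlite.jni.capi.sqlite3_context;
import org.sqlite.jni.capi.sqlite3_value;

final class TakesJavaObject extends ScalarFunction {
  @Override public void xFunc(sqlite3_context cx, sqlite3_value[] argv){
    // Non-null only if the argument was bound via
    // sqlite3_bind_java_object() or set via sqlite3_result_java_object().
    Object o = CApi.sqlite3_value_java_object(argv[0]);
    CApi.sqlite3_result_int(cx, null==o ? 0 : 1);
  }
}
// Hypothetical usage, assuming the UDF is registered as "takes_obj":
//   sqlite3_stmt q = /* prepare("SELECT takes_obj(?1)") */;
//   CApi.sqlite3_bind_java_object(q, 1, myObject);
//   CApi.sqlite3_step(q);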
+
+S3JniApi(sqlite3_value_nio_buffer(),jobject,1value_1nio_1buffer)(
+ JniArgsEnvClass, jobject jVal
+){
+ sqlite3_value * const sv = PtrGet_sqlite3_value(jVal);
+ jobject rv = 0;
+ if( sv ){
+ const void * const p = sqlite3_value_blob(sv);
+ if( p ){
+ const int n = sqlite3_value_bytes(sv);
+ rv = s3jni__blob_to_ByteBuffer(env, p, n);
+ }
+ }
+ return rv;
+}
S3JniApi(sqlite3_value_text(),jbyteArray,1value_1text)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
const unsigned char * const p = sv ? sqlite3_value_text(sv) : 0;
int const n = p ? sqlite3_value_bytes(sv) : 0;
return p ? s3jni_new_jbyteArray(p, n) : 0;
}
@@ -4905,21 +5327,21 @@
#if 0
// this impl might prove useful.
S3JniApi(sqlite3_value_text(),jstring,1value_1text)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
const unsigned char * const p = sv ? sqlite3_value_text(sv) : 0;
int const n = p ? sqlite3_value_bytes(sv) : 0;
return p ? s3jni_utf8_to_jstring( (const char *)p, n) : 0;
}
#endif
S3JniApi(sqlite3_value_text16(),jstring,1value_1text16)(
JniArgsEnvClass, jlong jpSVal
){
- sqlite3_value * const sv = S3JniLongPtr_sqlite3_value(jpSVal);
+ sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal);
const int n = sv ? sqlite3_value_bytes16(sv) : 0;
const void * const p = sv ? sqlite3_value_text16(sv) : 0;
return p ? s3jni_text16_to_jstring(env, p, n) : 0;
}
@@ -5497,11 +5919,11 @@
JniDeclFtsXA(jlong,xRowid)(JniArgsEnvObj,jobject jCtx){
Fts5ExtDecl;
return (jlong)ext->xRowid(PtrGet_Fts5Context(jCtx));
}
-JniDeclFtsXA(int,xSetAuxdata)(JniArgsEnvObj,jobject jCtx, jobject jAux){
+JniDeclFtsXA(jint,xSetAuxdata)(JniArgsEnvObj,jobject jCtx, jobject jAux){
Fts5ExtDecl;
int rc;
S3JniFts5AuxData * pAux;
pAux = s3jni_malloc( sizeof(*pAux));
@@ -5890,10 +6312,32 @@
#if S3JNI_METRICS_MUTEX
SJG.metrics.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
s3jni_oom_fatal( SJG.metrics.mutex );
#endif
+
+ {
+ /* Test whether this JVM supports direct memory access via
+ ByteBuffer. */
+ unsigned char buf[16] = {0};
+ jobject bb = (*env)->NewDirectByteBuffer(env, buf, 16);
+ if( bb ){
+ SJG.g.byteBuffer.klazz = S3JniRefGlobal((*env)->GetObjectClass(env, bb));
+ SJG.g.byteBuffer.midAlloc = (*env)->GetStaticMethodID(
+ env, SJG.g.byteBuffer.klazz, "allocateDirect", "(I)Ljava/nio/ByteBuffer;"
+ );
+ S3JniExceptionIsFatal("Error getting ByteBuffer.allocateDirect() method.");
+ SJG.g.byteBuffer.midLimit = (*env)->GetMethodID(
+ env, SJG.g.byteBuffer.klazz, "limit", "()I"
+ );
+ S3JniExceptionIsFatal("Error getting ByteBuffer.limit() method.");
+ S3JniUnrefLocal(bb);
+ }else{
+ SJG.g.byteBuffer.klazz = 0;
+ SJG.g.byteBuffer.midAlloc = 0;
+ }
+ }
sqlite3_shutdown()
/* So that it becomes legal for Java-level code to call
** sqlite3_config(). */;
}
Index: ext/jni/src/c/sqlite3-jni.h
==================================================================
--- ext/jni/src/c/sqlite3-jni.h
+++ ext/jni/src/c/sqlite3-jni.h
@@ -425,12 +425,10 @@
#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_NOFOLLOW 16777216L
#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_EXRESCODE
#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_EXRESCODE 33554432L
#undef org_sqlite_jni_capi_CApi_SQLITE_PREPARE_PERSISTENT
#define org_sqlite_jni_capi_CApi_SQLITE_PREPARE_PERSISTENT 1L
-#undef org_sqlite_jni_capi_CApi_SQLITE_PREPARE_NORMALIZE
-#define org_sqlite_jni_capi_CApi_SQLITE_PREPARE_NORMALIZE 2L
#undef org_sqlite_jni_capi_CApi_SQLITE_PREPARE_NO_VTAB
#define org_sqlite_jni_capi_CApi_SQLITE_PREPARE_NO_VTAB 4L
#undef org_sqlite_jni_capi_CApi_SQLITE_OK
#define org_sqlite_jni_capi_CApi_SQLITE_OK 0L
#undef org_sqlite_jni_capi_CApi_SQLITE_ERROR
@@ -705,12 +703,16 @@
#define org_sqlite_jni_capi_CApi_SQLITE_TXN_WRITE 2L
#undef org_sqlite_jni_capi_CApi_SQLITE_DETERMINISTIC
#define org_sqlite_jni_capi_CApi_SQLITE_DETERMINISTIC 2048L
#undef org_sqlite_jni_capi_CApi_SQLITE_DIRECTONLY
#define org_sqlite_jni_capi_CApi_SQLITE_DIRECTONLY 524288L
+#undef org_sqlite_jni_capi_CApi_SQLITE_SUBTYPE
+#define org_sqlite_jni_capi_CApi_SQLITE_SUBTYPE 1048576L
#undef org_sqlite_jni_capi_CApi_SQLITE_INNOCUOUS
#define org_sqlite_jni_capi_CApi_SQLITE_INNOCUOUS 2097152L
+#undef org_sqlite_jni_capi_CApi_SQLITE_RESULT_SUBTYPE
+#define org_sqlite_jni_capi_CApi_SQLITE_RESULT_SUBTYPE 16777216L
#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_SCAN_UNIQUE
#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_SCAN_UNIQUE 1L
#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_EQ
#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_EQ 2L
#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_GT
@@ -773,10 +775,26 @@
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1java_1uncache_1thread
(JNIEnv *, jclass);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_jni_supports_nio
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1jni_1supports_1nio
+ (JNIEnv *, jclass);
+
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_jni_db_error
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1jni_1db_1error
+ (JNIEnv *, jclass, jobject, jint, jstring);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_aggregate_context
* Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Z)J
*/
@@ -869,10 +887,18 @@
* Signature: (JILjava/lang/Object;)I
*/
JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1java_1object
(JNIEnv *, jclass, jlong, jint, jobject);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_bind_nio_buffer
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;ILjava/nio/ByteBuffer;II)I
+ */
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1nio_1buffer
+ (JNIEnv *, jclass, jobject, jint, jobject, jint, jint);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_bind_null
* Signature: (JI)I
*/
@@ -973,10 +999,18 @@
* Signature: (J[BI)I
*/
JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1read
(JNIEnv *, jclass, jlong, jbyteArray, jint);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_blob_read_nio_buffer
+ * Signature: (JILjava/nio/ByteBuffer;II)I
+ */
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1read_1nio_1buffer
+ (JNIEnv *, jclass, jlong, jint, jobject, jint, jint);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_blob_reopen
* Signature: (JJ)I
*/
@@ -989,10 +1023,18 @@
* Signature: (J[BI)I
*/
JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1write
(JNIEnv *, jclass, jlong, jbyteArray, jint);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_blob_write_nio_buffer
+ * Signature: (JILjava/nio/ByteBuffer;II)I
+ */
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1write_1nio_1buffer
+ (JNIEnv *, jclass, jlong, jint, jobject, jint, jint);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_busy_handler
* Signature: (JLorg/sqlite/jni/capi/BusyHandlerCallback;)I
*/
@@ -1085,10 +1127,18 @@
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1count
(JNIEnv *, jclass, jlong);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_column_database_name
+ * Signature: (JI)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1database_1name
+ (JNIEnv *, jclass, jlong, jint);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_column_decltype
* Signature: (JI)Ljava/lang/String;
*/
@@ -1117,10 +1167,18 @@
* Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)J
*/
JNIEXPORT jlong JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1int64
(JNIEnv *, jclass, jobject, jint);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_column_java_object
+ * Signature: (JI)Ljava/lang/Object;
+ */
+JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1java_1object
+ (JNIEnv *, jclass, jlong, jint);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_column_name
* Signature: (JI)Ljava/lang/String;
*/
@@ -1127,15 +1185,15 @@
JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1name
(JNIEnv *, jclass, jlong, jint);
/*
* Class: org_sqlite_jni_capi_CApi
- * Method: sqlite3_column_database_name
- * Signature: (JI)Ljava/lang/String;
+ * Method: sqlite3_column_nio_buffer
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)Ljava/nio/ByteBuffer;
*/
-JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1database_1name
- (JNIEnv *, jclass, jlong, jint);
+JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1nio_1buffer
+ (JNIEnv *, jclass, jobject, jint);
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_column_origin_name
* Signature: (JI)Ljava/lang/String;
@@ -1223,30 +1281,30 @@
JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1complete
(JNIEnv *, jclass, jbyteArray);
/*
* Class: org_sqlite_jni_capi_CApi
- * Method: sqlite3_config
+ * Method: sqlite3_config__enable
* Signature: (I)I
*/
-JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config__I
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config_1_1enable
(JNIEnv *, jclass, jint);
/*
* Class: org_sqlite_jni_capi_CApi
- * Method: sqlite3_config
- * Signature: (Lorg/sqlite/jni/capi/ConfigSqllogCallback;)I
- */
-JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config__Lorg_sqlite_jni_capi_ConfigSqllogCallback_2
- (JNIEnv *, jclass, jobject);
-
-/*
- * Class: org_sqlite_jni_capi_CApi
- * Method: sqlite3_config
+ * Method: sqlite3_config__CONFIG_LOG
* Signature: (Lorg/sqlite/jni/capi/ConfigLogCallback;)I
*/
-JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config__Lorg_sqlite_jni_capi_ConfigLogCallback_2
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config_1_1CONFIG_1LOG
+ (JNIEnv *, jclass, jobject);
+
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_config__SQLLOG
+ * Signature: (Lorg/sqlite/jni/capi/ConfigSqlLogCallback;)I
+ */
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config_1_1SQLLOG
(JNIEnv *, jclass, jobject);
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_context_db_handle
@@ -1392,13 +1450,13 @@
(JNIEnv *, jclass, jlong);
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_extended_result_codes
- * Signature: (Lorg/sqlite/jni/capi/sqlite3;Z)Z
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3;Z)I
*/
-JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1extended_1result_1codes
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1extended_1result_1codes
(JNIEnv *, jclass, jobject, jboolean);
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_get_autocommit
@@ -1677,18 +1735,10 @@
* Signature: (Lorg/sqlite/jni/capi/sqlite3_context;I)V
*/
JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1error_1code
(JNIEnv *, jclass, jobject, jint);
-/*
- * Class: org_sqlite_jni_capi_CApi
- * Method: sqlite3_result_null
- * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;)V
- */
-JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1null
- (JNIEnv *, jclass, jobject);
-
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_result_int
* Signature: (Lorg/sqlite/jni/capi/sqlite3_context;I)V
*/
@@ -1709,10 +1759,34 @@
* Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Ljava/lang/Object;)V
*/
JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1java_1object
(JNIEnv *, jclass, jobject, jobject);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_result_nio_buffer
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Ljava/nio/ByteBuffer;II)V
+ */
+JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1nio_1buffer
+ (JNIEnv *, jclass, jobject, jobject, jint, jint);
+
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_result_null
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;)V
+ */
+JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1null
+ (JNIEnv *, jclass, jobject);
+
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_result_subtype
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;I)V
+ */
+JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1subtype
+ (JNIEnv *, jclass, jobject, jint);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_result_value
* Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Lorg/sqlite/jni/capi/sqlite3_value;)V
*/
@@ -1848,14 +1922,14 @@
(JNIEnv *, jclass, jint, jobject, jobject, jboolean);
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_step
- * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)I
+ * Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1step
- (JNIEnv *, jclass, jobject);
+ (JNIEnv *, jclass, jlong);
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_stmt_busy
* Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)Z
@@ -2061,10 +2135,18 @@
* Signature: (J)Ljava/lang/Object;
*/
JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1java_1object
(JNIEnv *, jclass, jlong);
+/*
+ * Class: org_sqlite_jni_capi_CApi
+ * Method: sqlite3_value_nio_buffer
+ * Signature: (Lorg/sqlite/jni/capi/sqlite3_value;)Ljava/nio/ByteBuffer;
+ */
+JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1nio_1buffer
+ (JNIEnv *, jclass, jobject);
+
/*
* Class: org_sqlite_jni_capi_CApi
* Method: sqlite3_value_nochange
* Signature: (J)I
*/
ADDED ext/jni/src/org/sqlite/jni/annotation/Experimental.java
Index: ext/jni/src/org/sqlite/jni/annotation/Experimental.java
==================================================================
--- /dev/null
+++ ext/jni/src/org/sqlite/jni/annotation/Experimental.java
@@ -0,0 +1,30 @@
+/*
+** 2023-09-27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file houses the Experimental annotation for the sqlite3 C API.
+*/
+package org.sqlite.jni.annotation;
+import java.lang.annotation.*;
+
+/**
+ This annotation is for flagging methods, constructors, and types
+ which are expressly experimental and subject to any amount of
+ change or outright removal. Client code should not rely on such
+ features.
+*/
+@Documented
+@Retention(RetentionPolicy.SOURCE)
+@Target({
+ ElementType.METHOD,
+ ElementType.CONSTRUCTOR,
+ ElementType.TYPE
+})
+public @interface Experimental{}
Index: ext/jni/src/org/sqlite/jni/annotation/NotNull.java
==================================================================
--- ext/jni/src/org/sqlite/jni/annotation/NotNull.java
+++ ext/jni/src/org/sqlite/jni/annotation/NotNull.java
@@ -7,38 +7,50 @@
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file houses the NotNull annotaion for the sqlite3 C API.
+** This file houses the NotNull annotation for the sqlite3 C API.
*/
package org.sqlite.jni.annotation;
+import java.lang.annotation.*;
/**
This annotation is for flagging parameters which may not legally be
null or point to closed/finalized C-side resources.
In the case of Java types which map directly to C struct types
- (e.g. {@link org.sqlite.jni.sqlite3}, {@link
- org.sqlite.jni.sqlite3_stmt}, and {@link
- org.sqlite.jni.sqlite3_context}), a closed/finalized resource is
- also considered to be null for purposes this annotation because the
- C-side effect of passing such a handle is the same as if null is
- passed.
+ (e.g. {@link org.sqlite.jni.capi.sqlite3}, {@link
+ org.sqlite.jni.capi.sqlite3_stmt}, and {@link
+ org.sqlite.jni.capi.sqlite3_context}), a closed/finalized resource
+ is also considered to be null for purposes this annotation because
+ the C-side effect of passing such a handle is the same as if null
+ is passed.
When used in the context of Java interfaces which are called
from the C APIs, this annotation communicates that the C API will
never pass a null value to the callback for that parameter.
Passing a null, for this annotation's definition of null, for
any parameter marked with this annoation specifically invokes
- undefined behavior.
+ undefined behavior (see below).
Passing 0 (i.e. C NULL) or a negative value for any long-type
parameter marked with this annoation specifically invokes undefined
- behavior. Such values are treated as C pointers in the JNI
- layer.
+ behavior (see below). Such values are treated as C pointers in the
+ JNI layer.
+
+
Undefined behaviour: the JNI build uses the {@code
+ SQLITE_ENABLE_API_ARMOR} build flag, meaning that the C code
+ invoked with invalid NULL pointers and the like will not invoke
+ undefined behavior in the conventional C sense, but may, for
+ example, return result codes which are not documented for the
+ affected APIs or may otherwise behave unpredictably. In no known
+ cases will such arguments result in C-level code dereferencing a
+ NULL pointer or accessing out-of-bounds (or otherwise invalid)
+ memory. In other words, they may cause unexpected behavior but
+ should never cause an outright crash or security issue.
Note that the C-style API does not throw any exceptions on its
own because it has a no-throw policy in order to retain its C-style
semantics, but it may trigger NullPointerExceptions (or similar) if
passed a null for a parameter flagged with this annotation.
@@ -46,14 +58,14 @@
This annotation is informational only. No policy is in place to
programmatically ensure that NotNull is conformed to in client
code.
This annotation is solely for the use by the classes in the
- org.sqlite package and subpackages, but is made public so that
+ org.sqlite.jni package and subpackages, but is made public so that
javadoc will link to it from the annotated functions. It is not
part of the public API and client-level code must not rely on
it.
*/
-@java.lang.annotation.Documented
-@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.SOURCE)
-@java.lang.annotation.Target(java.lang.annotation.ElementType.PARAMETER)
+@Documented
+@Retention(RetentionPolicy.SOURCE)
+@Target(ElementType.PARAMETER)
public @interface NotNull{}
Index: ext/jni/src/org/sqlite/jni/annotation/Nullable.java
==================================================================
--- ext/jni/src/org/sqlite/jni/annotation/Nullable.java
+++ ext/jni/src/org/sqlite/jni/annotation/Nullable.java
@@ -7,13 +7,14 @@
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file houses the Nullable annotaion for the sqlite3 C API.
+** This file houses the Nullable annotation for the sqlite3 C API.
*/
package org.sqlite.jni.annotation;
+import java.lang.annotation.*;
/**
This annotation is for flagging parameters which may legally be
null, noting that they may behave differently if passed null but
are prepared to expect null as a value. When used in the context of
@@ -24,9 +25,9 @@
This annotation is solely for the use by the classes in this
package but is made public so that javadoc will link to it from the
annotated functions. It is not part of the public API and
client-level code must not rely on it.
*/
-@java.lang.annotation.Documented
-@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.SOURCE)
-@java.lang.annotation.Target(java.lang.annotation.ElementType.PARAMETER)
+@Documented
+@Retention(RetentionPolicy.SOURCE)
+@Target(ElementType.PARAMETER)
public @interface Nullable{}
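
For orientation, this is roughly how the two annotations are meant to appear on a
binding's signature. The method below is purely illustrative and hypothetical (it is
not part of the API); only the parameter annotations are of interest.

  import org.sqlite.jni.annotation.NotNull;
  import org.sqlite.jni.annotation.Nullable;

  final class AnnotationUsageSketch {
    // Hypothetical signature: the first argument must reference a live
    // C-side resource, while the blob may legally be null.
    static int bindOrClear(@NotNull Object stmtHandle, int ndx,
                           @Nullable byte[] blob){
      return 0; // placeholder body; only the signature matters here
    }
  }
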
Index: ext/jni/src/org/sqlite/jni/capi/AggregateFunction.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/AggregateFunction.java
+++ ext/jni/src/org/sqlite/jni/capi/AggregateFunction.java
@@ -39,14 +39,80 @@
/**
Optionally override to be notified when the UDF is finalized by
SQLite.
*/
public void xDestroy() {}
+
+ /**
+ PerContextState assists aggregate and window functions in
+ managing their accumulator state across calls to the UDF's
+ callbacks.
+
+
+     T must be of a type which can be legally stored as a value in
+ java.util.HashMap.
+
+
+     If a given aggregate or window function is called multiple times
+ in a single SQL statement, e.g. SELECT MYFUNC(A), MYFUNC(B)...,
+ then the clients need some way of knowing which call is which so
+ that they can map their state between their various UDF callbacks
+ and reset it via xFinal(). This class takes care of such
+ mappings.
+
+
+     This class works by mapping
+ sqlite3_context.getAggregateContext() to a single piece of
+ state, of a client-defined type (the T part of this class), which
+ persists across a "matching set" of the UDF's callbacks.
+
+
+     This class is a helper providing commonly-needed functionality
+ - it is not required for use with aggregate or window functions.
+ Client UDFs are free to perform such mappings using custom
+ approaches. The provided {@link AggregateFunction} and {@link
+ WindowFunction} classes use this.
+ */
+  public static final class PerContextState<T> {
+    private final java.util.Map<Long,ValueHolder<T>> map
+ = new java.util.HashMap<>();
+
+ /**
+ Should be called from a UDF's xStep(), xValue(), and xInverse()
+ methods, passing it that method's first argument and an initial
+ value for the persistent state. If there is currently no
+ mapping for the given context within the map, one is created
+ using the given initial value, else the existing one is used
+ and the 2nd argument is ignored. It returns a ValueHolder
+ which can be used to modify that state directly without
+ requiring that the client update the underlying map's entry.
+
+
+       The caller is obligated to eventually call
+ takeAggregateState() to clear the mapping.
+ */
+    public ValueHolder<T> getAggregateState(sqlite3_context cx, T initialValue){
+      final Long key = cx.getAggregateContext(true);
+      ValueHolder<T> rc = null==key ? null : map.get(key);
+ if( null==rc ){
+ map.put(key, rc = new ValueHolder<>(initialValue));
+ }
+ return rc;
+ }
+
+ /**
+ Should be called from a UDF's xFinal() method and passed that
+ method's first argument. This function removes the value
+ associated with cx.getAggregateContext() from the map and
+ returns it, returning null if no other UDF method has been
+ called to set up such a mapping. The latter condition will be
+ the case if a UDF is used in a statement which has no result
+ rows.
+ */
+ public T takeAggregateState(sqlite3_context cx){
+      final ValueHolder<T> h = map.remove(cx.getAggregateContext(false));
+ return null==h ? null : h.value;
+ }
+ }
/** Per-invocation state for the UDF. */
-  private final SQLFunction.PerContextState<T> map =
-    new SQLFunction.PerContextState<>();
+  private final PerContextState<T> map = new PerContextState<>();
/**
To be called from the implementation's xStep() method, as well
as the xValue() and xInverse() methods of the {@link WindowFunction}
subclass, to fetch the current per-call UDF state. On the
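
As a minimal sketch of how PerContextState is intended to be used, here is a
COUNT()-like aggregate. It assumes AggregateFunction's callback signatures are
xStep(sqlite3_context, sqlite3_value[]) and xFinal(sqlite3_context), and that
ValueHolder.value is publicly accessible; neither is spelled out in this hunk.

  import org.sqlite.jni.capi.AggregateFunction;
  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.sqlite3_context;
  import org.sqlite.jni.capi.sqlite3_value;

  // Counts its calls. Each textual occurrence of the UDF in a statement
  // gets its own accumulator via PerContextState.
  class MyCount extends AggregateFunction<Integer> {
    private final PerContextState<Integer> counts = new PerContextState<>();

    @Override public void xStep(sqlite3_context cx, sqlite3_value[] args){
      // Fetch (or create) this invocation's accumulator and bump it.
      ++counts.getAggregateState(cx, 0).value;
    }

    @Override public void xFinal(sqlite3_context cx){
      final Integer n = counts.takeAggregateState(cx); // null if xStep never ran
      CApi.sqlite3_result_int(cx, null==n ? 0 : n);
    }
  }

An instance of such a class would then be registered via sqlite3_create_function(),
shown later in this diff.
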
Index: ext/jni/src/org/sqlite/jni/capi/AuthorizerCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/AuthorizerCallback.java
+++ ext/jni/src/org/sqlite/jni/capi/AuthorizerCallback.java
@@ -18,11 +18,12 @@
Callback for use with {@link CApi#sqlite3_set_authorizer}.
*/
public interface AuthorizerCallback extends CallbackProxy {
/**
Must function as described for the C-level
- sqlite3_set_authorizer() callback.
+ sqlite3_set_authorizer() callback. If it throws, the error is
+ converted to a db-level error and the exception is suppressed.
*/
int call(int opId, @Nullable String s1, @Nullable String s2,
@Nullable String s3, @Nullable String s4);
}
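
A small illustration of the callback in use. It assumes CApi exposes
sqlite3_set_authorizer(sqlite3, AuthorizerCallback) and the usual
SQLITE_OK/SQLITE_DENY authorizer result constants; the table name checked here is
arbitrary.

  import org.sqlite.jni.capi.AuthorizerCallback;
  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.sqlite3;

  final class DenySecretsTable {
    static void install(sqlite3 db){
      CApi.sqlite3_set_authorizer(db, new AuthorizerCallback(){
        @Override public int call(int opId, String s1, String s2,
                                  String s3, String s4){
          // s1/s2 carry operation-specific names (e.g. the table name).
          return "secrets".equals(s1) || "secrets".equals(s2)
            ? CApi.SQLITE_DENY : CApi.SQLITE_OK;
        }
      });
    }
  }
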
Index: ext/jni/src/org/sqlite/jni/capi/CApi.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/CApi.java
+++ ext/jni/src/org/sqlite/jni/capi/CApi.java
@@ -7,11 +7,11 @@
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file declares JNI bindings for the sqlite3 C API.
+** This file declares the main JNI bindings for the sqlite3 C API.
*/
package org.sqlite.jni.capi;
import java.nio.charset.StandardCharsets;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
@@ -30,19 +30,10 @@
import static org.sqlite.jni.capi.CApi.*;
}
The C-side part can be found in sqlite3-jni.c.
-
This class is package-private in order to keep Java clients from
- having direct access to the low-level C-style APIs, a design
- decision made by Java developers based on the C-style API being
- riddled with opportunities for Java developers to proverbially shoot
- themselves in the foot with. Third-party copies of this code may
- eliminate that guard by simply changing this class from
- package-private to public. Its methods which are intended to be
- exposed that way are all public.
-
Only functions which materially differ from their C counterparts
are documented here, and only those material differences are
documented. The C documentation is otherwise applicable for these
APIs:
@@ -76,11 +67,11 @@
require special care when taking input from Java. In particular,
Java strings converted to byte arrays for encoding purposes are not
NUL-terminated, and conversion to a Java byte array must sometimes
be careful to add one. Functions which take a length do not require
this so long as the length is provided. Search the CApi class
- for "\0" for many examples.
+ for "\0" for examples.
Further reading:
@@ -126,13 +117,41 @@
numerous Java-side global references active.
This routine returns false without side effects if the current
JNIEnv is not cached, else returns true, but this information is
primarily for testing of the JNI bindings and is not information
- which client-level code can use to make any informed decisions.
+ which client-level code can use to make any informed
+ decisions. Its return type and semantics are not considered
+ stable and may change at any time.
*/
public static native boolean sqlite3_java_uncache_thread();
+
+ /**
+ Returns true if this JVM has JNI-level support for C-level direct
+ memory access using java.nio.ByteBuffer, else returns false.
+ */
+ @Experimental
+ public static native boolean sqlite3_jni_supports_nio();
+
+ /**
+ For internal use only. Sets the given db's error code and
+ (optionally) string. If rc is 0, it defaults to SQLITE_ERROR.
+
+ On success it returns rc. On error it may return a more serious
+ code, such as SQLITE_NOMEM. Returns SQLITE_MISUSE if db is null.
+ */
+ static native int sqlite3_jni_db_error(@NotNull sqlite3 db,
+ int rc, @Nullable String msg);
+
+ /**
+ Convenience overload which uses e.toString() as the error
+ message.
+ */
+ static int sqlite3_jni_db_error(@NotNull sqlite3 db,
+ int rc, @NotNull Exception e){
+ return sqlite3_jni_db_error(db, rc, e.toString());
+ }
//////////////////////////////////////////////////////////////////////
// Maintenance reminder: please keep the sqlite3_.... functions
  // alphabetized. The SQLITE_... values, on the other hand, are
// grouped by category.
@@ -171,17 +190,17 @@
See the AutoExtension class docs for more information.
*/
public static native int sqlite3_auto_extension(@NotNull AutoExtensionCallback callback);
- static native int sqlite3_backup_finish(@NotNull long ptrToBackup);
+ private static native int sqlite3_backup_finish(@NotNull long ptrToBackup);
public static int sqlite3_backup_finish(@NotNull sqlite3_backup b){
- return sqlite3_backup_finish(b.clearNativePointer());
+ return null==b ? 0 : sqlite3_backup_finish(b.clearNativePointer());
}
- static native sqlite3_backup sqlite3_backup_init(
+ private static native sqlite3_backup sqlite3_backup_init(
@NotNull long ptrToDbDest, @NotNull String destTableName,
@NotNull long ptrToDbSrc, @NotNull String srcTableName
);
public static sqlite3_backup sqlite3_backup_init(
@@ -190,37 +209,37 @@
){
return sqlite3_backup_init( dbDest.getNativePointer(), destTableName,
dbSrc.getNativePointer(), srcTableName );
}
- static native int sqlite3_backup_pagecount(@NotNull long ptrToBackup);
+ private static native int sqlite3_backup_pagecount(@NotNull long ptrToBackup);
public static int sqlite3_backup_pagecount(@NotNull sqlite3_backup b){
return sqlite3_backup_pagecount(b.getNativePointer());
}
- static native int sqlite3_backup_remaining(@NotNull long ptrToBackup);
+ private static native int sqlite3_backup_remaining(@NotNull long ptrToBackup);
public static int sqlite3_backup_remaining(@NotNull sqlite3_backup b){
return sqlite3_backup_remaining(b.getNativePointer());
}
- static native int sqlite3_backup_step(@NotNull long ptrToBackup, int nPage);
+ private static native int sqlite3_backup_step(@NotNull long ptrToBackup, int nPage);
public static int sqlite3_backup_step(@NotNull sqlite3_backup b, int nPage){
return sqlite3_backup_step(b.getNativePointer(), nPage);
}
- static native int sqlite3_bind_blob(
+ private static native int sqlite3_bind_blob(
@NotNull long ptrToStmt, int ndx, @Nullable byte[] data, int n
);
/**
If n is negative, SQLITE_MISUSE is returned. If n>data.length
then n is silently truncated to data.length.
*/
- static int sqlite3_bind_blob(
+ public static int sqlite3_bind_blob(
@NotNull sqlite3_stmt stmt, int ndx, @Nullable byte[] data, int n
){
return sqlite3_bind_blob(stmt.getNativePointer(), ndx, data, n);
}
@@ -230,41 +249,120 @@
return (null==data)
? sqlite3_bind_null(stmt.getNativePointer(), ndx)
: sqlite3_bind_blob(stmt.getNativePointer(), ndx, data, data.length);
}
- static native int sqlite3_bind_double(
+ /**
+ Convenience overload which is a simple proxy for
+ sqlite3_bind_nio_buffer().
+ */
+ @Experimental
+ /*public*/ static int sqlite3_bind_blob(
+ @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data,
+ int begin, int n
+ ){
+ return sqlite3_bind_nio_buffer(stmt, ndx, data, begin, n);
+ }
+
+ /**
+    Convenience overload which is equivalent to passing its arguments
+ to sqlite3_bind_nio_buffer() with the values 0 and -1 for the
+ final two arguments.
+ */
+ @Experimental
+ /*public*/ static int sqlite3_bind_blob(
+ @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data
+ ){
+ return sqlite3_bind_nio_buffer(stmt, ndx, data, 0, -1);
+ }
+
+ private static native int sqlite3_bind_double(
@NotNull long ptrToStmt, int ndx, double v
);
public static int sqlite3_bind_double(
@NotNull sqlite3_stmt stmt, int ndx, double v
){
return sqlite3_bind_double(stmt.getNativePointer(), ndx, v);
}
- static native int sqlite3_bind_int(
+ private static native int sqlite3_bind_int(
@NotNull long ptrToStmt, int ndx, int v
);
public static int sqlite3_bind_int(
@NotNull sqlite3_stmt stmt, int ndx, int v
){
return sqlite3_bind_int(stmt.getNativePointer(), ndx, v);
}
- static native int sqlite3_bind_int64(
+ private static native int sqlite3_bind_int64(
@NotNull long ptrToStmt, int ndx, long v
);
public static int sqlite3_bind_int64(@NotNull sqlite3_stmt stmt, int ndx, long v){
return sqlite3_bind_int64( stmt.getNativePointer(), ndx, v );
}
- static native int sqlite3_bind_java_object(
+ private static native int sqlite3_bind_java_object(
@NotNull long ptrToStmt, int ndx, @Nullable Object o
);
+
+ /**
+ Binds the contents of the given buffer object as a blob.
+
+ The byte range of the buffer may be restricted by providing a
+ start index and a number of bytes. beginPos may not be negative.
+    Negative howMany is interpreted as the remainder of the buffer
+    past the given start position, up to the buffer's limit() (as
+    opposed to its capacity()).
+
+ If beginPos+howMany would extend past the limit() of the buffer
+ then SQLITE_ERROR is returned.
+
+ If any of the following are true, this function behaves like
+ sqlite3_bind_null(): the buffer is null, beginPos is at or past
+ the end of the buffer, howMany is 0, or the calculated slice of
+ the blob has a length of 0.
+
+ If ndx is out of range, it returns SQLITE_RANGE, as documented
+ for sqlite3_bind_blob(). If beginPos is negative or if
+ sqlite3_jni_supports_nio() returns false then SQLITE_MISUSE is
+ returned. Note that this function is bound (as it were) by the
+ SQLITE_LIMIT_LENGTH constraint and SQLITE_TOOBIG is returned if
+ the resulting slice of the buffer exceeds that limit.
+
+ This function does not modify the buffer's streaming-related
+ cursors.
+
+ If the buffer is modified in a separate thread while this
+ operation is running, results are undefined and will likely
+ result in corruption of the bound data or a segmentation fault.
+
+ Design note: this function should arguably take a java.nio.Buffer
+ instead of ByteBuffer, but it can only operate on "direct"
+ buffers and the only such class offered by Java is (apparently)
+ ByteBuffer.
+
+ @see https://docs.oracle.com/javase/8/docs/api/java/nio/Buffer.html
+ */
+ @Experimental
+ /*public*/ static native int sqlite3_bind_nio_buffer(
+ @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data,
+ int beginPos, int howMany
+ );
+
+ /**
+ Convenience overload which binds the given buffer's entire
+ contents, up to its limit() (as opposed to its capacity()).
+ */
+ @Experimental
+ /*public*/ static int sqlite3_bind_nio_buffer(
+ @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data
+ ){
+ return sqlite3_bind_nio_buffer(stmt, ndx, data, 0, -1);
+ }
/**
Binds the given object at the given index. If o is null then this behaves like
sqlite3_bind_null().
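
A sketch of the intended usage of the ByteBuffer bind overloads described above,
assuming the @Experimental APIs have been made public (they are currently
package-private) and that the running JVM reports sqlite3_jni_supports_nio().

  import java.nio.ByteBuffer;
  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.sqlite3_stmt;

  final class BindBufferSketch {
    // Bind bytes [16, 16+64) of a direct buffer to parameter 1 of stmt.
    static int bindSlice(sqlite3_stmt stmt){
      if( !CApi.sqlite3_jni_supports_nio() ) return CApi.SQLITE_MISUSE;
      final ByteBuffer buf = ByteBuffer.allocateDirect(1024);
      /* ...populate buf... */
      return CApi.sqlite3_bind_nio_buffer(stmt, 1, buf, 16, 64);
    }
  }
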
@@ -274,17 +372,17 @@
@NotNull sqlite3_stmt stmt, int ndx, @Nullable Object o
){
return sqlite3_bind_java_object(stmt.getNativePointer(), ndx, o);
}
- static native int sqlite3_bind_null(@NotNull long ptrToStmt, int ndx);
+ private static native int sqlite3_bind_null(@NotNull long ptrToStmt, int ndx);
public static int sqlite3_bind_null(@NotNull sqlite3_stmt stmt, int ndx){
return sqlite3_bind_null(stmt.getNativePointer(), ndx);
}
- static native int sqlite3_bind_parameter_count(@NotNull long ptrToStmt);
+ private static native int sqlite3_bind_parameter_count(@NotNull long ptrToStmt);
public static int sqlite3_bind_parameter_count(@NotNull sqlite3_stmt stmt){
return sqlite3_bind_parameter_count(stmt.getNativePointer());
}
@@ -307,19 +405,19 @@
){
final byte[] utf8 = nulTerminateUtf8(paramName);
return null==utf8 ? 0 : sqlite3_bind_parameter_index(stmt.getNativePointer(), utf8);
}
- static native String sqlite3_bind_parameter_name(
+ private static native String sqlite3_bind_parameter_name(
@NotNull long ptrToStmt, int index
);
public static String sqlite3_bind_parameter_name(@NotNull sqlite3_stmt stmt, int index){
return sqlite3_bind_parameter_name(stmt.getNativePointer(), index);
}
- static native int sqlite3_bind_text(
+ private static native int sqlite3_bind_text(
@NotNull long ptrToStmt, int ndx, @Nullable byte[] utf8, int maxBytes
);
/**
Works like the C-level sqlite3_bind_text() but assumes
@@ -359,11 +457,11 @@
return ( null==utf8 )
? sqlite3_bind_null(stmt.getNativePointer(), ndx)
: sqlite3_bind_text(stmt.getNativePointer(), ndx, utf8, utf8.length);
}
- static native int sqlite3_bind_text16(
+ private static native int sqlite3_bind_text16(
@NotNull long ptrToStmt, int ndx, @Nullable byte[] data, int maxBytes
);
/**
Identical to the sqlite3_bind_text() overload with the same
@@ -400,11 +498,11 @@
return (null == data)
? sqlite3_bind_null(stmt.getNativePointer(), ndx)
: sqlite3_bind_text16(stmt.getNativePointer(), ndx, data, data.length);
}
- static native int sqlite3_bind_value(@NotNull long ptrToStmt, int ndx, long ptrToValue);
+ private static native int sqlite3_bind_value(@NotNull long ptrToStmt, int ndx, long ptrToValue);
/**
Functions like the C-level sqlite3_bind_value(), or
sqlite3_bind_null() if val is null.
*/
@@ -411,37 +509,37 @@
public static int sqlite3_bind_value(@NotNull sqlite3_stmt stmt, int ndx, sqlite3_value val){
return sqlite3_bind_value(stmt.getNativePointer(), ndx,
null==val ? 0L : val.getNativePointer());
}
- static native int sqlite3_bind_zeroblob(@NotNull long ptrToStmt, int ndx, int n);
+ private static native int sqlite3_bind_zeroblob(@NotNull long ptrToStmt, int ndx, int n);
public static int sqlite3_bind_zeroblob(@NotNull sqlite3_stmt stmt, int ndx, int n){
return sqlite3_bind_zeroblob(stmt.getNativePointer(), ndx, n);
}
- static native int sqlite3_bind_zeroblob64(
+ private static native int sqlite3_bind_zeroblob64(
@NotNull long ptrToStmt, int ndx, long n
);
public static int sqlite3_bind_zeroblob64(@NotNull sqlite3_stmt stmt, int ndx, long n){
return sqlite3_bind_zeroblob64(stmt.getNativePointer(), ndx, n);
}
- static native int sqlite3_blob_bytes(@NotNull long ptrToBlob);
+ private static native int sqlite3_blob_bytes(@NotNull long ptrToBlob);
public static int sqlite3_blob_bytes(@NotNull sqlite3_blob blob){
return sqlite3_blob_bytes(blob.getNativePointer());
}
- static native int sqlite3_blob_close(@Nullable long ptrToBlob);
+ private static native int sqlite3_blob_close(@Nullable long ptrToBlob);
public static int sqlite3_blob_close(@Nullable sqlite3_blob blob){
- return sqlite3_blob_close(blob.clearNativePointer());
+ return null==blob ? 0 : sqlite3_blob_close(blob.clearNativePointer());
}
- static native int sqlite3_blob_open(
+ private static native int sqlite3_blob_open(
@NotNull long ptrToDb, @NotNull String dbName,
@NotNull String tableName, @NotNull String columnName,
long iRow, int flags, @NotNull OutputPointer.sqlite3_blob out
);
@@ -465,39 +563,225 @@
sqlite3_blob_open(db.getNativePointer(), dbName, tableName, columnName,
iRow, flags, out);
return out.take();
};
- static native int sqlite3_blob_read(
- @NotNull long ptrToBlob, @NotNull byte[] target, int iOffset
+ private static native int sqlite3_blob_read(
+ @NotNull long ptrToBlob, @NotNull byte[] target, int srcOffset
);
+ /**
+ As per C's sqlite3_blob_read(), but writes its output to the
+ given byte array. Note that the final argument is the offset of
+ the source buffer, not the target array.
+ */
public static int sqlite3_blob_read(
- @NotNull sqlite3_blob b, @NotNull byte[] target, int iOffset
+ @NotNull sqlite3_blob src, @NotNull byte[] target, int srcOffset
+ ){
+ return sqlite3_blob_read(src.getNativePointer(), target, srcOffset);
+ }
+
+ /**
+ An internal level of indirection.
+ */
+ @Experimental
+ private static native int sqlite3_blob_read_nio_buffer(
+ @NotNull long ptrToBlob, int srcOffset,
+ @NotNull java.nio.ByteBuffer tgt, int tgtOffset, int howMany
+ );
+
+ /**
+ Reads howMany bytes from offset srcOffset of src into position
+ tgtOffset of tgt.
+
+ Returns SQLITE_MISUSE if src is null, tgt is null, or
+ sqlite3_jni_supports_nio() returns false. Returns SQLITE_ERROR if
+ howMany or either offset are negative. If argument validation
+ succeeds, it returns the result of the underlying call to
+ sqlite3_blob_read() (0 on success).
+ */
+ @Experimental
+ /*public*/ static int sqlite3_blob_read_nio_buffer(
+ @NotNull sqlite3_blob src, int srcOffset,
+ @NotNull java.nio.ByteBuffer tgt, int tgtOffset, int howMany
+ ){
+ return (JNI_SUPPORTS_NIO && src!=null && tgt!=null)
+ ? sqlite3_blob_read_nio_buffer(
+ src.getNativePointer(), srcOffset, tgt, tgtOffset, howMany
+ )
+ : SQLITE_MISUSE;
+ }
+
+ /**
+ Convenience overload which reads howMany bytes from position
+ srcOffset of src and returns the result as a new ByteBuffer.
+
+ srcOffset may not be negative. If howMany is negative, it is
+ treated as all bytes following srcOffset.
+
+    Returns null if sqlite3_jni_supports_nio() returns false, if any
+    arguments are invalid, if the number of bytes to read is 0 or is
+    larger than the src blob, or if the underlying call to
+    sqlite3_blob_read() fails for any reason.
+ */
+ @Experimental
+ /*public*/ static java.nio.ByteBuffer sqlite3_blob_read_nio_buffer(
+ @NotNull sqlite3_blob src, int srcOffset, int howMany
+ ){
+ if( !JNI_SUPPORTS_NIO || src==null ) return null;
+ else if( srcOffset<0 ) return null;
+ final int nB = sqlite3_blob_bytes(src);
+ if( srcOffset>=nB ) return null;
+ else if( howMany<0 ) howMany = nB - srcOffset;
+ if( srcOffset + howMany > nB ) return null;
+ final java.nio.ByteBuffer tgt =
+ java.nio.ByteBuffer.allocateDirect(howMany);
+ final int rc = sqlite3_blob_read_nio_buffer(
+ src.getNativePointer(), srcOffset, tgt, 0, howMany
+ );
+ return 0==rc ? tgt : null;
+ }
+
+ /**
+ Overload alias for sqlite3_blob_read_nio_buffer().
+ */
+ @Experimental
+ /*public*/ static int sqlite3_blob_read(
+ @NotNull sqlite3_blob src, int srcOffset,
+ @NotNull java.nio.ByteBuffer tgt,
+ int tgtOffset, int howMany
+ ){
+ return sqlite3_blob_read_nio_buffer(
+ src, srcOffset, tgt, tgtOffset, howMany
+ );
+ }
+
+ /**
+ Convenience overload which uses 0 for both src and tgt offsets
+ and reads a number of bytes equal to the smaller of
+ sqlite3_blob_bytes(src) and tgt.limit().
+
+ On success it sets tgt.limit() to the number of bytes read. On
+ error, tgt.limit() is not modified.
+
+    Returns 0 on success. Returns SQLITE_MISUSE if either argument is
+ null or sqlite3_jni_supports_nio() returns false. Else it returns
+ the result of the underlying call to sqlite3_blob_read().
+ */
+ @Experimental
+ /*public*/ static int sqlite3_blob_read(
+ @NotNull sqlite3_blob src,
+ @NotNull java.nio.ByteBuffer tgt
){
- return sqlite3_blob_read(b.getNativePointer(), target, iOffset);
+ if(!JNI_SUPPORTS_NIO || src==null || tgt==null) return SQLITE_MISUSE;
+ final int nSrc = sqlite3_blob_bytes(src);
+ final int nTgt = tgt.limit();
+    final int nRead = nTgt<nSrc ? nTgt : nSrc;
+  public static <T> T sqlite3_column_java_object(
+    @NotNull sqlite3_stmt stmt, int ndx, @NotNull Class<T> type
+ ){
+ final Object o = sqlite3_column_java_object(stmt, ndx);
+ return type.isInstance(o) ? (T)o : null;
+ }
+
+ private static native String sqlite3_column_name(@NotNull long ptrToStmt, int ndx);
public static String sqlite3_column_name(@NotNull sqlite3_stmt stmt, int ndx){
return sqlite3_column_name(stmt.getNativePointer(), ndx);
}
- static native String sqlite3_column_database_name(@NotNull long ptrToStmt, int ndx);
-
- public static String sqlite3_column_database_name(@NotNull sqlite3_stmt stmt, int ndx){
- return sqlite3_column_database_name(stmt.getNativePointer(), ndx);
- }
-
- static native String sqlite3_column_origin_name(@NotNull long ptrToStmt, int ndx);
-
+ /**
+ A variant of sqlite3_column_blob() which returns the blob as a
+ ByteBuffer object. Returns null if its argument is null, if
+ sqlite3_jni_supports_nio() is false, or if sqlite3_column_blob()
+ would return null for the same inputs.
+ */
+ @Experimental
+ /*public*/ static native java.nio.ByteBuffer sqlite3_column_nio_buffer(
+ @NotNull sqlite3_stmt stmt, int ndx
+ );
+
+ private static native String sqlite3_column_origin_name(@NotNull long ptrToStmt, int ndx);
+
+ /**
+ Only available if built with SQLITE_ENABLE_COLUMN_METADATA.
+ */
public static String sqlite3_column_origin_name(@NotNull sqlite3_stmt stmt, int ndx){
return sqlite3_column_origin_name(stmt.getNativePointer(), ndx);
}
- static native String sqlite3_column_table_name(@NotNull long ptrToStmt, int ndx);
+ private static native String sqlite3_column_table_name(@NotNull long ptrToStmt, int ndx);
+ /**
+ Only available if built with SQLITE_ENABLE_COLUMN_METADATA.
+ */
public static String sqlite3_column_table_name(@NotNull sqlite3_stmt stmt, int ndx){
return sqlite3_column_table_name(stmt.getNativePointer(), ndx);
}
/**
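
A sketch of reading a whole blob into a ByteBuffer using the experimental helpers
above. It assumes those helpers are public and that sqlite3_blob_open() has the
convenience overload, suggested by the preceding hunk, which returns the handle
directly (flags=0 opens the blob read-only).

  import java.nio.ByteBuffer;
  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.sqlite3;
  import org.sqlite.jni.capi.sqlite3_blob;

  final class BlobToBufferSketch {
    static ByteBuffer readAll(sqlite3 db, String table, String col, long rowid){
      final sqlite3_blob b =
        CApi.sqlite3_blob_open(db, "main", table, col, rowid, 0);
      if( null==b ) return null;
      try{
        // howMany<0 means "everything from srcOffset to the end of the blob".
        return CApi.sqlite3_blob_read_nio_buffer(b, 0, -1);
      }finally{
        CApi.sqlite3_blob_close(b);
      }
    }
  }
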
@@ -670,21 +999,21 @@
// }
// sqlite3_value_free(v);
// return rv;
// }
- static native int sqlite3_column_type(@NotNull long ptrToStmt, int ndx);
+ private static native int sqlite3_column_type(@NotNull long ptrToStmt, int ndx);
public static int sqlite3_column_type(@NotNull sqlite3_stmt stmt, int ndx){
return sqlite3_column_type(stmt.getNativePointer(), ndx);
}
public static native sqlite3_value sqlite3_column_value(
@NotNull sqlite3_stmt stmt, int ndx
);
- static native int sqlite3_collation_needed(
+ private static native int sqlite3_collation_needed(
@NotNull long ptrToDb, @Nullable CollationNeededCallback callback
);
/**
This functions like C's sqlite3_collation_needed16() because
@@ -694,11 +1023,11 @@
@NotNull sqlite3 db, @Nullable CollationNeededCallback callback
){
return sqlite3_collation_needed(db.getNativePointer(), callback);
}
- static native CommitHookCallback sqlite3_commit_hook(
+ private static native CommitHookCallback sqlite3_commit_hook(
@NotNull long ptrToDb, @Nullable CommitHookCallback hook
);
public static CommitHookCallback sqlite3_commit_hook(
@NotNull sqlite3 db, @Nullable CommitHookCallback hook
@@ -724,10 +1053,28 @@
*/
public static int sqlite3_complete(@NotNull String sql){
return sqlite3_complete( nulTerminateUtf8(sql) );
}
+ /**
+ Internal level of indirection for sqlite3_config(int).
+ */
+ private static native int sqlite3_config__enable(int op);
+
+ /**
+ Internal level of indirection for sqlite3_config(ConfigLogCallback).
+ */
+ private static native int sqlite3_config__CONFIG_LOG(
+ @Nullable ConfigLogCallback logger
+ );
+
+ /**
+ Internal level of indirection for sqlite3_config(ConfigSqlLogCallback).
+ */
+ private static native int sqlite3_config__SQLLOG(
+ @Nullable ConfigSqlLogCallback logger
+ );
/**
Works like in the C API with the exception that it only supports
    the following subset of configuration flags:
@@ -740,16 +1087,18 @@
Note that sqlite3_config() is not threadsafe with regards to
the rest of the library. This must not be called when any other
library APIs are being called.
*/
- public static native int sqlite3_config(int op);
+ public static int sqlite3_config(int op){
+ return sqlite3_config__enable(op);
+ }
/**
If the native library was built with SQLITE_ENABLE_SQLLOG defined
then this acts as a proxy for C's
- sqlite3_config(SQLITE_ENABLE_SQLLOG,...). This sets or clears the
+ sqlite3_config(SQLITE_CONFIG_SQLLOG,...). This sets or clears the
logger. If installation of a logger fails, any previous logger is
retained.
If not built with SQLITE_ENABLE_SQLLOG defined, this returns
SQLITE_MISUSE.
@@ -756,17 +1105,21 @@
Note that sqlite3_config() is not threadsafe with regards to
the rest of the library. This must not be called when any other
library APIs are being called.
*/
- public static native int sqlite3_config( @Nullable ConfigSqllogCallback logger );
+ public static int sqlite3_config( @Nullable ConfigSqlLogCallback logger ){
+ return sqlite3_config__SQLLOG(logger);
+ }
/**
The sqlite3_config() overload for handling the SQLITE_CONFIG_LOG
option.
*/
- public static native int sqlite3_config( @Nullable ConfigLogCallback logger );
+ public static int sqlite3_config( @Nullable ConfigLogCallback logger ){
+ return sqlite3_config__CONFIG_LOG(logger);
+ }
/**
Unlike the C API, this returns null if its argument is
null (as opposed to invoking UB).
*/
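
For illustration, installing the SQLLOG listener (the ConfigSqlLogCallback interface
appears later in this diff) might look as follows. The call returns SQLITE_MISUSE
unless the native library was built with SQLITE_ENABLE_SQLLOG, and, as noted above,
sqlite3_config() must not be invoked while other library APIs are in use.

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.ConfigSqlLogCallback;
  import org.sqlite.jni.capi.sqlite3;

  final class SqlLogSketch {
    static int install(){
      return CApi.sqlite3_config(new ConfigSqlLogCallback(){
        @Override public void call(sqlite3 db, String msg, int msgType){
          System.err.println("SQLLOG("+msgType+"): "+msg);
        }
      });
    }
  }
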
@@ -793,11 +1146,11 @@
public static native int sqlite3_create_function(
@NotNull sqlite3 db, @NotNull String functionName,
int nArg, int eTextRep, @NotNull SQLFunction func
);
- static native int sqlite3_data_count(@NotNull long ptrToStmt);
+ private static native int sqlite3_data_count(@NotNull long ptrToStmt);
public static int sqlite3_data_count(@NotNull sqlite3_stmt stmt){
return sqlite3_data_count(stmt.getNativePointer());
}
@@ -805,11 +1158,11 @@
Overload for sqlite3_db_config() calls which take (int,int*)
variadic arguments. Returns SQLITE_MISUSE if op is not one of the
SQLITE_DBCONFIG_... options which uses this call form.
Unlike the C API, this returns SQLITE_MISUSE if its db argument
- are null (as opposed to invoking UB).
+ is null (as opposed to invoking UB).
*/
public static native int sqlite3_db_config(
@NotNull sqlite3 db, int op, int onOff, @Nullable OutputPointer.Int32 out
);
@@ -828,11 +1181,10 @@
public static String sqlite3_db_name(@NotNull sqlite3 db, int ndx){
return null==db ? null : sqlite3_db_name(db.getNativePointer(), ndx);
}
-
public static native String sqlite3_db_filename(
@NotNull sqlite3 db, @NotNull String dbName
);
public static native sqlite3 sqlite3_db_handle(@NotNull sqlite3_stmt stmt);
@@ -848,11 +1200,11 @@
public static native int sqlite3_errcode(@NotNull sqlite3 db);
public static native String sqlite3_errmsg(@NotNull sqlite3 db);
- static native int sqlite3_error_offset(@NotNull long ptrToDb);
+ private static native int sqlite3_error_offset(@NotNull long ptrToDb);
/**
Note that the returned byte offset values assume UTF-8-encoded
inputs, so won't always match character offsets in Java Strings.
*/
@@ -862,31 +1214,31 @@
public static native String sqlite3_errstr(int resultCode);
public static native String sqlite3_expanded_sql(@NotNull sqlite3_stmt stmt);
- static native int sqlite3_extended_errcode(@NotNull long ptrToDb);
+ private static native int sqlite3_extended_errcode(@NotNull long ptrToDb);
public static int sqlite3_extended_errcode(@NotNull sqlite3 db){
return sqlite3_extended_errcode(db.getNativePointer());
}
- public static native boolean sqlite3_extended_result_codes(
- @NotNull sqlite3 db, boolean onoff
+ public static native int sqlite3_extended_result_codes(
+ @NotNull sqlite3 db, boolean on
);
- static native boolean sqlite3_get_autocommit(@NotNull long ptrToDb);
+ private static native boolean sqlite3_get_autocommit(@NotNull long ptrToDb);
public static boolean sqlite3_get_autocommit(@NotNull sqlite3 db){
return sqlite3_get_autocommit(db.getNativePointer());
}
public static native Object sqlite3_get_auxdata(
@NotNull sqlite3_context cx, int n
);
- static native int sqlite3_finalize(long ptrToStmt);
+ private static native int sqlite3_finalize(long ptrToStmt);
public static int sqlite3_finalize(@NotNull sqlite3_stmt stmt){
return null==stmt ? 0 : sqlite3_finalize(stmt.clearNativePointer());
}
@@ -1164,45 +1516,53 @@
A convenience wrapper around sqlite3_prepare_v3() which accepts
an arbitrary amount of input provided as a UTF-8-encoded byte
array. It loops over the input bytes looking for
statements. Each one it finds is passed to p.call(), passing
ownership of it to that function. If p.call() returns 0, looping
- continues, else the loop stops.
+ continues, else the loop stops and p.call()'s result code is
+ returned. If preparation of any given segment fails, looping
+ stops and that result code is returned.
-
-      If p.call() throws, the exception is propagated.
+
+      If p.call() throws, the exception is converted to a db-level
+ error and a non-0 code is returned, in order to retain the
+ C-style error semantics of the API.
How each statement is handled, including whether it is finalized
or not, is up to the callback object. e.g. the callback might
collect them for later use. If it does not collect them then it
must finalize them. See PrepareMultiCallback.Finalize for a
simple proxy which does that.
*/
public static int sqlite3_prepare_multi(
@NotNull sqlite3 db, @NotNull byte[] sqlUtf8,
- int preFlags,
+ int prepFlags,
@NotNull PrepareMultiCallback p){
final OutputPointer.Int32 oTail = new OutputPointer.Int32();
int pos = 0, n = 1;
byte[] sqlChunk = sqlUtf8;
int rc = 0;
final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt();
-    while(0==rc && pos<sqlChunk.length){
       sqlite3_stmt stmt = null;
-      if(pos > 0){
+ if( pos>0 ){
sqlChunk = Arrays.copyOfRange(sqlChunk, pos,
sqlChunk.length);
}
if( 0==sqlChunk.length ) break;
- rc = sqlite3_prepare_v3(db, sqlChunk, preFlags, outStmt, oTail);
+ rc = sqlite3_prepare_v3(db, sqlChunk, prepFlags, outStmt, oTail);
if( 0!=rc ) break;
pos = oTail.value;
stmt = outStmt.take();
- if( null == stmt ){
- // empty statement was parsed.
+ if( null==stmt ){
+ // empty statement (whitespace/comments)
continue;
}
- rc = p.call(stmt);
+ try{
+ rc = p.call(stmt);
+ }catch(Exception e){
+ rc = sqlite3_jni_db_error( db, SQLITE_ERROR, e );
+ }
}
return rc;
}
/**
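
A sketch of driving sqlite3_prepare_multi() with the String[] convenience overload
shown in the next hunk. It assumes PrepareMultiCallback.Finalize wraps a callback via
its constructor, as its description later in this diff suggests.

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.PrepareMultiCallback;
  import org.sqlite.jni.capi.sqlite3;
  import org.sqlite.jni.capi.sqlite3_stmt;

  final class RunScriptSketch {
    // Steps every statement in the script to completion; Finalize takes
    // care of finalizing each statement once call() returns.
    static int run(sqlite3 db, String sql){
      return CApi.sqlite3_prepare_multi(
        db, new String[]{sql},
        new PrepareMultiCallback.Finalize(new PrepareMultiCallback(){
          @Override public int call(sqlite3_stmt st){
            int rc = CApi.sqlite3_step(st);
            while( CApi.SQLITE_ROW==rc ) rc = CApi.sqlite3_step(st);
            return CApi.SQLITE_DONE==rc ? 0 : rc;
          }
        })
      );
    }
  }
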
@@ -1254,11 +1614,11 @@
@NotNull sqlite3 db, @NotNull String[] sql,
@NotNull PrepareMultiCallback p){
return sqlite3_prepare_multi(db, sql, 0, p);
}
- static native int sqlite3_preupdate_blobwrite(@NotNull long ptrToDb);
+ private static native int sqlite3_preupdate_blobwrite(@NotNull long ptrToDb);
/**
If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this
acts as a proxy for C's sqlite3_preupdate_blobwrite(), else it returns
SQLITE_MISUSE with no side effects.
@@ -1265,11 +1625,11 @@
*/
public static int sqlite3_preupdate_blobwrite(@NotNull sqlite3 db){
return sqlite3_preupdate_blobwrite(db.getNativePointer());
}
- static native int sqlite3_preupdate_count(@NotNull long ptrToDb);
+ private static native int sqlite3_preupdate_count(@NotNull long ptrToDb);
/**
If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this
acts as a proxy for C's sqlite3_preupdate_count(), else it returns
SQLITE_MISUSE with no side effects.
@@ -1276,11 +1636,11 @@
*/
public static int sqlite3_preupdate_count(@NotNull sqlite3 db){
return sqlite3_preupdate_count(db.getNativePointer());
}
- static native int sqlite3_preupdate_depth(@NotNull long ptrToDb);
+ private static native int sqlite3_preupdate_depth(@NotNull long ptrToDb);
/**
If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this
acts as a proxy for C's sqlite3_preupdate_depth(), else it returns
SQLITE_MISUSE with no side effects.
@@ -1287,11 +1647,11 @@
*/
public static int sqlite3_preupdate_depth(@NotNull sqlite3 db){
return sqlite3_preupdate_depth(db.getNativePointer());
}
- static native PreupdateHookCallback sqlite3_preupdate_hook(
+ private static native PreupdateHookCallback sqlite3_preupdate_hook(
@NotNull long ptrToDb, @Nullable PreupdateHookCallback hook
);
/**
If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this
@@ -1302,17 +1662,26 @@
@NotNull sqlite3 db, @Nullable PreupdateHookCallback hook
){
return sqlite3_preupdate_hook(db.getNativePointer(), hook);
}
- static native int sqlite3_preupdate_new(@NotNull long ptrToDb, int col,
+ private static native int sqlite3_preupdate_new(@NotNull long ptrToDb, int col,
@NotNull OutputPointer.sqlite3_value out);
/**
If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined,
this acts as a proxy for C's sqlite3_preupdate_new(), else it
returns SQLITE_MISUSE with no side effects.
+
+ WARNING: client code _must not_ hold a reference to the returned
+ sqlite3_value object beyond the scope of the preupdate hook in
+ which this function is called. Doing so will leave the client
+ holding a stale pointer, the address of which could point to
+ anything at all after the pre-update hook is complete. This API
+ has no way to record such objects and clear/invalidate them at
+ the end of a pre-update hook. We "could" add infrastructure to do
+      so, but it would require significant levels of bookkeeping.
*/
public static int sqlite3_preupdate_new(@NotNull sqlite3 db, int col,
@NotNull OutputPointer.sqlite3_value out){
return sqlite3_preupdate_new(db.getNativePointer(), col, out);
}
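
Given the warning above, a hook should copy whatever it needs out of the
sqlite3_value before returning. A minimal sketch of such a helper, intended to be
called from inside a PreupdateHookCallback and requiring a library built with
SQLITE_ENABLE_PREUPDATE_HOOK:

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.OutputPointer;
  import org.sqlite.jni.capi.sqlite3;
  import org.sqlite.jni.capi.sqlite3_value;

  final class PreupdateCopySketch {
    // Copies column col's new value into a plain Java Long right away,
    // rather than retaining the sqlite3_value past the hook's lifetime.
    static Long newValueAsLong(sqlite3 db, int col){
      final OutputPointer.sqlite3_value out = new OutputPointer.sqlite3_value();
      if( 0 != CApi.sqlite3_preupdate_new(db, col, out) ) return null;
      final sqlite3_value v = out.take();
      return null==v ? null : CApi.sqlite3_value_int64(v);
    }
  }
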
@@ -1325,17 +1694,20 @@
final OutputPointer.sqlite3_value out = new OutputPointer.sqlite3_value();
sqlite3_preupdate_new(db.getNativePointer(), col, out);
return out.take();
}
- static native int sqlite3_preupdate_old(@NotNull long ptrToDb, int col,
+ private static native int sqlite3_preupdate_old(@NotNull long ptrToDb, int col,
@NotNull OutputPointer.sqlite3_value out);
/**
If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined,
this acts as a proxy for C's sqlite3_preupdate_old(), else it
returns SQLITE_MISUSE with no side effects.
+
+ WARNING: see warning in sqlite3_preupdate_new() regarding the
+ potential for stale sqlite3_value handles.
*/
public static int sqlite3_preupdate_old(@NotNull sqlite3 db, int col,
@NotNull OutputPointer.sqlite3_value out){
return sqlite3_preupdate_old(db.getNativePointer(), col, out);
}
@@ -1376,11 +1748,11 @@
proxies. eTextRep must be one of SQLITE_UTF8 or SQLITE_UTF16 and
msg must be encoded correspondingly. Any other eTextRep value
results in the C-level sqlite3_result_error() being called with a
complaint about the invalid argument.
*/
- static native void sqlite3_result_error(
+ private static native void sqlite3_result_error(
@NotNull sqlite3_context cx, @NotNull byte[] msg, int eTextRep
);
public static void sqlite3_result_error(
@NotNull sqlite3_context cx, @NotNull byte[] utf8
@@ -1430,14 +1802,10 @@
public static native void sqlite3_result_error_code(
@NotNull sqlite3_context cx, int c
);
- public static native void sqlite3_result_null(
- @NotNull sqlite3_context cx
- );
-
public static native void sqlite3_result_int(
@NotNull sqlite3_context cx, int v
);
public static native void sqlite3_result_int64(
@@ -1452,19 +1820,55 @@
This is implemented in terms of C's sqlite3_result_pointer(),
but that function is not exposed to JNI because (A)
cross-language semantic mismatch and (B) Java doesn't need that
argument for its intended purpose (type safety).
-
-      Note that there is no sqlite3_column_java_object(), as the
- C-level API has no sqlite3_column_pointer() to proxy.
-
@see #sqlite3_value_java_object
@see #sqlite3_bind_java_object
*/
public static native void sqlite3_result_java_object(
@NotNull sqlite3_context cx, @NotNull Object o
);
+
+ /**
+ Similar to sqlite3_bind_nio_buffer(), this works like
+ sqlite3_result_blob() but accepts a java.nio.ByteBuffer as its
+ input source. See sqlite3_bind_nio_buffer() for the semantics of
+ the second and subsequent arguments.
+
+    If cx is null then this function will silently fail. If
+    sqlite3_jni_supports_nio() returns false or begin is negative,
+    an error result is set. If (begin+n) extends beyond the end of
+    the buffer, it is silently truncated to fit.
+
+    If any of the following apply, this function behaves like
+    sqlite3_result_null(): the blob is null or the resulting slice
+    of the blob is empty.
+
+ If the resulting slice of the buffer exceeds SQLITE_LIMIT_LENGTH
+ then this function behaves like sqlite3_result_error_toobig().
+ */
+ @Experimental
+ /*public*/ static native void sqlite3_result_nio_buffer(
+ @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob,
+ int begin, int n
+ );
+
+ /**
+ Convenience overload which uses the whole input object
+ as the result blob content.
+ */
+ @Experimental
+ /*public*/ static void sqlite3_result_nio_buffer(
+ @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob
+ ){
+ sqlite3_result_nio_buffer(cx, blob, 0, -1);
+ }
+
+ public static native void sqlite3_result_null(
+ @NotNull sqlite3_context cx
+ );
public static void sqlite3_result_set(
@NotNull sqlite3_context cx, @NotNull Boolean v
){
sqlite3_result_int(cx, v ? 1 : 0);
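
A sketch of a UDF which produces its blob result from a direct ByteBuffer, assuming
the experimental result_nio_buffer() API is made public and that ScalarFunction's
callback signature is xFunc(sqlite3_context, sqlite3_value[]) (neither is shown in
this hunk).

  import java.nio.ByteBuffer;
  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.ScalarFunction;
  import org.sqlite.jni.capi.sqlite3_context;
  import org.sqlite.jni.capi.sqlite3_value;

  class FourByteBlob extends ScalarFunction {
    @Override public void xFunc(sqlite3_context cx, sqlite3_value[] args){
      final ByteBuffer buf = ByteBuffer.allocateDirect(4);
      buf.put(new byte[]{1, 2, 3, 4});
      // The two-argument overload uses the whole buffer, up to its limit().
      CApi.sqlite3_result_nio_buffer(cx, buf);
    }
  }
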
@@ -1521,10 +1925,14 @@
@NotNull sqlite3_context cx, @Nullable byte[] blob
){
if( null==blob ) sqlite3_result_null(cx);
else sqlite3_result_blob(cx, blob, blob.length);
}
+
+ public static native void sqlite3_result_subtype(
+ @NotNull sqlite3_context cx, int val
+ );
public static native void sqlite3_result_value(
@NotNull sqlite3_context cx, @NotNull sqlite3_value v
);
@@ -1547,10 +1955,33 @@
public static void sqlite3_result_blob(
@NotNull sqlite3_context cx, @Nullable byte[] blob
){
sqlite3_result_blob(cx, blob, (int)(null==blob ? 0 : blob.length));
}
+
+ /**
+ Convenience overload which behaves like
+ sqlite3_result_nio_buffer().
+ */
+ @Experimental
+ /*public*/ static void sqlite3_result_blob(
+ @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob,
+ int begin, int n
+ ){
+ sqlite3_result_nio_buffer(cx, blob, begin, n);
+ }
+
+ /**
+ Convenience overload which behaves like the two-argument overload of
+ sqlite3_result_nio_buffer().
+ */
+ @Experimental
+ /*public*/ static void sqlite3_result_blob(
+ @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob
+ ){
+ sqlite3_result_nio_buffer(cx, blob);
+ }
/**
Binds the given text using C's sqlite3_result_blob64() unless:
@@ -1605,11 +2036,12 @@
/**
Binds the given text using C's sqlite3_result_text64() unless:
-
-      text is null: translates to a call to sqlite3_result_null()
+
+      text is null: translates to a call to {@link
+ #sqlite3_result_null}
text is too large: translates to a call to
{@link #sqlite3_result_error_toobig}
The @param encoding argument has an invalid value: translates to
@@ -1649,11 +2081,11 @@
final byte[] b = text.getBytes(StandardCharsets.UTF_16);
sqlite3_result_text64(cx, b, b.length, SQLITE_UTF16);
}
}
- static native RollbackHookCallback sqlite3_rollback_hook(
+ private static native RollbackHookCallback sqlite3_rollback_hook(
@NotNull long ptrToDb, @Nullable RollbackHookCallback hook
);
public static RollbackHookCallback sqlite3_rollback_hook(
@NotNull sqlite3 db, @Nullable RollbackHookCallback hook
@@ -1701,24 +2133,28 @@
public static native int sqlite3_status64(
int op, @NotNull OutputPointer.Int64 pCurrent,
@NotNull OutputPointer.Int64 pHighwater, boolean reset
);
- public static native int sqlite3_step(@NotNull sqlite3_stmt stmt);
+ private static native int sqlite3_step(@NotNull long ptrToStmt);
+
+ public static int sqlite3_step(@NotNull sqlite3_stmt stmt){
+ return null==stmt ? SQLITE_MISUSE : sqlite3_step(stmt.getNativePointer());
+ }
public static native boolean sqlite3_stmt_busy(@NotNull sqlite3_stmt stmt);
- static native int sqlite3_stmt_explain(@NotNull long ptrToStmt, int op);
+ private static native int sqlite3_stmt_explain(@NotNull long ptrToStmt, int op);
public static int sqlite3_stmt_explain(@NotNull sqlite3_stmt stmt, int op){
- return sqlite3_stmt_explain(stmt.getNativePointer(), op);
+ return null==stmt ? SQLITE_MISUSE : sqlite3_stmt_explain(stmt.getNativePointer(), op);
}
- static native int sqlite3_stmt_isexplain(@NotNull long ptrToStmt);
+ private static native int sqlite3_stmt_isexplain(@NotNull long ptrToStmt);
public static int sqlite3_stmt_isexplain(@NotNull sqlite3_stmt stmt){
- return sqlite3_stmt_isexplain(stmt.getNativePointer());
+ return null==stmt ? 0 : sqlite3_stmt_isexplain(stmt.getNativePointer());
}
public static native boolean sqlite3_stmt_readonly(@NotNull sqlite3_stmt stmt);
public static native int sqlite3_stmt_status(
@@ -1735,11 +2171,11 @@
String-to-byte-array conversion in the Java implementation
(sqlite3_strglob(String,String)) than to do that in C, so that
signature is the public-facing one.
*/
private static native int sqlite3_strglob(
- @NotNull byte[] glob, @NotNull byte[] nullTerminatedUtf8
+ @NotNull byte[] glob, @NotNull byte[] nulTerminatedUtf8
);
public static int sqlite3_strglob(
@NotNull String glob, @NotNull String txt
){
@@ -1749,11 +2185,11 @@
/**
The LIKE counterpart of the private sqlite3_strglob() method.
*/
private static native int sqlite3_strlike(
- @NotNull byte[] glob, @NotNull byte[] nullTerminatedUtf8,
+ @NotNull byte[] glob, @NotNull byte[] nulTerminatedUtf8,
int escChar
);
public static int sqlite3_strlike(
@NotNull String glob, @NotNull String txt, char escChar
@@ -1761,11 +2197,11 @@
return sqlite3_strlike(nulTerminateUtf8(glob),
nulTerminateUtf8(txt),
(int)escChar);
}
- static native int sqlite3_system_errno(@NotNull long ptrToDb);
+ private static native int sqlite3_system_errno(@NotNull long ptrToDb);
public static int sqlite3_system_errno(@NotNull sqlite3 db){
return sqlite3_system_errno(db.getNativePointer());
}
@@ -1807,17 +2243,17 @@
) ? out : null;
}
public static native int sqlite3_threadsafe();
- static native int sqlite3_total_changes(@NotNull long ptrToDb);
+ private static native int sqlite3_total_changes(@NotNull long ptrToDb);
public static int sqlite3_total_changes(@NotNull sqlite3 db){
return sqlite3_total_changes(db.getNativePointer());
}
- static native long sqlite3_total_changes64(@NotNull long ptrToDb);
+ private static native long sqlite3_total_changes64(@NotNull long ptrToDb);
public static long sqlite3_total_changes64(@NotNull sqlite3 db){
return sqlite3_total_changes64(db.getNativePointer());
}
@@ -1836,11 +2272,11 @@
public static native int sqlite3_txn_state(
@NotNull sqlite3 db, @Nullable String zSchema
);
- static native UpdateHookCallback sqlite3_update_hook(
+ private static native UpdateHookCallback sqlite3_update_hook(
@NotNull long ptrToDb, @Nullable UpdateHookCallback hook
);
public static UpdateHookCallback sqlite3_update_hook(
@NotNull sqlite3 db, @Nullable UpdateHookCallback hook
@@ -1856,71 +2292,71 @@
Is not relevant in the JNI binding, as its feature is replaced by
the ability to pass an object, including any relevant state, to
sqlite3_create_function().
*/
- static native byte[] sqlite3_value_blob(@NotNull long ptrToValue);
+ private static native byte[] sqlite3_value_blob(@NotNull long ptrToValue);
public static byte[] sqlite3_value_blob(@NotNull sqlite3_value v){
return sqlite3_value_blob(v.getNativePointer());
}
- static native int sqlite3_value_bytes(@NotNull long ptrToValue);
+ private static native int sqlite3_value_bytes(@NotNull long ptrToValue);
public static int sqlite3_value_bytes(@NotNull sqlite3_value v){
return sqlite3_value_bytes(v.getNativePointer());
}
- static native int sqlite3_value_bytes16(@NotNull long ptrToValue);
+ private static native int sqlite3_value_bytes16(@NotNull long ptrToValue);
public static int sqlite3_value_bytes16(@NotNull sqlite3_value v){
return sqlite3_value_bytes16(v.getNativePointer());
}
- static native double sqlite3_value_double(@NotNull long ptrToValue);
+ private static native double sqlite3_value_double(@NotNull long ptrToValue);
public static double sqlite3_value_double(@NotNull sqlite3_value v){
return sqlite3_value_double(v.getNativePointer());
}
- static native sqlite3_value sqlite3_value_dup(@NotNull long ptrToValue);
+ private static native sqlite3_value sqlite3_value_dup(@NotNull long ptrToValue);
public static sqlite3_value sqlite3_value_dup(@NotNull sqlite3_value v){
return sqlite3_value_dup(v.getNativePointer());
}
- static native int sqlite3_value_encoding(@NotNull long ptrToValue);
+ private static native int sqlite3_value_encoding(@NotNull long ptrToValue);
public static int sqlite3_value_encoding(@NotNull sqlite3_value v){
return sqlite3_value_encoding(v.getNativePointer());
}
- static native void sqlite3_value_free(@Nullable long ptrToValue);
+ private static native void sqlite3_value_free(@Nullable long ptrToValue);
public static void sqlite3_value_free(@Nullable sqlite3_value v){
- sqlite3_value_free(v.getNativePointer());
+ if( null!=v ) sqlite3_value_free(v.clearNativePointer());
}
- static native boolean sqlite3_value_frombind(@NotNull long ptrToValue);
+ private static native boolean sqlite3_value_frombind(@NotNull long ptrToValue);
public static boolean sqlite3_value_frombind(@NotNull sqlite3_value v){
return sqlite3_value_frombind(v.getNativePointer());
}
- static native int sqlite3_value_int(@NotNull long ptrToValue);
+ private static native int sqlite3_value_int(@NotNull long ptrToValue);
public static int sqlite3_value_int(@NotNull sqlite3_value v){
return sqlite3_value_int(v.getNativePointer());
}
- static native long sqlite3_value_int64(@NotNull long ptrToValue);
+ private static native long sqlite3_value_int64(@NotNull long ptrToValue);
public static long sqlite3_value_int64(@NotNull sqlite3_value v){
return sqlite3_value_int64(v.getNativePointer());
}
- static native Object sqlite3_value_java_object(@NotNull long ptrToValue);
+ private static native Object sqlite3_value_java_object(@NotNull long ptrToValue);
/**
If the given value was set using {@link
#sqlite3_result_java_object} then this function returns that
object, else it returns null.
@@ -1936,35 +2372,46 @@
A variant of sqlite3_value_java_object() which returns the
fetched object cast to T if the object is an instance of the
given Class, else it returns null.
*/
@SuppressWarnings("unchecked")
-  public static <T> T sqlite3_value_java_casted(@NotNull sqlite3_value v,
+  public static <T> T sqlite3_value_java_object(@NotNull sqlite3_value v,
                                                 @NotNull Class<T> type){
final Object o = sqlite3_value_java_object(v);
return type.isInstance(o) ? (T)o : null;
}
- static native int sqlite3_value_nochange(@NotNull long ptrToValue);
+ /**
+    A variant of sqlite3_value_blob() which returns the blob as a
+ ByteBuffer object. Returns null if its argument is null, if
+ sqlite3_jni_supports_nio() is false, or if sqlite3_value_blob()
+ would return null for the same input.
+ */
+ @Experimental
+ /*public*/ static native java.nio.ByteBuffer sqlite3_value_nio_buffer(
+ @NotNull sqlite3_value v
+ );
+
+ private static native int sqlite3_value_nochange(@NotNull long ptrToValue);
public static int sqlite3_value_nochange(@NotNull sqlite3_value v){
return sqlite3_value_nochange(v.getNativePointer());
}
- static native int sqlite3_value_numeric_type(@NotNull long ptrToValue);
+ private static native int sqlite3_value_numeric_type(@NotNull long ptrToValue);
public static int sqlite3_value_numeric_type(@NotNull sqlite3_value v){
return sqlite3_value_numeric_type(v.getNativePointer());
}
- static native int sqlite3_value_subtype(@NotNull long ptrToValue);
+ private static native int sqlite3_value_subtype(@NotNull long ptrToValue);
public static int sqlite3_value_subtype(@NotNull sqlite3_value v){
return sqlite3_value_subtype(v.getNativePointer());
}
- static native byte[] sqlite3_value_text(@NotNull long ptrToValue);
+ private static native byte[] sqlite3_value_text(@NotNull long ptrToValue);
/**
     Functions identically to the C API, and this note is just to
stress that the returned bytes are encoded as UTF-8. It returns
null if the underlying C-level sqlite3_value_text() returns NULL
@@ -1972,17 +2419,17 @@
*/
public static byte[] sqlite3_value_text(@NotNull sqlite3_value v){
return sqlite3_value_text(v.getNativePointer());
}
- static native String sqlite3_value_text16(@NotNull long ptrToValue);
+ private static native String sqlite3_value_text16(@NotNull long ptrToValue);
public static String sqlite3_value_text16(@NotNull sqlite3_value v){
return sqlite3_value_text16(v.getNativePointer());
}
- static native int sqlite3_value_type(@NotNull long ptrToValue);
+ private static native int sqlite3_value_type(@NotNull long ptrToValue);
public static int sqlite3_value_type(@NotNull sqlite3_value v){
return sqlite3_value_type(v.getNativePointer());
}
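
A sketch of the java_object round trip: one UDF wraps a plain Java object as its
result, and a consumer retrieves it in a type-safe way with the Class-taking overload
renamed above. ScalarFunction's xFunc signature is assumed as before.

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.ScalarFunction;
  import org.sqlite.jni.capi.sqlite3_context;
  import org.sqlite.jni.capi.sqlite3_value;

  class WrapDate extends ScalarFunction {
    @Override public void xFunc(sqlite3_context cx, sqlite3_value[] args){
      CApi.sqlite3_result_java_object(cx, new java.util.Date());
    }
  }

  class UnwrapDate extends ScalarFunction {
    @Override public void xFunc(sqlite3_context cx, sqlite3_value[] args){
      // Returns null if args[0] does not wrap a java.util.Date.
      final java.util.Date d =
        CApi.sqlite3_value_java_object(args[0], java.util.Date.class);
      CApi.sqlite3_result_int(cx, null==d ? 0 : 1);
    }
  }
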
@@ -2253,11 +2700,10 @@
public static final int SQLITE_OPEN_NOFOLLOW = 0x01000000 /* Ok for sqlite3_open_v2() */;
public static final int SQLITE_OPEN_EXRESCODE = 0x02000000 /* Extended result codes */;
// prepare flags
public static final int SQLITE_PREPARE_PERSISTENT = 1;
- public static final int SQLITE_PREPARE_NORMALIZE = 2;
public static final int SQLITE_PREPARE_NO_VTAB = 4;
// result codes
public static final int SQLITE_OK = 0;
public static final int SQLITE_ERROR = 1;
@@ -2409,13 +2855,15 @@
public static final int SQLITE_TXN_NONE = 0;
public static final int SQLITE_TXN_READ = 1;
public static final int SQLITE_TXN_WRITE = 2;
// udf flags
- public static final int SQLITE_DETERMINISTIC = 0x000000800;
- public static final int SQLITE_DIRECTONLY = 0x000080000;
- public static final int SQLITE_INNOCUOUS = 0x000200000;
+ public static final int SQLITE_DETERMINISTIC = 0x000000800;
+ public static final int SQLITE_DIRECTONLY = 0x000080000;
+ public static final int SQLITE_SUBTYPE = 0x000100000;
+ public static final int SQLITE_INNOCUOUS = 0x000200000;
+ public static final int SQLITE_RESULT_SUBTYPE = 0x001000000;
// virtual tables
public static final int SQLITE_INDEX_SCAN_UNIQUE = 1;
public static final int SQLITE_INDEX_CONSTRAINT_EQ = 2;
public static final int SQLITE_INDEX_CONSTRAINT_GT = 4;
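
For illustration, the new subtype-related flags would typically be used as below,
assuming (as in the C API) that UDF flags are OR'd into the eTextRep argument of
sqlite3_create_function() and that ScalarFunction's xFunc signature is as assumed
earlier in this section.

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.ScalarFunction;
  import org.sqlite.jni.capi.sqlite3;
  import org.sqlite.jni.capi.sqlite3_context;
  import org.sqlite.jni.capi.sqlite3_value;

  final class SubtypeSketch {
    static int register(sqlite3 db){
      return CApi.sqlite3_create_function(
        db, "tagged_one", 0,
        CApi.SQLITE_UTF8 | CApi.SQLITE_DETERMINISTIC | CApi.SQLITE_RESULT_SUBTYPE,
        new ScalarFunction(){
          @Override public void xFunc(sqlite3_context cx, sqlite3_value[] args){
            CApi.sqlite3_result_int(cx, 1);
            // Functions which set a subtype should be registered with
            // SQLITE_RESULT_SUBTYPE so the core preserves it.
            CApi.sqlite3_result_subtype(cx, 42);
          }
        }
      );
    }
  }
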
@@ -2440,10 +2888,10 @@
public static final int SQLITE_VTAB_USES_ALL_SCHEMAS = 4;
public static final int SQLITE_ROLLBACK = 1;
public static final int SQLITE_FAIL = 3;
public static final int SQLITE_REPLACE = 5;
static {
- // This MUST come after the SQLITE_MAX_... values or else
- // attempting to modify them silently fails.
init();
}
+ /* Must come after static init(). */
+ private static final boolean JNI_SUPPORTS_NIO = sqlite3_jni_supports_nio();
}
Index: ext/jni/src/org/sqlite/jni/capi/CallbackProxy.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/CallbackProxy.java
+++ ext/jni/src/org/sqlite/jni/capi/CallbackProxy.java
@@ -22,12 +22,13 @@
never throw. Any which do throw but should not might trigger debug
output regarding the error, but the exception will not be
propagated. For callback interfaces which support returning error
info to the core, the JNI binding will convert any exceptions to
C-level error information. For callback interfaces which do not
- support, all exceptions will necessarily be suppressed in order to
- retain the C-style no-throw semantics.
+ support returning error information, all exceptions will
+ necessarily be suppressed in order to retain the C-style no-throw
+ semantics and avoid invoking undefined behavior in the C layer.
Callbacks of this style follow a common naming convention:
1) They use the UpperCamelCase form of the C function they're
proxying for, minus the {@code sqlite3_} prefix, plus a {@code
Index: ext/jni/src/org/sqlite/jni/capi/CollationNeededCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/CollationNeededCallback.java
+++ ext/jni/src/org/sqlite/jni/capi/CollationNeededCallback.java
@@ -19,10 +19,11 @@
public interface CollationNeededCallback extends CallbackProxy {
/**
Has the same semantics as the C-level sqlite3_create_collation()
callback.
-
If it throws, the exception message is passed on to the db and
- the exception is suppressed.
+
Because the C API has no mechanism for reporting errors
+ from this callbacks, any exceptions thrown by this callback
+ are suppressed.
*/
- int call(sqlite3 db, int eTextRep, String collationName);
+ void call(sqlite3 db, int eTextRep, String collationName);
}
Index: ext/jni/src/org/sqlite/jni/capi/CommitHookCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/CommitHookCallback.java
+++ ext/jni/src/org/sqlite/jni/capi/CommitHookCallback.java
@@ -17,9 +17,10 @@
Callback for use with {@link CApi#sqlite3_commit_hook}.
*/
public interface CommitHookCallback extends CallbackProxy {
/**
Works as documented for the C-level sqlite3_commit_hook()
- callback. Must not throw.
+ callback. If it throws, the exception is translated into
+ a db-level error.
*/
int call();
}
ADDED ext/jni/src/org/sqlite/jni/capi/ConfigSqlLogCallback.java
Index: ext/jni/src/org/sqlite/jni/capi/ConfigSqlLogCallback.java
==================================================================
--- /dev/null
+++ ext/jni/src/org/sqlite/jni/capi/ConfigSqlLogCallback.java
@@ -0,0 +1,25 @@
+/*
+** 2023-08-23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file is part of the JNI bindings for the sqlite3 C API.
+*/
+package org.sqlite.jni.capi;
+
+/**
+ A callback for use with sqlite3_config().
+*/
+public interface ConfigSqlLogCallback {
+ /**
+ Must function as described for a C-level callback for
+ {@link CApi#sqlite3_config(ConfigSqlLogCallback)}, with the slight signature change.
+ */
+ void call(sqlite3 db, String msg, int msgType );
+}
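(Illustrative sketch, not part of this patch: installing and removing an SQLLOG listener, much as the test driver later in this patch does. It assumes a library built with SQLITE_ENABLE_SQLLOG and that sqlite3_config() is called before the library is otherwise used.)

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.ConfigSqlLogCallback;
  import org.sqlite.jni.capi.sqlite3;

  public class SqlLogExample {
    public static void main(String[] args){
      final ConfigSqlLogCallback log = new ConfigSqlLogCallback() {
        @Override public void call(sqlite3 db, String msg, int msgType){
          // msgType: 0 = db opened, 1 = SQL executed, 2 = db closed.
          System.out.println("sqllog(" + msgType + "): " + msg);
        }
      };
      int rc = CApi.sqlite3_config(log);                       // install
      if( 0==rc ) CApi.sqlite3_config((ConfigSqlLogCallback)null); // uninstall
    }
  }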
DELETED ext/jni/src/org/sqlite/jni/capi/ConfigSqllogCallback.java
Index: ext/jni/src/org/sqlite/jni/capi/ConfigSqllogCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/ConfigSqllogCallback.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
-** 2023-08-23
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This file is part of the JNI bindings for the sqlite3 C API.
-*/
-package org.sqlite.jni.capi;
-
-/**
- A callback for use with sqlite3_config().
-*/
-public interface ConfigSqllogCallback {
- /**
- Must function as described for a C-level callback for
- {@link CApi#sqlite3_config(ConfigSqllogCallback)}, with the slight signature change.
- */
- void call(sqlite3 db, String msg, int msgType );
-}
Index: ext/jni/src/org/sqlite/jni/capi/OutputPointer.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/OutputPointer.java
+++ ext/jni/src/org/sqlite/jni/capi/OutputPointer.java
@@ -226,6 +226,28 @@
/** Returns the current value. */
public final byte[] get(){return value;}
/** Sets the current value. */
public final void set(byte[] v){value = v;}
}
+
+ /**
+ Output pointer for use with native routines which return
+ blobs via java.nio.ByteBuffer.
+
+ See {@link org.sqlite.jni.capi.CApi#sqlite3_jni_supports_nio}
+ */
+ public static final class ByteBuffer {
+ /**
+ This is public for ease of use. Accessors are provided for
+ consistency with the higher-level types.
+ */
+ public java.nio.ByteBuffer value;
+ /** Initializes with the value null. */
+ public ByteBuffer(){this(null);}
+ /** Initializes with the value v. */
+ public ByteBuffer(java.nio.ByteBuffer v){value = v;}
+ /** Returns the current value. */
+ public final java.nio.ByteBuffer get(){return value;}
+ /** Sets the current value. */
+ public final void set(java.nio.ByteBuffer v){value = v;}
+ }
}
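(Illustrative sketch, not part of this patch: the new holder follows the same get()/set() conventions as the other OutputPointer types. A native routine which returns a blob via java.nio.ByteBuffer would populate it; here that is simulated with a direct allocation.)

  import org.sqlite.jni.capi.OutputPointer;

  public class ByteBufferHolderDemo {
    public static void main(String[] args){
      final OutputPointer.ByteBuffer out = new OutputPointer.ByteBuffer();
      // A native routine filling this holder would do the equivalent of:
      out.set(java.nio.ByteBuffer.allocateDirect(16));
      java.nio.ByteBuffer buf = out.get();
      System.out.println("capacity = " + buf.capacity());
    }
  }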
Index: ext/jni/src/org/sqlite/jni/capi/PrepareMultiCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/PrepareMultiCallback.java
+++ ext/jni/src/org/sqlite/jni/capi/PrepareMultiCallback.java
@@ -23,11 +23,14 @@
transfering ownership of it to this function.
sqlite3_prepare_multi() will _not_ finalize st - it is up
to the call() implementation how st is handled.
- Must return 0 on success or an SQLITE_... code on error.
+ Must return 0 on success or an SQLITE_... code on error. If it
+ throws, sqlite3_prepare_multi() will transform the exception into
+ a db-level error in order to retain the C-style error semantics
+ of the API.
See the {@link Finalize} class for a wrapper which finalizes the
statement after calling a proxy PrepareMultiCallback.
*/
int call(sqlite3_stmt st);
@@ -35,11 +38,11 @@
/**
A PrepareMultiCallback impl which wraps a separate impl and finalizes
any sqlite3_stmt passed to its callback.
*/
public static final class Finalize implements PrepareMultiCallback {
- private PrepareMultiCallback p;
+ private final PrepareMultiCallback p;
/**
p is the proxy to call() when this.call() is called.
*/
public Finalize( PrepareMultiCallback p ){
this.p = p;
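(Illustrative sketch, not part of this patch, of the error semantics documented above: the Finalize wrapper cleans up each statement, and any exception thrown by the callback surfaces as a non-zero result code plus a db-level error message.)

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.OutputPointer;
  import org.sqlite.jni.capi.PrepareMultiCallback;
  import org.sqlite.jni.capi.sqlite3;

  public class PrepareMultiDemo {
    public static void main(String[] args){
      final OutputPointer.sqlite3 outDb = new OutputPointer.sqlite3();
      if( 0 != CApi.sqlite3_open(":memory:", outDb) ) return;
      final sqlite3 db = outDb.get();
      // Step each prepared statement, then finalize it via the Finalize wrapper.
      final PrepareMultiCallback cb =
        new PrepareMultiCallback.Finalize( new PrepareMultiCallback.StepAll() );
      int rc = CApi.sqlite3_prepare_multi(
        db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES(1),(2);", cb);
      // Had cb thrown, rc would be non-0 and sqlite3_errmsg(db) would carry
      // the exception's message, per the doc change above.
      System.out.println("rc=" + rc + " errmsg=" + CApi.sqlite3_errmsg(db));
      CApi.sqlite3_close_v2(db);
    }
  }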
Index: ext/jni/src/org/sqlite/jni/capi/PreupdateHookCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/PreupdateHookCallback.java
+++ ext/jni/src/org/sqlite/jni/capi/PreupdateHookCallback.java
@@ -17,10 +17,11 @@
Callback for use with {@link CApi#sqlite3_preupdate_hook}.
*/
public interface PreupdateHookCallback extends CallbackProxy {
/**
Must function as described for the C-level sqlite3_preupdate_hook()
- callback.
+ callback. If it throws, the exception is translated to a
+ db-level error and the exception is suppressed.
*/
void call(sqlite3 db, int op, String dbName, String dbTable,
long iKey1, long iKey2 );
}
Index: ext/jni/src/org/sqlite/jni/capi/RollbackHookCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/RollbackHookCallback.java
+++ ext/jni/src/org/sqlite/jni/capi/RollbackHookCallback.java
@@ -16,10 +16,11 @@
/**
Callback for use with {@link CApi#sqlite3_rollback_hook}.
*/
public interface RollbackHookCallback extends CallbackProxy {
/**
- Works as documented for the C-level sqlite3_rollback_hook()
- callback.
+ Must function as documented for the C-level sqlite3_rollback_hook()
+ callback. If it throws, the exception is translated into
+ a db-level error.
*/
void call();
}
Index: ext/jni/src/org/sqlite/jni/capi/SQLFunction.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/SQLFunction.java
+++ ext/jni/src/org/sqlite/jni/capi/SQLFunction.java
@@ -31,73 +31,6 @@
SQLFunction base class and the method names and signatures used by
the UDF callback interfaces.
*/
public interface SQLFunction {
- /**
- PerContextState assists aggregate and window functions in
- managing their accumulator state across calls to the UDF's
- callbacks.
-
- <p>T must be of a type which can be legally stored as a value in
- java.util.HashMap.
-
- <p>If a given aggregate or window function is called multiple times
- in a single SQL statement, e.g. SELECT MYFUNC(A), MYFUNC(B)...,
- then the clients need some way of knowing which call is which so
- that they can map their state between their various UDF callbacks
- and reset it via xFinal(). This class takes care of such
- mappings.
-
- <p>This class works by mapping
- sqlite3_context.getAggregateContext() to a single piece of
- state, of a client-defined type (the T part of this class), which
- persists across a "matching set" of the UDF's callbacks.
-
- <p>This class is a helper providing commonly-needed functionality
- - it is not required for use with aggregate or window functions.
- Client UDFs are free to perform such mappings using custom
- approaches. The provided {@link AggregateFunction} and {@link
- WindowFunction} classes use this.
- */
- public static final class PerContextState<T> {
- private final java.util.Map<Long,ValueHolder<T>> map
- = new java.util.HashMap<>();
-
- /**
- Should be called from a UDF's xStep(), xValue(), and xInverse()
- methods, passing it that method's first argument and an initial
- value for the persistent state. If there is currently no
- mapping for the given context within the map, one is created
- using the given initial value, else the existing one is used
- and the 2nd argument is ignored. It returns a ValueHolder
- which can be used to modify that state directly without
- requiring that the client update the underlying map's entry.
-
- <p>The caller is obligated to eventually call
- takeAggregateState() to clear the mapping.
- */
- public ValueHolder<T> getAggregateState(sqlite3_context cx, T initialValue){
- final Long key = cx.getAggregateContext(true);
- ValueHolder<T> rc = null==key ? null : map.get(key);
- if( null==rc ){
- map.put(key, rc = new ValueHolder<>(initialValue));
- }
- return rc;
- }
-
- /**
- Should be called from a UDF's xFinal() method and passed that
- method's first argument. This function removes the value
- associated with cx.getAggregateContext() from the map and
- returns it, returning null if no other UDF method has been
- called to set up such a mapping. The latter condition will be
- the case if a UDF is used in a statement which has no result
- rows.
- */
- public T takeAggregateState(sqlite3_context cx){
- final ValueHolder<T> h = map.remove(cx.getAggregateContext(false));
- return null==h ? null : h.value;
- }
- }
-
}
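(Illustrative sketch, not part of this patch: client aggregate UDFs are unaffected by this relocation because they go through AggregateFunction's getAggregateState()/takeAggregateState(), as the Tester1 changes below also do. The Integer type parameter is just an example choice.)

  import org.sqlite.jni.capi.AggregateFunction;
  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.ValueHolder;
  import org.sqlite.jni.capi.sqlite3_context;
  import org.sqlite.jni.capi.sqlite3_value;

  // A summing aggregate which relies on the per-context state helpers.
  public class MySum extends AggregateFunction<Integer> {
    @Override public void xStep(sqlite3_context cx, sqlite3_value[] args){
      final ValueHolder<Integer> agg = this.getAggregateState(cx, 0);
      agg.value += CApi.sqlite3_value_int(args[0]);
    }
    @Override public void xFinal(sqlite3_context cx){
      final Integer v = this.takeAggregateState(cx);
      if( null==v ) CApi.sqlite3_result_null(cx); // no rows were aggregated
      else CApi.sqlite3_result_int(cx, v);
    }
  }
  // Registration looks like:
  //   CApi.sqlite3_create_function(db, "mysum", 1, CApi.SQLITE_UTF8, new MySum());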
Index: ext/jni/src/org/sqlite/jni/capi/Tester1.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/Tester1.java
+++ ext/jni/src/org/sqlite/jni/capi/Tester1.java
@@ -36,19 +36,27 @@
*/
@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD})
@interface SingleThreadOnly{}
+/**
+ Annotation for Tester1 tests which must only be run if
+ sqlite3_jni_supports_nio() is true.
+*/
+@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
+@java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD})
+@interface RequiresJniNio{}
+
public class Tester1 implements Runnable {
//! True when running in multi-threaded mode.
private static boolean mtMode = false;
//! True to sleep briefly between tests.
private static boolean takeNaps = false;
//! True to shuffle the order of the tests.
private static boolean shuffle = false;
//! True to dump the list of to-run tests to stdout.
- private static boolean listRunTests = false;
+ private static int listRunTests = 0;
//! True to squelch all out() and outln() output.
private static boolean quietMode = false;
//! Total number of runTests() calls.
private static int nTestRuns = 0;
//! List of test*() methods to run.
@@ -325,11 +333,11 @@
}
}
rc = sqlite3_prepare_v3(db, "INSERT INTO t2(a) VALUES(1),(2),(3)",
- SQLITE_PREPARE_NORMALIZE, outStmt);
+ 0, outStmt);
affirm(0 == rc);
stmt = outStmt.get();
affirm(0 != stmt.getNativePointer());
sqlite3_finalize(stmt);
affirm(0 == stmt.getNativePointer() );
@@ -380,10 +388,19 @@
affirm(sqlite3_changes64(db) > changes64);
affirm(sqlite3_total_changes64(db) > changesT64);
stmt = prepare(db, "SELECT a FROM t ORDER BY a DESC;");
affirm( sqlite3_stmt_readonly(stmt) );
affirm( !sqlite3_stmt_busy(stmt) );
+ if( sqlite3_compileoption_used("ENABLE_COLUMN_METADATA") ){
+ /* Unlike in native C code, JNI won't trigger an
+ UnsatisfiedLinkError until these are called (on Linux, at
+ least). */
+ affirm("t".equals(sqlite3_column_table_name(stmt,0)));
+ affirm("main".equals(sqlite3_column_database_name(stmt,0)));
+ affirm("a".equals(sqlite3_column_origin_name(stmt,0)));
+ }
+
int total2 = 0;
while( SQLITE_ROW == sqlite3_step(stmt) ){
affirm( sqlite3_stmt_busy(stmt) );
total2 += sqlite3_column_int(stmt, 0);
sqlite3_value sv = sqlite3_column_value(stmt, 0);
@@ -475,10 +492,11 @@
}
sqlite3_finalize(stmt);
stmt = prepare(db, "SELECT a FROM t ORDER BY a DESC;");
StringBuilder sbuf = new StringBuilder();
n = 0;
+ final boolean tryNio = sqlite3_jni_supports_nio();
while( SQLITE_ROW == sqlite3_step(stmt) ){
final sqlite3_value sv = sqlite3_value_dup(sqlite3_column_value(stmt,0));
final String txt = sqlite3_column_text16(stmt, 0);
sbuf.append( txt );
affirm( txt.equals(new String(
@@ -489,10 +507,19 @@
affirm( txt.equals(new String(
sqlite3_value_text(sv),
StandardCharsets.UTF_8)) );
affirm( txt.length() == sqlite3_value_bytes16(sv)/2 );
affirm( txt.equals(sqlite3_value_text16(sv)) );
+ if( tryNio ){
+ java.nio.ByteBuffer bu = sqlite3_value_nio_buffer(sv);
+ byte ba[] = sqlite3_value_blob(sv);
+ affirm( ba.length == bu.capacity() );
+ int i = 0;
+ for( byte b : ba ){
+ affirm( b == bu.get(i++) );
+ }
+ }
sqlite3_value_free(sv);
++n;
}
sqlite3_finalize(stmt);
affirm(3 == n);
@@ -545,10 +572,83 @@
sqlite3_finalize(stmt);
affirm(1 == n);
affirm(total == 0x32 + 0x33 + 0x34);
sqlite3_close_v2(db);
}
+
+ @RequiresJniNio
+ private void testBindByteBuffer(){
+ /* TODO: these tests need to be much more extensive to check the
+ begin/end range handling. */
+
+ java.nio.ByteBuffer zeroCheck =
+ java.nio.ByteBuffer.allocateDirect(0);
+ affirm( null != zeroCheck );
+ zeroCheck = null;
+ sqlite3 db = createNewDb();
+ execSql(db, "CREATE TABLE t(a)");
+
+ final java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocateDirect(10);
+ buf.put((byte)0x31)/*note that we'll skip this one*/
+ .put((byte)0x32)
+ .put((byte)0x33)
+ .put((byte)0x34)
+ .put((byte)0x35)/*we'll skip this one too*/;
+
+ final int expectTotal = buf.get(1) + buf.get(2) + buf.get(3);
+ sqlite3_stmt stmt = prepare(db, "INSERT INTO t(a) VALUES(?);");
+ affirm( SQLITE_ERROR == sqlite3_bind_blob(stmt, 1, buf, -1, 0),
+ "Buffer offset may not be negative." );
+ affirm( 0 == sqlite3_bind_blob(stmt, 1, buf, 1, 3) );
+ affirm( SQLITE_DONE == sqlite3_step(stmt) );
+ sqlite3_finalize(stmt);
+ stmt = prepare(db, "SELECT a FROM t;");
+ int total = 0;
+ affirm( SQLITE_ROW == sqlite3_step(stmt) );
+ byte blob[] = sqlite3_column_blob(stmt, 0);
+ java.nio.ByteBuffer nioBlob =
+ sqlite3_column_nio_buffer(stmt, 0);
+ affirm(3 == blob.length);
+ affirm(blob.length == nioBlob.capacity());
+ affirm(blob.length == nioBlob.limit());
+ int i = 0;
+ for(byte b : blob){
+ affirm( i<=3 );
+ affirm(b == buf.get(1 + i));
+ affirm(b == nioBlob.get(i));
+ ++i;
+ total += b;
+ }
+ affirm( SQLITE_DONE == sqlite3_step(stmt) );
+ sqlite3_finalize(stmt);
+ affirm(total == expectTotal);
+
+ SQLFunction func =
+ new ScalarFunction(){
+ public void xFunc(sqlite3_context cx, sqlite3_value[] args){
+ sqlite3_result_blob(cx, buf, 1, 3);
+ }
+ };
+
+ affirm( 0 == sqlite3_create_function(db, "myfunc", -1, SQLITE_UTF8, func) );
+ stmt = prepare(db, "SELECT myfunc()");
+ affirm( SQLITE_ROW == sqlite3_step(stmt) );
+ blob = sqlite3_column_blob(stmt, 0);
+ affirm(3 == blob.length);
+ i = 0;
+ total = 0;
+ for(byte b : blob){
+ affirm( i<=3 );
+ affirm(b == buf.get(1 + i++));
+ total += b;
+ }
+ affirm( SQLITE_DONE == sqlite3_step(stmt) );
+ sqlite3_finalize(stmt);
+ affirm(total == expectTotal);
+
+ sqlite3_close_v2(db);
+ }
private void testSql(){
sqlite3 db = createNewDb();
sqlite3_stmt stmt = prepare(db, "SELECT 1");
affirm( "SELECT 1".equals(sqlite3_sql(stmt)) );
@@ -591,13 +691,13 @@
++xDestroyCalled.value;
}
};
final CollationNeededCallback collLoader = new CollationNeededCallback(){
@Override
- public int call(sqlite3 dbArg, int eTextRep, String collationName){
+ public void call(sqlite3 dbArg, int eTextRep, String collationName){
affirm(dbArg == db/* as opposed to a temporary object*/);
- return sqlite3_create_collation(dbArg, "reversi", eTextRep, myCollation);
+ sqlite3_create_collation(dbArg, "reversi", eTextRep, myCollation);
}
};
int rc = sqlite3_collation_needed(db, collLoader);
affirm( 0 == rc );
rc = sqlite3_collation_needed(db, collLoader);
@@ -801,17 +901,21 @@
affirm( testResult.value == db );
rc = sqlite3_bind_java_object(stmt, 1, boundObj);
affirm( 0==rc );
int n = 0;
if( SQLITE_ROW == sqlite3_step(stmt) ){
+ affirm( testResult.value == sqlite3_column_java_object(stmt, 0) );
+ affirm( testResult.value == sqlite3_column_java_object(stmt, 0, sqlite3.class) );
+ affirm( null == sqlite3_column_java_object(stmt, 0, sqlite3_stmt.class) );
+ affirm( null == sqlite3_column_java_object(stmt,1) );
final sqlite3_value v = sqlite3_column_value(stmt, 0);
affirm( testResult.value == sqlite3_value_java_object(v) );
- affirm( testResult.value == sqlite3_value_java_casted(v, sqlite3.class) );
+ affirm( testResult.value == sqlite3_value_java_object(v, sqlite3.class) );
affirm( testResult.value ==
- sqlite3_value_java_casted(v, testResult.value.getClass()) );
- affirm( testResult.value == sqlite3_value_java_casted(v, Object.class) );
- affirm( null == sqlite3_value_java_casted(v, String.class) );
+ sqlite3_value_java_object(v, testResult.value.getClass()) );
+ affirm( testResult.value == sqlite3_value_java_object(v, Object.class) );
+ affirm( null == sqlite3_value_java_object(v, String.class) );
++n;
}
sqlite3_finalize(stmt);
affirm( 1 == n );
affirm( 0==sqlite3_db_release_memory(db) );
@@ -822,19 +926,32 @@
final sqlite3 db = createNewDb();
final ValueHolder<Boolean> xFinalNull =
// To confirm that xFinal() is called with no aggregate state
// when the corresponding result set is empty.
new ValueHolder<>(false);
+ final ValueHolder<sqlite3_value[]> neverEverDoThisInClientCode = new ValueHolder<>(null);
+ final ValueHolder<sqlite3_context> neverEverDoThisInClientCode2 = new ValueHolder<>(null);
SQLFunction func = new AggregateFunction<Integer>(){
@Override
public void xStep(sqlite3_context cx, sqlite3_value[] args){
+ if( null==neverEverDoThisInClientCode.value ){
+ /* !!!NEVER!!! hold a reference to an sqlite3_value or
+ sqlite3_context object like this in client code! They
+ are ONLY legal for the duration of their single
+ call. We do it here ONLY to test that the defenses
+ against clients doing this are working. */
+ neverEverDoThisInClientCode.value = args;
+ }
final ValueHolder<Integer> agg = this.getAggregateState(cx, 0);
agg.value += sqlite3_value_int(args[0]);
affirm( agg == this.getAggregateState(cx, 0) );
}
@Override
public void xFinal(sqlite3_context cx){
+ if( null==neverEverDoThisInClientCode2.value ){
+ neverEverDoThisInClientCode2.value = cx;
+ }
final Integer v = this.takeAggregateState(cx);
if(null == v){
xFinalNull.value = true;
sqlite3_result_null(cx);
}else{
@@ -855,10 +972,14 @@
affirm( 30+v == v2 );
++n;
}
affirm( 1==n );
affirm(!xFinalNull.value);
+ affirm( null!=neverEverDoThisInClientCode.value );
+ affirm( null!=neverEverDoThisInClientCode2.value );
+ affirm( 0 < neverEverDoThisInClientCode.value.length );
- final ValueHolder<Integer> xBusyCalled = new ValueHolder<>(0);
- BusyHandlerCallback handler = new BusyHandlerCallback(){
- @Override public int call(int n){
- //outln("busy handler #"+n);
- return n > 2 ? 0 : ++xBusyCalled.value;
- }
- };
- rc = sqlite3_busy_handler(db2, handler);
- affirm(0 == rc);
-
- // Force a locked condition...
- execSql(db1, "BEGIN EXCLUSIVE");
- rc = sqlite3_prepare_v2(db2, "SELECT * from t", outStmt);
- affirm( SQLITE_BUSY == rc);
- affirm( null == outStmt.get() );
- affirm( 3 == xBusyCalled.value );
- sqlite3_close_v2(db1);
- sqlite3_close_v2(db2);
try{
- final java.io.File f = new java.io.File(dbName);
- f.delete();
- }catch(Exception e){
- /* ignore */
+ final OutputPointer.sqlite3 outDb = new OutputPointer.sqlite3();
+ final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt();
+
+ int rc = sqlite3_open(dbName, outDb);
+ ++metrics.dbOpen;
+ affirm( 0 == rc );
+ final sqlite3 db1 = outDb.get();
+ execSql(db1, "CREATE TABLE IF NOT EXISTS t(a)");
+ rc = sqlite3_open(dbName, outDb);
+ ++metrics.dbOpen;
+ affirm( 0 == rc );
+ affirm( outDb.get() != db1 );
+ final sqlite3 db2 = outDb.get();
+
+ affirm( "main".equals( sqlite3_db_name(db1, 0) ) );
+ rc = sqlite3_db_config(db1, SQLITE_DBCONFIG_MAINDBNAME, "foo");
+ affirm( sqlite3_db_filename(db1, "foo").endsWith(dbName) );
+ affirm( "foo".equals( sqlite3_db_name(db1, 0) ) );
+ affirm( SQLITE_MISUSE == sqlite3_db_config(db1, 0, 0, null) );
+
+ final ValueHolder<Integer> xBusyCalled = new ValueHolder<>(0);
+ BusyHandlerCallback handler = new BusyHandlerCallback(){
+ @Override public int call(int n){
+ //outln("busy handler #"+n);
+ return n > 2 ? 0 : ++xBusyCalled.value;
+ }
+ };
+ rc = sqlite3_busy_handler(db2, handler);
+ affirm(0 == rc);
+
+ // Force a locked condition...
+ execSql(db1, "BEGIN EXCLUSIVE");
+ rc = sqlite3_prepare_v2(db2, "SELECT * from t", outStmt);
+ affirm( SQLITE_BUSY == rc);
+ affirm( null == outStmt.get() );
+ affirm( 3 == xBusyCalled.value );
+ sqlite3_close_v2(db1);
+ sqlite3_close_v2(db2);
+ }finally{
+ try{(new java.io.File(dbName)).delete();}
+ catch(Exception e){/* ignore */}
}
}
private void testProgress(){
final sqlite3 db = createNewDb();
@@ -1091,10 +1211,11 @@
sqlite3_close_v2(db);
}
private void testCommitHook(){
final sqlite3 db = createNewDb();
+ sqlite3_extended_result_codes(db, true);
final ValueHolder<Integer> counter = new ValueHolder<>(0);
final ValueHolder<Integer> hookResult = new ValueHolder<>(0);
final CommitHookCallback theHook = new CommitHookCallback(){
@Override public int call(){
++counter.value;
@@ -1133,11 +1254,11 @@
affirm( newHook == oldHook );
execSql(db, "BEGIN; update t set a='i' where a='h'; COMMIT;");
affirm( 5 == counter.value );
hookResult.value = SQLITE_ERROR;
int rc = execSql(db, false, "BEGIN; update t set a='j' where a='i'; COMMIT;");
- affirm( SQLITE_CONSTRAINT == rc );
+ affirm( SQLITE_CONSTRAINT_COMMITHOOK == rc );
affirm( 6 == counter.value );
sqlite3_close_v2(db);
}
private void testUpdateHook(){
@@ -1352,10 +1473,13 @@
execSql(db, "UPDATE t SET a=1");
affirm( 1 == counter.value );
authRc.value = SQLITE_DENY;
int rc = execSql(db, false, "UPDATE t SET a=2");
affirm( SQLITE_AUTH==rc );
+ sqlite3_set_authorizer(db, null);
+ rc = execSql(db, false, "UPDATE t SET a=2");
+ affirm( 0==rc );
// TODO: expand these tests considerably
sqlite3_close(db);
}
@SingleThreadOnly /* because multiple threads legitimately make these
@@ -1413,11 +1537,11 @@
affirm( err.getMessage().indexOf(toss.value)>0 );
toss.value = null;
val.value = 0;
final AutoExtensionCallback ax2 = new AutoExtensionCallback(){
- @Override public synchronized int call(sqlite3 db){
+ @Override public int call(sqlite3 db){
++val.value;
return 0;
}
};
rc = sqlite3_auto_extension( ax2 );
@@ -1624,20 +1748,100 @@
affirm( 3 == sqlite3_column_int(stmt,0) );
affirm( "def".equals(sqlite3_column_text16(stmt,1)) );
sqlite3_finalize(stmt);
b = sqlite3_blob_open(db, "main", "t", "a",
- sqlite3_last_insert_rowid(db), 1);
+ sqlite3_last_insert_rowid(db), 0);
affirm( null!=b );
rc = sqlite3_blob_reopen(b, 2);
affirm( 0==rc );
final byte[] tgt = new byte[3];
rc = sqlite3_blob_read(b, tgt, 0);
affirm( 0==rc );
affirm( 100==tgt[0] && 101==tgt[1] && 102==tgt[2], "DEF" );
rc = sqlite3_blob_close(b);
affirm( 0==rc );
+
+ if( !sqlite3_jni_supports_nio() ){
+ outln("WARNING: skipping tests for ByteBuffer-using sqlite3_blob APIs ",
+ "because this platform lacks that support.");
+ sqlite3_close_v2(db);
+ return;
+ }
+ /* Sanity checks for the java.nio.ByteBuffer-taking overloads of
+ sqlite3_blob_read/write(). */
+ execSql(db, "UPDATE t SET a=zeroblob(10)");
+ b = sqlite3_blob_open(db, "main", "t", "a", 1, 1);
+ affirm( null!=b );
+ java.nio.ByteBuffer bb = java.nio.ByteBuffer.allocateDirect(10);
+ for( byte i = 0; i < 10; ++i ){
+ bb.put((int)i, (byte)(48+i & 0xff));
+ }
+ rc = sqlite3_blob_write(b, 1, bb, 1, 10);
+ affirm( rc==SQLITE_ERROR, "b length < (srcOffset + bb length)" );
+ rc = sqlite3_blob_write(b, -1, bb);
+ affirm( rc==SQLITE_ERROR, "Target offset may not be negative" );
+ rc = sqlite3_blob_write(b, 0, bb, -1, -1);
+ affirm( rc==SQLITE_ERROR, "Source offset may not be negative" );
+ rc = sqlite3_blob_write(b, 1, bb, 1, 8);
+ affirm( rc==0 );
+ // b's contents: 0 49 50 51 52 53 54 55 56 0
+ // ascii: 0 '1' '2' '3' '4' '5' '6' '7' '8' 0
+ byte br[] = new byte[10];
+ java.nio.ByteBuffer bbr =
+ java.nio.ByteBuffer.allocateDirect(bb.limit());
+ rc = sqlite3_blob_read( b, br, 0 );
+ affirm( rc==0 );
+ rc = sqlite3_blob_read( b, bbr );
+ affirm( rc==0 );
+ java.nio.ByteBuffer bbr2 = sqlite3_blob_read_nio_buffer(b, 0, 12);
+ affirm( null==bbr2, "Read size is too big");
+ bbr2 = sqlite3_blob_read_nio_buffer(b, -1, 3);
+ affirm( null==bbr2, "Source offset is negative");
+ bbr2 = sqlite3_blob_read_nio_buffer(b, 5, 6);
+ affirm( null==bbr2, "Read pos+size is too big");
+ bbr2 = sqlite3_blob_read_nio_buffer(b, 4, 7);
+ affirm( null==bbr2, "Read pos+size is too big");
+ bbr2 = sqlite3_blob_read_nio_buffer(b, 4, 6);
+ affirm( null!=bbr2 );
+ java.nio.ByteBuffer bbr3 =
+ java.nio.ByteBuffer.allocateDirect(2 * bb.limit());
+ java.nio.ByteBuffer bbr4 =
+ java.nio.ByteBuffer.allocateDirect(5);
+ rc = sqlite3_blob_read( b, bbr3 );
+ affirm( rc==0 );
+ rc = sqlite3_blob_read( b, bbr4 );
+ affirm( rc==0 );
+ affirm( sqlite3_blob_bytes(b)==bbr3.limit() );
+ affirm( 5==bbr4.limit() );
+ sqlite3_blob_close(b);
+ affirm( 0==br[0] );
+ affirm( 0==br[9] );
+ affirm( 0==bbr.get(0) );
+ affirm( 0==bbr.get(9) );
+ affirm( bbr2.limit() == 6 );
+ affirm( 0==bbr3.get(0) );
+ {
+ Exception ex = null;
+ try{ bbr3.get(11); }
+ catch(Exception e){ex = e;}
+ affirm( ex instanceof IndexOutOfBoundsException,
+ "bbr3.limit() was reset by read()" );
+ ex = null;
+ }
+ affirm( 0==bbr4.get(0) );
+ for( int i = 1; i < 9; ++i ){
+ affirm( br[i] == 48 + i );
+ affirm( br[i] == bbr.get(i) );
+ affirm( br[i] == bbr3.get(i) );
+ if( i>3 ){
+ affirm( br[i] == bbr2.get(i-4) );
+ }
+ if( i < bbr4.limit() ){
+ affirm( br[i] == bbr4.get(i) );
+ }
+ }
sqlite3_close_v2(db);
}
private void testPrepareMulti(){
final sqlite3 db = createNewDb();
@@ -1646,22 +1850,30 @@
"; insert into t(a) values(1),(2),(3);",
"select a from t;"
};
final List<sqlite3_stmt> liStmt = new ArrayList<>();
final PrepareMultiCallback proxy = new PrepareMultiCallback.StepAll();
+ final ValueHolder<String> toss = new ValueHolder<>(null);
PrepareMultiCallback m = new PrepareMultiCallback() {
@Override public int call(sqlite3_stmt st){
liStmt.add(st);
+ if( null!=toss.value ){
+ throw new RuntimeException(toss.value);
+ }
return proxy.call(st);
}
};
int rc = sqlite3_prepare_multi(db, sql, m);
affirm( 0==rc );
affirm( liStmt.size() == 3 );
for( sqlite3_stmt st : liStmt ){
sqlite3_finalize(st);
}
+ toss.value = "This is an exception.";
+ rc = sqlite3_prepare_multi(db, "SELECT 1", m);
+ affirm( SQLITE_ERROR==rc );
+ affirm( sqlite3_errmsg(db).indexOf(toss.value)>0 );
sqlite3_close_v2(db);
}
/* Copy/paste/rename this to add new tests. */
private void _testTemplate(){
@@ -1696,11 +1908,11 @@
affirm( null!=mlist );
if( shuffle ){
mlist = new ArrayList<>( testMethods.subList(0, testMethods.size()) );
java.util.Collections.shuffle(mlist);
}
- if( listRunTests ){
+ if( (!fromThread && listRunTests>0) || listRunTests>1 ){
synchronized(this.getClass()){
if( !fromThread ){
out("Initial test"," list: ");
for(java.lang.reflect.Method m : testMethods){
out(m.getName()+" ");
@@ -1757,13 +1969,16 @@
-shuffle: randomizes the order of most of the test functions.
-naps: sleep small random intervals between tests in order to add
some chaos for cross-thread contention.
+
-list-tests: outputs the list of tests being run, minus some
- which are hard-coded. This is noisy in multi-threaded mode.
+ which are hard-coded. In multi-threaded mode, use this twice to
+ emit the list run by each thread (which may differ from the initial
+ list, in particular if -shuffle is used).
-fail: forces an exception to be thrown during the test run. Use
with -shuffle to make its appearance unpredictable.
-v: emit some developer-mode info at the end.
@@ -1788,11 +2003,11 @@
}else if(arg.equals("r") || arg.equals("repeat")){
nRepeat = Integer.parseInt(args[i++]);
}else if(arg.equals("shuffle")){
shuffle = true;
}else if(arg.equals("list-tests")){
- listRunTests = true;
+ ++listRunTests;
}else if(arg.equals("fail")){
forceFail = true;
}else if(arg.equals("sqllog")){
sqlLog = true;
}else if(arg.equals("configlog")){
@@ -1807,11 +2022,11 @@
}
}
if( sqlLog ){
if( sqlite3_compileoption_used("ENABLE_SQLLOG") ){
- final ConfigSqllogCallback log = new ConfigSqllogCallback() {
+ final ConfigSqlLogCallback log = new ConfigSqlLogCallback() {
@Override public void call(sqlite3 db, String msg, int op){
switch(op){
case 0: outln("Opening db: ",db); break;
case 1: outln("SQL ",db,": ",msg); break;
case 2: outln("Closing db: ",db); break;
@@ -1818,11 +2033,11 @@
}
}
};
int rc = sqlite3_config( log );
affirm( 0==rc );
- rc = sqlite3_config( (ConfigSqllogCallback)null );
+ rc = sqlite3_config( (ConfigSqlLogCallback)null );
affirm( 0==rc );
rc = sqlite3_config( log );
affirm( 0==rc );
}else{
outln("WARNING: -sqllog is not active because library was built ",
@@ -1856,22 +2071,24 @@
final String name = m.getName();
if( name.equals("testFail") ){
if( forceFail ){
testMethods.add(m);
}
+ }else if( m.isAnnotationPresent( RequiresJniNio.class )
+ && !sqlite3_jni_supports_nio() ){
+ outln("Skipping test for lack of JNI java.nio.ByteBuffer support: ",
+ name,"()\n");
+ ++nSkipped;
}else if( !m.isAnnotationPresent( ManualTest.class ) ){
if( nThread>1 && m.isAnnotationPresent( SingleThreadOnly.class ) ){
- if( 0==nSkipped++ ){
- out("Skipping tests in multi-thread mode:");
- }
- out(" "+name+"()");
+ out("Skipping test in multi-thread mode: ",name,"()\n");
+ ++nSkipped;
}else if( name.startsWith("test") ){
testMethods.add(m);
}
}
}
- if( nSkipped>0 ) out("\n");
}
final long timeStart = System.currentTimeMillis();
int nLoop = 0;
switch( sqlite3_threadsafe() ){ /* Sanity checking */
@@ -1899,10 +2116,11 @@
}
outln("libversion_number: ",
sqlite3_libversion_number(),"\n",
sqlite3_libversion(),"\n",SQLITE_SOURCE_ID,"\n",
"SQLITE_THREADSAFE=",sqlite3_threadsafe());
+ outln("JVM NIO support? ",sqlite3_jni_supports_nio() ? "YES" : "NO");
final boolean showLoopCount = (nRepeat>1 && nThread>1);
if( showLoopCount ){
outln("Running ",nRepeat," loop(s) with ",nThread," thread(s) each.");
}
if( takeNaps ) outln("Napping between tests is enabled.");
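(Illustrative sketch, not part of this patch, of the ByteBuffer flow exercised by testBindByteBuffer() above, guarded by sqlite3_jni_supports_nio().)

  import org.sqlite.jni.capi.CApi;
  import org.sqlite.jni.capi.OutputPointer;
  import org.sqlite.jni.capi.sqlite3;
  import org.sqlite.jni.capi.sqlite3_stmt;

  public class ByteBufferRoundTrip {
    static sqlite3_stmt prep(sqlite3 db, String sql){
      final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt();
      CApi.sqlite3_prepare_v2(db, sql, out);
      return out.get();
    }
    public static void main(String[] args){
      if( !CApi.sqlite3_jni_supports_nio() ) return;
      final OutputPointer.sqlite3 outDb = new OutputPointer.sqlite3();
      if( 0 != CApi.sqlite3_open(":memory:", outDb) ) return;
      final sqlite3 db = outDb.get();
      sqlite3_stmt stmt = prep(db, "CREATE TABLE t(a)");
      CApi.sqlite3_step(stmt);
      CApi.sqlite3_finalize(stmt);
      // Bind bytes 1..3 of a direct buffer, as in testBindByteBuffer().
      final java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocateDirect(5);
      buf.put((byte)0x31).put((byte)0x32).put((byte)0x33).put((byte)0x34).put((byte)0x35);
      stmt = prep(db, "INSERT INTO t(a) VALUES(?)");
      CApi.sqlite3_bind_blob(stmt, 1, buf, 1, 3);
      CApi.sqlite3_step(stmt);
      CApi.sqlite3_finalize(stmt);
      // Read the blob back as a ByteBuffer.
      stmt = prep(db, "SELECT a FROM t");
      if( CApi.SQLITE_ROW == CApi.sqlite3_step(stmt) ){
        java.nio.ByteBuffer blob = CApi.sqlite3_column_nio_buffer(stmt, 0);
        System.out.println("blob bytes = " + blob.limit());
      }
      CApi.sqlite3_finalize(stmt);
      CApi.sqlite3_close_v2(db);
    }
  }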
Index: ext/jni/src/org/sqlite/jni/capi/UpdateHookCallback.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/UpdateHookCallback.java
+++ ext/jni/src/org/sqlite/jni/capi/UpdateHookCallback.java
@@ -17,9 +17,10 @@
Callback for use with {@link CApi#sqlite3_update_hook}.
*/
public interface UpdateHookCallback extends CallbackProxy {
/**
Must function as described for the C-level sqlite3_update_hook()
- callback.
+ callback. If it throws, the exception is translated into
+ a db-level error.
*/
void call(int opId, String dbName, String tableName, long rowId);
}
Index: ext/jni/src/org/sqlite/jni/capi/ValueHolder.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/ValueHolder.java
+++ ext/jni/src/org/sqlite/jni/capi/ValueHolder.java
@@ -7,18 +7,20 @@
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file contains a set of tests for the sqlite3 JNI bindings.
+** This file contains the ValueHolder utility class for the sqlite3
+** JNI bindings.
*/
package org.sqlite.jni.capi;
/**
A helper class which simply holds a single value. Its primary use
is for communicating values out of anonymous classes, as doing so
- requires a "final" reference.
+ requires a "final" reference, as well as communicating aggregate
+ SQL function state across calls to such functions.
*/
public class ValueHolder {
public T value;
public ValueHolder(){}
public ValueHolder(T v){value = v;}
Index: ext/jni/src/org/sqlite/jni/capi/sqlite3.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/sqlite3.java
+++ ext/jni/src/org/sqlite/jni/capi/sqlite3.java
@@ -36,8 +36,8 @@
+"["+((null == fn) ? "" : fn)+"]"
;
}
@Override public void close(){
- CApi.sqlite3_close_v2(this.clearNativePointer());
+ CApi.sqlite3_close_v2(this);
}
}
Index: ext/jni/src/org/sqlite/jni/capi/sqlite3_blob.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/sqlite3_blob.java
+++ ext/jni/src/org/sqlite/jni/capi/sqlite3_blob.java
@@ -23,9 +23,8 @@
implements AutoCloseable {
// Only invoked from JNI.
private sqlite3_blob(){}
@Override public void close(){
- CApi.sqlite3_blob_close(this.clearNativePointer());
+ CApi.sqlite3_blob_close(this);
}
-
}
Index: ext/jni/src/org/sqlite/jni/capi/sqlite3_stmt.java
==================================================================
--- ext/jni/src/org/sqlite/jni/capi/sqlite3_stmt.java
+++ ext/jni/src/org/sqlite/jni/capi/sqlite3_stmt.java
@@ -23,8 +23,8 @@
implements AutoCloseable {
// Only invoked from JNI.
private sqlite3_stmt(){}
@Override public void close(){
- CApi.sqlite3_finalize(this.clearNativePointer());
+ CApi.sqlite3_finalize(this);
}
}
Index: ext/jni/src/org/sqlite/jni/fts5/TesterFts5.java
==================================================================
--- ext/jni/src/org/sqlite/jni/fts5/TesterFts5.java
+++ ext/jni/src/org/sqlite/jni/fts5/TesterFts5.java
@@ -623,14 +623,24 @@
);
do_execsql_test(db,
"SELECT fts5_columntext(ft, 1) FROM ft('x') ORDER BY rowid",
"[x, x, x y z, x z, x y z, x]"
);
- do_execsql_test(db,
- "SELECT fts5_columntext(ft, 2) FROM ft('x') ORDER BY rowid",
- "[null, null, null, null, null, null]"
- );
+ boolean threw = false;
+ try{
+ /* columntext() used to return NULLs when given an out-of-bounds column
+ but now results in a range error. */
+ do_execsql_test(db,
+ "SELECT fts5_columntext(ft, 2) FROM ft('x') ORDER BY rowid",
+ "[null, null, null, null, null, null]"
+ );
+ }catch(Exception e){
+ threw = true;
+ affirm( e.getMessage().matches(".*column index out of range") );
+ }
+ affirm( threw );
+ threw = false;
/* Test fts5_columntotalsize() */
do_execsql_test(db,
"SELECT fts5_columntotalsize(ft, 0) FROM ft('x') ORDER BY rowid",
"[12, 12, 12, 12, 12, 12]"
Index: ext/jni/src/org/sqlite/jni/wrapper1/AggregateFunction.java
==================================================================
--- ext/jni/src/org/sqlite/jni/wrapper1/AggregateFunction.java
+++ ext/jni/src/org/sqlite/jni/wrapper1/AggregateFunction.java
@@ -10,14 +10,10 @@
**
*************************************************************************
** This file is part of the wrapper1 interface for sqlite3.
*/
package org.sqlite.jni.wrapper1;
-import org.sqlite.jni.capi.CApi;
-import org.sqlite.jni.annotation.*;
-import org.sqlite.jni.capi.sqlite3_context;
-import org.sqlite.jni.capi.sqlite3_value;
/**
EXPERIMENTAL/INCOMPLETE/UNTESTED
A SqlFunction implementation for aggregate functions. The T type
@@ -48,14 +44,80 @@
/**
Optionally override to be notified when the UDF is finalized by
SQLite.
*/
public void xDestroy() {}
+
+ /**
+ PerContextState assists aggregate and window functions in
+ managing their accumulator state across calls to the UDF's
+ callbacks.
+
+ <p>T must be of a type which can be legally stored as a value in
+ java.util.HashMap.
+
+ <p>If a given aggregate or window function is called multiple times
+ in a single SQL statement, e.g. SELECT MYFUNC(A), MYFUNC(B)...,
+ then the clients need some way of knowing which call is which so
+ that they can map their state between their various UDF callbacks
+ and reset it via xFinal(). This class takes care of such
+ mappings.
+
+ <p>This class works by mapping
+ sqlite3_context.getAggregateContext() to a single piece of
+ state, of a client-defined type (the T part of this class), which
+ persists across a "matching set" of the UDF's callbacks.
+
+ <p>This class is a helper providing commonly-needed functionality
+ - it is not required for use with aggregate or window functions.
+ Client UDFs are free to perform such mappings using custom
+ approaches. The provided {@link AggregateFunction} and {@link
+ WindowFunction} classes use this.
+ */
+ public static final class PerContextState<T> {
+ private final java.util.Map<Long,ValueHolder<T>> map
+ = new java.util.HashMap<>();
+
+ /**
+ Should be called from a UDF's xStep(), xValue(), and xInverse()
+ methods, passing it that method's first argument and an initial
+ value for the persistent state. If there is currently no
+ mapping for the given context within the map, one is created
+ using the given initial value, else the existing one is used
+ and the 2nd argument is ignored. It returns a ValueHolder
+ which can be used to modify that state directly without
+ requiring that the client update the underlying map's entry.
+
+ <p>The caller is obligated to eventually call
+ takeAggregateState() to clear the mapping.
+ */
+ public ValueHolder<T> getAggregateState(SqlFunction.Arguments args, T initialValue){
+ final Long key = args.getContext().getAggregateContext(true);
+ ValueHolder<T> rc = null==key ? null : map.get(key);
+ if( null==rc ){
+ map.put(key, rc = new ValueHolder<>(initialValue));
+ }
+ return rc;
+ }
+
+ /**
+ Should be called from a UDF's xFinal() method and passed that
+ method's first argument. This function removes the value
+ associated with the arguments' aggregate context from the
+ map and returns it, returning null if no other UDF method has
+ been called to set up such a mapping. The latter condition will
+ be the case if a UDF is used in a statement which has no result
+ rows.
+ */
+ public T takeAggregateState(SqlFunction.Arguments args){
+ final ValueHolder<T> h = map.remove(args.getContext().getAggregateContext(false));
+ return null==h ? null : h.value;
+ }
+ }
/** Per-invocation state for the UDF. */
- private final SqlFunction.PerContextState<T> map =
- new SqlFunction.PerContextState<>();
+ private final PerContextState<T> map = new PerContextState<>();
/**
To be called from the implementation's xStep() method, as well
as the xValue() and xInverse() methods of the {@link WindowFunction}
subclass, to fetch the current per-call UDF state. On the
Index: ext/jni/src/org/sqlite/jni/wrapper1/SqlFunction.java
==================================================================
--- ext/jni/src/org/sqlite/jni/wrapper1/SqlFunction.java
+++ ext/jni/src/org/sqlite/jni/wrapper1/SqlFunction.java
@@ -20,10 +20,18 @@
Base marker interface for SQLite's three types of User-Defined SQL
Functions (UDFs): Scalar, Aggregate, and Window functions.
*/
public interface SqlFunction {
+ public static final int DETERMINISTIC = CApi.SQLITE_DETERMINISTIC;
+ public static final int INNOCUOUS = CApi.SQLITE_INNOCUOUS;
+ public static final int DIRECTONLY = CApi.SQLITE_DIRECTONLY;
+ public static final int SUBTYPE = CApi.SQLITE_SUBTYPE;
+ public static final int RESULT_SUBTYPE = CApi.SQLITE_RESULT_SUBTYPE;
+ public static final int UTF8 = CApi.SQLITE_UTF8;
+ public static final int UTF16 = CApi.SQLITE_UTF16;
+
/**
The Arguments type is an abstraction on top of the lower-level
UDF function argument types. It provides _most_ of the functionality
of the lower-level interface, insofar as possible without "leaking"
those types into this API.
@@ -43,16 +51,122 @@
Passing null for the args is equivalent to passing a length-0
array.
*/
Arguments(sqlite3_context cx, sqlite3_value args[]){
this.cx = cx;
- this.args = args==null ? new sqlite3_value[0] : args;;
+ this.args = args==null ? new sqlite3_value[0] : args;
this.length = this.args.length;
}
/**
- Wrapper for a single SqlFunction argument. Primarily intended
+ Returns the sqlite3_value at the given argument index or throws
+ an IllegalArgumentException exception if ndx is out of range.
+ */
+ private sqlite3_value valueAt(int ndx){
+ if(ndx<0 || ndx>=args.length){
+ throw new IllegalArgumentException(
+ "SQL function argument index "+ndx+" is out of range."
+ );
+ }
+ return args[ndx];
+ }
+
+ //! Returns the underlying sqlite3_context for these arguments.
+ sqlite3_context getContext(){return cx;}
+
+ /**
+ Returns the Sqlite (db) object associated with this UDF call,
+ or null if the UDF is somehow called without such an object or
+ the db has been closed in an untimely manner (e.g. closed by a
+ UDF call).
+ */
+ public Sqlite getDb(){
+ return Sqlite.fromNative( CApi.sqlite3_context_db_handle(cx) );
+ }
+
+ public int getArgCount(){ return args.length; }
+
+ public int getInt(int argNdx){return CApi.sqlite3_value_int(valueAt(argNdx));}
+ public long getInt64(int argNdx){return CApi.sqlite3_value_int64(valueAt(argNdx));}
+ public double getDouble(int argNdx){return CApi.sqlite3_value_double(valueAt(argNdx));}
+ public byte[] getBlob(int argNdx){return CApi.sqlite3_value_blob(valueAt(argNdx));}
+ public byte[] getText(int argNdx){return CApi.sqlite3_value_text(valueAt(argNdx));}
+ public String getText16(int argNdx){return CApi.sqlite3_value_text16(valueAt(argNdx));}
+ public int getBytes(int argNdx){return CApi.sqlite3_value_bytes(valueAt(argNdx));}
+ public int getBytes16(int argNdx){return CApi.sqlite3_value_bytes16(valueAt(argNdx));}
+ public Object getObject(int argNdx){return CApi.sqlite3_value_java_object(valueAt(argNdx));}
+ public <T> T getObject(int argNdx, Class<T> type){
+ return CApi.sqlite3_value_java_object(valueAt(argNdx), type);
+ }
+
+ public int getType(int argNdx){return CApi.sqlite3_value_type(valueAt(argNdx));}
+ public int getSubtype(int argNdx){return CApi.sqlite3_value_subtype(valueAt(argNdx));}
+ public int getNumericType(int argNdx){return CApi.sqlite3_value_numeric_type(valueAt(argNdx));}
+ public int getNoChange(int argNdx){return CApi.sqlite3_value_nochange(valueAt(argNdx));}
+ public boolean getFromBind(int argNdx){return CApi.sqlite3_value_frombind(valueAt(argNdx));}
+ public int getEncoding(int argNdx){return CApi.sqlite3_value_encoding(valueAt(argNdx));}
+
+ public void resultInt(int v){ CApi.sqlite3_result_int(cx, v); }
+ public void resultInt64(long v){ CApi.sqlite3_result_int64(cx, v); }
+ public void resultDouble(double v){ CApi.sqlite3_result_double(cx, v); }
+ public void resultError(String msg){CApi.sqlite3_result_error(cx, msg);}
+ public void resultError(Exception e){CApi.sqlite3_result_error(cx, e);}
+ public void resultErrorTooBig(){CApi.sqlite3_result_error_toobig(cx);}
+ public void resultErrorCode(int rc){CApi.sqlite3_result_error_code(cx, rc);}
+ public void resultObject(Object o){CApi.sqlite3_result_java_object(cx, o);}
+ public void resultNull(){CApi.sqlite3_result_null(cx);}
+ /**
+ Analog to sqlite3_result_value(), using the Value object at the
+ given argument index.
+ */
+ public void resultArg(int argNdx){CApi.sqlite3_result_value(cx, valueAt(argNdx));}
+ public void resultSubtype(int subtype){CApi.sqlite3_result_subtype(cx, subtype);}
+ public void resultZeroBlob(long n){
+ // Throw on error? If n is too big,
+ // sqlite3_result_error_toobig() is automatically called.
+ CApi.sqlite3_result_zeroblob64(cx, n);
+ }
+
+ public void resultBlob(byte[] blob){CApi.sqlite3_result_blob(cx, blob);}
+ public void resultText(byte[] utf8){CApi.sqlite3_result_text(cx, utf8);}
+ public void resultText(String txt){CApi.sqlite3_result_text(cx, txt);}
+ public void resultText16(byte[] utf16){CApi.sqlite3_result_text16(cx, utf16);}
+ public void resultText16(String txt){CApi.sqlite3_result_text16(cx, txt);}
+
+ /**
+ Callbacks should invoke this on OOM errors, instead of throwing
+ OutOfMemoryError, because the latter cannot be propagated
+ through the C API.
+ */
+ public void resultNoMem(){CApi.sqlite3_result_error_nomem(cx);}
+
+ /**
+ Analog to sqlite3_set_auxdata() but throws if argNdx is out of
+ range.
+ */
+ public void setAuxData(int argNdx, Object o){
+ /* From the API docs: https://www.sqlite.org/c3ref/get_auxdata.html
+
+ The value of the N parameter to these interfaces should be
+ non-negative. Future enhancements may make use of negative N
+ values to define new kinds of function caching behavior.
+ */
+ valueAt(argNdx);
+ CApi.sqlite3_set_auxdata(cx, argNdx, o);
+ }
+
+ /**
+ Analog to sqlite3_get_auxdata() but throws if argNdx is out of
+ range.
+ */
+ public Object getAuxData(int argNdx){
+ valueAt(argNdx);
+ return CApi.sqlite3_get_auxdata(cx, argNdx);
+ }
+
+ /**
+ Represents a single SqlFunction argument. Primarily intended
for use with the Arguments class's Iterable interface.
*/
public final static class Arg {
private final Arguments a;
private final int ndx;
@@ -70,11 +184,11 @@
public byte[] getText(){return a.getText(ndx);}
public String getText16(){return a.getText16(ndx);}
public int getBytes(){return a.getBytes(ndx);}
public int getBytes16(){return a.getBytes16(ndx);}
public Object getObject(){return a.getObject(ndx);}
- public <T> T getObjectCasted(Class<T> type){ return a.getObjectCasted(ndx, type); }
+ public <T> T getObject(Class<T> type){ return a.getObject(ndx, type); }
public int getType(){return a.getType(ndx);}
public Object getAuxData(){return a.getAuxData(ndx);}
public void setAuxData(Object o){a.setAuxData(ndx, o);}
}
@@ -85,159 +199,18 @@
proxies[i] = new Arg(this, i);
}
return java.util.Arrays.stream(proxies).iterator();
}
- /**
- Returns the sqlite3_value at the given argument index or throws
- an IllegalArgumentException exception if ndx is out of range.
- */
- private sqlite3_value valueAt(int ndx){
- if(ndx<0 || ndx>=args.length){
- throw new IllegalArgumentException(
- "SQL function argument index "+ndx+" is out of range."
- );
- }
- return args[ndx];
- }
-
- sqlite3_context getContext(){return cx;}
-
- public int getArgCount(){ return args.length; }
-
- public int getInt(int arg){return CApi.sqlite3_value_int(valueAt(arg));}
- public long getInt64(int arg){return CApi.sqlite3_value_int64(valueAt(arg));}
- public double getDouble(int arg){return CApi.sqlite3_value_double(valueAt(arg));}
- public byte[] getBlob(int arg){return CApi.sqlite3_value_blob(valueAt(arg));}
- public byte[] getText(int arg){return CApi.sqlite3_value_text(valueAt(arg));}
- public String getText16(int arg){return CApi.sqlite3_value_text16(valueAt(arg));}
- public int getBytes(int arg){return CApi.sqlite3_value_bytes(valueAt(arg));}
- public int getBytes16(int arg){return CApi.sqlite3_value_bytes16(valueAt(arg));}
- public Object getObject(int arg){return CApi.sqlite3_value_java_object(valueAt(arg));}
- public <T> T getObjectCasted(int arg, Class<T> type){
- return CApi.sqlite3_value_java_casted(valueAt(arg), type);
- }
-
- public int getType(int arg){return CApi.sqlite3_value_type(valueAt(arg));}
- public int getSubtype(int arg){return CApi.sqlite3_value_subtype(valueAt(arg));}
- public int getNumericType(int arg){return CApi.sqlite3_value_numeric_type(valueAt(arg));}
- public int getNoChange(int arg){return CApi.sqlite3_value_nochange(valueAt(arg));}
- public boolean getFromBind(int arg){return CApi.sqlite3_value_frombind(valueAt(arg));}
- public int getEncoding(int arg){return CApi.sqlite3_value_encoding(valueAt(arg));}
-
- public void resultInt(int v){ CApi.sqlite3_result_int(cx, v); }
- public void resultInt64(long v){ CApi.sqlite3_result_int64(cx, v); }
- public void resultDouble(double v){ CApi.sqlite3_result_double(cx, v); }
- public void resultError(String msg){CApi.sqlite3_result_error(cx, msg);}
- public void resultError(Exception e){CApi.sqlite3_result_error(cx, e);}
- public void resultErrorTooBig(){CApi.sqlite3_result_error_toobig(cx);}
- public void resultErrorCode(int rc){CApi.sqlite3_result_error_code(cx, rc);}
- public void resultObject(Object o){CApi.sqlite3_result_java_object(cx, o);}
- public void resultNull(){CApi.sqlite3_result_null(cx);}
- public void resultArg(int argNdx){CApi.sqlite3_result_value(cx, valueAt(argNdx));}
- public void resultZeroBlob(long n){
- // Throw on error? If n is too big,
- // sqlite3_result_error_toobig() is automatically called.
- CApi.sqlite3_result_zeroblob64(cx, n);
- }
-
- public void resultBlob(byte[] blob){CApi.sqlite3_result_blob(cx, blob);}
- public void resultText(byte[] utf8){CApi.sqlite3_result_text(cx, utf8);}
- public void resultText(String txt){CApi.sqlite3_result_text(cx, txt);}
- public void resultText16(byte[] utf16){CApi.sqlite3_result_text16(cx, utf16);}
- public void resultText16(String txt){CApi.sqlite3_result_text16(cx, txt);}
-
- public void setAuxData(int arg, Object o){
- /* From the API docs: https://www.sqlite.org/c3ref/get_auxdata.html
-
- The value of the N parameter to these interfaces should be
- non-negative. Future enhancements may make use of negative N
- values to define new kinds of function caching behavior.
- */
- valueAt(arg);
- CApi.sqlite3_set_auxdata(cx, arg, o);
- }
-
- public Object getAuxData(int arg){
- valueAt(arg);
- return CApi.sqlite3_get_auxdata(cx, arg);
- }
- }
-
- /**
- PerContextState assists aggregate and window functions in
- managing their accumulator state across calls to the UDF's
- callbacks.
-
- <p>T must be of a type which can be legally stored as a value in
- java.util.HashMap.
-
- <p>If a given aggregate or window function is called multiple times
- in a single SQL statement, e.g. SELECT MYFUNC(A), MYFUNC(B)...,
- then the clients need some way of knowing which call is which so
- that they can map their state between their various UDF callbacks
- and reset it via xFinal(). This class takes care of such
- mappings.
-
- <p>This class works by mapping
- sqlite3_context.getAggregateContext() to a single piece of
- state, of a client-defined type (the T part of this class), which
- persists across a "matching set" of the UDF's callbacks.
-
- <p>This class is a helper providing commonly-needed functionality
- - it is not required for use with aggregate or window functions.
- Client UDFs are free to perform such mappings using custom
- approaches. The provided {@link AggregateFunction} and {@link
- WindowFunction} classes use this.
- */
- public static final class PerContextState<T> {
- private final java.util.Map<Long,ValueHolder<T>> map
- = new java.util.HashMap<>();
-
- /**
- Should be called from a UDF's xStep(), xValue(), and xInverse()
- methods, passing it that method's first argument and an initial
- value for the persistent state. If there is currently no
- mapping for the given context within the map, one is created
- using the given initial value, else the existing one is used
- and the 2nd argument is ignored. It returns a ValueHolder
- which can be used to modify that state directly without
- requiring that the client update the underlying map's entry.
-
- <p>The caller is obligated to eventually call
- takeAggregateState() to clear the mapping.
- */
- public ValueHolder<T> getAggregateState(SqlFunction.Arguments args, T initialValue){
- final Long key = args.getContext().getAggregateContext(true);
- ValueHolder<T> rc = null==key ? null : map.get(key);
- if( null==rc ){
- map.put(key, rc = new ValueHolder<>(initialValue));
- }
- return rc;
- }
-
- /**
- Should be called from a UDF's xFinal() method and passed that
- method's first argument. This function removes the value
- associated with with the arguments' aggregate context from the
- map and returns it, returning null if no other UDF method has
- been called to set up such a mapping. The latter condition will
- be the case if a UDF is used in a statement which has no result
- rows.
- */
- public T takeAggregateState(SqlFunction.Arguments args){
- final ValueHolder<T> h = map.remove(args.getContext().getAggregateContext(false));
- return null==h ? null : h.value;
- }
}
/**
Internal-use adapter for wrapping this package's ScalarFunction
for use with the org.sqlite.jni.capi.ScalarFunction interface.
*/
static final class ScalarAdapter extends org.sqlite.jni.capi.ScalarFunction {
- final ScalarFunction impl;
+ private final ScalarFunction impl;
ScalarAdapter(ScalarFunction impl){
this.impl = impl;
}
/**
Proxies this.impl.xFunc(), adapting the call arguments to that
@@ -259,12 +232,13 @@
/**
Internal-use adapter for wrapping this package's AggregateFunction
for use with the org.sqlite.jni.capi.AggregateFunction interface.
*/
- static final class AggregateAdapter extends org.sqlite.jni.capi.AggregateFunction {
- final AggregateFunction impl;
+ static /*cannot be final without duplicating the whole body in WindowAdapter*/
+ class AggregateAdapter extends org.sqlite.jni.capi.AggregateFunction {
+ private final AggregateFunction impl;
AggregateAdapter(AggregateFunction impl){
this.impl = impl;
}
/**
@@ -280,22 +254,65 @@
CApi.sqlite3_result_error(cx, e);
}
}
/**
- As for the xFinal() argument of the C API's sqlite3_create_function().
- If the proxied function throws, it is translated into a sqlite3_result_error().
+ As for the xFinal() argument of the C API's
+ sqlite3_create_function(). If the proxied function throws, it
+ is translated into a sqlite3_result_error().
*/
public void xFinal(sqlite3_context cx){
try{
impl.xFinal( new SqlFunction.Arguments(cx, null) );
}catch(Exception e){
CApi.sqlite3_result_error(cx, e);
}
}
+
+ public void xDestroy(){
+ impl.xDestroy();
+ }
+ }
+
+ /**
+ Internal-use adapter for wrapping this package's WindowFunction
+ for use with the org.sqlite.jni.capi.WindowFunction interface.
+ */
+ static final class WindowAdapter extends AggregateAdapter {
+ private final WindowFunction impl;
+ WindowAdapter(WindowFunction impl){
+ super(impl);
+ this.impl = impl;
+ }
+
+ /**
+ Proxies this.impl.xInverse(), adapting the call arguments to that
+ function's signature. If the proxied function throws, it is
+ translated to sqlite_result_error() with the exception's
+ message.
+ */
+ public void xInverse(sqlite3_context cx, sqlite3_value[] args){
+ try{
+ impl.xInverse( new SqlFunction.Arguments(cx, args) );
+ }catch(Exception e){
+ CApi.sqlite3_result_error(cx, e);
+ }
+ }
+
+ /**
+ As for the xValue() argument of the C API's sqlite3_create_window_function().
+ If the proxied function throws, it is translated into a sqlite3_result_error().
+ */
+ public void xValue(sqlite3_context cx){
+ try{
+ impl.xValue( new SqlFunction.Arguments(cx, null) );
+ }catch(Exception e){
+ CApi.sqlite3_result_error(cx, e);
+ }
+ }
public void xDestroy(){
impl.xDestroy();
}
}
}
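(Illustrative sketch, not part of this patch, and assuming wrapper1's ScalarFunction is an abstract class exposing the xFunc(Arguments) callback which ScalarAdapter above proxies: the renamed getObject() accessors and the other Arguments helpers keep client code free of the capi types.)

  import org.sqlite.jni.wrapper1.ScalarFunction;
  import org.sqlite.jni.wrapper1.SqlFunction;

  // Hypothetical example class; only Arguments accessors shown in the
  // hunks above are used.
  public class TimesTwo extends ScalarFunction {
    @Override public void xFunc(SqlFunction.Arguments args){
      if( 1 != args.getArgCount() ){
        args.resultError("times_two() requires exactly 1 argument.");
        return;
      }
      args.resultInt64( 2L * args.getInt64(0) );
    }
  }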
Index: ext/jni/src/org/sqlite/jni/wrapper1/Sqlite.java
==================================================================
--- ext/jni/src/org/sqlite/jni/wrapper1/Sqlite.java
+++ ext/jni/src/org/sqlite/jni/wrapper1/Sqlite.java
@@ -11,15 +11,17 @@
*************************************************************************
** This file is part of the wrapper1 interface for sqlite3.
*/
package org.sqlite.jni.wrapper1;
import java.nio.charset.StandardCharsets;
-import static org.sqlite.jni.capi.CApi.*;
import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.sqlite3;
import org.sqlite.jni.capi.sqlite3_stmt;
+import org.sqlite.jni.capi.sqlite3_backup;
+import org.sqlite.jni.capi.sqlite3_blob;
import org.sqlite.jni.capi.OutputPointer;
+import java.nio.ByteBuffer;
/**
This class represents a database connection, analog to the C-side
sqlite3 class but with added argument validation, exceptions, and
similar "smoothing of sharp edges" to make the API safe to use from
@@ -26,15 +28,299 @@
Java. It also acts as a namespace for other types for which
individual instances are tied to a specific database connection.
*/
public final class Sqlite implements AutoCloseable {
private sqlite3 db;
+ private static final boolean JNI_SUPPORTS_NIO =
+ CApi.sqlite3_jni_supports_nio();
+
+ // Result codes
+ public static final int OK = CApi.SQLITE_OK;
+ public static final int ERROR = CApi.SQLITE_ERROR;
+ public static final int INTERNAL = CApi.SQLITE_INTERNAL;
+ public static final int PERM = CApi.SQLITE_PERM;
+ public static final int ABORT = CApi.SQLITE_ABORT;
+ public static final int BUSY = CApi.SQLITE_BUSY;
+ public static final int LOCKED = CApi.SQLITE_LOCKED;
+ public static final int NOMEM = CApi.SQLITE_NOMEM;
+ public static final int READONLY = CApi.SQLITE_READONLY;
+ public static final int INTERRUPT = CApi.SQLITE_INTERRUPT;
+ public static final int IOERR = CApi.SQLITE_IOERR;
+ public static final int CORRUPT = CApi.SQLITE_CORRUPT;
+ public static final int NOTFOUND = CApi.SQLITE_NOTFOUND;
+ public static final int FULL = CApi.SQLITE_FULL;
+ public static final int CANTOPEN = CApi.SQLITE_CANTOPEN;
+ public static final int PROTOCOL = CApi.SQLITE_PROTOCOL;
+ public static final int EMPTY = CApi.SQLITE_EMPTY;
+ public static final int SCHEMA = CApi.SQLITE_SCHEMA;
+ public static final int TOOBIG = CApi.SQLITE_TOOBIG;
+ public static final int CONSTRAINT = CApi.SQLITE_CONSTRAINT;
+ public static final int MISMATCH = CApi.SQLITE_MISMATCH;
+ public static final int MISUSE = CApi.SQLITE_MISUSE;
+ public static final int NOLFS = CApi.SQLITE_NOLFS;
+ public static final int AUTH = CApi.SQLITE_AUTH;
+ public static final int FORMAT = CApi.SQLITE_FORMAT;
+ public static final int RANGE = CApi.SQLITE_RANGE;
+ public static final int NOTADB = CApi.SQLITE_NOTADB;
+ public static final int NOTICE = CApi.SQLITE_NOTICE;
+ public static final int WARNING = CApi.SQLITE_WARNING;
+ public static final int ROW = CApi.SQLITE_ROW;
+ public static final int DONE = CApi.SQLITE_DONE;
+ public static final int ERROR_MISSING_COLLSEQ = CApi.SQLITE_ERROR_MISSING_COLLSEQ;
+ public static final int ERROR_RETRY = CApi.SQLITE_ERROR_RETRY;
+ public static final int ERROR_SNAPSHOT = CApi.SQLITE_ERROR_SNAPSHOT;
+ public static final int IOERR_READ = CApi.SQLITE_IOERR_READ;
+ public static final int IOERR_SHORT_READ = CApi.SQLITE_IOERR_SHORT_READ;
+ public static final int IOERR_WRITE = CApi.SQLITE_IOERR_WRITE;
+ public static final int IOERR_FSYNC = CApi.SQLITE_IOERR_FSYNC;
+ public static final int IOERR_DIR_FSYNC = CApi.SQLITE_IOERR_DIR_FSYNC;
+ public static final int IOERR_TRUNCATE = CApi.SQLITE_IOERR_TRUNCATE;
+ public static final int IOERR_FSTAT = CApi.SQLITE_IOERR_FSTAT;
+ public static final int IOERR_UNLOCK = CApi.SQLITE_IOERR_UNLOCK;
+ public static final int IOERR_RDLOCK = CApi.SQLITE_IOERR_RDLOCK;
+ public static final int IOERR_DELETE = CApi.SQLITE_IOERR_DELETE;
+ public static final int IOERR_BLOCKED = CApi.SQLITE_IOERR_BLOCKED;
+ public static final int IOERR_NOMEM = CApi.SQLITE_IOERR_NOMEM;
+ public static final int IOERR_ACCESS = CApi.SQLITE_IOERR_ACCESS;
+ public static final int IOERR_CHECKRESERVEDLOCK = CApi.SQLITE_IOERR_CHECKRESERVEDLOCK;
+ public static final int IOERR_LOCK = CApi.SQLITE_IOERR_LOCK;
+ public static final int IOERR_CLOSE = CApi.SQLITE_IOERR_CLOSE;
+ public static final int IOERR_DIR_CLOSE = CApi.SQLITE_IOERR_DIR_CLOSE;
+ public static final int IOERR_SHMOPEN = CApi.SQLITE_IOERR_SHMOPEN;
+ public static final int IOERR_SHMSIZE = CApi.SQLITE_IOERR_SHMSIZE;
+ public static final int IOERR_SHMLOCK = CApi.SQLITE_IOERR_SHMLOCK;
+ public static final int IOERR_SHMMAP = CApi.SQLITE_IOERR_SHMMAP;
+ public static final int IOERR_SEEK = CApi.SQLITE_IOERR_SEEK;
+ public static final int IOERR_DELETE_NOENT = CApi.SQLITE_IOERR_DELETE_NOENT;
+ public static final int IOERR_MMAP = CApi.SQLITE_IOERR_MMAP;
+ public static final int IOERR_GETTEMPPATH = CApi.SQLITE_IOERR_GETTEMPPATH;
+ public static final int IOERR_CONVPATH = CApi.SQLITE_IOERR_CONVPATH;
+ public static final int IOERR_VNODE = CApi.SQLITE_IOERR_VNODE;
+ public static final int IOERR_AUTH = CApi.SQLITE_IOERR_AUTH;
+ public static final int IOERR_BEGIN_ATOMIC = CApi.SQLITE_IOERR_BEGIN_ATOMIC;
+ public static final int IOERR_COMMIT_ATOMIC = CApi.SQLITE_IOERR_COMMIT_ATOMIC;
+ public static final int IOERR_ROLLBACK_ATOMIC = CApi.SQLITE_IOERR_ROLLBACK_ATOMIC;
+ public static final int IOERR_DATA = CApi.SQLITE_IOERR_DATA;
+ public static final int IOERR_CORRUPTFS = CApi.SQLITE_IOERR_CORRUPTFS;
+ public static final int LOCKED_SHAREDCACHE = CApi.SQLITE_LOCKED_SHAREDCACHE;
+ public static final int LOCKED_VTAB = CApi.SQLITE_LOCKED_VTAB;
+ public static final int BUSY_RECOVERY = CApi.SQLITE_BUSY_RECOVERY;
+ public static final int BUSY_SNAPSHOT = CApi.SQLITE_BUSY_SNAPSHOT;
+ public static final int BUSY_TIMEOUT = CApi.SQLITE_BUSY_TIMEOUT;
+ public static final int CANTOPEN_NOTEMPDIR = CApi.SQLITE_CANTOPEN_NOTEMPDIR;
+ public static final int CANTOPEN_ISDIR = CApi.SQLITE_CANTOPEN_ISDIR;
+ public static final int CANTOPEN_FULLPATH = CApi.SQLITE_CANTOPEN_FULLPATH;
+ public static final int CANTOPEN_CONVPATH = CApi.SQLITE_CANTOPEN_CONVPATH;
+ public static final int CANTOPEN_SYMLINK = CApi.SQLITE_CANTOPEN_SYMLINK;
+ public static final int CORRUPT_VTAB = CApi.SQLITE_CORRUPT_VTAB;
+ public static final int CORRUPT_SEQUENCE = CApi.SQLITE_CORRUPT_SEQUENCE;
+ public static final int CORRUPT_INDEX = CApi.SQLITE_CORRUPT_INDEX;
+ public static final int READONLY_RECOVERY = CApi.SQLITE_READONLY_RECOVERY;
+ public static final int READONLY_CANTLOCK = CApi.SQLITE_READONLY_CANTLOCK;
+ public static final int READONLY_ROLLBACK = CApi.SQLITE_READONLY_ROLLBACK;
+ public static final int READONLY_DBMOVED = CApi.SQLITE_READONLY_DBMOVED;
+ public static final int READONLY_CANTINIT = CApi.SQLITE_READONLY_CANTINIT;
+ public static final int READONLY_DIRECTORY = CApi.SQLITE_READONLY_DIRECTORY;
+ public static final int ABORT_ROLLBACK = CApi.SQLITE_ABORT_ROLLBACK;
+ public static final int CONSTRAINT_CHECK = CApi.SQLITE_CONSTRAINT_CHECK;
+ public static final int CONSTRAINT_COMMITHOOK = CApi.SQLITE_CONSTRAINT_COMMITHOOK;
+ public static final int CONSTRAINT_FOREIGNKEY = CApi.SQLITE_CONSTRAINT_FOREIGNKEY;
+ public static final int CONSTRAINT_FUNCTION = CApi.SQLITE_CONSTRAINT_FUNCTION;
+ public static final int CONSTRAINT_NOTNULL = CApi.SQLITE_CONSTRAINT_NOTNULL;
+ public static final int CONSTRAINT_PRIMARYKEY = CApi.SQLITE_CONSTRAINT_PRIMARYKEY;
+ public static final int CONSTRAINT_TRIGGER = CApi.SQLITE_CONSTRAINT_TRIGGER;
+ public static final int CONSTRAINT_UNIQUE = CApi.SQLITE_CONSTRAINT_UNIQUE;
+ public static final int CONSTRAINT_VTAB = CApi.SQLITE_CONSTRAINT_VTAB;
+ public static final int CONSTRAINT_ROWID = CApi.SQLITE_CONSTRAINT_ROWID;
+ public static final int CONSTRAINT_PINNED = CApi.SQLITE_CONSTRAINT_PINNED;
+ public static final int CONSTRAINT_DATATYPE = CApi.SQLITE_CONSTRAINT_DATATYPE;
+ public static final int NOTICE_RECOVER_WAL = CApi.SQLITE_NOTICE_RECOVER_WAL;
+ public static final int NOTICE_RECOVER_ROLLBACK = CApi.SQLITE_NOTICE_RECOVER_ROLLBACK;
+ public static final int WARNING_AUTOINDEX = CApi.SQLITE_WARNING_AUTOINDEX;
+ public static final int AUTH_USER = CApi.SQLITE_AUTH_USER;
+ public static final int OK_LOAD_PERMANENTLY = CApi.SQLITE_OK_LOAD_PERMANENTLY;
+
+ // sqlite3_open() flags
+ public static final int OPEN_READWRITE = CApi.SQLITE_OPEN_READWRITE;
+ public static final int OPEN_CREATE = CApi.SQLITE_OPEN_CREATE;
+ public static final int OPEN_EXRESCODE = CApi.SQLITE_OPEN_EXRESCODE;
+
+ // transaction state
+ public static final int TXN_NONE = CApi.SQLITE_TXN_NONE;
+ public static final int TXN_READ = CApi.SQLITE_TXN_READ;
+ public static final int TXN_WRITE = CApi.SQLITE_TXN_WRITE;
+
+ // sqlite3_status() ops
+ public static final int STATUS_MEMORY_USED = CApi.SQLITE_STATUS_MEMORY_USED;
+ public static final int STATUS_PAGECACHE_USED = CApi.SQLITE_STATUS_PAGECACHE_USED;
+ public static final int STATUS_PAGECACHE_OVERFLOW = CApi.SQLITE_STATUS_PAGECACHE_OVERFLOW;
+ public static final int STATUS_MALLOC_SIZE = CApi.SQLITE_STATUS_MALLOC_SIZE;
+ public static final int STATUS_PARSER_STACK = CApi.SQLITE_STATUS_PARSER_STACK;
+ public static final int STATUS_PAGECACHE_SIZE = CApi.SQLITE_STATUS_PAGECACHE_SIZE;
+ public static final int STATUS_MALLOC_COUNT = CApi.SQLITE_STATUS_MALLOC_COUNT;
+
+ // sqlite3_db_status() ops
+ public static final int DBSTATUS_LOOKASIDE_USED = CApi.SQLITE_DBSTATUS_LOOKASIDE_USED;
+ public static final int DBSTATUS_CACHE_USED = CApi.SQLITE_DBSTATUS_CACHE_USED;
+ public static final int DBSTATUS_SCHEMA_USED = CApi.SQLITE_DBSTATUS_SCHEMA_USED;
+ public static final int DBSTATUS_STMT_USED = CApi.SQLITE_DBSTATUS_STMT_USED;
+ public static final int DBSTATUS_LOOKASIDE_HIT = CApi.SQLITE_DBSTATUS_LOOKASIDE_HIT;
+ public static final int DBSTATUS_LOOKASIDE_MISS_SIZE = CApi.SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE;
+ public static final int DBSTATUS_LOOKASIDE_MISS_FULL = CApi.SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL;
+ public static final int DBSTATUS_CACHE_HIT = CApi.SQLITE_DBSTATUS_CACHE_HIT;
+ public static final int DBSTATUS_CACHE_MISS = CApi.SQLITE_DBSTATUS_CACHE_MISS;
+ public static final int DBSTATUS_CACHE_WRITE = CApi.SQLITE_DBSTATUS_CACHE_WRITE;
+ public static final int DBSTATUS_DEFERRED_FKS = CApi.SQLITE_DBSTATUS_DEFERRED_FKS;
+ public static final int DBSTATUS_CACHE_USED_SHARED = CApi.SQLITE_DBSTATUS_CACHE_USED_SHARED;
+ public static final int DBSTATUS_CACHE_SPILL = CApi.SQLITE_DBSTATUS_CACHE_SPILL;
+
+ // Limits
+ public static final int LIMIT_LENGTH = CApi.SQLITE_LIMIT_LENGTH;
+ public static final int LIMIT_SQL_LENGTH = CApi.SQLITE_LIMIT_SQL_LENGTH;
+ public static final int LIMIT_COLUMN = CApi.SQLITE_LIMIT_COLUMN;
+ public static final int LIMIT_EXPR_DEPTH = CApi.SQLITE_LIMIT_EXPR_DEPTH;
+ public static final int LIMIT_COMPOUND_SELECT = CApi.SQLITE_LIMIT_COMPOUND_SELECT;
+ public static final int LIMIT_VDBE_OP = CApi.SQLITE_LIMIT_VDBE_OP;
+ public static final int LIMIT_FUNCTION_ARG = CApi.SQLITE_LIMIT_FUNCTION_ARG;
+ public static final int LIMIT_ATTACHED = CApi.SQLITE_LIMIT_ATTACHED;
+ public static final int LIMIT_LIKE_PATTERN_LENGTH = CApi.SQLITE_LIMIT_LIKE_PATTERN_LENGTH;
+ public static final int LIMIT_VARIABLE_NUMBER = CApi.SQLITE_LIMIT_VARIABLE_NUMBER;
+ public static final int LIMIT_TRIGGER_DEPTH = CApi.SQLITE_LIMIT_TRIGGER_DEPTH;
+ public static final int LIMIT_WORKER_THREADS = CApi.SQLITE_LIMIT_WORKER_THREADS;
+
+ // sqlite3_prepare_v3() flags
+ public static final int PREPARE_PERSISTENT = CApi.SQLITE_PREPARE_PERSISTENT;
+ public static final int PREPARE_NO_VTAB = CApi.SQLITE_PREPARE_NO_VTAB;
+
+ // sqlite3_trace_v2() flags
+ public static final int TRACE_STMT = CApi.SQLITE_TRACE_STMT;
+ public static final int TRACE_PROFILE = CApi.SQLITE_TRACE_PROFILE;
+ public static final int TRACE_ROW = CApi.SQLITE_TRACE_ROW;
+ public static final int TRACE_CLOSE = CApi.SQLITE_TRACE_CLOSE;
+ public static final int TRACE_ALL = TRACE_STMT | TRACE_PROFILE | TRACE_ROW | TRACE_CLOSE;
+
+ // sqlite3_db_config() ops
+ public static final int DBCONFIG_ENABLE_FKEY = CApi.SQLITE_DBCONFIG_ENABLE_FKEY;
+ public static final int DBCONFIG_ENABLE_TRIGGER = CApi.SQLITE_DBCONFIG_ENABLE_TRIGGER;
+ public static final int DBCONFIG_ENABLE_FTS3_TOKENIZER = CApi.SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER;
+ public static final int DBCONFIG_ENABLE_LOAD_EXTENSION = CApi.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION;
+ public static final int DBCONFIG_NO_CKPT_ON_CLOSE = CApi.SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE;
+ public static final int DBCONFIG_ENABLE_QPSG = CApi.SQLITE_DBCONFIG_ENABLE_QPSG;
+ public static final int DBCONFIG_TRIGGER_EQP = CApi.SQLITE_DBCONFIG_TRIGGER_EQP;
+ public static final int DBCONFIG_RESET_DATABASE = CApi.SQLITE_DBCONFIG_RESET_DATABASE;
+ public static final int DBCONFIG_DEFENSIVE = CApi.SQLITE_DBCONFIG_DEFENSIVE;
+ public static final int DBCONFIG_WRITABLE_SCHEMA = CApi.SQLITE_DBCONFIG_WRITABLE_SCHEMA;
+ public static final int DBCONFIG_LEGACY_ALTER_TABLE = CApi.SQLITE_DBCONFIG_LEGACY_ALTER_TABLE;
+ public static final int DBCONFIG_DQS_DML = CApi.SQLITE_DBCONFIG_DQS_DML;
+ public static final int DBCONFIG_DQS_DDL = CApi.SQLITE_DBCONFIG_DQS_DDL;
+ public static final int DBCONFIG_ENABLE_VIEW = CApi.SQLITE_DBCONFIG_ENABLE_VIEW;
+ public static final int DBCONFIG_LEGACY_FILE_FORMAT = CApi.SQLITE_DBCONFIG_LEGACY_FILE_FORMAT;
+ public static final int DBCONFIG_TRUSTED_SCHEMA = CApi.SQLITE_DBCONFIG_TRUSTED_SCHEMA;
+ public static final int DBCONFIG_STMT_SCANSTATUS = CApi.SQLITE_DBCONFIG_STMT_SCANSTATUS;
+ public static final int DBCONFIG_REVERSE_SCANORDER = CApi.SQLITE_DBCONFIG_REVERSE_SCANORDER;
+
+ // sqlite3_config() ops
+ public static final int CONFIG_SINGLETHREAD = CApi.SQLITE_CONFIG_SINGLETHREAD;
+ public static final int CONFIG_MULTITHREAD = CApi.SQLITE_CONFIG_MULTITHREAD;
+ public static final int CONFIG_SERIALIZED = CApi.SQLITE_CONFIG_SERIALIZED;
+
+ // Encodings
+ public static final int UTF8 = CApi.SQLITE_UTF8;
+ public static final int UTF16 = CApi.SQLITE_UTF16;
+ public static final int UTF16LE = CApi.SQLITE_UTF16LE;
+ public static final int UTF16BE = CApi.SQLITE_UTF16BE;
+ /* We elide UTF16_ALIGNED from this interface because it
+ is irrelevant for the Java interface. */
+
+ // SQL data type IDs
+ public static final int INTEGER = CApi.SQLITE_INTEGER;
+ public static final int FLOAT = CApi.SQLITE_FLOAT;
+ public static final int TEXT = CApi.SQLITE_TEXT;
+ public static final int BLOB = CApi.SQLITE_BLOB;
+ public static final int NULL = CApi.SQLITE_NULL;
+
+ // Authorizer codes.
+ public static final int DENY = CApi.SQLITE_DENY;
+ public static final int IGNORE = CApi.SQLITE_IGNORE;
+ public static final int CREATE_INDEX = CApi.SQLITE_CREATE_INDEX;
+ public static final int CREATE_TABLE = CApi.SQLITE_CREATE_TABLE;
+ public static final int CREATE_TEMP_INDEX = CApi.SQLITE_CREATE_TEMP_INDEX;
+ public static final int CREATE_TEMP_TABLE = CApi.SQLITE_CREATE_TEMP_TABLE;
+ public static final int CREATE_TEMP_TRIGGER = CApi.SQLITE_CREATE_TEMP_TRIGGER;
+ public static final int CREATE_TEMP_VIEW = CApi.SQLITE_CREATE_TEMP_VIEW;
+ public static final int CREATE_TRIGGER = CApi.SQLITE_CREATE_TRIGGER;
+ public static final int CREATE_VIEW = CApi.SQLITE_CREATE_VIEW;
+ public static final int DELETE = CApi.SQLITE_DELETE;
+ public static final int DROP_INDEX = CApi.SQLITE_DROP_INDEX;
+ public static final int DROP_TABLE = CApi.SQLITE_DROP_TABLE;
+ public static final int DROP_TEMP_INDEX = CApi.SQLITE_DROP_TEMP_INDEX;
+ public static final int DROP_TEMP_TABLE = CApi.SQLITE_DROP_TEMP_TABLE;
+ public static final int DROP_TEMP_TRIGGER = CApi.SQLITE_DROP_TEMP_TRIGGER;
+ public static final int DROP_TEMP_VIEW = CApi.SQLITE_DROP_TEMP_VIEW;
+ public static final int DROP_TRIGGER = CApi.SQLITE_DROP_TRIGGER;
+ public static final int DROP_VIEW = CApi.SQLITE_DROP_VIEW;
+ public static final int INSERT = CApi.SQLITE_INSERT;
+ public static final int PRAGMA = CApi.SQLITE_PRAGMA;
+ public static final int READ = CApi.SQLITE_READ;
+ public static final int SELECT = CApi.SQLITE_SELECT;
+ public static final int TRANSACTION = CApi.SQLITE_TRANSACTION;
+ public static final int UPDATE = CApi.SQLITE_UPDATE;
+ public static final int ATTACH = CApi.SQLITE_ATTACH;
+ public static final int DETACH = CApi.SQLITE_DETACH;
+ public static final int ALTER_TABLE = CApi.SQLITE_ALTER_TABLE;
+ public static final int REINDEX = CApi.SQLITE_REINDEX;
+ public static final int ANALYZE = CApi.SQLITE_ANALYZE;
+ public static final int CREATE_VTABLE = CApi.SQLITE_CREATE_VTABLE;
+ public static final int DROP_VTABLE = CApi.SQLITE_DROP_VTABLE;
+ public static final int FUNCTION = CApi.SQLITE_FUNCTION;
+ public static final int SAVEPOINT = CApi.SQLITE_SAVEPOINT;
+ public static final int RECURSIVE = CApi.SQLITE_RECURSIVE;
//! Used only by the open() factory functions.
private Sqlite(sqlite3 db){
this.db = db;
}
+
+ /** Maps org.sqlite.jni.capi.sqlite3 to Sqlite instances. */
+ private static final java.util.Map<sqlite3, Sqlite> nativeToWrapper
+ = new java.util.HashMap<>();
+
+
+ /**
+ When any given thread is done using the SQLite library, calling
+ this will free up any native-side resources which may be
+ associated specifically with that thread. This is not strictly
+ necessary, in particular in applications which only use SQLite
+ from a single thread, but may help free some otherwise errant
+ resources.
+
+ Calling into SQLite from a given thread after this has been
+ called in that thread is harmless. The library will simply start
+ to re-cache certain state for that thread.
+
+ Contrariwise, failing to call this will effectively leak a small
+ amount of cached state for the thread, which may add up to
+ significant amounts if the application uses SQLite from many
+ threads.
+
+ This must never be called while actively using SQLite from this
+ thread, e.g. from within a query loop or a callback which is
+ operating on behalf of the library.
+ */
+ static void uncacheThread(){
+ CApi.sqlite3_java_uncache_thread();
+ }
+
+ /**
+ Returns the Sqlite object associated with the given sqlite3
+ object, or null if there is no such mapping.
+ */
+ static Sqlite fromNative(sqlite3 low){
+ synchronized(nativeToWrapper){
+ return nativeToWrapper.get(low);
+ }
+ }
/**
Returns a newly-opened db connection or throws SqliteException if
opening fails. All arguments are as documented for
sqlite3_open_v2().
@@ -42,31 +328,172 @@
Design question: do we want static factory functions or should
this be reformulated as a constructor?
*/
public static Sqlite open(String filename, int flags, String vfsName){
final OutputPointer.sqlite3 out = new OutputPointer.sqlite3();
- final int rc = sqlite3_open_v2(filename, out, flags, vfsName);
+ final int rc = CApi.sqlite3_open_v2(filename, out, flags, vfsName);
final sqlite3 n = out.take();
if( 0!=rc ){
if( null==n ) throw new SqliteException(rc);
final SqliteException ex = new SqliteException(n);
n.close();
throw ex;
}
- return new Sqlite(n);
+ final Sqlite rv = new Sqlite(n);
+ synchronized(nativeToWrapper){
+ nativeToWrapper.put(n, rv);
+ }
+ runAutoExtensions(rv);
+ return rv;
}
public static Sqlite open(String filename, int flags){
return open(filename, flags, null);
}
public static Sqlite open(String filename){
- return open(filename, SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, null);
+ return open(filename, OPEN_READWRITE|OPEN_CREATE, null);
+ }
+
+ public static String libVersion(){
+ return CApi.sqlite3_libversion();
+ }
+
+ public static int libVersionNumber(){
+ return CApi.sqlite3_libversion_number();
+ }
+
+ public static String libSourceId(){
+ return CApi.sqlite3_sourceid();
+ }
+
+ /**
+ Returns the native library's build-time value of the
+ SQLITE_THREADSAFE build option.
+ */
+ public static int libThreadsafe(){
+ return CApi.sqlite3_threadsafe();
+ }
+
+ /**
+ Analog to sqlite3_compileoption_get().
+ */
+ public static String compileOptionGet(int n){
+ return CApi.sqlite3_compileoption_get(n);
+ }
+
+ /**
+ Analog to sqlite3_compileoption_used().
+ */
+ public static boolean compileOptionUsed(String optName){
+ return CApi.sqlite3_compileoption_used(optName);
+ }
+
+ private static boolean hasNormalizeSql =
+ compileOptionUsed("ENABLE_NORMALIZE");
+
+ private static boolean hasSqlLog =
+ compileOptionUsed("ENABLE_SQLLOG");
+
+ /**
+ Throws UnsupportedOperationException if check is false.
+ flag is expected to be the name of an SQLITE_ENABLE_...
+ build flag.
+ */
+ private static void checkSupported(boolean check, String flag){
+ if( !check ){
+ throw new UnsupportedOperationException(
+ "Library was built without "+flag
+ );
+ }
+ }
+
+ /**
+ Analog to sqlite3_complete().
+ */
+ public static boolean isCompleteStatement(String sql){
+ switch(CApi.sqlite3_complete(sql)){
+ case 0: return false;
+ case CApi.SQLITE_MISUSE:
+ throw new IllegalArgumentException("Input may not be null.");
+ case CApi.SQLITE_NOMEM:
+ throw new OutOfMemoryError();
+ default:
+ return true;
+ }
+ }
+
+ public static int keywordCount(){
+ return CApi.sqlite3_keyword_count();
+ }
+
+ public static boolean keywordCheck(String word){
+ return CApi.sqlite3_keyword_check(word);
+ }
+
+ public static String keywordName(int index){
+ return CApi.sqlite3_keyword_name(index);
+ }
+
+ public static boolean strglob(String glob, String txt){
+ return 0==CApi.sqlite3_strglob(glob, txt);
+ }
+
+ public static boolean strlike(String glob, String txt, char escChar){
+ return 0==CApi.sqlite3_strlike(glob, txt, escChar);
+ }
+
+ /**
+ Output object for use with status() and libStatus().
+ */
+ public static final class Status {
+ /** The current value for the requested status() or libStatus() metric. */
+ long current;
+ /** The peak value for the requested status() or libStatus() metric. */
+ long peak;
+ };
+
+ /**
+ As per sqlite3_status64(), but returns its current and high-water
+ results as a Status object. Throws if the first argument is
+ not one of the STATUS_... constants.
+ */
+ public static Status libStatus(int op, boolean resetStats){
+ org.sqlite.jni.capi.OutputPointer.Int64 pCurrent =
+ new org.sqlite.jni.capi.OutputPointer.Int64();
+ org.sqlite.jni.capi.OutputPointer.Int64 pHighwater =
+ new org.sqlite.jni.capi.OutputPointer.Int64();
+ checkRcStatic( CApi.sqlite3_status64(op, pCurrent, pHighwater, resetStats) );
+ final Status s = new Status();
+ s.current = pCurrent.value;
+ s.peak = pHighwater.value;
+ return s;
+ }
+
+ /**
+ As per sqlite3_db_status(), but returns its current and
+ high-water results as a Status object. Throws if the first
+ argument is not one of the DBSTATUS_... constants or on any other
+ misuse.
+ */
+ public Status status(int op, boolean resetStats){
+ org.sqlite.jni.capi.OutputPointer.Int32 pCurrent =
+ new org.sqlite.jni.capi.OutputPointer.Int32();
+ org.sqlite.jni.capi.OutputPointer.Int32 pHighwater =
+ new org.sqlite.jni.capi.OutputPointer.Int32();
+ checkRc( CApi.sqlite3_db_status(thisDb(), op, pCurrent, pHighwater, resetStats) );
+ final Status s = new Status();
+ s.current = pCurrent.value;
+ s.peak = pHighwater.value;
+ return s;
}
@Override public void close(){
if(null!=this.db){
+ synchronized(nativeToWrapper){
+ nativeToWrapper.remove(this.db);
+ }
this.db.close();
this.db = null;
}
}
@@ -75,11 +502,11 @@
this instance has been closed. This is very specifically not
public.
*/
sqlite3 nativeHandle(){ return this.db; }
- private sqlite3 affirmOpen(){
+ private sqlite3 thisDb(){
if( null==db || 0==db.getNativePointer() ){
throw new IllegalArgumentException("This database instance is closed.");
}
return this.db;
}
@@ -86,10 +513,451 @@
// private byte[] stringToUtf8(String s){
// return s==null ? null : s.getBytes(StandardCharsets.UTF_8);
// }
- private void affirmRcOk(int rc){
+ /**
+ If rc!=0, throws an SqliteException. If this db is currently
+ opened and has non-0 sqlite3_errcode(), the error state is
+ extracted from it, else only the string form of rc is used. It is
+ the caller's responsibility to filter out non-error codes such as
+ SQLITE_ROW and SQLITE_DONE before calling this.
+
+ As a special case, if rc is SQLITE_NOMEM, an OutOfMemoryError is
+ thrown.
+ */
+ private void checkRc(int rc){
+ if( 0!=rc ){
+ if( CApi.SQLITE_NOMEM==rc ){
+ throw new OutOfMemoryError();
+ }else if( null==db || 0==CApi.sqlite3_errcode(db) ){
+ throw new SqliteException(rc);
+ }else{
+ throw new SqliteException(db);
+ }
+ }
+ }
+
+ /**
+ Like checkRc() but behaves as if that function were
+ called with a null db object.
+ */
+ private static void checkRcStatic(int rc){
if( 0!=rc ){
- throw new SqliteException(db);
+ if( CApi.SQLITE_NOMEM==rc ){
+ throw new OutOfMemoryError();
+ }else{
+ throw new SqliteException(rc);
+ }
}
}
+
+ /**
+ Toggles the use of extended result codes on or off. By default
+ they are turned off, but they can be enabled by default by
+ including the OPEN_EXRESCODE flag when opening a database.
+
+ Because this API reports db-side errors using exceptions,
+ enabling this may change the values returned by
+ SqliteException.errcode().
+ */
+ public void useExtendedResultCodes(boolean on){
+ checkRc( CApi.sqlite3_extended_result_codes(thisDb(), on) );
+ }
+
+ /**
+ Analog to sqlite3_prepare_v3(), this prepares the first SQL
+ statement from the given input string and returns it as a
+ Stmt. It throws an SqliteException if preparation fails or an
+ IllegalArgumentException if the input is empty (e.g. contains
+ only comments or whitespace).
+
+ The first argument must be SQL input in UTF-8 encoding.
+
+ prepFlags must be 0 or a bitmask of the PREPARE_... constants.
+
+ For processing multiple statements from a single input, use
+ prepareMulti().
+
+ Design note: though the C-level API succeeds with a null
+ statement object for empty inputs, that approach is cumbersome to
+ use in higher-level APIs because every prepared statement has to
+ be checked for null before using it.
+ */
+ public Stmt prepare(byte utf8Sql[], int prepFlags){
+ final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt();
+ final int rc = CApi.sqlite3_prepare_v3(thisDb(), utf8Sql, prepFlags, out);
+ checkRc(rc);
+ final sqlite3_stmt q = out.take();
+ if( null==q ){
+ /* The C-level API treats input which is devoid of SQL
+ statements (e.g. all comments or an empty string) as success
+ but returns a NULL sqlite3_stmt object. In higher-level APIs,
+ wrapping a "successful NULL" object that way is tedious to
+ use because it forces clients and/or wrapper-level code to
+ check for that unusual case. In practice, higher-level
+ bindings are generally better-served by treating empty SQL
+ input as an error. */
+ throw new IllegalArgumentException("Input contains no SQL statements.");
+ }
+ return new Stmt(this, q);
+ }
+
+ /**
+ Equivalent to prepare(X, prepFlags), where X is
+ sql.getBytes(StandardCharsets.UTF_8).
+ */
+ public Stmt prepare(String sql, int prepFlags){
+ return prepare( sql.getBytes(StandardCharsets.UTF_8), prepFlags );
+ }
+
+ /**
+ Equivalent to prepare(sql, 0).
+ */
+ public Stmt prepare(String sql){
+ return prepare(sql, 0);
+ }
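A minimal usage sketch of prepare() and Stmt, assuming the Sqlite class above has been imported from its package; the in-memory database and the schema are purely illustrative.

  try( Sqlite db = Sqlite.open(":memory:") ){
    // DDL and DML statements report completion via step() returning false.
    try( Sqlite.Stmt st = db.prepare("CREATE TABLE t(a INTEGER)") ){
      st.step();
    }
    try( Sqlite.Stmt st = db.prepare("INSERT INTO t(a) VALUES(?)") ){
      st.bindInt(1, 42);
      st.step();
    }
    // Queries return true from step() once per result row.
    try( Sqlite.Stmt st = db.prepare("SELECT a FROM t") ){
      while( st.step() ){
        System.out.println("a = " + st.columnInt(0));
      }
    }
  }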
+
+
+ /**
+ Callback type for use with prepareMulti().
+ */
+ public interface PrepareMulti {
+ /**
+ Gets passed a Stmt which it may handle in arbitrary ways.
+ Ownership of st is passed to this function. It must throw on
+ error.
+ */
+ void call(Sqlite.Stmt st);
+ }
+
+ /**
+ A PrepareMulti implementation which calls another PrepareMulti
+ object and then finalizes its statement.
+ */
+ public static class PrepareMultiFinalize implements PrepareMulti {
+ private final PrepareMulti pm;
+ /**
+ Proxies the given PrepareMulti via this object's call() method.
+ */
+ public PrepareMultiFinalize(PrepareMulti proxy){
+ this.pm = proxy;
+ }
+ /**
+ Passes st to the call() method of the object this one proxies,
+ then finalizes st, propagating any exceptions from call() after
+ finalizing st.
+ */
+ @Override public void call(Stmt st){
+ try{ pm.call(st); }
+ finally{ st.finalizeStmt(); }
+ }
+ }
+
+ /**
+ Equivalent to prepareMulti(sql,0,visitor).
+ */
+ public void prepareMulti(String sql, PrepareMulti visitor){
+ prepareMulti( sql, 0, visitor );
+ }
+
+ /**
+ Equivalent to prepareMulti(X,prepFlags,visitor), where X is
+ sql.getBytes(StandardCharsets.UTF_8).
+ */
+ public void prepareMulti(String sql, int prepFlags, PrepareMulti visitor){
+ prepareMulti(sql.getBytes(StandardCharsets.UTF_8), prepFlags, visitor);
+ }
+
+ /**
+ A variant of prepare() which can handle multiple SQL statements
+ in a single input string. For each statement in the given string,
+ the statement is passed to visitor.call() a single time, passing
+ ownership of the statement to that function. This function does
+ not step() or close() statements - those operations are left to
+ the caller or the visitor function.
+
+ Unlike prepare(), this function does not fail if the input
+ contains only whitespace or SQL comments. In that case it is up
+ to the caller to arrange for that to be an error (if desired).
+
+ PrepareMultiFinalize offers a proxy which finalizes each
+ statement after it is passed to another client-defined visitor.
+
+ Be aware that certain legal SQL constructs may fail in the
+ preparation phase, before the corresponding statement can be
+ stepped. Most notably, authorizer checks which disallow access to
+ something in a statement behave that way.
+ */
+ public void prepareMulti(byte sqlUtf8[], int prepFlags, PrepareMulti visitor){
+ int pos = 0;
+ byte[] sqlChunk = sqlUtf8;
+ final org.sqlite.jni.capi.OutputPointer.sqlite3_stmt outStmt =
+ new org.sqlite.jni.capi.OutputPointer.sqlite3_stmt();
+ final org.sqlite.jni.capi.OutputPointer.Int32 oTail =
+ new org.sqlite.jni.capi.OutputPointer.Int32();
+ while( pos < sqlChunk.length ){
+ sqlite3_stmt stmt = null;
+ if( pos>0 ){
+ sqlChunk = java.util.Arrays.copyOfRange(sqlChunk, pos, sqlChunk.length);
+ }
+ if( 0==sqlChunk.length ) break;
+ checkRc(
+ CApi.sqlite3_prepare_v3(db, sqlChunk, prepFlags, outStmt, oTail)
+ );
+ pos = oTail.value;
+ stmt = outStmt.take();
+ if( null==stmt ){
+ /* empty statement, e.g. only comments or whitespace, was parsed. */
+ continue;
+ }
+ visitor.call(new Stmt(this, stmt));
+ }
+ }
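A sketch of multi-statement processing via prepareMulti() and PrepareMultiFinalize; the script and the open connection (db) are assumed for illustration.

  final String script =
    "CREATE TABLE IF NOT EXISTS t(a);" +
    "INSERT INTO t(a) VALUES(1),(2),(3);" +
    "-- trailing comments and whitespace are skipped, not errors\n";
  db.prepareMulti(script, new Sqlite.PrepareMultiFinalize(
    new Sqlite.PrepareMulti(){
      @Override public void call(Sqlite.Stmt st){
        while( st.step() ){}  // run each statement to completion
      }                       // st is finalized by the proxy afterwards
    }
  ));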
+
+ public void createFunction(String name, int nArg, int eTextRep, ScalarFunction f){
+ int rc = CApi.sqlite3_create_function(thisDb(), name, nArg, eTextRep,
+ new SqlFunction.ScalarAdapter(f));
+ if( 0!=rc ) throw new SqliteException(db);
+ }
+
+ public void createFunction(String name, int nArg, ScalarFunction f){
+ this.createFunction(name, nArg, CApi.SQLITE_UTF8, f);
+ }
+
+ public void createFunction(String name, int nArg, int eTextRep, AggregateFunction f){
+ int rc = CApi.sqlite3_create_function(thisDb(), name, nArg, eTextRep,
+ new SqlFunction.AggregateAdapter(f));
+ if( 0!=rc ) throw new SqliteException(db);
+ }
+
+ public void createFunction(String name, int nArg, AggregateFunction f){
+ this.createFunction(name, nArg, CApi.SQLITE_UTF8, f);
+ }
+
+ public void createFunction(String name, int nArg, int eTextRep, WindowFunction f){
+ int rc = CApi.sqlite3_create_function(thisDb(), name, nArg, eTextRep,
+ new SqlFunction.WindowAdapter(f));
+ if( 0!=rc ) throw new SqliteException(db);
+ }
+
+ public void createFunction(String name, int nArg, WindowFunction f){
+ this.createFunction(name, nArg, CApi.SQLITE_UTF8, f);
+ }
+
+ public long changes(){
+ return CApi.sqlite3_changes64(thisDb());
+ }
+
+ public long totalChanges(){
+ return CApi.sqlite3_total_changes64(thisDb());
+ }
+
+ public long lastInsertRowId(){
+ return CApi.sqlite3_last_insert_rowid(thisDb());
+ }
+
+ public void setLastInsertRowId(long rowId){
+ CApi.sqlite3_set_last_insert_rowid(thisDb(), rowId);
+ }
+
+ public void interrupt(){
+ CApi.sqlite3_interrupt(thisDb());
+ }
+
+ public boolean isInterrupted(){
+ return CApi.sqlite3_is_interrupted(thisDb());
+ }
+
+ public boolean isAutoCommit(){
+ return CApi.sqlite3_get_autocommit(thisDb());
+ }
+
+ /**
+ Analog to sqlite3_txn_state(). Returns one of TXN_NONE, TXN_READ,
+ or TXN_WRITE to denote this database's current transaction state
+ for the given schema name (or the most restrictive state of any
+ schema if zSchema is null).
+ */
+ public int transactionState(String zSchema){
+ return CApi.sqlite3_txn_state(thisDb(), zSchema);
+ }
+
+ /**
+ Analog to sqlite3_db_name(). Returns null if passed an unknown
+ index.
+ */
+ public String dbName(int dbNdx){
+ return CApi.sqlite3_db_name(thisDb(), dbNdx);
+ }
+
+ /**
+ Analog to sqlite3_db_filename(). Returns null if passed an
+ unknown db name.
+ */
+ public String dbFileName(String dbName){
+ return CApi.sqlite3_db_filename(thisDb(), dbName);
+ }
+
+ /**
+ Analog to sqlite3_db_config() for the call forms which take one
+ of the boolean-type db configuration flags (namely the
+ DBCONFIG_... constants defined in this class). On success it
+ returns the result of that underlying call. Throws on error.
+ */
+ public boolean dbConfig(int op, boolean on){
+ org.sqlite.jni.capi.OutputPointer.Int32 pOut =
+ new org.sqlite.jni.capi.OutputPointer.Int32();
+ checkRc( CApi.sqlite3_db_config(thisDb(), op, on ? 1 : 0, pOut) );
+ return pOut.get()!=0;
+ }
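A brief sketch of toggling one of the boolean DBCONFIG_... options, assuming db is an open Sqlite; the chosen option is arbitrary.

  // Enable foreign-key enforcement for this connection. The return value
  // reports the option's state after the call.
  final boolean fkEnabled = db.dbConfig(Sqlite.DBCONFIG_ENABLE_FKEY, true);
  System.out.println("foreign keys enabled: " + fkEnabled);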
+
+ /**
+ Analog to the variant of sqlite3_db_config() for configuring the
+ SQLITE_DBCONFIG_MAINDBNAME option. Throws on error.
+ */
+ public void setMainDbName(String name){
+ checkRc(
+ CApi.sqlite3_db_config(thisDb(), CApi.SQLITE_DBCONFIG_MAINDBNAME,
+ name)
+ );
+ }
+
+ /**
+ Analog to sqlite3_db_readonly() but throws an SqliteException
+ with result code SQLITE_NOTFOUND if given an unknown database
+ name.
+ */
+ public boolean readOnly(String dbName){
+ final int rc = CApi.sqlite3_db_readonly(thisDb(), dbName);
+ if( 0==rc ) return false;
+ else if( rc>0 ) return true;
+ throw new SqliteException(CApi.SQLITE_NOTFOUND);
+ }
+
+ /**
+ Analog to sqlite3_db_release_memory().
+ */
+ public void releaseMemory(){
+ CApi.sqlite3_db_release_memory(thisDb());
+ }
+
+ /**
+ Analog to sqlite3_release_memory().
+ */
+ public static int libReleaseMemory(int n){
+ return CApi.sqlite3_release_memory(n);
+ }
+
+ /**
+ Analog to sqlite3_limit(). limitId must be one of the
+ LIMIT_... constants.
+
+ Returns the old limit for the given option. If newLimit is
+ negative, it returns the old limit without modifying the limit.
+
+ If sqlite3_limit() returns a negative value, this function throws
+ an SqliteException with the SQLITE_RANGE result code but no
+ further error info (because that case does not qualify as a
+ db-level error). Such errors may indicate an invalid argument
+ value or an invalid range for newLimit (the underlying function
+ does not differentiate between those).
+ */
+ public int limit(int limitId, int newLimit){
+ final int rc = CApi.sqlite3_limit(thisDb(), limitId, newLimit);
+ if( rc<0 ){
+ throw new SqliteException(CApi.SQLITE_RANGE);
+ }
+ return rc;
+ }
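For example, a limit can be read without modification by passing a negative newLimit, and then lowered; the values below are purely illustrative.

  // Query the current value without changing it.
  final int oldMaxVars = db.limit(Sqlite.LIMIT_VARIABLE_NUMBER, -1);
  // Lower it; the previous value is returned.
  db.limit(Sqlite.LIMIT_VARIABLE_NUMBER, Math.min(oldMaxVars, 100));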
+
+ /**
+ Analog to sqlite3_errstr().
+ */
+ static String errstr(int resultCode){
+ return CApi.sqlite3_errstr(resultCode);
+ }
+
+ /**
+ A wrapper object for use with tableColumnMetadata(). They are
+ created and populated only via that interface.
+ */
+ public final class TableColumnMetadata {
+ Boolean pNotNull = null;
+ Boolean pPrimaryKey = null;
+ Boolean pAutoinc = null;
+ String pzCollSeq = null;
+ String pzDataType = null;
+
+ private TableColumnMetadata(){}
+
+ public String getDataType(){ return pzDataType; }
+ public String getCollation(){ return pzCollSeq; }
+ public boolean isNotNull(){ return pNotNull; }
+ public boolean isPrimaryKey(){ return pPrimaryKey; }
+ public boolean isAutoincrement(){ return pAutoinc; }
+ }
+
+ /**
+ Returns data about a database, table, and (optionally) column
+ (which may be null), as per sqlite3_table_column_metadata().
+ Throws if passed invalid arguments, else returns the result as a
+ new TableColumnMetadata object.
+ */
+ TableColumnMetadata tableColumnMetadata(
+ String zDbName, String zTableName, String zColumnName
+ ){
+ org.sqlite.jni.capi.OutputPointer.String pzDataType
+ = new org.sqlite.jni.capi.OutputPointer.String();
+ org.sqlite.jni.capi.OutputPointer.String pzCollSeq
+ = new org.sqlite.jni.capi.OutputPointer.String();
+ org.sqlite.jni.capi.OutputPointer.Bool pNotNull
+ = new org.sqlite.jni.capi.OutputPointer.Bool();
+ org.sqlite.jni.capi.OutputPointer.Bool pPrimaryKey
+ = new org.sqlite.jni.capi.OutputPointer.Bool();
+ org.sqlite.jni.capi.OutputPointer.Bool pAutoinc
+ = new org.sqlite.jni.capi.OutputPointer.Bool();
+ final int rc = CApi.sqlite3_table_column_metadata(
+ thisDb(), zDbName, zTableName, zColumnName,
+ pzDataType, pzCollSeq, pNotNull, pPrimaryKey, pAutoinc
+ );
+ checkRc(rc);
+ TableColumnMetadata rv = new TableColumnMetadata();
+ rv.pzDataType = pzDataType.value;
+ rv.pzCollSeq = pzCollSeq.value;
+ rv.pNotNull = pNotNull.value;
+ rv.pPrimaryKey = pPrimaryKey.value;
+ rv.pAutoinc = pAutoinc.value;
+ return rv;
+ }
+
+ public interface TraceCallback {
+ /**
+ Called by sqlite3 for various tracing operations, as per
+ sqlite3_trace_v2(). Note that this interface elides the 2nd
+ argument to the native trace callback, as that role is better
+ filled by instance-local state.
+
+ These callbacks may throw, in which case their exceptions are
+ converted to C-level error information.
+
+ The 2nd argument to this function, if non-null, will be a
+ Sqlite or Sqlite.Stmt object, depending on the first argument
+ (see below).
+
+ The final argument to this function is the "X" argument
+ documented for sqlite3_trace() and sqlite3_trace_v2(). Its type
+ depends on the value of the first argument:
+
+ - SQLITE_TRACE_STMT: pNative is a Sqlite.Stmt. pX is a String
+ containing the prepared SQL.
+
+ - SQLITE_TRACE_PROFILE: pNative is a Sqlite.Stmt. pX is a Long
+ holding an approximate number of nanoseconds the statement took
+ to run.
+
+ - SQLITE_TRACE_ROW: pNative is a Sqlite.Stmt. pX is null.
+
+ - SQLITE_TRACE_CLOSE: pNative is a Sqlite. pX is null.
+ */
+ void call(int traceFlag, Object pNative, Object pX);
+ }
@@ -96,42 +964,107 @@
+
+ /**
+ Analog to sqlite3_trace_v2(). traceMask must be a mask of the
+ TRACE_... constants. Pass a null callback to remove tracing.
+
+ Throws on error.
+ */
+ public void trace(int traceMask, TraceCallback callback){
+ final Sqlite self = this;
+ final org.sqlite.jni.capi.TraceV2Callback tc =
+ (null==callback) ? null : new org.sqlite.jni.capi.TraceV2Callback(){
+ @SuppressWarnings("unchecked")
+ @Override public int call(int flag, Object pNative, Object pX){
+ switch(flag){
+ case TRACE_ROW:
+ case TRACE_PROFILE:
+ case TRACE_STMT:
+ callback.call(flag, Sqlite.Stmt.fromNative((sqlite3_stmt)pNative), pX);
+ break;
+ case TRACE_CLOSE:
+ callback.call(flag, self, pX);
+ break;
+ }
+ return 0;
+ }
+ };
+ checkRc( CApi.sqlite3_trace_v2(thisDb(), traceMask, tc) );
+ };
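A sketch of installing a tracer on an open connection (db); the logging targets are arbitrary.

  db.trace(Sqlite.TRACE_STMT | Sqlite.TRACE_PROFILE, new Sqlite.TraceCallback(){
    @Override public void call(int traceFlag, Object pNative, Object pX){
      if( Sqlite.TRACE_STMT == traceFlag ){
        System.out.println("SQL: " + pX);          // pX is the statement's SQL
      }else if( Sqlite.TRACE_PROFILE == traceFlag ){
        System.out.println("took " + pX + " ns");  // pX is a Long
      }
    }
  });
  // Later, to remove tracing:
  db.trace(0, null);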
/**
Corresponds to the sqlite3_stmt class. Use Sqlite.prepare() to
create new instances.
*/
- public final class Stmt implements AutoCloseable {
+ public static final class Stmt implements AutoCloseable {
private Sqlite _db = null;
private sqlite3_stmt stmt = null;
+
/** Only called by the prepare() factory functions. */
Stmt(Sqlite db, sqlite3_stmt stmt){
this._db = db;
this.stmt = stmt;
+ synchronized(nativeToWrapper){
+ nativeToWrapper.put(this.stmt, this);
+ }
}
sqlite3_stmt nativeHandle(){
return stmt;
}
- private sqlite3_stmt affirmOpen(){
+ /** Maps org.sqlite.jni.capi.sqlite3_stmt to Stmt instances. */
+ private static final java.util.Map<sqlite3_stmt, Stmt> nativeToWrapper
+ = new java.util.HashMap<>();
+
+ /**
+ Returns the Stmt object associated with the given sqlite3_stmt
+ object, or null if there is no such mapping.
+ */
+ static Stmt fromNative(sqlite3_stmt low){
+ synchronized(nativeToWrapper){
+ return nativeToWrapper.get(low);
+ }
+ }
+
+ /**
+ If this statement is still opened, its low-level handle is
+ returned, else an IllegalArgumentException is thrown.
+ */
+ private sqlite3_stmt thisStmt(){
if( null==stmt || 0==stmt.getNativePointer() ){
throw new IllegalArgumentException("This Stmt has been finalized.");
}
return stmt;
}
+
+ /** Throws if n is out of range of this statement's result column
+ count. Intended to be used by the columnXyz() methods. */
+ private sqlite3_stmt checkColIndex(int n){
+ if(n<0 || n>=columnCount()){
+ throw new IllegalArgumentException("Column index "+n+" is out of range.");
+ }
+ return thisStmt();
+ }
/**
Corresponds to sqlite3_finalize(), but we cannot override the
name finalize() here because this one requires a different
signature. It does not throw on error here because "destructors
do not throw." If it returns non-0, the object is still
- finalized.
+ finalized, but the result code is an indication that something
+ went wrong in a prior call into the statement's API, as
+ documented for sqlite3_finalize().
*/
public int finalizeStmt(){
int rc = 0;
if( null!=stmt ){
- sqlite3_finalize(stmt);
+ synchronized(nativeToWrapper){
+ nativeToWrapper.remove(this.stmt);
+ }
+ rc = CApi.sqlite3_finalize(stmt);
stmt = null;
+ _db = null;
}
return rc;
}
@Override public void close(){
@@ -138,81 +1071,921 @@
finalizeStmt();
}
/**
Throws if rc is any value other than 0, SQLITE_ROW, or
- SQLITE_DONE, else returns rc.
+ SQLITE_DONE, else returns rc. Error state for the exception is
+ extracted from this statement object (if it's opened) or the
+ string form of rc.
*/
private int checkRc(int rc){
switch(rc){
case 0:
- case SQLITE_ROW:
- case SQLITE_DONE: return rc;
+ case CApi.SQLITE_ROW:
+ case CApi.SQLITE_DONE: return rc;
+ default:
+ if( null==stmt ) throw new SqliteException(rc);
+ else throw new SqliteException(this);
+ }
+ }
+
+ /**
+ Works like sqlite3_step() but returns true for SQLITE_ROW,
+ false for SQLITE_DONE, and throws SqliteException for any other
+ result.
+ */
+ public boolean step(){
+ switch(checkRc(CApi.sqlite3_step(thisStmt()))){
+ case CApi.SQLITE_ROW: return true;
+ case CApi.SQLITE_DONE: return false;
default:
- throw new SqliteException(this);
+ throw new IllegalStateException(
+ "This \"cannot happen\": all possible result codes were checked already."
+ );
}
}
/**
- Works like sqlite3_step() but throws SqliteException for any
- result other than 0, SQLITE_ROW, or SQLITE_DONE.
+ Works like sqlite3_step(), returning the same result codes as
+ that function unless throwOnError is true, in which case it
+ will throw an SqliteException for any result codes other than
+ Sqlite.ROW or Sqlite.DONE.
+
+ The utility of this overload over the no-argument one is the
+ ability to handle BUSY and LOCKED errors more easily.
+ */
+ public int step(boolean throwOnError){
+ final int rc = (null==stmt)
+ ? Sqlite.MISUSE
+ : CApi.sqlite3_step(stmt);
+ return throwOnError ? checkRc(rc) : rc;
+ }
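A sketch of the non-throwing overload used to retry on transient lock contention; the back-off policy is illustrative only, and whether a retry is appropriate depends on the surrounding transaction context.

  int rc = st.step(false);
  while( Sqlite.BUSY == rc || Sqlite.LOCKED == rc ){
    // Give the competing connection a moment, then try again.
    try{ Thread.sleep(10); }catch(InterruptedException e){ break; }
    rc = st.step(false);
  }
  if( Sqlite.ROW != rc && Sqlite.DONE != rc ){
    throw new RuntimeException("step() failed with result code " + rc);
  }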
+
+ /**
+ Returns the Sqlite which prepared this statement, or null if
+ this statement has been finalized.
*/
- public int step(){
- return checkRc(sqlite3_step(affirmOpen()));
- }
-
- public Sqlite db(){ return this._db; }
+ public Sqlite getDb(){ return this._db; }
/**
Works like sqlite3_reset() but throws on error.
*/
public void reset(){
- checkRc(sqlite3_reset(affirmOpen()));
+ checkRc(CApi.sqlite3_reset(thisStmt()));
+ }
+
+ public boolean isBusy(){
+ return CApi.sqlite3_stmt_busy(thisStmt());
+ }
+
+ public boolean isReadOnly(){
+ return CApi.sqlite3_stmt_readonly(thisStmt());
+ }
+
+ public String sql(){
+ return CApi.sqlite3_sql(thisStmt());
+ }
+
+ public String expandedSql(){
+ return CApi.sqlite3_expanded_sql(thisStmt());
+ }
+
+ /**
+ Analog to sqlite3_stmt_explain() but throws if op is invalid.
+ */
+ public void explain(int op){
+ checkRc(CApi.sqlite3_stmt_explain(thisStmt(), op));
+ }
+
+ /**
+ Analog to sqlite3_stmt_isexplain().
+ */
+ public int isExplain(){
+ return CApi.sqlite3_stmt_isexplain(thisStmt());
+ }
+
+ /**
+ Analog to sqlite3_normalized_sql(), but throws
+ UnsupportedOperationException if the library was built without
+ the SQLITE_ENABLE_NORMALIZE flag.
+ */
+ public String normalizedSql(){
+ Sqlite.checkSupported(hasNormalizeSql, "SQLITE_ENABLE_NORMALIZE");
+ return CApi.sqlite3_normalized_sql(thisStmt());
}
public void clearBindings(){
- sqlite3_clear_bindings( affirmOpen() );
- }
- }
-
-
- /**
- prepare() TODOs include:
-
- - overloads taking byte[] and ByteBuffer.
-
- - multi-statement processing, like CApi.sqlite3_prepare_multi()
- but using a callback specific to the higher-level Stmt class
- rather than the sqlite3_stmt class.
- */
- public Stmt prepare(String sql, int prepFlags){
- final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt();
- final int rc = sqlite3_prepare_v3(affirmOpen(), sql, prepFlags, out);
- affirmRcOk(rc);
- return new Stmt(this, out.take());
- }
-
- public Stmt prepare(String sql){
- return prepare(sql, 0);
- }
-
- public void createFunction(String name, int nArg, int eTextRep, ScalarFunction f ){
- int rc = CApi.sqlite3_create_function(affirmOpen(), name, nArg, eTextRep,
- new SqlFunction.ScalarAdapter(f));
- if( 0!=rc ) throw new SqliteException(db);
- }
-
- public void createFunction(String name, int nArg, ScalarFunction f){
- this.createFunction(name, nArg, CApi.SQLITE_UTF8, f);
- }
-
- public void createFunction(String name, int nArg, int eTextRep, AggregateFunction f ){
- int rc = CApi.sqlite3_create_function(affirmOpen(), name, nArg, eTextRep,
- new SqlFunction.AggregateAdapter(f));
- if( 0!=rc ) throw new SqliteException(db);
- }
-
- public void createFunction(String name, int nArg, AggregateFunction f){
- this.createFunction(name, nArg, CApi.SQLITE_UTF8, f);
+ CApi.sqlite3_clear_bindings( thisStmt() );
+ }
+ public void bindInt(int ndx, int val){
+ checkRc(CApi.sqlite3_bind_int(thisStmt(), ndx, val));
+ }
+ public void bindInt64(int ndx, long val){
+ checkRc(CApi.sqlite3_bind_int64(thisStmt(), ndx, val));
+ }
+ public void bindDouble(int ndx, double val){
+ checkRc(CApi.sqlite3_bind_double(thisStmt(), ndx, val));
+ }
+ public void bindObject(int ndx, Object o){
+ checkRc(CApi.sqlite3_bind_java_object(thisStmt(), ndx, o));
+ }
+ public void bindNull(int ndx){
+ checkRc(CApi.sqlite3_bind_null(thisStmt(), ndx));
+ }
+ public int bindParameterCount(){
+ return CApi.sqlite3_bind_parameter_count(thisStmt());
+ }
+ public int bindParameterIndex(String paramName){
+ return CApi.sqlite3_bind_parameter_index(thisStmt(), paramName);
+ }
+ public String bindParameterName(int ndx){
+ return CApi.sqlite3_bind_parameter_name(thisStmt(), ndx);
+ }
+ public void bindText(int ndx, byte[] utf8){
+ checkRc(CApi.sqlite3_bind_text(thisStmt(), ndx, utf8));
+ }
+ public void bindText(int ndx, String asUtf8){
+ checkRc(CApi.sqlite3_bind_text(thisStmt(), ndx, asUtf8));
+ }
+ public void bindText16(int ndx, byte[] utf16){
+ checkRc(CApi.sqlite3_bind_text16(thisStmt(), ndx, utf16));
+ }
+ public void bindText16(int ndx, String asUtf16){
+ checkRc(CApi.sqlite3_bind_text16(thisStmt(), ndx, asUtf16));
+ }
+ public void bindZeroBlob(int ndx, int n){
+ checkRc(CApi.sqlite3_bind_zeroblob(thisStmt(), ndx, n));
+ }
+ public void bindBlob(int ndx, byte[] bytes){
+ checkRc(CApi.sqlite3_bind_blob(thisStmt(), ndx, bytes));
+ }
+
+ public byte[] columnBlob(int ndx){
+ return CApi.sqlite3_column_blob( checkColIndex(ndx), ndx );
+ }
+ public byte[] columnText(int ndx){
+ return CApi.sqlite3_column_text( checkColIndex(ndx), ndx );
+ }
+ public String columnText16(int ndx){
+ return CApi.sqlite3_column_text16( checkColIndex(ndx), ndx );
+ }
+ public int columnBytes(int ndx){
+ return CApi.sqlite3_column_bytes( checkColIndex(ndx), ndx );
+ }
+ public int columnBytes16(int ndx){
+ return CApi.sqlite3_column_bytes16( checkColIndex(ndx), ndx );
+ }
+ public int columnInt(int ndx){
+ return CApi.sqlite3_column_int( checkColIndex(ndx), ndx );
+ }
+ public long columnInt64(int ndx){
+ return CApi.sqlite3_column_int64( checkColIndex(ndx), ndx );
+ }
+ public double columnDouble(int ndx){
+ return CApi.sqlite3_column_double( checkColIndex(ndx), ndx );
+ }
+ public int columnType(int ndx){
+ return CApi.sqlite3_column_type( checkColIndex(ndx), ndx );
+ }
+ public String columnDeclType(int ndx){
+ return CApi.sqlite3_column_decltype( checkColIndex(ndx), ndx );
+ }
+ /**
+ Analog to sqlite3_column_count() but throws if this statement
+ has been finalized.
+ */
+ public int columnCount(){
+ /* We cannot reliably cache the column count in a class
+ member because an ALTER TABLE from a separate statement
+ can invalidate that count and we have no way, short of
+ installing a COMMIT handler or the like, of knowing when
+ to re-read it. We cannot install such a handler without
+ interfering with a client's ability to do so. */
+ return CApi.sqlite3_column_count(thisStmt());
+ }
+ public int columnDataCount(){
+ return CApi.sqlite3_data_count( thisStmt() );
+ }
+ public Object columnObject(int ndx){
+ return CApi.sqlite3_column_java_object( checkColIndex(ndx), ndx );
+ }
+ public <T> T columnObject(int ndx, Class<T> type){
+ return CApi.sqlite3_column_java_object( checkColIndex(ndx), ndx, type );
+ }
+ public String columnName(int ndx){
+ return CApi.sqlite3_column_name( checkColIndex(ndx), ndx );
+ }
+ public String columnDatabaseName(int ndx){
+ return CApi.sqlite3_column_database_name( checkColIndex(ndx), ndx );
+ }
+ public String columnOriginName(int ndx){
+ return CApi.sqlite3_column_origin_name( checkColIndex(ndx), ndx );
+ }
+ public String columnTableName(int ndx){
+ return CApi.sqlite3_column_table_name( checkColIndex(ndx), ndx );
+ }
+ } /* Stmt class */
+
+ /**
+ Interface for auto-extensions, as per the
+ sqlite3_auto_extension() API.
+
+ Design note: the chicken/egg timing of auto-extension execution
+ requires that this feature be entirely re-implemented in Java
+ because the C-level API has no access to the Sqlite type so
+ cannot pass on an object of that type while the database is being
+ opened. One side effect of this reimplementation is that this
+ class's list of auto-extensions is 100% independent of the
+ C-level list so, e.g., clearAutoExtensions() will have no effect
+ on auto-extensions added via the C-level API and databases opened
+ from that level of API will not be passed to this level's
+ AutoExtension instances.
+ */
+ public interface AutoExtension {
+ public void call(Sqlite db);
+ }
+
+ private static final java.util.Set<AutoExtension> autoExtensions =
+ new java.util.LinkedHashSet<>();
+
+ /**
+ Passes db to all auto-extensions. If any one of them throws,
+ db.close() is called before the exception is propagated.
+ */
+ private static void runAutoExtensions(Sqlite db){
+ AutoExtension list[];
+ synchronized(autoExtensions){
+ /* Ensure that modifications made to the AutoExtension list from
+ within auto-extensions do not affect this traversal of the list. */
+ list = autoExtensions.toArray(new AutoExtension[0]);
+ }
+ try {
+ for( AutoExtension ax : list ) ax.call(db);
+ }catch(Exception e){
+ db.close();
+ throw e;
+ }
+ }
+
+ /**
+ Analog to sqlite3_auto_extension(), adds the given object to the
+ list of auto-extensions if it is not already in that list. The
+ given object will be run as part of Sqlite.open(), and passed the
+ being-opened database. If the extension throws then open() will
+ fail.
+
+ This API does not guarantee whether or not manipulations made to
+ the auto-extension list from within auto-extension callbacks will
+ affect the current traversal of that list. Whether or not they do
+ is unspecified and subject to change between versions. For example,
+ if an AutoExtension calls addAutoExtension(), whether or not the new
+ extension will be run on the being-opened database is undefined.
+
+ Note that calling Sqlite.open() from an auto-extension will
+ necessarily result in an endless recursion and (eventually) a stack
+ overflow.
+ */
+ public static void addAutoExtension( AutoExtension e ){
+ if( null==e ){
+ throw new IllegalArgumentException("AutoExtension may not be null.");
+ }
+ synchronized(autoExtensions){
+ autoExtensions.add(e);
+ }
+ }
+
+ /**
+ Removes the given object from the auto-extension list if it is in
+ that list, otherwise this has no side-effects beyond briefly
+ locking that list.
+ */
+ public static void removeAutoExtension( AutoExtension e ){
+ synchronized(autoExtensions){
+ autoExtensions.remove(e);
+ }
+ }
+
+ /**
+ Removes all auto-extensions which were added via addAutoExtension().
+ */
+ public static void clearAutoExtensions(){
+ synchronized(autoExtensions){
+ autoExtensions.clear();
+ }
+ }
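A sketch of an auto-extension which customizes every subsequently-opened connection; the particular customization is arbitrary.

  final Sqlite.AutoExtension ax = new Sqlite.AutoExtension(){
    @Override public void call(Sqlite db){
      db.useExtendedResultCodes(true);  // runs for each new connection
    }
  };
  Sqlite.addAutoExtension(ax);
  try( Sqlite db = Sqlite.open(":memory:") ){
    // ax.call(db) has already run by the time open() returns.
  }
  Sqlite.removeAutoExtension(ax);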
+
+ /**
+ Encapsulates state related to the sqlite3 backup API. Use
+ Sqlite.initBackup() to create new instances.
+ */
+ public static final class Backup implements AutoCloseable {
+ private sqlite3_backup b = null;
+ private Sqlite dbTo = null;
+ private Sqlite dbFrom = null;
+
+ Backup(Sqlite dbDest, String schemaDest, Sqlite dbSrc, String schemaSrc){
+ this.dbTo = dbDest;
+ this.dbFrom = dbSrc;
+ b = CApi.sqlite3_backup_init(dbDest.nativeHandle(), schemaDest,
+ dbSrc.nativeHandle(), schemaSrc);
+ if(null==b) toss();
+ }
+
+ private void toss(){
+ int rc = CApi.sqlite3_errcode(dbTo.nativeHandle());
+ if(0!=rc) throw new SqliteException(dbTo);
+ rc = CApi.sqlite3_errcode(dbFrom.nativeHandle());
+ if(0!=rc) throw new SqliteException(dbFrom);
+ throw new SqliteException(CApi.SQLITE_ERROR);
+ }
+
+ private sqlite3_backup getNative(){
+ if( null==b ) throw new IllegalStateException("This Backup is already closed.");
+ return b;
+ }
+ /**
+ If this backup is still active, this completes the backup and
+ frees its native resources, otherwise this is a no-op.
+ */
+ public void finish(){
+ if( null!=b ){
+ CApi.sqlite3_backup_finish(b);
+ b = null;
+ dbTo = null;
+ dbFrom = null;
+ }
+ }
+
+ /** Equivalent to finish(). */
+ @Override public void close(){
+ this.finish();
+ }
+
+ /**
+ Analog to sqlite3_backup_step(). Returns 0 if stepping succeeds,
+ Sqlite.DONE if the end is reached, Sqlite.BUSY if one of
+ the databases is busy, Sqlite.LOCKED if one of the databases is
+ locked, and throws for any other result code or if this object
+ has been closed. Note that BUSY and LOCKED are not necessarily
+ permanent errors, so they do not trigger an exception.
+ */
+ public int step(int pageCount){
+ final int rc = CApi.sqlite3_backup_step(getNative(), pageCount);
+ switch(rc){
+ case 0:
+ case Sqlite.DONE:
+ case Sqlite.BUSY:
+ case Sqlite.LOCKED:
+ return rc;
+ default:
+ toss();
+ return CApi.SQLITE_ERROR/*not reached*/;
+ }
+ }
+
+ /**
+ Analog to sqlite3_backup_pagecount().
+ */
+ public int pageCount(){
+ return CApi.sqlite3_backup_pagecount(getNative());
+ }
+
+ /**
+ Analog to sqlite3_backup_remaining().
+ */
+ public int remaining(){
+ return CApi.sqlite3_backup_remaining(getNative());
+ }
+ }
+
+ /**
+ Analog to sqlite3_backup_init(). Throws if either this db or dbSrc
+ (the source db) is not opened, if either of schemaDest or schemaSrc
+ is null, or if the underlying call to sqlite3_backup_init() fails.
+
+ The returned object must eventually be cleaned up by either
+ arranging for it to be auto-closed (e.g. using
+ try-with-resources) or by calling its finish() method.
+ */
+ public Backup initBackup(String schemaDest, Sqlite dbSrc, String schemaSrc){
+ thisDb();
+ dbSrc.thisDb();
+ if( null==schemaSrc || null==schemaDest ){
+ throw new IllegalArgumentException(
+ "Neither the source nor destination schema name may be null."
+ );
+ }
+ return new Backup(this, schemaDest, dbSrc, schemaSrc);
+ }
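A sketch of a complete backup run; the file names are illustrative, and a real application would back off rather than spin if step() keeps reporting BUSY or LOCKED.

  try( Sqlite src = Sqlite.open("app.db", Sqlite.OPEN_READWRITE);
       Sqlite dst = Sqlite.open("backup.db", Sqlite.OPEN_READWRITE | Sqlite.OPEN_CREATE);
       Sqlite.Backup b = dst.initBackup("main", src, "main") ){
    int rc;
    do{
      rc = b.step(5);  // copy up to 5 pages per call
    }while( Sqlite.DONE != rc );
    // b.finish() runs implicitly via close() when this block exits.
  }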
+
+
+ /**
+ Callback type for use with createCollation().
+ */
+ public interface Collation {
+ /**
+ Called by the SQLite core to compare inputs. Implementations
+ must compare their two arguments using memcmp(3) semantics.
+
+ Warning: the SQLite core has no mechanism for reporting errors
+ from custom collations and its workflow does not accommodate
+ propagation of exceptions from callbacks. Any exceptions thrown
+ from collations will be silently suppressed and sorting results
+ will be unpredictable.
+ */
+ int call(byte[] lhs, byte[] rhs);
+ }
+
+ /**
+ Analog to sqlite3_create_collation().
+
+ Throws if name is null or empty, c is null, or the encoding flag
+ is invalid. The encoding must be one of the UTF8, UTF16, UTF16LE,
+ or UTF16BE constants.
+ */
+ public void createCollation(String name, int encoding, Collation c){
+ thisDb();
+ if( null==name || 0==name.length()){
+ throw new IllegalArgumentException("Collation name may not be null or empty.");
+ }
+ if( null==c ){
+ throw new IllegalArgumentException("Collation may not be null.");
+ }
+ switch(encoding){
+ case UTF8:
+ case UTF16:
+ case UTF16LE:
+ case UTF16BE:
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid Collation encoding.");
+ }
+ checkRc(
+ CApi.sqlite3_create_collation(
+ thisDb(), name, encoding, new org.sqlite.jni.capi.CollationCallback(){
+ @Override public int call(byte[] lhs, byte[] rhs){
+ try{return c.call(lhs, rhs);}
+ catch(Exception e){return 0;}
+ }
+ @Override public void xDestroy(){}
+ }
+ )
+ );
+ }
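A sketch of a custom collation performing an ASCII case-insensitive comparison; the collation name is arbitrary and db is assumed to be an open Sqlite.

  db.createCollation("nocase_ascii", Sqlite.UTF8, new Sqlite.Collation(){
    @Override public int call(byte[] lhs, byte[] rhs){
      final String a = new String(lhs, java.nio.charset.StandardCharsets.UTF_8);
      final String b = new String(rhs, java.nio.charset.StandardCharsets.UTF_8);
      return a.compareToIgnoreCase(b);  // negative/zero/positive, like memcmp()
    }
  });
  // Usable from SQL, e.g.: SELECT * FROM t ORDER BY name COLLATE nocase_ascii;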
+
+ /**
+ Callback for use with onCollationNeeded().
+ */
+ public interface CollationNeeded {
+ /**
+ Must behave as documented for the callback for
+ sqlite3_collation_needed().
+
+ Warning: the C API has no mechanism for reporting or
+ propagating errors from this callback, so any exceptions it
+ throws are suppressed.
+ */
+ void call(Sqlite db, int encoding, String collationName);
+ }
+
+ /**
+ Sets up the given object to be called by the SQLite core when it
+ encounters a collation name which it does not know. Pass a null
+ object to disconnect the object from the core. This replaces any
+ existing collation-needed loader, or is a no-op if the given
+ object is already registered. Throws if registering the loader
+ fails.
+ */
+ public void onCollationNeeded( CollationNeeded cn ){
+ org.sqlite.jni.capi.CollationNeededCallback cnc = null;
+ if( null!=cn ){
+ cnc = new org.sqlite.jni.capi.CollationNeededCallback(){
+ @Override public void call(sqlite3 db, int encoding, String collationName){
+ final Sqlite xdb = Sqlite.fromNative(db);
+ if(null!=xdb) cn.call(xdb, encoding, collationName);
+ }
+ };
+ }
+ checkRc( CApi.sqlite3_collation_needed(thisDb(), cnc) );
+ }
+
+ /**
+ Callback for use with busyHandler().
+ */
+ public interface BusyHandler {
+ /**
+ Must function as documented for the C-level
+ sqlite3_busy_handler() callback argument, minus the (void*)
+ argument the C-level function requires.
+
+ If this function throws, it is translated to a database-level
+ error.
+ */
+ int call(int n);
+ }
+
+ /**
+ Analog to sqlite3_busy_timeout().
+ */
+ public void setBusyTimeout(int ms){
+ checkRc(CApi.sqlite3_busy_timeout(thisDb(), ms));
+ }
+
+ /**
+ Analog to sqlite3_busy_handler(). If b is null then any
+ current handler is cleared.
+ */
+ public void setBusyHandler( BusyHandler b ){
+ org.sqlite.jni.capi.BusyHandlerCallback bhc = null;
+ if( null!=b ){
+ bhc = new org.sqlite.jni.capi.BusyHandlerCallback(){
+ @Override public int call(int n){
+ return b.call(n);
+ }
+ };
+ }
+ checkRc( CApi.sqlite3_busy_handler(thisDb(), bhc) );
+ }
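A sketch contrasting the two contention-handling mechanisms; each replaces the other, so an application would normally pick one. The retry count is arbitrary.

  // Simplest: let SQLite retry internally for up to ~2 seconds.
  db.setBusyTimeout(2000);

  // Or install custom back-off logic: returning 0 gives up (the pending
  // operation then fails with BUSY); any non-0 value asks for a retry.
  db.setBusyHandler(new Sqlite.BusyHandler(){
    @Override public int call(int n){
      return n < 10 ? 1 : 0;  // n is the number of prior invocations
    }
  });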
+
+ public interface CommitHook {
+ /**
+ Must behave as documented for the C-level sqlite3_commit_hook()
+ callback. If it throws, the exception is translated into
+ a db-level error.
+ */
+ int call();
+ }
+
+ /**
+ A level of indirection to permit setCommitHook() to have similar
+ semantics as the C API, returning the previous hook. The caveat
+ is that if the low-level API is used to install a hook, it will
+ have a different hook type than Sqlite.CommitHook so
+ setCommitHook() will return null instead of that object.
+ */
+ private static class CommitHookProxy
+ implements org.sqlite.jni.capi.CommitHookCallback {
+ final CommitHook commitHook;
+ CommitHookProxy(CommitHook ch){
+ this.commitHook = ch;
+ }
+ @Override public int call(){
+ return commitHook.call();
+ }
+ }
+
+ /**
+ Analog to sqlite3_commit_hook(). Returns the previous hook, if
+ any (else null). Throws if this db is closed.
+
+ Minor caveat: if a commit hook is set on this object's underlying
+ db handle using the lower-level SQLite API, this function may
+ return null when replacing it, despite there being a hook,
+ because it will have a different callback type. So long as the
+ handle is only manipulated via the high-level API, this caveat
+ does not apply.
+ */
+ public CommitHook setCommitHook( CommitHook c ){
+ CommitHookProxy chp = null;
+ if( null!=c ){
+ chp = new CommitHookProxy(c);
+ }
+ final org.sqlite.jni.capi.CommitHookCallback rv =
+ CApi.sqlite3_commit_hook(thisDb(), chp);
+ return (rv instanceof CommitHookProxy)
+ ? ((CommitHookProxy)rv).commitHook
+ : null;
+ }
+
+
+ public interface RollbackHook {
+ /**
+ Must behave as documented for the C-level sqlite3_rollback_hook()
+ callback. If it throws, the exception is translated into
+ a db-level error.
+ */
+ void call();
+ }
+
+ /**
+ A level of indirection to permit setRollbackHook() to have similar
+ semantics as the C API, returning the previous hook. The caveat
+ is that if the low-level API is used to install a hook, it will
+ have a different hook type than Sqlite.RollbackHook so
+ setRollbackHook() will return null instead of that object.
+ */
+ private static class RollbackHookProxy
+ implements org.sqlite.jni.capi.RollbackHookCallback {
+ final RollbackHook rollbackHook;
+ RollbackHookProxy(RollbackHook ch){
+ this.rollbackHook = ch;
+ }
+ @Override public void call(){rollbackHook.call();}
+ }
+
+ /**
+ Analog to sqlite3_rollback_hook(). Returns the previous hook, if
+ any (else null). Throws if this db is closed.
+
+ Minor caveat: if a rollback hook is set on this object's underlying
+ db handle using the lower-level SQLite API, this function may
+ return null when replacing it, despite there being a hook,
+ because it will have a different callback type. So long as the
+ handle is only manipulated via the high-level API, this caveat
+ does not apply.
+ */
+ public RollbackHook setRollbackHook( RollbackHook c ){
+ RollbackHookProxy chp = null;
+ if( null!=c ){
+ chp = new RollbackHookProxy(c);
+ }
+ final org.sqlite.jni.capi.RollbackHookCallback rv =
+ CApi.sqlite3_rollback_hook(thisDb(), chp);
+ return (rv instanceof RollbackHookProxy)
+ ? ((RollbackHookProxy)rv).rollbackHook
+ : null;
+ }
+
+ public interface UpdateHook {
+ /**
+ Must function as described for the C-level sqlite3_update_hook()
+ callback.
+ */
+ void call(int opId, String dbName, String tableName, long rowId);
+ }
+
+ /**
+ A level of indirection to permit setUpdateHook() to have semantics
+ similar to those of the C API, returning the previous hook. The caveat
+ is that if the low-level API is used to install a hook, it will
+ have a different hook type than Sqlite.UpdateHook so
+ setUpdateHook() will return null instead of that object.
+ */
+ private static class UpdateHookProxy
+ implements org.sqlite.jni.capi.UpdateHookCallback {
+ final UpdateHook updateHook;
+ UpdateHookProxy(UpdateHook ch){
+ this.updateHook = ch;
+ }
+ @Override public void call(int opId, String dbName, String tableName, long rowId){
+ updateHook.call(opId, dbName, tableName, rowId);
+ }
+ }
+
+ /**
+ Analog to sqlite3_update_hook(). Returns the previous hook, if
+ any (else null). Throws if this db is closed.
+
+ Minor caveat: if an update hook is set on this object's underlying
+ db handle using the lower-level SQLite API, this function may
+ return null when replacing it, despite there being a hook,
+ because it will have a different callback type. So long as the
+ handle is only manipulated via the high-level API, this caveat
+ does not apply.
+ */
+ public UpdateHook setUpdateHook( UpdateHook c ){
+ UpdateHookProxy chp = null;
+ if( null!=c ){
+ chp = new UpdateHookProxy(c);
+ }
+ final org.sqlite.jni.capi.UpdateHookCallback rv =
+ CApi.sqlite3_update_hook(thisDb(), chp);
+ return (rv instanceof UpdateHookProxy)
+ ? ((UpdateHookProxy)rv).updateHook
+ : null;
+ }
+
+
+ /**
+ Callback interface for use with setProgressHandler().
+ */
+ public interface ProgressHandler {
+ /**
+ Must behave as documented for the C-level sqlite3_progress_handler()
+ callback. If it throws, the exception is translated into
+ a db-level error.
+ */
+ int call();
+ }
+
+ /**
+ Analog to sqlite3_progress_handler(), sets the current progress
+ handler or clears it if p is null.
+
+ Note that this API, in contrast to setUpdateHook(),
+ setRollbackHook(), and setCommitHook(), cannot return the
+ previous handler. That inconsistency is part of the lower-level C
+ API.
+ */
+ public void setProgressHandler( int n, ProgressHandler p ){
+ org.sqlite.jni.capi.ProgressHandlerCallback phc = null;
+ if( null!=p ){
+ phc = new org.sqlite.jni.capi.ProgressHandlerCallback(){
+ @Override public int call(){ return p.call(); }
+ };
+ }
+ CApi.sqlite3_progress_handler( thisDb(), n, phc );
+ }
+
+
+ /**
+ Callback for use with setAuthorizer().
+ */
+ public interface Authorizer {
+ /**
+ Must function as described for the C-level
+ sqlite3_set_authorizer() callback. If it throws, the error is
+ converted to a db-level error and the exception is suppressed.
+ */
+ int call(int opId, String s1, String s2, String s3, String s4);
+ }
+
+ /**
+ Analog to sqlite3_set_authorizer(), this sets the current
+ authorizer callback, or clears it if passed null.
+ */
+ public void setAuthorizer( Authorizer a ) {
+ org.sqlite.jni.capi.AuthorizerCallback ac = null;
+ if( null!=a ){
+ ac = new org.sqlite.jni.capi.AuthorizerCallback(){
+ @Override public int call(int opId, String s1, String s2, String s3, String s4){
+ return a.call(opId, s1, s2, s3, s4);
+ }
+ };
+ }
+ checkRc( CApi.sqlite3_set_authorizer( thisDb(), ac ) );
+ }
+
+ /**
+ Object type for use with blobOpen()
+ */
+ public final class Blob implements AutoCloseable {
+ private Sqlite db;
+ private sqlite3_blob b;
+ Blob(Sqlite db, sqlite3_blob b){
+ this.db = db;
+ this.b = b;
+ }
+
+ /**
+ If this blob is still open, its low-level handle is
+ returned, else an IllegalArgumentException is thrown.
+ */
+ private sqlite3_blob thisBlob(){
+ if( null==b || 0==b.getNativePointer() ){
+ throw new IllegalArgumentException("This Blob has been finalized.");
+ }
+ return b;
+ }
+
+ /**
+ Analog to sqlite3_blob_close().
+ */
+ @Override public void close(){
+ if( null!=b ){
+ CApi.sqlite3_blob_close(b);
+ b = null;
+ db = null;
+ }
+ }
+
+ /**
+ Throws if the JVM does not have JNI-level support for
+ ByteBuffer.
+ */
+ private void checkNio(){
+ if( !Sqlite.JNI_SUPPORTS_NIO ){
+ throw new UnsupportedOperationException(
+ "This JVM does not support JNI access to ByteBuffer."
+ );
+ }
+ }
+ /**
+ Analog to sqlite3_blob_reopen() but throws on error.
+ */
+ public void reopen(long newRowId){
+ db.checkRc( CApi.sqlite3_blob_reopen(thisBlob(), newRowId) );
+ }
+
+ /**
+ Analog to sqlite3_blob_write() but throws on error.
+ */
+ public void write( byte[] bytes, int atOffset ){
+ db.checkRc( CApi.sqlite3_blob_write(thisBlob(), bytes, atOffset) );
+ }
+
+ /**
+ Analog to sqlite3_blob_read() but throws on error.
+ */
+ public void read( byte[] dest, int atOffset ){
+ db.checkRc( CApi.sqlite3_blob_read(thisBlob(), dest, atOffset) );
+ }
+
+ /**
+ Analog to sqlite3_blob_bytes().
+ */
+ public int bytes(){
+ return CApi.sqlite3_blob_bytes(thisBlob());
+ }
+ }
+
+ /**
+ Analog to sqlite3_blob_open(). Returns a Blob object for the
+ given database, table, column, and rowid. The blob is opened for
+ read-write mode if writeable is true, else it is read-only.
+
+ The returned object must eventually be freed, before this
+ database is closed, by either arranging for it to be auto-closed
+ or calling its close() method.
+
+ Throws on error.
+ */
+ public Blob blobOpen(String dbName, String tableName, String columnName,
+ long iRow, boolean writeable){
+ final OutputPointer.sqlite3_blob out = new OutputPointer.sqlite3_blob();
+ checkRc(
+ CApi.sqlite3_blob_open(thisDb(), dbName, tableName, columnName,
+ iRow, writeable ? 1 : 0, out)
+ );
+ return new Blob(this, out.take());
+ }
+
+ /**
+ Callback for use with libConfigLog().
+ */
+ public interface ConfigLog {
+ /**
+ Must function as described for a C-level callback for
+ sqlite3_config()'s SQLITE_CONFIG_LOG callback, with the slight
+ signature change. Any exceptions thrown from this callback are
+ necessarily suppressed.
+ */
+ void call(int errCode, String msg);
+ }
+
+ /**
+ Analog to sqlite3_config() with the SQLITE_CONFIG_LOG option,
+ this sets or (if log is null) clears the current logger.
+ */
+ public static void libConfigLog(ConfigLog log){
+ final org.sqlite.jni.capi.ConfigLogCallback l =
+ null==log
+ ? null
+ : new org.sqlite.jni.capi.ConfigLogCallback() {
+ @Override public void call(int errCode, String msg){
+ log.call(errCode, msg);
+ }
+ };
+ checkRcStatic(CApi.sqlite3_config(l));
+ }
+
+ /**
+ Callback for use with libConfigSqlLog().
+ */
+ public interface ConfigSqlLog {
+ /**
+ Must function as described for a C-level callback for
+ sqlite3_config()'s SQLITE_CONFIG_SQLLOG callback, with the
+ slight signature change. Any exceptions thrown from this
+ callback are necessarily suppressed.
+ */
+ void call(Sqlite db, String msg, int msgType);
+ }
+
+ /**
+ Analog to sqlite3_config() with the SQLITE_CONFIG_SQLLOG option,
+ this sets or (if log is null) clears the current logger.
+
+ If SQLite is built without SQLITE_ENABLE_SQLLOG defined then this
+ will throw an UnsupportedOperationException.
+ */
+ public static void libConfigSqlLog(ConfigSqlLog log){
+ Sqlite.checkSupported(hasNormalizeSql, "SQLITE_ENABLE_SQLLOG");
+ final org.sqlite.jni.capi.ConfigSqlLogCallback l =
+ null==log
+ ? null
+ : new org.sqlite.jni.capi.ConfigSqlLogCallback() {
+ @Override public void call(sqlite3 db, String msg, int msgType){
+ try{
+ log.call(fromNative(db), msg, msgType);
+ }catch(Exception e){
+ /* Suppressed */
+ }
+ }
+ };
+ checkRcStatic(CApi.sqlite3_config(l));
+ }
+
+ /**
+ Analog to the C-level sqlite3_config() with one of the
+ SQLITE_CONFIG_... constants defined as CONFIG_... in this
+ class. Throws on error, including passing of an unknown option or
+ if a specified option is not supported by the underlying build of
+ the SQLite library.
+ */
+ public static void libConfigOp( int op ){
+ checkRcStatic(CApi.sqlite3_config(op));
}
}
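For orientation, here is a small usage sketch, not part of the patch, showing how the hook and blob wrappers added above are intended to be combined. It assumes only the wrapper1 API visible in this diff (Sqlite.open(), prepare(), setBusyTimeout(), setCommitHook(), blobOpen()); the class name HookDemo is purely illustrative.

import org.sqlite.jni.wrapper1.Sqlite;

public class HookDemo {
  public static void main(String[] args){
    // Open an in-memory db using the wrapper1 OPEN_* flags.
    try (Sqlite db = Sqlite.open(":memory:",
                                 Sqlite.OPEN_READWRITE | Sqlite.OPEN_CREATE)) {
      db.setBusyTimeout(2000); // analog of sqlite3_busy_timeout()
      db.setCommitHook(new Sqlite.CommitHook(){
        @Override public int call(){
          System.out.println("commit");
          return 0; // a non-0 result converts the COMMIT into an error
        }
      });
      Sqlite.Stmt q = db.prepare("CREATE TABLE t(a BLOB)");
      q.step();
      q.finalizeStmt();
      q = db.prepare("INSERT INTO t(rowid,a) VALUES(1, zeroblob(4))");
      q.step();
      q.finalizeStmt();
      // Incremental blob I/O via the new Blob wrapper (auto-closed here).
      try (Sqlite.Blob b = db.blobOpen("main", "t", "a", 1, true)) {
        b.write(new byte[]{1,2,3,4}, 0);
      }
    }
  }
}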
Index: ext/jni/src/org/sqlite/jni/wrapper1/SqliteException.java
==================================================================
--- ext/jni/src/org/sqlite/jni/wrapper1/SqliteException.java
+++ ext/jni/src/org/sqlite/jni/wrapper1/SqliteException.java
@@ -10,24 +10,24 @@
**
*************************************************************************
** This file is part of the wrapper1 interface for sqlite3.
*/
package org.sqlite.jni.wrapper1;
-import static org.sqlite.jni.capi.CApi.*;
+import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.sqlite3;
/**
A wrapper for communicating C-level (sqlite3*) instances with
Java. These wrappers do not own their associated pointer, they
simply provide a type-safe way to communicate it between Java
and C via JNI.
*/
public final class SqliteException extends java.lang.RuntimeException {
- int errCode = SQLITE_ERROR;
- int xerrCode = SQLITE_ERROR;
- int errOffset = -1;
- int sysErrno = 0;
+ private int errCode = CApi.SQLITE_ERROR;
+ private int xerrCode = CApi.SQLITE_ERROR;
+ private int errOffset = -1;
+ private int sysErrno = 0;
/**
Records the given error string and uses SQLITE_ERROR for both the
error code and extended error code.
*/
@@ -36,32 +36,35 @@
}
/**
Uses sqlite3_errstr(sqlite3ResultCode) for the error string and
sets both the error code and extended error code to the given
- value.
+ value. This approach includes no database-level information and
+ systemErrno() will be 0, so is intended only for use with sqlite3
+ APIs for which a result code is not an error but which the
+ higher-level wrapper should treat as one.
*/
public SqliteException(int sqlite3ResultCode){
- super(sqlite3_errstr(sqlite3ResultCode));
+ super(CApi.sqlite3_errstr(sqlite3ResultCode));
errCode = xerrCode = sqlite3ResultCode;
}
/**
Records the current error state of db (which must not be null and
- must refer to an opened db object). Note that this does NOT close
+ must refer to an opened db object). Note that this does not close
the db.
- Design note: closing the db on error is likely only useful during
+ Design note: closing the db on error is really only useful during
a failed db-open operation, and the place(s) where that can
happen are inside this library, not client-level code.
*/
SqliteException(sqlite3 db){
- super(sqlite3_errmsg(db));
- errCode = sqlite3_errcode(db);
- xerrCode = sqlite3_extended_errcode(db);
- errOffset = sqlite3_error_offset(db);
- sysErrno = sqlite3_system_errno(db);
+ super(CApi.sqlite3_errmsg(db));
+ errCode = CApi.sqlite3_errcode(db);
+ xerrCode = CApi.sqlite3_extended_errcode(db);
+ errOffset = CApi.sqlite3_error_offset(db);
+ sysErrno = CApi.sqlite3_system_errno(db);
}
/**
Records the current error state of db (which must not be null and
must refer to an open database).
@@ -69,14 +72,14 @@
public SqliteException(Sqlite db){
this(db.nativeHandle());
}
public SqliteException(Sqlite.Stmt stmt){
- this( stmt.db() );
+ this(stmt.getDb());
}
public int errcode(){ return errCode; }
public int extendedErrcode(){ return xerrCode; }
public int errorOffset(){ return errOffset; }
public int systemErrno(){ return sysErrno; }
}
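A brief sketch, not part of the patch, of how callers are expected to consume the accessors above. It assumes the Sqlite.open()/prepare() behavior shown elsewhere in this diff; ErrDemo is an illustrative name.

import org.sqlite.jni.wrapper1.Sqlite;
import org.sqlite.jni.wrapper1.SqliteException;

public class ErrDemo {
  public static void main(String[] args){
    try (Sqlite db = Sqlite.open(":memory:",
                                 Sqlite.OPEN_READWRITE | Sqlite.OPEN_CREATE)) {
      db.prepare("SELECT * FROM no_such_table"); // throws SqliteException
    }catch(SqliteException e){
      // errcode()/extendedErrcode() carry SQLITE_* result codes;
      // errorOffset() is the position of the error within the SQL, if known.
      System.out.println(e.errcode()+" / "+e.extendedErrcode()
                         +" @ "+e.errorOffset()+": "+e.getMessage());
    }
  }
}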
Index: ext/jni/src/org/sqlite/jni/wrapper1/Tester2.java
==================================================================
--- ext/jni/src/org/sqlite/jni/wrapper1/Tester2.java
+++ ext/jni/src/org/sqlite/jni/wrapper1/Tester2.java
@@ -10,18 +10,17 @@
**
*************************************************************************
** This file contains a set of tests for the sqlite3 JNI bindings.
*/
package org.sqlite.jni.wrapper1;
-//import static org.sqlite.jni.capi.CApi.*;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
-import org.sqlite.jni.capi.*;
+import org.sqlite.jni.capi.CApi;
/**
An annotation for Tester2 tests which we do not want to run in
reflection-driven test mode because either they are not suitable
for multi-threaded mode or we have to control their execution
@@ -44,11 +43,11 @@
//! True to sleep briefly between tests.
private static boolean takeNaps = false;
//! True to shuffle the order of the tests.
private static boolean shuffle = false;
//! True to dump the list of to-run tests to stdout.
- private static boolean listRunTests = false;
+ private static int listRunTests = 0;
//! True to squelch all out() and outln() output.
private static boolean quietMode = false;
//! Total number of runTests() calls.
private static int nTestRuns = 0;
//! List of test*() methods to run.
@@ -123,122 +122,143 @@
public static void affirm(Boolean v){
affirm(v, "Affirmation failed.");
}
- public static void execSql(Sqlite db, String[] sql){
+ public static void execSql(Sqlite db, String sql[]){
execSql(db, String.join("", sql));
}
+ /**
+ Executes all SQL statements in the given string. If throwOnError
+ is true then it will throw for any prepare/step errors, else it
+ will return the corresponding non-0 result code.
+ */
public static int execSql(Sqlite dbw, boolean throwOnError, String sql){
- final sqlite3 db = dbw.nativeHandle();
- OutputPointer.Int32 oTail = new OutputPointer.Int32();
- final byte[] sqlUtf8 = sql.getBytes(StandardCharsets.UTF_8);
- int pos = 0, n = 1;
- byte[] sqlChunk = sqlUtf8;
- int rc = 0;
- sqlite3_stmt stmt = null;
- final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt();
- while(pos < sqlChunk.length){
- if(pos > 0){
- sqlChunk = Arrays.copyOfRange(sqlChunk, pos,
- sqlChunk.length);
- }
- if( 0==sqlChunk.length ) break;
- rc = CApi.sqlite3_prepare_v2(db, sqlChunk, outStmt, oTail);
- if(throwOnError) affirm(0 == rc);
- else if( 0!=rc ) break;
- pos = oTail.value;
- stmt = outStmt.take();
- if( null == stmt ){
- // empty statement was parsed.
- continue;
- }
- affirm(0 != stmt.getNativePointer());
- while( CApi.SQLITE_ROW == (rc = CApi.sqlite3_step(stmt)) ){
- }
- CApi.sqlite3_finalize(stmt);
- affirm(0 == stmt.getNativePointer());
- if(0!=rc && CApi.SQLITE_ROW!=rc && CApi.SQLITE_DONE!=rc){
- break;
- }
- }
- CApi.sqlite3_finalize(stmt);
- if(CApi.SQLITE_ROW==rc || CApi.SQLITE_DONE==rc) rc = 0;
- if( 0!=rc && throwOnError){
- throw new SqliteException(db);
- }
- return rc;
+ final ValueHolder<Integer> rv = new ValueHolder<>(0);
+ final Sqlite.PrepareMulti pm = new Sqlite.PrepareMulti(){
+ @Override public void call(Sqlite.Stmt stmt){
+ try{
+ while( Sqlite.ROW == (rv.value = stmt.step(throwOnError)) ){}
+ }
+ finally{ stmt.finalizeStmt(); }
+ }
+ };
+ try {
+ dbw.prepareMulti(sql, pm);
+ }catch(SqliteException se){
+ if( throwOnError ){
+ throw se;
+ }else{
+ /* This error (likely) happened in the prepare() phase, so report
+ it via the result code instead of rethrowing. */
+ rv.value = se.errcode();
+ }
+ }
+ return (rv.value==Sqlite.DONE) ? 0 : rv.value;
}
static void execSql(Sqlite db, String sql){
execSql(db, true, sql);
}
@SingleThreadOnly /* because it's thread-agnostic */
private void test1(){
- affirm(CApi.sqlite3_libversion_number() == CApi.SQLITE_VERSION_NUMBER);
- }
-
- /* Copy/paste/rename this to add new tests. */
- private void _testTemplate(){
- //final sqlite3 db = createNewDb();
- //sqlite3_stmt stmt = prepare(db,"SELECT 1");
- //sqlite3_finalize(stmt);
- //sqlite3_close_v2(db);
+ affirm(Sqlite.libVersionNumber() == CApi.SQLITE_VERSION_NUMBER);
}
private void nap() throws InterruptedException {
if( takeNaps ){
Thread.sleep(java.util.concurrent.ThreadLocalRandom.current().nextInt(3, 17), 0);
}
}
Sqlite openDb(String name){
- final Sqlite db = Sqlite.open(name, CApi.SQLITE_OPEN_READWRITE|
- CApi.SQLITE_OPEN_CREATE|
- CApi.SQLITE_OPEN_EXRESCODE);
+ final Sqlite db = Sqlite.open(name, Sqlite.OPEN_READWRITE|
+ Sqlite.OPEN_CREATE|
+ Sqlite.OPEN_EXRESCODE);
++metrics.dbOpen;
return db;
}
Sqlite openDb(){ return openDb(":memory:"); }
void testOpenDb1(){
Sqlite db = openDb();
affirm( 0!=db.nativeHandle().getNativePointer() );
+ affirm( "main".equals( db.dbName(0) ) );
+ db.setMainDbName("foo");
+ affirm( "foo".equals( db.dbName(0) ) );
+ affirm( db.dbConfig(Sqlite.DBCONFIG_DEFENSIVE, true)
+ /* The underlying function has different mangled names in jdk8
+ vs jdk19, and this call is here to ensure that the build
+ fails if it cannot find both names. */ );
+ affirm( !db.dbConfig(Sqlite.DBCONFIG_DEFENSIVE, false) );
+ SqliteException ex = null;
+ try{ db.dbConfig(0, false); }
+ catch(SqliteException e){ ex = e; }
+ affirm( null!=ex );
+ ex = null;
db.close();
affirm( null==db.nativeHandle() );
- SqliteException ex = null;
- try {
- db = openDb("/no/such/dir/.../probably");
- }catch(SqliteException e){
- ex = e;
- }
+ try{ db = openDb("/no/such/dir/.../probably"); }
+ catch(SqliteException e){ ex = e; }
affirm( ex!=null );
affirm( ex.errcode() != 0 );
affirm( ex.extendedErrcode() != 0 );
affirm( ex.errorOffset() < 0 );
// there's no reliable way to predict what ex.systemErrno() might be
}
void testPrepare1(){
try (Sqlite db = openDb()) {
- Sqlite.Stmt stmt = db.prepare("SELECT 1");
+ Sqlite.Stmt stmt = db.prepare("SELECT ?1");
+ Exception e = null;
affirm( null!=stmt.nativeHandle() );
- affirm( CApi.SQLITE_ROW == stmt.step() );
- affirm( CApi.SQLITE_DONE == stmt.step() );
+ affirm( db == stmt.getDb() );
+ affirm( 1==stmt.bindParameterCount() );
+ affirm( "?1".equals(stmt.bindParameterName(1)) );
+ affirm( null==stmt.bindParameterName(2) );
+ stmt.bindInt64(1, 1);
+ stmt.bindDouble(1, 1.1);
+ stmt.bindObject(1, db);
+ stmt.bindNull(1);
+ stmt.bindText(1, new byte[] {32,32,32});
+ stmt.bindText(1, "123");
+ stmt.bindText16(1, "123".getBytes(StandardCharsets.UTF_16));
+ stmt.bindText16(1, "123");
+ stmt.bindZeroBlob(1, 8);
+ stmt.bindBlob(1, new byte[] {1,2,3,4});
+ stmt.bindInt(1, 17);
+ try{ stmt.bindInt(2,1); }
+ catch(Exception ex){ e = ex; }
+ affirm( null!=e );
+ e = null;
+ affirm( stmt.step() );
+ try{ stmt.columnInt(1); }
+ catch(Exception ex){ e = ex; }
+ affirm( null!=e );
+ e = null;
+ affirm( 17 == stmt.columnInt(0) );
+ affirm( 17L == stmt.columnInt64(0) );
+ affirm( 17.0 == stmt.columnDouble(0) );
+ affirm( "17".equals(stmt.columnText16(0)) );
+ affirm( !stmt.step() );
stmt.reset();
- affirm( CApi.SQLITE_ROW == stmt.step() );
- affirm( CApi.SQLITE_DONE == stmt.step() );
+ affirm( Sqlite.ROW==stmt.step(false) );
+ affirm( !stmt.step() );
affirm( 0 == stmt.finalizeStmt() );
affirm( null==stmt.nativeHandle() );
- stmt = db.prepare("SELECT 1");
- affirm( CApi.SQLITE_ROW == stmt.step() );
- affirm( 0 == stmt.finalizeStmt() )
+ stmt = db.prepare("SELECT ?");
+ stmt.bindObject(1, db);
+ affirm( Sqlite.ROW == stmt.step(false) );
+ affirm( db==stmt.columnObject(0) );
+ affirm( db==stmt.columnObject(0, Sqlite.class ) );
+ affirm( null==stmt.columnObject(0, Sqlite.Stmt.class ) );
+ affirm( 0==stmt.finalizeStmt() )
/* getting a non-0 out of sqlite3_finalize() is tricky */;
affirm( null==stmt.nativeHandle() );
}
}
@@ -247,32 +267,41 @@
try (Sqlite db = openDb()) {
execSql(db, "create table t(a); insert into t(a) values(1),(2),(3)");
final ValueHolder<Integer> vh = new ValueHolder<>(0);
final ScalarFunction f = new ScalarFunction(){
public void xFunc(SqlFunction.Arguments args){
+ affirm( db == args.getDb() );
for( SqlFunction.Arguments.Arg arg : args ){
vh.value += arg.getInt();
}
+ args.resultInt(vh.value);
}
public void xDestroy(){
++xDestroyCalled.value;
}
};
db.createFunction("myfunc", -1, f);
- execSql(db, "select myfunc(1,2,3)");
+ Sqlite.Stmt q = db.prepare("select myfunc(1,2,3)");
+ affirm( q.step() );
affirm( 6 == vh.value );
+ affirm( 6 == q.columnInt(0) );
+ q.finalizeStmt();
+ affirm( 0 == xDestroyCalled.value );
vh.value = 0;
- execSql(db, "select myfunc(-1,-2,-3)");
+ q = db.prepare("select myfunc(-1,-2,-3)");
+ affirm( q.step() );
affirm( -6 == vh.value );
+ affirm( -6 == q.columnInt(0) );
affirm( 0 == xDestroyCalled.value );
+ q.finalizeStmt();
}
affirm( 1 == xDestroyCalled.value );
}
void testUdfAggregate(){
final ValueHolder<Integer> xDestroyCalled = new ValueHolder<>(0);
- final ValueHolder<Integer> vh = new ValueHolder<>(0);
+ Sqlite.Stmt q = null;
try (Sqlite db = openDb()) {
execSql(db, "create table t(a); insert into t(a) values(1),(2),(3)");
final AggregateFunction f = new AggregateFunction(){
public void xStep(SqlFunction.Arguments args){
final ValueHolder<Integer> agg = this.getAggregateState(args, 0);
@@ -282,32 +311,664 @@
}
public void xFinal(SqlFunction.Arguments args){
final Integer v = this.takeAggregateState(args);
if( null==v ) args.resultNull();
else args.resultInt(v);
- vh.value = v;
}
public void xDestroy(){
++xDestroyCalled.value;
}
};
- db.createFunction("myagg", -1, f);
- execSql(db, "select myagg(a) from t");
- affirm( 6 == vh.value );
+ db.createFunction("summer", 1, f);
+ q = db.prepare(
+ "with cte(v) as ("+
+ "select 3 union all select 5 union all select 7"+
+ ") select summer(v), summer(v+1) from cte"
+ /* ------------------^^^^^^^^^^^ ensures that we're handling
+ sqlite3_aggregate_context() properly. */
+ );
+ affirm( q.step() );
+ affirm( 15==q.columnInt(0) );
+ q.finalizeStmt();
+ q = null;
affirm( 0 == xDestroyCalled.value );
+ db.createFunction("summerN", -1, f);
+
+ q = db.prepare("select summerN(1,8,9), summerN(2,3,4)");
+ affirm( q.step() );
+ affirm( 18==q.columnInt(0) );
+ affirm( 9==q.columnInt(1) );
+ q.finalizeStmt();
+ q = null;
+
+ }/*db*/
+ finally{
+ if( null!=q ) q.finalizeStmt();
+ }
+ affirm( 2 == xDestroyCalled.value
+ /* because we've bound the same instance twice */ );
+ }
+
+ private void testUdfWindow(){
+ final Sqlite db = openDb();
+ /* Example window function, table, and results taken from:
+ https://sqlite.org/windowfunctions.html#udfwinfunc */
+ final WindowFunction<Integer> func = new WindowFunction<Integer>(){
+ //! Impl of xStep() and xInverse()
+ private void xStepInverse(SqlFunction.Arguments args, int v){
+ this.getAggregateState(args,0).value += v;
+ }
+ @Override public void xStep(SqlFunction.Arguments args){
+ this.xStepInverse(args, args.getInt(0));
+ }
+ @Override public void xInverse(SqlFunction.Arguments args){
+ this.xStepInverse(args, -args.getInt(0));
+ }
+ //! Impl of xFinal() and xValue()
+ private void xFinalValue(SqlFunction.Arguments args, Integer v){
+ if(null == v) args.resultNull();
+ else args.resultInt(v);
+ }
+ @Override public void xFinal(SqlFunction.Arguments args){
+ xFinalValue(args, this.takeAggregateState(args));
+ affirm( null == this.getAggregateState(args,null).value );
+ }
+ @Override public void xValue(SqlFunction.Arguments args){
+ xFinalValue(args, this.getAggregateState(args,null).value);
+ }
+ };
+ db.createFunction("winsumint", 1, func);
+ execSql(db, new String[] {
+ "CREATE TEMP TABLE twin(x, y); INSERT INTO twin VALUES",
+ "('a', 4),('b', 5),('c', 3),('d', 8),('e', 1)"
+ });
+ final Sqlite.Stmt stmt = db.prepare(
+ "SELECT x, winsumint(y) OVER ("+
+ "ORDER BY x ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING"+
+ ") AS sum_y "+
+ "FROM twin ORDER BY x;"
+ );
+ int n = 0;
+ while( stmt.step() ){
+ final String s = stmt.columnText16(0);
+ final int i = stmt.columnInt(1);
+ switch(++n){
+ case 1: affirm( "a".equals(s) && 9==i ); break;
+ case 2: affirm( "b".equals(s) && 12==i ); break;
+ case 3: affirm( "c".equals(s) && 16==i ); break;
+ case 4: affirm( "d".equals(s) && 12==i ); break;
+ case 5: affirm( "e".equals(s) && 9==i ); break;
+ default: affirm( false /* cannot happen */ );
+ }
+ }
+ stmt.close();
+ affirm( 5 == n );
+ db.close();
+ }
+
+ private void testKeyword(){
+ final int n = Sqlite.keywordCount();
+ affirm( n>0 );
+ affirm( !Sqlite.keywordCheck("_nope_") );
+ affirm( Sqlite.keywordCheck("seLect") );
+ affirm( null!=Sqlite.keywordName(0) );
+ affirm( null!=Sqlite.keywordName(n-1) );
+ affirm( null==Sqlite.keywordName(n) );
+ }
+
+
+ private void testExplain(){
+ final Sqlite db = openDb();
+ Sqlite.Stmt q = db.prepare("SELECT 1");
+ affirm( 0 == q.isExplain() );
+ q.explain(0);
+ affirm( 0 == q.isExplain() );
+ q.explain(1);
+ affirm( 1 == q.isExplain() );
+ q.explain(2);
+ affirm( 2 == q.isExplain() );
+ Exception ex = null;
+ try{
+ q.explain(-1);
+ }catch(Exception e){
+ ex = e;
+ }
+ affirm( ex instanceof SqliteException );
+ q.finalizeStmt();
+ db.close();
+ }
+
+
+ private void testTrace(){
+ final Sqlite db = openDb();
+ final ValueHolder<Integer> counter = new ValueHolder<>(0);
+ /* Ensure that characters outside of the UTF BMP survive the trip
+ from Java to sqlite3 and back to Java. (At no small efficiency
+ penalty.) */
+ final String nonBmpChar = "😃";
+ db.trace(
+ Sqlite.TRACE_ALL,
+ new Sqlite.TraceCallback(){
+ @Override public void call(int traceFlag, Object pNative, Object x){
+ ++counter.value;
+ //outln("TRACE "+traceFlag+" pNative = "+pNative.getClass().getName());
+ switch(traceFlag){
+ case Sqlite.TRACE_STMT:
+ affirm(pNative instanceof Sqlite.Stmt);
+ //outln("TRACE_STMT sql = "+x);
+ affirm(x instanceof String);
+ affirm( ((String)x).indexOf(nonBmpChar) > 0 );
+ break;
+ case Sqlite.TRACE_PROFILE:
+ affirm(pNative instanceof Sqlite.Stmt);
+ affirm(x instanceof Long);
+ //outln("TRACE_PROFILE time = "+x);
+ break;
+ case Sqlite.TRACE_ROW:
+ affirm(pNative instanceof Sqlite.Stmt);
+ affirm(null == x);
+ //outln("TRACE_ROW = "+sqlite3_column_text16((sqlite3_stmt)pNative, 0));
+ break;
+ case Sqlite.TRACE_CLOSE:
+ affirm(pNative instanceof Sqlite);
+ affirm(null == x);
+ break;
+ default:
+ affirm(false /*cannot happen*/);
+ break;
+ }
+ }
+ });
+ execSql(db, "SELECT coalesce(null,null,'"+nonBmpChar+"'); "+
+ "SELECT 'w"+nonBmpChar+"orld'");
+ affirm( 6 == counter.value );
+ db.close();
+ affirm( 7 == counter.value );
+ }
+
+ private void testStatus(){
+ final Sqlite db = openDb();
+ execSql(db, "create table t(a); insert into t values(1),(2),(3)");
+
+ Sqlite.Status s = Sqlite.libStatus(Sqlite.STATUS_MEMORY_USED, false);
+ affirm( s.current > 0 );
+ affirm( s.peak >= s.current );
+
+ s = db.status(Sqlite.DBSTATUS_SCHEMA_USED, false);
+ affirm( s.current > 0 );
+ affirm( s.peak == 0 /* always 0 for SCHEMA_USED */ );
+
+ db.close();
+ }
+
+ @SingleThreadOnly /* because multiple threads legitimately make these
+ results unpredictable */
+ private synchronized void testAutoExtension(){
+ final ValueHolder<Integer> val = new ValueHolder<>(0);
+ final ValueHolder<String> toss = new ValueHolder<>(null);
+ final Sqlite.AutoExtension ax = new Sqlite.AutoExtension(){
+ @Override public void call(Sqlite db){
+ ++val.value;
+ if( null!=toss.value ){
+ throw new RuntimeException(toss.value);
+ }
+ }
+ };
+ Sqlite.addAutoExtension(ax);
+ openDb().close();
+ affirm( 1==val.value );
+ openDb().close();
+ affirm( 2==val.value );
+ Sqlite.clearAutoExtensions();
+ openDb().close();
+ affirm( 2==val.value );
+
+ Sqlite.addAutoExtension( ax );
+ Sqlite.addAutoExtension( ax ); // Must not add a second entry
+ Sqlite.addAutoExtension( ax ); // or a third one
+ openDb().close();
+ affirm( 3==val.value );
+
+ Sqlite db = openDb();
+ affirm( 4==val.value );
+ execSql(db, "ATTACH ':memory:' as foo");
+ affirm( 4==val.value, "ATTACH uses the same connection, not sub-connections." );
+ db.close();
+ db = null;
+
+ Sqlite.removeAutoExtension(ax);
+ openDb().close();
+ affirm( 4==val.value );
+ Sqlite.addAutoExtension(ax);
+ Exception err = null;
+ toss.value = "Throwing from auto_extension.";
+ try{
+ openDb();
+ }catch(Exception e){
+ err = e;
+ }
+ affirm( err!=null );
+ affirm( err.getMessage().indexOf(toss.value)>=0 );
+ toss.value = null;
+
+ val.value = 0;
+ final Sqlite.AutoExtension ax2 = new Sqlite.AutoExtension(){
+ @Override public void call(Sqlite db){
+ ++val.value;
+ }
+ };
+ Sqlite.addAutoExtension(ax2);
+ openDb().close();
+ affirm( 2 == val.value );
+ Sqlite.removeAutoExtension(ax);
+ openDb().close();
+ affirm( 3 == val.value );
+ Sqlite.addAutoExtension(ax);
+ openDb().close();
+ affirm( 5 == val.value );
+ Sqlite.removeAutoExtension(ax2);
+ openDb().close();
+ affirm( 6 == val.value );
+ Sqlite.addAutoExtension(ax2);
+ openDb().close();
+ affirm( 8 == val.value );
+
+ Sqlite.clearAutoExtensions();
+ openDb().close();
+ affirm( 8 == val.value );
+ }
+
+ private void testBackup(){
+ final Sqlite dbDest = openDb();
+
+ try (Sqlite dbSrc = openDb()) {
+ execSql(dbSrc, new String[]{
+ "pragma page_size=512; VACUUM;",
+ "create table t(a);",
+ "insert into t(a) values(1),(2),(3);"
+ });
+ Exception e = null;
+ try {
+ dbSrc.initBackup("main",dbSrc,"main");
+ }catch(Exception x){
+ e = x;
+ }
+ affirm( e instanceof SqliteException );
+ e = null;
+ try (Sqlite.Backup b = dbDest.initBackup("main",dbSrc,"main")) {
+ affirm( null!=b );
+ int rc;
+ while( Sqlite.DONE!=(rc = b.step(1)) ){
+ affirm( 0==rc );
+ }
+ affirm( b.pageCount() > 0 );
+ b.finish();
+ }
+ }
+
+ try (Sqlite.Stmt q = dbDest.prepare("SELECT sum(a) from t")) {
+ q.step();
+ affirm( q.columnInt(0) == 6 );
+ }
+ dbDest.close();
+ }
+
+ private void testCollation(){
+ final Sqlite db = openDb();
+ execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')");
+ final Sqlite.Collation myCollation = new Sqlite.Collation() {
+ private String myState =
+ "this is local state. There is much like it, but this is mine.";
+ @Override
+ // Reverse-sorts its inputs...
+ public int call(byte[] lhs, byte[] rhs){
+ int len = lhs.length > rhs.length ? rhs.length : lhs.length;
+ int c = 0, i = 0;
+ for(i = 0; i < len; ++i){
+ c = lhs[i] - rhs[i];
+ if(0 != c) break;
+ }
+ if(0==c){
+ if(i < lhs.length) c = 1;
+ else if(i < rhs.length) c = -1;
+ }
+ return -c;
+ }
+ };
+ final Sqlite.CollationNeeded collLoader = new Sqlite.CollationNeeded(){
+ @Override
+ public void call(Sqlite dbArg, int eTextRep, String collationName){
+ affirm(dbArg == db);
+ db.createCollation("reversi", eTextRep, myCollation);
+ }
+ };
+ db.onCollationNeeded(collLoader);
+ Sqlite.Stmt stmt = db.prepare("SELECT a FROM t ORDER BY a COLLATE reversi");
+ int counter = 0;
+ while( stmt.step() ){
+ final String val = stmt.columnText16(0);
+ ++counter;
+ switch(counter){
+ case 1: affirm("c".equals(val)); break;
+ case 2: affirm("b".equals(val)); break;
+ case 3: affirm("a".equals(val)); break;
+ }
+ }
+ affirm(3 == counter);
+ stmt.finalizeStmt();
+ stmt = db.prepare("SELECT a FROM t ORDER BY a");
+ counter = 0;
+ while( stmt.step() ){
+ final String val = stmt.columnText16(0);
+ ++counter;
+ //outln("Non-REVERSI'd row#"+counter+": "+val);
+ switch(counter){
+ case 3: affirm("c".equals(val)); break;
+ case 2: affirm("b".equals(val)); break;
+ case 1: affirm("a".equals(val)); break;
+ }
+ }
+ affirm(3 == counter);
+ stmt.finalizeStmt();
+ db.onCollationNeeded(null);
+ db.close();
+ }
+
+ @SingleThreadOnly /* because threads inherently break this test */
+ private void testBusy(){
+ final String dbName = "_busy-handler.db";
+ try{
+ Sqlite db1 = openDb(dbName);
+ ++metrics.dbOpen;
+ execSql(db1, "CREATE TABLE IF NOT EXISTS t(a)");
+ Sqlite db2 = openDb(dbName);
+ ++metrics.dbOpen;
+
+ final ValueHolder<Integer> xBusyCalled = new ValueHolder<>(0);
+ Sqlite.BusyHandler handler = new Sqlite.BusyHandler(){
+ @Override public int call(int n){
+ return n > 2 ? 0 : ++xBusyCalled.value;
+ }
+ };
+ db2.setBusyHandler(handler);
+
+ // Force a locked condition...
+ execSql(db1, "BEGIN EXCLUSIVE");
+ int rc = 0;
+ SqliteException ex = null;
+ try{
+ db2.prepare("SELECT * from t");
+ }catch(SqliteException x){
+ ex = x;
+ }
+ affirm( null!=ex );
+ affirm( Sqlite.BUSY == ex.errcode() );
+ affirm( 3 == xBusyCalled.value );
+ db1.close();
+ db2.close();
+ }finally{
+ try{(new java.io.File(dbName)).delete();}
+ catch(Exception e){/* ignore */}
+ }
+ }
+
+ private void testCommitHook(){
+ final Sqlite db = openDb();
+ final ValueHolder<Integer> counter = new ValueHolder<>(0);
+ final ValueHolder<Integer> hookResult = new ValueHolder<>(0);
+ final Sqlite.CommitHook theHook = new Sqlite.CommitHook(){
+ @Override public int call(){
+ ++counter.value;
+ return hookResult.value;
+ }
+ };
+ Sqlite.CommitHook oldHook = db.setCommitHook(theHook);
+ affirm( null == oldHook );
+ execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')");
+ affirm( 2 == counter.value );
+ execSql(db, "BEGIN; SELECT 1; SELECT 2; COMMIT;");
+ affirm( 2 == counter.value /* NOT invoked if no changes are made */ );
+ execSql(db, "BEGIN; update t set a='d' where a='c'; COMMIT;");
+ affirm( 3 == counter.value );
+ oldHook = db.setCommitHook(theHook);
+ affirm( theHook == oldHook );
+ execSql(db, "BEGIN; update t set a='e' where a='d'; COMMIT;");
+ affirm( 4 == counter.value );
+ oldHook = db.setCommitHook(null);
+ affirm( theHook == oldHook );
+ execSql(db, "BEGIN; update t set a='f' where a='e'; COMMIT;");
+ affirm( 4 == counter.value );
+ oldHook = db.setCommitHook(null);
+ affirm( null == oldHook );
+ execSql(db, "BEGIN; update t set a='g' where a='f'; COMMIT;");
+ affirm( 4 == counter.value );
+
+ final Sqlite.CommitHook newHook = new Sqlite.CommitHook(){
+ @Override public int call(){return 0;}
+ };
+ oldHook = db.setCommitHook(newHook);
+ affirm( null == oldHook );
+ execSql(db, "BEGIN; update t set a='h' where a='g'; COMMIT;");
+ affirm( 4 == counter.value );
+ oldHook = db.setCommitHook(theHook);
+ affirm( newHook == oldHook );
+ execSql(db, "BEGIN; update t set a='i' where a='h'; COMMIT;");
+ affirm( 5 == counter.value );
+ hookResult.value = Sqlite.ERROR;
+ int rc = execSql(db, false, "BEGIN; update t set a='j' where a='i'; COMMIT;");
+ affirm( Sqlite.CONSTRAINT_COMMITHOOK == rc );
+ affirm( 6 == counter.value );
+ db.close();
+ }
+
+ private void testRollbackHook(){
+ final Sqlite db = openDb();
+ final ValueHolder<Integer> counter = new ValueHolder<>(0);
+ final Sqlite.RollbackHook theHook = new Sqlite.RollbackHook(){
+ @Override public void call(){
+ ++counter.value;
+ }
+ };
+ Sqlite.RollbackHook oldHook = db.setRollbackHook(theHook);
+ affirm( null == oldHook );
+ execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')");
+ affirm( 0 == counter.value );
+ execSql(db, false, "BEGIN; SELECT 1; SELECT 2; ROLLBACK;");
+ affirm( 1 == counter.value /* unlike the commit hook, it is invoked even if no changes are made */ );
+
+ final Sqlite.RollbackHook newHook = new Sqlite.RollbackHook(){
+ @Override public void call(){}
+ };
+ oldHook = db.setRollbackHook(newHook);
+ affirm( theHook == oldHook );
+ execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;");
+ affirm( 1 == counter.value );
+ oldHook = db.setRollbackHook(theHook);
+ affirm( newHook == oldHook );
+ execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;");
+ affirm( 2 == counter.value );
+ int rc = execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;");
+ affirm( 0 == rc );
+ affirm( 3 == counter.value );
+ db.close();
+ }
+
+ private void testUpdateHook(){
+ final Sqlite db = openDb();
+ final ValueHolder<Integer> counter = new ValueHolder<>(0);
+ final ValueHolder<Integer> expectedOp = new ValueHolder<>(0);
+ final Sqlite.UpdateHook theHook = new Sqlite.UpdateHook(){
+ @Override
+ public void call(int opId, String dbName, String tableName, long rowId){
+ ++counter.value;
+ if( 0!=expectedOp.value ){
+ affirm( expectedOp.value == opId );
+ }
+ }
+ };
+ Sqlite.UpdateHook oldHook = db.setUpdateHook(theHook);
+ affirm( null == oldHook );
+ expectedOp.value = Sqlite.INSERT;
+ execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')");
+ affirm( 3 == counter.value );
+ expectedOp.value = Sqlite.UPDATE;
+ execSql(db, "update t set a='d' where a='c';");
+ affirm( 4 == counter.value );
+ oldHook = db.setUpdateHook(theHook);
+ affirm( theHook == oldHook );
+ expectedOp.value = Sqlite.DELETE;
+ execSql(db, "DELETE FROM t where a='d'");
+ affirm( 5 == counter.value );
+ oldHook = db.setUpdateHook(null);
+ affirm( theHook == oldHook );
+ execSql(db, "update t set a='e' where a='b';");
+ affirm( 5 == counter.value );
+ oldHook = db.setUpdateHook(null);
+ affirm( null == oldHook );
+
+ final Sqlite.UpdateHook newHook = new Sqlite.UpdateHook(){
+ @Override public void call(int opId, String dbName, String tableName, long rowId){
+ }
+ };
+ oldHook = db.setUpdateHook(newHook);
+ affirm( null == oldHook );
+ execSql(db, "update t set a='h' where a='a'");
+ affirm( 5 == counter.value );
+ oldHook = db.setUpdateHook(theHook);
+ affirm( newHook == oldHook );
+ expectedOp.value = Sqlite.UPDATE;
+ execSql(db, "update t set a='i' where a='h'");
+ affirm( 6 == counter.value );
+ db.close();
+ }
+
+ private void testProgress(){
+ final Sqlite db = openDb();
+ final ValueHolder<Integer> counter = new ValueHolder<>(0);
+ db.setProgressHandler(1, new Sqlite.ProgressHandler(){
+ @Override public int call(){
+ ++counter.value;
+ return 0;
+ }
+ });
+ execSql(db, "SELECT 1; SELECT 2;");
+ affirm( counter.value > 0 );
+ int nOld = counter.value;
+ db.setProgressHandler(0, null);
+ execSql(db, "SELECT 1; SELECT 2;");
+ affirm( nOld == counter.value );
+ db.close();
+ }
+
+ private void testAuthorizer(){
+ final Sqlite db = openDb();
+ final ValueHolder<Integer> counter = new ValueHolder<>(0);
+ final ValueHolder<Integer> authRc = new ValueHolder<>(0);
+ final Sqlite.Authorizer auth = new Sqlite.Authorizer(){
+ public int call(int op, String s0, String s1, String s2, String s3){
+ ++counter.value;
+ //outln("xAuth(): "+s0+" "+s1+" "+s2+" "+s3);
+ return authRc.value;
+ }
+ };
+ execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')");
+ db.setAuthorizer(auth);
+ execSql(db, "UPDATE t SET a=1");
+ affirm( 1 == counter.value );
+ authRc.value = Sqlite.DENY;
+ int rc = execSql(db, false, "UPDATE t SET a=2");
+ affirm( Sqlite.AUTH==rc );
+ db.setAuthorizer(null);
+ rc = execSql(db, false, "UPDATE t SET a=2");
+ affirm( 0==rc );
+ db.close();
+ }
+
+ private void testBlobOpen(){
+ final Sqlite db = openDb();
+
+ execSql(db, "CREATE TABLE T(a BLOB);"
+ +"INSERT INTO t(rowid,a) VALUES(1, 'def'),(2, 'XYZ');"
+ );
+ Sqlite.Blob b = db.blobOpen("main", "t", "a",
+ db.lastInsertRowId(), true);
+ affirm( 3==b.bytes() );
+ b.write(new byte[] {100, 101, 102 /*"DEF"*/}, 0);
+ b.close();
+ Sqlite.Stmt stmt = db.prepare("SELECT length(a), a FROM t ORDER BY a");
+ affirm( stmt.step() );
+ affirm( 3 == stmt.columnInt(0) );
+ affirm( "def".equals(stmt.columnText16(1)) );
+ stmt.finalizeStmt();
+
+ b = db.blobOpen("main", "t", "a", db.lastInsertRowId(), false);
+ final byte[] tgt = new byte[3];
+ b.read( tgt, 0 );
+ affirm( 100==tgt[0] && 101==tgt[1] && 102==tgt[2], "DEF" );
+ execSql(db,"UPDATE t SET a=zeroblob(10) WHERE rowid=2");
+ b.close();
+ b = db.blobOpen("main", "t", "a", db.lastInsertRowId(), true);
+ byte[] bw = new byte[]{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
+ };
+ b.write(bw, 0);
+ byte[] br = new byte[10];
+ b.read(br, 0);
+ for( int i = 0; i < br.length; ++i ){
+ affirm(bw[i] == br[i]);
+ }
+ b.close();
+ db.close();
+ }
+
+ void testPrepareMulti(){
+ final ValueHolder<Integer> fCount = new ValueHolder<>(0);
+ final ValueHolder<Integer> mCount = new ValueHolder<>(0);
+ try (Sqlite db = openDb()) {
+ execSql(db, "create table t(a); insert into t(a) values(1),(2),(3)");
+ db.createFunction("counter", -1, new ScalarFunction(){
+ @Override public void xFunc(SqlFunction.Arguments args){
+ ++fCount.value;
+ args.resultNull();
+ }
+ }
+ );
+ final Sqlite.PrepareMulti pm = new Sqlite.PrepareMultiFinalize(
+ new Sqlite.PrepareMulti() {
+ @Override public void call(Sqlite.Stmt q){
+ ++mCount.value;
+ while(q.step()){}
+ }
+ }
+ );
+ final String sql = "select counter(*) from t;"+
+ "select counter(*) from t; /* comment */"+
+ "select counter(*) from t; -- comment\n"
+ ;
+ db.prepareMulti(sql, pm);
}
- affirm( 1 == xDestroyCalled.value );
+ affirm( 3 == mCount.value );
+ affirm( 9 == fCount.value );
+ }
+
+
+ /* Copy/paste/rename this to add new tests. */
+ private void _testTemplate(){
+ try (Sqlite db = openDb()) {
+ Sqlite.Stmt stmt = db.prepare("SELECT 1");
+ stmt.finalizeStmt();
+ }
}
private void runTests(boolean fromThread) throws Exception {
List<java.lang.reflect.Method> mlist = testMethods;
affirm( null!=mlist );
if( shuffle ){
mlist = new ArrayList<>( testMethods.subList(0, testMethods.size()) );
java.util.Collections.shuffle(mlist);
}
- if( listRunTests ){
+ if( (!fromThread && listRunTests>0) || listRunTests>1 ){
synchronized(this.getClass()){
if( !fromThread ){
out("Initial test"," list: ");
for(java.lang.reflect.Method m : testMethods){
out(m.getName()+" ");
@@ -342,12 +1003,11 @@
}catch(Exception e){
synchronized( listErrors ){
listErrors.add(e);
}
}finally{
- affirm( CApi.sqlite3_java_uncache_thread() );
- affirm( !CApi.sqlite3_java_uncache_thread() );
+ Sqlite.uncacheThread();
}
}
/**
Runs the basic sqlite3 JNI binding sanity-check suite.
@@ -366,11 +1026,13 @@
-naps: sleep small random intervals between tests in order to add
some chaos for cross-thread contention.
-list-tests: outputs the list of tests being run, minus some
- which are hard-coded. This is noisy in multi-threaded mode.
+ which are hard-coded. In multi-threaded mode, use this twice to
+ emit the list run by each thread (which may differ from the initial
+ list, in particular if -shuffle is used).
-fail: forces an exception to be thrown during the test run. Use
with -shuffle to make its appearance unpredictable.
-v: emit some developer-mode info at the end.
@@ -395,11 +1057,11 @@
}else if(arg.equals("r") || arg.equals("repeat")){
nRepeat = Integer.parseInt(args[i++]);
}else if(arg.equals("shuffle")){
shuffle = true;
}else if(arg.equals("list-tests")){
- listRunTests = true;
+ ++listRunTests;
}else if(arg.equals("fail")){
forceFail = true;
}else if(arg.equals("sqllog")){
sqlLog = true;
}else if(arg.equals("configlog")){
@@ -413,43 +1075,33 @@
}
}
}
if( sqlLog ){
- if( CApi.sqlite3_compileoption_used("ENABLE_SQLLOG") ){
- final ConfigSqllogCallback log = new ConfigSqllogCallback() {
- @Override public void call(sqlite3 db, String msg, int op){
+ if( Sqlite.compileOptionUsed("ENABLE_SQLLOG") ){
+ Sqlite.libConfigSqlLog( new Sqlite.ConfigSqlLog() {
+ @Override public void call(Sqlite db, String msg, int op){
switch(op){
case 0: outln("Opening db: ",db); break;
case 1: outln("SQL ",db,": ",msg); break;
case 2: outln("Closing db: ",db); break;
}
}
- };
- int rc = CApi.sqlite3_config( log );
- affirm( 0==rc );
- rc = CApi.sqlite3_config( (ConfigSqllogCallback)null );
- affirm( 0==rc );
- rc = CApi.sqlite3_config( log );
- affirm( 0==rc );
+ }
+ );
}else{
outln("WARNING: -sqllog is not active because library was built ",
"without SQLITE_ENABLE_SQLLOG.");
}
}
if( configLog ){
- final ConfigLogCallback log = new ConfigLogCallback() {
- @Override public void call(int code, String msg){
- outln("ConfigLogCallback: ",ResultCode.getEntryForInt(code),": ", msg);
- };
- };
- int rc = CApi.sqlite3_config( log );
- affirm( 0==rc );
- rc = CApi.sqlite3_config( (ConfigLogCallback)null );
- affirm( 0==rc );
- rc = CApi.sqlite3_config( log );
- affirm( 0==rc );
+ Sqlite.libConfigLog( new Sqlite.ConfigLog() {
+ @Override public void call(int code, String msg){
+ outln("ConfigLog: ",Sqlite.errstr(code),": ", msg);
+ };
+ }
+ );
}
quietMode = squelchTestOutput;
outln("If you just saw warning messages regarding CallStaticObjectMethod, ",
"you are very likely seeing the side effects of a known openjdk8 ",
@@ -478,43 +1130,20 @@
}
if( nSkipped>0 ) out("\n");
}
final long timeStart = System.currentTimeMillis();
- int nLoop = 0;
- switch( CApi.sqlite3_threadsafe() ){ /* Sanity checking */
- case 0:
- affirm( CApi.SQLITE_ERROR==CApi.sqlite3_config( CApi.SQLITE_CONFIG_SINGLETHREAD ),
- "Could not switch to single-thread mode." );
- affirm( CApi.SQLITE_ERROR==CApi.sqlite3_config( CApi.SQLITE_CONFIG_MULTITHREAD ),
- "Could switch to multithread mode." );
- affirm( CApi.SQLITE_ERROR==CApi.sqlite3_config( CApi.SQLITE_CONFIG_SERIALIZED ),
- "Could not switch to serialized threading mode." );
- outln("This is a single-threaded build. Not using threads.");
- nThread = 1;
- break;
- case 1:
- case 2:
- affirm( 0==CApi.sqlite3_config( CApi.SQLITE_CONFIG_SINGLETHREAD ),
- "Could not switch to single-thread mode." );
- affirm( 0==CApi.sqlite3_config( CApi.SQLITE_CONFIG_MULTITHREAD ),
- "Could not switch to multithread mode." );
- affirm( 0==CApi.sqlite3_config( CApi.SQLITE_CONFIG_SERIALIZED ),
- "Could not switch to serialized threading mode." );
- break;
- default:
- affirm( false, "Unhandled SQLITE_THREADSAFE value." );
- }
outln("libversion_number: ",
- CApi.sqlite3_libversion_number(),"\n",
- CApi.sqlite3_libversion(),"\n",CApi.SQLITE_SOURCE_ID,"\n",
+ Sqlite.libVersionNumber(),"\n",
+ Sqlite.libVersion(),"\n",Sqlite.libSourceId(),"\n",
"SQLITE_THREADSAFE=",CApi.sqlite3_threadsafe());
final boolean showLoopCount = (nRepeat>1 && nThread>1);
if( showLoopCount ){
outln("Running ",nRepeat," loop(s) with ",nThread," thread(s) each.");
}
if( takeNaps ) outln("Napping between tests is enabled.");
+ int nLoop = 0;
for( int n = 0; n < nRepeat; ++n ){
++nLoop;
if( showLoopCount ) out((1==nLoop ? "" : " ")+nLoop);
if( nThread<=1 ){
new Tester2(0).runTests(false);
@@ -552,11 +1181,11 @@
outln("\tAssertions checked: ",affirmCount);
outln("\tDatabases opened: ",metrics.dbOpen);
if( doSomethingForDev ){
CApi.sqlite3_jni_internal_details();
}
- affirm( 0==CApi.sqlite3_release_memory(1) );
+ affirm( 0==Sqlite.libReleaseMemory(1) );
CApi.sqlite3_shutdown();
int nMethods = 0;
int nNatives = 0;
int nCanonical = 0;
final java.lang.reflect.Method[] declaredMethods =
Index: ext/jni/src/org/sqlite/jni/wrapper1/ValueHolder.java
==================================================================
--- ext/jni/src/org/sqlite/jni/wrapper1/ValueHolder.java
+++ ext/jni/src/org/sqlite/jni/wrapper1/ValueHolder.java
@@ -7,17 +7,17 @@
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
-** This file contains a set of tests for the sqlite3 JNI bindings.
+** This file contains the ValueHolder utility class.
*/
package org.sqlite.jni.wrapper1;
/**
A helper class which simply holds a single value. Its primary use
- is for communicating values out of anonymous classes, as doing so
+ is for communicating values out of anonymous callbacks, as doing so
requires a "final" reference.
*/
public class ValueHolder<T> {
public T value;
public ValueHolder(){}
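The class is small, but the reason for it is easy to miss: an anonymous callback may only capture effectively-final locals, so mutable state has to live inside a final holder. A minimal illustration, not part of the patch, usable inside any method body:

final ValueHolder<Integer> count = new ValueHolder<>(0);
final Runnable cb = new Runnable(){
  // The anonymous class cannot assign to a captured local, but it can
  // freely mutate the contents of the final holder it captured.
  @Override public void run(){ ++count.value; }
};
cb.run();
cb.run();
assert 2 == count.value;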
ADDED ext/jni/src/org/sqlite/jni/wrapper1/WindowFunction.java
Index: ext/jni/src/org/sqlite/jni/wrapper1/WindowFunction.java
==================================================================
--- /dev/null
+++ ext/jni/src/org/sqlite/jni/wrapper1/WindowFunction.java
@@ -0,0 +1,42 @@
+/*
+** 2023-10-16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file is part of the wrapper1 interface for sqlite3.
+*/
+package org.sqlite.jni.wrapper1;
+
+/**
+ A SqlFunction implementation for window functions. The T type
+ represents the type of data accumulated by this function while it
+ works. e.g. a SUM()-like UDF might use Integer or Long and a
+ CONCAT()-like UDF might use a StringBuilder or a List.
+*/
+public abstract class WindowFunction<T> extends AggregateFunction<T> {
+
+ /**
+ As for the xInverse() argument of the C API's
+ sqlite3_create_window_function(). If this function throws, the
+ exception is reported via sqlite3_result_error().
+ */
+ public abstract void xInverse(SqlFunction.Arguments args);
+
+ /**
+ As for the xValue() argument of the C API's
+ sqlite3_create_window_function(). If this function throws, it is
+ translated into sqlite3_result_error().
+
+ Note that the passed-in object will not actually contain any
+ arguments for xValue() but will contain the context object needed
+ for setting the call's result or error state.
+ */
+ public abstract void xValue(SqlFunction.Arguments args);
+
+}
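As a sketch of the intended subclassing pattern, not part of the patch and assuming the AggregateFunction state helpers exercised in Tester2.java above, here is a window function that simply counts the rows currently in its window. It would be registered with db.createFunction("wincount", 0, new WinCount()).

import org.sqlite.jni.wrapper1.*;

public class WinCount extends WindowFunction<Integer> {
  // Shared by xStep()/xInverse(): adjust the per-group row counter.
  private void bump(SqlFunction.Arguments args, int d){
    this.getAggregateState(args, 0).value += d;
  }
  @Override public void xStep(SqlFunction.Arguments args){ bump(args, 1); }
  @Override public void xInverse(SqlFunction.Arguments args){ bump(args, -1); }
  @Override public void xValue(SqlFunction.Arguments args){
    final Integer v = this.getAggregateState(args, null).value;
    if( null==v ) args.resultNull(); else args.resultInt(v);
  }
  @Override public void xFinal(SqlFunction.Arguments args){
    final Integer v = this.takeAggregateState(args);
    if( null==v ) args.resultNull(); else args.resultInt(v);
  }
}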
Index: ext/misc/noop.c
==================================================================
--- ext/misc/noop.c
+++ ext/misc/noop.c
@@ -35,10 +35,28 @@
sqlite3_value **argv
){
assert( argc==1 );
sqlite3_result_value(context, argv[0]);
}
+
+/*
+** Implementation of the multitype_text() function.
+**
+** The function returns its argument. The result will always have a
+** TEXT value. But if the original input is numeric, it will also
+** have that numeric value.
+*/
+static void multitypeTextFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ assert( argc==1 );
+ (void)argc;
+ (void)sqlite3_value_text(argv[0]);
+ sqlite3_result_value(context, argv[0]);
+}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_noop_init(
@@ -62,7 +80,11 @@
0, noopfunc, 0, 0);
if( rc ) return rc;
rc = sqlite3_create_function(db, "noop_nd", 1,
SQLITE_UTF8,
0, noopfunc, 0, 0);
+ if( rc ) return rc;
+ rc = sqlite3_create_function(db, "multitype_text", 1,
+ SQLITE_UTF8,
+ 0, multitypeTextFunc, 0, 0);
return rc;
}
Index: ext/misc/randomjson.c
==================================================================
--- ext/misc/randomjson.c
+++ ext/misc/randomjson.c
@@ -24,13 +24,18 @@
**
** USING FROM THE CLI:
**
** .load ./randomjson
** SELECT random_json(1);
+** SELECT random_json5(1);
*/
-#include "sqlite3ext.h"
-SQLITE_EXTENSION_INIT1
+#ifdef SQLITE_STATIC_RANDOMJSON
+# include "sqlite3.h"
+#else
+# include "sqlite3ext.h"
+ SQLITE_EXTENSION_INIT1
+#endif
#include
#include
#include
/* Pseudo-random number generator */
@@ -49,21 +54,22 @@
p->x = (p->x>>1) ^ ((1+~(p->x&1)) & 0xd0000001);
p->y = p->y*1103515245 + 12345;
return p->x ^ p->y;
}
-static const char *azJsonAtoms[] = {
-/* JSON JSON-5 */
+static char *azJsonAtoms[] = {
+ /* JSON JSON-5 */
"0", "0",
"1", "1",
"-1", "-1",
"2", "+2",
- "3", "3",
- "2.5", "2.5",
+ "3DDDD", "3DDDD",
+ "2.5DD", "2.5DD",
"0.75", ".75",
"-4.0e2", "-4.e2",
"5.0e-3", "+5e-3",
+ "6.DDe+0DD", "6.DDe+0DD",
"0", "0x0",
"512", "0x200",
"256", "+0x100",
"-2748", "-0xabc",
"true", "true",
@@ -71,16 +77,18 @@
"null", "null",
"9.0e999", "Infinity",
"-9.0e999", "-Infinity",
"9.0e999", "+Infinity",
"null", "NaN",
- "-0.0005123", "-0.0005123",
+ "-0.0005DD", "-0.0005DD",
"4.35e-3", "+4.35e-3",
"\"gem\\\"hay\"", "\"gem\\\"hay\"",
"\"icy'joy\"", "'icy\\'joy\'",
"\"keylog\"", "\"key\\\nlog\"",
"\"mix\\\\\\tnet\"", "\"mix\\\\\\tnet\"",
+ "\"oat\\r\\n\"", "\"oat\\r\\n\"",
+ "\"\\fpan\\b\"", "\"\\fpan\\b\"",
"{}", "{}",
"[]", "[]",
"[]", "[/*empty*/]",
"{}", "{//empty\n}",
"\"ask\"", "\"ask\"",
@@ -87,23 +95,24 @@
"\"bag\"", "\"bag\"",
"\"can\"", "\"can\"",
"\"day\"", "\"day\"",
"\"end\"", "'end'",
"\"fly\"", "\"fly\"",
+ "\"\\u00XX\\u00XX\"", "\"\\xXX\\xXX\"",
+ "\"y\\uXXXXz\"", "\"y\\uXXXXz\"",
"\"\"", "\"\"",
};
-static const char *azJsonTemplate[] = {
+static char *azJsonTemplate[] = {
/* JSON JSON-5 */
- "{\"a\":%,\"b\":%,\"c\":%}", "{a:%,b:%,c:%}",
+ "{\"a\":%,\"b\":%,\"cDD\":%}", "{a:%,b:%,cDD:%}",
"{\"a\":%,\"b\":%,\"c\":%,\"d\":%,\"e\":%}", "{a:%,b:%,c:%,d:%,e:%}",
- "{\"a\":%,\"b\":%,\"c\":%,\"d\":%,\"\":%}", "{a:%,b:%,c:%,d:%,\"\":%}",
+ "{\"a\":%,\"b\":%,\"c\":%,\"d\":%,\"\":%}", "{a:%,b:%,c:%,d:%,'':%}",
"{\"d\":%}", "{d:%}",
"{\"eeee\":%, \"ffff\":%}", "{eeee:% /*and*/, ffff:%}",
- "{\"$g\":%,\"_h_\":%}", "{$g:%,_h_:%,}",
+ "{\"$g\":%,\"_h_\":%,\"a b c d\":%}", "{$g:%,_h_:%,\"a b c d\":%}",
"{\"x\":%,\n \"y\":%}", "{\"x\":%,\n \"y\":%}",
- "{\"a b c d\":%,\"e\":%,\"f\":%,\"x\":%,\"y\":%}",
- "{\"a b c d\":%,e:%,f:%,x:%,y:%}",
+ "{\"\\u00XX\":%,\"\\uXXXX\":%}", "{\"\\xXX\":%,\"\\uXXXX\":%}",
"{\"Z\":%}", "{Z:%,}",
"[%]", "[%,]",
"[%,%]", "[%,%]",
"[%,%,%]", "[%,%,%,]",
"[%,%,%,%]", "[%,%,%,%]",
@@ -120,19 +129,17 @@
Prng *p,
int eType, /* 0 for JSON, 1 for JSON5 */
unsigned int r /* Growth probability 0..1000. 0 means no growth */
){
unsigned int i, j, k;
- const char *z;
+ char *z;
+ char *zX;
size_t n;
+ char zBuf[200];
j = 0;
- if( zSrc==0 ){
- k = prngInt(p)%(count(azJsonTemplate)/2);
- k = k*2 + eType;
- zSrc = azJsonTemplate[k];
- }
+ if( zSrc==0 ) zSrc = "%";
if( strlen(zSrc)>=STRSZ/10 ) r = 0;
for(i=0; zSrc[i]; i++){
if( zSrc[i]!='%' ){
if( j<STRSZ ) zDest[j++] = zSrc[i];
+ if( strstr(z, "XX")!=0 ){
+ unsigned int y = prngInt(p);
+ if( (y&0xff)==((y>>8)&0xff) ) y += 0x100;
+ while( (y&0xff)==((y>>16)&0xff) || ((y>>8)&0xff)==((y>>16)&0xff) ){
+ y += 0x10000;
+ }
+ memcpy(zBuf, z, n+1);
+ z = zBuf;
+ zX = strstr(z,"XX");
+ while( zX!=0 ){
+ zX[0] = "0123456789abcdef"[y%16]; y /= 16;
+ zX[1] = "0123456789abcdef"[y%16]; y /= 16;
+ zX = strstr(zX, "XX");
+ }
+ }else if( (zX = strstr(z,"DD"))!=0 ){
+ unsigned int y = prngInt(p);
+ memcpy(zBuf, z, n+1);
+ z = zBuf;
+ zX = strstr(z,"DD");
+ while( zX!=0 ){
+ zX[0] = "0123456789"[y%10]; y /= 10;
+ zX[1] = "0123456789"[y%10]; y /= 10;
+ zX = strstr(zX, "DD");
+ }
+ }
+ assert( strstr(z, "XX")==0 );
+ assert( strstr(z, "DD")==0 );
if( j+n<STRSZ ){
Index: ext/misc/series.c
==================================================================
--- ext/misc/series.c
+++ ext/misc/series.c
- if( ix>=(sqlite3_uint64)LLONG_MAX ){
+static sqlite3_int64 genSeqMember(
+ sqlite3_int64 smBase,
+ sqlite3_int64 smStep,
+ sqlite3_uint64 ix
+){
+ static const sqlite3_uint64 mxI64 =
+ ((sqlite3_uint64)0x7fffffff)<<32 | 0xffffffff;
+ if( ix>=mxI64 ){
/* Get ix into signed i64 range. */
- ix -= (sqlite3_uint64)LLONG_MAX;
+ ix -= mxI64;
/* With 2's complement ALU, this next can be 1 step, but is split into
* 2 for UBSAN's satisfaction (and hypothetical 1's complement ALUs.) */
- smBase += (LLONG_MAX/2) * smStep;
- smBase += (LLONG_MAX - LLONG_MAX/2) * smStep;
+ smBase += (mxI64/2) * smStep;
+ smBase += (mxI64 - mxI64/2) * smStep;
}
/* Under UBSAN (or on 1's complement machines), must do this last term
* in steps to avoid the dreaded (and harmless) signed multiply overflow. */
if( ix>=2 ){
sqlite3_int64 ix2 = (sqlite3_int64)ix/2;
Index: ext/misc/totype.c
==================================================================
--- ext/misc/totype.c
+++ ext/misc/totype.c
@@ -347,10 +347,24 @@
*pResult = result;
/* return true if number and no extra non-whitespace characters after */
return z>=zEnd && nDigits>0 && eValid && nonNum==0;
}
+
+/*
+** Convert a floating point value to an integer. Or, if this cannot be
+** done in a way that avoids 'outside the range of representable values'
+** warnings from UBSAN, return 0.
+**
+** This function is a modified copy of internal SQLite function
+** sqlite3RealToI64().
+*/
+static sqlite3_int64 totypeDoubleToInt(double r){
+ if( r<-9223372036854774784.0 ) return 0;
+ if( r>+9223372036854774784.0 ) return 0;
+ return (sqlite3_int64)r;
+}
/*
** tointeger(X): If X is any value (integer, double, blob, or string) that
** can be losslessly converted into an integer, then make the conversion and
** return the result. Otherwise, return NULL.
@@ -363,11 +377,11 @@
assert( argc==1 );
(void)argc;
switch( sqlite3_value_type(argv[0]) ){
case SQLITE_FLOAT: {
double rVal = sqlite3_value_double(argv[0]);
- sqlite3_int64 iVal = (sqlite3_int64)rVal;
+ sqlite3_int64 iVal = totypeDoubleToInt(rVal);
if( rVal==(double)iVal ){
sqlite3_result_int64(context, iVal);
}
break;
}
@@ -438,11 +452,11 @@
break;
}
case SQLITE_INTEGER: {
sqlite3_int64 iVal = sqlite3_value_int64(argv[0]);
double rVal = (double)iVal;
- if( iVal==(sqlite3_int64)rVal ){
+ if( iVal==totypeDoubleToInt(rVal) ){
sqlite3_result_double(context, rVal);
}
break;
}
case SQLITE_BLOB: {
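The combination of totypeDoubleToInt() above and the rVal==(double)iVal round-trip check amounts to "convert losslessly or return NULL". A compact Java rendering of the same idea, illustrative only and not part of the patch:

// Returns the exact integer value of d, or null when d cannot be
// represented losslessly as a 64-bit integer (mirrors tointeger()).
static Long losslessToLong(double d){
  if( d < -9223372036854774784.0 || d > 9223372036854774784.0 ) return null;
  final long i = (long)d;
  return (d == (double)i) ? Long.valueOf(i) : null;
}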
Index: ext/recover/dbdata.c
==================================================================
--- ext/recover/dbdata.c
+++ ext/recover/dbdata.c
@@ -580,10 +580,11 @@
/* Load the "byte of payload including overflow" field */
if( bNextPage || iOff>pCsr->nPage ){
bNextPage = 1;
}else{
iOff += dbdataGetVarintU32(&pCsr->aPage[iOff], &nPayload);
+ if( nPayload>0x7fffff00 ) nPayload &= 0x3fff;
}
/* If this is a leaf intkey cell, load the rowid */
if( bHasRowid && !bNextPage && iOff<pCsr->nPage ){
iOff += dbdataGetVarint(&pCsr->aPage[iOff], &pCsr->iIntkey);
Index: ext/recover/test_recover.c
==================================================================
--- ext/recover/test_recover.c
+++ ext/recover/test_recover.c
@@ -234,11 +234,11 @@
}
if( getDbPointer(interp, objv[1], &db) ) return TCL_ERROR;
zDb = Tcl_GetString(objv[2]);
if( zDb[0]=='\0' ) zDb = 0;
- pNew = ckalloc(sizeof(TestRecover));
+ pNew = (TestRecover*)ckalloc(sizeof(TestRecover));
if( bSql==0 ){
zUri = Tcl_GetString(objv[3]);
pNew->p = sqlite3_recover_init(db, zDb, zUri);
}else{
pNew->interp = interp;
Index: ext/rtree/rtree.c
==================================================================
--- ext/rtree/rtree.c
+++ ext/rtree/rtree.c
@@ -694,13 +694,15 @@
/*
** Clear the Rtree.pNodeBlob object
*/
static void nodeBlobReset(Rtree *pRtree){
- sqlite3_blob *pBlob = pRtree->pNodeBlob;
- pRtree->pNodeBlob = 0;
- sqlite3_blob_close(pBlob);
+ if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){
+ sqlite3_blob *pBlob = pRtree->pNodeBlob;
+ pRtree->pNodeBlob = 0;
+ sqlite3_blob_close(pBlob);
+ }
}
/*
** Obtain a reference to an r-tree node.
*/
@@ -715,11 +717,11 @@
/* Check if the requested node is already in the hash table. If so,
** increase its reference count and return it.
*/
if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){
- if( pParent && pParent!=pNode->pParent ){
+ if( pParent && ALWAYS(pParent!=pNode->pParent) ){
RTREE_IS_CORRUPT(pRtree);
return SQLITE_CORRUPT_VTAB;
}
pNode->nRef++;
*ppNode = pNode;
@@ -740,10 +742,11 @@
rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, pRtree->zNodeName,
"data", iNode, 0,
&pRtree->pNodeBlob);
}
if( rc ){
+ nodeBlobReset(pRtree);
*ppNode = 0;
/* If unable to open an sqlite3_blob on the desired row, that can only
** be because the shadow tables hold erroneous data. */
if( rc==SQLITE_ERROR ){
rc = SQLITE_CORRUPT_VTAB;
@@ -799,11 +802,10 @@
rc = SQLITE_CORRUPT_VTAB;
RTREE_IS_CORRUPT(pRtree);
}
*ppNode = pNode;
}else{
- nodeBlobReset(pRtree);
if( pNode ){
pRtree->nNodeRef--;
sqlite3_free(pNode);
}
*ppNode = 0;
@@ -1134,13 +1136,11 @@
assert( pRtree->nCursor>0 );
resetCursor(pCsr);
sqlite3_finalize(pCsr->pReadAux);
sqlite3_free(pCsr);
pRtree->nCursor--;
- if( pRtree->nCursor==0 && pRtree->inWrTrans==0 ){
- nodeBlobReset(pRtree);
- }
+ nodeBlobReset(pRtree);
return SQLITE_OK;
}
/*
** Rtree virtual table module xEof method.
@@ -3224,11 +3224,12 @@
/*
** Called when a transaction starts.
*/
static int rtreeBeginTransaction(sqlite3_vtab *pVtab){
Rtree *pRtree = (Rtree *)pVtab;
- pRtree->inWrTrans = 1;
+ assert( pRtree->inWrTrans==0 );
+ pRtree->inWrTrans++;
return SQLITE_OK;
}
/*
** Called when a transaction completes (either by COMMIT or ROLLBACK).
@@ -3466,11 +3467,11 @@
}else{
rc = SQLITE_NOMEM;
}
sqlite3_free(zSql);
}
- if( pRtree->nAux ){
+ if( pRtree->nAux && rc!=SQLITE_NOMEM ){
pRtree->zReadAuxSql = sqlite3_mprintf(
"SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1",
zDb, zPrefix);
if( pRtree->zReadAuxSql==0 ){
rc = SQLITE_NOMEM;
@@ -4155,19 +4156,17 @@
check.db = db;
check.zDb = zDb;
check.zTab = zTab;
/* Find the number of auxiliary columns */
- if( check.rc==SQLITE_OK ){
- pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab);
- if( pStmt ){
- nAux = sqlite3_column_count(pStmt) - 2;
- sqlite3_finalize(pStmt);
- }else
- if( check.rc!=SQLITE_NOMEM ){
- check.rc = SQLITE_OK;
- }
+ pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab);
+ if( pStmt ){
+ nAux = sqlite3_column_count(pStmt) - 2;
+ sqlite3_finalize(pStmt);
+ }else
+ if( check.rc!=SQLITE_NOMEM ){
+ check.rc = SQLITE_OK;
}
/* Find number of dimensions in the rtree table. */
pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.%Q", zDb, zTab);
if( pStmt ){
@@ -4218,10 +4217,11 @@
UNUSED_PARAMETER(isQuick);
rc = rtreeCheckTable(pRtree->db, pRtree->zDb, pRtree->zName, pzErr);
if( rc==SQLITE_OK && *pzErr ){
*pzErr = sqlite3_mprintf("In RTree %s.%s:\n%z",
pRtree->zDb, pRtree->zName, *pzErr);
+ if( (*pzErr)==0 ) rc = SQLITE_NOMEM;
}
return rc;
}
/*
Index: ext/rtree/rtreeJ.test
==================================================================
--- ext/rtree/rtreeJ.test
+++ ext/rtree/rtreeJ.test
@@ -14,14 +14,10 @@
if {![info exists testdir]} {
set testdir [file join [file dirname [info script]] .. .. test]
}
source $testdir/tester.tcl
set testprefix rtreeJ
-ifcapable !rtree {
- finish_test
- return
-}
do_execsql_test 1.0 {
CREATE VIRTUAL TABLE t1 USING rtree(id, x1, x2);
INSERT INTO t1 VALUES(1, 1, 1), (2, 2, 2);
} {}
Index: ext/session/sqlite3session.c
==================================================================
--- ext/session/sqlite3session.c
+++ ext/session/sqlite3session.c
@@ -2346,13 +2346,11 @@
/* Delete all attached table objects. And the contents of their
** associated hash-tables. */
sessionDeleteTable(pSession, pSession->pTable);
- /* Assert that all allocations have been freed and then free the
- ** session object itself. */
- // assert( pSession->nMalloc==0 );
+ /* Free the session object. */
sqlite3_free(pSession);
}
/*
** Set a table filter on a Session Object.
Index: ext/userauth/user-auth.txt
==================================================================
--- ext/userauth/user-auth.txt
+++ ext/userauth/user-auth.txt
@@ -1,5 +1,17 @@
+*********************************** NOTICE ************************************
+* This extension is deprecated. The SQLite developers do not maintain this *
+* extension. At some point in the future, it might disappear from the source *
+* tree. *
+* *
+* If you are using this extension and think it should be supported moving *
+* forward, visit the SQLite Forum (https://sqlite.org/forum) and argue your *
+* case there. *
+* *
+* This deprecation notice was added on 2024-01-22. *
+*******************************************************************************
+
Activate the user authentication logic by including the
ext/userauth/userauth.c source code file in the build and
adding the -DSQLITE_USER_AUTHENTICATION compile-time option.
The ext/userauth/sqlite3userauth.h header file is available to
applications to define the interface.
Index: ext/wasm/GNUmakefile
==================================================================
--- ext/wasm/GNUmakefile
+++ ext/wasm/GNUmakefile
@@ -40,22 +40,55 @@
# limited to:
#
# 1) Consolidate the code generation for sqlite3*.*js into a script
# which generates the makefile code, rather than using $(call) and
# $(eval), or at least centralize the setup of the numerous vars
-# related to each build variant $(JS_BUILD_MODES).
+# related to each build variant $(JS_BUILD_MODES). (Update: an
+# external script was attempted but generating properly-escaped
+# makefile code from within a shell script is even less legible
+# than the $(eval) indirection going on in this file.)
#
default: all
#default: quick
SHELL := $(shell which bash 2>/dev/null)
MAKEFILE := $(lastword $(MAKEFILE_LIST))
CLEAN_FILES :=
DISTCLEAN_FILES := ./--dummy--
release: oz
-# JS_BUILD_MODES exists solely to reduce repetition in documentation
-# below.
+
+########################################################################
+# JS_BUILD_NAMES exists for documentation purposes only. It enumerates
+# the core build styles:
+#
+# - sqlite3 = canonical library build
+#
+# - sqlite3-wasmfs = WASMFS-capable library build
+#
+JS_BUILD_NAMES := sqlite3 sqlite3-wasmfs
+
+########################################################################
+# JS_BUILD_MODES exists for documentation purposes only. It enumerates
+# the various "flavors" of build, each of which requires slight
+# customization of the output:
+#
+# - vanilla = plain-vanilla JS for use in browsers. This is the
+# canonical build mode.
+#
+# - esm = ES6 module, a.k.a. ESM, for use in browsers.
+#
+# - bundler-friendly = esm slightly tweaked for "bundler"
+# tools. Bundlers are invariably based on node.js, so these builds
+# are intended to be read at build-time by node.js but with a final
+# target of browsers.
+#
+# - node = for use by node.js for node.js, as opposed to by node.js on
+# behalf of browser-side code (use bundler-friendly for that). Note
+# that persistent storage (OPFS) is not available in these builds.
+#
JS_BUILD_MODES := vanilla esm bundler-friendly node
+
+########################################################################
# Emscripten SDK home dir and related binaries...
EMSDK_HOME ?= $(word 1,$(wildcard $(HOME)/emsdk $(HOME)/src/emsdk))
emcc.bin ?= $(word 1,$(wildcard $(EMSDK_HOME)/upstream/emscripten/emcc) $(shell which emcc))
ifeq (,$(emcc.bin))
$(error Cannot find emcc.)
@@ -91,15 +124,18 @@
maybe-wasm-strip = echo "not wasm-stripping"
else
maybe-wasm-strip = $(wasm-strip)
endif
+########################################################################
+# dir.top = the top dir of the canonical build tree, where
+# sqlite3.[ch] live.
dir.top := ../..
-# Reminder: some Emscripten flags require absolute paths but we want
-# relative paths for most stuff simply to reduce noise. The
-# $(abspath...) GNU make function can transform relative paths to
-# absolute.
+# Maintenance reminder: some Emscripten flags require absolute paths
+# but we want relative paths for most stuff simply to reduce
+# noise. The $(abspath...) GNU make function can transform relative
+# paths to absolute.
dir.wasm := $(patsubst %/,%,$(dir $(MAKEFILE)))
dir.api := api
dir.jacc := jaccwabyt
dir.common := common
dir.fiddle := fiddle
@@ -141,16 +177,18 @@
########################################################################
# Set up sqlite3.c and sqlite3.h...
#
# To build with SEE (https://sqlite.org/see), either put sqlite3-see.c
-# in the top of this build tree or pass
-# sqlite3.c=PATH_TO_sqlite3-see.c to the build. Note that only
-# encryption modules with no 3rd-party dependencies will currently
-# work here: AES256-OFB, AES128-OFB, and AES128-CCM. Not
-# coincidentally, those 3 modules are included in the sqlite3-see.c
-# bundle.
+# in $(dir.top) or pass sqlite3.c=PATH_TO_sqlite3-see.c to the $(MAKE)
+# invocation. Note that only encryption modules with no 3rd-party
+# dependencies will currently work here: AES256-OFB, AES128-OFB, and
+# AES128-CCM. Not coincidentally, those 3 modules are included in the
+# sqlite3-see.c bundle. Note, however, that distributing an SEE build
+# of the WASM on a public site is in violation of the SEE license
+# because it effectively provides a usable copy of the SEE build to
+# all visitors.
#
# A custom sqlite3.c must not have any spaces in its name.
# $(sqlite3.canonical.c) must point to the sqlite3.c in
# the sqlite3 canonical source tree, as that source file
# is required for certain utility and test code.
@@ -191,10 +229,14 @@
# (and thus sqlite3_js_vfs_create_file()). Those functions are
# deprecated and alternatives are in place, but this crash behavior
# can be used to find errant uses of sqlite3_js_vfs_create_file()
# in client code.
+########################################################################
+# It's important that sqlite3.h be built to completion before any
+# other parts of the build run, thus we use .NOTPARALLEL to disable
+# parallel build of that file and its dependents.
.NOTPARALLEL: $(sqlite3.h)
$(sqlite3.h):
$(MAKE) -C $(dir.top) sqlite3.c
$(sqlite3.c): $(sqlite3.h)
@@ -240,24 +282,26 @@
ifneq (,$(sqlite3_wasm_extra_init.c))
$(info Enabling SQLITE_EXTRA_INIT via $(sqlite3_wasm_extra_init.c).)
cflags.wasm_extra_init := -DSQLITE_WASM_EXTRA_INIT
endif
+#########################################################################
# bin.version-info = binary to output various sqlite3 version info for
# embedding in the JS files and in building the distribution zip file.
# It must NOT be in $(dir.tmp) because we need it to survive the
# cleanup process for the dist build to work properly.
bin.version-info := $(dir.top)/version-info
.NOTPARALLEL: $(bin.version-info)
$(bin.version-info): $(dir.tool)/version-info.c $(sqlite3.h) $(dir.top)/Makefile
$(MAKE) -C $(dir.top) version-info
+#########################################################################
# bin.stripcomments is used for stripping C/C++-style comments from JS
# files. The JS files contain large chunks of documentation which we
# don't need for all builds. That app's -k flag is of particular
# importance here, as it allows us to retain the opening comment
-# blocks, which contain the license header and version info.
+# block(s), which contain the license header and version info.
bin.stripccomments := $(dir.tool)/stripccomments
$(bin.stripccomments): $(bin.stripccomments).c $(MAKEFILE)
$(CC) -o $@ $<
DISTCLEAN_FILES += $(bin.stripccomments)
@@ -285,10 +329,13 @@
# seems likely to.
#
# c-pp.c was written specifically for the sqlite project's JavaScript
# builds but is maintained as a standalone project:
# https://fossil.wanderinghorse.net/r/c-pp
+#
+# Note that the SQLITE_... build flags used here have NO EFFECT on the
+# JS/WASM build. They are solely for use with $(bin.c-pp) itself.
bin.c-pp := ./c-pp
$(bin.c-pp): c-pp.c $(sqlite3.c) $(MAKEFILE)
$(CC) -O0 -o $@ c-pp.c $(sqlite3.c) '-DCMPP_DEFAULT_DELIM="//#"' -I$(dir.top) \
-DSQLITE_OMIT_LOAD_EXTENSION -DSQLITE_OMIT_DEPRECATED -DSQLITE_OMIT_UTF16 \
-DSQLITE_OMIT_SHARED_CACHE -DSQLITE_OMIT_WAL -DSQLITE_THREADSAFE=0 \
@@ -345,10 +392,11 @@
# Much practice has demonstrated that -O2 consistently gives the best
# runtime speeds, but not by a large enough factor to rule out use of
# -Oz when small deliverable size is a priority.
########################################################################
+########################################################################
# EXPORTED_FUNCTIONS.* = files for use with Emscripten's
# -sEXPORTED_FUNCTION flag.
EXPORTED_FUNCTIONS.api.main := $(abspath $(dir.api)/EXPORTED_FUNCTIONS.sqlite3-api)
EXPORTED_FUNCTIONS.api.in := $(EXPORTED_FUNCTIONS.api.main)
ifeq (1,$(SQLITE_C_IS_SEE))
@@ -356,10 +404,11 @@
endif
EXPORTED_FUNCTIONS.api := $(dir.tmp)/EXPORTED_FUNCTIONS.api
$(EXPORTED_FUNCTIONS.api): $(EXPORTED_FUNCTIONS.api.in) $(sqlite3.c) $(MAKEFILE)
cat $(EXPORTED_FUNCTIONS.api.in) > $@
+########################################################################
# sqlite3-license-version.js = generated JS file with the license
# header and version info.
sqlite3-license-version.js := $(dir.tmp)/sqlite3-license-version.js
# sqlite3-license-version-header.js = JS file containing only the
# license header.
@@ -368,24 +417,41 @@
# sqlite3.version object using $(bin.version-info).
sqlite3-api-build-version.js := $(dir.tmp)/sqlite3-api-build-version.js
# sqlite3-api.jses = the list of JS files which make up
# $(sqlite3-api.js.in), in the order they need to be assembled.
sqlite3-api.jses := $(sqlite3-license-version.js)
+# sqlite3-api-prologue.js: initial bootstrapping bits:
sqlite3-api.jses += $(dir.api)/sqlite3-api-prologue.js
+# whwasmutil.js and jaccwabyt.js: Low-level utils, mostly replacing
+# Emscripten glue:
sqlite3-api.jses += $(dir.common)/whwasmutil.js
sqlite3-api.jses += $(dir.jacc)/jaccwabyt.js
+# sqlite3-api-glue.js: glues the previous parts together:
sqlite3-api.jses += $(dir.api)/sqlite3-api-glue.js
+# $(sqlite3-api-build-version.js) = library version info
sqlite3-api.jses += $(sqlite3-api-build-version.js)
+# sqlite3-api-oo1.js = the oo1 API:
sqlite3-api.jses += $(dir.api)/sqlite3-api-oo1.js
+# sqlite3-api-worker.js = the Worker1 API:
sqlite3-api.jses += $(dir.api)/sqlite3-api-worker1.js
-sqlite3-api.jses += $(dir.api)/sqlite3-v-helper.js
+# sqlite3-vfs-helper = helper APIs for VFSes:
+sqlite3-api.jses += $(dir.api)/sqlite3-vfs-helper.c-pp.js
+# sqlite3-vtab-helper = helper APIs for VTABLEs:
+sqlite3-api.jses += $(dir.api)/sqlite3-vtab-helper.c-pp.js
+# sqlite3-vfs-opfs.c-pp.js = the first OPFS VFS:
sqlite3-api.jses += $(dir.api)/sqlite3-vfs-opfs.c-pp.js
+# sqlite3-vfs-opfs-sahpool.c-pp.js = the second OPFS VFS:
sqlite3-api.jses += $(dir.api)/sqlite3-vfs-opfs-sahpool.c-pp.js
+# sqlite3-api-cleanup.js = "finalizes" the build and cleans up
+# any extraneous global symbols which are needed temporarily
+# by the previous files.
sqlite3-api.jses += $(dir.api)/sqlite3-api-cleanup.js
+########################################################################
# SOAP.js is an external API file which is part of our distribution
-# but not part of the sqlite3-api.js amalgamation.
+# but not part of the sqlite3-api.js amalgamation. It's a component of
+# the first OPFS VFS and necessarily an external file.
SOAP.js := $(dir.api)/sqlite3-opfs-async-proxy.js
SOAP.js.bld := $(dir.dout)/$(notdir $(SOAP.js))
sqlite3-api.ext.jses += $(SOAP.js.bld)
$(SOAP.js.bld): $(SOAP.js)
cp $< $@
@@ -436,11 +502,13 @@
########################################################################
# emcc flags for .c/.o.
emcc.cflags :=
emcc.cflags += -std=c99 -fPIC
-# -------------^^^^^^^^ we need c99 for $(sqlite3-wasm.c).
+# -------------^^^^^^^^ we need c99 for $(sqlite3-wasm.c), primarily
+# for variadic macros and snprintf() to implement
+# sqlite3_wasm_enum_json().
emcc.cflags += -I. -I$(dir.top)
########################################################################
# emcc flags specific to building .js/.wasm files...
emcc.jsflags := -fPIC
emcc.jsflags += --minify 0
@@ -457,18 +525,20 @@
emcc.jsflags += -sUSE_CLOSURE_COMPILER=0
emcc.jsflags += -sIMPORTED_MEMORY
emcc.jsflags += -sSTRICT_JS=0
# STRICT_JS disabled due to:
# https://github.com/emscripten-core/emscripten/issues/18610
-# TL;DR: does not work with MODULARIZE or EXPORT_ES6 as of version 3.1.31.
+# TL;DR: does not work with MODULARIZE or EXPORT_ES6 as of version
+# 3.1.31. The fix for that in newer emcc versions is to throw a
+# build-time error if STRICT_JS is used together with those options.
# -sENVIRONMENT values for the various build modes:
emcc.environment.vanilla := web,worker
emcc.environment.bundler-friendly := $(emcc.environment.vanilla)
emcc.environment.esm := $(emcc.environment.vanilla)
emcc.environment.node := node
-# Note that adding "node" to the list for the other builds causes
+# Note that adding ",node" to the list for the other builds causes
# Emscripten to generate code which confuses node: it cannot reliably
# determine whether the build is for a browser or for node.
########################################################################
# -sINITIAL_MEMORY: How much memory we need to start with is governed
@@ -516,17 +586,18 @@
# name "sqlite3InitModule" is the one which gets exposed via the
# resulting JS files. That can be accomplished via
# extern-post-js.js. However... using a temporary symbol name here
# and then adding sqlite3InitModule() ourselves results in 2 global
# symbols: we cannot "delete" the Emscripten-defined
-# $(sqlite3.js.init-func) because it's declared with "var".
+# $(sqlite3.js.init-func) from vanilla builds (as opposed to ESM
+# builds) because it's declared with "var".
sqlite3.js.init-func := sqlite3InitModule
emcc.jsflags += -sEXPORT_NAME=$(sqlite3.js.init-func)
emcc.jsflags += -sGLOBAL_BASE=4096 # HYPOTHETICALLY keep func table indexes from overlapping w/ heap addr.
#emcc.jsflags += -sSTRICT # fails due to missing __syscall_...()
#emcc.jsflags += -sALLOW_UNIMPLEMENTED_SYSCALLS
-#emcc.jsflags += -sFILESYSTEM=0 # only for experimentation. sqlite3 needs the FS API
+#emcc.jsflags += -sFILESYSTEM=0 # only for experimentation. fiddle needs the FS API
#emcc.jsflags += -sABORTING_MALLOC # only for experimentation
emcc.jsflags += -sALLOW_TABLE_GROWTH
# ^^^^ -sALLOW_TABLE_GROWTH is required for installing new SQL UDFs
emcc.jsflags += -Wno-limited-postlink-optimizations
# ^^^^ emcc likes to warn when we have "limited optimizations" via the
@@ -566,37 +637,51 @@
# cannot wasm-strip the binary before it gets encoded into the JS
# file. The result is that the generated JS file is, because of the
# -g3 debugging info, _huge_.
########################################################################
+########################################################################
+# $(sqlite3-api-build-version.js) injects the build version info into
+# the bundle in JSON form.
$(sqlite3-api-build-version.js): $(bin.version-info) $(MAKEFILE)
@echo "Making $@..."
@{ \
- echo 'globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){'; \
- echo -n ' sqlite3.version = '; \
- $(bin.version-info) --json; \
- echo ';'; \
- echo '});'; \
+ echo 'globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){'; \
+ echo -n ' sqlite3.version = '; \
+ $(bin.version-info) --json; \
+ echo ';'; \
+ echo '});'; \
} > $@
+
+########################################################################
+# $(sqlite3-license-version.js) contains the license header and
+# in-comment build version info.
+#
+# Maintenance reminder: there are awk binaries out there which do not
+# support -e SCRIPT.
$(sqlite3-license-version.js): $(sqlite3.h) $(sqlite3-license-version-header.js) \
$(MAKEFILE)
@echo "Making $@..."; { \
cat $(sqlite3-license-version-header.js); \
echo '/*'; \
echo '** This code was built from sqlite3 version...'; \
echo "**"; \
- awk -e '/define SQLITE_VERSION/{$$1=""; print "**" $$0}' \
- -e '/define SQLITE_SOURCE_ID/{$$1=""; print "**" $$0}' $(sqlite3.h); \
+ awk '/define SQLITE_VERSION/{$$1=""; print "**" $$0}' $(sqlite3.h); \
+ awk '/define SQLITE_SOURCE_ID/{$$1=""; print "**" $$0}' $(sqlite3.h); \
echo "**"; \
echo "** Using the Emscripten SDK version $(emcc.version)."; \
echo '*/'; \
} > $@
########################################################################
# --post-js and --pre-js are emcc flags we use to append/prepend JS to
# the generated emscripten module file. These rules set up the core
-# pre/post files for use by the various builds.
+# pre/post files for use by the various builds. --pre-js is used to
+# inject code which needs to run as part of the pre-WASM-load phase.
+# --post-js injects code which runs after the WASM module is loaded
+# and includes the entirety of the library plus some
+# Emscripten-specific post-bootstrapping code.
pre-js.js.in := $(dir.api)/pre-js.c-pp.js
post-js.js.in := $(dir.tmp)/post-js.c-pp.js
post-jses.js := \
$(dir.api)/post-js-header.js \
$(sqlite3-api.js.in) \
@@ -610,22 +695,30 @@
done > $@
########################################################################
# call-make-pre-post is a $(call)able which creates rules for
-# pre-js-$(1)-$(2).js. $1 = the base name of the JS file on whose
-# behalf this pre-js is for (one of: sqlite3, sqlite3-wasmfs). $2 is
+# pre-js.$(1)-$(2).js. $1 = the base name of the JS file on whose
+# behalf this pre-js is for (one of: $(JS_BUILD_NAMES)). $2 is
# the build mode: one of $(JS_BUILD_MODES). This sets up
# --[extern-][pre/post]-js flags in $(pre-post-$(1)-$(2).flags) and
# dependencies in $(pre-post-$(1)-$(2).deps). The resulting files get
# filtered using $(C-PP.FILTER). Any flags necessary for such
# filtering need to be set in $(c-pp.D.$(1)-$(2)) before $(call)ing
# this.
+#
+# Maintenance note: a shell script was written to generate these rules
+# with the hope that it would make them more legible and maintainable,
+# but embedding makefile code in another language makes it even less
+# legible than having the level of $(eval) indirection which we have
+# here.
define call-make-pre-post
pre-post-$(1)-$(2).flags ?=
-pre-js.js.$(1)-$(2) := $$(dir.tmp)/pre-js.$(1)-$(2).intermediary.js
-$$(eval $$(call C-PP.FILTER,$$(pre-js.js.in),$$(pre-js.js.$(1)-$(2)),$$(c-pp.D.$(1)-$(2))))
+pre-js.js.$(1)-$(2).intermediary := $$(dir.tmp)/pre-js.$(1)-$(2).intermediary.js
+pre-js.js.$(1)-$(2) := $$(dir.tmp)/pre-js.$(1)-$(2).js
+#$$(error $$(pre-js.js.$(1)-$(2).intermediary) $$(pre-js.js.$(1)-$(2)))
+$$(eval $$(call C-PP.FILTER,$$(pre-js.js.in),$$(pre-js.js.$(1)-$(2).intermediary),$$(c-pp.D.$(1)-$(2))))
post-js.js.$(1)-$(2) := $$(dir.tmp)/post-js.$(1)-$(2).js
$$(eval $$(call C-PP.FILTER,$$(post-js.js.in),$$(post-js.js.$(1)-$(2)),$$(c-pp.D.$(1)-$(2))))
extern-post-js.js.$(1)-$(2) := $$(dir.tmp)/extern-post-js.$(1)-$(2).js
$$(eval $$(call C-PP.FILTER,$$(extern-post-js.js.in),$$(extern-post-js.js.$(1)-$(2)),$$(c-pp.D.$(1)-$(2))))
pre-post-common.flags.$(1)-$(2) := \
@@ -632,23 +725,23 @@
$$(pre-post-common.flags) \
--post-js=$$(post-js.js.$(1)-$(2)) \
--extern-post-js=$$(extern-post-js.js.$(1)-$(2))
pre-post-jses.$(1)-$(2).deps := $$(pre-post-jses.deps.common) \
$$(post-js.js.$(1)-$(2)) $$(extern-post-js.js.$(1)-$(2))
-$$(dir.tmp)/pre-js-$(1)-$(2).js: $$(pre-js.js.$(1)-$(2)) $$(MAKEFILE)
- cp $$(pre-js.js.$(1)-$(2)) $$@
+$$(pre-js.js.$(1)-$(2)): $$(pre-js.js.$(1)-$(2).intermediary) $$(MAKEFILE)
+ cp $$(pre-js.js.$(1)-$(2).intermediary) $$@
@if [ sqlite3-wasmfs = $(1) ]; then \
echo "delete Module[xNameOfInstantiateWasm] /*for WASMFS build*/;"; \
elif [ sqlite3 != $(1) ]; then \
echo "Module[xNameOfInstantiateWasm].uri = '$(1).wasm';"; \
fi >> $$@
pre-post-$(1)-$(2).deps := \
$$(pre-post-jses.$(1)-$(2).deps) \
- $$(dir.tmp)/pre-js-$(1)-$(2).js
+ $$(dir.tmp)/pre-js.$(1)-$(2).js
pre-post-$(1)-$(2).flags += \
$$(pre-post-common.flags.$(1)-$(2)) \
- --pre-js=$$(dir.tmp)/pre-js-$(1)-$(2).js
+ --pre-js=$$(dir.tmp)/pre-js.$(1)-$(2).js
endef
# /post-js and pre-js
########################################################################
# Undocumented Emscripten feature: if the target file extension is
@@ -681,12 +774,12 @@
# instance (only) of /^export default/.
#
# Upstream RFE:
# https://github.com/emscripten-core/emscripten/issues/18237
#
-# Maintenance reminder: Mac sed works differently than GNU sed, so
-# don't use sed for this.
+# Maintenance reminder: Mac sed works differently than GNU sed, so we
+# use awk instead of sed for this.
define SQLITE3.xJS.ESM-EXPORT-DEFAULT
if [ x1 = x$(1) ]; then \
echo "Fragile workaround for emscripten/issues/18237. See SQLITE3.xJS.RECIPE."; \
{\
awk '/^export default/ && !f{f=1; next} 1' $@ > $@.tmp && mv $@.tmp $@; \
@@ -698,10 +791,11 @@
fi; \
fi; \
fi
endef
+########################################################################
# extern-post-js* and extern-pre-js* are files for use with
# Emscripten's --extern-pre-js and --extern-post-js flags.
extern-pre-js.js := $(dir.api)/extern-pre-js.js
extern-post-js.js.in := $(dir.api)/extern-post-js.c-pp.js
# Emscripten flags for --[extern-][pre|post]-js=... for the
@@ -709,15 +803,16 @@
pre-post-common.flags := \
--extern-pre-js=$(sqlite3-license-version.js)
# pre-post-jses.deps.* = a list of dependencies for the
# --[extern-][pre/post]-js files.
pre-post-jses.deps.common := $(extern-pre-js.js) $(sqlite3-license-version.js)
+
########################################################################
# SETUP_LIB_BUILD_MODE is a $(call)'able which sets up numerous pieces
# for one of the build modes.
#
-# $1 = one of: sqlite3, sqlite3-wasmfs
+# $1 = one of: $(JS_BUILD_NAMES)
# $2 = build mode name: one of $(JS_BUILD_MODES)
# $3 = 1 for ESM build mode, else 0
# $4 = resulting sqlite-api JS/MJS file
# $5 = resulting JS/MJS file
# $6 = -D... flags for $(bin.c-pp)
@@ -724,11 +819,12 @@
# $7 = emcc -sXYZ flags (CURRENTLY UNUSED - was factored out)
#
# Maintenance reminder: be careful not to introduce spaces around args
# ($1, $2), otherwise string concatenation will malfunction.
#
-# emcc.environment.$(2) must be set to a value for the -sENVIRONMENT flag.
+# emcc.environment.$(2) must be set to a value for emcc's
+# -sENVIRONMENT flag.
#
# $(cflags.$(1)) and $(cflags.$(1).$(2)) may be defined to append
# CFLAGS to a given build mode.
#
# $(emcc.flags.$(1)) and $(emcc.flags.$(1).$(2)) may be defined to
@@ -779,12 +875,11 @@
sqlite3-node.mjs := $(dir.dout)/sqlite3-node.mjs
#$(info $(call SETUP_LIB_BUILD_MODE,sqlite3,vanilla,0, $(sqlite3-api.js), $(sqlite3.js)))
$(eval $(call SETUP_LIB_BUILD_MODE,sqlite3,vanilla,0,\
$(sqlite3-api.js), $(sqlite3.js)))
$(eval $(call SETUP_LIB_BUILD_MODE,sqlite3,esm,1,\
- $(sqlite3-api.mjs), $(sqlite3.mjs), \
- -Dtarget=es6-module, -sEXPORT_ES6 -sUSE_ES6_IMPORT_META))
+ $(sqlite3-api.mjs), $(sqlite3.mjs), -Dtarget=es6-module))
$(eval $(call SETUP_LIB_BUILD_MODE,sqlite3,bundler-friendly,1,\
$(sqlite3-api-bundler-friendly.mjs),$(sqlite3-bundler-friendly.mjs),\
$(c-pp.D.sqlite3-esm) -Dtarget=es6-bundler-friendly))
$(eval $(call SETUP_LIB_BUILD_MODE,sqlite3,node,1,\
$(sqlite3-api-node.mjs),$(sqlite3-node.mjs),\
@@ -796,11 +891,11 @@
# -Dtarget=node: for node.js builds
#
# -Dtarget=es6-module -Dtarget=es6-bundler-friendly: intended for
# "bundler-friendly" ESM module build. These have some restrictions
# on how URL() objects are constructed in some contexts: URLs which
-# refer to files which are part of this project must be references
+# refer to files which are part of this project must be referenced
# as string literals so that bundlers' static-analysis tools can
# find those files and include them in their bundles.
#
# -Dtarget=es6-module -Dtarget=es6-bundler-friendly -Dtarget=node: is
# intended for use by node.js for node.js, as opposed to by
@@ -852,11 +947,11 @@
# dumps generated by $(speedtest1) and executes them.
dir.sql := sql
speedtest1 := ../../speedtest1
speedtest1.c := ../../test/speedtest1.c
speedtest1.sql := $(dir.sql)/speedtest1.sql
-speedtest1.cliflags := --size 25 --big-transactions
+speedtest1.cliflags := --size 10 --big-transactions
$(speedtest1):
$(MAKE) -C ../.. speedtest1
$(speedtest1.sql): $(speedtest1) $(MAKEFILE)
$(speedtest1) $(speedtest1.cliflags) --script $@
batch-runner.list: $(MAKEFILE) $(speedtest1.sql) $(dir.sql)/000-mandelbrot.sql
@@ -1085,6 +1180,6 @@
include dist.make
endif
# Run local web server for the test/demo pages.
httpd:
- althttpd -max-age 1 -enable-sab -page index.html
+ althttpd -max-age 1 -enable-sab 1 -page index.html
Index: ext/wasm/SQLTester/SQLTester.mjs
==================================================================
--- ext/wasm/SQLTester/SQLTester.mjs
+++ ext/wasm/SQLTester/SQLTester.mjs
@@ -171,16 +171,22 @@
command: /^--(([a-z-]+)( .*)?)$/,
//! "Special" characters - we have to escape output if it contains any.
special: /[\x00-\x20\x22\x5c\x7b\x7d]/,
squiggly: /[{}]/
});
+
+
const Util = newObj({
toss,
- unlink: function(fn){
- return 0==sqlite3.wasm.sqlite3_wasm_vfs_unlink(0,fn);
+ unlink: function f(fn){
+ if(!f.unlink){
+ f.unlink = sqlite3.wasm.xWrap('sqlite3__wasm_vfs_unlink','int',
+ ['*','string']);
+ }
+ return 0==f.unlink(0,fn);
},
argvToString: (list)=>{
const m = [...list];
m.shift() /* strip command name */;
@@ -195,11 +201,11 @@
);
},
utf8Encode: (str)=>__utf8Encoder.encode(str),
- strglob: sqlite3.wasm.xWrap('sqlite3_wasm_SQLTester_strglob','int',
+ strglob: sqlite3.wasm.xWrap('sqlite3__wasm_SQLTester_strglob','int',
['string','string'])
})/*Util*/;
class Outer {
#lnBuf = [];
Index: ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api
==================================================================
--- ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api
+++ ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api
@@ -61,10 +61,11 @@
_sqlite3_extended_result_codes
_sqlite3_file_control
_sqlite3_finalize
_sqlite3_free
_sqlite3_get_auxdata
+_sqlite3_get_autocommit
_sqlite3_initialize
_sqlite3_keyword_count
_sqlite3_keyword_name
_sqlite3_keyword_check
_sqlite3_last_insert_rowid
Index: ext/wasm/api/README.md
==================================================================
--- ext/wasm/api/README.md
+++ ext/wasm/api/README.md
@@ -76,14 +76,16 @@
- **`sqlite3-worker1-promiser.js`**\
Is likewise not part of the amalgamated sources and provides
a Promise-based interface into the Worker #1 API. This is
a far user-friendlier way to interface with databases running
in a Worker thread.
-- **`sqlite3-v-helper.js`**\
- Installs `sqlite3.vfs` and `sqlite3.vtab`, namespaces which contain
- helpers for use by downstream code which creates `sqlite3_vfs`
- and `sqlite3_module` implementations.
+- **`sqlite3-vfs-helper.js`**\
+ Installs the `sqlite3.vfs` namespace, which contains helpers for use
+ by downstream code which creates `sqlite3_vfs` implementations.
+- **`sqlite3-vtab-helper.js`**\
+ Installs the `sqlite3.vtab` namespace, which contains helpers for use
+ by downstream code which creates `sqlite3_module` implementations.
- **`sqlite3-vfs-opfs.c-pp.js`**\
is an sqlite3 VFS implementation which supports the Origin-Private
FileSystem (OPFS) as a storage layer to provide persistent storage
for database files in a browser. It requires...
- **`sqlite3-opfs-async-proxy.js`**\
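For illustration, a minimal use of the Promise-based Worker #1 interface described above might look like the following. This is a hedged sketch of hypothetical client code, not part of this change; the database name and SQL are placeholders, and it assumes the canonical browser build with sqlite3-worker1-promiser.js (and the worker files it spawns) served alongside the page.

  const promiser = await new Promise((resolve)=>{
    /* onready fires once the Worker-side db proxy is ready to accept messages */
    const p = globalThis.sqlite3Worker1Promiser({onready: ()=>resolve(p)});
  });
  await promiser('open', {filename: 'file:demo.sqlite3?vfs=opfs'});
  await promiser('exec', {sql: 'CREATE TABLE IF NOT EXISTS t(a,b)'});
  await promiser('close', {});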
Index: ext/wasm/api/post-js-header.js
==================================================================
--- ext/wasm/api/post-js-header.js
+++ ext/wasm/api/post-js-header.js
@@ -17,10 +17,12 @@
- common/whwasmutil.js => Replacements for much of Emscripten's glue
- jaccwaby/jaccwabyt.js => Jaccwabyt (C/JS struct binding)
- sqlite3-api-glue.js => glues previous parts together
- sqlite3-api-oo.js => SQLite3 OO API #1
- sqlite3-api-worker1.js => Worker-based API
- - sqlite3-vfs-helper.js => Internal-use utilities for...
- - sqlite3-vfs-opfs.js => OPFS VFS
+ - sqlite3-vfs-helper.c-pp.js => Utilities for VFS impls
+ - sqlite3-vtab-helper.c-pp.js => Utilities for virtual table impls
+ - sqlite3-vfs-opfs.c-pp.js => OPFS VFS
+ - sqlite3-vfs-opfs-sahpool.c-pp.js => OPFS SAHPool VFS
- sqlite3-api-cleanup.js => final API cleanup
- post-js-footer.js => closes this postRun() function
*/
Index: ext/wasm/api/sqlite3-api-glue.js
==================================================================
--- ext/wasm/api/sqlite3-api-glue.js
+++ ext/wasm/api/sqlite3-api-glue.js
@@ -12,11 +12,12 @@
This file glues together disparate pieces of JS which are loaded in
previous steps of the sqlite3-api.js bootstrapping process:
sqlite3-api-prologue.js, whwasmutil.js, and jaccwabyt.js. It
initializes the main API pieces so that the downstream components
- (e.g. sqlite3-api-oo1.js) have all that they need.
+ (e.g. sqlite3-api-oo1.js) have all of the infrastructure that they
+ need.
*/
globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
'use strict';
const toss = (...args)=>{throw new Error(args.join(' '))};
const toss3 = sqlite3.SQLite3Error.toss;
@@ -186,10 +187,11 @@
["sqlite3_extended_errcode", "int", "sqlite3*"],
["sqlite3_extended_result_codes", "int", "sqlite3*", "int"],
["sqlite3_file_control", "int", "sqlite3*", "string", "int", "*"],
["sqlite3_finalize", "int", "sqlite3_stmt*"],
["sqlite3_free", undefined,"*"],
+ ["sqlite3_get_autocommit", "int", "sqlite3*"],
["sqlite3_get_auxdata", "*", "sqlite3_context*", "int"],
["sqlite3_initialize", undefined],
/*["sqlite3_interrupt", undefined, "sqlite3*"
^^^ we cannot actually currently support this because JS is
single-threaded and we don't have a portable way to access a DB
@@ -326,10 +328,18 @@
optional features into account. */
wasm.bindingSignatures.push(["sqlite3_normalized_sql", "string", "sqlite3_stmt*"]);
}
if(wasm.exports.sqlite3_activate_see instanceof Function){
+ /**
+ This code is capable of using an SEE build but note that an SEE
+ WASM build is generally incompatible with SEE's license
+ conditions. It is permitted for use internally in organizations
+ which have licensed SEE, but not for public sites because
+ exposing an SEE build of sqlite3.wasm effectively provides all
+ clients with a working copy of the commercial SEE code.
+ */
wasm.bindingSignatures.push(
["sqlite3_key", "int", "sqlite3*", "string", "int"],
["sqlite3_key_v2","int","sqlite3*","string","*","int"],
["sqlite3_rekey", "int", "sqlite3*", "string", "int"],
["sqlite3_rekey_v2", "int", "sqlite3*", "string", "*", "int"],
@@ -338,10 +348,12 @@
}
/**
Functions which require BigInt (int64) support are separated from
the others because we need to conditionally bind them or apply
dummy impls, depending on the capabilities of the environment.
+ (That said: we never actually build without BigInt support,
+ and such builds are untested.)
Note that not all of these functions directly require int64
but are only for use with APIs which require int64. For example,
the vtab-related functions.
*/
@@ -356,11 +368,14 @@
["sqlite3_declare_vtab", "int", ["sqlite3*", "string:flexible"]],
["sqlite3_deserialize", "int", "sqlite3*", "string", "*", "i64", "i64", "int"]
/* Careful! Short version: de/serialize() are problematic because they
might use a different allocator than the user for managing the
deserialized block. de/serialize() are ONLY safe to use with
- sqlite3_malloc(), sqlite3_free(), and its 64-bit variants. */,
+ sqlite3_malloc(), sqlite3_free(), and its 64-bit variants. Because
+ of this, the canonical builds of sqlite3.wasm/js guarantee that
+ sqlite3.wasm.alloc() and friends use those allocators. Custom builds
+ may not guarantee that, however. */,
["sqlite3_drop_modules", "int", ["sqlite3*", "**"]],
["sqlite3_last_insert_rowid", "i64", ["sqlite3*"]],
["sqlite3_malloc64", "*","i64"],
["sqlite3_msize", "i64", "*"],
["sqlite3_overload_function", "int", ["sqlite3*","string","int"]],
@@ -419,12 +434,10 @@
["sqlite3_vtab_rhs_value","int", "sqlite3_index_info*", "int", "**"]
];
// Add session/changeset APIs...
if(wasm.bigIntEnabled && !!wasm.exports.sqlite3changegroup_add){
- /* ACHTUNG: 2022-12-23: the session/changeset API bindings are
- COMPLETELY UNTESTED. */
/**
FuncPtrAdapter options for session-related callbacks with the
native signature "i(ps)". This proxy converts the 2nd argument
from a C string to a JS string before passing the arguments on
to the client-provided JS callback.
@@ -598,20 +611,25 @@
}/*session/changeset APIs*/
/**
Functions which are intended solely for API-internal use by the
WASM components, not client code. These get installed into
- sqlite3.wasm. Some of them get exposed to clients via variants
- named sqlite3_js_...().
+ sqlite3.util. Some of them get exposed to clients via variants
+ in sqlite3_js_...().
+
+ 2024-01-11: these were renamed, with two underscores in the
+ prefix, to ensure that clients do not accidentally depend on
+ them. They have always been documented as internal-use-only, so
+ no clients "should" be depending on the old names.
*/
- wasm.bindingSignatures.wasm = [
- ["sqlite3_wasm_db_reset", "int", "sqlite3*"],
- ["sqlite3_wasm_db_vfs", "sqlite3_vfs*", "sqlite3*","string"],
- ["sqlite3_wasm_vfs_create_file", "int",
+ wasm.bindingSignatures.wasmInternal = [
+ ["sqlite3__wasm_db_reset", "int", "sqlite3*"],
+ ["sqlite3__wasm_db_vfs", "sqlite3_vfs*", "sqlite3*","string"],
+ ["sqlite3__wasm_vfs_create_file", "int",
"sqlite3_vfs*","string","*", "int"],
- ["sqlite3_wasm_posix_create_file", "int", "string","*", "int"],
- ["sqlite3_wasm_vfs_unlink", "int", "sqlite3_vfs*","string"]
+ ["sqlite3__wasm_posix_create_file", "int", "string","*", "int"],
+ ["sqlite3__wasm_vfs_unlink", "int", "sqlite3_vfs*","string"]
];
/**
Install JS<->C struct bindings for the non-opaque struct types we
need... */
@@ -649,11 +667,11 @@
call and all future calls which are passed a
string-equivalent argument.
Use case: sqlite3_bind_pointer() and sqlite3_result_pointer()
call for "a static string and preferably a string
- literal". This converter is used to ensure that the string
+ literal." This converter is used to ensure that the string
value seen by those functions is long-lived and behaves as they
need it to.
*/
wasm.xWrap.argAdapter(
'string:static',
@@ -671,18 +689,19 @@
wasm.bindingSignatures and (B) provide automatic conversion
from higher-level representations, e.g. capi.sqlite3_vfs to
`sqlite3_vfs*` via capi.sqlite3_vfs.pointer.
*/
const __xArgPtr = wasm.xWrap.argAdapter('*');
- const nilType = function(){}/*a class no value can ever be an instance of*/;
+ const nilType = function(){
+ /*a class which no value can ever be an instance of*/
+ };
wasm.xWrap.argAdapter('sqlite3_filename', __xArgPtr)
('sqlite3_context*', __xArgPtr)
('sqlite3_value*', __xArgPtr)
('void*', __xArgPtr)
('sqlite3_changegroup*', __xArgPtr)
('sqlite3_changeset_iter*', __xArgPtr)
- //('sqlite3_rebaser*', __xArgPtr)
('sqlite3_session*', __xArgPtr)
('sqlite3_stmt*', (v)=>
__xArgPtr((v instanceof (sqlite3?.oo1?.Stmt || nilType))
? v.pointer : v))
('sqlite3*', (v)=>
@@ -739,12 +758,12 @@
);
}
for(const e of wasm.bindingSignatures){
capi[e[0]] = wasm.xWrap.apply(null, e);
}
- for(const e of wasm.bindingSignatures.wasm){
- wasm[e[0]] = wasm.xWrap.apply(null, e);
+ for(const e of wasm.bindingSignatures.wasmInternal){
+ util[e[0]] = wasm.xWrap.apply(null, e);
}
/* For C API functions which cannot work properly unless
wasm.bigIntEnabled is true, install a bogus impl which throws
if called when bigIntEnabled is false. The alternative would be
@@ -762,13 +781,13 @@
/* There's no need to expose bindingSignatures to clients,
implicitly making it part of the public interface. */
delete wasm.bindingSignatures;
- if(wasm.exports.sqlite3_wasm_db_error){
+ if(wasm.exports.sqlite3__wasm_db_error){
const __db_err = wasm.xWrap(
- 'sqlite3_wasm_db_error', 'int', 'sqlite3*', 'int', 'string'
+ 'sqlite3__wasm_db_error', 'int', 'sqlite3*', 'int', 'string'
);
/**
Sets the given db's error state. Accepts:
- (sqlite3*, int code, string msg)
@@ -782,11 +801,11 @@
exception, the message string defaults to theError.message.
Returns the resulting code. Pass (pDb,0,0) to clear the error
state.
*/
- util.sqlite3_wasm_db_error = function(pDb, resultCode, message){
+ util.sqlite3__wasm_db_error = function(pDb, resultCode, message){
if(resultCode instanceof sqlite3.WasmAllocError){
resultCode = capi.SQLITE_NOMEM;
message = 0 /*avoid allocating message string*/;
}else if(resultCode instanceof Error){
message = message || ''+resultCode;
@@ -793,21 +812,21 @@
resultCode = (resultCode.resultCode || capi.SQLITE_ERROR);
}
return pDb ? __db_err(pDb, resultCode, message) : resultCode;
};
}else{
- util.sqlite3_wasm_db_error = function(pDb,errCode,msg){
- console.warn("sqlite3_wasm_db_error() is not exported.",arguments);
+ util.sqlite3__wasm_db_error = function(pDb,errCode,msg){
+ console.warn("sqlite3__wasm_db_error() is not exported.",arguments);
return errCode;
};
}
}/*xWrap() bindings*/
{/* Import C-level constants and structs... */
- const cJson = wasm.xCall('sqlite3_wasm_enum_json');
+ const cJson = wasm.xCall('sqlite3__wasm_enum_json');
if(!cJson){
- toss("Maintenance required: increase sqlite3_wasm_enum_json()'s",
+ toss("Maintenance required: increase sqlite3__wasm_enum_json()'s",
"static buffer size!");
}
//console.debug('wasm.ctype length =',wasm.cstrlen(cJson));
wasm.ctype = JSON.parse(wasm.cstrToJs(cJson));
// Groups of SQLITE_xyz macros...
@@ -874,11 +893,11 @@
'sqlite3_index_constraint_usage']){
capi.sqlite3_index_info[k] = capi[k];
delete capi[k];
}
capi.sqlite3_vtab_config = wasm.xWrap(
- 'sqlite3_wasm_vtab_config','int',[
+ 'sqlite3__wasm_vtab_config','int',[
'sqlite3*', 'int', 'int']
);
}/* end vtab-related setup */
}/*end C constant and struct imports*/
@@ -886,20 +905,20 @@
Internal helper to assist in validating call argument counts in
the hand-written sqlite3_xyz() wrappers. We do this only for
consistency with non-special-case wrappings.
*/
const __dbArgcMismatch = (pDb,f,n)=>{
- return util.sqlite3_wasm_db_error(pDb, capi.SQLITE_MISUSE,
+ return util.sqlite3__wasm_db_error(pDb, capi.SQLITE_MISUSE,
f+"() requires "+n+" argument"+
(1===n?"":'s')+".");
};
/** Code duplication reducer for functions which take an encoding
argument and require SQLITE_UTF8. Sets the db error code to
SQLITE_FORMAT and returns that code. */
const __errEncoding = (pDb)=>{
- return util.sqlite3_wasm_db_error(
+ return util.sqlite3__wasm_db_error(
pDb, capi.SQLITE_FORMAT, "SQLITE_UTF8 is the only supported encoding."
);
};
/**
@@ -1125,11 +1144,11 @@
if(0===rc && xCompare instanceof Function){
__dbCleanupMap.addCollation(pDb, zName);
}
return rc;
}catch(e){
- return util.sqlite3_wasm_db_error(pDb, e);
+ return util.sqlite3__wasm_db_error(pDb, e);
}
};
capi.sqlite3_create_collation = (pDb,zName,eTextRep,pArg,xCompare)=>{
return (5===arguments.length)
@@ -1251,11 +1270,11 @@
__dbCleanupMap.addFunction(pDb, funcName, nArg);
}
return rc;
}catch(e){
console.error("sqlite3_create_function_v2() setup threw:",e);
- return util.sqlite3_wasm_db_error(pDb, e, "Creation of UDF threw: "+e);
+ return util.sqlite3__wasm_db_error(pDb, e, "Creation of UDF threw: "+e);
}
};
/* Documented in the api object's initializer. */
capi.sqlite3_create_function = function f(
@@ -1296,11 +1315,11 @@
__dbCleanupMap.addWindowFunc(pDb, funcName, nArg);
}
return rc;
}catch(e){
console.error("sqlite3_create_window_function() setup threw:",e);
- return util.sqlite3_wasm_db_error(pDb, e, "Creation of UDF threw: "+e);
+ return util.sqlite3__wasm_db_error(pDb, e, "Creation of UDF threw: "+e);
}
};
/**
A _deprecated_ alias for capi.sqlite3_result_js() which
predates the addition of that function in the public API.
@@ -1391,11 +1410,11 @@
const [xSql, xSqlLen] = __flexiString(sql, sqlLen);
switch(typeof xSql){
case 'string': return __prepare.basic(pDb, xSql, xSqlLen, prepFlags, ppStmt, null);
case 'number': return __prepare.full(pDb, xSql, xSqlLen, prepFlags, ppStmt, pzTail);
default:
- return util.sqlite3_wasm_db_error(
+ return util.sqlite3__wasm_db_error(
pDb, capi.SQLITE_MISUSE,
"Invalid SQL argument type for sqlite3_prepare_v2/v3()."
);
}
};
@@ -1435,19 +1454,19 @@
p = wasm.allocFromTypedArray(text);
n = text.byteLength;
}else if('string'===typeof text){
[p, n] = wasm.allocCString(text);
}else{
- return util.sqlite3_wasm_db_error(
+ return util.sqlite3__wasm_db_error(
capi.sqlite3_db_handle(pStmt), capi.SQLITE_MISUSE,
"Invalid 3rd argument type for sqlite3_bind_text()."
);
}
return __bindText(pStmt, iCol, p, n, capi.SQLITE_WASM_DEALLOC);
}catch(e){
wasm.dealloc(p);
- return util.sqlite3_wasm_db_error(
+ return util.sqlite3__wasm_db_error(
capi.sqlite3_db_handle(pStmt), e
);
}
}/*sqlite3_bind_text()*/;
@@ -1469,19 +1488,19 @@
p = wasm.allocFromTypedArray(pMem);
n = nMem>=0 ? nMem : pMem.byteLength;
}else if('string'===typeof pMem){
[p, n] = wasm.allocCString(pMem);
}else{
- return util.sqlite3_wasm_db_error(
+ return util.sqlite3__wasm_db_error(
capi.sqlite3_db_handle(pStmt), capi.SQLITE_MISUSE,
"Invalid 3rd argument type for sqlite3_bind_blob()."
);
}
return __bindBlob(pStmt, iCol, p, n, capi.SQLITE_WASM_DEALLOC);
}catch(e){
wasm.dealloc(p);
- return util.sqlite3_wasm_db_error(
+ return util.sqlite3__wasm_db_error(
capi.sqlite3_db_handle(pStmt), e
);
}
}/*sqlite3_bind_blob()*/;
@@ -1501,15 +1520,15 @@
case capi.SQLITE_CONFIG_MEMSTATUS:// 9 /* boolean */
case capi.SQLITE_CONFIG_SMALL_MALLOC: // 27 /* boolean */
case capi.SQLITE_CONFIG_SORTERREF_SIZE: // 28 /* int nByte */
case capi.SQLITE_CONFIG_STMTJRNL_SPILL: // 26 /* int nByte */
case capi.SQLITE_CONFIG_URI:// 17 /* int */
- return wasm.exports.sqlite3_wasm_config_i(op, args[0]);
+ return wasm.exports.sqlite3__wasm_config_i(op, args[0]);
case capi.SQLITE_CONFIG_LOOKASIDE: // 13 /* int int */
- return wasm.exports.sqlite3_wasm_config_ii(op, args[0], args[1]);
+ return wasm.exports.sqlite3__wasm_config_ii(op, args[0], args[1]);
case capi.SQLITE_CONFIG_MEMDB_MAXSIZE: // 29 /* sqlite3_int64 */
- return wasm.exports.sqlite3_wasm_config_j(op, args[0]);
+ return wasm.exports.sqlite3__wasm_config_j(op, args[0]);
case capi.SQLITE_CONFIG_GETMALLOC: // 5 /* sqlite3_mem_methods* */
case capi.SQLITE_CONFIG_GETMUTEX: // 11 /* sqlite3_mutex_methods* */
case capi.SQLITE_CONFIG_GETPCACHE2: // 19 /* sqlite3_pcache_methods2* */
case capi.SQLITE_CONFIG_GETPCACHE: // 15 /* no-op */
case capi.SQLITE_CONFIG_HEAP: // 8 /* void*, int nByte, int min */
@@ -1571,24 +1590,24 @@
const pKvvfs = capi.sqlite3_vfs_find("kvvfs");
if( pKvvfs ){/* kvvfs-specific glue */
if(util.isUIThread()){
const kvvfsMethods = new capi.sqlite3_kvvfs_methods(
- wasm.exports.sqlite3_wasm_kvvfs_methods()
+ wasm.exports.sqlite3__wasm_kvvfs_methods()
);
delete capi.sqlite3_kvvfs_methods;
- const kvvfsMakeKey = wasm.exports.sqlite3_wasm_kvvfsMakeKeyOnPstack,
+ const kvvfsMakeKey = wasm.exports.sqlite3__wasm_kvvfsMakeKeyOnPstack,
pstack = wasm.pstack;
const kvvfsStorage = (zClass)=>
((115/*=='s'*/===wasm.peek(zClass))
? sessionStorage : localStorage);
/**
Implementations for members of the object referred to by
- sqlite3_wasm_kvvfs_methods(). We swap out the native
+ sqlite3__wasm_kvvfs_methods(). We swap out the native
implementations with these, which use localStorage or
sessionStorage for their backing store.
*/
const kvvfsImpls = {
xRead: (zClass, zKey, zBuf, nBuf)=>{
@@ -1664,7 +1683,183 @@
be used that way but it's not really intended to be. */
capi.sqlite3_vfs_unregister(pKvvfs);
}
}/*pKvvfs*/
+ /* Warn if client-level code makes use of FuncPtrAdapter. */
wasm.xWrap.FuncPtrAdapter.warnOnUse = true;
+
+ const StructBinder = sqlite3.StructBinder
+ /* we require a local alias b/c StructBinder is removed from the sqlite3
+ object during the final steps of the API cleanup. */;
+ /**
+ Installs a StructBinder-bound function pointer member of the
+ given name and function in the given StructBinder.StructType
+ target object.
+
+ It creates a WASM proxy for the given function and arranges for
+ that proxy to be cleaned up when tgt.dispose() is called. Throws
+ on the slightest hint of error, e.g. tgt is-not-a StructType,
+ name does not map to a struct-bound member, etc.
+
+ As a special case, if the given function is a pointer, then
+ `wasm.functionEntry()` is used to validate that it is a known
+ function. If so, it is used as-is with no extra level of proxying
+ or cleanup, else an exception is thrown. It is legal to pass a
+ value of 0, indicating a NULL pointer, with the caveat that 0
+ _is_ a legal function pointer in WASM but it will not be accepted
+ as such _here_. (Justification: the function at address zero must
+ be one which initially came from the WASM module, not a method we
+ want to bind to a virtual table or VFS.)
+
+ This function returns a proxy for itself which is bound to tgt
+ and takes 2 args (name,func). That function returns the same
+ thing as this one, permitting calls to be chained.
+
+ If called with only 1 arg, it has no side effects but returns a
+ func with the same signature as described above.
+
+ ACHTUNG: because we cannot generically know how to transform JS
+ exceptions into result codes, the installed functions do no
+ automatic catching of exceptions. It is critical, to avoid
+ undefined behavior in the C layer, that methods mapped via
+ this function do not throw. The exception, as it were, to that
+ rule is...
+
+ If applyArgcCheck is true then each JS function (as opposed to
+ function pointers) gets wrapped in a proxy which asserts that it
+ is passed the expected number of arguments, throwing if the
+ argument count does not match expectations. That is only intended
+ for dev-time usage for sanity checking, and may leave the C
+ environment in an undefined state.
+ */
+ const installMethod = function callee(
+ tgt, name, func, applyArgcCheck = callee.installMethodArgcCheck
+ ){
+ if(!(tgt instanceof StructBinder.StructType)){
+ toss("Usage error: target object is-not-a StructType.");
+ }else if(!(func instanceof Function) && !wasm.isPtr(func)){
+ toss("Usage errror: expecting a Function or WASM pointer to one.");
+ }
+ if(1===arguments.length){
+ return (n,f)=>callee(tgt, n, f, applyArgcCheck);
+ }
+ if(!callee.argcProxy){
+ callee.argcProxy = function(tgt, funcName, func,sig){
+ return function(...args){
+ if(func.length!==arguments.length){
+ toss("Argument mismatch for",
+ tgt.structInfo.name+"::"+funcName
+ +": Native signature is:",sig);
+ }
+ return func.apply(this, args);
+ }
+ };
+ /* An ondispose() callback for use with
+ StructBinder-created types. */
+ callee.removeFuncList = function(){
+ if(this.ondispose.__removeFuncList){
+ this.ondispose.__removeFuncList.forEach(
+ (v,ndx)=>{
+ if('number'===typeof v){
+ try{wasm.uninstallFunction(v)}
+ catch(e){/*ignore*/}
+ }
+ /* else it's a descriptive label for the next number in
+ the list. */
+ }
+ );
+ delete this.ondispose.__removeFuncList;
+ }
+ };
+ }/*static init*/
+ const sigN = tgt.memberSignature(name);
+ if(sigN.length<2){
+ toss("Member",name,"does not have a function pointer signature:",sigN);
+ }
+ const memKey = tgt.memberKey(name);
+ const fProxy = (applyArgcCheck && !wasm.isPtr(func))
+ /** This middle-man proxy is only for use during development, to
+ confirm that we always pass the proper number of
+ arguments. We know that the C-level code will always use the
+ correct argument count. */
+ ? callee.argcProxy(tgt, memKey, func, sigN)
+ : func;
+ if(wasm.isPtr(fProxy)){
+ if(fProxy && !wasm.functionEntry(fProxy)){
+ toss("Pointer",fProxy,"is not a WASM function table entry.");
+ }
+ tgt[memKey] = fProxy;
+ }else{
+ const pFunc = wasm.installFunction(fProxy, tgt.memberSignature(name, true));
+ tgt[memKey] = pFunc;
+ if(!tgt.ondispose || !tgt.ondispose.__removeFuncList){
+ tgt.addOnDispose('ondispose.__removeFuncList handler',
+ callee.removeFuncList);
+ tgt.ondispose.__removeFuncList = [];
+ }
+ tgt.ondispose.__removeFuncList.push(memKey, pFunc);
+ }
+ return (n,f)=>callee(tgt, n, f, applyArgcCheck);
+ }/*installMethod*/;
+ installMethod.installMethodArgcCheck = false;
+
+ /**
+ Installs methods into the given StructBinder.StructType-type
+ instance. Each entry in the given methods object must map to a
+ known member of the given StructType, else an exception will be
+ triggered. See installMethod() for more details, including the
+ semantics of the 3rd argument.
+
+ As an exception to the above, if any two or more methods in the
+ 2nd argument are the exact same function, installMethod() is
+ _not_ called for the 2nd and subsequent instances, and instead
+ those instances get assigned the same method pointer which is
+ created for the first instance. This optimization is primarily to
+ accommodate special handling of sqlite3_module::xConnect and
+ xCreate methods.
+
+ On success, returns its first argument. Throws on error.
+ */
+ const installMethods = function(
+ structInstance, methods, applyArgcCheck = installMethod.installMethodArgcCheck
+ ){
+ const seen = new Map /* map of <Function, memberName> */;
+ for(const k of Object.keys(methods)){
+ const m = methods[k];
+ const prior = seen.get(m);
+ if(prior){
+ const mkey = structInstance.memberKey(k);
+ structInstance[mkey] = structInstance[structInstance.memberKey(prior)];
+ }else{
+ installMethod(structInstance, k, m, applyArgcCheck);
+ seen.set(m, k);
+ }
+ }
+ return structInstance;
+ };
+
+ /**
+ Equivalent to calling installMethod(this,...arguments) with a
+ first argument of this object. If called with 1 or 2 arguments
+ and the first is an object, it's instead equivalent to calling
+ installMethods(this,...arguments).
+ */
+ StructBinder.StructType.prototype.installMethod = function callee(
+ name, func, applyArgcCheck = installMethod.installMethodArgcCheck
+ ){
+ return (arguments.length < 3 && name && 'object'===typeof name)
+ ? installMethods(this, ...arguments)
+ : installMethod(this, ...arguments);
+ };
+
+ /**
+ Equivalent to calling installMethods() with a first argument
+ of this object.
+ */
+ StructBinder.StructType.prototype.installMethods = function(
+ methods, applyArgcCheck = installMethod.installMethodArgcCheck
+ ){
+ return installMethods(this, methods, applyArgcCheck);
+ };
+
});
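As a rough usage sketch of the installMethod()/installMethods() helpers added above (hypothetical client code, not part of this change; the sqlite3_module members shown are only for illustration):

  const mod = new sqlite3.capi.sqlite3_module();
  mod.installMethods({
    /* Identical functions are installed once and share a single WASM
       function pointer, per the xConnect/xCreate note above. */
    xConnect: (pDb,pAux,argc,argv,ppVtab,pzErr)=>sqlite3.capi.SQLITE_ERROR,
    xCreate:  (pDb,pAux,argc,argv,ppVtab,pzErr)=>sqlite3.capi.SQLITE_ERROR
  });
  /* The single-method form returns a chainable installer: */
  mod.installMethod('xDisconnect', (pVtab)=>0)('xDestroy', (pVtab)=>0);
  mod.dispose(); /* releases the WASM function-table entries installed above */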
Index: ext/wasm/api/sqlite3-api-oo1.js
==================================================================
--- ext/wasm/api/sqlite3-api-oo1.js
+++ ext/wasm/api/sqlite3-api-oo1.js
@@ -1,5 +1,6 @@
+//#ifnot omit-oo1
/*
2022-07-22
The author disclaims copyright to this source code. In place of a
legal notice, here is a blessing:
@@ -1938,6 +1939,8 @@
return jdb.storageSize(affirmDbOpen(this).filename);
};
}/*main-window-only bits*/
});
-
+//#else
+/* Built with the omit-oo1 flag. */
+//#endif ifnot omit-oo1
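The //#ifnot / //#else / //#endif markers above are directives for the $(bin.c-pp) preprocessor described in the GNUmakefile changes earlier in this patch, not JS syntax. As a generic, hypothetical illustration of that filtering style, assuming the default "//#" delimiter and a made-up flag name:

  //#ifnot some-flag
  console.log("kept unless -Dsome-flag is passed to c-pp");
  //#else
  console.log("kept only when -Dsome-flag is passed to c-pp");
  //#endif ifnot some-flag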
Index: ext/wasm/api/sqlite3-api-prologue.js
==================================================================
--- ext/wasm/api/sqlite3-api-prologue.js
+++ ext/wasm/api/sqlite3-api-prologue.js
@@ -1059,11 +1059,11 @@
/**
Sets the current pstack position to the given pointer. Results
are undefined if the passed-in value did not come from
this.pointer.
*/
- restore: wasm.exports.sqlite3_wasm_pstack_restore,
+ restore: wasm.exports.sqlite3__wasm_pstack_restore,
/**
Attempts to allocate the given number of bytes from the
pstack. On success, it zeroes out a block of memory of the
given size, adjusts the pstack pointer, and returns a pointer
to the memory. On error, throws a WasmAllocError. The
@@ -1081,11 +1081,11 @@
*/
alloc: function(n){
if('string'===typeof n && !(n = wasm.sizeofIR(n))){
WasmAllocError.toss("Invalid value for pstack.alloc(",arguments[0],")");
}
- return wasm.exports.sqlite3_wasm_pstack_alloc(n)
+ return wasm.exports.sqlite3__wasm_pstack_alloc(n)
|| WasmAllocError.toss("Could not allocate",n,
"bytes from the pstack.");
},
/**
alloc()'s n chunks, each sz bytes, as a single memory block and
@@ -1161,31 +1161,31 @@
first reserving it via wasm.pstack.alloc() and friends, leads
to undefined results.
*/
pointer: {
configurable: false, iterable: true, writeable: false,
- get: wasm.exports.sqlite3_wasm_pstack_ptr
+ get: wasm.exports.sqlite3__wasm_pstack_ptr
//Whether or not a setter as an alternative to restore() is
//clearer or would just lead to confusion is unclear.
- //set: wasm.exports.sqlite3_wasm_pstack_restore
+ //set: wasm.exports.sqlite3__wasm_pstack_restore
},
/**
sqlite3.wasm.pstack.quota to the total number of bytes
available in the pstack, including any space which is currently
allocated. This value is a compile-time constant.
*/
quota: {
configurable: false, iterable: true, writeable: false,
- get: wasm.exports.sqlite3_wasm_pstack_quota
+ get: wasm.exports.sqlite3__wasm_pstack_quota
},
/**
sqlite3.wasm.pstack.remaining resolves to the amount of space
remaining in the pstack.
*/
remaining: {
configurable: false, iterable: true, writeable: false,
- get: wasm.exports.sqlite3_wasm_pstack_remaining
+ get: wasm.exports.sqlite3__wasm_pstack_remaining
}
})/*wasm.pstack properties*/;
capi.sqlite3_randomness = (...args)=>{
if(1===args.length && util.isTypedArray(args[0])
@@ -1254,18 +1254,18 @@
|| !globalThis.FileSystemFileHandle){
return __wasmfsOpfsDir = "";
}
try{
if(pdir && 0===wasm.xCallWrapped(
- 'sqlite3_wasm_init_wasmfs', 'i32', ['string'], pdir
+ 'sqlite3__wasm_init_wasmfs', 'i32', ['string'], pdir
)){
return __wasmfsOpfsDir = pdir;
}else{
return __wasmfsOpfsDir = "";
}
}catch(e){
- // sqlite3_wasm_init_wasmfs() is not available
+ // sqlite3__wasm_init_wasmfs() is not available
return __wasmfsOpfsDir = "";
}
};
/**
@@ -1363,11 +1363,11 @@
memory boundary!
*/
const zSchema = schema
? (wasm.isPtr(schema) ? schema : wasm.scopedAllocCString(''+schema))
: 0;
- let rc = wasm.exports.sqlite3_wasm_db_serialize(
+ let rc = wasm.exports.sqlite3__wasm_db_serialize(
pDb, zSchema, ppOut, pSize, 0
);
if(rc){
toss3("Database serialization failed with code",
sqlite3.capi.sqlite3_js_rc_str(rc));
@@ -1389,11 +1389,11 @@
C-string pointer, which may be 0), returns a pointer to the
sqlite3_vfs responsible for it. If the given db name is null/0,
or not provided, then "main" is assumed.
*/
capi.sqlite3_js_db_vfs =
- (dbPointer, dbName=0)=>wasm.sqlite3_wasm_db_vfs(dbPointer, dbName);
+ (dbPointer, dbName=0)=>util.sqlite3__wasm_db_vfs(dbPointer, dbName);
/**
A thin wrapper around capi.sqlite3_aggregate_context() which
behaves the same except that it throws a WasmAllocError if that
function returns 0. As a special case, if n is falsy it does
@@ -1447,11 +1447,11 @@
}
try{
if(!util.isInt32(dataLen) || dataLen<0){
SQLite3Error.toss("Invalid 3rd argument for sqlite3_js_posix_create_file().");
}
- const rc = wasm.sqlite3_wasm_posix_create_file(filename, pData, dataLen);
+ const rc = util.sqlite3__wasm_posix_create_file(filename, pData, dataLen);
if(rc) SQLite3Error.toss("Creation of file failed with sqlite3 result code",
capi.sqlite3_js_rc_str(rc));
}finally{
wasm.dealloc(pData);
}
@@ -1549,11 +1549,11 @@
if(!util.isInt32(dataLen) || dataLen<0){
wasm.dealloc(pData);
SQLite3Error.toss("Invalid 4th argument for sqlite3_js_vfs_create_file().");
}
try{
- const rc = wasm.sqlite3_wasm_vfs_create_file(vfs, filename, pData, dataLen);
+ const rc = util.sqlite3__wasm_vfs_create_file(vfs, filename, pData, dataLen);
if(rc) SQLite3Error.toss("Creation of file failed with sqlite3 result code",
capi.sqlite3_js_rc_str(rc));
}finally{
wasm.dealloc(pData);
}
@@ -1670,16 +1670,16 @@
The variants which take `(int, int*)` arguments treat a
missing or falsy pointer argument as 0.
*/
capi.sqlite3_db_config = function(pDb, op, ...args){
if(!this.s){
- this.s = wasm.xWrap('sqlite3_wasm_db_config_s','int',
+ this.s = wasm.xWrap('sqlite3__wasm_db_config_s','int',
['sqlite3*', 'int', 'string:static']
/* MAINDBNAME requires a static string */);
- this.pii = wasm.xWrap('sqlite3_wasm_db_config_pii', 'int',
+ this.pii = wasm.xWrap('sqlite3__wasm_db_config_pii', 'int',
['sqlite3*', 'int', '*','int', 'int']);
- this.ip = wasm.xWrap('sqlite3_wasm_db_config_ip','int',
+ this.ip = wasm.xWrap('sqlite3__wasm_db_config_ip','int',
['sqlite3*', 'int', 'int','*']);
}
switch(op){
case capi.SQLITE_DBCONFIG_ENABLE_FKEY:
case capi.SQLITE_DBCONFIG_ENABLE_TRIGGER:
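
For illustration, a minimal sketch of calling the rewired sqlite3_db_config() wrapper (the db handle and its default constructor arguments are assumptions, not part of this patch):

```
// Sketch: toggle a per-connection option via the wrapped sqlite3_db_config().
// The trailing 0 stands in for a NULL (int*) out-pointer, as documented above.
const db = new sqlite3.oo1.DB(/* assumed to default to an in-memory db */);
const rc = sqlite3.capi.sqlite3_db_config(
  db.pointer, sqlite3.capi.SQLITE_DBCONFIG_ENABLE_FKEY, 1, 0
);
if(rc) sqlite3.SQLite3Error.toss("sqlite3_db_config() failed with code", rc);
db.close();
```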
Index: ext/wasm/api/sqlite3-api-worker1.js
==================================================================
--- ext/wasm/api/sqlite3-api-worker1.js
+++ ext/wasm/api/sqlite3-api-worker1.js
@@ -1,5 +1,6 @@
+//#ifnot omit-oo1
/**
2022-07-22
The author disclaims copyright to this source code. In place of a
legal notice, here is a blessing:
@@ -60,11 +61,11 @@
Each message posted to the worker has an operation-independent
envelope and operation-dependent arguments:
```
{
- type: string, // one of: 'open', 'close', 'exec', 'config-get'
+ type: string, // one of: 'open', 'close', 'exec', 'export', 'config-get'
messageId: OPTIONAL arbitrary value. The worker will copy it as-is
into response messages to assist in client-side dispatching.
dbId: a db identifier string (returned by 'open') which tells the
@@ -323,12 +324,44 @@
The response is the input options object (or a synthesized one if
passed only a string), noting that options.resultRows and
options.columnNames may be populated by the call to db.exec().
+
+ ====================================================================
+ "export" the current db
+
+ To export the underlying database as a byte array...
+
+ Message format:
+
+ ```
+ {
+ type: "export",
+ messageId: ...as above...,
+ dbId: ...as above...
+ }
+ ```
+
+ Response:
+
+ ```
+ {
+ type: "export",
+ messageId: ...as above...,
+ dbId: ...as above...
+ result: {
+ byteArray: Uint8Array (as per sqlite3_js_db_export()),
+ filename: the db filename,
+ mimetype: "application/x-sqlite3"
+ }
+ }
+ ```
+
*/
globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
+const util = sqlite3.util;
sqlite3.initWorker1API = function(){
'use strict';
const toss = (...args)=>{throw new Error(args.join(' '))};
if(!(globalThis.WorkerGlobalScope instanceof Function)){
toss("initWorker1API() must be run from a Worker thread.");
@@ -375,16 +408,16 @@
},
close: function(db,alsoUnlink){
if(db){
delete this.dbs[getDbId(db)];
const filename = db.filename;
- const pVfs = sqlite3.wasm.sqlite3_wasm_db_vfs(db.pointer, 0);
+ const pVfs = util.sqlite3__wasm_db_vfs(db.pointer, 0);
db.close();
const ddNdx = this.dbList.indexOf(db);
if(ddNdx>=0) this.dbList.splice(ddNdx, 1);
if(alsoUnlink && filename && pVfs){
- sqlite3.wasm.sqlite3_wasm_vfs_unlink(pVfs, filename);
+ util.sqlite3__wasm_vfs_unlink(pVfs, filename);
}
}
},
/**
Posts the given worker message value. If xferList is provided,
@@ -461,16 +494,16 @@
byteArray = args.byteArray;
if(byteArray) pVfs = guessVfs(args.filename);
}
if(pVfs){
/* 2022-11-02: this feature is as-yet untested except that
- sqlite3_wasm_vfs_create_file() has been tested from the
+ sqlite3__wasm_vfs_create_file() has been tested from the
browser dev console. */
let pMem;
try{
pMem = sqlite3.wasm.allocFromTypedArray(byteArray);
- const rc = sqlite3.wasm.sqlite3_wasm_vfs_create_file(
+ const rc = util.sqlite3__wasm_vfs_create_file(
pVfs, oargs.filename, pMem, byteArray.byteLength
);
if(rc) sqlite3.SQLite3Error.toss(rc);
}catch(e){
throw new sqlite3.SQLite3Error(
@@ -656,5 +689,8 @@
}, wState.xfer);
};
globalThis.postMessage({type:'sqlite3-api',result:'worker1-ready'});
}.bind({sqlite3});
});
+//#else
+/* Built with the omit-oo1 flag. */
+//#endif ifnot omit-oo1
DELETED ext/wasm/api/sqlite3-v-helper.js
Index: ext/wasm/api/sqlite3-v-helper.js
==================================================================
--- ext/wasm/api/sqlite3-v-helper.js
+++ /dev/null
@@ -1,718 +0,0 @@
-/*
-** 2022-11-30
-**
-** The author disclaims copyright to this source code. In place of a
-** legal notice, here is a blessing:
-**
-** * May you do good and not evil.
-** * May you find forgiveness for yourself and forgive others.
-** * May you share freely, never taking more than you give.
-*/
-
-/**
- This file installs sqlite3.vfs, and object which exists to assist
- in the creation of JavaScript implementations of sqlite3_vfs, along
- with its virtual table counterpart, sqlite3.vtab.
-*/
-'use strict';
-globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
- const wasm = sqlite3.wasm, capi = sqlite3.capi, toss = sqlite3.util.toss3;
- const vfs = Object.create(null), vtab = Object.create(null);
-
- const StructBinder = sqlite3.StructBinder
- /* we require a local alias b/c StructBinder is removed from the sqlite3
- object during the final steps of the API cleanup. */;
- sqlite3.vfs = vfs;
- sqlite3.vtab = vtab;
-
- const sii = capi.sqlite3_index_info;
- /**
- If n is >=0 and less than this.$nConstraint, this function
- returns either a WASM pointer to the 0-based nth entry of
- this.$aConstraint (if passed a truthy 2nd argument) or an
- sqlite3_index_info.sqlite3_index_constraint object wrapping that
- address (if passed a falsy value or no 2nd argument). Returns a
- falsy value if n is out of range.
- */
- sii.prototype.nthConstraint = function(n, asPtr=false){
- if(n<0 || n>=this.$nConstraint) return false;
- const ptr = this.$aConstraint + (
- sii.sqlite3_index_constraint.structInfo.sizeof * n
- );
- return asPtr ? ptr : new sii.sqlite3_index_constraint(ptr);
- };
-
- /**
- Works identically to nthConstraint() but returns state from
- this.$aConstraintUsage, so returns an
- sqlite3_index_info.sqlite3_index_constraint_usage instance
- if passed no 2nd argument or a falsy 2nd argument.
- */
- sii.prototype.nthConstraintUsage = function(n, asPtr=false){
- if(n<0 || n>=this.$nConstraint) return false;
- const ptr = this.$aConstraintUsage + (
- sii.sqlite3_index_constraint_usage.structInfo.sizeof * n
- );
- return asPtr ? ptr : new sii.sqlite3_index_constraint_usage(ptr);
- };
-
- /**
- If n is >=0 and less than this.$nOrderBy, this function
- returns either a WASM pointer to the 0-based nth entry of
- this.$aOrderBy (if passed a truthy 2nd argument) or an
- sqlite3_index_info.sqlite3_index_orderby object wrapping that
- address (if passed a falsy value or no 2nd argument). Returns a
- falsy value if n is out of range.
- */
- sii.prototype.nthOrderBy = function(n, asPtr=false){
- if(n<0 || n>=this.$nOrderBy) return false;
- const ptr = this.$aOrderBy + (
- sii.sqlite3_index_orderby.structInfo.sizeof * n
- );
- return asPtr ? ptr : new sii.sqlite3_index_orderby(ptr);
- };
-
- /**
- Installs a StructBinder-bound function pointer member of the
- given name and function in the given StructType target object.
-
- It creates a WASM proxy for the given function and arranges for
- that proxy to be cleaned up when tgt.dispose() is called. Throws
- on the slightest hint of error, e.g. tgt is-not-a StructType,
- name does not map to a struct-bound member, etc.
-
- As a special case, if the given function is a pointer, then
- `wasm.functionEntry()` is used to validate that it is a known
- function. If so, it is used as-is with no extra level of proxying
- or cleanup, else an exception is thrown. It is legal to pass a
- value of 0, indicating a NULL pointer, with the caveat that 0
- _is_ a legal function pointer in WASM but it will not be accepted
- as such _here_. (Justification: the function at address zero must
- be one which initially came from the WASM module, not a method we
- want to bind to a virtual table or VFS.)
-
- This function returns a proxy for itself which is bound to tgt
- and takes 2 args (name,func). That function returns the same
- thing as this one, permitting calls to be chained.
-
- If called with only 1 arg, it has no side effects but returns a
- func with the same signature as described above.
-
- ACHTUNG: because we cannot generically know how to transform JS
- exceptions into result codes, the installed functions do no
- automatic catching of exceptions. It is critical, to avoid
- undefined behavior in the C layer, that methods mapped via
- this function do not throw. The exception, as it were, to that
- rule is...
-
- If applyArgcCheck is true then each JS function (as opposed to
- function pointers) gets wrapped in a proxy which asserts that it
- is passed the expected number of arguments, throwing if the
- argument count does not match expectations. That is only intended
- for dev-time usage for sanity checking, and will leave the C
- environment in an undefined state.
- */
- const installMethod = function callee(
- tgt, name, func, applyArgcCheck = callee.installMethodArgcCheck
- ){
- if(!(tgt instanceof StructBinder.StructType)){
- toss("Usage error: target object is-not-a StructType.");
- }else if(!(func instanceof Function) && !wasm.isPtr(func)){
- toss("Usage errror: expecting a Function or WASM pointer to one.");
- }
- if(1===arguments.length){
- return (n,f)=>callee(tgt, n, f, applyArgcCheck);
- }
- if(!callee.argcProxy){
- callee.argcProxy = function(tgt, funcName, func,sig){
- return function(...args){
- if(func.length!==arguments.length){
- toss("Argument mismatch for",
- tgt.structInfo.name+"::"+funcName
- +": Native signature is:",sig);
- }
- return func.apply(this, args);
- }
- };
- /* An ondispose() callback for use with
- StructBinder-created types. */
- callee.removeFuncList = function(){
- if(this.ondispose.__removeFuncList){
- this.ondispose.__removeFuncList.forEach(
- (v,ndx)=>{
- if('number'===typeof v){
- try{wasm.uninstallFunction(v)}
- catch(e){/*ignore*/}
- }
- /* else it's a descriptive label for the next number in
- the list. */
- }
- );
- delete this.ondispose.__removeFuncList;
- }
- };
- }/*static init*/
- const sigN = tgt.memberSignature(name);
- if(sigN.length<2){
- toss("Member",name,"does not have a function pointer signature:",sigN);
- }
- const memKey = tgt.memberKey(name);
- const fProxy = (applyArgcCheck && !wasm.isPtr(func))
- /** This middle-man proxy is only for use during development, to
- confirm that we always pass the proper number of
- arguments. We know that the C-level code will always use the
- correct argument count. */
- ? callee.argcProxy(tgt, memKey, func, sigN)
- : func;
- if(wasm.isPtr(fProxy)){
- if(fProxy && !wasm.functionEntry(fProxy)){
- toss("Pointer",fProxy,"is not a WASM function table entry.");
- }
- tgt[memKey] = fProxy;
- }else{
- const pFunc = wasm.installFunction(fProxy, tgt.memberSignature(name, true));
- tgt[memKey] = pFunc;
- if(!tgt.ondispose || !tgt.ondispose.__removeFuncList){
- tgt.addOnDispose('ondispose.__removeFuncList handler',
- callee.removeFuncList);
- tgt.ondispose.__removeFuncList = [];
- }
- tgt.ondispose.__removeFuncList.push(memKey, pFunc);
- }
- return (n,f)=>callee(tgt, n, f, applyArgcCheck);
- }/*installMethod*/;
- installMethod.installMethodArgcCheck = false;
-
- /**
- Installs methods into the given StructType-type instance. Each
- entry in the given methods object must map to a known member of
- the given StructType, else an exception will be triggered. See
- installMethod() for more details, including the semantics of the
- 3rd argument.
-
- As an exception to the above, if any two or more methods in the
- 2nd argument are the exact same function, installMethod() is
- _not_ called for the 2nd and subsequent instances, and instead
- those instances get assigned the same method pointer which is
- created for the first instance. This optimization is primarily to
- accommodate special handling of sqlite3_module::xConnect and
- xCreate methods.
-
- On success, returns its first argument. Throws on error.
- */
- const installMethods = function(
- structInstance, methods, applyArgcCheck = installMethod.installMethodArgcCheck
- ){
- const seen = new Map /* map of */;
- for(const k of Object.keys(methods)){
- const m = methods[k];
- const prior = seen.get(m);
- if(prior){
- const mkey = structInstance.memberKey(k);
- structInstance[mkey] = structInstance[structInstance.memberKey(prior)];
- }else{
- installMethod(structInstance, k, m, applyArgcCheck);
- seen.set(m, k);
- }
- }
- return structInstance;
- };
-
- /**
- Equivalent to calling installMethod(this,...arguments) with a
- first argument of this object. If called with 1 or 2 arguments
- and the first is an object, it's instead equivalent to calling
- installMethods(this,...arguments).
- */
- StructBinder.StructType.prototype.installMethod = function callee(
- name, func, applyArgcCheck = installMethod.installMethodArgcCheck
- ){
- return (arguments.length < 3 && name && 'object'===typeof name)
- ? installMethods(this, ...arguments)
- : installMethod(this, ...arguments);
- };
-
- /**
- Equivalent to calling installMethods() with a first argument
- of this object.
- */
- StructBinder.StructType.prototype.installMethods = function(
- methods, applyArgcCheck = installMethod.installMethodArgcCheck
- ){
- return installMethods(this, methods, applyArgcCheck);
- };
-
- /**
- Uses sqlite3_vfs_register() to register this
- sqlite3.capi.sqlite3_vfs. This object must have already been
- filled out properly. If the first argument is truthy, the VFS is
- registered as the default VFS, else it is not.
-
- On success, returns this object. Throws on error.
- */
- capi.sqlite3_vfs.prototype.registerVfs = function(asDefault=false){
- if(!(this instanceof sqlite3.capi.sqlite3_vfs)){
- toss("Expecting a sqlite3_vfs-type argument.");
- }
- const rc = capi.sqlite3_vfs_register(this, asDefault ? 1 : 0);
- if(rc){
- toss("sqlite3_vfs_register(",this,") failed with rc",rc);
- }
- if(this.pointer !== capi.sqlite3_vfs_find(this.$zName)){
- toss("BUG: sqlite3_vfs_find(vfs.$zName) failed for just-installed VFS",
- this);
- }
- return this;
- };
-
- /**
- A wrapper for installMethods() or registerVfs() to reduce
- installation of a VFS and/or its I/O methods to a single
- call.
-
- Accepts an object which contains the properties "io" and/or
- "vfs", each of which is itself an object with following properties:
-
- - `struct`: an sqlite3.StructType-type struct. This must be a
- populated (except for the methods) object of type
- sqlite3_io_methods (for the "io" entry) or sqlite3_vfs (for the
- "vfs" entry).
-
- - `methods`: an object mapping sqlite3_io_methods method names
- (e.g. 'xClose') to JS implementations of those methods. The JS
- implementations must be call-compatible with their native
- counterparts.
-
- For each of those object, this function passes its (`struct`,
- `methods`, (optional) `applyArgcCheck`) properties to
- installMethods().
-
- If the `vfs` entry is set then:
-
- - Its `struct` property's registerVfs() is called. The
- `vfs` entry may optionally have an `asDefault` property, which
- gets passed as the argument to registerVfs().
-
- - If `struct.$zName` is falsy and the entry has a string-type
- `name` property, `struct.$zName` is set to the C-string form of
- that `name` value before registerVfs() is called. That string
- gets added to the on-dispose state of the struct.
-
- On success returns this object. Throws on error.
- */
- vfs.installVfs = function(opt){
- let count = 0;
- const propList = ['io','vfs'];
- for(const key of propList){
- const o = opt[key];
- if(o){
- ++count;
- installMethods(o.struct, o.methods, !!o.applyArgcCheck);
- if('vfs'===key){
- if(!o.struct.$zName && 'string'===typeof o.name){
- o.struct.addOnDispose(
- o.struct.$zName = wasm.allocCString(o.name)
- );
- }
- o.struct.registerVfs(!!o.asDefault);
- }
- }
- }
- if(!count) toss("Misuse: installVfs() options object requires at least",
- "one of:", propList);
- return this;
- };
-
- /**
- Internal factory function for xVtab and xCursor impls.
- */
- const __xWrapFactory = function(methodName,StructType){
- return function(ptr,removeMapping=false){
- if(0===arguments.length) ptr = new StructType;
- if(ptr instanceof StructType){
- //T.assert(!this.has(ptr.pointer));
- this.set(ptr.pointer, ptr);
- return ptr;
- }else if(!wasm.isPtr(ptr)){
- sqlite3.SQLite3Error.toss("Invalid argument to",methodName+"()");
- }
- let rc = this.get(ptr);
- if(removeMapping) this.delete(ptr);
- return rc;
- }.bind(new Map);
- };
-
- /**
- A factory function which implements a simple lifetime manager for
- mappings between C struct pointers and their JS-level wrappers.
- The first argument must be the logical name of the manager
- (e.g. 'xVtab' or 'xCursor'), which is only used for error
- reporting. The second must be the capi.XYZ struct-type value,
- e.g. capi.sqlite3_vtab or capi.sqlite3_vtab_cursor.
-
- Returns an object with 4 methods: create(), get(), unget(), and
- dispose(), plus a StructType member with the value of the 2nd
- argument. The methods are documented in the body of this
- function.
- */
- const StructPtrMapper = function(name, StructType){
- const __xWrap = __xWrapFactory(name,StructType);
- /**
- This object houses a small API for managing mappings of (`T*`)
- to StructType objects, specifically within the lifetime
- requirements of sqlite3_module methods.
- */
- return Object.assign(Object.create(null),{
- /** The StructType object for this object's API. */
- StructType,
- /**
- Creates a new StructType object, writes its `pointer`
- value to the given output pointer, and returns that
- object. Its intended usage depends on StructType:
-
- sqlite3_vtab: to be called from sqlite3_module::xConnect()
- or xCreate() implementations.
-
- sqlite3_vtab_cursor: to be called from xOpen().
-
- This will throw if allocation of the StructType instance
- fails or if ppOut is not a pointer-type value.
- */
- create: (ppOut)=>{
- const rc = __xWrap();
- wasm.pokePtr(ppOut, rc.pointer);
- return rc;
- },
- /**
- Returns the StructType object previously mapped to the
- given pointer using create(). Its intended usage depends
- on StructType:
-
- sqlite3_vtab: to be called from sqlite3_module methods which
- take a (sqlite3_vtab*) pointer _except_ for
- xDestroy()/xDisconnect(), in which case unget() or dispose().
-
- sqlite3_vtab_cursor: to be called from any sqlite3_module methods
- which take a `sqlite3_vtab_cursor*` argument except xClose(),
- in which case use unget() or dispose().
-
- Rule to remember: _never_ call dispose() on an instance
- returned by this function.
- */
- get: (pCObj)=>__xWrap(pCObj),
- /**
- Identical to get() but also disconnects the mapping between the
- given pointer and the returned StructType object, such that
- future calls to this function or get() with the same pointer
- will return the undefined value. Its intended usage depends
- on StructType:
-
- sqlite3_vtab: to be called from sqlite3_module::xDisconnect() or
- xDestroy() implementations or in error handling of a failed
- xCreate() or xConnect().
-
- sqlite3_vtab_cursor: to be called from xClose() or during
- cleanup in a failed xOpen().
-
- Calling this method obligates the caller to call dispose() on
- the returned object when they're done with it.
- */
- unget: (pCObj)=>__xWrap(pCObj,true),
- /**
- Works like unget() plus it calls dispose() on the
- StructType object.
- */
- dispose: (pCObj)=>{
- const o = __xWrap(pCObj,true);
- if(o) o.dispose();
- }
- });
- };
-
- /**
- A lifetime-management object for mapping `sqlite3_vtab*`
- instances in sqlite3_module methods to capi.sqlite3_vtab
- objects.
-
- The API docs are in the API-internal StructPtrMapper().
- */
- vtab.xVtab = StructPtrMapper('xVtab', capi.sqlite3_vtab);
-
- /**
- A lifetime-management object for mapping `sqlite3_vtab_cursor*`
- instances in sqlite3_module methods to capi.sqlite3_vtab_cursor
- objects.
-
- The API docs are in the API-internal StructPtrMapper().
- */
- vtab.xCursor = StructPtrMapper('xCursor', capi.sqlite3_vtab_cursor);
-
- /**
- Convenience form of creating an sqlite3_index_info wrapper,
- intended for use in xBestIndex implementations. Note that the
- caller is expected to call dispose() on the returned object
- before returning. Though not _strictly_ required, as that object
- does not own the pIdxInfo memory, it is nonetheless good form.
- */
- vtab.xIndexInfo = (pIdxInfo)=>new capi.sqlite3_index_info(pIdxInfo);
-
- /**
- Given an error object, this function returns
- sqlite3.capi.SQLITE_NOMEM if (e instanceof
- sqlite3.WasmAllocError), else it returns its
- second argument. Its intended usage is in the methods
- of a sqlite3_vfs or sqlite3_module:
-
- ```
- try{
- let rc = ...
- return rc;
- }catch(e){
- return sqlite3.vtab.exceptionToRc(e, sqlite3.capi.SQLITE_XYZ);
- // where SQLITE_XYZ is some call-appropriate result code.
- }
- ```
- */
- /**vfs.exceptionToRc = vtab.exceptionToRc =
- (e, defaultRc=capi.SQLITE_ERROR)=>(
- (e instanceof sqlite3.WasmAllocError)
- ? capi.SQLITE_NOMEM
- : defaultRc
- );*/
-
- /**
- Given an sqlite3_module method name and error object, this
- function returns sqlite3.capi.SQLITE_NOMEM if (e instanceof
- sqlite3.WasmAllocError), else it returns its second argument. Its
- intended usage is in the methods of a sqlite3_vfs or
- sqlite3_module:
-
- ```
- try{
- let rc = ...
- return rc;
- }catch(e){
- return sqlite3.vtab.xError(
- 'xColumn', e, sqlite3.capi.SQLITE_XYZ);
- // where SQLITE_XYZ is some call-appropriate result code.
- }
- ```
-
- If no 3rd argument is provided, its default depends on
- the error type:
-
- - An sqlite3.WasmAllocError always resolves to capi.SQLITE_NOMEM.
-
- - If err is an SQLite3Error then its `resultCode` property
- is used.
-
- - If all else fails, capi.SQLITE_ERROR is used.
-
- If xError.errorReporter is a function, it is called in
- order to report the error, else the error is not reported.
- If that function throws, that exception is ignored.
- */
- vtab.xError = function f(methodName, err, defaultRc){
- if(f.errorReporter instanceof Function){
- try{f.errorReporter("sqlite3_module::"+methodName+"(): "+err.message);}
- catch(e){/*ignored*/}
- }
- let rc;
- if(err instanceof sqlite3.WasmAllocError) rc = capi.SQLITE_NOMEM;
- else if(arguments.length>2) rc = defaultRc;
- else if(err instanceof sqlite3.SQLite3Error) rc = err.resultCode;
- return rc || capi.SQLITE_ERROR;
- };
- vtab.xError.errorReporter = 1 ? console.error.bind(console) : false;
-
- /**
- "The problem" with this is that it introduces an outer function with
- a different arity than the passed-in method callback. That means we
- cannot do argc validation on these. Additionally, some methods (namely
- xConnect) may have call-specific error handling. It would be a shame to
- hard-coded that per-method support in this function.
- */
- /** vtab.methodCatcher = function(methodName, method, defaultErrRc=capi.SQLITE_ERROR){
- return function(...args){
- try { method(...args); }
- }catch(e){ return vtab.xError(methodName, e, defaultRc) }
- };
- */
-
- /**
- A helper for sqlite3_vtab::xRowid() and xUpdate()
- implementations. It must be passed the final argument to one of
- those methods (an output pointer to an int64 row ID) and the
- value to store at the output pointer's address. Returns the same
- as wasm.poke() and will throw if the 1st or 2nd arguments
- are invalid for that function.
-
- Example xRowid impl:
-
- ```
- const xRowid = (pCursor, ppRowid64)=>{
- const c = vtab.xCursor(pCursor);
- vtab.xRowid(ppRowid64, c.myRowId);
- return 0;
- };
- ```
- */
- vtab.xRowid = (ppRowid64, value)=>wasm.poke(ppRowid64, value, 'i64');
-
- /**
- A helper to initialize and set up an sqlite3_module object for
- later installation into individual databases using
- sqlite3_create_module(). Requires an object with the following
- properties:
-
- - `methods`: an object containing a mapping of properties with
- the C-side names of the sqlite3_module methods, e.g. xCreate,
- xBestIndex, etc., to JS implementations for those functions.
- Certain special-case handling is performed, as described below.
-
- - `catchExceptions` (default=false): if truthy, the given methods
- are not mapped as-is, but are instead wrapped inside wrappers
- which translate exceptions into result codes of SQLITE_ERROR or
- SQLITE_NOMEM, depending on whether the exception is an
- sqlite3.WasmAllocError. In the case of the xConnect and xCreate
- methods, the exception handler also sets the output error
- string to the exception's error string.
-
- - OPTIONAL `struct`: a sqlite3.capi.sqlite3_module() instance. If
- not set, one will be created automatically. If the current
- "this" is-a sqlite3_module then it is unconditionally used in
- place of `struct`.
-
- - OPTIONAL `iVersion`: if set, it must be an integer value and it
- gets assigned to the `$iVersion` member of the struct object.
- If it's _not_ set, and the passed-in `struct` object's `$iVersion`
- is 0 (the default) then this function attempts to define a value
- for that property based on the list of methods it has.
-
- If `catchExceptions` is false, it is up to the client to ensure
- that no exceptions escape the methods, as doing so would move
- them through the C API, leading to undefined
- behavior. (vtab.xError() is intended to assist in reporting
- such exceptions.)
-
- Certain methods may refer to the same implementation. To simplify
- the definition of such methods:
-
- - If `methods.xConnect` is `true` then the value of
- `methods.xCreate` is used in its place, and vice versa. sqlite
- treats xConnect/xCreate functions specially if they are exactly
- the same function (same pointer value).
-
- - If `methods.xDisconnect` is true then the value of
- `methods.xDestroy` is used in its place, and vice versa.
-
- This is to facilitate creation of those methods inline in the
- passed-in object without requiring the client to explicitly get a
- reference to one of them in order to assign it to the other
- one.
-
- The `catchExceptions`-installed handlers will account for
- identical references to the above functions and will install the
- same wrapper function for both.
-
- The given methods are expected to return integer values, as
- expected by the C API. If `catchExceptions` is truthy, the return
- value of the wrapped function will be used as-is and will be
- translated to 0 if the function returns a falsy value (e.g. if it
- does not have an explicit return). If `catchExceptions` is _not_
- active, the method implementations must explicitly return integer
- values.
-
- Throws on error. On success, returns the sqlite3_module object
- (`this` or `opt.struct` or a new sqlite3_module instance,
- depending on how it's called).
- */
- vtab.setupModule = function(opt){
- let createdMod = false;
- const mod = (this instanceof capi.sqlite3_module)
- ? this : (opt.struct || (createdMod = new capi.sqlite3_module()));
- try{
- const methods = opt.methods || toss("Missing 'methods' object.");
- for(const e of Object.entries({
- // -----^ ==> [k,v] triggers a broken code transformation in
- // some versions of the emsdk toolchain.
- xConnect: 'xCreate', xDisconnect: 'xDestroy'
- })){
- // Remap X=true to X=Y for certain X/Y combinations
- const k = e[0], v = e[1];
- if(true === methods[k]) methods[k] = methods[v];
- else if(true === methods[v]) methods[v] = methods[k];
- }
- if(opt.catchExceptions){
- const fwrap = function(methodName, func){
- if(['xConnect','xCreate'].indexOf(methodName) >= 0){
- return function(pDb, pAux, argc, argv, ppVtab, pzErr){
- try{return func(...arguments) || 0}
- catch(e){
- if(!(e instanceof sqlite3.WasmAllocError)){
- wasm.dealloc(wasm.peekPtr(pzErr));
- wasm.pokePtr(pzErr, wasm.allocCString(e.message));
- }
- return vtab.xError(methodName, e);
- }
- };
- }else{
- return function(...args){
- try{return func(...args) || 0}
- catch(e){
- return vtab.xError(methodName, e);
- }
- };
- }
- };
- const mnames = [
- 'xCreate', 'xConnect', 'xBestIndex', 'xDisconnect',
- 'xDestroy', 'xOpen', 'xClose', 'xFilter', 'xNext',
- 'xEof', 'xColumn', 'xRowid', 'xUpdate',
- 'xBegin', 'xSync', 'xCommit', 'xRollback',
- 'xFindFunction', 'xRename', 'xSavepoint', 'xRelease',
- 'xRollbackTo', 'xShadowName'
- ];
- const remethods = Object.create(null);
- for(const k of mnames){
- const m = methods[k];
- if(!(m instanceof Function)) continue;
- else if('xConnect'===k && methods.xCreate===m){
- remethods[k] = methods.xCreate;
- }else if('xCreate'===k && methods.xConnect===m){
- remethods[k] = methods.xConnect;
- }else{
- remethods[k] = fwrap(k, m);
- }
- }
- installMethods(mod, remethods, false);
- }else{
- // No automatic exception handling. Trust the client
- // to not throw.
- installMethods(
- mod, methods, !!opt.applyArgcCheck/*undocumented option*/
- );
- }
- if(0===mod.$iVersion){
- let v;
- if('number'===typeof opt.iVersion) v = opt.iVersion;
- else if(mod.$xShadowName) v = 3;
- else if(mod.$xSavePoint || mod.$xRelease || mod.$xRollbackTo) v = 2;
- else v = 1;
- mod.$iVersion = v;
- }
- }catch(e){
- if(createdMod) createdMod.dispose();
- throw e;
- }
- return mod;
- }/*setupModule()*/;
-
- /**
- Equivalent to calling vtab.setupModule() with this sqlite3_module
- object as the call's `this`.
- */
- capi.sqlite3_module.prototype.setupModule = function(opt){
- return vtab.setupModule.call(this, opt);
- };
-}/*sqlite3ApiBootstrap.initializers.push()*/);
ADDED ext/wasm/api/sqlite3-vfs-helper.c-pp.js
Index: ext/wasm/api/sqlite3-vfs-helper.c-pp.js
==================================================================
--- /dev/null
+++ ext/wasm/api/sqlite3-vfs-helper.c-pp.js
@@ -0,0 +1,103 @@
+/*
+** 2022-11-30
+**
+** The author disclaims copyright to this source code. In place of a
+** legal notice, here is a blessing:
+**
+** * May you do good and not evil.
+** * May you find forgiveness for yourself and forgive others.
+** * May you share freely, never taking more than you give.
+*/
+
+/**
+ This file installs sqlite3.vfs, a namespace of helpers for use in
+ the creation of JavaScript implementations of sqlite3_vfs.
+*/
+'use strict';
+globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
+ const wasm = sqlite3.wasm, capi = sqlite3.capi, toss = sqlite3.util.toss3;
+ const vfs = Object.create(null);
+ sqlite3.vfs = vfs;
+
+ /**
+ Uses sqlite3_vfs_register() to register this
+ sqlite3.capi.sqlite3_vfs instance. This object must have already
+ been filled out properly. If the first argument is truthy, the
+ VFS is registered as the default VFS, else it is not.
+
+ On success, returns this object. Throws on error.
+ */
+ capi.sqlite3_vfs.prototype.registerVfs = function(asDefault=false){
+ if(!(this instanceof sqlite3.capi.sqlite3_vfs)){
+ toss("Expecting a sqlite3_vfs-type argument.");
+ }
+ const rc = capi.sqlite3_vfs_register(this, asDefault ? 1 : 0);
+ if(rc){
+ toss("sqlite3_vfs_register(",this,") failed with rc",rc);
+ }
+ if(this.pointer !== capi.sqlite3_vfs_find(this.$zName)){
+ toss("BUG: sqlite3_vfs_find(vfs.$zName) failed for just-installed VFS",
+ this);
+ }
+ return this;
+ };
+
+ /**
+ A wrapper for
+ sqlite3.StructBinder.StructType.prototype.installMethods() or
+ registerVfs() to reduce installation of a VFS and/or its I/O
+ methods to a single call.
+
+ Accepts an object which contains the properties "io" and/or
+ "vfs", each of which is itself an object with following properties:
+
+ - `struct`: an sqlite3.StructBinder.StructType-type struct. This
+ must be a populated (except for the methods) object of type
+ sqlite3_io_methods (for the "io" entry) or sqlite3_vfs (for the
+ "vfs" entry).
+
+ - `methods`: an object mapping sqlite3_io_methods method names
+ (e.g. 'xClose') to JS implementations of those methods. The JS
+ implementations must be call-compatible with their native
+ counterparts.
+
+ For each of those objects, this function passes its (`struct`,
+ `methods`, (optional) `applyArgcCheck`) properties to
+ installMethods().
+
+ If the `vfs` entry is set then:
+
+ - Its `struct` property's registerVfs() is called. The
+ `vfs` entry may optionally have an `asDefault` property, which
+ gets passed as the argument to registerVfs().
+
+ - If `struct.$zName` is falsy and the entry has a string-type
+ `name` property, `struct.$zName` is set to the C-string form of
+ that `name` value before registerVfs() is called. That string
+ gets added to the on-dispose state of the struct.
+
+ On success returns this object. Throws on error.
+ */
+ vfs.installVfs = function(opt){
+ let count = 0;
+ const propList = ['io','vfs'];
+ for(const key of propList){
+ const o = opt[key];
+ if(o){
+ ++count;
+ o.struct.installMethods(o.methods, !!o.applyArgcCheck);
+ if('vfs'===key){
+ if(!o.struct.$zName && 'string'===typeof o.name){
+ o.struct.addOnDispose(
+ o.struct.$zName = wasm.allocCString(o.name)
+ );
+ }
+ o.struct.registerVfs(!!o.asDefault);
+ }
+ }
+ }
+ if(!count) toss("Misuse: installVfs() options object requires at least",
+ "one of:", propList);
+ return this;
+ };
+}/*sqlite3ApiBootstrap.initializers.push()*/);
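
An illustrative sketch of vfs.installVfs() as described in the new helper file (the struct instances and method bodies below are placeholders, not a working VFS):

```
// Sketch: register a VFS and its I/O methods in one call.
const myIoStruct = new sqlite3.capi.sqlite3_io_methods();
const myVfsStruct = new sqlite3.capi.sqlite3_vfs();
myVfsStruct.$szOsFile = sqlite3.capi.sqlite3_file.structInfo.sizeof;
sqlite3.vfs.installVfs({
  io: {
    struct: myIoStruct,
    methods: { xClose: (pFile)=>0 /* ...remaining io methods... */ }
  },
  vfs: {
    struct: myVfsStruct,
    name: "demo-vfs",      // copied into $zName because $zName is not yet set
    asDefault: false,      // forwarded to registerVfs()
    methods: {
      xOpen: (pVfs, zName, pFile, flags, pOutFlags)=>sqlite3.capi.SQLITE_CANTOPEN
      /* ...remaining vfs methods... */
    }
  }
});
```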
Index: ext/wasm/api/sqlite3-vfs-opfs.c-pp.js
==================================================================
--- ext/wasm/api/sqlite3-vfs-opfs.c-pp.js
+++ ext/wasm/api/sqlite3-vfs-opfs.c-pp.js
@@ -243,11 +243,12 @@
: null /* dVfs will be null when sqlite3 is built with
SQLITE_OS_OTHER. */;
opfsIoMethods.$iVersion = 1;
opfsVfs.$iVersion = 2/*yes, two*/;
opfsVfs.$szOsFile = capi.sqlite3_file.structInfo.sizeof;
- opfsVfs.$mxPathname = 1024/*sure, why not?*/;
+ opfsVfs.$mxPathname = 1024/* sure, why not? The OPFS name length limit
+ is undocumented/unspecified. */;
opfsVfs.$zName = wasm.allocCString("opfs");
// All C-side memory of opfsVfs is zeroed out, but just to be explicit:
opfsVfs.$xDlOpen = opfsVfs.$xDlError = opfsVfs.$xDlSym = opfsVfs.$xDlClose = null;
opfsVfs.addOnDispose(
'$zName', opfsVfs.$zName,
@@ -991,31 +992,10 @@
temporary file name. Its argument is the length of the string,
defaulting to 16.
*/
opfsUtil.randomFilename = randomFilename;
- /**
- Re-registers the OPFS VFS. This is intended only for odd use
- cases which have to call sqlite3_shutdown() as part of their
- initialization process, which will unregister the VFS
- registered by installOpfsVfs(). If passed a truthy value, the
- OPFS VFS is registered as the default VFS, else it is not made
- the default. Returns the result of the the
- sqlite3_vfs_register() call.
-
- Design note: the problem of having to re-register things after
- a shutdown/initialize pair is more general. How to best plug
- that in to the library is unclear. In particular, we cannot
- hook in to any C-side calls to sqlite3_initialize(), so we
- cannot add an after-initialize callback mechanism.
- */
- opfsUtil.registerVfs = (asDefault=false)=>{
- return wasm.exports.sqlite3_vfs_register(
- opfsVfs.pointer, asDefault ? 1 : 0
- );
- };
-
/**
Returns a promise which resolves to an object which represents
all files and directories in the OPFS tree. The top-most object
has two properties: `dirs` is an array of directory entries
(described below) and `files` is a list of file names for all
ADDED ext/wasm/api/sqlite3-vtab-helper.c-pp.js
Index: ext/wasm/api/sqlite3-vtab-helper.c-pp.js
==================================================================
--- /dev/null
+++ ext/wasm/api/sqlite3-vtab-helper.c-pp.js
@@ -0,0 +1,423 @@
+/*
+** 2022-11-30
+**
+** The author disclaims copyright to this source code. In place of a
+** legal notice, here is a blessing:
+**
+** * May you do good and not evil.
+** * May you find forgiveness for yourself and forgive others.
+** * May you share freely, never taking more than you give.
+*/
+
+/**
+ This file installs sqlite3.vtab, a namespace of helpers for use in
+ the creation of JavaScript implementations of virtual tables.
+*/
+'use strict';
+globalThis.sqlite3ApiBootstrap.initializers.push(function(sqlite3){
+ const wasm = sqlite3.wasm, capi = sqlite3.capi, toss = sqlite3.util.toss3;
+ const vtab = Object.create(null);
+ sqlite3.vtab = vtab;
+
+ const sii = capi.sqlite3_index_info;
+ /**
+ If n is >=0 and less than this.$nConstraint, this function
+ returns either a WASM pointer to the 0-based nth entry of
+ this.$aConstraint (if passed a truthy 2nd argument) or an
+ sqlite3_index_info.sqlite3_index_constraint object wrapping that
+ address (if passed a falsy value or no 2nd argument). Returns a
+ falsy value if n is out of range.
+ */
+ sii.prototype.nthConstraint = function(n, asPtr=false){
+ if(n<0 || n>=this.$nConstraint) return false;
+ const ptr = this.$aConstraint + (
+ sii.sqlite3_index_constraint.structInfo.sizeof * n
+ );
+ return asPtr ? ptr : new sii.sqlite3_index_constraint(ptr);
+ };
+
+ /**
+ Works identically to nthConstraint() but returns state from
+ this.$aConstraintUsage, so returns an
+ sqlite3_index_info.sqlite3_index_constraint_usage instance
+ if passed no 2nd argument or a falsy 2nd argument.
+ */
+ sii.prototype.nthConstraintUsage = function(n, asPtr=false){
+ if(n<0 || n>=this.$nConstraint) return false;
+ const ptr = this.$aConstraintUsage + (
+ sii.sqlite3_index_constraint_usage.structInfo.sizeof * n
+ );
+ return asPtr ? ptr : new sii.sqlite3_index_constraint_usage(ptr);
+ };
+
+ /**
+ If n is >=0 and less than this.$nOrderBy, this function
+ returns either a WASM pointer to the 0-based nth entry of
+ this.$aOrderBy (if passed a truthy 2nd argument) or an
+ sqlite3_index_info.sqlite3_index_orderby object wrapping that
+ address (if passed a falsy value or no 2nd argument). Returns a
+ falsy value if n is out of range.
+ */
+ sii.prototype.nthOrderBy = function(n, asPtr=false){
+ if(n<0 || n>=this.$nOrderBy) return false;
+ const ptr = this.$aOrderBy + (
+ sii.sqlite3_index_orderby.structInfo.sizeof * n
+ );
+ return asPtr ? ptr : new sii.sqlite3_index_orderby(ptr);
+ };
+
+ /**
+ Internal factory function for xVtab and xCursor impls.
+ */
+ const __xWrapFactory = function(methodName,StructType){
+ return function(ptr,removeMapping=false){
+ if(0===arguments.length) ptr = new StructType;
+ if(ptr instanceof StructType){
+ //T.assert(!this.has(ptr.pointer));
+ this.set(ptr.pointer, ptr);
+ return ptr;
+ }else if(!wasm.isPtr(ptr)){
+ sqlite3.SQLite3Error.toss("Invalid argument to",methodName+"()");
+ }
+ let rc = this.get(ptr);
+ if(removeMapping) this.delete(ptr);
+ return rc;
+ }.bind(new Map);
+ };
+
+ /**
+ A factory function which implements a simple lifetime manager for
+ mappings between C struct pointers and their JS-level wrappers.
+ The first argument must be the logical name of the manager
+ (e.g. 'xVtab' or 'xCursor'), which is only used for error
+ reporting. The second must be the capi.XYZ struct-type value,
+ e.g. capi.sqlite3_vtab or capi.sqlite3_vtab_cursor.
+
+ Returns an object with 4 methods: create(), get(), unget(), and
+ dispose(), plus a StructType member with the value of the 2nd
+ argument. The methods are documented in the body of this
+ function.
+ */
+ const StructPtrMapper = function(name, StructType){
+ const __xWrap = __xWrapFactory(name,StructType);
+ /**
+ This object houses a small API for managing mappings of (`T*`)
+ to StructType objects, specifically within the lifetime
+ requirements of sqlite3_module methods.
+ */
+ return Object.assign(Object.create(null),{
+ /** The StructType object for this object's API. */
+ StructType,
+ /**
+ Creates a new StructType object, writes its `pointer`
+ value to the given output pointer, and returns that
+ object. Its intended usage depends on StructType:
+
+ sqlite3_vtab: to be called from sqlite3_module::xConnect()
+ or xCreate() implementations.
+
+ sqlite3_vtab_cursor: to be called from xOpen().
+
+ This will throw if allocation of the StructType instance
+ fails or if ppOut is not a pointer-type value.
+ */
+ create: (ppOut)=>{
+ const rc = __xWrap();
+ wasm.pokePtr(ppOut, rc.pointer);
+ return rc;
+ },
+ /**
+ Returns the StructType object previously mapped to the
+ given pointer using create(). Its intended usage depends
+ on StructType:
+
+ sqlite3_vtab: to be called from sqlite3_module methods which
+ take a (sqlite3_vtab*) pointer _except_ for
+ xDestroy()/xDisconnect(), in which case use unget() or dispose().
+
+ sqlite3_vtab_cursor: to be called from any sqlite3_module methods
+ which take a `sqlite3_vtab_cursor*` argument except xClose(),
+ in which case use unget() or dispose().
+
+ Rule to remember: _never_ call dispose() on an instance
+ returned by this function.
+ */
+ get: (pCObj)=>__xWrap(pCObj),
+ /**
+ Identical to get() but also disconnects the mapping between the
+ given pointer and the returned StructType object, such that
+ future calls to this function or get() with the same pointer
+ will return the undefined value. Its intended usage depends
+ on StructType:
+
+ sqlite3_vtab: to be called from sqlite3_module::xDisconnect() or
+ xDestroy() implementations or in error handling of a failed
+ xCreate() or xConnect().
+
+ sqlite3_vtab_cursor: to be called from xClose() or during
+ cleanup in a failed xOpen().
+
+ Calling this method obligates the caller to call dispose() on
+ the returned object when they're done with it.
+ */
+ unget: (pCObj)=>__xWrap(pCObj,true),
+ /**
+ Works like unget() plus it calls dispose() on the
+ StructType object.
+ */
+ dispose: (pCObj)=>{
+ const o = __xWrap(pCObj,true);
+ if(o) o.dispose();
+ }
+ });
+ };
+
+ /**
+ A lifetime-management object for mapping `sqlite3_vtab*`
+ instances in sqlite3_module methods to capi.sqlite3_vtab
+ objects.
+
+ The API docs are in the API-internal StructPtrMapper().
+ */
+ vtab.xVtab = StructPtrMapper('xVtab', capi.sqlite3_vtab);
+
+ /**
+ A lifetime-management object for mapping `sqlite3_vtab_cursor*`
+ instances in sqlite3_module methods to capi.sqlite3_vtab_cursor
+ objects.
+
+ The API docs are in the API-internal StructPtrMapper().
+ */
+ vtab.xCursor = StructPtrMapper('xCursor', capi.sqlite3_vtab_cursor);
+
+ /**
+ Convenience form of creating an sqlite3_index_info wrapper,
+ intended for use in xBestIndex implementations. Note that the
+ caller is expected to call dispose() on the returned object
+ before returning. Though not _strictly_ required, as that object
+ does not own the pIdxInfo memory, it is nonetheless good form.
+ */
+ vtab.xIndexInfo = (pIdxInfo)=>new capi.sqlite3_index_info(pIdxInfo);
+
+ /**
+ Given an sqlite3_module method name and error object, this
+ function returns sqlite3.capi.SQLITE_NOMEM if (e instanceof
+ sqlite3.WasmAllocError), else it returns its second argument. Its
+ intended usage is in the methods of a sqlite3_vfs or
+ sqlite3_module:
+
+ ```
+ try{
+ let rc = ...
+ return rc;
+ }catch(e){
+ return sqlite3.vtab.xError(
+ 'xColumn', e, sqlite3.capi.SQLITE_XYZ);
+ // where SQLITE_XYZ is some call-appropriate result code.
+ }
+ ```
+
+ If no 3rd argument is provided, its default depends on
+ the error type:
+
+ - An sqlite3.WasmAllocError always resolves to capi.SQLITE_NOMEM.
+
+ - If err is an SQLite3Error then its `resultCode` property
+ is used.
+
+ - If all else fails, capi.SQLITE_ERROR is used.
+
+ If xError.errorReporter is a function, it is called in
+ order to report the error, else the error is not reported.
+ If that function throws, that exception is ignored.
+ */
+ vtab.xError = function f(methodName, err, defaultRc){
+ if(f.errorReporter instanceof Function){
+ try{f.errorReporter("sqlite3_module::"+methodName+"(): "+err.message);}
+ catch(e){/*ignored*/}
+ }
+ let rc;
+ if(err instanceof sqlite3.WasmAllocError) rc = capi.SQLITE_NOMEM;
+ else if(arguments.length>2) rc = defaultRc;
+ else if(err instanceof sqlite3.SQLite3Error) rc = err.resultCode;
+ return rc || capi.SQLITE_ERROR;
+ };
+ vtab.xError.errorReporter = 1 ? console.error.bind(console) : false;
+
+ /**
+ A helper for sqlite3_vtab::xRowid() and xUpdate()
+ implementations. It must be passed the final argument to one of
+ those methods (an output pointer to an int64 row ID) and the
+ value to store at the output pointer's address. Returns the same
+ as wasm.poke() and will throw if the 1st or 2nd arguments
+ are invalid for that function.
+
+ Example xRowid impl:
+
+ ```
+ const xRowid = (pCursor, ppRowid64)=>{
+ const c = vtab.xCursor(pCursor);
+ vtab.xRowid(ppRowid64, c.myRowId);
+ return 0;
+ };
+ ```
+ */
+ vtab.xRowid = (ppRowid64, value)=>wasm.poke(ppRowid64, value, 'i64');
+
+ /**
+ A helper to initialize and set up an sqlite3_module object for
+ later installation into individual databases using
+ sqlite3_create_module(). Requires an object with the following
+ properties:
+
+ - `methods`: an object containing a mapping of properties with
+ the C-side names of the sqlite3_module methods, e.g. xCreate,
+ xBestIndex, etc., to JS implementations for those functions.
+ Certain special-case handling is performed, as described below.
+
+ - `catchExceptions` (default=false): if truthy, the given methods
+ are not mapped as-is, but are instead wrapped inside wrappers
+ which translate exceptions into result codes of SQLITE_ERROR or
+ SQLITE_NOMEM, depending on whether the exception is an
+ sqlite3.WasmAllocError. In the case of the xConnect and xCreate
+ methods, the exception handler also sets the output error
+ string to the exception's error string.
+
+ - OPTIONAL `struct`: a sqlite3.capi.sqlite3_module() instance. If
+ not set, one will be created automatically. If the current
+ "this" is-a sqlite3_module then it is unconditionally used in
+ place of `struct`.
+
+ - OPTIONAL `iVersion`: if set, it must be an integer value and it
+ gets assigned to the `$iVersion` member of the struct object.
+ If it's _not_ set, and the passed-in `struct` object's `$iVersion`
+ is 0 (the default) then this function attempts to define a value
+ for that property based on the list of methods it has.
+
+ If `catchExceptions` is false, it is up to the client to ensure
+ that no exceptions escape the methods, as doing so would move
+ them through the C API, leading to undefined
+ behavior. (vtab.xError() is intended to assist in reporting
+ such exceptions.)
+
+ Certain methods may refer to the same implementation. To simplify
+ the definition of such methods:
+
+ - If `methods.xConnect` is `true` then the value of
+ `methods.xCreate` is used in its place, and vice versa. sqlite
+ treats xConnect/xCreate functions specially if they are exactly
+ the same function (same pointer value).
+
+ - If `methods.xDisconnect` is true then the value of
+ `methods.xDestroy` is used in its place, and vice versa.
+
+ This is to facilitate creation of those methods inline in the
+ passed-in object without requiring the client to explicitly get a
+ reference to one of them in order to assign it to the other
+ one.
+
+ The `catchExceptions`-installed handlers will account for
+ identical references to the above functions and will install the
+ same wrapper function for both.
+
+ The given methods are expected to return integer values, as
+ expected by the C API. If `catchExceptions` is truthy, the return
+ value of the wrapped function will be used as-is and will be
+ translated to 0 if the function returns a falsy value (e.g. if it
+ does not have an explicit return). If `catchExceptions` is _not_
+ active, the method implementations must explicitly return integer
+ values.
+
+ Throws on error. On success, returns the sqlite3_module object
+ (`this` or `opt.struct` or a new sqlite3_module instance,
+ depending on how it's called).
+ */
+ vtab.setupModule = function(opt){
+ let createdMod = false;
+ const mod = (this instanceof capi.sqlite3_module)
+ ? this : (opt.struct || (createdMod = new capi.sqlite3_module()));
+ try{
+ const methods = opt.methods || toss("Missing 'methods' object.");
+ for(const e of Object.entries({
+ // -----^ ==> [k,v] triggers a broken code transformation in
+ // some versions of the emsdk toolchain.
+ xConnect: 'xCreate', xDisconnect: 'xDestroy'
+ })){
+ // Remap X=true to X=Y for certain X/Y combinations
+ const k = e[0], v = e[1];
+ if(true === methods[k]) methods[k] = methods[v];
+ else if(true === methods[v]) methods[v] = methods[k];
+ }
+ if(opt.catchExceptions){
+ const fwrap = function(methodName, func){
+ if(['xConnect','xCreate'].indexOf(methodName) >= 0){
+ return function(pDb, pAux, argc, argv, ppVtab, pzErr){
+ try{return func(...arguments) || 0}
+ catch(e){
+ if(!(e instanceof sqlite3.WasmAllocError)){
+ wasm.dealloc(wasm.peekPtr(pzErr));
+ wasm.pokePtr(pzErr, wasm.allocCString(e.message));
+ }
+ return vtab.xError(methodName, e);
+ }
+ };
+ }else{
+ return function(...args){
+ try{return func(...args) || 0}
+ catch(e){
+ return vtab.xError(methodName, e);
+ }
+ };
+ }
+ };
+ const mnames = [
+ 'xCreate', 'xConnect', 'xBestIndex', 'xDisconnect',
+ 'xDestroy', 'xOpen', 'xClose', 'xFilter', 'xNext',
+ 'xEof', 'xColumn', 'xRowid', 'xUpdate',
+ 'xBegin', 'xSync', 'xCommit', 'xRollback',
+ 'xFindFunction', 'xRename', 'xSavepoint', 'xRelease',
+ 'xRollbackTo', 'xShadowName'
+ ];
+ const remethods = Object.create(null);
+ for(const k of mnames){
+ const m = methods[k];
+ if(!(m instanceof Function)) continue;
+ else if('xConnect'===k && methods.xCreate===m){
+ remethods[k] = methods.xCreate;
+ }else if('xCreate'===k && methods.xConnect===m){
+ remethods[k] = methods.xConnect;
+ }else{
+ remethods[k] = fwrap(k, m);
+ }
+ }
+ mod.installMethods(remethods, false);
+ }else{
+ // No automatic exception handling. Trust the client
+ // to not throw.
+ mod.installMethods(
+ methods, !!opt.applyArgcCheck/*undocumented option*/
+ );
+ }
+ if(0===mod.$iVersion){
+ let v;
+ if('number'===typeof opt.iVersion) v = opt.iVersion;
+ else if(mod.$xShadowName) v = 3;
+ else if(mod.$xSavePoint || mod.$xRelease || mod.$xRollbackTo) v = 2;
+ else v = 1;
+ mod.$iVersion = v;
+ }
+ }catch(e){
+ if(createdMod) createdMod.dispose();
+ throw e;
+ }
+ return mod;
+ }/*setupModule()*/;
+
+ /**
+ Equivalent to calling vtab.setupModule() with this sqlite3_module
+ object as the call's `this`.
+ */
+ capi.sqlite3_module.prototype.setupModule = function(opt){
+ return vtab.setupModule.call(this, opt);
+ };
+}/*sqlite3ApiBootstrap.initializers.push()*/);
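
For illustration, a standalone sketch of vtab.setupModule() and the xVtab/xCursor lifetime helpers described above. The method bodies are placeholders, and catchExceptions:true converts any exception they throw into an SQLITE_ERROR/SQLITE_NOMEM result code:

```
// Sketch: set up a small read-only virtual table module.
const vtab = sqlite3.vtab, capi = sqlite3.capi;
const mod = vtab.setupModule({
  catchExceptions: true,
  methods: {
    xConnect: function(pDb, pAux, argc, argv, ppVtab, pzErr){
      const rc = capi.sqlite3_declare_vtab(pDb, "CREATE TABLE x(a)");
      if(0===rc) vtab.xVtab.create(ppVtab);
      return rc;
    },
    xCreate: true /* alias for the xConnect impl */,
    xDisconnect: function(pVtab){
      vtab.xVtab.dispose(pVtab);
      return 0;
    },
    xDestroy: true /* alias for the xDisconnect impl */,
    xBestIndex: function(pVtab, pIdxInfo){
      const idx = vtab.xIndexInfo(pIdxInfo);
      idx.$estimatedCost = 10;
      idx.dispose();
      return 0;
    },
    xOpen: function(pVtab, ppCursor){
      vtab.xCursor.create(ppCursor)._rowId = 0;
      return 0;
    },
    xClose: function(pCursor){
      vtab.xCursor.dispose(pCursor);
      return 0;
    },
    xFilter: (pCursor, idxNum, idxStr, argc, argv)=>0,
    xNext: function(pCursor){
      ++vtab.xCursor.get(pCursor)._rowId;
      return 0;
    },
    xEof: (pCursor)=>(vtab.xCursor.get(pCursor)._rowId > 2 ? 1 : 0),
    xColumn: function(pCursor, pCtx, iCol){
      capi.sqlite3_result_int(pCtx, vtab.xCursor.get(pCursor)._rowId);
      return 0;
    },
    xRowid: function(pCursor, ppRowid64){
      vtab.xRowid(ppRowid64, vtab.xCursor.get(pCursor)._rowId);
      return 0;
    }
  }
});
// The module can then be registered on a given connection, e.g.:
//   capi.sqlite3_create_module(db.pointer, "demo_vtab", mod.pointer, 0);
```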
Index: ext/wasm/api/sqlite3-wasm.c
==================================================================
--- ext/wasm/api/sqlite3-wasm.c
+++ ext/wasm/api/sqlite3-wasm.c
@@ -144,10 +144,16 @@
# define SQLITE_OMIT_UTF16 1
#endif
#ifndef SQLITE_OS_KV_OPTIONAL
# define SQLITE_OS_KV_OPTIONAL 1
#endif
+
+/**********************************************************************/
+/* SQLITE_S... */
+#ifndef SQLITE_STRICT_SUBTYPE
+# define SQLITE_STRICT_SUBTYPE 1
+#endif
/**********************************************************************/
/* SQLITE_T... */
#ifndef SQLITE_TEMP_STORE
# define SQLITE_TEMP_STORE 2
@@ -230,40 +236,40 @@
** to work just fine.
**
** Another option is to malloc() a chunk of our own and call that our
** "stack".
*/
-SQLITE_WASM_EXPORT void * sqlite3_wasm_stack_end(void){
+SQLITE_WASM_EXPORT void * sqlite3__wasm_stack_end(void){
extern void __heap_base
/* see https://stackoverflow.com/questions/10038964 */;
return &__heap_base;
}
-SQLITE_WASM_EXPORT void * sqlite3_wasm_stack_begin(void){
+SQLITE_WASM_EXPORT void * sqlite3__wasm_stack_begin(void){
extern void __data_end;
return &__data_end;
}
static void * pWasmStackPtr = 0;
-SQLITE_WASM_EXPORT void * sqlite3_wasm_stack_ptr(void){
- if(!pWasmStackPtr) pWasmStackPtr = sqlite3_wasm_stack_end();
+SQLITE_WASM_EXPORT void * sqlite3__wasm_stack_ptr(void){
+ if(!pWasmStackPtr) pWasmStackPtr = sqlite3__wasm_stack_end();
return pWasmStackPtr;
}
-SQLITE_WASM_EXPORT void sqlite3_wasm_stack_restore(void * p){
+SQLITE_WASM_EXPORT void sqlite3__wasm_stack_restore(void * p){
pWasmStackPtr = p;
}
-SQLITE_WASM_EXPORT void * sqlite3_wasm_stack_alloc(int n){
+SQLITE_WASM_EXPORT void * sqlite3__wasm_stack_alloc(int n){
if(n<=0) return 0;
n = (n + 7) & ~7 /* align to 8-byte boundary */;
- unsigned char * const p = (unsigned char *)sqlite3_wasm_stack_ptr();
- unsigned const char * const b = (unsigned const char *)sqlite3_wasm_stack_begin();
+ unsigned char * const p = (unsigned char *)sqlite3__wasm_stack_ptr();
+ unsigned const char * const b = (unsigned const char *)sqlite3__wasm_stack_begin();
if(b + n >= p || b + n < b/*overflow*/) return 0;
return pWasmStackPtr = p - n;
}
#endif /* stack allocator experiment */
/*
** State for the "pseudo-stack" allocator implemented in
-** sqlite3_wasm_pstack_xyz(). In order to avoid colliding with
+** sqlite3__wasm_pstack_xyz(). In order to avoid colliding with
** Emscripten-controled stack space, it carves out a bit of stack
** memory to use for that purpose. This memory ends up in the
** WASM-managed memory, such that routines which manipulate the wasm
** heap can also be used to manipulate this memory.
**
@@ -283,18 +289,18 @@
&PStack_mem[0] + sizeof(PStack_mem)
};
/*
** Returns the current pstack position.
*/
-SQLITE_WASM_EXPORT void * sqlite3_wasm_pstack_ptr(void){
+SQLITE_WASM_EXPORT void * sqlite3__wasm_pstack_ptr(void){
return PStack.pPos;
}
/*
** Sets the pstack position poitner to p. Results are undefined if the
-** given value did not come from sqlite3_wasm_pstack_ptr().
+** given value did not come from sqlite3__wasm_pstack_ptr().
*/
-SQLITE_WASM_EXPORT void sqlite3_wasm_pstack_restore(unsigned char * p){
+SQLITE_WASM_EXPORT void sqlite3__wasm_pstack_restore(unsigned char * p){
assert(p>=PStack.pBegin && p<=PStack.pEnd && p>=PStack.pPos);
assert(0==((unsigned long long)p & 0x7));
if(p>=PStack.pBegin && p<=PStack.pEnd /*&& p>=PStack.pPos*/){
PStack.pPos = p;
}
@@ -305,11 +311,11 @@
** is always adjusted to be a multiple of 8 and returned memory is
** always zeroed out before returning (because this keeps the client
** JS code from having to do so, and most uses of the pstack will
** call for doing so).
*/
-SQLITE_WASM_EXPORT void * sqlite3_wasm_pstack_alloc(int n){
+SQLITE_WASM_EXPORT void * sqlite3__wasm_pstack_alloc(int n){
if( n<=0 ) return 0;
//if( n & 0x7 ) n += 8 - (n & 0x7) /* align to 8-byte boundary */;
n = (n + 7) & ~7 /* align to 8-byte boundary */;
if( PStack.pBegin + n > PStack.pPos /*not enough space left*/
|| PStack.pBegin + n <= PStack.pBegin /*overflow*/ ) return 0;
@@ -316,13 +322,13 @@
memset((PStack.pPos = PStack.pPos - n), 0, (unsigned int)n);
return PStack.pPos;
}
/*
** Return the number of bytes left which can be
-** sqlite3_wasm_pstack_alloc()'d.
+** sqlite3__wasm_pstack_alloc()'d.
*/
-SQLITE_WASM_EXPORT int sqlite3_wasm_pstack_remaining(void){
+SQLITE_WASM_EXPORT int sqlite3__wasm_pstack_remaining(void){
assert(PStack.pPos >= PStack.pBegin);
assert(PStack.pPos <= PStack.pEnd);
return (int)(PStack.pPos - PStack.pBegin);
}
@@ -329,11 +335,11 @@
/*
** Return the total number of bytes available in the pstack, including
** any space which is currently allocated. This value is a
** compile-time constant.
*/
-SQLITE_WASM_EXPORT int sqlite3_wasm_pstack_quota(void){
+SQLITE_WASM_EXPORT int sqlite3__wasm_pstack_quota(void){
return (int)(PStack.pEnd - PStack.pBegin);
}
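/*
** A minimal JS-side sketch of how this allocator is used (assuming the
** canonical JS glue exposes it as sqlite3.wasm.pstack, wrapping the
** sqlite3__wasm_pstack_xyz() exports):
**
** ```
** const stack = sqlite3.wasm.pstack.pointer;  // save the current position
** try {
**   const p = sqlite3.wasm.pstack.alloc(8);   // 8 zeroed, 8-byte-aligned bytes
**   // ... use p, e.g. as an output pointer for a C-side call ...
** } finally {
**   sqlite3.wasm.pstack.restore(stack);       // release everything alloc'd above
** }
** ```
*/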
/*
** This function is NOT part of the sqlite3 public API. It is strictly
@@ -348,11 +354,11 @@
** from client code.
**
** Returns err_code.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_db_error(sqlite3*db, int err_code, const char *zMsg){
+int sqlite3__wasm_db_error(sqlite3*db, int err_code, const char *zMsg){
if( db!=0 ){
if( 0!=zMsg ){
const int nMsg = sqlite3Strlen30(zMsg);
sqlite3_mutex_enter(sqlite3_db_mutex(db));
sqlite3ErrorWithMsg(db, err_code, "%.*s", nMsg, zMsg);
@@ -372,11 +378,11 @@
int64_t v8;
void (*xFunc)(void*);
};
typedef struct WasmTestStruct WasmTestStruct;
SQLITE_WASM_EXPORT
-void sqlite3_wasm_test_struct(WasmTestStruct * s){
+void sqlite3__wasm_test_struct(WasmTestStruct * s){
if(s){
s->v4 *= 2;
s->v8 = s->v4 * 2;
s->ppV = s;
s->cstr = __FILE__;
@@ -400,11 +406,11 @@
** If this function returns NULL then it means that the internal
** buffer is not large enough for the generated JSON and needs to be
** increased. In debug builds that will trigger an assert().
*/
SQLITE_WASM_EXPORT
-const char * sqlite3_wasm_enum_json(void){
+const char * sqlite3__wasm_enum_json(void){
static char aBuffer[1024 * 20] = {0} /* where the JSON goes */;
int n = 0, nChildren = 0, nStruct = 0
/* output counters for figuring out where commas go */;
char * zPos = &aBuffer[1] /* skip first byte for now to help protect
** against a small race condition */;
@@ -417,11 +423,11 @@
** instance might return and use the string before the 1st instance
** is done filling it. */
/* Core output macros... */
#define lenCheck assert(zPos < zEnd - 128 \
- && "sqlite3_wasm_enum_json() buffer is too small."); \
+ && "sqlite3__wasm_enum_json() buffer is too small."); \
if( zPos >= zEnd - 128 ) return 0
#define outf(format,...) \
zPos += snprintf(zPos, ((size_t)(zEnd - zPos)), format, __VA_ARGS__); \
lenCheck
#define out(TXT) outf("%s",TXT)
@@ -1095,11 +1101,11 @@
M(xRollbackTo, "i(pi)");
// ^^^ v2. v3+ follows...
M(xShadowName, "i(s)");
} _StructBinder;
#undef CurrentStruct
-
+
/**
** Workaround: in order to map the various inner structs from
** sqlite3_index_info, we have to uplift those into constructs we
** can access by type name. These structs _must_ match their
** in-sqlite3_index_info counterparts byte for byte.
@@ -1212,11 +1218,11 @@
** zName is NULL, no default VFS is found, or it has no xDelete
** method, SQLITE_MISUSE is returned, else the result of the xDelete()
** call is returned.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_vfs_unlink(sqlite3_vfs *pVfs, const char *zName){
+int sqlite3__wasm_vfs_unlink(sqlite3_vfs *pVfs, const char *zName){
int rc = SQLITE_MISUSE /* ??? */;
if( 0==pVfs && 0!=zName ) pVfs = sqlite3_vfs_find(0);
if( zName && pVfs && pVfs->xDelete ){
rc = pVfs->xDelete(pVfs, zName, 1);
}
@@ -1230,11 +1236,11 @@
** Returns a pointer to the given DB's VFS for the given DB name,
** defaulting to "main" if zDbName is 0. Returns 0 if no db with the
** given name is open.
*/
SQLITE_WASM_EXPORT
-sqlite3_vfs * sqlite3_wasm_db_vfs(sqlite3 *pDb, const char *zDbName){
+sqlite3_vfs * sqlite3__wasm_db_vfs(sqlite3 *pDb, const char *zDbName){
sqlite3_vfs * pVfs = 0;
sqlite3_file_control(pDb, zDbName ? zDbName : "main",
SQLITE_FCNTL_VFS_POINTER, &pVfs);
return pVfs;
}
@@ -1253,11 +1259,11 @@
**
** Returns 0 on success, an SQLITE_xxx code on error. Returns
** SQLITE_MISUSE if pDb is NULL.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_db_reset(sqlite3 *pDb){
+int sqlite3__wasm_db_reset(sqlite3 *pDb){
int rc = SQLITE_MISUSE;
if( pDb ){
sqlite3_table_column_metadata(pDb, "main", 0, 0, 0, 0, 0, 0, 0);
rc = sqlite3_db_config(pDb, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0);
if( 0==rc ){
@@ -1280,15 +1286,15 @@
** code from the callback. Note that this is not thread-friendly: it
** expects that it will be the only thread reading the db file and
** takes no measures to ensure that is the case.
**
** This implementation appears to work fine, but
-** sqlite3_wasm_db_serialize() is arguably the better way to achieve
+** sqlite3__wasm_db_serialize() is arguably the better way to achieve
** this.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_db_export_chunked( sqlite3* pDb,
+int sqlite3__wasm_db_export_chunked( sqlite3* pDb,
int (*xCallback)(unsigned const char *zOut, int n) ){
sqlite3_int64 nSize = 0;
sqlite3_int64 nPos = 0;
sqlite3_file * pFile = 0;
unsigned char buf[1024 * 8];
@@ -1335,11 +1341,11 @@
**
** If `*pOut` is not NULL, the caller is responsible for passing it to
** sqlite3_free() to free it.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_db_serialize( sqlite3 *pDb, const char *zSchema,
+int sqlite3__wasm_db_serialize( sqlite3 *pDb, const char *zSchema,
unsigned char **pOut,
sqlite3_int64 *nOut, unsigned int mFlags ){
unsigned char * z;
if( !pDb || !pOut ) return SQLITE_MISUSE;
if( nOut ) *nOut = 0;
@@ -1358,11 +1364,11 @@
**
** ACHTUNG: it was discovered on 2023-08-11 that, with SQLITE_DEBUG,
** this function's out-of-scope use of the sqlite3_vfs/file/io_methods
** APIs leads to triggering of assertions in the core library. Its use
** is now deprecated and VFS-specific APIs for importing files need to
-** be found to replace it. sqlite3_wasm_posix_create_file() is
+** be found to replace it. sqlite3__wasm_posix_create_file() is
** suitable for the "unix" family of VFSes.
**
** Creates a new file using the I/O API of the given VFS, containing
** the given number of bytes of the given data. If the file exists, it
** is truncated to the given length and populated with the given
@@ -1399,11 +1405,11 @@
** Design note: nData is an integer, instead of int64, for WASM
** portability, so that the API can still work in builds where BigInt
** support is disabled or unavailable.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_vfs_create_file( sqlite3_vfs *pVfs,
+int sqlite3__wasm_vfs_create_file( sqlite3_vfs *pVfs,
const char *zFilename,
const unsigned char * pData,
int nData ){
int rc;
sqlite3_file *pFile = 0;
@@ -1489,11 +1495,11 @@
** i.e. Emscripten's virtual filesystem. Creates or truncates
** zFilename, appends pData bytes to it, and returns 0 on success or
** SQLITE_IOERR on error.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_posix_create_file( const char *zFilename,
+int sqlite3__wasm_posix_create_file( const char *zFilename,
const unsigned char * pData,
int nData ){
int rc;
FILE * pFile = 0;
int fileExisted = 0;
@@ -1512,21 +1518,21 @@
/*
** This function is NOT part of the sqlite3 public API. It is strictly
** for use by the sqlite project's own JS/WASM bindings.
**
** Allocates sqlite3KvvfsMethods.nKeySize bytes from
-** sqlite3_wasm_pstack_alloc() and returns 0 if that allocation fails,
+** sqlite3__wasm_pstack_alloc() and returns 0 if that allocation fails,
** else it passes that string to kvstorageMakeKey() and returns a
** NUL-terminated pointer to that string. It is up to the caller to
-** use sqlite3_wasm_pstack_restore() to free the returned pointer.
+** use sqlite3__wasm_pstack_restore() to free the returned pointer.
*/
SQLITE_WASM_EXPORT
-char * sqlite3_wasm_kvvfsMakeKeyOnPstack(const char *zClass,
+char * sqlite3__wasm_kvvfsMakeKeyOnPstack(const char *zClass,
const char *zKeyIn){
assert(sqlite3KvvfsMethods.nKeySize>24);
char *zKeyOut =
- (char *)sqlite3_wasm_pstack_alloc(sqlite3KvvfsMethods.nKeySize);
+ (char *)sqlite3__wasm_pstack_alloc(sqlite3KvvfsMethods.nKeySize);
if(zKeyOut){
kvstorageMakeKey(zClass, zKeyIn, zKeyOut);
}
return zKeyOut;
}
@@ -1537,11 +1543,11 @@
**
** Returns the pointer to the singleton object which holds the kvvfs
** I/O methods and associated state.
*/
SQLITE_WASM_EXPORT
-sqlite3_kvvfs_methods * sqlite3_wasm_kvvfs_methods(void){
+sqlite3_kvvfs_methods * sqlite3__wasm_kvvfs_methods(void){
return &sqlite3KvvfsMethods;
}
/*
** This function is NOT part of the sqlite3 public API. It is strictly
@@ -1552,11 +1558,11 @@
** value of its 2nd argument. Returns the result of
** sqlite3_vtab_config(), or SQLITE_MISUSE if the 2nd arg is not a
** valid value.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_vtab_config(sqlite3 *pDb, int op, int arg){
+int sqlite3__wasm_vtab_config(sqlite3 *pDb, int op, int arg){
switch(op){
case SQLITE_VTAB_DIRECTONLY:
case SQLITE_VTAB_INNOCUOUS:
return sqlite3_vtab_config(pDb, op);
case SQLITE_VTAB_CONSTRAINT_SUPPORT:
@@ -1572,11 +1578,11 @@
**
** Wrapper for the variants of sqlite3_db_config() which take
** (int,int*) variadic args.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_db_config_ip(sqlite3 *pDb, int op, int arg1, int* pArg2){
+int sqlite3__wasm_db_config_ip(sqlite3 *pDb, int op, int arg1, int* pArg2){
switch(op){
case SQLITE_DBCONFIG_ENABLE_FKEY:
case SQLITE_DBCONFIG_ENABLE_TRIGGER:
case SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER:
case SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION:
@@ -1605,11 +1611,11 @@
**
** Wrapper for the variants of sqlite3_db_config() which take
** (void*,int,int) variadic args.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_db_config_pii(sqlite3 *pDb, int op, void * pArg1, int arg2, int arg3){
+int sqlite3__wasm_db_config_pii(sqlite3 *pDb, int op, void * pArg1, int arg2, int arg3){
switch(op){
case SQLITE_DBCONFIG_LOOKASIDE:
return sqlite3_db_config(pDb, op, pArg1, arg2, arg3);
default: return SQLITE_MISUSE;
}
@@ -1621,11 +1627,11 @@
**
** Wrapper for the variants of sqlite3_db_config() which take
** (const char *) variadic args.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_db_config_s(sqlite3 *pDb, int op, const char *zArg){
+int sqlite3__wasm_db_config_s(sqlite3 *pDb, int op, const char *zArg){
switch(op){
case SQLITE_DBCONFIG_MAINDBNAME:
return sqlite3_db_config(pDb, op, zArg);
default: return SQLITE_MISUSE;
}
@@ -1638,11 +1644,11 @@
**
** Binding for combinations of sqlite3_config() arguments which take
** a single integer argument.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_config_i(int op, int arg){
+int sqlite3__wasm_config_i(int op, int arg){
return sqlite3_config(op, arg);
}
/*
** This function is NOT part of the sqlite3 public API. It is strictly
@@ -1650,11 +1656,11 @@
**
** Binding for combinations of sqlite3_config() arguments which take
** two int arguments.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_config_ii(int op, int arg1, int arg2){
+int sqlite3__wasm_config_ii(int op, int arg1, int arg2){
return sqlite3_config(op, arg1, arg2);
}
/*
** This function is NOT part of the sqlite3 public API. It is strictly
@@ -1662,11 +1668,11 @@
**
** Binding for combinations of sqlite3_config() arguments which take
** a single i64 argument.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_config_j(int op, sqlite3_int64 arg){
+int sqlite3__wasm_config_j(int op, sqlite3_int64 arg){
return sqlite3_config(op, arg);
}
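/*
** A sketch of how these per-signature wrappers are reached from JS (an
** assumption about the glue code: the JS-side sqlite3_config() binding
** picks the sqlite3__wasm_config_X() variant whose signature matches the
** arguments for the given op):
**
** ```
** // e.g. disable memory-usage tracking before sqlite3_initialize():
** sqlite3.capi.sqlite3_config(sqlite3.capi.SQLITE_CONFIG_MEMSTATUS, 0);
** ```
*/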
#if 0
// Pending removal after verification of a workaround discussed in the
@@ -1681,21 +1687,21 @@
** sqlite3.wasm.exports.sqlite3_free. i.e. from a dev console where
** sqlite3 is exported globally, the following must be true:
**
** ```
** sqlite3.wasm.functionEntry(
-** sqlite3.wasm.exports.sqlite3_wasm_ptr_to_sqlite3_free()
+** sqlite3.wasm.exports.sqlite3__wasm_ptr_to_sqlite3_free()
** ) === sqlite3.wasm.exports.sqlite3_free
** ```
**
** Using a function to return this pointer, as opposed to exporting it
-** via sqlite3_wasm_enum_json(), is an attempt to work around a
+** via sqlite3__wasm_enum_json(), is an attempt to work around a
** Safari-specific quirk covered at
** https://sqlite.org/forum/info/e5b20e1feb37a19a.
**/
SQLITE_WASM_EXPORT
-void * sqlite3_wasm_ptr_to_sqlite3_free(void){
+void * sqlite3__wasm_ptr_to_sqlite3_free(void){
return (void*)sqlite3_free;
}
#endif
#if defined(__EMSCRIPTEN__) && defined(SQLITE_ENABLE_WASMFS)
@@ -1721,11 +1727,11 @@
** object fails, SQLITE_IOERR if mkdir() of the zMountPoint dir in
** the virtual FS fails. In builds compiled without SQLITE_ENABLE_WASMFS
** defined, SQLITE_NOTFOUND is returned without side effects.
*/
SQLITE_WASM_EXPORT
-int sqlite3_wasm_init_wasmfs(const char *zMountPoint){
+int sqlite3__wasm_init_wasmfs(const char *zMountPoint){
static backend_t pOpfs = 0;
if( !zMountPoint || !*zMountPoint ) zMountPoint = "/opfs";
if( !pOpfs ){
pOpfs = wasmfs_create_opfs_backend();
}
@@ -1741,65 +1747,65 @@
}
return pOpfs ? 0 : SQLITE_NOMEM;
}
#else
SQLITE_WASM_EXPORT
-int sqlite3_wasm_init_wasmfs(const char *zUnused){
+int sqlite3__wasm_init_wasmfs(const char *zUnused){
//emscripten_console_warn("WASMFS OPFS is not compiled in.");
if(zUnused){/*unused*/}
return SQLITE_NOTFOUND;
}
#endif /* __EMSCRIPTEN__ && SQLITE_ENABLE_WASMFS */
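/*
** A hedged JS-side sketch: callers are assumed to reach this function
** through the generic call machinery rather than a dedicated wrapper:
**
** ```
** const rc = sqlite3.wasm.xCallWrapped(
**   'sqlite3__wasm_init_wasmfs', 'i32', ['string'], '/opfs'
** );
** // rc is 0 on success, SQLITE_NOTFOUND if WASMFS support is not built in.
** ```
*/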
#if SQLITE_WASM_TESTS
SQLITE_WASM_EXPORT
-int sqlite3_wasm_test_intptr(int * p){
+int sqlite3__wasm_test_intptr(int * p){
return *p = *p * 2;
}
SQLITE_WASM_EXPORT
-void * sqlite3_wasm_test_voidptr(void * p){
+void * sqlite3__wasm_test_voidptr(void * p){
return p;
}
SQLITE_WASM_EXPORT
-int64_t sqlite3_wasm_test_int64_max(void){
+int64_t sqlite3__wasm_test_int64_max(void){
return (int64_t)0x7fffffffffffffff;
}
SQLITE_WASM_EXPORT
-int64_t sqlite3_wasm_test_int64_min(void){
- return ~sqlite3_wasm_test_int64_max();
+int64_t sqlite3__wasm_test_int64_min(void){
+ return ~sqlite3__wasm_test_int64_max();
}
SQLITE_WASM_EXPORT
-int64_t sqlite3_wasm_test_int64_times2(int64_t x){
+int64_t sqlite3__wasm_test_int64_times2(int64_t x){
return x * 2;
}
SQLITE_WASM_EXPORT
-void sqlite3_wasm_test_int64_minmax(int64_t * min, int64_t *max){
- *max = sqlite3_wasm_test_int64_max();
- *min = sqlite3_wasm_test_int64_min();
+void sqlite3__wasm_test_int64_minmax(int64_t * min, int64_t *max){
+ *max = sqlite3__wasm_test_int64_max();
+ *min = sqlite3__wasm_test_int64_min();
/*printf("minmax: min=%lld, max=%lld\n", *min, *max);*/
}
SQLITE_WASM_EXPORT
-int64_t sqlite3_wasm_test_int64ptr(int64_t * p){
- /*printf("sqlite3_wasm_test_int64ptr( @%lld = 0x%llx )\n", (int64_t)p, *p);*/
+int64_t sqlite3__wasm_test_int64ptr(int64_t * p){
+ /*printf("sqlite3__wasm_test_int64ptr( @%lld = 0x%llx )\n", (int64_t)p, *p);*/
return *p = *p * 2;
}
SQLITE_WASM_EXPORT
-void sqlite3_wasm_test_stack_overflow(int recurse){
- if(recurse) sqlite3_wasm_test_stack_overflow(recurse);
+void sqlite3__wasm_test_stack_overflow(int recurse){
+ if(recurse) sqlite3__wasm_test_stack_overflow(recurse);
}
/* For testing the 'string:dealloc' whwasmutil.xWrap() conversion. */
SQLITE_WASM_EXPORT
-char * sqlite3_wasm_test_str_hello(int fail){
+char * sqlite3__wasm_test_str_hello(int fail){
char * s = fail ? 0 : (char *)sqlite3_malloc(6);
if(s){
memcpy(s, "hello", 5);
s[5] = 0;
}
@@ -1830,16 +1836,16 @@
**
** '#' Matches any sequence of one or more digits with an
** optional + or - sign in front, or a hexadecimal
** literal of the form 0x...
*/
-static int sqlite3_wasm_SQLTester_strnotglob(const char *zGlob, const char *z){
+static int sqlite3__wasm_SQLTester_strnotglob(const char *zGlob, const char *z){
int c, c2;
int invert;
int seen;
typedef int (*recurse_f)(const char *,const char *);
- static const recurse_f recurse = sqlite3_wasm_SQLTester_strnotglob;
+ static const recurse_f recurse = sqlite3__wasm_SQLTester_strnotglob;
while( (c = (*(zGlob++)))!=0 ){
if( c=='*' ){
while( (c=(*(zGlob++))) == '*' || c=='?' ){
if( c=='?' && (*(z++))==0 ) return 0;
@@ -1910,13 +1916,12 @@
}
return *z==0;
}
SQLITE_WASM_EXPORT
-int sqlite3_wasm_SQLTester_strglob(const char *zGlob, const char *z){
- return !sqlite3_wasm_SQLTester_strnotglob(zGlob, z);
+int sqlite3__wasm_SQLTester_strglob(const char *zGlob, const char *z){
+ return !sqlite3__wasm_SQLTester_strnotglob(zGlob, z);
}
-
#endif /* SQLITE_WASM_TESTS */
#undef SQLITE_WASM_EXPORT
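On the JS side these renamed helpers are no longer reached via the public capi
namespace; later changes in this diff access them through sqlite3.util or the
raw wasm.exports object. A brief sketch of the new spelling (the db file name
is just the scratch file used by tester1.c-pp.js below):

```
sqlite3.util.sqlite3__wasm_vfs_unlink(0, '/tester1.db');
const haveTestApi = !!sqlite3.wasm.exports.sqlite3__wasm_test_intptr;
```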
Index: ext/wasm/api/sqlite3-worker1-promiser.c-pp.js
==================================================================
--- ext/wasm/api/sqlite3-worker1-promiser.c-pp.js
+++ ext/wasm/api/sqlite3-worker1-promiser.c-pp.js
@@ -1,5 +1,6 @@
+//#ifnot omit-oo1
/*
2022-08-24
The author disclaims copyright to this source code. In place of a
legal notice, here is a blessing:
@@ -274,5 +275,8 @@
})
//#endif
,
onerror: (...args)=>console.error('worker1 promiser error',...args)
};
+//#else
+/* Built with the omit-oo1 flag. */
+//#endif ifnot omit-oo1
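The code guarded by the new omit-oo1 flag is the promise-based wrapper around
the Worker1 message API. A rough usage sketch, assuming the documented
sqlite3Worker1Promiser() entry point and the 'open'/'exec'/'close' message
types (the filename is a placeholder):

```
const promiser = await new Promise((resolve)=>{
  const p = globalThis.sqlite3Worker1Promiser({onready: ()=>resolve(p)});
});
const openMsg = await promiser('open', {filename: '/demo.db'});
await promiser('exec', {dbId: openMsg.dbId, sql: 'create table if not exists t(a)'});
await promiser('close', {dbId: openMsg.dbId});
```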
Index: ext/wasm/api/sqlite3-worker1.c-pp.js
==================================================================
--- ext/wasm/api/sqlite3-worker1.c-pp.js
+++ ext/wasm/api/sqlite3-worker1.c-pp.js
@@ -1,5 +1,6 @@
+//#ifnot omit-oo1
/*
2022-05-23
The author disclaims copyright to this source code. In place of a
legal notice, here is a blessing:
@@ -46,5 +47,8 @@
//console.warn("worker1 theJs =",theJs);
importScripts(theJs);
}
//#endif
sqlite3InitModule().then(sqlite3 => sqlite3.initWorker1API());
+//#else
+/* Built with the omit-oo1 flag. */
+//#endif ifnot omit-oo1
ADDED ext/wasm/batch-runner-sahpool.html
Index: ext/wasm/batch-runner-sahpool.html
==================================================================
--- /dev/null
+++ ext/wasm/batch-runner-sahpool.html
@@ -0,0 +1,86 @@
+
+
+
+
+
+
+
+ sqlite3-api batch SQL runner for the SAHPool VFS
+
+
+ sqlite3-api batch SQL runner for the SAHPool VFS
+
+
+
+
+
+
+
+
+
+
+
ADDED ext/wasm/batch-runner-sahpool.js
Index: ext/wasm/batch-runner-sahpool.js
==================================================================
--- /dev/null
+++ ext/wasm/batch-runner-sahpool.js
@@ -0,0 +1,341 @@
+/*
+ 2023-11-30
+
+ The author disclaims copyright to this source code. In place of a
+ legal notice, here is a blessing:
+
+ * May you do good and not evil.
+ * May you find forgiveness for yourself and forgive others.
+ * May you share freely, never taking more than you give.
+
+ ***********************************************************************
+
+ A basic batch SQL runner for the SAHPool VFS. This file must be run in
+ a worker thread. This is not a full-featured app, just a way to get some
+ measurements for batch execution of SQL for the OPFS SAH Pool VFS.
+*/
+'use strict';
+
+const wMsg = function(msgType,...args){
+ postMessage({
+ type: msgType,
+ data: args
+ });
+};
+const toss = function(...args){throw new Error(args.join(' '))};
+const warn = (...args)=>{ wMsg('warn',...args); };
+const error = (...args)=>{ wMsg('error',...args); };
+const log = (...args)=>{ wMsg('stdout',...args); }
+let sqlite3;
+const urlParams = new URL(globalThis.location.href).searchParams;
+const cacheSize = (()=>{
+ if(urlParams.has('cachesize')) return +urlParams.get('cachesize');
+ return 200;
+})();
+
+
+/** Throws if the given sqlite3 result code is not 0. */
+const checkSqliteRc = (dbh,rc)=>{
+ if(rc) toss("Prepare failed:",sqlite3.capi.sqlite3_errmsg(dbh));
+};
+
+const sqlToDrop = [
+ "SELECT type,name FROM sqlite_schema ",
+ "WHERE name NOT LIKE 'sqlite\\_%' escape '\\' ",
+ "AND name NOT LIKE '\\_%' escape '\\'"
+].join('');
+
+const clearDbSqlite = function(db){
+ // This would be SO much easier with the oo1 API, but we specifically want to
+ // inject metrics we can't get via that API, and we cannot reliably (OPFS)
+ // open the same DB twice to clear it using that API, so...
+ const rc = sqlite3.wasm.exports.sqlite3_wasm_db_reset(db.handle);
+ log("reset db rc =",rc,db.id, db.filename);
+};
+
+const App = {
+ db: undefined,
+ cache:Object.create(null),
+ log: log,
+ warn: warn,
+ error: error,
+ metrics: {
+ fileCount: 0,
+ runTimeMs: 0,
+ prepareTimeMs: 0,
+ stepTimeMs: 0,
+ stmtCount: 0,
+ strcpyMs: 0,
+ sqlBytes: 0
+ },
+ fileList: undefined,
+ execSql: async function(name,sql){
+ const db = this.db;
+ const banner = "========================================";
+ this.log(banner,
+ "Running",name,'('+sql.length,'bytes)');
+ const capi = this.sqlite3.capi, wasm = this.sqlite3.wasm;
+ let pStmt = 0, pSqlBegin;
+ const metrics = db.metrics = Object.create(null);
+ metrics.prepTotal = metrics.stepTotal = 0;
+ metrics.stmtCount = 0;
+ metrics.malloc = 0;
+ metrics.strcpy = 0;
+ if(this.gotErr){
+ this.error("Cannot run SQL: error cleanup is pending.");
+ return;
+ }
+ // Run this async so that the UI can be updated for the above header...
+ const endRun = ()=>{
+ metrics.evalSqlEnd = performance.now();
+ metrics.evalTimeTotal = (metrics.evalSqlEnd - metrics.evalSqlStart);
+ this.log("metrics:",JSON.stringify(metrics, undefined, ' '));
+ this.log("prepare() count:",metrics.stmtCount);
+ this.log("Time in prepare_v2():",metrics.prepTotal,"ms",
+ "("+(metrics.prepTotal / metrics.stmtCount),"ms per prepare())");
+ this.log("Time in step():",metrics.stepTotal,"ms",
+ "("+(metrics.stepTotal / metrics.stmtCount),"ms per step())");
+ this.log("Total runtime:",metrics.evalTimeTotal,"ms");
+ this.log("Overhead (time - prep - step):",
+ (metrics.evalTimeTotal - metrics.prepTotal - metrics.stepTotal)+"ms");
+ this.log(banner,"End of",name);
+ this.metrics.prepareTimeMs += metrics.prepTotal;
+ this.metrics.stepTimeMs += metrics.stepTotal;
+ this.metrics.stmtCount += metrics.stmtCount;
+ this.metrics.strcpyMs += metrics.strcpy;
+ this.metrics.sqlBytes += sql.length;
+ };
+
+ const runner = function(resolve, reject){
+ ++this.metrics.fileCount;
+ metrics.evalSqlStart = performance.now();
+ const stack = wasm.scopedAllocPush();
+ try {
+ let t, rc;
+ let sqlByteLen = sql.byteLength;
+ const [ppStmt, pzTail] = wasm.scopedAllocPtr(2);
+ t = performance.now();
+ pSqlBegin = wasm.scopedAlloc( sqlByteLen + 1/*SQL + NUL*/) || toss("alloc(",sqlByteLen,") failed");
+ metrics.malloc = performance.now() - t;
+ metrics.byteLength = sqlByteLen;
+ let pSql = pSqlBegin;
+ const pSqlEnd = pSqlBegin + sqlByteLen;
+ t = performance.now();
+ wasm.heap8().set(sql, pSql);
+ wasm.poke(pSql + sqlByteLen, 0);
+ //log("SQL:",wasm.cstrToJs(pSql));
+ metrics.strcpy = performance.now() - t;
+ let breaker = 0;
+ while(pSql && wasm.peek8(pSql)){
+ wasm.pokePtr(ppStmt, 0);
+ wasm.pokePtr(pzTail, 0);
+ t = performance.now();
+ rc = capi.sqlite3_prepare_v2(
+ db.handle, pSql, sqlByteLen, ppStmt, pzTail
+ );
+ metrics.prepTotal += performance.now() - t;
+ checkSqliteRc(db.handle, rc);
+ pStmt = wasm.peekPtr(ppStmt);
+ pSql = wasm.peekPtr(pzTail);
+ sqlByteLen = pSqlEnd - pSql;
+ if(!pStmt) continue/*empty statement*/;
+ ++metrics.stmtCount;
+ t = performance.now();
+ rc = capi.sqlite3_step(pStmt);
+ capi.sqlite3_finalize(pStmt);
+ pStmt = 0;
+ metrics.stepTotal += performance.now() - t;
+ switch(rc){
+ case capi.SQLITE_ROW:
+ case capi.SQLITE_DONE: break;
+ default: checkSqliteRc(db.handle, rc); toss("Not reached.");
+ }
+ }
+ resolve(this);
+ }catch(e){
+ if(pStmt) capi.sqlite3_finalize(pStmt);
+ this.gotErr = e;
+ reject(e);
+ }finally{
+ capi.sqlite3_exec(db.handle,"rollback;",0,0,0);
+ wasm.scopedAllocPop(stack);
+ }
+ }.bind(this);
+ const p = new Promise(runner);
+ return p.catch(
+ (e)=>this.error("Error via execSql("+name+",...):",e.message)
+ ).finally(()=>{
+ endRun();
+ });
+ },
+
+ /**
+ Loads batch-runner.list and populates App.fileList from
+ it. Returns a promise which resolves to nothing in particular
+ when it completes. Only intended to be run once at the start
+ of the app.
+ */
+ loadSqlList: async function(){
+ const infile = 'batch-runner.list';
+ this.log("Loading list of SQL files:", infile);
+ let txt;
+ try{
+ const r = await fetch(infile);
+ if(404 === r.status){
+ toss("Missing file '"+infile+"'.");
+ }
+ if(!r.ok) toss("Loading",infile,"failed:",r.statusText);
+ txt = await r.text();
+ }catch(e){
+ this.error(e.message);
+ throw e;
+ }
+ App.fileList = txt.split(/\n+/).filter(x=>!!x);
+ this.log("Loaded",infile);
+ },
+
+ /** Fetch ./fn and return its contents as a Uint8Array. */
+ fetchFile: async function(fn, cacheIt=false){
+ if(cacheIt && this.cache[fn]) return this.cache[fn];
+ this.log("Fetching",fn,"...");
+ let sql;
+ try {
+ const r = await fetch(fn);
+ if(!r.ok) toss("Fetch failed:",r.statusText);
+ sql = new Uint8Array(await r.arrayBuffer());
+ }catch(e){
+ this.error(e.message);
+ throw e;
+ }
+ this.log("Fetched",sql.length,"bytes from",fn);
+ if(cacheIt) this.cache[fn] = sql;
+ return sql;
+ }/*fetchFile()*/,
+
+ /**
+ Converts this.metrics() to a form which is suitable for easy conversion to
+ CSV. It returns an array of arrays. The first sub-array is the column names.
+ The 2nd and subsequent are the values, one per test file (only the most recent
+ metrics are kept for any given file).
+ */
+ metricsToArrays: function(){
+ const rc = [];
+ Object.keys(this.dbs).sort().forEach((k)=>{
+ const d = this.dbs[k];
+ const m = d.metrics;
+ delete m.evalSqlStart;
+ delete m.evalSqlEnd;
+ const mk = Object.keys(m).sort();
+ if(!rc.length){
+ rc.push(['db', ...mk]);
+ }
+ const row = [k.split('/').pop()/*remove dir prefix from filename*/];
+ rc.push(row);
+ row.push(...mk.map((kk)=>m[kk]));
+ });
+ return rc;
+ },
+
+ metricsToBlob: function(colSeparator='\t'){
+ const ar = [], ma = this.metricsToArrays();
+ if(!ma.length){
+ this.error("Metrics are empty. Run something.");
+ return;
+ }
+ ma.forEach(function(row){
+ ar.push(row.join(colSeparator),'\n');
+ });
+ return new Blob(ar);
+ },
+
+ /**
+ Fetch file fn and eval it as an SQL blob. This is an async
+ operation and returns a Promise which resolves to this
+ object on success.
+ */
+ evalFile: async function(fn){
+ const sql = await this.fetchFile(fn);
+ return this.execSql(fn,sql);
+ }/*evalFile()*/,
+
+ /**
+ Opens the app's db (/batch.db) on the SAHPool VFS and returns
+ an object holding its handle and filename.
+ */
+ initDb: function(){
+ const capi = this.sqlite3.capi, wasm = this.sqlite3.wasm;
+ const stack = wasm.scopedAllocPush();
+ let pDb = 0;
+ const d = Object.create(null);
+ d.filename = "/batch.db";
+ try{
+ const oFlags = capi.SQLITE_OPEN_CREATE | capi.SQLITE_OPEN_READWRITE;
+ const ppDb = wasm.scopedAllocPtr();
+ const rc = capi.sqlite3_open_v2(d.filename, ppDb, oFlags, this.PoolUtil.vfsName);
+ pDb = wasm.peekPtr(ppDb)
+ if(rc) toss("sqlite3_open_v2() failed with code",rc);
+ capi.sqlite3_exec(pDb, "PRAGMA cache_size="+cacheSize, 0, 0, 0);
+ this.log("cache_size =",cacheSize);
+ }catch(e){
+ if(pDb) capi.sqlite3_close_v2(pDb);
+ throw e;
+ }finally{
+ wasm.scopedAllocPop(stack);
+ }
+ d.handle = pDb;
+ this.log("Opened db:",d.filename,'@',d.handle);
+ return d;
+ },
+
+ closeDb: function(){
+ if(this.db.handle){
+ this.sqlite3.capi.sqlite3_close_v2(this.db.handle);
+ this.db.handle = undefined;
+ }
+ },
+
+ run: async function(sqlite3){
+ delete this.run;
+ this.sqlite3 = sqlite3;
+ const capi = sqlite3.capi, wasm = sqlite3.wasm;
+ this.log("Loaded module:",capi.sqlite3_libversion(), capi.sqlite3_sourceid());
+ this.log("WASM heap size =",wasm.heap8().length);
+ let timeStart;
+ sqlite3.installOpfsSAHPoolVfs({
+ clearOnInit: true, initialCapacity: 4,
+ name: 'batch-sahpool',
+ verbosity: 2
+ }).then(PoolUtil=>{
+ App.PoolUtil = PoolUtil;
+ App.db = App.initDb();
+ })
+ .then(async ()=>this.loadSqlList())
+ .then(async ()=>{
+ timeStart = performance.now();
+ for(let i = 0; i < App.fileList.length; ++i){
+ const fn = App.fileList[i];
+ await App.evalFile(fn);
+ if(App.gotErr) throw App.gotErr;
+ }
+ })
+ .then(()=>{
+ App.metrics.runTimeMs = performance.now() - timeStart;
+ App.log("total metrics:",JSON.stringify(App.metrics, undefined, ' '));
+ App.log("Reload the page to run this again.");
+ App.closeDb();
+ App.PoolUtil.removeVfs();
+ })
+ .catch(e=>this.error("ERROR:",e));
+ }/*run()*/
+}/*App*/;
+
+let sqlite3Js = 'sqlite3.js';
+if(urlParams.has('sqlite3.dir')){
+ sqlite3Js = urlParams.get('sqlite3.dir') + '/' + sqlite3Js;
+}
+importScripts(sqlite3Js);
+globalThis.sqlite3InitModule().then(async function(sqlite3_){
+ log("Done initializing. Running batch runner...");
+ sqlite3 = sqlite3_;
+ App.run(sqlite3_);
+});
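A hypothetical page-side counterpart (the names and the jswasm directory are
assumptions, not part of this diff): the worker is started with a sqlite3.dir
URL parameter and reports back via the {type, data} messages posted by wMsg():

```
const w = new Worker('batch-runner-sahpool.js?sqlite3.dir=jswasm');
w.onmessage = ({data})=>{
  const line = (data.data || []).join(' ');
  if('error' === data.type) console.error(line);
  else console.log(data.type, line);
};
```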
Index: ext/wasm/batch-runner.js
==================================================================
--- ext/wasm/batch-runner.js
+++ ext/wasm/batch-runner.js
@@ -70,11 +70,10 @@
// open the same DB twice to clear it using that API, so...
const rc = sqlite3.wasm.exports.sqlite3_wasm_db_reset(db.handle);
App.logHtml("reset db rc =",rc,db.id, db.filename);
};
-
const E = (s)=>document.querySelector(s);
const App = {
e: {
output: E('#test-output'),
selSql: E('#sql-select'),
@@ -89,10 +88,19 @@
fsToolbar: E('#toolbar')
},
db: Object.create(null),
dbs: Object.create(null),
cache:{},
+ metrics: {
+ fileCount: 0,
+ runTimeMs: 0,
+ prepareTimeMs: 0,
+ stepTimeMs: 0,
+ stmtCount: 0,
+ strcpyMs: 0,
+ sqlBytes: 0
+ },
log: console.log.bind(console),
warn: console.warn.bind(console),
cls: function(){this.e.output.innerHTML = ''},
logHtml2: function(cssClass,...args){
const ln = document.createElement('div');
@@ -115,11 +123,10 @@
const banner = "========================================";
this.logHtml(banner,
"Running",name,'('+sql.length,'bytes) using',db.id);
const capi = this.sqlite3.capi, wasm = this.sqlite3.wasm;
let pStmt = 0, pSqlBegin;
- const stack = wasm.scopedAllocPush();
const metrics = db.metrics = Object.create(null);
metrics.prepTotal = metrics.stepTotal = 0;
metrics.stmtCount = 0;
metrics.malloc = 0;
metrics.strcpy = 0;
@@ -140,10 +147,15 @@
"("+(metrics.stepTotal / metrics.stmtCount),"ms per step())");
this.logHtml("Total runtime:",metrics.evalTimeTotal,"ms");
this.logHtml("Overhead (time - prep - step):",
(metrics.evalTimeTotal - metrics.prepTotal - metrics.stepTotal)+"ms");
this.logHtml(banner,"End of",name);
+ this.metrics.prepareTimeMs += metrics.prepTotal;
+ this.metrics.stepTimeMs += metrics.stepTotal;
+ this.metrics.stmtCount += metrics.stmtCount;
+ this.metrics.strcpyMs += metrics.strcpy;
+ this.metrics.sqlBytes += sql.length;
};
let runner;
if('websql'===db.id){
const who = this;
@@ -212,11 +224,13 @@
//reject(e);
}
}.bind(this);
}else{/*sqlite3 db...*/
runner = function(resolve, reject){
+ ++this.metrics.fileCount;
metrics.evalSqlStart = performance.now();
+ const stack = wasm.scopedAllocPush();
try {
let t;
let sqlByteLen = sql.byteLength;
const [ppStmt, pzTail] = wasm.scopedAllocPtr(2);
t = performance.now();
@@ -267,11 +281,11 @@
}.bind(this);
}
let p;
if(1){
p = new Promise(function(res,rej){
- setTimeout(()=>runner(res, rej), 50)/*give UI a chance to output the "running" banner*/;
+ setTimeout(()=>runner(res, rej), 0)/*give UI a chance to output the "running" banner*/;
});
}else{
p = new Promise(runner);
}
return p.catch(
@@ -399,11 +413,11 @@
ma.forEach(function(row){
ar.push(row.join(colSeparator),'\n');
});
return new Blob(ar);
},
-
+
downloadMetrics: function(){
const b = this.metricsToBlob();
if(!b) return;
const url = URL.createObjectURL(b);
const a = document.createElement('a');
@@ -574,10 +588,12 @@
v = who.e.selSql.value;
}
const timeTotal = performance.now() - timeStart;
who.logHtml("Run-remaining time:",timeTotal,"ms ("+(timeTotal/1000/60)+" minute(s))");
who.clearStorage();
+ App.metrics.runTimeMs = timeTotal;
+ who.logHtml("Total metrics:",JSON.stringify(App.metrics,undefined,' '));
}, false);
}/*run()*/
}/*App*/;
self.sqlite3TestModule.initSqlite3().then(function(sqlite3_){
Index: ext/wasm/demo-123.js
==================================================================
--- ext/wasm/demo-123.js
+++ ext/wasm/demo-123.js
@@ -18,11 +18,11 @@
Set up our output channel differently depending
on whether we are running in a worker thread or
the main (UI) thread.
*/
let logHtml;
- if(self.window === self /* UI thread */){
+ if(globalThis.window === globalThis /* UI thread */){
console.log("Running demo from main UI thread.");
logHtml = function(cssClass,...args){
const ln = document.createElement('div');
if(cssClass) ln.classList.add(cssClass);
ln.append(document.createTextNode(args.join(' ')));
@@ -248,11 +248,11 @@
- getParamIndex(name)
*/
}/*demo1()*/;
log("Loading and initializing sqlite3 module...");
- if(self.window!==self) /*worker thread*/{
+ if(globalThis.window!==globalThis) /*worker thread*/{
/*
If sqlite3.js is in a directory other than this script, in order
to get sqlite3.js to resolve sqlite3.wasm properly, we have to
explicitly tell it where sqlite3.js is being loaded from. We do
that by passing the `sqlite3.dir=theDirName` URL argument to
@@ -260,23 +260,24 @@
loader and it will adjust the sqlite3.wasm path accordingly. If
sqlite3.js/.wasm are in the same directory as this script then
that's not needed.
URL arguments passed as part of the filename via importScripts()
- are simply lost, and such scripts see the self.location of
+ are simply lost, and such scripts see the globalThis.location of
_this_ script.
*/
let sqlite3Js = 'sqlite3.js';
- const urlParams = new URL(self.location.href).searchParams;
+ const urlParams = new URL(globalThis.location.href).searchParams;
if(urlParams.has('sqlite3.dir')){
sqlite3Js = urlParams.get('sqlite3.dir') + '/' + sqlite3Js;
}
importScripts(sqlite3Js);
}
- self.sqlite3InitModule({
- // We can redirect any stdout/stderr from the module
- // like so...
+ globalThis.sqlite3InitModule({
+ /* We can redirect any stdout/stderr from the module like so, but
+ note that doing so makes use of Emscripten-isms, not
+ well-defined sqlite APIs. */
print: log,
printErr: error
}).then(function(sqlite3){
//console.log('sqlite3 =',sqlite3);
log("Done initializing. Running demo...");
Index: ext/wasm/fiddle/fiddle-worker.js
==================================================================
--- ext/wasm/fiddle/fiddle-worker.js
+++ ext/wasm/fiddle/fiddle-worker.js
@@ -372,13 +372,11 @@
sqlite3 = _sqlite3;
console.warn("Installing sqlite3 module globally (in Worker)",
"for use in the dev console.", sqlite3);
globalThis.sqlite3 = sqlite3;
const dbVfs = sqlite3.wasm.xWrap('fiddle_db_vfs', "*", ['string']);
- fiddleModule.fsUnlink = (fn)=>{
- return sqlite3.wasm.sqlite3_wasm_vfs_unlink(dbVfs(0), fn);
- };
+ fiddleModule.fsUnlink = (fn)=>fiddleModule.FS.unlink(fn);
wMsg('fiddle-ready');
}).catch(e=>{
console.error("Fiddle worker init failed:",e);
});
})();
Index: ext/wasm/index-dist.html
==================================================================
--- ext/wasm/index-dist.html
+++ ext/wasm/index-dist.html
@@ -44,23 +44,20 @@
All of these pages must be served via an HTTP
server. Browsers do not support loading WASM files via
file:// URLs.
Any OPFS-related pages or tests require:
+
An OPFS-capable browser released after February
+ 2023. Some tests will work with Chromium-based browsers
+ going back to around v102.
That the web server emit the so-called
COOP
and
COEP
headers. althttpd requires the
-enable-sab flag for that.
-
A very recent version of a Chromium-based browser
- (v102 at least, possibly newer). OPFS support in the
- other major browsers is pending. Development and testing
- is currently done against a dev-channel release of
- Chrome (v111 as of 2023-02-10).
-
The tests and demos...
Index: ext/wasm/index.html
==================================================================
--- ext/wasm/index.html
+++ ext/wasm/index.html
@@ -29,23 +29,20 @@
All of these pages must be served via an HTTP
server. Browsers do not support loading WASM files via
file:// URLs.
Any OPFS-related pages or tests require:
+
An OPFS-capable browser released after February
+ 2023. Some tests will work with Chromium-based browsers
+ going back to around v102.
That the web server emit the so-called
COOP
and
COEP
headers. althttpd requires the
-enable-sab flag for that.
-
A very recent version of a
- Chromium-based browser (v102 at least, possibly newer). OPFS
- support in the other major browsers is pending. Development
- and testing is currently done against a dev-channel release
- of Chrome (v111 as of 2023-02-10).
-
The tests and demos...
Index: ext/wasm/jaccwabyt/jaccwabyt.md
==================================================================
--- ext/wasm/jaccwabyt/jaccwabyt.md
+++ ext/wasm/jaccwabyt/jaccwabyt.md
@@ -204,13 +204,12 @@
```
It also offers a number of other settings, but all are optional except
for the ones shown above. Those three config options abstract away
details which are specific to a given WASM environment. They provide
-the WASM "heap" memory (a byte array), the memory allocator, and the
-deallocator. In a conventional Emscripten setup, that config might
-simply look like:
+the WASM "heap" memory, the memory allocator, and the deallocator. In
+a conventional Emscripten setup, that config might simply look like:
>
```javascript
{
heap: Module['asm']['memory'],
Index: ext/wasm/speedtest1-worker.html
==================================================================
--- ext/wasm/speedtest1-worker.html
+++ ext/wasm/speedtest1-worker.html
@@ -348,11 +348,11 @@
case 'ready':
log("Worker is ready.");
eControls.classList.remove('hidden');
break;
case 'stdout': log(msg.data); break;
- case 'stdout': logErr(msg.data); break;
+ case 'stderr': logErr(msg.data); break;
case 'run-start':
eControls.disabled = true;
log("Running speedtest1 with argv =",msg.data.join(' '));
break;
case 'run-end':
Index: ext/wasm/speedtest1-worker.js
==================================================================
--- ext/wasm/speedtest1-worker.js
+++ ext/wasm/speedtest1-worker.js
@@ -5,13 +5,13 @@
if(urlParams.has('sqlite3.dir')){
speedtestJs = urlParams.get('sqlite3.dir') + '/' + speedtestJs;
}
importScripts(speedtestJs);
/**
- If this environment contains OPFS, this function initializes it and
- returns the name of the dir on which OPFS is mounted, else it returns
- an empty string.
+ If this build includes WASMFS, this function initializes it and
+ returns the name of the dir on which OPFS is mounted, else it
+ returns an empty string.
*/
const wasmfsDir = function f(wasmUtil){
if(undefined !== f._) return f._;
const pdir = '/opfs';
if( !self.FileSystemHandle
@@ -45,10 +45,11 @@
App.logBuffer.push(msg);
mPost(type,msg);
};
const log = (...args)=>logMsg('stdout',args);
const logErr = (...args)=>logMsg('stderr',args);
+ const realSahName = 'opfs-sahpool-speedtest1';
const runSpeedtest = async function(cliFlagsArray){
const scope = App.wasm.scopedAllocPush();
const dbFile = App.pDir+"/speedtest1.sqlite3";
try{
@@ -55,11 +56,10 @@
const argv = [
"speedtest1.wasm", ...cliFlagsArray, dbFile
];
App.logBuffer.length = 0;
const ndxSahPool = argv.indexOf('opfs-sahpool');
- const realSahName = 'opfs-sahpool-speedtest1';
if(ndxSahPool>0){
argv[ndxSahPool] = realSahName;
log("Updated argv for opfs-sahpool: --vfs",realSahName);
}
mPost('run-start', [...argv]);
@@ -71,11 +71,11 @@
name: realSahName,
initialCapacity: 3,
clearOnInit: true,
verbosity: 2
}).then(PoolUtil=>{
- log("opfs-sahpool successfully installed as",realSahName);
+ log("opfs-sahpool successfully installed as",PoolUtil.vfsName);
App.sqlite3.$SAHPoolUtil = PoolUtil;
//console.log("sqlite3.oo1.OpfsSAHPoolDb =", App.sqlite3.oo1.OpfsSAHPoolDb);
});
}
App.wasm.xCall('wasm_main', argv.length,
@@ -100,36 +100,19 @@
logErr("Unhandled worker message type:",msg.type);
break;
}
};
- const sahpSanityChecks = function(sqlite3){
- log("Attempting OpfsSAHPoolDb sanity checks...");
- const db = new sqlite3.oo1.OpfsSAHPoolDb('opfs-sahpoool.db');
- const fn = db.filename;
- db.exec([
- 'create table t(a);',
- 'insert into t(a) values(1),(2),(3);'
- ]);
- db.close();
- sqlite3.wasm.sqlite3_wasm_vfs_unlink(sqlite3_vfs_find("opfs-sahpool"), fn);
- log("SAH sanity checks done.");
- };
-
const EmscriptenModule = {
print: log,
printErr: logErr,
setStatus: (text)=>mPost('load-status',text)
};
log("Initializing speedtest1 module...");
self.sqlite3InitModule(EmscriptenModule).then(async (sqlite3)=>{
const S = globalThis.S = App.sqlite3 = sqlite3;
log("Loaded speedtest1 module. Setting up...");
- App.vfsUnlink = function(pDb, fname){
- const pVfs = S.wasm.sqlite3_wasm_db_vfs(pDb, 0);
- if(pVfs) S.wasm.sqlite3_wasm_vfs_unlink(pVfs, fname||0);
- };
App.pDir = wasmfsDir(S.wasm);
App.wasm = S.wasm;
//if(App.pDir) log("Persistent storage:",pDir);
//else log("Using transient storage.");
mPost('ready',true);
Index: ext/wasm/test-opfs-vfs.js
==================================================================
--- ext/wasm/test-opfs-vfs.js
+++ ext/wasm/test-opfs-vfs.js
@@ -20,11 +20,11 @@
const warn = (...args)=>console.warn(logPrefix,...args);
const error = (...args)=>console.error(logPrefix,...args);
const opfs = sqlite3.opfs;
log("tryOpfsVfs()");
if(!sqlite3.opfs){
- const e = toss("OPFS is not available.");
+ const e = new Error("OPFS is not available.");
error(e);
throw e;
}
const capi = sqlite3.capi;
const pVfs = capi.sqlite3_vfs_find("opfs") || toss("Missing 'opfs' VFS.");
Index: ext/wasm/tester1.c-pp.js
==================================================================
--- ext/wasm/tester1.c-pp.js
+++ ext/wasm/tester1.c-pp.js
@@ -61,11 +61,11 @@
/* Predicate for tests/groups. */
const isWorker = ()=>!isUIThread();
/* Predicate for tests/groups. */
const testIsTodo = ()=>false;
const haveWasmCTests = ()=>{
- return !!wasm.exports.sqlite3_wasm_test_intptr;
+ return !!wasm.exports.sqlite3__wasm_test_intptr;
};
const hasOpfs = ()=>{
return globalThis.FileSystemHandle
&& globalThis.FileSystemDirectoryHandle
&& globalThis.FileSystemFileHandle
@@ -720,11 +720,11 @@
});
}/*scopedAlloc()*/
//log("xCall()...");
{
- const pJson = w.xCall('sqlite3_wasm_enum_json');
+ const pJson = w.xCall('sqlite3__wasm_enum_json');
T.assert(Number.isFinite(pJson)).assert(w.cstrlen(pJson)>300);
}
//log("xWrap()...");
{
@@ -734,13 +734,13 @@
assert(w.xWrap.argAdapter('i32') instanceof Function);
let fw = w.xWrap('sqlite3_libversion','utf8');
T.mustThrowMatching(()=>fw(1), /requires 0 arg/);
let rc = fw();
T.assert('string'===typeof rc).assert(rc.length>5);
- rc = w.xCallWrapped('sqlite3_wasm_enum_json','*');
+ rc = w.xCallWrapped('sqlite3__wasm_enum_json','*');
T.assert(rc>0 && Number.isFinite(rc));
- rc = w.xCallWrapped('sqlite3_wasm_enum_json','utf8');
+ rc = w.xCallWrapped('sqlite3__wasm_enum_json','utf8');
T.assert('string'===typeof rc).assert(rc.length>300);
{ // 'string:static' argAdapter() sanity checks...
let argAd = w.xWrap.argAdapter('string:static');
@@ -819,28 +819,28 @@
}
}
if(haveWasmCTests()){
if(!sqlite3.config.useStdAlloc){
- fw = w.xWrap('sqlite3_wasm_test_str_hello', 'utf8:dealloc',['i32']);
+ fw = w.xWrap('sqlite3__wasm_test_str_hello', 'utf8:dealloc',['i32']);
rc = fw(0);
T.assert('hello'===rc);
rc = fw(1);
T.assert(null===rc);
}
if(w.bigIntEnabled){
w.xWrap.resultAdapter('thrice', (v)=>3n*BigInt(v));
w.xWrap.argAdapter('twice', (v)=>2n*BigInt(v));
- fw = w.xWrap('sqlite3_wasm_test_int64_times2','thrice','twice');
+ fw = w.xWrap('sqlite3__wasm_test_int64_times2','thrice','twice');
rc = fw(1);
T.assert(12n===rc);
w.scopedAllocCall(function(){
const pI1 = w.scopedAlloc(8), pI2 = pI1+4;
w.pokePtr([pI1, pI2], 0);
- const f = w.xWrap('sqlite3_wasm_test_int64_minmax',undefined,['i64*','i64*']);
+ const f = w.xWrap('sqlite3__wasm_test_int64_minmax',undefined,['i64*','i64*']);
const [r1, r2] = w.peek64([pI1, pI2]);
T.assert(!Number.isSafeInteger(r1)).assert(!Number.isSafeInteger(r2));
});
}
}
@@ -940,11 +940,11 @@
assert(wts instanceof StructType).
assert(StructType.isA(wts)).
assert(wts.pointer>0).assert(0===wts.$v4).assert(0n===wts.$v8).
assert(0===wts.$ppV).assert(0===wts.$xFunc);
const testFunc =
- W.xGet('sqlite3_wasm_test_struct'/*name gets mangled in -O3 builds!*/);
+ W.xGet('sqlite3__wasm_test_struct'/*name gets mangled in -O3 builds!*/);
let counter = 0;
//log("wts.pointer =",wts.pointer);
const wtsFunc = function(arg){
/*log("This from a JS function called from C, "+
"which itself was called from JS. arg =",arg);*/
@@ -1126,11 +1126,11 @@
////////////////////////////////////////////////////////////////////////
T.g('sqlite3.oo1')
.t('Create db', function(sqlite3){
const dbFile = '/tester1.db';
- wasm.sqlite3_wasm_vfs_unlink(0, dbFile);
+ sqlite3.util.sqlite3__wasm_vfs_unlink(0, dbFile);
const db = this.db = new sqlite3.oo1.DB(dbFile, 0 ? 'ct' : 'c');
db.onclose = {
disposeAfter: [],
disposeBefore: [
(db)=>{
@@ -1457,11 +1457,11 @@
T.assert(Array.isArray(rv)).assert(3===rv.length)
.assert(-1===rv[0]).assert(-3===rv[2]);
rv = db.exec("SELECT 1 WHERE 0",{rowMode: 0});
T.assert(Array.isArray(rv)).assert(0===rv.length);
if(wasm.bigIntEnabled && haveWasmCTests()){
- const mI = wasm.xCall('sqlite3_wasm_test_int64_max');
+ const mI = wasm.xCall('sqlite3__wasm_test_int64_max');
const b = BigInt(Number.MAX_SAFE_INTEGER * 2);
T.assert(b === db.selectValue("SELECT "+b)).
assert(b === db.selectValue("SELECT ?", b)).
assert(mI == db.selectValue("SELECT $x", {$x:mI}));
}else{
@@ -1683,14 +1683,14 @@
const sql = "select count(*) from t";
const n = db.selectValue(sql);
T.assert(n>0 && db2.selectValue(sql) === n);
}finally{
db2.close();
- wasm.sqlite3_wasm_vfs_unlink(0, filename);
+ sqlite3.util.sqlite3__wasm_vfs_unlink(0, filename);
}
}
- }/*sqlite3_js_vfs_create_file()*/)
+ }/*sqlite3_js_posix_create_file()*/)
////////////////////////////////////////////////////////////////////
.t({
name:'Scalar UDFs',
test: function(sqlite3){
@@ -2073,11 +2073,11 @@
let ptrInt;
const origValue = 512;
try{
ptrInt = w.scopedAlloc(4);
w.poke32(ptrInt,origValue);
- const cf = w.xGet('sqlite3_wasm_test_intptr');
+ const cf = w.xGet('sqlite3__wasm_test_intptr');
const oldPtrInt = ptrInt;
T.assert(origValue === w.peek32(ptrInt));
const rc = cf(ptrInt);
T.assert(2*origValue === rc).
assert(rc === w.peek32(ptrInt)).
@@ -2088,33 +2088,33 @@
w.poke64(pi64, o64);
//log("pi64 =",pi64, "o64 = 0x",o64.toString(16), o64);
const v64 = ()=>w.peek64(pi64)
T.assert(v64() == o64);
//T.assert(o64 === w.peek64(pi64));
- const cf64w = w.xGet('sqlite3_wasm_test_int64ptr');
+ const cf64w = w.xGet('sqlite3__wasm_test_int64ptr');
cf64w(pi64);
T.assert(v64() == BigInt(2 * o64));
cf64w(pi64);
T.assert(v64() == BigInt(4 * o64));
- const biTimes2 = w.xGet('sqlite3_wasm_test_int64_times2');
+ const biTimes2 = w.xGet('sqlite3__wasm_test_int64_times2');
T.assert(BigInt(2 * o64) ===
biTimes2(BigInt(o64)/*explicit conv. required to avoid TypeError
in the call :/ */));
const pMin = w.scopedAlloc(16);
const pMax = pMin + 8;
const g64 = (p)=>w.peek64(p);
w.poke64([pMin, pMax], 0);
const minMaxI64 = [
- w.xCall('sqlite3_wasm_test_int64_min'),
- w.xCall('sqlite3_wasm_test_int64_max')
+ w.xCall('sqlite3__wasm_test_int64_min'),
+ w.xCall('sqlite3__wasm_test_int64_max')
];
T.assert(minMaxI64[0] < BigInt(Number.MIN_SAFE_INTEGER)).
assert(minMaxI64[1] > BigInt(Number.MAX_SAFE_INTEGER));
//log("int64_min/max() =",minMaxI64, typeof minMaxI64[0]);
- w.xCall('sqlite3_wasm_test_int64_minmax', pMin, pMax);
+ w.xCall('sqlite3__wasm_test_int64_minmax', pMin, pMax);
T.assert(g64(pMin) === minMaxI64[0], "int64 mismatch").
assert(g64(pMax) === minMaxI64[1], "int64 mismatch");
//log("pMin",g64(pMin), "pMax",g64(pMax));
w.poke64(pMin, minMaxI64[0]);
T.assert(g64(pMin) === minMaxI64[0]).
@@ -2558,11 +2558,11 @@
})/*custom collation*/
////////////////////////////////////////////////////////////////////////
.t('Close db', function(){
T.assert(this.db).assert(wasm.isPtr(this.db.pointer));
- //wasm.sqlite3_wasm_db_reset(this.db); // will leak virtual tables!
+ //wasm.sqlite3__wasm_db_reset(this.db); // will leak virtual tables!
this.db.close();
T.assert(!this.db.pointer);
})
;/* end of oo1 checks */
@@ -2603,32 +2603,10 @@
}finally{
db.close();
}
}
}/*kvvfs sanity checks*/)
- .t({
- name: 'kvvfs sqlite3_js_vfs_create_file()',
- predicate: ()=>"kvvfs does not currently support this",
- test: function(sqlite3){
- let db;
- try {
- db = new this.JDb(this.kvvfsDbFile);
- const exp = capi.sqlite3_js_db_export(db);
- db.close();
- this.kvvfsUnlink();
- capi.sqlite3_js_vfs_create_file("kvvfs", this.kvvfsDbFile, exp);
- db = new this.JDb(filename);
- T.assert(6 === db.selectValue('select count(*) from kvvfs'));
- }finally{
- db.close();
- this.kvvfsUnlink();
- }
- delete this.kvvfsDbFile;
- delete this.kvvfsUnlink;
- delete this.JDb;
- }
- }/*kvvfs sqlite3_js_vfs_create_file()*/)
;/* end kvvfs tests */
////////////////////////////////////////////////////////////////////////
T.g('Hook APIs')
.t({
@@ -2642,17 +2620,21 @@
return (1 === p) ? 0 : capi.SQLITE_ERROR;
}, 1);
T.assert( 0 === rc /*void pointer*/ );
// Commit hook...
+ T.assert( 0!=capi.sqlite3_get_autocommit(db) );
db.exec("BEGIN; SELECT 1; COMMIT");
T.assert(0 === countCommit,
"No-op transactions (mostly) do not trigger commit hook.");
db.exec("BEGIN EXCLUSIVE; SELECT 1; COMMIT");
T.assert(1 === countCommit,
"But EXCLUSIVE transactions do.");
- db.transaction((d)=>{d.exec("create table t(a)");});
+ db.transaction((d)=>{
+ T.assert( 0==capi.sqlite3_get_autocommit(db) );
+ d.exec("create table t(a)");
+ });
T.assert(2 === countCommit);
// Rollback hook:
rc = capi.sqlite3_rollback_hook(db, (p)=>{
++countRollback;
@@ -2908,11 +2890,11 @@
test: async function(sqlite3){
const filename = this.opfsDbFile = '/dir/sqlite3-tester1.db';
const pVfs = this.opfsVfs = capi.sqlite3_vfs_find('opfs');
T.assert(pVfs);
const unlink = this.opfsUnlink =
- (fn=filename)=>{wasm.sqlite3_wasm_vfs_unlink(pVfs,fn)};
+ (fn=filename)=>{sqlite3.util.sqlite3__wasm_vfs_unlink(pVfs,fn)};
unlink();
let db = new sqlite3.oo1.OpfsDb(filename);
try {
db.exec([
'create table p(a);',
@@ -3225,10 +3207,11 @@
test internal APIs from here */;
globalThis.sqlite3InitModule({
print: log,
printErr: error
}).then(async function(sqlite3){
+ TestUtil.assert(!!sqlite3.util);
log("Done initializing WASM/JS bits. Running tests...");
sqlite3.config.warn("Installing sqlite3 bits as global S for local dev/test purposes.");
globalThis.S = sqlite3;
/*await sqlite3.installOpfsSAHPoolVfs(sahPoolConfig)
.then((u)=>log("Loaded",u.vfsName,"VFS"))
@@ -3243,13 +3226,13 @@
log("BigInt/int64 support is enabled.");
}else{
logClass('warning',"BigInt/int64 support is disabled.");
}
if(haveWasmCTests()){
- log("sqlite3_wasm_test_...() APIs are available.");
+ log("sqlite3__wasm_test_...() APIs are available.");
}else{
- logClass('warning',"sqlite3_wasm_test_...() APIs unavailable.");
+ logClass('warning',"sqlite3__wasm_test_...() APIs unavailable.");
}
log("registered vfs list =",capi.sqlite3_js_vfs_list().join(', '));
TestUtil.runTests(sqlite3);
});
})(self);
Index: main.mk
==================================================================
--- main.mk
+++ main.mk
@@ -358,10 +358,11 @@
$(TOP)/ext/misc/nextchar.c \
$(TOP)/ext/misc/normalize.c \
$(TOP)/ext/misc/percentile.c \
$(TOP)/ext/misc/prefixes.c \
$(TOP)/ext/misc/qpvtab.c \
+ $(TOP)/ext/misc/randomjson.c \
$(TOP)/ext/misc/regexp.c \
$(TOP)/ext/misc/remember.c \
$(TOP)/ext/misc/series.c \
$(TOP)/ext/misc/spellfix.c \
$(TOP)/ext/misc/totype.c \
@@ -524,16 +525,19 @@
FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS4
FUZZCHECK_OPT += -DSQLITE_ENABLE_RTREE
FUZZCHECK_OPT += -DSQLITE_ENABLE_GEOPOLY
FUZZCHECK_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB
FUZZCHECK_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB
+FUZZCHECK_OPT += -DSQLITE_STRICT_SUBTYPE=1
+FUZZCHECK_OPT += -DSQLITE_STATIC_RANDOMJSON
FUZZSRC += $(TOP)/test/fuzzcheck.c
FUZZSRC += $(TOP)/test/ossfuzz.c
FUZZSRC += $(TOP)/test/vt02.c
FUZZSRC += $(TOP)/test/fuzzinvariants.c
FUZZSRC += $(TOP)/ext/recover/dbdata.c
FUZZSRC += $(TOP)/ext/recover/sqlite3recover.c
+FUZZSRC += $(TOP)/ext/misc/randomjson.c
DBFUZZ_OPT =
KV_OPT = -DSQLITE_THREADSAFE=0 -DSQLITE_DIRECT_OVERFLOW_READ
ST_OPT = -DSQLITE_THREADSAFE=0
# This is the default Makefile target. The objects listed here
@@ -896,10 +900,12 @@
TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_STMTVTAB
TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_DBPAGE_VTAB
TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_BYTECODE_VTAB
TESTFIXTURE_FLAGS += -DTCLSH_INIT_PROC=sqlite3TestInit
TESTFIXTURE_FLAGS += -DSQLITE_CKSUMVFS_STATIC
+TESTFIXTURE_FLAGS += -DSQLITE_STATIC_RANDOMJSON
+TESTFIXTURE_FLAGS += -DSQLITE_STRICT_SUBTYPE=1
testfixture$(EXE): $(TESTSRC2) libsqlite3.a $(TESTSRC) $(TOP)/src/tclsqlite.c
$(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \
$(TESTSRC) $(TESTSRC2) $(TOP)/src/tclsqlite.c \
-o testfixture$(EXE) $(LIBTCL) libsqlite3.a $(THREADLIB)
Index: src/analyze.c
==================================================================
--- src/analyze.c
+++ src/analyze.c
@@ -262,13 +262,13 @@
** information.
*/
typedef struct StatAccum StatAccum;
typedef struct StatSample StatSample;
struct StatSample {
- tRowcnt *anEq; /* sqlite_stat4.nEq */
tRowcnt *anDLt; /* sqlite_stat4.nDLt */
#ifdef SQLITE_ENABLE_STAT4
+ tRowcnt *anEq; /* sqlite_stat4.nEq */
tRowcnt *anLt; /* sqlite_stat4.nLt */
union {
i64 iRowid; /* Rowid in main table of the key */
u8 *aRowid; /* Key for WITHOUT ROWID tables */
} u;
@@ -422,13 +422,13 @@
assert( nKeyCol<=nCol );
assert( nKeyCol>0 );
/* Allocate the space required for the StatAccum object */
n = sizeof(*p)
- + sizeof(tRowcnt)*nColUp /* StatAccum.anEq */
- + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */
+ + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */
#ifdef SQLITE_ENABLE_STAT4
+ n += sizeof(tRowcnt)*nColUp; /* StatAccum.anEq */
if( mxSample ){
n += sizeof(tRowcnt)*nColUp /* StatAccum.anLt */
+ sizeof(StatSample)*(nCol+mxSample) /* StatAccum.aBest[], a[] */
+ sizeof(tRowcnt)*3*nColUp*(nCol+mxSample);
}
@@ -445,13 +445,13 @@
p->nLimit = sqlite3_value_int64(argv[3]);
p->nCol = nCol;
p->nKeyCol = nKeyCol;
p->nSkipAhead = 0;
p->current.anDLt = (tRowcnt*)&p[1];
- p->current.anEq = &p->current.anDLt[nColUp];
#ifdef SQLITE_ENABLE_STAT4
+ p->current.anEq = &p->current.anDLt[nColUp];
p->mxSample = p->nLimit==0 ? mxSample : 0;
if( mxSample ){
u8 *pSpace; /* Allocated space not yet assigned */
int i; /* Used to iterate through p->aSample[] */
@@ -714,28 +714,32 @@
assert( p->nCol>0 );
assert( iChng<p->nCol );
if( p->nRow==0 ){
/* This is the first call to this function. Do initialization. */
+#ifdef SQLITE_ENABLE_STAT4
for(i=0; i<p->nCol; i++) p->current.anEq[i] = 1;
+#endif
}else{
/* Second and subsequent calls get processed here */
#ifdef SQLITE_ENABLE_STAT4
if( p->mxSample ) samplePushPrevious(p, iChng);
#endif
/* Update anDLt[], anLt[] and anEq[] to reflect the values that apply
** to the current row of the index. */
+#ifdef SQLITE_ENABLE_STAT4
for(i=0; i<iChng; i++){
  p->current.anEq[i]++;
}
+#endif
for(i=iChng; i<p->nCol; i++){
p->current.anDLt[i]++;
#ifdef SQLITE_ENABLE_STAT4
if( p->mxSample ) p->current.anLt[i] += p->current.anEq[i];
-#endif
p->current.anEq[i] = 1;
+#endif
}
}
p->nRow++;
#ifdef SQLITE_ENABLE_STAT4
@@ -865,11 +869,13 @@
for(i=0; i<p->nKeyCol; i++){
u64 nDistinct = p->current.anDLt[i] + 1;
u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1;
sqlite3_str_appendf(&sStat, " %llu", iVal);
+#ifdef SQLITE_ENABLE_STAT4
assert( p->current.anEq[i] );
+#endif
}
sqlite3ResultStrAccum(context, &sStat);
}
#ifdef SQLITE_ENABLE_STAT4
else if( eCall==STAT_GET_ROWID ){
@@ -1554,10 +1560,20 @@
}
#endif
while( z[0]!=0 && z[0]!=' ' ) z++;
while( z[0]==' ' ) z++;
}
+
+ /* Set the bLowQual flag if the peak number of rows obtained
+ ** from a full equality match is so large that a full table scan
+ ** seems likely to be faster than using the index.
+ */
+ if( aLog[0] > 66 /* Index has more than 100 rows */
+ && aLog[0] <= aLog[nOut-1] /* And only a single value seen */
+ ){
+ pIndex->bLowQual = 1;
+ }
}
}
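/*
** Note on the 66 threshold above: aLog[] holds LogEst values, roughly
** 10*log2(x), so aLog[0]>66 corresponds to about 2^6.6, i.e. roughly
** 100 rows:
**
** ```
** const logEst = (x)=>Math.round(10*Math.log2(x));
** logEst(100); // => 66
** ```
*/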
/*
** This callback is invoked once for each index when reading the
Index: src/btree.c
==================================================================
--- src/btree.c
+++ src/btree.c
@@ -149,12 +149,51 @@
# define SQLITE_CORRUPT_PAGE(pMemPage) corruptPageError(__LINE__, pMemPage)
#else
# define SQLITE_CORRUPT_PAGE(pMemPage) SQLITE_CORRUPT_PGNO(pMemPage->pgno)
#endif
+/* Default value for SHARED_LOCK_TRACE macro if shared-cache is disabled
+** or if the lock tracking is disabled. This is always the value for
+** release builds.
+*/
+#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) /*no-op*/
+
#ifndef SQLITE_OMIT_SHARED_CACHE
+#if 0
+/* ^---- Change to 1 and recompile to enable shared-lock tracing
+** for debugging purposes.
+**
+** Print all shared-cache locks on a BtShared. Debugging use only.
+*/
+static void sharedLockTrace(
+ BtShared *pBt,
+ const char *zMsg,
+ int iRoot,
+ int eLockType
+){
+ BtLock *pLock;
+ if( iRoot>0 ){
+ printf("%s-%p %u%s:", zMsg, pBt, iRoot, eLockType==READ_LOCK?"R":"W");
+ }else{
+ printf("%s-%p:", zMsg, pBt);
+ }
+ for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){
+ printf(" %p/%u%s", pLock->pBtree, pLock->iTable,
+ pLock->eLock==READ_LOCK ? "R" : "W");
+ while( pLock->pNext && pLock->pBtree==pLock->pNext->pBtree ){
+ pLock = pLock->pNext;
+ printf(",%u%s", pLock->iTable, pLock->eLock==READ_LOCK ? "R" : "W");
+ }
+ }
+ printf("\n");
+ fflush(stdout);
+}
+#undef SHARED_LOCK_TRACE
+#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) sharedLockTrace(X,MSG,TAB,TYPE)
+#endif /* Shared-lock tracing */
+
#ifdef SQLITE_DEBUG
/*
**** This function is only used as part of an assert() statement. ***
**
** Check to see if pBtree holds the required locks to read or write to the
@@ -226,10 +265,12 @@
}
}
}else{
iTab = iRoot;
}
+
+ SHARED_LOCK_TRACE(pBtree->pBt,"hasLock",iRoot,eLockType);
/* Search for the required lock. Either a write-lock on root-page iTab, a
** write-lock on the schema table, or (if the client is reading) a
** read-lock on iTab will suffice. Return 1 if any of these are found. */
for(pLock=pBtree->pBt->pLock; pLock; pLock=pLock->pNext){
@@ -360,10 +401,12 @@
static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){
BtShared *pBt = p->pBt;
BtLock *pLock = 0;
BtLock *pIter;
+ SHARED_LOCK_TRACE(pBt,"setLock", iTable, eLock);
+
assert( sqlite3BtreeHoldsMutex(p) );
assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
assert( p->db!=0 );
/* A connection with the read-uncommitted flag set will never try to
@@ -427,10 +470,12 @@
assert( sqlite3BtreeHoldsMutex(p) );
assert( p->sharable || 0==*ppIter );
assert( p->inTrans>0 );
+ SHARED_LOCK_TRACE(pBt, "clearAllLocks", 0, 0);
+
while( *ppIter ){
BtLock *pLock = *ppIter;
assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree );
assert( pLock->pBtree->inTrans>=pLock->eLock );
if( pLock->pBtree==p ){
@@ -465,10 +510,13 @@
/*
** This function changes all write-locks held by Btree p into read-locks.
*/
static void downgradeAllSharedCacheTableLocks(Btree *p){
BtShared *pBt = p->pBt;
+
+ SHARED_LOCK_TRACE(pBt, "downgradeLocks", 0, 0);
+
if( pBt->pWriter==p ){
BtLock *pLock;
pBt->pWriter = 0;
pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING);
for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){
@@ -5159,11 +5207,10 @@
u8 aSave[4];
u8 *aWrite = &pBuf[-4];
assert( aWrite>=pBufStart ); /* due to (6) */
memcpy(aSave, aWrite, 4);
rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
- if( rc && nextPage>pBt->nPage ) rc = SQLITE_CORRUPT_BKPT;
nextPage = get4byte(aWrite);
memcpy(aWrite, aSave, 4);
}else
#endif
@@ -8261,11 +8308,11 @@
/* Verify that all sibling pages are of the same "type" (table-leaf,
** table-interior, index-leaf, or index-interior).
*/
if( pOld->aData[0]!=apOld[0]->aData[0] ){
- rc = SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_PAGE(pOld);
goto balance_cleanup;
}
/* Load b.apCell[] with pointers to all cells in pOld. If pOld
** contains overflow cells, include them in the b.apCell[] array
@@ -8285,11 +8332,11 @@
** first.
*/
memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow));
if( pOld->nOverflow>0 ){
if( NEVER(limit<pOld->aiOvfl[0]) ){
- rc = SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_PAGE(pOld);
goto balance_cleanup;
}
limit = pOld->aiOvfl[0];
for(j=0; jpBt->pCursor; pOther; pOther=pOther->pNext){
if( pOther!=pCur
&& pOther->eState==CURSOR_VALID
&& pOther->pPage==pCur->pPage
){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pCur->pPage);
}
}
return SQLITE_OK;
}
@@ -8988,11 +9035,11 @@
}
}else if( sqlite3PagerPageRefcount(pPage->pDbPage)>1 ){
/* The page being written is not a root page, and there is currently
** more than one reference to it. This only happens if the page is one
** of its own ancestor pages. Corruption. */
- rc = SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_PAGE(pPage);
}else{
MemPage * const pParent = pCur->apPage[iPage-1];
int const iIdx = pCur->aiIdx[iPage-1];
rc = sqlite3PagerWrite(pParent->pDbPage);
@@ -9152,11 +9199,11 @@
ovflPageSize = pBt->usableSize - 4;
do{
rc = btreeGetPage(pBt, ovflPgno, &pPage, 0);
if( rc ) return rc;
if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 || pPage->isInit ){
- rc = SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_PAGE(pPage);
}else{
if( iOffset+ovflPageSize<(u32)nTotal ){
ovflPgno = get4byte(pPage->aData);
}else{
ovflPageSize = nTotal - iOffset;
@@ -9180,11 +9227,11 @@
MemPage *pPage = pCur->pPage; /* Page being written */
if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd
|| pCur->info.pPayload < pPage->aData + pPage->cellOffset
){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
if( pCur->info.nLocal==nTotal ){
/* The entire cell is local */
return btreeOverwriteContent(pPage, pCur->info.pPayload, pX,
0, pCur->info.nLocal);
@@ -9261,11 +9308,11 @@
/* This can only happen if the schema is corrupt such that there is more
** than one table or index with the same root page as used by the cursor.
** Which can only happen if the SQLITE_NoSchemaError flag was set when
** the schema was loaded. This cannot be asserted though, as a user might
** set the flag, load the schema, and then unset the flag. */
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot);
}
}
/* Ensure that the cursor is not in the CURSOR_FAULT state and that it
** points to a valid cell.
@@ -9384,11 +9431,11 @@
assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) );
assert( pPage->leaf || !pPage->intKey );
if( pPage->nFree<0 ){
if( NEVER(pCur->eState>CURSOR_INVALID) ){
/* ^^^^^--- due to the moveToRoot() call above */
- rc = SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_PAGE(pPage);
}else{
rc = btreeComputeFreeSpace(pPage);
}
if( rc ) return rc;
}
@@ -9423,11 +9470,11 @@
pCur->info.nSize = 0;
if( loc==0 ){
CellInfo info;
assert( idx>=0 );
if( idx>=pPage->nCell ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
rc = sqlite3PagerWrite(pPage->pDbPage);
if( rc ){
goto end_insert;
}
@@ -9450,14 +9497,14 @@
** This optimization cannot be used on an autovacuum database if the
** new entry uses overflow pages, as the insertCell() call below is
** necessary to add the PTRMAP_OVERFLOW1 pointer-map entry. */
assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */
if( oldCell < pPage->aData+pPage->hdrOffset+10 ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
if( oldCell+szNew > pPage->aDataEnd ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
memcpy(oldCell, newCell, szNew);
return SQLITE_OK;
}
dropCell(pPage, idx, info.nSize, &rc);
@@ -9555,11 +9602,11 @@
}
if( pDest->pKeyInfo==0 ) aOut += putVarint(aOut, iKey);
nIn = pSrc->info.nLocal;
aIn = pSrc->info.pPayload;
if( aIn+nIn>pSrc->pPage->aDataEnd ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pSrc->pPage);
}
nRem = pSrc->info.nPayload;
if( nIn==nRem && nIn<pDest->pPage->maxLocal ){
memcpy(aOut, aIn, nIn);
pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace);
@@ -9580,11 +9627,11 @@
pBt->nPreformatSize += 4;
}
if( nRem>nIn ){
if( aIn+nIn+4>pSrc->pPage->aDataEnd ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pSrc->pPage);
}
ovflIn = get4byte(&pSrc->info.pPayload[nIn]);
}
do {
@@ -9676,27 +9723,27 @@
if( pCur->eState>=CURSOR_REQUIRESEEK ){
rc = btreeRestoreCursorPosition(pCur);
assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID );
if( rc || pCur->eState!=CURSOR_VALID ) return rc;
}else{
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot);
}
}
assert( pCur->eState==CURSOR_VALID );
iCellDepth = pCur->iPage;
iCellIdx = pCur->ix;
pPage = pCur->pPage;
if( pPage->nCell<=iCellIdx ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
pCell = findCell(pPage, iCellIdx);
if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
if( pCell<&pPage->aCellIdx[pPage->nCell] ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
/* If the BTREE_SAVEPOSITION bit is on, then the cursor position must
** be preserved following this delete operation. If the current delete
** will cause a b-tree rebalance, then this is done by saving the cursor
@@ -9783,11 +9830,11 @@
n = pCur->apPage[iCellDepth+1]->pgno;
}else{
n = pCur->pPage->pgno;
}
pCell = findCell(pLeaf, pLeaf->nCell-1);
- if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT;
+ if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_PAGE(pLeaf);
nCell = pLeaf->xCellSize(pLeaf, pCell);
assert( MX_CELL_SIZE(pBt) >= nCell );
pTmp = pBt->pTmpSpace;
assert( pTmp!=0 );
rc = sqlite3PagerWrite(pLeaf->pDbPage);
@@ -9899,11 +9946,11 @@
** root page of the new table should go. meta[3] is the largest root-page
** created so far, so the new root-page is (meta[3]+1).
*/
sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot);
if( pgnoRoot>btreePagecount(pBt) ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PGNO(pgnoRoot);
}
pgnoRoot++;
/* The new root-page may not be allocated on a pointer-map page, or the
** PENDING_BYTE page.
@@ -9947,11 +9994,11 @@
if( rc!=SQLITE_OK ){
return rc;
}
rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage);
if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){
- rc = SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_PGNO(pgnoRoot);
}
if( rc!=SQLITE_OK ){
releasePage(pRoot);
return rc;
}
@@ -10037,18 +10084,18 @@
int hdr;
CellInfo info;
assert( sqlite3_mutex_held(pBt->mutex) );
if( pgno>btreePagecount(pBt) ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PGNO(pgno);
}
rc = getAndInitPage(pBt, pgno, &pPage, 0);
if( rc ) return rc;
if( (pBt->openFlags & BTREE_SINGLE)==0
&& sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1))
){
- rc = SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_PAGE(pPage);
goto cleardatabasepage_out;
}
hdr = pPage->hdrOffset;
for(i=0; i<pPage->nCell; i++){
pCell = findCell(pPage, i);
@@ -10148,11 +10195,11 @@
assert( sqlite3BtreeHoldsMutex(p) );
assert( p->inTrans==TRANS_WRITE );
assert( iTable>=2 );
if( iTable>btreePagecount(pBt) ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_PGNO(iTable);
}
rc = sqlite3BtreeClearTable(p, iTable, 0);
if( rc ) return rc;
rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0);
Index: src/btreeInt.h
==================================================================
--- src/btreeInt.h
+++ src/btreeInt.h
@@ -62,11 +62,11 @@
** 20 1 Bytes of unused space at the end of each page
** 21 1 Max embedded payload fraction (must be 64)
** 22 1 Min embedded payload fraction (must be 32)
** 23 1 Min leaf payload fraction (must be 32)
** 24 4 File change counter
-** 28 4 Reserved for future use
+** 28 4 The size of the database in pages
** 32 4 First freelist page
** 36 4 Number of freelist pages in the file
** 40 60 15 4-byte meta values passed to higher layers
**
** 40 4 Schema cookie
Index: src/build.c
==================================================================
--- src/build.c
+++ src/build.c
@@ -187,11 +187,11 @@
sqlite3VdbeJumpHere(v, addrRewind);
}
}
sqlite3VdbeAddOp0(v, OP_Halt);
-#if SQLITE_USER_AUTHENTICATION
+#if SQLITE_USER_AUTHENTICATION && !defined(SQLITE_OMIT_SHARED_CACHE)
if( pParse->nTableLock>0 && db->init.busy==0 ){
sqlite3UserAuthInit(db);
if( db->auth.authLevel<UAUTH_User ){
sqlite3ErrorMsg(pParse, "user not authenticated");
pParse->rc = SQLITE_AUTH_USER;
@@ -718,11 +718,11 @@
** the DEFAULT clause or the AS clause of a generated column.
** Return NULL if the column has no associated expression.
*/
Expr *sqlite3ColumnExpr(Table *pTab, Column *pCol){
if( pCol->iDflt==0 ) return 0;
- if( NEVER(!IsOrdinaryTable(pTab)) ) return 0;
+ if( !IsOrdinaryTable(pTab) ) return 0;
if( NEVER(pTab->u.tab.pDfltList==0) ) return 0;
if( NEVER(pTab->u.tab.pDfltList->nExpr<pCol->iDflt) ) return 0;
return pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr;
}
@@ -870,10 +870,13 @@
/* Do not delete the table until the reference count reaches zero. */
assert( db!=0 );
if( !pTable ) return;
if( db->pnBytesFreed==0 && (--pTable->nTabRef)>0 ) return;
deleteTable(db, pTable);
+}
+void sqlite3DeleteTableGeneric(sqlite3 *db, void *pTable){
+ sqlite3DeleteTable(db, (Table*)pTable);
}
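The new sqlite3DeleteTableGeneric() wrapper (together with the sqlite3DeleteReturning signature change and the Expr/ExprList/With wrappers later in this patch) lets sqlite3ParserAddCleanup() be handed a callback whose type matches exactly, instead of casting a differently-typed destructor to (void(*)(sqlite3*,void*)); calling through such a cast pointer is undefined behavior in C even when it happens to work. A minimal sketch of the wrapper pattern, with hypothetical names:

  #include <stdlib.h>

  typedef struct Thing { int x; } Thing;

  /* Typed destructor */
  static void thingDelete(void *pDb, Thing *p){ (void)pDb; free(p); }

  /* Thin wrapper whose signature matches the cleanup hook exactly */
  static void thingDeleteGeneric(void *pDb, void *p){
    thingDelete(pDb, (Thing*)p);
  }

  /* Hypothetical cleanup registry taking a (void*,void*) callback */
  typedef void (*CleanupFn)(void*, void*);
  static CleanupFn savedOp; static void *savedArg;
  static void addCleanup(CleanupFn xOp, void *pArg){ savedOp = xOp; savedArg = pArg; }

  int main(void){
    Thing *p = malloc(sizeof(*p));
    addCleanup(thingDeleteGeneric, p);   /* no function-pointer cast needed */
    savedOp(0, savedArg);                /* later: run the cleanup */
    return 0;
  }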
/*
** Unlink the given table from the hash tables and the delete the
@@ -1408,11 +1411,12 @@
#endif
/*
** Clean up the data structures associated with the RETURNING clause.
*/
-static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){
+static void sqlite3DeleteReturning(sqlite3 *db, void *pArg){
+ Returning *pRet = (Returning*)pArg;
Hash *pHash;
pHash = &(db->aDb[1].pSchema->trigHash);
sqlite3HashInsert(pHash, pRet->zName, 0);
sqlite3ExprListDelete(db, pRet->pReturnEL);
sqlite3DbFree(db, pRet);
@@ -1450,12 +1454,11 @@
return;
}
pParse->u1.pReturning = pRet;
pRet->pParse = pParse;
pRet->pReturnEL = pList;
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet);
+ sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet);
testcase( pParse->earlyCleanup );
if( db->mallocFailed ) return;
sqlite3_snprintf(sizeof(pRet->zName), pRet->zName,
"sqlite_returning_%p", pParse);
pRet->retTrig.zName = pRet->zName;
@@ -1650,11 +1653,12 @@
char aff = SQLITE_AFF_NUMERIC;
const char *zChar = 0;
assert( zIn!=0 );
while( zIn[0] ){
- h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
+ u8 x = *(u8*)zIn;
+ h = (h<<8) + sqlite3UpperToLower[x];
zIn++;
if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */
aff = SQLITE_AFF_TEXT;
zChar = zIn;
}else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){ /* CLOB */
@@ -3000,16 +3004,13 @@
/* Legacy versions of SQLite allowed the use of the magic "rowid" column
** on a view, even though views do not have rowids. The following flag
** setting fixes this problem. But the fix can be disabled by compiling
** with -DSQLITE_ALLOW_ROWID_IN_VIEW in case there are legacy apps that
- ** depend upon the old buggy behavior. The ability can also be toggled
- ** using sqlite3_config(SQLITE_CONFIG_ROWID_IN_VIEW,...) */
-#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
- p->tabFlags |= sqlite3Config.mNoVisibleRowid; /* Optional. Allow by default */
-#else
- p->tabFlags |= TF_NoVisibleRowid; /* Never allow rowid in view */
+ ** depend upon the old buggy behavior. */
+#ifndef SQLITE_ALLOW_ROWID_IN_VIEW
+ p->tabFlags |= TF_NoVisibleRowid;
#endif
sqlite3TwoPartName(pParse, pName1, pName2, &pName);
iDb = sqlite3SchemaToIndex(db, p->pSchema);
sqlite3FixInit(&sFix, pParse, iDb, "view", pName);
@@ -5518,20 +5519,21 @@
}
iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pObjName);
if( iDb<0 ) return;
z = sqlite3NameFromToken(db, pObjName);
if( z==0 ) return;
- zDb = db->aDb[iDb].zDbSName;
+ zDb = pName2->n ? db->aDb[iDb].zDbSName : 0;
pTab = sqlite3FindTable(db, z, zDb);
if( pTab ){
reindexTable(pParse, pTab, 0);
sqlite3DbFree(db, z);
return;
}
pIndex = sqlite3FindIndex(db, z, zDb);
sqlite3DbFree(db, z);
if( pIndex ){
+ iDb = sqlite3SchemaToIndex(db, pIndex->pTable->pSchema);
sqlite3BeginWriteOperation(pParse, 0, iDb);
sqlite3RefillIndex(pParse, pIndex, -1);
return;
}
sqlite3ErrorMsg(pParse, "unable to identify the object to be reindexed");
@@ -5692,7 +5694,10 @@
for(i=0; i<pWith->nCte; i++){
cteClear(db, &pWith->a[i]);
}
sqlite3DbFree(db, pWith);
}
+}
+void sqlite3WithDeleteGeneric(sqlite3 *db, void *pWith){
+ sqlite3WithDelete(db, (With*)pWith);
}
#endif /* !defined(SQLITE_OMIT_CTE) */
Index: src/date.c
==================================================================
--- src/date.c
+++ src/date.c
@@ -1041,10 +1041,16 @@
n = sqlite3_value_bytes(argv[i]);
if( z==0 || parseModifier(context, (char*)z, n, p, i) ) return 1;
}
computeJD(p);
if( p->isError || !validJulianDay(p->iJD) ) return 1;
+ if( argc==1 && p->validYMD && p->D>28 ){
+ /* Make sure a YYYY-MM-DD is normalized.
+ ** Example: 2023-02-31 -> 2023-03-03 */
+ assert( p->validJD );
+ p->validYMD = 0;
+ }
return 0;
}
/*
@@ -1227,27 +1233,88 @@
}else{
sqlite3_result_text(context, &zBuf[1], 10, SQLITE_TRANSIENT);
}
}
}
+
+/*
+** Compute the number of days after the most recent January 1.
+**
+** In other words, compute the zero-based day number for the
+** current year:
+**
+** Jan01 = 0, Jan02 = 1, ..., Jan31 = 30, Feb01 = 31, ...
+** Dec31 = 364 or 365.
+*/
+static int daysAfterJan01(DateTime *pDate){
+ DateTime jan01 = *pDate;
+ assert( jan01.validYMD );
+ assert( jan01.validHMS );
+ assert( pDate->validJD );
+ jan01.validJD = 0;
+ jan01.M = 1;
+ jan01.D = 1;
+ computeJD(&jan01);
+ return (int)((pDate->iJD-jan01.iJD+43200000)/86400000);
+}
+
+/*
+** Return the number of days after the most recent Monday.
+**
+** In other words, return the day of the week according
+** to this code:
+**
+** 0=Monday, 1=Tuesday, 2=Wednesday, ..., 6=Sunday.
+*/
+static int daysAfterMonday(DateTime *pDate){
+ assert( pDate->validJD );
+ return (int)((pDate->iJD+43200000)/86400000) % 7;
+}
+
+/*
+** Return the number of days after the most recent Sunday.
+**
+** In other words, return the day of the week according
+** to this code:
+**
+** 0=Sunday, 1=Monday, 2=Tues, ..., 6=Saturday
+*/
+static int daysAfterSunday(DateTime *pDate){
+ assert( pDate->validJD );
+ return (int)((pDate->iJD+129600000)/86400000) % 7;
+}
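The %V and %G handling below leans on these helpers: shift the date to the Thursday of its Monday-based week, then count weeks from January 1 of the Thursday's year via daysAfterJan01(). A small standalone sketch of that arithmetic, assuming a 0-based day-of-year for that Thursday:

  #include <stdio.h>

  /* dayOfYearOfThursday: 0-based day-of-year of the Thursday that falls in
  ** the same Monday-based week as the date of interest. */
  static int isoWeek(int dayOfYearOfThursday){
    return dayOfYearOfThursday/7 + 1;
  }

  int main(void){
    /* 2024-01-01 is a Monday; the Thursday of that week is 2024-01-04,
    ** whose 0-based day-of-year is 3, so the ISO week is 1. */
    printf("%02d\n", isoWeek(3));  /* prints 01, matching strftime('%V','2024-01-01') */
    return 0;
  }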
/*
** strftime( FORMAT, TIMESTRING, MOD, MOD, ...)
**
** Return a string described by FORMAT. Conversions as follows:
**
-** %d day of month
+** %d day of month 01-31
+** %e day of month 1-31
** %f ** fractional seconds SS.SSS
+** %F ISO date. YYYY-MM-DD
+** %G ISO year corresponding to %V 0000-9999.
+** %g 2-digit ISO year corresponding to %V 00-99
** %H hour 00-24
-** %j day of year 000-366
+** %k hour 0-24 (leading zero converted to space)
+** %I hour 01-12
+** %j day of year 001-366
** %J ** julian day number
+** %l hour 1-12 (leading zero converted to space)
** %m month 01-12
** %M minute 00-59
+** %p "am" or "pm"
+** %P "AM" or "PM"
+** %R time as HH:MM
** %s seconds since 1970-01-01
** %S seconds 00-59
-** %w day of week 0-6 Sunday==0
-** %W week of year 00-53
+** %T time as HH:MM:SS
+** %u day of week 1-7 Monday==1, Sunday==7
+** %w day of week 0-6 Sunday==0, Monday==1
+** %U week of year 00-53 (First Sunday is start of week 01)
+** %V week of year 01-53 (First week containing Thursday is week 01)
+** %W week of year 00-53 (First Monday is start of week 01)
** %Y year 0000-9999
** %% %
*/
static void strftimeFunc(
sqlite3_context *context,
@@ -1280,19 +1347,34 @@
case 'd': /* Fall thru */
case 'e': {
sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D);
break;
}
- case 'f': {
+ case 'f': { /* Fractional seconds. (Non-standard) */
double s = x.s;
if( s>59.999 ) s = 59.999;
sqlite3_str_appendf(&sRes, "%06.3f", s);
break;
}
case 'F': {
sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D);
break;
+ }
+ case 'G': /* Fall thru */
+ case 'g': {
+ DateTime y = x;
+ assert( y.validJD );
+ /* Move y so that it is the Thursday in the same week as x */
+ y.iJD += (3 - daysAfterMonday(&x))*86400000;
+ y.validYMD = 0;
+ computeYMD(&y);
+ if( cf=='g' ){
+ sqlite3_str_appendf(&sRes, "%02d", y.Y%100);
+ }else{
+ sqlite3_str_appendf(&sRes, "%04d", y.Y);
+ }
+ break;
}
case 'H':
case 'k': {
sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h);
break;
@@ -1303,29 +1385,15 @@
if( h>12 ) h -= 12;
if( h==0 ) h = 12;
sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h);
break;
}
- case 'W': /* Fall thru */
- case 'j': {
- int nDay; /* Number of days since 1st day of year */
- DateTime y = x;
- y.validJD = 0;
- y.M = 1;
- y.D = 1;
- computeJD(&y);
- nDay = (int)((x.iJD-y.iJD+43200000)/86400000);
- if( cf=='W' ){
- int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */
- wd = (int)(((x.iJD+43200000)/86400000)%7);
- sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7);
- }else{
- sqlite3_str_appendf(&sRes,"%03d",nDay+1);
- }
+ case 'j': { /* Day of year. Jan01==1, Jan02==2, and so forth */
+ sqlite3_str_appendf(&sRes,"%03d",daysAfterJan01(&x)+1);
break;
}
- case 'J': {
+ case 'J': { /* Julian day number. (Non-standard) */
sqlite3_str_appendf(&sRes,"%.16g",x.iJD/86400000.0);
break;
}
case 'm': {
sqlite3_str_appendf(&sRes,"%02d",x.M);
@@ -1364,16 +1432,36 @@
}
case 'T': {
sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s);
break;
}
- case 'u': /* Fall thru */
- case 'w': {
- char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0';
+ case 'u': /* Day of week. 1 to 7. Monday==1, Sunday==7 */
+ case 'w': { /* Day of week. 0 to 6. Sunday==0, Monday==1 */
+ char c = (char)daysAfterSunday(&x) + '0';
if( c=='0' && cf=='u' ) c = '7';
sqlite3_str_appendchar(&sRes, 1, c);
break;
+ }
+ case 'U': { /* Week num. 00-53. First Sun of the year is week 01 */
+ sqlite3_str_appendf(&sRes,"%02d",
+ (daysAfterJan01(&x)-daysAfterSunday(&x)+7)/7);
+ break;
+ }
+ case 'V': { /* Week num. 01-53. First week with a Thur is week 01 */
+ DateTime y = x;
+ /* Adjust y so that it is the Thursday in the same week as x */
+ assert( y.validJD );
+ y.iJD += (3 - daysAfterMonday(&x))*86400000;
+ y.validYMD = 0;
+ computeYMD(&y);
+ sqlite3_str_appendf(&sRes,"%02d", daysAfterJan01(&y)/7+1);
+ break;
+ }
+ case 'W': { /* Week num. 00-53. First Mon of the year is week 01 */
+ sqlite3_str_appendf(&sRes,"%02d",
+ (daysAfterJan01(&x)-daysAfterMonday(&x)+7)/7);
+ break;
}
case 'Y': {
sqlite3_str_appendf(&sRes,"%04d",x.Y);
break;
}
Index: src/expr.c
==================================================================
--- src/expr.c
+++ src/expr.c
@@ -1221,13 +1221,11 @@
assert( pExpr->op==TK_FUNCTION );
assert( pExpr->pLeft==0 );
assert( ExprUseXList(pExpr) );
if( pExpr->x.pList==0 || NEVER(pExpr->x.pList->nExpr==0) ){
/* Ignore ORDER BY on zero-argument aggregates */
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
- pOrderBy);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pOrderBy);
return;
}
if( IsWindowFunc(pExpr) ){
sqlite3ExprOrderByAggregateError(pParse, pExpr);
sqlite3ExprListDelete(db, pOrderBy);
@@ -1404,10 +1402,13 @@
}
}
void sqlite3ExprDelete(sqlite3 *db, Expr *p){
if( p ) sqlite3ExprDeleteNN(db, p);
}
+void sqlite3ExprDeleteGeneric(sqlite3 *db, void *p){
+ if( ALWAYS(p) ) sqlite3ExprDeleteNN(db, (Expr*)p);
+}
/*
** Clear both elements of an OnOrUsing object
*/
void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){
@@ -1429,13 +1430,11 @@
**
** The deferred delete is (currently) implemented by adding the
** pExpr to the pParse->pConstExpr list with a register number of 0.
*/
void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprDelete,
- pExpr);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr);
}
/* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the
** expression.
*/
@@ -2237,10 +2236,13 @@
sqlite3DbNNFreeNN(db, pList);
}
void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){
if( pList ) exprListDeleteNN(db, pList);
}
+void sqlite3ExprListDeleteGeneric(sqlite3 *db, void *pList){
+ if( ALWAYS(pList) ) exprListDeleteNN(db, (ExprList*)pList);
+}
/*
** Return the bitwise-OR of all Expr.flags fields in the given
** ExprList.
*/
@@ -2736,13 +2738,14 @@
case TK_BLOB:
return 0;
case TK_COLUMN:
assert( ExprUseYTab(p) );
return ExprHasProperty(p, EP_CanBeNull) ||
- p->y.pTab==0 || /* Reference to column of index on expression */
+ NEVER(p->y.pTab==0) || /* Reference to column of index on expr */
(p->iColumn>=0
&& p->y.pTab->aCol!=0 /* Possible due to prior error */
+ && ALWAYS(p->iColumn<p->y.pTab->nCol)
&& p->y.pTab->aCol[p->iColumn].notNull==0);
default:
return 1;
}
}
@@ -6769,17 +6772,18 @@
return WRC_Continue;
}
case TK_AGG_FUNCTION: {
if( (pNC->ncFlags & NC_InAggFunc)==0
&& pWalker->walkerDepth==pExpr->op2
+ && pExpr->pAggInfo==0
){
/* Check to see if pExpr is a duplicate of another aggregate
** function that is already in the pAggInfo structure
*/
struct AggInfo_func *pItem = pAggInfo->aFunc;
for(i=0; i<pAggInfo->nFunc; i++, pItem++){
- if( pItem->pFExpr==pExpr ) break;
+ if( NEVER(pItem->pFExpr==pExpr) ) break;
if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){
break;
}
}
if( i>=pAggInfo->nFunc ){
@@ -6818,10 +6822,12 @@
pItem->bOBPayload = 0;
pItem->bOBUnique = ExprHasProperty(pExpr, EP_Distinct);
}else{
pItem->bOBPayload = 1;
}
+ pItem->bUseSubtype =
+ (pItem->pFunc->funcFlags & SQLITE_SUBTYPE)!=0;
}else{
pItem->iOBTab = -1;
}
if( ExprHasProperty(pExpr, EP_Distinct) && !pItem->bOBUnique ){
pItem->iDistinct = pParse->nTab++;
Index: src/func.c
==================================================================
--- src/func.c
+++ src/func.c
@@ -1407,11 +1407,11 @@
|| sqlite3_context_db_handle(context)->mallocFailed );
return;
}
if( zPattern[0]==0 ){
assert( sqlite3_value_type(argv[1])!=SQLITE_NULL );
- sqlite3_result_value(context, argv[0]);
+ sqlite3_result_text(context, (const char*)zStr, nStr, SQLITE_TRANSIENT);
return;
}
nPattern = sqlite3_value_bytes(argv[1]);
assert( zPattern==sqlite3_value_text(argv[1]) ); /* No encoding change */
zRep = sqlite3_value_text(argv[2]);
@@ -1890,11 +1890,11 @@
p = sqlite3_aggregate_context(context, 0);
if( p && p->cnt>0 ){
if( p->approx ){
if( p->ovrfl ){
sqlite3_result_error(context,"integer overflow",-1);
- }else if( !sqlite3IsOverflow(p->rErr) ){
+ }else if( !sqlite3IsNaN(p->rErr) ){
sqlite3_result_double(context, p->rSum+p->rErr);
}else{
sqlite3_result_double(context, p->rSum);
}
}else{
@@ -1907,11 +1907,11 @@
p = sqlite3_aggregate_context(context, 0);
if( p && p->cnt>0 ){
double r;
if( p->approx ){
r = p->rSum;
- if( !sqlite3IsOverflow(p->rErr) ) r += p->rErr;
+ if( !sqlite3IsNaN(p->rErr) ) r += p->rErr;
}else{
r = (double)(p->iSum);
}
sqlite3_result_double(context, r/(double)p->cnt);
}
@@ -1921,11 +1921,11 @@
double r = 0.0;
p = sqlite3_aggregate_context(context, 0);
if( p ){
if( p->approx ){
r = p->rSum;
- if( !sqlite3IsOverflow(p->rErr) ) r += p->rErr;
+ if( !sqlite3IsNaN(p->rErr) ) r += p->rErr;
}else{
r = (double)(p->iSum);
}
}
sqlite3_result_double(context, r);
Index: src/global.c
==================================================================
--- src/global.c
+++ src/global.c
@@ -242,10 +242,13 @@
SQLITE_USE_URI, /* bOpenUri */
SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */
0, /* bSmallMalloc */
1, /* bExtraSchemaChecks */
sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */
+#ifdef SQLITE_DEBUG
+ 0, /* bJsonSelfcheck */
+#endif
0x7ffffffe, /* mxStrlen */
0, /* neverCorrupt */
SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */
SQLITE_STMTJRNL_SPILL, /* nStmtSpill */
{0,0,0,0,0,0,0,0}, /* m */
@@ -284,13 +287,10 @@
SQLITE_MEMDB_DEFAULT_MAXSIZE, /* mxMemdbSize */
#endif
#ifndef SQLITE_UNTESTABLE
0, /* xTestCallback */
#endif
-#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
- 0, /* mNoVisibleRowid. 0 == allow rowid-in-view */
-#endif
0, /* bLocaltimeFault */
0, /* xAltLocaltime */
0x7ffffffe, /* iOnceResetThreshold */
SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */
0, /* iPrngSeed */
Index: src/insert.c
==================================================================
--- src/insert.c
+++ src/insert.c
@@ -1084,11 +1084,11 @@
pNx->pUpsertSrc = pTabList;
pNx->regData = regData;
pNx->iDataCur = iDataCur;
pNx->iIdxCur = iIdxCur;
if( pNx->pUpsertTarget ){
- if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx, pUpsert) ){
+ if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx) ){
goto insert_cleanup;
}
}
pNx = pNx->pNextUpsert;
}while( pNx!=0 );
Index: src/json.c
==================================================================
--- src/json.c
+++ src/json.c
@@ -8,28 +8,149 @@
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
-** This SQLite JSON functions.
+** SQLite JSON functions.
**
** This file began as an extension in ext/misc/json1.c in 2015. That
** extension proved so useful that it has now been moved into the core.
**
-** For the time being, all JSON is stored as pure text. (We might add
-** a JSONB type in the future which stores a binary encoding of JSON in
-** a BLOB, but there is no support for JSONB in the current implementation.
-** This implementation parses JSON text at 250 MB/s, so it is hard to see
-** how JSONB might improve on that.)
+** The original design stored all JSON as pure text, canonical RFC-8259.
+** Support for JSON-5 extensions was added with version 3.42.0 (2023-05-16).
+** All generated JSON text still conforms strictly to RFC-8259, but text
+** with JSON-5 extensions is accepted as input.
+**
+** Beginning with version 3.45.0 (circa 2024-01-01), these routines also
+** accept BLOB values that have JSON encoded using a binary representation
+** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk
+** format SQLite JSONB is completely different and incompatible with
+** PostgreSQL JSONB.
+**
+** Decoding and interpreting JSONB is still O(N) where N is the size of
+** the input, the same as text JSON. However, the constant of proportionality
+** for JSONB is much smaller due to faster parsing. The size of each
+** element in JSONB is encoded in its header, so there is no need to search
+** for delimiters using persnickety syntax rules. JSONB seems to be about
+** 3x faster than text JSON as a result. JSONB also tends to be slightly
+** smaller than text JSON, by 5% or 10%, but there are corner cases where
+** JSONB can be slightly larger. So you are not far mistaken to say that
+** a JSONB blob is the same size as the equivalent RFC-8259 text.
+**
+**
+** THE JSONB ENCODING:
+**
+** Every JSON element is encoded in JSONB as a header and a payload.
+** The header is between 1 and 9 bytes in size. The payload is zero
+** or more bytes.
+**
+** The lower 4 bits of the first byte of the header determine the
+** element type:
+**
+** 0: NULL
+** 1: TRUE
+** 2: FALSE
+** 3: INT -- RFC-8259 integer literal
+** 4: INT5 -- JSON5 integer literal
+** 5: FLOAT -- RFC-8259 floating point literal
+** 6: FLOAT5 -- JSON5 floating point literal
+** 7: TEXT -- Text literal acceptable to both SQL and JSON
+** 8: TEXTJ -- Text containing RFC-8259 escapes
+** 9: TEXT5 -- Text containing JSON5 and/or RFC-8259 escapes
+** 10: TEXTRAW -- Text containing unescaped syntax characters
+** 11: ARRAY
+** 12: OBJECT
+**
+** The other three possible values (13-15) are reserved for future
+** enhancements.
+**
+** The upper 4 bits of the first byte determine the size of the header
+** and sometimes also the size of the payload. If X is the first byte
+** of the element and if X>>4 is between 0 and 11, then the payload
+** will be that many bytes in size and the header is exactly one byte
+** in size. The other four values for X>>4 (12-15) indicate that the header
+** is more than one byte in size and that the payload size is determined
+** by the remainder of the header, interpreted as an unsigned big-endian
+** integer.
+**
+** Value of X>>4 Size integer Total header size
+** ------------- -------------------- -----------------
+** 12 1 byte (0-255) 2
+** 13 2 byte (0-65535) 3
+** 14 4 byte (0-4294967295) 5
+** 15 8 byte (0-1.8e19) 9
+**
+** The payload size need not be expressed in its minimal form. For example,
+** if the payload size is 10, the size can be expressed in any of 5 different
+** ways: (1) (X>>4)==10, (2) (X>>4)==12 followed by one 0x0a byte,
+** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by
+** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and
+** a single byte of 0x0a. The shorter forms are preferred, of course, but
+** sometimes when generating JSONB, the payload size is not known in advance
+** and it is convenient to reserve sufficient header space to cover the
+** largest possible payload size and then come back later and patch up
+** the size when it becomes known, resulting in a non-minimal encoding.
+**
+** The value (X>>4)==15 is not actually used in the current implementation
+** (as SQLite is currently unable to handle BLOBs larger than about 2GB)
+** but is included in the design to allow for future enhancements.
+**
+** The payload follows the header. NULL, TRUE, and FALSE have no payload and
+** their payload size must always be zero. The payload for INT, INT5,
+** FLOAT, FLOAT5, TEXT, TEXTJ, TEXT5, and TEXTRAW is text. Note that the
+** "..." or '...' delimiters are omitted from the various text encodings.
+** The payload for ARRAY and OBJECT is a list of additional elements that
+** are the content for the array or object. The payload for an OBJECT
+** must be an even number of elements. The first element of each pair is
+** the label and must be of type TEXT, TEXTJ, TEXT5, or TEXTRAW.
+**
+** A valid JSONB blob consists of a single element, as described above.
+** Usually this will be an ARRAY or OBJECT element which has many more
+** elements as its content. But the overall blob is just a single element.
+**
+** Input validation for JSONB blobs simply checks that the element type
+** code is between 0 and 12 and that the total size of the element
+** (header plus payload) is the same as the size of the BLOB. If those
+** checks are true, the BLOB is assumed to be JSONB and processing continues.
+** Errors are only raised if some other miscoding is discovered during
+** processing.
+**
+** Additional information can be found in the doc/jsonb.md file of the
+** canonical SQLite source tree.
*/
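Working through the rules above on a tiny input makes the layout concrete. The following sketch hand-assembles what the JSONB encoding of [1,true] should look like under those rules (the byte values are derived only from the description above):

  #include <stdio.h>

  int main(void){
    unsigned char blob[] = {
      0x3B,       /* ARRAY: type 11 in the low nibble, payload size 3 in the high nibble */
      0x13, '1',  /* INT: type 3, 1-byte payload holding the literal text "1" */
      0x01        /* TRUE: type 1, zero-byte payload */
    };
    unsigned i;
    for(i=0; i<sizeof(blob); i++) printf("%02X", blob[i]);
    printf("\n");  /* 3B133101 */
    return 0;
  }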
#ifndef SQLITE_OMIT_JSON
#include "sqliteInt.h"
+/* JSONB element types
+*/
+#define JSONB_NULL 0 /* "null" */
+#define JSONB_TRUE 1 /* "true" */
+#define JSONB_FALSE 2 /* "false" */
+#define JSONB_INT 3 /* integer acceptable to JSON and SQL */
+#define JSONB_INT5 4 /* integer in 0x000 notation */
+#define JSONB_FLOAT 5 /* float acceptable to JSON and SQL */
+#define JSONB_FLOAT5 6 /* float with JSON5 extensions */
+#define JSONB_TEXT 7 /* Text compatible with both JSON and SQL */
+#define JSONB_TEXTJ 8 /* Text with JSON escapes */
+#define JSONB_TEXT5 9 /* Text with JSON-5 escape */
+#define JSONB_TEXTRAW 10 /* SQL text that needs escaping for JSON */
+#define JSONB_ARRAY 11 /* An array */
+#define JSONB_OBJECT 12 /* An object */
+
+/* Human-readable names for the JSONB values. The index for each
+** string must correspond to the JSONB_* integer above.
+*/
+static const char * const jsonbType[] = {
+ "null", "true", "false", "integer", "integer",
+ "real", "real", "text", "text", "text",
+ "text", "array", "object", "", "", "", ""
+};
+
/*
** Growing our own isspace() routine this way is twice as fast as
** the library isspace() function, resulting in a 7% overall performance
-** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
+** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
*/
static const char jsonIsSpace[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -46,15 +167,23 @@
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
-#define fast_isspace(x) (jsonIsSpace[(unsigned char)x])
+#define jsonIsspace(x) (jsonIsSpace[(unsigned char)x])
/*
-** Characters that are special to JSON. Control charaters,
-** '"' and '\\'.
+** The set of all space characters recognized by jsonIsspace().
+** Useful as the second argument to strspn().
+*/
+static const char jsonSpaces[] = "\011\012\015\040";
+
+/*
+** Characters that are special to JSON. Control characters,
+** '"' and '\\' and '\''. Actually, '\'' is not special to
+** canonical JSON, but it is special in JSON-5, so we include
+** it in the set of special characters.
*/
static const char jsonIsOk[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -72,247 +201,364 @@
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
-
-#if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST)
-# define VVA(X)
-#else
-# define VVA(X) X
-#endif
-
/* Objects */
+typedef struct JsonCache JsonCache;
typedef struct JsonString JsonString;
-typedef struct JsonNode JsonNode;
typedef struct JsonParse JsonParse;
-typedef struct JsonCleanup JsonCleanup;
+
+/*
+** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
+*/
+#define JSON_CACHE_ID (-429938) /* Cache entry */
+#define JSON_CACHE_SIZE 4 /* Max number of cache entries */
+
+/*
+** jsonUnescapeOneChar() returns this invalid code point if it encounters
+** a syntax error.
+*/
+#define JSON_INVALID_CHAR 0x99999
+
+/* A cache mapping JSON text into JSONB blobs.
+**
+** Each cache entry is a JsonParse object with the following restrictions:
+**
+** * The bReadOnly flag must be set
+**
+** * The aBlob[] array must be owned by the JsonParse object. In other
+** words, nBlobAlloc must be non-zero.
+**
+** * eEdit and delta must be zero.
+**
+** * zJson must be an RCStr. In other words bJsonIsRCStr must be true.
+*/
+struct JsonCache {
+ sqlite3 *db; /* Database connection */
+ int nUsed; /* Number of active entries in the cache */
+ JsonParse *a[JSON_CACHE_SIZE]; /* One line for each cache entry */
+};
/* An instance of this object represents a JSON string
** under construction. Really, this is a generic string accumulator
** that can be and is used to create strings other than JSON.
+**
+** If the generated string is longer than will fit into the zSpace[] buffer,
+** then it will be an RCStr string. This aids with caching of large
+** JSON strings.
*/
struct JsonString {
sqlite3_context *pCtx; /* Function context - put error messages here */
char *zBuf; /* Append JSON content here */
u64 nAlloc; /* Bytes of storage available in zBuf[] */
u64 nUsed; /* Bytes of zBuf[] currently used */
u8 bStatic; /* True if zBuf is static space */
- u8 bErr; /* True if an error has been encountered */
+ u8 eErr; /* True if an error has been encountered */
char zSpace[100]; /* Initial static space */
};
-/* A deferred cleanup task. A list of JsonCleanup objects might be
-** run when the JsonParse object is destroyed.
-*/
-struct JsonCleanup {
- JsonCleanup *pJCNext; /* Next in a list */
- void (*xOp)(void*); /* Routine to run */
- void *pArg; /* Argument to xOp() */
-};
-
-/* JSON type values
-*/
-#define JSON_SUBST 0 /* Special edit node. Uses u.iPrev */
-#define JSON_NULL 1
-#define JSON_TRUE 2
-#define JSON_FALSE 3
-#define JSON_INT 4
-#define JSON_REAL 5
-#define JSON_STRING 6
-#define JSON_ARRAY 7
-#define JSON_OBJECT 8
-
-/* The "subtype" set for JSON values */
+/* Allowed values for JsonString.eErr */
+#define JSTRING_OOM 0x01 /* Out of memory */
+#define JSTRING_MALFORMED 0x02 /* Malformed JSONB */
+#define JSTRING_ERR 0x04 /* Error already sent to sqlite3_result */
+
+/* The "subtype" set for text JSON values passed through using
+** sqlite3_result_subtype() and sqlite3_value_subtype().
+*/
#define JSON_SUBTYPE 74 /* Ascii for "J" */
/*
-** Names of the various JSON types:
-*/
-static const char * const jsonType[] = {
- "subst",
- "null", "true", "false", "integer", "real", "text", "array", "object"
-};
-
-/* Bit values for the JsonNode.jnFlag field
-*/
-#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */
-#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */
-#define JNODE_REMOVE 0x04 /* Do not output */
-#define JNODE_REPLACE 0x08 /* Target of a JSON_SUBST node */
-#define JNODE_APPEND 0x10 /* More ARRAY/OBJECT entries at u.iAppend */
-#define JNODE_LABEL 0x20 /* Is a label of an object */
-#define JNODE_JSON5 0x40 /* Node contains JSON5 enhancements */
-
-
-/* A single node of parsed JSON. An array of these nodes describes
-** a parse of JSON + edits.
-**
-** Use the json_parse() SQL function (available when compiled with
-** -DSQLITE_DEBUG) to see a dump of complete JsonParse objects, including
-** a complete listing and decoding of the array of JsonNodes.
-*/
-struct JsonNode {
- u8 eType; /* One of the JSON_ type values */
- u8 jnFlags; /* JNODE flags */
- u8 eU; /* Which union element to use */
- u32 n; /* Bytes of content for INT, REAL or STRING
- ** Number of sub-nodes for ARRAY and OBJECT
- ** Node that SUBST applies to */
- union {
- const char *zJContent; /* 1: Content for INT, REAL, and STRING */
- u32 iAppend; /* 2: More terms for ARRAY and OBJECT */
- u32 iKey; /* 3: Key for ARRAY objects in json_tree() */
- u32 iPrev; /* 4: Previous SUBST node, or 0 */
- } u;
-};
-
-
-/* A parsed and possibly edited JSON string. Lifecycle:
-**
-** 1. JSON comes in and is parsed into an array aNode[]. The original
-** JSON text is stored in zJson.
-**
-** 2. Zero or more changes are made (via json_remove() or json_replace()
-** or similar) to the aNode[] array.
-**
-** 3. A new, edited and mimified JSON string is generated from aNode
-** and stored in zAlt. The JsonParse object always owns zAlt.
-**
-** Step 1 always happens. Step 2 and 3 may or may not happen, depending
-** on the operation.
-**
-** aNode[].u.zJContent entries typically point into zJson. Hence zJson
-** must remain valid for the lifespan of the parse. For edits,
-** aNode[].u.zJContent might point to malloced space other than zJson.
-** Entries in pClup are responsible for freeing that extra malloced space.
-**
-** When walking the parse tree in aNode[], edits are ignored if useMod is
-** false.
+** Bit values for the flags passed into various SQL function implementations
+** via the sqlite3_user_data() value.
+*/
+#define JSON_JSON 0x01 /* Result is always JSON */
+#define JSON_SQL 0x02 /* Result is always SQL */
+#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */
+#define JSON_ISSET 0x04 /* json_set(), not json_insert() */
+#define JSON_BLOB 0x08 /* Use the BLOB output format */
+
+
+/* A parsed JSON value. Lifecycle:
+**
+** 1. JSON comes in and is parsed into a JSONB value in aBlob. The
+** original text is stored in zJson. This step is skipped if the
+** input is JSONB instead of text JSON.
+**
+** 2. The aBlob[] array is searched using the JSON path notation, if needed.
+**
+** 3. Zero or more changes are made to aBlob[] (via json_remove() or
+** json_replace() or json_patch() or similar).
+**
+** 4. New JSON text is generated from the aBlob[] for output. This step
+** is skipped if the function is one of the jsonb_* functions that
+** returns JSONB instead of text JSON.
*/
struct JsonParse {
- u32 nNode; /* Number of slots of aNode[] used */
- u32 nAlloc; /* Number of slots of aNode[] allocated */
- JsonNode *aNode; /* Array of nodes containing the parse */
- char *zJson; /* Original JSON string (before edits) */
- char *zAlt; /* Revised and/or mimified JSON */
- u32 *aUp; /* Index of parent of each node */
- JsonCleanup *pClup;/* Cleanup operations prior to freeing this object */
+ u8 *aBlob; /* JSONB representation of JSON value */
+ u32 nBlob; /* Bytes of aBlob[] actually used */
+ u32 nBlobAlloc; /* Bytes allocated to aBlob[]. 0 if aBlob is external */
+ char *zJson; /* Json text used for parsing */
+ sqlite3 *db; /* The database connection to which this object belongs */
+ int nJson; /* Length of the zJson string in bytes */
+ u32 nJPRef; /* Number of references to this object */
+ u32 iErr; /* Error location in zJson[] */
u16 iDepth; /* Nesting depth */
u8 nErr; /* Number of errors seen */
u8 oom; /* Set to true if out of memory */
u8 bJsonIsRCStr; /* True if zJson is an RCStr */
u8 hasNonstd; /* True if input uses non-standard features like JSON5 */
- u8 useMod; /* Actually use the edits contain inside aNode */
- u8 hasMod; /* aNode contains edits from the original zJson */
- u32 nJPRef; /* Number of references to this object */
- int nJson; /* Length of the zJson string in bytes */
- int nAlt; /* Length of alternative JSON string zAlt, in bytes */
- u32 iErr; /* Error location in zJson[] */
- u32 iSubst; /* Last JSON_SUBST entry in aNode[] */
- u32 iHold; /* Age of this entry in the cache for LRU replacement */
+ u8 bReadOnly; /* Do not modify. */
+ /* Search and edit information. See jsonLookupStep() */
+ u8 eEdit; /* Edit operation to apply */
+ int delta; /* Size change due to the edit */
+ u32 nIns; /* Number of bytes to insert */
+ u32 iLabel; /* Location of label if search landed on an object value */
+ u8 *aIns; /* Content to be inserted */
};
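As a concrete reading of the lifecycle above: a call such as json_insert('{"a":1}', '$.b', 2) roughly parses the text argument into aBlob (step 1), searches aBlob for where '$.b' would live (step 2), records the splice through eEdit, aIns, nIns, and delta (step 3), and finally renders new JSON text from the edited aBlob (step 4); a jsonb_insert() call would stop after step 3 and return the blob itself.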
+/* Allowed values for JsonParse.eEdit */
+#define JEDIT_DEL 1 /* Delete if exists */
+#define JEDIT_REPL 2 /* Overwrite if exists */
+#define JEDIT_INS 3 /* Insert if not exists */
+#define JEDIT_SET 4 /* Insert or overwrite */
+
/*
** Maximum nesting depth of JSON for this implementation.
**
** This limit is needed to avoid a stack overflow in the recursive
** descent parser. A depth of 1000 is far deeper than any sane JSON
** should go. Historical note: This limit was 2000 prior to version 3.42.0
*/
-#define JSON_MAX_DEPTH 1000
+#ifndef SQLITE_JSON_MAX_DEPTH
+# define JSON_MAX_DEPTH 1000
+#else
+# define JSON_MAX_DEPTH SQLITE_JSON_MAX_DEPTH
+#endif
+
+/*
+** Allowed values for the flgs argument to jsonParseFuncArg();
+*/
+#define JSON_EDITABLE 0x01 /* Generate a writable JsonParse object */
+#define JSON_KEEPERROR 0x02 /* Return non-NULL even if there is an error */
+
+/**************************************************************************
+** Forward references
+**************************************************************************/
+static void jsonReturnStringAsBlob(JsonString*);
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson);
+static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*);
+static void jsonReturnParse(sqlite3_context*,JsonParse*);
+static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32);
+static void jsonParseFree(JsonParse*);
+static u32 jsonbPayloadSize(const JsonParse*, u32, u32*);
+static u32 jsonUnescapeOneChar(const char*, u32, u32*);
+
+/**************************************************************************
+** Utility routines for dealing with JsonCache objects
+**************************************************************************/
+
+/*
+** Free a JsonCache object.
+*/
+static void jsonCacheDelete(JsonCache *p){
+ int i;
+ for(i=0; i<p->nUsed; i++){
+ jsonParseFree(p->a[i]);
+ }
+ sqlite3DbFree(p->db, p);
+}
+static void jsonCacheDeleteGeneric(void *p){
+ jsonCacheDelete((JsonCache*)p);
+}
+
+/*
+** Insert a new entry into the cache. If the cache is full, expel
+** the least recently used entry. Return SQLITE_OK on success or a
+** result code otherwise.
+**
+** Cache entries are stored in age order, oldest first.
+*/
+static int jsonCacheInsert(
+ sqlite3_context *ctx, /* The SQL statement context holding the cache */
+ JsonParse *pParse /* The parse object to be added to the cache */
+){
+ JsonCache *p;
+
+ assert( pParse->zJson!=0 );
+ assert( pParse->bJsonIsRCStr );
+ assert( pParse->delta==0 );
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ){
+ sqlite3 *db = sqlite3_context_db_handle(ctx);
+ p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p==0 ) return SQLITE_NOMEM;
+ p->db = db;
+ sqlite3_set_auxdata(ctx, JSON_CACHE_ID, p, jsonCacheDeleteGeneric);
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ) return SQLITE_NOMEM;
+ }
+ if( p->nUsed >= JSON_CACHE_SIZE ){
+ jsonParseFree(p->a[0]);
+ memmove(p->a, &p->a[1], (JSON_CACHE_SIZE-1)*sizeof(p->a[0]));
+ p->nUsed = JSON_CACHE_SIZE-1;
+ }
+ assert( pParse->nBlobAlloc>0 );
+ pParse->eEdit = 0;
+ pParse->nJPRef++;
+ pParse->bReadOnly = 1;
+ p->a[p->nUsed] = pParse;
+ p->nUsed++;
+ return SQLITE_OK;
+}
+
+/*
+** Search for a cached translation of the JSON text supplied by pArg. Return
+** the JsonParse object if found. Return NULL if not found.
+**
+** When a match is found, the matching entry is moved to become the
+** most-recently used entry if it isn't so already.
+**
+** The JsonParse object returned still belongs to the Cache and might
+** be deleted at any moment. If the caller wants the JsonParse to
+** linger, it needs to increment the nJPRef reference counter.
+*/
+static JsonParse *jsonCacheSearch(
+ sqlite3_context *ctx, /* The SQL statement context holding the cache */
+ sqlite3_value *pArg /* Function argument containing SQL text */
+){
+ JsonCache *p;
+ int i;
+ const char *zJson;
+ int nJson;
+
+ if( sqlite3_value_type(pArg)!=SQLITE_TEXT ){
+ return 0;
+ }
+ zJson = (const char*)sqlite3_value_text(pArg);
+ if( zJson==0 ) return 0;
+ nJson = sqlite3_value_bytes(pArg);
+
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ){
+ return 0;
+ }
+ for(i=0; i<p->nUsed; i++){
+ if( p->a[i]->zJson==zJson ) break;
+ }
+ if( i>=p->nUsed ){
+ for(i=0; i<p->nUsed; i++){
+ if( p->a[i]->nJson!=nJson ) continue;
+ if( memcmp(p->a[i]->zJson, zJson, nJson)==0 ) break;
+ }
+ }
+ if( i<p->nUsed ){
+ if( i<p->nUsed-1 ){
+ /* Make the matching entry the most recently used entry */
+ JsonParse *tmp = p->a[i];
+ memmove(&p->a[i], &p->a[i+1], (p->nUsed-i-1)*sizeof(tmp));
+ p->a[p->nUsed-1] = tmp;
+ i = p->nUsed - 1;
+ }
+ assert( p->a[i]->delta==0 );
+ return p->a[i];
+ }else{
+ return 0;
+ }
+}
/**************************************************************************
** Utility routines for dealing with JsonString objects
**************************************************************************/
-/* Set the JsonString object to an empty string
+/* Turn uninitialized bulk memory into a valid JsonString object
+** holding a zero-length string.
*/
-static void jsonZero(JsonString *p){
+static void jsonStringZero(JsonString *p){
p->zBuf = p->zSpace;
p->nAlloc = sizeof(p->zSpace);
p->nUsed = 0;
p->bStatic = 1;
}
/* Initialize the JsonString object
*/
-static void jsonInit(JsonString *p, sqlite3_context *pCtx){
+static void jsonStringInit(JsonString *p, sqlite3_context *pCtx){
p->pCtx = pCtx;
- p->bErr = 0;
- jsonZero(p);
+ p->eErr = 0;
+ jsonStringZero(p);
}
/* Free all allocated memory and reset the JsonString object back to its
** initial state.
*/
-static void jsonReset(JsonString *p){
+static void jsonStringReset(JsonString *p){
if( !p->bStatic ) sqlite3RCStrUnref(p->zBuf);
- jsonZero(p);
+ jsonStringZero(p);
}
/* Report an out-of-memory (OOM) condition
*/
-static void jsonOom(JsonString *p){
- p->bErr = 1;
- sqlite3_result_error_nomem(p->pCtx);
- jsonReset(p);
+static void jsonStringOom(JsonString *p){
+ p->eErr |= JSTRING_OOM;
+ if( p->pCtx ) sqlite3_result_error_nomem(p->pCtx);
+ jsonStringReset(p);
}
/* Enlarge pJson->zBuf so that it can hold at least N more bytes.
** Return zero on success. Return non-zero on an OOM error
*/
-static int jsonGrow(JsonString *p, u32 N){
+static int jsonStringGrow(JsonString *p, u32 N){
u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
char *zNew;
if( p->bStatic ){
- if( p->bErr ) return 1;
+ if( p->eErr ) return 1;
zNew = sqlite3RCStrNew(nTotal);
if( zNew==0 ){
- jsonOom(p);
+ jsonStringOom(p);
return SQLITE_NOMEM;
}
memcpy(zNew, p->zBuf, (size_t)p->nUsed);
p->zBuf = zNew;
p->bStatic = 0;
}else{
p->zBuf = sqlite3RCStrResize(p->zBuf, nTotal);
if( p->zBuf==0 ){
- p->bErr = 1;
- jsonZero(p);
+ p->eErr |= JSTRING_OOM;
+ jsonStringZero(p);
return SQLITE_NOMEM;
}
}
p->nAlloc = nTotal;
return SQLITE_OK;
}
/* Append N bytes from zIn onto the end of the JsonString string.
*/
-static SQLITE_NOINLINE void jsonAppendExpand(
+static SQLITE_NOINLINE void jsonStringExpandAndAppend(
JsonString *p,
const char *zIn,
u32 N
){
assert( N>0 );
- if( jsonGrow(p,N) ) return;
+ if( jsonStringGrow(p,N) ) return;
memcpy(p->zBuf+p->nUsed, zIn, N);
p->nUsed += N;
}
static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
if( N==0 ) return;
if( N+p->nUsed >= p->nAlloc ){
- jsonAppendExpand(p,zIn,N);
+ jsonStringExpandAndAppend(p,zIn,N);
}else{
memcpy(p->zBuf+p->nUsed, zIn, N);
p->nUsed += N;
}
}
static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){
assert( N>0 );
if( N+p->nUsed >= p->nAlloc ){
- jsonAppendExpand(p,zIn,N);
+ jsonStringExpandAndAppend(p,zIn,N);
}else{
memcpy(p->zBuf+p->nUsed, zIn, N);
p->nUsed += N;
}
}
@@ -320,21 +566,21 @@
/* Append formatted text (not to exceed N bytes) to the JsonString.
*/
static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
va_list ap;
- if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
+ if( (p->nUsed + N >= p->nAlloc) && jsonStringGrow(p, N) ) return;
va_start(ap, zFormat);
sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
va_end(ap);
p->nUsed += (int)strlen(p->zBuf+p->nUsed);
}
/* Append a single character
*/
static SQLITE_NOINLINE void jsonAppendCharExpand(JsonString *p, char c){
- if( jsonGrow(p,1) ) return;
+ if( jsonStringGrow(p,1) ) return;
p->zBuf[p->nUsed++] = c;
}
static void jsonAppendChar(JsonString *p, char c){
if( p->nUsed>=p->nAlloc ){
jsonAppendCharExpand(p,c);
@@ -341,27 +587,30 @@
}else{
p->zBuf[p->nUsed++] = c;
}
}
-/* Try to force the string to be a zero-terminated RCStr string.
+/* Remove a single character from the end of the string
+*/
+static void jsonStringTrimOneChar(JsonString *p){
+ if( p->eErr==0 ){
+ assert( p->nUsed>0 );
+ p->nUsed--;
+ }
+}
+
+
+/* Make sure there is a zero terminator on p->zBuf[]
**
** Return true on success. Return false if an OOM prevents this
** from happening.
*/
-static int jsonForceRCStr(JsonString *p){
+static int jsonStringTerminate(JsonString *p){
jsonAppendChar(p, 0);
- if( p->bErr ) return 0;
- p->nUsed--;
- if( p->bStatic==0 ) return 1;
- p->nAlloc = 0;
- p->nUsed++;
- jsonGrow(p, p->nUsed);
- p->nUsed--;
- return p->bStatic==0;
-}
-
+ jsonStringTrimOneChar(p);
+ return p->eErr==0;
+}
/* Append a comma separator to the output buffer, if the previous
** character is not '[' or '{'.
*/
static void jsonAppendSeparator(JsonString *p){
@@ -369,195 +618,125 @@
if( p->nUsed==0 ) return;
c = p->zBuf[p->nUsed-1];
if( c=='[' || c=='{' ) return;
jsonAppendChar(p, ',');
}
+
+/* c is a control character. Append the canonical JSON representation
+** of that control character to p.
+**
+** This routine assumes that the output buffer has already been enlarged
+** sufficiently to hold the worst-case encoding plus a nul terminator.
+*/
+static void jsonAppendControlChar(JsonString *p, u8 c){
+ static const char aSpecial[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ assert( sizeof(aSpecial)==32 );
+ assert( aSpecial['\b']=='b' );
+ assert( aSpecial['\f']=='f' );
+ assert( aSpecial['\n']=='n' );
+ assert( aSpecial['\r']=='r' );
+ assert( aSpecial['\t']=='t' );
+ assert( c>=0 && c<' ' );
+ assert( p->nUsed+7 <= p->nAlloc );
+ if( aSpecial[c] ){
+ p->zBuf[p->nUsed] = '\\';
+ p->zBuf[p->nUsed+1] = aSpecial[c];
+ p->nUsed += 2;
+ }else{
+ p->zBuf[p->nUsed] = '\\';
+ p->zBuf[p->nUsed+1] = 'u';
+ p->zBuf[p->nUsed+2] = '0';
+ p->zBuf[p->nUsed+3] = '0';
+ p->zBuf[p->nUsed+4] = "0123456789abcdef"[c>>4];
+ p->zBuf[p->nUsed+5] = "0123456789abcdef"[c&0xf];
+ p->nUsed += 6;
+ }
+}
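For example, a raw tab byte is emitted as the two-character escape \t (aSpecial['\t']=='t'), while a 0x1f byte, which has no short form, comes out as the six-character escape \u001f.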
/* Append the N-byte string in zIn to the end of the JsonString string
-** under construction. Enclose the string in "..." and escape
-** any double-quotes or backslash characters contained within the
+** under construction. Enclose the string in double-quotes ("...") and
+** escape any double-quotes or backslash characters contained within the
** string.
+**
+** This routine is a high-runner. There is a measurable performance
+** increase associated with unwinding the jsonIsOk[] loop.
*/
static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return;
+ u32 k;
+ u8 c;
+ const u8 *z = (const u8*)zIn;
+ if( z==0 ) return;
+ if( (N+p->nUsed+2 >= p->nAlloc) && jsonStringGrow(p,N+2)!=0 ) return;
p->zBuf[p->nUsed++] = '"';
- for(i=0; i<N; i++){
- unsigned char c = ((unsigned const char*)zIn)[i];
- if( jsonIsOk[c] ){
- p->zBuf[p->nUsed++] = c;
- }else if( c=='"' || c=='\\' ){
- json_simple_escape:
- if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
+ while( 1 /*exit-by-break*/ ){
+ k = 0;
+ /* The following while() is the 4-way unwound equivalent of
+ **
+ ** while( k<N && jsonIsOk[z[k]] ){ k++; }
+ */
+ while( 1 /* Exit by break */ ){
+ if( k+3>=N ){
+ while( k<N && jsonIsOk[z[k]] ){ k++; }
+ break;
+ }
+ if( !jsonIsOk[z[k]] ){
+ break;
+ }
+ if( !jsonIsOk[z[k+1]] ){
+ k += 1;
+ break;
+ }
+ if( !jsonIsOk[z[k+2]] ){
+ k += 2;
+ break;
+ }
+ if( !jsonIsOk[z[k+3]] ){
+ k += 3;
+ break;
+ }
+ k += 4;
+ }
+ if( k>=N ){
+ if( k>0 ){
+ memcpy(&p->zBuf[p->nUsed], z, k);
+ p->nUsed += k;
+ }
+ break;
+ }
+ if( k>0 ){
+ memcpy(&p->zBuf[p->nUsed], z, k);
+ p->nUsed += k;
+ z += k;
+ N -= k;
+ }
+ c = z[0];
+ if( c=='"' || c=='\\' ){
+ if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return;
p->zBuf[p->nUsed++] = '\\';
p->zBuf[p->nUsed++] = c;
}else if( c=='\'' ){
p->zBuf[p->nUsed++] = c;
}else{
- static const char aSpecial[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- assert( sizeof(aSpecial)==32 );
- assert( aSpecial['\b']=='b' );
- assert( aSpecial['\f']=='f' );
- assert( aSpecial['\n']=='n' );
- assert( aSpecial['\r']=='r' );
- assert( aSpecial['\t']=='t' );
- assert( c>=0 && c<' ' );
- if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return;
- p->zBuf[p->nUsed++] = '\\';
- p->zBuf[p->nUsed++] = 'u';
- p->zBuf[p->nUsed++] = '0';
- p->zBuf[p->nUsed++] = '0';
- p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4];
- p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf];
- }
+ if( (p->nUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return;
+ jsonAppendControlChar(p, c);
+ }
+ z++;
+ N--;
}
p->zBuf[p->nUsed++] = '"';
assert( p->nUsed<p->nAlloc );
}
/*
-** The zIn[0..N] string is a JSON5 string literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
-*/
-static void jsonAppendNormalizedString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- jsonAppendChar(p, '"');
- zIn++;
- N -= 2;
- while( N>0 ){
- for(i=0; i<N && zIn[i]!='\\' && zIn[i]!='"'; i++){}
- if( i>0 ){
- jsonAppendRawNZ(p, zIn, i);
- zIn += i;
- N -= i;
- if( N==0 ) break;
- }
- if( zIn[0]=='"' ){
- jsonAppendRawNZ(p, "\\\"", 2);
- zIn++;
- N--;
- continue;
- }
- assert( zIn[0]=='\\' );
- switch( (u8)zIn[1] ){
- case '\'':
- jsonAppendChar(p, '\'');
- break;
- case 'v':
- jsonAppendRawNZ(p, "\\u0009", 6);
- break;
- case 'x':
- jsonAppendRawNZ(p, "\\u00", 4);
- jsonAppendRawNZ(p, &zIn[2], 2);
- zIn += 2;
- N -= 2;
- break;
- case '0':
- jsonAppendRawNZ(p, "\\u0000", 6);
- break;
- case '\r':
- if( zIn[2]=='\n' ){
- zIn++;
- N--;
- }
- break;
- case '\n':
- break;
- case 0xe2:
- assert( N>=4 );
- assert( 0x80==(u8)zIn[2] );
- assert( 0xa8==(u8)zIn[3] || 0xa9==(u8)zIn[3] );
- zIn += 2;
- N -= 2;
- break;
- default:
- jsonAppendRawNZ(p, zIn, 2);
- break;
- }
- zIn += 2;
- N -= 2;
- }
- jsonAppendChar(p, '"');
-}
-
-/*
-** The zIn[0..N] string is a JSON5 integer literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
-*/
-static void jsonAppendNormalizedInt(JsonString *p, const char *zIn, u32 N){
- if( zIn[0]=='+' ){
- zIn++;
- N--;
- }else if( zIn[0]=='-' ){
- jsonAppendChar(p, '-');
- zIn++;
- N--;
- }
- if( zIn[0]=='0' && (zIn[1]=='x' || zIn[1]=='X') ){
- sqlite3_int64 i = 0;
- int rc = sqlite3DecOrHexToI64(zIn, &i);
- if( rc<=1 ){
- jsonPrintf(100,p,"%lld",i);
- }else{
- assert( rc==2 );
- jsonAppendRawNZ(p, "9.0e999", 7);
- }
- return;
- }
- assert( N>0 );
- jsonAppendRawNZ(p, zIn, N);
-}
-
-/*
-** The zIn[0..N] string is a JSON5 real literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
-*/
-static void jsonAppendNormalizedReal(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( zIn[0]=='+' ){
- zIn++;
- N--;
- }else if( zIn[0]=='-' ){
- jsonAppendChar(p, '-');
- zIn++;
- N--;
- }
- if( zIn[0]=='.' ){
- jsonAppendChar(p, '0');
- }
-  for(i=0; i<N; i++){
-    if( zIn[i]=='.' && (i+1==N || !sqlite3Isdigit(zIn[i+1])) ){
-      i++;
-      jsonAppendRaw(p, zIn, i);
-      zIn += i;
-      N -= i;
-      jsonAppendChar(p, '0');
-      break;
-    }
-  }
-  if( N>0 ){
- jsonAppendRawNZ(p, zIn, N);
- }
-}
-
-
-
-/*
-** Append a function parameter value to the JSON string under
-** construction.
-*/
-static void jsonAppendValue(
+** Append an sqlite3_value (such as a function parameter) to the JSON
+** string under construction in p.
+*/
+static void jsonAppendSqlValue(
JsonString *p, /* Append to this JSON string */
sqlite3_value *pValue /* Value to append */
){
switch( sqlite3_value_type(pValue) ){
case SQLITE_NULL: {
@@ -583,591 +762,147 @@
jsonAppendString(p, z, n);
}
break;
}
default: {
- if( p->bErr==0 ){
+ if( jsonFuncArgMightBeBinary(pValue) ){
+ JsonParse px;
+ memset(&px, 0, sizeof(px));
+ px.aBlob = (u8*)sqlite3_value_blob(pValue);
+ px.nBlob = sqlite3_value_bytes(pValue);
+ jsonTranslateBlobToText(&px, 0, p);
+ }else if( p->eErr==0 ){
sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1);
- p->bErr = 2;
- jsonReset(p);
+ p->eErr = JSTRING_ERR;
+ jsonStringReset(p);
}
break;
}
}
}
-
-/* Make the JSON in p the result of the SQL function.
+/* Make the text in p (which is probably a generated JSON text string)
+** the result of the SQL function.
**
-** The JSON string is reset.
+** The JsonString is reset.
+**
+** If pParse and ctx are both non-NULL, then the SQL string in p is
+** loaded into the zJson field of the pParse object as a RCStr and the
+** pParse is added to the cache.
*/
-static void jsonResult(JsonString *p){
- if( p->bErr==0 ){
- if( p->bStatic ){
+static void jsonReturnString(
+ JsonString *p, /* String to return */
+ JsonParse *pParse, /* JSONB source or NULL */
+ sqlite3_context *ctx /* Where to cache */
+){
+ assert( (pParse!=0)==(ctx!=0) );
+ assert( ctx==0 || ctx==p->pCtx );
+ if( p->eErr==0 ){
+ int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(p->pCtx));
+ if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(p);
+ }else if( p->bStatic ){
sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
SQLITE_TRANSIENT, SQLITE_UTF8);
- }else if( jsonForceRCStr(p) ){
- sqlite3RCStrRef(p->zBuf);
- sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
+ }else if( jsonStringTerminate(p) ){
+ if( pParse && pParse->bJsonIsRCStr==0 && pParse->nBlobAlloc>0 ){
+ int rc;
+ pParse->zJson = sqlite3RCStrRef(p->zBuf);
+ pParse->nJson = p->nUsed;
+ pParse->bJsonIsRCStr = 1;
+ rc = jsonCacheInsert(ctx, pParse);
+ if( rc==SQLITE_NOMEM ){
+ sqlite3_result_error_nomem(ctx);
+ jsonStringReset(p);
+ return;
+ }
+ }
+ sqlite3_result_text64(p->pCtx, sqlite3RCStrRef(p->zBuf), p->nUsed,
sqlite3RCStrUnref,
SQLITE_UTF8);
+ }else{
+ sqlite3_result_error_nomem(p->pCtx);
}
- }
- if( p->bErr==1 ){
+ }else if( p->eErr & JSTRING_OOM ){
sqlite3_result_error_nomem(p->pCtx);
+ }else if( p->eErr & JSTRING_MALFORMED ){
+ sqlite3_result_error(p->pCtx, "malformed JSON", -1);
}
- jsonReset(p);
+ jsonStringReset(p);
}
/**************************************************************************
-** Utility routines for dealing with JsonNode and JsonParse objects
+** Utility routines for dealing with JsonParse objects
**************************************************************************/
-/*
-** Return the number of consecutive JsonNode slots need to represent
-** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and
-** OBJECT types, the number might be larger.
-**
-** Appended elements are not counted. The value returned is the number
-** by which the JsonNode counter should increment in order to go to the
-** next peer value.
-*/
-static u32 jsonNodeSize(JsonNode *pNode){
- return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1;
-}
-
/*
** Reclaim all memory allocated by a JsonParse object. But do not
** delete the JsonParse object itself.
*/
static void jsonParseReset(JsonParse *pParse){
- while( pParse->pClup ){
- JsonCleanup *pTask = pParse->pClup;
- pParse->pClup = pTask->pJCNext;
- pTask->xOp(pTask->pArg);
- sqlite3_free(pTask);
- }
assert( pParse->nJPRef<=1 );
- if( pParse->aNode ){
- sqlite3_free(pParse->aNode);
- pParse->aNode = 0;
- }
- pParse->nNode = 0;
- pParse->nAlloc = 0;
- if( pParse->aUp ){
- sqlite3_free(pParse->aUp);
- pParse->aUp = 0;
- }
if( pParse->bJsonIsRCStr ){
sqlite3RCStrUnref(pParse->zJson);
pParse->zJson = 0;
+ pParse->nJson = 0;
pParse->bJsonIsRCStr = 0;
}
- if( pParse->zAlt ){
- sqlite3RCStrUnref(pParse->zAlt);
- pParse->zAlt = 0;
+ if( pParse->nBlobAlloc ){
+ sqlite3DbFree(pParse->db, pParse->aBlob);
+ pParse->aBlob = 0;
+ pParse->nBlob = 0;
+ pParse->nBlobAlloc = 0;
}
}
/*
-** Free a JsonParse object that was obtained from sqlite3_malloc().
-**
-** Note that destroying JsonParse might call sqlite3RCStrUnref() to
-** destroy the zJson value. The RCStr object might recursively invoke
-** JsonParse to destroy this pParse object again. Take care to ensure
-** that this recursive destructor sequence terminates harmlessly.
+** Decrement the reference count on the JsonParse object. When the
+** count reaches zero, free the object.
*/
static void jsonParseFree(JsonParse *pParse){
- if( pParse->nJPRef>1 ){
- pParse->nJPRef--;
- }else{
- jsonParseReset(pParse);
- sqlite3_free(pParse);
- }
-}
-
-/*
-** Add a cleanup task to the JsonParse object.
-**
-** If an OOM occurs, the cleanup operation happens immediately
-** and this function returns SQLITE_NOMEM.
-*/
-static int jsonParseAddCleanup(
- JsonParse *pParse, /* Add the cleanup task to this parser */
- void(*xOp)(void*), /* The cleanup task */
- void *pArg /* Argument to the cleanup */
-){
- JsonCleanup *pTask = sqlite3_malloc64( sizeof(*pTask) );
- if( pTask==0 ){
- pParse->oom = 1;
- xOp(pArg);
- return SQLITE_ERROR;
- }
- pTask->pJCNext = pParse->pClup;
- pParse->pClup = pTask;
- pTask->xOp = xOp;
- pTask->pArg = pArg;
- return SQLITE_OK;
-}
-
-/*
-** Convert the JsonNode pNode into a pure JSON string and
-** append to pOut. Subsubstructure is also included. Return
-** the number of JsonNode objects that are encoded.
-*/
-static void jsonRenderNode(
- JsonParse *pParse, /* the complete parse of the JSON */
- JsonNode *pNode, /* The node to render */
- JsonString *pOut /* Write JSON here */
-){
- assert( pNode!=0 );
- while( (pNode->jnFlags & JNODE_REPLACE)!=0 && pParse->useMod ){
- u32 idx = (u32)(pNode - pParse->aNode);
- u32 i = pParse->iSubst;
- while( 1 /*exit-by-break*/ ){
-      assert( i<pParse->nNode );
- assert( pParse->aNode[i].eType==JSON_SUBST );
- assert( pParse->aNode[i].eU==4 );
-      assert( pParse->aNode[i].u.iPrev<i );
-      if( pParse->aNode[i].n==idx ){
- pNode = &pParse->aNode[i+1];
- break;
- }
- i = pParse->aNode[i].u.iPrev;
- }
- }
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- jsonAppendRawNZ(pOut, "null", 4);
- break;
- }
- case JSON_TRUE: {
- jsonAppendRawNZ(pOut, "true", 4);
- break;
- }
- case JSON_FALSE: {
- jsonAppendRawNZ(pOut, "false", 5);
- break;
- }
- case JSON_STRING: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->jnFlags & JNODE_LABEL ){
- jsonAppendChar(pOut, '"');
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- jsonAppendChar(pOut, '"');
- }else{
- jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
- }
- }else if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedString(pOut, pNode->u.zJContent, pNode->n);
- }else{
- assert( pNode->n>0 );
- jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_REAL: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedReal(pOut, pNode->u.zJContent, pNode->n);
- }else{
- assert( pNode->n>0 );
- jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_INT: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedInt(pOut, pNode->u.zJContent, pNode->n);
- }else{
- assert( pNode->n>0 );
- jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_ARRAY: {
- u32 j = 1;
- jsonAppendChar(pOut, '[');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(pParse, &pNode[j], pOut);
- }
- j += jsonNodeSize(&pNode[j]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- if( pParse->useMod==0 ) break;
- assert( pNode->eU==2 );
- pNode = &pParse->aNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, ']');
- break;
- }
- case JSON_OBJECT: {
- u32 j = 1;
- jsonAppendChar(pOut, '{');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(pParse, &pNode[j], pOut);
- jsonAppendChar(pOut, ':');
- jsonRenderNode(pParse, &pNode[j+1], pOut);
- }
- j += 1 + jsonNodeSize(&pNode[j+1]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- if( pParse->useMod==0 ) break;
- assert( pNode->eU==2 );
- pNode = &pParse->aNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, '}');
- break;
- }
- }
-}
-
-/*
-** Return a JsonNode and all its descendants as a JSON string.
-*/
-static void jsonReturnJson(
- JsonParse *pParse, /* The complete JSON */
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- int bGenerateAlt, /* Also store the rendered text in zAlt */
- int omitSubtype /* Do not call sqlite3_result_subtype() */
-){
- JsonString s;
- if( pParse->oom ){
- sqlite3_result_error_nomem(pCtx);
- return;
- }
- if( pParse->nErr==0 ){
- jsonInit(&s, pCtx);
- jsonRenderNode(pParse, pNode, &s);
- if( bGenerateAlt && pParse->zAlt==0 && jsonForceRCStr(&s) ){
- pParse->zAlt = sqlite3RCStrRef(s.zBuf);
- pParse->nAlt = s.nUsed;
- }
- jsonResult(&s);
- if( !omitSubtype ) sqlite3_result_subtype(pCtx, JSON_SUBTYPE);
- }
-}
+ if( pParse ){
+ if( pParse->nJPRef>1 ){
+ pParse->nJPRef--;
+ }else{
+ jsonParseReset(pParse);
+ sqlite3DbFree(pParse->db, pParse);
+ }
+ }
+}
+
+/**************************************************************************
+** Utility routines for the JSON text parser
+**************************************************************************/
/*
** Translate a single byte of Hex into an integer.
-** This routine only works if h really is a valid hexadecimal
-** character: 0..9a..fA..F
+** This routine only gives a correct answer if h really is a valid hexadecimal
+** character: 0..9a..fA..F. But unlike sqlite3HexToInt(), it does not
+** assert() if the digit is not hex.
*/
static u8 jsonHexToInt(int h){
- assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') );
+#ifdef SQLITE_ASCII
+ h += 9*(1&(h>>6));
+#endif
#ifdef SQLITE_EBCDIC
h += 9*(1&~(h>>4));
-#else
- h += 9*(1&(h>>6));
#endif
return (u8)(h & 0xf);
}
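
The branchless arithmetic above relies on the ASCII layout: for '0'..'9' bit
0x40 is clear, so nothing is added and the low nibble is already the value;
for 'a'..'f' and 'A'..'F' bit 0x40 is set, so 9 is added before masking
(for example 'a'&0xf is 1, plus 9 gives 10).  A throwaway self-check,
assuming an ASCII build (illustrative only, not part of the patch):

  #include <assert.h>

  static unsigned hex1(int h){      /* same arithmetic as jsonHexToInt() */
    h += 9*(1&(h>>6));              /* add 9 only for letters */
    return (unsigned)(h & 0xf);
  }

  int main(void){
    assert( hex1('0')==0 && hex1('9')==9 );
    assert( hex1('a')==10 && hex1('f')==15 );
    assert( hex1('A')==10 && hex1('F')==15 );
    return 0;
  }
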
/*
** Convert a 4-byte hex string into an integer
*/
static u32 jsonHexToInt4(const char *z){
u32 v;
- assert( sqlite3Isxdigit(z[0]) );
- assert( sqlite3Isxdigit(z[1]) );
- assert( sqlite3Isxdigit(z[2]) );
- assert( sqlite3Isxdigit(z[3]) );
v = (jsonHexToInt(z[0])<<12)
+ (jsonHexToInt(z[1])<<8)
+ (jsonHexToInt(z[2])<<4)
+ jsonHexToInt(z[3]);
return v;
}
-/*
-** Make the JsonNode the return value of the function.
-*/
-static void jsonReturn(
- JsonParse *pParse, /* Complete JSON parse tree */
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- int omitSubtype /* Do not call sqlite3_result_subtype() */
-){
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- sqlite3_result_null(pCtx);
- break;
- }
- case JSON_TRUE: {
- sqlite3_result_int(pCtx, 1);
- break;
- }
- case JSON_FALSE: {
- sqlite3_result_int(pCtx, 0);
- break;
- }
- case JSON_INT: {
- sqlite3_int64 i = 0;
- int rc;
- int bNeg = 0;
- const char *z;
-
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- if( z[0]=='-' ){ z++; bNeg = 1; }
- else if( z[0]=='+' ){ z++; }
- rc = sqlite3DecOrHexToI64(z, &i);
- if( rc<=1 ){
- sqlite3_result_int64(pCtx, bNeg ? -i : i);
- }else if( rc==3 && bNeg ){
- sqlite3_result_int64(pCtx, SMALLEST_INT64);
- }else{
- goto to_double;
- }
- break;
- }
- case JSON_REAL: {
- double r;
- const char *z;
- assert( pNode->eU==1 );
- to_double:
- z = pNode->u.zJContent;
- sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
- sqlite3_result_double(pCtx, r);
- break;
- }
- case JSON_STRING: {
- if( pNode->jnFlags & JNODE_RAW ){
- assert( pNode->eU==1 );
- sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
- SQLITE_TRANSIENT);
- }else if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
- /* JSON formatted without any backslash-escapes */
- assert( pNode->eU==1 );
- sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
- SQLITE_TRANSIENT);
- }else{
- /* Translate JSON formatted string into raw text */
- u32 i;
- u32 n = pNode->n;
- const char *z;
- char *zOut;
- u32 j;
- u32 nOut = n;
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- zOut = sqlite3_malloc( nOut+1 );
- if( zOut==0 ){
- sqlite3_result_error_nomem(pCtx);
- break;
- }
-        for(i=1, j=0; i<n-1; i++){
-          char c = z[i];
-          if( c=='\\' ){
-            c = z[++i];
-            if( c=='u' ){
-              u32 v = jsonHexToInt4(z+i+1);
-              i += 4;
-              if( v==0 ) break;
-              if( v<=0x7f ){
-                zOut[j++] = (char)v;
-              }else if( v<=0x7ff ){
-                zOut[j++] = (char)(0xc0 | (v>>6));
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- u32 vlo;
- if( (v&0xfc00)==0xd800
-              && i<n-6
-              && z[i+1]=='\\'
-              && z[i+2]=='u'
-              && ((vlo = jsonHexToInt4(z+i+3))&0xfc00)==0xdc00
-            ){
-              /* We have a surrogate pair */
-              v = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
-              i += 6;
-              zOut[j++] = 0xf0 | (v>>18);
- zOut[j++] = 0x80 | ((v>>12)&0x3f);
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- zOut[j++] = 0xe0 | (v>>12);
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }
- }
- continue;
- }else if( c=='b' ){
- c = '\b';
- }else if( c=='f' ){
- c = '\f';
- }else if( c=='n' ){
- c = '\n';
- }else if( c=='r' ){
- c = '\r';
- }else if( c=='t' ){
- c = '\t';
- }else if( c=='v' ){
- c = '\v';
- }else if( c=='\'' || c=='"' || c=='/' || c=='\\' ){
- /* pass through unchanged */
- }else if( c=='0' ){
- c = 0;
- }else if( c=='x' ){
- c = (jsonHexToInt(z[i+1])<<4) | jsonHexToInt(z[i+2]);
- i += 2;
- }else if( c=='\r' && z[i+1]=='\n' ){
- i++;
- continue;
- }else if( 0xe2==(u8)c ){
- assert( 0x80==(u8)z[i+1] );
- assert( 0xa8==(u8)z[i+2] || 0xa9==(u8)z[i+2] );
- i += 2;
- continue;
- }else{
- continue;
- }
- } /* end if( c=='\\' ) */
- zOut[j++] = c;
- } /* end for() */
- zOut[j] = 0;
- sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
- }
- break;
- }
- case JSON_ARRAY:
- case JSON_OBJECT: {
- jsonReturnJson(pParse, pNode, pCtx, 0, omitSubtype);
- break;
- }
- }
-}
-
-/* Forward reference */
-static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
-
-/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-# define JSON_NOINLINE __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-# define JSON_NOINLINE __declspec(noinline)
-#else
-# define JSON_NOINLINE
-#endif
-
-
-/*
-** Add a single node to pParse->aNode after first expanding the
-** size of the aNode array. Return the index of the new node.
-**
-** If an OOM error occurs, set pParse->oom and return -1.
-*/
-static JSON_NOINLINE int jsonParseAddNodeExpand(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- u32 nNew;
- JsonNode *pNew;
- assert( pParse->nNode>=pParse->nAlloc );
- if( pParse->oom ) return -1;
- nNew = pParse->nAlloc*2 + 10;
- pNew = sqlite3_realloc64(pParse->aNode, sizeof(JsonNode)*nNew);
- if( pNew==0 ){
- pParse->oom = 1;
- return -1;
- }
- pParse->nAlloc = sqlite3_msize(pNew)/sizeof(JsonNode);
- pParse->aNode = pNew;
-  assert( pParse->nNode<pParse->nAlloc );
- return jsonParseAddNode(pParse, eType, n, zContent);
-}
-
-/*
-** Create a new JsonNode instance based on the arguments and append that
-** instance to the JsonParse. Return the index in pParse->aNode[] of the
-** new node, or -1 if a memory allocation fails.
-*/
-static int jsonParseAddNode(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- JsonNode *p;
- assert( pParse->aNode!=0 || pParse->nNode>=pParse->nAlloc );
- if( pParse->nNode>=pParse->nAlloc ){
- return jsonParseAddNodeExpand(pParse, eType, n, zContent);
- }
- assert( pParse->aNode!=0 );
- p = &pParse->aNode[pParse->nNode];
- assert( p!=0 );
- p->eType = (u8)(eType & 0xff);
- p->jnFlags = (u8)(eType >> 8);
- VVA( p->eU = zContent ? 1 : 0 );
- p->n = n;
- p->u.zJContent = zContent;
- return pParse->nNode++;
-}
-
-/*
-** Add an array of new nodes to the current pParse->aNode array.
-** Return the index of the first node added.
-**
-** If an OOM error occurs, set pParse->oom.
-*/
-static void jsonParseAddNodeArray(
- JsonParse *pParse, /* Append the node to this object */
- JsonNode *aNode, /* Array of nodes to add */
- u32 nNode /* Number of elements in aNew */
-){
- assert( aNode!=0 );
- assert( nNode>=1 );
- if( pParse->nNode + nNode > pParse->nAlloc ){
- u32 nNew = pParse->nNode + nNode;
- JsonNode *aNew = sqlite3_realloc64(pParse->aNode, nNew*sizeof(JsonNode));
- if( aNew==0 ){
- pParse->oom = 1;
- return;
- }
- pParse->nAlloc = sqlite3_msize(aNew)/sizeof(JsonNode);
- pParse->aNode = aNew;
- }
- memcpy(&pParse->aNode[pParse->nNode], aNode, nNode*sizeof(JsonNode));
- pParse->nNode += nNode;
-}
-
-/*
-** Add a new JSON_SUBST node. The node immediately following
-** this new node will be the substitute content for iNode.
-*/
-static int jsonParseAddSubstNode(
- JsonParse *pParse, /* Add the JSON_SUBST here */
- u32 iNode /* References this node */
-){
- int idx = jsonParseAddNode(pParse, JSON_SUBST, iNode, 0);
- if( pParse->oom ) return -1;
- pParse->aNode[iNode].jnFlags |= JNODE_REPLACE;
- pParse->aNode[idx].eU = 4;
- pParse->aNode[idx].u.iPrev = pParse->iSubst;
- pParse->iSubst = idx;
- pParse->hasMod = 1;
- pParse->useMod = 1;
- return idx;
-}
-
/*
** Return true if z[] begins with 2 (or more) hexadecimal digits
*/
static int jsonIs2Hex(const char *z){
return sqlite3Isxdigit(z[0]) && sqlite3Isxdigit(z[1]);
@@ -1317,101 +1052,541 @@
char eType;
char nRepl;
char *zMatch;
char *zRepl;
} aNanInfName[] = {
- { 'i', 'I', 3, JSON_REAL, 7, "inf", "9.0e999" },
- { 'i', 'I', 8, JSON_REAL, 7, "infinity", "9.0e999" },
- { 'n', 'N', 3, JSON_NULL, 4, "NaN", "null" },
- { 'q', 'Q', 4, JSON_NULL, 4, "QNaN", "null" },
- { 's', 'S', 4, JSON_NULL, 4, "SNaN", "null" },
+ { 'i', 'I', 3, JSONB_FLOAT, 7, "inf", "9.0e999" },
+ { 'i', 'I', 8, JSONB_FLOAT, 7, "infinity", "9.0e999" },
+ { 'n', 'N', 3, JSONB_NULL, 4, "NaN", "null" },
+ { 'q', 'Q', 4, JSONB_NULL, 4, "QNaN", "null" },
+ { 's', 'S', 4, JSONB_NULL, 4, "SNaN", "null" },
};
+
+/*
+** Report the wrong number of arguments for json_insert(), json_replace()
+** or json_set().
+*/
+static void jsonWrongNumArgs(
+ sqlite3_context *pCtx,
+ const char *zFuncName
+){
+ char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
+ zFuncName);
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+}
+
+/****************************************************************************
+** Utility routines for dealing with the binary BLOB representation of JSON
+****************************************************************************/
+
+/*
+** Expand pParse->aBlob so that it holds at least N bytes.
+**
+** Return the number of errors.
+*/
+static int jsonBlobExpand(JsonParse *pParse, u32 N){
+ u8 *aNew;
+ u32 t;
+ assert( N>pParse->nBlobAlloc );
+ if( pParse->nBlobAlloc==0 ){
+ t = 100;
+ }else{
+ t = pParse->nBlobAlloc*2;
+ }
+  if( t<N ) t = N+100;
+  aNew = sqlite3DbRealloc(pParse->db, pParse->aBlob, t);
+ if( aNew==0 ){ pParse->oom = 1; return 1; }
+ pParse->aBlob = aNew;
+ pParse->nBlobAlloc = t;
+ return 0;
+}
+
+/*
+** If pParse->aBlob is not previously editable (because it is taken
+** from sqlite3_value_blob(), as indicated by the fact that
+** pParse->nBlobAlloc==0 and pParse->nBlob>0) then make it editable
+** by making a copy into space obtained from malloc.
+**
+** Return true on success. Return false on OOM.
+*/
+static int jsonBlobMakeEditable(JsonParse *pParse, u32 nExtra){
+ u8 *aOld;
+ u32 nSize;
+ assert( !pParse->bReadOnly );
+ if( pParse->oom ) return 0;
+ if( pParse->nBlobAlloc>0 ) return 1;
+ aOld = pParse->aBlob;
+ nSize = pParse->nBlob + nExtra;
+ pParse->aBlob = 0;
+ if( jsonBlobExpand(pParse, nSize) ){
+ return 0;
+ }
+ assert( pParse->nBlobAlloc >= pParse->nBlob + nExtra );
+ memcpy(pParse->aBlob, aOld, pParse->nBlob);
+ return 1;
+}
+
+/* Expand pParse->aBlob and append one byte.
+*/
+static SQLITE_NOINLINE void jsonBlobExpandAndAppendOneByte(
+ JsonParse *pParse,
+ u8 c
+){
+ jsonBlobExpand(pParse, pParse->nBlob+1);
+ if( pParse->oom==0 ){
+ assert( pParse->nBlob+1<=pParse->nBlobAlloc );
+ pParse->aBlob[pParse->nBlob++] = c;
+ }
+}
+
+/* Append a single character.
+*/
+static void jsonBlobAppendOneByte(JsonParse *pParse, u8 c){
+ if( pParse->nBlob >= pParse->nBlobAlloc ){
+ jsonBlobExpandAndAppendOneByte(pParse, c);
+ }else{
+ pParse->aBlob[pParse->nBlob++] = c;
+ }
+}
+
+/* Slow version of jsonBlobAppendNode() that first resizes the
+** pParse->aBlob structure.
+*/
+static void jsonBlobAppendNode(JsonParse*,u8,u32,const void*);
+static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode(
+ JsonParse *pParse,
+ u8 eType,
+ u32 szPayload,
+ const void *aPayload
+){
+ if( jsonBlobExpand(pParse, pParse->nBlob+szPayload+9) ) return;
+ jsonBlobAppendNode(pParse, eType, szPayload, aPayload);
+}
+
+
+/* Append a node type byte together with the payload size and
+** possibly also the payload.
+**
+** If aPayload is not NULL, then it is a pointer to the payload which
+** is also appended. If aPayload is NULL, the pParse->aBlob[] array
+** is resized (if necessary) so that it is big enough to hold the
+** payload, but the payload is not appended and pParse->nBlob is left
+** pointing to where the first byte of payload will eventually be.
+*/
+static void jsonBlobAppendNode(
+ JsonParse *pParse, /* The JsonParse object under construction */
+ u8 eType, /* Node type. One of JSONB_* */
+ u32 szPayload, /* Number of bytes of payload */
+ const void *aPayload /* The payload. Might be NULL */
+){
+ u8 *a;
+ if( pParse->nBlob+szPayload+9 > pParse->nBlobAlloc ){
+ jsonBlobExpandAndAppendNode(pParse,eType,szPayload,aPayload);
+ return;
+ }
+ assert( pParse->aBlob!=0 );
+ a = &pParse->aBlob[pParse->nBlob];
+ if( szPayload<=11 ){
+ a[0] = eType | (szPayload<<4);
+ pParse->nBlob += 1;
+ }else if( szPayload<=0xff ){
+ a[0] = eType | 0xc0;
+ a[1] = szPayload & 0xff;
+ pParse->nBlob += 2;
+ }else if( szPayload<=0xffff ){
+ a[0] = eType | 0xd0;
+ a[1] = (szPayload >> 8) & 0xff;
+ a[2] = szPayload & 0xff;
+ pParse->nBlob += 3;
+ }else{
+ a[0] = eType | 0xe0;
+ a[1] = (szPayload >> 24) & 0xff;
+ a[2] = (szPayload >> 16) & 0xff;
+ a[3] = (szPayload >> 8) & 0xff;
+ a[4] = szPayload & 0xff;
+ pParse->nBlob += 5;
+ }
+ if( aPayload ){
+ pParse->nBlob += szPayload;
+ memcpy(&pParse->aBlob[pParse->nBlob-szPayload], aPayload, szPayload);
+ }
+}
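
To make the header format concrete: the low nibble of the first byte is the
node type and the high nibble encodes the payload size -- directly when the
size is 11 or less, otherwise 0xc, 0xd, or 0xe selects a 1-, 2-, or 4-byte
big-endian length that follows.  A hedged standalone sketch of the same
encoding rules (the longer 8-byte length form accepted by the decoder is
never generated, matching jsonBlobAppendNode() above):

  /* Write a JSONB header for a payload of szPayload bytes into aOut[]
  ** and return the number of header bytes produced (1, 2, 3, or 5). */
  static int jsonb_header(unsigned char *aOut, unsigned char eType,
                          unsigned szPayload){
    if( szPayload<=11 ){
      aOut[0] = eType | (szPayload<<4);        /* size in the upper nibble */
      return 1;
    }else if( szPayload<=0xff ){
      aOut[0] = eType | 0xc0;                  /* one size byte follows */
      aOut[1] = szPayload & 0xff;
      return 2;
    }else if( szPayload<=0xffff ){
      aOut[0] = eType | 0xd0;                  /* two size bytes follow */
      aOut[1] = (szPayload>>8) & 0xff;
      aOut[2] = szPayload & 0xff;
      return 3;
    }else{
      aOut[0] = eType | 0xe0;                  /* four size bytes follow */
      aOut[1] = (szPayload>>24) & 0xff;
      aOut[2] = (szPayload>>16) & 0xff;
      aOut[3] = (szPayload>>8) & 0xff;
      aOut[4] = szPayload & 0xff;
      return 5;
    }
  }
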
+
+/* Change the payload size for the node at index i to be szPayload.
+*/
+static int jsonBlobChangePayloadSize(
+ JsonParse *pParse,
+ u32 i,
+ u32 szPayload
+){
+ u8 *a;
+ u8 szType;
+ u8 nExtra;
+ u8 nNeeded;
+ int delta;
+ if( pParse->oom ) return 0;
+ a = &pParse->aBlob[i];
+ szType = a[0]>>4;
+ if( szType<=11 ){
+ nExtra = 0;
+ }else if( szType==12 ){
+ nExtra = 1;
+ }else if( szType==13 ){
+ nExtra = 2;
+ }else{
+ nExtra = 4;
+ }
+ if( szPayload<=11 ){
+ nNeeded = 0;
+ }else if( szPayload<=0xff ){
+ nNeeded = 1;
+ }else if( szPayload<=0xffff ){
+ nNeeded = 2;
+ }else{
+ nNeeded = 4;
+ }
+ delta = nNeeded - nExtra;
+ if( delta ){
+ u32 newSize = pParse->nBlob + delta;
+ if( delta>0 ){
+ if( newSize>pParse->nBlobAlloc && jsonBlobExpand(pParse, newSize) ){
+ return 0; /* OOM error. Error state recorded in pParse->oom. */
+ }
+ a = &pParse->aBlob[i];
+ memmove(&a[1+delta], &a[1], pParse->nBlob - (i+1));
+ }else{
+ memmove(&a[1], &a[1-delta], pParse->nBlob - (i+1-delta));
+ }
+ pParse->nBlob = newSize;
+ }
+ if( nNeeded==0 ){
+ a[0] = (a[0] & 0x0f) | (szPayload<<4);
+ }else if( nNeeded==1 ){
+ a[0] = (a[0] & 0x0f) | 0xc0;
+ a[1] = szPayload & 0xff;
+ }else if( nNeeded==2 ){
+ a[0] = (a[0] & 0x0f) | 0xd0;
+ a[1] = (szPayload >> 8) & 0xff;
+ a[2] = szPayload & 0xff;
+ }else{
+ a[0] = (a[0] & 0x0f) | 0xe0;
+ a[1] = (szPayload >> 24) & 0xff;
+ a[2] = (szPayload >> 16) & 0xff;
+ a[3] = (szPayload >> 8) & 0xff;
+ a[4] = szPayload & 0xff;
+ }
+ return delta;
+}
+
+/*
+** If z[0] is 'u' and is followed by exactly 4 hexadecimal characters,
+** then set *pOp to JSONB_TEXTJ and return true. If not, do not make
+** any changes to *pOp and return false.
+*/
+static int jsonIs4HexB(const char *z, int *pOp){
+ if( z[0]!='u' ) return 0;
+ if( !jsonIs4Hex(&z[1]) ) return 0;
+ *pOp = JSONB_TEXTJ;
+ return 1;
+}
+
+/*
+** Check a single element of the JSONB in pParse for validity.
+**
+** The element to be checked starts at offset i and must end on the
+** last byte before iEnd.
+**
+** Return 0 if everything is correct. Return the 1-based byte offset of the
+** error if a problem is detected. (In other words, if the error is at offset
+** 0, return 1).
+*/
+static u32 jsonbValidityCheck(
+ const JsonParse *pParse, /* Input JSONB. Only aBlob and nBlob are used */
+ u32 i, /* Start of element as pParse->aBlob[i] */
+ u32 iEnd, /* One more than the last byte of the element */
+ u32 iDepth /* Current nesting depth */
+){
+ u32 n, sz, j, k;
+ const u8 *z;
+ u8 x;
+ if( iDepth>JSON_MAX_DEPTH ) return i+1;
+ sz = 0;
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( NEVER(n==0) ) return i+1; /* Checked by caller */
+ if( NEVER(i+n+sz!=iEnd) ) return i+1; /* Checked by caller */
+ z = pParse->aBlob;
+ x = z[i] & 0x0f;
+ switch( x ){
+ case JSONB_NULL:
+ case JSONB_TRUE:
+ case JSONB_FALSE: {
+ return n+sz==1 ? 0 : i+1;
+ }
+ case JSONB_INT: {
+ if( sz<1 ) return i+1;
+ j = i+n;
+ if( z[j]=='-' ){
+ j++;
+ if( sz<2 ) return i+1;
+ }
+ k = i+n+sz;
+ while( jk ) return j+1;
+ if( z[j+1]!='.' && z[j+1]!='e' && z[j+1]!='E' ) return j+1;
+ j++;
+ }
+ for(; j0 ) return j+1;
+ if( x==JSONB_FLOAT && (j==k-1 || !sqlite3Isdigit(z[j+1])) ){
+ return j+1;
+ }
+ seen = 1;
+ continue;
+ }
+ if( z[j]=='e' || z[j]=='E' ){
+ if( seen==2 ) return j+1;
+ if( j==k-1 ) return j+1;
+ if( z[j+1]=='+' || z[j+1]=='-' ){
+ j++;
+ if( j==k-1 ) return j+1;
+ }
+ seen = 2;
+ continue;
+ }
+ return j+1;
+ }
+ if( seen==0 ) return i+1;
+ return 0;
+ }
+ case JSONB_TEXT: {
+ j = i+n;
+ k = j+sz;
+ while( j=k ){
+ return j+1;
+ }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){
+ j++;
+ }else if( z[j+1]=='u' ){
+ if( j+5>=k ) return j+1;
+ if( !jsonIs4Hex((const char*)&z[j+2]) ) return j+1;
+ j++;
+ }else if( x!=JSONB_TEXT5 ){
+ return j+1;
+ }else{
+ u32 c = 0;
+ u32 szC = jsonUnescapeOneChar((const char*)&z[j], k-j, &c);
+ if( c==JSON_INVALID_CHAR ) return j+1;
+ j += szC - 1;
+ }
+ }
+ j++;
+ }
+ return 0;
+ }
+ case JSONB_TEXTRAW: {
+ return 0;
+ }
+ case JSONB_ARRAY: {
+ u32 sub;
+ j = i+n;
+ k = j+sz;
+      while( j<k ){
+        n = jsonbPayloadSize(pParse, j, &sz);
+        if( n==0 ) return j+1;
+        if( j+n+sz>k ) return j+1;
+ sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+ if( sub ) return sub;
+ j += n + sz;
+ }
+ assert( j==k );
+ return 0;
+ }
+ case JSONB_OBJECT: {
+ u32 cnt = 0;
+ u32 sub;
+ j = i+n;
+ k = j+sz;
+      while( j<k ){
+        n = jsonbPayloadSize(pParse, j, &sz);
+        if( n==0 ) return j+1;
+        if( j+n+sz>k ) return j+1;
+ if( (cnt & 1)==0 ){
+ x = z[j] & 0x0f;
+          if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return j+1;
+ }
+ sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+ if( sub ) return sub;
+ cnt++;
+ j += n + sz;
+ }
+ assert( j==k );
+ if( (cnt & 1)!=0 ) return j+1;
+ return 0;
+ }
+ default: {
+ return i+1;
+ }
+ }
+}
+
/*
-** Parse a single JSON value which begins at pParse->zJson[i]. Return the
-** index of the first character past the end of the value parsed.
+** Translate a single element of JSON text at pParse->zJson[i] into
+** its equivalent binary JSONB representation. Append the translation into
+** pParse->aBlob[] beginning at pParse->nBlob. The size of
+** pParse->aBlob[] is increased as necessary.
**
-** Special return values:
+** Return the index of the first character past the end of the element parsed,
+** or one of the following special result codes:
**
** 0 End of input
-** -1 Syntax error
-** -2 '}' seen
-** -3 ']' seen
-** -4 ',' seen
-** -5 ':' seen
+** -1 Syntax error or OOM
+** -2 '}' seen \
+** -3 ']' seen \___ For these returns, pParse->iErr is set to
+** -4 ',' seen / the index in zJson[] of the seen character
+** -5 ':' seen /
*/
-static int jsonParseValue(JsonParse *pParse, u32 i){
+static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){
char c;
u32 j;
- int iThis;
+ u32 iThis, iStart;
int x;
- JsonNode *pNode;
+ u8 t;
const char *z = pParse->zJson;
json_parse_restart:
switch( (u8)z[i] ){
case '{': {
/* Parse object */
- iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- if( iThis<0 ) return -1;
+ iThis = pParse->nBlob;
+ jsonBlobAppendNode(pParse, JSONB_OBJECT, pParse->nJson-i, 0);
if( ++pParse->iDepth > JSON_MAX_DEPTH ){
pParse->iErr = i;
return -1;
}
+ iStart = pParse->nBlob;
for(j=i+1;;j++){
- u32 nNode = pParse->nNode;
- x = jsonParseValue(pParse, j);
+ u32 iBlob = pParse->nBlob;
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
+ int op;
if( x==(-2) ){
j = pParse->iErr;
- if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1;
+ if( pParse->nBlob!=(u32)iStart ) pParse->hasNonstd = 1;
break;
}
j += json5Whitespace(&z[j]);
- if( sqlite3JsonId1(z[j])
- || (z[j]=='\\' && z[j+1]=='u' && jsonIs4Hex(&z[j+2]))
+ op = JSONB_TEXT;
+ if( sqlite3JsonId1(z[j])
+ || (z[j]=='\\' && jsonIs4HexB(&z[j+1], &op))
){
int k = j+1;
while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0)
- || (z[k]=='\\' && z[k+1]=='u' && jsonIs4Hex(&z[k+2]))
+ || (z[k]=='\\' && jsonIs4HexB(&z[k+1], &op))
){
k++;
}
- jsonParseAddNode(pParse, JSON_STRING | (JNODE_RAW<<8), k-j, &z[j]);
+ assert( iBlob==pParse->nBlob );
+ jsonBlobAppendNode(pParse, op, k-j, &z[j]);
pParse->hasNonstd = 1;
x = k;
}else{
if( x!=-1 ) pParse->iErr = j;
return -1;
}
}
if( pParse->oom ) return -1;
- pNode = &pParse->aNode[nNode];
- if( pNode->eType!=JSON_STRING ){
+ t = pParse->aBlob[iBlob] & 0x0f;
+        if( t<JSONB_TEXT || t>JSONB_TEXTRAW ){
pParse->iErr = j;
return -1;
}
- pNode->jnFlags |= JNODE_LABEL;
j = x;
if( z[j]==':' ){
j++;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ /* strspn() is not helpful here */
+ do{ j++; }while( jsonIsspace(z[j]) );
if( z[j]==':' ){
j++;
goto parse_object_value;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x!=(-5) ){
if( x!=(-1) ) pParse->iErr = j;
return -1;
}
j = pParse->iErr+1;
}
parse_object_value:
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
if( x!=(-1) ) pParse->iErr = j;
return -1;
}
j = x;
@@ -1418,19 +1593,19 @@
if( z[j]==',' ){
continue;
}else if( z[j]=='}' ){
break;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ j += 1 + (u32)strspn(&z[j+1], jsonSpaces);
if( z[j]==',' ){
continue;
}else if( z[j]=='}' ){
break;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x==(-4) ){
j = pParse->iErr;
continue;
}
if( x==(-2) ){
@@ -1439,29 +1614,30 @@
}
}
pParse->iErr = j;
return -1;
}
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart);
pParse->iDepth--;
return j+1;
}
case '[': {
/* Parse array */
- iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- if( iThis<0 ) return -1;
+ iThis = pParse->nBlob;
+ jsonBlobAppendNode(pParse, JSONB_ARRAY, pParse->nJson - i, 0);
+ iStart = pParse->nBlob;
+ if( pParse->oom ) return -1;
if( ++pParse->iDepth > JSON_MAX_DEPTH ){
pParse->iErr = i;
return -1;
}
- memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u));
for(j=i+1;;j++){
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
if( x==(-3) ){
j = pParse->iErr;
- if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1;
+ if( pParse->nBlob!=iStart ) pParse->hasNonstd = 1;
break;
}
if( x!=(-1) ) pParse->iErr = j;
return -1;
}
@@ -1469,19 +1645,19 @@
if( z[j]==',' ){
continue;
}else if( z[j]==']' ){
break;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ j += 1 + (u32)strspn(&z[j+1], jsonSpaces);
if( z[j]==',' ){
continue;
}else if( z[j]==']' ){
break;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x==(-4) ){
j = pParse->iErr;
continue;
}
if( x==(-3) ){
@@ -1490,86 +1666,103 @@
}
}
pParse->iErr = j;
return -1;
}
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart);
pParse->iDepth--;
return j+1;
}
case '\'': {
- u8 jnFlags;
+ u8 opcode;
char cDelim;
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ opcode = JSONB_TEXT;
goto parse_string;
case '"':
/* Parse string */
- jnFlags = 0;
+ opcode = JSONB_TEXT;
parse_string:
cDelim = z[i];
- for(j=i+1; 1; j++){
- if( jsonIsOk[(unsigned char)z[j]] ) continue;
+ j = i+1;
+ while( 1 /*exit-by-break*/ ){
+ if( jsonIsOk[(u8)z[j]] ){
+ if( !jsonIsOk[(u8)z[j+1]] ){
+ j += 1;
+ }else if( !jsonIsOk[(u8)z[j+2]] ){
+ j += 2;
+ }else{
+ j += 3;
+ continue;
+ }
+ }
c = z[j];
if( c==cDelim ){
break;
}else if( c=='\\' ){
c = z[++j];
if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f'
|| c=='n' || c=='r' || c=='t'
|| (c=='u' && jsonIs4Hex(&z[j+1])) ){
- jnFlags |= JNODE_ESCAPE;
+ if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ;
}else if( c=='\'' || c=='0' || c=='v' || c=='\n'
|| (0xe2==(u8)c && 0x80==(u8)z[j+1]
&& (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2]))
|| (c=='x' && jsonIs2Hex(&z[j+1])) ){
- jnFlags |= (JNODE_ESCAPE|JNODE_JSON5);
+ opcode = JSONB_TEXT5;
pParse->hasNonstd = 1;
}else if( c=='\r' ){
if( z[j+1]=='\n' ) j++;
- jnFlags |= (JNODE_ESCAPE|JNODE_JSON5);
+ opcode = JSONB_TEXT5;
pParse->hasNonstd = 1;
}else{
pParse->iErr = j;
return -1;
}
}else if( c<=0x1f ){
- /* Control characters are not allowed in strings */
- pParse->iErr = j;
- return -1;
+ if( c==0 ){
+ pParse->iErr = j;
+ return -1;
+ }
+ /* Control characters are not allowed in canonical JSON string
+ ** literals, but are allowed in JSON5 string literals. */
+ opcode = JSONB_TEXT5;
+ pParse->hasNonstd = 1;
+ }else if( c=='"' ){
+ opcode = JSONB_TEXT5;
}
+ j++;
}
- jsonParseAddNode(pParse, JSON_STRING | (jnFlags<<8), j+1-i, &z[i]);
+ jsonBlobAppendNode(pParse, opcode, j-1-i, &z[i+1]);
return j+1;
}
case 't': {
if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_TRUE);
return i+4;
}
pParse->iErr = i;
return -1;
}
case 'f': {
if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){
- jsonParseAddNode(pParse, JSON_FALSE, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_FALSE);
return i+5;
}
pParse->iErr = i;
return -1;
}
case '+': {
- u8 seenDP, seenE, jnFlags;
+ u8 seenE;
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
goto parse_number;
case '.':
if( sqlite3Isdigit(z[i+1]) ){
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ t = 0x03; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
seenE = 0;
- seenDP = JSON_REAL;
goto parse_number_2;
}
pParse->iErr = i;
return -1;
case '-':
@@ -1582,25 +1775,24 @@
case '6':
case '7':
case '8':
case '9':
/* Parse number */
- jnFlags = 0;
+ t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
parse_number:
- seenDP = JSON_INT;
seenE = 0;
assert( '-' < '0' );
assert( '+' < '0' );
assert( '.' < '0' );
c = z[i];
if( c<='0' ){
if( c=='0' ){
if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){
- assert( seenDP==JSON_INT );
+ assert( t==0x00 );
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t = 0x01;
for(j=i+3; sqlite3Isxdigit(z[j]); j++){}
goto parse_number_finish;
}else if( sqlite3Isdigit(z[i+1]) ){
pParse->iErr = i+1;
return -1;
@@ -1613,19 +1805,19 @@
if( (z[i+1]=='I' || z[i+1]=='i')
&& sqlite3StrNICmp(&z[i+1], "inf",3)==0
){
pParse->hasNonstd = 1;
if( z[i]=='-' ){
- jsonParseAddNode(pParse, JSON_REAL, 8, "-9.0e999");
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999");
}else{
- jsonParseAddNode(pParse, JSON_REAL, 7, "9.0e999");
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
}
return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4);
}
if( z[i+1]=='.' ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
goto parse_number_2;
}
pParse->iErr = i;
return -1;
}
@@ -1633,44 +1825,45 @@
if( sqlite3Isdigit(z[i+2]) ){
pParse->iErr = i+1;
return -1;
}else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
for(j=i+4; sqlite3Isxdigit(z[j]); j++){}
goto parse_number_finish;
}
}
}
}
+
parse_number_2:
for(j=i+1;; j++){
c = z[j];
if( sqlite3Isdigit(c) ) continue;
if( c=='.' ){
- if( seenDP==JSON_REAL ){
+ if( (t & 0x02)!=0 ){
pParse->iErr = j;
return -1;
}
- seenDP = JSON_REAL;
+ t |= 0x02;
continue;
}
if( c=='e' || c=='E' ){
if( z[j-1]<'0' ){
if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
}else{
pParse->iErr = j;
return -1;
}
}
if( seenE ){
pParse->iErr = j;
return -1;
}
- seenDP = JSON_REAL;
+ t |= 0x02;
seenE = 1;
c = z[j+1];
if( c=='+' || c=='-' ){
j++;
c = z[j+1];
@@ -1684,18 +1877,22 @@
break;
}
if( z[j-1]<'0' ){
if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
}else{
pParse->iErr = j;
return -1;
}
}
parse_number_finish:
- jsonParseAddNode(pParse, seenDP | (jnFlags<<8), j - i, &z[i]);
+ assert( JSONB_INT+0x01==JSONB_INT5 );
+ assert( JSONB_FLOAT+0x01==JSONB_FLOAT5 );
+ assert( JSONB_INT+0x02==JSONB_FLOAT );
+ if( z[i]=='+' ) i++;
+ jsonBlobAppendNode(pParse, JSONB_INT+t, j-i, &z[i]);
return j;
}
case '}': {
pParse->iErr = i;
return -2; /* End of {...} */
@@ -1717,13 +1914,11 @@
}
case 0x09:
case 0x0a:
case 0x0d:
case 0x20: {
- do{
- i++;
- }while( fast_isspace(z[i]) );
+ i += 1 + (u32)strspn(&z[i+1], jsonSpaces);
goto json_parse_restart;
}
case 0x0b:
case 0x0c:
case '/':
@@ -1741,14 +1936,15 @@
pParse->iErr = i;
return -1;
}
case 'n': {
if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_NULL);
return i+4;
}
/* fall-through into the default case that checks for NaN */
+ /* no break */ deliberate_fall_through
}
default: {
u32 k;
int nn;
c = z[i];
@@ -1757,43 +1953,53 @@
nn = aNanInfName[k].n;
if( sqlite3StrNICmp(&z[i], aNanInfName[k].zMatch, nn)!=0 ){
continue;
}
if( sqlite3Isalnum(z[i+nn]) ) continue;
- jsonParseAddNode(pParse, aNanInfName[k].eType,
- aNanInfName[k].nRepl, aNanInfName[k].zRepl);
+ if( aNanInfName[k].eType==JSONB_FLOAT ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
+ }else{
+ jsonBlobAppendOneByte(pParse, JSONB_NULL);
+ }
pParse->hasNonstd = 1;
return i + nn;
}
pParse->iErr = i;
return -1; /* Syntax error */
}
} /* End switch(z[i]) */
}
+
/*
** Parse a complete JSON string. Return 0 on success or non-zero if there
** are any errors. If an error occurs, free all memory held by pParse,
** but not pParse itself.
**
** pParse must be initialized to an empty parse object prior to calling
** this routine.
*/
-static int jsonParse(
+static int jsonConvertTextToBlob(
JsonParse *pParse, /* Initialize and fill this JsonParse object */
sqlite3_context *pCtx /* Report errors here */
){
int i;
const char *zJson = pParse->zJson;
- i = jsonParseValue(pParse, 0);
+ i = jsonTranslateTextToBlob(pParse, 0);
if( pParse->oom ) i = -1;
if( i>0 ){
+#ifdef SQLITE_DEBUG
assert( pParse->iDepth==0 );
- while( fast_isspace(zJson[i]) ) i++;
+ if( sqlite3Config.bJsonSelfcheck ){
+ assert( jsonbValidityCheck(pParse, 0, pParse->nBlob, 0)==0 );
+ }
+#endif
+ while( jsonIsspace(zJson[i]) ) i++;
if( zJson[i] ){
i += json5Whitespace(&zJson[i]);
if( zJson[i] ){
+ if( pCtx ) sqlite3_result_error(pCtx, "malformed JSON", -1);
jsonParseReset(pParse);
return 1;
}
pParse->hasNonstd = 1;
}
@@ -1810,617 +2016,1587 @@
return 1;
}
return 0;
}
-
-/* Mark node i of pParse as being a child of iParent. Call recursively
-** to fill in all the descendants of node i.
-*/
-static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){
- JsonNode *pNode = &pParse->aNode[i];
- u32 j;
- pParse->aUp[i] = iParent;
- switch( pNode->eType ){
- case JSON_ARRAY: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){
- jsonParseFillInParentage(pParse, i+j, i);
- }
- break;
- }
- case JSON_OBJECT: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
- pParse->aUp[i+j] = i;
- jsonParseFillInParentage(pParse, i+j+1, i);
- }
- break;
- }
- default: {
- break;
- }
- }
-}
-
-/*
-** Compute the parentage of all nodes in a completed parse.
-*/
-static int jsonParseFindParents(JsonParse *pParse){
- u32 *aUp;
- assert( pParse->aUp==0 );
- aUp = pParse->aUp = sqlite3_malloc64( sizeof(u32)*pParse->nNode );
- if( aUp==0 ){
- pParse->oom = 1;
- return SQLITE_NOMEM;
- }
- jsonParseFillInParentage(pParse, 0, 0);
- return SQLITE_OK;
-}
-
-/*
-** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
-*/
-#define JSON_CACHE_ID (-429938) /* First cache entry */
-#define JSON_CACHE_SZ 4 /* Max number of cache entries */
-
-/*
-** Obtain a complete parse of the JSON found in the pJson argument
-**
-** Use the sqlite3_get_auxdata() cache to find a preexisting parse
-** if it is available. If the cache is not available or if it
-** is no longer valid, parse the JSON again and return the new parse.
-** Also register the new parse so that it will be available for
-** future sqlite3_get_auxdata() calls.
-**
-** If an error occurs and pErrCtx!=0 then report the error on pErrCtx
-** and return NULL.
-**
-** The returned pointer (if it is not NULL) is owned by the cache in
-** most cases, not the caller. The caller does NOT need to invoke
-** jsonParseFree(), in most cases.
-**
-** Except, if an error occurs and pErrCtx==0 then return the JsonParse
-** object with JsonParse.nErr non-zero and the caller will own the JsonParse
-** object. In that case, it will be the responsibility of the caller to
-** invoke jsonParseFree(). To summarize:
-**
-** pErrCtx!=0 || p->nErr==0 ==> Return value p is owned by the
-** cache. Call does not need to
-** free it.
-**
-** pErrCtx==0 && p->nErr!=0 ==> Return value is owned by the caller
-** and so the caller must free it.
-*/
-static JsonParse *jsonParseCached(
- sqlite3_context *pCtx, /* Context to use for cache search */
- sqlite3_value *pJson, /* Function param containing JSON text */
- sqlite3_context *pErrCtx, /* Write parse errors here if not NULL */
- int bUnedited /* No prior edits allowed */
-){
- char *zJson = (char*)sqlite3_value_text(pJson);
- int nJson = sqlite3_value_bytes(pJson);
- JsonParse *p;
- JsonParse *pMatch = 0;
- int iKey;
- int iMinKey = 0;
- u32 iMinHold = 0xffffffff;
- u32 iMaxHold = 0;
- int bJsonRCStr;
-
- if( zJson==0 ) return 0;
-  for(iKey=0; iKey<JSON_CACHE_SZ; iKey++){
-    p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iKey);
-    if( p==0 ){
-      iMinKey = iKey;
-      break;
-    }
-    if( pMatch==0
-     && p->nJson==nJson
- && (p->hasMod==0 || bUnedited==0)
- && (p->zJson==zJson || memcmp(p->zJson,zJson,nJson)==0)
- ){
- p->nErr = 0;
- p->useMod = 0;
- pMatch = p;
- }else
- if( pMatch==0
- && p->zAlt!=0
- && bUnedited==0
- && p->nAlt==nJson
- && memcmp(p->zAlt, zJson, nJson)==0
- ){
- p->nErr = 0;
- p->useMod = 1;
- pMatch = p;
-    }else if( p->iHold<iMinHold ){
-      iMinHold = p->iHold;
- iMinKey = iKey;
- }
- if( p->iHold>iMaxHold ){
- iMaxHold = p->iHold;
- }
- }
- if( pMatch ){
- /* The input JSON text was found in the cache. Use the preexisting
- ** parse of this JSON */
- pMatch->nErr = 0;
- pMatch->iHold = iMaxHold+1;
- assert( pMatch->nJPRef>0 ); /* pMatch is owned by the cache */
- return pMatch;
- }
-
- /* The input JSON was not found anywhere in the cache. We will need
- ** to parse it ourselves and generate a new JsonParse object.
- */
- bJsonRCStr = sqlite3ValueIsOfClass(pJson,sqlite3RCStrUnref);
- p = sqlite3_malloc64( sizeof(*p) + (bJsonRCStr ? 0 : nJson+1) );
- if( p==0 ){
- sqlite3_result_error_nomem(pCtx);
- return 0;
- }
- memset(p, 0, sizeof(*p));
- if( bJsonRCStr ){
- p->zJson = sqlite3RCStrRef(zJson);
- p->bJsonIsRCStr = 1;
- }else{
- p->zJson = (char*)&p[1];
- memcpy(p->zJson, zJson, nJson+1);
- }
- p->nJPRef = 1;
- if( jsonParse(p, pErrCtx) ){
- if( pErrCtx==0 ){
- p->nErr = 1;
- assert( p->nJPRef==1 ); /* Caller will own the new JsonParse object p */
- return p;
- }
- jsonParseFree(p);
- return 0;
- }
- p->nJson = nJson;
- p->iHold = iMaxHold+1;
- /* Transfer ownership of the new JsonParse to the cache */
- sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p,
- (void(*)(void*))jsonParseFree);
- return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey);
-}
-
-/*
-** Compare the OBJECT label at pNode against zKey,nKey. Return true on
-** a match.
-*/
-static int jsonLabelCompare(const JsonNode *pNode, const char *zKey, u32 nKey){
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->n!=nKey ) return 0;
- return strncmp(pNode->u.zJContent, zKey, nKey)==0;
- }else{
- if( pNode->n!=nKey+2 ) return 0;
- return strncmp(pNode->u.zJContent+1, zKey, nKey)==0;
- }
-}
-static int jsonSameLabel(const JsonNode *p1, const JsonNode *p2){
- if( p1->jnFlags & JNODE_RAW ){
- return jsonLabelCompare(p2, p1->u.zJContent, p1->n);
- }else if( p2->jnFlags & JNODE_RAW ){
- return jsonLabelCompare(p1, p2->u.zJContent, p2->n);
- }else{
- return p1->n==p2->n && strncmp(p1->u.zJContent,p2->u.zJContent,p1->n)==0;
- }
-}
-
-/* forward declaration */
-static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
-
-/*
-** Search along zPath to find the node specified. Return a pointer
-** to that node, or NULL if zPath is malformed or if there is no such
-** node.
-**
-** If pApnd!=0, then try to append new nodes to complete zPath if it is
-** possible to do so and if no existing node corresponds to zPath. If
-** new nodes are appended *pApnd is set to 1.
-*/
-static JsonNode *jsonLookupStep(
- JsonParse *pParse, /* The JSON to search */
- u32 iRoot, /* Begin the search at this node */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- const char **pzErr /* Make *pzErr point to any syntax error in zPath */
-){
- u32 i, j, nKey;
- const char *zKey;
- JsonNode *pRoot;
- if( pParse->oom ) return 0;
- pRoot = &pParse->aNode[iRoot];
- if( pRoot->jnFlags & (JNODE_REPLACE|JNODE_REMOVE) && pParse->useMod ){
- while( (pRoot->jnFlags & JNODE_REPLACE)!=0 ){
- u32 idx = (u32)(pRoot - pParse->aNode);
- i = pParse->iSubst;
- while( 1 /*exit-by-break*/ ){
-      assert( i<pParse->nNode );
- assert( pParse->aNode[i].eType==JSON_SUBST );
- assert( pParse->aNode[i].eU==4 );
-      assert( pParse->aNode[i].u.iPrev<i );
-      if( pParse->aNode[i].n==idx ){
- pRoot = &pParse->aNode[i+1];
- iRoot = i+1;
- break;
- }
- i = pParse->aNode[i].u.iPrev;
- }
- }
- if( pRoot->jnFlags & JNODE_REMOVE ){
- return 0;
- }
- }
- if( zPath[0]==0 ) return pRoot;
- if( zPath[0]=='.' ){
- if( pRoot->eType!=JSON_OBJECT ) return 0;
+/*
+** The input string pStr is a well-formed JSON text string. Convert
+** this into the JSONB format and make it the return value of the
+** SQL function.
+*/
+static void jsonReturnStringAsBlob(JsonString *pStr){
+ JsonParse px;
+ memset(&px, 0, sizeof(px));
+ jsonStringTerminate(pStr);
+ px.zJson = pStr->zBuf;
+ px.nJson = pStr->nUsed;
+ px.db = sqlite3_context_db_handle(pStr->pCtx);
+ (void)jsonTranslateTextToBlob(&px, 0);
+ if( px.oom ){
+ sqlite3DbFree(px.db, px.aBlob);
+ sqlite3_result_error_nomem(pStr->pCtx);
+ }else{
+ assert( px.nBlobAlloc>0 );
+ assert( !px.bReadOnly );
+ sqlite3_result_blob(pStr->pCtx, px.aBlob, px.nBlob, SQLITE_DYNAMIC);
+ }
+}
+
+/* The byte at index i is a node type-code. This routine
+** determines the payload size for that node and writes that
+** payload size into *pSz. It returns the offset from i to the
+** beginning of the payload. Return 0 on error.
+*/
+static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){
+ u8 x;
+ u32 sz;
+ u32 n;
+ if( NEVER(i>pParse->nBlob) ){
+ *pSz = 0;
+ return 0;
+ }
+ x = pParse->aBlob[i]>>4;
+ if( x<=11 ){
+ sz = x;
+ n = 1;
+ }else if( x==12 ){
+ if( i+1>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = pParse->aBlob[i+1];
+ n = 2;
+ }else if( x==13 ){
+ if( i+2>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = (pParse->aBlob[i+1]<<8) + pParse->aBlob[i+2];
+ n = 3;
+ }else if( x==14 ){
+ if( i+4>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = ((u32)pParse->aBlob[i+1]<<24) + (pParse->aBlob[i+2]<<16) +
+ (pParse->aBlob[i+3]<<8) + pParse->aBlob[i+4];
+ n = 5;
+ }else{
+ if( i+8>=pParse->nBlob
+ || pParse->aBlob[i+1]!=0
+ || pParse->aBlob[i+2]!=0
+ || pParse->aBlob[i+3]!=0
+ || pParse->aBlob[i+4]!=0
+ ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) +
+ (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8];
+ n = 9;
+ }
+ if( (i64)i+sz+n > pParse->nBlob
+ && (i64)i+sz+n > pParse->nBlob-pParse->delta
+ ){
+ sz = 0;
+ n = 0;
+ }
+ *pSz = sz;
+ return n;
+}
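
The decoder above mirrors that encoding and additionally accepts a 9-byte
header (upper nibble 0xf) whose first four size bytes must be zero.  A
compact sketch of just the size decoding, with the bounds checks omitted
for brevity (illustrative only, not part of the patch):

  /* Decode a JSONB header at a[].  Writes the payload size to *pSz and
  ** returns the number of header bytes, assuming a[] is large enough. */
  static unsigned jsonb_header_size(const unsigned char *a, unsigned *pSz){
    unsigned x = a[0]>>4;
    if( x<=11 ){ *pSz = x;                    return 1; }
    if( x==12 ){ *pSz = a[1];                 return 2; }
    if( x==13 ){ *pSz = (a[1]<<8) | a[2];     return 3; }
    if( x==14 ){
      *pSz = ((unsigned)a[1]<<24) | (a[2]<<16) | (a[3]<<8) | a[4];
      return 5;
    }
    *pSz = ((unsigned)a[5]<<24) | (a[6]<<16) | (a[7]<<8) | a[8];
    return 9;
  }
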
+
+
+/*
+** Translate the binary JSONB representation of JSON beginning at
+** pParse->aBlob[i] into a JSON text string. Append the JSON
+** text onto the end of pOut. Return the index in pParse->aBlob[]
+** of the first byte past the end of the element that is translated.
+**
+** If an error is detected in the BLOB input, the pOut->eErr flag
+** might get set to JSTRING_MALFORMED. But not all BLOB input errors
+** are detected. So a malformed JSONB input might either result
+** in an error, or in incorrect JSON.
+**
+** The pOut->eErr JSTRING_OOM flag is set on a OOM.
+*/
+static u32 jsonTranslateBlobToText(
+ const JsonParse *pParse, /* the complete parse of the JSON */
+ u32 i, /* Start rendering at this index */
+ JsonString *pOut /* Write JSON here */
+){
+ u32 sz, n, j, iEnd;
+
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( n==0 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ return pParse->nBlob+1;
+ }
+ switch( pParse->aBlob[i] & 0x0f ){
+ case JSONB_NULL: {
+ jsonAppendRawNZ(pOut, "null", 4);
+ return i+1;
+ }
+ case JSONB_TRUE: {
+ jsonAppendRawNZ(pOut, "true", 4);
+ return i+1;
+ }
+ case JSONB_FALSE: {
+ jsonAppendRawNZ(pOut, "false", 5);
+ return i+1;
+ }
+ case JSONB_INT:
+ case JSONB_FLOAT: {
+ if( sz==0 ) goto malformed_jsonb;
+ jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ break;
+ }
+ case JSONB_INT5: { /* Integer literal in hexadecimal notation */
+ u32 k = 2;
+ sqlite3_uint64 u = 0;
+ const char *zIn = (const char*)&pParse->aBlob[i+n];
+ int bOverflow = 0;
+ if( sz==0 ) goto malformed_jsonb;
+ if( zIn[0]=='-' ){
+ jsonAppendChar(pOut, '-');
+ k++;
+ }else if( zIn[0]=='+' ){
+ k++;
+ }
+      for(; k<sz; k++){
+        if( !sqlite3Isxdigit(zIn[k]) ){
+          pOut->eErr |= JSTRING_MALFORMED;
+ break;
+ }else if( (u>>60)!=0 ){
+ bOverflow = 1;
+ }else{
+ u = u*16 + sqlite3HexToInt(zIn[k]);
+ }
+ }
+ jsonPrintf(100,pOut,bOverflow?"9.0e999":"%llu", u);
+ break;
+ }
+ case JSONB_FLOAT5: { /* Float literal missing digits beside "." */
+ u32 k = 0;
+ const char *zIn = (const char*)&pParse->aBlob[i+n];
+ if( sz==0 ) goto malformed_jsonb;
+ if( zIn[0]=='-' ){
+ jsonAppendChar(pOut, '-');
+ k++;
+ }
+ if( zIn[k]=='.' ){
+ jsonAppendChar(pOut, '0');
+ }
+      for(; k<sz; k++){
+        jsonAppendChar(pOut, zIn[k]);
+      }
+      break;
+    }
+    case JSONB_TEXT:
+    case JSONB_TEXTJ: {
+      jsonAppendChar(pOut, '"');
+      jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ jsonAppendChar(pOut, '"');
+ break;
+ }
+ case JSONB_TEXT5: {
+ const char *zIn;
+ u32 k;
+ u32 sz2 = sz;
+ zIn = (const char*)&pParse->aBlob[i+n];
+ jsonAppendChar(pOut, '"');
+ while( sz2>0 ){
+        for(k=0; k<sz2 && (jsonIsOk[(u8)zIn[k]] || zIn[k]=='\''); k++){}
+        if( k>0 ){
+ jsonAppendRawNZ(pOut, zIn, k);
+ if( k>=sz2 ){
+ break;
+ }
+ zIn += k;
+ sz2 -= k;
+ }
+ if( zIn[0]=='"' ){
+ jsonAppendRawNZ(pOut, "\\\"", 2);
+ zIn++;
+ sz2--;
+ continue;
+ }
+ if( zIn[0]<=0x1f ){
+ if( pOut->nUsed+7>pOut->nAlloc && jsonStringGrow(pOut,7) ) break;
+ jsonAppendControlChar(pOut, zIn[0]);
+ zIn++;
+ sz2--;
+ continue;
+ }
+ assert( zIn[0]=='\\' );
+ assert( sz2>=1 );
+ if( sz2<2 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ break;
+ }
+ switch( (u8)zIn[1] ){
+ case '\'':
+ jsonAppendChar(pOut, '\'');
+ break;
+ case 'v':
+ jsonAppendRawNZ(pOut, "\\u0009", 6);
+ break;
+ case 'x':
+ if( sz2<4 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ sz2 = 2;
+ break;
+ }
+ jsonAppendRawNZ(pOut, "\\u00", 4);
+ jsonAppendRawNZ(pOut, &zIn[2], 2);
+ zIn += 2;
+ sz2 -= 2;
+ break;
+ case '0':
+ jsonAppendRawNZ(pOut, "\\u0000", 6);
+ break;
+ case '\r':
+ if( sz2>2 && zIn[2]=='\n' ){
+ zIn++;
+ sz2--;
+ }
+ break;
+ case '\n':
+ break;
+ case 0xe2:
+ /* '\' followed by either U+2028 or U+2029 is ignored as
+          ** whitespace.  Note that in UTF8, U+2028 is 0xe2 0x80 0xa8
+          ** and U+2029 is the same except that its last byte is 0xa9. */
+ if( sz2<4
+ || 0x80!=(u8)zIn[2]
+ || (0xa8!=(u8)zIn[3] && 0xa9!=(u8)zIn[3])
+ ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ sz2 = 2;
+ break;
+ }
+ zIn += 2;
+ sz2 -= 2;
+ break;
+ default:
+ jsonAppendRawNZ(pOut, zIn, 2);
+ break;
+ }
+ assert( sz2>=2 );
+ zIn += 2;
+ sz2 -= 2;
+ }
+ jsonAppendChar(pOut, '"');
+ break;
+ }
+ case JSONB_TEXTRAW: {
+ jsonAppendString(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ break;
+ }
+ case JSONB_ARRAY: {
+ jsonAppendChar(pOut, '[');
+ j = i+n;
+ iEnd = j+sz;
+      while( j<iEnd && pOut->eErr==0 ){
+ j = jsonTranslateBlobToText(pParse, j, pOut);
+ jsonAppendChar(pOut, ',');
+ }
+ if( j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+ if( sz>0 ) jsonStringTrimOneChar(pOut);
+ jsonAppendChar(pOut, ']');
+ break;
+ }
+ case JSONB_OBJECT: {
+ int x = 0;
+ jsonAppendChar(pOut, '{');
+ j = i+n;
+ iEnd = j+sz;
+      while( j<iEnd && pOut->eErr==0 ){
+ j = jsonTranslateBlobToText(pParse, j, pOut);
+ jsonAppendChar(pOut, (x++ & 1) ? ',' : ':');
+ }
+ if( (x & 1)!=0 || j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+ if( sz>0 ) jsonStringTrimOneChar(pOut);
+ jsonAppendChar(pOut, '}');
+ break;
+ }
+
+ default: {
+ malformed_jsonb:
+ pOut->eErr |= JSTRING_MALFORMED;
+ break;
+ }
+ }
+ return i+n+sz;
+}
+
+/* Return true if the input pJson might be a valid JSONB blob.
+**
+** For performance reasons, this routine does not do a detailed check of the
+** input BLOB to ensure that it is well-formed. Hence, false positives are
+** possible. False negatives should never occur, however.
+*/
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){
+ u32 sz, n;
+ const u8 *aBlob;
+ int nBlob;
+ JsonParse s;
+ if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0;
+ aBlob = sqlite3_value_blob(pJson);
+ nBlob = sqlite3_value_bytes(pJson);
+ if( nBlob<1 ) return 0;
+ if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0;
+ memset(&s, 0, sizeof(s));
+ s.aBlob = (u8*)aBlob;
+ s.nBlob = nBlob;
+ n = jsonbPayloadSize(&s, 0, &sz);
+ if( n==0 ) return 0;
+ if( sz+n!=(u32)nBlob ) return 0;
+ if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0;
+ return sz+n==(u32)nBlob;
+}
+
+/*
+** Given that a JSONB_ARRAY object starts at offset i, return
+** the number of entries in that array.
+*/
+static u32 jsonbArrayCount(JsonParse *pParse, u32 iRoot){
+ u32 n, sz, i, iEnd;
+ u32 k = 0;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ iEnd = iRoot+n+sz;
+  for(i=iRoot+n; n>0 && i<iEnd; i+=sz+n, k++){
+    n = jsonbPayloadSize(pParse, i, &sz);
+  }
+  return k;
+}
+
+/*
+** Modify the payload size of the element at iRoot by the amount in
+** pParse->delta.
+*/
+static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){
+ u32 sz = 0;
+ u32 nBlob;
+ assert( pParse->delta!=0 );
+ assert( pParse->nBlobAlloc >= pParse->nBlob );
+ nBlob = pParse->nBlob;
+ pParse->nBlob = pParse->nBlobAlloc;
+ (void)jsonbPayloadSize(pParse, iRoot, &sz);
+ pParse->nBlob = nBlob;
+ sz += pParse->delta;
+ pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz);
+}
+
+/*
+** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of
+** content beginning at iDel, and replacing them with nIns bytes of
+** content given by aIns.
+**
+** nDel may be zero, in which case no bytes are removed. But iDel is
+** still important as new bytes will be inserted beginning at iDel.
+**
+** aIns may be zero, in which case space is created to hold nIns bytes
+** beginning at iDel, but that space is uninitialized.
+**
+** Set pParse->oom if an OOM occurs.
+*/
+static void jsonBlobEdit(
+ JsonParse *pParse, /* The JSONB to be modified is in pParse->aBlob */
+ u32 iDel, /* First byte to be removed */
+ u32 nDel, /* Number of bytes to remove */
+ const u8 *aIns, /* Content to insert */
+ u32 nIns /* Bytes of content to insert */
+){
+ i64 d = (i64)nIns - (i64)nDel;
+ if( d!=0 ){
+ if( pParse->nBlob + d > pParse->nBlobAlloc ){
+ jsonBlobExpand(pParse, pParse->nBlob+d);
+ if( pParse->oom ) return;
+ }
+ memmove(&pParse->aBlob[iDel+nIns],
+ &pParse->aBlob[iDel+nDel],
+ pParse->nBlob - (iDel+nDel));
+ pParse->nBlob += d;
+ pParse->delta += d;
+ }
+ if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns);
+}
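+
+/* Editorial illustration (not part of the original patch): on a 10-byte
+** blob, jsonBlobEdit(pParse, 4, 3, aNew, 5) shifts bytes 7..9 up to 9..11,
+** copies the five new bytes into positions 4..8, grows nBlob from 10 to 12,
+** and adds +2 to pParse->delta so that enclosing containers can later be
+** resized via jsonAfterEditSizeAdjust().  (aNew is a hypothetical buffer.)
+*/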
+
+/*
+** Return the number of bytes of escaped newlines to be bypassed.
+** An escaped newline is one of the following byte sequences:
+**
+** 0x5c 0x0a
+** 0x5c 0x0d
+** 0x5c 0x0d 0x0a
+** 0x5c 0xe2 0x80 0xa8
+** 0x5c 0xe2 0x80 0xa9
+*/
+static u32 jsonBytesToBypass(const char *z, u32 n){
+ u32 i = 0;
+  while( i+1<n ){
+    if( z[i]!='\\' ) return i;
+    if( z[i+1]=='\n' ){
+      i += 2;
+      continue;
+    }
+    if( z[i+1]=='\r' ){
+      if( i+2<n && z[i+2]=='\n' ){
+        i += 3;
+      }else{
+        i += 2;
+      }
+      continue;
+    }
+    if( 0xe2==(u8)z[i+1]
+     && i+3<n
+     && 0x80==(u8)z[i+2]
+     && (0xa8==(u8)z[i+3] || 0xa9==(u8)z[i+3])
+    ){
+      i += 4;
+      continue;
+    }
+    break;
+  }
+  return i;
+}
+
+/*
+** Decode the JSON escape sequence that begins at z[0] (which must be a
+** backslash) and has n bytes available.  Write the decoded character into
+** *piOut and return the number of bytes of z[] consumed.  If the escape
+** sequence is incomplete or invalid, set *piOut to JSON_INVALID_CHAR.
+*/
+static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){
+  assert( n>0 );
+ assert( z[0]=='\\' );
+ if( n<2 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }
+ switch( (u8)z[1] ){
+ case 'u': {
+ u32 v, vlo;
+ if( n<6 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }
+ v = jsonHexToInt4(&z[2]);
+ if( (v & 0xfc00)==0xd800
+ && n>=12
+ && z[6]=='\\'
+ && z[7]=='u'
+ && ((vlo = jsonHexToInt4(&z[8]))&0xfc00)==0xdc00
+ ){
+ *piOut = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
+ return 12;
+ }else{
+ *piOut = v;
+ return 6;
+ }
+ }
+ case 'b': { *piOut = '\b'; return 2; }
+ case 'f': { *piOut = '\f'; return 2; }
+ case 'n': { *piOut = '\n'; return 2; }
+ case 'r': { *piOut = '\r'; return 2; }
+ case 't': { *piOut = '\t'; return 2; }
+ case 'v': { *piOut = '\v'; return 2; }
+ case '0': { *piOut = 0; return 2; }
+ case '\'':
+ case '"':
+ case '/':
+ case '\\':{ *piOut = z[1]; return 2; }
+ case 'x': {
+ if( n<4 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }
+ *piOut = (jsonHexToInt(z[2])<<4) | jsonHexToInt(z[3]);
+ return 4;
+ }
+ case 0xe2:
+ case '\r':
+ case '\n': {
+ u32 nSkip = jsonBytesToBypass(z, n);
+ if( nSkip==0 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }else if( nSkip==n ){
+ *piOut = 0;
+ return n;
+ }else if( z[nSkip]=='\\' ){
+ return nSkip + jsonUnescapeOneChar(&z[nSkip], n-nSkip, piOut);
+ }else{
+ int sz = sqlite3Utf8ReadLimited((u8*)&z[nSkip], n-nSkip, piOut);
+ return nSkip + sz;
+ }
+ }
+ default: {
+ *piOut = JSON_INVALID_CHAR;
+ return 2;
+ }
+ }
+}
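+
+/* Editorial illustration (not part of the original patch): for the input
+** "\u00e9" this routine writes 0x00e9 to *piOut and returns 6; for the
+** surrogate pair "\ud83d\ude00" it combines the halves into 0x1f600 and
+** returns 12; for an unrecognized escape such as "\z" it returns 2 with
+** *piOut set to JSON_INVALID_CHAR.
+*/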
+
+
+/*
+** Compare two object labels. Return 1 if they are equal and
+** 0 if they differ.
+**
+** In this version, we know that one or the other or both of the
+** two comparands contains an escape sequence.
+*/
+static SQLITE_NOINLINE int jsonLabelCompareEscaped(
+ const char *zLeft, /* The left label */
+ u32 nLeft, /* Size of the left label in bytes */
+ int rawLeft, /* True if zLeft contains no escapes */
+ const char *zRight, /* The right label */
+ u32 nRight, /* Size of the right label in bytes */
+ int rawRight /* True if zRight is escape-free */
+){
+ u32 cLeft, cRight;
+ assert( rawLeft==0 || rawRight==0 );
+ while( 1 /*exit-by-return*/ ){
+ if( nLeft==0 ){
+ cLeft = 0;
+ }else if( rawLeft || zLeft[0]!='\\' ){
+ cLeft = ((u8*)zLeft)[0];
+ if( cLeft>=0xc0 ){
+ int sz = sqlite3Utf8ReadLimited((u8*)zLeft, nLeft, &cLeft);
+ zLeft += sz;
+ nLeft -= sz;
+ }else{
+ zLeft++;
+ nLeft--;
+ }
+ }else{
+ u32 n = jsonUnescapeOneChar(zLeft, nLeft, &cLeft);
+ zLeft += n;
+ assert( n<=nLeft );
+ nLeft -= n;
+ }
+ if( nRight==0 ){
+ cRight = 0;
+ }else if( rawRight || zRight[0]!='\\' ){
+ cRight = ((u8*)zRight)[0];
+ if( cRight>=0xc0 ){
+ int sz = sqlite3Utf8ReadLimited((u8*)zRight, nRight, &cRight);
+ zRight += sz;
+ nRight -= sz;
+ }else{
+ zRight++;
+ nRight--;
+ }
+ }else{
+ u32 n = jsonUnescapeOneChar(zRight, nRight, &cRight);
+ zRight += n;
+ assert( n<=nRight );
+ nRight -= n;
+ }
+ if( cLeft!=cRight ) return 0;
+ if( cLeft==0 ) return 1;
+ }
+}
+
+/*
+** Compare two object labels. Return 1 if they are equal and
+** 0 if they differ. Return -1 if an OOM occurs.
+*/
+static int jsonLabelCompare(
+ const char *zLeft, /* The left label */
+ u32 nLeft, /* Size of the left label in bytes */
+ int rawLeft, /* True if zLeft contains no escapes */
+ const char *zRight, /* The right label */
+ u32 nRight, /* Size of the right label in bytes */
+ int rawRight /* True if zRight is escape-free */
+){
+ if( rawLeft && rawRight ){
+    /* Simplest case: Neither label contains escapes. A simple
+ ** memcmp() is sufficient. */
+ if( nLeft!=nRight ) return 0;
+ return memcmp(zLeft, zRight, nLeft)==0;
+ }else{
+ return jsonLabelCompareEscaped(zLeft, nLeft, rawLeft,
+ zRight, nRight, rawRight);
+ }
+}
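+
+/* Editorial illustration (not part of the original patch): the labels "abc"
+** (escape-free) and "a\u0062c" (contains an escape) compare equal here even
+** though their byte sequences differ, because jsonLabelCompareEscaped()
+** decodes "\u0062" to 'b' before comparing.
+*/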
+
+/*
+** Error returns from jsonLookupStep()
+*/
+#define JSON_LOOKUP_ERROR 0xffffffff
+#define JSON_LOOKUP_NOTFOUND 0xfffffffe
+#define JSON_LOOKUP_PATHERROR 0xfffffffd
+#define JSON_LOOKUP_ISERROR(x) ((x)>=JSON_LOOKUP_PATHERROR)
+
+/* Forward declaration */
+static u32 jsonLookupStep(JsonParse*,u32,const char*,u32);
+
+
+/* This helper routine for jsonLookupStep() populates pIns with
+** binary data that is to be inserted into pParse.
+**
+** In the common case, pIns just points to pParse->aIns and pParse->nIns.
+** But if the zPath of the original edit operation includes path elements
+** that go deeper, additional substructure must be created.
+**
+** For example:
+**
+** json_insert('{}', '$.a.b.c', 123);
+**
+** The search stops at '$.a'. But additional substructure must be
+** created for the ".b.c" part of the patch so that the final result
+** is: {"a":{"b":{"c":123}}}. This routine populates pIns with
+** the binary equivalent of {"b":{"c":123}} so that it can be inserted.
+**
+** The caller is responsible for resetting pIns when it has finished
+** using the substructure.
+*/
+static u32 jsonCreateEditSubstructure(
+ JsonParse *pParse, /* The original JSONB that is being edited */
+ JsonParse *pIns, /* Populate this with the blob data to insert */
+  const char *zTail /* Tail of the path that determines substructure */
+){
+ static const u8 emptyObject[] = { JSONB_ARRAY, JSONB_OBJECT };
+ int rc;
+ memset(pIns, 0, sizeof(*pIns));
+ pIns->db = pParse->db;
+ if( zTail[0]==0 ){
+ /* No substructure. Just insert what is given in pParse. */
+ pIns->aBlob = pParse->aIns;
+ pIns->nBlob = pParse->nIns;
+ rc = 0;
+ }else{
+ /* Construct the binary substructure */
+ pIns->nBlob = 1;
+ pIns->aBlob = (u8*)&emptyObject[zTail[0]=='.'];
+ pIns->eEdit = pParse->eEdit;
+ pIns->nIns = pParse->nIns;
+ pIns->aIns = pParse->aIns;
+ rc = jsonLookupStep(pIns, 0, zTail, 0);
+ pParse->oom |= pIns->oom;
+ }
+ return rc; /* Error code only */
+}
+
+/*
+** Search along zPath to find the Json element specified. Return an
+** index into pParse->aBlob[] for the start of that element's value.
+**
+** If the value found by this routine is the value half of a label/value pair
+** within an object, then set pParse->iLabel to the start of the corresponding
+** label, before returning.
+**
+** Return one of the JSON_LOOKUP error codes if problems are seen.
+**
+** This routine will also modify the blob. If pParse->eEdit is one of
+** JEDIT_DEL, JEDIT_REPL, JEDIT_INS, or JEDIT_SET, then changes might be
+** made to the selected value. If an edit is performed, then the return
+** value does not necessarily point to the selected element. If an edit
+** is performed, the return value is only useful for detecting error
+** conditions.
+*/
+static u32 jsonLookupStep(
+ JsonParse *pParse, /* The JSON to search */
+ u32 iRoot, /* Begin the search at this element of aBlob[] */
+ const char *zPath, /* The path to search */
+  u32 iLabel /* Label if iRoot is a value in an object */
+){
+ u32 i, j, k, nKey, sz, n, iEnd, rc;
+ const char *zKey;
+ u8 x;
+
+ if( zPath[0]==0 ){
+ if( pParse->eEdit && jsonBlobMakeEditable(pParse, pParse->nIns) ){
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ sz += n;
+ if( pParse->eEdit==JEDIT_DEL ){
+ if( iLabel>0 ){
+ sz += iRoot - iLabel;
+ iRoot = iLabel;
+ }
+ jsonBlobEdit(pParse, iRoot, sz, 0, 0);
+ }else if( pParse->eEdit==JEDIT_INS ){
+ /* Already exists, so json_insert() is a no-op */
+ }else{
+ /* json_set() or json_replace() */
+ jsonBlobEdit(pParse, iRoot, sz, pParse->aIns, pParse->nIns);
+ }
+ }
+ pParse->iLabel = iLabel;
+ return iRoot;
+ }
+ if( zPath[0]=='.' ){
+ int rawKey = 1;
+ x = pParse->aBlob[iRoot];
zPath++;
if( zPath[0]=='"' ){
zKey = zPath + 1;
for(i=1; zPath[i] && zPath[i]!='"'; i++){}
nKey = i-1;
if( zPath[i] ){
i++;
}else{
- *pzErr = zPath;
- return 0;
+ return JSON_LOOKUP_PATHERROR;
}
testcase( nKey==0 );
+ rawKey = memchr(zKey, '\\', nKey)==0;
}else{
zKey = zPath;
for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){}
nKey = i;
if( nKey==0 ){
- *pzErr = zPath;
- return 0;
- }
- }
- j = 1;
- for(;;){
- while( j<=pRoot->n ){
- if( jsonLabelCompare(pRoot+j, zKey, nKey) ){
- return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr);
- }
- j++;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- if( pParse->useMod==0 ) break;
- assert( pRoot->eU==2 );
- iRoot = pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( pApnd ){
- u32 iStart, iLabel;
- JsonNode *pNode;
- assert( pParse->useMod );
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
- zPath += i;
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- assert( pRoot->eU==0 );
- pRoot->u.iAppend = iStart;
- pRoot->jnFlags |= JNODE_APPEND;
- VVA( pRoot->eU = 2 );
- pParse->aNode[iLabel].jnFlags |= JNODE_RAW;
- }
- return pNode;
- }
- }else if( zPath[0]=='[' ){
- i = 0;
- j = 1;
- while( sqlite3Isdigit(zPath[j]) ){
- i = i*10 + zPath[j] - '0';
- j++;
- }
- if( j<2 || zPath[j]!=']' ){
- if( zPath[1]=='#' ){
- JsonNode *pBase = pRoot;
- int iBase = iRoot;
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- for(;;){
- while( j<=pBase->n ){
- if( (pBase[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i++;
- j += jsonNodeSize(&pBase[j]);
- }
- if( (pBase->jnFlags & JNODE_APPEND)==0 ) break;
- if( pParse->useMod==0 ) break;
- assert( pBase->eU==2 );
- iBase = pBase->u.iAppend;
- pBase = &pParse->aNode[iBase];
- j = 1;
- }
- j = 2;
- if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){
- unsigned int x = 0;
- j = 3;
- do{
- x = x*10 + zPath[j] - '0';
- j++;
- }while( sqlite3Isdigit(zPath[j]) );
- if( x>i ) return 0;
- i -= x;
- }
- if( zPath[j]!=']' ){
- *pzErr = zPath;
- return 0;
- }
- }else{
- *pzErr = zPath;
- return 0;
- }
- }
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- zPath += j + 1;
- j = 1;
- for(;;){
- while( j<=pRoot->n
- && (i>0 || ((pRoot[j].jnFlags & JNODE_REMOVE)!=0 && pParse->useMod))
- ){
- if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i--;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( i==0 && j<=pRoot->n ) break;
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- if( pParse->useMod==0 ) break;
- assert( pRoot->eU==2 );
- iRoot = pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( j<=pRoot->n ){
- return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr);
- }
- if( i==0 && pApnd ){
- u32 iStart;
- JsonNode *pNode;
- assert( pParse->useMod );
- iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0);
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- assert( pRoot->eU==0 );
- pRoot->u.iAppend = iStart;
- pRoot->jnFlags |= JNODE_APPEND;
- VVA( pRoot->eU = 2 );
- }
- return pNode;
- }
- }else{
- *pzErr = zPath;
- }
- return 0;
-}
-
-/*
-** Append content to pParse that will complete zPath. Return a pointer
-** to the inserted node, or return NULL if the append fails.
-*/
-static JsonNode *jsonLookupAppend(
- JsonParse *pParse, /* Append content to the JSON parse */
- const char *zPath, /* Description of content to append */
- int *pApnd, /* Set this flag to 1 */
- const char **pzErr /* Make this point to any syntax error */
-){
- *pApnd = 1;
- if( zPath[0]==0 ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
- return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1];
- }
- if( zPath[0]=='.' ){
- jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- }else if( strncmp(zPath,"[0]",3)==0 ){
- jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- }else{
- return 0;
- }
- if( pParse->oom ) return 0;
- return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr);
-}
-
-/*
-** Return the text of a syntax error message on a JSON path. Space is
-** obtained from sqlite3_malloc().
-*/
-static char *jsonPathSyntaxError(const char *zErr){
- return sqlite3_mprintf("JSON path error near '%q'", zErr);
-}
-
-/*
-** Do a node lookup using zPath. Return a pointer to the node on success.
-** Return NULL if not found or if there is an error.
-**
-** On an error, write an error message into pCtx and increment the
-** pParse->nErr counter.
-**
-** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if
-** nodes are appended.
-*/
-static JsonNode *jsonLookup(
- JsonParse *pParse, /* The JSON to search */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- sqlite3_context *pCtx /* Report errors here, if not NULL */
-){
- const char *zErr = 0;
- JsonNode *pNode = 0;
- char *zMsg;
-
- if( zPath==0 ) return 0;
- if( zPath[0]!='$' ){
- zErr = zPath;
- goto lookup_err;
- }
- zPath++;
- pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr);
- if( zErr==0 ) return pNode;
-
-lookup_err:
- pParse->nErr++;
- assert( zErr!=0 && pCtx!=0 );
- zMsg = jsonPathSyntaxError(zErr);
- if( zMsg ){
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
- }else{
- sqlite3_result_error_nomem(pCtx);
- }
- return 0;
-}
-
-
-/*
-** Report the wrong number of arguments for json_insert(), json_replace()
-** or json_set().
-*/
-static void jsonWrongNumArgs(
- sqlite3_context *pCtx,
- const char *zFuncName
-){
- char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
- zFuncName);
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
-}
-
-/*
-** Mark all NULL entries in the Object passed in as JNODE_REMOVE.
-*/
-static void jsonRemoveAllNulls(JsonNode *pNode){
- int i, n;
- assert( pNode->eType==JSON_OBJECT );
- n = pNode->n;
- for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){
- switch( pNode[i].eType ){
- case JSON_NULL:
- pNode[i].jnFlags |= JNODE_REMOVE;
- break;
- case JSON_OBJECT:
- jsonRemoveAllNulls(&pNode[i]);
- break;
- }
- }
-}
-
+ return JSON_LOOKUP_PATHERROR;
+ }
+ }
+ if( (x & 0x0f)!=JSONB_OBJECT ) return JSON_LOOKUP_NOTFOUND;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ j = iRoot + n; /* j is the index of a label */
+ iEnd = j+sz;
+    while( j<iEnd ){
+      int rawLabel;
+      const char *zLabel;
+      x = pParse->aBlob[j] & 0x0f;
+      if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ k = j+n; /* k is the index of the label text */
+ if( k+sz>=iEnd ) return JSON_LOOKUP_ERROR;
+ zLabel = (const char*)&pParse->aBlob[k];
+ rawLabel = x==JSONB_TEXT || x==JSONB_TEXTRAW;
+ if( jsonLabelCompare(zKey, nKey, rawKey, zLabel, sz, rawLabel) ){
+ u32 v = k+sz; /* v is the index of the value */
+ if( ((pParse->aBlob[v])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, v, &sz);
+ if( n==0 || v+n+sz>iEnd ) return JSON_LOOKUP_ERROR;
+ assert( j>0 );
+ rc = jsonLookupStep(pParse, v, &zPath[i], j);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
+ }
+ j = k+sz;
+ if( ((pParse->aBlob[j])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ j += n+sz;
+ }
+ if( j>iEnd ) return JSON_LOOKUP_ERROR;
+ if( pParse->eEdit>=JEDIT_INS ){
+ u32 nIns; /* Total bytes to insert (label+value) */
+ JsonParse v; /* BLOB encoding of the value to be inserted */
+ JsonParse ix; /* Header of the label to be inserted */
+ testcase( pParse->eEdit==JEDIT_INS );
+ testcase( pParse->eEdit==JEDIT_SET );
+ memset(&ix, 0, sizeof(ix));
+ ix.db = pParse->db;
+ jsonBlobAppendNode(&ix, rawKey?JSONB_TEXTRAW:JSONB_TEXT5, nKey, 0);
+ pParse->oom |= ix.oom;
+ rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i]);
+ if( !JSON_LOOKUP_ISERROR(rc)
+ && jsonBlobMakeEditable(pParse, ix.nBlob+nKey+v.nBlob)
+ ){
+ assert( !pParse->oom );
+ nIns = ix.nBlob + nKey + v.nBlob;
+ jsonBlobEdit(pParse, j, 0, 0, nIns);
+ if( !pParse->oom ){
+          assert( pParse->aBlob!=0 ); /* Because pParse->oom==0 */
+          assert( ix.aBlob!=0 );      /* Because pParse->oom==0 */
+ memcpy(&pParse->aBlob[j], ix.aBlob, ix.nBlob);
+ k = j + ix.nBlob;
+ memcpy(&pParse->aBlob[k], zKey, nKey);
+ k += nKey;
+ memcpy(&pParse->aBlob[k], v.aBlob, v.nBlob);
+ if( ALWAYS(pParse->delta) ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ }
+ }
+ jsonParseReset(&v);
+ jsonParseReset(&ix);
+ return rc;
+ }
+ }else if( zPath[0]=='[' ){
+ x = pParse->aBlob[iRoot] & 0x0f;
+ if( x!=JSONB_ARRAY ) return JSON_LOOKUP_NOTFOUND;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ k = 0;
+ i = 1;
+ while( sqlite3Isdigit(zPath[i]) ){
+ k = k*10 + zPath[i] - '0';
+ i++;
+ }
+ if( i<2 || zPath[i]!=']' ){
+ if( zPath[1]=='#' ){
+ k = jsonbArrayCount(pParse, iRoot);
+ i = 2;
+ if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){
+ unsigned int nn = 0;
+ i = 3;
+ do{
+ nn = nn*10 + zPath[i] - '0';
+ i++;
+ }while( sqlite3Isdigit(zPath[i]) );
+ if( nn>k ) return JSON_LOOKUP_NOTFOUND;
+ k -= nn;
+ }
+ if( zPath[i]!=']' ){
+ return JSON_LOOKUP_PATHERROR;
+ }
+ }else{
+ return JSON_LOOKUP_PATHERROR;
+ }
+ }
+ j = iRoot+n;
+ iEnd = j+sz;
+    while( j<iEnd ){
+      if( k==0 ){
+        rc = jsonLookupStep(pParse, j, &zPath[i+1], 0);
+        if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
+ }
+ k--;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ j += n+sz;
+ }
+ if( j>iEnd ) return JSON_LOOKUP_ERROR;
+ if( k>0 ) return JSON_LOOKUP_NOTFOUND;
+ if( pParse->eEdit>=JEDIT_INS ){
+ JsonParse v;
+ testcase( pParse->eEdit==JEDIT_INS );
+ testcase( pParse->eEdit==JEDIT_SET );
+ rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i+1]);
+ if( !JSON_LOOKUP_ISERROR(rc)
+ && jsonBlobMakeEditable(pParse, v.nBlob)
+ ){
+ assert( !pParse->oom );
+ jsonBlobEdit(pParse, j, 0, v.aBlob, v.nBlob);
+ }
+ jsonParseReset(&v);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
+ }
+ }else{
+ return JSON_LOOKUP_PATHERROR;
+ }
+ return JSON_LOOKUP_NOTFOUND;
+}
+
+/*
+** Convert a JSON BLOB into text and make that text the return value
+** of an SQL function.
+*/
+static void jsonReturnTextJsonFromBlob(
+ sqlite3_context *ctx,
+ const u8 *aBlob,
+ u32 nBlob
+){
+ JsonParse x;
+ JsonString s;
+
+ if( NEVER(aBlob==0) ) return;
+ memset(&x, 0, sizeof(x));
+ x.aBlob = (u8*)aBlob;
+ x.nBlob = nBlob;
+ jsonStringInit(&s, ctx);
+ jsonTranslateBlobToText(&x, 0, &s);
+ jsonReturnString(&s, 0, 0);
+}
+
+
+/*
+** Return the value of the BLOB node at index i.
+**
+** If the value is a primitive, return it as an SQL value.
+** If the value is an array or object, return it as either
+** JSON text or the BLOB encoding, depending on the JSON_BLOB flag
+** on the userdata.
+*/
+static void jsonReturnFromBlob(
+ JsonParse *pParse, /* Complete JSON parse tree */
+ u32 i, /* Index of the node */
+ sqlite3_context *pCtx, /* Return value for this function */
+ int textOnly /* return text JSON. Disregard user-data */
+){
+ u32 n, sz;
+ int rc;
+ sqlite3 *db = sqlite3_context_db_handle(pCtx);
+
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( n==0 ){
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ return;
+ }
+ switch( pParse->aBlob[i] & 0x0f ){
+ case JSONB_NULL: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_null(pCtx);
+ break;
+ }
+ case JSONB_TRUE: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_int(pCtx, 1);
+ break;
+ }
+ case JSONB_FALSE: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_int(pCtx, 0);
+ break;
+ }
+ case JSONB_INT5:
+ case JSONB_INT: {
+ sqlite3_int64 iRes = 0;
+ char *z;
+ int bNeg = 0;
+ char x;
+ if( sz==0 ) goto returnfromblob_malformed;
+ x = (char)pParse->aBlob[i+n];
+ if( x=='-' ){
+ if( sz<2 ) goto returnfromblob_malformed;
+ n++;
+ sz--;
+ bNeg = 1;
+ }
+ z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz);
+ if( z==0 ) goto returnfromblob_oom;
+ rc = sqlite3DecOrHexToI64(z, &iRes);
+ sqlite3DbFree(db, z);
+ if( rc==0 ){
+ sqlite3_result_int64(pCtx, bNeg ? -iRes : iRes);
+ }else if( rc==3 && bNeg ){
+ sqlite3_result_int64(pCtx, SMALLEST_INT64);
+ }else if( rc==1 ){
+ goto returnfromblob_malformed;
+ }else{
+ if( bNeg ){ n--; sz++; }
+ goto to_double;
+ }
+ break;
+ }
+ case JSONB_FLOAT5:
+ case JSONB_FLOAT: {
+ double r;
+ char *z;
+ if( sz==0 ) goto returnfromblob_malformed;
+ to_double:
+ z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz);
+ if( z==0 ) goto returnfromblob_oom;
+ rc = sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
+ sqlite3DbFree(db, z);
+ if( rc<=0 ) goto returnfromblob_malformed;
+ sqlite3_result_double(pCtx, r);
+ break;
+ }
+ case JSONB_TEXTRAW:
+ case JSONB_TEXT: {
+ sqlite3_result_text(pCtx, (char*)&pParse->aBlob[i+n], sz,
+ SQLITE_TRANSIENT);
+ break;
+ }
+ case JSONB_TEXT5:
+ case JSONB_TEXTJ: {
+ /* Translate JSON formatted string into raw text */
+ u32 iIn, iOut;
+ const char *z;
+ char *zOut;
+ u32 nOut = sz;
+ z = (const char*)&pParse->aBlob[i+n];
+ zOut = sqlite3DbMallocRaw(db, nOut+1);
+ if( zOut==0 ) goto returnfromblob_oom;
+      for(iIn=iOut=0; iIn<sz; iIn++){
+        char c = z[iIn];
+        if( c=='\\' ){
+          u32 v;
+          u32 szEscape = jsonUnescapeOneChar(&z[iIn], sz-iIn, &v);
+          if( v<=0x7f ){
+            zOut[iOut++] = (char)v;
+          }else if( v<=0x7ff ){
+            assert( szEscape>=2 );
+ zOut[iOut++] = (char)(0xc0 | (v>>6));
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }else if( v<0x10000 ){
+ assert( szEscape>=3 );
+ zOut[iOut++] = 0xe0 | (v>>12);
+ zOut[iOut++] = 0x80 | ((v>>6)&0x3f);
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }else if( v==JSON_INVALID_CHAR ){
+ /* Silently ignore illegal unicode */
+ }else{
+ assert( szEscape>=4 );
+ zOut[iOut++] = 0xf0 | (v>>18);
+ zOut[iOut++] = 0x80 | ((v>>12)&0x3f);
+ zOut[iOut++] = 0x80 | ((v>>6)&0x3f);
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }
+ iIn += szEscape - 1;
+ }else{
+ zOut[iOut++] = c;
+ }
+ } /* end for() */
+ assert( iOut<=nOut );
+ zOut[iOut] = 0;
+ sqlite3_result_text(pCtx, zOut, iOut, SQLITE_DYNAMIC);
+ break;
+ }
+ case JSONB_ARRAY:
+ case JSONB_OBJECT: {
+ int flags = textOnly ? 0 : SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx));
+ if( flags & JSON_BLOB ){
+ sqlite3_result_blob(pCtx, &pParse->aBlob[i], sz+n, SQLITE_TRANSIENT);
+ }else{
+ jsonReturnTextJsonFromBlob(pCtx, &pParse->aBlob[i], sz+n);
+ }
+ break;
+ }
+ default: {
+ goto returnfromblob_malformed;
+ }
+ }
+ return;
+
+returnfromblob_oom:
+ sqlite3_result_error_nomem(pCtx);
+ return;
+
+returnfromblob_malformed:
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ return;
+}
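+
+/* Editorial illustration (not part of the original patch): with the default
+** (text) user-data, jsonReturnFromBlob() maps JSONB_NULL to SQL NULL,
+** JSONB_TRUE/FALSE to the integers 1/0, JSONB_INT "42" to the integer 42,
+** JSONB_FLOAT "1.5" to the real 1.5, JSONB_TEXTJ "a\nb" to text containing a
+** real newline, and JSONB_ARRAY/JSONB_OBJECT to JSON text (or to the raw
+** JSONB blob when the JSON_BLOB flag is set).
+*/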
+
+/*
+** pArg is a function argument that might be an SQL value or a JSON
+** value. Figure out what it is and encode it as a JSONB blob.
+** Return the results in pParse.
+**
+** pParse is uninitialized upon entry. This routine will handle the
+** initialization of pParse. The result will be contained in
+** pParse->aBlob and pParse->nBlob. pParse->aBlob might be dynamically
+** allocated (if pParse->nBlobAlloc is greater than zero) in which case
+** the caller is responsible for freeing the space allocated to pParse->aBlob
+** when it has finished with it. Or pParse->aBlob might be a static string
+** or a value obtained from sqlite3_value_blob(pArg).
+**
+** If the argument is a BLOB that is clearly not a JSONB, then this
+** function might set an error message in ctx and return non-zero.
+** It might also set an error message and return non-zero on an OOM error.
+*/
+static int jsonFunctionArgToBlob(
+ sqlite3_context *ctx,
+ sqlite3_value *pArg,
+ JsonParse *pParse
+){
+ int eType = sqlite3_value_type(pArg);
+ static u8 aNull[] = { 0x00 };
+ memset(pParse, 0, sizeof(pParse[0]));
+ pParse->db = sqlite3_context_db_handle(ctx);
+ switch( eType ){
+ default: {
+ pParse->aBlob = aNull;
+ pParse->nBlob = 1;
+ return 0;
+ }
+ case SQLITE_BLOB: {
+ if( jsonFuncArgMightBeBinary(pArg) ){
+ pParse->aBlob = (u8*)sqlite3_value_blob(pArg);
+ pParse->nBlob = sqlite3_value_bytes(pArg);
+ }else{
+ sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1);
+ return 1;
+ }
+ break;
+ }
+ case SQLITE_TEXT: {
+ const char *zJson = (const char*)sqlite3_value_text(pArg);
+ int nJson = sqlite3_value_bytes(pArg);
+ if( zJson==0 ) return 1;
+ if( sqlite3_value_subtype(pArg)==JSON_SUBTYPE ){
+ pParse->zJson = (char*)zJson;
+ pParse->nJson = nJson;
+ if( jsonConvertTextToBlob(pParse, ctx) ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ sqlite3DbFree(pParse->db, pParse->aBlob);
+ memset(pParse, 0, sizeof(pParse[0]));
+ return 1;
+ }
+ }else{
+ jsonBlobAppendNode(pParse, JSONB_TEXTRAW, nJson, zJson);
+ }
+ break;
+ }
+ case SQLITE_FLOAT: {
+ double r = sqlite3_value_double(pArg);
+ if( NEVER(sqlite3IsNaN(r)) ){
+ jsonBlobAppendNode(pParse, JSONB_NULL, 0, 0);
+ }else{
+ int n = sqlite3_value_bytes(pArg);
+ const char *z = (const char*)sqlite3_value_text(pArg);
+ if( z==0 ) return 1;
+ if( z[0]=='I' ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
+ }else if( z[0]=='-' && z[1]=='I' ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999");
+ }else{
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, n, z);
+ }
+ }
+ break;
+ }
+ case SQLITE_INTEGER: {
+ int n = sqlite3_value_bytes(pArg);
+ const char *z = (const char*)sqlite3_value_text(pArg);
+ if( z==0 ) return 1;
+ jsonBlobAppendNode(pParse, JSONB_INT, n, z);
+ break;
+ }
+ }
+ if( pParse->oom ){
+ sqlite3_result_error_nomem(ctx);
+ return 1;
+ }else{
+ return 0;
+ }
+}
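+
+/* Editorial illustration (not part of the original patch): an SQL TEXT value
+** 'hi' with no JSON subtype becomes a JSONB_TEXTRAW node, the integer 7
+** becomes a JSONB_INT node holding "7", and an infinite REAL value becomes
+** the substitute JSONB_FLOAT literal "9e999", the same spelling used for
+** infinities elsewhere in the JSON routines.
+*/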
+
+/*
+** Generate a bad path error.
+**
+** If ctx is not NULL then push the error message into ctx and return NULL.
+** If ctx is NULL, then return the text of the error message.
+*/
+static char *jsonBadPathError(
+ sqlite3_context *ctx, /* The function call containing the error */
+ const char *zPath /* The path with the problem */
+){
+ char *zMsg = sqlite3_mprintf("bad JSON path: %Q", zPath);
+ if( ctx==0 ) return zMsg;
+ if( zMsg ){
+ sqlite3_result_error(ctx, zMsg, -1);
+ sqlite3_free(zMsg);
+ }else{
+ sqlite3_result_error_nomem(ctx);
+ }
+ return 0;
+}
+
+/* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent
+** arguments come in pairs, where each pair contains a JSON path and
+** content to insert or set at that path. Do the updates
+** and return the result.
+**
+** The specific operation is determined by eEdit, which can be one
+** of JEDIT_INS, JEDIT_REPL, or JEDIT_SET.
+*/
+static void jsonInsertIntoBlob(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv,
+ int eEdit /* JEDIT_INS, JEDIT_REPL, or JEDIT_SET */
+){
+ int i;
+ u32 rc = 0;
+ const char *zPath = 0;
+ int flgs;
+ JsonParse *p;
+ JsonParse ax;
+
+ assert( (argc&1)==1 );
+ flgs = argc==1 ? 0 : JSON_EDITABLE;
+ p = jsonParseFuncArg(ctx, argv[0], flgs);
+ if( p==0 ) return;
+  for(i=1; i<argc-1; i+=2){
+    zPath = (const char*)sqlite3_value_text(argv[i]);
+    if( zPath==0 || zPath[0]!='$' ) goto jsonInsertIntoBlob_patherror;
+    if( jsonFunctionArgToBlob(ctx, argv[i+1], &ax) ){
+      jsonParseFree(p);
+      return;
+    }
+    if( zPath[1]==0 ){
+      if( eEdit==JEDIT_REPL || eEdit==JEDIT_SET ){
+        jsonBlobEdit(p, 0, p->nBlob, ax.aBlob, ax.nBlob);
+ }
+ rc = 0;
+ }else{
+ p->eEdit = eEdit;
+ p->nIns = ax.nBlob;
+ p->aIns = ax.aBlob;
+ p->delta = 0;
+ rc = jsonLookupStep(p, 0, zPath+1, 0);
+ }
+ jsonParseReset(&ax);
+ if( rc==JSON_LOOKUP_NOTFOUND ) continue;
+ if( JSON_LOOKUP_ISERROR(rc) ) goto jsonInsertIntoBlob_patherror;
+ }
+ jsonReturnParse(ctx, p);
+ jsonParseFree(p);
+ return;
+
+jsonInsertIntoBlob_patherror:
+ jsonParseFree(p);
+ if( rc==JSON_LOOKUP_ERROR ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }else{
+ jsonBadPathError(ctx, zPath);
+ }
+ return;
+}
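+
+/* Editorial illustration (not part of the original patch): routed through
+** this function, the documented SQL behaviors are, for example,
+**
+**     json_insert('{"a":1}', '$.b', 2)   ->  '{"a":1,"b":2}'
+**     json_insert('{"a":1}', '$.a', 9)   ->  '{"a":1}'       (insert never overwrites)
+**     json_replace('{"a":1}', '$.b', 2)  ->  '{"a":1}'       (replace never creates)
+**     json_set('{"a":1}', '$.b', 2)      ->  '{"a":1,"b":2}' (set does either)
+*/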
+
+/*
+** If pArg is a blob that seems like a JSONB blob, then initialize
+** p to point to that JSONB and return TRUE. If pArg does not seem like
+** a JSONB blob, then return FALSE;
+**
+** This routine is only called if it is already known that pArg is a
+** blob. The only open question is whether or not the blob appears
+** to be a JSONB blob.
+*/
+static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){
+ u32 n, sz = 0;
+ p->aBlob = (u8*)sqlite3_value_blob(pArg);
+ p->nBlob = (u32)sqlite3_value_bytes(pArg);
+ if( p->nBlob==0 ){
+ p->aBlob = 0;
+ return 0;
+ }
+ if( NEVER(p->aBlob==0) ){
+ return 0;
+ }
+ if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT
+ && (n = jsonbPayloadSize(p, 0, &sz))>0
+ && sz+n==p->nBlob
+ && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0)
+ ){
+ return 1;
+ }
+ p->aBlob = 0;
+ p->nBlob = 0;
+ return 0;
+}
+
+/*
+** Generate a JsonParse object, containing valid JSONB in aBlob and nBlob,
+** from the SQL function argument pArg. Return a pointer to the new
+** JsonParse object.
+**
+** Ownership of the new JsonParse object is passed to the caller. The
+** caller should invoke jsonParseFree() on the return value when it
+** has finished using it.
+**
+** If any errors are detected, an appropriate error message is set
+** using sqlite3_result_error() or the equivalent and this routine
+** returns NULL. This routine also returns NULL if the pArg argument
+** is an SQL NULL value, but no error message is set in that case. This
+** is so that SQL functions that are given NULL arguments will return
+** a NULL value.
+*/
+static JsonParse *jsonParseFuncArg(
+ sqlite3_context *ctx,
+ sqlite3_value *pArg,
+ u32 flgs
+){
+ int eType; /* Datatype of pArg */
+ JsonParse *p = 0; /* Value to be returned */
+ JsonParse *pFromCache = 0; /* Value taken from cache */
+ sqlite3 *db; /* The database connection */
+
+ assert( ctx!=0 );
+ eType = sqlite3_value_type(pArg);
+ if( eType==SQLITE_NULL ){
+ return 0;
+ }
+ pFromCache = jsonCacheSearch(ctx, pArg);
+ if( pFromCache ){
+ pFromCache->nJPRef++;
+ if( (flgs & JSON_EDITABLE)==0 ){
+ return pFromCache;
+ }
+ }
+ db = sqlite3_context_db_handle(ctx);
+rebuild_from_cache:
+ p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p==0 ) goto json_pfa_oom;
+ memset(p, 0, sizeof(*p));
+ p->db = db;
+ p->nJPRef = 1;
+ if( pFromCache!=0 ){
+ u32 nBlob = pFromCache->nBlob;
+ p->aBlob = sqlite3DbMallocRaw(db, nBlob);
+ if( p->aBlob==0 ) goto json_pfa_oom;
+ memcpy(p->aBlob, pFromCache->aBlob, nBlob);
+ p->nBlobAlloc = p->nBlob = nBlob;
+ p->hasNonstd = pFromCache->hasNonstd;
+ jsonParseFree(pFromCache);
+ return p;
+ }
+ if( eType==SQLITE_BLOB ){
+ if( jsonArgIsJsonb(pArg,p) ){
+ if( (flgs & JSON_EDITABLE)!=0 && jsonBlobMakeEditable(p, 0)==0 ){
+ goto json_pfa_oom;
+ }
+ return p;
+ }
+ /* If the blob is not valid JSONB, fall through into trying to cast
+ ** the blob into text which is then interpreted as JSON. (tag-20240123-a)
+ **
+ ** This goes against all historical documentation about how the SQLite
+** JSON functions were supposed to work. From the beginning, blob was
+ ** reserved for expansion and a blob value should have raised an error.
+ ** But it did not, due to a bug. And many applications came to depend
+** upon this buggy behavior, especially when using the CLI and reading
+ ** JSON text using readfile(), which returns a blob. For this reason
+ ** we will continue to support the bug moving forward.
+ ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d
+ */
+ }
+ p->zJson = (char*)sqlite3_value_text(pArg);
+ p->nJson = sqlite3_value_bytes(pArg);
+ if( p->nJson==0 ) goto json_pfa_malformed;
+ if( NEVER(p->zJson==0) ) goto json_pfa_oom;
+ if( jsonConvertTextToBlob(p, (flgs & JSON_KEEPERROR) ? 0 : ctx) ){
+ if( flgs & JSON_KEEPERROR ){
+ p->nErr = 1;
+ return p;
+ }else{
+ jsonParseFree(p);
+ return 0;
+ }
+ }else{
+ int isRCStr = sqlite3ValueIsOfClass(pArg, sqlite3RCStrUnref);
+ int rc;
+ if( !isRCStr ){
+ char *zNew = sqlite3RCStrNew( p->nJson );
+ if( zNew==0 ) goto json_pfa_oom;
+ memcpy(zNew, p->zJson, p->nJson);
+ p->zJson = zNew;
+ p->zJson[p->nJson] = 0;
+ }else{
+ sqlite3RCStrRef(p->zJson);
+ }
+ p->bJsonIsRCStr = 1;
+ rc = jsonCacheInsert(ctx, p);
+ if( rc==SQLITE_NOMEM ) goto json_pfa_oom;
+ if( flgs & JSON_EDITABLE ){
+ pFromCache = p;
+ p = 0;
+ goto rebuild_from_cache;
+ }
+ }
+ return p;
+
+json_pfa_malformed:
+ if( flgs & JSON_KEEPERROR ){
+ p->nErr = 1;
+ return p;
+ }else{
+ jsonParseFree(p);
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ return 0;
+ }
+
+json_pfa_oom:
+ jsonParseFree(pFromCache);
+ jsonParseFree(p);
+ sqlite3_result_error_nomem(ctx);
+ return 0;
+}
+
+/*
+** Make the return value of a JSON function either the raw JSONB blob
+** or make it JSON text, depending on whether the JSON_BLOB flag is
+** set on the function.
+*/
+static void jsonReturnParse(
+ sqlite3_context *ctx,
+ JsonParse *p
+){
+ int flgs;
+ if( p->oom ){
+ sqlite3_result_error_nomem(ctx);
+ return;
+ }
+ flgs = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( flgs & JSON_BLOB ){
+ if( p->nBlobAlloc>0 && !p->bReadOnly ){
+ sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_DYNAMIC);
+ p->nBlobAlloc = 0;
+ }else{
+ sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_TRANSIENT);
+ }
+ }else{
+ JsonString s;
+ jsonStringInit(&s, ctx);
+ p->delta = 0;
+ jsonTranslateBlobToText(p, 0, &s);
+ jsonReturnString(&s, p, ctx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
+}
/****************************************************************************
** SQL functions used for testing and debugging
****************************************************************************/
#if SQLITE_DEBUG
/*
-** Print N node entries.
+** Decode JSONB bytes in aBlob[] starting at iStart through but not
+** including iEnd. Indent the
+** content by nIndent spaces.
*/
-static void jsonDebugPrintNodeEntries(
- JsonNode *aNode, /* First node entry to print */
- int N /* Number of node entries to print */
+static void jsonDebugPrintBlob(
+ JsonParse *pParse, /* JSON content */
+ u32 iStart, /* Start rendering here */
+ u32 iEnd, /* Do not render this byte or any byte after this one */
+ int nIndent, /* Indent by this many spaces */
+ sqlite3_str *pOut /* Generate output into this sqlite3_str object */
){
- int i;
-  for(i=0; i<N; i++){
+  while( iStart<iEnd ){
+    u32 i, n, nn, sz = 0;
+    int showContent = 1;
+    u8 x = pParse->aBlob[iStart] & 0x0f;
+ u32 savedNBlob = pParse->nBlob;
+ sqlite3_str_appendf(pOut, "%5d:%*s", iStart, nIndent, "");
+ if( pParse->nBlobAlloc>pParse->nBlob ){
+ pParse->nBlob = pParse->nBlobAlloc;
+ }
+ nn = n = jsonbPayloadSize(pParse, iStart, &sz);
+ if( nn==0 ) nn = 1;
+    if( sz>0 && x<JSONB_ARRAY ){
+      nn += sz;
+    }
+    for(i=0; i<nn; i++){
+      sqlite3_str_appendf(pOut, " %02x", pParse->aBlob[iStart+i]);
+ }
+ if( n==0 ){
+ sqlite3_str_appendf(pOut, " ERROR invalid node size\n");
+ iStart = n==0 ? iStart+1 : iEnd;
+ continue;
+ }
+ pParse->nBlob = savedNBlob;
+ if( iStart+n+sz>iEnd ){
+ iEnd = iStart+n+sz;
+ if( iEnd>pParse->nBlob ){
+ if( pParse->nBlobAlloc>0 && iEnd>pParse->nBlobAlloc ){
+ iEnd = pParse->nBlobAlloc;
+ }else{
+ iEnd = pParse->nBlob;
+ }
+ }
+ }
+ sqlite3_str_appendall(pOut," <-- ");
+ switch( x ){
+ case JSONB_NULL: sqlite3_str_appendall(pOut,"null"); break;
+ case JSONB_TRUE: sqlite3_str_appendall(pOut,"true"); break;
+ case JSONB_FALSE: sqlite3_str_appendall(pOut,"false"); break;
+ case JSONB_INT: sqlite3_str_appendall(pOut,"int"); break;
+ case JSONB_INT5: sqlite3_str_appendall(pOut,"int5"); break;
+ case JSONB_FLOAT: sqlite3_str_appendall(pOut,"float"); break;
+ case JSONB_FLOAT5: sqlite3_str_appendall(pOut,"float5"); break;
+ case JSONB_TEXT: sqlite3_str_appendall(pOut,"text"); break;
+ case JSONB_TEXTJ: sqlite3_str_appendall(pOut,"textj"); break;
+ case JSONB_TEXT5: sqlite3_str_appendall(pOut,"text5"); break;
+ case JSONB_TEXTRAW: sqlite3_str_appendall(pOut,"textraw"); break;
+ case JSONB_ARRAY: {
+ sqlite3_str_appendf(pOut,"array, %u bytes\n", sz);
+ jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut);
+ showContent = 0;
+ break;
+ }
+ case JSONB_OBJECT: {
+ sqlite3_str_appendf(pOut, "object, %u bytes\n", sz);
+ jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut);
+ showContent = 0;
+ break;
+ }
+ default: {
+ sqlite3_str_appendall(pOut, "ERROR: unknown node type\n");
+ showContent = 0;
+ break;
+ }
+ }
+ if( showContent ){
+ if( sz==0 && x<=JSONB_FALSE ){
+ sqlite3_str_append(pOut, "\n", 1);
+ }else{
+ u32 j;
+ sqlite3_str_appendall(pOut, ": \"");
+        for(j=iStart+n; j<iStart+n+sz; j++){
+          u8 c = pParse->aBlob[j];
+ if( c<0x20 || c>=0x7f ) c = '.';
+ sqlite3_str_append(pOut, (char*)&c, 1);
+ }
+ sqlite3_str_append(pOut, "\"\n", 2);
+ }
+ }
+ iStart += n + sz;
+ }
+}
+static void jsonShowParse(JsonParse *pParse){
+ sqlite3_str out;
+ char zBuf[1000];
+ if( pParse==0 ){
+ printf("NULL pointer\n");
+ return;
+ }else{
+ printf("nBlobAlloc = %u\n", pParse->nBlobAlloc);
+ printf("nBlob = %u\n", pParse->nBlob);
+ printf("delta = %d\n", pParse->delta);
+ if( pParse->nBlob==0 ) return;
+ printf("content (bytes 0..%u):\n", pParse->nBlob-1);
+ }
+ sqlite3StrAccumInit(&out, 0, zBuf, sizeof(zBuf), 1000000);
+ jsonDebugPrintBlob(pParse, 0, pParse->nBlob, 0, &out);
+ printf("%s", sqlite3_str_value(&out));
+ sqlite3_str_reset(&out);
}
#endif /* SQLITE_DEBUG */
-
-#if 0 /* 1 for debugging. 0 normally. Requires -DSQLITE_DEBUG too */
-static void jsonDebugPrintParse(JsonParse *p){
- jsonDebugPrintNodeEntries(p->aNode, p->nNode);
-}
-static void jsonDebugPrintNode(JsonNode *pNode){
- jsonDebugPrintNodeEntries(pNode, jsonNodeSize(pNode));
-}
-#else
- /* The usual case */
-# define jsonDebugPrintNode(X)
-# define jsonDebugPrintParse(X)
-#endif
-
#ifdef SQLITE_DEBUG
/*
** SQL function: json_parse(JSON)
**
-** Parse JSON using jsonParseCached(). Then print a dump of that
-** parse on standard output. Return the mimified JSON result, just
-** like the json() function.
+** Parse JSON using jsonParseFuncArg(). Return text that is a
+** human-readable dump of the binary JSONB for the input parameter.
*/
static void jsonParseFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
JsonParse *p; /* The parse */
+ sqlite3_str out;
- assert( argc==1 );
- p = jsonParseCached(ctx, argv[0], ctx, 0);
+ assert( argc>=1 );
+ sqlite3StrAccumInit(&out, 0, 0, 0, 1000000);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
- printf("nNode = %u\n", p->nNode);
- printf("nAlloc = %u\n", p->nAlloc);
- printf("nJson = %d\n", p->nJson);
- printf("nAlt = %d\n", p->nAlt);
- printf("nErr = %u\n", p->nErr);
- printf("oom = %u\n", p->oom);
- printf("hasNonstd = %u\n", p->hasNonstd);
- printf("useMod = %u\n", p->useMod);
- printf("hasMod = %u\n", p->hasMod);
- printf("nJPRef = %u\n", p->nJPRef);
- printf("iSubst = %u\n", p->iSubst);
- printf("iHold = %u\n", p->iHold);
- jsonDebugPrintNodeEntries(p->aNode, p->nNode);
- jsonReturnJson(p, p->aNode, ctx, 1, 0);
-}
-
-/*
-** The json_test1(JSON) function return true (1) if the input is JSON
-** text generated by another json function. It returns (0) if the input
-** is not known to be JSON.
-*/
-static void jsonTest1Func(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- UNUSED_PARAMETER(argc);
- sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE);
+ if( argc==1 ){
+ jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out);
+ sqlite3_result_text64(ctx,out.zText,out.nChar,SQLITE_TRANSIENT,SQLITE_UTF8);
+ }else{
+ jsonShowParse(p);
+ }
+ jsonParseFree(p);
+ sqlite3_str_reset(&out);
}
#endif /* SQLITE_DEBUG */
/****************************************************************************
** Scalar SQL function implementations
****************************************************************************/
/*
-** Implementation of the json_QUOTE(VALUE) function. Return a JSON value
+** Implementation of the json_quote(VALUE) function. Return a JSON value
** corresponding to the SQL value input. Mostly this means putting
** double-quotes around strings and returning the unquoted string "null"
** when given a NULL input.
*/
static void jsonQuoteFunc(
@@ -2429,13 +3605,13 @@
sqlite3_value **argv
){
JsonString jx;
UNUSED_PARAMETER(argc);
- jsonInit(&jx, ctx);
- jsonAppendValue(&jx, argv[0]);
- jsonResult(&jx);
+ jsonStringInit(&jx, ctx);
+ jsonAppendSqlValue(&jx, argv[0]);
+ jsonReturnString(&jx, 0, 0);
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
/*
** Implementation of the json_array(VALUE,...) function. Return a JSON
@@ -2448,21 +3624,20 @@
sqlite3_value **argv
){
int i;
JsonString jx;
- jsonInit(&jx, ctx);
+ jsonStringInit(&jx, ctx);
jsonAppendChar(&jx, '[');
   for(i=0; i<argc; i++){
     jsonAppendSeparator(&jx);
-    jsonAppendValue(&jx, argv[i]);
+    jsonAppendSqlValue(&jx, argv[i]);
   }
   jsonAppendChar(&jx, ']');
-  jsonResult(&jx);
+  jsonReturnString(&jx, 0, 0);
   sqlite3_result_subtype(ctx, JSON_SUBTYPE);
 }
-  assert( p->nNode );
if( argc==2 ){
const char *zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
+ if( zPath==0 ){
+ jsonParseFree(p);
+ return;
+ }
+ i = jsonLookupStep(p, 0, zPath[0]=='$' ? zPath+1 : "@", 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ /* no-op */
+ }else if( i==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ eErr = 1;
+ i = 0;
+ }
}else{
- pNode = p->aNode;
- }
- if( pNode==0 ){
- return;
- }
- if( pNode->eType==JSON_ARRAY ){
- while( 1 /*exit-by-break*/ ){
- i = 1;
- while( i<=pNode->n ){
- if( (pNode[i].jnFlags & JNODE_REMOVE)==0 ) n++;
- i += jsonNodeSize(&pNode[i]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- if( p->useMod==0 ) break;
- assert( pNode->eU==2 );
- pNode = &p->aNode[pNode->u.iAppend];
- }
- }
- sqlite3_result_int64(ctx, n);
-}
-
-/*
-** Bit values for the flags passed into jsonExtractFunc() or
-** jsonSetFunc() via the user-data value.
-*/
-#define JSON_JSON 0x01 /* Result is always JSON */
-#define JSON_SQL 0x02 /* Result is always SQL */
-#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */
-#define JSON_ISSET 0x04 /* json_set(), not json_insert() */
+ i = 0;
+ }
+ if( (p->aBlob[i] & 0x0f)==JSONB_ARRAY ){
+ cnt = jsonbArrayCount(p, i);
+ }
+ if( !eErr ) sqlite3_result_int64(ctx, cnt);
+ jsonParseFree(p);
+}
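+
+/* Editorial illustration (not part of the original patch):
+**
+**     json_array_length('[1,2,[3,4]]')          -> 3
+**     json_array_length('[1,2,[3,4]]','$[2]')   -> 2
+**     json_array_length('{"a":[1,2]}')          -> 0  (top level is not an array)
+*/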
+
+/* True if the string is all digits */
+static int jsonAllDigits(const char *z, int n){
+ int i;
+  for(i=0; i<n && sqlite3Isdigit(z[i]); i++){}
+  return i==n;
+}
+
+/* True if the string is all alphanumerics and underscores */
+static int jsonAllAlphanum(const char *z, int n){
+  int i;
+  for(i=0; i<n && (sqlite3Isalnum(z[i]) || z[i]=='_'); i++){}
+  return i==n;
+}
+
+/*
+** json_extract(JSON, PATH, ...)
+** "->"(JSON,PATH)
** "->>"(JSON,PATH)
@@ -2539,154 +3721,310 @@
static void jsonExtractFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- JsonParse *p; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
- JsonString jx;
+ JsonParse *p = 0; /* The parse */
+ int flags; /* Flags associated with the function */
+ int i; /* Loop counter */
+ JsonString jx; /* String for array result */
if( argc<2 ) return;
- p = jsonParseCached(ctx, argv[0], ctx, 0);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
- if( argc==2 ){
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ jsonStringInit(&jx, ctx);
+ if( argc>2 ){
+ jsonAppendChar(&jx, '[');
+ }
+  for(i=1; i<argc; i++){
-    /* The -> and ->> operators accept abbreviated PATH arguments. This
- ** is mostly for compatibility with PostgreSQL, but also for
- ** convenience.
- **
- ** NUMBER ==> $[NUMBER] // PG compatible
- ** LABEL ==> $.LABEL // PG compatible
- ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience
- */
- jsonInit(&jx, ctx);
- if( sqlite3Isdigit(zPath[0]) ){
- jsonAppendRawNZ(&jx, "$[", 2);
- jsonAppendRaw(&jx, zPath, (int)strlen(zPath));
- jsonAppendRawNZ(&jx, "]", 2);
- }else{
- jsonAppendRawNZ(&jx, "$.", 1 + (zPath[0]!='['));
- jsonAppendRaw(&jx, zPath, (int)strlen(zPath));
- jsonAppendChar(&jx, 0);
- }
- pNode = jx.bErr ? 0 : jsonLookup(p, jx.zBuf, 0, ctx);
- jsonReset(&jx);
- }else{
- pNode = jsonLookup(p, zPath, 0, ctx);
- }
- if( pNode ){
+ const char *zPath = (const char*)sqlite3_value_text(argv[i]);
+ int nPath;
+ u32 j;
+ if( zPath==0 ) goto json_extract_error;
+ nPath = sqlite3Strlen30(zPath);
+ if( zPath[0]=='$' ){
+ j = jsonLookupStep(p, 0, zPath+1, 0);
+ }else if( (flags & JSON_ABPATH) ){
+ /* The -> and ->> operators accept abbreviated PATH arguments. This
+ ** is mostly for compatibility with PostgreSQL, but also for
+ ** convenience.
+ **
+ ** NUMBER ==> $[NUMBER] // PG compatible
+ ** LABEL ==> $.LABEL // PG compatible
+ ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience
+ */
+ jsonStringInit(&jx, ctx);
+ if( jsonAllDigits(zPath, nPath) ){
+ jsonAppendRawNZ(&jx, "[", 1);
+ jsonAppendRaw(&jx, zPath, nPath);
+ jsonAppendRawNZ(&jx, "]", 2);
+ }else if( jsonAllAlphanum(zPath, nPath) ){
+ jsonAppendRawNZ(&jx, ".", 1);
+ jsonAppendRaw(&jx, zPath, nPath);
+ }else if( zPath[0]=='[' && nPath>=3 && zPath[nPath-1]==']' ){
+ jsonAppendRaw(&jx, zPath, nPath);
+ }else{
+ jsonAppendRawNZ(&jx, ".\"", 2);
+ jsonAppendRaw(&jx, zPath, nPath);
+ jsonAppendRawNZ(&jx, "\"", 1);
+ }
+ jsonStringTerminate(&jx);
+ j = jsonLookupStep(p, 0, jx.zBuf, 0);
+ jsonStringReset(&jx);
+ }else{
+ jsonBadPathError(ctx, zPath);
+ goto json_extract_error;
+ }
+ if( jnBlob ){
+ if( argc==2 ){
if( flags & JSON_JSON ){
- jsonReturnJson(p, pNode, ctx, 0, 0);
- }else{
- jsonReturn(p, pNode, ctx, 1);
- }
- }
- }else{
- pNode = jsonLookup(p, zPath, 0, ctx);
- if( p->nErr==0 && pNode ) jsonReturn(p, pNode, ctx, 0);
- }
- }else{
- /* Two or more PATH arguments results in a JSON array with each
- ** element of the array being the value selected by one of the PATHs */
- int i;
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '[');
- for(i=1; inErr ) break;
- jsonAppendSeparator(&jx);
- if( pNode ){
- jsonRenderNode(p, pNode, &jx);
- }else{
+ jsonStringInit(&jx, ctx);
+ jsonTranslateBlobToText(p, j, &jx);
+ jsonReturnString(&jx, 0, 0);
+ jsonStringReset(&jx);
+ assert( (flags & JSON_BLOB)==0 );
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }else{
+ jsonReturnFromBlob(p, j, ctx, 0);
+ if( (flags & (JSON_SQL|JSON_BLOB))==0
+ && (p->aBlob[j]&0x0f)>=JSONB_ARRAY
+ ){
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
+ }
+ }else{
+ jsonAppendSeparator(&jx);
+ jsonTranslateBlobToText(p, j, &jx);
+ }
+ }else if( j==JSON_LOOKUP_NOTFOUND ){
+ if( argc==2 ){
+ goto json_extract_error; /* Return NULL if not found */
+ }else{
+ jsonAppendSeparator(&jx);
jsonAppendRawNZ(&jx, "null", 4);
}
+ }else if( j==JSON_LOOKUP_ERROR ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ goto json_extract_error;
+ }else{
+ jsonBadPathError(ctx, zPath);
+ goto json_extract_error;
}
- if( i==argc ){
- jsonAppendChar(&jx, ']');
- jsonResult(&jx);
+ }
+ if( argc>2 ){
+ jsonAppendChar(&jx, ']');
+ jsonReturnString(&jx, 0, 0);
+ if( (flags & JSON_BLOB)==0 ){
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
- jsonReset(&jx);
}
+json_extract_error:
+ jsonStringReset(&jx);
+ jsonParseFree(p);
+ return;
}
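+
+/* Editorial illustration (not part of the original patch): the abbreviated
+** path forms accepted by the -> and ->> operators above expand as follows:
+**
+**     j -> 'a'     is treated like  j -> '$.a'
+**     j ->> 2      is treated like  j ->> '$[2]'
+**     j -> '[1]'   is treated like  j -> '$[1]'
+*/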
-/* This is the RFC 7396 MergePatch algorithm.
+/*
+** Return codes for jsonMergePatch()
+*/
+#define JSON_MERGE_OK 0 /* Success */
+#define JSON_MERGE_BADTARGET 1 /* Malformed TARGET blob */
+#define JSON_MERGE_BADPATCH 2 /* Malformed PATCH blob */
+#define JSON_MERGE_OOM 3 /* Out-of-memory condition */
+
+/*
+** RFC-7396 MergePatch for two JSONB blobs.
+**
+** pTarget is the target. pPatch is the patch. The target is updated
+** in place. The patch is read-only.
+**
+** The original RFC-7396 algorithm is this:
+**
+** define MergePatch(Target, Patch):
+** if Patch is an Object:
+** if Target is not an Object:
+** Target = {} # Ignore the contents and set it to an empty Object
+** for each Name/Value pair in Patch:
+** if Value is null:
+** if Name exists in Target:
+** remove the Name/Value pair from Target
+** else:
+** Target[Name] = MergePatch(Target[Name], Value)
+** return Target
+** else:
+** return Patch
+**
+** Here is an equivalent algorithm restructured to show the actual
+** implementation:
+**
+** 01 define MergePatch(Target, Patch):
+** 02 if Patch is not an Object:
+** 03 return Patch
+** 04 else: // if Patch is an Object
+** 05 if Target is not an Object:
+** 06 Target = {}
+** 07 for each Name/Value pair in Patch:
+** 08 if Name exists in Target:
+** 09 if Value is null:
+** 10 remove the Name/Value pair from Target
+** 11 else
+** 12 Target[name] = MergePatch(Target[Name], Value)
+** 13 else if Value is not NULL:
+** 14 if Value is not an Object:
+** 15 Target[name] = Value
+** 16 else:
+** 17 Target[name] = MergePatch('{}',value)
+** 18 return Target
+** |
+** ^---- Line numbers referenced in comments in the implementation
*/
-static JsonNode *jsonMergePatch(
- JsonParse *pParse, /* The JSON parser that contains the TARGET */
- u32 iTarget, /* Node of the TARGET in pParse */
- JsonNode *pPatch /* The PATCH */
+static int jsonMergePatch(
+ JsonParse *pTarget, /* The JSON parser that contains the TARGET */
+ u32 iTarget, /* Index of TARGET in pTarget->aBlob[] */
+ const JsonParse *pPatch, /* The PATCH */
+ u32 iPatch /* Index of PATCH in pPatch->aBlob[] */
){
- u32 i, j;
- u32 iRoot;
- JsonNode *pTarget;
- if( pPatch->eType!=JSON_OBJECT ){
- return pPatch;
- }
- assert( iTargetnNode );
- pTarget = &pParse->aNode[iTarget];
- assert( (pPatch->jnFlags & JNODE_APPEND)==0 );
- if( pTarget->eType!=JSON_OBJECT ){
- jsonRemoveAllNulls(pPatch);
- return pPatch;
- }
- iRoot = iTarget;
- for(i=1; in; i += jsonNodeSize(&pPatch[i+1])+1){
- u32 nKey;
- const char *zKey;
- assert( pPatch[i].eType==JSON_STRING );
- assert( pPatch[i].jnFlags & JNODE_LABEL );
- assert( pPatch[i].eU==1 );
- nKey = pPatch[i].n;
- zKey = pPatch[i].u.zJContent;
- for(j=1; jn; j += jsonNodeSize(&pTarget[j+1])+1 ){
- assert( pTarget[j].eType==JSON_STRING );
- assert( pTarget[j].jnFlags & JNODE_LABEL );
- if( jsonSameLabel(&pPatch[i], &pTarget[j]) ){
- if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_REPLACE) ) break;
- if( pPatch[i+1].eType==JSON_NULL ){
- pTarget[j+1].jnFlags |= JNODE_REMOVE;
- }else{
- JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]);
- if( pNew==0 ) return 0;
- if( pNew!=&pParse->aNode[iTarget+j+1] ){
- jsonParseAddSubstNode(pParse, iTarget+j+1);
- jsonParseAddNodeArray(pParse, pNew, jsonNodeSize(pNew));
- }
- pTarget = &pParse->aNode[iTarget];
- }
- break;
- }
- }
- if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){
- int iStart;
- JsonNode *pApnd;
- u32 nApnd;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
- pApnd = &pPatch[i+1];
- if( pApnd->eType==JSON_OBJECT ) jsonRemoveAllNulls(pApnd);
- nApnd = jsonNodeSize(pApnd);
- jsonParseAddNodeArray(pParse, pApnd, jsonNodeSize(pApnd));
- if( pParse->oom ) return 0;
- pParse->aNode[iStart].n = 1+nApnd;
- pParse->aNode[iRoot].jnFlags |= JNODE_APPEND;
- pParse->aNode[iRoot].u.iAppend = iStart;
- VVA( pParse->aNode[iRoot].eU = 2 );
- iRoot = iStart;
- pTarget = &pParse->aNode[iTarget];
- }
- }
- return pTarget;
-}
+ u8 x; /* Type of a single node */
+ u32 n, sz=0; /* Return values from jsonbPayloadSize() */
+ u32 iTCursor; /* Cursor position while scanning the target object */
+ u32 iTStart; /* First label in the target object */
+ u32 iTEndBE; /* Original first byte past end of target, before edit */
+ u32 iTEnd; /* Current first byte past end of target */
+ u8 eTLabel; /* Node type of the target label */
+ u32 iTLabel = 0; /* Index of the label */
+ u32 nTLabel = 0; /* Header size in bytes for the target label */
+ u32 szTLabel = 0; /* Size of the target label payload */
+ u32 iTValue = 0; /* Index of the target value */
+ u32 nTValue = 0; /* Header size of the target value */
+ u32 szTValue = 0; /* Payload size for the target value */
+
+ u32 iPCursor; /* Cursor position while scanning the patch */
+ u32 iPEnd; /* First byte past the end of the patch */
+ u8 ePLabel; /* Node type of the patch label */
+ u32 iPLabel; /* Start of patch label */
+ u32 nPLabel; /* Size of header on the patch label */
+ u32 szPLabel; /* Payload size of the patch label */
+ u32 iPValue; /* Start of patch value */
+ u32 nPValue; /* Header size for the patch value */
+ u32 szPValue; /* Payload size of the patch value */
+
+  assert( iTarget>=0 && iTarget<pTarget->nBlob );
+  assert( iPatch>=0 && iPatch<pPatch->nBlob );
+ x = pPatch->aBlob[iPatch] & 0x0f;
+ if( x!=JSONB_OBJECT ){ /* Algorithm line 02 */
+ u32 szPatch; /* Total size of the patch, header+payload */
+ u32 szTarget; /* Total size of the target, header+payload */
+ n = jsonbPayloadSize(pPatch, iPatch, &sz);
+ szPatch = n+sz;
+ sz = 0;
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ szTarget = n+sz;
+ jsonBlobEdit(pTarget, iTarget, szTarget, pPatch->aBlob+iPatch, szPatch);
+ return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK; /* Line 03 */
+ }
+ x = pTarget->aBlob[iTarget] & 0x0f;
+ if( x!=JSONB_OBJECT ){ /* Algorithm line 05 */
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ jsonBlobEdit(pTarget, iTarget+n, sz, 0, 0);
+ x = pTarget->aBlob[iTarget];
+ pTarget->aBlob[iTarget] = (x & 0xf0) | JSONB_OBJECT;
+ }
+ n = jsonbPayloadSize(pPatch, iPatch, &sz);
+ if( NEVER(n==0) ) return JSON_MERGE_BADPATCH;
+ iPCursor = iPatch+n;
+ iPEnd = iPCursor+sz;
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ if( NEVER(n==0) ) return JSON_MERGE_BADTARGET;
+ iTStart = iTarget+n;
+ iTEndBE = iTStart+sz;
+
+  while( iPCursor<iPEnd ){
+    int isEqual;  /* True if the patch and target labels match */
+    iPLabel = iPCursor;
+    ePLabel = pPatch->aBlob[iPCursor] & 0x0f;
+    if( ePLabel<JSONB_TEXT || ePLabel>JSONB_TEXTRAW ){
+ return JSON_MERGE_BADPATCH;
+ }
+ nPLabel = jsonbPayloadSize(pPatch, iPCursor, &szPLabel);
+ if( nPLabel==0 ) return JSON_MERGE_BADPATCH;
+ iPValue = iPCursor + nPLabel + szPLabel;
+ if( iPValue>=iPEnd ) return JSON_MERGE_BADPATCH;
+ nPValue = jsonbPayloadSize(pPatch, iPValue, &szPValue);
+ if( nPValue==0 ) return JSON_MERGE_BADPATCH;
+ iPCursor = iPValue + nPValue + szPValue;
+ if( iPCursor>iPEnd ) return JSON_MERGE_BADPATCH;
+
+ iTCursor = iTStart;
+ iTEnd = iTEndBE + pTarget->delta;
+    while( iTCursor<iTEnd ){
+      iTLabel = iTCursor;
+      eTLabel = pTarget->aBlob[iTCursor] & 0x0f;
+      if( eTLabel<JSONB_TEXT || eTLabel>JSONB_TEXTRAW ){
+ return JSON_MERGE_BADTARGET;
+ }
+ nTLabel = jsonbPayloadSize(pTarget, iTCursor, &szTLabel);
+ if( nTLabel==0 ) return JSON_MERGE_BADTARGET;
+ iTValue = iTLabel + nTLabel + szTLabel;
+ if( iTValue>=iTEnd ) return JSON_MERGE_BADTARGET;
+ nTValue = jsonbPayloadSize(pTarget, iTValue, &szTValue);
+ if( nTValue==0 ) return JSON_MERGE_BADTARGET;
+ if( iTValue + nTValue + szTValue > iTEnd ) return JSON_MERGE_BADTARGET;
+ isEqual = jsonLabelCompare(
+ (const char*)&pPatch->aBlob[iPLabel+nPLabel],
+ szPLabel,
+ (ePLabel==JSONB_TEXT || ePLabel==JSONB_TEXTRAW),
+ (const char*)&pTarget->aBlob[iTLabel+nTLabel],
+ szTLabel,
+ (eTLabel==JSONB_TEXT || eTLabel==JSONB_TEXTRAW));
+ if( isEqual ) break;
+ iTCursor = iTValue + nTValue + szTValue;
+ }
+ x = pPatch->aBlob[iPValue] & 0x0f;
+    if( iTCursor<iTEnd ){
+      /* A match was found.  Algorithm line 08 */
+      if( x==0 ){
+        /* Patch value is NULL.  Algorithm line 09 */
+        jsonBlobEdit(pTarget, iTLabel, nTLabel+szTLabel+nTValue+szTValue, 0,0);
+        if( NEVER(pTarget->oom) ) return JSON_MERGE_OOM;
+ }else{
+ /* Algorithm line 12 */
+ int rc, savedDelta = pTarget->delta;
+ pTarget->delta = 0;
+ rc = jsonMergePatch(pTarget, iTValue, pPatch, iPValue);
+ if( rc ) return rc;
+ pTarget->delta += savedDelta;
+ }
+ }else if( x>0 ){ /* Algorithm line 13 */
+ /* No match and patch value is not NULL */
+ u32 szNew = szPLabel+nPLabel;
+ if( (pPatch->aBlob[iPValue] & 0x0f)!=JSONB_OBJECT ){ /* Line 14 */
+ jsonBlobEdit(pTarget, iTEnd, 0, 0, szPValue+nPValue+szNew);
+ if( pTarget->oom ) return JSON_MERGE_OOM;
+ memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew);
+ memcpy(&pTarget->aBlob[iTEnd+szNew],
+ &pPatch->aBlob[iPValue], szPValue+nPValue);
+ }else{
+ int rc, savedDelta;
+ jsonBlobEdit(pTarget, iTEnd, 0, 0, szNew+1);
+ if( pTarget->oom ) return JSON_MERGE_OOM;
+ memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew);
+ pTarget->aBlob[iTEnd+szNew] = 0x00;
+ savedDelta = pTarget->delta;
+ pTarget->delta = 0;
+ rc = jsonMergePatch(pTarget, iTEnd+szNew,pPatch,iPValue);
+ if( rc ) return rc;
+ pTarget->delta += savedDelta;
+ }
+ }
+ }
+ if( pTarget->delta ) jsonAfterEditSizeAdjust(pTarget, iTarget);
+ return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK;
+}
+
/*
** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON
** object that is the result of running the RFC 7396 MergePatch() algorithm
** on the two arguments.
@@ -2694,32 +4032,31 @@
static void jsonPatchFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- JsonParse *pX; /* The JSON that is being patched */
- JsonParse *pY; /* The patch */
- JsonNode *pResult; /* The result of the merge */
+ JsonParse *pTarget; /* The TARGET */
+ JsonParse *pPatch; /* The PATCH */
+ int rc; /* Result code */
UNUSED_PARAMETER(argc);
- pX = jsonParseCached(ctx, argv[0], ctx, 1);
- if( pX==0 ) return;
- assert( pX->hasMod==0 );
- pX->hasMod = 1;
- pY = jsonParseCached(ctx, argv[1], ctx, 1);
- if( pY==0 ) return;
- pX->useMod = 1;
- pY->useMod = 1;
- pResult = jsonMergePatch(pX, 0, pY->aNode);
- assert( pResult!=0 || pX->oom );
- if( pResult && pX->oom==0 ){
- jsonDebugPrintParse(pX);
- jsonDebugPrintNode(pResult);
- jsonReturnJson(pX, pResult, ctx, 0, 0);
- }else{
- sqlite3_result_error_nomem(ctx);
- }
+ assert( argc==2 );
+ pTarget = jsonParseFuncArg(ctx, argv[0], JSON_EDITABLE);
+ if( pTarget==0 ) return;
+ pPatch = jsonParseFuncArg(ctx, argv[1], 0);
+ if( pPatch ){
+ rc = jsonMergePatch(pTarget, 0, pPatch, 0);
+ if( rc==JSON_MERGE_OK ){
+ jsonReturnParse(ctx, pTarget);
+ }else if( rc==JSON_MERGE_OOM ){
+ sqlite3_result_error_nomem(ctx);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ jsonParseFree(pPatch);
+ }
+ jsonParseFree(pTarget);
}
/*
** Implementation of the json_object(NAME,VALUE,...) function. Return a JSON
@@ -2739,27 +4076,27 @@
if( argc&1 ){
sqlite3_result_error(ctx, "json_object() requires an even number "
"of arguments", -1);
return;
}
- jsonInit(&jx, ctx);
+ jsonStringInit(&jx, ctx);
jsonAppendChar(&jx, '{');
   for(i=0; i<argc; i+=2){
-  pParse = jsonParseCached(ctx, argv[0], ctx, argc>1);
- if( pParse==0 ) return;
- for(i=1; i<(u32)argc; i++){
+ p = jsonParseFuncArg(ctx, argv[0], argc>1 ? JSON_EDITABLE : 0);
+ if( p==0 ) return;
+  for(i=1; i<argc; i++){
     zPath = (const char*)sqlite3_value_text(argv[i]);
-    pNode = jsonLookup(pParse, zPath, 0, ctx);
-    if( pParse->nErr ) goto remove_done;
- if( pNode ){
- pNode->jnFlags |= JNODE_REMOVE;
- pParse->hasMod = 1;
- pParse->useMod = 1;
- }
- }
- if( (pParse->aNode[0].jnFlags & JNODE_REMOVE)==0 ){
- jsonReturnJson(pParse, pParse->aNode, ctx, 1, 0);
- }
-remove_done:
- jsonDebugPrintParse(p);
-}
-
-/*
-** Substitute the value at iNode with the pValue parameter.
-*/
-static void jsonReplaceNode(
- sqlite3_context *pCtx,
- JsonParse *p,
- int iNode,
- sqlite3_value *pValue
-){
- int idx = jsonParseAddSubstNode(p, iNode);
- if( idx<=0 ){
- assert( p->oom );
- return;
- }
- switch( sqlite3_value_type(pValue) ){
- case SQLITE_NULL: {
- jsonParseAddNode(p, JSON_NULL, 0, 0);
- break;
- }
- case SQLITE_FLOAT: {
- char *z = sqlite3_mprintf("%!0.15g", sqlite3_value_double(pValue));
- int n;
- if( z==0 ){
- p->oom = 1;
- break;
- }
- n = sqlite3Strlen30(z);
- jsonParseAddNode(p, JSON_REAL, n, z);
- jsonParseAddCleanup(p, sqlite3_free, z);
- break;
- }
- case SQLITE_INTEGER: {
- char *z = sqlite3_mprintf("%lld", sqlite3_value_int64(pValue));
- int n;
- if( z==0 ){
- p->oom = 1;
- break;
- }
- n = sqlite3Strlen30(z);
- jsonParseAddNode(p, JSON_INT, n, z);
- jsonParseAddCleanup(p, sqlite3_free, z);
-
- break;
- }
- case SQLITE_TEXT: {
- const char *z = (const char*)sqlite3_value_text(pValue);
- u32 n = (u32)sqlite3_value_bytes(pValue);
- if( z==0 ){
- p->oom = 1;
- break;
- }
- if( sqlite3_value_subtype(pValue)!=JSON_SUBTYPE ){
- char *zCopy = sqlite3_malloc64( n+1 );
- int k;
- if( zCopy ){
- memcpy(zCopy, z, n);
- zCopy[n] = 0;
- jsonParseAddCleanup(p, sqlite3_free, zCopy);
- }else{
- p->oom = 1;
- sqlite3_result_error_nomem(pCtx);
- }
- k = jsonParseAddNode(p, JSON_STRING, n, zCopy);
- assert( k>0 || p->oom );
- if( p->oom==0 ) p->aNode[k].jnFlags |= JNODE_RAW;
- }else{
- JsonParse *pPatch = jsonParseCached(pCtx, pValue, pCtx, 1);
- if( pPatch==0 ){
- p->oom = 1;
- break;
- }
- jsonParseAddNodeArray(p, pPatch->aNode, pPatch->nNode);
- /* The nodes copied out of pPatch and into p likely contain
- ** u.zJContent pointers into pPatch->zJson. So preserve the
- ** content of pPatch until p is destroyed. */
- assert( pPatch->nJPRef>=1 );
- pPatch->nJPRef++;
- jsonParseAddCleanup(p, (void(*)(void*))jsonParseFree, pPatch);
- }
- break;
- }
- default: {
- jsonParseAddNode(p, JSON_NULL, 0, 0);
- sqlite3_result_error(pCtx, "JSON cannot hold BLOB values", -1);
- p->nErr++;
- break;
- }
- }
+ if( zPath==0 ){
+ goto json_remove_done;
+ }
+ if( zPath[0]!='$' ){
+ goto json_remove_patherror;
+ }
+ if( zPath[1]==0 ){
+ /* json_remove(j,'$') returns NULL */
+ goto json_remove_done;
+ }
+ p->eEdit = JEDIT_DEL;
+ p->delta = 0;
+ rc = jsonLookupStep(p, 0, zPath+1, 0);
+ if( JSON_LOOKUP_ISERROR(rc) ){
+ if( rc==JSON_LOOKUP_NOTFOUND ){
+ continue; /* No-op */
+ }else if( rc==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ goto json_remove_done;
+ }
+ }
+ jsonReturnParse(ctx, p);
+ jsonParseFree(p);
+ return;
+
+json_remove_patherror:
+ jsonBadPathError(ctx, zPath);
+
+json_remove_done:
+ jsonParseFree(p);
+ return;
}
/*
** json_replace(JSON, PATH, VALUE, ...)
**
@@ -2898,36 +4165,16 @@
static void jsonReplaceFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- JsonParse *pParse; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
-
if( argc<1 ) return;
if( (argc&1)==0 ) {
jsonWrongNumArgs(ctx, "replace");
return;
}
- pParse = jsonParseCached(ctx, argv[0], ctx, argc>1);
- if( pParse==0 ) return;
- pParse->nJPRef++;
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pParse->useMod = 1;
- pNode = jsonLookup(pParse, zPath, 0, ctx);
- if( pParse->nErr ) goto replace_err;
- if( pNode ){
- jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]);
- }
- }
- jsonReturnJson(pParse, pParse->aNode, ctx, 1, 0);
-replace_err:
- jsonDebugPrintParse(pParse);
- jsonParseFree(pParse);
+ jsonInsertIntoBlob(ctx, argc, argv, JEDIT_REPL);
}
/*
** json_set(JSON, PATH, VALUE, ...)
@@ -2944,43 +4191,20 @@
static void jsonSetFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- JsonParse *pParse; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
- int bApnd;
- int bIsSet = sqlite3_user_data(ctx)!=0;
+
+ int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ int bIsSet = (flags&JSON_ISSET)!=0;
if( argc<1 ) return;
if( (argc&1)==0 ) {
jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert");
return;
}
- pParse = jsonParseCached(ctx, argv[0], ctx, argc>1);
- if( pParse==0 ) return;
- pParse->nJPRef++;
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- bApnd = 0;
- pParse->useMod = 1;
- pNode = jsonLookup(pParse, zPath, &bApnd, ctx);
- if( pParse->oom ){
- sqlite3_result_error_nomem(ctx);
- goto jsonSetDone;
- }else if( pParse->nErr ){
- goto jsonSetDone;
- }else if( pNode && (bApnd || bIsSet) ){
- jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]);
- }
- }
- jsonDebugPrintParse(pParse);
- jsonReturnJson(pParse, pParse->aNode, ctx, 1, 0);
-jsonSetDone:
- jsonParseFree(pParse);
+ jsonInsertIntoBlob(ctx, argc, argv, bIsSet ? JEDIT_SET : JEDIT_INS);
}
/*
** json_type(JSON)
** json_type(JSON, PATH)
@@ -2992,110 +4216,225 @@
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- const char *zPath;
- JsonNode *pNode;
+ const char *zPath = 0;
+ u32 i;
- p = jsonParseCached(ctx, argv[0], ctx, 0);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
if( argc==2 ){
zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
+ if( zPath==0 ) goto json_type_done;
+ if( zPath[0]!='$' ){
+ jsonBadPathError(ctx, zPath);
+ goto json_type_done;
+ }
+ i = jsonLookupStep(p, 0, zPath+1, 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ /* no-op */
+ }else if( i==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ goto json_type_done;
+ }
}else{
- pNode = p->aNode;
+ i = 0;
}
- if( pNode ){
- sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC);
- }
+ sqlite3_result_text(ctx, jsonbType[p->aBlob[i]&0x0f], -1, SQLITE_STATIC);
+json_type_done:
+ jsonParseFree(p);
}
/*
** json_valid(JSON)
+** json_valid(JSON, FLAGS)
+**
+** Check the JSON argument to see if it is well-formed. The FLAGS argument
+** encodes the various constraints on what is meant by "well-formed":
+**
+** 0x01 Canonical RFC-8259 JSON text
+** 0x02 JSON text with optional JSON-5 extensions
+** 0x04 Superficially appears to be JSONB
+** 0x08 Strictly well-formed JSONB
+**
+** If the FLAGS argument is omitted, it defaults to 1. Useful values for
+** FLAGS include:
+**
+** 1 Strict canonical JSON text
+** 2 JSON text perhaps with JSON-5 extensions
+** 4 Superficially appears to be JSONB
+** 5 Canonical JSON text or superficial JSONB
+** 6 JSON-5 text or superficial JSONB
+** 8 Strict JSONB
+** 9 Canonical JSON text or strict JSONB
+** 10 JSON-5 text or strict JSONB
+**
+** Other flag combinations are redundant. For example, every canonical
+** JSON text is also well-formed JSON-5 text, so FLAG values 2 and 3
+** are the same. Similarly, any input that passes a strict JSONB validation
+** will also pass the superficial validation so 12 through 15 are the same
+** as 8 through 11 respectively.
+**
+** This routine runs in linear time to validate text and when doing strict
+** JSONB validation. Superficial JSONB validation is constant time,
+** assuming the BLOB is already in memory. The performance advantage
+** of superficial JSONB validation is why that option is provided.
+** Application developers can choose to do fast superficial validation or
+** slower strict validation, according to their specific needs.
+**
+** Only the lower four bits of the FLAGS argument are currently used.
+** Higher bits are reserved for future expansion. To facilitate
+** compatibility, the current implementation raises an error if any bit
+** in FLAGS is set other than the lower four bits.
+**
+** The original circa 2015 implementation of the JSON routines in
+** SQLite only supported canonical RFC-8259 JSON text and the json_valid()
+** function only accepted one argument. That is why the default value
+** for the FLAGS argument is 1, since FLAGS=1 causes this routine to only
+** recognize canonical RFC-8259 JSON text as valid. The extra FLAGS
+** argument was added when the JSON routines were extended to support
+** JSON5-like extensions and binary JSONB stored in BLOBs.
+**
+** Return Values:
**
-** Return 1 if JSON is a well-formed canonical JSON string according
-** to RFC-7159. Return 0 otherwise.
+** * Raise an error if FLAGS is outside the range of 1 to 15.
+** * Return NULL if the input is NULL
+** * Return 1 if the input is well-formed.
+** * Return 0 if the input is not well-formed.
*/
static void jsonValidFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- UNUSED_PARAMETER(argc);
- if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
+ u8 flags = 1;
+ u8 res = 0;
+ if( argc==2 ){
+ i64 f = sqlite3_value_int64(argv[1]);
+ if( f<1 || f>15 ){
+ sqlite3_result_error(ctx, "FLAGS parameter to json_valid() must be"
+ " between 1 and 15", -1);
+ return;
+ }
+ flags = f & 0x0f;
+ }
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_NULL: {
#ifdef SQLITE_LEGACY_JSON_VALID
- /* Incorrect legacy behavior was to return FALSE for a NULL input */
- sqlite3_result_int(ctx, 0);
+ /* Incorrect legacy behavior was to return FALSE for a NULL input */
+ sqlite3_result_int(ctx, 0);
#endif
- return;
- }
- p = jsonParseCached(ctx, argv[0], 0, 0);
- if( p==0 || p->oom ){
- sqlite3_result_error_nomem(ctx);
- sqlite3_free(p);
- }else{
- sqlite3_result_int(ctx, p->nErr==0 && (p->hasNonstd==0 || p->useMod));
- if( p->nErr ) jsonParseFree(p);
- }
+ return;
+ }
+ case SQLITE_BLOB: {
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ if( flags & 0x04 ){
+ /* Superficial checking only - accomplished by the
+ ** jsonFuncArgMightBeBinary() call above. */
+ res = 1;
+ }else if( flags & 0x08 ){
+          /* Strict checking.  Run jsonbValidityCheck() over the entire
+          ** BLOB.  If no errors are found, call that a "strict check". */
+ JsonParse px;
+ u32 iErr;
+ memset(&px, 0, sizeof(px));
+ px.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ px.nBlob = sqlite3_value_bytes(argv[0]);
+ iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1);
+ res = iErr==0;
+ }
+ break;
+ }
+ /* Fall through into interpreting the input as text. See note
+ ** above at tag-20240123-a. */
+ /* no break */ deliberate_fall_through
+ }
+ default: {
+ JsonParse px;
+ if( (flags & 0x3)==0 ) break;
+ memset(&px, 0, sizeof(px));
+
+ p = jsonParseFuncArg(ctx, argv[0], JSON_KEEPERROR);
+ if( p ){
+ if( p->oom ){
+ sqlite3_result_error_nomem(ctx);
+ }else if( p->nErr ){
+ /* no-op */
+ }else if( (flags & 0x02)!=0 || p->hasNonstd==0 ){
+ res = 1;
+ }
+ jsonParseFree(p);
+ }else{
+ sqlite3_result_error_nomem(ctx);
+ }
+ break;
+ }
+ }
+ sqlite3_result_int(ctx, res);
}
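
Illustrative aside (not part of the patch): a small sketch of the FLAGS semantics described above, using only documented C API calls. The jsonb() SQL function used in the third column is the BLOB-producing variant registered elsewhere in this change; '{x:35}' uses a JSON-5 unquoted label, so it fails the default canonical check but passes with FLAGS=2, and FLAGS=4 performs only the cheap superficial test on a JSONB blob.

  #include <stdio.h>
  #include "sqlite3.h"

  int main(void){
    sqlite3 *db;
    sqlite3_stmt *pStmt;
    sqlite3_open(":memory:", &db);
    sqlite3_prepare_v2(db,
       "SELECT json_valid('{x:35}'),"               /* 0: not canonical RFC-8259 */
       "       json_valid('{x:35}', 2),"            /* 1: JSON-5 text accepted   */
       "       json_valid(jsonb('{\"x\":35}'), 4)", /* 1: superficial JSONB      */
       -1, &pStmt, 0);
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%d %d %d\n", sqlite3_column_int(pStmt,0),
             sqlite3_column_int(pStmt,1), sqlite3_column_int(pStmt,2));
    }
    sqlite3_finalize(pStmt);
    sqlite3_close(db);
    return 0;
  }
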
/*
** json_error_position(JSON)
**
-** If the argument is not an interpretable JSON string, then return the 1-based
-** character position at which the parser first recognized that the input
-** was in error. The left-most character is 1. If the string is valid
-** JSON, then return 0.
-**
-** Note that json_valid() is only true for strictly conforming canonical JSON.
-** But this routine returns zero if the input contains extension. Thus:
-**
-** (1) If the input X is strictly conforming canonical JSON:
-**
-** json_valid(X) returns true
-** json_error_position(X) returns 0
-**
-** (2) If the input X is JSON but it includes extension (such as JSON5) that
-** are not part of RFC-8259:
-**
-** json_valid(X) returns false
-** json_error_position(X) return 0
-**
-** (3) If the input X cannot be interpreted as JSON even taking extensions
-** into account:
-**
-** json_valid(X) return false
-** json_error_position(X) returns 1 or more
+** If the argument is NULL, return NULL
+**
+** If the argument is BLOB, do a full validity check and return non-zero
+** if the check fails. The return value is the approximate 1-based offset
+** to the byte of the element that contains the first error.
+**
+** Otherwise, interpret the argument as TEXT (even if it is numeric) and
+** return the 1-based character position at which the parser first recognized
+** that the input was not valid JSON, or return 0 if the input text looks
+** ok. JSON-5 extensions are accepted.
*/
static void jsonErrorFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- JsonParse *p; /* The parse */
+ i64 iErrPos = 0; /* Error position to be returned */
+ JsonParse s;
+
+ assert( argc==1 );
UNUSED_PARAMETER(argc);
- if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
- p = jsonParseCached(ctx, argv[0], 0, 0);
- if( p==0 || p->oom ){
+ memset(&s, 0, sizeof(s));
+ s.db = sqlite3_context_db_handle(ctx);
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ s.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ s.nBlob = sqlite3_value_bytes(argv[0]);
+ iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1);
+ }else{
+ s.zJson = (char*)sqlite3_value_text(argv[0]);
+ if( s.zJson==0 ) return; /* NULL input or OOM */
+ s.nJson = sqlite3_value_bytes(argv[0]);
+ if( jsonConvertTextToBlob(&s,0) ){
+ if( s.oom ){
+ iErrPos = -1;
+ }else{
+ /* Convert byte-offset s.iErr into a character offset */
+ u32 k;
+ assert( s.zJson!=0 ); /* Because s.oom is false */
+ for(k=0; knErr==0 ){
- sqlite3_result_int(ctx, 0);
}else{
- int n = 1;
- u32 i;
- const char *z = (const char*)sqlite3_value_text(argv[0]);
-    for(i=0; i<p->iErr && ALWAYS(z[i]); i++){
- if( (z[i]&0xc0)!=0x80 ) n++;
- }
- sqlite3_result_int(ctx, n);
- jsonParseFree(p);
+ sqlite3_result_int64(ctx, iErrPos);
}
}
-
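
Illustrative aside (not part of the patch): a sketch of the behavior described in the new comment, 0 for text that parses (JSON-5 extensions included) and a 1-based position otherwise. The exact position reported for the broken input is not asserted here; the printRow helper is hypothetical.

  #include <stdio.h>
  #include "sqlite3.h"

  /* sqlite3_exec() callback: print each column of the single result row */
  static int printRow(void *pUnused, int nCol, char **azVal, char **azCol){
    int i;
    (void)pUnused;
    for(i=0; i<nCol; i++){
      printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
    }
    return 0;
  }

  int main(void){
    sqlite3 *db;
    sqlite3_open(":memory:", &db);
    sqlite3_exec(db,
      "SELECT json_error_position('{\"a\":1}') AS canonical,"  /* 0          */
      "       json_error_position('{a:1}')     AS json5,"      /* 0          */
      "       json_error_position('[1,2,,]')   AS broken",     /* some n>=1  */
      printRow, 0, 0);
    sqlite3_close(db);
    return 0;
  }
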
/****************************************************************************
** Aggregate SQL function implementations
****************************************************************************/
/*
@@ -3111,36 +4450,46 @@
JsonString *pStr;
UNUSED_PARAMETER(argc);
pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
if( pStr ){
if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
+ jsonStringInit(pStr, ctx);
jsonAppendChar(pStr, '[');
}else if( pStr->nUsed>1 ){
jsonAppendChar(pStr, ',');
}
pStr->pCtx = ctx;
- jsonAppendValue(pStr, argv[0]);
+ jsonAppendSqlValue(pStr, argv[0]);
}
}
static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){
JsonString *pStr;
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
if( pStr ){
+ int flags;
pStr->pCtx = ctx;
jsonAppendChar(pStr, ']');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( pStr->eErr ){
+ jsonReturnString(pStr, 0, 0);
+ return;
+ }else if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(pStr);
+ if( isFinal ){
+ if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf);
+ }else{
+ jsonStringTrimOneChar(pStr);
+ }
+ return;
}else if( isFinal ){
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
pStr->bStatic ? SQLITE_TRANSIENT :
sqlite3RCStrUnref);
pStr->bStatic = 1;
}else{
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
+ jsonStringTrimOneChar(pStr);
}
}else{
sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC);
}
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
@@ -3217,39 +4566,50 @@
u32 n;
UNUSED_PARAMETER(argc);
pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
if( pStr ){
if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
+ jsonStringInit(pStr, ctx);
jsonAppendChar(pStr, '{');
}else if( pStr->nUsed>1 ){
jsonAppendChar(pStr, ',');
}
pStr->pCtx = ctx;
z = (const char*)sqlite3_value_text(argv[0]);
- n = (u32)sqlite3_value_bytes(argv[0]);
+ n = sqlite3Strlen30(z);
jsonAppendString(pStr, z, n);
jsonAppendChar(pStr, ':');
- jsonAppendValue(pStr, argv[1]);
+ jsonAppendSqlValue(pStr, argv[1]);
}
}
static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){
JsonString *pStr;
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
if( pStr ){
+ int flags;
jsonAppendChar(pStr, '}');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
+ pStr->pCtx = ctx;
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( pStr->eErr ){
+ jsonReturnString(pStr, 0, 0);
+ return;
+ }else if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(pStr);
+ if( isFinal ){
+ if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf);
+ }else{
+ jsonStringTrimOneChar(pStr);
+ }
+ return;
}else if( isFinal ){
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
pStr->bStatic ? SQLITE_TRANSIENT :
sqlite3RCStrUnref);
pStr->bStatic = 1;
}else{
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
+ jsonStringTrimOneChar(pStr);
}
}else{
sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC);
}
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
@@ -3265,33 +4625,51 @@
#ifndef SQLITE_OMIT_VIRTUALTABLE
/****************************************************************************
** The json_each virtual table
****************************************************************************/
+typedef struct JsonParent JsonParent;
+struct JsonParent {
+ u32 iHead; /* Start of object or array */
+ u32 iValue; /* Start of the value */
+ u32 iEnd; /* First byte past the end */
+ u32 nPath; /* Length of path */
+ i64 iKey; /* Key for JSONB_ARRAY */
+};
+
typedef struct JsonEachCursor JsonEachCursor;
struct JsonEachCursor {
sqlite3_vtab_cursor base; /* Base class - must be first */
u32 iRowid; /* The rowid */
- u32 iBegin; /* The first node of the scan */
- u32 i; /* Index in sParse.aNode[] of current row */
+ u32 i; /* Index in sParse.aBlob[] of current row */
u32 iEnd; /* EOF when i equals or exceeds this value */
- u8 eType; /* Type of top-level element */
+ u32 nRoot; /* Size of the root path in bytes */
+ u8 eType; /* Type of the container for element i */
u8 bRecursive; /* True for json_tree(). False for json_each() */
- char *zJson; /* Input JSON */
- char *zRoot; /* Path by which to filter zJson */
+ u32 nParent; /* Current nesting depth */
+ u32 nParentAlloc; /* Space allocated for aParent[] */
+ JsonParent *aParent; /* Parent elements of i */
+ sqlite3 *db; /* Database connection */
+ JsonString path; /* Current path */
JsonParse sParse; /* Parse of the input JSON */
};
+typedef struct JsonEachConnection JsonEachConnection;
+struct JsonEachConnection {
+ sqlite3_vtab base; /* Base class - must be first */
+ sqlite3 *db; /* Database connection */
+};
+
/* Constructor for the json_each virtual table */
static int jsonEachConnect(
sqlite3 *db,
void *pAux,
int argc, const char *const*argv,
sqlite3_vtab **ppVtab,
char **pzErr
){
- sqlite3_vtab *pNew;
+ JsonEachConnection *pNew;
int rc;
/* Column numbers */
#define JEACH_KEY 0
#define JEACH_VALUE 1
@@ -3313,32 +4691,36 @@
UNUSED_PARAMETER(pAux);
rc = sqlite3_declare_vtab(db,
"CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,"
"json HIDDEN,root HIDDEN)");
if( rc==SQLITE_OK ){
- pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
+ pNew = (JsonEachConnection*)sqlite3DbMallocZero(db, sizeof(*pNew));
+ *ppVtab = (sqlite3_vtab*)pNew;
if( pNew==0 ) return SQLITE_NOMEM;
- memset(pNew, 0, sizeof(*pNew));
sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+ pNew->db = db;
}
return rc;
}
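
Illustrative aside (not part of the patch): the virtual-table schema declared above is unchanged by this rewrite, so existing json_each()/json_tree() queries keep working against the new JSONB-based cursor. A minimal sketch:

  #include <stdio.h>
  #include "sqlite3.h"

  int main(void){
    sqlite3 *db;
    sqlite3_stmt *pStmt;
    sqlite3_open(":memory:", &db);
    sqlite3_prepare_v2(db,
       "SELECT fullkey, type FROM json_tree('{\"a\":[1,2],\"b\":{\"c\":true}}')",
       -1, &pStmt, 0);
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      /* Rows, in order: $, $.a, $.a[0], $.a[1], $.b, $.b.c */
      printf("%-10s %s\n", (const char*)sqlite3_column_text(pStmt,0),
                           (const char*)sqlite3_column_text(pStmt,1));
    }
    sqlite3_finalize(pStmt);
    sqlite3_close(db);
    return 0;
  }
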
/* destructor for json_each virtual table */
static int jsonEachDisconnect(sqlite3_vtab *pVtab){
- sqlite3_free(pVtab);
+ JsonEachConnection *p = (JsonEachConnection*)pVtab;
+ sqlite3DbFree(p->db, pVtab);
return SQLITE_OK;
}
/* constructor for a JsonEachCursor object for json_each(). */
static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ JsonEachConnection *pVtab = (JsonEachConnection*)p;
JsonEachCursor *pCur;
UNUSED_PARAMETER(p);
- pCur = sqlite3_malloc( sizeof(*pCur) );
+ pCur = sqlite3DbMallocZero(pVtab->db, sizeof(*pCur));
if( pCur==0 ) return SQLITE_NOMEM;
- memset(pCur, 0, sizeof(*pCur));
+ pCur->db = pVtab->db;
+ jsonStringZero(&pCur->path);
*ppCursor = &pCur->base;
return SQLITE_OK;
}
/* constructor for a JsonEachCursor object for json_tree(). */
@@ -3352,25 +4734,28 @@
}
/* Reset a JsonEachCursor back to its original state. Free any memory
** held. */
static void jsonEachCursorReset(JsonEachCursor *p){
- sqlite3_free(p->zRoot);
jsonParseReset(&p->sParse);
+ jsonStringReset(&p->path);
+ sqlite3DbFree(p->db, p->aParent);
p->iRowid = 0;
p->i = 0;
+ p->aParent = 0;
+ p->nParent = 0;
+ p->nParentAlloc = 0;
p->iEnd = 0;
p->eType = 0;
- p->zJson = 0;
- p->zRoot = 0;
}
/* Destructor for a jsonEachCursor object */
static int jsonEachClose(sqlite3_vtab_cursor *cur){
JsonEachCursor *p = (JsonEachCursor*)cur;
jsonEachCursorReset(p);
- sqlite3_free(cur);
+
+ sqlite3DbFree(p->db, cur);
return SQLITE_OK;
}
/* Return TRUE if the jsonEachCursor object has been advanced off the end
** of the JSON object */
@@ -3377,204 +4762,234 @@
static int jsonEachEof(sqlite3_vtab_cursor *cur){
JsonEachCursor *p = (JsonEachCursor*)cur;
return p->i >= p->iEnd;
}
-/* Advance the cursor to the next element for json_tree() */
-static int jsonEachNext(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- if( p->bRecursive ){
- if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
- p->i++;
- p->iRowid++;
- if( p->iiEnd ){
- u32 iUp = p->sParse.aUp[p->i];
- JsonNode *pUp = &p->sParse.aNode[iUp];
- p->eType = pUp->eType;
- if( pUp->eType==JSON_ARRAY ){
- assert( pUp->eU==0 || pUp->eU==3 );
- testcase( pUp->eU==3 );
- VVA( pUp->eU = 3 );
- if( iUp==p->i-1 ){
- pUp->u.iKey = 0;
- }else{
- pUp->u.iKey++;
- }
- }
- }
- }else{
- switch( p->eType ){
- case JSON_ARRAY: {
- p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
- p->iRowid++;
- break;
- }
- case JSON_OBJECT: {
- p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
- p->iRowid++;
- break;
- }
- default: {
- p->i = p->iEnd;
- break;
- }
- }
- }
- return SQLITE_OK;
-}
-
-/* Append an object label to the JSON Path being constructed
-** in pStr.
-*/
-static void jsonAppendObjectPathElement(
- JsonString *pStr,
- JsonNode *pNode
-){
- int jj, nn;
- const char *z;
- assert( pNode->eType==JSON_STRING );
- assert( pNode->jnFlags & JNODE_LABEL );
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- nn = pNode->n;
- if( (pNode->jnFlags & JNODE_RAW)==0 ){
- assert( nn>=2 );
- assert( z[0]=='"' || z[0]=='\'' );
- assert( z[nn-1]=='"' || z[0]=='\'' );
- if( nn>2 && sqlite3Isalpha(z[1]) ){
- for(jj=2; jjsParse.aUp[i];
- jsonEachComputePath(p, pStr, iUp);
- pNode = &p->sParse.aNode[i];
- pUp = &p->sParse.aNode[iUp];
- if( pUp->eType==JSON_ARRAY ){
- assert( pUp->eU==3 || (pUp->eU==0 && pUp->u.iKey==0) );
- testcase( pUp->eU==0 );
- jsonPrintf(30, pStr, "[%d]", pUp->u.iKey);
- }else{
- assert( pUp->eType==JSON_OBJECT );
- if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--;
- jsonAppendObjectPathElement(pStr, pNode);
- }
+/*
+** If the cursor is currently pointing at the label of an object entry,
+** then return the index of the value. For all other cases, return the
+** current pointer position, which is the value.
+*/
+static int jsonSkipLabel(JsonEachCursor *p){
+ if( p->eType==JSONB_OBJECT ){
+ u32 sz = 0;
+ u32 n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+ return p->i + n + sz;
+ }else{
+ return p->i;
+ }
+}
+
+/*
+** Append the path name for the current element.
+*/
+static void jsonAppendPathName(JsonEachCursor *p){
+ assert( p->nParent>0 );
+ assert( p->eType==JSONB_ARRAY || p->eType==JSONB_OBJECT );
+ if( p->eType==JSONB_ARRAY ){
+ jsonPrintf(30, &p->path, "[%lld]", p->aParent[p->nParent-1].iKey);
+ }else{
+ u32 n, sz = 0, k, i;
+ const char *z;
+ int needQuote = 0;
+ n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+ k = p->i + n;
+ z = (const char*)&p->sParse.aBlob[k];
+ if( sz==0 || !sqlite3Isalpha(z[0]) ){
+ needQuote = 1;
+ }else{
+ for(i=0; ipath,".\"%.*s\"", sz, z);
+ }else{
+ jsonPrintf(sz+2,&p->path,".%.*s", sz, z);
+ }
+ }
+}
+
+/* Advance the cursor to the next element for json_tree() */
+static int jsonEachNext(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ int rc = SQLITE_OK;
+ if( p->bRecursive ){
+ u8 x;
+ u8 levelChange = 0;
+ u32 n, sz = 0;
+ u32 i = jsonSkipLabel(p);
+ x = p->sParse.aBlob[i] & 0x0f;
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ if( x==JSONB_OBJECT || x==JSONB_ARRAY ){
+ JsonParent *pParent;
+ if( p->nParent>=p->nParentAlloc ){
+ JsonParent *pNew;
+ u64 nNew;
+ nNew = p->nParentAlloc*2 + 3;
+ pNew = sqlite3DbRealloc(p->db, p->aParent, sizeof(JsonParent)*nNew);
+ if( pNew==0 ) return SQLITE_NOMEM;
+ p->nParentAlloc = (u32)nNew;
+ p->aParent = pNew;
+ }
+ levelChange = 1;
+ pParent = &p->aParent[p->nParent];
+ pParent->iHead = p->i;
+ pParent->iValue = i;
+ pParent->iEnd = i + n + sz;
+ pParent->iKey = -1;
+ pParent->nPath = (u32)p->path.nUsed;
+ if( p->eType && p->nParent ){
+ jsonAppendPathName(p);
+ if( p->path.eErr ) rc = SQLITE_NOMEM;
+ }
+ p->nParent++;
+ p->i = i + n;
+ }else{
+ p->i = i + n + sz;
+ }
+ while( p->nParent>0 && p->i >= p->aParent[p->nParent-1].iEnd ){
+ p->nParent--;
+ p->path.nUsed = p->aParent[p->nParent].nPath;
+ levelChange = 1;
+ }
+ if( levelChange ){
+ if( p->nParent>0 ){
+ JsonParent *pParent = &p->aParent[p->nParent-1];
+ u32 iVal = pParent->iValue;
+ p->eType = p->sParse.aBlob[iVal] & 0x0f;
+ }else{
+ p->eType = 0;
+ }
+ }
+ }else{
+ u32 n, sz = 0;
+ u32 i = jsonSkipLabel(p);
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ p->i = i + n + sz;
+ }
+ if( p->eType==JSONB_ARRAY && p->nParent ){
+ p->aParent[p->nParent-1].iKey++;
+ }
+ p->iRowid++;
+ return rc;
+}
+
+/* Length of the path for rowid==0 in bRecursive mode.
+*/
+static int jsonEachPathLength(JsonEachCursor *p){
+ u32 n = p->path.nUsed;
+ char *z = p->path.zBuf;
+ if( p->iRowid==0 && p->bRecursive && n>=2 ){
+ while( n>1 ){
+ n--;
+ if( z[n]=='[' || z[n]=='.' ){
+ u32 x, sz = 0;
+ char cSaved = z[n];
+ z[n] = 0;
+ assert( p->sParse.eEdit==0 );
+ x = jsonLookupStep(&p->sParse, 0, z+1, 0);
+ z[n] = cSaved;
+ if( JSON_LOOKUP_ISERROR(x) ) continue;
+ if( x + jsonbPayloadSize(&p->sParse, x, &sz) == p->i ) break;
+ }
+ }
+ }
+ return n;
}
/* Return the value of a column */
static int jsonEachColumn(
sqlite3_vtab_cursor *cur, /* The cursor */
sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
- int i /* Which column to return */
+ int iColumn /* Which column to return */
){
JsonEachCursor *p = (JsonEachCursor*)cur;
- JsonNode *pThis = &p->sParse.aNode[p->i];
- switch( i ){
+ switch( iColumn ){
case JEACH_KEY: {
- if( p->i==0 ) break;
- if( p->eType==JSON_OBJECT ){
- jsonReturn(&p->sParse, pThis, ctx, 0);
- }else if( p->eType==JSON_ARRAY ){
- u32 iKey;
- if( p->bRecursive ){
- if( p->iRowid==0 ) break;
- assert( p->sParse.aNode[p->sParse.aUp[p->i]].eU==3 );
- iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
+ if( p->nParent==0 ){
+ u32 n, j;
+ if( p->nRoot==1 ) break;
+ j = jsonEachPathLength(p);
+ n = p->nRoot - j;
+ if( n==0 ){
+ break;
+ }else if( p->path.zBuf[j]=='[' ){
+ i64 x;
+ sqlite3Atoi64(&p->path.zBuf[j+1], &x, n-1, SQLITE_UTF8);
+ sqlite3_result_int64(ctx, x);
+ }else if( p->path.zBuf[j+1]=='"' ){
+ sqlite3_result_text(ctx, &p->path.zBuf[j+2], n-3, SQLITE_TRANSIENT);
}else{
- iKey = p->iRowid;
+ sqlite3_result_text(ctx, &p->path.zBuf[j+1], n-1, SQLITE_TRANSIENT);
}
- sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
+ break;
+ }
+ if( p->eType==JSONB_OBJECT ){
+ jsonReturnFromBlob(&p->sParse, p->i, ctx, 1);
+ }else{
+ assert( p->eType==JSONB_ARRAY );
+ sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iKey);
}
break;
}
case JEACH_VALUE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- jsonReturn(&p->sParse, pThis, ctx, 0);
+ u32 i = jsonSkipLabel(p);
+ jsonReturnFromBlob(&p->sParse, i, ctx, 1);
break;
}
case JEACH_TYPE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
+ u32 i = jsonSkipLabel(p);
+ u8 eType = p->sParse.aBlob[i] & 0x0f;
+ sqlite3_result_text(ctx, jsonbType[eType], -1, SQLITE_STATIC);
break;
}
case JEACH_ATOM: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- if( pThis->eType>=JSON_ARRAY ) break;
- jsonReturn(&p->sParse, pThis, ctx, 0);
+ u32 i = jsonSkipLabel(p);
+      if( (p->sParse.aBlob[i] & 0x0f)<JSONB_ARRAY ){
+        jsonReturnFromBlob(&p->sParse, i, ctx, 1);
+ }
break;
}
case JEACH_ID: {
- sqlite3_result_int64(ctx,
- (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
+ sqlite3_result_int64(ctx, (sqlite3_int64)p->i);
break;
}
case JEACH_PARENT: {
- if( p->i>p->iBegin && p->bRecursive ){
- sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
+ if( p->nParent>0 && p->bRecursive ){
+ sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iHead);
}
break;
}
case JEACH_FULLKEY: {
- JsonString x;
- jsonInit(&x, ctx);
- if( p->bRecursive ){
- jsonEachComputePath(p, &x, p->i);
- }else{
- if( p->zRoot ){
- jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
- }else{
- jsonAppendChar(&x, '$');
- }
- if( p->eType==JSON_ARRAY ){
- jsonPrintf(30, &x, "[%d]", p->iRowid);
- }else if( p->eType==JSON_OBJECT ){
- jsonAppendObjectPathElement(&x, pThis);
- }
- }
- jsonResult(&x);
+ u64 nBase = p->path.nUsed;
+ if( p->nParent ) jsonAppendPathName(p);
+ sqlite3_result_text64(ctx, p->path.zBuf, p->path.nUsed,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ p->path.nUsed = nBase;
break;
}
case JEACH_PATH: {
- if( p->bRecursive ){
- JsonString x;
- jsonInit(&x, ctx);
- jsonEachComputePath(p, &x, p->sParse.aUp[p->i]);
- jsonResult(&x);
- break;
- }
- /* For json_each() path and root are the same so fall through
- ** into the root case */
- /* no break */ deliberate_fall_through
+ u32 n = jsonEachPathLength(p);
+ sqlite3_result_text64(ctx, p->path.zBuf, n,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ break;
}
default: {
- const char *zRoot = p->zRoot;
- if( zRoot==0 ) zRoot = "$";
- sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
+ sqlite3_result_text(ctx, p->path.zBuf, p->nRoot, SQLITE_STATIC);
break;
}
case JEACH_JSON: {
- assert( i==JEACH_JSON );
- sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ if( p->sParse.zJson==0 ){
+ sqlite3_result_blob(ctx, p->sParse.aBlob, p->sParse.nBlob,
+ SQLITE_STATIC);
+ }else{
+ sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ }
break;
}
}
return SQLITE_OK;
}
@@ -3661,90 +5076,101 @@
sqlite3_vtab_cursor *cur,
int idxNum, const char *idxStr,
int argc, sqlite3_value **argv
){
JsonEachCursor *p = (JsonEachCursor*)cur;
- const char *z;
const char *zRoot = 0;
- sqlite3_int64 n;
+ u32 i, n, sz;
UNUSED_PARAMETER(idxStr);
UNUSED_PARAMETER(argc);
jsonEachCursorReset(p);
if( idxNum==0 ) return SQLITE_OK;
- z = (const char*)sqlite3_value_text(argv[0]);
- if( z==0 ) return SQLITE_OK;
memset(&p->sParse, 0, sizeof(p->sParse));
p->sParse.nJPRef = 1;
- if( sqlite3ValueIsOfClass(argv[0], sqlite3RCStrUnref) ){
- p->sParse.zJson = sqlite3RCStrRef((char*)z);
- }else{
- n = sqlite3_value_bytes(argv[0]);
- p->sParse.zJson = sqlite3RCStrNew( n+1 );
- if( p->sParse.zJson==0 ) return SQLITE_NOMEM;
- memcpy(p->sParse.zJson, z, (size_t)n+1);
- }
- p->sParse.bJsonIsRCStr = 1;
- p->zJson = p->sParse.zJson;
- if( jsonParse(&p->sParse, 0) ){
- int rc = SQLITE_NOMEM;
- if( p->sParse.oom==0 ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
- if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR;
- }
- jsonEachCursorReset(p);
- return rc;
- }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){
- jsonEachCursorReset(p);
- return SQLITE_NOMEM;
- }else{
- JsonNode *pNode = 0;
- if( idxNum==3 ){
- const char *zErr = 0;
- zRoot = (const char*)sqlite3_value_text(argv[1]);
- if( zRoot==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[1]);
- p->zRoot = sqlite3_malloc64( n+1 );
- if( p->zRoot==0 ) return SQLITE_NOMEM;
- memcpy(p->zRoot, zRoot, (size_t)n+1);
- if( zRoot[0]!='$' ){
- zErr = zRoot;
- }else{
- pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr);
- }
- if( zErr ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr);
+ p->sParse.db = p->db;
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ p->sParse.nBlob = sqlite3_value_bytes(argv[0]);
+ p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ }else{
+ p->sParse.zJson = (char*)sqlite3_value_text(argv[0]);
+ p->sParse.nJson = sqlite3_value_bytes(argv[0]);
+ if( p->sParse.zJson==0 ){
+ p->i = p->iEnd = 0;
+ return SQLITE_OK;
+ }
+ if( jsonConvertTextToBlob(&p->sParse, 0) ){
+ if( p->sParse.oom ){
+ return SQLITE_NOMEM;
+ }
+ goto json_each_malformed_input;
+ }
+ }
+ if( idxNum==3 ){
+ zRoot = (const char*)sqlite3_value_text(argv[1]);
+ if( zRoot==0 ) return SQLITE_OK;
+ if( zRoot[0]!='$' ){
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot);
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
+ }
+ p->nRoot = sqlite3Strlen30(zRoot);
+ if( zRoot[1]==0 ){
+ i = p->i = 0;
+ p->eType = 0;
+ }else{
+ i = jsonLookupStep(&p->sParse, 0, zRoot+1, 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ p->i = 0;
+ p->eType = 0;
+ p->iEnd = 0;
+ return SQLITE_OK;
+ }
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot);
jsonEachCursorReset(p);
return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
- }else if( pNode==0 ){
- return SQLITE_OK;
- }
- }else{
- pNode = p->sParse.aNode;
- }
- p->iBegin = p->i = (int)(pNode - p->sParse.aNode);
- p->eType = pNode->eType;
- if( p->eType>=JSON_ARRAY ){
- assert( pNode->eU==0 );
- VVA( pNode->eU = 3 );
- pNode->u.iKey = 0;
- p->iEnd = p->i + pNode->n + 1;
- if( p->bRecursive ){
- p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType;
- if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){
- p->i--;
- }
- }else{
- p->i++;
- }
- }else{
- p->iEnd = p->i+1;
- }
- }
- return SQLITE_OK;
+ }
+ if( p->sParse.iLabel ){
+ p->i = p->sParse.iLabel;
+ p->eType = JSONB_OBJECT;
+ }else{
+ p->i = i;
+ p->eType = JSONB_ARRAY;
+ }
+ }
+ jsonAppendRaw(&p->path, zRoot, p->nRoot);
+ }else{
+ i = p->i = 0;
+ p->eType = 0;
+ p->nRoot = 1;
+ jsonAppendRaw(&p->path, "$", 1);
+ }
+ p->nParent = 0;
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ p->iEnd = i+n+sz;
+ if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY && !p->bRecursive ){
+ p->i = i + n;
+ p->eType = p->sParse.aBlob[i] & 0x0f;
+ p->aParent = sqlite3DbMallocZero(p->db, sizeof(JsonParent));
+ if( p->aParent==0 ) return SQLITE_NOMEM;
+ p->nParent = 1;
+ p->nParentAlloc = 1;
+ p->aParent[0].iKey = 0;
+ p->aParent[0].iEnd = p->iEnd;
+ p->aParent[0].iHead = p->i;
+ p->aParent[0].iValue = i;
+ }
+ return SQLITE_OK;
+
+json_each_malformed_input:
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
}
/* The methods of the json_each virtual table */
static sqlite3_module jsonEachModule = {
0, /* iVersion */
@@ -3809,44 +5235,58 @@
** Register JSON functions.
*/
void sqlite3RegisterJsonFunctions(void){
#ifndef SQLITE_OMIT_JSON
static FuncDef aJsonFunc[] = {
- /* calls sqlite3_result_subtype() */
- /* | */
- /* Uses cache ______ | __ calls sqlite3_value_subtype() */
- /* | | | */
- /* Num args _________ | | | ___ Flags */
- /* | | | | | */
- /* | | | | | */
- JFUNCTION(json, 1, 1, 1, 0, 0, jsonRemoveFunc),
- JFUNCTION(json_array, -1, 0, 1, 1, 0, jsonArrayFunc),
- JFUNCTION(json_array_length, 1, 1, 0, 0, 0, jsonArrayLengthFunc),
- JFUNCTION(json_array_length, 2, 1, 0, 0, 0, jsonArrayLengthFunc),
- JFUNCTION(json_error_position,1, 1, 0, 0, 0, jsonErrorFunc),
- JFUNCTION(json_extract, -1, 1, 1, 0, 0, jsonExtractFunc),
- JFUNCTION(->, 2, 1, 1, 0, JSON_JSON, jsonExtractFunc),
- JFUNCTION(->>, 2, 1, 0, 0, JSON_SQL, jsonExtractFunc),
- JFUNCTION(json_insert, -1, 1, 1, 1, 0, jsonSetFunc),
- JFUNCTION(json_object, -1, 0, 1, 1, 0, jsonObjectFunc),
- JFUNCTION(json_patch, 2, 1, 1, 0, 0, jsonPatchFunc),
- JFUNCTION(json_quote, 1, 0, 1, 1, 0, jsonQuoteFunc),
- JFUNCTION(json_remove, -1, 1, 1, 0, 0, jsonRemoveFunc),
- JFUNCTION(json_replace, -1, 1, 1, 1, 0, jsonReplaceFunc),
- JFUNCTION(json_set, -1, 1, 1, 1, JSON_ISSET, jsonSetFunc),
- JFUNCTION(json_type, 1, 1, 0, 0, 0, jsonTypeFunc),
- JFUNCTION(json_type, 2, 1, 0, 0, 0, jsonTypeFunc),
- JFUNCTION(json_valid, 1, 1, 0, 0, 0, jsonValidFunc),
-#ifdef SQLITE_DEBUG
- JFUNCTION(json_parse, 1, 1, 1, 0, 0, jsonParseFunc),
- JFUNCTION(json_test1, 1, 1, 0, 1, 0, jsonTest1Func),
+ /* sqlite3_result_subtype() ----, ,--- sqlite3_value_subtype() */
+ /* | | */
+ /* Uses cache ------, | | ,---- Returns JSONB */
+ /* | | | | */
+ /* Number of arguments ---, | | | | ,--- Flags */
+ /* | | | | | | */
+ JFUNCTION(json, 1,1,1, 0,0,0, jsonRemoveFunc),
+ JFUNCTION(jsonb, 1,1,0, 0,1,0, jsonRemoveFunc),
+ JFUNCTION(json_array, -1,0,1, 1,0,0, jsonArrayFunc),
+ JFUNCTION(jsonb_array, -1,0,1, 1,1,0, jsonArrayFunc),
+ JFUNCTION(json_array_length, 1,1,0, 0,0,0, jsonArrayLengthFunc),
+ JFUNCTION(json_array_length, 2,1,0, 0,0,0, jsonArrayLengthFunc),
+ JFUNCTION(json_error_position,1,1,0, 0,0,0, jsonErrorFunc),
+ JFUNCTION(json_extract, -1,1,1, 0,0,0, jsonExtractFunc),
+ JFUNCTION(jsonb_extract, -1,1,0, 0,1,0, jsonExtractFunc),
+ JFUNCTION(->, 2,1,1, 0,0,JSON_JSON, jsonExtractFunc),
+ JFUNCTION(->>, 2,1,0, 0,0,JSON_SQL, jsonExtractFunc),
+ JFUNCTION(json_insert, -1,1,1, 1,0,0, jsonSetFunc),
+ JFUNCTION(jsonb_insert, -1,1,0, 1,1,0, jsonSetFunc),
+ JFUNCTION(json_object, -1,0,1, 1,0,0, jsonObjectFunc),
+ JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc),
+ JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc),
+ JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc),
+ JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc),
+ JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc),
+ JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc),
+ JFUNCTION(json_replace, -1,1,1, 1,0,0, jsonReplaceFunc),
+ JFUNCTION(jsonb_replace, -1,1,0, 1,1,0, jsonReplaceFunc),
+ JFUNCTION(json_set, -1,1,1, 1,0,JSON_ISSET, jsonSetFunc),
+ JFUNCTION(jsonb_set, -1,1,0, 1,1,JSON_ISSET, jsonSetFunc),
+ JFUNCTION(json_type, 1,1,0, 0,0,0, jsonTypeFunc),
+ JFUNCTION(json_type, 2,1,0, 0,0,0, jsonTypeFunc),
+ JFUNCTION(json_valid, 1,1,0, 0,0,0, jsonValidFunc),
+ JFUNCTION(json_valid, 2,1,0, 0,0,0, jsonValidFunc),
+#if SQLITE_DEBUG
+ JFUNCTION(json_parse, 1,1,0, 0,0,0, jsonParseFunc),
#endif
WAGGREGATE(json_group_array, 1, 0, 0,
jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse,
SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|
SQLITE_DETERMINISTIC),
+ WAGGREGATE(jsonb_group_array, 1, JSON_BLOB, 0,
+ jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse,
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
WAGGREGATE(json_group_object, 2, 0, 0,
+ jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse,
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
+ WAGGREGATE(jsonb_group_object,2, JSON_BLOB, 0,
jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse,
SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|
SQLITE_DETERMINISTIC)
};
sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc));
Index: src/main.c
==================================================================
--- src/main.c
+++ src/main.c
@@ -763,22 +763,10 @@
sqlite3GlobalConfig.mxMemdbSize = va_arg(ap, sqlite3_int64);
break;
}
#endif /* SQLITE_OMIT_DESERIALIZE */
- case SQLITE_CONFIG_ROWID_IN_VIEW: {
- int *pVal = va_arg(ap,int*);
-#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
- if( 0==*pVal ) sqlite3GlobalConfig.mNoVisibleRowid = TF_NoVisibleRowid;
- if( 1==*pVal ) sqlite3GlobalConfig.mNoVisibleRowid = 0;
- *pVal = (sqlite3GlobalConfig.mNoVisibleRowid==0);
-#else
- *pVal = 0;
-#endif
- break;
- }
-
default: {
rc = SQLITE_ERROR;
break;
}
}
@@ -4669,10 +4657,32 @@
rc = SQLITE_NOTFOUND;
}
break;
}
#endif
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
+ **
+ ** Activate or deactivate validation of JSONB that is generated from
+ ** text. Off by default, as the validation is slow. Validation is
+ ** only available if compiled using SQLITE_DEBUG.
+ **
+  ** If onOff is initially 1, then turn validation on.  If onOff is
+  ** initially 0, then turn validation off.  If onOff is initially -1,
+  ** then change onOff to the current setting.
+ */
+ case SQLITE_TESTCTRL_JSON_SELFCHECK: {
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD)
+ int *pOnOff = va_arg(ap, int*);
+ if( *pOnOff<0 ){
+ *pOnOff = sqlite3Config.bJsonSelfcheck;
+ }else{
+ sqlite3Config.bJsonSelfcheck = (u8)((*pOnOff)&0xff);
+ }
+#endif
+ break;
+ }
}
va_end(ap);
#endif /* SQLITE_UNTESTABLE */
return rc;
}
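
Illustrative aside (not part of the patch): the new test-control verb follows the usual query-or-set convention, and SQLITE_TESTCTRL_JSON_SELFCHECK itself is defined in sqlite3.h as part of this changeset. A minimal sketch:

  #include <stdio.h>
  #include "sqlite3.h"

  int main(void){
    int onOff;

    /* Pass -1 to read the current setting without changing it */
    onOff = -1;
    sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
    printf("JSONB self-check is %s\n", onOff ? "on" : "off");

    /* Pass 1 to enable validation (effective only in SQLITE_DEBUG builds) */
    onOff = 1;
    sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
    return 0;
  }
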
Index: src/malloc.c
==================================================================
--- src/malloc.c
+++ src/malloc.c
@@ -219,10 +219,28 @@
sqlite3_mutex_leave(mem0.mutex);
sqlite3_release_memory(nByte);
sqlite3_mutex_enter(mem0.mutex);
}
+#ifdef SQLITE_DEBUG
+/*
+** This routine is called whenever an out-of-memory condition is seen.
+** Its only purpose is to serve as a breakpoint for gdb or similar
+** code debuggers when working on out-of-memory conditions, for example
+** caused by PRAGMA hard_heap_limit=N.
+*/
+static SQLITE_NOINLINE void test_oom_breakpoint(void){
+ static u64 nOomFault = 0;
+ nOomFault++;
+  /* The assert() below never fails in a human lifetime.  It is here mostly
+ ** to prevent code optimizers from optimizing out this function. */
+ assert( (nOomFault>>32) < 0xffffffff );
+}
+#else
+# define test_oom_breakpoint(X) /* No-op for production builds */
+#endif
+
/*
** Do a memory allocation with statistics and alarms. Assume the
** lock is already held.
*/
static void mallocWithAlarm(int n, void **pp){
@@ -245,10 +263,11 @@
AtomicStore(&mem0.nearlyFull, 1);
sqlite3MallocAlarm(nFull);
if( mem0.hardLimit ){
nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
if( nUsed >= mem0.hardLimit - nFull ){
+ test_oom_breakpoint();
*pp = 0;
return;
}
}
}else{
@@ -533,10 +552,11 @@
if( nDiff>0 && (nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED)) >=
mem0.alarmThreshold-nDiff ){
sqlite3MallocAlarm(nDiff);
if( mem0.hardLimit>0 && nUsed >= mem0.hardLimit - nDiff ){
sqlite3_mutex_leave(mem0.mutex);
+ test_oom_breakpoint();
return 0;
}
}
pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
Index: src/memdb.c
==================================================================
--- src/memdb.c
+++ src/memdb.c
@@ -797,10 +797,18 @@
rc = sqlite3_step(pStmt);
if( rc!=SQLITE_ROW ){
pOut = 0;
}else{
sz = sqlite3_column_int64(pStmt, 0)*szPage;
+ if( sz==0 ){
+ sqlite3_reset(pStmt);
+ sqlite3_exec(db, "BEGIN IMMEDIATE; COMMIT;", 0, 0, 0);
+ rc = sqlite3_step(pStmt);
+ if( rc==SQLITE_ROW ){
+ sz = sqlite3_column_int64(pStmt, 0)*szPage;
+ }
+ }
if( piSize ) *piSize = sz;
if( mFlags & SQLITE_SERIALIZE_NOCOPY ){
pOut = 0;
}else{
pOut = sqlite3_malloc64( sz );
Index: src/os_unix.c
==================================================================
--- src/os_unix.c
+++ src/os_unix.c
@@ -4052,11 +4052,17 @@
return SQLITE_OK;
}
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
case SQLITE_FCNTL_LOCK_TIMEOUT: {
int iOld = pFile->iBusyTimeout;
+#if SQLITE_ENABLE_SETLK_TIMEOUT==1
pFile->iBusyTimeout = *(int*)pArg;
+#elif SQLITE_ENABLE_SETLK_TIMEOUT==2
+ pFile->iBusyTimeout = !!(*(int*)pArg);
+#else
+# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2"
+#endif
*(int*)pArg = iOld;
return SQLITE_OK;
}
#endif
#if SQLITE_MAX_MMAP_SIZE>0
@@ -4305,10 +4311,29 @@
** zFilename
**
** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and
** unixMutexHeld() is true when reading or writing any other field
** in this structure.
+**
+** aLock[SQLITE_SHM_NLOCK]:
+** This array records the various locks held by clients on each of the
+** SQLITE_SHM_NLOCK slots. If the aLock[] entry is set to 0, then no
+** locks are held by the process on this slot. If it is set to -1, then
+** some client holds an EXCLUSIVE lock on the locking slot. If the aLock[]
+** value is set to a positive value, then it is the number of shared
+** locks currently held on the slot.
+**
+** aMutex[SQLITE_SHM_NLOCK]:
+** Normally, when SQLITE_ENABLE_SETLK_TIMEOUT is not defined, mutex
+** pShmMutex is used to protect the aLock[] array and the right to
+** call fcntl() on unixShmNode.hShm to obtain or release locks.
+**
+** If SQLITE_ENABLE_SETLK_TIMEOUT is defined though, we use an array
+** of mutexes - one for each locking slot. To read or write locking
+** slot aLock[iSlot], the caller must hold the corresponding mutex
+** aMutex[iSlot]. Similarly, to call fcntl() to obtain or release a
+** lock corresponding to slot iSlot, mutex aMutex[iSlot] must be held.
*/
struct unixShmNode {
unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */
sqlite3_mutex *pShmMutex; /* Mutex to access this object */
char *zFilename; /* Name of the mmapped file */
@@ -4318,14 +4343,15 @@
u8 isReadonly; /* True if read-only */
u8 isUnlocked; /* True if no DMS lock held */
char **apRegion; /* Array of mapped shared-memory regions */
int nRef; /* Number of unixShm objects pointing to this */
unixShm *pFirst; /* All unixShm objects pointing to this */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ sqlite3_mutex *aMutex[SQLITE_SHM_NLOCK];
+#endif
int aLock[SQLITE_SHM_NLOCK]; /* # shared locks on slot, -1==excl lock */
#ifdef SQLITE_DEBUG
- u8 exclMask; /* Mask of exclusive locks held */
- u8 sharedMask; /* Mask of shared locks held */
u8 nextShmId; /* Next available unixShm.id value */
#endif
};
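
Illustrative aside (not part of the patch): a toy model of the aLock[] bookkeeping described in the comment above, where 0 means the slot is unlocked, -1 means one client holds it exclusively, and a positive value counts shared holders. The slotAdd* helpers are hypothetical and only illustrate the invariant; they are not SQLite code.

  #include <assert.h>

  static int slotAddShared(int *aLock, int i){
    if( aLock[i]<0 ) return 0;      /* an exclusive holder blocks readers */
    aLock[i]++;                     /* one more shared holder */
    return 1;
  }
  static int slotAddExclusive(int *aLock, int i){
    if( aLock[i]!=0 ) return 0;     /* any existing holder blocks a writer */
    aLock[i] = -1;
    return 1;
  }

  int main(void){
    int aLock[8] = {0};             /* SQLITE_SHM_NLOCK is 8 */
    assert( slotAddShared(aLock, 4) );      /* aLock[4]==1 */
    assert( slotAddShared(aLock, 4) );      /* aLock[4]==2 */
    assert( !slotAddExclusive(aLock, 4) );  /* writer must wait */
    return 0;
  }
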
/*
@@ -4404,20 +4430,39 @@
){
unixShmNode *pShmNode; /* Apply locks to this open shared-memory segment */
struct flock f; /* The posix advisory locking structure */
int rc = SQLITE_OK; /* Result code form fcntl() */
- /* Access to the unixShmNode object is serialized by the caller */
pShmNode = pFile->pInode->pShmNode;
- assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
- assert( pShmNode->nRef>0 || unixMutexHeld() );
+
+ /* Assert that the parameters are within expected range and that the
+ ** correct mutex or mutexes are held. */
+ assert( pShmNode->nRef>=0 );
+ assert( (ofst==UNIX_SHM_DMS && n==1)
+ || (ofst>=UNIX_SHM_BASE && ofst+n<=(UNIX_SHM_BASE+SQLITE_SHM_NLOCK))
+ );
+ if( ofst==UNIX_SHM_DMS ){
+ assert( pShmNode->nRef>0 || unixMutexHeld() );
+ assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
+ }else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int ii;
+    for(ii=ofst-UNIX_SHM_BASE; ii<ofst-UNIX_SHM_BASE+n; ii++){
+      assert( sqlite3_mutex_held(pShmNode->aMutex[ii]) );
+ }
+#else
+ assert( sqlite3_mutex_held(pShmNode->pShmMutex) );
+ assert( pShmNode->nRef>0 );
+#endif
+ }
/* Shared locks never span more than one byte */
assert( n==1 || lockType!=F_RDLCK );
/* Locks are within range */
assert( n>=1 && n<=SQLITE_SHM_NLOCK );
+ assert( ofst>=UNIX_SHM_BASE && ofst<=(UNIX_SHM_DMS+SQLITE_SHM_NLOCK) );
if( pShmNode->hShm>=0 ){
int res;
/* Initialize the locking parameters */
f.l_type = lockType;
@@ -4424,50 +4469,39 @@
f.l_whence = SEEK_SET;
f.l_start = ofst;
f.l_len = n;
res = osSetPosixAdvisoryLock(pShmNode->hShm, &f, pFile);
if( res==-1 ){
-#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+#if defined(SQLITE_ENABLE_SETLK_TIMEOUT) && SQLITE_ENABLE_SETLK_TIMEOUT==1
rc = (pFile->iBusyTimeout ? SQLITE_BUSY_TIMEOUT : SQLITE_BUSY);
#else
rc = SQLITE_BUSY;
#endif
}
}
- /* Update the global lock state and do debug tracing */
-#ifdef SQLITE_DEBUG
- { u16 mask;
- OSTRACE(("SHM-LOCK "));
- mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<exclMask &= ~mask;
- pShmNode->sharedMask &= ~mask;
- }else if( lockType==F_RDLCK ){
- OSTRACE(("read-lock %d ok", ofst));
- pShmNode->exclMask &= ~mask;
- pShmNode->sharedMask |= mask;
- }else{
- assert( lockType==F_WRLCK );
- OSTRACE(("write-lock %d ok", ofst));
- pShmNode->exclMask |= mask;
- pShmNode->sharedMask &= ~mask;
- }
- }else{
- if( lockType==F_UNLCK ){
- OSTRACE(("unlock %d failed", ofst));
- }else if( lockType==F_RDLCK ){
- OSTRACE(("read-lock failed"));
- }else{
- assert( lockType==F_WRLCK );
- OSTRACE(("write-lock %d failed", ofst));
- }
- }
- OSTRACE((" - afterwards %03x,%03x\n",
- pShmNode->sharedMask, pShmNode->exclMask));
+ /* Do debug tracing */
+#ifdef SQLITE_DEBUG
+ OSTRACE(("SHM-LOCK "));
+ if( rc==SQLITE_OK ){
+ if( lockType==F_UNLCK ){
+ OSTRACE(("unlock %d..%d ok\n", ofst, ofst+n-1));
+ }else if( lockType==F_RDLCK ){
+ OSTRACE(("read-lock %d..%d ok\n", ofst, ofst+n-1));
+ }else{
+ assert( lockType==F_WRLCK );
+ OSTRACE(("write-lock %d..%d ok\n", ofst, ofst+n-1));
+ }
+ }else{
+ if( lockType==F_UNLCK ){
+ OSTRACE(("unlock %d..%d failed\n", ofst, ofst+n-1));
+ }else if( lockType==F_RDLCK ){
+ OSTRACE(("read-lock %d..%d failed\n", ofst, ofst+n-1));
+ }else{
+ assert( lockType==F_WRLCK );
+ OSTRACE(("write-lock %d..%d failed\n", ofst, ofst+n-1));
+ }
}
#endif
return rc;
}
@@ -4501,10 +4535,15 @@
if( p && ALWAYS(p->nRef==0) ){
int nShmPerMap = unixShmRegionPerMap();
int i;
assert( p->pInode==pFd->pInode );
sqlite3_mutex_free(p->pShmMutex);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    for(i=0; i<SQLITE_SHM_NLOCK; i++){
+      sqlite3_mutex_free(p->aMutex[i]);
+ }
+#endif
for(i=0; inRegion; i+=nShmPerMap){
if( p->hShm>=0 ){
osMunmap(p->apRegion[i], p->szRegion);
}else{
sqlite3_free(p->apRegion[i]);
@@ -4560,11 +4599,24 @@
}else if( lock.l_type==F_UNLCK ){
if( pShmNode->isReadonly ){
pShmNode->isUnlocked = 1;
rc = SQLITE_READONLY_CANTINIT;
}else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* Do not use a blocking lock here. If the lock cannot be obtained
+ ** immediately, it means some other connection is truncating the
+ ** *-shm file. And after it has done so, it will not release its
+ ** lock, but only downgrade it to a shared lock. So no point in
+ ** blocking here. The call below to obtain the shared DMS lock may
+ ** use a blocking lock. */
+ int iSaveTimeout = pDbFd->iBusyTimeout;
+ pDbFd->iBusyTimeout = 0;
+#endif
rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ pDbFd->iBusyTimeout = iSaveTimeout;
+#endif
/* The first connection to attach must truncate the -shm file. We
** truncate to 3 bytes (an arbitrary small number, less than the
** -shm header size) rather than 0 as a system debugging aid, to
** help detect if a -shm file truncation is legitimate or is the work
** or a rogue process. */
@@ -4681,10 +4733,22 @@
pShmNode->pShmMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
if( pShmNode->pShmMutex==0 ){
rc = SQLITE_NOMEM_BKPT;
goto shm_open_err;
}
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ {
+ int ii;
+      for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){
+        pShmNode->aMutex[ii] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+ if( pShmNode->aMutex[ii]==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto shm_open_err;
+ }
+ }
+ }
+#endif
}
if( pInode->bProcessLock==0 ){
if( 0==sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){
pShmNode->hShm = robust_open(zShm, O_RDWR|O_CREAT|O_NOFOLLOW,
@@ -4902,13 +4966,15 @@
**
** assert( assertLockingArrayOk(pShmNode) );
*/
#ifdef SQLITE_DEBUG
static int assertLockingArrayOk(unixShmNode *pShmNode){
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ return 1;
+#else
unixShm *pX;
int aLock[SQLITE_SHM_NLOCK];
- assert( sqlite3_mutex_held(pShmNode->pShmMutex) );
memset(aLock, 0, sizeof(aLock));
for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
int i;
for(i=0; iaLock, aLock, sizeof(aLock)) );
return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0);
+#endif
}
#endif
/*
** Change the lock state for a shared-memory segment.
**
-** Note that the relationship between SHAREd and EXCLUSIVE locks is a little
+** Note that the relationship between SHARED and EXCLUSIVE locks is a little
** different here than in posix. In xShmLock(), one can go from unlocked
** to shared and back or from unlocked to exclusive and back. But one may
** not go from shared to exclusive or from exclusive to shared.
*/
static int unixShmLock(
@@ -4943,11 +5010,11 @@
){
unixFile *pDbFd = (unixFile*)fd; /* Connection holding shared memory */
unixShm *p; /* The shared memory being locked */
unixShmNode *pShmNode; /* The underlying file iNode */
int rc = SQLITE_OK; /* Result code */
- u16 mask; /* Mask of locks to take or release */
+  u16 mask = (1<<(ofst+n)) - (1<<ofst); /* Mask of locks to take or release */
  int *aLock;

  p = pDbFd->pShm;
if( p==0 ) return SQLITE_IOERR_SHMLOCK;
pShmNode = p->pShmNode;
@@ -4978,92 +5045,155 @@
** held.
**
** It is not permitted to block on the RECOVER lock.
*/
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
- assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
- (ofst!=2) /* not RECOVER */
- && (ofst!=1 || (p->exclMask|p->sharedMask)==0)
- && (ofst!=0 || (p->exclMask|p->sharedMask)<3)
- && (ofst<3 || (p->exclMask|p->sharedMask)<(1<1 || mask==(1<pShmMutex);
- assert( assertLockingArrayOk(pShmNode) );
- if( flags & SQLITE_SHM_UNLOCK ){
- if( (p->exclMask|p->sharedMask) & mask ){
- int ii;
- int bUnlock = 1;
-
- for(ii=ofst; ii((p->sharedMask & (1<sharedMask & (1<1 );
- aLock[ofst]--;
- }
-
- /* Undo the local locks */
- if( rc==SQLITE_OK ){
- p->exclMask &= ~mask;
- p->sharedMask &= ~mask;
- }
- }
- }else if( flags & SQLITE_SHM_SHARED ){
- assert( n==1 );
- assert( (p->exclMask & (1<sharedMask & mask)==0 ){
- if( aLock[ofst]<0 ){
- rc = SQLITE_BUSY;
- }else if( aLock[ofst]==0 ){
- rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
- }
-
- /* Get the local shared locks */
- if( rc==SQLITE_OK ){
- p->sharedMask |= mask;
- aLock[ofst]++;
- }
- }
- }else{
- /* Make sure no sibling connections hold locks that will block this
- ** lock. If any do, return SQLITE_BUSY right away. */
- int ii;
- for(ii=ofst; iisharedMask & mask)==0 );
- if( ALWAYS((p->exclMask & (1<sharedMask & mask)==0 );
- p->exclMask |= mask;
- for(ii=ofst; iipShmMutex);
+ {
+ u16 lockMask = (p->exclMask|p->sharedMask);
+ assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
+ (ofst!=2) /* not RECOVER */
+ && (ofst!=1 || lockMask==0 || lockMask==2)
+ && (ofst!=0 || lockMask<3)
+ && (ofst<3 || lockMask<(1<exclMask & mask)
+ );
+ if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask))
+ || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask))
+ || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK))
+ ){
+
+ /* Take the required mutexes. In SETLK_TIMEOUT mode (blocking locks), if
+      ** this is an attempt on an exclusive lock, use sqlite3_mutex_try(). If any
+ ** other thread is holding this mutex, then it is either holding or about
+ ** to hold a lock exclusive to the one being requested, and we may
+ ** therefore return SQLITE_BUSY to the caller.
+ **
+ ** Doing this prevents some deadlock scenarios. For example, thread 1 may
+ ** be a checkpointer blocked waiting on the WRITER lock. And thread 2
+ ** may be a normal SQL client upgrading to a write transaction. In this
+ ** case thread 2 does a non-blocking request for the WRITER lock. But -
+ ** if it were to use sqlite3_mutex_enter() then it would effectively
+ ** become a (doomed) blocking request, as thread 2 would block until thread
+ ** 1 obtained WRITER and released the mutex. Since thread 2 already holds
+ ** a lock on a read-locking slot at this point, this breaks the
+ ** anti-deadlock rules (see above). */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int iMutex;
+    for(iMutex=ofst; iMutex<ofst+n; iMutex++){
+      if( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) ){
+        rc = sqlite3_mutex_try(pShmNode->aMutex[iMutex]);
+ if( rc!=SQLITE_OK ) goto leave_shmnode_mutexes;
+ }else{
+ sqlite3_mutex_enter(pShmNode->aMutex[iMutex]);
+ }
+ }
+#else
+ sqlite3_mutex_enter(pShmNode->pShmMutex);
+#endif
+
+ if( ALWAYS(rc==SQLITE_OK) ){
+ if( flags & SQLITE_SHM_UNLOCK ){
+ /* Case (a) - unlock. */
+ int bUnlock = 1;
+ assert( (p->exclMask & p->sharedMask)==0 );
+ assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask );
+ assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask );
+
+ /* If this is a SHARED lock being unlocked, it is possible that other
+ ** clients within this process are holding the same SHARED lock. In
+ ** this case, set bUnlock to 0 so that the posix lock is not removed
+ ** from the file-descriptor below. */
+ if( flags & SQLITE_SHM_SHARED ){
+ assert( n==1 );
+ assert( aLock[ofst]>=1 );
+ if( aLock[ofst]>1 ){
+ bUnlock = 0;
+ aLock[ofst]--;
+ p->sharedMask &= ~mask;
+ }
+ }
+
+ if( bUnlock ){
+ rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
+ if( rc==SQLITE_OK ){
+ memset(&aLock[ofst], 0, sizeof(int)*n);
+ p->sharedMask &= ~mask;
+ p->exclMask &= ~mask;
+ }
+ }
+ }else if( flags & SQLITE_SHM_SHARED ){
+ /* Case (b) - a shared lock. */
+
+ if( aLock[ofst]<0 ){
+ /* An exclusive lock is held by some other connection. BUSY. */
+ rc = SQLITE_BUSY;
+ }else if( aLock[ofst]==0 ){
+ rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
+ }
+
+ /* Get the local shared locks */
+ if( rc==SQLITE_OK ){
+ p->sharedMask |= mask;
+ aLock[ofst]++;
+ }
+ }else{
+ /* Case (c) - an exclusive lock. */
+ int ii;
+
+ assert( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) );
+ assert( (p->sharedMask & mask)==0 );
+ assert( (p->exclMask & mask)==0 );
+
+ /* Make sure no sibling connections hold locks that will block this
+ ** lock. If any do, return SQLITE_BUSY right away. */
+ for(ii=ofst; iiexclMask |= mask;
+ for(ii=ofst; ii=ofst; iMutex--){
+ sqlite3_mutex_leave(pShmNode->aMutex[iMutex]);
+ }
+#else
+ sqlite3_mutex_leave(pShmNode->pShmMutex);
+#endif
+ }
+
OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n",
p->id, osGetpid(0), p->sharedMask, p->exclMask));
return rc;
}
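
Illustrative aside (not part of the patch): the comment above unixShmLock() notes that a caller may move between UNLOCKED and SHARED or between UNLOCKED and EXCLUSIVE, but never directly between SHARED and EXCLUSIVE. A toy check of that rule using the public SQLITE_SHM_* flag values; the transitionOk helper is hypothetical and is an illustration only, not SQLite code.

  #include <assert.h>
  #include "sqlite3.h"

  /* 0 means "no lock held"; otherwise SQLITE_SHM_SHARED or SQLITE_SHM_EXCLUSIVE */
  static int transitionOk(int eHave, int eWant){
    if( eHave==0 || eWant==0 ) return 1;  /* to or from the unlocked state */
    return eHave==eWant;                  /* no direct SHARED<->EXCLUSIVE move */
  }

  int main(void){
    assert(  transitionOk(0, SQLITE_SHM_SHARED) );
    assert(  transitionOk(SQLITE_SHM_EXCLUSIVE, 0) );
    assert( !transitionOk(SQLITE_SHM_SHARED, SQLITE_SHM_EXCLUSIVE) );
    return 0;
  }
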
Index: src/pager.c
==================================================================
--- src/pager.c
+++ src/pager.c
@@ -686,11 +686,11 @@
i64 journalSizeLimit; /* Size limit for persistent journal files */
char *zFilename; /* Name of the database file */
char *zJournal; /* Name of the journal file */
int (*xBusyHandler)(void*); /* Function to call when busy */
void *pBusyHandlerArg; /* Context argument for xBusyHandler */
- int aStat[4]; /* Total cache hits, misses, writes, spills */
+ u32 aStat[4]; /* Total cache hits, misses, writes, spills */
#ifdef SQLITE_TEST
int nRead; /* Database pages read */
#endif
void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */
int (*xGet)(Pager*,Pgno,DbPage**,int); /* Routine to fetch a patch */
@@ -816,13 +816,12 @@
if( pPager->fd->pMethods==0 ) return 0;
if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0;
#ifndef SQLITE_OMIT_WAL
if( pPager->pWal ){
u32 iRead = 0;
- int rc;
- rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
- return (rc==SQLITE_OK && iRead==0);
+ (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
+ return iRead==0;
}
#endif
return 1;
}
#endif
@@ -6830,15 +6829,15 @@
a[1] = sqlite3PcachePagecount(pPager->pPCache);
a[2] = sqlite3PcacheGetCachesize(pPager->pPCache);
a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize;
a[4] = pPager->eState;
a[5] = pPager->errCode;
- a[6] = pPager->aStat[PAGER_STAT_HIT];
- a[7] = pPager->aStat[PAGER_STAT_MISS];
+ a[6] = (int)pPager->aStat[PAGER_STAT_HIT] & 0x7fffffff;
+ a[7] = (int)pPager->aStat[PAGER_STAT_MISS] & 0x7fffffff;
a[8] = 0; /* Used to be pPager->nOvfl */
a[9] = pPager->nRead;
- a[10] = pPager->aStat[PAGER_STAT_WRITE];
+ a[10] = (int)pPager->aStat[PAGER_STAT_WRITE] & 0x7fffffff;
return a;
}
#endif
/*
@@ -6850,11 +6849,11 @@
** Before returning, *pnVal is incremented by the
** current cache hit or miss count, according to the value of eStat. If the
** reset parameter is non-zero, the cache hit or miss count is zeroed before
** returning.
*/
-void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){
+void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, u64 *pnVal){
assert( eStat==SQLITE_DBSTATUS_CACHE_HIT
|| eStat==SQLITE_DBSTATUS_CACHE_MISS
|| eStat==SQLITE_DBSTATUS_CACHE_WRITE
|| eStat==SQLITE_DBSTATUS_CACHE_WRITE+1
Index: src/pager.h
==================================================================
--- src/pager.h
+++ src/pager.h
@@ -214,11 +214,11 @@
sqlite3_file *sqlite3PagerFile(Pager*);
sqlite3_file *sqlite3PagerJrnlFile(Pager*);
const char *sqlite3PagerJournalname(Pager*);
void *sqlite3PagerTempSpace(Pager*);
int sqlite3PagerIsMemdb(Pager*);
-void sqlite3PagerCacheStat(Pager *, int, int, int *);
+void sqlite3PagerCacheStat(Pager *, int, int, u64*);
void sqlite3PagerClearCache(Pager*);
int sqlite3SectorSize(sqlite3_file *);
/* Functions used to truncate the database file. */
void sqlite3PagerTruncateImage(Pager*,Pgno);
Index: src/parse.y
==================================================================
--- src/parse.y
+++ src/parse.y
@@ -18,10 +18,14 @@
** implementation of a parser for the given grammar. You might be reading
** this comment as part of the translated C-code. Edits should be made
** to the original parse.y sources.
*/
}
+
+// Function used to enlarge the parser stack, if needed
+%realloc parserStackRealloc
+%free sqlite3_free
// All token codes are small integers with #defines that begin with "TK_"
%token_prefix TK_
// The type of the data attached to each token is Token. This is also the
@@ -43,11 +47,11 @@
}else{
sqlite3ErrorMsg(pParse, "incomplete input");
}
}
%stack_overflow {
- sqlite3ErrorMsg(pParse, "parser stack overflow");
+ sqlite3OomFault(pParse->db);
}
// The name of the generated procedure that implements the parser
// is as follows:
%name sqlite3Parser
@@ -545,10 +549,18 @@
}else{
sqlite3WithDelete(pParse->db, pWith);
}
return pSelect;
}
+
+ /* Memory allocator for parser stack resizing. This is a thin wrapper around
+ ** sqlite3_realloc() that includes a call to sqlite3FaultSim() to facilitate
+ ** testing.
+ */
+ static void *parserStackRealloc(void *pOld, sqlite3_uint64 newSize){
+ return sqlite3FaultSim(700) ? 0 : sqlite3_realloc(pOld, newSize);
+ }
}
%ifndef SQLITE_OMIT_CTE
select(A) ::= WITH wqlist(W) selectnowith(X). {A = attachWithToSelect(pParse,X,W);}
select(A) ::= WITH RECURSIVE wqlist(W) selectnowith(X).
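
The %realloc directive above routes parser-stack growth through parserStackRealloc(), whose only twist is a fault-injection hook so out-of-memory handling can be exercised. A minimal standalone sketch of the same pattern follows; faultCountdown and xTestRealloc are hypothetical names, not SQLite APIs.

#include <stdlib.h>

static int faultCountdown = 0;   /* 0: never fail; N>0: fail on the Nth call */

/* realloc() wrapper that can be told to fail on demand, so a caller's
** out-of-memory path can be tested deterministically. */
static void *xTestRealloc(void *pOld, size_t nNew){
  if( faultCountdown>0 && --faultCountdown==0 ){
    return 0;                    /* simulated allocation failure */
  }
  return realloc(pOld, nNew);
}
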
Index: src/prepare.c
==================================================================
--- src/prepare.c
+++ src/prepare.c
@@ -866,10 +866,11 @@
sqlite3BtreeLeaveAll(db);
rc = sqlite3ApiExit(db, rc);
assert( (rc&db->errMask)==rc );
db->busyHandler.nBusy = 0;
sqlite3_mutex_leave(db->mutex);
+ assert( rc==SQLITE_OK || (*ppStmt)==0 );
return rc;
}
/*
Index: src/printf.c
==================================================================
--- src/printf.c
+++ src/printf.c
@@ -1367,11 +1367,11 @@
va_end(ap);
}
/*****************************************************************************
-** Reference counted string storage
+** Reference counted string/blob storage
*****************************************************************************/
/*
** Increase the reference count of the string by one.
**
Index: src/resolve.c
==================================================================
--- src/resolve.c
+++ src/resolve.c
@@ -77,12 +77,10 @@
sqlite3 *db; /* The database connection */
  assert( iCol>=0 && iCol<pEList->nExpr );
pOrig = pEList->a[iCol].pExpr;
assert( pOrig!=0 );
- assert( !ExprHasProperty(pExpr, EP_Reduced|EP_TokenOnly) );
- if( pExpr->pAggInfo ) return;
db = pParse->db;
pDup = sqlite3ExprDup(db, pOrig, 0);
if( db->mallocFailed ){
sqlite3ExprDelete(db, pDup);
pDup = 0;
@@ -277,11 +275,11 @@
*/
static int lookupName(
Parse *pParse, /* The parsing context */
const char *zDb, /* Name of the database containing table, or NULL */
const char *zTab, /* Name of table containing column, or NULL */
- const char *zCol, /* Name of the column. */
+ const Expr *pRight, /* Name of the column. */
NameContext *pNC, /* The name context used to resolve the name */
Expr *pExpr /* Make this EXPR node point to the selected column */
){
int i, j; /* Loop counters */
int cnt = 0; /* Number of matching column names */
@@ -294,10 +292,11 @@
Schema *pSchema = 0; /* Schema of the expression */
int eNewExprOp = TK_COLUMN; /* New value for pExpr->op on success */
Table *pTab = 0; /* Table holding the row */
Column *pCol; /* A column of pTab */
ExprList *pFJMatch = 0; /* Matches for FULL JOIN .. USING */
+ const char *zCol = pRight->u.zToken;
assert( pNC ); /* the name context cannot be NULL. */
assert( zCol ); /* The Z in X.Y.Z cannot be NULL */
assert( zDb==0 || zTab!=0 );
assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
@@ -466,41 +465,12 @@
}
break;
}
}
if( 0==cnt && VisibleRowid(pTab) ){
- /* pTab is a potential ROWID match. Keep track of it and match
- ** the ROWID later if that seems appropriate. (Search for "cntTab"
- ** to find related code.) Only allow a ROWID match if there is
- ** a single ROWID match candidate.
- */
-#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
- /* In SQLITE_ALLOW_ROWID_IN_VIEW mode, allow a ROWID match
- ** if there is a single VIEW candidate or if there is a single
- ** non-VIEW candidate plus multiple VIEW candidates. In other
- ** words non-VIEW candidate terms take precedence over VIEWs.
- */
- if( cntTab==0
- || (cntTab==1
- && ALWAYS(pMatch!=0)
- && ALWAYS(pMatch->pTab!=0)
- && (pMatch->pTab->tabFlags & TF_Ephemeral)!=0
- && (pTab->tabFlags & TF_Ephemeral)==0)
- ){
- cntTab = 1;
- pMatch = pItem;
- }else{
- cntTab++;
- }
-#else
- /* The (much more common) non-SQLITE_ALLOW_ROWID_IN_VIEW case is
- ** simpler since we require exactly one candidate, which will
- ** always be a non-VIEW
- */
- cntTab++;
- pMatch = pItem;
-#endif
+ cntTab++;
+ pMatch = pItem;
}
}
if( pMatch ){
pExpr->iTable = pMatch->iCursor;
assert( ExprUseYTab(pExpr) );
@@ -622,17 +592,17 @@
/*
** Perhaps the name is a reference to the ROWID
*/
if( cnt==0
- && cntTab>=1
+ && cntTab==1
&& pMatch
&& (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0
&& sqlite3IsRowid(zCol)
&& ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom)
){
- cnt = cntTab;
+ cnt = 1;
if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1;
pExpr->affExpr = SQLITE_AFF_INTEGER;
}
/*
@@ -782,10 +752,14 @@
zErr = cnt==0 ? "no such column" : "ambiguous column name";
if( zDb ){
sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol);
}else if( zTab ){
sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol);
+ }else if( cnt==0 && ExprHasProperty(pRight,EP_DblQuoted) ){
+ sqlite3ErrorMsg(pParse, "%s: \"%s\" - should this be a"
+ " string literal in single-quotes?",
+ zErr, zCol);
}else{
sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol);
}
sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr);
pParse->checkSchema = 1;
@@ -1029,20 +1003,19 @@
** be one call to lookupName(). Then the compiler will in-line
** lookupName() for a size reduction and performance increase.
*/
case TK_ID:
case TK_DOT: {
- const char *zColumn;
const char *zTable;
const char *zDb;
Expr *pRight;
if( pExpr->op==TK_ID ){
zDb = 0;
zTable = 0;
assert( !ExprHasProperty(pExpr, EP_IntValue) );
- zColumn = pExpr->u.zToken;
+ pRight = pExpr;
}else{
Expr *pLeft = pExpr->pLeft;
testcase( pNC->ncFlags & NC_IdxExpr );
testcase( pNC->ncFlags & NC_GenCol );
sqlite3ResolveNotValid(pParse, pNC, "the \".\" operator",
@@ -1057,18 +1030,17 @@
pLeft = pRight->pLeft;
pRight = pRight->pRight;
}
assert( ExprUseUToken(pLeft) && ExprUseUToken(pRight) );
zTable = pLeft->u.zToken;
- zColumn = pRight->u.zToken;
assert( ExprUseYTab(pExpr) );
if( IN_RENAME_OBJECT ){
sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight);
sqlite3RenameTokenRemap(pParse, (void*)&pExpr->y.pTab, (void*)pLeft);
}
}
- return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr);
+ return lookupName(pParse, zDb, zTable, pRight, pNC, pExpr);
}
/* Resolve function names
*/
case TK_FUNCTION: {
@@ -1280,15 +1252,16 @@
#endif
pNC2 = pNC;
while( pNC2
&& sqlite3ReferencesSrcList(pParse, pExpr, pNC2->pSrcList)==0
){
- pExpr->op2++;
+ pExpr->op2 += (1 + pNC2->nNestedSelect);
pNC2 = pNC2->pNext;
}
assert( pDef!=0 || IN_RENAME_OBJECT );
if( pNC2 && pDef ){
+ pExpr->op2 += pNC2->nNestedSelect;
assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
assert( SQLITE_FUNC_ANYORDER==NC_OrderAgg );
testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
testcase( (pDef->funcFlags & SQLITE_FUNC_ANYORDER)!=0 );
pNC2->ncFlags |= NC_HasAgg
@@ -1843,10 +1816,11 @@
p->pOrderBy = 0;
}
/* Recursively resolve names in all subqueries in the FROM clause
*/
+ if( pOuterNC ) pOuterNC->nNestedSelect++;
  for(i=0; i<p->pSrc->nSrc; i++){
SrcItem *pItem = &p->pSrc->a[i];
if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){
int nRef = pOuterNC ? pOuterNC->nRef : 0;
const char *zSavedContext = pParse->zAuthContext;
@@ -1866,10 +1840,13 @@
if( pOuterNC ){
assert( pItem->fg.isCorrelated==0 && pOuterNC->nRef>=nRef );
pItem->fg.isCorrelated = (pOuterNC->nRef>nRef);
}
}
+ }
+ if( pOuterNC && ALWAYS(pOuterNC->nNestedSelect>0) ){
+ pOuterNC->nNestedSelect--;
}
/* Set up the local name-context to pass to sqlite3ResolveExprNames() to
** resolve the result-set expression list.
*/
Index: src/select.c
==================================================================
--- src/select.c
+++ src/select.c
@@ -182,10 +182,13 @@
** Delete the given Select structure and all of its substructures.
*/
void sqlite3SelectDelete(sqlite3 *db, Select *p){
if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1);
}
+void sqlite3SelectDeleteGeneric(sqlite3 *db, void *p){
+ if( ALWAYS(p) ) clearSelect(db, (Select*)p, 1);
+}
/*
** Return a pointer to the right-most SELECT statement in a compound.
*/
static Select *findRightmost(Select *p){
@@ -1948,11 +1951,15 @@
/* The "table" is actually a sub-select or a view in the FROM clause
** of the SELECT statement. Return the declaration type and origin
** data for the result-set column of the sub-select.
*/
  if( iCol<pS->pEList->nExpr
- && (!ViewCanHaveRowid || iCol>=0)
+#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
+ && iCol>=0
+#else
+ && ALWAYS(iCol>=0)
+#endif
){
/* If iCol is less than zero, then the expression requests the
** rowid of the sub-select or view. This expression is legal (see
** test case misc2.2.2) - it always evaluates to NULL.
*/
@@ -3198,13 +3205,11 @@
multi_select_end:
pDest->iSdst = dest.iSdst;
pDest->nSdst = dest.nSdst;
if( pDelete ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3SelectDelete,
- pDelete);
+ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete);
}
return rc;
}
#endif /* SQLITE_OMIT_COMPOUND_SELECT */
@@ -3751,12 +3756,11 @@
sqlite3VdbeResolveLabel(v, labelEnd);
/* Make arrangements to free the 2nd and subsequent arms of the compound
** after the parse has finished */
if( pSplit->pPrior ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior);
+ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pSplit->pPrior);
}
pSplit->pPrior = pPrior;
pPrior->pNext = pSplit;
sqlite3ExprListDelete(db, pPrior->pOrderBy);
pPrior->pOrderBy = 0;
@@ -4573,13 +4577,11 @@
*/
if( ALWAYS(pSubitem->pTab!=0) ){
Table *pTabToDel = pSubitem->pTab;
if( pTabToDel->nTabRef==1 ){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
- sqlite3ParserAddCleanup(pToplevel,
- (void(*)(sqlite3*,void*))sqlite3DeleteTable,
- pTabToDel);
+ sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel);
testcase( pToplevel->earlyCleanup );
}else{
pTabToDel->nTabRef--;
}
pSubitem->pTab = 0;
@@ -5622,12 +5624,11 @@
** calling this routine, Instead, use only the return value.
*/
With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){
if( pWith ){
if( bFree ){
- pWith = (With*)sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3WithDelete,
+ pWith = (With*)sqlite3ParserAddCleanup(pParse, sqlite3WithDeleteGeneric,
pWith);
if( pWith==0 ) return 0;
}
if( pParse->nErr==0 ){
assert( pParse->pWith!=pWith );
@@ -5871,12 +5872,11 @@
pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
#ifndef SQLITE_ALLOW_ROWID_IN_VIEW
/* The usual case - do not allow ROWID on a subquery */
pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid;
#else
- /* Legacy compatibility mode */
- pTab->tabFlags |= TF_Ephemeral | sqlite3Config.mNoVisibleRowid;
+ pTab->tabFlags |= TF_Ephemeral; /* Legacy compatibility mode */
#endif
return pParse->nErr ? SQLITE_ERROR : SQLITE_OK;
}
@@ -6140,11 +6140,11 @@
if( pFrom->fg.isNestedFrom ){
assert( pFrom->pSelect!=0 );
pNestedFrom = pFrom->pSelect->pEList;
assert( pNestedFrom!=0 );
assert( pNestedFrom->nExpr==pTab->nCol );
- assert( VisibleRowid(pTab)==0 || ViewCanHaveRowid );
+ assert( VisibleRowid(pTab)==0 );
}else{
if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){
continue;
}
pNestedFrom = 0;
@@ -6172,12 +6172,11 @@
}
}else{
pUsing = 0;
}
- nAdd = pTab->nCol;
- if( VisibleRowid(pTab) && (selFlags & SF_NestedFrom)!=0 ) nAdd++;
+ nAdd = pTab->nCol + (VisibleRowid(pTab) && (selFlags&SF_NestedFrom));
    for(j=0; j<nAdd; j++){
      const char *zName;
      struct ExprList_item *pX; /* Newly added ExprList term */

      if( j==pTab->nCol ){
@@ -6255,12 +6254,11 @@
break; /* OOM */
}
pX = &pNew->a[pNew->nExpr-1];
assert( pX->zEName==0 );
if( (selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){
-      if( pNestedFrom && (!ViewCanHaveRowid || j<pNestedFrom->nExpr) ){
-        assert( j<pNestedFrom->nExpr );
+ if( pNestedFrom ){
pX->zEName = sqlite3DbStrDup(db, pNestedFrom->a[j].zEName);
testcase( pX->zEName==0 );
}else{
pX->zEName = sqlite3MPrintf(db, "%s.%s.%s",
zSchemaName, zTabName, zName);
@@ -6659,19 +6657,23 @@
KeyInfo *pKeyInfo;
int nExtra = 0;
assert( pFunc->pFExpr->pLeft!=0 );
assert( pFunc->pFExpr->pLeft->op==TK_ORDER );
assert( ExprUseXList(pFunc->pFExpr->pLeft) );
+ assert( pFunc->pFunc!=0 );
pOBList = pFunc->pFExpr->pLeft->x.pList;
if( !pFunc->bOBUnique ){
nExtra++; /* One extra column for the OP_Sequence */
}
if( pFunc->bOBPayload ){
/* extra columns for the function arguments */
assert( ExprUseXList(pFunc->pFExpr) );
nExtra += pFunc->pFExpr->x.pList->nExpr;
}
+ if( pFunc->bUseSubtype ){
+ nExtra += pFunc->pFExpr->x.pList->nExpr;
+ }
pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOBList, 0, nExtra);
if( !pFunc->bOBUnique && pParse->nErr==0 ){
pKeyInfo->nKeyField++;
}
sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
@@ -6694,20 +6696,21 @@
  for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
ExprList *pList;
assert( ExprUseXList(pF->pFExpr) );
pList = pF->pFExpr->x.pList;
if( pF->iOBTab>=0 ){
- /* For an ORDER BY aggregate, calls to OP_AggStep where deferred and
- ** all content was stored in emphermal table pF->iOBTab. Extract that
- ** content now (in ORDER BY order) and make all calls to OP_AggStep
+ /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs
+ ** were stored in emphermal table pF->iOBTab. Here, we extract those
+ ** inputs (in ORDER BY order) and make all calls to OP_AggStep
** before doing the OP_AggFinal call. */
int iTop; /* Start of loop for extracting columns */
int nArg; /* Number of columns to extract */
int nKey; /* Key columns to be skipped */
int regAgg; /* Extract into this array */
int j; /* Loop counter */
-
+
+ assert( pF->pFunc!=0 );
nArg = pList->nExpr;
regAgg = sqlite3GetTempRange(pParse, nArg);
if( pF->bOBPayload==0 ){
nKey = 0;
@@ -6720,10 +6723,19 @@
}
iTop = sqlite3VdbeAddOp1(v, OP_Rewind, pF->iOBTab); VdbeCoverage(v);
for(j=nArg-1; j>=0; j--){
sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, nKey+j, regAgg+j);
}
+ if( pF->bUseSubtype ){
+ int regSubtype = sqlite3GetTempReg(pParse);
+ int iBaseCol = nKey + nArg + (pF->bOBPayload==0 && pF->bOBUnique==0);
+ for(j=nArg-1; j>=0; j--){
+ sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, iBaseCol+j, regSubtype);
+ sqlite3VdbeAddOp2(v, OP_SetSubtype, regSubtype, regAgg+j);
+ }
+ sqlite3ReleaseTempReg(pParse, regSubtype);
+ }
sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, (u8)nArg);
sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v);
sqlite3VdbeJumpHere(v, iTop);
@@ -6774,10 +6786,11 @@
int regAggSz = 0;
int regDistinct = 0;
ExprList *pList;
assert( ExprUseXList(pF->pFExpr) );
assert( !IsWindowFunc(pF->pFExpr) );
+ assert( pF->pFunc!=0 );
pList = pF->pFExpr->x.pList;
if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){
Expr *pFilter = pF->pFExpr->y.pWin->pFilter;
if( pAggInfo->nAccumulator
&& (pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
@@ -6817,10 +6830,13 @@
if( !pF->bOBUnique ){
regAggSz++; /* One register for OP_Sequence */
}
if( pF->bOBPayload ){
regAggSz += nArg;
+ }
+ if( pF->bUseSubtype ){
+ regAggSz += nArg;
}
regAggSz++; /* One extra register to hold result of MakeRecord */
regAgg = sqlite3GetTempRange(pParse, regAggSz);
regDistinct = regAgg;
sqlite3ExprCodeExprList(pParse, pOBList, regAgg, 0, SQLITE_ECEL_DUP);
@@ -6830,10 +6846,18 @@
jj++;
}
if( pF->bOBPayload ){
regDistinct = regAgg+jj;
sqlite3ExprCodeExprList(pParse, pList, regDistinct, 0, SQLITE_ECEL_DUP);
+ jj += nArg;
+ }
+ if( pF->bUseSubtype ){
+ int kk;
+ int regBase = pF->bOBPayload ? regDistinct : regAgg;
+        for(kk=0; kk<nArg; kk++, jj++){
+          sqlite3VdbeAddOp2(v, OP_GetSubtype, regBase+kk, regAgg+jj);
+        }
      }
    }else{
      nArg = pList->nExpr;
regAgg = sqlite3GetTempRange(pParse, nArg);
regDistinct = regAgg;
@@ -7034,11 +7058,12 @@
}
/*
** Deallocate a single AggInfo object
*/
-static void agginfoFree(sqlite3 *db, AggInfo *p){
+static void agginfoFree(sqlite3 *db, void *pArg){
+ AggInfo *p = (AggInfo*)pArg;
sqlite3DbFree(db, p->aCol);
sqlite3DbFree(db, p->aFunc);
sqlite3DbFreeNN(db, p);
}
@@ -7108,11 +7133,11 @@
pSub->pPrior = 0;
pSub->pNext = 0;
pSub->selFlags |= SF_Aggregate;
pSub->selFlags &= ~SF_Compound;
pSub->nSelectRow = 0;
- sqlite3ExprListDelete(db, pSub->pEList);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList);
pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount;
pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm);
pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
sqlite3PExprAddSelect(pParse, pTerm, pSub);
if( pExpr==0 ){
@@ -7288,13 +7313,12 @@
TREETRACE(0x800,pParse,p, ("dropping superfluous ORDER BY:\n"));
if( sqlite3TreeTrace & 0x800 ){
sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY");
}
#endif
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
- p->pOrderBy);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric,
+ p->pOrderBy);
testcase( pParse->earlyCleanup );
p->pOrderBy = 0;
}
p->selFlags &= ~SF_Distinct;
p->selFlags |= SF_NoopOrderBy;
@@ -7482,13 +7506,12 @@
&& (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */
&& OptimizationEnabled(db, SQLITE_OmitOrderBy)
){
TREETRACE(0x800,pParse,p,
("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1));
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
- pSub->pOrderBy);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric,
+ pSub->pOrderBy);
pSub->pOrderBy = 0;
}
/* If the outer query contains a "complex" result set (that is,
** if the result set of the outer query uses functions or subqueries)
@@ -8013,12 +8036,11 @@
** sAggInfo for all TK_AGG_FUNCTION nodes in expressions of the
** SELECT statement.
*/
pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) );
if( pAggInfo ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))agginfoFree, pAggInfo);
+ sqlite3ParserAddCleanup(pParse, agginfoFree, pAggInfo);
testcase( pParse->earlyCleanup );
}
if( db->mallocFailed ){
goto select_end;
}
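
Several hunks above replace casts such as (void(*)(sqlite3*,void*))sqlite3SelectDelete with dedicated ...Generic() wrappers, since calling a function through an incompatible pointer type is undefined behavior. A minimal sketch of that wrapper pattern, with a hypothetical Widget type standing in for Select/ExprList/Table:

#include <stdlib.h>

typedef struct Widget { int x; } Widget;

/* The properly typed destructor. */
static void widgetDelete(Widget *p){
  free(p);
}

/* Wrapper whose signature exactly matches what the cleanup registry calls,
** so no function-pointer cast is needed at the registration site. */
static void widgetDeleteGeneric(void *pCtx, void *p){
  (void)pCtx;                  /* e.g. the sqlite3* passed by the registry */
  widgetDelete((Widget*)p);
}
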
Index: src/shell.c.in
==================================================================
--- src/shell.c.in
+++ src/shell.c.in
@@ -249,10 +249,11 @@
#endif
INCLUDE ../ext/consio/console_io.h
INCLUDE ../ext/consio/console_io.c
#ifndef SQLITE_SHELL_FIDDLE
+
/* From here onward, fgets() is redirected to the console_io library. */
# define fgets(b,n,f) fGetsUtf8(b,n,f)
/*
* Define macros for emitting output text in various ways:
* sputz(s, z) => emit 0-terminated string z to given stream s
@@ -273,10 +274,11 @@
# define oputz(z) oPutsUtf8(z)
# define oputf oPrintfUtf8
# define eputz(z) ePutsUtf8(z)
# define eputf ePrintfUtf8
# define oputb(buf,na) oPutbUtf8(buf,na)
+
#else
/* For Fiddle, all console handling and emit redirection is omitted. */
# define sputz(fp,z) fputs(z,fp)
# define sputf(fp,fmt, ...) fprintf(fp,fmt,__VA_ARGS__)
# define oputz(z) fputs(z,stdout)
@@ -356,11 +358,11 @@
static void endTimer(void){
if( enableTimer ){
sqlite3_int64 iEnd = timeOfDay();
struct rusage sEnd;
getrusage(RUSAGE_SELF, &sEnd);
- oputf("Run Time: real %.3f user %f sys %f\n",
+ sputf(stdout, "Run Time: real %.3f user %f sys %f\n",
(iEnd - iBegin)*0.001,
timeDiff(&sBegin.ru_utime, &sEnd.ru_utime),
timeDiff(&sBegin.ru_stime, &sEnd.ru_stime));
}
}
@@ -435,11 +437,11 @@
static void endTimer(void){
if( enableTimer && getProcessTimesAddr){
FILETIME ftCreation, ftExit, ftKernelEnd, ftUserEnd;
sqlite3_int64 ftWallEnd = timeOfDay();
getProcessTimesAddr(hProcess,&ftCreation,&ftExit,&ftKernelEnd,&ftUserEnd);
- oputf("Run Time: real %.3f user %f sys %f\n",
+ sputf(stdout, "Run Time: real %.3f user %f sys %f\n",
(ftWallEnd - ftWallBegin)*0.001,
timeDiff(&ftUserBegin, &ftUserEnd),
timeDiff(&ftKernelBegin, &ftKernelEnd));
}
}
@@ -732,18 +734,18 @@
** and is an ordinary file or a character stream source.
** Otherwise return 0.
*/
static FILE * openChrSource(const char *zFile){
#if defined(_WIN32) || defined(WIN32)
- struct _stat x = {0};
+ struct __stat64 x = {0};
# define STAT_CHR_SRC(mode) ((mode & (_S_IFCHR|_S_IFIFO|_S_IFREG))!=0)
/* On Windows, open first, then check the stream nature. This order
** is necessary because _stat() and sibs, when checking a named pipe,
** effectively break the pipe as its supplier sees it. */
FILE *rv = fopen(zFile, "rb");
if( rv==0 ) return 0;
- if( _fstat(_fileno(rv), &x) != 0
+ if( _fstat64(_fileno(rv), &x) != 0
|| !STAT_CHR_SRC(x.st_mode)){
fclose(rv);
rv = 0;
}
return rv;
@@ -1288,10 +1290,11 @@
u8 doXdgOpen; /* Invoke start/open/xdg-open in output_reset() */
u8 nEqpLevel; /* Depth of the EQP output graph */
u8 eTraceType; /* SHELL_TRACE_* value for type of trace */
u8 bSafeMode; /* True to prohibit unsafe operations */
u8 bSafeModePersist; /* The long-term value of bSafeMode */
+ u8 eRestoreState; /* See comments above doAutoDetectRestore() */
ColModeOpts cmOpts; /* Option values affecting columnar mode output */
unsigned statsOn; /* True to display memory stats before each finalize */
unsigned mEqpLines; /* Mask of vertical lines in the EQP output graph */
int inputNesting; /* Track nesting level of .read and other redirects */
int outCount; /* Revert to stdout when reaching zero */
@@ -5315,11 +5318,10 @@
sqlite3_open_v2(zDbFilename, &p->db,
SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|p->openFlags, 0);
break;
}
}
- globalDb = p->db;
if( p->db==0 || SQLITE_OK!=sqlite3_errcode(p->db) ){
eputf("Error: unable to open database \"%s\": %s\n",
zDbFilename, sqlite3_errmsg(p->db));
if( (openFlags & OPEN_DB_KEEPALIVE)==0 ){
exit(1);
@@ -5332,10 +5334,11 @@
}else{
eputf("Notice: using substitute in-memory database instead of \"%s\"\n",
zDbFilename);
}
}
+ globalDb = p->db;
sqlite3_db_config(p->db, SQLITE_DBCONFIG_STMT_SCANSTATUS, (int)0, (int*)0);
/* Reflect the use or absence of --unsafe-testing invocation. */
{
int testmode_on = ShellHasFlag(p,SHFLG_TestingMode);
@@ -6727,11 +6730,10 @@
eputz("Where sub-commands are:\n");
eputz(" fkey-indexes\n");
return SQLITE_ERROR;
}
-#if !defined SQLITE_OMIT_VIRTUALTABLE
static void shellPrepare(
sqlite3 *db,
int *pRc,
const char *zSql,
sqlite3_stmt **ppStmt
@@ -6746,16 +6748,12 @@
}
}
/*
** Create a prepared statement using printf-style arguments for the SQL.
-**
-** This routine is could be marked "static". But it is not always used,
-** depending on compile-time options. By omitting the "static", we avoid
-** nuisance compiler warnings about "defined but not used".
*/
-void shellPreparePrintf(
+static void shellPreparePrintf(
sqlite3 *db,
int *pRc,
sqlite3_stmt **ppStmt,
const char *zFmt,
...
@@ -6774,17 +6772,14 @@
sqlite3_free(z);
}
}
}
-/* Finalize the prepared statement created using shellPreparePrintf().
-**
-** This routine is could be marked "static". But it is not always used,
-** depending on compile-time options. By omitting the "static", we avoid
-** nuisance compiler warnings about "defined but not used".
+/*
+** Finalize the prepared statement created using shellPreparePrintf().
*/
-void shellFinalize(
+static void shellFinalize(
int *pRc,
sqlite3_stmt *pStmt
){
if( pStmt ){
sqlite3 *db = sqlite3_db_handle(pStmt);
@@ -6796,10 +6791,11 @@
*pRc = rc;
}
}
}
+#if !defined SQLITE_OMIT_VIRTUALTABLE
/* Reset the prepared statement created using shellPreparePrintf().
**
** This routine is could be marked "static". But it is not always used,
** depending on compile-time options. By omitting the "static", we avoid
** nuisance compiler warnings about "defined but not used".
@@ -7861,10 +7857,34 @@
sqlite3_close(*pDb);
*pDb = 0;
return zColsSpec;
}
}
+
+/*
+** Check if the sqlite_schema table contains one or more virtual tables. If
+** parameter zLike is not NULL, then it is an SQL expression that the
+** sqlite_schema row must also match. If one or more such rows are found,
+** print the following warning to the output:
+**
+** WARNING: Script requires that SQLITE_DBCONFIG_DEFENSIVE be disabled
+*/
+static int outputDumpWarning(ShellState *p, const char *zLike){
+ int rc = SQLITE_OK;
+ sqlite3_stmt *pStmt = 0;
+ shellPreparePrintf(p->db, &rc, &pStmt,
+ "SELECT 1 FROM sqlite_schema o WHERE "
+ "sql LIKE 'CREATE VIRTUAL TABLE%%' AND %s", zLike ? zLike : "true"
+ );
+ if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
+ oputz("/* WARNING: "
+ "Script requires that SQLITE_DBCONFIG_DEFENSIVE be disabled */\n"
+ );
+ }
+ shellFinalize(&rc, pStmt);
+ return rc;
+}
/*
** If an input line begins with "." then invoke this routine to
** process that line.
**
@@ -8324,10 +8344,11 @@
}
}
open_db(p, 0);
+ outputDumpWarning(p, zLike);
if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
/* When playing back a "dump", the content might appear in an order
** which causes immediate foreign key constraints to be violated.
** So disable foreign-key constraint enforcement to prevent problems. */
oputz("PRAGMA foreign_keys=OFF;\n");
@@ -8352,11 +8373,12 @@
sqlite3_free(zSql);
if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
zSql = sqlite3_mprintf(
"SELECT sql FROM sqlite_schema AS o "
"WHERE (%s) AND sql NOT NULL"
- " AND type IN ('index','trigger','view')",
+ " AND type IN ('index','trigger','view') "
+ "ORDER BY type COLLATE NOCASE DESC",
zLike
);
run_table_dump_query(p, zSql);
sqlite3_free(zSql);
}
@@ -10752,10 +10774,11 @@
{"extra_schema_checks",SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS,0,"BOOLEAN" },
/*{"fault_install", SQLITE_TESTCTRL_FAULT_INSTALL, 1,"" },*/
{"fk_no_action", SQLITE_TESTCTRL_FK_NO_ACTION, 0, "BOOLEAN" },
{"imposter", SQLITE_TESTCTRL_IMPOSTER,1,"SCHEMA ON/OFF ROOTPAGE"},
{"internal_functions", SQLITE_TESTCTRL_INTERNAL_FUNCTIONS,0,"" },
+ {"json_selfcheck", SQLITE_TESTCTRL_JSON_SELFCHECK ,0,"BOOLEAN" },
{"localtime_fault", SQLITE_TESTCTRL_LOCALTIME_FAULT,0,"BOOLEAN" },
{"never_corrupt", SQLITE_TESTCTRL_NEVER_CORRUPT,1, "BOOLEAN" },
{"optimizations", SQLITE_TESTCTRL_OPTIMIZATIONS,0,"DISABLE-MASK" },
#ifdef YYCOVERAGE
{"parser_coverage", SQLITE_TESTCTRL_PARSER_COVERAGE,0,"" },
@@ -10970,10 +10993,20 @@
int opt = (unsigned int)integerValue(azArg[2]);
rc2 = sqlite3_test_control(testctrl, p->db, opt);
isOk = 3;
}
break;
+ case SQLITE_TESTCTRL_JSON_SELFCHECK:
+ if( nArg==2 ){
+ rc2 = -1;
+ isOk = 1;
+ }else{
+ rc2 = booleanValue(azArg[2]);
+ isOk = 3;
+ }
+ sqlite3_test_control(testctrl, &rc2);
+ break;
}
}
if( isOk==0 && iCtrl>=0 ){
oputf("Usage: .testctrl %s %s\n", zCmd,aCtrl[iCtrl].zUsage);
rc = 1;
@@ -11375,10 +11408,92 @@
zSql[nSql+1] = 0;
rc = sqlite3_complete(zSql);
zSql[nSql] = 0;
return rc;
}
+
+/*
+** This function is called after processing each line of SQL in the
+** runOneSqlLine() function. Its purpose is to detect scenarios where
+** defensive mode should be automatically turned off. Specifically, when
+**
+** 1. The first line of input is "PRAGMA foreign_keys=OFF;",
+** 2. The second line of input is "BEGIN TRANSACTION;",
+** 3. The database is empty, and
+** 4. The shell is not running in --safe mode.
+**
+** The implementation uses the ShellState.eRestoreState to maintain state:
+**
+** 0: Have not seen any SQL.
+** 1: Have seen "PRAGMA foreign_keys=OFF;".
+** 2-6: Currently running .dump transaction. If the "2" bit is set,
+** disable DEFENSIVE when done. If "4" is set, disable DQS_DDL.
+** 7: Nothing left to do. This function becomes a no-op.
+*/
+static int doAutoDetectRestore(ShellState *p, const char *zSql){
+ int rc = SQLITE_OK;
+
+ if( p->eRestoreState<7 ){
+ switch( p->eRestoreState ){
+ case 0: {
+ const char *zExpect = "PRAGMA foreign_keys=OFF;";
+ assert( strlen(zExpect)==24 );
+ if( p->bSafeMode==0 && memcmp(zSql, zExpect, 25)==0 ){
+ p->eRestoreState = 1;
+ }else{
+ p->eRestoreState = 7;
+ }
+ break;
+ };
+
+ case 1: {
+ int bIsDump = 0;
+ const char *zExpect = "BEGIN TRANSACTION;";
+ assert( strlen(zExpect)==18 );
+ if( memcmp(zSql, zExpect, 19)==0 ){
+ /* Now check if the database is empty. */
+ const char *zQuery = "SELECT 1 FROM sqlite_schema LIMIT 1";
+ sqlite3_stmt *pStmt = 0;
+
+ bIsDump = 1;
+ shellPrepare(p->db, &rc, zQuery, &pStmt);
+ if( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
+ bIsDump = 0;
+ }
+ shellFinalize(&rc, pStmt);
+ }
+ if( bIsDump && rc==SQLITE_OK ){
+ int bDefense = 0;
+ int bDqsDdl = 0;
+ sqlite3_db_config(p->db, SQLITE_DBCONFIG_DEFENSIVE, -1, &bDefense);
+ sqlite3_db_config(p->db, SQLITE_DBCONFIG_DQS_DDL, -1, &bDqsDdl);
+ sqlite3_db_config(p->db, SQLITE_DBCONFIG_DEFENSIVE, 0, 0);
+ sqlite3_db_config(p->db, SQLITE_DBCONFIG_DQS_DDL, 1, 0);
+ p->eRestoreState = (bDefense ? 2 : 0) + (bDqsDdl ? 4 : 0);
+ }else{
+ p->eRestoreState = 7;
+ }
+ break;
+ }
+
+ default: {
+ if( sqlite3_get_autocommit(p->db) ){
+ if( (p->eRestoreState & 2) ){
+ sqlite3_db_config(p->db, SQLITE_DBCONFIG_DEFENSIVE, 1, 0);
+ }
+ if( (p->eRestoreState & 4) ){
+ sqlite3_db_config(p->db, SQLITE_DBCONFIG_DQS_DDL, 0, 0);
+ }
+ p->eRestoreState = 7;
+ }
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
/*
** Run a single line of SQL. Return the number of errors.
*/
static int runOneSqlLine(ShellState *p, char *zSql, FILE *in, int startline){
@@ -11423,10 +11538,12 @@
sqlite3_snprintf(sizeof(zLineBuf), zLineBuf,
"changes: %lld total_changes: %lld",
sqlite3_changes64(p->db), sqlite3_total_changes64(p->db));
oputf("%s\n", zLineBuf);
}
+
+ if( doAutoDetectRestore(p, zSql) ) return 1;
return 0;
}
static void echo_group_input(ShellState *p, const char *zDo){
if( ShellHasFlag(p, SHFLG_Echo) ) oputf("%s\n", zDo);
@@ -11775,11 +11892,10 @@
" -multiplex enable the multiplexor VFS\n"
#endif
" -newline SEP set output row separator. Default: '\\n'\n"
" -nofollow refuse to open symbolic links to database files\n"
" -nonce STRING set the safe-mode escape nonce\n"
- " -no-rowid-in-view Disable rowid-in-view using sqlite3_config()\n"
" -nullvalue TEXT set text string for NULL values. Default ''\n"
" -pagecache SIZE N use N slots of SZ bytes each for page cache memory\n"
" -pcachetrace trace all page cache operations\n"
" -quote set output mode to 'quote'\n"
" -readonly open the database read-only\n"
@@ -11808,11 +11924,11 @@
if( showDetail ){
eputf("OPTIONS include:\n%s", zOptions);
}else{
eputz("Use the -help option for additional information\n");
}
- exit(1);
+ exit(0);
}
/*
** Internal check: Verify that the SQLite is uninitialized. Print a
** error message if it is initialized.
@@ -11857,18 +11973,18 @@
GetConsoleScreenBufferInfo(out, &defaultScreenInfo);
SetConsoleTextAttribute(out,
FOREGROUND_RED|FOREGROUND_INTENSITY
);
#endif
- oputz(zText);
+ sputz(stdout, zText);
#if !SQLITE_OS_WINRT
SetConsoleTextAttribute(out, defaultScreenInfo.wAttributes);
#endif
}
#else
static void printBold(const char *zText){
- oputf("\033[1m%s\033[0m", zText);
+ sputf(stdout, "\033[1m%s\033[0m", zText);
}
#endif
/*
** Get the argument to an --option. Throw an error and die if no argument
@@ -12058,26 +12174,18 @@
){
(void)cmdline_option_value(argc, argv, ++i);
}else if( cli_strcmp(z,"-init")==0 ){
zInitFile = cmdline_option_value(argc, argv, ++i);
}else if( cli_strcmp(z,"-interactive")==0 ){
- /* Need to check for interactive override here to so that it can
- ** affect console setup (for Windows only) and testing thereof.
- */
- stdin_is_interactive = 1;
}else if( cli_strcmp(z,"-batch")==0 ){
/* Need to check for batch mode here to so we can avoid printing
** informational messages (like from process_sqliterc) before
** we do the actual processing of arguments later in a second pass.
*/
stdin_is_interactive = 0;
}else if( cli_strcmp(z,"-utf8")==0 ){
}else if( cli_strcmp(z,"-no-utf8")==0 ){
- }else if( cli_strcmp(z,"-no-rowid-in-view")==0 ){
- int val = 0;
- sqlite3_config(SQLITE_CONFIG_ROWID_IN_VIEW, &val);
- assert( val==0 );
}else if( cli_strcmp(z,"-heap")==0 ){
#if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5)
const char *zSize;
sqlite3_int64 szHeap;
@@ -12335,23 +12443,24 @@
*/
ShellSetFlag(&data, SHFLG_Backslash);
}else if( cli_strcmp(z,"-bail")==0 ){
/* No-op. The bail_on_error flag should already be set. */
}else if( cli_strcmp(z,"-version")==0 ){
- oputf("%s %s (%d-bit)\n", sqlite3_libversion(), sqlite3_sourceid(),
- 8*(int)sizeof(char*));
+ sputf(stdout, "%s %s (%d-bit)\n",
+ sqlite3_libversion(), sqlite3_sourceid(), 8*(int)sizeof(char*));
return 0;
}else if( cli_strcmp(z,"-interactive")==0 ){
- /* already handled */
+ /* Need to check for interactive override here to so that it can
+ ** affect console setup (for Windows only) and testing thereof.
+ */
+ stdin_is_interactive = 1;
}else if( cli_strcmp(z,"-batch")==0 ){
/* already handled */
}else if( cli_strcmp(z,"-utf8")==0 ){
/* already handled */
}else if( cli_strcmp(z,"-no-utf8")==0 ){
/* already handled */
- }else if( cli_strcmp(z,"-no-rowid-in-view")==0 ){
- /* already handled */
}else if( cli_strcmp(z,"-heap")==0 ){
i++;
}else if( cli_strcmp(z,"-pagecache")==0 ){
i+=2;
}else if( cli_strcmp(z,"-lookaside")==0 ){
@@ -12470,17 +12579,17 @@
#if CIO_WIN_WC_XLATE
# define SHELL_CIO_CHAR_SET (stdout_is_console? " (UTF-16 console I/O)" : "")
#else
# define SHELL_CIO_CHAR_SET ""
#endif
- oputf("SQLite version %s %.19s%s\n" /*extra-version-info*/
+ sputf(stdout, "SQLite version %s %.19s%s\n" /*extra-version-info*/
"Enter \".help\" for usage hints.\n",
sqlite3_libversion(), sqlite3_sourceid(), SHELL_CIO_CHAR_SET);
if( warnInmemoryDb ){
- oputz("Connected to a ");
+ sputz(stdout, "Connected to a ");
printBold("transient in-memory database");
- oputz(".\nUse \".open FILENAME\" to reopen on a"
+ sputz(stdout, ".\nUse \".open FILENAME\" to reopen on a"
" persistent database.\n");
}
zHistory = getenv("SQLITE_HISTORY");
if( zHistory ){
zHistory = strdup(zHistory);
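
The doAutoDetectRestore() logic above relies on sqlite3_db_config() both to read and to change the DEFENSIVE and DQS_DDL settings: a negative integer argument leaves the setting alone and merely reports the current state through the output pointer. A small sketch of that query-then-restore idiom; withDefensiveOff() is a hypothetical helper, not part of the shell.

#include "sqlite3.h"

static void withDefensiveOff(sqlite3 *db){
  int bWasDefensive = 0;
  /* -1 means: do not change the setting, just report it. */
  sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, -1, &bWasDefensive);
  sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, 0, 0);   /* turn it off */

  /* ... replay the dump script here ... */

  if( bWasDefensive ){
    sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, 1, 0); /* restore it */
  }
}
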
Index: src/sqlite.h.in
==================================================================
--- src/sqlite.h.in
+++ src/sqlite.h.in
@@ -2139,26 +2139,10 @@
** size can be adjusted up or down for individual databases using the
** [SQLITE_FCNTL_SIZE_LIMIT] [sqlite3_file_control|file-control]. If this
** configuration setting is never used, then the default maximum is determined
** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
** compile-time option is not set, then the default maximum is 1073741824.
-**
-** [[SQLITE_CONFIG_ROWID_IN_VIEW]]
-** <dt>SQLITE_CONFIG_ROWID_IN_VIEW
-** <dd>The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability
-** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is
-** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability
-** defaults to on. This configuration option queries the current setting or
-** changes the setting to off or on. The argument is a pointer to an integer.
-** If that integer initially holds a value of 1, then the ability for VIEWs to
-** have ROWIDs is activated. If the integer initially holds zero, then the
-** ability is deactivated. Any other initial value for the integer leaves the
-** setting unchanged. After changes, if any, the integer is written with
-** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite
-** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and
-** recommended case) then the integer is always filled with zero, regardless
-** if its initial value.
**
*/
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */
#define SQLITE_CONFIG_SERIALIZED 3 /* nil */
@@ -2186,11 +2170,10 @@
#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */
#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */
#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */
#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */
#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */
-#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */
/*
** CAPI3REF: Database Connection Configuration Options
**
** These constants are the available integer configuration options that
@@ -3969,19 +3952,21 @@
** <li> sqlite3_errmsg16()
** <li> sqlite3_error_offset()
** </ul>
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
-** text that describes the error, as either UTF-8 or UTF-16 respectively.
+** text that describes the error, as either UTF-8 or UTF-16 respectively,
+** or NULL if no error message is available.
** (See how SQLite handles [invalid UTF] for exceptions to this rule.)
** ^(Memory to hold the error message string is managed internally.
** The application does not need to worry about freeing the result.
** However, the error string might be overwritten or deallocated by
** subsequent calls to other SQLite interface functions.)^
**
-** ^The sqlite3_errstr() interface returns the English-language text
-** that describes the [result code], as UTF-8.
+** ^The sqlite3_errstr(E) interface returns the English-language text
+** that describes the [result code] E, as UTF-8, or NULL if E is not an
+** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally
** and must not be freed by the application)^.
**
** ^If the most recent error references a specific token in the input
** SQL, the sqlite3_error_offset() interface returns the byte offset
@@ -8052,13 +8037,15 @@
** can enter.)^ If the same thread tries to enter any mutex other
** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined.
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
+** will always return SQLITE_BUSY. In most cases the SQLite core only uses
+** sqlite3_mutex_try() as an optimization, so this is acceptable
+** behavior. The exceptions are unix builds that set the
+** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working
+** sqlite3_mutex_try() is required.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
** previously entered by the same thread. The behavior
** is undefined if the mutex is not currently entered by the
** calling thread or is not currently allocated.
@@ -8313,10 +8300,11 @@
#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
#define SQLITE_TESTCTRL_PENDING_BYTE 11
#define SQLITE_TESTCTRL_ASSERT 12
#define SQLITE_TESTCTRL_ALWAYS 13
#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
+#define SQLITE_TESTCTRL_JSON_SELFCHECK 14
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
#define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17
#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
Index: src/sqliteInt.h
==================================================================
--- src/sqliteInt.h
+++ src/sqliteInt.h
@@ -325,10 +325,23 @@
#if defined(_MSC_VER) && !defined(SQLITE_OMIT_SEH)
# define SQLITE_USE_SEH 1
#else
# undef SQLITE_USE_SEH
#endif
+
+/*
+** Enable SQLITE_DIRECT_OVERFLOW_READ, unless the build explicitly
+** disables it using -DSQLITE_DIRECT_OVERFLOW_READ=0
+*/
+#if defined(SQLITE_DIRECT_OVERFLOW_READ) && SQLITE_DIRECT_OVERFLOW_READ+1==1
+ /* Disable if -DSQLITE_DIRECT_OVERFLOW_READ=0 */
+# undef SQLITE_DIRECT_OVERFLOW_READ
+#else
+ /* In all other cases, enable */
+# define SQLITE_DIRECT_OVERFLOW_READ 1
+#endif
+
/*
** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
** 0 means mutexes are permanently disable and the library is never
** threadsafe. 1 means the library is serialized which is the highest
@@ -871,11 +884,11 @@
#ifndef SQLITE_PTRSIZE
# if defined(__SIZEOF_POINTER__)
# define SQLITE_PTRSIZE __SIZEOF_POINTER__
# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
defined(_M_ARM) || defined(__arm__) || defined(__x86) || \
- (defined(__APPLE__) && defined(__POWERPC__)) || \
+ (defined(__APPLE__) && defined(__ppc__)) || \
(defined(__TOS_AIX__) && !defined(__64BIT__))
# define SQLITE_PTRSIZE 4
# else
# define SQLITE_PTRSIZE 8
# endif
@@ -1581,10 +1594,14 @@
struct FuncDefHash {
FuncDef *a[SQLITE_FUNC_HASH_SZ]; /* Hash table for functions */
};
#define SQLITE_FUNC_HASH(C,L) (((C)+(L))%SQLITE_FUNC_HASH_SZ)
+#if defined(SQLITE_USER_AUTHENTICATION)
+# warning "The SQLITE_USER_AUTHENTICATION extension is deprecated. \
+ See ext/userauth/user-auth.txt for details."
+#endif
#ifdef SQLITE_USER_AUTHENTICATION
/*
** Information held in the "sqlite3" database connection object and used
** to manage user authentication.
*/
@@ -2112,15 +2129,15 @@
{nArg, SQLITE_FUNC_BUILTIN|SQLITE_UTF8|SQLITE_DIRECTONLY|SQLITE_FUNC_UNSAFE, \
SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
#define MFUNCTION(zName, nArg, xPtr, xFunc) \
{nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \
xPtr, 0, xFunc, 0, 0, 0, #zName, {0} }
-#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, iArg, xFunc) \
+#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, bJsonB, iArg, xFunc) \
{nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\
SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\
((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+ SQLITE_INT_TO_PTR(iArg|((bJsonB)*JSON_BLOB)),0,xFunc,0, 0, 0, #zName, {0} }
#define INLINE_FUNC(zName, nArg, iArg, mFlags) \
{nArg, SQLITE_FUNC_BUILTIN|\
SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \
SQLITE_INT_TO_PTR(iArg), 0, noopFunc, 0, 0, 0, #zName, {0} }
#define TEST_FUNC(zName, nArg, iArg, mFlags) \
@@ -2514,19 +2531,10 @@
/* Does the table have a rowid */
#define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0)
#define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0)
-/* Macro is true if the SQLITE_ALLOW_ROWID_IN_VIEW (mis-)feature is
-** available. By default, this macro is false
-*/
-#ifndef SQLITE_ALLOW_ROWID_IN_VIEW
-# define ViewCanHaveRowid 0
-#else
-# define ViewCanHaveRowid (sqlite3Config.mNoVisibleRowid==0)
-#endif
-
/*
** Each foreign key constraint is an instance of the following structure.
**
** A foreign key is associated with two tables. The "from" table is
** the table that contains the REFERENCES clause that creates the foreign
@@ -2760,10 +2768,11 @@
unsigned uniqNotNull:1; /* True if UNIQUE and NOT NULL for all columns */
unsigned isResized:1; /* True if resizeIndexObject() has been called */
unsigned isCovering:1; /* True if this is a covering index */
unsigned noSkipScan:1; /* Do not try to use skip-scan if true */
unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */
+ unsigned bLowQual:1; /* sqlite_stat1 says this is a low-quality index */
unsigned bNoQuery:1; /* Do not use this index to optimize queries */
unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */
unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */
unsigned bHasExpr:1; /* Index contains an expression, either a literal
** expression, or a reference to a VIRTUAL column */
@@ -2873,10 +2882,11 @@
int iDistinct; /* Ephemeral table used to enforce DISTINCT */
int iDistAddr; /* Address of OP_OpenEphemeral */
int iOBTab; /* Ephemeral table to implement ORDER BY */
u8 bOBPayload; /* iOBTab has payload columns separate from key */
u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */
+ u8 bUseSubtype; /* Transfer subtype info through sorter */
} *aFunc;
int nFunc; /* Number of entries in aFunc[] */
u32 selId; /* Select to which this AggInfo belongs */
#ifdef SQLITE_DEBUG
Select *pSelect; /* SELECT statement that this AggInfo supports */
@@ -3406,10 +3416,11 @@
} uNC;
NameContext *pNext; /* Next outer name context. NULL for outermost */
int nRef; /* Number of names resolved by this context */
int nNcErr; /* Number of errors encountered while resolving names */
int ncFlags; /* Zero or more NC_* flags defined below */
+ u32 nNestedSelect; /* Number of nested selects using this NC */
Select *pWinSelect; /* SELECT statement for any window functions */
};
/*
** Allowed values for the NameContext, ncFlags field.
@@ -3462,11 +3473,10 @@
Expr *pUpsertTargetWhere; /* WHERE clause for partial index targets */
ExprList *pUpsertSet; /* The SET clause from an ON CONFLICT UPDATE */
Expr *pUpsertWhere; /* WHERE clause for the ON CONFLICT UPDATE */
Upsert *pNextUpsert; /* Next ON CONFLICT clause in the list */
u8 isDoUpdate; /* True for DO UPDATE. False for DO NOTHING */
- u8 isDup; /* True if 2nd or later with same pUpsertIdx */
/* Above this point is the parse tree for the ON CONFLICT clauses.
** The next group of fields stores intermediate data. */
void *pToFree; /* Free memory when deleting the Upsert object */
/* All fields above are owned by the Upsert object and must be freed
** when the Upsert is destroyed. The fields below are used to transfer
@@ -4123,10 +4133,13 @@
** 2. Use sqlite3RCStrUnref() to free an RCStr string rather than
** sqlite3_free()
**
** 3. Make a (read-only) copy of a read-only RCStr string using
** sqlite3RCStrRef().
+**
+** "String" is in the name, but an RCStr object can also be used to hold
+** binary data.
*/
struct RCStr {
u64 nRCRef; /* Number of references */
/* Total structure size should be a multiple of 8 bytes for alignment */
};
@@ -4181,10 +4194,13 @@
u8 bOpenUri; /* True to interpret filenames as URIs */
u8 bUseCis; /* Use covering indices for full-scans */
u8 bSmallMalloc; /* Avoid large memory allocations if true */
u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */
u8 bUseLongDouble; /* Make use of long double */
+#ifdef SQLITE_DEBUG
+ u8 bJsonSelfcheck; /* Double-check JSON parsing */
+#endif
int mxStrlen; /* Maximum string length */
int neverCorrupt; /* Database is always well-formed */
int szLookaside; /* Default lookaside buffer size */
int nLookaside; /* Default lookaside buffer count */
int nStmtSpill; /* Stmt-journal spill-to-disk threshold */
@@ -4228,15 +4244,10 @@
sqlite3_int64 mxMemdbSize; /* Default max memdb size */
#endif
#ifndef SQLITE_UNTESTABLE
int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */
#endif
-#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
- u32 mNoVisibleRowid; /* TF_NoVisibleRowid if the ROWID_IN_VIEW
- ** feature is disabled. 0 if rowids can
- ** occur in views. */
-#endif
int bLocaltimeFault; /* True to fail localtime() calls */
int (*xAltLocaltime)(const void*,void*); /* Alternative localtime() routine */
int iOnceResetThreshold; /* When to reset OP_Once counters */
u32 szSorterRef; /* Min size in bytes to use sorter-refs */
unsigned int iPrngSeed; /* Alternative fixed seed for the PRNG */
@@ -4688,17 +4699,14 @@
#ifndef SQLITE_OMIT_FLOATING_POINT
# define EXP754 (((u64)0x7ff)<<52)
# define MAN754 ((((u64)1)<<52)-1)
# define IsNaN(X) (((X)&EXP754)==EXP754 && ((X)&MAN754)!=0)
-# define IsOvfl(X) (((X)&EXP754)==EXP754)
int sqlite3IsNaN(double);
- int sqlite3IsOverflow(double);
#else
-# define IsNaN(X) 0
-# define sqlite3IsNaN(X) 0
-# define sqlite3IsOVerflow(X) 0
+# define IsNaN(X) 0
+# define sqlite3IsNaN(X) 0
#endif
/*
** An instance of the following structure holds information about SQL
** functions arguments that are the parameters to the printf() function.
@@ -4815,19 +4823,21 @@
void sqlite3ExprAddFunctionOrderBy(Parse*,Expr*,ExprList*);
void sqlite3ExprOrderByAggregateError(Parse*,Expr*);
void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*);
void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32);
void sqlite3ExprDelete(sqlite3*, Expr*);
+void sqlite3ExprDeleteGeneric(sqlite3*,void*);
void sqlite3ExprDeferredDelete(Parse*, Expr*);
void sqlite3ExprUnmapAndDelete(Parse*, Expr*);
ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*);
Select *sqlite3ExprListToValues(Parse*, int, ExprList*);
void sqlite3ExprListSetSortOrder(ExprList*,int,int);
void sqlite3ExprListSetName(Parse*,ExprList*,const Token*,int);
void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*);
void sqlite3ExprListDelete(sqlite3*, ExprList*);
+void sqlite3ExprListDeleteGeneric(sqlite3*,void*);
u32 sqlite3ExprListFlags(const ExprList*);
int sqlite3IndexHasDuplicateRootPage(Index*);
int sqlite3Init(sqlite3*, char**);
int sqlite3InitCallback(void*, int, char**, char**);
int sqlite3InitOne(sqlite3*, int, char**, u32);
@@ -4914,10 +4924,11 @@
int sqlite3DbMaskAllZero(yDbMask);
#endif
void sqlite3DropTable(Parse*, SrcList*, int, int);
void sqlite3CodeDropTable(Parse*, Table*, int, int);
void sqlite3DeleteTable(sqlite3*, Table*);
+void sqlite3DeleteTableGeneric(sqlite3*, void*);
void sqlite3FreeIndex(sqlite3*, Index*);
#ifndef SQLITE_OMIT_AUTOINCREMENT
void sqlite3AutoincrementBegin(Parse *pParse);
void sqlite3AutoincrementEnd(Parse *pParse);
#else
@@ -4950,10 +4961,11 @@
void sqlite3DropIndex(Parse*, SrcList*, int);
int sqlite3Select(Parse*, Select*, SelectDest*);
Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*,
Expr*,ExprList*,u32,Expr*);
void sqlite3SelectDelete(sqlite3*, Select*);
+void sqlite3SelectDeleteGeneric(sqlite3*,void*);
Table *sqlite3SrcListLookup(Parse*, SrcList*);
int sqlite3IsReadOnly(Parse*, Table*, Trigger*);
void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int);
#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY)
Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,char*);
@@ -5176,10 +5188,11 @@
#ifndef SQLITE_OMIT_UTF16
int sqlite3Utf16ByteLen(const void *pData, int nChar);
#endif
int sqlite3Utf8CharLen(const char *pData, int nByte);
u32 sqlite3Utf8Read(const u8**);
+int sqlite3Utf8ReadLimited(const u8*, int, u32*);
LogEst sqlite3LogEst(u64);
LogEst sqlite3LogEstAdd(LogEst,LogEst);
LogEst sqlite3LogEstFromDouble(double);
u64 sqlite3LogEstToInt(LogEst);
VList *sqlite3VListAdd(sqlite3*,VList*,const char*,int,int);
@@ -5522,10 +5535,11 @@
#ifndef SQLITE_OMIT_CTE
Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8);
void sqlite3CteDelete(sqlite3*,Cte*);
With *sqlite3WithAdd(Parse*,With*,Cte*);
void sqlite3WithDelete(sqlite3*,With*);
+ void sqlite3WithDeleteGeneric(sqlite3*,void*);
With *sqlite3WithPush(Parse*, With*, u8);
#else
# define sqlite3CteNew(P,T,E,S) ((void*)0)
# define sqlite3CteDelete(D,C)
# define sqlite3CteWithAdd(P,W,C) ((void*)0)
@@ -5534,11 +5548,11 @@
#endif
#ifndef SQLITE_OMIT_UPSERT
Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*,Upsert*);
void sqlite3UpsertDelete(sqlite3*,Upsert*);
Upsert *sqlite3UpsertDup(sqlite3*,Upsert*);
- int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*,Upsert*);
+ int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*);
void sqlite3UpsertDoUpdate(Parse*,Upsert*,Table*,Index*,int);
Upsert *sqlite3UpsertOfIndex(Upsert*,Index*);
int sqlite3UpsertNextIsIPK(Upsert*);
#else
#define sqlite3UpsertNew(u,v,w,x,y,z) ((Upsert*)0)
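
The SQLITE_DIRECT_OVERFLOW_READ block added above uses the usual "define to 0 to opt out" preprocessor idiom: the option stays on unless the build passes -DNAME=0. A generic sketch of the same test; FEATURE_FOO is a made-up name.

#if defined(FEATURE_FOO) && FEATURE_FOO+1==1
  /* -DFEATURE_FOO=0 (or an empty definition) explicitly disables it */
# undef FEATURE_FOO
#else
  /* In all other cases, enable the feature */
# define FEATURE_FOO 1
#endif
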
Index: src/sqliteLimit.h
==================================================================
--- src/sqliteLimit.h
+++ src/sqliteLimit.h
@@ -185,11 +185,11 @@
** This is really just the default value for the max_page_count pragma.
** This value can be lowered (or raised) at run-time using that the
** max_page_count macro.
*/
#ifndef SQLITE_MAX_PAGE_COUNT
-# define SQLITE_MAX_PAGE_COUNT 1073741823
+# define SQLITE_MAX_PAGE_COUNT 0xfffffffe /* 4294967294 */
#endif
/*
** Maximum length (in bytes) of the pattern in a LIKE or GLOB
** operator.
Index: src/status.c
==================================================================
--- src/status.c
+++ src/status.c
@@ -360,11 +360,11 @@
/* no break */ deliberate_fall_through
case SQLITE_DBSTATUS_CACHE_HIT:
case SQLITE_DBSTATUS_CACHE_MISS:
case SQLITE_DBSTATUS_CACHE_WRITE:{
int i;
- int nRet = 0;
+ u64 nRet = 0;
assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 );
assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 );
    for(i=0; i<db->nDb; i++){
if( db->aDb[i].pBt ){
@@ -373,11 +373,11 @@
}
}
*pHighwater = 0; /* IMP: R-42420-56072 */
/* IMP: R-54100-20147 */
/* IMP: R-29431-39229 */
- *pCurrent = nRet;
+ *pCurrent = (int)nRet & 0x7fffffff;
break;
}
/* Set *pCurrent to non-zero if there are unresolved deferred foreign
** key constraints. Set *pCurrent to zero if all foreign key constraints
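
The pager and status changes above widen the cache hit/miss/write counters to unsigned 64-bit values and then clamp them back into the int range exposed by the existing sqlite3_db_status() interface, so a long-running process cannot report a negative count. A one-line sketch of the clamp; clampToInt() is a hypothetical helper.

#include <stdint.h>

static int clampToInt(uint64_t nTotal){
  /* Masking to 31 bits keeps the reported value non-negative even after
  ** the underlying counter has wrapped or exceeded INT_MAX. */
  return (int)(nTotal & 0x7fffffff);
}
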
Index: src/test1.c
==================================================================
--- src/test1.c
+++ src/test1.c
@@ -988,10 +988,43 @@
){
sqlite3_int64 v = sqlite3_value_int64(argv[0]);
sqlite3_result_int64(context, v);
sqlite3_test_control(SQLITE_TESTCTRL_RESULT_INTREAL, context);
}
+
+/*
+** These SQL functions attempt to return a value (their first argument)
+** that has been modified to have multiple datatypes. For example both
+** TEXT and INTEGER.
+*/
+static void addTextTypeFunction(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ (void)sqlite3_value_text(argv[0]);
+ (void)argc;
+ sqlite3_result_value(context, argv[0]);
+}
+static void addIntTypeFunction(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ (void)sqlite3_value_int64(argv[0]);
+ (void)argc;
+ sqlite3_result_value(context, argv[0]);
+}
+static void addRealTypeFunction(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ (void)sqlite3_value_double(argv[0]);
+ (void)argc;
+ sqlite3_result_value(context, argv[0]);
+}
/*
** SQL function: strtod(X)
**
** Use the C-library strtod() function to convert string X into a double.
@@ -1100,10 +1133,26 @@
*/
if( rc==SQLITE_OK ){
rc = sqlite3_create_function(db, "intreal", 1, SQLITE_UTF8,
0, intrealFunction, 0, 0);
}
+
+ /* The add_text_type(), add_int_type(), and add_real_type() functions
+ ** attempt to return a value that has multiple datatypes.
+ */
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(db, "add_text_type", 1, SQLITE_UTF8,
+ 0, addTextTypeFunction, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(db, "add_int_type", 1, SQLITE_UTF8,
+ 0, addIntTypeFunction, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(db, "add_real_type", 1, SQLITE_UTF8,
+ 0, addRealTypeFunction, 0, 0);
+ }
/* Functions strtod() and dtostr() work as in the shell. These routines
** use the standard C library to convert between floating point and
** text. This is used to compare SQLite's internal conversion routines
** against the standard library conversion routines.
@@ -8118,10 +8167,11 @@
extern int sqlite3_percentile_init(sqlite3*,char**,const sqlite3_api_routines*);
#ifndef SQLITE_OMIT_VIRTUALTABLE
extern int sqlite3_prefixes_init(sqlite3*,char**,const sqlite3_api_routines*);
#endif
extern int sqlite3_qpvtab_init(sqlite3*,char**,const sqlite3_api_routines*);
+ extern int sqlite3_randomjson_init(sqlite3*,char**,const sqlite3_api_routines*);
extern int sqlite3_regexp_init(sqlite3*,char**,const sqlite3_api_routines*);
extern int sqlite3_remember_init(sqlite3*,char**,const sqlite3_api_routines*);
extern int sqlite3_series_init(sqlite3*,char**,const sqlite3_api_routines*);
extern int sqlite3_spellfix_init(sqlite3*,char**,const sqlite3_api_routines*);
extern int sqlite3_totype_init(sqlite3*,char**,const sqlite3_api_routines*);
@@ -8150,10 +8200,11 @@
{ "percentile", sqlite3_percentile_init },
#ifndef SQLITE_OMIT_VIRTUALTABLE
{ "prefixes", sqlite3_prefixes_init },
#endif
{ "qpvtab", sqlite3_qpvtab_init },
+ { "randomjson", sqlite3_randomjson_init },
{ "regexp", sqlite3_regexp_init },
{ "remember", sqlite3_remember_init },
{ "series", sqlite3_series_init },
{ "spellfix", sqlite3_spellfix_init },
{ "totype", sqlite3_totype_init },
Index: src/test_config.c
==================================================================
--- src/test_config.c
+++ src/test_config.c
@@ -57,18 +57,10 @@
Tcl_SetVar2(interp, "sqlite_options", "rowid32", "1", TCL_GLOBAL_ONLY);
#else
Tcl_SetVar2(interp, "sqlite_options", "rowid32", "0", TCL_GLOBAL_ONLY);
#endif
-#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
- Tcl_SetVar2(
- interp, "sqlite_options", "allow_rowid_in_view", "1", TCL_GLOBAL_ONLY);
-#else
- Tcl_SetVar2(
- interp, "sqlite_options", "allow_rowid_in_view", "0", TCL_GLOBAL_ONLY);
-#endif
-
#ifdef SQLITE_CASE_SENSITIVE_LIKE
Tcl_SetVar2(interp, "sqlite_options","casesensitivelike","1",TCL_GLOBAL_ONLY);
#else
Tcl_SetVar2(interp, "sqlite_options","casesensitivelike","0",TCL_GLOBAL_ONLY);
#endif
Index: src/test_func.c
==================================================================
--- src/test_func.c
+++ src/test_func.c
@@ -692,11 +692,12 @@
{ "real2hex", 1, SQLITE_UTF8, real2hex},
{ "test_decode", 1, SQLITE_UTF8, test_decode},
{ "test_extract", 2, SQLITE_UTF8, test_extract},
{ "test_zeroblob", 1, SQLITE_UTF8|SQLITE_DETERMINISTIC, test_zeroblob},
{ "test_getsubtype", 1, SQLITE_UTF8, test_getsubtype},
- { "test_setsubtype", 2, SQLITE_UTF8, test_setsubtype},
+ { "test_setsubtype", 2, SQLITE_UTF8|SQLITE_RESULT_SUBTYPE,
+ test_setsubtype},
{ "test_frombind", -1, SQLITE_UTF8, test_frombind},
};
int i;
for(i=0; ipLeft;
assert( ExprUseXList(pExpr) );
assert( pExpr->x.pList->nExpr==2 );
pY = pExpr->x.pList->a[0].pExpr;
pZ = pExpr->x.pList->a[1].pExpr;
- sqlite3TreeViewLine(pView, "BETWEEN");
+ sqlite3TreeViewLine(pView, "BETWEEN%s", zFlgs);
sqlite3TreeViewExpr(pView, pX, 1);
sqlite3TreeViewExpr(pView, pY, 1);
sqlite3TreeViewExpr(pView, pZ, 0);
break;
}
Index: src/upsert.c
==================================================================
--- src/upsert.c
+++ src/upsert.c
@@ -88,12 +88,11 @@
** is wrong.
*/
int sqlite3UpsertAnalyzeTarget(
Parse *pParse, /* The parsing context */
SrcList *pTabList, /* Table into which we are inserting */
- Upsert *pUpsert, /* The ON CONFLICT clauses */
- Upsert *pAll /* Complete list of all ON CONFLICT clauses */
+ Upsert *pUpsert /* The ON CONFLICT clauses */
){
Table *pTab; /* That table into which we are inserting */
int rc; /* Result code */
int iCursor; /* Cursor used by pTab */
Index *pIdx; /* One of the indexes of pTab */
@@ -192,18 +191,10 @@
/* Column ii of the index did not match any term of the conflict target.
** Continue the search with the next index. */
continue;
}
pUpsert->pUpsertIdx = pIdx;
- if( sqlite3UpsertOfIndex(pAll,pIdx)!=pUpsert ){
- /* Really this should be an error. The isDup ON CONFLICT clause will
- ** never fire. But this problem was not discovered until three years
- ** after multi-CONFLICT upsert was added, and so we silently ignore
- ** the problem to prevent breaking applications that might actually
- ** have redundant ON CONFLICT clauses. */
- pUpsert->isDup = 1;
- }
break;
}
if( pUpsert->pUpsertIdx==0 ){
char zWhich[16];
if( nClause==0 && pUpsert->pNextUpsert==0 ){
@@ -226,17 +217,13 @@
*/
int sqlite3UpsertNextIsIPK(Upsert *pUpsert){
Upsert *pNext;
if( NEVER(pUpsert==0) ) return 0;
pNext = pUpsert->pNextUpsert;
- while( 1 /*exit-by-return*/ ){
- if( pNext==0 ) return 1;
- if( pNext->pUpsertTarget==0 ) return 1;
- if( pNext->pUpsertIdx==0 ) return 1;
- if( !pNext->isDup ) return 0;
- pNext = pNext->pNextUpsert;
- }
+ if( pNext==0 ) return 1;
+ if( pNext->pUpsertTarget==0 ) return 1;
+ if( pNext->pUpsertIdx==0 ) return 1;
return 0;
}
/*
** Given the list of ON CONFLICT clauses described by pUpsert, and
Index: src/utf.c
==================================================================
--- src/utf.c
+++ src/utf.c
@@ -162,11 +162,42 @@
|| (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; }
}
return c;
}
-
+/*
+** Read a single UTF8 character out of buffer z[], but reading no
+** more than n characters from the buffer. z[] is not zero-terminated.
+**
+** Return the number of bytes used to construct the character.
+**
+** Invalid UTF8 might generate a strange result. No effort is made
+** to detect invalid UTF8.
+**
+** At most 4 bytes will be read out of z[]. The return value will always
+** be between 1 and 4.
+*/
+int sqlite3Utf8ReadLimited(
+ const u8 *z,
+ int n,
+ u32 *piOut
+){
+ u32 c;
+ int i = 1;
+ assert( n>0 );
+ c = z[0];
+ if( c>=0xc0 ){
+ c = sqlite3Utf8Trans1[c-0xc0];
+ if( n>4 ) n = 4;
+ while( ip1];
memAboutToChange(p, pIn1);
sqlite3VdbeMemIntegerify(pIn1);
- pIn1->u.i += pOp->p2;
+ *(u64*)&pIn1->u.i += (u64)pOp->p2;
break;
}
/* Opcode: MustBeInt P1 P2 * * *
**
@@ -2299,20 +2299,24 @@
if( (flags3 & (MEM_Int|MEM_IntReal|MEM_Real|MEM_Str))==MEM_Str ){
applyNumericAffinity(pIn3,0);
}
}
}else if( affinity==SQLITE_AFF_TEXT && ((flags1 | flags3) & MEM_Str)!=0 ){
- if( (flags1 & MEM_Str)==0 && (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
+ if( (flags1 & MEM_Str)!=0 ){
+ pIn1->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal);
+ }else if( (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
testcase( pIn1->flags & MEM_Int );
testcase( pIn1->flags & MEM_Real );
testcase( pIn1->flags & MEM_IntReal );
sqlite3VdbeMemStringify(pIn1, encoding, 1);
testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) );
flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask);
if( NEVER(pIn1==pIn3) ) flags3 = flags1 | MEM_Str;
}
- if( (flags3 & MEM_Str)==0 && (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
+ if( (flags3 & MEM_Str)!=0 ){
+ pIn3->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal);
+ }else if( (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
testcase( pIn3->flags & MEM_Int );
testcase( pIn3->flags & MEM_Real );
testcase( pIn3->flags & MEM_IntReal );
sqlite3VdbeMemStringify(pIn3, encoding, 1);
testcase( (flags3&MEM_Dyn) != (pIn3->flags&MEM_Dyn) );
@@ -3652,15 +3656,20 @@
len = sqlite3SmallTypeSizes[serial_type];
assert( len>=1 && len<=8 && len!=5 && len!=7 );
switch( len ){
default: zPayload[7] = (u8)(v&0xff); v >>= 8;
zPayload[6] = (u8)(v&0xff); v >>= 8;
+ /* no break */ deliberate_fall_through
case 6: zPayload[5] = (u8)(v&0xff); v >>= 8;
zPayload[4] = (u8)(v&0xff); v >>= 8;
+ /* no break */ deliberate_fall_through
case 4: zPayload[3] = (u8)(v&0xff); v >>= 8;
+ /* no break */ deliberate_fall_through
case 3: zPayload[2] = (u8)(v&0xff); v >>= 8;
+ /* no break */ deliberate_fall_through
case 2: zPayload[1] = (u8)(v&0xff); v >>= 8;
+ /* no break */ deliberate_fall_through
case 1: zPayload[0] = (u8)(v&0xff);
}
zPayload += len;
}
}else if( serial_type<0x80 ){
@@ -7090,12 +7099,12 @@
** register P1 the text of an error message describing any problems.
** If no problems are found, store a NULL in register P1.
**
** The register P3 contains one less than the maximum number of allowed errors.
** At most reg(P3) errors will be reported.
-** In other words, the analysis stops as soon as reg(P1) errors are
-** seen. Reg(P1) is updated with the number of errors remaining.
+** In other words, the analysis stops as soon as reg(P3) errors are
+** seen. Reg(P3) is updated with the number of errors remaining.
**
** The root page numbers of all tables in the database are integers
** stored in P4_INTARRAY argument.
**
** If P5 is not zero, the check is done on the auxiliary database
@@ -8319,10 +8328,11 @@
case OP_VColumn: { /* ncycle */
sqlite3_vtab *pVtab;
const sqlite3_module *pModule;
Mem *pDest;
sqlite3_context sContext;
+ FuncDef nullFunc;
VdbeCursor *pCur = p->apCsr[pOp->p1];
assert( pCur!=0 );
assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
pDest = &aMem[pOp->p3];
@@ -8336,10 +8346,13 @@
pModule = pVtab->pModule;
assert( pModule->xColumn );
memset(&sContext, 0, sizeof(sContext));
sContext.pOut = pDest;
sContext.enc = encoding;
+ nullFunc.pUserData = 0;
+ nullFunc.funcFlags = SQLITE_RESULT_SUBTYPE;
+ sContext.pFunc = &nullFunc;
assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 );
if( pOp->p5 & OPFLAG_NOCHNG ){
sqlite3VdbeMemSetNull(pDest);
pDest->flags = MEM_Null|MEM_Zero;
pDest->u.nZero = 0;
@@ -8667,10 +8680,46 @@
case OP_ClrSubtype: { /* in1 */
pIn1 = &aMem[pOp->p1];
pIn1->flags &= ~MEM_Subtype;
break;
}
+
+/* Opcode: GetSubtype P1 P2 * * *
+** Synopsis: r[P2] = r[P1].subtype
+**
+** Extract the subtype value from register P1 and write that subtype
+** into register P2. If P1 has no subtype, then P2 gets a NULL.
+*/
+case OP_GetSubtype: { /* in1 out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ if( pIn1->flags & MEM_Subtype ){
+ sqlite3VdbeMemSetInt64(pOut, pIn1->eSubtype);
+ }else{
+ sqlite3VdbeMemSetNull(pOut);
+ }
+ break;
+}
+
+/* Opcode: SetSubtype P1 P2 * * *
+** Synopsis: r[P2].subtype = r[P1]
+**
+** Set the subtype value of register P2 to the integer from register P1.
+** If P1 is NULL, clear the subtype from p2.
+*/
+case OP_SetSubtype: { /* in1 out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ if( pIn1->flags & MEM_Null ){
+ pOut->flags &= ~MEM_Subtype;
+ }else{
+ assert( pIn1->flags & MEM_Int );
+ pOut->flags |= MEM_Subtype;
+ pOut->eSubtype = (u8)(pIn1->u.i & 0xff);
+ }
+ break;
+}
/* Opcode: FilterAdd P1 * P3 P4 *
** Synopsis: filter(P1) += key(P3@P4)
**
** Compute a hash on the P4 registers starting with r[P3] and
Index: src/vdbeapi.c
==================================================================
--- src/vdbeapi.c
+++ src/vdbeapi.c
@@ -150,11 +150,19 @@
int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
int i;
int rc = SQLITE_OK;
Vdbe *p = (Vdbe*)pStmt;
#if SQLITE_THREADSAFE
- sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex;
+ sqlite3_mutex *mutex;
+#endif
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pStmt==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+#if SQLITE_THREADSAFE
+ mutex = p->db->mutex;
#endif
sqlite3_mutex_enter(mutex);
for(i=0; i<p->nVar; i++){
sqlite3VdbeMemRelease(&p->aVar[i]);
p->aVar[i].flags = MEM_Null;
@@ -940,13 +948,12 @@
** pointer to it.
*/
void *sqlite3_user_data(sqlite3_context *p){
#ifdef SQLITE_ENABLE_API_ARMOR
if( p==0 ) return 0;
-#else
- assert( p && p->pFunc );
#endif
+ assert( p && p->pFunc );
return p->pFunc->pUserData;
}
/*
** Extract the user data from a sqlite3_context structure and return a
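
A minimal sketch, assuming an SQLITE_ENABLE_API_ARMOR build, of the behavior the sqlite3_clear_bindings() change above adds: a NULL statement handle is now reported as misuse instead of being dereferenced (in unarmored builds the same call remains undefined behavior):

  #include <sqlite3.h>
  #include <stdio.h>

  /* Sketch: with SQLITE_ENABLE_API_ARMOR defined, the patched
  ** sqlite3_clear_bindings() returns SQLITE_MISUSE for a NULL handle. */
  static void demo_clear_bindings_armor(void){
    int rc = sqlite3_clear_bindings(0);
    if( rc==SQLITE_MISUSE ){
      printf("NULL statement rejected as misuse\n");
    }
  }
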
Index: src/vdbeaux.c
==================================================================
--- src/vdbeaux.c
+++ src/vdbeaux.c
@@ -1529,11 +1529,11 @@
Op *pOp,
const char *zP4,
int n
){
if( pOp->p4type ){
- freeP4(p->db, pOp->p4type, pOp->p4.p);
+ assert( pOp->p4type > P4_FREE_IF_LE );
pOp->p4type = 0;
pOp->p4.p = 0;
}
if( n<0 ){
sqlite3VdbeChangeP4(p, (int)(pOp - p->aOp), zP4, n);
@@ -4058,27 +4058,10 @@
swapMixedEndianFloat(x);
memcpy(&pMem->u.r, &x, sizeof(x));
pMem->flags = IsNaN(x) ? MEM_Null : MEM_Real;
}
}
-static int serialGet7(
- const unsigned char *buf, /* Buffer to deserialize from */
- Mem *pMem /* Memory cell to write value into */
-){
- u64 x = FOUR_BYTE_UINT(buf);
- u32 y = FOUR_BYTE_UINT(buf+4);
- x = (x<<32) + y;
- assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 );
- swapMixedEndianFloat(x);
- memcpy(&pMem->u.r, &x, sizeof(x));
- if( IsNaN(x) ){
- pMem->flags = MEM_Null;
- return 1;
- }
- pMem->flags = MEM_Real;
- return 0;
-}
void sqlite3VdbeSerialGet(
const unsigned char *buf, /* Buffer to deserialize from */
u32 serial_type, /* Serial type to deserialize */
Mem *pMem /* Memory cell to write value into */
){
@@ -4754,11 +4737,11 @@
if( serial_type>=10 ){
rc = serial_type==10 ? -1 : +1;
}else if( serial_type==0 ){
rc = -1;
}else if( serial_type==7 ){
- serialGet7(&aKey1[d1], &mem1);
+ sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
rc = -sqlite3IntFloatCompare(pRhs->u.i, mem1.u.r);
}else{
i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]);
i64 rhs = pRhs->u.i;
if( lhsu.r ){
+ if( mem1.u.r<pRhs->u.r ){
rc = -1;
}else if( mem1.u.r>pRhs->u.r ){
rc = +1;
- }else{
- assert( rc==0 );
}
}else{
- sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
rc = sqlite3IntFloatCompare(mem1.u.i, pRhs->u.r);
}
}
}
@@ -4860,18 +4839,11 @@
}
/* RHS is null */
else{
serial_type = aKey1[idx1];
- if( serial_type==0
- || serial_type==10
- || (serial_type==7 && serialGet7(&aKey1[d1], &mem1)!=0)
- ){
- assert( rc==0 );
- }else{
- rc = 1;
- }
+ rc = (serial_type!=0 && serial_type!=10);
}
if( rc!=0 ){
int sortFlags = pPKey2->pKeyInfo->aSortFlags[i];
if( sortFlags ){
Index: src/vdbemem.c
==================================================================
--- src/vdbemem.c
+++ src/vdbemem.c
@@ -1629,32 +1629,52 @@
}
return rc;
}
/* Handle negative integers in a single step. This is needed in the
- ** case when the value is -9223372036854775808.
- */
- if( op==TK_UMINUS
- && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){
- pExpr = pExpr->pLeft;
- op = pExpr->op;
- negInt = -1;
- zNeg = "-";
+ ** case when the value is -9223372036854775808. Except - do not do this
+ ** for hexadecimal literals. */
+ if( op==TK_UMINUS ){
+ Expr *pLeft = pExpr->pLeft;
+ if( (pLeft->op==TK_INTEGER || pLeft->op==TK_FLOAT) ){
+ if( ExprHasProperty(pLeft, EP_IntValue)
+ || pLeft->u.zToken[0]!='0' || (pLeft->u.zToken[1] & ~0x20)!='X'
+ ){
+ pExpr = pLeft;
+ op = pExpr->op;
+ negInt = -1;
+ zNeg = "-";
+ }
+ }
}
if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){
pVal = valueNew(db, pCtx);
if( pVal==0 ) goto no_mem;
if( ExprHasProperty(pExpr, EP_IntValue) ){
sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt);
}else{
- zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken);
- if( zVal==0 ) goto no_mem;
- sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC);
+ i64 iVal;
+ if( op==TK_INTEGER && 0==sqlite3DecOrHexToI64(pExpr->u.zToken, &iVal) ){
+ sqlite3VdbeMemSetInt64(pVal, iVal*negInt);
+ }else{
+ zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken);
+ if( zVal==0 ) goto no_mem;
+ sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC);
+ }
}
- if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){
- sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8);
+ if( affinity==SQLITE_AFF_BLOB ){
+ if( op==TK_FLOAT ){
+ assert( pVal && pVal->z && pVal->flags==(MEM_Str|MEM_Term) );
+ sqlite3AtoF(pVal->z, &pVal->u.r, pVal->n, SQLITE_UTF8);
+ pVal->flags = MEM_Real;
+ }else if( op==TK_INTEGER ){
+ /* This case is required by -9223372036854775808 and other strings
+ ** that look like integers but cannot be handled by the
+ ** sqlite3DecOrHexToI64() call above. */
+ sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8);
+ }
}else{
sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8);
}
assert( (pVal->flags & MEM_IntReal)==0 );
if( pVal->flags & (MEM_Int|MEM_IntReal|MEM_Real) ){
Index: src/vdbesort.c
==================================================================
--- src/vdbesort.c
+++ src/vdbesort.c
@@ -1288,11 +1288,11 @@
void *p = 0;
int chunksize = 4*1024;
sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_CHUNK_SIZE, &chunksize);
sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_SIZE_HINT, &nByte);
sqlite3OsFetch(pFd, 0, (int)nByte, &p);
- if( p ) sqlite3OsUnfetch(pFd, 0, p);
+ if( NEVER(p) ) sqlite3OsUnfetch(pFd, 0, p);
}
}
#else
# define vdbeSorterExtendFile(x,y,z)
#endif
Index: src/vtab.c
==================================================================
--- src/vtab.c
+++ src/vtab.c
@@ -313,11 +313,10 @@
assert( sqlite3BtreeHoldsAllMutexes(db) );
assert( sqlite3_mutex_held(db->mutex) );
if( p ){
db->pDisconnect = 0;
- sqlite3ExpirePreparedStatements(db, 0);
do {
VTable *pNext = p->pNext;
sqlite3VtabUnlock(p);
p = pNext;
}while( p );
Index: src/wal.c
==================================================================
--- src/wal.c
+++ src/wal.c
@@ -2002,10 +2002,23 @@
*pp = p;
return rc;
}
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+
+
+/*
+** Attempt to enable blocking locks that block for nMs ms. Return 1 if
+** blocking locks are successfully enabled, or 0 otherwise.
+*/
+static int walEnableBlockingMs(Wal *pWal, int nMs){
+ int rc = sqlite3OsFileControl(
+ pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&nMs
+ );
+ return (rc==SQLITE_OK);
+}
+
/*
** Attempt to enable blocking locks. Blocking locks are enabled only if (a)
** they are supported by the VFS, and (b) the database handle is configured
** with a busy-timeout. Return 1 if blocking locks are successfully enabled,
** or 0 otherwise.
@@ -2013,15 +2026,11 @@
static int walEnableBlocking(Wal *pWal){
int res = 0;
if( pWal->db ){
int tmout = pWal->db->busyTimeout;
if( tmout ){
- int rc;
- rc = sqlite3OsFileControl(
- pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout
- );
- res = (rc==SQLITE_OK);
+ res = walEnableBlockingMs(pWal, tmout);
}
}
return res;
}
@@ -2066,24 +2075,14 @@
*/
void sqlite3WalDb(Wal *pWal, sqlite3 *db){
pWal->db = db;
}
-/*
-** Take an exclusive WRITE lock. Blocking if so configured.
-*/
-static int walLockWriter(Wal *pWal){
- int rc;
- walEnableBlocking(pWal);
- rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1);
- walDisableBlocking(pWal);
- return rc;
-}
#else
# define walEnableBlocking(x) 0
# define walDisableBlocking(x)
-# define walLockWriter(pWal) walLockExclusive((pWal), WAL_WRITE_LOCK, 1)
+# define walEnableBlockingMs(pWal, ms) 0
# define sqlite3WalDb(pWal, db)
#endif /* ifdef SQLITE_ENABLE_SETLK_TIMEOUT */
/*
@@ -2680,19 +2679,22 @@
walUnlockShared(pWal, WAL_WRITE_LOCK);
rc = SQLITE_READONLY_RECOVERY;
}
}else{
int bWriteLock = pWal->writeLock;
- if( bWriteLock || SQLITE_OK==(rc = walLockWriter(pWal)) ){
+ if( bWriteLock
+ || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1))
+ ){
pWal->writeLock = 1;
if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){
badHdr = walIndexTryHdr(pWal, pChanged);
if( badHdr ){
/* If the wal-index header is still malformed even while holding
** a WRITE lock, it can only mean that the header is corrupted and
** needs to be reconstructed. So run recovery to do exactly that.
- */
+ ** Disable blocking locks first. */
+ walDisableBlocking(pWal);
rc = walIndexRecover(pWal);
*pChanged = 1;
}
}
if( bWriteLock==0 ){
@@ -2898,10 +2900,41 @@
*pChanged = 1;
}
return rc;
}
+/*
+** The final argument passed to walTryBeginRead() is of type (int*). The
+** caller should invoke walTryBeginRead as follows:
+**
+** int cnt = 0;
+** do {
+** rc = walTryBeginRead(..., &cnt);
+** }while( rc==WAL_RETRY );
+**
+** The final value of "cnt" is of no use to the caller. It is used by
+** the implementation of walTryBeginRead() as follows:
+**
+** + Each time walTryBeginRead() is called, it is incremented. Once
+** it reaches WAL_RETRY_PROTOCOL_LIMIT - indicating that walTryBeginRead()
+** has many times been invoked and failed with WAL_RETRY - walTryBeginRead()
+** returns SQLITE_PROTOCOL.
+**
+** + If SQLITE_ENABLE_SETLK_TIMEOUT is defined and walTryBeginRead() failed
+** because a blocking lock timed out (SQLITE_BUSY_TIMEOUT from the OS
+** layer), the WAL_RETRY_BLOCKED_MASK bit is set in "cnt". In this case
+** the next invocation of walTryBeginRead() may omit an expected call to
+** sqlite3OsSleep(). There has already been a delay when the previous call
+** waited on a lock.
+*/
+#define WAL_RETRY_PROTOCOL_LIMIT 100
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+# define WAL_RETRY_BLOCKED_MASK 0x10000000
+#else
+# define WAL_RETRY_BLOCKED_MASK 0
+#endif
+
/*
** Attempt to start a read transaction. This might fail due to a race or
** other transient condition. When that happens, it returns WAL_RETRY to
** indicate to the caller that it is safe to retry immediately.
**
@@ -2948,17 +2981,20 @@
** checkpoint process do as much work as possible. This routine might
** update values of the aReadMark[] array in the header, but if it does
** so it takes care to hold an exclusive lock on the corresponding
** WAL_READ_LOCK() while changing values.
*/
-static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
+static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){
volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */
u32 mxReadMark; /* Largest aReadMark[] value */
int mxI; /* Index of largest aReadMark[] value */
int i; /* Loop counter */
int rc = SQLITE_OK; /* Return code */
u32 mxFrame; /* Wal frame to lock to */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int nBlockTmout = 0;
+#endif
assert( pWal->readLock<0 ); /* Not currently locked */
/* useWal may only be set for read/write connections */
assert( (pWal->readOnly & WAL_SHM_RDONLY)==0 || useWal==0 );
@@ -2978,25 +3014,52 @@
** is more of a scheduler yield than an actual delay. But on the 10th
** an subsequent retries, the delays start becoming longer and longer,
** so that on the 100th (and last) RETRY we delay for 323 milliseconds.
** The total delay time before giving up is less than 10 seconds.
*/
- if( cnt>5 ){
+ (*pCnt)++;
+ if( *pCnt>5 ){
int nDelay = 1; /* Pause time in microseconds */
- if( cnt>100 ){
+ int cnt = (*pCnt & ~WAL_RETRY_BLOCKED_MASK);
+ if( cnt>WAL_RETRY_PROTOCOL_LIMIT ){
VVA_ONLY( pWal->lockError = 1; )
return SQLITE_PROTOCOL;
}
- if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+ if( *pCnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* In SQLITE_ENABLE_SETLK_TIMEOUT builds, configure the file-descriptor
+ ** to block for locks for approximately nDelay us. This affects three
+ ** locks: (a) the shared lock taken on the DMS slot in os_unix.c (if
+ ** using os_unix.c), (b) the WRITER lock taken in walIndexReadHdr() if the
+ ** first attempted read fails, and (c) the shared lock taken on the
+ ** read-mark.
+ **
+ ** If the previous call failed due to an SQLITE_BUSY_TIMEOUT error,
+ ** then sleep for the minimum of 1us. The previous call already provided
+ ** an extra delay while it was blocking on the lock.
+ */
+ nBlockTmout = (nDelay+998) / 1000;
+ if( !useWal && walEnableBlockingMs(pWal, nBlockTmout) ){
+ if( *pCnt & WAL_RETRY_BLOCKED_MASK ) nDelay = 1;
+ }
+#endif
sqlite3OsSleep(pWal->pVfs, nDelay);
+ *pCnt &= ~WAL_RETRY_BLOCKED_MASK;
}
if( !useWal ){
assert( rc==SQLITE_OK );
if( pWal->bShmUnreliable==0 ){
rc = walIndexReadHdr(pWal, pChanged);
}
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ walDisableBlocking(pWal);
+ if( rc==SQLITE_BUSY_TIMEOUT ){
+ rc = SQLITE_BUSY;
+ *pCnt |= WAL_RETRY_BLOCKED_MASK;
+ }
+#endif
if( rc==SQLITE_BUSY ){
/* If there is not a recovery running in another thread or process
** then convert BUSY errors to WAL_RETRY. If recovery is known to
** be running, convert BUSY to BUSY_RECOVERY. There is a race here
** which might cause WAL_RETRY to be returned even if BUSY_RECOVERY
@@ -3107,13 +3170,23 @@
if( mxI==0 ){
assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 );
return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT;
}
+ (void)walEnableBlockingMs(pWal, nBlockTmout);
rc = walLockShared(pWal, WAL_READ_LOCK(mxI));
+ walDisableBlocking(pWal);
if( rc ){
- return rc==SQLITE_BUSY ? WAL_RETRY : rc;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ if( rc==SQLITE_BUSY_TIMEOUT ){
+ *pCnt |= WAL_RETRY_BLOCKED_MASK;
+ }
+#else
+ assert( rc!=SQLITE_BUSY_TIMEOUT );
+#endif
+ assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT );
+ return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc;
}
/* Now that the read-lock has been obtained, check that neither the
** value in the aReadMark[] array or the contents of the wal-index
** header have changed.
**
@@ -3297,11 +3370,11 @@
ckptLock = 1;
}
#endif
do{
- rc = walTryBeginRead(pWal, pChanged, 0, ++cnt);
+ rc = walTryBeginRead(pWal, pChanged, 0, &cnt);
}while( rc==WAL_RETRY );
testcase( (rc&0xff)==SQLITE_BUSY );
testcase( (rc&0xff)==SQLITE_IOERR );
testcase( rc==SQLITE_PROTOCOL );
testcase( rc==SQLITE_OK );
@@ -3478,10 +3551,11 @@
if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH-1]==pgno ){
assert( iFrame>iRead || CORRUPT_DB );
iRead = iFrame;
}
if( (nCollide--)==0 ){
+ *piRead = 0;
return SQLITE_CORRUPT_BKPT;
}
iKey = walNextHash(iKey);
}
if( iRead ) break;
@@ -3781,11 +3855,11 @@
walUnlockShared(pWal, WAL_READ_LOCK(0));
pWal->readLock = -1;
cnt = 0;
do{
int notUsed;
- rc = walTryBeginRead(pWal, &notUsed, 1, ++cnt);
+ rc = walTryBeginRead(pWal, &notUsed, 1, &cnt);
}while( rc==WAL_RETRY );
assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */
testcase( (rc&0xff)==SQLITE_IOERR );
testcase( rc==SQLITE_PROTOCOL );
testcase( rc==SQLITE_OK );
@@ -4202,14 +4276,13 @@
assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 );
if( pWal->readOnly ) return SQLITE_READONLY;
WALTRACE(("WAL%p: checkpoint begins\n", pWal));
- /* Enable blocking locks, if possible. If blocking locks are successfully
- ** enabled, set xBusy2=0 so that the busy-handler is never invoked. */
+ /* Enable blocking locks, if possible. */
sqlite3WalDb(pWal, db);
- (void)walEnableBlocking(pWal);
+ if( xBusy2 ) (void)walEnableBlocking(pWal);
/* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive
** "checkpoint" lock on the database file.
** EVIDENCE-OF: R-10421-19736 If any other process is running a
** checkpoint operation at the same time, the lock cannot be obtained and
@@ -4246,13 +4319,18 @@
/* Read the wal-index header. */
SEH_TRY {
if( rc==SQLITE_OK ){
+ /* For a passive checkpoint, do not re-enable blocking locks after
+ ** reading the wal-index header. A passive checkpoint should not block
+ ** or invoke the busy handler. The only lock such a checkpoint may
+ ** attempt to obtain is a lock on a read-slot, and it should give up
+ ** immediately and do a partial checkpoint if it cannot obtain it. */
walDisableBlocking(pWal);
rc = walIndexReadHdr(pWal, &isChanged);
- (void)walEnableBlocking(pWal);
+ if( eMode2!=SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal);
if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
}
}
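
A minimal sketch, assuming an SQLITE_ENABLE_SETLK_TIMEOUT build on a VFS that honors SQLITE_FCNTL_LOCK_TIMEOUT, of the one piece of application-side setup the blocking-lock paths above depend on; walEnableBlocking() and walEnableBlockingMs() only arm blocking locks when the connection has a non-zero busy timeout:

  #include <sqlite3.h>

  /* Sketch: give the connection a busy timeout so that walEnableBlocking()
  ** finds db->busyTimeout non-zero and can pass a lock timeout down to the
  ** VFS via SQLITE_FCNTL_LOCK_TIMEOUT. The 2000 ms value is illustrative. */
  static void arm_blocking_wal_locks(sqlite3 *db){
    sqlite3_busy_timeout(db, 2000);
  }
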
Index: src/where.c
==================================================================
--- src/where.c
+++ src/where.c
@@ -676,16 +676,26 @@
int iEnd = sqlite3VdbeCurrentAddr(v);
if( pParse->db->mallocFailed ) return;
for(; iStart<iEnd; iStart++, pOp++){
if( pOp->p1!=iTabCur ) continue;
if( pOp->opcode==OP_Column ){
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("TRANSLATE OP_Column to OP_Copy at %d\n", iStart);
+ }
+#endif
pOp->opcode = OP_Copy;
pOp->p1 = pOp->p2 + iRegister;
pOp->p2 = pOp->p3;
pOp->p3 = 0;
pOp->p5 = 2; /* Cause the MEM_Subtype flag to be cleared */
}else if( pOp->opcode==OP_Rowid ){
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("TRANSLATE OP_Rowid to OP_Sequence at %d\n", iStart);
+ }
+#endif
pOp->opcode = OP_Sequence;
pOp->p1 = iAutoidxCur;
#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
if( iAutoidxCur==0 ){
pOp->opcode = OP_Null;
@@ -2008,11 +2018,12 @@
nNew = sqlite3LogEst(iUpper - iLower);
/* TUNING: If both iUpper and iLower are derived from the same
** sample, then assume they are 4x more selective. This brings
** the estimated selectivity more in line with what it would be
** if estimated without the use of STAT4 tables. */
- if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) );
+ if( iLwrIdx==iUprIdx ){ nNew -= 20; }
+ assert( 20==sqlite3LogEst(4) );
}else{
nNew = 10; assert( 10==sqlite3LogEst(2) );
}
if( nNewpWInfo;
- int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
- SrcItem *pItem = pWInfo->pTabList->a + p->iTab;
- Table *pTab = pItem->pTab;
- Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
- sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
- p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
- sqlite3DebugPrintf(" %12s",
- pItem->zAlias ? pItem->zAlias : pTab->zName);
+void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){
+ if( pWC ){
+ WhereInfo *pWInfo = pWC->pWInfo;
+ int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
+ SrcItem *pItem = pWInfo->pTabList->a + p->iTab;
+ Table *pTab = pItem->pTab;
+ Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
+ sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
+ p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
+ sqlite3DebugPrintf(" %12s",
+ pItem->zAlias ? pItem->zAlias : pTab->zName);
+ }else{
+ sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d",
+ p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab);
+ }
if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
const char *zName;
if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){
if( strncmp(zName, "sqlite_autoindex_", 17)==0 ){
int i = sqlite3Strlen30(zName) - 1;
@@ -2278,10 +2306,19 @@
int i;
for(i=0; inLTerm; i++){
sqlite3WhereTermPrint(p->aLTerm[i], i);
}
}
+}
+void sqlite3ShowWhereLoop(const WhereLoop *p){
+ if( p ) sqlite3WhereLoopPrint(p, 0);
+}
+void sqlite3ShowWhereLoopList(const WhereLoop *p){
+ while( p ){
+ sqlite3ShowWhereLoop(p);
+ p = p->pNextLoop;
+ }
}
#endif
/*
** Convert bulk memory into a valid WhereLoop that can be passed
@@ -2391,50 +2428,64 @@
}
sqlite3DbNNFreeNN(db, pWInfo);
}
/*
-** Return TRUE if all of the following are true:
-**
-** (1) X has the same or lower cost, or returns the same or fewer rows,
-** than Y.
-** (2) X uses fewer WHERE clause terms than Y
-** (3) Every WHERE clause term used by X is also used by Y
-** (4) X skips at least as many columns as Y
-** (5) If X is a covering index, than Y is too
-**
-** Conditions (2) and (3) mean that X is a "proper subset" of Y.
-** If X is a proper subset of Y then Y is a better choice and ought
-** to have a lower cost. This routine returns TRUE when that cost
-** relationship is inverted and needs to be adjusted. Constraint (4)
-** was added because if X uses skip-scan less than Y it still might
-** deserve a lower cost even if it is a proper subset of Y. Constraint (5)
-** was added because a covering index probably deserves to have a lower cost
-** than a non-covering index even if it is a proper subset.
+** Return TRUE if X is a proper subset of Y but is of equal or less cost.
+** In other words, return true if all constraints of X are also part of Y
+** and Y has additional constraints that might speed the search that X lacks
+** but the cost of running X is not more than the cost of running Y.
+**
+** In other words, return true if the cost relationship between X and Y
+** is inverted and needs to be adjusted.
+**
+** Case 1:
+**
+** (1a) X and Y use the same index.
+** (1b) X has fewer == terms than Y
+** (1c) Neither X nor Y use skip-scan
+** (1d) X does not have a greater cost than Y
+**
+** Case 2:
+**
+** (2a) X has the same or lower cost, or returns the same or fewer rows,
+** than Y.
+** (2b) X uses fewer WHERE clause terms than Y
+** (2c) Every WHERE clause term used by X is also used by Y
+** (2d) X skips at least as many columns as Y
+** (2e) If X is a covering index, then Y is too
*/
static int whereLoopCheaperProperSubset(
const WhereLoop *pX, /* First WhereLoop to compare */
const WhereLoop *pY /* Compare against this WhereLoop */
){
int i, j;
+ if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; /* (1d) and (2a) */
+ assert( (pX->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ assert( (pY->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ if( pX->u.btree.nEq < pY->u.btree.nEq /* (1b) */
+ && pX->u.btree.pIndex==pY->u.btree.pIndex /* (1a) */
+ && pX->nSkip==0 && pY->nSkip==0 /* (1c) */
+ ){
+ return 1; /* Case 1 is true */
+ }
if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){
- return 0; /* X is not a subset of Y */
+ return 0; /* (2b) */
}
- if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0;
- if( pY->nSkip > pX->nSkip ) return 0;
+ if( pY->nSkip > pX->nSkip ) return 0; /* (2d) */
for(i=pX->nLTerm-1; i>=0; i--){
if( pX->aLTerm[i]==0 ) continue;
for(j=pY->nLTerm-1; j>=0; j--){
if( pY->aLTerm[j]==pX->aLTerm[i] ) break;
}
- if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */
+ if( j<0 ) return 0; /* (2c) */
}
if( (pX->wsFlags&WHERE_IDX_ONLY)!=0
&& (pY->wsFlags&WHERE_IDX_ONLY)==0 ){
- return 0; /* Constraint (5) */
+ return 0; /* (2e) */
}
- return 1; /* All conditions meet */
+ return 1; /* Case 2 is true */
}
/*
** Try to adjust the cost and number of output rows of WhereLoop pTemplate
** upwards or downwards so that:
@@ -2920,11 +2971,14 @@
opMask = WO_LT|WO_LE;
}else{
assert( pNew->u.btree.nBtm==0 );
opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS;
}
- if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
+ if( pProbe->bUnordered || pProbe->bLowQual ){
+ if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
+ if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS);
+ }
assert( pNew->u.btree.nEqnColumn );
assert( pNew->u.btree.nEqnKeyCol
|| pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY );
@@ -3306,11 +3360,13 @@
if( pIndex->bUnordered ) return 0;
if( (pOB = pBuilder->pWInfo->pOrderBy)==0 ) return 0;
for(ii=0; iinExpr; ii++){
Expr *pExpr = sqlite3ExprSkipCollateAndLikely(pOB->a[ii].pExpr);
if( NEVER(pExpr==0) ) continue;
- if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){
+ if( (pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN)
+ && pExpr->iTable==iCursor
+ ){
if( pExpr->iColumn<0 ) return 1;
for(jj=0; jjnKeyCol; jj++){
if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1;
}
}else if( (aColExpr = pIndex->aColExpr)!=0 ){
@@ -5406,14 +5462,13 @@
pWInfo->nOBSat = pFrom->isOrdered;
if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){
if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){
pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
}
- if( pWInfo->pSelect->pOrderBy
- && pWInfo->nOBSat > pWInfo->pSelect->pOrderBy->nExpr ){
- pWInfo->nOBSat = pWInfo->pSelect->pOrderBy->nExpr;
- }
+ /* vvv--- See check-in [12ad822d9b827777] on 2023-03-16 ---vvv */
+ assert( pWInfo->pSelect->pOrderBy==0
+ || pWInfo->nOBSat <= pWInfo->pSelect->pOrderBy->nExpr );
}else{
pWInfo->revMask = pFrom->revLoop;
if( pWInfo->nOBSat<=0 ){
pWInfo->nOBSat = 0;
if( nLoop>0 ){
@@ -6028,11 +6083,14 @@
** struct, the contents of WhereInfo.a[], the WhereClause structure
** and the WhereMaskSet structure. Since WhereClause contains an 8-byte
** field (type Bitmask) it must be aligned on an 8-byte boundary on
** some architectures. Hence the ROUND8() below.
*/
- nByteWInfo = ROUND8P(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel));
+ nByteWInfo = ROUND8P(sizeof(WhereInfo));
+ if( nTabList>1 ){
+ nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel));
+ }
pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
if( db->mallocFailed ){
sqlite3DbFree(db, pWInfo);
pWInfo = 0;
goto whereBeginError;
@@ -6590,10 +6648,15 @@
whereBeginError:
if( pWInfo ){
pParse->nQueryLoop = pWInfo->savedNQueryLoop;
whereInfoFree(db, pWInfo);
}
+#ifdef WHERETRACE_ENABLED
+ /* Prevent harmless compiler warnings about debugging routines
+ ** being declared but never used */
+ sqlite3ShowWhereLoopList(0);
+#endif /* WHERETRACE_ENABLED */
return 0;
}
/*
** Part of sqlite3WhereEnd() will rewrite opcodes to reference the
Index: src/whereInt.h
==================================================================
--- src/whereInt.h
+++ src/whereInt.h
@@ -500,11 +500,11 @@
*/
Bitmask sqlite3WhereGetMask(WhereMaskSet*,int);
#ifdef WHERETRACE_ENABLED
void sqlite3WhereClausePrint(WhereClause *pWC);
void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm);
-void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC);
+void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC);
#endif
WhereTerm *sqlite3WhereFindTerm(
WhereClause *pWC, /* The WHERE clause to be searched */
int iCur, /* Cursor number of LHS */
int iColumn, /* Column number of LHS */
Index: test/aggnested.test
==================================================================
--- test/aggnested.test
+++ test/aggnested.test
@@ -356,10 +356,132 @@
SELECT c FROM (SELECT t2.b AS c FROM t1) GROUP BY c HAVING t2.b
)
FROM t2 GROUP BY 'constant_string';
} {{}}
+#-------------------------------------------------------------------------
+reset_db
+
+do_execsql_test 7.0 {
+ CREATE TABLE invoice (
+ id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+ amount DOUBLE PRECISION DEFAULT NULL,
+ name VARCHAR(100) DEFAULT NULL
+ );
+
+ INSERT INTO invoice (amount, name) VALUES
+ (4.0, 'Michael'), (15.0, 'Bara'), (4.0, 'Michael'), (6.0, 'John');
+}
+
+do_execsql_test 7.1 {
+ SELECT sum(amount), name
+ from invoice
+ group by name
+ having (select v > 6 from (select sum(amount) v) t)
+} {
+ 15.0 Bara
+ 8.0 Michael
+}
+
+do_execsql_test 7.2 {
+ SELECT (select 1 from (select sum(amount))) FROM invoice
+} {1}
+
+do_execsql_test 8.0 {
+ CREATE TABLE t1(x INT);
+ INSERT INTO t1 VALUES(100);
+ INSERT INTO t1 VALUES(20);
+ INSERT INTO t1 VALUES(3);
+ SELECT (SELECT y FROM (SELECT sum(x) AS y) AS t2 ) FROM t1;
+} {123}
+
+do_execsql_test 8.1 {
+ SELECT (
+ SELECT y FROM (
+ SELECT z AS y FROM (SELECT sum(x) AS z) AS t2
+ )
+ ) FROM t1;
+} {123}
+
+do_execsql_test 8.2 {
+ SELECT (
+ SELECT a FROM (
+ SELECT y AS a FROM (
+ SELECT z AS y FROM (SELECT sum(x) AS z) AS t2
+ )
+ )
+ ) FROM t1;
+} {123}
+
+#-------------------------------------------------------------------------
+# dbsqlfuzz 04408efc51ae46897c4c122b407412045ed221b4
+#
+reset_db
+
+do_execsql_test 9.1 {
+ WITH out(i, j, k) AS (
+ VALUES(1234, 5678, 9012)
+ )
+ SELECT (
+ SELECT (
+ SELECT min(abc) = ( SELECT ( SELECT 1234 fROM (SELECT abc) ) )
+ FROM (
+ SELECT sum( out.i ) + ( SELECT sum( out.i ) ) AS abc FROM (SELECT out.j)
+ )
+ )
+ ) FROM out;
+} {0}
+
+do_execsql_test 9.2 {
+ CREATE TABLE t1(a);
+ CREATE TABLE t2(b);
+ INSERT INTO t1 VALUES(1), (2), (3);
+ INSERT INTO t2 VALUES(4), (5), (6);
+
+ SELECT (
+ SELECT min(y) + (SELECT x) FROM (
+ SELECT sum(a) AS x, b AS y FROM t2
+ )
+ )
+ FROM t1;
+} {10}
+
+do_execsql_test 9.3 {
+ SELECT (
+ SELECT min(y) + (SELECT (SELECT x)) FROM (
+ SELECT sum(a) AS x, b AS y FROM t2
+ )
+ )
+ FROM t1;
+} {10}
+
+do_execsql_test 9.4 {
+ SELECT (
+ SELECT (SELECT x) FROM (
+ SELECT sum(a) AS x, b AS y FROM t2
+ ) GROUP BY y
+ )
+ FROM t1;
+} {6}
+do_execsql_test 9.5 {
+ SELECT (
+ SELECT (SELECT (SELECT x)) FROM (
+ SELECT sum(a) AS x, b AS y FROM t2
+ ) GROUP BY y
+ )
+ FROM t1;
+} {6}
-
+# 2023-12-16
+# New test case for check-in [4470f657d2069972] from 2023-11-02
+# https://bugs.chromium.org/p/chromium/issues/detail?id=1511689
+#
+do_execsql_test 10.1 {
+ DROP TABLE IF EXISTS t0;
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t0(c1, c2); INSERT INTO t0 VALUES(1,2);
+ CREATE TABLE t1(c3, c4); INSERT INTO t1 VALUES(3,4);
+ SELECT * FROM t0 WHERE EXISTS (SELECT 1 FROM t1 GROUP BY c3 HAVING ( SELECT count(*) FROM (SELECT 1 UNION ALL SELECT sum(DISTINCT c1) ) ) ) BETWEEN 1 AND 1;
+} {1 2}
finish_test
Index: test/aggorderby.test
==================================================================
--- test/aggorderby.test
+++ test/aggorderby.test
@@ -115,8 +115,48 @@
} {c,b,a}
do_execsql_test aggorderby-8.2 {
WITH c(x,y) AS (VALUES(1,1),(2,2),(3,3),(3,4),(3,5),(3,6))
SELECT sum(DISTINCT x ORDER BY y) FROM c;
} 6
+
+# Subtype information is transfered through the sorter for aggregates
+# that make use of subtype info.
+#
+do_execsql_test aggorderby-9.0 {
+ WITH c(x,y) AS (VALUES
+ ('{a:3}', 3),
+ ('[1,1]', 1),
+ ('[4,4]', 4),
+ ('{x:2}', 2))
+ SELECT json_group_array(json(x) ORDER BY y) FROM c;
+} {{[[1,1],{"x":2},{"a":3},[4,4]]}}
+do_execsql_test aggorderby-9.1 {
+ WITH c(x,y) AS (VALUES
+ ('[4,4]', 4),
+ ('{a:3}', 3),
+ ('[4,4]', 4),
+ ('[1,1]', 1),
+ ('[4,4]', 4),
+ ('{x:2}', 2))
+ SELECT json_group_array(DISTINCT json(x) ORDER BY y) FROM c;
+} {{[[1,1],{"x":2},{"a":3},[4,4]]}}
+do_execsql_test aggorderby-9.2 {
+ WITH c(x,y) AS (VALUES
+ ('{a:3}', 3),
+ ('[1,1]', 1),
+ ('[4,4]', 4),
+ ('{x:2}', 2))
+ SELECT json_group_array(json(x) ORDER BY json(x)) FROM c;
+} {{[[1,1],[4,4],{"a":3},{"x":2}]}}
+do_execsql_test aggorderby-9.3 {
+ WITH c(x,y) AS (VALUES
+ ('[4,4]', 4),
+ ('{a:3}', 3),
+ ('[4,4]', 4),
+ ('[1,1]', 1),
+ ('[4,4]', 4),
+ ('{x:2}', 2))
+ SELECT json_group_array(DISTINCT json(x) ORDER BY json(x)) FROM c;
+} {{[[1,1],[4,4],{"a":3},{"x":2}]}}
finish_test
Index: test/alter2.test
==================================================================
--- test/alter2.test
+++ test/alter2.test
@@ -369,11 +369,11 @@
set sql {CREATE TABLE t1(a, b DEFAULT -123.0, c VARCHAR(10) default 5)}
alter_table t1 $sql 3
execsql {
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
-} {1 integer -123 integer 5 text}
+} {1 integer -123.0 real 5 text}
#-----------------------------------------------------------------------
# Test that UPDATE trigger tables work with default values, and that when
# a row is updated the default values are correctly transfered to the
# new row.
@@ -395,15 +395,15 @@
do_test alter2-8.2 {
execsql {
UPDATE t1 SET c = 10 WHERE a = 1;
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
-} {1 integer -123 integer 10 text}
+} {1 integer -123.0 real 10 text}
ifcapable trigger {
do_test alter2-8.3 {
set ::val
- } {-123 integer 5 text -123 integer 10 text}
+ } {-123.0 real 5 text -123.0 real 10 text}
}
#-----------------------------------------------------------------------
# Test that DELETE trigger tables work with default values, and that when
# a row is updated the default values are correctly transfered to the
@@ -423,11 +423,11 @@
do_test alter2-9.2 {
execsql {
DELETE FROM t1 WHERE a = 2;
}
set ::val
- } {-123 integer 5 text}
+ } {-123.0 real 5 text}
}
#-----------------------------------------------------------------------
# Test creating an index on a column added with a default value.
#
Index: test/date.test
==================================================================
--- test/date.test
+++ test/date.test
@@ -144,10 +144,12 @@
datetest 2.48 {datetime('2003-10-22 12:24','9.4 second')} {2003-10-22 12:24:09}
datetest 2.49 {datetime('2003-10-22 12:24','0000 second')} {2003-10-22 12:24:00}
datetest 2.50 {datetime('2003-10-22 12:24','0001 second')} {2003-10-22 12:24:01}
datetest 2.51 {datetime('2003-10-22 12:24','nonsense')} NULL
+datetest 2.60 {datetime('2023-02-31')} {2023-03-03 00:00:00}
+
datetest 3.1 {strftime('%d','2003-10-31 12:34:56.432')} 31
datetest 3.2.1 {strftime('pre%fpost','2003-10-31 12:34:56.432')} pre56.432post
datetest 3.2.2 {strftime('%f','2003-10-31 12:34:59.9999999')} 59.999
datetest 3.3 {strftime('%H','2003-10-31 12:34:56.432')} 12
datetest 3.4 {strftime('%j','2003-10-31 12:34:56.432')} 304
@@ -205,12 +207,12 @@
}
datetest 3.16 "strftime('[repeat 200 %Y]','2003-10-31')" [repeat 200 2003]
datetest 3.17 "strftime('[repeat 200 abc%m123]','2003-10-31')" \
[repeat 200 abc10123]
-foreach c {a b c g h i n o q r t v x y z
- A B C D E G K L N O Q V Z
+foreach c {a b c h i n o q r t v x y z
+ A B C D E K L N O Q Z
0 1 2 3 4 5 6 6 7 9 _} {
datetest 3.18.$c "strftime('%$c','2003-10-31')" NULL
}
datetest 3.20 {strftime('%e','2023-08-09')} { 9}
datetest 3.21 {strftime('%F %T','2023-08-09 01:23')} {2023-08-09 01:23:00}
@@ -450,10 +452,13 @@
datetest 13.30 {date('2000-01-01','+1.5 years')} {2001-07-02}
datetest 13.31 {date('2001-01-01','+1.5 years')} {2002-07-02}
datetest 13.32 {date('2002-01-01','+1.5 years')} {2003-07-02}
datetest 13.33 {date('2002-01-01','-1.5 years')} {2000-07-02}
datetest 13.34 {date('2001-01-01','-1.5 years')} {1999-07-02}
+datetest 13.35 {date('2023-02-28')} {2023-02-28}
+datetest 13.36 {date('2023-02-29')} {2023-03-01}
+datetest 13.37 {date('2023-04-31')} {2023-05-01}
# Test for issues reported by BareFeet (list.sql at tandb.com.au)
# on mailing list on 2008-06-12.
#
# Put a floating point number in the database so that we can manipulate
Index: test/date4.test
==================================================================
--- test/date4.test
+++ test/date4.test
@@ -22,16 +22,16 @@
finish_test
return
}
if {$tcl_platform(os)=="Linux"} {
- set FMT {%d,%e,%F,%H,%k,%I,%l,%j,%m,%M,%u,%w,%W,%Y,%%,%P,%p}
+ set FMT {%d,%e,%F,%H,%k,%I,%l,%j,%m,%M,%u,%w,%W,%Y,%%,%P,%p,%U,%V,%G,%g}
} else {
set FMT {%d,%e,%F,%H,%I,%j,%p,%R,%u,%w,%W,%%}
}
-for {set i 0} {$i<=24854} {incr i} {
- set TS [expr {$i*86401}]
+for {set i 0} {$i<=24858} {incr i} {
+ set TS [expr {$i*86390}]
do_execsql_test date4-$i {
SELECT strftime($::FMT,$::TS,'unixepoch');
} [list [strftime $FMT $TS]]
}
Index: test/distinctagg.test
==================================================================
--- test/distinctagg.test
+++ test/distinctagg.test
@@ -93,11 +93,11 @@
5 0 "SELECT count(DISTINCT rowid) FROM t1" 10
6 0 "SELECT count(DISTINCT a) FROM t1, t2" 5
7 0 "SELECT count(DISTINCT a) FROM t2, t1" 5
8 1 "SELECT count(DISTINCT a+b) FROM t1, t2, t2, t2" 6
9 0 "SELECT count(DISTINCT c) FROM t1 WHERE c=2" 1
- 10 1 "SELECT count(DISTINCT t1.rowid) FROM t1, t2" 10
+ 10 0 "SELECT count(DISTINCT t1.rowid) FROM t1, t2" 10
} {
do_test 3.$tn.1 {
set prg [db eval "EXPLAIN $sql"]
set idx [lsearch $prg OpenEphemeral]
expr {$idx>=0}
@@ -146,10 +146,14 @@
INSERT INTO t2 VALUES(2, 3, 'z');
CREATE TABLE t3(x, y, z);
INSERT INTO t3 VALUES(1,1,1);
INSERT INTO t3 VALUES(2,2,2);
+
+ CREATE TABLE t4(a);
+ CREATE INDEX t4a ON t4(a);
+ INSERT INTO t4 VALUES(1), (2), (2), (3), (1);
}
foreach {tn use_eph sql res} {
1 0 "SELECT count(DISTINCT c) FROM t1 GROUP BY b" {2 3 0 1}
2 1 "SELECT count(DISTINCT a) FROM t1 GROUP BY b" {2 3 0 1}
@@ -156,10 +160,13 @@
3 1 "SELECT count(DISTINCT a) FROM t1 GROUP BY b+c" {0 1 1 1 1}
4 0 "SELECT count(DISTINCT f) FROM t2 GROUP BY d, e" {1 2 2 3}
5 1 "SELECT count(DISTINCT f) FROM t2 GROUP BY d" {2 3}
6 0 "SELECT count(DISTINCT f) FROM t2 WHERE d IS 1 GROUP BY e" {1 2 2}
+
+ 7 0 "SELECT count(DISTINCT a) FROM t1" {4}
+ 8 0 "SELECT count(DISTINCT a) FROM t4" {3}
} {
do_test 4.$tn.1 {
set prg [db eval "EXPLAIN $sql"]
set idx [lsearch $prg OpenEphemeral]
expr {$idx>=0}
Index: test/fts3fault3.test
==================================================================
--- test/fts3fault3.test
+++ test/fts3fault3.test
@@ -47,8 +47,36 @@
} -test {
catchsql { COMMIT }
faultsim_integrity_check
faultsim_test_result {0 {}}
}
+
+#-------------------------------------------------------------------
+reset_db
+
+do_execsql_test 2.0 {
+ BEGIN;
+ CREATE VIRTUAL TABLE t1 USING fts3(a);
+ WITH s(i) AS (
+ SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<50
+ )
+ INSERT INTO t1 SELECT 'abc def ghi jkl mno pqr' FROM s;
+ COMMIT;
+}
+
+faultsim_save_and_close
+do_faultsim_test 2 -faults oom-t* -prep {
+ faultsim_restore_and_reopen
+ execsql {
+ BEGIN;
+ CREATE TABLE x1(a PRIMARY KEY);
+ }
+} -body {
+ execsql {
+ PRAGMA integrity_check;
+ }
+} -test {
+ faultsim_test_result {0 ok} $::TMPDBERROR
+}
finish_test
ADDED test/fts3integrity.test
Index: test/fts3integrity.test
==================================================================
--- /dev/null
+++ test/fts3integrity.test
@@ -0,0 +1,42 @@
+# 2023 December 16
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file runs all tests.
+#
+# $Id: fts3.test,v 1.2 2008/07/23 18:17:32 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set ::testprefix fts3integrity
+
+# If SQLITE_ENABLE_FTS3 is not defined, omit this file.
+ifcapable !fts3 {
+ finish_test
+ return
+}
+
+do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE t1 USING fts3(x);
+ INSERT INTO t1 VALUES('first row');
+ INSERT INTO t1 VALUES('second row');
+
+ CREATE TABLE t2(x PRIMARY KEY);
+ INSERT INTO t2 VALUES('first row');
+ INSERT INTO t2 VALUES('second row');
+}
+
+sqlite3 db2 test.db
+
+do_execsql_test -db db2 1.1 {
+ CREATE TABLE t3(x, y);
+}
+
+do_execsql_test 1.2 {
+ PRAGMA integrity_check;
+} {ok}
+
+finish_test
Index: test/fts4intck1.test
==================================================================
--- test/fts4intck1.test
+++ test/fts4intck1.test
@@ -51,8 +51,25 @@
proc slang {in} {return $in}
do_execsql_test 2.3 {
PRAGMA integrity_check(t2);
} {{malformed inverted index for FTS4 table main.t2}}
+
+#-------------------------------------------------------------------------
+# Test that integrity-check works on a read-only database.
+#
+reset_db
+do_execsql_test 3.0 {
+ CREATE VIRTUAL TABLE x1 USING fts4(a, b);
+ INSERT INTO x1 VALUES('one', 'two');
+ INSERT INTO x1 VALUES('three', 'four');
+}
+db close
+sqlite3 db test.db -readonly 1
+
+do_execsql_test 3.1 {
+ PRAGMA integrity_check;
+} {ok}
+
finish_test
Index: test/func.test
==================================================================
--- test/func.test
+++ test/func.test
@@ -1040,10 +1040,13 @@
do_test func-21.8 {
execsql {
SELECT replace('aaaaaaa', 'a', '0123456789');
}
} {0123456789012345678901234567890123456789012345678901234567890123456789}
+do_execsql_test func-21.9 {
+ SELECT typeof(replace(1,'',0));
+} {text}
ifcapable tclvar {
do_test func-21.9 {
# Attempt to exploit a buffer-overflow that at one time existed
# in the REPLACE function.
@@ -1551,27 +1554,6 @@
do_execsql_test func-38.100 {
WITH t1(x) AS (VALUES(9e+999)) SELECT sum(x), avg(x), total(x) FROM t1;
WITH t1(x) AS (VALUES(-9e+999)) SELECT sum(x), avg(x), total(x) FROM t1;
} {Inf Inf Inf -Inf -Inf -Inf}
-# 2024-03-21 https://sqlite.org/forum/forumpost/23b8688ef4
-# Another problem with Kahan-Babushka-Neumaier summation and
-# infinities.
-#
-do_execsql_test func-39.101 {
- WITH RECURSIVE c(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c WHERE n<1)
- SELECT sum(1.7976931348623157e308),
- avg(1.7976931348623157e308),
- total(1.7976931348623157e308)
- FROM c;
-} {1.79769313486232e+308 1.79769313486232e+308 1.79769313486232e+308}
-for {set i 2} {$i<10} {incr i} {
- do_execsql_test func-39.[expr {10*$i+100}] {
- WITH RECURSIVE c(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c WHERE n<$i)
- SELECT sum(1.7976931348623157e308),
- avg(1.7976931348623157e308),
- total(1.7976931348623157e308)
- FROM c;
- } {Inf Inf Inf}
-}
-
finish_test
Index: test/func4.test
==================================================================
--- test/func4.test
+++ test/func4.test
@@ -90,11 +90,11 @@
do_execsql_test func4-1.22 {
SELECT tointeger(-1.79769313486232e308 + 1);
} {{}}
do_execsql_test func4-1.23 {
SELECT tointeger(-9223372036854775808 - 1);
-} {-9223372036854775808}
+} {{}}
do_execsql_test func4-1.24 {
SELECT tointeger(-9223372036854775808);
} {-9223372036854775808}
do_execsql_test func4-1.25 {
SELECT tointeger(-9223372036854775808 + 1);
@@ -267,19 +267,19 @@
do_execsql_test func4-2.23 {
SELECT toreal(-9223372036854775808 - 1);
} {-9.223372036854776e+18}
do_execsql_test func4-2.24 {
SELECT toreal(-9223372036854775808);
- } {-9.223372036854776e+18}
+ } {{}}
if {$highPrecision(2)} {
do_execsql_test func4-2.25 {
SELECT toreal(-9223372036854775808 + 1);
} {{}}
}
do_execsql_test func4-2.26 {
SELECT toreal(-9223372036854775807 - 1);
- } {-9.223372036854776e+18}
+ } {{}}
if {$highPrecision(2)} {
do_execsql_test func4-2.27 {
SELECT toreal(-9223372036854775807);
} {{}}
do_execsql_test func4-2.28 {
@@ -459,11 +459,11 @@
} {0 {}}
do_test func4-3.18 {
catchsql {
INSERT INTO t1 (x) VALUES ('-9223372036854775809');
}
- } {0 {}}
+ } {1 {CHECK constraint failed: tointeger(x) IS NOT NULL}}
if {$highPrecision(1)} {
do_test func4-3.19 {
catchsql {
INSERT INTO t1 (x) VALUES (9223372036854775808);
}
@@ -571,14 +571,14 @@
do_execsql_test func4-5.5 {
SELECT tointeger(toreal(1));
} {1}
do_execsql_test func4-5.6 {
SELECT tointeger(toreal(-9223372036854775808 - 1));
- } {-9223372036854775808}
+ } {{}}
do_execsql_test func4-5.7 {
SELECT tointeger(toreal(-9223372036854775808));
- } {-9223372036854775808}
+ } {{}}
if {$highPrecision(2)} {
do_execsql_test func4-5.8 {
SELECT tointeger(toreal(-9223372036854775808 + 1));
} {{}}
}
Index: test/fuzzcheck.c
==================================================================
--- test/fuzzcheck.c
+++ test/fuzzcheck.c
@@ -157,13 +157,14 @@
unsigned int nInvariant; /* Number of invariant checks run */
char zTestName[100]; /* Name of current test */
} g;
/*
-** Include the external vt02.c module.
+** Include the external vt02.c and randomjson.c modules.
*/
extern int sqlite3_vt02_init(sqlite3*,char***,void*);
+extern int sqlite3_randomjson_init(sqlite3*,char***,void*);
/*
** Print an error message and quit.
*/
@@ -1265,10 +1266,12 @@
if( depthLimit>0 ){
sqlite3_limit(cx.db, SQLITE_LIMIT_EXPR_DEPTH, depthLimit);
}
sqlite3_limit(cx.db, SQLITE_LIMIT_LIKE_PATTERN_LENGTH, 100);
sqlite3_hard_heap_limit64(heapLimit);
+ rc = 1;
+ sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &rc);
if( nDb>=20 && aDb[18]==2 && aDb[19]==2 ){
aDb[18] = aDb[19] = 1;
}
rc = sqlite3_deserialize(cx.db, "main", aDb, nDb, nDb,
@@ -1292,10 +1295,13 @@
** deserialize to do this because deserialize depends on ATTACH */
sqlite3_set_authorizer(cx.db, block_troublesome_sql, &btsFlags);
/* Add the vt02 virtual table */
sqlite3_vt02_init(cx.db, 0, 0);
+
+ /* Add the random_json() and random_json5() functions */
+ sqlite3_randomjson_init(cx.db, 0, 0);
/* Add support for sqlite_dbdata and sqlite_dbptr virtual tables used
** by the recovery API */
sqlite3_dbdata_init(cx.db, 0, 0);
Index: test/fuzzinvariants.c
==================================================================
--- test/fuzzinvariants.c
+++ test/fuzzinvariants.c
@@ -294,18 +294,10 @@
){
/* This is a randomized column name and so cannot be used in the
** WHERE clause. */
continue;
}
-#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
- if( sqlite3_strlike("%rowid%",zColName,0)==0
- || sqlite3_strlike("%oid%",zColName,0)==0
- ){
- /* ROWID values are unreliable if SQLITE_ALLOW_ROWID_IN_VIEW is used */
- continue;
- }
-#endif
for(j=0; j UPDATE data1 SET x=jsonb(x);
+> VACUUM;
+
+ * Build the baseline sqlite3.c file with sqlite3.h and shell.c.
+
+> make clean sqlite3.c
+
+ * Run "`sh json-speed-check.sh trunk`". This creates the baseline
+ profile in "jout-trunk.txt" for the preformance test using text JSON.
+
+ * Run "`sh json-speed-check.sh trunk --jsonb`". This creates the
+ baseline profile in "joutb-trunk.txt" for the performance test
+ for processing JSONB.
+
+ * (Optional) Verify that the json100mb.db database really does contain
+ approximately 100MB of JSON content by running:
- 3. Run "`sh json-speed-check.sh trunk`". This creates the baseline
- profile in "jout-trunk.txt".
+> SELECT sum(length(x)) FROM data1;
+> SELECT * FROM data1 WHERE NOT json_valid(x);
# 3.0 Testing
- 1. Build the sqlite3.c (with sqlite3.h and shell.c) to be tested.
+ * Build the sqlite3.c (with sqlite3.h and shell.c) to be tested.
- 2. Run "`sh json-speed-check.sh x1`". The profile output will appear
+ * Run "`sh json-speed-check.sh x1`". The profile output will appear
in jout-x1.txt. Substitute any label you want in place of "x1".
- 3. Run the script shown below in the CLI.
+ * Run "`sh json-speed-check.sh x1 --jsonb`". The profile output will appear
+ in joutb-x1.txt. Substitute any label you want in place of "x1".
+
+ * Run the script shown below in the CLI.
Divide 2500 by the real elapse time from this test
to get an estimate for number of MB/s that the JSON parser is
able to process.
-> ~~~~
-.open json100mb.db
-.timer on
-WITH RECURSIVE c(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c WHERE n<25)
-SELECT sum(json_valid(x)) FROM c, data1;
-~~~~
+> .open json100mb.db
+> .timer on
+> WITH RECURSIVE c(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c WHERE n<25)
+> SELECT sum(json_valid(x)) FROM c, data1;
Index: test/json/json-speed-check.sh
==================================================================
--- test/json/json-speed-check.sh
+++ test/json/json-speed-check.sh
@@ -33,15 +33,17 @@
LEAN_OPTS="$LEAN_OPTS -DSQLITE_OMIT_DEPRECATED"
LEAN_OPTS="$LEAN_OPTS -DSQLITE_OMIT_PROGRESS_CALLBACK"
LEAN_OPTS="$LEAN_OPTS -DSQLITE_OMIT_SHARED_CACHE"
LEAN_OPTS="$LEAN_OPTS -DSQLITE_USE_ALLOCA"
BASELINE="trunk"
+TYPE="json"
doExplain=0
doCachegrind=1
doVdbeProfile=0
doWal=1
doDiff=1
+doJsonB=0
while test "$1" != ""; do
case $1 in
--nodiff)
doDiff=0
;;
@@ -52,10 +54,14 @@
CC=clang
;;
--gcc7)
CC=gcc-7
;;
+ --jsonb)
+ doJsonB=1
+ TYPE="jsonb"
+ ;;
-*)
CC_OPTS="$CC_OPTS $1"
;;
*)
BASELINE=$1
@@ -67,14 +73,15 @@
echo "CC_OPTS = $CC_OPTS" | tee -a summary-$NAME.txt
rm -f cachegrind.out.* jsonshell
$CC -g -Os -Wall -I. $CC_OPTS ./shell.c ./sqlite3.c -o jsonshell -ldl -lpthread
ls -l jsonshell | tee -a summary-$NAME.txt
home=`echo $0 | sed -e 's,/[^/]*$,,'`
-echo ./jsonshell json100mb.db "<$home/json-q1.txt"
-valgrind --tool=cachegrind ./jsonshell json100mb.db <$home/json-q1.txt \
- 2>&1 | tee -a summary-$NAME.txt
-cg_anno.tcl cachegrind.out.* >jout-$NAME.txt
-echo '*****************************************************' >>jout-$NAME.txt
-sed 's/^[0-9=-]\{9\}/==00000==/' summary-$NAME.txt >>jout-$NAME.txt
-if test "$NAME" != "$BASELINE"; then
- fossil xdiff --tk -c 20 jout-$BASELINE.txt jout-$NAME.txt
+DB=$TYPE''100mb.db
+echo ./jsonshell $DB "<$home/$TYPE-q1.txt"
+valgrind --tool=cachegrind ./jsonshell json100mb_b.db <$home/$TYPE-q1.txt \
+ 2>&1 | tee -a summary-$NAME.txt
+cg_anno.tcl cachegrind.out.* >$TYPE-$NAME.txt
+echo '*****************************************************' >>$TYPE-$NAME.txt
+sed 's/^[0-9=-]\{9\}/==00000==/' summary-$NAME.txt >>$TYPE-$NAME.txt
+if test "$NAME" != "$BASELINE" -a $doDiff -ne 0; then
+ fossil xdiff --tk -c 20 $TYPE-$BASELINE.txt $TYPE-$NAME.txt
fi
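(Typical use of the new flag, with arbitrary labels: run `sh json-speed-check.sh trunk --jsonb` and then `sh json-speed-check.sh x1 --jsonb`; because the two labels differ and --nodiff is not given, the script finishes by opening a fossil xdiff of the two cachegrind profiles.)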
ADDED test/json/jsonb-q1.txt
Index: test/json/jsonb-q1.txt
==================================================================
--- /dev/null
+++ test/json/jsonb-q1.txt
@@ -0,0 +1,24 @@
+.mode qbox
+.timer on
+.param set $label 'q87'
+SELECT rowid, x->>$label FROM data1 WHERE x->>$label IS NOT NULL;
+
+CREATE TEMP TABLE t2(x JSON TEXT);
+WITH RECURSIVE
+ c(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM c WHERE x<25000),
+ array1(y) AS (
+ SELECT json_group_array(
+ json_object('x',x,'y',random(),'z',hex(randomblob(50)))
+ )
+ FROM c
+ ),
+ c2(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c2 WHERE n<5)
+INSERT INTO t2(x)
+ SELECT jsonb_object('a',n,'b',n*2,'c',y,'d',3,'e',5,'f',6) FROM array1, c2;
+CREATE INDEX t2x1 ON t2(x->>'a');
+CREATE INDEX t2x2 ON t2(x->>'b');
+CREATE INDEX t2x3 ON t2(x->>'e');
+CREATE INDEX t2x4 ON t2(x->>'f');
+UPDATE t2 SET x=jsonb_replace(x,'$.f',(x->>'f')+1);
+UPDATE t2 SET x=jsonb_set(x,'$.e',(x->>'f')-1);
+UPDATE t2 SET x=jsonb_remove(x,'$.d');
Index: test/json101.test
==================================================================
--- test/json101.test
+++ test/json101.test
@@ -34,10 +34,13 @@
SELECT hex(json_array('String "\ Test'));
} {5B22537472696E67205C225C5C2054657374225D}
do_catchsql_test json101-1.3 {
SELECT json_array(1,printf('%.1000c','x'),x'abcd',3);
} {1 {JSON cannot hold BLOB values}}
+do_catchsql_test json101-1.3b {
+ SELECT jsonb_array(1,printf('%.1000c','x'),x'abcd',3);
+} {1 {JSON cannot hold BLOB values}}
do_execsql_test json101-1.4 {
SELECT json_array(-9223372036854775808,9223372036854775807,0,1,-1,
0.0, 1.0, -1.0, -1e99, +2e100,
'one','two','three',
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
@@ -45,40 +48,87 @@
'abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ',
'abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ',
'abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ',
99);
} {[-9223372036854775808,9223372036854775807,0,1,-1,0.0,1.0,-1.0,-1.0e+99,2.0e+100,"one","two","three",4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,null,21,22,23,24,25,26,27,28,29,30,31,"abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ","abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ","abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ",99]}
+do_execsql_test json101-1.4b {
+ SELECT json(jsonb_array(-9223372036854775808,9223372036854775807,0,1,-1,
+ 0.0, 1.0, -1.0, -1e99, +2e100,
+ 'one','two','three',
+ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, NULL, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 'abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 'abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ 99));
+} {[-9223372036854775808,9223372036854775807,0,1,-1,0.0,1.0,-1.0,-1.0e+99,2.0e+100,"one","two","three",4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,null,21,22,23,24,25,26,27,28,29,30,31,"abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ","abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ","abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ",99]}
do_execsql_test json101-2.1 {
SELECT json_object('a',1,'b',2.5,'c',null,'d','String Test');
} {{{"a":1,"b":2.5,"c":null,"d":"String Test"}}}
+do_execsql_test json101-2.1b {
+ SELECT json(jsonb_object('a',1,'b',2.5,'c',null,'d','String Test'));
+} {{{"a":1,"b":2.5,"c":null,"d":"String Test"}}}
do_catchsql_test json101-2.2 {
SELECT json_object('a',printf('%.1000c','x'),2,2.5);
} {1 {json_object() labels must be TEXT}}
+do_catchsql_test json101-2.2b {
+ SELECT jsonb_object('a',printf('%.1000c','x'),2,2.5);
+} {1 {json_object() labels must be TEXT}}
+do_execsql_test json101-2.2.2 {
+ SELECT json_object('a',json_array('xyx',77,4.5),'x',2.5);
+} {{{"a":["xyx",77,4.5],"x":2.5}}}
+do_execsql_test json101-2.2.2b {
+ SELECT json(jsonb_object('a',json_array('xyx',77,4.5),'x',2.5));
+} {{{"a":["xyx",77,4.5],"x":2.5}}}
+do_execsql_test json101-2.2.3 {
+ SELECT json_object('a',jsonb_array('xyx',77,4.5),'x',2.5);
+} {{{"a":["xyx",77,4.5],"x":2.5}}}
+do_execsql_test json101-2.2.3b {
+ SELECT json(jsonb_object('a',jsonb_array('xyx',77,4.5),'x',2.5));
+} {{{"a":["xyx",77,4.5],"x":2.5}}}
do_catchsql_test json101-2.3 {
SELECT json_object('a',1,'b');
} {1 {json_object() requires an even number of arguments}}
do_catchsql_test json101-2.4 {
SELECT json_object('a',printf('%.1000c','x'),'b',x'abcd');
} {1 {JSON cannot hold BLOB values}}
+do_execsql_test json101-2.5 {
+ SELECT json_object('a',printf('%.10c','x'),'b',jsonb_array(1,2,3));
+} {{{"a":"xxxxxxxxxx","b":[1,2,3]}}}
do_execsql_test json101-3.1 {
SELECT json_replace('{"a":1,"b":2}','$.a','[3,4,5]');
} {{{"a":"[3,4,5]","b":2}}}
+do_execsql_test json101-3.1b {
+ SELECT json(jsonb_replace('{"a":1,"b":2}','$.a','[3,4,5]'));
+} {{{"a":"[3,4,5]","b":2}}}
do_execsql_test json101-3.2 {
SELECT json_replace('{"a":1,"b":2}','$.a',json('[3,4,5]'));
} {{{"a":[3,4,5],"b":2}}}
+do_execsql_test json101-3.2b {
+ SELECT json_replace('{"a":1,"b":2}','$.a',jsonb('[3,4,5]'));
+} {{{"a":[3,4,5],"b":2}}}
do_execsql_test json101-3.3 {
SELECT json_type(json_set('{"a":1,"b":2}','$.b','{"x":3,"y":4}'),'$.b');
} {text}
+do_execsql_test json101-3.3b {
+ SELECT json_type(jsonb_set('{"a":1,"b":2}','$.b','{"x":3,"y":4}'),'$.b');
+} {text}
do_execsql_test json101-3.4 {
SELECT json_type(json_set('{"a":1,"b":2}','$.b',json('{"x":3,"y":4}')),'$.b');
} {object}
+do_execsql_test json101-3.4b {
+ SELECT json_type(jsonb_set('{"a":1,"b":2}','$.b',jsonb('{"x":3,"y":4}')),'$.b');
+} {object}
ifcapable vtab {
-do_execsql_test json101-3.5 {
- SELECT fullkey, atom, '|' FROM json_tree(json_set('{}','$.x',123,'$.x',456));
-} {{$} {} | {$.x} 456 |}
+ do_execsql_test json101-3.5 {
+ SELECT fullkey, atom, '|' FROM json_tree(json_set('{}','$.x',123,'$.x',456));
+ } {{$} {} | {$.x} 456 |}
+ do_execsql_test json101-3.5b {
+ SELECT fullkey, atom, '|' FROM json_tree(jsonb_set('{}','$.x',123,'$.x',456));
+ } {{$} {} | {$.x} 456 |}
}
# Per rfc7159, any JSON value is allowed at the top level, and whitespace
# is permitting before and/or after that value.
#
@@ -129,10 +179,17 @@
SELECT count(*) FROM j1 WHERE json_type(x) IN ('object','array');
SELECT x FROM j1
WHERE json_extract(x,'$')<>x
AND json_type(x) IN ('object','array');
} {4}
+do_execsql_test json101-4.10b {
+ CREATE TABLE j1b AS SELECT jsonb(x) AS "x" FROM j1;
+ SELECT count(*) FROM j1b WHERE json_type(x) IN ('object','array');
+ SELECT json(x) FROM j1b
+ WHERE json_extract(x,'$')<>json(x)
+ AND json_type(x) IN ('object','array');
+} {4}
do_execsql_test json101-5.1 {
CREATE TABLE j2(id INTEGER PRIMARY KEY, json, src);
INSERT INTO j2(id,json,src)
VALUES(1,'{
@@ -256,15 +313,21 @@
{ "id": "5004", "type": "Maple" }
]
}
]','https://adobe.github.io/Spry/samples/data_region/JSONDataSetSample.html');
SELECT count(*) FROM j2;
-} {3}
+ CREATE TABLE j2b(id INTEGER PRIMARY KEY, json, src);
+ INSERT INTO J2b(id,json,src) SELECT id, jsonb(json), src FROM j2;
+ SELECT count(*) FROM j2b;
+} {3 3}
do_execsql_test json101-5.2 {
SELECT id, json_valid(json), json_type(json), '|' FROM j2 ORDER BY id;
} {1 1 object | 2 1 object | 3 1 array |}
+do_execsql_test json101-5.2b {
+ SELECT id, json_valid(json,5), json_type(json), '|' FROM j2b ORDER BY id;
+} {1 1 object | 2 1 object | 3 1 array |}
ifcapable !vtab {
finish_test
return
}
@@ -272,10 +335,16 @@
# fullkey is always the same as path+key (with appropriate formatting)
#
do_execsql_test json101-5.3 {
SELECT j2.rowid, jx.rowid, fullkey, path, key
FROM j2, json_tree(j2.json) AS jx
+ WHERE fullkey!=(path || CASE WHEN typeof(key)=='integer' THEN '['||key||']'
+ ELSE '.'||key END);
+} {}
+do_execsql_test json101-5.3b {
+ SELECT j2b.rowid, jx.rowid, fullkey, path, key
+ FROM j2b, json_tree(j2b.json) AS jx
WHERE fullkey!=(path || CASE WHEN typeof(key)=='integer' THEN '['||key||']'
ELSE '.'||key END);
} {}
do_execsql_test json101-5.4 {
SELECT j2.rowid, jx.rowid, fullkey, path, key
@@ -368,10 +437,17 @@
DROP TABLE IF EXISTS t8;
CREATE TABLE t8(a,b);
INSERT INTO t8(a) VALUES('abc' || char(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35) || 'xyz');
UPDATE t8 SET b=json_array(a);
SELECT b FROM t8;
+} {{["abc\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f !\"#xyz"]}}
+do_execsql_test json101-8.1b {
+ DROP TABLE IF EXISTS t8;
+ CREATE TABLE t8(a,b);
+ INSERT INTO t8(a) VALUES('abc' || char(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35) || 'xyz');
+ UPDATE t8 SET b=jsonb_array(a);
+ SELECT json(b) FROM t8;
} {{["abc\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f !\"#xyz"]}}
do_execsql_test json101-8.2 {
SELECT a=json_extract(b,'$[0]') FROM t8;
} {1}
@@ -399,11 +475,11 @@
} {12345}
do_execsql_test json101-9.4 {
SELECT json_quote(null);
} {"null"}
do_catchsql_test json101-9.5 {
- SELECT json_quote(x'30313233');
+ SELECT json_quote(x'3031323334');
} {1 {JSON cannot hold BLOB values}}
do_catchsql_test json101-9.6 {
SELECT json_quote(123,456)
} {1 {wrong number of arguments to function json_quote()}}
do_catchsql_test json101-9.7 {
@@ -767,17 +843,26 @@
"summary.report":false}
}
}
}');
} {}
+
do_execsql_test json101-12.110 {
SELECT json_remove(x, '$.settings.layer2."dis.legomenon".forceDisplay')
+ FROM t12;
+} {{{"settings":{"layer2":{"hapax.legomenon":{"forceDisplay":true,"transliterate":true,"add.footnote":true,"summary.report":true},"dis.legomenon":{"transliterate":false,"add.footnote":false,"summary.report":true},"tris.legomenon":{"forceDisplay":true,"transliterate":false,"add.footnote":false,"summary.report":false}}}}}}
+do_execsql_test json101-12.110b {
+ SELECT json_remove(jsonb(x), '$.settings.layer2."dis.legomenon".forceDisplay')
FROM t12;
} {{{"settings":{"layer2":{"hapax.legomenon":{"forceDisplay":true,"transliterate":true,"add.footnote":true,"summary.report":true},"dis.legomenon":{"transliterate":false,"add.footnote":false,"summary.report":true},"tris.legomenon":{"forceDisplay":true,"transliterate":false,"add.footnote":false,"summary.report":false}}}}}}
do_execsql_test json101-12.120 {
SELECT json_extract(x, '$.settings.layer2."tris.legomenon"."summary.report"')
FROM t12;
+} {0}
+do_execsql_test json101-12.120b {
+ SELECT json_extract(jsonb(x), '$.settings.layer2."tris.legomenon"."summary.report"')
+ FROM t12;
} {0}
# 2018-01-26
# ticket https://www.sqlite.org/src/tktview/80177f0c226ff54f6ddd41
# Make sure the query planner knows about the arguments to table-valued functions.
@@ -838,20 +923,20 @@
#
# Bug reported via private email. See TH3 for more information.
#
do_execsql_test json101-15.100 {
SELECT * FROM JSON_EACH('{"a":1, "b":2}');
-} {a 1 integer 1 2 {} {$.a} {$} b 2 integer 2 4 {} {$.b} {$}}
+} {a 1 integer 1 1 {} {$.a} {$} b 2 integer 2 5 {} {$.b} {$}}
do_execsql_test json101-15.110 {
SELECT xyz.* FROM JSON_EACH('{"a":1, "b":2}') AS xyz;
-} {a 1 integer 1 2 {} {$.a} {$} b 2 integer 2 4 {} {$.b} {$}}
+} {a 1 integer 1 1 {} {$.a} {$} b 2 integer 2 5 {} {$.b} {$}}
do_execsql_test json101-15.120 {
SELECT * FROM (JSON_EACH('{"a":1, "b":2}'));
-} {a 1 integer 1 2 {} {$.a} {$} b 2 integer 2 4 {} {$.b} {$}}
+} {a 1 integer 1 1 {} {$.a} {$} b 2 integer 2 5 {} {$.b} {$}}
do_execsql_test json101-15.130 {
SELECT xyz.* FROM (JSON_EACH('{"a":1, "b":2}')) AS xyz;
-} {a 1 integer 1 2 {} {$.a} {$} b 2 integer 2 4 {} {$.b} {$}}
+} {a 1 integer 1 1 {} {$.a} {$} b 2 integer 2 5 {} {$.b} {$}}
# 2019-11-10
# Mailing list bug report on the handling of surrogate pairs
# in JSON.
#
@@ -887,11 +972,11 @@
do_execsql_test json101-18.4 {
SELECT json_extract('[3,{"a":4,"":[5,{"hi":6},7]},8]', '$[1].""[1]."hi"');
} {6}
do_catchsql_test json101-18.5 {
SELECT json_extract('{"":8}', '$.');
-} {1 {JSON path error near ''}}
+} {1 {bad JSON path: '$.'}}
# 2022-08-29 https://sqlite.org/forum/forumpost/9b9e4716c0d7bbd1
# This is not a problem specifically with JSON functions. It is
# a problem with transaction control. But the json() function makes
# the problem more easily accessible, so it is tested here.
@@ -1052,8 +1137,31 @@
do_execsql_test json101-23.2 {
SELECT j, j->>0, j->>1
FROM (SELECT json_set('[]','$[#]',0,'$[#]',1) AS j);
} {{[0,1]} 0 1}
-
+# Insert/Set/Replace where the path specifies substructure that
+# does not yet exist
+#
+proc tx x {return [string map [list ( \173 ) \175 ' \042 < \133 > \135] $x]}
+foreach {id start path ins set repl} {
+ 1 {{}} {$.a.b.c} ('a':('b':('c':9))) ('a':('b':('c':9))) ()
+ 2 {{a:4}} {$.a.b.c} ('a':4) ('a':4) ('a':4)
+ 3 {{a:{}}} {$.a.b.c} ('a':('b':('c':9))) ('a':('b':('c':9))) ('a':())
+ 4 {[0,1,2]} {$[3].a[0].b} <0,1,2,('a':<('b':9)>)> <0,1,2,('a':<('b':9)>)> <0,1,2>
+ 5 {[0,1,2]} {$[1].a[0].b} <0,1,2> <0,1,2> <0,1,2>
+ 6 {[0,{},2]} {$[1].a[0].b} <0,('a':<('b':9)>),2> <0,('a':<('b':9)>),2> <0,(),2>
+ 7 {[0,1,2]} {$[3][0].b} <0,1,2,<('b':9)>> <0,1,2,<('b':9)>> <0,1,2>
+ 8 {[0,1,2]} {$[1][0].b} <0,1,2> <0,1,2> <0,1,2>
+} {
+ do_execsql_test json101-24.$id.insert {
+ SELECT json_insert($start,$path,9);
+ } [list [tx $ins]]
+ do_execsql_test json101-24.$id.set {
+ SELECT json_set($start,$path,9);
+ } [list [tx $set]]
+ do_execsql_test json101-24.$id.replace {
+ SELECT json_replace($start,$path,9);
+ } [list [tx $repl]]
+}
finish_test
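A plain-SQL illustration of the path-creation rules exercised by the json101-24.* loop above (a sketch of the same behavior, not additional test cases; expected results are taken from the table of expectations):

    -- json_insert() and json_set() create missing intermediate objects and
    -- arrays named by the path; json_replace() only writes to paths that
    -- already exist.
    SELECT json_insert('{}', '$.a.b.c', 9);     -- {"a":{"b":{"c":9}}}
    SELECT json_set('{"a":4}', '$.a.b.c', 9);   -- {"a":4} ($.a exists but is not an object)
    SELECT json_replace('{}', '$.a.b.c', 9);    -- {}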
Index: test/json102.test
==================================================================
--- test/json102.test
+++ test/json102.test
@@ -19,182 +19,522 @@
source $testdir/tester.tcl
do_execsql_test json102-100 {
SELECT json_object('ex','[52,3.14159]');
} {{{"ex":"[52,3.14159]"}}}
+do_execsql_test json102-100b {
+ SELECT json(jsonb_object('ex','[52,3.14159]'));
+} {{{"ex":"[52,3.14159]"}}}
do_execsql_test json102-110 {
SELECT json_object('ex',json('[52,3.14159]'));
} {{{"ex":[52,3.14159]}}}
+do_execsql_test json102-110-2 {
+ SELECT json(jsonb_object('ex',json('[52,3.14159]')));
+} {{{"ex":[52,3.14159]}}}
+do_execsql_test json102-110-3 {
+ SELECT json_object('ex',jsonb('[52,3.14159]'));
+} {{{"ex":[52,3.14159]}}}
+do_execsql_test json102-110-4 {
+ SELECT json(jsonb_object('ex',jsonb('[52,3.14159]')));
+} {{{"ex":[52,3.14159]}}}
do_execsql_test json102-120 {
SELECT json_object('ex',json_array(52,3.14159));
} {{{"ex":[52,3.14159]}}}
+do_execsql_test json102-120-2 {
+ SELECT json(jsonb_object('ex',json_array(52,3.14159)));
+} {{{"ex":[52,3.14159]}}}
+do_execsql_test json102-120-3 {
+ SELECT json_object('ex',jsonb_array(52,3.14159));
+} {{{"ex":[52,3.14159]}}}
+do_execsql_test json102-120-4 {
+ SELECT json(jsonb_object('ex',jsonb_array(52,3.14159)));
+} {{{"ex":[52,3.14159]}}}
do_execsql_test json102-130 {
SELECT json(' { "this" : "is", "a": [ "test" ] } ');
} {{{"this":"is","a":["test"]}}}
+do_execsql_test json102-130b {
+ SELECT json(jsonb(' { "this" : "is", "a": [ "test" ] } '));
+} {{{"this":"is","a":["test"]}}}
do_execsql_test json102-140 {
SELECT json_array(1,2,'3',4);
} {{[1,2,"3",4]}}
+do_execsql_test json102-140b {
+ SELECT json(jsonb_array(1,2,'3',4));
+} {{[1,2,"3",4]}}
do_execsql_test json102-150 {
SELECT json_array('[1,2]');
} {{["[1,2]"]}}
+do_execsql_test json102-150b {
+ SELECT json(jsonb_array('[1,2]'));
+} {{["[1,2]"]}}
do_execsql_test json102-160 {
SELECT json_array(json_array(1,2));
} {{[[1,2]]}}
+do_execsql_test json102-160-2 {
+ SELECT json_array(jsonb_array(1,2));
+} {{[[1,2]]}}
+do_execsql_test json102-160-3 {
+ SELECT json(jsonb_array(json_array(1,2)));
+} {{[[1,2]]}}
+do_execsql_test json102-160-4 {
+ SELECT json(jsonb_array(jsonb_array(1,2)));
+} {{[[1,2]]}}
do_execsql_test json102-170 {
SELECT json_array(1,null,'3','[4,5]','{"six":7.7}');
} {{[1,null,"3","[4,5]","{\"six\":7.7}"]}}
+do_execsql_test json102-170b {
+ SELECT json(jsonb_array(1,null,'3','[4,5]','{"six":7.7}'));
+} {{[1,null,"3","[4,5]","{\"six\":7.7}"]}}
do_execsql_test json102-180 {
SELECT json_array(1,null,'3',json('[4,5]'),json('{"six":7.7}'));
} {{[1,null,"3",[4,5],{"six":7.7}]}}
+do_execsql_test json102-180-2 {
+ SELECT json_array(1,null,'3',jsonb('[4,5]'),json('{"six":7.7}'));
+} {{[1,null,"3",[4,5],{"six":7.7}]}}
+do_execsql_test json102-180-3 {
+ SELECT json(jsonb_array(1,null,'3',json('[4,5]'),json('{"six":7.7}')));
+} {{[1,null,"3",[4,5],{"six":7.7}]}}
+do_execsql_test json102-180-4 {
+ SELECT json(jsonb_array(1,null,'3',jsonb('[4,5]'),jsonb('{"six":7.7}')));
+} {{[1,null,"3",[4,5],{"six":7.7}]}}
do_execsql_test json102-190 {
SELECT json_array_length('[1,2,3,4]');
} {{4}}
+do_execsql_test json102-190b {
+ SELECT json_array_length(jsonb('[1,2,3,4]'));
+} {{4}}
do_execsql_test json102-191 {
SELECT json_array_length( json_remove('[1,2,3,4]','$[2]') );
} {{3}}
+do_execsql_test json102-191b {
+ SELECT json_array_length( jsonb_remove('[1,2,3,4]','$[2]') );
+} {{3}}
do_execsql_test json102-200 {
SELECT json_array_length('[1,2,3,4]', '$');
} {{4}}
+do_execsql_test json102-200b {
+ SELECT json_array_length(jsonb('[1,2,3,4]'), '$');
+} {{4}}
do_execsql_test json102-210 {
SELECT json_array_length('[1,2,3,4]', '$[2]');
} {{0}}
+do_execsql_test json102-210b {
+ SELECT json_array_length(jsonb('[1,2,3,4]'), '$[2]');
+} {{0}}
+do_execsql_test json102-220b {
+  SELECT json_array_length(jsonb('{"one":[1,2,3]}'));
+} {{0}}
do_execsql_test json102-220 {
SELECT json_array_length('{"one":[1,2,3]}');
} {{0}}
-do_execsql_test json102-230 {
- SELECT json_array_length('{"one":[1,2,3]}', '$.one');
+do_execsql_test json102-230b {
+ SELECT json_array_length(jsonb('{"one":[1,2,3]}'), '$.one');
} {{3}}
do_execsql_test json102-240 {
SELECT json_array_length('{"one":[1,2,3]}', '$.two');
} {{}}
+do_execsql_test json102-240b {
+ SELECT json_array_length(jsonb('{"one":[1,2,3]}'), '$.two');
+} {{}}
do_execsql_test json102-250 {
SELECT json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$');
} {{{"a":2,"c":[4,5,{"f":7}]}}}
+do_execsql_test json102-250-2 {
+ SELECT json_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$');
+} {{{"a":2,"c":[4,5,{"f":7}]}}}
+do_execsql_test json102-250-3 {
+ SELECT json(jsonb_extract('{"a":2,"c":[4,5,{"f":7}]}', '$'));
+} {{{"a":2,"c":[4,5,{"f":7}]}}}
+do_execsql_test json102-250-4 {
+ SELECT json(jsonb_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$'));
+} {{{"a":2,"c":[4,5,{"f":7}]}}}
do_execsql_test json102-260 {
SELECT json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c');
} {{[4,5,{"f":7}]}}
+do_execsql_test json102-260-2 {
+ SELECT json_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$.c');
+} {{[4,5,{"f":7}]}}
+do_execsql_test json102-260-3 {
+ SELECT json(jsonb_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c'));
+} {{[4,5,{"f":7}]}}
+do_execsql_test json102-260-4 {
+ SELECT json(jsonb_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$.c'));
+} {{[4,5,{"f":7}]}}
do_execsql_test json102-270 {
SELECT json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c[2]');
} {{{"f":7}}}
+do_execsql_test json102-270-2 {
+ SELECT json_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$.c[2]');
+} {{{"f":7}}}
+do_execsql_test json102-270-3 {
+ SELECT json(jsonb_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$.c[2]'));
+} {{{"f":7}}}
+do_execsql_test json102-270-4 {
+ SELECT json(jsonb_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c[2]'));
+} {{{"f":7}}}
do_execsql_test json102-280 {
SELECT json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c[2].f');
} {{7}}
+do_execsql_test json102-280b {
+ SELECT jsonb_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.c[2].f');
+} {{7}}
do_execsql_test json102-290 {
SELECT json_extract('{"a":2,"c":[4,5],"f":7}','$.c','$.a');
} {{[[4,5],2]}}
+do_execsql_test json102-290-2 {
+ SELECT json_extract(jsonb('{"a":2,"c":[4,5],"f":7}'),'$.c','$.a');
+} {{[[4,5],2]}}
+do_execsql_test json102-290-3 {
+ SELECT json(jsonb_extract('{"a":2,"c":[4,5],"f":7}','$.c','$.a'));
+} {{[[4,5],2]}}
+do_execsql_test json102-290-4 {
+ SELECT json(jsonb_extract(jsonb('{"a":2,"c":[4,5],"f":7}'),'$.c','$.a'));
+} {{[[4,5],2]}}
do_execsql_test json102-300 {
SELECT json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.x');
} {{}}
+do_execsql_test json102-300b {
+ SELECT jsonb_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.x');
+} {{}}
do_execsql_test json102-310 {
SELECT json_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.x', '$.a');
} {{[null,2]}}
+do_execsql_test json102-310-2 {
+ SELECT json_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$.x', '$.a');
+} {{[null,2]}}
+do_execsql_test json102-310-3 {
+ SELECT json(jsonb_extract(jsonb('{"a":2,"c":[4,5,{"f":7}]}'), '$.x', '$.a'));
+} {{[null,2]}}
+do_execsql_test json102-310-4 {
+ SELECT json(jsonb_extract('{"a":2,"c":[4,5,{"f":7}]}', '$.x', '$.a'));
+} {{[null,2]}}
do_execsql_test json102-320 {
SELECT json_insert('{"a":2,"c":4}', '$.a', 99);
} {{{"a":2,"c":4}}}
+do_execsql_test json102-320-2 {
+ SELECT json_insert(jsonb('{"a":2,"c":4}'), '$.a', 99);
+} {{{"a":2,"c":4}}}
+do_execsql_test json102-320-3 {
+ SELECT json(jsonb_insert('{"a":2,"c":4}', '$.a', 99));
+} {{{"a":2,"c":4}}}
+do_execsql_test json102-320-4 {
+ SELECT json(jsonb_insert(jsonb('{"a":2,"c":4}'), '$.a', 99));
+} {{{"a":2,"c":4}}}
do_execsql_test json102-330 {
SELECT json_insert('{"a":2,"c":4}', '$.e', 99);
} {{{"a":2,"c":4,"e":99}}}
+do_execsql_test json102-330-2 {
+ SELECT json_insert(jsonb('{"a":2,"c":4}'), '$.e', 99);
+} {{{"a":2,"c":4,"e":99}}}
+do_execsql_test json102-330-3 {
+ SELECT json(jsonb_insert('{"a":2,"c":4}', '$.e', 99));
+} {{{"a":2,"c":4,"e":99}}}
+do_execsql_test json102-330-4 {
+ SELECT json(jsonb_insert(jsonb('{"a":2,"c":4}'), '$.e', 99));
+} {{{"a":2,"c":4,"e":99}}}
do_execsql_test json102-340 {
SELECT json_replace('{"a":2,"c":4}', '$.a', 99);
} {{{"a":99,"c":4}}}
+do_execsql_test json102-340-2 {
+ SELECT json_replace(jsonb('{"a":2,"c":4}'), '$.a', 99);
+} {{{"a":99,"c":4}}}
+do_execsql_test json102-340-3 {
+ SELECT json(jsonb_replace('{"a":2,"c":4}', '$.a', 99));
+} {{{"a":99,"c":4}}}
+do_execsql_test json102-340-4 {
+ SELECT json(jsonb_replace(jsonb('{"a":2,"c":4}'), '$.a', 99));
+} {{{"a":99,"c":4}}}
do_execsql_test json102-350 {
SELECT json_replace('{"a":2,"c":4}', '$.e', 99);
} {{{"a":2,"c":4}}}
+do_execsql_test json102-350-2 {
+ SELECT json_replace(jsonb('{"a":2,"c":4}'), '$.e', 99);
+} {{{"a":2,"c":4}}}
+do_execsql_test json102-350-3 {
+ SELECT json(jsonb_replace('{"a":2,"c":4}', '$.e', 99));
+} {{{"a":2,"c":4}}}
+do_execsql_test json102-350-4 {
+ SELECT json(jsonb_replace(jsonb('{"a":2,"c":4}'), '$.e', 99));
+} {{{"a":2,"c":4}}}
do_execsql_test json102-360 {
SELECT json_set('{"a":2,"c":4}', '$.a', 99);
} {{{"a":99,"c":4}}}
+do_execsql_test json102-360-2 {
+ SELECT json_set(jsonb('{"a":2,"c":4}'), '$.a', 99);
+} {{{"a":99,"c":4}}}
+do_execsql_test json102-360-3 {
+ SELECT json(jsonb_set('{"a":2,"c":4}', '$.a', 99));
+} {{{"a":99,"c":4}}}
+do_execsql_test json102-360-4 {
+ SELECT json(jsonb_set(jsonb('{"a":2,"c":4}'), '$.a', 99));
+} {{{"a":99,"c":4}}}
do_execsql_test json102-370 {
SELECT json_set('{"a":2,"c":4}', '$.e', 99);
} {{{"a":2,"c":4,"e":99}}}
+do_execsql_test json102-370-2 {
+ SELECT json_set(jsonb('{"a":2,"c":4}'), '$.e', 99);
+} {{{"a":2,"c":4,"e":99}}}
+do_execsql_test json102-370-3 {
+ SELECT json(jsonb_set('{"a":2,"c":4}', '$.e', 99));
+} {{{"a":2,"c":4,"e":99}}}
+do_execsql_test json102-370-4 {
+ SELECT json(jsonb_set(jsonb('{"a":2,"c":4}'), '$.e', 99));
+} {{{"a":2,"c":4,"e":99}}}
do_execsql_test json102-380 {
SELECT json_set('{"a":2,"c":4}', '$.c', '[97,96]');
} {{{"a":2,"c":"[97,96]"}}}
+do_execsql_test json102-380-2 {
+ SELECT json_set(jsonb('{"a":2,"c":4}'), '$.c', '[97,96]');
+} {{{"a":2,"c":"[97,96]"}}}
+do_execsql_test json102-380-3 {
+ SELECT json(jsonb_set('{"a":2,"c":4}', '$.c', '[97,96]'));
+} {{{"a":2,"c":"[97,96]"}}}
+do_execsql_test json102-380-4 {
+ SELECT json(jsonb_set(jsonb('{"a":2,"c":4}'), '$.c', '[97,96]'));
+} {{{"a":2,"c":"[97,96]"}}}
do_execsql_test json102-390 {
SELECT json_set('{"a":2,"c":4}', '$.c', json('[97,96]'));
} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-390-2 {
+ SELECT json_set(jsonb('{"a":2,"c":4}'), '$.c', json('[97,96]'));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-390-3 {
+ SELECT json(jsonb_set('{"a":2,"c":4}', '$.c', json('[97,96]')));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-390-4 {
+ SELECT json(jsonb_set(jsonb('{"a":2,"c":4}'), '$.c', json('[97,96]')));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-390-5 {
+ SELECT json_set('{"a":2,"c":4}', '$.c', jsonb('[97,96]'));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-390-6 {
+ SELECT json_set(jsonb('{"a":2,"c":4}'), '$.c', jsonb('[97,96]'));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-390-7 {
+ SELECT json(jsonb_set('{"a":2,"c":4}', '$.c', jsonb('[97,96]')));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-390-8 {
+ SELECT json(jsonb_set(jsonb('{"a":2,"c":4}'), '$.c', jsonb('[97,96]')));
+} {{{"a":2,"c":[97,96]}}}
do_execsql_test json102-400 {
SELECT json_set('{"a":2,"c":4}', '$.c', json_array(97,96));
} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-400-2 {
+ SELECT json_set(jsonb('{"a":2,"c":4}'), '$.c', json_array(97,96));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-400-3 {
+ SELECT json(jsonb_set('{"a":2,"c":4}', '$.c', json_array(97,96)));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-400-4 {
+ SELECT json(jsonb_set(jsonb('{"a":2,"c":4}'), '$.c', json_array(97,96)));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-400-5 {
+ SELECT json_set('{"a":2,"c":4}', '$.c', jsonb_array(97,96));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-400-6 {
+ SELECT json_set(jsonb('{"a":2,"c":4}'), '$.c', jsonb_array(97,96));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-400-7 {
+ SELECT json(jsonb_set('{"a":2,"c":4}', '$.c', jsonb_array(97,96)));
+} {{{"a":2,"c":[97,96]}}}
+do_execsql_test json102-400-8 {
+ SELECT json(jsonb_set(jsonb('{"a":2,"c":4}'), '$.c', jsonb_array(97,96)));
+} {{{"a":2,"c":[97,96]}}}
do_execsql_test json102-410 {
SELECT json_object('a',2,'c',4);
} {{{"a":2,"c":4}}}
+do_execsql_test json102-410b {
+ SELECT json(jsonb_object('a',2,'c',4));
+} {{{"a":2,"c":4}}}
do_execsql_test json102-420 {
SELECT json_object('a',2,'c','{e:5}');
} {{{"a":2,"c":"{e:5}"}}}
+do_execsql_test json102-420b {
+ SELECT json(jsonb_object('a',2,'c','{e:5}'));
+} {{{"a":2,"c":"{e:5}"}}}
do_execsql_test json102-430 {
SELECT json_object('a',2,'c',json_object('e',5));
} {{{"a":2,"c":{"e":5}}}}
+do_execsql_test json102-430-2 {
+ SELECT json(jsonb_object('a',2,'c',json_object('e',5)));
+} {{{"a":2,"c":{"e":5}}}}
+do_execsql_test json102-430-3 {
+ SELECT json_object('a',2,'c',jsonb_object('e',5));
+} {{{"a":2,"c":{"e":5}}}}
+do_execsql_test json102-430-4 {
+ SELECT json(jsonb_object('a',2,'c',jsonb_object('e',5)));
+} {{{"a":2,"c":{"e":5}}}}
do_execsql_test json102-440 {
SELECT json_remove('[0,1,2,3,4]','$[2]');
} {{[0,1,3,4]}}
+do_execsql_test json102-440-2 {
+ SELECT json_remove(jsonb('[0,1,2,3,4]'),'$[2]');
+} {{[0,1,3,4]}}
+do_execsql_test json102-440-3 {
+ SELECT json(jsonb_remove('[0,1,2,3,4]','$[2]'));
+} {{[0,1,3,4]}}
+do_execsql_test json102-440-4 {
+ SELECT json(jsonb_remove(jsonb('[0,1,2,3,4]'),'$[2]'));
+} {{[0,1,3,4]}}
do_execsql_test json102-450 {
SELECT json_remove('[0,1,2,3,4]','$[2]','$[0]');
} {{[1,3,4]}}
+do_execsql_test json102-450-2 {
+ SELECT json_remove(jsonb('[0,1,2,3,4]'),'$[2]','$[0]');
+} {{[1,3,4]}}
+do_execsql_test json102-450-3 {
+ SELECT json(jsonb_remove('[0,1,2,3,4]','$[2]','$[0]'));
+} {{[1,3,4]}}
+do_execsql_test json102-450-4 {
+ SELECT json(jsonb_remove(jsonb('[0,1,2,3,4]'),'$[2]','$[0]'));
+} {{[1,3,4]}}
do_execsql_test json102-460 {
SELECT json_remove('[0,1,2,3,4]','$[0]','$[2]');
} {{[1,2,4]}}
+do_execsql_test json102-460-2 {
+ SELECT json_remove(jsonb('[0,1,2,3,4]'),'$[0]','$[2]');
+} {{[1,2,4]}}
+do_execsql_test json102-460-3 {
+ SELECT json(jsonb_remove('[0,1,2,3,4]','$[0]','$[2]'));
+} {{[1,2,4]}}
+do_execsql_test json102-460-4 {
+ SELECT json(jsonb_remove(jsonb('[0,1,2,3,4]'),'$[0]','$[2]'));
+} {{[1,2,4]}}
do_execsql_test json102-470 {
SELECT json_remove('{"x":25,"y":42}');
} {{{"x":25,"y":42}}}
+do_execsql_test json102-470-2 {
+ SELECT json_remove(jsonb('{"x":25,"y":42}'));
+} {{{"x":25,"y":42}}}
+do_execsql_test json102-470-3 {
+ SELECT json(jsonb_remove('{"x":25,"y":42}'));
+} {{{"x":25,"y":42}}}
+do_execsql_test json102-470-4 {
+ SELECT json(jsonb_remove(jsonb('{"x":25,"y":42}')));
+} {{{"x":25,"y":42}}}
do_execsql_test json102-480 {
SELECT json_remove('{"x":25,"y":42}','$.z');
} {{{"x":25,"y":42}}}
+do_execsql_test json102-480-2 {
+ SELECT json_remove(jsonb('{"x":25,"y":42}'),'$.z');
+} {{{"x":25,"y":42}}}
+do_execsql_test json102-480-3 {
+ SELECT json(jsonb_remove('{"x":25,"y":42}','$.z'));
+} {{{"x":25,"y":42}}}
+do_execsql_test json102-480-4 {
+ SELECT json(jsonb_remove(jsonb('{"x":25,"y":42}'),'$.z'));
+} {{{"x":25,"y":42}}}
do_execsql_test json102-490 {
SELECT json_remove('{"x":25,"y":42}','$.y');
} {{{"x":25}}}
+do_execsql_test json102-490-2 {
+ SELECT json_remove(jsonb('{"x":25,"y":42}'),'$.y');
+} {{{"x":25}}}
+do_execsql_test json102-490-3 {
+ SELECT json(jsonb_remove('{"x":25,"y":42}','$.y'));
+} {{{"x":25}}}
+do_execsql_test json102-490-4 {
+ SELECT json(jsonb_remove(jsonb('{"x":25,"y":42}'),'$.y'));
+} {{{"x":25}}}
do_execsql_test json102-500 {
SELECT json_remove('{"x":25,"y":42}','$');
} {{}}
+do_execsql_test json102-500-2 {
+ SELECT json_remove(jsonb('{"x":25,"y":42}'),'$');
+} {{}}
+do_execsql_test json102-500-3 {
+ SELECT json(jsonb_remove('{"x":25,"y":42}','$'));
+} {{}}
+do_execsql_test json102-500-4 {
+ SELECT json(jsonb_remove(jsonb('{"x":25,"y":42}'),'$'));
+} {{}}
do_execsql_test json102-510 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}');
} {{object}}
+do_execsql_test json102-510b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778');
+} {{object}}
do_execsql_test json102-520 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$');
} {{object}}
+do_execsql_test json102-520b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$');
+} {{object}}
do_execsql_test json102-530 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a');
} {{array}}
+do_execsql_test json102-530b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a');
+} {{array}}
do_execsql_test json102-540 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[0]');
} {{integer}}
+do_execsql_test json102-540b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a[0]');
+} {{integer}}
do_execsql_test json102-550 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[1]');
} {{real}}
+do_execsql_test json102-550b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a[1]');
+} {{real}}
do_execsql_test json102-560 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[2]');
} {{true}}
+do_execsql_test json102-560b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a[2]');
+} {{true}}
do_execsql_test json102-570 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[3]');
} {{false}}
+do_execsql_test json102-570b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a[3]');
+} {{false}}
do_execsql_test json102-580 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[4]');
} {{null}}
+do_execsql_test json102-580b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a[4]');
+} {{null}}
do_execsql_test json102-590 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[5]');
} {{text}}
+do_execsql_test json102-590b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a[5]');
+} {{text}}
do_execsql_test json102-600 {
SELECT json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[6]');
} {{}}
+do_execsql_test json102-600b {
+ SELECT json_type(x'cc0f1761cb0b133235332e350102001778','$.a[6]');
+} {{}}
do_execsql_test json102-610 {
SELECT json_valid(char(123)||'"x":35'||char(125));
} {{1}}
do_execsql_test json102-620 {
SELECT json_valid(char(123)||'"x":35');
} {{0}}
ifcapable vtab {
do_execsql_test json102-1000 {
- CREATE TABLE user(name,phone);
+ CREATE TABLE user(name,phone,phoneb);
INSERT INTO user(name,phone) VALUES
('Alice','["919-555-2345","804-555-3621"]'),
('Bob','["201-555-8872"]'),
('Cindy','["704-555-9983"]'),
('Dave','["336-555-8421","704-555-4321","803-911-4421"]');
+ UPDATE user SET phoneb=jsonb(phone);
SELECT DISTINCT user.name
FROM user, json_each(user.phone)
WHERE json_each.value LIKE '704-%'
ORDER BY 1;
+} {Cindy Dave}
+do_execsql_test json102-1000b {
+ SELECT DISTINCT user.name
+ FROM user, json_each(user.phoneb)
+ WHERE json_each.value LIKE '704-%'
+ ORDER BY 1;
} {Cindy Dave}
do_execsql_test json102-1010 {
UPDATE user
SET phone=json_extract(phone,'$[0]')
@@ -251,10 +591,16 @@
SELECT big.rowid, fullkey, value
FROM big, json_tree(big.json)
WHERE json_tree.type NOT IN ('object','array')
ORDER BY +big.rowid, +json_tree.id
} $correct_answer
+do_execsql_test json102-1110b {
+ SELECT big.rowid, fullkey, value
+ FROM big, json_tree(jsonb(big.json))
+ WHERE json_tree.type NOT IN ('object','array')
+ ORDER BY +big.rowid, +json_tree.id
+} $correct_answer
do_execsql_test json102-1120 {
SELECT big.rowid, fullkey, atom
FROM big, json_tree(big.json)
WHERE atom IS NOT NULL
ORDER BY +big.rowid, +json_tree.id
Index: test/json105.test
==================================================================
--- test/json105.test
+++ test/json105.test
@@ -11,11 +11,11 @@
# This file implements tests for "[#]" extension to json-path
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
-set testprefix json104
+set testprefix json105
# This is the example from pages 2 and 3 of RFC-7396
db eval {
CREATE TABLE t1(j);
INSERT INTO t1(j) VALUES('{"a":1,"b":[1,[2,3],4],"c":99}');
@@ -94,20 +94,20 @@
json_replace_test 80 {'$.b[#-1]','AAA','$.b[#-1]','BBB'} \
{'{"a":1,"b":[1,[2,3],"BBB"],"c":99}'}
do_catchsql_test json105-6.10 {
SELECT json_extract(j, '$.b[#-]') FROM t1;
-} {1 {JSON path error near '[#-]'}}
+} {1 {bad JSON path: '$.b[#-]'}}
do_catchsql_test json105-6.20 {
SELECT json_extract(j, '$.b[#9]') FROM t1;
-} {1 {JSON path error near '[#9]'}}
+} {1 {bad JSON path: '$.b[#9]'}}
do_catchsql_test json105-6.30 {
SELECT json_extract(j, '$.b[#+2]') FROM t1;
-} {1 {JSON path error near '[#+2]'}}
+} {1 {bad JSON path: '$.b[#+2]'}}
do_catchsql_test json105-6.40 {
SELECT json_extract(j, '$.b[#-1') FROM t1;
-} {1 {JSON path error near '[#-1'}}
+} {1 {bad JSON path: '$.b[#-1'}}
do_catchsql_test json105-6.50 {
SELECT json_extract(j, '$.b[#-1x]') FROM t1;
-} {1 {JSON path error near '[#-1x]'}}
+} {1 {bad JSON path: '$.b[#-1x]'}}
finish_test
ADDED test/json106.test
Index: test/json106.test
==================================================================
--- /dev/null
+++ test/json106.test
@@ -0,0 +1,73 @@
+# 2023-12-18
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# Invariant tests for JSON built around the randomjson extension
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix json106
+
+# These tests require virtual table "json_tree" to run.
+ifcapable !vtab { finish_test ; return }
+
+load_static_extension db randomjson
+db eval {
+ CREATE TEMP TABLE t1(j0,j5,p);
+ CREATE TEMP TABLE kv(n,key,val);
+}
+unset -nocomplain ii
+for {set ii 1} {$ii<=5000} {incr ii} {
+ do_execsql_test $ii.1 {
+ DELETE FROM t1;
+ INSERT INTO t1(j0,j5) VALUES(random_json($ii),random_json5($ii));
+ SELECT json_valid(j0), json_valid(j5,2) FROM t1;
+ } {1 1}
+ do_execsql_test $ii.2 {
+ SELECT count(*)
+ FROM t1, json_tree(j0) AS rt
+ WHERE rt.type NOT IN ('object','array')
+ AND rt.atom IS NOT (j0 ->> rt.fullkey);
+ } 0
+ do_execsql_test $ii.3 {
+ SELECT count(*)
+ FROM t1, json_tree(j5) AS rt
+ WHERE rt.type NOT IN ('object','array')
+ AND rt.atom IS NOT (j0 ->> rt.fullkey);
+ } 0
+ do_execsql_test $ii.4 {
+ DELETE FROM kv;
+ INSERT INTO kv
+ SELECT rt.rowid, rt.fullkey, rt.atom
+ FROM t1, json_tree(j0) AS rt
+ WHERE rt.type NOT IN ('object','array');
+ }
+ do_execsql_test $ii.5 {
+ SELECT count(*)
+ FROM t1, kv
+ WHERE key NOT LIKE '%]'
+ AND json_remove(j5,key)->>key IS NOT NULL
+ } 0
+ do_execsql_test $ii.6 {
+ SELECT count(*)
+ FROM t1, kv
+ WHERE key NOT LIKE '%]'
+ AND json_insert(json_remove(j5,key),key,val)->>key IS NOT val
+ } 0
+ do_execsql_test $ii.7 {
+ UPDATE t1 SET p=json_patch(j0,j5);
+ SELECT count(*)
+ FROM t1, kv
+ WHERE p->>key IS NOT val
+ } 0
+}
+
+
+finish_test
ADDED test/json107.test
Index: test/json107.test
==================================================================
--- /dev/null
+++ test/json107.test
@@ -0,0 +1,86 @@
+# 2024-01-23
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Legacy JSON bug: If the input is a BLOB that when cast into TEXT looks
+# like valid JSON, then treat it as valid JSON.
+#
+# The original intent of the JSON functions was to raise an error on any
+# BLOB input. That intent was clearly documented, but the code failed to
+# implement it.  Subsequently, many applications began to depend on the
+# incorrect behavior, especially apps that used readfile() to read JSON
+# content, since readfile() returns a BLOB. So we need to support the
+# bug moving forward.
+#
+# The tests in this file verify that the original buggy behavior is
+# preserved.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix json107
+
+if {[db one {PRAGMA encoding}]!="UTF-8"} {
+ # These tests only work for a UTF-8 encoding.
+ finish_test
+ return
+}
+
+do_execsql_test 1.1 {
+ SELECT json_valid( CAST('{"a":1}' AS BLOB) );
+} 1
+do_execsql_test 1.1.1 {
+ SELECT json_valid( CAST('{"a":1}' AS BLOB), 1);
+} 1
+do_execsql_test 1.1.2 {
+ SELECT json_valid( CAST('{"a":1}' AS BLOB), 2);
+} 1
+do_execsql_test 1.1.4 {
+ SELECT json_valid( CAST('{"a":1}' AS BLOB), 4);
+} 0
+do_execsql_test 1.1.8 {
+ SELECT json_valid( CAST('{"a":1}' AS BLOB), 8);
+} 0
+
+do_execsql_test 1.2.1 {
+ SELECT CAST('{"a":123}' AS blob) -> 'a';
+} 123
+do_execsql_test 1.2.2 {
+ SELECT CAST('{"a":123}' AS blob) ->> 'a';
+} 123
+do_execsql_test 1.2.3 {
+ SELECT json_extract(CAST('{"a":123}' AS blob), '$.a');
+} 123
+do_execsql_test 1.3 {
+ SELECT json_insert(CAST('{"a":123}' AS blob),'$.b',456);
+} {{{"a":123,"b":456}}}
+do_execsql_test 1.4 {
+ SELECT json_remove(CAST('{"a":123,"b":456}' AS blob),'$.a');
+} {{{"b":456}}}
+do_execsql_test 1.5 {
+ SELECT json_set(CAST('{"a":123,"b":456}' AS blob),'$.a',789);
+} {{{"a":789,"b":456}}}
+do_execsql_test 1.6 {
+ SELECT json_replace(CAST('{"a":123,"b":456}' AS blob),'$.a',789);
+} {{{"a":789,"b":456}}}
+do_execsql_test 1.7 {
+ SELECT json_type(CAST('{"a":123,"b":456}' AS blob));
+} object
+do_execsql_test 1.8 {
+ SELECT json(CAST('{"a":123,"b":456}' AS blob));
+} {{{"a":123,"b":456}}}
+
+ifcapable vtab {
+ do_execsql_test 2.1 {
+ SELECT key, value FROM json_tree( CAST('{"a":123,"b":456}' AS blob) )
+ WHERE atom;
+ } {a 123 b 456}
+}
+finish_test
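The usage pattern that motivated keeping this behavior is reading JSON from disk in the CLI; a minimal sketch (assumes the fileio extension that provides readfile(), and a hypothetical file name):

    -- readfile() returns a BLOB; because its bytes look like JSON text,
    -- the JSON functions accept it rather than raising an error.
    SELECT json_extract(readfile('settings.json'), '$.a');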
Index: test/json501.test
==================================================================
--- test/json501.test
+++ test/json501.test
@@ -250,17 +250,17 @@
###############################################################################
# 9) Numbers may be IEEE 754 positive infinity, negative infinity, and NaN.
do_execsql_test 9.1 {
WITH c(x) AS (VALUES('{x: +Infinity}')) SELECT x->>'x', json(x) FROM c;
-} {Inf {{"x":9.0e999}}}
+} {Inf {{"x":9e999}}}
do_execsql_test 9.2 {
WITH c(x) AS (VALUES('{x: -Infinity}')) SELECT x->>'x', json(x) FROM c;
-} {-Inf {{"x":-9.0e999}}}
+} {-Inf {{"x":-9e999}}}
do_execsql_test 9.3 {
WITH c(x) AS (VALUES('{x: Infinity}')) SELECT x->>'x', json(x) FROM c;
-} {Inf {{"x":9.0e999}}}
+} {Inf {{"x":9e999}}}
do_execsql_test 9.4 {
WITH c(x) AS (VALUES('{x: NaN}')) SELECT x->>'x', json(x) FROM c;
} {{} {{"x":null}}}
###############################################################################
@@ -303,7 +303,34 @@
# 2023-11-08 forum/forumpost/ddcad3e884
#
do_execsql_test 13.1 {
SELECT json('{x:''a "b" c''}');
} {{{"x":"a \"b\" c"}}}
+
+# 2024-01-31
+# Allow control characters within JSON5 string literals.
+#
+for {set c 1} {$c<=0x1f} {incr c} {
+ do_execsql_test 14.$c.1 {
+ SELECT json_valid('"abc' || char($c) || 'xyz"');
+ } {0}
+ do_execsql_test 14.$c.2 {
+ SELECT json_valid('"abc' || char($c) || 'xyz"', 2);
+ } {1}
+ switch $c {
+ 8 {set e "\\b"}
+ 9 {set e "\\t"}
+ 10 {set e "\\n"}
+ 12 {set e "\\f"}
+ 13 {set e "\\r"}
+ default {set e [format "\\u00%02x" $c]}
+ }
+ do_execsql_test 14.$c.3 {
+ SELECT json('{label:"abc' || char($c) || 'xyz"}');
+ } "{{\"label\":\"abc${e}xyz\"}}"
+ do_execsql_test 14.$c.4 {
+ SELECT jsonb('{label:"abc' || char($c) || 'xyz"}') -> '$';
+ } "{{\"label\":\"abc${e}xyz\"}}"
+}
+
finish_test
Index: test/json502.test
==================================================================
--- test/json502.test
+++ test/json502.test
@@ -34,7 +34,34 @@
} {1 {malformed JSON}}
do_catchsql_test 2.3 {
SELECT '{a:null,{"h":[1,[1,2,3]],"j":"abc"}:true}'->'$h[#-1]';
} {1 {malformed JSON}}
+# Verify that escaped label names are compared correctly.
+#
+do_execsql_test 3.1 {
+ SELECT '{"a\x62c":123}' ->> 'abc';
+} 123
+do_execsql_test 3.2 {
+ SELECT '{"abc":123}' ->> 'a\x62c';
+} 123
+
+db null null
+do_execsql_test 3.3 {
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1(x);
+ INSERT INTO t1 VALUES(json_insert('{}','$.a\',111,'$."b\\"',222));
+ INSERT INTO t1 VALUES(jsonb_insert('{}','$.a\',111,'$."b\\"',222));
+ SELECT x->'$.a\', x->'$.a\\', x->'$."a\\"', x->'$."b\\"' FROM t1;
+} {111 null 111 222 111 null 111 222}
+
+do_execsql_test 3.4 {
+ SELECT json_patch('{"a\x62c":123}','{"ab\x63":456}') ->> 'abc';
+} 456
+
+ifcapable vtab {
+ do_execsql_test 4.1 {
+ SELECT * FROM json_tree('{"\u0017":1}','$."\x17"');
+ } {{\x17} 1 integer 1 1 null {$."\x17"} {$}}
+}
finish_test
ADDED test/jsonb01.test
Index: test/jsonb01.test
==================================================================
--- /dev/null
+++ test/jsonb01.test
@@ -0,0 +1,53 @@
+# 2023-11-15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# Test cases for JSONB
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_execsql_test jsonb01-1.1 {
+ CREATE TABLE t1(x JSON BLOB);
+ INSERT INTO t1 VALUES(jsonb('{a:5,b:{x:10,y:11},c:[1,2,3,4]}'));
+}
+foreach {id path res} {
+ 1 {$.a} {{{"b":{"x":10,"y":11},"c":[1,2,3,4]}}}
+ 2 {$.b} {{{"a":5,"c":[1,2,3,4]}}}
+ 3 {$.c} {{{"a":5,"b":{"x":10,"y":11}}}}
+ 4 {$.d} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,3,4]}}}
+ 5 {$.b.x} {{{"a":5,"b":{"y":11},"c":[1,2,3,4]}}}
+ 6 {$.b.y} {{{"a":5,"b":{"x":10},"c":[1,2,3,4]}}}
+ 7 {$.c[0]} {{{"a":5,"b":{"x":10,"y":11},"c":[2,3,4]}}}
+ 8 {$.c[1]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,3,4]}}}
+ 9 {$.c[2]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,4]}}}
+ 10 {$.c[3]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,3]}}}
+ 11 {$.c[4]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,3,4]}}}
+ 12 {$.c[#]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,3,4]}}}
+ 13 {$.c[#-1]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,3]}}}
+ 14 {$.c[#-2]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,4]}}}
+ 15 {$.c[#-3]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,3,4]}}}
+ 16 {$.c[#-4]} {{{"a":5,"b":{"x":10,"y":11},"c":[2,3,4]}}}
+ 17 {$.c[#-5]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,3,4]}}}
+ 18 {$.c[#-6]} {{{"a":5,"b":{"x":10,"y":11},"c":[1,2,3,4]}}}
+} {
+ do_execsql_test jsonb01-1.2.$id.1 {
+ SELECT json(jsonb_remove(x,$path)) FROM t1;
+ } $res
+ do_execsql_test jsonb01-1.2.$id.2 {
+ SELECT json_remove(x,$path) FROM t1;
+ } $res
+}
+
+do_catchsql_test jsonb01-2.0 {
+ SELECT x'8ce6ffffffff171333' -> '$';
+} {1 {malformed JSON}}
+
+finish_test
ADDED test/literal.test
Index: test/literal.test
==================================================================
--- /dev/null
+++ test/literal.test
@@ -0,0 +1,62 @@
+# 2024-01-19
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file implements tests for SQL literals
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set ::testprefix literal
+
+proc test_literal {tn lit type val} {
+ do_execsql_test $tn.1 "SELECT typeof( $lit ), $lit" [list $type $val]
+
+ ifcapable altertable {
+ do_execsql_test $tn.2 "
+ DROP TABLE IF EXISTS x1;
+ CREATE TABLE x1(a);
+ INSERT INTO x1 VALUES(123);
+ ALTER TABLE x1 ADD COLUMN b DEFAULT $lit ;
+ SELECT typeof(b), b FROM x1;
+ " [list $type $val]
+ }
+
+ do_execsql_test $tn.3 "
+ DROP TABLE IF EXISTS x1;
+ CREATE TABLE x1(a DEFAULT $lit);
+ INSERT INTO x1 DEFAULT VALUES;
+ SELECT typeof(a), a FROM x1;
+ " [list $type $val]
+}
+
+
+test_literal 1.0 45 integer 45
+test_literal 1.1 0xFF integer 255
+test_literal 1.2 0xFFFFFFFF integer [expr 0xFFFFFFFF]
+test_literal 1.3 0x123FFFFFFFF integer [expr 0x123FFFFFFFF]
+test_literal 1.4 -0x123FFFFFFFF integer [expr -1 * 0x123FFFFFFFF]
+test_literal 1.5 0xFFFFFFFFFFFFFFFF integer -1
+test_literal 1.7 0x7FFFFFFFFFFFFFFF integer [expr 0x7FFFFFFFFFFFFFFF]
+test_literal 1.8 -0x7FFFFFFFFFFFFFFF integer [expr -0x7FFFFFFFFFFFFFFF]
+test_literal 1.9 +0x7FFFFFFFFFFFFFFF integer [expr +0x7FFFFFFFFFFFFFFF]
+test_literal 1.10 -45 integer -45
+test_literal 1.11 '0xFF' text 0xFF
+test_literal 1.12 '-0xFF' text -0xFF
+test_literal 1.13 -'0xFF' integer 0
+
+test_literal 1.14 -9223372036854775808 integer -9223372036854775808
+
+test_literal 2.1 1e12 real 1000000000000.0
+test_literal 2.2 1.0 real 1.0
+test_literal 2.3 1e1000 real Inf
+test_literal 2.4 -1e1000 real -Inf
+
+finish_test
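The 1.5 through 1.9 cases above rely on hex literals being read as 64-bit two's-complement integers; a short worked example (values match the test expectations):

    -- 0xFFFFFFFFFFFFFFFF is 2^64-1, which as a signed 64-bit value is -1;
    -- 0x7FFFFFFFFFFFFFFF is the largest positive signed 64-bit value.
    SELECT 0xFFFFFFFFFFFFFFFF;   -- -1
    SELECT 0x7FFFFFFFFFFFFFFF;   -- 9223372036854775807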
Index: test/memdb1.test
==================================================================
--- test/memdb1.test
+++ test/memdb1.test
@@ -265,6 +265,19 @@
do_catchsql_test 830 {
PRAGMA wal_checkpoint;
} {1 {database disk image is malformed}}
}
+# 2024-01-20
+# https://sqlite.org/forum/forumpost/498777780e16880a
+#
+# Make sure a database is initialized before serializing it.
+#
+reset_db
+sqlite3 dbempty :memory:
+do_test 900 {
+ set len [string length [dbempty serialize]]
+ expr {$len>0}
+} 1
+dbempty close
+
finish_test
Index: test/misc2.test
==================================================================
--- test/misc2.test
+++ test/misc2.test
@@ -52,38 +52,23 @@
CREATE TABLE t2(a,b,c);
INSERT INTO t2 VALUES(7,8,9);
}
} {}
ifcapable subquery {
- ifcapable allow_rowid_in_view {
- do_catchsql_test misc2-2.2 {
- SELECT rowid, * FROM (SELECT * FROM t1, t2);
- } {0 {{} 1 2 3 7 8 9}}
- } else {
- do_catchsql_test misc2-2.2 {
- SELECT rowid, * FROM (SELECT * FROM t1, t2);
- } {1 {no such column: rowid}}
- }
+ do_catchsql_test misc2-2.2 {
+ SELECT rowid, * FROM (SELECT * FROM t1, t2);
+ } {1 {no such column: rowid}}
do_catchsql_test misc2-2.2b {
SELECT 'rowid', * FROM (SELECT * FROM t1, t2);
} {0 {rowid 1 2 3 7 8 9}}
}
ifcapable view {
- ifcapable allow_rowid_in_view {
- do_catchsql_test misc2-2.3 {
- CREATE VIEW v1 AS SELECT * FROM t1, t2;
- SELECT rowid, * FROM v1;
- } {0 {{} 1 2 3 7 8 9}}
- } else {
- do_catchsql_test misc2-2.3 {
- CREATE VIEW v1 AS SELECT * FROM t1, t2;
- SELECT rowid, * FROM v1;
- } {1 {no such column: rowid}}
- }
-
-
+ do_catchsql_test misc2-2.3 {
+ CREATE VIEW v1 AS SELECT * FROM t1, t2;
+ SELECT rowid, * FROM v1;
+ } {1 {no such column: rowid}}
do_catchsql_test misc2-2.3b {
SELECT 'rowid', * FROM v1;
} {0 {rowid 1 2 3 7 8 9}}
} ;# ifcapable view
Index: test/misc5.test
==================================================================
--- test/misc5.test
+++ test/misc5.test
@@ -567,25 +567,37 @@
}
} {1 {no such table: logs_base}}
}
# Overflow the lemon parser stack by providing an overly complex
-# expression. Make sure that the overflow is detected and reported.
-#
-# This test fails when building with -DYYSTACKDEPTH=0
+# expression. Make sure that the overflow is detected and the
+# stack is grown automatically such that the application calling
+# SQLite never notices.
#
-do_test misc5-7.1 {
+do_test misc5-7.1.1 {
execsql {CREATE TABLE t1(x)}
set sql "INSERT INTO t1 VALUES("
set tail ""
for {set i 0} {$i<200} {incr i} {
append sql "(1+"
append tail ")"
}
- append sql 2$tail
+ append sql "0$tail); SELECT * FROM t1;"
+ catchsql $sql
+} {0 200}
+do_test misc5-7.1.2 {
+ execsql {DELETE FROM t1}
+ set sql "INSERT INTO t1 VALUES("
+ set tail ""
+ for {set i 0} {$i<900} {incr i} {
+ append sql "(1+"
+ append tail ")"
+ }
+ append sql "0$tail); SELECT * FROM t1;"
catchsql $sql
-} {1 {parser stack overflow}}
+} {0 900}
+
# Parser stack overflow is silently ignored when it occurs while parsing the
# schema and PRAGMA writable_schema is turned on.
#
do_test misc5-7.2 {
Index: test/mmap1.test
==================================================================
--- test/mmap1.test
+++ test/mmap1.test
@@ -49,16 +49,16 @@
# unix and 12 on windows. The difference is that windows only ever maps
# an integer number of OS pages (i.e. creates mappings that are a multiple
# of 4KB in size). Whereas on unix any sized mapping may be created.
#
foreach {t mmap_size nRead c2init} {
- 1.1 { PRAGMA mmap_size = 67108864 } /8|12/ {PRAGMA mmap_size = 0}
- 1.2 { PRAGMA mmap_size = 53248 } 154 {PRAGMA mmap_size = 0}
- 1.3 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 0}
- 1.4 { PRAGMA mmap_size = 67108864 } /12|8/ {PRAGMA mmap_size = 67108864 }
- 1.5 { PRAGMA mmap_size = 53248 } 154 {PRAGMA mmap_size = 67108864 }
- 1.6 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 67108864 }
+ 1.1 { PRAGMA mmap_size = 67108864 } /8|12/ {PRAGMA mmap_size = 0}
+ 1.2 { PRAGMA mmap_size = 53248 } /15[34]/ {PRAGMA mmap_size = 0}
+ 1.3 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 0}
+ 1.4 { PRAGMA mmap_size = 67108864 } /12|8/ {PRAGMA mmap_size = 67108864 }
+ 1.5 { PRAGMA mmap_size = 53248 } /15[34]/ {PRAGMA mmap_size = 67108864 }
+ 1.6 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 67108864 }
} {
do_multiclient_test tn {
sql1 {PRAGMA cache_size=2000}
sql2 {PRAGMA cache_size=2000}
Index: test/quote.test
==================================================================
--- test/quote.test
+++ test/quote.test
@@ -101,11 +101,11 @@
1 { CREATE TABLE xyz(a, b, c CHECK (c!="null") ) } null
2 { CREATE INDEX i2 ON t1(x, y, z||"abc") } abc
3 { CREATE INDEX i3 ON t1("w") } w
4 { CREATE INDEX i4 ON t1(x) WHERE z="w" } w
} {
- do_catchsql_test 2.1.$tn $sql [list 1 "no such column: $errname"]
+ do_catchsql_test 2.1.$tn $sql [list 1 "no such column: \"$errname\" - should this be a string literal in single-quotes?"]
}
do_execsql_test 2.2 {
PRAGMA writable_schema = 1;
CREATE TABLE xyz(a, b, c CHECK (c!="null") );
@@ -145,23 +145,23 @@
reset_db
do_catchsql_test 3.0 {
CREATE TABLE t1(a,b);
CREATE INDEX x1 on t1("b");
ALTER TABLE t1 DROP COLUMN b;
- } {1 {error in index x1 after drop column: no such column: b}}
+ } {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
do_catchsql_test 3.1 {
DROP TABLE t1;
CREATE TABLE t1(a,"b");
CREATE INDEX x1 on t1("b");
ALTER TABLE t1 DROP COLUMN b;
- } {1 {error in index x1 after drop column: no such column: b}}
+ } {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
do_catchsql_test 3.2 {
DROP TABLE t1;
CREATE TABLE t1(a,'b');
CREATE INDEX x1 on t1("b");
ALTER TABLE t1 DROP COLUMN b;
- } {1 {error in index x1 after drop column: no such column: b}}
+ } {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
do_catchsql_test 3.3 {
DROP TABLE t1;
CREATE TABLE t1(a,"b");
CREATE INDEX x1 on t1('b');
ALTER TABLE t1 DROP COLUMN b;
@@ -170,11 +170,11 @@
DROP TABLE t1;
CREATE TABLE t1(a, b, c);
CREATE INDEX x1 ON t1("a"||"b");
INSERT INTO t1 VALUES(1,2,3),(1,4,5);
ALTER TABLE t1 DROP COLUMN b;
- } {1 {error in index x1 after drop column: no such column: b}}
+ } {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
sqlite3_db_config db SQLITE_DBCONFIG_DQS_DDL 1
do_catchsql_test 3.5 {
DROP TABLE t1;
CREATE TABLE t1(a, b, c);
CREATE INDEX x1 ON t1("a"||"x");
DELETED test/releasetest_data.tcl
Index: test/releasetest_data.tcl
==================================================================
--- test/releasetest_data.tcl
+++ /dev/null
@@ -1,845 +0,0 @@
-# 2019 August 01
-#
-# The author disclaims copyright to this source code. In place of
-# a legal notice, here is a blessing:
-#
-# May you do good and not evil.
-# May you find forgiveness for yourself and forgive others.
-# May you share freely, never taking more than you give.
-#
-#***********************************************************************
-#
-# This file implements a program that produces scripts (either shell scripts
-# or batch files) to implement a particular test that is part of the SQLite
-# release testing procedure. For example, to run veryquick.test with a
-# specified set of -D compiler switches.
-#
-# A "configuration" is a set of options passed to [./configure] and [make]
-# to build the SQLite library in a particular fashion. A "platform" is a
-# list of tests; most platforms are named after the hardware/OS platform
-# that the tests will be run on as part of the release procedure. Each
-# "test" is a combination of a configuration and a makefile target (e.g.
-# "fulltest"). The program may be invoked as follows:
-#
-set USAGE {
-$argv0 script ?-msvc? CONFIGURATION TARGET
- Given a configuration and make target, return a bash (or, if -msvc
- is specified, batch) script to execute the test. The first argument
- passed to the script must be a directory containing SQLite source code.
-
-$argv0 configurations
- List available configurations.
-
-$argv0 platforms
- List available platforms.
-
-$argv0 tests ?-nodebug? PLATFORM
- List tests in a specified platform. If the -nodebug switch is
- specified, synthetic debug/ndebug configurations are omitted. Each
- test is a combination of a configuration and a makefile target.
-}
-
-# Omit comments (text between # and \n) in a long multi-line string.
-#
-proc strip_comments {in} {
- regsub -all {#[^\n]*\n} $in {} out
- return $out
-}
-
-array set ::Configs [strip_comments {
- "Default" {
- -O2
- --disable-amalgamation --disable-shared
- --enable-session
- -DSQLITE_ENABLE_RBU
- }
- "All-Debug" {
- --enable-debug --enable-all
- }
- "All-O0" {
- -O0 --enable-all
- }
- "Sanitize" {
- CC=clang -fsanitize=address,undefined
- -DSQLITE_ENABLE_STAT4
- -DCONFIG_SLOWDOWN_FACTOR=5.0
- --enable-debug
- --enable-all
- }
- "Stdcall" {
- -DUSE_STDCALL=1
- -O2
- }
- "Have-Not" {
- # The "Have-Not" configuration sets all possible -UHAVE_feature options
- # in order to verify that the code works even on platforms that lack
- # these support services.
- -DHAVE_FDATASYNC=0
- -DHAVE_GMTIME_R=0
- -DHAVE_ISNAN=0
- -DHAVE_LOCALTIME_R=0
- -DHAVE_LOCALTIME_S=0
- -DHAVE_MALLOC_USABLE_SIZE=0
- -DHAVE_STRCHRNUL=0
- -DHAVE_USLEEP=0
- -DHAVE_UTIME=0
- }
- "Unlock-Notify" {
- -O2
- -DSQLITE_ENABLE_UNLOCK_NOTIFY
- -DSQLITE_THREADSAFE
- -DSQLITE_TCL_DEFAULT_FULLMUTEX=1
- }
- "User-Auth" {
- -O2
- -DSQLITE_USER_AUTHENTICATION=1
- }
- "Secure-Delete" {
- -O2
- -DSQLITE_SECURE_DELETE=1
- -DSQLITE_SOUNDEX=1
- }
- "Update-Delete-Limit" {
- -O2
- -DSQLITE_DEFAULT_FILE_FORMAT=4
- -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT=1
- -DSQLITE_ENABLE_STMT_SCANSTATUS
- -DSQLITE_LIKE_DOESNT_MATCH_BLOBS
- -DSQLITE_ENABLE_CURSOR_HINTS
- }
- "Check-Symbols" {
- -DSQLITE_MEMDEBUG=1
- -DSQLITE_ENABLE_FTS3_PARENTHESIS=1
- -DSQLITE_ENABLE_FTS3=1
- -DSQLITE_ENABLE_RTREE=1
- -DSQLITE_ENABLE_MEMSYS5=1
- -DSQLITE_ENABLE_MEMSYS3=1
- -DSQLITE_ENABLE_COLUMN_METADATA=1
- -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT=1
- -DSQLITE_SECURE_DELETE=1
- -DSQLITE_SOUNDEX=1
- -DSQLITE_ENABLE_ATOMIC_WRITE=1
- -DSQLITE_ENABLE_MEMORY_MANAGEMENT=1
- -DSQLITE_ENABLE_OVERSIZE_CELL_CHECK=1
- -DSQLITE_ENABLE_STAT4
- -DSQLITE_ENABLE_STMT_SCANSTATUS
- --enable-fts5 --enable-session
- }
- "Debug-One" {
- --disable-shared
- -O2 -funsigned-char
- -DSQLITE_DEBUG=1
- -DSQLITE_MEMDEBUG=1
- -DSQLITE_MUTEX_NOOP=1
- -DSQLITE_TCL_DEFAULT_FULLMUTEX=1
- -DSQLITE_ENABLE_FTS3=1
- -DSQLITE_ENABLE_RTREE=1
- -DSQLITE_ENABLE_MEMSYS5=1
- -DSQLITE_ENABLE_COLUMN_METADATA=1
- -DSQLITE_ENABLE_STAT4
- -DSQLITE_ENABLE_HIDDEN_COLUMNS
- -DSQLITE_MAX_ATTACHED=125
- -DSQLITE_MUTATION_TEST
- --enable-fts5
- }
- "Debug-Two" {
- -DSQLITE_DEFAULT_MEMSTATUS=0
- -DSQLITE_MAX_EXPR_DEPTH=0
- --enable-debug
- }
- "Fast-One" {
- -O6
- -DSQLITE_ENABLE_FTS4=1
- -DSQLITE_ENABLE_RTREE=1
- -DSQLITE_ENABLE_STAT4
- -DSQLITE_ENABLE_RBU
- -DSQLITE_MAX_ATTACHED=125
- -DSQLITE_MAX_MMAP_SIZE=12884901888
- -DSQLITE_ENABLE_SORTER_MMAP=1
- -DLONGDOUBLE_TYPE=double
- --enable-session
- }
- "Device-One" {
- -O2
- -DSQLITE_DEBUG=1
- -DSQLITE_DEFAULT_AUTOVACUUM=1
- -DSQLITE_DEFAULT_CACHE_SIZE=64
- -DSQLITE_DEFAULT_PAGE_SIZE=1024
- -DSQLITE_DEFAULT_TEMP_CACHE_SIZE=32
- -DSQLITE_DISABLE_LFS=1
- -DSQLITE_ENABLE_ATOMIC_WRITE=1
- -DSQLITE_ENABLE_IOTRACE=1
- -DSQLITE_ENABLE_MEMORY_MANAGEMENT=1
- -DSQLITE_MAX_PAGE_SIZE=4096
- -DSQLITE_OMIT_LOAD_EXTENSION=1
- -DSQLITE_OMIT_PROGRESS_CALLBACK=1
- -DSQLITE_OMIT_VIRTUALTABLE=1
- -DSQLITE_ENABLE_HIDDEN_COLUMNS
- -DSQLITE_TEMP_STORE=3
- }
- "Device-Two" {
- -DSQLITE_4_BYTE_ALIGNED_MALLOC=1
- -DSQLITE_DEFAULT_AUTOVACUUM=1
- -DSQLITE_DEFAULT_CACHE_SIZE=1000
- -DSQLITE_DEFAULT_LOCKING_MODE=0
- -DSQLITE_DEFAULT_PAGE_SIZE=1024
- -DSQLITE_DEFAULT_TEMP_CACHE_SIZE=1000
- -DSQLITE_DISABLE_LFS=1
- -DSQLITE_ENABLE_FTS3=1
- -DSQLITE_ENABLE_MEMORY_MANAGEMENT=1
- -DSQLITE_ENABLE_RTREE=1
- -DSQLITE_MAX_COMPOUND_SELECT=50
- -DSQLITE_MAX_PAGE_SIZE=32768
- -DSQLITE_OMIT_TRACE=1
- -DSQLITE_TEMP_STORE=3
- -DSQLITE_THREADSAFE=2
- --enable-fts5 --enable-session
- }
- "Locking-Style" {
- -O2
- -DSQLITE_ENABLE_LOCKING_STYLE=1
- }
- "Apple" {
- -Os
- -DHAVE_GMTIME_R=1
- -DHAVE_ISNAN=1
- -DHAVE_LOCALTIME_R=1
- -DHAVE_PREAD=1
- -DHAVE_PWRITE=1
- -DHAVE_UTIME=1
- -DSQLITE_DEFAULT_CACHE_SIZE=1000
- -DSQLITE_DEFAULT_CKPTFULLFSYNC=1
- -DSQLITE_DEFAULT_MEMSTATUS=1
- -DSQLITE_DEFAULT_PAGE_SIZE=1024
- -DSQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS=1
- -DSQLITE_ENABLE_API_ARMOR=1
- -DSQLITE_ENABLE_AUTO_PROFILE=1
- -DSQLITE_ENABLE_FLOCKTIMEOUT=1
- -DSQLITE_ENABLE_FTS3=1
- -DSQLITE_ENABLE_FTS3_PARENTHESIS=1
- -DSQLITE_ENABLE_FTS3_TOKENIZER=1
- -DSQLITE_ENABLE_PERSIST_WAL=1
- -DSQLITE_ENABLE_PURGEABLE_PCACHE=1
- -DSQLITE_ENABLE_RTREE=1
- -DSQLITE_ENABLE_SNAPSHOT=1
- # -DSQLITE_ENABLE_SQLLOG=1
- -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT=1
- -DSQLITE_MAX_LENGTH=2147483645
- -DSQLITE_MAX_VARIABLE_NUMBER=500000
- # -DSQLITE_MEMDEBUG=1
- -DSQLITE_NO_SYNC=1
- -DSQLITE_OMIT_AUTORESET=1
- -DSQLITE_OMIT_LOAD_EXTENSION=1
- -DSQLITE_PREFER_PROXY_LOCKING=1
- -DSQLITE_SERIES_CONSTRAINT_VERIFY=1
- -DSQLITE_THREADSAFE=2
- -DSQLITE_USE_URI=1
- -DSQLITE_WRITE_WALFRAME_PREBUFFERED=1
- -DUSE_GUARDED_FD=1
- -DUSE_PREAD=1
- --enable-fts5
- }
- "Extra-Robustness" {
- -DSQLITE_ENABLE_OVERSIZE_CELL_CHECK=1
- -DSQLITE_MAX_ATTACHED=62
- }
- "Devkit" {
- -DSQLITE_DEFAULT_FILE_FORMAT=4
- -DSQLITE_MAX_ATTACHED=30
- -DSQLITE_ENABLE_COLUMN_METADATA
- -DSQLITE_ENABLE_FTS4
- -DSQLITE_ENABLE_FTS5
- -DSQLITE_ENABLE_FTS4_PARENTHESIS
- -DSQLITE_DISABLE_FTS4_DEFERRED
- -DSQLITE_ENABLE_RTREE
- --enable-fts5
- }
- "No-lookaside" {
- -DSQLITE_TEST_REALLOC_STRESS=1
- -DSQLITE_OMIT_LOOKASIDE=1
- }
- "Valgrind" {
- -DSQLITE_ENABLE_STAT4
- -DSQLITE_ENABLE_FTS4
- -DSQLITE_ENABLE_RTREE
- -DSQLITE_ENABLE_HIDDEN_COLUMNS
- -DLONGDOUBLE_TYPE=double
- -DCONFIG_SLOWDOWN_FACTOR=8.0
- }
-
- "Windows-Memdebug" {
- MEMDEBUG=1
- DEBUG=3
- }
- "Windows-Win32Heap" {
- WIN32HEAP=1
- DEBUG=4
- }
-
- # The next group of configurations are used only by the
- # Failure-Detection platform. They are all the same, but we need
- # different names for them all so that the results appear in separate
- # subdirectories.
- #
- Fail0 {-O0}
- Fail2 {-O0}
- Fail3 {-O0}
- Fail4 {-O0}
- FuzzFail1 {-O0}
- FuzzFail2 {-O0}
-}]
-if {$tcl_platform(os)=="Darwin"} {
- lappend Configs(Apple) -DSQLITE_ENABLE_LOCKING_STYLE=1
-}
-
-array set ::Platforms [strip_comments {
- Linux-x86_64 {
- "Check-Symbols*" "" checksymbols
- "Fast-One" QUICKTEST_INCLUDE=rbu.test "fuzztest test"
- "Debug-One" "" "mptest test"
- "Debug-Two" "" test
- "Have-Not" "" test
- "Secure-Delete" "" test
- "Unlock-Notify" QUICKTEST_INCLUDE=notify2.test test
- "User-Auth" "" tcltest
- "Update-Delete-Limit" "" test
- "Extra-Robustness" "" test
- "Device-Two" "" "threadtest test"
- "No-lookaside" "" test
- "Devkit" "" test
- "Apple" "" test
- "Sanitize*" "" test
- "Device-One" "" alltest
- "Default" "" "threadtest fuzztest alltest"
- "Valgrind*" "" valgrindtest
- }
- Linux-i686 {
- "Devkit" "" test
- "Have-Not" "" test
- "Unlock-Notify" QUICKTEST_INCLUDE=notify2.test test
- "Device-One" "" test
- "Device-Two" "" test
- "Default" "" "threadtest fuzztest alltest"
- }
- Darwin-i386 {
- "Locking-Style" "" "mptest test"
- "Have-Not" "" test
- "Apple" "" "threadtest fuzztest alltest"
- }
- Darwin-x86_64 {
- "Locking-Style" "" "mptest test"
- "Have-Not" "" test
- "Apple" "" "threadtest fuzztest alltest"
- }
- Darwin-arm64 {
- "Locking-Style" "" "mptest test"
- "Have-Not" "" test
- "Apple" "" "threadtest fuzztest alltest"
- }
- "Windows NT-intel" {
- "Stdcall" "" test
- "Have-Not" "" test
- "Windows-Memdebug*" "" test
- "Windows-Win32Heap*" "" test
- "Default" "" "mptest fulltestonly"
- }
- "Windows NT-amd64" {
- "Stdcall" "" test
- "Have-Not" "" test
- "Windows-Memdebug*" "" test
- "Windows-Win32Heap*" "" test
- "Default" "" "mptest fulltestonly"
- }
-
- # The Failure-Detection platform runs various tests that deliberately
- # fail. This is used as a test of this script to verify that this script
- # correctly identifies failures.
- #
- Failure-Detection {
- Fail0* "TEST_FAILURE=0" test
- Sanitize* "TEST_FAILURE=1" test
- Fail2* "TEST_FAILURE=2" valgrindtest
- Fail3* "TEST_FAILURE=3" valgrindtest
- Fail4* "TEST_FAILURE=4" test
- FuzzFail1* "TEST_FAILURE=5" test
- FuzzFail2* "TEST_FAILURE=5" valgrindtest
- }
-}]
-
-#--------------------------------------------------------------------------
-#--------------------------------------------------------------------------
-#--------------------------------------------------------------------------
-# End of configuration section.
-#--------------------------------------------------------------------------
-#--------------------------------------------------------------------------
-#--------------------------------------------------------------------------
-
-# Configuration verification: Check that each entry in the list of configs
-# specified for each platform exists.
-#
-foreach {key value} [array get ::Platforms] {
- foreach {v vars t} $value {
- if {[string range $v end end]=="*"} {
- set v [string range $v 0 end-1]
- }
- if {0==[info exists ::Configs($v)]} {
- puts stderr "No such configuration: \"$v\""
- exit -1
- }
- }
-}
-
-proc usage {} {
- global argv0
- puts stderr [subst $::USAGE]
- exit 1
-}
-
-proc is_prefix {p str min} {
- set n [string length $p]
- if {$n<$min} { return 0 }
- if {[string range $str 0 [expr $n-1]]!=$p} { return 0 }
- return 1
-}
-
-proc main_configurations {} {
- foreach k [lsort [array names ::Configs]] {
- puts $k
- }
-}
-
-proc main_platforms {} {
- foreach k [lsort [array names ::Platforms]] {
- puts "\"$k\""
- }
-}
-
-proc main_script {args} {
- set bMsvc 0
- set nArg [llength $args]
- if {$nArg==3} {
- if {![is_prefix [lindex $args 0] -msvc 2]} usage
- set bMsvc 1
- } elseif {$nArg<2 || $nArg>3} {
- usage
- }
- set config [lindex $args end-1]
- set target [lindex $args end]
-
- set opts [list] ;# OPTS value
- set cflags [expr {$bMsvc ? "-Zi" : "-g"}] ;# CFLAGS value
- set makeOpts [list] ;# Extra args for [make]
- set configOpts [list] ;# Extra args for [configure]
-
- if {$::tcl_platform(platform)=="windows" || $bMsvc} {
- lappend opts -DSQLITE_OS_WIN=1
- } else {
- lappend opts -DSQLITE_OS_UNIX=1
- }
-
- # Figure out if this is a synthetic ndebug or debug configuration.
- #
- set bRemoveDebug 0
- if {[string match *-ndebug $config]} {
- set bRemoveDebug 1
- set config [string range $config 0 end-7]
- }
- if {[string match *-debug $config]} {
- lappend opts -DSQLITE_DEBUG
- lappend opts -DSQLITE_EXTRA_IFNULLROW
- set config [string range $config 0 end-6]
- }
- regexp {^(.*)-[0-9]+} $config -> config
-
- # Ensure that the named configuration exists.
- #
- if {![info exists ::Configs($config)]} {
- puts stderr "No such config: $config"
- exit 1
- }
-
- # Loop through the parameters of the nominated configuration, updating
- # $opts, $cflags, $makeOpts and $configOpts along the way. Rules are as
- # follows:
- #
- # 1. If the parameter begins with a "*", discard it.
- #
- # 2. If $bRemoveDebug is set and the parameter is -DSQLITE_DEBUG or
- # -DSQLITE_DEBUG=1, discard it
- #
- # 3. If the parameter begins with "-D", add it to $opts.
- #
- # 4. If the parameter begins with "--" add it to $configOpts. Unless
- # this command is preparing a script for MSVC - then add an
- # equivalent to $makeOpts or $opts.
- #
- # 5. If the parameter begins with "-" add it to $cflags. If in MSVC
- # mode and the parameter is an -O option, instead add
- # an OPTIMIZATIONS= switch to $makeOpts.
- #
- # 6. If none of the above apply, add the parameter to $makeOpts
- #
- foreach param $::Configs($config) {
- if {[string range $param 0 0]=="*"} continue
-
- if {$bRemoveDebug} {
- if {$param=="-DSQLITE_DEBUG" || $param=="-DSQLITE_DEBUG=1"
- || $param=="-DSQLITE_MEMDEBUG" || $param=="-DSQLITE_MEMDEBUG=1"
- || $param=="--enable-debug"
- } {
- continue
- }
- }
-
- if {[string range $param 0 1]=="-D"} {
- lappend opts $param
- continue
- }
-
- if {[string range $param 0 1]=="--"} {
- if {$bMsvc} {
- switch -- $param {
- --disable-amalgamation {
- lappend makeOpts USE_AMALGAMATION=0
- }
- --disable-shared {
- lappend makeOpts USE_CRT_DLL=0 DYNAMIC_SHELL=0
- }
- --enable-fts5 {
- lappend opts -DSQLITE_ENABLE_FTS5
- }
- --enable-shared {
- lappend makeOpts USE_CRT_DLL=1 DYNAMIC_SHELL=1
- }
- --enable-session {
- lappend opts -DSQLITE_ENABLE_PREUPDATE_HOOK
- lappend opts -DSQLITE_ENABLE_SESSION
- }
- default {
- error "Cannot translate $param for MSVC"
- }
- }
- } else {
- lappend configOpts $param
- }
-
- continue
- }
-
- if {[string range $param 0 0]=="-"} {
- if {$bMsvc && [regexp -- {^-O(\d+)$} $param -> level]} {
- lappend makeOpts OPTIMIZATIONS=$level
- } else {
- lappend cflags $param
- }
- continue
- }
-
- lappend makeOpts $param
- }
-
- # Some configurations specify -DHAVE_USLEEP=0. For all others, add
- # -DHAVE_USLEEP=1.
- #
- if {[lsearch $opts "-DHAVE_USLEEP=0"]<0} {
- lappend opts -DHAVE_USLEEP=1
- }
-
- if {$bMsvc==0} {
- puts {set -e}
- puts {}
- puts {if [ "$#" -ne 1 ] ; then}
- puts { echo "Usage: $0 " }
- puts { exit -1 }
- puts {fi }
- puts {SRCDIR=$1}
- puts {}
- puts "TCL=\"[::tcl::pkgconfig get libdir,install]\""
-
- puts "\$SRCDIR/configure --with-tcl=\$TCL $configOpts"
- puts {}
- puts {OPTS=" -DSQLITE_NO_SYNC=1"}
- foreach o $opts {
- puts "OPTS=\"\$OPTS $o\""
- }
- puts {}
- puts "CFLAGS=\"$cflags\""
- puts {}
- puts "make $target \"CFLAGS=\$CFLAGS\" \"OPTS=\$OPTS\" $makeOpts"
- } else {
-
- puts {set SRCDIR=%1}
- set makecmd "nmake /f %SRCDIR%\\Makefile.msc TOP=%SRCDIR% $target "
- append makecmd "\"CFLAGS=$cflags\" \"OPTS=$opts\" $makeOpts"
-
- puts "set TMP=%CD%"
- puts $makecmd
- }
-}
-
-proc main_trscript {args} {
- set bMsvc 0
- set nArg [llength $args]
- if {$nArg==3} {
- if {![is_prefix [lindex $args 0] -msvc 2]} usage
- set bMsvc 1
- } elseif {$nArg<2 || $nArg>3} {
- usage
- }
- set config [lindex $args end-1]
- set srcdir [lindex $args end]
-
- set opts [list] ;# OPTS value
- set cflags [expr {$bMsvc ? "-Zi" : "-g"}] ;# CFLAGS value
- set makeOpts [list] ;# Extra args for [make]
- set configOpts [list] ;# Extra args for [configure]
-
- if {$::tcl_platform(platform)=="windows" || $bMsvc} {
- lappend opts -DSQLITE_OS_WIN=1
- } else {
- lappend opts -DSQLITE_OS_UNIX=1
- }
-
- # Figure out if this is a synthetic ndebug or debug configuration.
- #
- set bRemoveDebug 0
- if {[string match *-ndebug $config]} {
- set bRemoveDebug 1
- set config [string range $config 0 end-7]
- }
- if {[string match *-debug $config]} {
- lappend opts -DSQLITE_DEBUG
- lappend opts -DSQLITE_EXTRA_IFNULLROW
- set config [string range $config 0 end-6]
- }
- regexp {^(.*)-[0-9]+} $config -> config
-
- # Ensure that the named configuration exists.
- #
- if {![info exists ::Configs($config)]} {
- puts stderr "No such config: $config"
- exit 1
- }
-
- # Loop through the parameters of the nominated configuration, updating
- # $opts, $cflags, $makeOpts and $configOpts along the way. Rules are as
- # follows:
- #
- # 1. If the parameter begins with a "*", discard it.
- #
- # 2. If $bRemoveDebug is set and the parameter is -DSQLITE_DEBUG or
- # -DSQLITE_DEBUG=1, discard it
- #
- # 3. If the parameter begins with "-D", add it to $opts.
- #
- # 4. If the parameter begins with "--" add it to $configOpts. Unless
- # this command is preparing a script for MSVC - then add an
- # equivalent to $makeOpts or $opts.
- #
- # 5. If the parameter begins with "-" add it to $cflags. If in MSVC
- # mode and the parameter is an -O option, instead add
- # an OPTIMIZATIONS= switch to $makeOpts.
- #
- # 6. If none of the above apply, add the parameter to $makeOpts
- #
- foreach param $::Configs($config) {
- if {[string range $param 0 0]=="*"} continue
-
- if {$bRemoveDebug} {
- if {$param=="-DSQLITE_DEBUG" || $param=="-DSQLITE_DEBUG=1"
- || $param=="-DSQLITE_MEMDEBUG" || $param=="-DSQLITE_MEMDEBUG=1"
- || $param=="--enable-debug"
- } {
- continue
- }
- }
-
- if {[string range $param 0 1]=="-D"} {
- lappend opts $param
- continue
- }
-
- if {[string range $param 0 1]=="--"} {
- if {$bMsvc} {
- switch -- $param {
- --disable-amalgamation {
- lappend makeOpts USE_AMALGAMATION=0
- }
- --disable-shared {
- lappend makeOpts USE_CRT_DLL=0 DYNAMIC_SHELL=0
- }
- --enable-fts5 {
- lappend opts -DSQLITE_ENABLE_FTS5
- }
- --enable-shared {
- lappend makeOpts USE_CRT_DLL=1 DYNAMIC_SHELL=1
- }
- --enable-session {
- lappend opts -DSQLITE_ENABLE_PREUPDATE_HOOK
- lappend opts -DSQLITE_ENABLE_SESSION
- }
- --enable-all {
- }
- --enable-debug {
- # lappend makeOpts OPTIMIZATIONS=0
- lappend opts -DSQLITE_DEBUG
- }
- default {
- error "Cannot translate $param for MSVC"
- }
- }
- } else {
- lappend configOpts $param
- }
-
- continue
- }
-
- if {[string range $param 0 0]=="-"} {
- if {$bMsvc && [regexp -- {^-O(\d+)$} $param -> level]} {
- lappend makeOpts OPTIMIZATIONS=$level
- } else {
- lappend cflags $param
- }
- continue
- }
-
- lappend makeOpts $param
- }
-
- # Some configurations specify -DHAVE_USLEEP=0. For all others, add
- # -DHAVE_USLEEP=1.
- #
- if {[lsearch $opts "-DHAVE_USLEEP=0"]<0} {
- lappend opts -DHAVE_USLEEP=1
- }
-
- if {$bMsvc==0} {
- puts {set -e}
- puts {}
- puts {if [ "$#" -ne 1 ] ; then}
- puts { echo "Usage: $0 " }
- puts { exit -1 }
- puts {fi }
- puts "SRCDIR=\"$srcdir\""
- puts {}
- puts "TCL=\"[::tcl::pkgconfig get libdir,install]\""
-
- puts {if [ ! -f Makefile ] ; then}
- puts " \$SRCDIR/configure --with-tcl=\$TCL $configOpts"
- puts {fi}
- puts {}
- if {[info exists ::env(OPTS)]} {
- puts "# From environment variable:"
- puts "OPTS=$::env(OPTS)"
- puts ""
- }
- puts {OPTS="$OPTS -DSQLITE_NO_SYNC=1"}
- foreach o $opts {
- puts "OPTS=\"\$OPTS $o\""
- }
- puts {}
- puts "CFLAGS=\"$cflags\""
- puts {}
- puts "make \$1 \"CFLAGS=\$CFLAGS\" \"OPTS=\$OPTS\" $makeOpts"
- } else {
-
- set srcdir [file nativename [file normalize $srcdir]]
- # set srcdir [string map [list "\\" "\\\\"] $srcdir]
-
- puts {set TARGET=%1}
- set makecmd "nmake /f $srcdir\\Makefile.msc TOP=\"$srcdir\" %TARGET% "
- append makecmd "\"CFLAGS=$cflags\" \"OPTS=$opts\" $makeOpts"
-
- puts "set TMP=%CD%"
- puts $makecmd
- }
-}
-
-proc main_tests {args} {
- set bNodebug 0
- set nArg [llength $args]
- if {$nArg==2} {
- if {[is_prefix [lindex $args 0] -nodebug 2]} {
- set bNodebug 1
- } elseif {[is_prefix [lindex $args 0] -debug 2]} {
- set bNodebug 0
- } else usage
- } elseif {$nArg==0 || $nArg>2} {
- usage
- }
- set p [lindex $args end]
- if {![info exists ::Platforms($p)]} {
- puts stderr "No such platform: $p"
- exit 1
- }
-
- set lTest [list]
-
- foreach {config vars target} $::Platforms($p) {
- if {[string range $config end end]=="*"} {
- set config [string range $config 0 end-1]
- } elseif {$bNodebug==0} {
- set dtarget test
- if {[lsearch $target fuzztest]<0 && [lsearch $target test]<0} {
- set dtarget tcltest
- }
- if {$vars!=""} { set dtarget "$vars $dtarget" }
-
- if {[string first SQLITE_DEBUG $::Configs($config)]>=0
- || [string first --enable-debug $::Configs($config)]>=0
- } {
- lappend lTest "$config-ndebug \"$dtarget\""
- } else {
- lappend lTest "$config-debug \"$dtarget\""
- }
- }
-
- if {[llength $target]==1 && ([string match "*TEST_FAILURE*" $vars] || (
- [lsearch $target "valgrindtest"]<0
- && [lsearch $target "alltest"]<0
- && [lsearch $target "fulltestonly"]<0
- && ![string match Sanitize* $config]
- ))} {
- if {$vars!=""} { set target "$vars $target" }
- lappend lTest "$config \"$target\""
- } else {
- set idir -1
- foreach t $target {
- if {$t=="valgrindtest" || $t=="alltest" || $t=="fulltestonly"
- || [string match Sanitize* $config]
- } {
- if {$vars!=""} { set t "$vars $t" }
- for {set ii 1} {$ii<=4} {incr ii} {
- lappend lTest "$config-[incr idir] \"TCLTEST_PART=$ii/4 $t\""
- }
- } else {
- if {$vars!=""} { set t "$vars $t" }
- lappend lTest "$config-[incr idir] \"$t\""
- }
- }
- }
- }
-
- foreach l $lTest {
- puts $l
- }
-
-}
-
-if {[llength $argv]==0} { usage }
-set cmd [lindex $argv 0]
-set n [expr [llength $argv]-1]
-if {[string match ${cmd}* configurations] && $n==0} {
- main_configurations
-} elseif {[string match ${cmd}* script]} {
- main_script {*}[lrange $argv 1 end]
-} elseif {[string match ${cmd}* trscript]} {
- main_trscript {*}[lrange $argv 1 end]
-} elseif {[string match ${cmd}* platforms] && $n==0} {
- main_platforms
-} elseif {[string match ${cmd}* tests]} {
- main_tests {*}[lrange $argv 1 end]
-} else {
- usage
-}
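(Editorial note, not part of the patch: the six classification rules in the comment block of the deleted script above are easier to follow with a worked example. The sketch below is a minimal, hypothetical reconstruction of the non-MSVC branch of those rules; rule 2, the debug-stripping case, is omitted for brevity, and the proc name and sample parameter list are invented.)

  proc classify_params {params} {
    set opts {}; set cflags {-g}; set configOpts {}; set makeOpts {}
    foreach p $params {
      if {[string range $p 0 0]=="*"} continue                       ;# rule 1: discard
      if {[string range $p 0 1]=="-D"} {lappend opts $p; continue}       ;# rule 3: OPTS
      if {[string range $p 0 1]=="--"} {lappend configOpts $p; continue} ;# rule 4: configure args
      if {[string range $p 0 0]=="-"}  {lappend cflags $p; continue}     ;# rule 5: CFLAGS
      lappend makeOpts $p                                                ;# rule 6: make args
    }
    list $opts $cflags $configOpts $makeOpts
  }
  # classify_params {-O2 -DSQLITE_SECURE_DELETE=1 --enable-session FOO=bar}
  #   => {-DSQLITE_SECURE_DELETE=1} {-g -O2} {--enable-session} {FOO=bar}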
Index: test/returning1.test
==================================================================
--- test/returning1.test
+++ test/returning1.test
@@ -210,42 +210,21 @@
CREATE TRIGGER tr2 INSTEAD OF UPDATE ON t1 BEGIN
INSERT INTO log VALUES('update', new.rowid, new.a, new.b);
END;
}
-ifcapable !allow_rowid_in_view {
- do_catchsql_test 10.3a {
- INSERT INTO t1(a, b) VALUES(1234, 5678) RETURNING rowid;
- } {1 {no such column: new.rowid}}
-
- do_catchsql_test 10.3b {
- UPDATE t1 SET a='z' WHERE b='y' RETURNING rowid;
- } {1 {no such column: new.rowid}}
-
- do_execsql_test 10.4 {
- SELECT * FROM log;
- } {}
-} else {
- # Note: The values returned by the RETURNING clauses of the following
- # two statements are the rowid columns of views. These values are not
- # well defined, so the INSERT returns -1, and the UPDATE returns 1, 2
- # and 3. These match the values used for new.rowid expressions, but
- # not much else.
- do_catchsql_test 10.3a {
- INSERT INTO t1(a, b) VALUES(1234, 5678) RETURNING rowid;
- } {0 -1}
-
- do_catchsql_test 10.3b {
- UPDATE t1 SET a='z' WHERE b='y' RETURNING rowid;
- } {0 {1 2 3}}
-
- do_execsql_test 10.4 {
- SELECT * FROM log;
- } {
- insert -1 1234 5678 update 1 z y update 2 z y update 3 z y
- }
-}
+do_catchsql_test 10.3a {
+ INSERT INTO t1(a, b) VALUES(1234, 5678) RETURNING rowid;
+} {1 {no such column: new.rowid}}
+
+do_catchsql_test 10.3b {
+ UPDATE t1 SET a='z' WHERE b='y' RETURNING rowid;
+} {1 {no such column: new.rowid}}
+
+do_execsql_test 10.4 {
+ SELECT * FROM log;
+} {}
# 2021-04-27 dbsqlfuzz 78b9400770ef8cc7d9427dfba26f4fcf46ea7dc2
# Returning clauses on TEMP tables with triggers.
#
reset_db
Index: test/rowid.test
==================================================================
--- test/rowid.test
+++ test/rowid.test
@@ -801,33 +801,20 @@
INSERT INTO t1(rowid, x) VALUES(1, 1);
INSERT INTO t2(y) VALUES(2);
INSERT INTO t3(rowid, z) VALUES(3, 3);
}
-ifcapable allow_rowid_in_view {
- set nosuch "1 {no such column: rowid}"
- do_execsql_test 16.1 { SELECT rowid FROM t1, t2; } {1}
- do_catchsql_test 16.2 { SELECT rowid FROM t1, v1; } $nosuch
- do_catchsql_test 16.3 { SELECT rowid FROM t3, v1; } $nosuch
- do_catchsql_test 16.4 { SELECT rowid FROM t3, (SELECT 123); } $nosuch
-
- do_execsql_test 16.5 { SELECT rowid FROM t2, t1; } {1}
- do_catchsql_test 16.6 { SELECT rowid FROM v1, t1; } $nosuch
- do_catchsql_test 16.7 { SELECT rowid FROM v1, t3; } $nosuch
- do_execsql_test 16.8 { SELECT rowid FROM (SELECT 123), t3; } {3}
-} else {
- do_execsql_test 16.1 { SELECT rowid FROM t1, t2; } {1}
- do_execsql_test 16.2 { SELECT rowid FROM t1, v1; } {1}
- do_execsql_test 16.3 { SELECT rowid FROM t3, v1; } {3}
- do_execsql_test 16.4 { SELECT rowid FROM t3, (SELECT 123); } {3}
-
- do_execsql_test 16.5 { SELECT rowid FROM t2, t1; } {1}
- do_execsql_test 16.6 { SELECT rowid FROM v1, t1; } {1}
- do_execsql_test 16.7 { SELECT rowid FROM v1, t3; } {3}
- do_execsql_test 16.8 { SELECT rowid FROM (SELECT 123), t3; } {3}
-}
-
-do_catchsql_test 16.9 {
- SELECT rowid FROM t1, t3;
-} {1 {ambiguous column name: rowid}}
+do_execsql_test 16.1 { SELECT rowid FROM t1, t2; } {1}
+do_execsql_test 16.2 { SELECT rowid FROM t1, v1; } {1}
+do_execsql_test 16.3 { SELECT rowid FROM t3, v1; } {3}
+do_execsql_test 16.4 { SELECT rowid FROM t3, (SELECT 123); } {3}
+
+do_execsql_test 16.5 { SELECT rowid FROM t2, t1; } {1}
+do_execsql_test 16.6 { SELECT rowid FROM v1, t1; } {1}
+do_execsql_test 16.7 { SELECT rowid FROM v1, t3; } {3}
+do_execsql_test 16.8 { SELECT rowid FROM (SELECT 123), t3; } {3}
+
+do_catchsql_test 16.9 { SELECT rowid FROM t1, t3; } {1 {no such column: rowid}}
+
+
finish_test
Index: test/shell4.test
==================================================================
--- test/shell4.test
+++ test/shell4.test
@@ -115,11 +115,11 @@
do_test shell4-2.2 {
catchcmd ":memory:" "CREATE TABLE t1(x);\n.trace off\n.trace off\n"
} {0 {}}
do_test shell4-2.3 {
catchcmd ":memory:" ".trace stdout\n.dump\n.trace off\n"
-} {/^0 {PRAGMA.*}$/}
+} {/^0 {SELECT.*}$/}
do_test shell4-2.4 {
catchcmd ":memory:" ".trace stdout\nCREATE TABLE t1(x);SELECT * FROM t1;"
} {0 {CREATE TABLE t1(x);
SELECT * FROM t1;}}
do_test shell4-2.5 {
ADDED test/shell9.test
Index: test/shell9.test
==================================================================
--- /dev/null
+++ test/shell9.test
@@ -0,0 +1,148 @@
+# 2024 Jan 8
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The focus of this file is testing the CLI shell tool. Specifically,
+# testing that it is possible to run a ".dump" script that creates
+# virtual tables without explicitly disabling defensive mode.
+#
+# And, that it can process a ".dump" script that contains strings
+# delimited using double-quotes in the schema (DQS_DDL setting).
+#
+
+# Test plan:
+#
+# shell1-1.*: Basic command line option handling.
+# shell1-2.*: Basic "dot" command token parsing.
+# shell1-3.*: Basic test that "dot" command can be called.
+# shell1-{4-8}.*: Test various "dot" commands' functionality.
+# shell1-9.*: Basic test that "dot" commands and SQL intermix ok.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set CLI [test_cli_invocation]
+
+set ::testprefix shell9
+
+ifcapable !fts5 {
+ finish_test
+ return
+}
+
+#----------------------------------------------------------------------------
+# Test cases shell9-1.* verify that scripts output by .dump may be parsed
+# by the shell tool without explicitly disabling DEFENSIVE mode, unless
+# the shell is in safe mode.
+#
+do_execsql_test 1.0 {
+ CREATE VIRTUAL TABLE t1 USING fts5(a, b, c);
+ INSERT INTO t1 VALUES('one', 'two', 'three');
+}
+db close
+
+# Create .dump file in "testdump.txt".
+#
+set out [open testdump.txt w]
+puts $out [lindex [catchcmd test.db .dump] 1]
+close $out
+
+# Check testdump.txt can be processed if the initial db is empty.
+#
+do_test 1.1.1 {
+ forcedelete test.db
+ catchcmd test.db ".read testdump.txt"
+} {0 {}}
+sqlite3 db test.db
+do_execsql_test 1.1.2 {
+ SELECT * FROM t1;
+} {one two three}
+
+# Check testdump.txt cannot be processed if the initial db is not empty.
+#
+reset_db
+do_execsql_test 1.2.1 {
+ CREATE TABLE t4(hello);
+}
+db close
+do_test 1.2.2 {
+ catchcmd test.db ".read testdump.txt"
+} {1 {Parse error near line 5: table sqlite_master may not be modified}}
+
+# Check testdump.txt cannot be processed if the db is in safe mode
+#
+do_test 1.3.1 {
+ forcedelete test.db
+ catchsafecmd test.db ".read testdump.txt"
+} {1 {line 1: cannot run .read in safe mode}}
+do_test 1.3.2 {
+ set fd [open testdump.txt]
+ set script [read $fd]
+ close $fd
+ forcedelete test.db
+ catchsafecmd test.db $script
+} {1 {Parse error near line 5: table sqlite_master may not be modified}}
+do_test 1.3.3 {
+ # Quick check that the above would have worked but for safe mode.
+ forcedelete test.db
+ catchcmd test.db $script
+} {0 {}}
+
+#----------------------------------------------------------------------------
+# Test cases shell9-2.* verify that a warning is printed at the top of
+# .dump scripts that contain virtual tables.
+#
+proc contains_warning {text} {
+ return [string match "*WARNING: Script requires that*" $text]
+}
+
+reset_db
+do_execsql_test 2.0.1 {
+ CREATE TABLE t1(x);
+ CREATE TABLE t2(y);
+ INSERT INTO t1 VALUES('one');
+ INSERT INTO t2 VALUES('two');
+}
+do_test 2.0.2 {
+ contains_warning [catchcmd test.db .dump]
+} 0
+
+do_execsql_test 2.1.1 {
+ CREATE virtual TABLE r1 USING fts5(x);
+}
+do_test 2.1.2 {
+ contains_warning [catchcmd test.db .dump]
+} 1
+
+do_test 2.2.1 {
+ contains_warning [catchcmd test.db ".dump t1"]
+} 0
+do_test 2.2.2 {
+ contains_warning [catchcmd test.db ".dump r1"]
+} 1
+
+#-------------------------------------------------------------------------
+reset_db
+sqlite3_db_config db DQS_DDL 1
+do_execsql_test 3.1.0 {
+ CREATE TABLE t4(hello, check( hello IS NOT "xyz") );
+}
+db close
+
+# Create .dump file in "testdump.txt".
+#
+set out [open testdump.txt w]
+puts $out [lindex [catchcmd test.db .dump] 1]
+close $out
+do_test 3.1.1 {
+ forcedelete test.db
+ catchcmd test.db ".read testdump.txt"
+} {0 {}}
+
+finish_test
Index: test/snapshot_up.test
==================================================================
--- test/snapshot_up.test
+++ test/snapshot_up.test
@@ -24,10 +24,12 @@
# and there are one or more existing connections.
if {[permutation]=="inmemory_journal"} {
finish_test
return
}
+
+db timeout 1000
do_execsql_test 1.0 {
CREATE TABLE t1(a, b, c);
PRAGMA journal_mode = wal;
INSERT INTO t1 VALUES(1, 2, 3);
Index: test/tester.tcl
==================================================================
--- test/tester.tcl
+++ test/tester.tcl
@@ -552,11 +552,11 @@
}
}
}
unset -nocomplain a
set testdir [file normalize $testdir]
- set cmdlinearg(TESTFIXTURE_HOME) [pwd]
+ set cmdlinearg(TESTFIXTURE_HOME) [file dirname [info nameofexec]]
set cmdlinearg(INFO_SCRIPT) [file normalize [info script]]
set argv0 [file normalize $argv0]
if {$cmdlinearg(testdir)!=""} {
file mkdir $cmdlinearg(testdir)
cd $cmdlinearg(testdir)
@@ -881,10 +881,19 @@
puts $out $cmd
close $out
set line "exec $CLI $db < cmds.txt"
set rc [catch { eval $line } msg]
list $rc $msg
+}
+proc catchsafecmd {db {cmd ""}} {
+ global CLI
+ set out [open cmds.txt w]
+ puts $out $cmd
+ close $out
+ set line "exec $CLI -safe $db < cmds.txt"
+ set rc [catch { eval $line } msg]
+ list $rc $msg
}
proc catchcmdex {db {cmd ""}} {
global CLI
set out [open cmds.txt w]
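(Editorial note: the catchsafecmd helper added above is identical to catchcmd except that it launches the CLI with the -safe command-line option, so it is the natural wrapper for asserting that a dot-command is rejected in safe mode. A small usage sketch follows; the test name is invented, and the expected message simply mirrors shell9.test case 1.3.1 earlier in this patch.)

  do_test safemode-example-1 {
    catchsafecmd test.db ".read testdump.txt"
  } {1 {line 1: cannot run .read in safe mode}}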
Index: test/testrunner.tcl
==================================================================
--- test/testrunner.tcl
+++ test/testrunner.tcl
@@ -56,10 +56,12 @@
$a0 PERMUTATION FILE
$a0 njob ?NJOB?
$a0 status
where SWITCHES are:
+ --buildonly
+ --dryrun
--jobs NUMBER-OF-JOBS
--zipvfs ZIPVFS-SOURCE-DIR
Interesting values for PERMUTATION are:
@@ -146,10 +148,12 @@
set TRG(patternlist) [list]
set TRG(cmdline) $argv
set TRG(reporttime) 2000
set TRG(fuzztest) 0 ;# is the fuzztest option present.
set TRG(zipvfs) "" ;# -zipvfs option, if any
+set TRG(buildonly) 0 ;# True if --buildonly option
+set TRG(dryrun) 0 ;# True if --dryrun option
switch -nocase -glob -- $tcl_platform(os) {
*darwin* {
set TRG(platform) osx
set TRG(make) make.sh
@@ -423,12 +427,16 @@
incr ii
set TRG(nJob) [lindex $argv $ii]
if {$isLast} { usage }
} elseif {($n>2 && [string match "$a*" --zipvfs]) || $a=="-z"} {
incr ii
- set TRG(zipvfs) [lindex $argv $ii]
+ set TRG(zipvfs) [file normalize [lindex $argv $ii]]
if {$isLast} { usage }
+ } elseif {($n>2 && [string match "$a*" --buildonly]) || $a=="-b"} {
+ set TRG(buildonly) 1
+ } elseif {($n>2 && [string match "$a*" --dryrun]) || $a=="-d"} {
+ set TRG(dryrun) 1
} else {
usage
}
} else {
lappend TRG(patternlist) [string map {% *} $a]
@@ -749,10 +757,24 @@
-depid [lindex $bld 0]
}
set ::env(SQLITE_TEST_DIR) $::testdir
}
+
+# Used to add jobs for "mdevtest" and "sdevtest".
+#
+proc add_devtest_jobs {lBld patternlist} {
+ global TRG
+
+ foreach b $lBld {
+ set bld [add_build_job $b $TRG(testfixture)]
+ add_tcl_jobs $bld veryquick $patternlist
+ if {$patternlist==""} {
+ add_fuzztest_jobs $b
+ }
+ }
+}
proc add_jobs_from_cmdline {patternlist} {
global TRG
if {$TRG(zipvfs)!=""} {
@@ -773,37 +795,32 @@
add_tcl_jobs "" $c $patternlist
}
}
mdevtest {
- foreach b [list All-O0 All-Debug] {
- set bld [add_build_job $b $TRG(testfixture)]
- add_tcl_jobs $bld veryquick ""
- add_fuzztest_jobs $b
- }
+ add_devtest_jobs {All-O0 All-Debug} [lrange $patternlist 1 end]
}
sdevtest {
- foreach b [list All-Sanitize All-Debug] {
- set bld [add_build_job $b $TRG(testfixture)]
- add_tcl_jobs $bld veryquick ""
- add_fuzztest_jobs $b
- }
+ add_devtest_jobs {All-Sanitize All-Debug} [lrange $patternlist 1 end]
}
release {
+ set patternlist [lrange $patternlist 1 end]
foreach b [trd_builds $TRG(platform)] {
set bld [add_build_job $b $TRG(testfixture)]
foreach c [trd_configs $TRG(platform) $b] {
- add_tcl_jobs $bld $c ""
+ add_tcl_jobs $bld $c $patternlist
}
- foreach e [trd_extras $TRG(platform) $b] {
- if {$e=="fuzztest"} {
- add_fuzztest_jobs $b
- } else {
- add_make_job $bld $e
+ if {$patternlist==""} {
+ foreach e [trd_extras $TRG(platform) $b] {
+ if {$e=="fuzztest"} {
+ add_fuzztest_jobs $b
+ } else {
+ add_make_job $bld $e
+ }
}
}
}
}
@@ -831,10 +848,21 @@
add_jobs_from_cmdline $TRG(patternlist)
}
}
+
+proc mark_job_as_finished {jobid output state endtm} {
+ r_write_db {
+ trdb eval {
+ UPDATE jobs
+ SET output=$output, state=$state, endtime=$endtm
+ WHERE jobid=$jobid;
+ UPDATE jobs SET state='ready' WHERE depid=$jobid;
+ }
+ }
+}
proc script_input_ready {fd iJob jobid} {
global TRG
global O
global T
@@ -866,19 +894,11 @@
set jobtm [expr {$tm - $job(starttime)}]
puts $TRG(log) "### $job(displayname) ${jobtm}ms ($state)"
puts $TRG(log) [string trim $O($iJob)]
- r_write_db {
- set output $O($iJob)
- trdb eval {
- UPDATE jobs
- SET output=$output, state=$state, endtime=$tm
- WHERE jobid=$jobid;
- UPDATE jobs SET state='ready' WHERE depid=$jobid;
- }
- }
+ mark_job_as_finished $jobid $O($iJob) $state $tm
dirs_freeDir $iJob
launch_some_jobs
incr ::wakeup
} else {
@@ -926,20 +946,32 @@
set fd [open [file join $dir $TRG(make)] w]
puts $fd $script
close $fd
}
- set pwd [pwd]
- cd $dir
- set fd [open $TRG(run) w]
- puts $fd $job(cmd)
- close $fd
- set fd [open "|$TRG(runcmd) 2>@1" r]
- cd $pwd
-
- fconfigure $fd -blocking false
- fileevent $fd readable [list script_input_ready $fd $iJob $job(jobid)]
+ if { $TRG(dryrun) } {
+
+ mark_job_as_finished $job(jobid) "" done 0
+ dirs_freeDir $iJob
+ if {$job(build)!=""} {
+ puts $TRG(log) "(cd $dir ; $job(cmd) )"
+ } else {
+ puts $TRG(log) "$job(cmd)"
+ }
+
+ } else {
+ set pwd [pwd]
+ cd $dir
+ set fd [open $TRG(run) w]
+ puts $fd $job(cmd)
+ close $fd
+ set fd [open "|$TRG(runcmd) 2>@1" r]
+ cd $pwd
+
+ fconfigure $fd -blocking false
+ fileevent $fd readable [list script_input_ready $fd $iJob $job(jobid)]
+ }
return 1
}
proc one_line_report {} {
@@ -1033,16 +1065,28 @@
puts "\nTest database is $TRG(dbname)"
puts "Test log is $TRG(logname)"
}
+# Handle the --buildonly option, if it was specified.
+#
+proc handle_buildonly {} {
+ global TRG
+ if {$TRG(buildonly)} {
+ r_write_db {
+ trdb eval { DELETE FROM jobs WHERE displaytype!='bld' }
+ }
+ }
+}
sqlite3 trdb $TRG(dbname)
trdb timeout $TRG(timeout)
set tm [lindex [time { make_new_testset }] 0]
if {$TRG(nJob)>1} {
puts "splitting work across $TRG(nJob) jobs"
}
puts "built testset in [expr $tm/1000]ms.."
+
+handle_buildonly
run_testset
trdb close
#puts [pwd]
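(Editorial note: the testrunner.tcl hunks above add two switches. --buildonly deletes every non-build job from the jobs table, so only the 'bld' jobs are run; --dryrun marks each job as finished immediately and writes the command it would have executed to the log. Hedged invocation sketches follow; they assume testrunner.tcl is run with tclsh from a configured build directory, which this patch does not itself specify.)

  # Build every configuration for the current platform, but run no tests:
  tclsh test/testrunner.tcl --buildonly release

  # Log the commands that an mdevtest run would execute, without running them:
  tclsh test/testrunner.tcl --dryrun --jobs 4 mdevtest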
Index: test/testrunner_data.tcl
==================================================================
--- test/testrunner_data.tcl
+++ test/testrunner_data.tcl
@@ -28,11 +28,11 @@
set tcltest(linux.Default) all_plus_autovacuum_crash
set tcltest(linux.Valgrind) valgrind
set tcltest(osx.Locking-Style) veryquick
set tcltest(osx.Have-Not) veryquick
- set tcltest(osx.Apple) all
+ set tcltest(osx.Apple) all_less_no_mutex_try
set tcltest(win.Stdcall) veryquick
set tcltest(win.Have-Not) veryquick
set tcltest(win.Windows-Memdebug) veryquick
set tcltest(win.Windows-Win32Heap) veryquick
@@ -98,15 +98,15 @@
set build(All-O0) {
-O0 --enable-all
}
set build(All-Sanitize) {
-DSQLITE_OMIT_LOOKASIDE=1
- --enable-all -fsanitize=address,undefined
+ --enable-all -fsanitize=address,undefined -fno-sanitize-recover=undefined
}
set build(Sanitize) {
- CC=clang -fsanitize=address,undefined
+ CC=clang -fsanitize=address,undefined -fno-sanitize-recover=undefined
-DSQLITE_ENABLE_STAT4
-DSQLITE_OMIT_LOOKASIDE=1
-DCONFIG_SLOWDOWN_FACTOR=5.0
--enable-debug
--enable-all
@@ -265,10 +265,11 @@
-DSQLITE_ENABLE_FTS3_PARENTHESIS=1
-DSQLITE_ENABLE_FTS3_TOKENIZER=1
-DSQLITE_ENABLE_PERSIST_WAL=1
-DSQLITE_ENABLE_PURGEABLE_PCACHE=1
-DSQLITE_ENABLE_RTREE=1
+ -DSQLITE_ENABLE_SETLK_TIMEOUT=2
-DSQLITE_ENABLE_SNAPSHOT=1
-DSQLITE_ENABLE_UPDATE_DELETE_LIMIT=1
-DSQLITE_MAX_LENGTH=2147483645
-DSQLITE_MAX_VARIABLE_NUMBER=500000
-DSQLITE_NO_SYNC=1
@@ -364,10 +365,13 @@
set clist $tcltest($platform.$bld)
if {$clist=="all"} {
set clist $all_configs
} elseif {$clist=="all_plus_autovacuum_crash"} {
set clist [concat $all_configs autovacuum_crash]
+ } elseif {$clist=="all_less_no_mutex_try"} {
+ set idx [lsearch $all_configs no_mutex_try]
+ set clist [lreplace $all_configs $idx $idx]
}
}
set clist
}
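(Editorial note: the new all_less_no_mutex_try branch above uses the standard lsearch/lreplace idiom to drop one element from a Tcl list. A minimal standalone sketch follows, with an invented list of configuration names; like the code above, it assumes the element is present.)

  set configs {memsubsys1 no_mutex_try fullmutex}  ;# invented example list
  set idx     [lsearch $configs no_mutex_try]      ;# index of the unwanted element
  set configs [lreplace $configs $idx $idx]        ;# => memsubsys1 fullmutex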
Index: test/tkt-8454a207b9.test
==================================================================
--- test/tkt-8454a207b9.test
+++ test/tkt-8454a207b9.test
@@ -47,11 +47,11 @@
do_test tkt-8454a207b9.4 {
db eval {
ALTER TABLE t1 ADD COLUMN e DEFAULT -123.0;
SELECT e, typeof(e) FROM t1;
}
-} {-123 integer}
+} {-123.0 real}
do_test tkt-8454a207b9.5 {
db eval {
ALTER TABLE t1 ADD COLUMN f DEFAULT -123.5;
SELECT f, typeof(f) FROM t1;
}
Index: test/trace3.test
==================================================================
--- test/trace3.test
+++ test/trace3.test
@@ -130,18 +130,31 @@
set stmt [lindex [lindex $::stmtlist(record) 0] 0]
set ns [lindex [lindex $::stmtlist(record) 0] 1]
list $stmt [expr {$ns >= 0 && $ns <= 9999999}]; # less than 0.010 seconds
} {/^-?\d+ 1$/}
do_test trace3-4.4 {
- set ::stmtlist(record) {}
- db trace_v2 trace_v2_record 2
- execsql {
- SELECT a, b FROM t1 ORDER BY a;
- }
- set stmt [lindex [lindex $::stmtlist(record) 0] 0]
- set ns [lindex [lindex $::stmtlist(record) 0] 1]
- list $stmt [expr {$ns >= 0 && $ns <= 9999999}]; # less than 0.010 seconds
+ set cnt 0
+ while {1} {
+ set ::stmtlist(record) {}
+ db trace_v2 trace_v2_record 2
+ execsql {
+ SELECT a, b FROM t1 ORDER BY a;
+ }
+ set stmt [lindex [lindex $::stmtlist(record) 0] 0]
+ set ns [lindex [lindex $::stmtlist(record) 0] 1]
+ if {$ns<0 || $ns>9999999} { # less than 0.010 seconds
+ incr cnt
+ if {$cnt>3} {
+ set res "time out of bounds. Expected less than 9999999. Got $ns"
+ break
+ }
+ } else {
+ set res 1
+ break
+ }
+ }
+ list $stmt $res
} {/^-?\d+ 1$/}
do_test trace3-5.1 {
set ::stmtlist(record) {}
db trace_v2 trace_v2_record row
Index: test/trigger9.test
==================================================================
--- test/trigger9.test
+++ test/trigger9.test
@@ -240,29 +240,14 @@
CREATE TRIGGER tr3 INSTEAD OF INSERT ON v1 BEGIN
INSERT INTO log VALUES('insert');
END;
}
-ifcapable !allow_rowid_in_view {
- do_catchsql_test 4.2 {
- DELETE FROM v1 WHERE rowid=1;
- } {1 {no such column: rowid}}
-
- do_catchsql_test 4.3 {
- UPDATE v1 SET a=b WHERE rowid=2;
- } {1 {no such column: rowid}}
-} else {
- do_execsql_test 4.2a {
- DELETE FROM log;
- }
- do_catchsql_test 4.2 {
- DELETE FROM v1 WHERE rowid=1;
- } {0 {}}
- do_catchsql_test 4.3 {
- UPDATE v1 SET a=b WHERE rowid=2;
- } {0 {}}
- do_execsql_test 4.3b {
- SELECT * FROM log;
- }
-}
+do_catchsql_test 4.2 {
+ DELETE FROM v1 WHERE rowid=1;
+} {1 {no such column: rowid}}
+
+do_catchsql_test 4.3 {
+ UPDATE v1 SET a=b WHERE rowid=2;
+} {1 {no such column: rowid}}
finish_test
Index: test/types3.test
==================================================================
--- test/types3.test
+++ test/types3.test
@@ -10,12 +10,10 @@
#***********************************************************************
# This file implements regression tests for SQLite library. The focus
# of this file is testing the interaction of SQLite manifest types
# with Tcl dual-representations.
#
-# $Id: types3.test,v 1.8 2008/04/28 13:02:58 drh Exp $
-#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# A variable with only a string representation comes in as TEXT
@@ -93,7 +91,35 @@
} {}
do_test types3-2.6 {
set V [db one {SELECT NULL}]
tcl_variable_type V
} {}
+
+# See https://sqlite.org/forum/forumpost/3776b48e71
+#
+# On a text-affinity comparison of two values where one of
+# the values has both MEM_Str and a numeric type like MEM_Int,
+# make sure that only the MEM_Str representation is used.
+#
+sqlite3_create_function db
+do_execsql_test types3-3.1 {
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1(x TEXT PRIMARY KEY);
+ INSERT INTO t1 VALUES('1');
+ SELECT * FROM t1 WHERE NOT x=upper(1);
+} {}
+do_execsql_test types3-3.2 {
+ SELECT * FROM t1 WHERE NOT x=add_text_type(1);
+} {}
+do_execsql_test types3-3.3 {
+ SELECT * FROM t1 WHERE NOT x=add_int_type('1');
+} {}
+do_execsql_test types3-3.4 {
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES(1.25);
+ SELECT * FROM t1 WHERE NOT x=add_real_type('1.25');
+} {}
+do_execsql_test types3-3.5 {
+ SELECT * FROM t1 WHERE NOT x=add_text_type(1.25);
+} {}
finish_test
Index: test/unionall.test
==================================================================
--- test/unionall.test
+++ test/unionall.test
@@ -349,11 +349,11 @@
ifcapable vtab {
do_catchsql_test 5.30 {
SELECT * FROM (t1 NATURAL JOIN pragma_table_xinfo('t1_a') NATURAL JOIN t3) t1
NATURAL JOIN t2 NATURAL JOIN t3
WHERE rowid ISNULL>0 AND 0%y;
-} {1 {ambiguous column name: rowid}}
+} {1 {no such column: rowid}}
}
reset_db
do_execsql_test 6.0 {
CREATE TABLE t1(a,b);
Index: test/upsert5.test
==================================================================
--- test/upsert5.test
+++ test/upsert5.test
@@ -406,48 +406,6 @@
ON CONFLICT(c) DO UPDATE SET b=''
ON CONFLICT((SELECT t2 FROM nosuchtable)) DO NOTHING;
} {1 {no such table: nosuchtable}}
-# 2024-03-08 https://sqlite.org/forum/forumpost/919c6579c8
-# A redundant ON CONFLICT clause in an upsert can lead to
-# index corruption.
-#
-reset_db
-do_execsql_test 3.0 {
- CREATE TABLE t1(aa INTEGER PRIMARY KEY, bb INT);
- INSERT INTO t1 VALUES(11,22);
- CREATE UNIQUE INDEX t1bb ON t1(bb);
- REPLACE INTO t1 VALUES(11,33)
- ON CONFLICT(bb) DO UPDATE SET aa = 44
- ON CONFLICT(bb) DO UPDATE SET aa = 44;
- PRAGMA integrity_check;
-} {ok}
-do_execsql_test 3.1 {
- SELECT * FROM t1 NOT INDEXED;
-} {11 33}
-do_execsql_test 3.2 {
- SELECT * FROM t1 INDEXED BY t1bb;
-} {11 33}
-do_execsql_test 3.3 {
- DROP TABLE t1;
- CREATE TABLE t1(aa INTEGER PRIMARY KEY, bb INT, cc INT);
- INSERT INTO t1 VALUES(10,21,32),(11,22,33),(12,23,34);
- CREATE UNIQUE INDEX t1bb ON t1(bb);
- CREATE UNIQUE INDEX t1cc ON t1(cc);
- REPLACE INTO t1 VALUES(11,44,55)
- ON CONFLICT(bb) DO UPDATE SET aa = 99
- ON CONFLICT(cc) DO UPDATE SET aa = 99
- ON CONFLICT(bb) DO UPDATE SET aa = 99;
- PRAGMA integrity_check;
-} {ok}
-do_execsql_test 3.4 {
- SELECT * FROM t1 NOT INDEXED ORDER BY +aa;
-} {10 21 32 11 44 55 12 23 34}
-do_execsql_test 3.5 {
- SELECT * FROM t1 INDEXED BY t1bb ORDER BY +aa;
-} {10 21 32 11 44 55 12 23 34}
-do_execsql_test 3.6 {
- SELECT * FROM t1 INDEXED BY t1cc ORDER BY +aa;
-} {10 21 32 11 44 55 12 23 34}
-
finish_test
DELETED test/wapp.tcl
Index: test/wapp.tcl
==================================================================
--- test/wapp.tcl
+++ /dev/null
@@ -1,987 +0,0 @@
-# Copyright (c) 2017 D. Richard Hipp
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the Simplified BSD License (also
-# known as the "2-Clause License" or "FreeBSD License".)
-#
-# This program is distributed in the hope that it will be useful,
-# but without any warranty; without even the implied warranty of
-# merchantability or fitness for a particular purpose.
-#
-#---------------------------------------------------------------------------
-#
-# Design rules:
-#
-# (1) All identifiers in the global namespace begin with "wapp"
-#
-# (2) Identifiers intended for internal use only begin with "wappInt"
-#
-package require Tcl 8.6
-
-# Add text to the end of the HTTP reply. No interpretation or transformation
-# of the text is performed. The argument should be enclosed within {...}
-#
-proc wapp {txt} {
- global wapp
- dict append wapp .reply $txt
-}
-
-# Add text to the page under construction. Do no escaping on the text.
-#
-# Though "unsafe" in general, there are uses for this kind of thing.
-# For example, if you want to return the complete, unmodified content of
-# a file:
-#
-# set fd [open content.html rb]
-# wapp-unsafe [read $fd]
-# close $fd
-#
-# You could do the same thing using ordinary "wapp" instead of "wapp-unsafe".
-# The difference is that wapp-safety-check will complain about the misuse
-# of "wapp", but it assumes that the person who write "wapp-unsafe" understands
-# the risks.
-#
-# Though occasionally necessary, the use of this interface should be minimized.
-#
-proc wapp-unsafe {txt} {
- global wapp
- dict append wapp .reply $txt
-}
-
-# Add text to the end of the reply under construction. The following
-# substitutions are made:
-#
-# %html(...) Escape text for inclusion in HTML
-# %url(...) Escape text for use as a URL
-# %qp(...) Escape text for use as a URI query parameter
-# %string(...) Escape text for use within a JSON string
-# %unsafe(...) No transformations of the text
-#
-# The substitutions above terminate at the first ")" character. If the
-# text of the TCL string in ... contains ")" characters itself, use instead:
-#
-# %html%(...)%
-# %url%(...)%
-# %qp%(...)%
-# %string%(...)%
-# %unsafe%(...)%
-#
-# In other words, use "%(...)%" instead of "(...)" to include the TCL string
-# to substitute.
-#
-# The %unsafe substitution should be avoided whenever possible, obviously.
-# In addition to the substitutions above, the text also does backslash
-# escapes.
-#
-# The wapp-trim proc works the same as wapp-subst except that it also removes
-# whitespace from the left margin, so that the generated HTML/CSS/Javascript
-# does not appear to be indented when delivered to the client web browser.
-#
-if {$tcl_version>=8.7} {
- proc wapp-subst {txt} {
- global wapp
- regsub -all -command \
- {%(html|url|qp|string|unsafe){1,1}?(|%)\((.+)\)\2} $txt wappInt-enc txt
- dict append wapp .reply [subst -novariables -nocommand $txt]
- }
- proc wapp-trim {txt} {
- global wapp
- regsub -all {\n\s+} [string trim $txt] \n txt
- regsub -all -command \
- {%(html|url|qp|string|unsafe){1,1}?(|%)\((.+)\)\2} $txt wappInt-enc txt
- dict append wapp .reply [subst -novariables -nocommand $txt]
- }
- proc wappInt-enc {all mode nu1 txt} {
- return [uplevel 2 "wappInt-enc-$mode \"$txt\""]
- }
-} else {
- proc wapp-subst {txt} {
- global wapp
- regsub -all {%(html|url|qp|string|unsafe){1,1}?(|%)\((.+)\)\2} $txt \
- {[wappInt-enc-\1 "\3"]} txt
- dict append wapp .reply [uplevel 1 [list subst -novariables $txt]]
- }
- proc wapp-trim {txt} {
- global wapp
- regsub -all {\n\s+} [string trim $txt] \n txt
- regsub -all {%(html|url|qp|string|unsafe){1,1}?(|%)\((.+)\)\2} $txt \
- {[wappInt-enc-\1 "\3"]} txt
- dict append wapp .reply [uplevel 1 [list subst -novariables $txt]]
- }
-}
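(Editorial note, not part of the deleted file: the %html(...)/%url(...)/%qp(...) substitutions documented above are normally used from a page handler, with the escaping applied to any untrusted text before it is appended to the reply. A hedged sketch follows; the page procedure and the "name" query parameter are invented, and the wapp-page-hello name follows Wapp's convention of dispatching the /hello URL to a proc of that name.)

  proc wapp-page-hello {} {
    wapp-trim {
      <h1>Hello, %html([wapp-param name Anonymous])!</h1>
    }
  }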
-
-# There must be a wappInt-enc-NAME routine for each possible substitution
-# in wapp-subst. Thus there are routines for "html", "url", "qp", and "unsafe".
-#
-# wappInt-enc-html Escape text so that it is safe to use in the
-# body of an HTML document.
-#
-# wappInt-enc-url Escape text so that it is safe to pass as an
-# argument to href= and src= attributes in HTML.
-#
-# wappInt-enc-qp Escape text so that it is safe to use as the
-# value of a query parameter in a URL or in
-# post data or in a cookie.
-#
-# wappInt-enc-string Escape ", ', \, and < for using inside of a
-# javascript string literal. The < character
-# is escaped to prevent "" from causing
-# problems in embedded javascript.
-#
-# wappInt-enc-unsafe Perform no encoding at all. Unsafe.
-#
-proc wappInt-enc-html {txt} {
- return [string map {& &amp; < &lt; > &gt; \" &quot; \\ &#92;} $txt]
-}
-proc wappInt-enc-unsafe {txt} {
- return $txt
-}
-proc wappInt-enc-url {s} {
- if {[regsub -all {[^-{}@~?=#_.:/a-zA-Z0-9]} $s {[wappInt-%HHchar {&}]} s]} {
- set s [subst -novar -noback $s]
- }
- if {[regsub -all {[{}]} $s {[wappInt-%HHchar \\&]} s]} {
- set s [subst -novar -noback $s]
- }
- return $s
-}
-proc wappInt-enc-qp {s} {
- if {[regsub -all {[^-{}_.a-zA-Z0-9]} $s {[wappInt-%HHchar {&}]} s]} {
- set s [subst -novar -noback $s]
- }
- if {[regsub -all {[{}]} $s {[wappInt-%HHchar \\&]} s]} {
- set s [subst -novar -noback $s]
- }
- return $s
-}
-proc wappInt-enc-string {s} {
- return [string map {\\ \\\\ \" \\\" ' \\' < \\u003c} $s]
-}
-
-# This is a helper routine for wappInt-enc-url and wappInt-enc-qp. It returns
-# an appropriate %HH encoding for the single character c. If c is a unicode
-# character, then this routine might return multiple bytes: %HH%HH%HH
-#
-proc wappInt-%HHchar {c} {
- if {$c==" "} {return +}
- return [regsub -all .. [binary encode hex [encoding convertto utf-8 $c]] {%&}]
-}
-
-
-# Undo the www-url-encoded format.
-#
-# HT: This code stolen from ncgi.tcl
-#
-proc wappInt-decode-url {str} {
- set str [string map [list + { } "\\" "\\\\" \[ \\\[ \] \\\]] $str]
- regsub -all -- \
- {%([Ee][A-Fa-f0-9])%([89ABab][A-Fa-f0-9])%([89ABab][A-Fa-f0-9])} \
- $str {[encoding convertfrom utf-8 [binary decode hex \1\2\3]]} str
- regsub -all -- \
- {%([CDcd][A-Fa-f0-9])%([89ABab][A-Fa-f0-9])} \
- $str {[encoding convertfrom utf-8 [binary decode hex \1\2]]} str
- regsub -all -- {%([0-7][A-Fa-f0-9])} $str {\\u00\1} str
- return [subst -novar $str]
-}
-
-# Reset the document back to an empty string.
-#
-proc wapp-reset {} {
- global wapp
- dict set wapp .reply {}
-}
-
-# Change the mime-type of the result document.
-#
-proc wapp-mimetype {x} {
- global wapp
- dict set wapp .mimetype $x
-}
-
-# Change the reply code.
-#
-proc wapp-reply-code {x} {
- global wapp
- dict set wapp .reply-code $x
-}
-
-# Set a cookie
-#
-proc wapp-set-cookie {name value} {
- global wapp
- dict lappend wapp .new-cookies $name $value
-}
-
-# Unset a cookie
-#
-proc wapp-clear-cookie {name} {
- wapp-set-cookie $name {}
-}
-
-# Add extra entries to the reply header
-#
-proc wapp-reply-extra {name value} {
- global wapp
- dict lappend wapp .reply-extra $name $value
-}
-
-# Specifies how the web-page under construction should be cached.
-# The argument should be one of:
-#
-# no-cache
-# max-age=N (for some integer number of seconds, N)
-# private,max-age=N
-#
-proc wapp-cache-control {x} {
- wapp-reply-extra Cache-Control $x
-}
-
-# Redirect to a different web page
-#
-proc wapp-redirect {uri} {
- wapp-reply-code {307 Redirect}
- wapp-reply-extra Location $uri
-}
-
-# Return the value of a wapp parameter
-#
-proc wapp-param {name {dflt {}}} {
- global wapp
- if {![dict exists $wapp $name]} {return $dflt}
- return [dict get $wapp $name]
-}
-
-# Return true if and only if the wapp parameter $name exists
-#
-proc wapp-param-exists {name} {
- global wapp
- return [dict exists $wapp $name]
-}
-
-# Set the value of a wapp parameter
-#
-proc wapp-set-param {name value} {
- global wapp
- dict set wapp $name $value
-}
-
-# Return all parameter names that match the GLOB pattern, or all
-# names if the GLOB pattern is omitted.
-#
-proc wapp-param-list {{glob {*}}} {
- global wapp
- return [dict keys $wapp $glob]
-}
-
-# By default, Wapp does not decode query parameters and POST parameters
-# for cross-origin requests. This is a security restriction, designed to
-# help prevent cross-site request forgery (CSRF) attacks.
-#
-# As a consequence of this restriction, URLs for sites generated by Wapp
-# that contain query parameters will not work as URLs found in other
-# websites. You cannot create a link from a second website into a Wapp
-# website if the link contains query parameters, by default.
-#
-# Of course, it is sometimes desirable to allow query parameters on external
-# links. For URLs for which this is safe, the application should invoke
-# wapp-allow-xorigin-params. This procedure tells Wapp that it is safe to
-# go ahead and decode the query parameters even for cross-site requests.
-#
-# In other words, for Wapp security is the default setting. Individual pages
-# need to actively disable the cross-site request security if those pages
-# are safe for cross-site access.
-#
-proc wapp-allow-xorigin-params {} {
- global wapp
- if {![dict exists $wapp .qp] && ![dict get $wapp SAME_ORIGIN]} {
- wappInt-decode-query-params
- }
-}
-
-# Set the content-security-policy.
-#
-# The default content-security-policy is very strict: "default-src 'self'"
-# The default policy prohibits the use of in-line javascript or CSS.
-#
-# Provide an alternative CSP as the argument. Or use "off" to disable
-# the CSP completely.
-#
-proc wapp-content-security-policy {val} {
- global wapp
- if {$val=="off"} {
- dict unset wapp .csp
- } else {
- dict set wapp .csp $val
- }
-}
-
-# Examine the bodies of all procedures in this program looking for
-# unsafe calls to various Wapp interfaces. Return a text string
-# containing warnings. Return an empty string if all is ok.
-#
-# This routine is advisory only. It misses some constructs that are
-# dangerous and flags others that are safe.
-#
-proc wapp-safety-check {} {
- set res {}
- foreach p [info procs] {
- set ln 0
- foreach x [split [info body $p] \n] {
- incr ln
- if {[regexp {^[ \t]*wapp[ \t]+([^\n]+)} $x all tail]
- && [string index $tail 0]!="\173"
- && [regexp {[[$]} $tail]
- } {
- append res "$p:$ln: unsafe \"wapp\" call: \"[string trim $x]\"\n"
- }
- if {[regexp {^[ \t]*wapp-(subst|trim)[ \t]+[^\173]} $x all cx]} {
- append res "$p:$ln: unsafe \"wapp-$cx\" call: \"[string trim $x]\"\n"
- }
- }
- }
- return $res
-}
-
-# Return a string that describes the current environment. Applications
-# might find this useful for debugging.
-#
-proc wapp-debug-env {} {
- global wapp
- set out {}
- foreach var [lsort [dict keys $wapp]] {
- if {[string index $var 0]=="."} continue
- append out "$var = [list [dict get $wapp $var]]\n"
- }
- append out "\[pwd\] = [list [pwd]]\n"
- return $out
-}
-
-# Tracing function for each HTTP request. This is overridden by wapp-start
-# if tracing is enabled.
-#
-proc wappInt-trace {} {}
-
-# Start up a listening socket. Arrange to invoke wappInt-new-connection
-# for each inbound HTTP connection.
-#
-# port Listen on this TCP port. 0 means to select a port
-# that is not currently in use
-#
-# wappmode One of "scgi", "remote-scgi", "server", or "local".
-#
-# fromip If not {}, then reject all requests from IP addresses
-# other than $fromip
-#
-proc wappInt-start-listener {port wappmode fromip} {
- if {[string match *scgi $wappmode]} {
- set type SCGI
- set server [list wappInt-new-connection \
- wappInt-scgi-readable $wappmode $fromip]
- } else {
- set type HTTP
- set server [list wappInt-new-connection \
- wappInt-http-readable $wappmode $fromip]
- }
- if {$wappmode=="local" || $wappmode=="scgi"} {
- set x [socket -server $server -myaddr 127.0.0.1 $port]
- } else {
- set x [socket -server $server $port]
- }
- set coninfo [chan configure $x -sockname]
- set port [lindex $coninfo 2]
- if {$wappmode=="local"} {
- wappInt-start-browser http://127.0.0.1:$port/
- } elseif {$fromip!=""} {
- puts "Listening for $type requests on TCP port $port from IP $fromip"
- } else {
- puts "Listening for $type requests on TCP port $port"
- }
-}
-
-# Start a web-browser and point it at $URL
-#
-proc wappInt-start-browser {url} {
- global tcl_platform
- if {$tcl_platform(platform)=="windows"} {
- exec cmd /c start $url &
- } elseif {$tcl_platform(os)=="Darwin"} {
- exec open $url &
- } elseif {[catch {exec xdg-open $url}]} {
- exec firefox $url &
- }
-}
-
-# This routine is a "socket -server" callback. The $chan, $ip, and $port
-# arguments are added by the socket command.
-#
-# Arrange to invoke $callback when content is available on the new socket.
-# The $callback will process inbound HTTP or SCGI content. Reject the
-# request if $fromip is not an empty string and does not match $ip.
-#
-proc wappInt-new-connection {callback wappmode fromip chan ip port} {
- upvar #0 wappInt-$chan W
- if {$fromip!="" && ![string match $fromip $ip]} {
- close $chan
- return
- }
- set W [dict create REMOTE_ADDR $ip REMOTE_PORT $port WAPP_MODE $wappmode \
- .header {}]
- fconfigure $chan -blocking 0 -translation binary
- fileevent $chan readable [list $callback $chan]
-}
-
-# Close an input channel
-#
-proc wappInt-close-channel {chan} {
- if {$chan=="stdout"} {
- # This happens after completing a CGI request
- exit 0
- } else {
- unset ::wappInt-$chan
- close $chan
- }
-}
-
-# Process new text received on an inbound HTTP request
-#
-proc wappInt-http-readable {chan} {
- if {[catch [list wappInt-http-readable-unsafe $chan] msg]} {
- puts stderr "$msg\n$::errorInfo"
- wappInt-close-channel $chan
- }
-}
-proc wappInt-http-readable-unsafe {chan} {
- upvar #0 wappInt-$chan W wapp wapp
- if {![dict exists $W .toread]} {
- # If the .toread key is not set, that means we are still reading
- # the header
- set line [string trimright [gets $chan]]
- set n [string length $line]
- if {$n>0} {
- if {[dict get $W .header]=="" || [regexp {^\s+} $line]} {
- dict append W .header $line
- } else {
- dict append W .header \n$line
- }
- if {[string length [dict get $W .header]]>100000} {
- error "HTTP request header too big - possible DOS attack"
- }
- } elseif {$n==0} {
- # We have reached the blank line that terminates the header.
- global argv0
- set a0 [file normalize $argv0]
- dict set W SCRIPT_FILENAME $a0
- dict set W DOCUMENT_ROOT [file dir $a0]
- if {[wappInt-parse-header $chan]} {
- catch {close $chan}
- return
- }
- set len 0
- if {[dict exists $W CONTENT_LENGTH]} {
- set len [dict get $W CONTENT_LENGTH]
- }
- if {$len>0} {
- # Still need to read the query content
- dict set W .toread $len
- } else {
- # There is no query content, so handle the request immediately
- set wapp $W
- wappInt-handle-request $chan 0
- }
- }
- } else {
- # If .toread is set, that means we are reading the query content.
- # Continue reading until .toread reaches zero.
- set got [read $chan [dict get $W .toread]]
- dict append W CONTENT $got
- dict set W .toread [expr {[dict get $W .toread]-[string length $got]}]
- if {[dict get $W .toread]<=0} {
- # Handle the request as soon as all the query content is received
- set wapp $W
- wappInt-handle-request $chan 0
- }
- }
-}
-
-# Decode the HTTP request header.
-#
-# This routine is always running inside of a [catch], so if
-# any problems arise, simply raise an error.
-#
-proc wappInt-parse-header {chan} {
- upvar #0 wappInt-$chan W
- set hdr [split [dict get $W .header] \n]
- if {$hdr==""} {return 1}
- set req [lindex $hdr 0]
- dict set W REQUEST_METHOD [set method [lindex $req 0]]
- if {[lsearch {GET HEAD POST} $method]<0} {
- error "unsupported request method: \"[dict get $W REQUEST_METHOD]\""
- }
- set uri [lindex $req 1]
- set split_uri [split $uri ?]
- set uri0 [lindex $split_uri 0]
- if {![regexp {^/[-.a-z0-9_/]*$} $uri0]} {
- error "invalid request uri: \"$uri0\""
- }
- dict set W REQUEST_URI $uri0
- dict set W PATH_INFO $uri0
- set uri1 [lindex $split_uri 1]
- dict set W QUERY_STRING $uri1
- set n [llength $hdr]
- for {set i 1} {$i<$n} {incr i} {
- set x [lindex $hdr $i]
- if {![regexp {^(.+): +(.*)$} $x all name value]} {
- error "invalid header line: \"$x\""
- }
- set name [string toupper $name]
- switch -- $name {
- REFERER {set name HTTP_REFERER}
- USER-AGENT {set name HTTP_USER_AGENT}
- CONTENT-LENGTH {set name CONTENT_LENGTH}
- CONTENT-TYPE {set name CONTENT_TYPE}
- HOST {set name HTTP_HOST}
- COOKIE {set name HTTP_COOKIE}
- ACCEPT-ENCODING {set name HTTP_ACCEPT_ENCODING}
- default {set name .hdr:$name}
- }
- dict set W $name $value
- }
- return 0
-}
-
-# Decode the QUERY_STRING parameters from a GET request or the
-# application/x-www-form-urlencoded CONTENT from a POST request.
-#
-# This routine sets the ".qp" element of the ::wapp dict as a signal
-# that query parameters have already been decoded.
-#
-proc wappInt-decode-query-params {} {
- global wapp
- dict set wapp .qp 1
- if {[dict exists $wapp QUERY_STRING]} {
- foreach qterm [split [dict get $wapp QUERY_STRING] &] {
- set qsplit [split $qterm =]
- set nm [lindex $qsplit 0]
- if {[regexp {^[a-z][a-z0-9]*$} $nm]} {
- dict set wapp $nm [wappInt-decode-url [lindex $qsplit 1]]
- }
- }
- }
- if {[dict exists $wapp CONTENT_TYPE] && [dict exists $wapp CONTENT]} {
- set ctype [dict get $wapp CONTENT_TYPE]
- if {$ctype=="application/x-www-form-urlencoded"} {
- foreach qterm [split [string trim [dict get $wapp CONTENT]] &] {
- set qsplit [split $qterm =]
- set nm [lindex $qsplit 0]
- if {[regexp {^[a-z][-a-z0-9_]*$} $nm]} {
- dict set wapp $nm [wappInt-decode-url [lindex $qsplit 1]]
- }
- }
- } elseif {[string match multipart/form-data* $ctype]} {
- regexp {^(.*?)\r\n(.*)$} [dict get $wapp CONTENT] all divider body
- set ndiv [string length $divider]
- while {[string length $body]} {
- set idx [string first $divider $body]
- set unit [string range $body 0 [expr {$idx-3}]]
- set body [string range $body [expr {$idx+$ndiv+2}] end]
- if {[regexp {^Content-Disposition: form-data; (.*?)\r\n\r\n(.*)$} \
- $unit unit hdr content]} {
- if {[regexp {name="(.*)"; filename="(.*)"\r\nContent-Type: (.*?)$}\
- $hdr hr name filename mimetype]} {
- dict set wapp $name.filename \
- [string map [list \\\" \" \\\\ \\] $filename]
- dict set wapp $name.mimetype $mimetype
- dict set wapp $name.content $content
- } elseif {[regexp {name="(.*)"} $hdr hr name]} {
- dict set wapp $name $content
- }
- }
- }
- }
- }
-}
-
-# Invoke application-supplied methods to generate a reply to
-# a single HTTP request.
-#
-# This routine always runs within [catch], so handle exceptions by
-# invoking [error].
-#
-proc wappInt-handle-request {chan useCgi} {
- global wapp
- dict set wapp .reply {}
- dict set wapp .mimetype {text/html; charset=utf-8}
- dict set wapp .reply-code {200 Ok}
- dict set wapp .csp {default-src 'self'}
-
- # Set up additional CGI environment values
- #
- if {![dict exists $wapp HTTP_HOST]} {
- dict set wapp BASE_URL {}
- } elseif {[dict exists $wapp HTTPS]} {
- dict set wapp BASE_URL https://[dict get $wapp HTTP_HOST]
- } else {
- dict set wapp BASE_URL http://[dict get $wapp HTTP_HOST]
- }
- if {![dict exists $wapp REQUEST_URI]} {
- dict set wapp REQUEST_URI /
- } elseif {[regsub {\?.*} [dict get $wapp REQUEST_URI] {} newR]} {
- # Some servers (ex: nginx) append the query parameters to REQUEST_URI.
- # These need to be stripped off
- dict set wapp REQUEST_URI $newR
- }
- if {[dict exists $wapp SCRIPT_NAME]} {
- dict append wapp BASE_URL [dict get $wapp SCRIPT_NAME]
- } else {
- dict set wapp SCRIPT_NAME {}
- }
- if {![dict exists $wapp PATH_INFO]} {
- # If PATH_INFO is missing (ex: nginx) then construct it
- set URI [dict get $wapp REQUEST_URI]
- set skip [string length [dict get $wapp SCRIPT_NAME]]
- dict set wapp PATH_INFO [string range $URI $skip end]
- }
- if {[regexp {^/([^/]+)(.*)$} [dict get $wapp PATH_INFO] all head tail]} {
- dict set wapp PATH_HEAD $head
- dict set wapp PATH_TAIL [string trimleft $tail /]
- } else {
- dict set wapp PATH_INFO {}
- dict set wapp PATH_HEAD {}
- dict set wapp PATH_TAIL {}
- }
- dict set wapp SELF_URL [dict get $wapp BASE_URL]/[dict get $wapp PATH_HEAD]
-
- # Parse query parameters from the query string, the cookies, and
- # POST data
- #
- if {[dict exists $wapp HTTP_COOKIE]} {
- foreach qterm [split [dict get $wapp HTTP_COOKIE] {;}] {
- set qsplit [split [string trim $qterm] =]
- set nm [lindex $qsplit 0]
- if {[regexp {^[a-z][-a-z0-9_]*$} $nm]} {
- dict set wapp $nm [wappInt-decode-url [lindex $qsplit 1]]
- }
- }
- }
- set same_origin 0
- if {[dict exists $wapp HTTP_REFERER]} {
- set referer [dict get $wapp HTTP_REFERER]
- set base [dict get $wapp BASE_URL]
- if {$referer==$base || [string match $base/* $referer]} {
- set same_origin 1
- }
- }
- dict set wapp SAME_ORIGIN $same_origin
- if {$same_origin} {
- wappInt-decode-query-params
- }
-
- # Invoke the application-defined handler procedure for this page
- # request. If an error occurs while running that procedure, generate
- # an HTTP reply that contains the error message.
- #
- wapp-before-dispatch-hook
- wappInt-trace
- set mname [dict get $wapp PATH_HEAD]
- if {[catch {
- if {$mname!="" && [llength [info proc wapp-page-$mname]]>0} {
- wapp-page-$mname
- } else {
- wapp-default
- }
- } msg]} {
- if {[wapp-param WAPP_MODE]=="local" || [wapp-param WAPP_MODE]=="server"} {
- puts "ERROR: $::errorInfo"
- }
- wapp-reset
- wapp-reply-code "500 Internal Server Error"
- wapp-mimetype text/html
- wapp-trim {
-      <h1>Wapp Application Error</h1>
-      <pre>%html($::errorInfo)</pre>
- }
- dict unset wapp .new-cookies
- }
-
- # Transmit the HTTP reply
- #
- if {$chan=="stdout"} {
- puts $chan "Status: [dict get $wapp .reply-code]\r"
- } else {
- puts $chan "HTTP/1.1 [dict get $wapp .reply-code]\r"
- puts $chan "Server: wapp\r"
- puts $chan "Connection: close\r"
- }
- if {[dict exists $wapp .reply-extra]} {
- foreach {name value} [dict get $wapp .reply-extra] {
- puts $chan "$name: $value\r"
- }
- }
- if {[dict exists $wapp .csp]} {
- puts $chan "Content-Security-Policy: [dict get $wapp .csp]\r"
- }
- set mimetype [dict get $wapp .mimetype]
- puts $chan "Content-Type: $mimetype\r"
- if {[dict exists $wapp .new-cookies]} {
- foreach {nm val} [dict get $wapp .new-cookies] {
- if {[regexp {^[a-z][-a-z0-9_]*$} $nm]} {
- if {$val==""} {
- puts $chan "Set-Cookie: $nm=; HttpOnly; Path=/; Max-Age=1\r"
- } else {
- set val [wappInt-enc-url $val]
- puts $chan "Set-Cookie: $nm=$val; HttpOnly; Path=/\r"
- }
- }
- }
- }
- if {[string match text/* $mimetype]} {
- set reply [encoding convertto utf-8 [dict get $wapp .reply]]
- if {[regexp {\ygzip\y} [wapp-param HTTP_ACCEPT_ENCODING]]} {
- catch {
- set x [zlib gzip $reply]
- set reply $x
- puts $chan "Content-Encoding: gzip\r"
- }
- }
- } else {
- set reply [dict get $wapp .reply]
- }
- puts $chan "Content-Length: [string length $reply]\r"
- puts $chan \r
- puts -nonewline $chan $reply
- flush $chan
- wappInt-close-channel $chan
-}
-
-# This routine runs just prior to request-handler dispatch. The
-# default implementation is a no-op, but applications can override
-# to do additional transformations or checks.
-#
-proc wapp-before-dispatch-hook {} {return}
-
-# Process a single CGI request
-#
-proc wappInt-handle-cgi-request {} {
- global wapp env
- foreach key {
- CONTENT_LENGTH
- CONTENT_TYPE
- DOCUMENT_ROOT
- HTTP_ACCEPT_ENCODING
- HTTP_COOKIE
- HTTP_HOST
- HTTP_REFERER
- HTTP_USER_AGENT
- HTTPS
- PATH_INFO
- QUERY_STRING
- REMOTE_ADDR
- REQUEST_METHOD
- REQUEST_URI
- REMOTE_USER
- SCRIPT_FILENAME
- SCRIPT_NAME
- SERVER_NAME
- SERVER_PORT
- SERVER_PROTOCOL
- } {
- if {[info exists env($key)]} {
- dict set wapp $key $env($key)
- }
- }
- set len 0
- if {[dict exists $wapp CONTENT_LENGTH]} {
- set len [dict get $wapp CONTENT_LENGTH]
- }
- if {$len>0} {
- fconfigure stdin -translation binary
- dict set wapp CONTENT [read stdin $len]
- }
- dict set wapp WAPP_MODE cgi
- fconfigure stdout -translation binary
- wappInt-handle-request stdout 1
-}
-
-# Process new text received on an inbound SCGI request
-#
-proc wappInt-scgi-readable {chan} {
- if {[catch [list wappInt-scgi-readable-unsafe $chan] msg]} {
- puts stderr "$msg\n$::errorInfo"
- wappInt-close-channel $chan
- }
-}
-proc wappInt-scgi-readable-unsafe {chan} {
- upvar #0 wappInt-$chan W wapp wapp
- if {![dict exists $W .toread]} {
- # If the .toread key is not set, that means we are still reading
- # the header.
- #
-  # An SCGI header is short.  This implementation assumes the entire
- # header is available all at once.
- #
- dict set W .remove_addr [dict get $W REMOTE_ADDR]
- set req [read $chan 15]
- set n [string length $req]
- scan $req %d:%s len hdr
- incr len [string length "$len:,"]
- append hdr [read $chan [expr {$len-15}]]
- foreach {nm val} [split $hdr \000] {
- if {$nm==","} break
- dict set W $nm $val
- }
- set len 0
- if {[dict exists $W CONTENT_LENGTH]} {
- set len [dict get $W CONTENT_LENGTH]
- }
- if {$len>0} {
- # Still need to read the query content
- dict set W .toread $len
- } else {
- # There is no query content, so handle the request immediately
- dict set W SERVER_ADDR [dict get $W .remove_addr]
- set wapp $W
- wappInt-handle-request $chan 0
- }
- } else {
- # If .toread is set, that means we are reading the query content.
- # Continue reading until .toread reaches zero.
- set got [read $chan [dict get $W .toread]]
- dict append W CONTENT $got
- dict set W .toread [expr {[dict get $W .toread]-[string length $got]}]
- if {[dict get $W .toread]<=0} {
- # Handle the request as soon as all the query content is received
- dict set W SERVER_ADDR [dict get $W .remove_addr]
- set wapp $W
- wappInt-handle-request $chan 0
- }
- }
-}
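
For context, the reader above decodes the SCGI netstring framing: a decimal byte count, a colon, NUL-separated name/value pairs, and a trailing comma. The following is only an illustrative C sketch of that same wire format, not part of this change; the parseScgiHeader name and the example request bytes are made up, and a real server would also have to cope with partial reads, which the Tcl code deliberately does not.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse the netstring-framed SCGI header "LEN:name\0value\0...name\0value\0,"
** out of an in-memory buffer.  Illustrative only.
*/
static void parseScgiHeader(const char *zBuf, size_t nBuf){
  char *zEnd;
  long nHdr = strtol(zBuf, &zEnd, 10);         /* "LEN" before the colon */
  const char *p = zEnd + 1;                    /* skip the ':' */
  const char *pStop = p + nHdr;                /* header bytes end here */
  if( (size_t)(pStop - zBuf) > nBuf ) return;  /* incomplete header */
  while( p < pStop ){
    const char *zName = p;   p += strlen(p) + 1;   /* NUL-terminated name */
    const char *zValue = p;  p += strlen(p) + 1;   /* NUL-terminated value */
    printf("%s = %s\n", zName, zValue);
  }
  /* *pStop should now be the ',' that closes the netstring */
}

int main(void){
  static const char aReq[] = "24:CONTENT_LENGTH\0" "0\0" "SCGI\0" "1\0" ",";
  parseScgiHeader(aReq, sizeof(aReq)-1);
  return 0;
}
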
-
-# Start up the wapp framework. Parameters are a list passed as the
-# single argument.
-#
-# -server $PORT Listen for HTTP requests on this TCP port $PORT
-#
-# -local $PORT Listen for HTTP requests on 127.0.0.1:$PORT
-#
-# -scgi $PORT Listen for SCGI requests on 127.0.0.1:$PORT
-#
-# -remote-scgi $PORT Listen for SCGI requests on TCP port $PORT
-#
-# -cgi Handle a single CGI request
-#
-# With no arguments, the behavior is called "auto". In "auto" mode,
-# if the GATEWAY_INTERFACE environment variable indicates CGI, then run
-# as CGI. Otherwise, start an HTTP server bound to the loopback address
-# only, on an arbitrary TCP port, and automatically launch a web browser
-# on that TCP port.
-#
-# Additional options:
-#
-# -fromip GLOB Reject any incoming request where the remote
-# IP address does not match the GLOB pattern. This
-# value defaults to '127.0.0.1' for -local and -scgi.
-#
-# -nowait Do not wait in the event loop. Return immediately
-# after all event handlers are established.
-#
-# -trace "puts" each request URL as it is handled, for
-# debugging
-#
-# -lint Run wapp-safety-check on the application instead
-# of running the application itself
-#
-# -Dvar=value Set TCL global variable "var" to "value"
-#
-#
-proc wapp-start {arglist} {
- global env
- set mode auto
- set port 0
- set nowait 0
- set fromip {}
- set n [llength $arglist]
- for {set i 0} {$i<$n} {incr i} {
- set term [lindex $arglist $i]
- if {[string match --* $term]} {set term [string range $term 1 end]}
- switch -glob -- $term {
- -server {
- incr i;
- set mode "server"
- set port [lindex $arglist $i]
- }
- -local {
- incr i;
- set mode "local"
- set fromip 127.0.0.1
- set port [lindex $arglist $i]
- }
- -scgi {
- incr i;
- set mode "scgi"
- set fromip 127.0.0.1
- set port [lindex $arglist $i]
- }
- -remote-scgi {
- incr i;
- set mode "remote-scgi"
- set port [lindex $arglist $i]
- }
- -cgi {
- set mode "cgi"
- }
- -fromip {
- incr i
- set fromip [lindex $arglist $i]
- }
- -nowait {
- set nowait 1
- }
- -trace {
- proc wappInt-trace {} {
- set q [wapp-param QUERY_STRING]
- set uri [wapp-param BASE_URL][wapp-param PATH_INFO]
- if {$q!=""} {append uri ?$q}
- puts $uri
- }
- }
- -lint {
- set res [wapp-safety-check]
- if {$res!=""} {
- puts "Potential problems in this code:"
- puts $res
- exit 1
- } else {
- exit
- }
- }
- -D*=* {
- if {[regexp {^.D([^=]+)=(.*)$} $term all var val]} {
- set ::$var $val
- }
- }
- default {
- error "unknown option: $term"
- }
- }
- }
- if {$mode=="auto"} {
- if {[info exists env(GATEWAY_INTERFACE)]
- && [string match CGI/1.* $env(GATEWAY_INTERFACE)]} {
- set mode cgi
- } else {
- set mode local
- }
- }
- if {$mode=="cgi"} {
- wappInt-handle-cgi-request
- } else {
- wappInt-start-listener $port $mode $fromip
- if {!$nowait} {
- vwait ::forever
- }
- }
-}
-
-# Call this version 1.0
-package provide wapp 1.0
DELETED test/wapptest.tcl
Index: test/wapptest.tcl
==================================================================
--- test/wapptest.tcl
+++ /dev/null
@@ -1,909 +0,0 @@
-#!/bin/sh
-# \
-exec wapptclsh "$0" ${1+"$@"}
-
-# package required wapp
-source [file join [file dirname [info script]] wapp.tcl]
-
-# Variables set by the "control" form:
-#
-# G(platform) - User selected platform.
-# G(cfgglob) - Glob pattern that all configurations must match
-# G(test) - Set to "Normal", "Veryquick", "Smoketest" or "Build-Only".
-#   G(keep)     - Boolean. True to keep all files after each test (delete nothing).
-# G(msvc) - Boolean. True to use MSVC as the compiler.
-# G(tcl) - Use Tcl from this directory for builds.
-# G(jobs) - How many sub-processes to run simultaneously.
-#
-set G(platform) $::tcl_platform(os)-$::tcl_platform(machine)
-set G(cfgglob) *
-set G(test) Normal
-set G(keep) 1
-set G(msvc) 0
-set G(tcl) [::tcl::pkgconfig get libdir,install]
-set G(jobs) 3
-set G(debug) 0
-
-set G(noui) 0
-set G(stdout) 0
-
-
-proc wapptest_init {} {
- global G
-
- set lSave [list platform test keep msvc tcl jobs debug noui stdout cfgglob]
- foreach k $lSave { set A($k) $G($k) }
- array unset G
- foreach k $lSave { set G($k) $A($k) }
-
- # The root of the SQLite source tree.
- set G(srcdir) [file dirname [file dirname [info script]]]
-
- set G(sqlite_version) "unknown"
-
- # Either "config", "running" or "stopped":
- set G(state) "config"
-
- set G(hostname) "(unknown host)"
- catch { set G(hostname) [exec hostname] }
- set G(host) $G(hostname)
- append G(host) " $::tcl_platform(os) $::tcl_platform(osVersion)"
- append G(host) " $::tcl_platform(machine) $::tcl_platform(byteOrder)"
-}
-
-proc wapptest_run {} {
- global G
- set_test_array
- set G(state) "running"
-
- wapptest_openlog
-
- wapptest_output "Running the following for $G(platform). $G(jobs) jobs."
- foreach t $G(test_array) {
- set config [dict get $t config]
- set target [dict get $t target]
- wapptest_output [format " %-25s%s" $config $target]
- }
- wapptest_output [string repeat * 70]
-}
-
-proc releasetest_data {args} {
- global G
- set rtd [file join $G(srcdir) test releasetest_data.tcl]
- set fd [open "|[info nameofexecutable] $rtd $args" r+]
- set ret [read $fd]
- close $fd
- return $ret
-}
-
-# Generate the text for the box at the top of the UI. The current SQLite
-# version, according to fossil, along with a warning if there are
-# uncommitted changes in the checkout.
-#
-proc generate_fossil_info {} {
- global G
- set pwd [pwd]
- cd $G(srcdir)
- set rc [catch {
- set r1 [exec fossil info]
- set r2 [exec fossil changes]
- }]
- cd $pwd
- if {$rc} return
-
- foreach line [split $r1 "\n"] {
- if {[regexp {^checkout: *(.*)$} $line -> co]} {
- wapp-trim { %html($co) }
- }
- }
-
- if {[string trim $r2]!=""} {
- wapp-trim {
-
- WARNING: Uncommitted changes in checkout
-
- }
- }
-}
-
-# If the application is in "config" state, set the contents of the
-# ::G(test_array) global to reflect the tests that will be run. If the
-# app is in some other state ("running" or "stopped"), this command
-# is a no-op.
-#
-proc set_test_array {} {
- global G
- if { $G(state)=="config" } {
- set G(test_array) [list]
- set debug "-debug"
- if {$G(debug)==0} { set debug "-nodebug"}
- foreach {config target} [releasetest_data tests $debug $G(platform)] {
-
-      # All configuration names must match $G(cfgglob), which defaults to *
- #
- if {![string match -nocase $G(cfgglob) $config]} continue
-
- # If using MSVC, do not run sanitize or valgrind tests. Or the
- # checksymbols test.
- if {$G(msvc) && (
- "Sanitize" == $config
- || "checksymbols" in $target
- || "valgrindtest" in $target
- )} {
- continue
- }
-
- # If the test mode is not "Normal", override the target.
- #
- if {$target!="checksymbols" && $G(platform)!="Failure-Detection"} {
- switch -- $G(test) {
- Veryquick { set target quicktest }
- Smoketest { set target smoketest }
- Build-Only {
- set target testfixture
- if {$::tcl_platform(platform)=="windows"} {
- set target testfixture.exe
- }
- }
- }
- }
-
- lappend G(test_array) [dict create config $config target $target]
- }
- }
-}
-
-proc count_tests_and_errors {name logfile} {
- global G
-
- set fd [open $logfile rb]
- set seen 0
- while {![eof $fd]} {
- set line [gets $fd]
- if {[regexp {(\d+) errors out of (\d+) tests} $line all nerr ntest]} {
- incr G(test.$name.nError) $nerr
- incr G(test.$name.nTest) $ntest
- set seen 1
- if {$nerr>0} {
- set G(test.$name.errmsg) $line
- }
- }
- if {[regexp {runtime error: +(.*)} $line all msg]} {
- # skip over "value is outside range" errors
- if {[regexp {.* is outside the range of representable} $line]} {
- # noop
- } else {
- incr G(test.$name.nError)
- if {$G(test.$name.errmsg)==""} {
- set G(test.$name.errmsg) $msg
- }
- }
- }
- if {[regexp {fatal error +(.*)} $line all msg]} {
- incr G(test.$name.nError)
- if {$G(test.$name.errmsg)==""} {
- set G(test.$name.errmsg) $msg
- }
- }
- if {[regexp {ERROR SUMMARY: (\d+) errors.*} $line all cnt] && $cnt>0} {
- incr G(test.$name.nError)
- if {$G(test.$name.errmsg)==""} {
- set G(test.$name.errmsg) $all
- }
- }
- if {[regexp {^VERSION: 3\.\d+.\d+} $line]} {
- set v [string range $line 9 end]
- if {$G(sqlite_version) eq "unknown"} {
- set G(sqlite_version) $v
- } elseif {$G(sqlite_version) ne $v} {
- set G(test.$name.errmsg) "version conflict: {$G(sqlite_version)} vs. {$v}"
- }
- }
- }
- close $fd
- if {$G(test) == "Build-Only"} {
- incr G(test.$name.nTest)
- if {$G(test.$name.nError)>0} {
- set errmsg "Build failed"
- }
- } elseif {!$seen} {
- set G(test.$name.errmsg) "Test did not complete"
- if {[file readable core]} {
- append G(test.$name.errmsg) " - core file exists"
- }
- }
-}
-
-proc wapptest_output {str} {
- global G
- if {$G(stdout)} { puts $str }
- if {[info exists G(log)]} {
- puts $G(log) $str
- flush $G(log)
- }
-}
-proc wapptest_openlog {} {
- global G
- set G(log) [open wapptest-out.txt w+]
-}
-proc wapptest_closelog {} {
- global G
- close $G(log)
- unset G(log)
-}
-
-proc format_seconds {seconds} {
- set min [format %.2d [expr ($seconds / 60) % 60]]
- set hr [format %.2d [expr $seconds / 3600]]
- set sec [format %.2d [expr $seconds % 60]]
- return "$hr:$min:$sec"
-}
-
-# This command is invoked once a slave process has finished running its
-# tests, successfully or otherwise. Parameter $name is the name of the
-# test, $rc the exit code returned by the slave process.
-#
-proc slave_test_done {name rc} {
- global G
- set G(test.$name.done) [clock seconds]
- set G(test.$name.nError) 0
- set G(test.$name.nTest) 0
- set G(test.$name.errmsg) ""
- if {$rc} {
- incr G(test.$name.nError)
- }
- if {[file exists $G(test.$name.log)]} {
- count_tests_and_errors $name $G(test.$name.log)
- }
-
- # If the "keep files" checkbox is clear, delete all files except for
- # the executables and test logs. And any core file that is present.
- if {$G(keep)==0} {
- set keeplist {
- testfixture testfixture.exe
- sqlite3 sqlite3.exe
- test.log test-out.txt
- core
- wapptest_make.sh
- wapptest_configure.sh
- wapptest_run.tcl
- }
- foreach f [glob -nocomplain [file join $G(test.$name.dir) *]] {
- set t [file tail $f]
- if {[lsearch $keeplist $t]<0} {
- catch { file delete -force $f }
- }
- }
- }
-
-  # Format a message regarding the success or failure of the test.
- set t [format_seconds [expr $G(test.$name.done) - $G(test.$name.start)]]
- set res "OK"
- if {$G(test.$name.nError)} { set res "FAILED" }
- set dots [string repeat . [expr 60 - [string length $name]]]
- set msg "$name $dots $res ($t)"
-
- wapptest_output $msg
- if {[info exists G(test.$name.errmsg)] && $G(test.$name.errmsg)!=""} {
- wapptest_output " $G(test.$name.errmsg)"
- }
-}
-
-# This is a fileevent callback invoked each time a file-descriptor that
-# connects this process to a slave process is readable.
-#
-proc slave_fileevent {name} {
- global G
- set fd $G(test.$name.channel)
-
- if {[eof $fd]} {
- fconfigure $fd -blocking 1
- set rc [catch { close $fd }]
- unset G(test.$name.channel)
- slave_test_done $name $rc
- } else {
- set line [gets $fd]
- if {[string trim $line] != ""} { puts "Trace : $name - \"$line\"" }
- }
-
- do_some_stuff
-}
-
-# Return the contents of the "slave script" - the script run by slave
-# processes to actually perform the test. All it does is execute the
-# test script already written to disk (wapptest_cmd.sh or wapptest_cmd.bat).
-#
-proc wapptest_slave_script {} {
- global G
- if {$G(msvc)==0} {
- set dir [file join .. $G(srcdir)]
- set res [subst -nocommands {
- set rc [catch "exec sh wapptest_cmd.sh {$dir} >>& test.log" ]
- exit [set rc]
- }]
- } else {
- set dir [file nativename [file normalize $G(srcdir)]]
- set dir [string map [list "\\" "\\\\"] $dir]
- set res [subst -nocommands {
- set rc [catch "exec wapptest_cmd.bat {$dir} >>& test.log" ]
- exit [set rc]
- }]
- }
-
- set res
-}
-
-
-# Launch a slave process to run a test.
-#
-proc slave_launch {name target dir} {
- global G
-
- catch { file mkdir $dir } msg
- foreach f [glob -nocomplain [file join $dir *]] {
- catch { file delete -force $f }
- }
- set G(test.$name.dir) $dir
-
- # Write the test command to wapptest_cmd.sh|bat.
- #
- set ext sh
- if {$G(msvc)} { set ext bat }
- set fd1 [open [file join $dir wapptest_cmd.$ext] w]
- if {$G(msvc)} {
- puts $fd1 [releasetest_data script -msvc $name $target]
- } else {
- puts $fd1 [releasetest_data script $name $target]
- }
- close $fd1
-
-  # Write the wapptest_run.tcl script to the test directory. It executes
-  # the commands in the wapptest_cmd.* script written above.
- #
- set fd3 [open [file join $dir wapptest_run.tcl] w]
- puts $fd3 [wapptest_slave_script]
- close $fd3
-
- set pwd [pwd]
- cd $dir
- set fd [open "|[info nameofexecutable] wapptest_run.tcl" r+]
- cd $pwd
-
- set G(test.$name.channel) $fd
- fconfigure $fd -blocking 0
- fileevent $fd readable [list slave_fileevent $name]
-}
-
-proc do_some_stuff {} {
- global G
-
- # Count the number of running jobs. A running job has an entry named
- # "channel" in its dictionary.
- set nRunning 0
- set bFinished 1
- foreach j $G(test_array) {
- set name [dict get $j config]
- if { [info exists G(test.$name.channel)]} { incr nRunning }
- if {![info exists G(test.$name.done)]} { set bFinished 0 }
- }
-
- if {$bFinished} {
- set nError 0
- set nTest 0
- set nConfig 0
- foreach j $G(test_array) {
- set name [dict get $j config]
- incr nError $G(test.$name.nError)
- incr nTest $G(test.$name.nTest)
- incr nConfig
- }
- set G(result) "$nError errors from $nTest tests in $nConfig configurations."
- wapptest_output [string repeat * 70]
- wapptest_output $G(result)
- catch {
- append G(result) " SQLite version $G(sqlite_version)"
- wapptest_output " SQLite version $G(sqlite_version)"
- }
- set G(state) "stopped"
- wapptest_closelog
- if {$G(noui)} { exit 0 }
- } else {
- set nLaunch [expr $G(jobs) - $nRunning]
- foreach j $G(test_array) {
- if {$nLaunch<=0} break
- set name [dict get $j config]
- if { ![info exists G(test.$name.channel)]
- && ![info exists G(test.$name.done)]
- } {
-
- set target [dict get $j target]
- set dir [string tolower [string map {" " _ "-" _} $name]]
- set G(test.$name.start) [clock seconds]
- set G(test.$name.log) [file join $dir test.log]
-
- slave_launch $name $target $dir
-
- incr nLaunch -1
- }
- }
- }
-}
-
-proc generate_select_widget {label id lOpt opt} {
- wapp-trim {
-
- }
-}
-
-proc generate_main_page {{extra {}}} {
- global G
- set_test_array
-
- set hostname $G(hostname)
- wapp-trim {
-
-
- %html($hostname): wapptest.tcl
-
-
-
- }
-
- set host $G(host)
- wapp-trim {
-
}
- foreach t $G(test_array) {
- set config [dict get $t config]
- set target [dict get $t target]
-
- set class "testwait"
- set seconds ""
-
- if {[info exists G(test.$config.log)]} {
- if {[info exists G(test.$config.channel)]} {
- set class "testrunning"
- set seconds [expr [clock seconds] - $G(test.$config.start)]
- } elseif {[info exists G(test.$config.done)]} {
- if {$G(test.$config.nError)>0} {
- set class "testfail"
- } else {
- set class "testdone"
- }
- set seconds [expr $G(test.$config.done) - $G(test.$config.start)]
- }
- set seconds [format_seconds $seconds]
- }
-
- wapp-trim {
-
-
%html($config)
-
%html($target)
-
%html($seconds)
-
- }
- if {[info exists G(test.$config.log)]} {
- set log $G(test.$config.log)
- set uri "log/$log"
- wapp-trim {
- %html($log)
- }
- }
- if {[info exists G(test.$config.errmsg)] && $G(test.$config.errmsg)!=""} {
- set errmsg $G(test.$config.errmsg)
- wapp-trim {
-
-
%html($errmsg)
- }
- }
- }
-
- wapp-trim {
}
-
- if {[info exists G(result)]} {
- set res $G(result)
- wapp-trim {
-
%string($res)
- }
- }
-}
-
-# URI: /control
-#
-# Whenever the form at the top of the application page is submitted, it
-# is submitted here.
-#
-proc wapp-page-control {} {
- global G
- if {$::G(state)=="config"} {
- set lControls [list platform test tcl jobs keep msvc debug]
- set G(msvc) 0
- set G(keep) 0
- set G(debug) 0
- } else {
- set lControls [list jobs]
- }
- foreach v $lControls {
- if {[wapp-param-exists control_$v]} {
- set G($v) [wapp-param control_$v]
- }
- }
-
- if {[wapp-param-exists control_run]} {
- # This is a "run test" command.
- wapptest_run
- }
-
- if {[wapp-param-exists control_stop]} {
- # A "STOP tests" command.
- set G(state) "stopped"
- set G(result) "Test halted by user"
- foreach j $G(test_array) {
- set name [dict get $j config]
- if { [info exists G(test.$name.channel)] } {
- close $G(test.$name.channel)
- unset G(test.$name.channel)
- slave_test_done $name 1
- }
- }
- wapptest_closelog
- }
-
- if {[wapp-param-exists control_reset]} {
- # A "reset app" command.
- set G(state) "config"
- wapptest_init
- }
-
- if {$::G(state) == "running"} {
- do_some_stuff
- }
- wapp-redirect /
-}
-
-# URI: /style.css
-#
-# Return the stylesheet for the application main page.
-#
-proc wapp-page-style.css {} {
- wapp-subst {
-
- /* The boxes with black borders use this class */
- .border {
- border: 3px groove #444444;
- padding: 1em;
- margin-top: 1em;
- margin-bottom: 1em;
- }
-
- /* Float to the right (used for the Run/Stop/Reset button) */
- .right { float: right; }
-
- /* Style for the large red warning at the top of the page */
- .warning {
- color: red;
- font-weight: bold;
- }
-
- /* Styles used by cells in the test table */
- .padleft { padding-left: 5ex; }
- .nowrap { white-space: nowrap; }
-
- /* Styles for individual tests, depending on the outcome */
- .testwait { }
- .testrunning { color: blue }
- .testdone { color: green }
- .testfail { color: red }
- }
-}
-
-# URI: /script/${state}.js
-#
-# The last part of this URI is always "config.js", "running.js" or
-# "stopped.js", depending on the state of the application. It returns
-# the javascript part of the front-end for the requested state to the
-# browser.
-#
-proc wapp-page-script {} {
- regexp {[^/]*$} [wapp-param REQUEST_URI] script
-
- set tcl $::G(tcl)
- set keep $::G(keep)
- set msvc $::G(msvc)
- set debug $::G(debug)
-
- wapp-subst {
- var lElem = \["control_platform", "control_test", "control_msvc",
- "control_jobs", "control_debug"
- \];
- lElem.forEach(function(e) {
- var elem = document.getElementById(e);
- elem.addEventListener("change", function() { control.submit() } );
- })
-
- elem = document.getElementById("control_tcl");
- elem.value = "%string($tcl)"
-
- elem = document.getElementById("control_keep");
- elem.checked = %string($keep);
-
- elem = document.getElementById("control_msvc");
- elem.checked = %string($msvc);
-
- elem = document.getElementById("control_debug");
- elem.checked = %string($debug);
- }
-
- if {$script != "config.js"} {
- wapp-subst {
- var lElem = \["control_platform", "control_test",
- "control_tcl", "control_keep", "control_msvc",
- "control_debug"
- \];
- lElem.forEach(function(e) {
- var elem = document.getElementById(e);
- elem.disabled = true;
- })
- }
- }
-
- if {$script == "running.js"} {
- wapp-subst {
- function reload_tests() {
- fetch('tests')
- .then( data => data.text() )
- .then( data => {
- document.getElementById("tests").innerHTML = data;
- })
- .then( data => {
- if( document.getElementById("result") ){
- document.location = document.location;
- } else {
- setTimeout(reload_tests, 1000)
- }
- });
- }
-
- setTimeout(reload_tests, 1000)
- }
- }
-}
-
-# URI: /env
-#
-# This is for debugging only. Serves no other purpose.
-#
-proc wapp-page-env {} {
- wapp-allow-xorigin-params
- wapp-trim {
-    <h1>Wapp Environment</h1>\n
-    <pre>%html([wapp-debug-env])</pre>
- }
-}
-
-# URI: /log/dirname/test.log
-#
-# This URI reads file "dirname/test.log" from disk, wraps it in a
-# <pre> block, and returns it to the browser. Use for viewing log files.
-#
-proc wapp-page-log {} {
- set log [string range [wapp-param REQUEST_URI] 5 end]
- set fd [open $log]
- set data [read $fd]
- close $fd
- wapp-trim {
-    <pre>
-    %html($data)
-    </pre>
- }
-}
-
-# Print out a usage message. Then do [exit 1].
-#
-proc wapptest_usage {} {
- puts stderr {
-This Tcl script is used to test various configurations of SQLite. By
-default it uses "wapp" to provide an interactive interface. Supported
-command line options (all optional) are:
-
- --platform PLATFORM (which tests to run)
- --config GLOB (only run configurations matching GLOB)
- --smoketest (run "make smoketest" only)
- --veryquick (run veryquick.test only)
- --buildonly (build executables, do not run tests)
- --jobs N (number of concurrent jobs)
- --tcl DIR (where to find tclConfig.sh)
- --deletefiles (delete extra files after each test)
- --msvc (Use MS Visual C)
- --debug (Also run [n]debugging versions of tests)
- --noui (do not use wapp)
- }
- exit 1
-}
-
-# Sort command line arguments into two groups: those that belong to wapp,
-# and those that belong to the application.
-set WAPPARG(-server) 1
-set WAPPARG(-local) 1
-set WAPPARG(-scgi) 1
-set WAPPARG(-remote-scgi) 1
-set WAPPARG(-fromip) 1
-set WAPPARG(-nowait) 0
-set WAPPARG(-cgi) 0
-set lWappArg [list]
-set lTestArg [list]
-for {set i 0} {$i < [llength $argv]} {incr i} {
- set arg [lindex $argv $i]
- if {[string range $arg 0 1]=="--"} {
- set arg [string range $arg 1 end]
- }
- if {[info exists WAPPARG($arg)]} {
- lappend lWappArg $arg
- if {$WAPPARG($arg)} {
- incr i
- lappend lWappArg [lindex $argv $i]
- }
- } else {
- lappend lTestArg $arg
- }
-}
-
-wapptest_init
-for {set i 0} {$i < [llength $lTestArg]} {incr i} {
- set opt [lindex $lTestArg $i]
- if {[string range $opt 0 1]=="--"} {
- set opt [string range $opt 1 end]
- }
- switch -- $opt {
- -platform {
- if {$i==[llength $lTestArg]-1} { wapptest_usage }
- incr i
- set arg [lindex $lTestArg $i]
- set lPlatform [releasetest_data platforms]
- if {[lsearch $lPlatform $arg]<0} {
- puts stderr "No such platform: $arg. Platforms are: $lPlatform"
- exit -1
- }
- set G(platform) $arg
- }
-
- -smoketest { set G(test) Smoketest }
- -veryquick { set G(test) Veryquick }
- -buildonly { set G(test) Build-Only }
- -jobs {
- if {$i==[llength $lTestArg]-1} { wapptest_usage }
- incr i
- set G(jobs) [lindex $lTestArg $i]
- }
-
- -tcl {
- if {$i==[llength $lTestArg]-1} { wapptest_usage }
- incr i
- set G(tcl) [lindex $lTestArg $i]
- }
-
- -deletefiles {
- set G(keep) 0
- }
-
- -msvc {
- set G(msvc) 1
- }
-
- -debug {
- set G(debug) 1
- }
-
- -noui {
- set G(noui) 1
- set G(stdout) 1
- }
-
- -config {
- if {$i==[llength $lTestArg]-1} { wapptest_usage }
- incr i
- set G(cfgglob) [lindex $lTestArg $i]
- }
-
- -stdout {
- set G(stdout) 1
- }
-
- default {
- puts stderr "Unrecognized option: [lindex $lTestArg $i]"
- wapptest_usage
- }
- }
-}
-
-if {$G(noui)==0} {
- wapp-start $lWappArg
-} else {
- wapptest_run
- do_some_stuff
- vwait forever
-}
Index: test/where3.test
==================================================================
--- test/where3.test
+++ test/where3.test
@@ -489,8 +489,30 @@
} {123 123}
do_execsql_test where3-7.$disabled_opt.9 {
SELECT DISTINCT x3 FROM t73 LEFT JOIN t74 ON x4=y3;
} {123}
}
+
+# 2023-12-23
+# https://sqlite.org/forum/forumpost/2568d1f6e6
+#
+# Index usage should be "x=? and y=?" - equality on both values.
+# Not: "x=? AND y>?" - inequality on "y"
+#
+reset_db
+do_execsql_test where3-8.1 {
+ CREATE TABLE t1(a,b,c,d); INSERT INTO t1 VALUES(1,2,3,4);
+ CREATE TABLE t2(x,y); INSERT INTO t2 VALUES(3,4);
+ CREATE INDEX t2xy ON t2(x,y);
+ SELECT 1 FROM t1 JOIN t2 ON x=c AND y=d WHERE d>0;
+} 1
+do_eqp_test where3-8.2 {
+ SELECT 1 FROM t1 JOIN t2 ON x=c AND y=d WHERE d>0;
+} {
+ QUERY PLAN
+ |--SCAN t1
+ `--SEARCH t2 USING COVERING INDEX t2xy (x=? AND y=?)
+}
+
finish_test
Index: test/window1.test
==================================================================
--- test/window1.test
+++ test/window1.test
@@ -1879,11 +1879,11 @@
INSERT INTO t3(y) VALUES(5),(11),(-9);
SELECT (
SELECT max(y) OVER( ORDER BY (SELECT x FROM (SELECT sum(y) AS x FROM t1)))
)
FROM t3;
-} {1 {misuse of aggregate: sum()}}
+} {0 5}
# 2020-06-06 ticket 1f6f353b684fc708
reset_db
do_execsql_test 58.1 {
CREATE TABLE a(a, b, c);
Index: tool/lemon.c
==================================================================
--- tool/lemon.c
+++ tool/lemon.c
@@ -416,10 +416,12 @@
char *tokendest; /* Code to execute to destroy token data */
char *vardest; /* Code for the default non-terminal destructor */
char *filename; /* Name of the input file */
char *outname; /* Name of the current output file */
char *tokenprefix; /* A prefix added to token names in the .h file */
+ char *reallocFunc; /* Function to use to allocate stack space */
+ char *freeFunc; /* Function to use to free stack space */
int nconflict; /* Number of parsing conflicts */
int nactiontab; /* Number of entries in the yy_action[] table */
int nlookaheadtab; /* Number of entries in yy_lookahead[] */
int tablesize; /* Total table size of all tables in bytes */
int basisflag; /* Print only basis configurations */
@@ -2529,10 +2531,16 @@
psp->declargslot = &(psp->gp->tokentype);
psp->insertLineMacro = 0;
}else if( strcmp(x,"default_type")==0 ){
psp->declargslot = &(psp->gp->vartype);
psp->insertLineMacro = 0;
+ }else if( strcmp(x,"realloc")==0 ){
+ psp->declargslot = &(psp->gp->reallocFunc);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"free")==0 ){
+ psp->declargslot = &(psp->gp->freeFunc);
+ psp->insertLineMacro = 0;
}else if( strcmp(x,"stack_size")==0 ){
psp->declargslot = &(psp->gp->stacksize);
psp->insertLineMacro = 0;
}else if( strcmp(x,"start_symbol")==0 ){
psp->declargslot = &(psp->gp->start);
@@ -4307,11 +4315,11 @@
int lineno;
struct state *stp;
struct action *ap;
struct rule *rp;
struct acttab *pActtab;
- int i, j, n, sz;
+ int i, j, n, sz, mn, mx;
int nLookAhead;
int szActionType; /* sizeof(YYACTIONTYPE) */
int szCodeType; /* sizeof(YYCODETYPE) */
const char *name;
int mnTknOfst, mxTknOfst;
@@ -4499,10 +4507,25 @@
fprintf(out,"#define %sARG_PDECL\n",name); lineno++;
fprintf(out,"#define %sARG_PARAM\n",name); lineno++;
fprintf(out,"#define %sARG_FETCH\n",name); lineno++;
fprintf(out,"#define %sARG_STORE\n",name); lineno++;
}
+ if( lemp->reallocFunc ){
+ fprintf(out,"#define YYREALLOC %s\n", lemp->reallocFunc); lineno++;
+ }else{
+ fprintf(out,"#define YYREALLOC realloc\n"); lineno++;
+ }
+ if( lemp->freeFunc ){
+ fprintf(out,"#define YYFREE %s\n", lemp->freeFunc); lineno++;
+ }else{
+ fprintf(out,"#define YYFREE free\n"); lineno++;
+ }
+ if( lemp->reallocFunc && lemp->freeFunc ){
+ fprintf(out,"#define YYDYNSTACK 1\n"); lineno++;
+ }else{
+ fprintf(out,"#define YYDYNSTACK 0\n"); lineno++;
+ }
if( lemp->ctx && lemp->ctx[0] ){
i = lemonStrlen(lemp->ctx);
while( i>=1 && ISSPACE(lemp->ctx[i-1]) ) i--;
while( i>=1 && (ISALNUM(lemp->ctx[i-1]) || lemp->ctx[i-1]=='_') ) i--;
fprintf(out,"#define %sCTX_SDECL %s;\n",name,lemp->ctx); lineno++;
@@ -4622,10 +4645,26 @@
fprintf(out,"#define YY_ACCEPT_ACTION %d\n", lemp->accAction); lineno++;
fprintf(out,"#define YY_NO_ACTION %d\n", lemp->noAction); lineno++;
fprintf(out,"#define YY_MIN_REDUCE %d\n", lemp->minReduce); lineno++;
i = lemp->minReduce + lemp->nrule;
fprintf(out,"#define YY_MAX_REDUCE %d\n", i-1); lineno++;
+
+ /* Minimum and maximum token values that have a destructor */
+ mn = mx = 0;
+  for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+
+ if( sp && sp->type!=TERMINAL && sp->destructor ){
+      if( mn==0 || sp->index<mn ) mn = sp->index;
+ if( sp->index>mx ) mx = sp->index;
+ }
+ }
+ if( lemp->tokendest ) mn = 0;
+ if( lemp->vardest ) mx = lemp->nsymbol-1;
+ fprintf(out,"#define YY_MIN_DSTRCTR %d\n", mn); lineno++;
+ fprintf(out,"#define YY_MAX_DSTRCTR %d\n", mx); lineno++;
+
tplt_xfer(lemp->name,in,out,&lineno);
/* Now output the action table and its associates:
**
** yy_action[] A single table containing all actions.
@@ -4765,11 +4804,11 @@
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate the table of fallback tokens.
*/
if( lemp->has_fallback ){
- int mx = lemp->nterminal - 1;
+ mx = lemp->nterminal - 1;
/* 2019-08-28: Generate fallback entries for every token to avoid
** having to do a range check on the index */
/* while( mx>0 && lemp->symbols[mx]->fallback==0 ){ mx--; } */
lemp->tablesize += (mx+1)*szCodeType;
for(i=0; i<=mx; i++){
Index: tool/lempar.c
==================================================================
--- tool/lempar.c
+++ tool/lempar.c
@@ -65,10 +65,13 @@
** ParseARG_PDECL A parameter declaration for the %extra_argument
** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter
** ParseARG_STORE Code to store %extra_argument into yypParser
** ParseARG_FETCH Code to extract %extra_argument from yypParser
** ParseCTX_* As ParseARG_ except for %extra_context
+** YYREALLOC Name of the realloc() function to use
+** YYFREE Name of the free() function to use
+** YYDYNSTACK True if stack space should be extended on heap
** YYERRORSYMBOL is the code number of the error symbol. If not
** defined, then do no error processing.
** YYNSTATE the combined number of states.
** YYNRULE the number of rules in the grammar
** YYNTOKEN Number of terminal symbols
@@ -78,10 +81,12 @@
** YY_ERROR_ACTION The yy_action[] code for syntax error
** YY_ACCEPT_ACTION The yy_action[] code for accept
** YY_NO_ACTION The yy_action[] code for no-op
** YY_MIN_REDUCE Minimum value for reduce actions
** YY_MAX_REDUCE Maximum value for reduce actions
+** YY_MIN_DSTRCTR Minimum symbol value that has a destructor
+** YY_MAX_DSTRCTR Maximum symbol value that has a destructor
*/
#ifndef INTERFACE
# define INTERFACE 1
#endif
/************* Begin control #defines *****************************************/
@@ -98,10 +103,26 @@
** for testing.
*/
#ifndef yytestcase
# define yytestcase(X)
#endif
+
+/* Macro to determine if stack space has the ability to grow using
+** heap memory.
+*/
+#if YYSTACKDEPTH<=0 || YYDYNSTACK
+# define YYGROWABLESTACK 1
+#else
+# define YYGROWABLESTACK 0
+#endif
+
+/* Guarantee a minimum number of initial stack slots.
+*/
+#if YYSTACKDEPTH<=0
+# undef YYSTACKDEPTH
+# define YYSTACKDEPTH 2 /* Need a minimum stack size */
+#endif
/* Next are the tables used to determine what action to take based on the
** current state and lookahead token. These tables are used to implement
** functions that take a state number and lookahead value and return an
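
To summarize the configuration logic introduced above: the stack becomes growable either when the grammar asks for an unbounded stack (so YYSTACKDEPTH<=0) or when it supplies both %realloc and %free (so YYDYNSTACK is 1). A small self-contained sketch of how the derivation plays out for one hypothetical configuration; the 100-entry fixed stack is an assumption for the example only.

#include <stdio.h>

/* Mirror of the YYGROWABLESTACK derivation above, evaluated for one
** hypothetical configuration: a fixed 100-entry stack and no
** %realloc/%free directives.  Changing either value flips the result.
*/
#define YYSTACKDEPTH 100   /* as if the grammar said "%stack_size 100" */
#define YYDYNSTACK   0     /* no %realloc/%free supplied */

#if YYSTACKDEPTH<=0 || YYDYNSTACK
# define YYGROWABLESTACK 1
#else
# define YYGROWABLESTACK 0
#endif

int main(void){
  printf("YYGROWABLESTACK = %d\n", YYGROWABLESTACK);  /* prints 0 here */
  return 0;
}
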
@@ -210,18 +231,13 @@
#ifndef YYNOERRORRECOVERY
int yyerrcnt; /* Shifts left before out of the error */
#endif
ParseARG_SDECL /* A place to hold %extra_argument */
ParseCTX_SDECL /* A place to hold %extra_context */
-#if YYSTACKDEPTH<=0
- int yystksz; /* Current side of the stack */
- yyStackEntry *yystack; /* The parser's stack */
- yyStackEntry yystk0; /* First stack entry */
-#else
- yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */
- yyStackEntry *yystackEnd; /* Last entry in the stack */
-#endif
+ yyStackEntry *yystackEnd; /* Last entry in the stack */
+ yyStackEntry *yystack; /* The parser stack */
+ yyStackEntry yystk0[YYSTACKDEPTH]; /* Initial stack space */
};
typedef struct yyParser yyParser;
#include <assert.h>
#ifndef NDEBUG
@@ -271,41 +287,49 @@
%%
};
#endif /* NDEBUG */
-#if YYSTACKDEPTH<=0
+#if YYGROWABLESTACK
/*
** Try to increase the size of the parser stack. Return the number
** of errors. Return 0 on success.
*/
static int yyGrowStack(yyParser *p){
+ int oldSize = 1 + (int)(p->yystackEnd - p->yystack);
int newSize;
int idx;
yyStackEntry *pNew;
- newSize = p->yystksz*2 + 100;
- idx = p->yytos ? (int)(p->yytos - p->yystack) : 0;
- if( p->yystack==&p->yystk0 ){
- pNew = malloc(newSize*sizeof(pNew[0]));
- if( pNew ) pNew[0] = p->yystk0;
+ newSize = oldSize*2 + 100;
+ idx = (int)(p->yytos - p->yystack);
+ if( p->yystack==p->yystk0 ){
+ pNew = YYREALLOC(0, newSize*sizeof(pNew[0]));
+ if( pNew==0 ) return 1;
+ memcpy(pNew, p->yystack, oldSize*sizeof(pNew[0]));
}else{
- pNew = realloc(p->yystack, newSize*sizeof(pNew[0]));
+ pNew = YYREALLOC(p->yystack, newSize*sizeof(pNew[0]));
+ if( pNew==0 ) return 1;
}
- if( pNew ){
- p->yystack = pNew;
- p->yytos = &p->yystack[idx];
+ p->yystack = pNew;
+ p->yytos = &p->yystack[idx];
#ifndef NDEBUG
- if( yyTraceFILE ){
- fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n",
- yyTracePrompt, p->yystksz, newSize);
- }
-#endif
- p->yystksz = newSize;
- }
- return pNew==0;
-}
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n",
+ yyTracePrompt, oldSize, newSize);
+ }
+#endif
+ p->yystackEnd = &p->yystack[newSize-1];
+ return 0;
+}
+#endif /* YYGROWABLESTACK */
+
+#if !YYGROWABLESTACK
+/* For builds that do not have a growable stack, yyGrowStack always
+** returns an error.
+*/
+# define yyGrowStack(X) 1
#endif
/* Datatype of the argument to the memory allocated passed as the
** second argument to ParseAlloc() below. This can be changed by
** putting an appropriate #define in the %include section of the input
@@ -321,28 +345,18 @@
yyParser *yypParser = (yyParser*)yypRawParser;
ParseCTX_STORE
#ifdef YYTRACKMAXSTACKDEPTH
yypParser->yyhwm = 0;
#endif
-#if YYSTACKDEPTH<=0
- yypParser->yytos = NULL;
- yypParser->yystack = NULL;
- yypParser->yystksz = 0;
- if( yyGrowStack(yypParser) ){
- yypParser->yystack = &yypParser->yystk0;
- yypParser->yystksz = 1;
- }
-#endif
+ yypParser->yystack = yypParser->yystk0;
+ yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1];
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt = -1;
#endif
yypParser->yytos = yypParser->yystack;
yypParser->yystack[0].stateno = 0;
yypParser->yystack[0].major = 0;
-#if YYSTACKDEPTH>0
- yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1];
-#endif
}
#ifndef Parse_ENGINEALWAYSONSTACK
/*
** This function allocates a new parser.
@@ -424,13 +438,30 @@
/*
** Clear all secondary memory allocations from the parser
*/
void ParseFinalize(void *p){
yyParser *pParser = (yyParser*)p;
- while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser);
-#if YYSTACKDEPTH<=0
- if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack);
+
+ /* In-lined version of calling yy_pop_parser_stack() for each
+ ** element left in the stack */
+ yyStackEntry *yytos = pParser->yytos;
+ while( yytos>pParser->yystack ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sPopping %s\n",
+ yyTracePrompt,
+ yyTokenName[yytos->major]);
+ }
+#endif
+ if( yytos->major>=YY_MIN_DSTRCTR ){
+ yy_destructor(pParser, yytos->major, &yytos->minor);
+ }
+ yytos--;
+ }
+
+#if YYGROWABLESTACK
+ if( pParser->yystack!=pParser->yystk0 ) YYFREE(pParser->yystack);
#endif
}
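
For orientation, ParseFinalize() above runs when the application tears a parser down, normally via ParseFree(). A hedged sketch of the usual driver loop around a lemon-generated parser is shown below; the token array, its values, and ParseTOKENTYPE being const char* are assumptions made for the example, not part of this change.

#include <stdlib.h>

/* Interface exported by a lemon-generated parser (default "Parse" prefix). */
typedef const char *ParseTOKENTYPE;     /* assumed %token_type for the sketch */
void *ParseAlloc(void *(*mallocProc)(size_t));
void  Parse(void *pParser, int tokenCode, ParseTOKENTYPE value);
void  ParseFree(void *pParser, void (*freeProc)(void*));

static void parseAll(const int *aTok, const char **azVal, int nTok){
  void *p = ParseAlloc(malloc);     /* parser starts on its built-in yystk0[] */
  int i;
  for(i=0; i<nTok; i++){
    Parse(p, aTok[i], azVal[i]);    /* stack grows via YYREALLOC if allowed */
  }
  Parse(p, 0, 0);                   /* token code 0 signals end of input */
  ParseFree(p, free);               /* runs ParseFinalize(), then frees p */
}
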
#ifndef Parse_ENGINEALWAYSONSTACK
/*
@@ -652,29 +683,23 @@
if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
yypParser->yyhwm++;
assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
}
#endif
-#if YYSTACKDEPTH>0
- if( yypParser->yytos>yypParser->yystackEnd ){
- yypParser->yytos--;
- yyStackOverflow(yypParser);
- return;
- }
-#else
- if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
+ yytos = yypParser->yytos;
+ if( yytos>yypParser->yystackEnd ){
if( yyGrowStack(yypParser) ){
yypParser->yytos--;
yyStackOverflow(yypParser);
return;
}
+ yytos = yypParser->yytos;
+ assert( yytos <= yypParser->yystackEnd );
}
-#endif
if( yyNewState > YY_MAX_SHIFT ){
yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
}
- yytos = yypParser->yytos;
yytos->stateno = yyNewState;
yytos->major = yyMajor;
yytos->minor.yy0 = yyMinor;
yyTraceShift(yypParser, yyNewState, "Shift");
}
@@ -909,23 +934,16 @@
yypParser->yyhwm++;
assert( yypParser->yyhwm ==
(int)(yypParser->yytos - yypParser->yystack));
}
#endif
-#if YYSTACKDEPTH>0
if( yypParser->yytos>=yypParser->yystackEnd ){
- yyStackOverflow(yypParser);
- break;
- }
-#else
- if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
if( yyGrowStack(yypParser) ){
yyStackOverflow(yypParser);
break;
}
}
-#endif
}
yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor ParseCTX_PARAM);
}else if( yyact <= YY_MAX_SHIFTREDUCE ){
yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
#ifndef YYNOERRORRECOVERY
Index: tool/sqldiff.c
==================================================================
--- tool/sqldiff.c
+++ tool/sqldiff.c
@@ -11,11 +11,12 @@
*************************************************************************
**
** This is a utility program that computes the differences in content
** between two SQLite databases.
**
-** To compile, simply link against SQLite.
+** To compile, simply link against SQLite. (Windows builds must also link
+** against ext/consio/console_io.c.)
**
** See the showHelp() routine below for a brief description of how to
** run the utility.
*/
#include
@@ -23,10 +24,23 @@
#include
#include
#include
#include
#include "sqlite3.h"
+
+/* Output function substitutions that cause UTF8 characters to be rendered
+** correctly on Windows:
+**
+** fprintf() -> Wfprintf()
+**
+*/
+#if defined(_WIN32)
+# include "console_io.h"
+# define Wfprintf fPrintfUtf8
+#else
+# define Wfprintf fprintf
+#endif
/*
** All global variables are gathered into the "g" singleton.
*/
struct GlobalVars {
@@ -44,89 +58,46 @@
*/
#define DEBUG_COLUMN_NAMES 0x000001
#define DEBUG_DIFF_SQL 0x000002
/*
-** Dynamic string object
-*/
-typedef struct Str Str;
-struct Str {
- char *z; /* Text of the string */
- int nAlloc; /* Bytes allocated in z[] */
- int nUsed; /* Bytes actually used in z[] */
-};
-
-/*
-** Initialize a Str object
-*/
-static void strInit(Str *p){
- p->z = 0;
- p->nAlloc = 0;
- p->nUsed = 0;
+** Clear and free an sqlite3_str object
+*/
+static void strFree(sqlite3_str *pStr){
+ sqlite3_free(sqlite3_str_finish(pStr));
}
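
The rest of this file is converted from the private Str helper to the public sqlite3_str interface; strFree() above is the one local wrapper that remains. A minimal sketch of the pattern the converted code follows is given below; the demoStr name and the SQL text are made-up example values.

#include <stdio.h>
#include "sqlite3.h"

/* Build a string incrementally with sqlite3_str, read it in place, then
** release it.  sqlite3_str_new(0) uses the global length limits; passing a
** database handle would apply that connection's limits instead.
*/
static void demoStr(FILE *out){
  sqlite3_str *pSql = sqlite3_str_new(0);
  sqlite3_str_appendf(pSql, "SELECT * FROM %s", "t1");   /* made-up table */
  sqlite3_str_appendf(pSql, " ORDER BY %d", 1);
  fprintf(out, "%s\n", sqlite3_str_value(pSql));  /* peek without finishing */
  sqlite3_free(sqlite3_str_finish(pSql));         /* same as strFree(pSql) */
}
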
/*
** Print an error resulting from faulting command-line arguments and
** abort the program.
*/
static void cmdlineError(const char *zFormat, ...){
+ sqlite3_str *pOut = sqlite3_str_new(0);
va_list ap;
- fprintf(stderr, "%s: ", g.zArgv0);
va_start(ap, zFormat);
- vfprintf(stderr, zFormat, ap);
+ sqlite3_str_vappendf(pOut, zFormat, ap);
va_end(ap);
- fprintf(stderr, "\n\"%s --help\" for more help\n", g.zArgv0);
+ Wfprintf(stderr, "%s: %s\n", g.zArgv0, sqlite3_str_value(pOut));
+ strFree(pOut);
+ Wfprintf(stderr, "\"%s --help\" for more help\n", g.zArgv0);
exit(1);
}
/*
** Print an error message for an error that occurs at runtime, then
** abort the program.
*/
static void runtimeError(const char *zFormat, ...){
+ sqlite3_str *pOut = sqlite3_str_new(0);
va_list ap;
- fprintf(stderr, "%s: ", g.zArgv0);
va_start(ap, zFormat);
- vfprintf(stderr, zFormat, ap);
+ sqlite3_str_vappendf(pOut, zFormat, ap);
va_end(ap);
- fprintf(stderr, "\n");
+ Wfprintf(stderr, "%s: %s\n", g.zArgv0, sqlite3_str_value(pOut));
+ strFree(pOut);
exit(1);
}
-
-/*
-** Free all memory held by a Str object
-*/
-static void strFree(Str *p){
- sqlite3_free(p->z);
- strInit(p);
-}
-
-/*
-** Add formatted text to the end of a Str object
-*/
-static void strPrintf(Str *p, const char *zFormat, ...){
- int nNew;
- for(;;){
- if( p->z ){
- va_list ap;
- va_start(ap, zFormat);
- sqlite3_vsnprintf(p->nAlloc-p->nUsed, p->z+p->nUsed, zFormat, ap);
- va_end(ap);
- nNew = (int)strlen(p->z + p->nUsed);
- }else{
- nNew = p->nAlloc;
- }
- if( p->nUsed+nNew < p->nAlloc-1 ){
- p->nUsed += nNew;
- break;
- }
- p->nAlloc = p->nAlloc*2 + 1000;
- p->z = sqlite3_realloc(p->z, p->nAlloc);
- if( p->z==0 ) runtimeError("out of memory");
- }
-}
-
/* Safely quote an SQL identifier. Use the minimum amount of transformation
** necessary to allow the string to be used with %s.
**
@@ -451,67 +422,66 @@
int nPk; /* Number of true primary key columns */
int nCol; /* Number of data columns */
int i; /* Loop counter */
sqlite3_stmt *pStmt; /* SQL statement */
const char *zSep; /* Separator string */
- Str ins; /* Beginning of the INSERT statement */
+ sqlite3_str *pIns; /* Beginning of the INSERT statement */
pStmt = db_prepare("SELECT sql FROM aux.sqlite_schema WHERE name=%Q", zTab);
if( SQLITE_ROW==sqlite3_step(pStmt) ){
fprintf(out, "%s;\n", sqlite3_column_text(pStmt,0));
}
sqlite3_finalize(pStmt);
if( !g.bSchemaOnly ){
az = columnNames("aux", zTab, &nPk, 0);
- strInit(&ins);
+ pIns = sqlite3_str_new(0);
if( az==0 ){
pStmt = db_prepare("SELECT * FROM aux.%s", zId);
- strPrintf(&ins,"INSERT INTO %s VALUES", zId);
+ sqlite3_str_appendf(pIns,"INSERT INTO %s VALUES", zId);
}else{
- Str sql;
- strInit(&sql);
+ sqlite3_str *pSql = sqlite3_str_new(0);
zSep = "SELECT";
for(i=0; az[i]; i++){
- strPrintf(&sql, "%s %s", zSep, az[i]);
+ sqlite3_str_appendf(pSql, "%s %s", zSep, az[i]);
zSep = ",";
}
- strPrintf(&sql," FROM aux.%s", zId);
+ sqlite3_str_appendf(pSql," FROM aux.%s", zId);
zSep = " ORDER BY";
for(i=1; i<=nPk; i++){
- strPrintf(&sql, "%s %d", zSep, i);
+ sqlite3_str_appendf(pSql, "%s %d", zSep, i);
zSep = ",";
}
- pStmt = db_prepare("%s", sql.z);
- strFree(&sql);
- strPrintf(&ins, "INSERT INTO %s", zId);
+ pStmt = db_prepare("%s", sqlite3_str_value(pSql));
+ strFree(pSql);
+ sqlite3_str_appendf(pIns, "INSERT INTO %s", zId);
zSep = "(";
for(i=0; az[i]; i++){
- strPrintf(&ins, "%s%s", zSep, az[i]);
+ sqlite3_str_appendf(pIns, "%s%s", zSep, az[i]);
zSep = ",";
}
- strPrintf(&ins,") VALUES");
+ sqlite3_str_appendf(pIns,") VALUES");
namelistFree(az);
}
nCol = sqlite3_column_count(pStmt);
while( SQLITE_ROW==sqlite3_step(pStmt) ){
- fprintf(out, "%s",ins.z);
+ Wfprintf(out, "%s",sqlite3_str_value(pIns));
zSep = "(";
for(i=0; inPk2 ){
zSep = "SELECT ";
for(i=0; i1)?", ":""), i);
+ sqlite3_str_appendf(pSql, "\nORDER BY ");
+ for(i=1; i<=nPK; i++) sqlite3_str_appendf(pSql, "%s%d", ((i>1)?", ":""), i);
}
static void rbudiff_one_table(const char *zTab, FILE *out){
int bOtaRowid; /* True to use an ota_rowid column */
int nPK; /* Number of primary key columns in table */
char **azCol; /* NULL terminated array of col names */
int i;
int nCol;
- Str ct = {0, 0, 0}; /* The "CREATE TABLE data_xxx" statement */
- Str sql = {0, 0, 0}; /* Query to find differences */
- Str insert = {0, 0, 0}; /* First part of output INSERT statement */
+ sqlite3_str *pCt; /* The "CREATE TABLE data_xxx" statement */
+ sqlite3_str *pSql; /* Query to find differences */
+ sqlite3_str *pInsert; /* First part of output INSERT statement */
sqlite3_stmt *pStmt = 0;
int nRow = 0; /* Total rows in data_xxx table */
/* --rbu mode must use real primary keys. */
g.bSchemaPK = 1;
+ pCt = sqlite3_str_new(0);
+ pSql = sqlite3_str_new(0);
+ pInsert = sqlite3_str_new(0);
/* Check that the schemas of the two tables match. Exit early otherwise. */
checkSchemasMatch(zTab);
/* Grab the column names and PK details for the table(s). If no usable PK
@@ -1283,39 +1258,39 @@
runtimeError("table %s has no usable PK columns", zTab);
}
for(nCol=0; azCol[nCol]; nCol++);
/* Build and output the CREATE TABLE statement for the data_xxx table */
- strPrintf(&ct, "CREATE TABLE IF NOT EXISTS 'data_%q'(", zTab);
- if( bOtaRowid ) strPrintf(&ct, "rbu_rowid, ");
- strPrintfArray(&ct, ", ", "%s", &azCol[bOtaRowid], -1);
- strPrintf(&ct, ", rbu_control);");
+ sqlite3_str_appendf(pCt, "CREATE TABLE IF NOT EXISTS 'data_%q'(", zTab);
+ if( bOtaRowid ) sqlite3_str_appendf(pCt, "rbu_rowid, ");
+ strPrintfArray(pCt, ", ", "%s", &azCol[bOtaRowid], -1);
+ sqlite3_str_appendf(pCt, ", rbu_control);");
/* Get the SQL for the query to retrieve data from the two databases */
- getRbudiffQuery(zTab, azCol, nPK, bOtaRowid, &sql);
+ getRbudiffQuery(zTab, azCol, nPK, bOtaRowid, pSql);
/* Build the first part of the INSERT statement output for each row
** in the data_xxx table. */
- strPrintf(&insert, "INSERT INTO 'data_%q' (", zTab);
- if( bOtaRowid ) strPrintf(&insert, "rbu_rowid, ");
- strPrintfArray(&insert, ", ", "%s", &azCol[bOtaRowid], -1);
- strPrintf(&insert, ", rbu_control) VALUES(");
+ sqlite3_str_appendf(pInsert, "INSERT INTO 'data_%q' (", zTab);
+ if( bOtaRowid ) sqlite3_str_appendf(pInsert, "rbu_rowid, ");
+ strPrintfArray(pInsert, ", ", "%s", &azCol[bOtaRowid], -1);
+ sqlite3_str_appendf(pInsert, ", rbu_control) VALUES(");
- pStmt = db_prepare("%s", sql.z);
+ pStmt = db_prepare("%s", sqlite3_str_value(pSql));
while( sqlite3_step(pStmt)==SQLITE_ROW ){
/* If this is the first row output, print out the CREATE TABLE
- ** statement first. And then set ct.z to NULL so that it is not
+ ** statement first. And reset pCt so that it will not be
** printed again. */
- if( ct.z ){
- fprintf(out, "%s\n", ct.z);
- strFree(&ct);
+ if( sqlite3_str_length(pCt) ){
+ fprintf(out, "%s\n", sqlite3_str_value(pCt));
+ sqlite3_str_reset(pCt);
}
/* Output the first part of the INSERT statement */
- fprintf(out, "%s", insert.z);
+ fprintf(out, "%s", sqlite3_str_value(pInsert));
nRow++;
if( sqlite3_column_type(pStmt, nCol)==SQLITE_INTEGER ){
for(i=0; i<=nCol; i++){
if( i>0 ) fprintf(out, ", ");
@@ -1367,19 +1342,20 @@
fprintf(out, ");\n");
}
sqlite3_finalize(pStmt);
if( nRow>0 ){
- Str cnt = {0, 0, 0};
- strPrintf(&cnt, "INSERT INTO rbu_count VALUES('data_%q', %d);", zTab, nRow);
- fprintf(out, "%s\n", cnt.z);
- strFree(&cnt);
+ sqlite3_str *pCnt = sqlite3_str_new(0);
+ sqlite3_str_appendf(pCnt,
+ "INSERT INTO rbu_count VALUES('data_%q', %d);", zTab, nRow);
+ fprintf(out, "%s\n", sqlite3_str_value(pCnt));
+ strFree(pCnt);
}
- strFree(&ct);
- strFree(&sql);
- strFree(&insert);
+ strFree(pCt);
+ strFree(pSql);
+ strFree(pInsert);
}
/*
** Display a summary of differences between two versions of the same
** table table.
@@ -1397,29 +1373,29 @@
int nPk2; /* Primary key columns in aux */
int n = 0; /* Number of columns in main */
int n2; /* Number of columns in aux */
int i; /* Loop counter */
const char *zSep; /* Separator string */
- Str sql; /* Comparison query */
+ sqlite3_str *pSql; /* Comparison query */
sqlite3_stmt *pStmt; /* Query statement to do the diff */
sqlite3_int64 nUpdate; /* Number of updated rows */
sqlite3_int64 nUnchanged; /* Number of unmodified rows */
sqlite3_int64 nDelete; /* Number of deleted rows */
sqlite3_int64 nInsert; /* Number of inserted rows */
- strInit(&sql);
+ pSql = sqlite3_str_new(0);
if( sqlite3_table_column_metadata(g.db,"aux",zTab,0,0,0,0,0,0) ){
if( !sqlite3_table_column_metadata(g.db,"main",zTab,0,0,0,0,0,0) ){
/* Table missing from second database. */
- fprintf(out, "%s: missing from second database\n", zTab);
+ Wfprintf(out, "%s: missing from second database\n", zTab);
}
goto end_summarize_one_table;
}
if( sqlite3_table_column_metadata(g.db,"main",zTab,0,0,0,0,0,0) ){
/* Table missing from source */
- fprintf(out, "%s: missing from first database\n", zTab);
+ Wfprintf(out, "%s: missing from first database\n", zTab);
goto end_summarize_one_table;
}
az = columnNames("main", zTab, &nPk, 0);
az2 = columnNames("aux", zTab, &nPk2, 0);
@@ -1432,61 +1408,61 @@
|| az2==0
|| nPk!=nPk2
|| az[n]
){
/* Schema mismatch */
- fprintf(out, "%s: incompatible schema\n", zTab);
+ Wfprintf(out, "%s: incompatible schema\n", zTab);
goto end_summarize_one_table;
}
/* Build the comparison query */
for(n2=n; az[n2]; n2++){}
- strPrintf(&sql, "SELECT 1, count(*)");
+ sqlite3_str_appendf(pSql, "SELECT 1, count(*)");
if( n2==nPk2 ){
- strPrintf(&sql, ", 0\n");
+ sqlite3_str_appendf(pSql, ", 0\n");
}else{
zSep = ", sum(";
for(i=nPk; az[i]; i++){
- strPrintf(&sql, "%sA.%s IS NOT B.%s", zSep, az[i], az[i]);
- zSep = " OR ";
- }
- strPrintf(&sql, ")\n");
- }
- strPrintf(&sql, " FROM main.%s A, aux.%s B\n", zId, zId);
- zSep = " WHERE";
- for(i=0; inPk ){
- strPrintf(&sql, "SELECT %d", SQLITE_UPDATE);
+ sqlite3_str_appendf(pSql, "SELECT %d", SQLITE_UPDATE);
for(i=0; i0 ) sqlite3_free(azCol[--nCol]);
sqlite3_free(azCol);
sqlite3_free(aiPk);
sqlite3_free(zId);
sqlite3_free(aiFlg);
- strFree(&sql);
+ strFree(pSql);
}
/*
** Return true if the ascii character passed as the only argument is a
** whitespace character. Otherwise return false.
@@ -1909,12 +1889,12 @@
/*
** Print sketchy documentation for this utility program
*/
static void showHelp(void){
- printf("Usage: %s [options] DB1 DB2\n", g.zArgv0);
- printf(
+ Wfprintf(stdout, "Usage: %s [options] DB1 DB2\n", g.zArgv0);
+ Wfprintf(stdout,
"Output SQL text that would transform DB1 into DB2.\n"
"Options:\n"
" --changeset FILE Write a CHANGESET into FILE\n"
" -L|--lib LIBRARY Load an SQLite extension library\n"
" --primarykey Use schema-defined PRIMARY KEYs\n"
@@ -2047,13 +2027,13 @@
if( rc || zErrMsg ){
cmdlineError("\"%s\" does not appear to be a valid SQLite database", zDb2);
}
if( neverUseTransaction ) useTransaction = 0;
- if( useTransaction ) fprintf(out, "BEGIN TRANSACTION;\n");
+ if( useTransaction ) Wfprintf(out, "BEGIN TRANSACTION;\n");
if( xDiff==rbudiff_one_table ){
- fprintf(out, "CREATE TABLE IF NOT EXISTS rbu_count"
+ Wfprintf(out, "CREATE TABLE IF NOT EXISTS rbu_count"
"(tbl TEXT PRIMARY KEY COLLATE NOCASE, cnt INTEGER) "
"WITHOUT ROWID;\n"
);
}
if( zTab ){
@@ -2064,12 +2044,12 @@
while( SQLITE_ROW==sqlite3_step(pStmt) ){
xDiff((const char*)sqlite3_column_text(pStmt,0), out);
}
sqlite3_finalize(pStmt);
}
- if( useTransaction ) printf("COMMIT;\n");
+ if( useTransaction ) Wfprintf(stdout,"COMMIT;\n");
/* TBD: Handle trigger differences */
/* TBD: Handle view differences */
sqlite3_close(g.db);
return 0;
}
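
[Editor's note] The sqldiff.c hunks above make two mechanical substitutions: the tool's private Str/strPrintf string builder is replaced by SQLite's public sqlite3_str interface, and direct fprintf/printf calls are routed through Wfprintf, which appears to be a UTF-8-aware console wrapper defined elsewhere in the same file. The fragment below is only an illustrative sketch of the sqlite3_str calls the new code relies on; sqlite3_str_new(), sqlite3_str_appendf(), sqlite3_str_value(), and sqlite3_str_finish() are documented SQLite C-API routines, while the strFree() wrapper shown here is an assumption about how the patched sqldiff.c releases a builder (its definition is not part of these hunks).

    #include <stdio.h>
    #include "sqlite3.h"

    /* Assumed shape of the strFree() helper called in the hunks above:
    ** finalize the builder and free the buffer returned by
    ** sqlite3_str_finish(). */
    static void strFree(sqlite3_str *pStr){
      sqlite3_free(sqlite3_str_finish(pStr));
    }

    int main(void){
      sqlite3_str *pCnt = sqlite3_str_new(0);   /* NULL db handle is allowed */
      sqlite3_str_appendf(pCnt,
        "INSERT INTO rbu_count VALUES('data_%q', %d);", "t1", 42);
      /* sqlite3_str_value() returns the text accumulated so far without
      ** finalizing the builder, which is how the patch prints each
      ** generated statement before freeing it. */
      printf("%s\n", sqlite3_str_value(pCnt));
      strFree(pCnt);
      return 0;
    }
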
Index: tool/sqlite3_analyzer.c.in
==================================================================
--- tool/sqlite3_analyzer.c.in
+++ tool/sqlite3_analyzer.c.in
@@ -17,12 +17,59 @@
#if !defined(SQLITE_AMALGAMATION) && !defined(USE_EXTERNAL_SQLITE)
INCLUDE sqlite3.c
#endif
INCLUDE $ROOT/src/tclsqlite.c
+#if defined(_WIN32)
+INCLUDE $ROOT/ext/consio/console_io.h
+INCLUDE $ROOT/ext/consio/console_io.c
+
+/* Substitute "puts" command. Only these forms recognized:
+**
+** puts STRING
+** puts stderr STRING
+** puts -nonewline STRING
+*/
+static int subst_puts(
+ void *NotUsed,
+ Tcl_Interp *interp,
+ int objc,
+ Tcl_Obj *const*objv
+){
+ FILE *pOut = stdout;
+ const char *zOut;
+ int addNewLine = 1;
+ if( objc==2 ){
+ zOut = Tcl_GetString(objv[1]);
+ }else if( objc!=3 ){
+ Tcl_WrongNumArgs(interp, 1, objv, "?stderr|-nonewline? STRING");
+ return TCL_ERROR;
+ }else{
+ const char *zArg = Tcl_GetString(objv[1]);
+ if( zArg==0 ) return TCL_ERROR;
+ zOut = Tcl_GetString(objv[2]);
+ if( strcmp(zArg, "stderr")==0 ){
+ pOut = stderr;
+ }else if( strcmp(zArg, "-nonewline")==0 ){
+ addNewLine = 0;
+ }else{
+ Tcl_AppendResult(interp, "bad argument: ", zArg, 0);
+ return TCL_ERROR;
+ }
+ }
+ fPutsUtf8(zOut, pOut);
+ if( addNewLine ) fPutsUtf8("\n", pOut);
+ return TCL_OK;
+}
+#endif /* defined(_WIN32) */
+
const char *sqlite3_analyzer_init_proc(Tcl_Interp *interp){
+#if defined(_WIN32)
+ Tcl_CreateObjCommand(interp, "puts", subst_puts, 0, 0);
+#else
(void)interp;
+#endif
return
BEGIN_STRING
INCLUDE $ROOT/tool/spaceanal.tcl
END_STRING
;
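
[Editor's note] The subst_puts() command registered above exists so that, in Windows builds of sqlite3_analyzer, text emitted by spaceanal.tcl through Tcl's puts goes out via the ext/consio console routines rather than Tcl's own channel layer. The snippet below is a minimal, stand-alone sketch of the underlying call; it assumes only that fPutsUtf8(z, f) from console_io.h writes the UTF-8 string z to the stream f, exactly as the hunk above uses it, and that the routine degrades to ordinary stream output when the target is not a Windows console.

    #include <stdio.h>
    #include "console_io.h"   /* ext/consio, compiled alongside as in the patch */

    int main(void){
      /* Equivalent of "puts STRING": the body plus a trailing newline on stdout. */
      fPutsUtf8("non-ASCII text should survive a Windows console", stdout);
      fPutsUtf8("\n", stdout);
      /* Equivalent of "puts stderr STRING". */
      fPutsUtf8("diagnostics go to stderr", stderr);
      fPutsUtf8("\n", stderr);
      return 0;
    }
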
Index: tool/srctree-check.tcl
==================================================================
--- tool/srctree-check.tcl
+++ tool/srctree-check.tcl
@@ -33,10 +33,21 @@
set TCLSH [info nameofexe]
# Number of errors seen.
#
set NERR 0
+
+######################### configure ###########################################
+
+set conf [readfile $ROOT/configure]
+set vers [readfile $ROOT/VERSION]
+if {[string first $vers $conf]<=0} {
+ puts "ERROR: ./configure does not agree with ./VERSION"
+ puts "...... Fix: run autoconf"
+ incr NERR
+}
+unset conf
######################### autoconf/tea/configure.ac ###########################
set confac [readfile $ROOT/autoconf/tea/configure.ac]
set vers [readfile $ROOT/VERSION]
@@ -43,13 +54,15 @@
set pattern {AC_INIT([sqlite],[}
append pattern [string trim $vers]
append pattern {])}
if {[string first $pattern $confac]<=0} {
puts "ERROR: ./autoconf/tea/configure.ac does not agree with ./VERSION"
- puts "...... Fix: manually edit ./autoconf/tea/configure.ac to"
+ puts "...... Fix: manually edit ./autoconf/tea/configure.ac and put the"
+ puts "...... correct version number in AC_INIT()"
incr NERR
}
+unset confac
######################### autoconf/Makefile.msc ###############################
set f1 [readfile $ROOT/autoconf/Makefile.msc]
exec $TCLSH $ROOT/tool/mkmsvcmin.tcl $ROOT/Makefile.msc tmp1.txt