SQLite Forum

34 forum posts by user jake

2021-10-01
03:22 Reply: Query group missing from the output (artifact: 3fc316bab5 user: jake)

Sometimes a correlated aggregate subquery which references an outer table will cause the outer query to behave like an aggregate query.

Unfortunately this is not a bug - it's just the way it is. This behaviour is consistent with the way other DB engines behave.

See https://sqlite.org/forum/forumpost/9f2b929904329af5

2021-08-31
15:27 Reply: Possible bug: Unexpected result using correlated aggregate subquery (artifact: 9310273f00 user: jake)

Thank you Richard for validating the behaviour against PG. I'm satisfied that this is relegated from "possible bug" to "not a bug" under the WWPGD philosophy. I have now also verified that this is the behaviour in MySQL.

Here is another variation which I find even less intuitive:

SELECT (SELECT group_concat(a) || '-' || group_concat(b) FROM y) z
  FROM x;
┌───────┐
│   z   │
├───────┤
│ 1-1,1 │
└───────┘

But again, this seems consistent (at least with MySQL).


For context, the goal was to replicate a value as a comma-separated list using the out-of-the-box CLI.

The failed attempt looked like this:

SELECT (SELECT group_concat(a) FROM generate_series(0,10))
  FROM x;

And some successful alternatives:

SELECT (SELECT group_concat(a+(value*0)) FROM generate_series(0,10))
  FROM x;

SELECT group_concat(a)
  FROM x
  JOIN generate_series(0,10)
 GROUP BY x.oid;
08:12 Post: Possible bug: Unexpected result using correlated aggregate subquery (artifact: 9f2b929904 user: jake)

The following examples demonstrate a possible bug in some cases when using a correlated aggregate subquery:

SQLite version 3.37.0 2021-08-30 17:02:48
Enter ".help" for usage hints.
Connected to a transient in-memory database.
Use ".open FILENAME" to reopen on a persistent database.
sqlite> CREATE TABLE x AS SELECT 1 a;
sqlite> CREATE TABLE y AS SELECT 1 b UNION ALL SELECT 1;
sqlite>
sqlite> .mode box
sqlite>
sqlite> SELECT (SELECT group_concat(a) FROM y) unexpected, -- expected 1,1
   ...>        group_concat((SELECT a FROM y)) expected,
   ...>        (SELECT group_concat(b) FROM y) expected,
   ...>        (SELECT group_concat(a+b) FROM y) expected
   ...>   FROM x;
┌────────────┬──────────┬──────────┬──────────┐
│ unexpected │ expected │ expected │ expected │
├────────────┼──────────┼──────────┼──────────┤
│ 1          │ 1        │ 1,1      │ 2,2      │
└────────────┴──────────┴──────────┴──────────┘
sqlite>
sqlite> INSERT INTO x VALUES (1);
sqlite>
sqlite> SELECT (SELECT group_concat(a) FROM y) unexpected -- expected 2 rows
   ...>   FROM x;
┌────────────┐
│ unexpected │
├────────────┤
│ 1,1        │
└────────────┘
sqlite>
sqlite> SELECT (SELECT group_concat(b) FROM y) expected
   ...>   FROM x;
┌──────────┐
│ expected │
├──────────┤
│ 1,1      │
│ 1,1      │
└──────────┘
sqlite>
sqlite> SELECT (SELECT group_concat(a+b) FROM y) expected
   ...>   FROM x;
┌──────────┐
│ expected │
├──────────┤
│ 2,2      │
│ 2,2      │
└──────────┘
2021-07-12
14:22 Edit reply: Unstack one column to multiple (artifact: 96701f9f63 user: jake)

Here's a solution utilising the pivot_vtab extension:

--
-- Import data
--
.mode csv
CREATE TABLE import(val);
.import tmp.txt import

--
-- Create groups based on the 2 blank rows rule
--
ALTER TABLE import ADD COLUMN g INT;

UPDATE import
   SET g = oid
 WHERE oid = 1
    OR oid IN (SELECT i2.oid+1
                 FROM import i
                 JOIN import i2 ON i.oid = i2.oid-1
                               AND i2.val = ''
                WHERE i.val = '');

UPDATE import
   SET g = i2.g
  FROM import i2
  LEFT JOIN
       import i3 ON import.oid > i3.oid
                AND i2.oid < i3.oid
                AND i3.g IS NOT NULL
 WHERE import.g IS NULL
   AND import.oid > i2.oid
   AND i2.g IS NOT NULL
   AND i3.oid IS NULL;

--
-- Number the columns
--
ALTER TABLE import ADD COLUMN c INT;

UPDATE import
   SET c = x.c
  FROM (SELECT oid, Row_Number() OVER (PARTITION BY g ORDER BY oid) c
          FROM import) x 
 WHERE import.oid = x.oid;

--
-- Pivot
--
.load pivot_vtab
CREATE VIRTUAL TABLE pivot USING pivot_vtab(
  -- Pivot table row query
  (SELECT g
     FROM import
    GROUP BY g),

  -- Pivot table column definition query
  (SELECT c,
          'col' || c
     FROM import
    WHERE c <= (SELECT Max(c)-2 FROM import)
    GROUP BY c
    ORDER BY c),

  -- Pivot query
  (SELECT val
     FROM import 
    WHERE g = ?1
      AND c = ?2)
);

.mode column
SELECT *
  FROM pivot;
g   col1                                              col2     col3                  col4                      col5        col6  col7  col8  col9  col10  col11  col12  col13  col14  col15  col16
--  ------------------------------------------------  -------  --------------------  ------------------------  ----------  ----  ----  ----  ----  -----  -----  -----  -----  -----  -----  -----
1   Hardside Cabin Luggage Trolley Black              senator  View Product Details  N18602991A	KH132-20_BLK	  KH13220BLK  02    Days  04    Hrs   25     Mins   25     Sec    1      1      0
19  Hardside 4 Wheels Cabin Luggage Trolley Burgundy  senator  View Product Details  N42588980A	KH134-20_BGN	  KH13420BGN  02    Days  04    Hrs   25     Mins   25     Sec    1      1      0
37  Softside Cabin Luggage Trolley Purple             senator  View Product Details  N32139616A	LL051-20_PRP	  LL05120PRP  02    Days  09    Hrs   25     Mins   25     Sec    1      1      0
14:16 Reply: Unstack one column to multiple (artifact: ecf6e748a1 user: jake)

Here's a solution utilising the pivot_vtab extension:

--
-- Import data
--
.mode csv
CREATE TABLE import(val);
.import tmp.txt import

--
-- Create groups based on the 2 blank rows rule
--
ALTER TABLE import ADD COLUMN g INT;

UPDATE import
   SET g = oid
 WHERE oid = 1
    OR oid IN (SELECT i2.oid+1
                 FROM import i
                 JOIN import i2 ON i.oid = i2.oid-1
                               AND i2.val = ''
                WHERE i.val = '');

UPDATE import
   SET g = i2.g
  FROM import i2
  LEFT JOIN
       import i3 ON import.oid > i3.oid
                AND i2.oid < i3.oid
                AND i3.g IS NOT NULL
 WHERE import.g IS NULL
   AND import.oid > i2.oid
   AND i2.g IS NOT NULL
   AND i3.oid IS NULL;

--
-- Number the columns
--
ALTER TABLE import ADD COLUMN c INT;

UPDATE import
   SET c = x.c
  FROM (SELECT oid, Row_Number() OVER (PARTITION BY g ORDER BY oid) c
          FROM import) x 
 WHERE import.oid = x.oid;

--
-- Pivot
--
.load pivot_vtab
CREATE VIRTUAL TABLE pivot USING pivot_vtab(
  -- Pivot table row query
  (SELECT g
     FROM import
    GROUP BY g),

  -- Pivot table column definition query
  (SELECT c,
          'col' || c
     FROM import
    WHERE c < (SELECT Max(c)-2 FROM import)
    GROUP BY c
    ORDER BY c),

  -- Pivot query
  (SELECT val
     FROM import 
    WHERE g = ?1
      AND c = ?2)
);

.mode column
SELECT *
  FROM pivot;
g   col1                                              col2     col3                  col4                      col5        col6  col7  col8  col9  col10  col11  col12  col13  col14  col15
--  ------------------------------------------------  -------  --------------------  ------------------------  ----------  ----  ----  ----  ----  -----  -----  -----  -----  -----  -----
1   Hardside Cabin Luggage Trolley Black              senator  View Product Details  N18602991A	KH132-20_BLK	  KH13220BLK  02    Days  04    Hrs   25     Mins   25     Sec    1      1
19  Hardside 4 Wheels Cabin Luggage Trolley Burgundy  senator  View Product Details  N42588980A	KH134-20_BGN	  KH13420BGN  02    Days  04    Hrs   25     Mins   25     Sec    1      1
37  Softside Cabin Luggage Trolley Purple             senator  View Product Details  N32139616A	LL051-20_PRP	  LL05120PRP  02    Days  09    Hrs   25     Mins   25     Sec    1      1
2021-04-20
13:05 Reply: Aliased function result with the same name issue (artifact: 7174a250f3 user: jake)

Note that MySQL is not using the expression alias in a WHERE clause, but just the original column value. Confusion might arise due to the use of the TRIM function and the way MySQL string collations ignore trailing spaces in comparisons.

Refer to this documentation for MySQL 5.6:

Trailing Space Handling in Comparisons

Nonbinary strings have PAD SPACE behavior for all collations, including _bin collations. Trailing spaces are insignificant in comparisons:

mysql> SET NAMES utf8 COLLATE utf8_bin;
mysql> SELECT 'a ' = 'a';
+------------+
| 'a ' = 'a' |
+------------+
|          1 |
+------------+

For binary strings, all bytes are significant in comparisons, including trailing spaces:

mysql> SET NAMES binary;
mysql> SELECT 'a ' = 'a';
+------------+
| 'a ' = 'a' |
+------------+
|          0 |
+------------+
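
For comparison, SQLite's default BINARY collation treats trailing spaces as significant, so the same comparison gives a different answer (a quick sketch, not tied to any particular version):

-- SQLite
SELECT 'a ' = 'a';
-- result: 0

SELECT rtrim('a ') = 'a';
-- result: 1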
11:06 Reply: Aliased function result with the same name issue (artifact: 08480e31c5 user: jake)

Here is a comment made by Richard Hipp back in 2015 on using aliased columns in a WHERE clause:

This is not valid SQL, actually. For clarity, here is the (invalid) SQL reformatted:

SELECT a+b AS x FROM t1 WHERE x=99;

You are not suppose to be able to access the "x" alias within the WHERE clause.

Yes, I know that SQLite allows this. But it does so grudgingly, for historical reasons. It is technically not valid. Note that none of MySQL, Oracle, PostgreSQL, nor SQLServer allow statements like the above.

Because the SQL is technically not valid, I am less inclined to spend a lot of time trying to optimize it.

I really wish there was a way for me to fix this historical permissiveness in SQLite without breaking millions of (miscoded) iPhone/Android apps. I'd do so if I could.
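
For completeness, the portable ways to write that query keep the alias out of the WHERE clause, either by repeating the expression or by pushing it into a subquery (a sketch against the same hypothetical t1):

SELECT a+b AS x FROM t1 WHERE a+b = 99;

SELECT x FROM (SELECT a+b AS x FROM t1) WHERE x = 99;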

2021-02-25
00:18 Reply: Tiny doc bug: sqlite_source_id() has outdated SHA1 reference (artifact: 7b60ca0b85 user: jake)

It's good to see that you're now an authorised doc contributor. To avoid spamming the forum with a new topic, here's another minor doc bug:

https://www.sqlite.org/lang_aggfunc.html#list_of_built_in_aggregate_functions

Both the min and max links here incorrectly refer to the core functions instead of the aggregate versions.
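
For anyone skimming, the distinction matters because SQLite uses the same names for both forms; a quick sketch with made-up values:

SELECT max(2, 7, 5);
-- scalar core function: 7

SELECT max(v) FROM (SELECT 2 v UNION ALL SELECT 7 UNION ALL SELECT 5);
-- aggregate function: 7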

2021-02-10
02:49 Reply: Split an Extract column (artifact: 96b5db0dbf user: jake)

If your column value is literally as you have shown, then you could combine John's solution with something like this:

sqlite> SELECT json_extract(replace(replace(replace(your_csv_column, '{', '["'), '}', '"]'), ',', '","'), '$[9]')
   ...>   FROM (SELECT '{1,1,20210206000704,,,,,1772285065,10,(10)4045420,100230,N,1}' your_csv_column
   ...>   UNION ALL SELECT '{1,1,20210126033937,,,,,1772285065,10,(317)5757554,100236,N,1}'
   ...>   UNION ALL SELECT '{1,1,20210202030039,,,,,1772285065,10,(325)4092770,100208,N,1}'
   ...>   UNION ALL SELECT '{1,1,20210202170400,,,,,1772285065,10,(377)4040420,100230,N,1}'
   ...>   UNION ALL SELECT '{1,1,20210203031334,,,,,1772285065,10,(45)4098070,100208,N,1}');
(10)4045420
(317)5757554
(325)4092770
(377)4040420
(45)4098070
2021-02-02
12:28 Reply: string concatenation issue in prepared statement (artifact: 04b59d5e34 user: jake)

I know nothing about Swift, but I would guess that you are binding the null terminator as part of your string.

Maybe try something like this:

        XCTAssertEqual("B".utf8CString.withUnsafeBufferPointer { (buffer) -> Int32 in
            return sqlite3_bind_text(
                statement,
                1,
                buffer.baseAddress,
                Int32(buffer.count)-1,
                unsafeBitCast(-1, to: sqlite3_destructor_type.self)
            )
        }, SQLITE_OK)
2021-01-27
04:21 Reply: Help with a search command (artifact: 36ec978ca0 user: jake)

Good observation. Depending on the requirements of the OP, it might be sufficient to avoid such cases by defining all_fields with a separator, e.g. using a space character: (fldKey0 || ' ' || fldKey1 || ' ' || fldKey2)
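
A quick sketch of the cross-field match that the separator avoids (hypothetical values):

SELECT 'this' || 'is' || 'test' LIKE '%sist%';
-- result: 1 (matches across the field boundary)

SELECT 'this' || ' ' || 'is' || ' ' || 'test' LIKE '%sist%';
-- result: 0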

01:11 Reply: Help with a search command (artifact: fcc42fdee6 user: jake)

Assuming performance is never going to be an issue, something like this might appeal to you:

.mode table

CREATE TABLE sqlDiaryData(
  fldKey0 TEXT,
  fldKey1 TEXT,
  fldKey2 TEXT,
  all_fields AS (fldKey0 || fldKey1 || fldKey2)
);
INSERT INTO sqlDiaryData VALUES ('this', 'is', 'test'), ('abc', 'def', 'test'), ('123abc345', 'test', 'foo'), ('123', 'xyz', 'bar');

-- 2 rows expected
WITH search(search_term) AS(VALUES ('test'), ('abc'))
SELECT dd.*
  FROM sqlDiaryData dd
  JOIN search       s  ON dd.all_fields LIKE '%' || s.search_term || '%'
 GROUP BY dd.rowid
HAVING Count(*) = (SELECT Count(*) FROM search);
+-----------+---------+---------+------------------+
|  fldKey0  | fldKey1 | fldKey2 |    all_fields    |
+-----------+---------+---------+------------------+
| abc       | def     | test    | abcdeftest       |
| 123abc345 | test    | foo     | 123abc345testfoo |
+-----------+---------+---------+------------------+
2020-12-26
02:54 Reply: Novice question about employing more complex SQL versus procedural code (artifact: 38231853d0 user: jake)

This is an example of different SQL engines having different implementations of the same function. The multi-argument version in Ryan's example is supported in MySQL.

Another related example is the difference in the MySQL and SQL Server implementations of the scalar Concat(x1, x2, ...) function (not supported in SQLite). Both implicitly cast the arguments to a string datatype, except for NULL, which in MySQL nullifies the whole result but in SQL Server is treated as an empty string.

e.g.:

-- MySQL
SELECT Concat('a', NULL, 'b');
-- result: NULL

-- SQL Server
SELECT Concat('a', NULL, 'b');
-- result: 'ab'
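
For what it's worth, SQLite's || operator behaves like the MySQL version with respect to NULL, and wrapping each argument in ifnull() reproduces the SQL Server behaviour (a sketch):

-- SQLite
SELECT 'a' || NULL || 'b';
-- result: NULL

SELECT ifnull('a', '') || ifnull(NULL, '') || ifnull('b', '');
-- result: 'ab'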
01:50 Reply: sqlite3_expanded_sql for carray with single bound parameter (artifact: 563e854657 user: jake)

This is as expected. Although not explicitly documented for sqlite3_carray_bind, it is essentially a convenience version of sqlite3_bind_pointer:

The sqlite3_bind_pointer(S,I,P,T,D) routine causes the I-th parameter in prepared statement S to have an SQL value of NULL, but to also be associated with the pointer P of type T.

See also:

https://www.sqlite.org/bindptr.html

2020-12-22
09:42 Reply: Is it possible to bind a single ? in a parameterized query to a listing of values for an in clause? (artifact: 0eecae1b74 user: jake)

The carray() extension function ("c-array") is used for this.

It can be used directly in the query as a table-valued function:

SELECT * FROM tags WHERE name IN carray(?);

Or inserted into a TEMP table if an index is useful:

CREATE TEMP TABLE list AS SELECT value FROM carray(?);
CREATE INDEX list_idx ON list(value);
SELECT * FROM tags WHERE name IN list;

Note: These examples use the clearly documented but rarely mentioned SQLite-specific functionality:

The right-hand side of an IN or NOT IN operator can be a table name or table-valued function name in which case the right-hand side is understood to be subquery of the form "(SELECT * FROM name)";

03:44 Reply: Novice question about employing more complex SQL versus procedural code (artifact: f260acc733 user: jake)

Would you tell me please, for example, if this query returns twenty-five rows, can SQL be used to convert the result to a single row something like [{"k":id,"n":name,"p":"path"},{"k":id,"n":name,"p":"path"}...]?

select json_group_array(json_object('k', id, 'p', path, 'n', name)) 
from media
where id in (
select media_id
from media_tags
where tag_id in (
select id
from tags
where name in ( 'name_1', 'name_2', 'name_8', 'name_23', 'name_52' ) )
group by media_id
having count( media_id ) = 5 );
2020-12-01
05:36 Reply: Freeing sqlite3_module after registering (artifact: e3b8122f82 user: jake)

The xDestroy param of sqlite3_create_module_v2 is the destructor for pClientData.

SQLite calls xDestroy just before dereferencing the internal module object, so you could theoretically pass your module structure as the pClientData to be cleaned up by your xDestroy function.

Note: this information is being provided from a quick analysis of the source code, not from experience or an officially documented reference.

04:26 Reply: Freeing sqlite3_module after registering (artifact: ed0e9f6830 user: jake)

It looks like SQLite makes a copy of the name, so it should be safe to free after registering the module. Generally, though, the module name would be a constant.

sqlite3_module should not be freed as per the docs (https://www.sqlite.org/c3ref/module.html):

The content of this structure must not change while it is registered with any database connection.

2020-11-17
23:20 Reply: Start rowid from 0 (artifact: edb232493c user: jake)

Here are 2 ideas.

Using an AFTER INSERT trigger (note the use of INT PRIMARY KEY instead of INTEGER PRIMARY KEY):

sqlite> CREATE TABLE t1(
   ...>   id INT PRIMARY KEY
   ...> );
sqlite>
sqlite> CREATE TRIGGER t1_id AFTER INSERT ON t1 BEGIN
   ...>   UPDATE t1 SET id = rowid-1 WHERE rowid = new.rowid;
   ...> END;
sqlite>
sqlite> INSERT INTO t1 VALUES(NULL);
sqlite> INSERT INTO t1 VALUES(NULL);
sqlite> INSERT INTO t1 VALUES(NULL);
sqlite>
sqlite> SELECT * FROM t1;
+----+
| id |
+----+
| 0  |
| 1  |
| 2  |
+----+

Using generated columns:

sqlite> CREATE TABLE t2(
   ...>   pk INTEGER PRIMARY KEY,
   ...>   id INT GENERATED ALWAYS AS (pk-1) STORED UNIQUE
   ...> );
sqlite>
sqlite> INSERT INTO t2 VALUES(NULL);
sqlite> INSERT INTO t2 VALUES(NULL);
sqlite> INSERT INTO t2 VALUES(NULL);
sqlite>
sqlite> SELECT * FROM t2;
+----+----+
| pk | id |
+----+----+
| 1  | 0  |
| 2  | 1  |
| 3  | 2  |
+----+----+
2020-11-06
08:15 Reply: no such table: generate_series (artifact: 0177297eb7 user: jake)

Although documented as being compiled into the shell since 2016-09-14, it was not actually included until recently (2020-08-28).

I imagine this will just remain a documentation bug until the scheduled 3.34.0 release (2020-12-01).

02:09 Reply: Recursive Update on Date field (artifact: 00e06083f9 user: jake)

If for some reason you are restricted to vanilla SQLite, or a pre-generate_series CLI, then here is a less efficient solution using recursive triggers:

create table Closing
(
  scheduled text not null check (date(scheduled, '+0 days') == scheduled),
  fixed_scheduled text
);
create table Holidays
(
  holiday text not null unique check (date(holiday, '+0 days') == holiday)
);
insert into Holidays values ('2020-11-11');
insert into Holidays values ('2020-12-25');

create trigger ai_closing after insert on closing
begin
    update Closing
       set fixed_scheduled = new.scheduled
     where rowid == new.rowid;
end;

create trigger au_closing_scheduled after update of scheduled on closing
begin
    update Closing
       set fixed_scheduled = new.scheduled
     where rowid == new.rowid;
end;

create trigger au_closing_fixed_scheduled after update of fixed_scheduled on closing
begin
    update Closing
       set fixed_scheduled = date(fixed_scheduled, '+1 day')
     where rowid == new.rowid
       and (strftime('%w', fixed_scheduled) in ('0', '6') or fixed_scheduled in Holidays);
end;

PRAGMA recursive_triggers = ON;

insert into Closing (scheduled) values ('2020-11-10');
insert into Closing (scheduled) values ('2020-12-25');

select * from closing;
00:01 Reply: Use Recursive Date to test Holiday and Weekend and Update Record (artifact: c925c2793f user: jake)

Here's an alternative using the generate_series table-valued function:

UPDATE A
   SET scheduled = (SELECT Date(value)
                      FROM generate_series
                     WHERE start = JulianDay(A.scheduled)+1
                       AND strftime('%w', value) NOT IN ('0', '6')
                       AND Date(value) NOT IN Holidays);
2020-11-03
10:29 Reply: .param converts text to number - How to avoid this (artifact: 1bf202b391 user: jake)

It seems that the SQLite CLI is evaluating your date as the expression 2020 minus 10 minus 1 = 2009. Placing parentheses around the date seems to do the trick in preserving the value.

Reviewing the docs, it's not clear to me when or how .param set evaluates expressions. Here are some examples of the current behaviour:

SQLite version 3.34.0 2020-11-02 00:40:05
Enter ".help" for usage hints.
Connected to a transient in-memory database.
Use ".open FILENAME" to reopen on a persistent database.
sqlite> .param init
sqlite> .mode table
sqlite> .param set $d '2020-10-01'
sqlite> SELECT * FROM sqlite_parameters;
+-----+-------+
| key | value |
+-----+-------+
| $d  | 2009  |
+-----+-------+
sqlite> .param set $d ('2020-10-01')
sqlite> SELECT * FROM sqlite_parameters;
+-----+------------+
| key |   value    |
+-----+------------+
| $d  | 2020-10-01 |
+-----+------------+
sqlite> .param set $expr1 Abs(-10)
sqlite> .param set $expr2 Upper('abc')
sqlite> .param set $expr3 SubStr('abcdef', 1, 3)
.parameter CMD ...       Manage SQL parameter bindings
   clear                   Erase all bindings
   init                    Initialize the TEMP table that holds bindings
   list                    List the current parameter bindings
   set PARAMETER VALUE     Given SQL parameter PARAMETER a value of VALUE
                           PARAMETER should start with one of: $ : @ ?
   unset PARAMETER         Remove PARAMETER from the binding table
sqlite> .param set $expr3 "SubStr('abcdef', 1, 3)"
sqlite> SELECT * FROM sqlite_parameters;
+--------+------------+
|  key   |   value    |
+--------+------------+
| $d     | 2020-10-01 |
| $expr1 | 10         |
| $expr2 | ABC        |
| $expr3 | abc        |
+--------+------------+
2020-11-02
10:03 Reply: Alternative for nesting REPLACE()? (artifact: ac5cd57abc user: jake)

Here's an aggregate replace function I wrote several years ago.

Disclaimer: I was relatively new to C when this was written, so no guarantees except for the fact that I've used it over the years and have not encountered any issues.

2020-10-13
04:57 Reply: Check Contraints (artifact: 9de75df61d user: jake)

An uglier way without any extension function dependencies might look something like this (Hex(ZeroBlob(Length(Vin))) produces '00' repeated once per character of Vin, so the Replace builds a GLOB pattern with one [A-Z0-9] class per character):

sqlite> CREATE TABLE Vehicle(
   ...>   Vin      TEXT NOT NULL PRIMARY KEY,
   ...>   Odometer INTEGER NOT NULL,
   ...>   CHECK (Vin GLOB Replace(Hex(ZeroBlob(Length(Vin))), '00', '[A-Z0-9]'))
   ...> );
sqlite> INSERT INTO Vehicle VALUES ('ABC123', 0);
sqlite> INSERT INTO Vehicle VALUES ('ABCx123', 0);
Error: CHECK constraint failed: Vin GLOB Replace(Hex(ZeroBlob(Length(Vin))), '00', '[A-Z0-9]')
2020-08-05
08:42 Post: CLI fails to bind parameters in tabular output modes (3.33.0 pre-release) (artifact: 17ba6aac24 user: jake)

I've been playing with the new output modes from the upcoming 3.33.0 release and have discovered a bug when using tabular output modes (box/markdown/table/column).

The CLI tool does not bind parameters when using tabular output modes. Non-output queries also fail when in one of these modes.

This appears to be an issue in the following section of code (usage of sqlite3_get_table):

https://www.sqlite.org/src/artifact/352a0a63?ln=3050-3065

Examples using pre-release snapshot:

SQLite version 3.33.0 2020-07-31 23:34:53
Enter ".help" for usage hints.
Connected to a transient in-memory database.
Use ".open FILENAME" to reopen on a persistent database.
sqlite> .param init
sqlite> .param set $test 999
sqlite>
sqlite> --
sqlite> -- These modes work as expected
sqlite> --
sqlite> .mode ascii
sqlite> SELECT $test;
999sqlite> .mode csv
sqlite> SELECT $test;
999
sqlite> .mode html
sqlite> SELECT $test;
<TR><TD>999</TD>
</TR>
sqlite> .mode insert
sqlite> SELECT $test;
INSERT INTO "table" VALUES(999);
sqlite> .mode json
sqlite> SELECT $test;
[{"$test":999}]
sqlite> .mode line
sqlite> SELECT $test;
$test = 999
sqlite> .mode list
sqlite> SELECT $test;
999
sqlite> .mode quote
sqlite> SELECT $test;
999
sqlite> .mode tabs
sqlite> SELECT $test;
999
sqlite> .mode tcl
sqlite> SELECT $test;
"999"
sqlite>
sqlite> --
sqlite> -- The tabular modes do not work as expected (column mode does work in 3.32.3)
sqlite> --
sqlite> .mode box
sqlite> SELECT $test;
┌───────┐
│ $test │
├───────┤
│       │
└───────┘
sqlite> .mode markdown
sqlite> SELECT $test;
| $test |
|-------|
|       |
sqlite> .mode column
sqlite> SELECT $test;
$test
-----

sqlite> .mode table
sqlite> SELECT $test;
+-------+
| $test |
+-------+
|       |
+-------+
sqlite>
sqlite> --
sqlite> -- If we are in one of the tabular modes, then queries such as the following will also fail:
sqlite> --
sqlite> CREATE TEMPORARY TABLE t AS SELECT $test;
sqlite> .mode list
sqlite> SELECT * FROM t;
$test

sqlite>
2020-07-13
13:27 Reply: Charts (artifact: 1cd7b34b46 user: jake)

My first attempt at using generate_series looked like this, but the official generate_series implementation seems to only support integer parameters.
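
The snippet is not reproduced here, but presumably it passed non-integer values straight through, along these lines (a hypothetical reconstruction, using the same $min_x/$max_x/$sample_size parameters as the reply below):

-- hypothetical first attempt: real-valued start/stop/step are not supported
SELECT value x
  FROM generate_series($min_x, $max_x, $sample_size);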

12:11 Reply: Charts (artifact: eabd38e503 user: jake)

Feedback acknowledged. Would this reduce the psychological harm?

SELECT $min_x+value*$sample_size x
  FROM generate_series(0, ($max_x-$min_x)/$sample_size);
00:54 Reply: Charts (artifact: 756b01fb0d user: jake)

I don't know of any tools, but would suggest crafting your own charts. This gives you complete control and all the options you could possibly need. I recently did this while experimenting with the SQLite shell tool compiled as WebAssembly. In these dynamic examples, charts are output as SVG using only the shell tool (partially inspired by Richard Hipp's geopoly demo).

2020-06-03
03:37 Reply: json_append array with multiple values (artifact: c0f2429efb user: jake)

Array concatenation could be achieved by combining the json_each table-valued function with the json_group_array aggregate function.

e.g.

CREATE TABLE example(items JSON);
INSERT INTO example(items) VALUES('{"id":3,"items":{"category":"foo","items":[{"type":1,"amount":1},null,null,null,null]}}');

UPDATE example
   SET items = json_set(items, '$.items.items',
		                (SELECT json_group_array(value)
		                   FROM (SELECT value
		                           FROM json_each(items, '$.items.items')
		                          UNION ALL
		                         SELECT value
		                           FROM json_each('[null, null, null, null, null, null, null, null, null, null]')
		                          LIMIT 10)));

SELECT *
  FROM example;

-- {"id":3,"items":{"category":"foo","items":[{"type":1,"amount":1},null,null,null,null,null,null,null,null,null]}}
2020-05-13
03:33 Reply: Can this simple query be optimized? (artifact: 56d977b08f user: jake)

Very correct Keith.

I was able to achieve a "single scan" of foo with a similar approach to Ryan using window functions and a temp table like this:

SELECT first_value(x) OVER win x_min_y,
       last_value(x) OVER win x_max_y,
       first_value(y) OVER win min_y,
       last_value(y) OVER win max_y
  FROM foo
WINDOW win AS (PARTITION BY x/10 ORDER BY y ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING);

However, the overhead of such a query is worse than the OP's original query, especially if an appropriate index exists, i.e.

CREATE INDEX idx ON foo(x/10, y);

SELECT x, min(y) FROM foo GROUP BY (x/10)
UNION
SELECT x, max(y) FROM foo GROUP BY (x/10);
00:17 Reply: Can this simple query be optimized? (artifact: f1fabba9fb user: jake)

Here are 2 options equivalent to your original query which will only perform a single scan of table foo:

Using a temp table

CREATE TEMPORARY TABLE x AS
SELECT x, min(y) min_y, max(y) max_y FROM foo GROUP BY (x/10);

SELECT x, min_y
  FROM x
 UNION
SELECT x, max_y
  FROM x;

In a single query

SELECT DISTINCT x,
       CASE a
         WHEN 1 THEN min_y
         WHEN 2 THEN max_y
       END y
  FROM (SELECT x, min(y) min_y, max(y) max_y FROM foo GROUP BY (x/10))
  JOIN (SELECT 1 a UNION ALL SELECT 2);
2020-04-19
00:23 Reply: string literals, is there more than one escape? (artifact: f2063b42c4 user: jake)

A string literal can contain embedded newline characters (or any UTF-8 character from what I understand):

SELECT 'It''s raining
cats and dogs.
' AS weather;

An alternative might be to use the built-in printf function.

SELECT PrintF('It''s raining%scats and dogs.%s', Char(10), X'0A') AS weather;
2020-03-25
12:14 Reply: Can I do this as one liner? (artifact: c09a2acd1e user: jake)

SQLite does not support joins in UPDATE queries as far as I know. The following methods should work (also untested):

--
-- UPDATE with subqueries method
--
UPDATE tbl
   SET value = (SELECT value
                  FROM (SELECT type,
                               name,
                               value
                          FROM tbl
                         WHERE type = 'A') 
                 WHERE name = tbl.name)
 WHERE type = 'B'
   AND name IN (SELECT name
                  FROM tbl
                 WHERE type = 'A');

--
-- INSERT OR REPLACE method
--
-- Requires UNIQUE constraint on (type, name).
-- Will DELETE old record then INSERT a new record
--
REPLACE 
   INTO tbl(
       type,
       name,
       value
)
SELECT 'B',
       name,
       value
  FROM tbl
 WHERE type = 'A';

--
-- CTE method (3.8.3+)
--
WITH A AS (
SELECT type,
       name,
       value
  FROM tbl
 WHERE type = 'A'
)
UPDATE tbl
   SET value = (SELECT value
                  FROM A
                 WHERE name = tbl.name)
 WHERE type = 'B'
   AND name IN (SELECT name FROM A);

--
-- UPSERT method (3.24.0+)
--
-- Requires UNIQUE constraint on (type, name)
--
INSERT 
  INTO tbl(
       type,
       name,
       value
)
SELECT 'B',
       name,
       value
  FROM tbl
 WHERE type = 'A'
    ON CONFLICT(type, name) DO
UPDATE
   SET value = excluded.value;