Changes In Branch opfs-unlock-asap Excluding Merge-Ins
This is equivalent to a diff from 5f135575b9 to c0458caca3
2022-11-26
   15:24  OPFS VFS: add the opfs-unlock-asap=1 URI flag which tells the VFS to release implicit locks ASAP instead of during VFS idle time. This improves concurrency notably in the test app but brings a significant performance penalty in speedtest1 (roughly 4x slowdown). This is not the final word in OPFS concurrency, but gets us a step further. (check-in: 9542f9ce9e user: stephan tags: trunk)

2022-11-24
   17:53  More work on the OPFS concurrency testing app. (Closed-Leaf check-in: c0458caca3 user: stephan tags: opfs-unlock-asap)

2022-11-23
   21:03  Add optional zSchema argument to sqlite3_js_db_export(). (check-in: 9c23644b1e user: stephan tags: trunk)
   20:49  OPFS concurrency test: add a URL flag to enable/disable unlock-asap mode. (check-in: 1c1bf22ead user: stephan tags: opfs-unlock-asap)
   16:39  Initial infrastructure for adding a mode to the OPFS VFS which causes implicit locks to be released ASAP, which increases concurrency at the cost of performance. (check-in: c5b7a9715a user: stephan tags: opfs-unlock-asap)
   16:08  Update Makefile.in to include new target "sqlite3r.c". For generating "sqlite3r.c" and "sqlite3r.h", versions of the amalgamation that include the recover extension. To build the shell tool against these files, add -DSQLITE_HAVE_SQLITE3R. (check-in: 5f135575b9 user: dan tags: trunk)
   15:52  Remove a bit of over-cleverness which breaks loading of sqlite3.js in some main-thread cases. Broken by [96f76e7616]. (check-in: 220cc4c639 user: stephan tags: trunk)

2022-11-22
   16:12  Add Makefile.in targets for sqlite3r.c and sqlite3r.h, versions of the amalgamation that include the recover extension. (Closed-Leaf check-in: 59a837cfc7 user: dan tags: make-sqlite3r.c)
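The opfs-unlock-asap=1 flag from the 2022-11-26 check-in is read from the database filename's URI parameters (see the xOpen() change below). A minimal usage sketch, assuming the OpfsDb class defined later in this diff is exposed as sqlite3.opfs.OpfsDb and that the build honors URI-style filenames:

  // Hypothetical: opt into unlock-asap mode via a URI-style filename,
  // trading single-client performance for cross-tab concurrency.
  self.sqlite3InitModule().then((sqlite3)=>{
    const db = new sqlite3.opfs.OpfsDb('file:my.db?opfs-unlock-asap=1','c');
    try{
      db.exec("CREATE TABLE IF NOT EXISTS t(a,b)");
    }finally{
      db.close();
    }
  });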
Changes to ext/wasm/api/sqlite3-api-oo1.js.
︙

       { filename: ..., flags: ..., vfs: ... }

       If passed an object, any additional properties it has are
       copied as-is into the new object.
    */
    dbCtorHelper.normalizeArgs = function(filename=':memory:',flags = 'c',vfs = null){
      const arg = {};

︙
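Since normalizeArgs() now defaults all three positional parameters, the positional and object-style call forms converge. An illustrative sketch (not part of the diff):

  // Both forms should yield an equivalent {filename, flags, vfs} object;
  // extra properties on an object argument are copied through as-is.
  const a = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs('my.db', 'c');
  const b = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs({
    filename: 'my.db', flags: 'c', vfs: null
  });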
Changes to ext/wasm/api/sqlite3-api-opfs.js.
︙

      'SQLITE_LOCK_RESERVED', 'SQLITE_LOCK_SHARED',
      'SQLITE_LOCKED', 'SQLITE_MISUSE', 'SQLITE_NOTFOUND',
      'SQLITE_OPEN_CREATE', 'SQLITE_OPEN_DELETEONCLOSE',
      'SQLITE_OPEN_MAIN_DB', 'SQLITE_OPEN_READONLY'
    ].forEach((k)=>{
      if(undefined === (state.sq3Codes[k] = capi[k])){
        toss("Maintenance required: not found:",k);
      }
    });
    state.opfsFlags = Object.assign(Object.create(null),{
      /**
         Flag for use with xOpen(). "opfs-unlock-asap=1" enables this.
         See defaultUnlockAsap, below.
      */
      OPFS_UNLOCK_ASAP: 0x01,
      /**
         If true, any async routine which implicitly acquires a sync
         access handle (i.e. an OPFS lock) will release that lock at
         the end of the call which acquires it. If false, such
         "autolocks" are not released until the VFS is idle for some
         brief amount of time. The benefit of enabling this is much
         higher concurrency. The down-side is much-reduced performance
         (as much as a 4x decrease in speedtest1).
      */
      defaultUnlockAsap: false
    });

    /**
       Runs the given operation (by name) in the async worker
       counterpart, waits for its response, and returns the result
       which the async worker writes to SAB[state.opIds.rc]. The 2nd
       and subsequent arguments must be the arguments for the

︙
       to encode them... TextEncoder can do that for us.
    */
    warn("OPFS xGetLastError() has nothing sensible to return.");
    return 0;
  },
  //xSleep is optionally defined below
  xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
    mTimeStart('xOpen');
    let opfsFlags = 0;
    if(0===zName){
      zName = randomFilename();
    }else if('number'===typeof zName){
      if(capi.sqlite3_uri_boolean(zName, "opfs-unlock-asap", 0)){
        /* -----------------------^^^^^ MUST pass the untranslated
           C-string here. */
        opfsFlags |= state.opfsFlags.OPFS_UNLOCK_ASAP;
      }
      zName = wasm.cstringToJs(zName);
    }
    const fh = Object.create(null);
    fh.fid = pFile;
    fh.filename = zName;
    fh.sab = new SharedArrayBuffer(state.fileBufferSize);
    fh.flags = flags;

︙
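The "MUST pass the untranslated C-string" note above is significant: sqlite3_uri_boolean() locates a filename's URI parameters in memory just past its NUL terminator, so it must receive the original zName pointer rather than a JS-side copy. A sketch of the distinction:

  // zName is a WASM pointer (a number), as passed to xOpen():
  const asap = capi.sqlite3_uri_boolean(zName, "opfs-unlock-asap", 0);
  // A JS-string copy has lost the URI parameters and is only
  // suitable for logging/display:
  const jsName = wasm.cstringToJs(zName);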
      }
    };
    doDir(opt.directory, 0);
  };

  //TODO to support fiddle and worker1 db upload:
  //opfsUtil.createFile = function(absName, content=undefined){...}
  //We have sqlite3.wasm.sqlite3_wasm_vfs_create_file() for this
  //purpose but its interface and name are still under
  //consideration.

  if(sqlite3.oo1){
    opfsUtil.OpfsDb = function(...args){
      const opt = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs(...args);
      opt.vfs = opfsVfs.$zName;
      sqlite3.oo1.DB.dbCtorHelper.call(this, opt);
    };

︙
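A usage sketch for the OpfsDb convenience class defined above, assuming opfsUtil is exposed to clients as sqlite3.opfs:

  // OpfsDb pins the db to the OPFS VFS, so no vfs option is needed.
  const db = new sqlite3.opfs.OpfsDb('my.db');
  // Equivalently, using the object form accepted by normalizeArgs():
  // const db = new sqlite3.opfs.OpfsDb({filename: 'my.db', flags: 'c'});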
Changes to ext/wasm/api/sqlite3-opfs-async-proxy.js.
︙

   metadata related to a given OPFS file handle. The pointers are, in
   this side of the interface, opaque file handle IDs provided by the
   synchronous part of this constellation. Each value is an object
   with a structure demonstrated in the xOpen() impl.
*/
const __openFiles = Object.create(null);

/**

︙
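For reference, a sketch of the per-file entry shape suggested by the synchronous side's xOpen() excerpt earlier in this diff (only the fields shown there; the async side's entries carry additional state):

  // One __openFiles entry, keyed by the sqlite3_file pointer:
  __openFiles[pFile] = {
    fid: pFile,       // sqlite3_file pointer, used as the opaque handle ID
    filename: zName,  // resolved (possibly randomized) file name
    sab: new SharedArrayBuffer(state.fileBufferSize),
    flags             // sqlite3 xOpen() flags
  };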
  */
  const closeSyncHandle = async (fh)=>{
    if(fh.syncHandle){
      log("Closing sync handle for",fh.filenameAbs);
      const h = fh.syncHandle;
      delete fh.syncHandle;
      delete fh.xLock;

︙
    try{await closeSyncHandle(fh)}
    catch(e){
      warn("closeSyncHandleNoThrow() ignoring:",e,fh);
    }
  };

  /* Release all auto-locks. */

︙
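The body of the auto-lock release helper is elided by the diff viewer. A hypothetical sketch of the bookkeeping it implies (the set name and details are assumptions, not the diff's actual code):

  // Track file IDs whose sync handles were acquired implicitly, and
  // release them all when the VFS goes idle (or, in unlock-asap mode,
  // at the end of the operation which acquired them).
  const autoLocks = new Set();
  const releaseImplicitLocks = async ()=>{
    for(const fid of autoLocks){
      await closeSyncHandleNoThrow(__openFiles[fid]);
    }
    autoLocks.clear();
  };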
     In order to help alleviate cross-tab contention for a database,
     if an exception is thrown while acquiring the handle, this
     routine will wait briefly and try again, up to 3 times. If
     acquisition still fails at that point it will give up and
     propagate the exception.

︙
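A sketch of that retry strategy, assuming the handle comes from the standard OPFS createSyncAccessHandle() API (names and delays are illustrative, not the diff's actual code):

  const getSyncHandle = async (fh)=>{
    const maxTries = 3, msBase = 300;
    for(let i = 1; ; ++i){
      try{
        // Throws if another tab/worker currently holds the handle.
        fh.syncHandle = await fh.fileHandle.createSyncAccessHandle();
        return fh.syncHandle;
      }catch(e){
        if(i === maxTries) throw e; // give up and propagate
        await new Promise(r=>setTimeout(r, msBase * i));
      }
    }
  };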
      }
      storeAndNotify('xAccess', rc);
      mTimeEnd();
    },
    xClose: async function(fid/*sqlite3_file pointer*/){
      const opName = 'xClose';
      mTimeStart(opName);

︙
    xFileSize: async function(fid/*sqlite3_file pointer*/){
      mTimeStart('xFileSize');
      const fh = __openFiles[fid];
      let rc;
      wTimeStart('xFileSize');
      try{
        affirmLocked('xFileSize',fh);

︙
      mTimeStart('xTruncate');
      let rc = 0;
      const fh = __openFiles[fid];
      wTimeStart('xTruncate');
      try{
        affirmLocked('xTruncate',fh);
        affirmNotRO('xTruncate', fh);

︙
      let rc;
      const fh = __openFiles[fid];
      wTimeStart('xWrite');
      try{
        affirmLocked('xWrite',fh);
        affirmNotRO('xWrite', fh);
        rc = (

︙
  }

  /**
     waitTime is how long (ms) to wait for each Atomics.wait().
     We need to wake up periodically to give the thread a chance
     to do other things. If this is too high (e.g. 500ms) then
     even two workers/tabs can easily run into locking errors.
  */

︙
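A sketch of the periodic-wakeup pattern that comment describes (variable names are assumptions): block in Atomics.wait() for at most waitTime ms, treat a timeout as the chance to do idle-time housekeeping such as releasing implicit locks, then wait again:

  const waitTime = 150; // ms; too-high values starve other tabs/workers
  const waitLoop = async ()=>{
    while(true){
      if('timed-out' === Atomics.wait(sabOPView, opIdIndex, 0, waitTime)){
        await releaseImplicitLocks(); // idle-time housekeeping
        continue;
      }
      // ...an operation was posted by the synchronous side: dispatch it...
    }
  };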
      state.sabOP = opt.sabOP;
      state.sabOPView = new Int32Array(state.sabOP);
      state.sabIO = opt.sabIO;
      state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize);
      state.sabS11nView = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
      state.opIds = opt.opIds;
      state.sq3Codes = opt.sq3Codes;
      state.opfsFlags = opt.opfsFlags;
      Object.keys(vfsAsyncImpls).forEach((k)=>{
        if(!Number.isFinite(state.opIds[k])){
          toss("Maintenance required: missing state.opIds[",k,"]");
        }
      });
      initS11n();
      metrics.reset();

︙
Changes to ext/wasm/tests/opfs/concurrency/index.html.
︙

      OPFS concurrency tester using multiple independent Workers.
      Disclaimer: concurrency in OPFS is currently a pain point and
      timing/concurrency mitigation in this environment is highly
      unpredictable!
    </p>
    <p>
      URL flags: pass a number of workers using

︙
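The remainder of that paragraph is elided by the diff viewer. Based on the check-ins above, the page accepts at least a worker-count flag and an unlock-asap toggle; a hypothetical invocation (the unlock-asap flag name is an assumption):

  index.html?workers=4&unlock-asap=1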
Changes to ext/wasm/tests/opfs/concurrency/test.js.
︙

  const urlArgsJs = new URL(document.currentScript.src).searchParams;
  const urlArgsHtml = new URL(self.location.href).searchParams;
  const options = Object.create(null);
  options.sqlite3Dir = urlArgsJs.get('sqlite3.dir');
  options.workerCount = (
    urlArgsHtml.has('workers') ? +urlArgsHtml.get('workers') : 3

︙
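A hypothetical continuation in the same style, folding the branch's unlock-asap URL flag into options (both the property name and the flag name are assumptions):

  options.unlockAsap = (
    urlArgsHtml.has('unlock-asap') ? +urlArgsHtml.get('unlock-asap') : 0
  );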
Changes to ext/wasm/tests/opfs/concurrency/worker.js.
importScripts(
  (new URL(self.location.href).searchParams).get('sqlite3.dir')
  + '/sqlite3.js'
);
self.sqlite3InitModule().then(async function(sqlite3){
  const urlArgs = new URL(self.location.href).searchParams;
  const options = {

︙
      wPost('failed',"Ending work after interval #"+interval.count,
            "due to error:",interval.error);
    }else{
      wPost('finished',"Ending work after",interval.count,"intervals.");
    }
  };
  const run = async function(){

︙
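For context, a hedged sketch of a per-worker work cycle consistent with the excerpts above (options.dbName, options.unlockAsap, and the sqlite3.opfs.OpfsDb exposure are assumptions):

  // Open a dedicated OpfsDb, optionally in unlock-asap mode, do
  // interval-based writes, then report completion via wPost().
  const uri = options.unlockAsap
    ? 'file:'+options.dbName+'?opfs-unlock-asap=1'
    : options.dbName;
  const db = new sqlite3.opfs.OpfsDb(uri);
  try{
    db.exec("CREATE TABLE IF NOT EXISTS t1(w, v)");
    // ...timed work here, ending with wPost('finished', ...)...
  }finally{
    db.close();
  }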