Changes In Branch opfs-unlock-asap Excluding Merge-Ins
This is equivalent to a diff from 5f135575b9 to c0458caca3
2022-11-26
15:24 | OPFS VFS: add the opfs-unlock-asap=1 URI flag which tells the VFS to release implicit locks ASAP instead of during VFS idle time. This improves concurrency notably in the test app but brings a significant performance penalty in speedtest1 (roughly 4x slowdown). This is not the final word in OPFS concurrency, but gets us a step further. (check-in: 9542f9ce9e user: stephan tags: trunk) [usage sketch below the timeline]
2022-11-24
17:53 | More work on the OPFS concurrency testing app. (Closed-Leaf check-in: c0458caca3 user: stephan tags: opfs-unlock-asap)
2022-11-23
21:03 | Add optional zSchema argument to sqlite3_js_db_export(). (check-in: 9c23644b1e user: stephan tags: trunk)
20:49 | OPFS concurrency test: add a URL flag to enable/disable unlock-asap mode. (check-in: 1c1bf22ead user: stephan tags: opfs-unlock-asap)
16:39 | Initial infrastructure for adding a mode to the OPFS VFS which causes implicit locks to be released ASAP, which increases concurrency at the cost of performance. (check-in: c5b7a9715a user: stephan tags: opfs-unlock-asap)
16:08 | Update Makefile.in to include the new target "sqlite3r.c", for generating "sqlite3r.c" and "sqlite3r.h", versions of the amalgamation that include the recover extension. To build the shell tool against these files, add -DSQLITE_HAVE_SQLITE3R. (check-in: 5f135575b9 user: dan tags: trunk)
15:52 | Remove a bit of over-cleverness which breaks loading of sqlite3.js in some main-thread cases. Broken by [96f76e7616]. (check-in: 220cc4c639 user: stephan tags: trunk)
2022-11-22
16:12 | Add Makefile.in targets for sqlite3r.c and sqlite3r.h, versions of the amalgamation that include the recover extension. (Closed-Leaf check-in: 59a837cfc7 user: dan tags: make-sqlite3r.c)
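The opfs-unlock-asap=1 flag described in the 2022-11-26 check-in above is passed as a URI-style query parameter on the database filename. A minimal usage sketch, modelled on the worker.js changes shown further below (the database name here is hypothetical):

    // Trade some raw speed for better cross-tab concurrency on this connection:
    const db = new sqlite3.opfs.OpfsDb({
      filename: 'file:my-app.db?opfs-unlock-asap=1',
      flags: 'c'
    });
    // Optionally let SQLite retry busy locks for up to 5 seconds:
    sqlite3.capi.sqlite3_busy_timeout(db.pointer, 5000);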
Changes to ext/wasm/api/sqlite3-api-oo1.js.
︙

[hunk: old lines 196-202 → new lines 196-211; new text shown]

       { filename: ..., flags: ..., vfs: ... }

       If passed an object, any additional properties it has are
       copied as-is into the new object.
    */
    dbCtorHelper.normalizeArgs = function(filename=':memory:',flags = 'c',vfs = null){
      const arg = {};
      if(1===arguments.length && arguments[0] && 'object'===typeof arguments[0]){
        Object.assign(arg, arguments[0]);
        if(undefined===arg.flags) arg.flags = 'c';
        if(undefined===arg.vfs) arg.vfs = null;
        if(undefined===arg.filename) arg.filename = ':memory:';
      }else{
        arg.filename = filename;
        arg.flags = flags;
        arg.vfs = vfs;

︙
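For illustration, a small sketch of the two calling conventions which the normalized argument handling above accepts, assuming dbCtorHelper is reached as sqlite3.oo1.DB.dbCtorHelper (as the OpfsDb constructor further below does); the filename is hypothetical:

    // Positional form: (filename, flags, vfs).
    const a = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs('my.db', 'c', null);
    // Object form: any missing property gets the same defaults filled in.
    const b = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs({filename: 'my.db'});
    // Both produce an object shaped like {filename: 'my.db', flags: 'c', vfs: null}.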
Changes to ext/wasm/api/sqlite3-api-opfs.js.
︙

[hunk: old lines 344-362 → new lines 344-382; new text shown]

      'SQLITE_LOCK_RESERVED',
      'SQLITE_LOCK_SHARED',
      'SQLITE_LOCKED',
      'SQLITE_MISUSE',
      'SQLITE_NOTFOUND',
      'SQLITE_OPEN_CREATE',
      'SQLITE_OPEN_DELETEONCLOSE',
      'SQLITE_OPEN_MAIN_DB',
      'SQLITE_OPEN_READONLY'
    ].forEach((k)=>{
      if(undefined === (state.sq3Codes[k] = capi[k])){
        toss("Maintenance required: not found:",k);
      }
    });
    state.opfsFlags = Object.assign(Object.create(null),{
      /** Flag for use with xOpen(). "opfs-unlock-asap=1" enables this.
          See defaultUnlockAsap, below. */
      OPFS_UNLOCK_ASAP: 0x01,
      /**
         If true, any async routine which implicitly acquires a sync
         access handle (i.e. an OPFS lock) will release that locks at
         the end of the call which acquires it. If false, such
         "autolocks" are not released until the VFS is idle for some
         brief amount of time.

         The benefit of enabling this is much higher concurrency. The
         down-side is much-reduced performance (as much as a 4x
         decrease in speedtest1).
      */
      defaultUnlockAsap: false
    });

    /**
       Runs the given operation (by name) in the async worker
       counterpart, waits for its response, and returns the result
       which the async worker writes to SAB[state.opIds.rc]. The 2nd
       and subsequent arguments must be the aruguments for the

︙
[hunk: old lines 840-856 → new lines 860-890; new text shown]

         to encode them... TextEncoder can do that for us. */
      warn("OPFS xGetLastError() has nothing sensible to return.");
      return 0;
    },
    //xSleep is optionally defined below
    xOpen: function f(pVfs, zName, pFile, flags, pOutFlags){
      mTimeStart('xOpen');
      let opfsFlags = 0;
      if(0===zName){
        zName = randomFilename();
      }else if('number'===typeof zName){
        if(capi.sqlite3_uri_boolean(zName, "opfs-unlock-asap", 0)){
          /* -----------------------^^^^^ MUST pass the untranslated
             C-string here. */
          opfsFlags |= state.opfsFlags.OPFS_UNLOCK_ASAP;
        }
        zName = wasm.cstringToJs(zName);
      }
      const fh = Object.create(null);
      fh.fid = pFile;
      fh.filename = zName;
      fh.sab = new SharedArrayBuffer(state.fileBufferSize);
      fh.flags = flags;
      const rc = opRun('xOpen', pFile, zName, flags, opfsFlags);
      if(!rc){
        /* Recall that sqlite3_vfs::xClose() will be called, even on
           error, unless pFile->pMethods is NULL. */
        if(fh.readOnly){
          wasm.setMemValue(pOutFlags, capi.SQLITE_OPEN_READONLY, 'i32');
        }
        __openFiles[pFile] = fh;

︙
[hunk: old lines 1141-1154 → new lines 1167-1183; new text shown]

      }
    };
    doDir(opt.directory, 0);
  };

  //TODO to support fiddle and worker1 db upload:
  //opfsUtil.createFile = function(absName, content=undefined){...}
  //We have sqlite3.wasm.sqlite3_wasm_vfs_create_file() for this
  //purpose but its interface and name are still under
  //consideration.

  if(sqlite3.oo1){
    opfsUtil.OpfsDb = function(...args){
      const opt = sqlite3.oo1.DB.dbCtorHelper.normalizeArgs(...args);
      opt.vfs = opfsVfs.$zName;
      sqlite3.oo1.DB.dbCtorHelper.call(this, opt);
    };

︙
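As the OpfsDb constructor above suggests, it is thin sugar over the oo1 DB constructor with the vfs forced to the OPFS VFS's registered name. A rough equivalent, assuming that name is "opfs" (an assumption, not stated in this diff):

    // Roughly what new sqlite3.opfs.OpfsDb('my-app.db') amounts to:
    const db = new sqlite3.oo1.DB({filename: 'my-app.db', flags: 'c', vfs: 'opfs'});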
Changes to ext/wasm/api/sqlite3-opfs-async-proxy.js.
︙

[hunk: old lines 101-107 → new lines 101-127; new text shown]

     metadata related to a given OPFS file handles. The pointers are,
     in this side of the interface, opaque file handle IDs provided by
     the synchronous part of this constellation. Each value is an
     object with a structure demonstrated in the xOpen() impl.
  */
  const __openFiles = Object.create(null);
  /**
     __implicitLocks is a Set of sqlite3_file pointers (integers) which
     were "auto-locked". i.e. those for which we obtained a sync access
     handle without an explicit xLock() call. Such locks will be
     released during db connection idle time, whereas a sync access
     handle obtained via xLock(), or subsequently xLock()'d after
     auto-acquisition, will not be released until xUnlock() is called.

     Maintenance reminder: if we relinquish auto-locks at the end of
     the operation which acquires them, we pay a massive performance
     penalty: speedtest1 benchmarks take up to 4x as long. By delaying
     the lock release until idle time, the hit is negligible.
  */
  const __implicitLocks = new Set();
  /**
     Expects an OPFS file path. It gets resolved, such that ".."
     components are properly expanded, and returned. If the 2nd arg is
     true, the result is returned as an array of path elements, else an
     absolute path string is returned.
  */

︙
[hunk: old lines 162-168 → new lines 162-176; new text shown]

  */
  const closeSyncHandle = async (fh)=>{
    if(fh.syncHandle){
      log("Closing sync handle for",fh.filenameAbs);
      const h = fh.syncHandle;
      delete fh.syncHandle;
      delete fh.xLock;
      __implicitLocks.delete(fh.fid);
      return h.close();
    }
  };

  /**
     A proxy for closeSyncHandle() which is guaranteed to not throw.

︙
[hunk: old lines 186-192 → new lines 186-223; new text shown]

    try{await closeSyncHandle(fh)}
    catch(e){
      warn("closeSyncHandleNoThrow() ignoring:",e,fh);
    }
  };

  /* Release all auto-locks. */
  const releaseImplicitLocks = async ()=>{
    if(__implicitLocks.size){
      /* Release all auto-locks. */
      for(const fid of __implicitLocks){
        const fh = __openFiles[fid];
        await closeSyncHandleNoThrow(fh);
        log("Auto-unlocked",fid,fh.filenameAbs);
      }
    }
  };

  /**
     An experiment in improving concurrency by freeing up implicit
     locks sooner. This is known to impact performance dramatically but
     it has also shown to improve concurrency considerably. If
     fh.releaseImplicitLocks is truthy and fh is in __implicitLocks,
     this routine returns closeSyncHandleNoThrow(), else it is a no-op.
  */
  const releaseImplicitLock = async (fh)=>{
    if(fh.releaseImplicitLocks && __implicitLocks.has(fh.fid)){
      return closeSyncHandleNoThrow(fh);
    }
  };

  /**
     An error class specifically for use with getSyncHandle(), the goal
     of which is to eventually be able to distinguish unambiguously
     between locking-related failures and other types, noting that we
     cannot currently do so because createSyncAccessHandle() does not
     define its exceptions in the required level of detail.

︙
[hunk: old lines 242-248 → new lines 256-300; new text shown]

     In order to help alleviate cross-tab contention for a dabase, if
     an exception is thrown while acquiring the handle, this routine
     will wait briefly and try again, up to 3 times. If acquisition
     still fails at that point it will give up and propagate the
     exception.
  */
  const getSyncHandle = async (fh,opName)=>{
    if(!fh.syncHandle){
      const t = performance.now();
      log("Acquiring sync handle for",fh.filenameAbs);
      const maxTries = 6, msBase = 300;
      let i = 1, ms = msBase;
      for(; true; ms = msBase * ++i){
        try {
          //if(i<3) toss("Just testing getSyncHandle() wait-and-retry.");
          //TODO? A config option which tells it to throw here
          //randomly every now and then, for testing purposes.
          fh.syncHandle = await fh.fileHandle.createSyncAccessHandle();
          break;
        }catch(e){
          if(i === maxTries){
            throw new GetSyncHandleError(
              e, "Error getting sync handle for",opName+"().",maxTries,
              "attempts failed.",fh.filenameAbs
            );
          }
          warn("Error getting sync handle for",opName+"(). Waiting",ms,
               "ms and trying again.",fh.filenameAbs,e);
          //await releaseImplicitLocks();
          Atomics.wait(state.sabOPView, state.opIds.retry, 0, ms);
        }
      }
      log("Got",opName+"() sync handle for",fh.filenameAbs,
          'in',performance.now() - t,'ms');
      if(!fh.xLock){
        __implicitLocks.add(fh.fid);
        log("Auto-locked for",opName+"()",fh.fid,fh.filenameAbs);
      }
    }
    return fh.syncHandle;
  };

  /**
     Stores the given value at state.sabOPView[state.opIds.rc] and then

︙
[hunk: old lines 405-411 → new lines 420-434; new text shown]

      }
      storeAndNotify('xAccess', rc);
      mTimeEnd();
    },
    xClose: async function(fid/*sqlite3_file pointer*/){
      const opName = 'xClose';
      mTimeStart(opName);
      __implicitLocks.delete(fid);
      const fh = __openFiles[fid];
      let rc = 0;
      wTimeStart(opName);
      if(fh){
        delete __openFiles[fid];
        await closeSyncHandle(fh);
        if(fh.deleteOnClose){

︙
[hunk: old lines 470-476 → new lines 485-613; new text shown]

    xFileSize: async function(fid/*sqlite3_file pointer*/){
      mTimeStart('xFileSize');
      const fh = __openFiles[fid];
      let rc;
      wTimeStart('xFileSize');
      try{
        affirmLocked('xFileSize',fh);
        const sz = await (await getSyncHandle(fh,'xFileSize')).getSize();
        state.s11n.serialize(Number(sz));
        rc = 0;
      }catch(e){
        state.s11n.storeException(2,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR);
      }
      await releaseImplicitLock(fh);
      wTimeEnd();
      storeAndNotify('xFileSize', rc);
      mTimeEnd();
    },
    xLock: async function(fid/*sqlite3_file pointer*/,
                          lockType/*SQLITE_LOCK_...*/){
      mTimeStart('xLock');
      const fh = __openFiles[fid];
      let rc = 0;
      const oldLockType = fh.xLock;
      fh.xLock = lockType;
      if( !fh.syncHandle ){
        wTimeStart('xLock');
        try {
          await getSyncHandle(fh,'xLock');
          __implicitLocks.delete(fid);
        }catch(e){
          state.s11n.storeException(1,e);
          rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_LOCK);
          fh.xLock = oldLockType;
        }
        wTimeEnd();
      }
      storeAndNotify('xLock',rc);
      mTimeEnd();
    },
    xOpen: async function(fid/*sqlite3_file pointer*/, filename,
                          flags/*SQLITE_OPEN_...*/,
                          opfsFlags/*OPFS_...*/){
      const opName = 'xOpen';
      mTimeStart(opName);
      const create = (state.sq3Codes.SQLITE_OPEN_CREATE & flags);
      wTimeStart('xOpen');
      try{
        let hDir, filenamePart;
        try {
          [hDir, filenamePart] = await getDirForFilename(filename, !!create);
        }catch(e){
          state.s11n.storeException(1,e);
          storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND);
          mTimeEnd();
          wTimeEnd();
          return;
        }
        const hFile = await hDir.getFileHandle(filenamePart, {create});
        wTimeEnd();
        const fh = Object.assign(Object.create(null),{
          fid: fid,
          filenameAbs: filename,
          filenamePart: filenamePart,
          dirHandle: hDir,
          fileHandle: hFile,
          sabView: state.sabFileBufView,
          readOnly: create
            ? false : (state.sq3Codes.SQLITE_OPEN_READONLY & flags),
          deleteOnClose: !!(state.sq3Codes.SQLITE_OPEN_DELETEONCLOSE & flags)
        });
        fh.releaseImplicitLocks =
          (opfsFlags & state.opfsFlags.OPFS_UNLOCK_ASAP)
          || state.opfsFlags.defaultUnlockAsap;
        if(0 /* this block is modelled after something wa-sqlite does
                but it leads to immediate contention on journal files. */
           && (0===(flags & state.sq3Codes.SQLITE_OPEN_MAIN_DB))){
          /* sqlite does not lock these files, so go ahead and grab an
             OPFS lock.

             https://www.sqlite.org/uri.html
          */
          fh.xLock = "xOpen"/* Truthy value to keep entry from getting
                               flagged as auto-locked. String value so
                               that we can easily distinguish is later
                               if needed. */;
          await getSyncHandle(fh,'xOpen');
        }
        __openFiles[fid] = fh;
        storeAndNotify(opName, 0);
      }catch(e){
        wTimeEnd();
        error(opName,e);
        state.s11n.storeException(1,e);
        storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
      }
      mTimeEnd();
    },
    xRead: async function(fid/*sqlite3_file pointer*/,n,offset64){
      mTimeStart('xRead');
      let rc = 0, nRead;
      const fh = __openFiles[fid];
      try{
        affirmLocked('xRead',fh);
        wTimeStart('xRead');
        nRead = (await getSyncHandle(fh,'xRead')).read(
          fh.sabView.subarray(0, n), {at: Number(offset64)}
        );
        wTimeEnd();
        if(nRead < n){/* Zero-fill remaining bytes */
          fh.sabView.fill(0, nRead, n);
          rc = state.sq3Codes.SQLITE_IOERR_SHORT_READ;
        }
      }catch(e){
        if(undefined===nRead) wTimeEnd();
        error("xRead() failed",e,fh);
        state.s11n.storeException(1,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_READ);
      }
      await releaseImplicitLock(fh);
      storeAndNotify('xRead',rc);
      mTimeEnd();
    },
    xSync: async function(fid/*sqlite3_file pointer*/,flags/*ignored*/){
      mTimeStart('xSync');
      const fh = __openFiles[fid];
      let rc = 0;

︙
[hunk: old lines 599-605 → new lines 628-648; new text shown]

      mTimeStart('xTruncate');
      let rc = 0;
      const fh = __openFiles[fid];
      wTimeStart('xTruncate');
      try{
        affirmLocked('xTruncate',fh);
        affirmNotRO('xTruncate', fh);
        await (await getSyncHandle(fh,'xTruncate')).truncate(size);
      }catch(e){
        error("xTruncate():",e,fh);
        state.s11n.storeException(2,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_TRUNCATE);
      }
      await releaseImplicitLock(fh);
      wTimeEnd();
      storeAndNotify('xTruncate',rc);
      mTimeEnd();
    },
    xUnlock: async function(fid/*sqlite3_file pointer*/,
                            lockType/*SQLITE_LOCK_...*/){
      mTimeStart('xUnlock');

︙
[hunk: old lines 636-642 → new lines 666-689; new text shown]

      let rc;
      const fh = __openFiles[fid];
      wTimeStart('xWrite');
      try{
        affirmLocked('xWrite',fh);
        affirmNotRO('xWrite', fh);
        rc = (
          n === (await getSyncHandle(fh,'xWrite'))
                .write(fh.sabView.subarray(0, n), {at: Number(offset64)})
        ) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
      }catch(e){
        error("xWrite():",e,fh);
        state.s11n.storeException(1,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_WRITE);
      }
      await releaseImplicitLock(fh);
      wTimeEnd();
      storeAndNotify('xWrite',rc);
      mTimeEnd();
    }
  }/*vfsAsyncImpls*/;

  const initS11n = ()=>{

︙
[hunk: old lines 773-779 → new lines 804-824; new text shown]

  }

  /**
     waitTime is how long (ms) to wait for each Atomics.wait(). We need
     to wake up periodically to give the thread a chance to do other
     things. If this is too high (e.g. 500ms) then even two workers/tabs
     can easily run into locking errors.
  */
  const waitTime = 100;
  while(!flagAsyncShutdown){
    try {
      if('timed-out'===Atomics.wait(
        state.sabOPView, state.opIds.whichOp, 0, waitTime
      )){
        await releaseImplicitLocks();
        continue;
      }
      const opId = Atomics.load(state.sabOPView, state.opIds.whichOp);
      Atomics.store(state.sabOPView, state.opIds.whichOp, 0);
      const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId);
      const args = state.s11n.deserialize(
        true /* clear s11n to keep the caller from confusing this with

︙
[hunk: old lines 820-833 → new lines 851-865; new text shown]

    state.sabOP = opt.sabOP;
    state.sabOPView = new Int32Array(state.sabOP);
    state.sabIO = opt.sabIO;
    state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize);
    state.sabS11nView = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
    state.opIds = opt.opIds;
    state.sq3Codes = opt.sq3Codes;
    state.opfsFlags = opt.opfsFlags;
    Object.keys(vfsAsyncImpls).forEach((k)=>{
      if(!Number.isFinite(state.opIds[k])){
        toss("Maintenance required: missing state.opIds[",k,"]");
      }
    });
    initS11n();
    metrics.reset();

︙
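As an aside, the wait-and-retry logic added to getSyncHandle() above boils down to a linear-backoff acquisition pattern. A standalone sketch of that pattern follows; the function and parameter names are illustrative rather than taken from the source, and the real code blocks via Atomics.wait() instead of setTimeout():

    async function acquireWithRetry(acquire, maxTries = 6, msBase = 300){
      for(let i = 1; ; ++i){
        try{
          return await acquire(); // e.g. ()=>fileHandle.createSyncAccessHandle()
        }catch(e){
          if(i === maxTries) throw e;
          // Linear backoff: wait msBase*i ms before the next attempt.
          await new Promise(resolve => setTimeout(resolve, msBase * i));
        }
      }
    }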
Changes to ext/wasm/tests/opfs/concurrency/index.html.
︙

[hunk: old lines 20-26 → new lines 20-40; new text shown]

      OPFS concurrency tester using multiple independent Workers.
      Disclaimer: concurrency in OPFS is currently a pain point and
      timing/concurrency mitigation in this environment is highly
      unpredictable!
    </p>
    <p>
      URL flags: pass a number of workers using
      the <code>workers=N</code> URL flag. Set the time between each
      workload with <code>interval=N</code> (milliseconds). Set the
      number of worker iterations with <code>iterations=N</code>.
      Enable OPFS VFS verbosity with <code>verbose=1-3</code> (output
      goes to the dev console). Enable/disable "unlock ASAP" mode
      (higher concurrency, lower speed) with
      <code>unlock-asap=0-1</code>.
    </p>
    <p>Achtung: if it does not start to do anything within a couple of
      seconds, check the dev console: Chrome often fails with "cannot
      allocate WasmMemory" at startup. Closing and re-opening the tab
      usually resolves it.
    </p>
    <div class='input-wrapper'>

︙
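For example, an invocation of this test page with all of the documented flags might look like the following (the path and values are hypothetical):

    index.html?workers=4&interval=750&iterations=10&verbose=1&unlock-asap=1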
Changes to ext/wasm/tests/opfs/concurrency/test.js.
︙

[hunk: old lines 52-58 → new lines 52-129; new text shown]

  const urlArgsJs = new URL(document.currentScript.src).searchParams;
  const urlArgsHtml = new URL(self.location.href).searchParams;
  const options = Object.create(null);
  options.sqlite3Dir = urlArgsJs.get('sqlite3.dir');
  options.workerCount = (
    urlArgsHtml.has('workers') ? +urlArgsHtml.get('workers') : 3
  ) || 4;
  options.opfsVerbose = (
    urlArgsHtml.has('verbose') ? +urlArgsHtml.get('verbose') : 1
  ) || 1;
  options.interval = (
    urlArgsHtml.has('interval') ? +urlArgsHtml.get('interval') : 750
  ) || 1000;
  options.iterations = (
    urlArgsHtml.has('iterations') ? +urlArgsHtml.get('iterations') : 10
  ) || 10;
  options.unlockAsap = (
    urlArgsHtml.has('unlock-asap') ? +urlArgsHtml.get('unlock-asap') : 0
  ) || 0;
  const workers = [];
  workers.post = (type,...args)=>{
    for(const w of workers) w.postMessage({type, payload:args});
  };
  workers.counts = {loaded: 0, passed: 0, failed: 0};
  const checkFinished = function(){
    if(workers.counts.passed + workers.counts.failed !== workers.length){
      return;
    }
    if(workers.counts.failed>0){
      logCss('tests-fail',"Finished with",workers.counts.failed,"failure(s).");
    }else{
      logCss('tests-pass',"All",workers.length,"workers finished.");
    }
  };
  workers.onmessage = function(msg){
    msg = msg.data;
    const prefix = 'Worker #'+msg.worker+':';
    switch(msg.type){
      case 'loaded':
        stdout(prefix,"loaded");
        if(++workers.counts.loaded === workers.length){
          stdout("All",workers.length,"workers loaded. Telling them to run...");
          workers.post('run');
        }
        break;
      case 'stdout': stdout(prefix,...msg.payload); break;
      case 'stderr': stderr(prefix,...msg.payload); break;
      case 'error': stderr(prefix,"ERROR:",...msg.payload); break;
      case 'finished':
        ++workers.counts.passed;
        logCss('tests-pass',prefix,...msg.payload);
        checkFinished();
        break;
      case 'failed':
        ++workers.counts.failed;
        logCss('tests-fail',prefix,"FAILED:",...msg.payload);
        checkFinished();
        break;
      default: logCss('error',"Unhandled message type:",msg); break;
    }
  };
  stdout("Launching",options.workerCount,"workers. Options:",options);
  workers.uri = (
    'worker.js?'
      + 'sqlite3.dir='+options.sqlite3Dir
      + '&interval='+options.interval
      + '&iterations='+options.iterations
      + '&opfs-verbose='+options.opfsVerbose
      + '&opfs-unlock-asap='+options.unlockAsap
  );
  for(let i = 0; i < options.workerCount; ++i){
    stdout("Launching worker...");
    workers.push(new Worker(
      workers.uri+'&workerId='+(i+1)+(i ? '' : '&unlink-db')
    ));
  }

︙
Changes to ext/wasm/tests/opfs/concurrency/worker.js.
[hunk: old lines 1-5 → new lines 1-18; new text shown]

importScripts(
  (new URL(self.location.href).searchParams).get('sqlite3.dir')
  + '/sqlite3.js'
);
self.sqlite3InitModule().then(async function(sqlite3){
  const urlArgs = new URL(self.location.href).searchParams;
  const options = {
    workerName: urlArgs.get('workerId') || Math.round(Math.random()*10000),
    unlockAsap: urlArgs.get('opfs-unlock-asap') || 0 /*EXPERIMENTAL*/
  };
  const wPost = (type,...payload)=>{
    postMessage({type, worker: options.workerName, payload});
  };
  const stdout = (...args)=>wPost('stdout',...args);
  const stderr = (...args)=>wPost('stderr',...args);
  if(!sqlite3.opfs){
    stderr("OPFS support not detected. Aborting.");
    return;
  }

︙
[hunk: old lines 39-45 → new lines 42-79; new text shown]

      wPost('failed',"Ending work after interval #"+interval.count,
            "due to error:",interval.error);
    }else{
      wPost('finished',"Ending work after",interval.count,"intervals.");
    }
  };
  const run = async function(){
    db = new sqlite3.opfs.OpfsDb({
      filename: 'file:'+dbName+'?opfs-unlock-asap='+options.unlockAsap,
      flags: 'c'
    });
    sqlite3.capi.sqlite3_busy_timeout(db.pointer, 5000);
    db.transaction((db)=>{
      db.exec([
        "create table if not exists t1(w TEXT UNIQUE ON CONFLICT REPLACE,v);",
        "create table if not exists t2(w TEXT UNIQUE ON CONFLICT REPLACE,v);"
      ]);
    });

    const maxIterations =
      urlArgs.has('iterations') ? (+urlArgs.get('iterations') || 10) : 10;
    stdout("Starting interval-based db updates with delay of",interval.delay,"ms.");
    const doWork = async ()=>{
      const tm = new Date().getTime();
      ++interval.count;
      const prefix = "v(#"+interval.count+")";
      stdout("Setting",prefix,"=",tm);
      try{
        db.exec({
          sql:"INSERT OR REPLACE INTO t1(w,v) VALUES(?,?)",
          bind: [options.workerName, new Date().getTime()]
        });
        //stdout("Set",prefix);
      }catch(e){
        interval.error = e;
      }
    };
    if(1){/*use setInterval()*/

︙