Mirror of https://github.com/sqlite/sqlite.git
An alternative messaging strategy for the OPFS VFS proxy which uses only SharedArrayBuffer and Atomics, instead of worker messages, for communication (only the initial one-time handshake during initialization uses worker messages). It runs speedtest1 approx. 15-20% faster but still 20-ish% slower than WASMFS.
FossilOrigin-Name: a83ee3082d89439ea3ad5737e63e25bebb0f91895aca006ce5fecf5b93a2651a
This commit is contained in:
parent 138647a552
commit 5e8bb0aa13
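In short, the new messaging model works like this: the synchronous VFS half serializes an op's arguments into a SharedArrayBuffer, publishes the op's numeric ID in a shared "whichOp" slot, Atomics.notify()s the async half, then blocks in Atomics.wait() on that op's dedicated result slot; the async half wakes up, performs the OPFS work, stores the result code in that slot and notifies it. A condensed sketch of the idea, using the same names as the state object in the diff below (illustrative only, not the literal implementation):

// Synchronous (VFS) side: dispatch one op and block until its result arrives.
const opRun = (op, ...args)=>{
  state.s11n.serialize(...args);                         // args go into a shared buffer
  Atomics.store(state.sabOPView, state.rcIds[op], -1);   // "no result yet"
  Atomics.store(state.sabOPView, state.opIds.whichOp, state.opIds[op]);
  Atomics.notify(state.sabOPView, state.opIds.whichOp);  // wake the async half
  Atomics.wait(state.sabOPView, state.rcIds[op], -1);    // block until the rc slot changes
  return Atomics.load(state.sabOPView, state.rcIds[op]);
};
// Asynchronous (OPFS) side: wait for work, run it, publish the result code.
const waitLoop = async ()=>{
  for(;;){
    if('timed-out'===Atomics.wait(state.sabOPView, state.opIds.whichOp, 0, 150)) continue;
    const opId = Atomics.load(state.sabOPView, state.opIds.whichOp);
    Atomics.store(state.sabOPView, state.opIds.whichOp, 0);
    const args = state.s11n.deserialize();
    // opHandlers maps numeric op IDs back to their vfsAsyncImpls entries; each
    // handler finishes by storing its rc into rcIds[op] and Atomics.notify()ing it.
    await opHandlers[opId].f(...args);
  }
};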
ext/wasm/GNUmakefile:
@ -65,7 +65,7 @@ SQLITE_OPT = \

.PHONY: release
release:
$(MAKE) 'emcc_opt=-Os -g3 -flto'
$(MAKE) "emcc_opt=-Os -g3 -flto" fiddle_opt=-Os
# ^^^^^ target-specific vars, e.g.:
# release: emcc_opt=...
# apparently only work for file targets, not PHONY targets?
@ -432,19 +432,20 @@ push-fiddle: $(fiddle_files)
# experimentation shows -O2 to be the clear winner in terms of speed.
# Note that build times with anything higher than -O0 are somewhat
# painful.

.PHONY: o0 o1 o2 o3 os oz
o0:
$(MAKE) clean; $(MAKE) -e emcc_opt=-O0
$(MAKE) clean; $(MAKE) -e "emcc_opt=-O0 -flto" fiddle_opt=-O0
o1:
$(MAKE) clean; $(MAKE) -e emcc_opt=-O1
$(MAKE) clean; $(MAKE) -e "emcc_opt=-O1 -flto" fiddle_opt=-O1
o2:
$(MAKE) clean; $(MAKE) -e emcc_opt=-O2
$(MAKE) clean; $(MAKE) -e "emcc_opt=-O2 -flto" fiddle_opt=-O2
o3:
$(MAKE) clean; $(MAKE) -e emcc_opt=-O3
$(MAKE) clean; $(MAKE) -e "emcc_opt=-O3 -flto" fiddle_opt=-O3
os:
$(MAKE) clean; $(MAKE) -e emcc_opt=-Os
$(MAKE) clean; $(MAKE) -e "emcc_opt=-Os -flto" fiddle_opt=-Os
oz:
$(MAKE) clean; $(MAKE) -e emcc_opt=-Oz
$(MAKE) clean; $(MAKE) -e "emcc_opt=-Oz -flto" fiddle_opt=-Oz

########################################################################
# Sub-makes...
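(With the targets above in place, comparing optimization levels becomes a one-liner: for example "make o2" performs a clean rebuild with "-O2 -flto", and "make release" uses the -Os settings shown in the first hunk. This assumes the Emscripten toolchain setup which the rest of this makefile already requires.)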
ext/wasm/api/sqlite3-api-opfs.js:
@ -128,7 +128,6 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
// failure is, e.g., that the remote script is 404.
promiseReject(new Error("Loading OPFS async Worker failed for unknown reasons."));
};
const wMsg = (type,args)=>W.postMessage({type,args});
/**
Generic utilities for working with OPFS. This will get filled out
by the Promise setup and, on success, installed as sqlite3.opfs.
@ -203,7 +202,6 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
{
let i = 0;
state.opIds.whichOp = i++;
state.opIds.nothing = i++;
state.opIds.xAccess = i++;
state.rcIds.xAccess = i++;
state.opIds.xClose = i++;
@ -228,8 +226,9 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
state.rcIds.xWrite = i++;
state.opIds.mkdir = i++;
state.rcIds.mkdir = i++;
state.opIds.xFileControl = i++;
state.rcIds.xFileControl = i++;
state.sabOP = new SharedArrayBuffer(i * 4/*sizeof int32*/);
state.opIds.xFileControl = state.opIds.xSync /* special case */;
opfsUtil.metrics.reset();
}
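For reference: each op name above is assigned one dedicated 32-bit slot, ops which the synchronous side waits on get a second slot in rcIds for their result code, and sabOP is sized to exactly that many int32s. The Int32Array view referred to as state.sabOPView elsewhere in this diff is not constructed in this hunk; presumably it is simply a view over that buffer, along the lines of:

// Assumed setup of the shared view used by Atomics.load/store/wait/notify:
state.sabOP     = new SharedArrayBuffer(i * 4 /* one int32 per allocated slot */);
state.sabOPView = new Int32Array(state.sabOP);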
@ -260,12 +259,17 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
given operation's signature in the async API counterpart.
*/
const opRun = (op,...args)=>{
const rcNdx = state.rcIds[op] || toss("Invalid rc ID:",op);
const opNdx = state.opIds[op] || toss("Invalid op ID:",op);
state.s11n.serialize(...args);
Atomics.store(state.sabOPView, rcNdx, -1);
Atomics.store(state.sabOPView, state.opIds.whichOp, opNdx);
Atomics.notify(state.sabOPView, state.opIds.whichOp) /* async thread will take over here */;
const t = performance.now();
Atomics.store(state.sabOPView, state.opIds[op], -1);
wMsg(op, args);
Atomics.wait(state.sabOPView, state.opIds[op], -1);
Atomics.wait(state.sabOPView, rcNdx, -1);
const rc = Atomics.load(state.sabOPView, rcNdx);
metrics[op].wait += performance.now() - t;
return Atomics.load(state.sabOPView, state.opIds[op]);
return rc;
};

const initS11n = ()=>{
@ -297,11 +301,25 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
serialization for simplicy of implementation, but if that
proves imperformant then a lower-level approach will be
created.

If passed "too much data" (more that the shared buffer size
it will either throw or truncate the data (not certain
which)). This routine is only intended for serializing OPFS
VFS arguments and (in at least one special case) result
values, and the buffer is sized to be able to comfortably
handle those.

If passed no arguments then it zeroes out the serialization
state.
*/
state.s11n.serialize = function(...args){
const json = jsonEncoder.encode(JSON.stringify(args));
viewSz.setInt32(0, json.byteLength, state.littleEndian);
viewJson.set(json);
if(args.length){
const json = jsonEncoder.encode(JSON.stringify(args));
viewSz.setInt32(0, json.byteLength, state.littleEndian);
viewJson.set(json);
}else{
viewSz.setInt32(0, 0, state.littleEndian);
}
};
return state.s11n;
};
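serialize() above writes a 4-byte length prefix followed by the UTF-8 JSON encoding of its arguments into the shared serialization buffer. The matching deserialize() used by the receiving thread is not shown in this hunk; a hedged sketch of what it presumably does (viewSz, viewJson and a TextDecoder counterpart of jsonEncoder are assumed here):

state.s11n.deserialize = function(){
  const sz = viewSz.getInt32(0, state.littleEndian);  // byte length written by serialize()
  if(!sz) return null;                                // serialize() was called with no args
  // slice() copies the bytes out of the SharedArrayBuffer so TextDecoder will accept them.
  return JSON.parse(jsonDecoder.decode(viewJson.slice(0, sz)));
};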
@ -552,9 +570,8 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
const vfsSyncWrappers = {
xAccess: function(pVfs,zName,flags,pOut){
mTimeStart('xAccess');
wasm.setMemValue(
pOut, (opRun('xAccess', wasm.cstringToJs(zName)) ? 0 : 1), 'i32'
);
const rc = opRun('xAccess', wasm.cstringToJs(zName));
wasm.setMemValue( pOut, (rc ? 0 : 1), 'i32' );
mTimeEnd();
return 0;
},
@ -686,22 +703,12 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
opfsUtil.deleteEntry = function(fsEntryName,recursive=false){
return 0===opRun('xDelete', fsEntryName, 0, recursive);
};
/**
Exactly like deleteEntry() but runs asynchronously. This is a
"fire and forget" operation: it does not return a promise
because the counterpart operation happens in another thread and
waiting on that result in a Promise would block the OPFS VFS
from acting until it completed.
*/
opfsUtil.deleteEntryAsync = function(fsEntryName,recursive=false){
wMsg('xDeleteNoWait', [fsEntryName, 0, recursive]);
};
/**
Synchronously creates the given directory name, recursively, in
the OPFS filesystem. Returns true if it succeeds or the
directory already exists, else false.
*/
opfsUtil.mkdir = async function(absDirName){
opfsUtil.mkdir = function(absDirName){
return 0===opRun('mkdir', absDirName);
};
/**
@ -736,7 +743,7 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
features like getting a directory listing.
*/

const sanityCheck = async function(){
const sanityCheck = function(){
const scope = wasm.scopedAllocPush();
const sq3File = new sqlite3_file();
try{
@ -791,6 +798,7 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
rc = wasm.getMemValue(pOut,'i32');
if(rc) toss("Expecting 0 from xAccess(",dbFile,") after xDelete().");
log("End of OPFS sanity checks.");
}finally{
sq3File.dispose();
wasm.scopedAllocPop(scope);
@ -803,7 +811,7 @@ sqlite3.installOpfsVfs = function callee(asyncProxyUri = callee.defaultProxyUri)
switch(data.type){
case 'opfs-async-loaded':
/*Pass our config and shared state on to the async worker.*/
wMsg('opfs-async-init',state);
W.postMessage({type: 'opfs-async-init',args: state});
break;
case 'opfs-async-inited':{
/*Indicates that the async partner has received the 'init',
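Once the VFS is installed, the synchronous utilities collected in sqlite3.opfs (mkdir(), deleteEntry(), and friends shown earlier in this file's diff) can be called from the same thread as the VFS. An illustrative use, assuming sqlite3.installOpfsVfs() has resolved successfully:

const opfs = sqlite3.opfs;
if( opfs.mkdir('/my-app/data') ){           // true if created or already present
  // ...open or import databases under /my-app/data using the "opfs" VFS...
}
opfs.deleteEntry('/my-app/data/stale.db');  // synchronous; true on success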
ext/wasm/speedtest1-worker.html:
@ -212,70 +212,71 @@
/* TODO? Flags which require values need custom UI
controls and some of them make little sense here
(e.g. --script FILE). */
flags["autovacuum"] = "Enable AUTOVACUUM mode";
flags["big-transactions"] = "Important for tests 410 and 510!";
//flags["cachesize"] = "N Set the cache size to N";
flags["checkpoint"] = "Run PRAGMA wal_checkpoint after each test case";
flags["exclusive"] = "Enable locking_mode=EXCLUSIVE";
flags["explain"] = "Like --sqlonly but with added EXPLAIN keywords";
//flags["heap"] = "SZ MIN Memory allocator uses SZ bytes & min allocation MIN";
flags["incrvacuum"] = "Enable incremenatal vacuum mode";
//flags["journal"] = "M Set the journal_mode to M";
//flags["key"] = "KEY Set the encryption key to KEY";
//flags["lookaside"] = "N SZ Configure lookaside for N slots of SZ bytes each";
flags["memdb"] = "Use an in-memory database";
//flags["mmap"] = "SZ MMAP the first SZ bytes of the database file";
flags["multithread"] = "Set multithreaded mode";
flags["nomemstat"] = "Disable memory statistics";
flags["nomutex"] = "Open db with SQLITE_OPEN_NOMUTEX";
flags["nosync"] = "Set PRAGMA synchronous=OFF";
flags["notnull"] = "Add NOT NULL constraints to table columns";
//flags["output"] = "FILE Store SQL output in FILE";
//flags["pagesize"] = "N Set the page size to N";
//flags["pcache"] = "N SZ Configure N pages of pagecache each of size SZ bytes";
//flags["primarykey"] = "Use PRIMARY KEY instead of UNIQUE where appropriate";
//flags["repeat"] = "N Repeat each SELECT N times (default: 1)";
flags["reprepare"] = "Reprepare each statement upon every invocation";
//flags["reserve"] = "N Reserve N bytes on each database page";
//flags["script"] = "FILE Write an SQL script for the test into FILE";
flags["serialized"] = "Set serialized threading mode";
flags["singlethread"] = "Set single-threaded mode - disables all mutexing";
flags["sqlonly"] = "No-op. Only show the SQL that would have been run.";
flags["shrink"] = "memory Invoke sqlite3_db_release_memory() frequently.";
//flags["size"] = "N Relative test size. Default=100";
flags["strict"] = "Use STRICT table where appropriate";
flags["stats"] = "Show statistics at the end";
//flags["temp"] = "N N from 0 to 9. 0: no temp table. 9: all temp tables";
//flags["testset"] = "T Run test-set T (main, cte, rtree, orm, fp, debug)";
flags["trace"] = "Turn on SQL tracing";
//flags["threads"] = "N Use up to N threads for sorting";
flags["--autovacuum"] = "Enable AUTOVACUUM mode";
flags["--big-transactions"] = "Important for tests 410 and 510!";
//flags["--cachesize"] = "N Set the cache size to N";
flags["--checkpoint"] = "Run PRAGMA wal_checkpoint after each test case";
flags["--exclusive"] = "Enable locking_mode=EXCLUSIVE";
flags["--explain"] = "Like --sqlonly but with added EXPLAIN keywords";
//flags["--heap"] = "SZ MIN Memory allocator uses SZ bytes & min allocation MIN";
flags["--incrvacuum"] = "Enable incremenatal vacuum mode";
//flags["--journal"] = "M Set the journal_mode to M";
//flags["--key"] = "KEY Set the encryption key to KEY";
//flags["--lookaside"] = "N SZ Configure lookaside for N slots of SZ bytes each";
flags["--memdb"] = "Use an in-memory database";
//flags["--mmap"] = "SZ MMAP the first SZ bytes of the database file";
flags["--multithread"] = "Set multithreaded mode";
flags["--nomemstat"] = "Disable memory statistics";
flags["--nomutex"] = "Open db with SQLITE_OPEN_NOMUTEX";
flags["--nosync"] = "Set PRAGMA synchronous=OFF";
flags["--notnull"] = "Add NOT NULL constraints to table columns";
//flags["--output"] = "FILE Store SQL output in FILE";
//flags["--pagesize"] = "N Set the page size to N";
//flags["--pcache"] = "N SZ Configure N pages of pagecache each of size SZ bytes";
//flags["--primarykey"] = "Use PRIMARY KEY instead of UNIQUE where appropriate";
//flags["--repeat"] = "N Repeat each SELECT N times (default: 1)";
flags["--reprepare"] = "Reprepare each statement upon every invocation";
//flags["--reserve"] = "N Reserve N bytes on each database page";
//flags["--script"] = "FILE Write an SQL script for the test into FILE";
flags["--serialized"] = "Set serialized threading mode";
flags["--singlethread"] = "Set single-threaded mode - disables all mutexing";
flags["--sqlonly"] = "No-op. Only show the SQL that would have been run.";
flags["--shrink"] = "memory Invoke sqlite3_db_release_memory() frequently.";
//flags["--size"] = "N Relative test size. Default=100";
flags["--strict"] = "Use STRICT table where appropriate";
flags["--stats"] = "Show statistics at the end";
//flags["--temp"] = "N N from 0 to 9. 0: no temp table. 9: all temp tables";
//flags["--testset"] = "T Run test-set T (main, cte, rtree, orm, fp, debug)";
flags["--trace"] = "Turn on SQL tracing";
//flags["--threads"] = "N Use up to N threads for sorting";
/*
The core API's WASM build does not support UTF16, but in
this app it's not an issue because the data are not crossing
JS/WASM boundaries.
*/
flags["utf16be"] = "Set text encoding to UTF-16BE";
flags["utf16le"] = "Set text encoding to UTF-16LE";
flags["verify"] = "Run additional verification steps.";
flags["without"] = "rowid Use WITHOUT ROWID where appropriate";
flags["--utf16be"] = "Set text encoding to UTF-16BE";
flags["--utf16le"] = "Set text encoding to UTF-16LE";
flags["--verify"] = "Run additional verification steps.";
flags["--without"] = "rowid Use WITHOUT ROWID where appropriate";
const preselectedFlags = [
'big-transactions',
'singlethread'
'--big-transactions',
'--singlethread'
];
if('opfs'!==urlParams.get('vfs')){
preselectedFlags.push('memdb');
if(urlParams.has('flags')){
preselectedFlags.push(...urlParams.get('flags').split(','));
}
if('opfs'!==urlParams.get('vfs')){
preselectedFlags.push('--memdb');
}

Object.keys(flags).sort().forEach(function(f){
const opt = document.createElement('option');
eFlags.appendChild(opt);
const lbl = nbspPad('--'+f)+flags[f];
const lbl = nbspPad(f)+flags[f];
//opt.innerText = lbl;
opt.innerHTML = lbl;
opt.value = '--'+f;
opt.value = f;
if(preselectedFlags.indexOf(f) >= 0) opt.selected = true;
});

});
const cbReverseLog = E('#cb-reverse-log-order');
const lblReverseLog = E('#lbl-reverse-log-order');
if(cbReverseLog.checked){
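The practical upshot is that option values now carry their "--" prefix verbatim, and extra speedtest1 flags can be preselected via the page's query string. For example (assuming urlParams reflects the page's query string, as the code above suggests), loading speedtest1-worker.html?vfs=opfs&flags=--nosync,--stats would start with --big-transactions, --singlethread, --nosync and --stats selected, and would skip --memdb because vfs=opfs.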
ext/wasm/sqlite3-opfs-async-proxy.js:
@ -63,7 +63,7 @@ const error = (...args)=>logImpl(0, ...args);
const metrics = Object.create(null);
metrics.reset = ()=>{
let k;
const r = (m)=>(m.count = m.time = 0);
const r = (m)=>(m.count = m.time = m.wait = 0);
for(k in state.opIds){
r(metrics[k] = Object.create(null));
}
@ -74,11 +74,15 @@ metrics.dump = ()=>{
const m = metrics[k];
n += m.count;
t += m.time;
w += m.wait;
m.avgTime = (m.count && m.time) ? (m.time / m.count) : 0;
}
console.log(self.location.href,
"metrics for",self.location.href,":",metrics,
"\nTotal of",n,"op(s) for",t,"ms");
"metrics for",self.location.href,":\n",
JSON.stringify(metrics,0,2)
/*dev console can't expand this object!*/,
"\nTotal of",n,"op(s) for",t,"ms",
"approx",w,"ms spent waiting on OPFS APIs.");
};

warn("This file is very much experimental and under construction.",
@ -130,9 +134,9 @@ const getDirForPath = async function f(absFilename, createDirs = false){
and then Atomics.notify()'s it.
*/
const storeAndNotify = (opName, value)=>{
log(opName+"() is notify()ing w/ value:",value);
Atomics.store(state.sabOPView, state.opIds[opName], value);
Atomics.notify(state.sabOPView, state.opIds[opName]);
log(opName+"() => notify(",state.rcIds[opName],",",value,")");
Atomics.store(state.sabOPView, state.rcIds[opName], value);
Atomics.notify(state.sabOPView, state.rcIds[opName]);
};

/**
@ -155,6 +159,17 @@ const mTimeStart = (op)=>{
const mTimeEnd = ()=>(
metrics[opTimer.op].time += performance.now() - opTimer.start
);
const waitTimer = Object.create(null);
waitTimer.op = undefined;
waitTimer.start = undefined;
const wTimeStart = (op)=>{
waitTimer.start = performance.now();
waitTimer.op = op;
//metrics[op] || toss("Maintenance required: missing metrics for",op);
};
const wTimeEnd = ()=>(
metrics[waitTimer.op].wait += performance.now() - waitTimer.start
);

/**
Asynchronous wrappers for sqlite3_vfs and sqlite3_io_methods
@ -163,17 +178,20 @@ const mTimeEnd = ()=>(
*/
const vfsAsyncImpls = {
mkdir: async function(dirname){
mTimeStart('mkdir');
let rc = 0;
wTimeStart('mkdir');
try {
await getDirForPath(dirname+"/filepart", true);
}catch(e){
//error("mkdir failed",filename, e.message);
rc = state.sq3Codes.SQLITE_IOERR;
}
wTimeEnd();
storeAndNotify('mkdir', rc);
mTimeEnd();
},
xAccess: async function(filename){
log("xAccess(",arguments[0],")");
mTimeStart('xAccess');
/* OPFS cannot support the full range of xAccess() queries sqlite3
calls for. We can essentially just tell if the file is
@ -187,20 +205,23 @@ const vfsAsyncImpls = {
accessible, non-0 means not accessible.
*/
let rc = 0;
wTimeStart('xAccess');
try{
const [dh, fn] = await getDirForPath(filename);
await dh.getFileHandle(fn);
}catch(e){
rc = state.sq3Codes.SQLITE_IOERR;
}
wTimeEnd();
storeAndNotify('xAccess', rc);
mTimeEnd();
},
xClose: async function(fid){
const opName = 'xClose';
mTimeStart(opName);
log(opName+"(",arguments[0],")");
const fh = __openFiles[fid];
let rc = 0;
wTimeStart('xClose');
if(fh){
delete __openFiles[fid];
if(fh.accessHandle) await fh.accessHandle.close();
@ -208,10 +229,11 @@ const vfsAsyncImpls = {
try{ await fh.dirHandle.removeEntry(fh.filenamePart) }
catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) }
}
storeAndNotify(opName, 0);
}else{
storeAndNotify(opName, state.sq3Codes.SQLITE_NOFOUND);
rc = state.sq3Codes.SQLITE_NOTFOUND;
}
wTimeEnd();
storeAndNotify(opName, rc);
mTimeEnd();
},
xDelete: async function(...args){
@ -233,12 +255,11 @@ const vfsAsyncImpls = {
presumably it will fail if the dir is not empty and that flag
is false.
*/
log("xDelete(",arguments[0],")");
let rc = 0;
wTimeStart('xDelete');
try {
while(filename){
const [hDir, filenamePart] = await getDirForPath(filename, false);
//log("Removing:",hDir, filenamePart);
if(!filenamePart) break;
await hDir.removeEntry(filenamePart, {recursive});
if(0x1234 !== syncDir) break;
@ -252,13 +273,14 @@ const vfsAsyncImpls = {
//error("Delete failed",filename, e.message);
rc = state.sq3Codes.SQLITE_IOERR_DELETE;
}
wTimeEnd();
return rc;
},
xFileSize: async function(fid){
mTimeStart('xFileSize');
log("xFileSize(",arguments,")");
const fh = __openFiles[fid];
let sz;
wTimeStart('xFileSize');
try{
sz = await fh.accessHandle.getSize();
state.s11n.serialize(Number(sz));
@ -267,15 +289,16 @@ const vfsAsyncImpls = {
error("xFileSize():",e, fh);
sz = state.sq3Codes.SQLITE_IOERR;
}
wTimeEnd();
storeAndNotify('xFileSize', sz);
mTimeEnd();
},
xOpen: async function(fid/*sqlite3_file pointer*/, filename, flags){
const opName = 'xOpen';
mTimeStart(opName);
log(opName+"(",arguments[0],")");
const deleteOnClose = (state.sq3Codes.SQLITE_OPEN_DELETEONCLOSE & flags);
const create = (state.sq3Codes.SQLITE_OPEN_CREATE & flags);
wTimeStart('xOpen');
try{
let hDir, filenamePart;
try {
@ -283,6 +306,7 @@ const vfsAsyncImpls = {
}catch(e){
storeAndNotify(opName, state.sql3Codes.SQLITE_NOTFOUND);
mTimeEnd();
wTimeEnd();
return;
}
const hFile = await hDir.getFileHandle(filenamePart, {create});
@ -294,6 +318,7 @@ const vfsAsyncImpls = {
places that limitation on it.
*/
fobj.accessHandle = await hFile.createSyncAccessHandle();
wTimeEnd();
__openFiles[fid] = fobj;
fobj.filenameAbs = filename;
fobj.filenamePart = filenamePart;
@ -304,6 +329,7 @@ const vfsAsyncImpls = {
fobj.deleteOnClose = deleteOnClose;
storeAndNotify(opName, 0);
}catch(e){
wTimeEnd();
error(opName,e);
storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
}
@ -311,14 +337,15 @@ const vfsAsyncImpls = {
},
xRead: async function(fid,n,offset){
mTimeStart('xRead');
log("xRead(",arguments[0],")");
let rc = 0;
try{
const fh = __openFiles[fid];
wTimeStart('xRead');
const nRead = fh.accessHandle.read(
fh.sabView.subarray(0, n),
{at: Number(offset)}
);
wTimeEnd();
if(nRead < n){/* Zero-fill remaining bytes */
fh.sabView.fill(0, nRead, n);
rc = state.sq3Codes.SQLITE_IOERR_SHORT_READ;
@ -332,17 +359,20 @@ const vfsAsyncImpls = {
},
xSync: async function(fid,flags/*ignored*/){
mTimeStart('xSync');
log("xSync(",arguments[0],")");
const fh = __openFiles[fid];
if(!fh.readOnly && fh.accessHandle) await fh.accessHandle.flush();
if(!fh.readOnly && fh.accessHandle){
wTimeStart('xSync');
await fh.accessHandle.flush();
wTimeEnd();
}
storeAndNotify('xSync',0);
mTimeEnd();
},
xTruncate: async function(fid,size){
mTimeStart('xTruncate');
log("xTruncate(",arguments[0],")");
let rc = 0;
const fh = __openFiles[fid];
wTimeStart('xTruncate');
try{
affirmNotRO('xTruncate', fh);
await fh.accessHandle.truncate(size);
@ -350,13 +380,14 @@ const vfsAsyncImpls = {
error("xTruncate():",e,fh);
rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE;
}
wTimeEnd();
storeAndNotify('xTruncate',rc);
mTimeEnd();
},
xWrite: async function(fid,n,offset){
mTimeStart('xWrite');
log("xWrite(",arguments[0],")");
let rc;
wTimeStart('xWrite');
try{
const fh = __openFiles[fid];
affirmNotRO('xWrite', fh);
@ -367,13 +398,14 @@ const vfsAsyncImpls = {
}catch(e){
error("xWrite():",e,fh);
rc = state.sq3Codes.SQLITE_IOERR_WRITE;
}finally{
wTimeEnd();
}
storeAndNotify('xWrite',rc);
mTimeEnd();
}
};

const initS11n = ()=>{
// Achtung: this code is 100% duplicated in the other half of this proxy!
if(state.s11n) return state.s11n;
@ -403,46 +435,69 @@ const initS11n = ()=>{
serialization for simplicy of implementation, but if that
proves imperformant then a lower-level approach will be
created.

If passed "too much data" (more that the shared buffer size
it will either throw or truncate the data (not certain
which)). This routine is only intended for serializing OPFS
VFS arguments and (in at least one special case) result
values, and the buffer is sized to be able to comfortably
handle those.

If passed no arguments then it zeroes out the serialization
state.
*/
state.s11n.serialize = function(...args){
const json = jsonEncoder.encode(JSON.stringify(args));
viewSz.setInt32(0, json.byteLength, state.littleEndian);
viewJson.set(json);
if(args.length){
const json = jsonEncoder.encode(JSON.stringify(args));
viewSz.setInt32(0, json.byteLength, state.littleEndian);
viewJson.set(json);
}else{
viewSz.setInt32(0, 0, state.littleEndian);
}
};
return state.s11n;
};

const waitLoop = function(){
const waitLoop = async function f(){
const opHandlers = Object.create(null);
for(let k of Object.keys(state.opIds)){
for(let k of Object.keys(state.rcIds)){
const o = Object.create(null);
opHandlers[state.opIds[k]] = o;
o.key = k;
o.f = vfsAsyncImpls[k];// || toss("No vfsAsyncImpls[",k,"]");
}
const sabOP = state.sabOP;
for(;;){
let metricsTimer = self.location.port>=1024 ? performance.now() : 0;
// ^^^ in dev environment, dump out these metrics one time after a delay.
while(true){
try {
Atomics.store(sabOP, state.opIds.whichOp, 0);
Atomic.wait(sabOP, state.opIds.whichOp);
const opId = Atomics.load(sabOP, state.opIds.whichOp);
if('timed-out'===Atomics.wait(state.sabOPView, state.opIds.whichOp, 0, 150)){
continue;
}
const opId = Atomics.load(state.sabOPView, state.opIds.whichOp);
Atomics.store(state.sabOPView, state.opIds.whichOp, 0);
const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId);
const args = state.s11n.deserialize();
log("whichOp =",opId,hnd,args);
const rc = 0/*TODO: run op*/;
Atomics.store(sabOP, state.rcIds[hnd.key], rc);
Atomics.notify(sabOP, state.rcIds[hnd.key]);
//warn("waitLoop() whichOp =",opId, hnd, args);
if(hnd.f) await hnd.f(...args);
else error("Missing callback for opId",opId);
}catch(e){
error('in waitLoop():',e.message);
}finally{
// We can't call metrics.dump() from the dev console because this
// thread is continually tied up in Atomics.wait(), so let's
// do, for dev purposes only, a dump one time after 60 seconds.
if(metricsTimer && (performance.now() > metricsTimer + 60000)){
metrics.dump();
metricsTimer = 0;
}
}
}
};
};

navigator.storage.getDirectory().then(function(d){
const wMsg = (type)=>postMessage({type});
state.rootDir = d;
log("state.rootDir =",state.rootDir);
self.onmessage = async function({data}){
log("self.onmessage()",data);
self.onmessage = function({data}){
switch(data.type){
case 'opfs-async-init':{
/* Receive shared state from synchronous partner */
@ -469,20 +524,7 @@ navigator.storage.getDirectory().then(function(d){
metrics.reset();
log("init state",state);
wMsg('opfs-async-inited');
break;
}
default:{
let err;
const m = vfsAsyncImpls[data.type] || toss("Unknown message type:",data.type);
try {
await m(...data.args).catch((e)=>err=e);
}catch(e){
err = e;
}
if(err){
error("Error handling",data.type+"():",e);
storeAndNotify(data.type, state.sq3Codes.SQLITE_ERROR);
}
waitLoop();
break;
}
}
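Putting the two halves together, a new proxied operation under this model follows the same pattern as the mkdir op above: allocate an opIds slot (plus an rcIds slot if the synchronous side waits on it) in sqlite3-api-opfs.js, implement an async handler in vfsAsyncImpls which finishes with storeAndNotify(), and dispatch it from the synchronous side via opRun(). A minimal, purely hypothetical example (no 'rmdir' op exists in this commit):

// Async half (sqlite3-opfs-async-proxy.js): hypothetical handler.
vfsAsyncImpls.rmdir = async function(dirname){
  mTimeStart('rmdir');
  let rc = 0;
  wTimeStart('rmdir');
  try{
    const [hParent, dirPart] = await getDirForPath(dirname);
    await hParent.removeEntry(dirPart); // fails if the directory is not empty
  }catch(e){
    rc = state.sq3Codes.SQLITE_IOERR;
  }
  wTimeEnd();
  storeAndNotify('rmdir', rc);
  mTimeEnd();
};
// Sync half (sqlite3-api-opfs.js): allocate slots during init...
//   state.opIds.rmdir = i++; state.rcIds.rmdir = i++;
// ...then expose it:
//   opfsUtil.rmdir = (absDirName)=>0===opRun('rmdir', absDirName);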
manifest:
@ -1,5 +1,5 @@
C OPFS\sVFS:\sfurther\sinternal\srefactoring\stowards\sexperimenting\swith\sa\snew\scomms\smodel.
D 2022-09-20T03:31:02.139
C An\salternative\smessaging\sstrategy\sfor\sthe\sOPFS\sVFS\sproxy\swhich\suses\sonly\sSharedArrayBuffer\sand\sAtomics,\sinstead\sof\sworker\smessages,\sfor\scommunication\s(only\sthe\sinitial\sone-time\shandshake\sduring\sinitialization\suses\sworker\smessages).\sIt\sruns\sspeedtest1\sapprox.\s15-20%\sfaster\sbut\sstill\s20-ish%\sslower\sthan\sWASMFS.
D 2022-09-20T08:27:57.073
F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
F LICENSE.md df5091916dbb40e6e9686186587125e1b2ff51f022cc334e886c19a0e9982724
@ -474,7 +474,7 @@ F ext/userauth/user-auth.txt e6641021a9210364665fe625d067617d03f27b04
F ext/userauth/userauth.c 7f00cded7dcaa5d47f54539b290a43d2e59f4b1eb5f447545fa865f002fc80cb
F ext/wasm/EXPORTED_FUNCTIONS.fiddle 7fb73f7150ab79d83bb45a67d257553c905c78cd3d693101699243f36c5ae6c3
F ext/wasm/EXPORTED_RUNTIME_METHODS.fiddle a004bd5eeeda6d3b28d16779b7f1a80305bfe009dfc7f0721b042967f0d39d02
F ext/wasm/GNUmakefile b6a5b642e8b3e587d3edcfeb6b6275acbe4730293f4ad46c4997cd932d57aec5
F ext/wasm/GNUmakefile b65cd280059febd4f034b856139fde52f7edb7326133f7e0588b8efcf396709c
F ext/wasm/README.md e1ee1e7c321c6a250bf78a84ca6f5882890a237a450ba5a0649c7a8399194c52
F ext/wasm/api/EXPORTED_FUNCTIONS.sqlite3-api 150a793a47205b8009ac934f3b6d6ebf67b965c072339aaa25ce808a19e116cc
F ext/wasm/api/EXPORTED_RUNTIME_METHODS.sqlite3-api 1ec3c73e7d66e95529c3c64ac3de2470b0e9e7fbf7a5b41261c367cf4f1b7287
@ -484,7 +484,7 @@ F ext/wasm/api/post-js-header.js 0e853b78db83cb1c06b01663549e0e8b4f377f12f5a2d9a
F ext/wasm/api/sqlite3-api-cleanup.js 8564a6077cdcaea9a9f428a019af8a05887f0131e6a2a1e72a7ff1145fadfe77
F ext/wasm/api/sqlite3-api-glue.js 366d580c8e5bf7fcf4c6dee6f646c31f5549bd417ea03a59a0acca00e8ecce30
F ext/wasm/api/sqlite3-api-oo1.js 2d13dddf0d2b4168a9249f124134d37924331e5b55e05dba18b6d661fbeefe48
F ext/wasm/api/sqlite3-api-opfs.js 351459d571166ff4cebaccd6b8aad2b0fe5eac54a8c777ba52c31c931a3eb2e2
F ext/wasm/api/sqlite3-api-opfs.js 1df64b2a11b7f71ba5c7a5807b4d62727e2c712e537eb563caedc67c5a38d149
F ext/wasm/api/sqlite3-api-prologue.js 0d2639387b94c30f492d4aea6e44fb7b16720808678464559458fd2ae3759655
F ext/wasm/api/sqlite3-api-worker1.js ee4cf149cbacb63d06b536674f822aa5088b7e022cdffc69f1f36cebe2f9fea0
F ext/wasm/api/sqlite3-wasi.h 25356084cfe0d40458a902afb465df8c21fc4152c1d0a59b563a3fba59a068f9
@ -513,13 +513,13 @@ F ext/wasm/jaccwabyt/jaccwabyt_test.exports 5ff001ef975c426ffe88d7d8a6e96ec725e5
F ext/wasm/scratchpad-wasmfs-main.html 20cf6f1a8f368e70d01e8c17200e3eaa90f1c8e1029186d836d14b83845fbe06
F ext/wasm/scratchpad-wasmfs-main.js f0836e3576df7a89390d777bb53e142e559e8a79becfb2a5a976490b05a1c4fa
F ext/wasm/speedtest1-wasmfs.html 9d8cd19eab8854d17f7129aa11607cae6f6d9857c505a4aef13000588583d93e
F ext/wasm/speedtest1-worker.html 7bd3ede33e08067f59e8d6e3dc25016fc930222b059d66b2554203bf262aba02
F ext/wasm/speedtest1-worker.html ede59f2c1884bf72e3d650064604b48703c81848250b19b8063d260aa3a2201d
F ext/wasm/speedtest1-worker.js 11e7f68cedd2a83b0e638f94c1d2f58406ba672a7e88b66bff5d4f4284e8ba16
F ext/wasm/speedtest1.html 512addeb3c27c94901178b7bcbde83a6f95c093f9ebe16a2959a0aa0d828cf1d
F ext/wasm/split-speedtest1-script.sh a3e271938d4d14ee49105eb05567c6a69ba4c1f1293583ad5af0cd3a3779e205 x
F ext/wasm/sql/000-mandelbrot.sql 775337a4b80938ac8146aedf88808282f04d02d983d82675bd63d9c2d97a15f0
F ext/wasm/sql/001-sudoku.sql 35b7cb7239ba5d5f193bc05ec379bcf66891bce6f2a5b3879f2f78d0917299b5
F ext/wasm/sqlite3-opfs-async-proxy.js 462081970a6a46d9b2c386474aacad2d81e6629bb554d6cad5c58515f08c8a38
F ext/wasm/sqlite3-opfs-async-proxy.js 038ecd8558abc3f46cfedd560093fef4d460af8c0d0009ab84f2abdc10916a6a
F ext/wasm/sqlite3-worker1-promiser.js 4fd0465688a28a75f1d4ee4406540ba494f49844e3cad0670d0437a001943365
F ext/wasm/sqlite3-worker1.js 0c1e7626304543969c3846573e080c082bf43bcaa47e87d416458af84f340a9e
F ext/wasm/test-opfs-vfs.html eb69dda21eb414b8f5e3f7c1cc0f774103cc9c0f87b2d28a33419e778abfbab5
@ -2026,8 +2026,11 @@ F vsixtest/vsixtest.tcl 6a9a6ab600c25a91a7acc6293828957a386a8a93
F vsixtest/vsixtest.vcxproj.data 2ed517e100c66dc455b492e1a33350c1b20fbcdc
F vsixtest/vsixtest.vcxproj.filters 37e51ffedcdb064aad6ff33b6148725226cd608e
F vsixtest/vsixtest_TemporaryKey.pfx e5b1b036facdb453873e7084e1cae9102ccc67a0
P d4d63e4580ad8d497310608175308c03c517e051d7865cb66aa0b10356612d7d
R 7fef73d3d6085edd0d87db08b80a36c1
P 5ca412ced24b4e3af5f467e710a597ed440badf7b8335346aade11d3cad3d1a1
R eef8f82c266e71b1fc83e097c56f095e
T *branch * opfs-proxy-atomics
T *sym-opfs-proxy-atomics *
T -sym-fiddle-opfs * Cancelled\sby\sbranch.
U stephan
Z d9dc6ab88cc2471cfc1f140af8fec063
Z 1c049de5a4413363c0d5cd656309295d
# Remove this line to create a well-formed Fossil manifest.
manifest.uuid:
@ -1 +1 @@
5ca412ced24b4e3af5f467e710a597ed440badf7b8335346aade11d3cad3d1a1
a83ee3082d89439ea3ad5737e63e25bebb0f91895aca006ce5fecf5b93a2651a