haketilo / common / indexeddb.js @ 92fc67cf

/**
 * This file is part of Haketilo.
 *
 * Function: Facilitate use of IndexedDB within Haketilo.
 *
 * Copyright (C) 2021, 2022 Wojtek Kosior <koszko@koszko.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * As additional permission under GNU GPL version 3 section 7, you
 * may distribute forms of that code without the copy of the GNU
 * GPL normally required by section 4, provided you include this
 * license notice and, in case of non-source distribution, a URL
 * through which recipients can access the Corresponding Source.
 * If you modify file(s) with this exception, you may extend this
 * exception to your version of the file(s), but you are not
 * obligated to do so. If you do not wish to do so, delete this
 * exception statement from your version.
 *
 * As a special exception to the GPL, any HTML file which merely
 * makes function calls to this code, and for that purpose
 * includes it by reference shall be deemed a separate work for
 * copyright law purposes. If you modify this code, you may extend
 * this exception to your version of the code, but you are not
 * obligated to do so. If you do not wish to do so, delete this
 * exception statement from your version.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 *
 * I, Wojtek Kosior, thereby promise not to sue for violation of this file's
 * license. Although I request that you do not make use of this code in a
 * proprietary program, I am not going to enforce this in court.
 */

#IMPORT common/entities.js
#IMPORT common/broadcast.js

let initial_data = (
#IF UNIT_TEST
    {}
#ELSE
#INCLUDE default_settings.json
#ENDIF
);

/* Update when changes are made to database schema. Must have 3 elements */
const db_version = [1, 0, 0];

const nr_reductor = ([i, s], num) => [i - 1, s + num * 1024 ** i];
const version_nr = ver => ver.slice(0, 3).reduce(nr_reductor, [2, 0])[1];
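
/*
 * Note: version_nr() packs a 3-element version array into the single integer
 * version number expected by indexedDB.open(), treating each element as a
 * base-1024 digit. For example (values follow from the definitions above):
 *     version_nr([1, 0, 0])  ->  1 * 1024 ** 2            ->  1048576
 *     version_nr([1, 2, 3])  ->  1048576 + 2 * 1024 + 3   ->  1050627
 */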

const stores = [
    ["file",      {keyPath: "sha256"}],
    ["file_uses", {keyPath: "sha256"}],
    ["resource",  {keyPath: "identifier"}],
    ["mapping",   {keyPath: "identifier"}],
    ["setting",   {keyPath: "name"}],
    ["blocking",  {keyPath: "pattern"}],
    ["repo",      {keyPath: "url"}]
];

let db = null;

/* Generate a Promise that resolves when an IndexedDB request succeeds. */
async function wait_request(idb_request)
{
    let resolve, reject;
    const waiter = new Promise((...cbs) => [resolve, reject] = cbs);
    [idb_request.onsuccess, idb_request.onerror] = [resolve, reject];
    return waiter;
}
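
/*
 * Illustrative example (the store and key below are arbitrary): the returned
 * promise resolves with the request's success event, so callers read the
 * result from `.target.result`, as the wrappers below do.
 *
 *     const req = transaction.objectStore("setting").get("some_name");
 *     const value = (await wait_request(req)).target.result;
 */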

/* asynchronous wrapper for IDBObjectStore's get() method. */
async function idb_get(transaction, store_name, key)
{
    const req = transaction.objectStore(store_name).get(key);
    return (await wait_request(req)).target.result;
}
#EXPORT idb_get

/* asynchronous wrapper for IDBObjectStore's put() method. */
async function idb_put(transaction, store_name, object)
{
    return wait_request(transaction.objectStore(store_name).put(object));
}

/* asynchronous wrapper for IDBObjectStore's delete() method. */
async function idb_del(transaction, store_name, key)
{
    return wait_request(transaction.objectStore(store_name).delete(key));
}

async function perform_upgrade(event) {
    const opened_db = event.target.result;

    /* When we move to a new database schema, we will add upgrade logic here. */
    if (event.oldVersion > 0)
	throw "bad db version: " + event.oldVersion;

    let store;
    for (const [store_name, key_mode] of stores)
	store = opened_db.createObjectStore(store_name, key_mode);

    const ctx = make_context(store.transaction, initial_data.file);
    await _save_items(initial_data.resource, initial_data.mapping, [], ctx);

    return opened_db;
}

/* Open haketilo database, asynchronously return an IDBDatabase object. */
async function get_db() {
    if (db)
	return db;

    let resolve, reject;
    const waiter = new Promise((...cbs) => [resolve, reject] = cbs);

    const request = indexedDB.open("haketilo", version_nr(db_version));
    request.onsuccess       = ev => resolve(ev.target.result);
    request.onerror         = ev => reject("db error: " + ev.target.errorCode);
    request.onupgradeneeded = ev => perform_upgrade(ev).then(resolve, reject);

    const opened_db = await waiter;

    if (db)
	opened_db.close();
    else
	db = opened_db;

    return db;
}
#EXPORT  get_db  AS get

/* Helper function used by make_context(). */
function reject_discard(context)
{
    broadcast.discard(context.sender);
    broadcast.close(context.sender);
    context.reject();
}

/* Helper function used by make_context(). */
function resolve_flush(context)
{
    broadcast.close(context.sender);
    context.resolve();
}

/* Helper function used by start_items_transaction() and get_db(). */
function make_context(transaction, files)
{
    const sender = broadcast.sender_connection();

    files = files || {};
    let resolve, reject;
    const result = new Promise((...cbs) => [resolve, reject] = cbs);

    const context =
	  {sender, transaction, resolve, reject, result, files, file_uses: {}};

    transaction.oncomplete = () => resolve_flush(context);
    transaction.onerror = () => reject_discard(context);

    return context;
}

/*
 * item_store_names should be an array containing the string "mapping", the
 * string "resource", or both. files should be an object with an "sha256"
 * property whose value is yet another object, with keys being hexadecimal
 * representations of files' SHA256 sums and values being the contents of
 * files that might get saved in this transaction.
 *
 * Returned is a context object wrapping the transaction and handling the
 * counting of file references in IndexedDB.
 */
async function start_items_transaction(item_store_names, files)
{
    const db = await get_db();
    const scope = [...item_store_names, "file", "file_uses"];
    return make_context(db.transaction(scope, "readwrite"), files);
}
#EXPORT start_items_transaction
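
/*
 * Illustrative sketch (names below are made up): a typical flow wraps
 * save_item()/remove_item() calls between start_items_transaction() and
 * finalize_transaction().
 *
 *     const files = {sha256: {[some_hex_digest]: some_file_contents}};
 *     const ctx = await start_items_transaction(["resource"], files);
 *     await save_item(some_resource_definition, ctx);
 *     await finalize_transaction(ctx);
 */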

async function incr_file_uses(context, file_ref, by=1)
{
    const sha256 = file_ref.sha256;
    let uses = context.file_uses[sha256];
    if (uses === undefined) {
	uses = await idb_get(context.transaction, "file_uses", sha256);
	if (uses)
	    [uses.new, uses.initial] = [false, uses.uses];
	else
	    uses = {sha256, uses: 0, new: true, initial: 0};

	context.file_uses[sha256] = uses;
    }

    uses.uses = uses.uses + by;
}

const decr_file_uses = (ctx, file_ref) => incr_file_uses(ctx, file_ref, -1);

async function finalize_transaction(context)
{
    for (const uses of Object.values(context.file_uses)) {
	if (uses.uses < 0)
	    console.error("internal error: uses < 0 for file " + uses.sha256);

	const is_new       = uses.new;
	const initial_uses = uses.initial;
	const sha256       = uses.sha256;

	delete uses.new;
	delete uses.initial;

	if (uses.uses < 1) {
	    if (!is_new) {
		idb_del(context.transaction, "file_uses", sha256);
		idb_del(context.transaction, "file",      sha256);
	    }

	    continue;
	}

	if (uses.uses === initial_uses)
	    continue;

	idb_put(context.transaction, "file_uses", uses);

	if (initial_uses > 0)
	    continue;

	const file = context.files.sha256[sha256];
	if (file === undefined) {
	    context.transaction.abort();
	    throw "file not present: " + sha256;
	}

	idb_put(context.transaction, "file", {sha256, contents: file});
    }

    return context.result;
}
#EXPORT finalize_transaction

/*
 * What a sample `data` argument to the function below might look like:
 *
 * data = {
 *     resource: {
 *         "resource1": {
 *             "1": {
 *                 // some stuff
 *             },
 *             "1.1": {
 *                 // some stuff
 *             }
 *         },
 *         "resource2": {
 *             "0.4.3": {
 *                 // some stuff
 *             }
 *         },
 *     },
 *     mapping: {
 *         "mapping1": {
 *             "2": {
 *                 // some stuff
 *             }
 *         },
 *         "mapping2": {
 *             "0.1": {
 *                 // some stuff
 *             }
 *         },
 *     },
 *     file: {
 *         sha256: {
 *             "f9444510dc7403e41049deb133f6892aa6a63c05591b2b59e4ee5b234d7bbd99": "console.log(\"hello\");\n",
 *             "b857cd521cc82fff30f0d316deba38b980d66db29a5388eb6004579cf743c6fd": "console.log(\"bye\");"
 *         }
 *     }
 * }
 */
async function save_items(data)
{
    const item_store_names = ["resource", "mapping"];
    if ("repo" in data)
	item_store_names.push("repo");

    const context = await start_items_transaction(item_store_names, data.file);

    return _save_items(data.resource, data.mapping, data.repo || [], context);
}
#EXPORT save_items
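
/*
 * Illustrative sketch (assuming `data` follows the format shown above): repo
 * URLs can be stored in the same transaction by including a "repo" array.
 *
 *     await save_items({...data, repo: ["https://hydrilla.example.com/"]});
 */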

async function _save_items(resources, mappings, repos, context)
{
    resources = Object.values(resources || {}).map(entities.get_newest);
    mappings  = Object.values(mappings  || {}).map(entities.get_newest);

    for (const item of resources.concat(mappings))
	await save_item(item, context);

    for (const repo_url of repos)
	await idb_put(context.transaction, "repo", {url: repo_url});

    await finalize_transaction(context);
}

/*
 * Save given definition of a resource/mapping to IndexedDB. If the definition
 * (passed as `item`) references files that are not already present in
 * IndexedDB, those files should be provided as values of the `files' object
 * used to create the transaction context.
 *
 * context should be one returned from start_items_transaction() and should be
 * later passed to finalize_transaction() so that files depended on are added to
 * IndexedDB and files that are no longer depended on after this operation are
 * removed from IndexedDB.
 */
async function save_item(item, context)
{
    for (const file_ref of entities.get_files(item))
	await incr_file_uses(context, file_ref);

    broadcast.prepare(context.sender, `idb_changes_${item.type}`,
		      item.identifier);
    await _remove_item(item.type, item.identifier, context, false);
    await idb_put(context.transaction, item.type, item);
}
#EXPORT save_item

/* Helper function used by remove_item() and save_item(). */
async function _remove_item(store_name, identifier, context)
{
    const item = await idb_get(context.transaction, store_name, identifier);
    if (item !== undefined) {
	for (const file_ref of entities.get_files(item))
	    await decr_file_uses(context, file_ref);
    }
}

/*
 * Remove definition of a resource/mapping from IndexedDB.
 *
 * context should be one returned from start_items_transaction() and should be
 * later passed to finalize_transaction() so that files depended on are added to
 * IndexedDB and files that are no longer depended on after this operation are
 * removed from IndexedDB.
 */
async function remove_item(store_name, identifier, context)
{
    broadcast.prepare(context.sender, `idb_changes_${store_name}`, identifier);
    await _remove_item(store_name, identifier, context);
    await idb_del(context.transaction, store_name, identifier);
}

const remove_resource = (id, ctx) => remove_item("resource", id, ctx);
#EXPORT remove_resource

const remove_mapping = (id, ctx) => remove_item("mapping",  id, ctx);
#EXPORT remove_mapping
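
/*
 * Illustrative sketch (the identifier is made up): removals use the same kind
 * of transaction context as saves so that file reference counts stay
 * consistent.
 *
 *     const ctx = await start_items_transaction(["mapping"], {});
 *     await remove_mapping("example-mapping", ctx);
 *     await finalize_transaction(ctx);
 */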

/* Function to retrieve all items from a given store. */
async function get_all(store_name)
{
    const transaction = (await get_db()).transaction([store_name]);
    const all_req = transaction.objectStore(store_name).getAll();

    return (await wait_request(all_req)).target.result;
}
#EXPORT get_all
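
/*
 * Illustrative example: list the identifiers of all stored mappings.
 *
 *     const mappings = await get_all("mapping");
 *     const identifiers = mappings.map(item => item.identifier);
 */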

/*
 * A simplified kind of transaction for modifying stores without special
 * inter-store integrity constraints ("setting", "blocking", "repo").
 */
async function start_simple_transaction(store_name)
{
    const db = await get_db();
    return make_context(db.transaction(store_name, "readwrite"), {});
}

/* Functions to access the "setting" store. */
async function set_setting(name, value)
{
    const context = await start_simple_transaction("setting");
    broadcast.prepare(context.sender, "idb_changes_setting", name);
    await idb_put(context.transaction, "setting", {name, value});
    return finalize_transaction(context);
}
#EXPORT set_setting

async function get_setting(name)
{
    const transaction = (await get_db()).transaction("setting");
    return ((await idb_get(transaction, "setting", name)) || {}).value;
}
#EXPORT get_setting
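
/*
 * Illustrative example (the setting name and value are made up):
 *
 *     await set_setting("option_enabled", true);
 *     await get_setting("option_enabled");   // -> true
 *     await get_setting("no_such_setting");  // -> undefined
 */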

/* Functions to access the "blocking" store. */
async function set_allowed(pattern, allow=true)
{
    const context = await start_simple_transaction("blocking");
    broadcast.prepare(context.sender, "idb_changes_blocking", pattern);
    if (allow === null)
	await idb_del(context.transaction, "blocking", pattern);
    else
	await idb_put(context.transaction, "blocking", {pattern, allow});
    return finalize_transaction(context);
}
#EXPORT set_allowed

const set_disallowed = pattern => set_allowed(pattern, false);
#EXPORT set_disallowed

const set_default_allowing = pattern => set_allowed(pattern, null);
#EXPORT set_default_allowing

async function get_allowing(pattern)
{
    const transaction = (await get_db()).transaction("blocking");
    return ((await idb_get(transaction, "blocking", pattern)) || {}).allow;
}
#EXPORT get_allowing
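
/*
 * Illustrative sketch (the URL pattern is made up): blocking entries can be
 * set, overridden and cleared per pattern.
 *
 *     await set_allowed("https://example.com/***");
 *     await get_allowing("https://example.com/***");        // -> true
 *     await set_disallowed("https://example.com/***");      // allow === false
 *     await set_default_allowing("https://example.com/***");
 *     await get_allowing("https://example.com/***");        // -> undefined
 */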

/* Functions to access the "repo" store. */
async function set_repo(url, remove=false)
{
    const context = await start_simple_transaction("repo");
    broadcast.prepare(context.sender, "idb_changes_repo", url);
    if (remove)
	await idb_del(context.transaction, "repo", url);
    else
	await idb_put(context.transaction, "repo", {url});
    return finalize_transaction(context);
}
#EXPORT set_repo

const del_repo = url => set_repo(url, true);
#EXPORT del_repo

const get_repos = () => get_all("repo").then(list => list.map(obj => obj.url));
#EXPORT get_repos
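
/*
 * Illustrative example (the repository URL is made up):
 *
 *     await set_repo("https://hydrilla.example.com/");
 *     (await get_repos()).includes("https://hydrilla.example.com/"); // true
 *     await del_repo("https://hydrilla.example.com/");
 */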

/* Callback used when listening to broadcasts while tracking db changes. */
async function track_change(tracking, key)
{
    const transaction = (await get_db()).transaction([tracking.store_name]);
    const new_val = await idb_get(transaction, tracking.store_name, key);

    tracking.onchange({key, new_val});
}

/*
 * Monitor changes to the `store_name` IndexedDB object store.
 *
 * `store_name` should be either "resource", "mapping", "setting", "blocking"
 * or "repo".
 *
 * `onchange` should be a callback that will be called when an item is added,
 * modified or removed from the store. The callback will be passed an object
 * representing the change as its first argument. This object will have the
 * form:
 * {
 *     key: "the identifier of modified resource/mapping or settings key",
 *     new_val: undefined // `undefined` if item removed, item object otherwise
 * }
 *
 * Returns a [tracking, all_current_items] array where `tracking` is an object
 * that can be later passed to untrack() to stop tracking changes and
 * `all_current_items` is an array of items currently present in the object
 * store.
 *
 * It is possible that `onchange` gets spuriously fired even when an item is not
 * actually modified or that it only gets called once after multiple quick
 * changes to an item.
 */
async function start_tracking(store_name, onchange)
{
    const tracking = {store_name, onchange};
    tracking.listener =
	broadcast.listener_connection(msg => track_change(tracking, msg[1]));
    broadcast.subscribe(tracking.listener, `idb_changes_${store_name}`);

    return [tracking, await get_all(store_name)];
}

const track = {};
const trackable = ["resource", "mapping", "setting", "blocking", "repo"];
for (const store_name of trackable)
    track[store_name] = onchange => start_tracking(store_name, onchange);
#EXPORT track

const untrack = tracking => broadcast.close(tracking.listener);
#EXPORT untrack
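
/*
 * Illustrative example: react to subsequent changes to the "setting" store,
 * then stop listening.
 *
 *     const [tracking, current_settings] =
 *           await track.setting(change => console.log(change.key,
 *                                                     change.new_val));
 *     // ... later, when updates are no longer needed ...
 *     untrack(tracking);
 */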