author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit     43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree       620249daf56c0258faa40cbdcf9cfba06de2a846 /testing/web-platform/tests/IndexedDB/resources
parent     Initial commit. (diff)
download   firefox-43a97878ce14b72f0981164f87f2e35e14151312.tar.xz
           firefox-43a97878ce14b72f0981164f87f2e35e14151312.zip

Adding upstream version 110.0.1. (refs: upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/web-platform/tests/IndexedDB/resources')
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/cross-origin-helper-frame.html                37
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/file_to_save.txt                              1
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/idb-partitioned-basic-iframe.tentative.html   80
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/idb-partitioned-coverage-iframe.tentative.html  601
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/idb-partitioned-persistence-iframe.tentative.html  76
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/idbfactory-origin-isolation-iframe.html       50
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/idbworker.js                                   34
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/interleaved-cursors-common.js                  188
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/nested-cloning-common.js                       211
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/reading-autoincrement-common.js                93
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/support-promises.js                            355
-rw-r--r--  testing/web-platform/tests/IndexedDB/resources/support.js                                     225
12 files changed, 1951 insertions, 0 deletions
diff --git a/testing/web-platform/tests/IndexedDB/resources/cross-origin-helper-frame.html b/testing/web-platform/tests/IndexedDB/resources/cross-origin-helper-frame.html
new file mode 100644
index 0000000000..997c5a2b72
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/cross-origin-helper-frame.html
@@ -0,0 +1,37 @@
+<!doctype html>
+<meta charset="utf8">
+<title>Performs IndexedDB tasks in response to postMessage</title>
+<script>
+'use strict';
+
+self.addEventListener('message', async event => {
+ const action = event.data.action;
+ let response = null;
+ switch(action) {
+ case 'get-database-names': {
+ const dbInfos = await self.indexedDB.databases();
+ response = dbInfos.map(dbInfo => dbInfo.name);
+ break;
+ }
+
+ case 'delete-database': {
+ const dbName = event.data.name;
+ await new Promise((resolve, reject) => {
+ const request = indexedDB.deleteDatabase(dbName);
+ request.onsuccess = resolve;
+ request.onerror = reject;
+ });
+ response = true;
+ break;
+ }
+ }
+ event.source.postMessage({ action, response }, event.origin);
+ window.close();
+});
+
+// Make up for the fact that the opener of a cross-origin window has no way of
+// knowing when the window finishes loading.
+if (window.opener !== null) {
+ window.opener.postMessage({ action: null, response: 'ready' }, '*');
+}
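+
+// Usage sketch (illustrative only; the driving test page is not part of this
+// file): the opener would wait for the 'ready' message above, then post an
+// action and await the reply, roughly like this, where `crossOriginWindow`
+// and `origin` are assumed to come from the test harness:
+//
+//   const reply = new Promise(resolve => {
+//     window.addEventListener('message', e => resolve(e.data), { once: true });
+//   });
+//   crossOriginWindow.postMessage({ action: 'get-database-names' }, origin);
+//   const { response: databaseNames } = await reply;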
+</script>
diff --git a/testing/web-platform/tests/IndexedDB/resources/file_to_save.txt b/testing/web-platform/tests/IndexedDB/resources/file_to_save.txt
new file mode 100644
index 0000000000..3f5238e841
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/file_to_save.txt
@@ -0,0 +1 @@
+File to save to IndexedDB.
\ No newline at end of file
diff --git a/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-basic-iframe.tentative.html b/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-basic-iframe.tentative.html
new file mode 100644
index 0000000000..ed6bbf272f
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-basic-iframe.tentative.html
@@ -0,0 +1,80 @@
+<!doctype html>
+<meta charset="utf-8">
+<script>
+const dbName = "users";
+
+// Create the database at v1 and detect success via `onsuccess`.
+function createDatabase() {
+ return new Promise((resolve, reject) => {
+ var dbRequest = window.indexedDB.open(dbName, 1);
+ dbRequest.onblocked = () => reject();
+ dbRequest.onerror = () => reject();
+ dbRequest.onsuccess = (e) => {
+ e.target.result.close();
+ resolve();
+ }
+ });
+}
+
+// Open the database at v2 and detect existence via `onupgradeneeded`.
+function doesDatabaseExist() {
+ let didExist = false;
+ return new Promise((resolve, reject) => {
+ var dbRequest = window.indexedDB.open(dbName, 2);
+ dbRequest.onblocked = () => reject();
+ dbRequest.onerror = () => reject();
+ dbRequest.onsuccess = (e) => {
+ e.target.result.close();
+ deleteDatabase().then(() => resolve(didExist));
+ };
+ dbRequest.onupgradeneeded = (e) => {
+ didExist = e.oldVersion != 0;
+ };
+ });
+}
+
+// Delete the database and detect success via `onsuccess`.
+function deleteDatabase() {
+ return new Promise((resolve, reject) => {
+ var dbRequest = window.indexedDB.deleteDatabase(dbName);
+ dbRequest.onblocked = () => reject();
+ dbRequest.onerror = () => reject();
+ dbRequest.onsuccess = () => resolve();
+ });
+}
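+
+// Rough message flow, inferred from this file alone: a same-site iframe (one
+// without `parent.opener`) creates the database on load, a cross-site iframe
+// checks whether that database is visible to it, and a "delete database"
+// message triggers cleanup. The numbered "Step" comments below refer to the
+// parent test that embeds this iframe.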
+
+window.addEventListener("load", () => {
+ if (!parent.opener) {
+ // Step 2
+ createDatabase().then(() => {
+ parent.postMessage(
+ {message: "same-site iframe loaded"},
+ parent.origin,
+ );
+ });
+ } else {
+ // Step 4
+ doesDatabaseExist().then((result) => {
+ parent.opener.postMessage(
+ {
+ message: "cross-site iframe loaded",
+ doesDatabaseExist: result,
+ },
+ parent.opener.origin,
+ );
+ });
+ }
+});
+
+// Step 6
+window.addEventListener("message", (e) => {
+ if (e.data.message == "delete database") {
+ deleteDatabase().then(() => {
+ e.source.postMessage(
+ {message: "database deleted"},
+ e.source.origin,
+ );
+ });
+ }
+});
+</script>
diff --git a/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-coverage-iframe.tentative.html b/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-coverage-iframe.tentative.html
new file mode 100644
index 0000000000..a7be7e2cc9
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-coverage-iframe.tentative.html
@@ -0,0 +1,601 @@
+<!doctype html>
+<meta charset=utf-8>
+<script src="/resources/testharness.js"></script>
+<script src="support.js"></script>
+<!-- TODO(crbug.com/1218100): We should verify the full IDB surface area inside
+an iframe, but for now a single test with an assortment of verifications is
+sufficient to check that third-party storage partitioning didn't break anything. -->
+
+<!-- This block is from delete-request-queue.htm -->
+<script>
+let saw;
+indexeddb_test(
+ (t, db) => {
+ saw = expect(t, ['delete1', 'delete2']);
+ let r = indexedDB.deleteDatabase(db.name);
+ r.onerror = t.unreached_func('delete should succeed');
+ r.onsuccess = t.step_func(e => saw('delete1'));
+ },
+ (t, db) => {
+ let r = indexedDB.deleteDatabase(db.name);
+ r.onerror = t.unreached_func('delete should succeed');
+ r.onsuccess = t.step_func(e => saw('delete2'));
+
+ db.close();
+ },
+ 'Deletes are processed in order');
+</script>
+
+<!-- This block is from idbcursor-advance-continue-async.htm -->
+<script>
+function upgrade_func(t, db, tx) {
+ var objStore = db.createObjectStore("test");
+ objStore.createIndex("index", "");
+
+ objStore.add("data", 1);
+ objStore.add("data2", 2);
+}
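+
+// Note on the four tests below (copied from idbcursor-advance-continue-async.htm):
+// advance() and continue() only schedule the cursor to move; cursor.key and
+// cursor.value keep reporting the current record until the next success event
+// fires, which is what the back-to-back assertions are checking.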
+
+indexeddb_test(
+ upgrade_func,
+ function(t, db) {
+ var count = 0;
+ var rq = db.transaction("test", "readonly", {durability: 'relaxed'}).objectStore("test").openCursor();
+
+ rq.onsuccess = t.step_func(function(e) {
+ if (!e.target.result) {
+ assert_equals(count, 2, 'count');
+ t.done();
+ return;
+ }
+ var cursor = e.target.result;
+
+ switch(count) {
+ case 0:
+ assert_equals(cursor.value, "data")
+ assert_equals(cursor.key, 1)
+ cursor.advance(1)
+ assert_equals(cursor.value, "data")
+ assert_equals(cursor.key, 1)
+ break
+
+ case 1:
+ assert_equals(cursor.value, "data2")
+ assert_equals(cursor.key, 2)
+ cursor.advance(1)
+ assert_equals(cursor.value, "data2")
+ assert_equals(cursor.key, 2)
+ break
+
+ default:
+ assert_unreached("Unexpected count: " + count)
+ }
+
+ count++;
+ });
+ rq.onerror = t.unreached_func("unexpected error")
+ },
+ document.title + " - advance"
+);
+
+indexeddb_test(
+ upgrade_func,
+ function(t, db) {
+ var count = 0;
+ var rq = db.transaction("test", "readonly", {durability: 'relaxed'}).objectStore("test").index("index").openCursor();
+
+ rq.onsuccess = t.step_func(function(e) {
+ if (!e.target.result) {
+ assert_equals(count, 2, 'count');
+ t.done();
+ return;
+ }
+ var cursor = e.target.result;
+
+ switch(count) {
+ case 0:
+ assert_equals(cursor.value, "data")
+ assert_equals(cursor.key, "data")
+ assert_equals(cursor.primaryKey, 1)
+ cursor.continue("data2")
+ assert_equals(cursor.value, "data")
+ assert_equals(cursor.key, "data")
+ assert_equals(cursor.primaryKey, 1)
+ break
+
+ case 1:
+ assert_equals(cursor.value, "data2")
+ assert_equals(cursor.key, "data2")
+ assert_equals(cursor.primaryKey, 2)
+ cursor.continue()
+ assert_equals(cursor.value, "data2")
+ assert_equals(cursor.key, "data2")
+ assert_equals(cursor.primaryKey, 2)
+ break
+
+ default:
+ assert_unreached("Unexpected count: " + count)
+ }
+
+ count++;
+ });
+ rq.onerror = t.unreached_func("unexpected error")
+ },
+ document.title + " - continue"
+);
+
+indexeddb_test(
+ upgrade_func,
+ function(t, db) {
+ var count = 0;
+ var rq = db.transaction("test", "readonly", {durability: 'relaxed'}).objectStore("test").index("index").openCursor();
+
+ rq.onsuccess = t.step_func(function(e) {
+ if (!e.target.result) {
+ assert_equals(count, 2, 'count');
+ t.done();
+ return;
+ }
+ var cursor = e.target.result;
+ cursor.advance(1)
+
+ switch(count) {
+ case 0:
+ assert_equals(cursor.value, "data")
+ assert_equals(cursor.key, "data")
+ assert_equals(cursor.primaryKey, 1)
+ break
+
+ case 1:
+ assert_equals(cursor.value, "data2")
+ assert_equals(cursor.key, "data2")
+ assert_equals(cursor.primaryKey, 2)
+ break
+
+ default:
+ assert_unreached("Unexpected count: " + count)
+ }
+
+ count++;
+ });
+ rq.onerror = t.unreached_func("unexpected error")
+ },
+ document.title + " - fresh advance still async"
+);
+
+indexeddb_test(
+ upgrade_func,
+ function(t, db) {
+ var count = 0;
+ var rq = db.transaction("test", "readonly", {durability: 'relaxed'}).objectStore("test").openCursor();
+
+ rq.onsuccess = t.step_func(function(e) {
+ if (!e.target.result) {
+ assert_equals(count, 2, 'count');
+ t.done();
+ return;
+ }
+ var cursor = e.target.result;
+ cursor.continue()
+
+ switch(count) {
+ case 0:
+ assert_equals(cursor.value, "data")
+ assert_equals(cursor.key, 1)
+ break
+
+ case 1:
+ assert_equals(cursor.value, "data2")
+ assert_equals(cursor.key, 2)
+ break
+
+ default:
+ assert_unreached("Unexpected count: " + count)
+ }
+
+ count++;
+ });
+ rq.onerror = t.unreached_func("unexpected error")
+ },
+ document.title + " - fresh continue still async"
+);
+</script>
+
+<!-- This block is from idbindex_getAll.htm -->
+<script>
+var alphabet = 'abcdefghijklmnopqrstuvwxyz'.split('');
+var ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'.split('');
+
+function getall_test(func, name) {
+ indexeddb_test(
+ function(t, connection, tx) {
+ var store = connection.createObjectStore('generated',
+ {autoIncrement: true, keyPath: 'id'});
+ var index = store.createIndex('test_idx', 'upper');
+ alphabet.forEach(function(letter) {
+ store.put({ch: letter, upper: letter.toUpperCase()});
+ });
+
+ store = connection.createObjectStore('out-of-line', null);
+ index = store.createIndex('test_idx', 'upper');
+ alphabet.forEach(function(letter) {
+ store.put({ch: letter, upper: letter.toUpperCase()}, letter);
+ });
+
+ store = connection.createObjectStore('out-of-line-not-unique', null);
+ index = store.createIndex('test_idx', 'half');
+ alphabet.forEach(function(letter) {
+ if (letter <= 'm')
+ store.put({ch: letter, half: 'first'}, letter);
+ else
+ store.put({ch: letter, half: 'second'}, letter);
+ });
+
+ store = connection.createObjectStore('out-of-line-multi', null);
+ index = store.createIndex('test_idx', 'attribs', {multiEntry: true});
+ alphabet.forEach(function(letter) {
+        var attrs = [];
+ if (['a', 'e', 'i', 'o', 'u'].indexOf(letter) != -1)
+ attrs.push('vowel');
+ else
+ attrs.push('consonant');
+ if (letter == 'a')
+ attrs.push('first');
+ if (letter == 'z')
+ attrs.push('last');
+ store.put({ch: letter, attribs: attrs}, letter);
+ });
+
+ store = connection.createObjectStore('empty', null);
+ index = store.createIndex('test_idx', 'upper');
+ },
+ func,
+ name
+ );
+}
+
+function createGetAllRequest(t, storeName, connection, range, maxCount) {
+ var transaction = connection.transaction(storeName, 'readonly');
+ var store = transaction.objectStore(storeName);
+ var index = store.index('test_idx');
+ var req = index.getAll(range, maxCount);
+ req.onerror = t.unreached_func('getAll request should succeed');
+ return req;
+}
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection, 'C');
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), ['c']);
+ assert_array_equals(data.map(function(e) { return e.upper; }), ['C']);
+ t.done();
+ });
+ }, 'Single item get');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'empty', connection);
+ req.onsuccess = t.step_func(function(evt) {
+ assert_array_equals(evt.target.result, [],
+ 'getAll() on empty object store should return an empty array');
+ t.done();
+ });
+ }, 'Empty object store');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection);
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), alphabet);
+ assert_array_equals(data.map(function(e) { return e.upper; }), ALPHABET);
+ t.done();
+ });
+ }, 'Get all keys');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection, undefined,
+ 10);
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), 'abcdefghij'.split(''));
+ assert_array_equals(data.map(function(e) { return e.upper; }), 'ABCDEFGHIJ'.split(''));
+ t.done();
+ });
+ }, 'maxCount=10');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection,
+ IDBKeyRange.bound('G', 'M'));
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_array_equals(data.map(function(e) { return e.ch; }), 'ghijklm'.split(''));
+ assert_array_equals(data.map(function(e) { return e.upper; }), 'GHIJKLM'.split(''));
+ t.done();
+ });
+ }, 'Get bound range');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection,
+ IDBKeyRange.bound('G', 'M'), 3);
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), 'ghi'.split(''));
+ assert_array_equals(data.map(function(e) { return e.upper; }), 'GHI'.split(''));
+ t.done();
+ });
+ }, 'Get bound range with maxCount');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection,
+ IDBKeyRange.bound('G', 'K', false, true));
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), 'ghij'.split(''));
+ assert_array_equals(data.map(function(e) { return e.upper; }), 'GHIJ'.split(''));
+ t.done();
+ });
+ }, 'Get upper excluded');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection,
+ IDBKeyRange.bound('G', 'K', true, false));
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), 'hijk'.split(''));
+ assert_array_equals(data.map(function(e) { return e.upper; }), 'HIJK'.split(''));
+ t.done();
+ });
+ }, 'Get lower excluded');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'generated',
+ connection, IDBKeyRange.bound(4, 15), 3);
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_true(Array.isArray(data));
+ assert_equals(data.length, 0);
+ t.done();
+ });
+ }, 'Get bound range (generated) with maxCount');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line',
+ connection, "Doesn't exist");
+ req.onsuccess = t.step_func(function(evt) {
+ assert_array_equals(evt.target.result, [],
+ 'getAll() using a nonexistent key should return an empty array');
+ t.done();
+ req.onerror = t.unreached_func('getAll request should succeed');
+ });
+ }, 'Non existent key');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line', connection,
+ undefined, 0);
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), alphabet);
+ assert_array_equals(data.map(function(e) { return e.upper; }), ALPHABET);
+ t.done();
+ });
+ }, 'maxCount=0');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line-not-unique', connection,
+ 'first');
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), 'abcdefghijklm'.split(''));
+ assert_true(data.every(function(e) { return e.half === 'first'; }));
+ t.done();
+ });
+ }, 'Retrieve multiEntry key');
+
+getall_test(function(t, connection) {
+ var req = createGetAllRequest(t, 'out-of-line-multi', connection,
+ 'vowel');
+ req.onsuccess = t.step_func(function(evt) {
+ var data = evt.target.result;
+ assert_class_string(data, 'Array', 'result should be an array');
+ assert_array_equals(data.map(function(e) { return e.ch; }), ['a', 'e', 'i', 'o', 'u']);
+ assert_array_equals(data[0].attribs, ['vowel', 'first']);
+ assert_true(data.every(function(e) { return e.attribs[0] === 'vowel'; }));
+ t.done();
+ });
+ }, 'Retrieve one key multiple values');
+</script>
+
+<!-- This block is from idbobjectstore_openKeyCursor.htm -->
+<script>
+function store_test(func, name) {
+ indexeddb_test(
+ function(t, db, tx) {
+ var store = db.createObjectStore("store");
+ for (var i = 0; i < 10; ++i) {
+ store.put("value: " + i, i);
+ }
+ },
+ function(t, db) {
+ var tx = db.transaction("store", "readonly", {durability: 'relaxed'});
+ var store = tx.objectStore("store");
+ func(t, db, tx, store);
+ }, name);
+}
+
+store_test(function(t, db, tx, store) {
+ var expected = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ var actual = [];
+ var request = store.openKeyCursor();
+ request.onsuccess = t.step_func(function() {
+ var cursor = request.result;
+ if (!cursor)
+ return;
+ assert_equals(cursor.direction, "next");
+ assert_false("value" in cursor);
+ assert_equals(indexedDB.cmp(cursor.key, cursor.primaryKey), 0);
+ actual.push(cursor.key);
+ cursor.continue();
+ });
+
+ tx.onabort = t.unreached_func("transaction aborted");
+ tx.oncomplete = t.step_func(function() {
+ assert_array_equals(expected, actual, "keys should match");
+ t.done();
+ });
+
+}, "IDBObjectStore.openKeyCursor() - forward iteration");
+
+store_test(function(t, db, tx, store) {
+ var expected = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0];
+ var actual = [];
+ var request = store.openKeyCursor(null, "prev");
+ request.onsuccess = t.step_func(function() {
+ var cursor = request.result;
+ if (!cursor)
+ return;
+ assert_equals(cursor.direction, "prev");
+ assert_false("value" in cursor);
+ assert_equals(indexedDB.cmp(cursor.key, cursor.primaryKey), 0);
+ actual.push(cursor.key);
+ cursor.continue();
+ });
+
+ tx.onabort = t.unreached_func("transaction aborted");
+ tx.oncomplete = t.step_func(function() {
+ assert_array_equals(expected, actual, "keys should match");
+ t.done();
+ });
+
+}, "IDBObjectStore.openKeyCursor() - reverse iteration");
+
+store_test(function(t, db, tx, store) {
+ var expected = [4, 5, 6];
+ var actual = [];
+ var request = store.openKeyCursor(IDBKeyRange.bound(4, 6));
+ request.onsuccess = t.step_func(function() {
+ var cursor = request.result;
+ if (!cursor)
+ return;
+ assert_equals(cursor.direction, "next");
+ assert_false("value" in cursor);
+ assert_equals(indexedDB.cmp(cursor.key, cursor.primaryKey), 0);
+ actual.push(cursor.key);
+ cursor.continue();
+ });
+
+ tx.onabort = t.unreached_func("transaction aborted");
+ tx.oncomplete = t.step_func(function() {
+ assert_array_equals(expected, actual, "keys should match");
+ t.done();
+ });
+
+}, "IDBObjectStore.openKeyCursor() - forward iteration with range");
+
+store_test(function(t, db, tx, store) {
+ var expected = [6, 5, 4];
+ var actual = [];
+ var request = store.openKeyCursor(IDBKeyRange.bound(4, 6), "prev");
+ request.onsuccess = t.step_func(function() {
+ var cursor = request.result;
+ if (!cursor)
+ return;
+ assert_equals(cursor.direction, "prev");
+ assert_false("value" in cursor);
+ assert_equals(indexedDB.cmp(cursor.key, cursor.primaryKey), 0);
+ actual.push(cursor.key);
+ cursor.continue();
+ });
+
+ tx.onabort = t.unreached_func("transaction aborted");
+ tx.oncomplete = t.step_func(function() {
+ assert_array_equals(expected, actual, "keys should match");
+ t.done();
+ });
+
+}, "IDBObjectStore.openKeyCursor() - reverse iteration with range");
+
+store_test(function(t, db, tx, store) {
+ assert_throws_dom("DataError", function() { store.openKeyCursor(NaN); },
+ "openKeyCursor should throw on invalid number key");
+ assert_throws_dom("DataError", function() { store.openKeyCursor(new Date(NaN)); },
+ "openKeyCursor should throw on invalid date key");
+ assert_throws_dom("DataError", function() {
+ var cycle = [];
+ cycle.push(cycle);
+ store.openKeyCursor(cycle);
+ }, "openKeyCursor should throw on invalid array key");
+ assert_throws_dom("DataError", function() { store.openKeyCursor({}); },
+ "openKeyCursor should throw on invalid key type");
+ setTimeout(t.step_func(function() {
+ assert_throws_dom("TransactionInactiveError", function() { store.openKeyCursor(); },
+ "openKeyCursor should throw if transaction is inactive");
+ t.done();
+ }), 0);
+
+}, "IDBObjectStore.openKeyCursor() - invalid inputs");
+</script>
+
+<!-- This block is from idbtransaction.htm -->
+<script>
+async_test(function(t) {
+ var dbname = "idbtransaction-" + document.location + t.name;
+ indexedDB.deleteDatabase(dbname);
+ var open_rq = indexedDB.open(dbname);
+
+ open_rq.onblocked = t.unreached_func('open_rq.onblocked');
+ open_rq.onerror = t.unreached_func('open_rq.onerror');
+
+ open_rq.onupgradeneeded = t.step_func(function(e) {
+ t.add_cleanup(function() {
+ open_rq.onerror = function(e) {
+ e.preventDefault();
+ };
+ open_rq.result.close();
+ indexedDB.deleteDatabase(open_rq.result.name);
+ });
+
+ assert_equals(e.target, open_rq, "e.target is reusing the same IDBOpenDBRequest");
+ assert_equals(e.target.transaction, open_rq.transaction, "IDBOpenDBRequest.transaction");
+
+ assert_true(e.target.transaction instanceof IDBTransaction, "transaction instanceof IDBTransaction");
+ t.done();
+ });
+
+}, document.title + " - request gotten by the handler");
+
+async_test(function(t) {
+ var dbname = "idbtransaction-" + document.location + t.name;
+ indexedDB.deleteDatabase(dbname);
+ var open_rq = indexedDB.open(dbname);
+
+ assert_equals(open_rq.transaction, null, "IDBOpenDBRequest.transaction");
+ assert_equals(open_rq.source, null, "IDBOpenDBRequest.source");
+ assert_equals(open_rq.readyState, "pending", "IDBOpenDBRequest.readyState");
+
+ assert_true(open_rq instanceof IDBOpenDBRequest, "open_rq instanceof IDBOpenDBRequest");
+ assert_equals(open_rq + "", "[object IDBOpenDBRequest]", "IDBOpenDBRequest (open_rq)");
+
+ open_rq.onblocked = t.unreached_func('open_rq.onblocked');
+ open_rq.onerror = t.unreached_func('open_rq.onerror');
+
+ open_rq.onupgradeneeded = t.step_func(function() {
+ t.add_cleanup(function() {
+ open_rq.onerror = function(e) {
+ e.preventDefault();
+ };
+ open_rq.result.close();
+ indexedDB.deleteDatabase(open_rq.result.name);
+ });
+ t.done();
+ });
+
+}, document.title + " - request returned by open()");
+</script>
diff --git a/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-persistence-iframe.tentative.html b/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-persistence-iframe.tentative.html
new file mode 100644
index 0000000000..ad6869f945
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/idb-partitioned-persistence-iframe.tentative.html
@@ -0,0 +1,76 @@
+<!doctype html>
+<meta charset="utf-8">
+<script>
+const dbName = "users";
+
+// Create the database at v1 and detect success via `onsuccess`.
+function createDatabase() {
+ return new Promise((resolve, reject) => {
+ var dbRequest = window.indexedDB.open(dbName, 1);
+ dbRequest.onblocked = () => reject();
+ dbRequest.onerror = () => reject();
+ dbRequest.onsuccess = (e) => {
+ e.target.result.close();
+ resolve();
+ }
+ });
+}
+
+// Open the database at v2 and detect existence via `onupgradeneeded`.
+function doesDatabaseExist() {
+ let didExist = false;
+ return new Promise((resolve, reject) => {
+ var dbRequest = window.indexedDB.open(dbName, 2);
+ dbRequest.onblocked = () => reject();
+ dbRequest.onerror = () => reject();
+ dbRequest.onsuccess = (e) => {
+ e.target.result.close();
+ deleteDatabase().then(() => resolve(didExist));
+ };
+ dbRequest.onupgradeneeded = (e) => {
+ didExist = e.oldVersion != 0;
+ };
+ });
+}
+
+// Delete the database and detect success via `onsuccess`.
+function deleteDatabase() {
+ return new Promise((resolve, reject) => {
+ var dbRequest = window.indexedDB.deleteDatabase(dbName);
+ dbRequest.onblocked = () => reject();
+ dbRequest.onerror = () => reject();
+ dbRequest.onsuccess = () => resolve();
+ });
+}
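+
+// Rough message flow, inferred from this file alone: the frame announces
+// "iframe loaded", then the embedding test asks it to "create database" and
+// later to "check database"; each reply is reported back via postMessage. The
+// numbered "Step" comments refer to the parent test.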
+
+// Step 2
+window.addEventListener("load", () => {
+ parent.postMessage(
+ {message: "iframe loaded"},
+ "*",
+ );
+});
+
+window.addEventListener("message", (e) => {
+ if (e.data.message == "create database") {
+ // Step 4
+ createDatabase().then(() => {
+ parent.postMessage(
+ {message: "database created"},
+ "*",
+ );
+ });
+ } else if (e.data.message == "check database") {
+ // Step 6
+ doesDatabaseExist().then((result) => {
+ parent.postMessage(
+ {
+ message: "database checked",
+ doesDatabaseExist: result,
+ },
+ "*",
+ );
+ });
+ }
+});
+</script>
diff --git a/testing/web-platform/tests/IndexedDB/resources/idbfactory-origin-isolation-iframe.html b/testing/web-platform/tests/IndexedDB/resources/idbfactory-origin-isolation-iframe.html
new file mode 100644
index 0000000000..0f16bcadaa
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/idbfactory-origin-isolation-iframe.html
@@ -0,0 +1,50 @@
+<!DOCTYPE html>
+<title>This iframe keeps a transaction on a database alive indefinitely to test</title>
+<script>
+
+// Keeps the passed transaction alive indefinitely (by making requests
+// against the named store). Returns a function that asserts that the
+// transaction has not already completed and then ends the request loop so that
+// the transaction may autocommit and complete.
+function keep_alive(tx, store_name) {
+ let completed = false;
+ tx.addEventListener('complete', () => { completed = true; });
+
+ let keepSpinning = true;
+
+ function spin() {
+ if (!keepSpinning)
+ return;
+ tx.objectStore(store_name).get(0).onsuccess = spin;
+ }
+ spin();
+
+ return () => {
+ assert_false(completed, 'Transaction completed while kept alive');
+ keepSpinning = false;
+ };
+}
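+
+// Illustrative sketch (this frame never ends the loop itself): a caller that
+// wants to stop keeping the transaction alive would hold on to the returned
+// function and invoke it later, e.g.
+//
+//   const stopKeepAlive = keep_alive(tx, 's');
+//   // ... work that needs the transaction to stay open ...
+//   stopKeepAlive();  // asserts the transaction never completed, then lets it
+//                     // autocommit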
+
+async function run() {
+ const dbs_to_delete = await indexedDB.databases();
+ for (const db_info of dbs_to_delete) {
+ let request = indexedDB.deleteDatabase(db_info.name);
+ await new Promise((resolve, reject) => {
+ request.onsuccess = resolve;
+ request.onerror = reject;
+ });
+ }
+
+ var openRequest = indexedDB.open('db-isolation-test');
+ openRequest.onupgradeneeded = () => {
+ openRequest.result.createObjectStore('s');
+ };
+ openRequest.onsuccess = () => {
+ var tx = openRequest.result.transaction('s', 'readonly', {durability: 'relaxed'});
+ keep_alive(tx, 's');
+ window.parent.postMessage("keep_alive_started", "*");
+ };
+}
+
+run();
+</script>
diff --git a/testing/web-platform/tests/IndexedDB/resources/idbworker.js b/testing/web-platform/tests/IndexedDB/resources/idbworker.js
new file mode 100644
index 0000000000..04a421fa38
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/idbworker.js
@@ -0,0 +1,34 @@
+var db
+
+self.addEventListener('message', MessageHandler, false)
+
+function MessageHandler(e)
+{
+ var open_rq, idb = self.indexedDB || self.msIndexedDB || self.webkitIndexedDB || self.mozIndexedDB
+
+ if (!idb)
+ {
+ self.postMessage(false)
+ return
+ }
+ else
+ self.postMessage(true)
+
+ open_rq = idb.open("webworker101", 1)
+
+ open_rq.onupgradeneeded = function(e) {
+ db = e.target.result
+ db.createObjectStore("store")
+ .add("test", 1)
+ }
+ open_rq.onsuccess = function(e) {
+ db = e.target.result
+ db.onerror = function() { self.postMessage("db.error") }
+ db.transaction("store", "readonly", {durability: 'relaxed'}).objectStore("store").get(1).onsuccess = function(e) {
+ self.postMessage(e.target.result)
+ db.close()
+ }
+ }
+ open_rq.onerror = function() { self.postMessage("open.error") }
+ open_rq.onblocked = function() { self.postMessage("open.blocked") }
+}
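+
+// Usage sketch (illustrative; the actual test page is not part of this file
+// and the worker path is assumed):
+//
+//   var worker = new Worker('resources/idbworker.js')
+//   worker.onmessage = function (e) {
+//     // First message: true/false for IndexedDB availability; later messages:
+//     // the stored value ("test") or an error/blocked string.
+//   }
+//   worker.postMessage(0)  // any message triggers MessageHandler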
diff --git a/testing/web-platform/tests/IndexedDB/resources/interleaved-cursors-common.js b/testing/web-platform/tests/IndexedDB/resources/interleaved-cursors-common.js
new file mode 100644
index 0000000000..09ed078c1f
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/interleaved-cursors-common.js
@@ -0,0 +1,188 @@
+// Infrastructure shared by interleaved-cursors-{small,large}.html
+
+// Number of objects that each iterator goes over.
+const itemCount = 10;
+
+// One out of every `largeObjectRatio` objects is large; the rest are small.
+const largeObjectRatio = 5;
+
+// Size of large objects. This should exceed the size of a block in the storage
+// method underlying the browser's IndexedDB implementation. For example, this
+// needs to exceed the LevelDB block size on Chrome, and the SQLite block size
+// on Firefox.
+const largeObjectSize = 48 * 1024;
+
+function objectKey(cursorIndex, itemIndex) {
+ return `${cursorIndex}-key-${itemIndex}`;
+}
+
+function objectValue(cursorIndex, itemIndex) {
+ if ((cursorIndex * itemCount + itemIndex) % largeObjectRatio === 0) {
+ // We use a typed array (as opposed to a string) because IndexedDB
+ // implementations may serialize strings using UTF-8 or UTF-16, yielding
+ // larger IndexedDB entries than we'd expect. It's very unlikely that an
+ // IndexedDB implementation would use anything other than the raw buffer to
+ // serialize a typed array.
+ const buffer = new Uint8Array(largeObjectSize);
+
+ // Some IndexedDB implementations, like LevelDB, compress their data blocks
+ // before storing them to disk. We use a simple 32-bit xorshift PRNG, which
+  // should be sufficient to foil any fast general-purpose compression scheme.
+
+ // 32-bit xorshift - the seed can't be zero
+ let state = 1000 + (cursorIndex * itemCount + itemIndex);
+
+ for (let i = 0; i < largeObjectSize; ++i) {
+ state ^= state << 13;
+ state ^= state >> 17;
+ state ^= state << 5;
+ buffer[i] = state & 0xff;
+ }
+
+ return buffer;
+ }
+ return [cursorIndex, 'small', itemIndex];
+}
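+
+// For example, objectKey(2, 3) is '2-key-3' and objectValue(2, 3) is the small
+// array [2, 'small', 3], because (2 * itemCount + 3) % largeObjectRatio != 0,
+// while objectValue(2, 5) is a largeObjectSize-byte pseudo-random Uint8Array.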
+
+// Writes the objects to be read by one cursor. Returns a promise that resolves
+// when the write completes.
+//
+// We want to avoid creating a large transaction, because that is outside the
+// test's scope, and it's a bad practice. So we break up the writes across
+// multiple transactions. For simplicity, each transaction writes all the
+// objects that will be read by a cursor.
+function writeCursorObjects(database, cursorIndex) {
+ return new Promise((resolve, reject) => {
+ const transaction = database.transaction('cache', 'readwrite', {durability: 'relaxed'});
+ transaction.onabort = () => { reject(transaction.error); };
+
+ const store = transaction.objectStore('cache');
+ for (let i = 0; i < itemCount; ++i) {
+ store.put({
+ key: objectKey(cursorIndex, i), value: objectValue(cursorIndex, i)});
+ }
+ transaction.oncomplete = resolve;
+ });
+}
+
+// Returns a promise that resolves when the store has been populated.
+function populateTestStore(testCase, database, cursorCount) {
+ let promiseChain = Promise.resolve();
+
+ for (let i = 0; i < cursorCount; ++i)
+ promiseChain = promiseChain.then(() => writeCursorObjects(database, i));
+
+ return promiseChain;
+}
+
+// Reads cursors in an interleaved fashion, as shown below.
+//
+// Given N cursors, each of which points to the beginning of a K-item sequence,
+// the following accesses will be made.
+//
+// OC(i) = open cursor i
+// RD(i, j) = read result of cursor i, which should be at item j
+// CC(i) = continue cursor i
+// | = wait for onsuccess on the previous OC or CC
+//
+// OC(1) | RD(1, 1) OC(2) | RD(2, 1) OC(3) | ... | RD(n-1, 1) CC(n) |
+// RD(n, 1) CC(1) | RD(1, 2) CC(2) | RD(2, 2) CC(3) | ... | RD(n-1, 2) CC(n) |
+// RD(n, 2) CC(1) | RD(1, 3) CC(2) | RD(2, 3) CC(3) | ... | RD(n-1, 3) CC(n) |
+// ...
+// RD(n, k-1) CC(1) | RD(1, k) CC(2) | RD(2, k) CC(3) | ... | RD(n-1, k) CC(n) |
+// RD(n, k) done
+function interleaveCursors(testCase, store, cursorCount) {
+ return new Promise((resolve, reject) => {
+ // The cursors used for iteration are stored here so each cursor's onsuccess
+ // handler can call continue() on the next cursor.
+ const cursors = [];
+
+    // The results of IDBObjectStore.openCursor() calls are stored here so we
+    // can change the requests' onsuccess handler after every
+ // IDBCursor.continue() call.
+ const requests = [];
+
+ const checkCursorState = (cursorIndex, itemIndex) => {
+ const cursor = cursors[cursorIndex];
+ assert_equals(cursor.key, objectKey(cursorIndex, itemIndex));
+ assert_equals(cursor.value.key, objectKey(cursorIndex, itemIndex));
+ assert_equals(
+ cursor.value.value.join('-'),
+ objectValue(cursorIndex, itemIndex).join('-'));
+ };
+
+ const openCursor = (cursorIndex, callback) => {
+ const request = store.openCursor(
+ IDBKeyRange.lowerBound(objectKey(cursorIndex, 0)));
+ requests[cursorIndex] = request;
+
+ request.onsuccess = testCase.step_func(() => {
+ const cursor = request.result;
+ cursors[cursorIndex] = cursor;
+ checkCursorState(cursorIndex, 0);
+ callback();
+ });
+ request.onerror = event => reject(request.error);
+ };
+
+ const readItemFromCursor = (cursorIndex, itemIndex, callback) => {
+ const request = requests[cursorIndex];
+ request.onsuccess = testCase.step_func(() => {
+ const cursor = request.result;
+ cursors[cursorIndex] = cursor;
+ checkCursorState(cursorIndex, itemIndex);
+ callback();
+ });
+
+ const cursor = cursors[cursorIndex];
+ cursor.continue();
+ };
+
+ // We open all the cursors one at a time, then cycle through the cursors and
+ // call continue() on each of them. This access pattern causes maximal
+    // thrashing of an LRU cursor cache. Eviction scheme aside, any cache will
+ // have to evict some cursors, and this access pattern verifies that the
+ // cache correctly restores the state of evicted cursors.
+ const steps = [];
+ for (let cursorIndex = 0; cursorIndex < cursorCount; ++cursorIndex)
+ steps.push(openCursor.bind(null, cursorIndex));
+ for (let itemIndex = 1; itemIndex < itemCount; ++itemIndex) {
+ for (let cursorIndex = 0; cursorIndex < cursorCount; ++cursorIndex)
+ steps.push(readItemFromCursor.bind(null, cursorIndex, itemIndex));
+ }
+
+ const runStep = (stepIndex) => {
+ if (stepIndex === steps.length) {
+ resolve();
+ return;
+ }
+ steps[stepIndex](() => { runStep(stepIndex + 1); });
+ };
+ runStep(0);
+ });
+}
+
+function cursorTest(cursorCount) {
+ promise_test(testCase => {
+ return createDatabase(testCase, (database, transaction) => {
+ const store = database.createObjectStore('cache',
+ { keyPath: 'key', autoIncrement: true });
+ }).then(database => {
+ return populateTestStore(testCase, database, cursorCount).then(
+ () => database);
+ }).then(database => {
+ database.close();
+ }).then(() => {
+ return openDatabase(testCase);
+ }).then(database => {
+ const transaction = database.transaction('cache', 'readonly', {durability: 'relaxed'});
+    transaction.onabort = testCase.unreached_func('transaction aborted');
+
+ const store = transaction.objectStore('cache');
+ return interleaveCursors(testCase, store, cursorCount).then(
+ () => database);
+ }).then(database => {
+ database.close();
+ });
+ }, `${cursorCount} cursors`);
+}
diff --git a/testing/web-platform/tests/IndexedDB/resources/nested-cloning-common.js b/testing/web-platform/tests/IndexedDB/resources/nested-cloning-common.js
new file mode 100644
index 0000000000..db5f710ceb
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/nested-cloning-common.js
@@ -0,0 +1,211 @@
+'use strict';
+
+// Should be large enough to trigger large value handling in the IndexedDB
+// engines that have special code paths for large values.
+const wrapThreshold = 128 * 1024;
+
+// Returns an IndexedDB value created from a descriptor.
+//
+// See the bottom of the file for descriptor samples.
+function createValue(descriptor) {
+ if (typeof(descriptor) != 'object')
+ return descriptor;
+
+ if (Array.isArray(descriptor))
+ return descriptor.map((element) => createValue(element));
+
+ if (!descriptor.hasOwnProperty('type')) {
+ const value = {};
+ for (let property of Object.getOwnPropertyNames(descriptor))
+ value[property] = createValue(descriptor[property]);
+ return value;
+ }
+
+ switch (descriptor.type) {
+ case 'blob':
+ return new Blob(
+ [largeValue(descriptor.size, descriptor.seed)],
+ { type: descriptor.mimeType });
+ case 'buffer':
+ return largeValue(descriptor.size, descriptor.seed);
+ }
+}
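+
+// For example (illustrative; the real descriptors live in the tests that use
+// this helper), a descriptor such as
+//   { blob: { type: 'blob', size: wrapThreshold, mimeType: 'text/plain', seed: 1 } }
+// yields an object whose `blob` property is a Blob of `wrapThreshold`
+// pseudo-random bytes, while a bare string or number descriptor is returned
+// unchanged.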
+
+// Checks an IndexedDB value against a descriptor.
+//
+// Returns a Promise that resolves if the value passes the check.
+//
+// See the bottom of the file for descriptor samples.
+function checkValue(testCase, value, descriptor) {
+ if (typeof(descriptor) != 'object') {
+ assert_equals(
+ descriptor, value,
+ 'IndexedDB result should match put() argument');
+ return Promise.resolve();
+ }
+
+ if (Array.isArray(descriptor)) {
+ assert_true(
+ Array.isArray(value),
+ 'IndexedDB result type should match put() argument');
+ assert_equals(
+ descriptor.length, value.length,
+ 'IndexedDB result array size should match put() argument');
+
+ const subChecks = [];
+ for (let i = 0; i < descriptor.length; ++i)
+ subChecks.push(checkValue(testCase, value[i], descriptor[i]));
+ return Promise.all(subChecks);
+ }
+
+ if (!descriptor.hasOwnProperty('type')) {
+ assert_array_equals(
+ Object.getOwnPropertyNames(value).sort(),
+ Object.getOwnPropertyNames(descriptor).sort(),
+ 'IndexedDB result object properties should match put() argument');
+ const subChecks = [];
+ return Promise.all(Object.getOwnPropertyNames(descriptor).map(property =>
+ checkValue(testCase, value[property], descriptor[property])));
+ }
+
+ switch (descriptor.type) {
+ case 'blob':
+ assert_class_string(
+ value, 'Blob',
+ 'IndexedDB result class should match put() argument');
+ assert_equals(
+ descriptor.mimeType, value.type,
+ 'IndexedDB result Blob MIME type should match put() argument');
+ assert_equals(descriptor.size, value.size, 'incorrect Blob size');
+ return new Promise((resolve, reject) => {
+ const reader = new FileReader();
+ reader.onloadend = testCase.step_func(() => {
+ if (reader.error) {
+ reject(reader.error);
+ return;
+ }
+ const view = new Uint8Array(reader.result);
+ assert_equals(
+ view.join(','),
+ largeValue(descriptor.size, descriptor.seed).join(','),
+ 'IndexedDB result Blob content should match put() argument');
+ resolve();
+ });
+ reader.readAsArrayBuffer(value);
+ });
+
+ case 'buffer':
+ assert_class_string(
+ value, 'Uint8Array',
+ 'IndexedDB result type should match put() argument');
+ assert_equals(
+ value.join(','),
+ largeValue(descriptor.size, descriptor.seed).join(','),
+ 'IndexedDB result typed array content should match put() argument');
+ return Promise.resolve();
+ }
+}
+
+function cloningTestInternal(label, valueDescriptors, options) {
+ promise_test(testCase => {
+ return createDatabase(testCase, (database, transaction) => {
+ let store;
+ if (options.useKeyGenerator) {
+ store = database.createObjectStore(
+ 'test-store', { keyPath: 'primaryKey', autoIncrement: true });
+ } else {
+ store = database.createObjectStore('test-store');
+ }
+ for (let i = 0; i < valueDescriptors.length; ++i) {
+ if (options.useKeyGenerator) {
+ store.put(createValue(valueDescriptors[i]));
+ } else {
+ store.put(createValue(valueDescriptors[i]), i + 1);
+ }
+ }
+ }).then(database => {
+ const transaction = database.transaction(['test-store'], 'readonly');
+ const store = transaction.objectStore('test-store');
+ const subChecks = [];
+ let resultIndex = 0;
+ for (let i = 0; i < valueDescriptors.length; ++i) {
+ subChecks.push(new Promise((resolve, reject) => {
+ const requestIndex = i;
+ const primaryKey = requestIndex + 1;
+ const request = store.get(primaryKey);
+ request.onerror =
+ testCase.step_func(() => { reject(request.error); });
+ request.onsuccess = testCase.step_func(() => {
+ assert_equals(
+ resultIndex, requestIndex,
+ 'IDBRequest success events should be fired in request order');
+ ++resultIndex;
+
+ const result = request.result;
+ if (options.useKeyGenerator) {
+ assert_equals(
+ result.primaryKey, primaryKey,
+ 'IndexedDB result should have auto-incremented primary key');
+ delete result.primaryKey;
+ }
+ resolve(checkValue(
+ testCase, result, valueDescriptors[requestIndex]));
+ });
+ }));
+ }
+
+ subChecks.push(new Promise((resolve, reject) => {
+ const requestIndex = valueDescriptors.length;
+ const request = store.getAll();
+ request.onerror =
+ testCase.step_func(() => { reject(request.error); });
+ request.onsuccess = testCase.step_func(() => {
+ assert_equals(
+ resultIndex, requestIndex,
+ 'IDBRequest success events should be fired in request order');
+ ++resultIndex;
+ const result = request.result;
+ if (options.useKeyGenerator) {
+ for (let i = 0; i < valueDescriptors.length; ++i) {
+ const primaryKey = i + 1;
+ assert_equals(
+ result[i].primaryKey, primaryKey,
+ 'IndexedDB result should have auto-incremented primary key');
+ delete result[i].primaryKey;
+ }
+ }
+ resolve(checkValue(testCase, result, valueDescriptors));
+ });
+ }));
+
+ return Promise.all(subChecks);
+ });
+ }, label);
+}
+
+// Performs a series of put()s and verifies that get()s and getAll() match.
+//
+// Each element of the valueDescriptors array is fed into createValue(), and the
+// resulting value is written to IndexedDB via a put() request. After the writes
+// complete, the values are read in the same order in which they were written.
+// Last, all the results are read one more time via a getAll().
+//
+// The test verifies that the get() / getAll() results match the arguments to
+// put() and that the order in which the get() result events are fired matches
+// the order of the get() requests.
+function cloningTest(label, valueDescriptors) {
+ cloningTestInternal(label, valueDescriptors, { useKeyGenerator: false });
+}
+
+// cloningTest, with coverage for key generators.
+//
+// This creates two tests. One test performs a series of put()s and verifies
+// that get()s and getAll() match, exactly like cloningTest.
+// The other test performs the same put()s in an object store with a key
+// generator, and checks that the key generator works properly.
+function cloningTestWithKeyGenerator(label, valueDescriptors) {
+ cloningTestInternal(label, valueDescriptors, { useKeyGenerator: false });
+ cloningTestInternal(
+ label + " with key generator", valueDescriptors,
+ { useKeyGenerator: true });
+}
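+
+// Usage sketch (illustrative only): a test file built on these helpers might
+// call
+//
+//   cloningTest('small typed array', [{ type: 'buffer', size: 64, seed: 1 }]);
+//   cloningTestWithKeyGenerator('wrapped blob', [
+//     { blob: { type: 'blob', size: wrapThreshold, mimeType: 'text/plain',
+//               seed: 2 } },
+//   ]);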
diff --git a/testing/web-platform/tests/IndexedDB/resources/reading-autoincrement-common.js b/testing/web-platform/tests/IndexedDB/resources/reading-autoincrement-common.js
new file mode 100644
index 0000000000..45c8ffef92
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/reading-autoincrement-common.js
@@ -0,0 +1,93 @@
+// Returns the "name" property written to the object with the given ID.
+function nameForId(id) {
+ return `Object ${id}`;
+}
+
+// Initial database setup used by all the reading-autoincrement tests.
+async function setupAutoincrementDatabase(testCase) {
+ const database = await createDatabase(testCase, database => {
+ const store = database.createObjectStore(
+ 'store', { autoIncrement: true, keyPath: 'id' });
+ store.createIndex('by_name', 'name', { unique: true });
+ store.createIndex('by_id', 'id', { unique: true });
+
+ // Cover writing from the initial upgrade transaction.
+ for (let i = 1; i <= 16; ++i) {
+ if (i % 2 == 0) {
+ store.put({name: nameForId(i), id: i});
+ } else {
+ store.put({name: nameForId(i)});
+ }
+ }
+ });
+
+ // Cover writing from a subsequent transaction.
+ const transaction = database.transaction(['store'], 'readwrite');
+ const store = transaction.objectStore('store');
+ for (let i = 17; i <= 32; ++i) {
+ if (i % 2 == 0) {
+ store.put({name: nameForId(i), id: i});
+ } else {
+ store.put({name: nameForId(i)});
+ }
+ }
+ await promiseForTransaction(testCase, transaction);
+
+ return database;
+}
+
+// Returns the IDs used by the object store, sorted as strings.
+//
+// This is used to determine the correct order of records when retrieved from an
+// index that uses stringified IDs.
+function idsSortedByStringCompare() {
+ const stringIds = [];
+ for (let i = 1; i <= 32; ++i)
+ stringIds.push(i);
+ stringIds.sort((a, b) => indexedDB.cmp(`${a}`, `${b}`));
+ return stringIds;
+}
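+
+// For example, under string comparison '10' sorts before '2', so the array
+// returned above starts [1, 10, 11, ..., 19, 2, 20, ...] rather than
+// [1, 2, 3, ...].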
+
+async function iterateCursor(testCase, cursorRequest, callback) {
+ // This uses requestWatcher() directly instead of using promiseForRequest()
+ // inside the loop to avoid creating multiple EventWatcher instances. In turn,
+ // this avoids ending up with O(N) listeners for the request and O(N^2)
+ // dispatched events.
+ const eventWatcher = requestWatcher(testCase, cursorRequest);
+ while (true) {
+ const event = await eventWatcher.wait_for('success');
+ const cursor = event.target.result;
+ if (cursor === null)
+ return;
+ callback(cursor);
+ cursor.continue();
+ }
+}
+
+// Returns equivalent information to getAllKeys() by iterating a cursor.
+//
+// Returns an array with one dictionary per entry in the source. The dictionary
+// has the properties "key" and "primaryKey".
+async function getAllKeysViaCursor(testCase, cursorSource) {
+ const results = [];
+ await iterateCursor(testCase, cursorSource.openKeyCursor(), cursor => {
+ results.push({ key: cursor.key, primaryKey: cursor.primaryKey });
+ });
+ return results;
+}
+
+// Returns equivalent information to getAll() by iterating a cursor.
+//
+// Returns an array with one dictionary per entry in the source. The dictionary
+// has the properties "key", "primaryKey" and "value".
+async function getAllViaCursor(testCase, cursorSource) {
+ const results = [];
+ await iterateCursor(testCase, cursorSource.openCursor(), cursor => {
+ results.push({
+ key: cursor.key,
+ primaryKey: cursor.primaryKey,
+ value: cursor.value,
+ });
+ });
+ return results;
+}
\ No newline at end of file
diff --git a/testing/web-platform/tests/IndexedDB/resources/support-promises.js b/testing/web-platform/tests/IndexedDB/resources/support-promises.js
new file mode 100644
index 0000000000..9128bfe151
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/support-promises.js
@@ -0,0 +1,355 @@
+'use strict';
+
+// Returns an IndexedDB database name that is unique to the test case.
+function databaseName(testCase) {
+ return 'db' + self.location.pathname + '-' + testCase.name;
+}
+
+// EventWatcher covering all the events defined on IndexedDB requests.
+//
+// The events cover IDBRequest and IDBOpenDBRequest.
+function requestWatcher(testCase, request) {
+ return new EventWatcher(testCase, request,
+ ['blocked', 'error', 'success', 'upgradeneeded']);
+}
+
+// EventWatcher covering all the events defined on IndexedDB transactions.
+//
+// The events cover IDBTransaction.
+function transactionWatcher(testCase, request) {
+ return new EventWatcher(testCase, request, ['abort', 'complete', 'error']);
+}
+
+// Promise that resolves with an IDBRequest's result.
+//
+// The promise only resolves if IDBRequest receives the "success" event. Any
+// other event causes the promise to reject with an error. This is correct in
+// most cases, but insufficient for indexedDB.open(), which issues
+// "upgradeneded" events under normal operation.
+function promiseForRequest(testCase, request) {
+ const eventWatcher = requestWatcher(testCase, request);
+ return eventWatcher.wait_for('success').then(event => event.target.result);
+}
+
+// Promise that resolves when an IDBTransaction completes.
+//
+// The promise resolves with undefined if IDBTransaction receives the "complete"
+// event, and rejects with an error for any other event.
+function promiseForTransaction(testCase, request) {
+ const eventWatcher = transactionWatcher(testCase, request);
+ return eventWatcher.wait_for('complete').then(() => {});
+}
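+
+// Usage sketch for the two helpers above (illustrative only):
+//
+//   const transaction = database.transaction(['store'], 'readonly');
+//   const request = transaction.objectStore('store').count();
+//   const count = await promiseForRequest(testCase, request);
+//   await promiseForTransaction(testCase, transaction);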
+
+// Migrates an IndexedDB database whose name is unique for the test case.
+//
+// newVersion must be greater than the database's current version.
+//
+// migrationCallback will be called during a versionchange transaction and will
+// be given the created database, the versionchange transaction, and the database
+// open request.
+//
+// Returns a promise. If the versionchange transaction goes through, the promise
+// resolves to an IndexedDB database that should be closed by the caller. If the
+// versionchange transaction is aborted, the promise resolves to an error.
+function migrateDatabase(testCase, newVersion, migrationCallback) {
+ return migrateNamedDatabase(
+ testCase, databaseName(testCase), newVersion, migrationCallback);
+}
+
+// Migrates an IndexedDB database.
+//
+// newVersion must be greater than the database's current version.
+//
+// migrationCallback will be called during a versionchange transaction and will
+// be given the created database, the versionchange transaction, and the database
+// open request.
+//
+// Returns a promise. If the versionchange transaction goes through, the promise
+// resolves to an IndexedDB database that should be closed by the caller. If the
+// versionchange transaction is aborted, the promise resolves to an error.
+function migrateNamedDatabase(
+ testCase, databaseName, newVersion, migrationCallback) {
+ // We cannot use eventWatcher.wait_for('upgradeneeded') here, because
+ // the versionchange transaction auto-commits before the Promise's then
+ // callback gets called.
+ return new Promise((resolve, reject) => {
+ const request = indexedDB.open(databaseName, newVersion);
+ request.onupgradeneeded = testCase.step_func(event => {
+ const database = event.target.result;
+ const transaction = event.target.transaction;
+ let shouldBeAborted = false;
+ let requestEventPromise = null;
+
+ // We wrap IDBTransaction.abort so we can set up the correct event
+ // listeners and expectations if the test chooses to abort the
+ // versionchange transaction.
+ const transactionAbort = transaction.abort.bind(transaction);
+ transaction.abort = () => {
+ transaction._willBeAborted();
+ transactionAbort();
+ }
+ transaction._willBeAborted = () => {
+ requestEventPromise = new Promise((resolve, reject) => {
+ request.onerror = event => {
+ event.preventDefault();
+ resolve(event.target.error);
+ };
+ request.onsuccess = () => reject(new Error(
+ 'indexedDB.open should not succeed for an aborted ' +
+ 'versionchange transaction'));
+ });
+ shouldBeAborted = true;
+ }
+
+      // If the migration callback returns a promise, we'll wait for it to resolve.
+ // This simplifies some tests.
+ const callbackResult = migrationCallback(database, transaction, request);
+ if (!shouldBeAborted) {
+ request.onerror = null;
+ request.onsuccess = null;
+ requestEventPromise = promiseForRequest(testCase, request);
+ }
+
+ // requestEventPromise needs to be the last promise in the chain, because
+ // we want the event that it resolves to.
+ resolve(Promise.resolve(callbackResult).then(() => requestEventPromise));
+ });
+ request.onerror = event => reject(event.target.error);
+ request.onsuccess = () => {
+ const database = request.result;
+ testCase.add_cleanup(() => { database.close(); });
+ reject(new Error(
+ 'indexedDB.open should not succeed without creating a ' +
+ 'versionchange transaction'));
+ };
+ }).then(databaseOrError => {
+ if (databaseOrError instanceof IDBDatabase)
+ testCase.add_cleanup(() => { databaseOrError.close(); });
+ return databaseOrError;
+ });
+}
+
+// Creates an IndexedDB database whose name is unique for the test case.
+//
+// setupCallback will be called during a versionchange transaction, and will be
+// given the created database, the versionchange transaction, and the database
+// open request.
+//
+// Returns a promise that resolves to an IndexedDB database. The caller should
+// close the database.
+function createDatabase(testCase, setupCallback) {
+ return createNamedDatabase(testCase, databaseName(testCase), setupCallback);
+}
+
+// Creates an IndexedDB database.
+//
+// setupCallback will be called during a versionchange transaction, and will be
+// given the created database, the versionchange transaction, and the database
+// open request.
+//
+// Returns a promise that resolves to an IndexedDB database. The caller should
+// close the database.
+function createNamedDatabase(testCase, databaseName, setupCallback) {
+ const request = indexedDB.deleteDatabase(databaseName);
+ return promiseForRequest(testCase, request).then(() => {
+ testCase.add_cleanup(() => { indexedDB.deleteDatabase(databaseName); });
+ return migrateNamedDatabase(testCase, databaseName, 1, setupCallback)
+ });
+}
+
+// Opens an IndexedDB database without performing schema changes.
+//
+// The given version number must match the database's current version.
+//
+// Returns a promise that resolves to an IndexedDB database. The caller should
+// close the database.
+function openDatabase(testCase, version) {
+ return openNamedDatabase(testCase, databaseName(testCase), version);
+}
+
+// Opens an IndexedDB database without performing schema changes.
+//
+// The given version number must match the database's current version.
+//
+// Returns a promise that resolves to an IndexedDB database. The caller should
+// close the database.
+function openNamedDatabase(testCase, databaseName, version) {
+ const request = indexedDB.open(databaseName, version);
+ return promiseForRequest(testCase, request).then(database => {
+ testCase.add_cleanup(() => { database.close(); });
+ return database;
+ });
+}
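+
+// Putting the helpers above together (illustrative sketch, assuming a
+// promise_test provides `testCase`):
+//
+//   const database = await createDatabase(testCase, (db, transaction) => {
+//     db.createObjectStore('store');
+//   });
+//   database.close();
+//   const reopened = await openDatabase(testCase);  // same name, same version
+//   // ... run assertions against `reopened` ...
+//   reopened.close();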
+
+// The data used for the 'books' object store records in the first example of the
+// IndexedDB specification.
+const BOOKS_RECORD_DATA = [
+ { title: 'Quarry Memories', author: 'Fred', isbn: 123456 },
+ { title: 'Water Buffaloes', author: 'Fred', isbn: 234567 },
+ { title: 'Bedrock Nights', author: 'Barney', isbn: 345678 },
+];
+
+// Creates a 'books' object store whose contents closely resembles the first
+// example in the IndexedDB specification.
+const createBooksStore = (testCase, database) => {
+ const store = database.createObjectStore('books',
+ { keyPath: 'isbn', autoIncrement: true });
+ store.createIndex('by_author', 'author');
+ store.createIndex('by_title', 'title', { unique: true });
+ for (const record of BOOKS_RECORD_DATA)
+ store.put(record);
+ return store;
+}
+
+// Creates a 'books' object store whose contents closely resembles the first
+// example in the IndexedDB specification, just without autoincrementing.
+const createBooksStoreWithoutAutoIncrement = (testCase, database) => {
+ const store = database.createObjectStore('books',
+ { keyPath: 'isbn' });
+ store.createIndex('by_author', 'author');
+ store.createIndex('by_title', 'title', { unique: true });
+ for (const record of BOOKS_RECORD_DATA)
+ store.put(record);
+ return store;
+}
+
+// Creates a 'not_books' object store used to test renaming into existing or
+// deleted store names.
+function createNotBooksStore(testCase, database) {
+ const store = database.createObjectStore('not_books');
+ store.createIndex('not_by_author', 'author');
+ store.createIndex('not_by_title', 'title', { unique: true });
+ return store;
+}
+
+// Verifies that an object store's indexes match the indexes used to create the
+// books store in the test database's version 1.
+//
+// The errorMessage is used if the assertions fail. It can state that the
+// IndexedDB implementation being tested is incorrect, or that the testing code
+// is using it incorrectly.
+function checkStoreIndexes(testCase, store, errorMessage) {
+ assert_array_equals(
+ store.indexNames, ['by_author', 'by_title'], errorMessage);
+ const authorIndex = store.index('by_author');
+ const titleIndex = store.index('by_title');
+ return Promise.all([
+ checkAuthorIndexContents(testCase, authorIndex, errorMessage),
+ checkTitleIndexContents(testCase, titleIndex, errorMessage),
+ ]);
+}
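+
+// Example usage (illustrative sketch), against a database whose version 1
+// upgrade called createBooksStore():
+//
+//   const tx = db.transaction('books', 'readonly');
+//   await checkStoreIndexes(
+//       testCase, tx.objectStore('books'),
+//       'Index contents should match the version 1 books store');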
+
+// Verifies that an object store's key generator is in the same state as the
+// key generator created for the books store in the test database's version 1.
+//
+// The errorMessage is used if the assertions fail. It can state that the
+// IndexedDB implementation being tested is incorrect, or that the testing code
+// is using it incorrectly.
+function checkStoreGenerator(testCase, store, expectedKey, errorMessage) {
+ const request = store.put(
+ { title: 'Bedrock Nights ' + expectedKey, author: 'Barney' });
+ return promiseForRequest(testCase, request).then(result => {
+ assert_equals(result, expectedKey, errorMessage);
+ });
+}
+
+// Verifies that an object store's contents matches the contents used to create
+// the books store in the test database's version 1.
+//
+// The errorMessage is used if the assertions fail. It can state that the
+// IndexedDB implementation being tested is incorrect, or that the testing code
+// is using it incorrectly.
+function checkStoreContents(testCase, store, errorMessage) {
+ const request = store.get(123456);
+ return promiseForRequest(testCase, request).then(result => {
+ assert_equals(result.isbn, BOOKS_RECORD_DATA[0].isbn, errorMessage);
+ assert_equals(result.author, BOOKS_RECORD_DATA[0].author, errorMessage);
+ assert_equals(result.title, BOOKS_RECORD_DATA[0].title, errorMessage);
+ });
+}
+
+// Verifies that an index matches the 'by_author' index created on the books
+// store in the test database's version 1.
+//
+// The errorMessage is used if the assertions fail. It can state that the
+// IndexedDB implementation being tested is incorrect, or that the testing code
+// is using it incorrectly.
+function checkAuthorIndexContents(testCase, index, errorMessage) {
+ const request = index.get(BOOKS_RECORD_DATA[2].author);
+ return promiseForRequest(testCase, request).then(result => {
+ assert_equals(result.isbn, BOOKS_RECORD_DATA[2].isbn, errorMessage);
+ assert_equals(result.title, BOOKS_RECORD_DATA[2].title, errorMessage);
+ });
+}
+
+// Verifies that an index matches the 'by_title' index created on the books
+// store in the test database's version 1.
+//
+// The errorMessage is used if the assertions fail. It can state that the
+// IndexedDB implementation being tested is incorrect, or that the testing code
+// is using it incorrectly.
+function checkTitleIndexContents(testCase, index, errorMessage) {
+ const request = index.get(BOOKS_RECORD_DATA[2].title);
+ return promiseForRequest(testCase, request).then(result => {
+ assert_equals(result.isbn, BOOKS_RECORD_DATA[2].isbn, errorMessage);
+ assert_equals(result.author, BOOKS_RECORD_DATA[2].author, errorMessage);
+ });
+}
+
+// Returns a Uint8Array filled with pseudorandom data.
+//
+// The PRNG should be sufficient to defeat compression schemes, but it is not
+// cryptographically strong.
+function largeValue(size, seed) {
+ const buffer = new Uint8Array(size);
+
+  // 32-bit xorshift - the internal state must never be zero, hence the offset
+  // added to the seed.
+ let state = 1000 + seed;
+
+ for (let i = 0; i < size; ++i) {
+ state ^= state << 13;
+ state ^= state >> 17;
+ state ^= state << 5;
+ buffer[i] = state & 0xff;
+ }
+
+ return buffer;
+}
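+
+// Example usage (illustrative sketch; assumes a 'store' object store with
+// keyPath 'id'): storing an incompressible 1 MB value exercises large-value
+// handling without the data being collapsed by any compression layer:
+//
+//   const tx = db.transaction('store', 'readwrite');
+//   tx.objectStore('store').put({ id: 1, data: largeValue(1024 * 1024, 1) });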
+
+// Deletes every database reported by indexedDB.databases(), waiting for each
+// deletion to complete.
+async function deleteAllDatabases(testCase) {
+  const dbs_to_delete = await indexedDB.databases();
+  for (const db_info of dbs_to_delete) {
+    const request = indexedDB.deleteDatabase(db_info.name);
+    const eventWatcher = requestWatcher(testCase, request);
+    await eventWatcher.wait_for('success');
+  }
+}
+
+// Keeps the passed transaction alive indefinitely (by making requests
+// against the named store). Returns a function that asserts that the
+// transaction has not already completed and then ends the request loop so that
+// the transaction may autocommit and complete.
+function keepAlive(testCase, transaction, storeName) {
+ let completed = false;
+ transaction.addEventListener('complete', () => { completed = true; });
+
+ let keepSpinning = true;
+
+ function spin() {
+ if (!keepSpinning)
+ return;
+ transaction.objectStore(storeName).get(0).onsuccess = spin;
+ }
+ spin();
+
+ return testCase.step_func(() => {
+ assert_false(completed, 'Transaction completed while kept alive');
+ keepSpinning = false;
+ });
+}
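+
+// Example usage (illustrative sketch):
+//
+//   const tx = db.transaction('store', 'readwrite');
+//   const completeTx = keepAlive(testCase, tx, 'store');
+//   // ... do work that must finish before the transaction commits ...
+//   completeTx();  // asserts the transaction is still alive, then lets it
+//                  // autocommit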
+
+// Returns a promise that resolves after a setTimeout of |ms| milliseconds,
+// yielding to the event loop so that a long-running task can be broken up.
+function timeoutPromise(ms) {
+ return new Promise(resolve => { setTimeout(resolve, ms); });
+}
diff --git a/testing/web-platform/tests/IndexedDB/resources/support.js b/testing/web-platform/tests/IndexedDB/resources/support.js
new file mode 100644
index 0000000000..18ed971ad7
--- /dev/null
+++ b/testing/web-platform/tests/IndexedDB/resources/support.js
@@ -0,0 +1,225 @@
+/* Delete created databases
+ *
+ * Go through each finished test, see if it has an associated database. Close
+ * that and delete the database. */
+add_completion_callback(function(tests)
+{
+ for (var i in tests)
+ {
+ if(tests[i].db)
+ {
+ tests[i].db.close();
+ self.indexedDB.deleteDatabase(tests[i].db.name);
+ }
+ }
+});
+
+function fail(test, desc) {
+ return test.step_func(function(e) {
+ if (e && e.message && e.target.error)
+ assert_unreached(desc + " (" + e.target.error.name + ": " + e.message + ")");
+ else if (e && e.message)
+ assert_unreached(desc + " (" + e.message + ")");
+ else if (e && e.target.readyState === 'done' && e.target.error)
+ assert_unreached(desc + " (" + e.target.error.name + ")");
+ else
+ assert_unreached(desc);
+ });
+}
+
+function createdb(test, dbname, version)
+{
+ var rq_open = createdb_for_multiple_tests(dbname, version);
+ return rq_open.setTest(test);
+}
+
+function createdb_for_multiple_tests(dbname, version) {
+ var rq_open,
+ fake_open = {},
+ test = null,
+ dbname = (dbname ? dbname : "testdb-" + new Date().getTime() + Math.random() );
+
+ if (version)
+ rq_open = self.indexedDB.open(dbname, version);
+ else
+ rq_open = self.indexedDB.open(dbname);
+
+ function auto_fail(evt, current_test) {
+    /* Fail the test when an event fires unless the test explicitly installed
+     * a handler for it via the on<event> setter defined below. */
+ rq_open.manually_handled = {};
+
+ rq_open.addEventListener(evt, function(e) {
+ if (current_test !== test) {
+ return;
+ }
+
+ test.step(function() {
+ if (!rq_open.manually_handled[evt]) {
+ assert_unreached("unexpected open." + evt + " event");
+ }
+
+ if (e.target.result + '' == '[object IDBDatabase]' &&
+ !this.db) {
+ this.db = e.target.result;
+
+ this.db.onerror = fail(test, 'unexpected db.error');
+ this.db.onabort = fail(test, 'unexpected db.abort');
+ this.db.onversionchange =
+ fail(test, 'unexpected db.versionchange');
+ }
+ });
+ });
+ rq_open.__defineSetter__("on" + evt, function(h) {
+ rq_open.manually_handled[evt] = true;
+ if (!h)
+ rq_open.addEventListener(evt, function() {});
+ else
+ rq_open.addEventListener(evt, test.step_func(h));
+ });
+ }
+
+ // add a .setTest method to the IDBOpenDBRequest object
+ Object.defineProperty(rq_open, 'setTest', {
+ enumerable: false,
+ value: function(t) {
+ test = t;
+
+ auto_fail("upgradeneeded", test);
+ auto_fail("success", test);
+ auto_fail("blocked", test);
+ auto_fail("error", test);
+
+ return this;
+ }
+ });
+
+ return rq_open;
+}
+
+function assert_key_equals(actual, expected, description) {
+ assert_equals(indexedDB.cmp(actual, expected), 0, description);
+}
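+
+// Example (illustrative): IndexedDB keys are compared by value, so two
+// distinct arrays with the same contents are equal as keys even though they
+// are different JavaScript objects:
+//
+//   assert_key_equals([1, 2, 3], [1, 2, 3], 'array keys compare by value');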
+
+// Usage:
+// indexeddb_test(
+// (test_object, db_connection, upgrade_tx, open_request) => {
+// // Database creation logic.
+// },
+// (test_object, db_connection, open_request) => {
+// // Test logic.
+// test_object.done();
+// },
+// 'Test case description');
+function indexeddb_test(upgrade_func, open_func, description, options) {
+ async_test(function(t) {
+ options = Object.assign({upgrade_will_abort: false}, options);
+ var dbname = location + '-' + t.name;
+ var del = indexedDB.deleteDatabase(dbname);
+ del.onerror = t.unreached_func('deleteDatabase should succeed');
+ var open = indexedDB.open(dbname, 1);
+ open.onupgradeneeded = t.step_func(function() {
+ var db = open.result;
+ t.add_cleanup(function() {
+ // If open didn't succeed already, ignore the error.
+ open.onerror = function(e) {
+ e.preventDefault();
+ };
+ db.close();
+ indexedDB.deleteDatabase(db.name);
+ });
+ var tx = open.transaction;
+ upgrade_func(t, db, tx, open);
+ });
+ if (options.upgrade_will_abort) {
+ open.onsuccess = t.unreached_func('open should not succeed');
+ } else {
+ open.onerror = t.unreached_func('open should succeed');
+ open.onsuccess = t.step_func(function() {
+ var db = open.result;
+ if (open_func)
+ open_func(t, db, open);
+ });
+ }
+ }, description);
+}
+
+// Call with a Test and an array of expected results, in order. Returns a
+// function; call it each time a result arrives. Once the expected number of
+// results has been collected, their order is asserted and the test is
+// completed.
+function expect(t, expected) {
+ var results = [];
+ return result => {
+ results.push(result);
+ if (results.length === expected.length) {
+ assert_array_equals(results, expected);
+ t.done();
+ }
+ };
+}
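+
+// Example usage (illustrative sketch):
+//
+//   async_test(t => {
+//     const saw = expect(t, ['first', 'second']);
+//     saw('first');
+//     saw('second');  // order is asserted here and the test completes
+//   }, 'expect() example');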
+
+// Checks whether the passed transaction is active (by making a request
+// against the named store).
+function is_transaction_active(tx, store_name) {
+ try {
+ const request = tx.objectStore(store_name).get(0);
+ request.onerror = e => {
+ e.preventDefault();
+ e.stopPropagation();
+ };
+ return true;
+ } catch (ex) {
+ assert_equals(ex.name, 'TransactionInactiveError',
+ 'Active check should either not throw anything, or throw ' +
+ 'TransactionInactiveError');
+ return false;
+ }
+}
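+
+// Example usage (illustrative sketch; assumes |db| is an open connection with
+// a 'store' object store and |t| is the current async_test). keep_alive(),
+// defined below, prevents the transaction from committing between tasks:
+//
+//   const tx = db.transaction('store', 'readonly');
+//   const release = keep_alive(tx, 'store');
+//   assert_true(is_transaction_active(tx, 'store'),
+//               'transaction should be active in its creation task');
+//   setTimeout(t.step_func(() => {
+//     assert_false(is_transaction_active(tx, 'store'),
+//                  'transaction should be inactive in a later task');
+//     release();
+//   }), 0);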
+
+// Keeps the passed transaction alive indefinitely (by making requests
+// against the named store). Returns a function that asserts that the
+// transaction has not already completed and then ends the request loop so that
+// the transaction may autocommit and complete.
+function keep_alive(tx, store_name) {
+ let completed = false;
+ tx.addEventListener('complete', () => { completed = true; });
+
+ let keepSpinning = true;
+
+ function spin() {
+ if (!keepSpinning)
+ return;
+ tx.objectStore(store_name).get(0).onsuccess = spin;
+ }
+ spin();
+
+ return () => {
+ assert_false(completed, 'Transaction completed while kept alive');
+ keepSpinning = false;
+ };
+}
+
+// Returns a new function. After it is called |count| times, |func|
+// will be called.
+function barrier_func(count, func) {
+ let n = 0;
+ return () => {
+ if (++n === count)
+ func();
+ };
+}
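+
+// Example usage (illustrative sketch; assumes |t| is the current test and
+// |store| is an object store with out-of-line keys):
+//
+//   const done = barrier_func(2, t.step_func_done());
+//   store.put('value-a', 1).onsuccess = done;
+//   store.put('value-b', 2).onsuccess = done;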
+
+// Creates an IndexedDB database by executing script in the given remote
+// context with |dbName| and |version|.
+async function createIndexedDBForTesting(rc, dbName, version) {
+  await rc.executeScript((dbName, version) => {
+    const request = indexedDB.open(dbName, version);
+    request.onupgradeneeded = () => {
+      request.result.createObjectStore('store');
+    };
+    request.onversionchange = () => {
+      // The remote context has no testharness Test object, so surface the
+      // unexpected event by throwing.
+      throw new Error('unexpectedly received versionchange event.');
+    };
+  }, [dbName, version]);
+}