path: root/comm/mailnews/db/gloda/test
Diffstat (limited to '')
-rw-r--r--  comm/mailnews/db/gloda/test/moz.build | 12
-rw-r--r--  comm/mailnews/db/gloda/test/unit/base_gloda_content.js | 226
-rw-r--r--  comm/mailnews/db/gloda/test/unit/base_index_junk.js | 217
-rw-r--r--  comm/mailnews/db/gloda/test/unit/base_index_messages.js | 1461
-rw-r--r--  comm/mailnews/db/gloda/test/unit/base_query_messages.js | 729
-rw-r--r--  comm/mailnews/db/gloda/test/unit/head_gloda.js | 19
-rw-r--r--  comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm | 431
-rw-r--r--  comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm | 847
-rw-r--r--  comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm | 293
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_corrupt_database.js | 86
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_folder_logic.js | 60
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js | 299
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js | 34
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js | 31
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_addressbook.js | 139
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js | 210
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_compaction.js | 395
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js | 49
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js | 36
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_junk_local.js | 33
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js | 38
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js | 36
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js | 42
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_messages_local.js | 133
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js | 265
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_intl.js | 355
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_migration.js | 151
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js | 445
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_mime_emitter.js | 746
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_msg_search.js | 155
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js | 144
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_nuke_migration.js | 62
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js | 12
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_query_core.js | 658
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js | 37
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js | 38
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js | 40
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_query_messages_local.js | 33
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js | 894
-rw-r--r--  comm/mailnews/db/gloda/test/unit/test_startup_offline.js | 53
-rw-r--r--  comm/mailnews/db/gloda/test/unit/xpcshell.ini | 38
41 files changed, 9982 insertions, 0 deletions
diff --git a/comm/mailnews/db/gloda/test/moz.build b/comm/mailnews/db/gloda/test/moz.build
new file mode 100644
index 0000000000..c16fdd2b6c
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/moz.build
@@ -0,0 +1,12 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+XPCSHELL_TESTS_MANIFESTS += ["unit/xpcshell.ini"]
+
+TESTING_JS_MODULES.gloda += [
+ "unit/resources/GlodaQueryHelper.jsm",
+ "unit/resources/GlodaTestHelper.jsm",
+ "unit/resources/GlodaTestHelperFunctions.jsm",
+]
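The TESTING_JS_MODULES.gloda entry above installs the three helper modules under the resource://testing-common/gloda/ prefix, which is how the unit tests in this directory get at them. As a quick illustration, these import lines are the same ones that appear in the test files later in this diff:

// Importing the shared gloda test helpers registered by the moz.build above.
var { waitForGlodaIndexer, assertExpectedMessagesIndexed } = ChromeUtils.import(
  "resource://testing-common/gloda/GlodaTestHelper.jsm"
);
var { queryExpect } = ChromeUtils.import(
  "resource://testing-common/gloda/GlodaQueryHelper.jsm"
);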
diff --git a/comm/mailnews/db/gloda/test/unit/base_gloda_content.js b/comm/mailnews/db/gloda/test/unit/base_gloda_content.js
new file mode 100644
index 0000000000..d106015b48
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_gloda_content.js
@@ -0,0 +1,226 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent. This may also be implicitly tested by indexing
+ * and fulltext query tests (on messages), but the buck stops here for the
+ * content stuff.
+ *
+ * Currently, we just test quoting removal and that the content turns out right.
+ * We do not actually verify that the quoted blocks are correct (aka we might
+ * screw up eating the greater-than signs). (We have no known consumers who
+ * care about the quoted blocks.)
+ */
+
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+// We need to be able to get at GlodaFundAttr to check the number of whittler
+// invocations.
+var { GlodaFundAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaFundAttr.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* ===== Data ===== */
+var messageInfos = [
+ {
+ name: "no quoting",
+ bode: [
+ [true, "I like hats"],
+ [true, "yes I do!"],
+ [true, "I like hats!"],
+ [true, "How bout you?"],
+ ],
+ },
+ {
+ name: "no quoting, whitespace removal",
+ bode: [
+ [true, "robots are nice..."],
+ [true, ""],
+ [true, "except for the bloodlust"],
+ ],
+ },
+ {
+ name: "bottom posting",
+ bode: [
+ [false, "John wrote:"],
+ [false, "> I like hats"],
+      [false, ">"], // This quoted blank line is significant! Do not lose it!
+ [false, "> yes I do!"],
+ [false, ""],
+ [true, "I do enjoy them as well."],
+ [true, ""],
+ [true, "Bob"],
+ ],
+ },
+ {
+ name: "top posting",
+ bode: [
+ [true, "Hats are where it's at."],
+ [false, ""],
+ [false, "John wrote:"],
+ [false, "> I like hats"],
+ [false, "> yes I do!"],
+ ],
+ },
+ {
+ name: "top posting with trailing whitespace, no intro",
+ bode: [
+ [true, "Hats are where it's at."],
+ [false, ""],
+ [false, "> I like hats"],
+ [false, "> yes I do!"],
+ [false, ""],
+ [false, ""],
+ ],
+ },
+ {
+ name: "interspersed quoting",
+ bode: [
+ [false, "John wrote:"],
+ [false, "> I like hats"],
+ [true, "I concur with this point."],
+ [false, "> yes I do!"],
+ [false, ""],
+ [true, "this point also resonates with me."],
+ [false, ""],
+ [false, "> I like hats!"],
+ [false, "> How bout you?"],
+ [false, ""],
+ [true, "Verily!"],
+ ],
+ },
+ {
+ name: "german style",
+ bode: [
+ [false, "Mark Banner <bugzilla@standard8.plus.invalid> wrote:"],
+ [false, "\xa0"],
+ [
+ false,
+ "> We haven't nailed anything down in detail yet, depending on how we are ",
+ ],
+ [
+ true,
+ "That sounds great and would definitely be appreciated by localizers.",
+ ],
+ [false, ""],
+ ],
+ },
+ {
+ name: "tortuous interference",
+ bode: [
+ [false, "> wrote"],
+ [true, "running all the time"],
+ [false, "> wrote"],
+ [true, "cheese"],
+ [false, ""],
+ ],
+ },
+];
+
+function setup_create_message(info) {
+ info.body = { body: info.bode.map(tupe => tupe[1]).join("\r\n") };
+ info.expected = info.bode
+ .filter(tupe => tupe[0])
+ .map(tupe => tupe[1])
+ .join("\n");
+
+ info._synMsg = msgGen.makeMessage(info);
+}
+
+/**
+ * To save ourselves some lookup trouble, pretend to be a verification
+ * function so that we get easy access to the gloda translations of the
+ * messages and can cram them in various places.
+ */
+function glodaInfoStasher(aSynthMessage, aGlodaMessage) {
+ // Let's not assume an ordering.
+ for (let iMsg = 0; iMsg < messageInfos.length; iMsg++) {
+ if (messageInfos[iMsg]._synMsg == aSynthMessage) {
+ messageInfos[iMsg]._glodaMsg = aGlodaMessage;
+ }
+ }
+}
+
+/**
+ * Actually inject all the messages we created above.
+ */
+async function setup_inject_messages() {
+ // Create the messages from messageInfo.
+ messageInfos.forEach(info => {
+ setup_create_message(info);
+ });
+ let msgSet = new SyntheticMessageSet(messageInfos.map(info => info._synMsg));
+ let folder = await messageInjection.makeEmptyFolder();
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], { verifier: glodaInfoStasher })
+ );
+}
+
+function test_stream_message(info) {
+ // Currying the function for simpler usage with `base_gloda_content_tests`.
+ return () => {
+ let msgHdr = info._glodaMsg.folderMessage;
+
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ verify_message_content(
+ info,
+ info._synMsg,
+ info._glodaMsg,
+ aMsgHdr,
+ aMimeMsg
+ );
+ });
+ };
+}
+
+// Instrument GlodaFundAttr so we can check the count.
+var originalWhittler = GlodaFundAttr.contentWhittle;
+var whittleCount = 0;
+GlodaFundAttr.contentWhittle = function (...aArgs) {
+ whittleCount++;
+ return originalWhittler.apply(this, aArgs);
+};
+
+function verify_message_content(aInfo, aSynMsg, aGlodaMsg, aMsgHdr, aMimeMsg) {
+ if (aMimeMsg == null) {
+ throw new Error(
+ "Message streaming should work; check test_mime_emitter.js first"
+ );
+ }
+
+ whittleCount = 0;
+ let content = Gloda.getMessageContent(aGlodaMsg, aMimeMsg);
+ if (whittleCount != 1) {
+ throw new Error("Whittle count is " + whittleCount + " but should be 1!");
+ }
+
+ Assert.equal(content.getContentString(), aInfo.expected, "Message streamed");
+}
+
+function test_sanity_test_environment() {
+ Assert.ok(msgGen, "Sanity that msgGen is set.");
+ Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+}
+
+var base_gloda_content_tests = [
+ test_sanity_test_environment,
+ setup_inject_messages,
+ ...messageInfos.map(e => {
+ return test_stream_message(e);
+ }),
+];
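base_gloda_content_tests is exported as a plain array of test functions rather than registered here, so that the account-flavor variants listed in the diffstat (test_gloda_content_local.js, test_gloda_content_imap_offline.js) can set up msgGen and messageInjection for their particular transport and then register the shared tests. The following is only a hypothetical sketch of such a variant; the constructor arguments, the setup shape, and the way the base file is pulled in are assumptions, not a copy of the real variant files:

// Hypothetical variant file (sketch only; the real files likely differ in detail).
// The base file (base_gloda_content.js) is assumed to be loaded by the harness.
var { MessageGenerator } = ChromeUtils.import(
  "resource://testing-common/mailnews/MessageGenerator.jsm"
);
var { MessageInjection } = ChromeUtils.import(
  "resource://testing-common/mailnews/MessageInjection.jsm"
);

add_setup(function () {
  msgGen = new MessageGenerator();
  // "local" injection mode and this constructor shape are assumptions.
  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
});

// Register every shared test from base_gloda_content.js with the xpcshell harness.
base_gloda_content_tests.forEach(test => add_task(test));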
diff --git a/comm/mailnews/db/gloda/test/unit/base_index_junk.js b/comm/mailnews/db/gloda/test/unit/base_index_junk.js
new file mode 100644
index 0000000000..8529f24a56
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_index_junk.js
@@ -0,0 +1,217 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test indexing in the face of junk classification and junk folders. It is
+ * gloda policy not to index junk mail.
+ *
+ * A similar test, verifying that moving messages to the trash folder counts as
+ * deletion, lives in base_index_messages.js.
+ */
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var messageInjection;
+
+const SPAM_BODY = { body: "superspam superspam superspam eevil eevil eevil" };
+const HAM_BODY = { body: "ham ham ham nice nice nice happy happy happy" };
+
+/**
+ * Make SPAM_BODY be known as spammy and HAM_BODY be known as hammy.
+ */
+async function setup_spam_filter() {
+ let [, spamSet, hamSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, body: SPAM_BODY },
+ { count: 1, body: HAM_BODY },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([spamSet, hamSet], []));
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ let junkListener = {
+ onMessageClassified() {
+ promiseResolve();
+ },
+ };
+
+ // Ham.
+ dump(`Marking message: ${hamSet.getMsgHdr(0)} as ham.`);
+ MailServices.junk.setMessageClassification(
+ hamSet.getMsgURI(0),
+ null, // no old classification
+ MailServices.junk.GOOD,
+ null,
+ junkListener
+ );
+ await promise;
+
+ // Reset promise for junkListener.
+ promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ // Spam.
+ dump(`Marking message: ${spamSet.getMsgHdr(0)} as spam.`);
+ MailServices.junk.setMessageClassification(
+ spamSet.getMsgURI(0),
+ null, // No old classification.
+ MailServices.junk.JUNK,
+ null,
+ junkListener
+ );
+ await promise;
+}
+
+/**
+ * Because gloda defers indexing until after junk, we should never index a
+ * message that gets marked as junk. So if we inject a message that will
+ * definitely be marked as junk (thanks to use of terms that guarantee it),
+ * the indexer should never index it.
+ *
+ * ONLY THIS TEST ACTUALLY RELIES ON THE BAYESIAN CLASSIFIER.
+ */
+async function test_never_indexes_a_message_marked_as_junk() {
+ // Event-driven does not index junk.
+
+ // Make a message that will be marked as junk from the get-go.
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, body: SPAM_BODY },
+ ]);
+ // Since the message is junk, gloda should not index it!
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Folder sweep does not index junk.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+/**
+ * Reset the training data so the bayesian classifier stops doing things.
+ */
+function reset_spam_filter() {
+ MailServices.junk.resetTrainingData();
+}
+
+/**
+ * Marking a message as junk is equivalent to deleting the message; un-mark it
+ * and it should go back to being a happy message (with the same gloda-id!).
+ *
+ * THIS TEST DOES NOT RELY ON THE BAYESIAN CLASSIFIER.
+ */
+
+async function test_mark_as_junk_is_deletion_mark_as_not_junk_is_exposure() {
+ // Mark as junk is deletion.
+ // Create a message; it should get indexed.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let glodaId = msgSet.glodaMessages[0].id;
+ // Mark it as junk.
+ msgSet.setJunk(true);
+ // It will appear deleted after the event.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+ // Mark as non-junk gets indexed.
+ msgSet.setJunk(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ // We should have reused the existing gloda message so it should keep the id.
+ Assert.equal(glodaId, msgSet.glodaMessages[0].id);
+}
+
+/**
+ * Moving a message to the junk folder is equivalent to deletion. Gloda does
+ * not index junk folders at all, which is why this is an important and
+ * independent determination from marking a message directly as junk.
+ *
+ * The move to the junk folder is performed without using any explicit junk
+ * support code. This ends up being effectively the same underlying logic test
+ * as base_index_messages' test of moving a message to the trash folder.
+ */
+async function test_message_moving_to_junk_folder_is_deletion() {
+ // Create and index two messages in a conversation.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ let convId = msgSet.glodaMessages[0].conversation.id;
+ let firstGlodaId = msgSet.glodaMessages[0].id;
+ let secondGlodaId = msgSet.glodaMessages[1].id;
+
+ // Move them to the junk folder.
+ await messageInjection.moveMessages(
+ msgSet,
+ await messageInjection.getJunkFolder()
+ );
+
+ // They will appear deleted after the events.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // We do not index the junk folder so this should actually make them appear
+ // deleted to an unprivileged query.
+ let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ msgQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgQuery, []);
+
+ // Force a sweep.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // There should be no apparent change as the result of this pass.
+ // (Well, the conversation will die, but we can't see that.)
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // The conversation should be gone.
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(convId);
+ await queryExpect(convQuery, []);
+
+ // The messages should be entirely gone.
+ let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ msgPrivQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgPrivQuery, []);
+}
+
+function test_sanity_test_environment() {
+ Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+ Assert.ok(messageInjection.messageGenerator, "Sanity that msgGen is set.");
+}
+
+/* exported tests */
+var base_index_junk_tests = [
+ test_sanity_test_environment,
+ setup_spam_filter,
+ test_never_indexes_a_message_marked_as_junk,
+ reset_spam_filter,
+ test_mark_as_junk_is_deletion_mark_as_not_junk_is_exposure,
+ test_message_moving_to_junk_folder_is_deletion,
+];
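setup_spam_filter above rebuilds its resolver promise by hand between the two setMessageClassification calls. The same callback-to-promise pattern could be factored into a small helper; the sketch below is illustrative only (the helper name is made up) and simply reuses the five-argument setMessageClassification call and onMessageClassified listener shown above:

// Illustrative helper, not part of the diff: one classification round-trip as a promise.
function classifyMessage(msgURI, classification) {
  return new Promise(resolve => {
    MailServices.junk.setMessageClassification(
      msgURI,
      null, // No old classification, as in setup_spam_filter.
      classification,
      null, // Same null fourth argument as the calls above.
      { onMessageClassified: () => resolve() }
    );
  });
}

// Usage, mirroring setup_spam_filter:
//   await classifyMessage(hamSet.getMsgURI(0), MailServices.junk.GOOD);
//   await classifyMessage(spamSet.getMsgURI(0), MailServices.junk.JUNK);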
diff --git a/comm/mailnews/db/gloda/test/unit/base_index_messages.js b/comm/mailnews/db/gloda/test/unit/base_index_messages.js
new file mode 100644
index 0000000000..bea2337d7f
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_index_messages.js
@@ -0,0 +1,1461 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests our indexing prowess. This includes both our ability to
+ * properly be triggered by events taking place in thunderbird as well as our
+ * ability to correctly extract/index the right data.
+ * In general, if these tests pass, things are probably working quite well.
+ *
+ * This test has local, IMAP online, IMAP offline, and IMAP online-become-offline
+ * variants. See the test_index_messages_*.js files.
+ *
+ * Things we don't test that you might expect us to test:
+ * - Full-text search. Happens in query testing.
+ */
+
+var { MailUtils } = ChromeUtils.import("resource:///modules/MailUtils.jsm");
+var { NetUtil } = ChromeUtils.import("resource://gre/modules/NetUtil.jsm");
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { queryExpect, sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var {
+ assertExpectedMessagesIndexed,
+ waitForGlodaIndexer,
+ nukeGlodaCachesAndCollections,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var {
+ configureGlodaIndexing,
+ waitForGlodaDBFlush,
+ waitForIndexingHang,
+ resumeFromSimulatedHang,
+ permuteMessages,
+ makeABCardForAddressPair,
+} = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { PromiseTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/PromiseTestUtils.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { SyntheticMessageSet, SyntheticPartMultiMixed, SyntheticPartLeaf } =
+ ChromeUtils.import("resource://testing-common/mailnews/MessageGenerator.jsm");
+var { TagNoun } = ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+
+// Whether we can expect fulltext results
+var expectFulltextResults = true;
+
+/**
+ * Should we force our folders offline after we have indexed them once. We do
+ * this in the online_to_offline test variant.
+ */
+var goOffline = false;
+
+var messageInjection;
+var msgGen;
+var scenarios;
+
+/* ===== Indexing Basics ===== */
+
+/**
+ * Index a message, wait for a commit, make sure the header gets the property
+ * set correctly. Then modify the message, verify the dirty property shows
+ * up, flush again, and make sure the dirty property goes clean again.
+ */
+async function test_pending_commit_tracker_flushes_correctly() {
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ // Before the flush, there should be no gloda-id property.
+ let msgHdr = msgSet.getMsgHdr(0);
+ // Get it as a string to make sure it's empty rather than possessing a value.
+ Assert.equal(msgHdr.getStringProperty("gloda-id"), "");
+
+ await waitForGlodaDBFlush();
+
+ // After the flush there should be a gloda-id property and it should
+ // equal the gloda id.
+ let gmsg = msgSet.glodaMessages[0];
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), gmsg.id);
+
+ // Make sure no dirty property was written.
+ Assert.equal(msgHdr.getStringProperty("gloda-dirty"), "");
+
+ // Modify the message.
+ msgSet.setRead(true);
+ await waitForGlodaIndexer(msgSet);
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Now there should be a dirty property and it should be 1.
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-dirty"),
+ GlodaMsgIndexer.kMessageDirty
+ );
+
+ // Flush.
+ await waitForGlodaDBFlush();
+
+ // Now dirty should be 0 and the gloda id should still be the same.
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-dirty"),
+ GlodaMsgIndexer.kMessageClean
+ );
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), gmsg.id);
+}
+
+/**
+ * Make sure that PendingCommitTracker causes a msgdb commit to occur so that
+ * if the nsIMsgFolder's msgDatabase attribute has already been nulled
+ * (which is normally how we force a msgdb commit), the changes to the header
+ * actually hit the disk.
+ */
+async function test_pending_commit_causes_msgdb_commit() {
+ // New message, index it.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ // Force the msgDatabase closed; the sqlite commit will not yet have occurred.
+ messageInjection.getRealInjectionFolder(folder).msgDatabase = null;
+ // Make the commit happen, this causes the header to get set.
+ await waitForGlodaDBFlush();
+
+ // Force a GC. This will kill off the header and the database, losing data
+ // if we are not protecting it.
+ Cu.forceGC();
+
+ // Now retrieve the header and make sure it has the gloda id set!
+ let msgHdr = msgSet.getMsgHdr(0);
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-id"),
+ msgSet.glodaMessages[0].id
+ );
+}
+
+/**
+ * Give the indexing sweep a workout.
+ *
+ * This includes:
+ * - Basic indexing sweep across never-before-indexed folders.
+ * - Indexing sweep across folders with just some changes.
+ * - Filthy pass.
+ */
+async function test_indexing_sweep() {
+ // -- Never-before-indexed folders.
+ // Turn off event-driven indexing.
+ configureGlodaIndexing({ event: false });
+
+ let [[folderA], setA1, setA2] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 3 }, { count: 2 }]
+ );
+ let [, setB1, setB2] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 3 },
+ { count: 2 },
+ ]);
+ let [[folderC], setC1, setC2] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 3 }, { count: 2 }]
+ );
+
+ // Make sure that event-driven job gets nuked out of existence
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // Turn on event-driven indexing again; this will trigger a sweep.
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([setA1, setA2, setB1, setB2, setC1, setC2])
+ );
+
+ // -- Folders with some changes, pending commits.
+ // Indexing off.
+ configureGlodaIndexing({ event: false });
+
+ setA1.setRead(true);
+ setB2.setRead(true);
+
+ // Indexing on, killing all outstanding jobs, trigger sweep.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA1, setB2]));
+
+ // -- Folders with some changes, no pending commits.
+ // Force a commit to clear out our pending commits.
+ await waitForGlodaDBFlush();
+ // Indexing off.
+ configureGlodaIndexing({ event: false });
+
+ setA2.setRead(true);
+ setB1.setRead(true);
+
+ // Indexing on, killing all outstanding jobs, trigger sweep.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA2, setB1]));
+
+ // -- Filthy foldering indexing.
+ // Just mark the folder filthy and make sure that we reindex everyone.
+ // IMPORTANT! The trick of marking the folder filthy only works because
+ // we flushed/committed the database above; the PendingCommitTracker
+ // is not aware of bogus filthy-marking of folders.
+ // We leave the verification of the implementation details to
+ // test_index_sweep_folder.js.
+ let glodaFolderC = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folderC)
+ );
+ // Marked gloda folder dirty.
+ glodaFolderC._dirtyStatus = glodaFolderC.kFolderFilthy;
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setC1, setC2]));
+
+ // -- Forced folder indexing.
+ var callbackInvoked = false;
+ GlodaMsgIndexer.indexFolder(
+ messageInjection.getRealInjectionFolder(folderA),
+ {
+ force: true,
+ callback() {
+ callbackInvoked = true;
+ },
+ }
+ );
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA1, setA2]));
+ Assert.ok(callbackInvoked);
+}
+
+/**
+ * We used to screw up and downgrade filthy folders to dirty if we saw an event
+ * happen in the folder before we got to the folder; this tests that we no
+ * longer do that.
+ */
+async function test_event_driven_indexing_does_not_mess_with_filthy_folders() {
+ // Add a folder with a message.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Fake marking the folder filthy.
+ let glodaFolder = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folder)
+ );
+ glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;
+
+ // Generate an event in the folder.
+ msgSet.setRead(true);
+ // Make sure the indexer did not do anything and the folder is still filthy.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+ Assert.equal(glodaFolder._dirtyStatus, glodaFolder.kFolderFilthy);
+ // Also, the message should not have actually gotten marked dirty.
+ Assert.equal(msgSet.getMsgHdr(0).getUint32Property("gloda-dirty"), 0);
+
+ // Let's make the message un-read again for consistency with the gloda state.
+ msgSet.setRead(false);
+ // Make the folder dirty and let an indexing sweep take care of this so we
+ // don't get extra events in subsequent tests.
+ glodaFolder._dirtyStatus = glodaFolder.kFolderDirty;
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // The message won't get indexed though.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+async function test_indexing_never_priority() {
+ // Add a folder with a bunch of messages.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Index it, and augment the msgSet with the glodaMessages array
+ // for later use by sqlExpectCount.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ // Explicitly tell gloda to never index this folder.
+ let XPCOMFolder = messageInjection.getRealInjectionFolder(folder);
+ let glodaFolder = Gloda.getFolderForFolder(XPCOMFolder);
+ GlodaMsgIndexer.setFolderIndexingPriority(
+ XPCOMFolder,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Verify that the setter and getter do the right thing.
+ Assert.equal(
+ glodaFolder.indexingPriority,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Check that existing message is marked as deleted.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // Make sure the deletion hit the database.
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
+ glodaFolder.id,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Add another message.
+ await messageInjection.makeNewSetsInFolders([folder], [{ count: 1 }]);
+
+ // Make sure that indexing returns nothing.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+async function test_setting_indexing_priority_never_while_indexing() {
+ if (!messageInjection.messageInjectionIsLocal()) {
+ return;
+ }
+
+ // Configure the gloda indexer to hang while streaming the message.
+ configureGlodaIndexing({ hangWhile: "streaming" });
+
+ // Create a folder with a message inside.
+ let [[folder]] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ await waitForIndexingHang();
+
+ // Explicitly tell gloda to never index this folder.
+ let XPCOMFolder = messageInjection.getRealInjectionFolder(folder);
+ let glodaFolder = Gloda.getFolderForFolder(XPCOMFolder);
+ GlodaMsgIndexer.setFolderIndexingPriority(
+ XPCOMFolder,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Reset indexing to not hang.
+ configureGlodaIndexing({});
+
+ // Sorta get the event chain going again.
+ await resumeFromSimulatedHang(true);
+
+ // Because the folder was dirty it should actually end up getting indexed,
+ // so in the end the message will get indexed. Also, make sure a cleanup
+ // was observed.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { cleanedUp: 1 }));
+}
+
+/* ===== Threading / Conversation Grouping ===== */
+
+var gSynMessages = [];
+function allMessageInSameConversation(aSynthMessage, aGlodaMessage, aConvID) {
+ if (aConvID === undefined) {
+ return aGlodaMessage.conversationID;
+ }
+ Assert.equal(aConvID, aGlodaMessage.conversationID);
+ // Cheat and stash the synthetic message (we need them for one of the IMAP
+ // tests).
+ gSynMessages.push(aSynthMessage);
+ return aConvID;
+}
+
+/**
+ * Test our conversation/threading logic in the straight-forward direct
+ * reply case, the missing intermediary case, and the siblings with missing
+ * parent case. We also test all permutations of receipt of those messages.
+ * (Also tests that we index new messages.)
+ */
+async function test_threading_direct_reply() {
+ let permutationMessages = await permuteMessages(
+ scenarios.directReply,
+ messageInjection
+ );
+ for (const preparedMessage of permutationMessages) {
+ let message = await preparedMessage();
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([message], allMessageInSameConversation)
+ );
+ }
+}
+
+async function test_threading_missing_intermediary() {
+ let permutationMessages = await permuteMessages(
+ scenarios.missingIntermediary,
+ messageInjection
+ );
+ for (const preparedMessage of permutationMessages) {
+ let message = await preparedMessage();
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([message], allMessageInSameConversation)
+ );
+ }
+}
+async function test_threading_siblings_missing_parent() {
+ let permutationMessages = await permuteMessages(
+ scenarios.siblingsMissingParent,
+ messageInjection
+ );
+ for (const preparedMessage of permutationMessages) {
+ let message = await preparedMessage();
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([message], allMessageInSameConversation)
+ );
+ }
+}
+
+/**
+ * Test the bit that says "if we're fulltext-indexing the message and we
+ * discover it didn't have any attachments, clear the attachment bit from the
+ * message header".
+ */
+async function test_attachment_flag() {
+ // Create a synthetic message with an attachment that won't normally be listed
+ // in the attachment pane (Content-Disposition: inline, no filename, and
+ // displayable inline).
+ let smsg = msgGen.makeMessage({
+ name: "test message with part 1.2 attachment",
+ attachments: [
+ {
+ body: "attachment",
+ filename: "",
+ format: "",
+ },
+ ],
+ });
+ // Save it off for test_attributes_fundamental_from_disk.
+ let msgSet = new SyntheticMessageSet([smsg]);
+ let folder = (fundamentalFolderHandle =
+ await messageInjection.makeEmptyFolder());
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+
+ // If we need to go offline, let the indexing pass run, then force us offline.
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ await messageInjection.makeFolderAndContentsOffline(folder);
+ // Now the next indexer wait will wait for the next indexing pass.
+ }
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], {
+ verifier: verify_attachment_flag,
+ })
+ );
+}
+
+function verify_attachment_flag(smsg, gmsg) {
+ // -- Attachments. We won't have these if we don't have fulltext results.
+ if (expectFulltextResults) {
+ Assert.equal(gmsg.attachmentNames.length, 0);
+ Assert.equal(gmsg.attachmentInfos.length, 0);
+ Assert.equal(
+ false,
+ gmsg.folderMessage.flags & Ci.nsMsgMessageFlags.Attachment
+ );
+ }
+}
+/* ===== Fundamental Attributes (per GlodaFundAttr.jsm) ===== */
+
+/**
+ * Save the synthetic message created in test_attributes_fundamental for the
+ * benefit of test_attributes_fundamental_from_disk.
+ */
+var fundamentalSyntheticMessage;
+var fundamentalFolderHandle;
+/**
+ * We're saving this one so that we can move the message later and verify that
+ * the attributes are consistent.
+ */
+var fundamentalMsgSet;
+var fundamentalGlodaMsgAttachmentUrls;
+/**
+ * Save the resulting gloda message id corresponding to the
+ * fundamentalSyntheticMessage so we can use it to query the message from disk.
+ */
+var fundamentalGlodaMessageId;
+
+/**
+ * Test that we extract the 'fundamental attributes' of a message properly.
+ * 'Fundamental' in this case means the attributes defined/extracted by gloda's
+ * GlodaFundAttr.jsm and perhaps the core message indexing logic itself (which
+ * show up as kSpecial* attributes in GlodaFundAttr.jsm anyway).
+ */
+async function test_attributes_fundamental() {
+ // Create a synthetic message with attachment.
+ let smsg = msgGen.makeMessage({
+ name: "test message",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartLeaf({ body: "I like cheese!" }),
+ msgGen.makeMessage({ body: { body: "I like wine!" } }), // That's one attachment.
+ ]),
+ attachments: [
+ { filename: "bob.txt", body: "I like bread!" }, // And that's another one.
+ ],
+ });
+ // Save it off for test_attributes_fundamental_from_disk.
+ fundamentalSyntheticMessage = smsg;
+ let msgSet = new SyntheticMessageSet([smsg]);
+ fundamentalMsgSet = msgSet;
+ let folder = (fundamentalFolderHandle =
+ await messageInjection.makeEmptyFolder());
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+
+ // If we need to go offline, let the indexing pass run, then force us offline.
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ await messageInjection.makeFolderAndContentsOffline(folder);
+ // Now the next indexer wait will wait for the next indexing pass.
+ }
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], {
+ verifier: verify_attributes_fundamental,
+ })
+ );
+}
+
+function verify_attributes_fundamental(smsg, gmsg) {
+ // Save off the message id for test_attributes_fundamental_from_disk.
+ fundamentalGlodaMessageId = gmsg.id;
+ if (gmsg.attachmentInfos) {
+ fundamentalGlodaMsgAttachmentUrls = gmsg.attachmentInfos.map(
+ att => att.url
+ );
+ } else {
+ fundamentalGlodaMsgAttachmentUrls = [];
+ }
+
+ Assert.equal(
+ gmsg.folderURI,
+ messageInjection.getRealInjectionFolder(fundamentalFolderHandle).URI
+ );
+
+ // -- Subject
+ Assert.equal(smsg.subject, gmsg.conversation.subject);
+ Assert.equal(smsg.subject, gmsg.subject);
+
+ // -- Contact/identity information.
+ // - From
+ // Check the e-mail address.
+ Assert.equal(gmsg.from.kind, "email");
+ Assert.equal(smsg.fromAddress, gmsg.from.value);
+ // Check the name.
+ Assert.equal(smsg.fromName, gmsg.from.contact.name);
+
+ // - To
+ Assert.equal(smsg.toAddress, gmsg.to[0].value);
+ Assert.equal(smsg.toName, gmsg.to[0].contact.name);
+
+ // Date
+ Assert.equal(smsg.date.valueOf(), gmsg.date.valueOf());
+
+ // -- Message ID
+ Assert.equal(smsg.messageId, gmsg.headerMessageID);
+
+ // -- Attachments. We won't have these if we don't have fulltext results.
+ if (expectFulltextResults) {
+ Assert.equal(gmsg.attachmentTypes.length, 1);
+ Assert.equal(gmsg.attachmentTypes[0], "text/plain");
+ Assert.equal(gmsg.attachmentNames.length, 1);
+ Assert.equal(gmsg.attachmentNames[0], "bob.txt");
+
+ let expectedInfos = [
+ // The name for that one is generated randomly.
+ { contentType: "message/rfc822" },
+ { name: "bob.txt", contentType: "text/plain" },
+ ];
+ let expectedSize = 14;
+ Assert.equal(gmsg.attachmentInfos.length, 2);
+ for (let [i, attInfos] of gmsg.attachmentInfos.entries()) {
+ for (let k in expectedInfos[i]) {
+ Assert.equal(attInfos[k], expectedInfos[i][k]);
+ }
+ // Because it's unreliable and depends on the platform.
+ Assert.ok(Math.abs(attInfos.size - expectedSize) <= 2);
+ // Check that the attachment URLs are correct.
+ let channel = NetUtil.newChannel({
+ uri: attInfos.url,
+ loadingPrincipal: Services.scriptSecurityManager.getSystemPrincipal(),
+ securityFlags:
+ Ci.nsILoadInfo.SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL,
+ contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER,
+ });
+
+ try {
+ // Will throw if the URL is invalid.
+ channel.asyncOpen(new PromiseTestUtils.PromiseStreamListener());
+ } catch (e) {
+ do_throw(new Error("Invalid attachment URL"));
+ }
+ }
+ } else {
+ // Make sure we don't actually get attachments!
+ Assert.equal(gmsg.attachmentTypes, null);
+ Assert.equal(gmsg.attachmentNames, null);
+ }
+}
+
+/**
+ * We now move the message into another folder, wait for it to be indexed,
+ * and make sure the magic url getter for GlodaAttachment returns a proper
+ * URL.
+ */
+async function test_moved_message_attributes() {
+ if (!expectFulltextResults) {
+ return;
+ }
+
+  // Don't ask me why, but using "let destFolder = MessageInjection.make_empty_folder()"
+  // would result in a random error when running test_index_messages_imap_offline.js ...
+ let [[destFolder], ignoreSet] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 2 }]
+ );
+ fundamentalFolderHandle = destFolder;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([ignoreSet]));
+
+ // This is a fast move (third parameter set to true).
+ await messageInjection.moveMessages(fundamentalMsgSet, destFolder, true);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([fundamentalMsgSet], {
+ verifier(newSynMsg, newGlodaMsg) {
+ // Verify we still have the same number of attachments.
+ Assert.equal(
+ fundamentalGlodaMsgAttachmentUrls.length,
+ newGlodaMsg.attachmentInfos.length
+ );
+ for (let [i, attInfos] of newGlodaMsg.attachmentInfos.entries()) {
+ // Verify the url has changed.
+ Assert.notEqual(fundamentalGlodaMsgAttachmentUrls[i], attInfos.url);
+ // And verify that the new url is still valid.
+ let channel = NetUtil.newChannel({
+ uri: attInfos.url,
+ loadingPrincipal:
+ Services.scriptSecurityManager.getSystemPrincipal(),
+ securityFlags:
+ Ci.nsILoadInfo.SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL,
+ contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER,
+ });
+ try {
+ channel.asyncOpen(new PromiseTestUtils.PromiseStreamListener());
+ } catch (e) {
+            do_throw(new Error("Invalid attachment URL"));
+ }
+ }
+ },
+ fullyIndexed: 0,
+ })
+ );
+}
+
+/**
+ * We want to make sure that all of the fundamental properties also are there
+ * when we load them from disk. Nuke our cache, query the message back up.
+ * We previously used getMessagesByMessageID to get the message back, but he
+ * does not perform a full load-out like a query does, so we need to use our
+ * query mechanism for this.
+ */
+async function test_attributes_fundamental_from_disk() {
+ nukeGlodaCachesAndCollections();
+
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE).id(
+ fundamentalGlodaMessageId
+ );
+ await queryExpect(
+ query,
+ [fundamentalSyntheticMessage],
+ verify_attributes_fundamental_from_disk,
+ function (smsg) {
+ return smsg.messageId;
+ }
+ );
+}
+
+/**
+ * We are just a wrapper around verify_attributes_fundamental, adapting the
+ * return callback from getMessagesByMessageID.
+ *
+ * @param aGlodaMessage The gloda message that was loaded back from disk.
+ */
+function verify_attributes_fundamental_from_disk(aGlodaMessage) {
+  // Return the message id for test_attributes_fundamental_from_disk's benefit.
+ verify_attributes_fundamental(fundamentalSyntheticMessage, aGlodaMessage);
+ return aGlodaMessage.headerMessageID;
+}
+
+/* ===== Explicit Attributes (per GlodaExplicitAttr.jsm) ===== */
+
+/**
+ * Test the attributes defined by GlodaExplicitAttr.jsm.
+ */
+async function test_attributes_explicit() {
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+
+ // -- Star
+ msgSet.setStarred(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, true);
+
+ msgSet.setStarred(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, false);
+
+ // -- Read / Unread
+ msgSet.setRead(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, true);
+
+ msgSet.setRead(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, false);
+
+ // -- Tags
+ // Note that the tag service does not guarantee stable nsIMsgTag references,
+ // nor does noun_tag go too far out of its way to provide stability.
+ // However, it is stable as long as we don't spook it by bringing new tags
+ // into the equation.
+ let tagOne = TagNoun.getTag("$label1");
+ let tagTwo = TagNoun.getTag("$label2");
+
+ msgSet.addTag(tagOne.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.notEqual(gmsg.tags.indexOf(tagOne), -1);
+
+ msgSet.addTag(tagTwo.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.notEqual(gmsg.tags.indexOf(tagOne), -1);
+ Assert.notEqual(gmsg.tags.indexOf(tagTwo), -1);
+
+ msgSet.removeTag(tagOne.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.tags.indexOf(tagOne), -1);
+ Assert.notEqual(gmsg.tags.indexOf(tagTwo), -1);
+
+ msgSet.removeTag(tagTwo.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.tags.indexOf(tagOne), -1);
+ Assert.equal(gmsg.tags.indexOf(tagTwo), -1);
+
+ // -- Replied To
+
+ // -- Forwarded
+}
+
+/**
+ * Test non-query-able attributes
+ */
+async function test_attributes_cant_query() {
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+
+ // -- Star
+ msgSet.setStarred(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, true);
+
+ msgSet.setStarred(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, false);
+
+ // -- Read / Unread
+ msgSet.setRead(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, true);
+
+ msgSet.setRead(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, false);
+
+ let readDbAttr = Gloda.getAttrDef(GlodaConstants.BUILT_IN, "read");
+ let readId = readDbAttr.id;
+
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messageAttributes WHERE attributeID = ?1",
+ readId
+ );
+
+ // -- Replied To
+
+ // -- Forwarded
+}
+
+/**
+ * Have the participants be in our addressbook prior to indexing so that we can
+ * verify that the hand-off to the addressbook indexer does not cause breakage.
+ */
+async function test_people_in_addressbook() {
+ var senderPair = msgGen.makeNameAndAddress(),
+ recipPair = msgGen.makeNameAndAddress();
+
+ // - Add both people to the address book.
+ makeABCardForAddressPair(senderPair);
+ makeABCardForAddressPair(recipPair);
+
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, to: [recipPair], from: senderPair },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0],
+ senderIdentity = gmsg.from,
+ recipIdentity = gmsg.to[0];
+
+ Assert.notEqual(senderIdentity.contact, null);
+ Assert.ok(senderIdentity.inAddressBook);
+
+ Assert.notEqual(recipIdentity.contact, null);
+ Assert.ok(recipIdentity.inAddressBook);
+}
+
+/* ===== Fulltexts Indexing ===== */
+
+/**
+ * Make sure that we are using the saneBodySize flag. This is basically the
+ * test_sane_bodies test from test_mime_emitter but we pull the indexedBodyText
+ * off the message to check and also make sure that the text contents slice
+ * off the end rather than the beginning.
+ */
+async function test_streamed_bodies_are_size_capped() {
+ if (!expectFulltextResults) {
+ return;
+ }
+
+ let hugeString =
+ "qqqqxxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx \r\n";
+ const powahsOfTwo = 10;
+ for (let i = 0; i < powahsOfTwo; i++) {
+ hugeString = hugeString + hugeString;
+ }
+ let bodyString = "aabb" + hugeString + "xxyy";
+
+ let synMsg = msgGen.makeMessage({
+ body: { body: bodyString, contentType: "text/plain" },
+ });
+ let msgSet = new SyntheticMessageSet([synMsg]);
+ let folder = await messageInjection.makeEmptyFolder();
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ await messageInjection.makeFolderAndContentsOffline(folder);
+ }
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+ Assert.ok(gmsg.indexedBodyText.startsWith("aabb"));
+ Assert.ok(!gmsg.indexedBodyText.includes("xxyy"));
+
+ if (gmsg.indexedBodyText.length > 20 * 1024 + 58 + 10) {
+ do_throw(
+ "Indexed body text is too big! (" + gmsg.indexedBodyText.length + ")"
+ );
+ }
+}
+
+/* ===== Message Deletion ===== */
+/**
+ * Test actually deleting a message on a per-message basis (not just nuking the
+ * folder like emptying the trash does.)
+ *
+ * Logic situations:
+ * - Non-last message in a conversation, twin.
+ * - Non-last message in a conversation, not a twin.
+ * - Last message in a conversation
+ */
+async function test_message_deletion() {
+ // Non-last message in conv, twin.
+ // Create and index two messages in a conversation.
+ let [, convSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([convSet], { augment: true }));
+
+ // Twin the first message in a different folder owing to our reliance on
+ // message-id's in the SyntheticMessageSet logic. (This is also why we broke
+ // up the indexing waits too.)
+ let twinFolder = await messageInjection.makeEmptyFolder();
+ let twinSet = new SyntheticMessageSet([convSet.synMessages[0]]);
+ await messageInjection.addSetsToFolders([twinFolder], [twinSet]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([twinSet], { augment: true }));
+
+ // Split the conv set into two helper sets.
+ let firstSet = convSet.slice(0, 1); // The twinned first message in the thread.
+ let secondSet = convSet.slice(1, 2); // The un-twinned second thread message.
+
+ // Make sure we can find the message (paranoia).
+ let firstQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ firstQuery.id(firstSet.glodaMessages[0].id);
+ let firstColl = await queryExpect(firstQuery, firstSet);
+
+ // Delete it (not trash! delete!).
+ await MessageInjection.deleteMessages(firstSet);
+ // Which should result in an apparent deletion.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [firstSet] }));
+ // And our collection from that query should now be empty.
+ Assert.equal(firstColl.items.length, 0);
+
+ // Make sure it no longer shows up in a standard query.
+ firstColl = await queryExpect(firstQuery, []);
+
+ // Make sure it shows up in a privileged query.
+ let privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ let firstGlodaId = firstSet.glodaMessages[0].id;
+ privQuery.id(firstGlodaId);
+ await queryExpect(privQuery, firstSet);
+
+ // Force a deletion pass.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Make sure it no longer shows up in a privileged query; since it has a twin
+ // we don't need to leave it as a ghost.
+ await queryExpect(privQuery, []);
+
+ // Make sure that the messagesText entry got blown away.
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messagesText WHERE docid = ?1",
+ firstGlodaId
+ );
+
+ // Make sure the conversation still exists.
+ let conv = twinSet.glodaMessages[0].conversation;
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(conv.id);
+ let convColl = await queryExpect(convQuery, [conv]);
+
+ // -- Non-last message, no longer a twin => ghost.
+
+ // Make sure nuking the twin didn't somehow kill them both.
+ let twinQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ // Let's search on the message-id now that there is no ambiguity.
+ twinQuery.headerMessageID(twinSet.synMessages[0].messageId);
+ let twinColl = await queryExpect(twinQuery, twinSet);
+
+ // Delete the twin.
+ await MessageInjection.deleteMessages(twinSet);
+ // Which should result in an apparent deletion.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [twinSet] }));
+ // It should disappear from the collection.
+ Assert.equal(twinColl.items.length, 0);
+
+ // No longer show up in the standard query.
+ twinColl = await queryExpect(twinQuery, []);
+
+ // Still show up in a privileged query.
+ privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ privQuery.headerMessageID(twinSet.synMessages[0].messageId);
+ await queryExpect(privQuery, twinSet);
+
+ // Force a deletion pass.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+  // The message should be marked as a ghost now that the deletion pass has run.
+ // Ghosts have no fulltext rows, so check for that.
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messagesText WHERE docid = ?1",
+ twinSet.glodaMessages[0].id
+ );
+
+ // It still should show up in the privileged query; it's a ghost!
+ let privColl = await queryExpect(privQuery, twinSet);
+ // Make sure it looks like a ghost.
+ let twinGhost = privColl.items[0];
+ Assert.equal(twinGhost._folderID, null);
+ Assert.equal(twinGhost._messageKey, null);
+
+ // Make sure the conversation still exists.
+ await queryExpect(convQuery, [conv]);
+
+  // -- Last message in the conversation, not a twin.
+ // This should blow away the message, the ghosts, and the conversation.
+
+ // Second message should still be around.
+ let secondQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ secondQuery.headerMessageID(secondSet.synMessages[0].messageId);
+ let secondColl = await queryExpect(secondQuery, secondSet);
+
+ // Delete it and make sure it gets marked deleted appropriately.
+ await MessageInjection.deleteMessages(secondSet);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [secondSet] }));
+ Assert.equal(secondColl.items.length, 0);
+
+ // Still show up in a privileged query.
+ privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ privQuery.headerMessageID(secondSet.synMessages[0].messageId);
+ await queryExpect(privQuery, secondSet);
+
+ // Force a deletion pass.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // It should no longer show up in a privileged query; we killed the ghosts.
+ await queryExpect(privQuery, []);
+
+ // - The conversation should have disappeared too.
+  // (We have no listener to watch for it disappearing from convQuery, but this
+  // is basically how glodaTestHelper does its thing anyway.)
+ Assert.equal(convColl.items.length, 0);
+
+ // Make sure the query fails to find it too.
+ await queryExpect(convQuery, []);
+
+ // -- Identity culling verification.
+ // The identities associated with that message should no longer exist, nor
+ // should their contacts.
+}
+
+async function test_moving_to_trash_marks_deletion() {
+ // Create and index two messages in a conversation.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ let convId = msgSet.glodaMessages[0].conversation.id;
+ let firstGlodaId = msgSet.glodaMessages[0].id;
+ let secondGlodaId = msgSet.glodaMessages[1].id;
+
+ // Move them to the trash.
+ await messageInjection.trashMessages(msgSet);
+
+ // We do not index the trash folder so this should actually make them appear
+ // deleted to an unprivileged query.
+ let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ msgQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgQuery, []);
+
+ // They will appear deleted after the events.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // Force a sweep.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // There should be no apparent change as the result of this pass.
+ // Well, the conversation will die, but we can't see that.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // The conversation should be gone.
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(convId);
+ await queryExpect(convQuery, []);
+
+ // The messages should be entirely gone.
+ let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ msgPrivQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgPrivQuery, []);
+}
+
+/**
+ * Deletion that occurs because a folder got deleted.
+ * There is no hand-holding involving the headers that were in the folder.
+ */
+async function test_folder_nuking_message_deletion() {
+ // Create and index two messages in a conversation.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ let convId = msgSet.glodaMessages[0].conversation.id;
+ let firstGlodaId = msgSet.glodaMessages[0].id;
+ let secondGlodaId = msgSet.glodaMessages[1].id;
+
+ // Delete the folder.
+ messageInjection.deleteFolder(folder);
+ // That does generate the deletion events if the messages were in-memory,
+ // which these are.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // This should have caused us to mark all the messages as deleted; the
+ // messages should no longer show up in an unprivileged query.
+ let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ msgQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgQuery, []);
+
+ // Force a sweep.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // There should be no apparent change as the result of this pass.
+ // Well, the conversation will die, but we can't see that.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // The conversation should be gone.
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(convId);
+ await queryExpect(convQuery, []);
+
+ // The messages should be entirely gone.
+ let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ msgPrivQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgPrivQuery, []);
+}
+
+/* ===== Folder Move/Rename/Copy (Single and Nested) ===== */
+
+async function test_folder_deletion_nested() {
+ // Add a folder with a bunch of messages.
+ let [[folder1], msgSet1] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ let [[folder2], msgSet2] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Index these folders, and augment the msgSet with the glodaMessages array
+ // for later use by sqlExpectCount.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet1, msgSet2], { augment: true })
+ );
+  // The move has to be performed after the indexing because otherwise, on
+  // IMAP, the moved message headers are different entities and it is not
+  // msgSet2 that ends up indexed, but the fresh headers.
+ await MessageInjection.moveFolder(folder2, folder1);
+
+ // Add a trash folder, and move folder1 into it.
+ let trash = await messageInjection.makeEmptyFolder(null, [
+ Ci.nsMsgFolderFlags.Trash,
+ ]);
+ await MessageInjection.moveFolder(folder1, trash);
+
+ let folders = MessageInjection.get_nsIMsgFolder(trash).descendants;
+ Assert.equal(folders.length, 2);
+ let [newFolder1, newFolder2] = folders;
+
+ let glodaFolder1 = Gloda.getFolderForFolder(newFolder1);
+ let glodaFolder2 = Gloda.getFolderForFolder(newFolder2);
+
+ // Verify that Gloda properly marked this folder as not to be indexed anymore.
+ Assert.equal(
+ glodaFolder1.indexingPriority,
+ glodaFolder1.kIndexingNeverPriority
+ );
+
+ // Check that existing message is marked as deleted.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], { deleted: [msgSet1, msgSet2] })
+ );
+
+ // Make sure the deletion hit the database.
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
+ glodaFolder1.id,
+ glodaFolder1.kIndexingNeverPriority
+ );
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
+ glodaFolder2.id,
+ glodaFolder2.kIndexingNeverPriority
+ );
+
+ if (messageInjection.messageInjectionIsLocal()) {
+ // Add another message.
+ await messageInjection.makeNewSetsInFolders([newFolder1], [{ count: 1 }]);
+ await messageInjection.makeNewSetsInFolders([newFolder2], [{ count: 1 }]);
+
+ // Make sure that indexing returns nothing.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+ }
+}
+
+/* ===== IMAP Nuances ===== */
+
+/**
+ * Verify that for IMAP folders we still index a message that is added as
+ * read.
+ */
+async function test_imap_add_unread_to_folder() {
+ if (messageInjection.messageInjectionIsLocal()) {
+ return;
+ }
+
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, read: true },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+}
+
+/* ===== Message Moving ===== */
+
+/**
+ * Moving a message between folders should result in us knowing that the message
+ * is in the target location.
+ */
+async function test_message_moving() {
+ // - Inject and insert.
+ // Source folder with the message we care about.
+ let [[srcFolder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ // Dest folder with some messages in it to test some wacky local folder moving
+  // logic. (Local moves try to update the correspondence immediately.)
+ let [[destFolder], ignoreSet] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 2 }]
+ );
+
+ // We want the gloda message mapping.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet, ignoreSet], { augment: true })
+ );
+ let gmsg = msgSet.glodaMessages[0];
+ // Save off the message key so we can make sure it changes.
+ let oldMessageKey = msgSet.getMsgHdr(0).messageKey;
+
+ // - Fastpath (offline) move it to a new folder.
+ // Initial move.
+ await messageInjection.moveMessages(msgSet, destFolder, true);
+
+ // - Make sure gloda sees it in the new folder.
+ // Since we are doing offline IMAP moves, the fast-path should be taken and
+ // so we should receive an itemsModified notification without a call to
+ // Gloda.grokNounItem.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { fullyIndexed: 0 }));
+
+ Assert.equal(
+ gmsg.folderURI,
+ messageInjection.getRealInjectionFolder(destFolder).URI
+ );
+
+ // - Make sure the message key is correct!
+ Assert.equal(gmsg.messageKey, msgSet.getMsgHdr(0).messageKey);
+ // Sanity check that the messageKey actually changed for the message.
+ Assert.notEqual(gmsg.messageKey, oldMessageKey);
+
+ // - Make sure the indexer's _keyChangedBatchInfo dict is empty.
+ for (let evilKey in GlodaMsgIndexer._keyChangedBatchInfo) {
+ let evilValue = GlodaMsgIndexer._keyChangedBatchInfo[evilKey];
+    throw new Error(
+      "GlodaMsgIndexer._keyChangedBatchInfo should be empty but " +
+        "has key:\n" +
+        evilKey +
+        "\nAnd value:\n" +
+        evilValue +
+        "."
+    );
+ }
+
+ // - Slowpath (IMAP online) move it back to its origin folder.
+ // Move it back.
+ await messageInjection.moveMessages(msgSet, srcFolder, false);
+ // In the IMAP case we will end up reindexing the message because we will
+ // not be able to fast-path, but the local case will still be fast-pathed.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], {
+ fullyIndexed: messageInjection.messageInjectionIsLocal() ? 0 : 1,
+ })
+ );
+ Assert.equal(
+ gmsg.folderURI,
+ messageInjection.getRealInjectionFolder(srcFolder).URI
+ );
+ Assert.equal(gmsg.messageKey, msgSet.getMsgHdr(0).messageKey);
+}
+
+/**
+ * Moving a gloda-indexed message out of a filthy folder should result in the
+ * destination message not having a gloda-id.
+ */
+
+/* ===== Message Copying ===== */
+
+/* ===== Sweep Complications ===== */
+
+/**
+ * Make sure that a message indexed by event-driven indexing does not
+ * get reindexed by sweep indexing that follows.
+ */
+async function test_sweep_indexing_does_not_reindex_event_indexed() {
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Wait for the event sweep to complete.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Force a sweep of the folder.
+ GlodaMsgIndexer.indexFolder(messageInjection.getRealInjectionFolder(folder));
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+/**
+ * Verify that moving apparently gloda-indexed messages from a filthy folder or
+ * one that simply should not be gloda indexed does not result in the target
+ * messages having the gloda-id property on them. To avoid messing with too
+ * many invariants we do the 'folder should not be gloda indexed' case.
+ * Uh, and of course, the message should still get indexed once we clear the
+ * filthy gloda-id off of it given that it is moving from a folder that is not
+ * indexed to one that is indexed.
+ */
+async function test_filthy_moves_slash_move_from_unindexed_to_indexed() {
+ // - Inject.
+ // The source folder needs a flag so we don't index it.
+ let srcFolder = await messageInjection.makeEmptyFolder(null, [
+ Ci.nsMsgFolderFlags.Junk,
+ ]);
+ // The destination folder has to be something we want to index though.
+ let destFolder = await messageInjection.makeEmptyFolder();
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [srcFolder],
+ [{ count: 1 }]
+ );
+
+ // - Mark with a bogus gloda-id.
+ msgSet.getMsgHdr(0).setUint32Property("gloda-id", 9999);
+
+ // - Disable event driven indexing so we don't get interference from indexing.
+ configureGlodaIndexing({ event: false });
+
+ // - Move.
+ await messageInjection.moveMessages(msgSet, destFolder);
+
+ // - Verify the target has no gloda-id!
+ dump(`checking ${msgSet.getMsgHdr(0)}`);
+ Assert.equal(msgSet.getMsgHdr(0).getUint32Property("gloda-id"), 0);
+
+ // - Re-enable indexing and let the indexer run.
+ // We don't want to affect other tests.
+ configureGlodaIndexing({});
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+}
+
+function test_sanity_test_environment() {
+ Assert.ok(msgGen, "Sanity that msgGen is set.");
+  Assert.ok(scenarios, "Sanity that scenarios is set.");
+ Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+}
+
+var base_index_messages_tests = [
+ test_sanity_test_environment,
+ test_pending_commit_tracker_flushes_correctly,
+ test_pending_commit_causes_msgdb_commit,
+ test_indexing_sweep,
+ test_event_driven_indexing_does_not_mess_with_filthy_folders,
+
+ test_threading_direct_reply,
+ test_threading_missing_intermediary,
+ test_threading_siblings_missing_parent,
+ test_attachment_flag,
+ test_attributes_fundamental,
+ test_moved_message_attributes,
+ test_attributes_fundamental_from_disk,
+ test_attributes_explicit,
+ test_attributes_cant_query,
+
+ test_people_in_addressbook,
+
+ test_streamed_bodies_are_size_capped,
+
+ test_imap_add_unread_to_folder,
+ test_message_moving,
+
+ test_message_deletion,
+ test_moving_to_trash_marks_deletion,
+ test_folder_nuking_message_deletion,
+
+ test_sweep_indexing_does_not_reindex_event_indexed,
+
+ test_filthy_moves_slash_move_from_unindexed_to_indexed,
+
+ test_indexing_never_priority,
+ test_setting_indexing_priority_never_while_indexing,
+
+ test_folder_deletion_nested,
+];
diff --git a/comm/mailnews/db/gloda/test/unit/base_query_messages.js b/comm/mailnews/db/gloda/test/unit/base_query_messages.js
new file mode 100644
index 0000000000..02b8cceb1a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_query_messages.js
@@ -0,0 +1,729 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests our querying support. We build up a deterministic little
+ * 'world' of messages spread across multiple conversations, multiple folders
+ * and multiple authors. To verify expected negative results, in addition to
+ * the 'peoples' in our world clique, we also have 'outlier' contacts that do
+ * not communicate with the others (but are also spread across folders).
+ *
+ * This is broadly intended to test all of our query features and mechanisms
+ * (apart from our specialized search implementation, which is tested by
+ * test_search_messages.js), but is probably not the place to test specific
+ * edge-cases if they do not easily fit into the 'world' data set.
+ *
+ * I feel like having the 'world' mishmash as a data source may muddle things
+ * more than it should, but it is hard to deny the benefit of not having to
+ * define a bunch of message corpuses entirely specialized for each test.
+ */
+
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/**
+ * Whether we expect fulltext results. IMAP folders that are not offline
+ * shouldn't have their bodies indexed.
+ */
+var expectFulltextResults = true;
+
+/**
+ * Should we force our folders offline after we have indexed them once. We do
+ * this in the online_to_offline test variant.
+ */
+var goOffline = false;
+
+/* ===== Populate ===== */
+var world = {
+ phase: 0,
+
+ // A list of tuples of [name, email] of length NUM_AUTHORS.
+ peoples: null,
+ NUM_AUTHORS: 5,
+ // Maps each author (as defined by their email address) to the list of
+ // (synthetic) messages they have 'authored'.
+ authorGroups: {},
+
+ NUM_CONVERSATIONS: 3,
+ // The last message (so far) in each conversation.
+ lastMessagesInConvos: [],
+ // Maps the message-id of the root message in a conversation to the list of
+ // synthetic messages in the conversation.
+ conversationGroups: {},
+ // A list of lists of synthetic messages, organized by the conversation they
+ // belong to.
+ conversationLists: [],
+  // A list of gloda conversation ids, each corresponding to the entries in
+  // conversationLists.
+ glodaConversationIds: [],
+
+ NUM_FOLDERS: 2,
+ MESSAGES_PER_FOLDER: 11,
+ // A list of lists of synthetic messages, one list per folder.
+ folderClumps: [],
+ // A list of nsIMsgFolders, with each folder containing the messages in the
+ // corresponding list in folderClumps.
+ glodaFolders: [],
+
+ outlierAuthor: null,
+ outlierFriend: null,
+
+ // Messages authored by contacts in the "peoples" group.
+ peoplesMessages: [],
+ // Messages authored by outlierAuthor and outlierFriend.
+ outlierMessages: [],
+};
+
+/**
+ * Given a number, provide a unique term. This is for the benefit of the search
+ * logic. This entails using a unique prefix to avoid accidental collision
+ * with terms outside our control and then just generating unique character
+ * strings in a vaguely base-26 style. To avoid the porter stemmer causing odd
+ * things to happen we actually double every numerically driven character.
+ */
+function uniqueTermGenerator(aNum) {
+ let s = "uniq";
+ do {
+ let l = String.fromCharCode(97 + (aNum % 26));
+ s += l + l;
+ aNum = Math.floor(aNum / 26);
+ } while (aNum);
+ return s;
+}
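+// Editorial illustration, not part of the original test: assuming the doubling
+// scheme above, sample outputs would be:
+//   uniqueTermGenerator(0)  === "uniqaa"
+//   uniqueTermGenerator(1)  === "uniqbb"
+//   uniqueTermGenerator(26) === "uniqaabb"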
+
+var UNIQUE_OFFSET_CONV = 0;
+var UNIQUE_OFFSET_AUTHOR = 26;
+var UNIQUE_OFFSET_BODY = 0;
+var UNIQUE_OFFSET_SUBJECT = 26 * 26;
+var UNIQUE_OFFSET_ATTACHMENT = 26 * 26 * 26;
+
+/**
+ * Categorize a synthetic message by conversation/folder/people in the 'world'
+ * structure. This is then used by the test code to generate and verify query
+ * data.
+ *
+ * @param aSynthMessage The synthetic message.
+ */
+function categorizeMessage(aSynthMessage) {
+ // Lump by author.
+ let author = aSynthMessage.fromAddress;
+ if (!(author in world.authorGroups)) {
+ world.authorGroups[author] = [];
+ }
+ world.authorGroups[author].push(aSynthMessage);
+
+ // Lump by conversation, keying off of the originator's message id.
+ let originator = aSynthMessage;
+ while (originator.parent) {
+ originator = originator.parent;
+ }
+ if (!(originator.messageId in world.conversationGroups)) {
+ world.conversationGroups[originator.messageId] = [];
+ }
+ world.conversationGroups[originator.messageId].push(aSynthMessage);
+ world.conversationLists[aSynthMessage.iConvo].push(aSynthMessage);
+
+ // Folder lumping happens in a big glob.
+}
+
+/**
+ * Generate messages in a single folder, categorizing them as we go.
+ *
+ * Key message characteristics:
+ * - Whenever a 'peoples' sends a message, they send it to all 'peoples',
+ * including themselves.
+ */
+function generateFolderMessages() {
+ let messages = [],
+ smsg;
+
+ let iAuthor = 0;
+ for (let iMessage = 0; iMessage < world.MESSAGES_PER_FOLDER; iMessage++) {
+ let iConvo = iMessage % world.NUM_CONVERSATIONS;
+
+    // We need missing messages to create ghosts, so periodically add an extra
+    // unknown into the equation. We do this before the step below so that we
+    // don't hose up all the fancy body creation the next step does.
+ if (iMessage % 3 == 1) {
+ smsg = msgGen.makeMessage({ inReplyTo: smsg });
+ }
+
+ let convUniqueSubject = uniqueTermGenerator(
+ UNIQUE_OFFSET_SUBJECT + UNIQUE_OFFSET_CONV + iConvo
+ );
+ let convUniqueBody = uniqueTermGenerator(
+ UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_CONV + iConvo
+ );
+ let authorUniqueBody = uniqueTermGenerator(
+ UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_AUTHOR + iAuthor
+ );
+ let convUniqueAttachment = uniqueTermGenerator(
+ UNIQUE_OFFSET_ATTACHMENT + UNIQUE_OFFSET_CONV + iConvo
+ );
+ smsg = msgGen.makeMessage({
+ inReplyTo: world.lastMessagesInConvos[iConvo],
+      // Note that the reply logic will ignore our subject; luckily that does
+      // not matter (since it will just copy the subject).
+ subject: convUniqueSubject,
+ body: {
+ body: convUniqueBody + " " + authorUniqueBody,
+ },
+ attachments: [
+ {
+ filename: convUniqueAttachment + ".conv",
+ body: "content does not matter. only life matters.",
+ contentType: "application/x-test",
+ },
+ ],
+ });
+
+    // makeMessage is not exceedingly clever right now, so we need to
+    // overwrite From and To ourselves.
+ smsg.from = world.peoples[iAuthor];
+ iAuthor = (iAuthor + iConvo + 1) % world.NUM_AUTHORS;
+ // So, everyone is talking to everyone for this stuff.
+ smsg.to = world.peoples;
+ world.lastMessagesInConvos[iConvo] = smsg;
+ // Simplify categorizeMessage and glodaInfoStasher's life.
+ smsg.iConvo = iConvo;
+
+ categorizeMessage(smsg);
+ messages.push(smsg);
+ world.peoplesMessages.push(smsg);
+ }
+
+ smsg = msgGen.makeMessage();
+ smsg.from = world.outlierAuthor;
+ smsg.to = [world.outlierFriend];
+ // Do not lump it.
+ messages.push(smsg);
+ world.outlierMessages.push(smsg);
+
+ world.folderClumps.push(messages);
+
+ return new SyntheticMessageSet(messages);
+}
+
+/**
+ * To save ourselves some lookup trouble, pretend to be a verification
+ * function so that we get easy access to the gloda translations of the
+ * messages and can cram this info in various places.
+ */
+function glodaInfoStasher(aSynthMessage, aGlodaMessage) {
+ if (aSynthMessage.iConvo !== undefined) {
+ world.glodaConversationIds[aSynthMessage.iConvo] =
+ aGlodaMessage.conversation.id;
+ }
+ if (world.glodaFolders.length <= world.phase) {
+ world.glodaFolders.push(aGlodaMessage.folder);
+ }
+}
+
+// We override these for the IMAP tests.
+var pre_setup_populate_hook = function default_pre_setup_populate_hook() {};
+var post_setup_populate_hook = function default_post_setup_populate_hook() {};
+
+// First, we must populate our message store with delicious messages.
+async function setup_populate() {
+ world.glodaHolderCollection = Gloda.explicitCollection(
+ GlodaConstants.NOUN_MESSAGE,
+ []
+ );
+
+ world.peoples = msgGen.makeNamesAndAddresses(world.NUM_AUTHORS);
+ world.outlierAuthor = msgGen.makeNameAndAddress();
+ world.outlierFriend = msgGen.makeNameAndAddress();
+ // Set up the per-conversation values with blanks initially.
+ for (let iConvo = 0; iConvo < world.NUM_CONVERSATIONS; iConvo++) {
+ world.lastMessagesInConvos.push(null);
+ world.conversationLists.push([]);
+ world.glodaConversationIds.push(null);
+ }
+
+ let setOne = generateFolderMessages();
+ let folderOne = await messageInjection.makeEmptyFolder();
+ await messageInjection.addSetsToFolders([folderOne], [setOne]);
+ // If this is the online_to_offline variant (indicated by goOffline) we want
+ // to make the messages available offline. This should trigger an event
+ // driven re-indexing of the messages which should make the body available
+ // for fulltext queries.
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setOne]));
+ await messageInjection.makeFolderAndContentsOffline(folderOne);
+ }
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([setOne], { verifier: glodaInfoStasher })
+ );
+
+ world.phase++;
+ let setTwo = generateFolderMessages();
+ let folderTwo = await messageInjection.makeEmptyFolder();
+ await messageInjection.addSetsToFolders([folderTwo], [setTwo]);
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setTwo]));
+ await messageInjection.makeFolderAndContentsOffline(folderTwo);
+ }
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([setTwo], { verifier: glodaInfoStasher })
+ );
+}
+
+/* ===== Non-text queries ===== */
+
+/* === messages === */
+
+/**
+ * Takes a list of mutually exclusive queries and a list of the resulting
+ * collections and ensures that the collections from one query do not pass the
+ * query.test() method of one of the other queries. To restate, the queries
+ * must not have any overlapping results, or we will get angry without
+ * justification.
+ */
+function verify_nonMatches(aQueries, aCollections) {
+ for (let i = 0; i < aCollections.length; i++) {
+ let testQuery = aQueries[i];
+ let nonmatches = aCollections[(i + 1) % aCollections.length].items;
+
+ for (let item of nonmatches) {
+ if (testQuery.test(item)) {
+ dump("item: " + JSON.stringify(item) + "\n");
+ dump("constraints: " + JSON.stringify(testQuery._constraints) + "\n");
+ do_throw(
+ "Something should not match query.test(), but it does: " + item
+ );
+ }
+ }
+ }
+}
+
+var ts_convNum = 0;
+/* Preserved state for the non-match testing performed by
+ * test_query_messages_by_conversation_nonmatches.
+ */
+var ts_convQueries = [];
+var ts_convCollections = [];
+/**
+ * Query conversations by gloda conversation-id, saving the queries and
+ * resulting collections in ts_convQueries and ts_convCollections for the
+ * use of test_query_messages_by_conversation_nonmatches who verifies the
+ * query.test() logic doesn't match on things it should not match on.
+ *
+ * @tests gloda.noun.message.attr.conversation
+ * @tests gloda.datastore.sqlgen.kConstraintIn
+ */
+async function test_query_messages_by_conversation() {
+ let convNum = ts_convNum++;
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.conversation(world.glodaConversationIds[convNum]);
+
+ ts_convQueries.push(query);
+ ts_convCollections.push(
+ await queryExpect(query, world.conversationLists[convNum])
+ );
+}
+
+/**
+ * @tests gloda.query.test.kConstraintIn
+ */
+function test_query_messages_by_conversation_nonmatches() {
+ verify_nonMatches(ts_convQueries, ts_convCollections);
+}
+
+var ts_folderNum = 0;
+var ts_folderQueries = [];
+var ts_folderCollections = [];
+/**
+ * @tests gloda.noun.message.attr.folder
+ * @tests gloda.datastore.sqlgen.kConstraintIn
+ */
+async function test_query_messages_by_folder() {
+ let folderNum = ts_folderNum++;
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.folder(world.glodaFolders[folderNum]);
+
+ ts_folderQueries.push(query);
+ ts_folderCollections.push(
+ await queryExpect(query, world.folderClumps[folderNum])
+ );
+}
+
+/**
+ * @tests gloda.query.test.kConstraintIn
+ */
+function test_query_messages_by_folder_nonmatches() {
+ verify_nonMatches(ts_folderQueries, ts_folderCollections);
+}
+
+/**
+ * @tests Gloda.ns.getMessageCollectionForHeader()
+ */
+async function test_get_message_for_header() {
+ // Pick an arbitrary message.
+ let glodaMessage = ts_convCollections[1].items[0];
+ // Find the synthetic message that matches (ordering must not be assumed).
+ let synthMessage = world.conversationLists[1].find(
+ sm => sm.messageId == glodaMessage.headerMessageID
+ );
+ await queryExpect(
+ {
+ queryFunc: Gloda.getMessageCollectionForHeader,
+ queryThis: Gloda,
+ args: [glodaMessage.folderMessage],
+ nounId: GlodaConstants.NOUN_MESSAGE,
+ },
+ [synthMessage]
+ );
+}
+
+/**
+ * @tests Gloda.ns.getMessageCollectionForHeaders()
+ */
+async function test_get_messages_for_headers() {
+ let messageCollection = ts_convCollections[0];
+ let headers = messageCollection.items.map(m => m.folderMessage);
+ await queryExpect(
+ {
+ queryFunc: Gloda.getMessageCollectionForHeaders,
+ queryThis: Gloda,
+ args: [headers],
+ nounId: GlodaConstants.NOUN_MESSAGE,
+ },
+ world.conversationLists[0]
+ );
+}
+
+// At this point we go run the identity and contact tests for side-effects.
+
+var ts_messageIdentityQueries = [];
+var ts_messageIdentityCollections = [];
+/**
+ * @tests gloda.noun.message.attr.involves
+ * @tests gloda.datastore.sqlgen.kConstraintIn
+ */
+async function test_query_messages_by_identity_peoples() {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.involves(peoplesIdentityCollection.items[0]);
+
+ ts_messageIdentityQueries.push(query);
+ ts_messageIdentityCollections.push(
+ await queryExpect(query, world.peoplesMessages)
+ );
+}
+
+/**
+ * @tests gloda.noun.message.attr.involves
+ */
+async function test_query_messages_by_identity_outlier() {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.involves(outlierIdentityCollection.items[0]);
+  // This also tests our ability to have two intersecting constraints. Hooray!
+ query.involves(outlierIdentityCollection.items[1]);
+
+ ts_messageIdentityQueries.push(query);
+ ts_messageIdentityCollections.push(
+ await queryExpect(query, world.outlierMessages)
+ );
+}
+
+/**
+ * @tests gloda.query.test.kConstraintIn
+ */
+function test_query_messages_by_identity_nonmatches() {
+ verify_nonMatches(ts_messageIdentityQueries, ts_messageIdentityCollections);
+}
+
+/* exported test_query_messages_by_contact */
+function test_query_messages_by_contact() {
+ // IOU
+}
+
+var ts_messagesDateQuery;
+/**
+ * @tests gloda.noun.message.attr.date
+ * @tests gloda.datastore.sqlgen.kConstraintRanges
+ */
+async function test_query_messages_by_date() {
+ ts_messagesDateQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ // We are clearly relying on knowing the generation sequence here,
+ // fuggedaboutit.
+ ts_messagesDateQuery.dateRange([
+ world.peoplesMessages[1].date,
+ world.peoplesMessages[2].date,
+ ]);
+ await queryExpect(ts_messagesDateQuery, world.peoplesMessages.slice(1, 3));
+}
+
+/**
+ * @tests gloda.query.test.kConstraintRanges
+ */
+function test_query_messages_by_date_nonmatches() {
+ if (
+ ts_messagesDateQuery.test(world.peoplesMessages[0]) ||
+ ts_messagesDateQuery.test(world.peoplesMessages[3])
+ ) {
+ do_throw("The date testing mechanism is busted.");
+ }
+}
+
+/* === contacts === */
+/* exported test_query_contacts_by_popularity */
+function test_query_contacts_by_popularity() {
+ // IOU
+}
+
+/* === identities === */
+
+/* ===== Text-based queries ===== */
+
+/* === conversations === */
+
+/* exported test_query_conversations_by_subject_text */
+function test_query_conversations_by_subject_text() {}
+
+/* === messages === */
+
+/**
+ * Test subject searching using the conversation unique subject term.
+ *
+ * @tests gloda.noun.message.attr.subjectMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_subject_text() {
+ // We only need to use one conversation.
+ let convNum = 0;
+
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ let convSubjectTerm = uniqueTermGenerator(
+ UNIQUE_OFFSET_SUBJECT + UNIQUE_OFFSET_CONV + convNum
+ );
+ query.subjectMatches(convSubjectTerm);
+ await queryExpect(query, world.conversationLists[convNum]);
+}
+
+/**
+ * Test body searching using the conversation unique body term.
+ *
+ * @tests gloda.noun.message.attr.bodyMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_body_text() {
+ // We only need to use one conversation.
+ let convNum = 0;
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ let convBodyTerm = uniqueTermGenerator(
+ UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_CONV + convNum
+ );
+ query.bodyMatches(convBodyTerm);
+ await queryExpect(
+ query,
+ expectFulltextResults ? world.conversationLists[convNum] : []
+ );
+}
+
+/**
+ * Test attachment name searching using the conversation unique attachment term.
+ *
+ * @tests gloda.noun.message.attr.attachmentNamesMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_attachment_names() {
+ let convNum = 0;
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ let convUniqueAttachment = uniqueTermGenerator(
+ UNIQUE_OFFSET_ATTACHMENT + UNIQUE_OFFSET_CONV + convNum
+ );
+ query.attachmentNamesMatch(convUniqueAttachment);
+ await queryExpect(
+ query,
+ expectFulltextResults ? world.conversationLists[convNum] : []
+ );
+}
+
+/**
+ * Test author name fulltext searching using an arbitrary author.
+ *
+ * @tests gloda.noun.message.attr.authorMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_authorMatches_name() {
+ let [authorName, authorMail] = world.peoples[0];
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.authorMatches(authorName);
+ await queryExpect(query, world.authorGroups[authorMail]);
+}
+
+/**
+ * Test author mail address fulltext searching using an arbitrary author.
+ *
+ * @tests gloda.noun.message.attr.authorMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_authorMatches_email() {
+ let [, authorMail] = world.peoples[0];
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.authorMatches(authorMail);
+ await queryExpect(query, world.authorGroups[authorMail]);
+}
+
+/**
+ * Test recipient name fulltext searching using an arbitrary recipient. Since
+ * all 'peoples' messages are sent to all of them, any choice from peoples
+ * gets us all 'peoplesMessages'.
+ *
+ * @tests gloda.noun.message.attr.recipientsMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_recipients_name() {
+ let name = world.peoples[0][0];
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.recipientsMatch(name);
+ await queryExpect(query, world.peoplesMessages);
+}
+
+/**
+ * Test recipient mail fulltext searching using an arbitrary recipient. Since
+ * all 'peoples' messages are sent to all of them, any choice from peoples
+ * gets us all 'peoplesMessages'.
+ *
+ * @tests gloda.noun.message.attr.recipientsMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_recipients_email() {
+ let [, mail] = world.peoples[0];
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.recipientsMatch(mail);
+ await queryExpect(query, world.peoplesMessages);
+}
+
+/* === contacts === */
+
+var contactLikeQuery;
+/**
+ * @tests gloda.noun.contact.attr.name
+ * @tests gloda.datastore.sqlgen.kConstraintStringLike
+ */
+async function test_query_contacts_by_name() {
+ // Let's use like... we need to test that.
+ contactLikeQuery = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+ let personName = world.peoples[0][0];
+ // Chop off the first and last letter... this isn't the most edge-case
+ // handling way to roll, but LOOK OVER THERE? IS THAT ELVIS?
+ let personNameSubstring = personName.substring(1, personName.length - 1);
+ contactLikeQuery.nameLike(
+ contactLikeQuery.WILDCARD,
+ personNameSubstring,
+ contactLikeQuery.WILDCARD
+ );
+
+ await queryExpect(contactLikeQuery, [personName]);
+}
+
+/**
+ * @tests gloda.query.test.kConstraintStringLike
+ */
+function test_query_contacts_by_name_nonmatch() {
+ let otherContact = outlierIdentityCollection.items[0].contact;
+ if (contactLikeQuery.test(otherContact)) {
+ do_throw("The string LIKE mechanism as applied to contacts does not work.");
+ }
+}
+
+/* === identities === */
+
+var peoplesIdentityQuery;
+var peoplesIdentityCollection;
+async function test_query_identities_for_peoples() {
+ peoplesIdentityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ peoplesIdentityQuery.kind("email");
+ let peopleAddrs = world.peoples.map(nameAndAddr => nameAndAddr[1]);
+ peoplesIdentityQuery.value.apply(peoplesIdentityQuery, peopleAddrs);
+ peoplesIdentityCollection = await queryExpect(
+ peoplesIdentityQuery,
+ peopleAddrs
+ );
+}
+
+var outlierIdentityQuery;
+var outlierIdentityCollection;
+async function test_query_identities_for_outliers() {
+ outlierIdentityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ outlierIdentityQuery.kind("email");
+ let outlierAddrs = [world.outlierAuthor[1], world.outlierFriend[1]];
+ outlierIdentityQuery.value.apply(outlierIdentityQuery, outlierAddrs);
+ outlierIdentityCollection = await queryExpect(
+ outlierIdentityQuery,
+ outlierAddrs
+ );
+}
+
+function test_query_identities_by_kind_and_value_nonmatches() {
+ verify_nonMatches(
+ [peoplesIdentityQuery, outlierIdentityQuery],
+ [peoplesIdentityCollection, outlierIdentityCollection]
+ );
+}
+
+function test_sanity_test_environment() {
+ Assert.ok(msgGen, "Sanity that msgGen is set.");
+ Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+}
+
+var base_query_messages_tests = [
+ test_sanity_test_environment,
+ function pre_setup_populate() {
+ pre_setup_populate_hook();
+ },
+ setup_populate,
+ function post_setup_populate() {
+ post_setup_populate_hook();
+ },
+ test_query_messages_by_conversation,
+ test_query_messages_by_conversation,
+ test_query_messages_by_conversation_nonmatches,
+ test_query_messages_by_folder,
+ test_query_messages_by_folder,
+ test_query_messages_by_folder_nonmatches,
+ test_get_message_for_header,
+ test_get_messages_for_headers,
+ // Need to do the identity and contact lookups so we can have their results
+ // for the other message-related queries.
+ test_query_identities_for_peoples,
+ test_query_identities_for_outliers,
+ test_query_identities_by_kind_and_value_nonmatches,
+ // Back to messages!
+ test_query_messages_by_identity_peoples,
+ test_query_messages_by_identity_outlier,
+ test_query_messages_by_identity_nonmatches,
+ test_query_messages_by_date,
+ test_query_messages_by_date_nonmatches,
+ // Fulltext
+ test_query_messages_by_subject_text,
+ test_query_messages_by_body_text,
+ test_query_messages_by_attachment_names,
+ test_query_messages_by_authorMatches_name,
+ test_query_messages_by_authorMatches_email,
+ test_query_messages_by_recipients_name,
+ test_query_messages_by_recipients_email,
+ // Like
+ test_query_contacts_by_name,
+ test_query_contacts_by_name_nonmatch,
+];
diff --git a/comm/mailnews/db/gloda/test/unit/head_gloda.js b/comm/mailnews/db/gloda/test/unit/head_gloda.js
new file mode 100644
index 0000000000..fb8edbd24e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/head_gloda.js
@@ -0,0 +1,19 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { mailTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MailTestUtils.jsm"
+);
+
+// Ensure the profile directory is set up
+do_get_profile();
+
+var gDEPTH = "../../../../../";
+
+registerCleanupFunction(function () {
+ load(gDEPTH + "mailnews/resources/mailShutdown.js");
+});
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
new file mode 100644
index 0000000000..e8234f1a97
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
@@ -0,0 +1,431 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["queryExpect", "sqlExpectCount", "sqlRun"];
+
+/*
+ * This file provides gloda query helpers for the test infrastructure.
+ */
+
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.queryHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+var _defaultExpectationExtractors = {};
+_defaultExpectationExtractors[GlodaConstants.NOUN_MESSAGE] = [
+ function expectExtract_message_gloda(aGlodaMessage) {
+ return aGlodaMessage.headerMessageID;
+ },
+ function expectExtract_message_synth(aSynthMessage) {
+ return aSynthMessage.messageId;
+ },
+];
+_defaultExpectationExtractors[GlodaConstants.NOUN_CONTACT] = [
+ function expectExtract_contact_gloda(aGlodaContact) {
+ return aGlodaContact.name;
+ },
+ function expectExtract_contact_name(aName) {
+ return aName;
+ },
+];
+_defaultExpectationExtractors[GlodaConstants.NOUN_IDENTITY] = [
+ function expectExtract_identity_gloda(aGlodaIdentity) {
+ return aGlodaIdentity.value;
+ },
+ function expectExtract_identity_address(aAddress) {
+ return aAddress;
+ },
+];
+
+function expectExtract_default_toString(aThing) {
+ return aThing.toString();
+}
+
+/**
+ * @see queryExpect for info on what we do.
+ */
+class QueryExpectationListener {
+ constructor(
+ aExpectedSet,
+ aGlodaExtractor,
+ aOrderVerifier,
+ aCallerStackFrame
+ ) {
+ this.expectedSet = aExpectedSet;
+ this.glodaExtractor = aGlodaExtractor;
+ this.orderVerifier = aOrderVerifier;
+ this.completed = false;
+ this.callerStackFrame = aCallerStackFrame;
+ // Track our current 'index' in the results for the (optional) order verifier,
+ // but also so we can provide slightly more useful debug output.
+ this.nextIndex = 0;
+
+ this._promise = new Promise((resolve, reject) => {
+ this._resolve = resolve;
+ this._reject = reject;
+ });
+ }
+ onItemsAdded(aItems, aCollection) {
+ log.debug("QueryExpectationListener onItemsAdded received.");
+ for (let item of aItems) {
+ let glodaStringRep;
+ try {
+ glodaStringRep = this.glodaExtractor(item);
+ } catch (ex) {
+ this._reject(
+ new Error(
+ "Gloda extractor threw during query expectation.\n" +
+ "Item:\n" +
+ item +
+ "\nException:\n" +
+ ex
+ )
+ );
+ return; // We don't have to continue for more checks.
+ }
+
+ // Make sure we were expecting this guy.
+ if (glodaStringRep in this.expectedSet) {
+ delete this.expectedSet[glodaStringRep];
+ } else {
+ this._reject(
+ new Error(
+ "Query returned unexpected result!\n" +
+ "Item:\n" +
+ item +
+ "\nExpected set:\n" +
+ this.expectedSet +
+ "\nCaller:\n" +
+ this.callerStackFrame
+ )
+ );
+ return; // We don't have to continue for more checks.
+ }
+
+ if (this.orderVerifier) {
+ try {
+ this.orderVerifier(this.nextIndex, item, aCollection);
+ } catch (ex) {
+ // If the order was wrong, we could probably go for an output of what
+ // we actually got...
+ dump("Order Problem detected. Dump of data:\n");
+ for (let [iThing, thing] of aItems.entries()) {
+ dump(
+ iThing +
+ ": " +
+ thing +
+ (aCollection.stashedColumns
+ ? ". " + aCollection.stashedColumns[thing.id].join(", ")
+ : "") +
+ "\n"
+ );
+ }
+ this._reject(ex);
+ return; // We don't have to continue for more checks.
+ }
+ }
+ this.nextIndex++;
+
+ // Make sure the query's test method agrees with the database about this.
+ if (!aCollection.query.test(item)) {
+ this._reject(
+ new Error(
+ "Query test returned false when it should have been true on.\n" +
+ "Extracted:\n" +
+ glodaStringRep +
+ "\nItem:\n" +
+ item
+ )
+ );
+ }
+ }
+ }
+ onItemsModified(aItems, aCollection) {
+ log.debug(
+ "QueryExpectationListener onItemsModified received. Nothing done."
+ );
+ }
+ onItemsRemoved(aItems, aCollection) {
+ log.debug(
+ "QueryExpectationListener onItemsRemoved received. Nothing done."
+ );
+ }
+ onQueryCompleted(aCollection) {
+ log.debug("QueryExpectationListener onQueryCompleted received.");
+ // We may continue to match newly added items if we leave our query as it
+ // is, so let's become explicit to avoid related troubles.
+ aCollection.becomeExplicit();
+
+ // `expectedSet` should now be empty.
+ for (let key in this.expectedSet) {
+ let value = this.expectedSet[key];
+ this._reject(
+ new Error(
+ "Query should have returned:\n" +
+ key +
+ " (" +
+ value +
+ ").\n" +
+ "But " +
+ this.nextIndex +
+ " was seen."
+ )
+ );
+ return; // We don't have to continue for more checks.
+ }
+
+ // If no error is thrown then we're fine here.
+ this._resolve();
+ }
+
+ get promise() {
+ return this._promise;
+ }
+}
+
+/**
+ * Execute the given query, verifying that the result set contains exactly the
+ * contents of the expected set; no more, no less. Since we expect that the
+ * query will result in gloda objects, but your expectations will not be posed
+ * in terms of gloda objects (though they could be), we rely on extractor
+ * functions to take the gloda result objects and the expected result objects
+ * into the same string.
+ * If you don't provide extractor functions, we will use our defaults (based on
+ * the query noun type) if available, or assume that calling toString is
+ * sufficient.
+ *
+ * @param aQuery Either a query to execute, or a dict with the following keys:
+ *   - queryFunc: The function to call that returns a collection.
+ *   - queryThis: The 'this' to use for the invocation of queryFunc.
+ *   - args: A list (possibly empty) of arguments to precede the traditional
+ * arguments to query.getCollection.
+ * - nounId: The (numeric) noun id of the noun type expected to be returned.
+ * @param aExpectedSet The list of expected results from the query where each
+ * item is suitable for extraction using aExpectedExtractor. We have a soft
+ * spot for SyntheticMessageSets and automatically unbox them.
+ * @param aGlodaExtractor The extractor function to take an instance of the
+ * gloda representation and return a string for comparison/equivalence
+ * against that returned by the expected extractor (against the input
+ * instance in aExpectedSet.) The value returned must be unique for all
+ * of the expected gloda representations of the expected set. If omitted,
+ * the default extractor for the gloda noun type is used. If no default
+ * extractor exists, toString is called on the item.
+ * @param aExpectedExtractor The extractor function to take an instance from the
+ * values in the aExpectedSet and return a string for comparison/equivalence
+ * against that returned by the gloda extractor. The value returned must
+ * be unique for all of the values in the expected set. If omitted, the
+ * default extractor for the presumed input type based on the gloda noun
+ * type used for the query is used, failing over to toString.
+ * @param aOrderVerifier Optional function to verify the order the results are
+ * received in. Function signature should be of the form (aZeroBasedIndex,
+ * aItem, aCollectionResultIsFor).
+ */
+async function queryExpect(
+ aQuery,
+ aExpectedSet,
+ aGlodaExtractor,
+ aExpectedExtractor,
+ aOrderVerifier
+) {
+ if (aQuery.test) {
+ aQuery = {
+ queryFunc: aQuery.getCollection,
+ queryThis: aQuery,
+ args: [],
+ nounId: aQuery._nounDef.id,
+ };
+ }
+
+ if ("synMessages" in aExpectedSet) {
+ aExpectedSet = aExpectedSet.synMessages;
+ }
+
+ // - set extractor functions to defaults if omitted
+ if (aGlodaExtractor == null) {
+ if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
+ aGlodaExtractor = _defaultExpectationExtractors[aQuery.nounId][0];
+ } else {
+ aGlodaExtractor = expectExtract_default_toString;
+ }
+ }
+ if (aExpectedExtractor == null) {
+ if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
+ aExpectedExtractor = _defaultExpectationExtractors[aQuery.nounId][1];
+ } else {
+ aExpectedExtractor = expectExtract_default_toString;
+ }
+ }
+
+ // - build the expected set
+ let expectedSet = {};
+ for (let item of aExpectedSet) {
+ try {
+ expectedSet[aExpectedExtractor(item)] = item;
+ } catch (ex) {
+ throw new Error(
+ "Expected extractor threw during query expectation for item:\n" +
+ item +
+ "\nException:\n" +
+ ex
+ );
+ }
+ }
+
+ // - create the listener...
+ let listener = new QueryExpectationListener(
+ expectedSet,
+ aGlodaExtractor,
+ aOrderVerifier,
+ Components.stack.caller
+ );
+ aQuery.args.push(listener);
+ let queryValue = aQuery.queryFunc.apply(aQuery.queryThis, aQuery.args);
+ // Wait for the QueryListener to finish.
+ await listener.promise;
+ return queryValue;
+}
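+// Usage sketch (editorial, not part of the original helper). The common case
+// passes a gloda query object directly; the message variable below is
+// hypothetical:
+//   let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+//   query.headerMessageID(someSyntheticMessage.messageId);
+//   let collection = await queryExpect(query, [someSyntheticMessage]);
+// The dict form documented above is for callers such as
+// Gloda.getMessageCollectionForHeader that are not query objects themselves.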
+
+/**
+ * Asynchronously run a SQL statement against the gloda database. This can grow
+ * binding logic and data returning as needed.
+ *
+ * We run the statement asynchronously to get a consistent view of the database.
+ */
+async function sqlRun(sql) {
+ let conn = GlodaDatastore.asyncConnection;
+ let stmt = conn.createAsyncStatement(sql);
+ let rows = null;
+
+ let promiseResolve;
+ let promiseReject;
+ let promise = new Promise((resolve, reject) => {
+ promiseResolve = resolve;
+ promiseReject = reject;
+ });
+ // Running SQL.
+ stmt.executeAsync({
+ handleResult(aResultSet) {
+ if (!rows) {
+ rows = [];
+ }
+ let row;
+ while ((row = aResultSet.getNextRow())) {
+ rows.push(row);
+ }
+ },
+ handleError(aError) {
+ promiseReject(
+ new Error("SQL error!\nResult:\n" + aError + "\nSQL:\n" + sql)
+ );
+ },
+ handleCompletion() {
+ promiseResolve(rows);
+ },
+ });
+ stmt.finalize();
+ return promise;
+}
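+// Usage sketch (editorial, not part of the original helper): rows is null when
+// the statement produced no result rows, otherwise an array of mozIStorageRow
+// objects.
+//   let rows = await sqlRun("SELECT COUNT(*) FROM folderLocations");
+//   let count = rows ? rows[0].getInt64(0) : 0;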
+
+/**
+ * Run an (async) SQL statement against the gloda database. The statement
+ * should be a SELECT COUNT; we check the count against aExpectedCount.
+ * Any additional arguments are positionally bound to the statement.
+ *
+ * We run the statement asynchronously to get a consistent view of the database.
+ */
+async function sqlExpectCount(aExpectedCount, aSQLString, ...params) {
+ let conn = GlodaDatastore.asyncConnection;
+ let stmt = conn.createStatement(aSQLString);
+
+ for (let iArg = 0; iArg < params.length; iArg++) {
+ GlodaDatastore._bindVariant(stmt, iArg, params[iArg]);
+ }
+
+ let desc = [aSQLString, ...params];
+ // Running SQL count.
+ let listener = new SqlExpectationListener(
+ aExpectedCount,
+ desc,
+ Components.stack.caller
+ );
+ stmt.executeAsync(listener);
+ // We don't need the statement anymore.
+ stmt.finalize();
+
+ await listener.promise;
+}
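+// Usage sketch (editorial; mirrors calls elsewhere in these tests). Positional
+// parameters bind to the '?' placeholders in order; the folder object below is
+// hypothetical:
+//   await sqlExpectCount(
+//     1,
+//     "SELECT COUNT(*) FROM folderLocations WHERE id = ?",
+//     someGlodaFolder.id
+//   );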
+
+class SqlExpectationListener {
+ constructor(aExpectedCount, aDesc, aCallerStackFrame) {
+ this.actualCount = null;
+ this.expectedCount = aExpectedCount;
+ this.sqlDesc = aDesc;
+ this.callerStackFrame = aCallerStackFrame;
+
+ this._promise = new Promise((resolve, reject) => {
+ this._resolve = resolve;
+ this._reject = reject;
+ });
+ }
+ handleResult(aResultSet) {
+ let row = aResultSet.getNextRow();
+ if (!row) {
+ this._reject(
+ new Error(
+ "No result row returned from caller:\n" +
+ this.callerStackFrame +
+ "\nSQL:\n" +
+ this.sqlDesc
+ )
+ );
+ return; // We don't have to continue for more checks.
+ }
+ this.actualCount = row.getInt64(0);
+ }
+
+ handleError(aError) {
+ this._reject(
+ new Error(
+ "SQL error from caller:\n" +
+ this.callerStackFrame +
+ "\nResult:\n" +
+ aError +
+ "\nSQL:\n" +
+ this.sqlDesc
+ )
+ );
+ }
+
+ handleCompletion(aReason) {
+ if (this.actualCount != this.expectedCount) {
+ this._reject(
+ new Error(
+ "Actual count of " +
+ this.actualCount +
+ "does not match expected count of:\n" +
+ this.expectedCount +
+ "\nFrom caller:" +
+ this.callerStackFrame +
+ "\nSQL:\n" +
+ this.sqlDesc
+ )
+ );
+ return; // We don't have to continue for more checks.
+ }
+ this._resolve();
+ }
+
+ get promise() {
+ return this._promise;
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
new file mode 100644
index 0000000000..a4c092400b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
@@ -0,0 +1,847 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides gloda testing infrastructure.
+ *
+ * A few words about how tests should expect to interact with indexing:
+ *
+ * By default, we enable only event-driven indexing with an infinite work queue
+ * length. This means that all messages will be queued for indexing as they
+ * are added or modified. You should await |waitForGlodaIndexer| to wait
+ * until the indexer completes. If you want to assert that certain messages
+ * will have been indexed during that pass, you can pass them as arguments to
+ * |assertExpectedMessagesIndexed|.
+ * There is no need to tell us to expect the messages to be indexed prior to the
+ * waiting as long as nothing spins the event loop after you perform the action
+ * that triggers indexing. None of our existing xpcshell tests do this, but it
+ * is part of the mozmill idiom for its waiting mechanism, so be sure to not
+ * perform a mozmill wait without first telling us to expect the messages.
+ */
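+// A minimal sketch of the idiom described above (editorial; folder count and
+// message-set shape are illustrative, messageInjection comes from the
+// MessageInjection test helpers):
+//   let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [{ count: 1 }]);
+//   await waitForGlodaIndexer();
+//   Assert.ok(...assertExpectedMessagesIndexed([msgSet]));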
+
+const EXPORTED_SYMBOLS = [
+ "assertExpectedMessagesIndexed",
+ "glodaTestHelperInitialize",
+ "nukeGlodaCachesAndCollections",
+ "prepareIndexerForTesting",
+ "waitForGlodaIndexer",
+];
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.testHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+var indexMessageState;
+
+/**
+ * Create a 'me' identity of "me@localhost" for the benefit of Gloda. At the
+ * time of this writing, Gloda only initializes Gloda.myIdentities and
+ * Gloda.myContact at startup with no event-driven updates. As such, this
+ * function needs to be called prior to gloda startup.
+ */
+function createMeIdentity() {
+  let identity = MailServices.accounts.createIdentity();
+ identity.email = "me@localhost";
+ identity.fullName = "Me";
+}
+// And run it now.
+createMeIdentity();
+
+// Set the gloda prefs.
+// "yes" to indexing.
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// "no" to a sweep we don't control.
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
+var ENVIRON_MAPPINGS = [
+ {
+ envVar: "GLODA_DATASTORE_EXPLAIN_TO_PATH",
+ prefName: "mailnews.database.global.datastore.explainToPath",
+ },
+];
+
+// Propagate environment variables to prefs as appropriate:
+for (let { envVar, prefName } of ENVIRON_MAPPINGS) {
+ if (Services.env.exists(envVar)) {
+ Services.prefs.setCharPref(prefName, Services.env.get(envVar));
+ }
+}
+
+/**
+ * Side note:
+ * Keep them in the global scope so that a Cu.forceGC() call won't purge them.
+ */
+var collectionListener;
+
+/**
+ * Registers MessageInjection listeners and Gloda listeners for our tests.
+ *
+ * @param {MessageInjection} messageInjection Instance of MessageInjection
+ * to register Events to.
+ */
+function glodaTestHelperInitialize(messageInjection) {
+ // Initialize the message state if we are dealing with messages. At some
+ // point we probably want to just completely generalize the indexing state.
+ // That point is likely when our testing infrastructure needs the support
+ // provided by `indexMessageState` for things other than messages.
+ indexMessageState = new IndexMessageState();
+
+ collectionListener = new GlodaCollectionListener();
+ new TestAttributeProvider();
+ new MsgsClassifiedListener();
+
+ // Add a hook that makes folders not filthy when we first see them.
+ messageInjection.registerMessageInjectionListener({
+ /**
+ * By default all folders start out filthy. This is great in the real world
+ * but I went and wrote all the unit tests without entirely thinking about
+ * how this affected said unit tests. So we add a listener so that we can
+ * force the folders to be clean.
+ * This is okay and safe because messageInjection always creates the folders
+ * without any messages in them.
+ */
+ onRealFolderCreated(aRealFolder) {
+ log.debug(
+ `onRealFolderCreated through MessageInjection received. ` +
+ `Make folder: ${aRealFolder.name} clean for Gloda.`
+ );
+ let glodaFolder = Gloda.getFolderForFolder(aRealFolder);
+ glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderClean);
+ },
+
+ /**
+ * Make waitForGlodaIndexer know that it should wait for a msgsClassified
+ * event whenever messages have been injected, at least if event-driven
+ * indexing is enabled.
+ */
+ onInjectingMessages() {
+ log.debug(
+ "onInjectingMessages through MessageInjection received. Pushing to intrestestingEvents."
+ );
+ indexMessageState.interestingEvents.push("msgsClassified");
+ },
+
+ /**
+ * This basically translates to "we are triggering an IMAP move" and has
+ * the ramification that we should expect a msgsClassified event because
+ * the destination will see the header get added at some point.
+ */
+ onMovingMessagesWithoutDestHeaders() {
+ log.debug(
+ "onMovingMessagesWithoutDestHeaders through MessageInjection received. Pushing to intrestestingEvents."
+ );
+ indexMessageState.interestingEvents.push("msgsClassified");
+ },
+ });
+ log.debug("glodaTestHelperInitialize finished.");
+}
+
+class IndexMessageState {
+ data = new GlodaIndexerData();
+
+ constructor() {
+ prepareIndexerForTesting();
+ // Continue the preparing by assigning the hook recover and hook cleanup.
+ GlodaIndexer._unitTestHookRecover = this._testHookRecover;
+ GlodaIndexer._unitTestHookCleanup = this._testHookCleanup;
+ }
+
+ resetData() {
+ this.data = new GlodaIndexerData();
+ }
+
+ // The synthetic message sets passed in to |assertExpectedMessagesIndexed|.
+ synMessageSets = [];
+ // The user-specified accumulate-style verification function.
+ verifier() {
+ return this.data.data.verifier;
+ }
+ // Should we augment the synthetic sets with gloda message info?
+ augmentSynSets() {
+ return this.data.data.augment;
+ }
+ deletionSynSets() {
+ return this.data.data.deleted;
+ }
+
+ // Expected value of |_workerRecoveredCount| at assertion time.
+ expectedWorkerRecoveredCount() {
+ return this.data.data.recovered;
+ }
+ // Expected value of |_workerFailedToRecoverCount| at assertion time.
+ expectedFailedToRecoverCount() {
+ return this.data.data.failedToRecover;
+ }
+ // Expected value of |_workerCleanedUpCount| at assertion time.
+ expectedCleanedUpCount() {
+ return this.data.data.cleanedUp;
+ }
+ // Expected value of |_workerHadNoCleanUpCount| at assertion time.
+ expectedHadNoCleanUpCount() {
+ return this.data.data.hadNoCleanUp;
+ }
+ /**
+ * The number of messages that were fully (re)indexed using
+ * Gloda.grokNounItem.
+ */
+ _numFullIndexed = 0;
+ // Expected value of |_numFullIndexed| at assertion time.
+ expectedNumFullIndexed() {
+ return this.data.data.fullyIndexed;
+ }
+
+ // The number of times a worker had a recover helper and it recovered.
+ _workerRecoveredCount = 0;
+ // The number of times a worker had a recover helper and it did not recover.
+ _workerFailedToRecoverCount = 0;
+ // The number of times a worker had a cleanup helper and it cleaned up.
+ _workerCleanedUpCount = 0;
+ // The number of times a worker had no cleanup helper but there was a cleanup.
+ _workerHadNoCleanUpCount = 0;
+
+ /**
+   * Beware: |this| scoping for this class is lost where _testHookRecover is used.
+ *
+ * @param aRecoverResult
+ * @param aOriginEx
+ * @param aActiveJob
+ * @param aCallbackHandle
+ */
+ _testHookRecover(aRecoverResult, aOriginEx, aActiveJob, aCallbackHandle) {
+ log.debug(
+ "indexer recovery hook fired" +
+ "\nrecover result:\n" +
+ aRecoverResult +
+ "\noriginating exception:\n" +
+ aOriginEx +
+ "\nactive job:\n" +
+ aActiveJob +
+ "\ncallbackHandle:\n" +
+ indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
+ );
+ if (aRecoverResult) {
+ indexMessageState._workerRecoveredCount++;
+ } else {
+ indexMessageState._workerFailedToRecoverCount++;
+ }
+ }
+
+ /**
+   * Beware: the `this` scoping of this class is lost where _testHookCleanup is used.
+ *
+ * @param aHadCleanupFunc
+ * @param aOriginEx
+ * @param aActiveJob
+ * @param aCallbackHandle
+ */
+ _testHookCleanup(aHadCleanupFunc, aOriginEx, aActiveJob, aCallbackHandle) {
+ log.debug(
+ "indexer cleanup hook fired" +
+ "\nhad cleanup?\n" +
+ aHadCleanupFunc +
+ "\noriginating exception:\n" +
+ aOriginEx +
+ "\nactive job:\n" +
+ aActiveJob +
+ "\ncallbackHandle\n" +
+ indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
+ );
+ if (aHadCleanupFunc) {
+ indexMessageState._workerCleanedUpCount++;
+ } else {
+ indexMessageState._workerHadNoCleanUpCount++;
+ }
+ }
+ _jsonifyCallbackHandleState(aCallbackHandle) {
+ return {
+ _stringRep: aCallbackHandle.activeStack.length + " active generators",
+ activeStackLength: aCallbackHandle.activeStack.length,
+ contextStack: aCallbackHandle.contextStack,
+ };
+ }
+
+ /**
+ * The gloda messages indexed since the last call to |waitForGlodaIndexer|.
+ */
+ _glodaMessagesByMessageId = [];
+ _glodaDeletionsByMessageId = [];
+
+ _numItemsAdded = 0;
+
+ applyGlodaIndexerData(data) {
+ this.data.applyData(data);
+ }
+
+ /**
+ * A list of events that we need to see before we allow ourselves to perform
+ * the indexer check. For example, if "msgsClassified" is in here, it means
+ * that whether the indexer is active or not is irrelevant until we have
+ * seen that msgsClassified event.
+ */
+ interestingEvents = [];
+}
+
+function prepareIndexerForTesting() {
+ if (!GlodaIndexer.enabled) {
+ throw new Error(
+ "The gloda indexer is somehow not enabled. This is problematic."
+ );
+ }
+ // Make the indexer be more verbose about indexing for us.
+ GlodaIndexer._unitTestSuperVerbose = true;
+ GlodaMsgIndexer._unitTestSuperVerbose = true;
+ // Lobotomize the adaptive indexer.
+  // The indexer doesn't need to worry about load; zero its rescheduling time.
+ GlodaIndexer._INDEX_INTERVAL = 0;
+ // The indexer already registered for the idle service; we must remove this
+ // or "idle" notifications will still get sent via the observer mechanism.
+ let realIdleService = GlodaIndexer._idleService;
+ realIdleService.removeIdleObserver(
+ GlodaIndexer,
+ GlodaIndexer._indexIdleThresholdSecs
+ );
+ // Pretend we are always idle.
+ GlodaIndexer._idleService = {
+ idleTime: 1000,
+ addIdleObserver() {
+ // There is no actual need to register with the idle observer, and if
+ // we do, the stupid "idle" notification will trigger commits.
+ },
+ removeIdleObserver() {},
+ };
+ // We want the event-driven indexer to always handle indexing and never spill
+ // to an indexing sweep unless a test intentionally does so.
+ GlodaIndexer._indexMaxEventQueueMessages = 10000;
+ // Lobotomize the adaptive indexer's constants.
+ GlodaIndexer._cpuTargetIndexTime = 10000000;
+ GlodaIndexer._CPU_TARGET_INDEX_TIME_ACTIVE = 10000000;
+ GlodaIndexer._CPU_TARGET_INDEX_TIME_IDLE = 10000000;
+ GlodaIndexer._CPU_IS_BUSY_TIME = 10000000;
+ GlodaIndexer._PAUSE_LATE_IS_BUSY_TIME = 10000000;
+
+ delete GlodaIndexer._indexTokens;
+ GlodaIndexer.__defineGetter__("_indexTokens", function () {
+ return GlodaIndexer._CPU_MAX_TOKENS_PER_BATCH;
+ });
+ GlodaIndexer.__defineSetter__("_indexTokens", function () {});
+
+  // This includes making commits happen only when the unit tests explicitly
+  // tell them to.
+ GlodaIndexer._MINIMUM_COMMIT_TIME = 10000000;
+ GlodaIndexer._MAXIMUM_COMMIT_TIME = 10000000;
+}
+
+class GlodaIndexerData {
+ data = {
+ verifier: null,
+ augment: false,
+ deleted: [],
+ fullyIndexed: null,
+
+ // Things should not be recovering or failing and cleaning up unless the test
+ // is expecting it.
+ recovered: 0,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ };
+
+ /**
+   * Applies data shallowly.
+   * Only the first level of keys is applied; each key given via the data
+   * parameter completely replaces the existing value. No deep merge.
+ *
+ * @param {*} data
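+   *
+   * @example
+   * // Illustrative sketch; `indexerData` stands for any GlodaIndexerData
+   * // instance. Only the listed keys are replaced; the rest keep defaults.
+   * indexerData.applyData({ augment: true, recovered: 1 });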
+ */
+ applyData(data) {
+ this.data = {
+ ...this.data,
+ ...data,
+ };
+ }
+}
+
+/**
+ * Note that if the indexer is not currently active we assume it has already
+ * completed; we do not entertain the possibility that it has not yet started.
+ * Since the indexer is 'active' as soon as it sees an event, this does mean
+ * that you need to wait to make sure the indexing event has happened before
+ * calling us. This is reasonable.
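+ *
+ * @example
+ * // Typical usage sketch; `msgSet` is assumed to come from
+ * // messageInjection.makeNewSetsInFolders, as in the tests that use us.
+ * await waitForGlodaIndexer();
+ * Assert.ok(...assertExpectedMessagesIndexed([msgSet]));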
+ */
+async function waitForGlodaIndexer() {
+ let eventsPending = TestUtils.waitForCondition(() => {
+ if (indexMessageState.interestingEvents.length > 1) {
+      // Events still pending. See the msgsClassified event and
+ // messageInjection.registerMessageInjectionListener.
+ return false;
+ }
+ // Events finished.
+ return true;
+ });
+ let indexerRunning = TestUtils.waitForCondition(() => {
+ if (GlodaIndexer.indexing) {
+ // Still indexing.
+ return false;
+ }
+ // Indexing finished.
+ return true;
+ });
+
+ log.debug(
+    "waitForGlodaIndexer waiting for interestingEvents and GlodaIndexer.indexing."
+ );
+
+ // If we are waiting on certain events to occur first, block on those.
+ await Promise.all([eventsPending, indexerRunning]);
+}
+
+/**
+ * Each time a msgsClassified event is fired and it is present
+ * in IndexMessageState.interestingEvents, it will be removed.
+ */
+class MsgsClassifiedListener {
+ /**
+ * Events pending for the tests.
+ * (we want this to happen after gloda registers its own listener, and it
+ * does.)
+ */
+ constructor() {
+ MailServices.mfn.addListener(
+ this,
+ Ci.nsIMsgFolderNotificationService.msgsClassified
+ );
+ }
+ /**
+ * If this was an expected interesting event, remove it from the list.
+ * If an event happens that we did not expect, it does not matter. We know
+ * this because we add events we care about to interestingEvents before they
+ * can possibly be fired.
+ */
+ msgsClassified(aMsgHdrs, aJunkClassified, aTraitClassified) {
+ log.debug("MsgsClassifiedListener msgsClassified received.");
+ let idx = indexMessageState.interestingEvents.indexOf("msgsClassified");
+ if (idx != -1) {
+      log.debug("Remove interestingEvent through msgsClassified.");
+ // Remove the interesting Event as we received it here.
+ indexMessageState.interestingEvents.splice(idx, 1);
+ }
+ }
+}
+
+/**
+ * This attribute provider helps us test Gloda.
+ * When its `process` method runs, the collections are notified
+ * through their listeners
+ * (onItemsAdded, onItemsModified, onItemsRemoved, onQueryComplete).
+ */
+class TestAttributeProvider {
+ providerName = "glodaTestHelper:fakeProvider";
+ constructor() {
+ // Register us with gloda as an attribute provider so that we can
+ // distinguish between fully reindexed messages and fastpath indexed
+ // messages.
+ Gloda._attrProviderOrderByNoun[GlodaConstants.NOUN_MESSAGE].push({
+ providerName: this.providerName,
+ process: this.process,
+ });
+ }
+ /**
+ * Fake attribute provider processing function so we can distinguish
+ * between fully reindexed messages and fast-path modified messages.
+   * `process` has to be invoked for the GlodaCollectionListener to be notified.
+ */
+ *process(aItem, aRawReps, aIsConceptuallyNew, aCallbackHandle) {
+ indexMessageState._numFullIndexed++;
+
+ yield GlodaConstants.kWorkDone;
+ }
+}
+
+/**
+ * This class tracks a GlodaCollection (created by Gloda._wildcardCollection).
+ * The listeners for this collection which will notify our IndexMessageState
+ * are defined here.
+ */
+class GlodaCollectionListener {
+ // Our catch-all message collection that nets us all messages passing by.
+ catchAllCollection = null;
+ constructor() {
+ this.catchAllCollection = Gloda._wildcardCollection(
+ GlodaConstants.NOUN_MESSAGE
+ );
+ this.catchAllCollection.listener = this;
+ }
+ /*
+ * Our catch-all collection listener. Any time a new message gets indexed,
+ * we should receive an onItemsAdded call. Any time an existing message
+ * gets reindexed, we should receive an onItemsModified call. Any time an
+ * existing message actually gets purged from the system, we should receive
+ * an onItemsRemoved call.
+ */
+ onItemsAdded(aItems) {
+ log.debug("GlodaCollectionListener onItemsAdded received.");
+ for (let item of aItems) {
+ if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
+ throw new Error(
+          "Gloda message " +
+            item.folderMessage +
+            " already indexed once since the last waitForGlodaIndexer call!"
+ );
+ }
+ log.debug(
+ "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
+ );
+ indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
+ }
+
+    // Simulate some other activity clearing out the current folder's
+ // cached database, which used to kill the indexer's enumerator.
+ if (++indexMessageState._numItemsAdded == 3) {
+ log.debug("GlodaCollectionListener simulate other activity.");
+ GlodaMsgIndexer._indexingFolder.msgDatabase = null;
+ }
+ }
+
+ onItemsModified(aItems) {
+ log.debug("GlodaCollectionListener onItemsModified received.");
+ for (let item of aItems) {
+ if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
+ throw new Error(
+          "Gloda message " +
+            item +
+            " already indexed once since the last waitForGlodaIndexer call!"
+ );
+ }
+ log.debug(
+ "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
+ );
+ indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
+ }
+ }
+
+ onItemsRemoved(aItems) {
+ log.debug("GlodaCollectionListener onItemsRemoved received.");
+ for (let item of aItems) {
+ if (
+ item.headerMessageID in indexMessageState._glodaDeletionsByMessageId
+ ) {
+ throw new Error(
+ "Gloda message " +
+ item +
+            " already deleted once since the last waitForGlodaIndexer call!"
+ );
+ }
+ log.debug(
+ "GlodaCollectionListener save item to indexMessageState._glodaDeletionsByMessageId."
+ );
+ indexMessageState._glodaDeletionsByMessageId[item.headerMessageID] = item;
+ }
+ }
+ onQueryComplete(aCollection) {
+ log.debug(
+ "GlodaCollectionListener onQueryComplete received. Nothing done."
+ );
+ }
+}
+
+/**
+ * Assert that the set of messages indexed is exactly the set passed in.
+ * If a verification function is provided, use it on a per-message basis
+ * to make sure the resulting gloda message looks like it should given the
+ * synthetic message.
+ *
+ * Throws an Error if something is not as expected, and otherwise always returns
+ * [true, string] for use with `Assert.ok` in your tests. This ensures proper
+ * test output.
+ *
+ * @param {SyntheticMessageSet[]} aSynMessageSets A list of SyntheticMessageSets
+ * containing exactly the messages we should expect to see.
+ * @param [aConfig.verifier] The function to call to verify that the indexing
+ * had the desired result. Takes arguments aSynthMessage (the synthetic
+ * message just indexed), aGlodaMessage (the gloda message representation of
+ * the indexed message), and aPreviousResult (the value last returned by the
+ * verifier function for this given set of messages, or undefined if it is
+ * the first message.)
+ * @param [aConfig.augment=false] Should we augment the synthetic message sets
+ * with references to their corresponding gloda messages? The messages
+ * will show up in a 'glodaMessages' list on the syn set.
+ * @param {SyntheticMessageSet[]} [aConfig.deleted] A list of SyntheticMessageSets
+ * containing messages that should be recognized as deleted by the gloda
+ * indexer in this pass.
+ * @param [aConfig.fullyIndexed] A count of the number of messages we expect
+ * to observe being fully indexed. This is relevant because in the case
+ * of message moves, gloda may generate an onItemsModified notification but
+ * not reindex the message. This attribute allows the tests to distinguish
+ * between the two cases.
+ * @returns {[true, string]}
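+ * @example
+ * // A sketch of the common pattern; `msgSet` is assumed to be a
+ * // SyntheticMessageSet that was just injected.
+ * await waitForGlodaIndexer();
+ * Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));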
+ */
+function assertExpectedMessagesIndexed(aSynMessageSets, aConfig) {
+ indexMessageState.synMessageSets = aSynMessageSets;
+
+ indexMessageState.applyGlodaIndexerData(aConfig);
+
+ // Check that we have a gloda message for every syn message and verify.
+ for (let msgSet of indexMessageState.synMessageSets) {
+ if (indexMessageState.augmentSynSets()) {
+ msgSet.glodaMessages = [];
+ }
+ for (let [iSynMsg, synMsg] of msgSet.synMessages.entries()) {
+ if (!(synMsg.messageId in indexMessageState._glodaMessagesByMessageId)) {
+ let msgHdr = msgSet.getMsgHdr(iSynMsg);
+ throw new Error(
+ "Header " +
+ msgHdr.messageId +
+ " in folder: " +
+ (msgHdr ? msgHdr.folder.name : "no header?") +
+ " should have been indexed."
+ );
+ }
+
+ let glodaMsg =
+ indexMessageState._glodaMessagesByMessageId[synMsg.messageId];
+ if (indexMessageState.augmentSynSets()) {
+ msgSet.glodaMessages.push(glodaMsg);
+ }
+
+ indexMessageState._glodaMessagesByMessageId[synMsg.messageId] = null;
+
+ let verifier = indexMessageState.verifier();
+ let previousValue = undefined;
+ if (verifier) {
+ try {
+          // Feed the verifier the previous result, if any, and keep the new one.
+ previousValue = verifier(synMsg, glodaMsg, previousValue);
+ } catch (ex) {
+ throw new Error(
+ "Verification failure: " +
+ synMsg +
+ " is not close enough to " +
+ glodaMsg +
+ "; basing this on exception: " +
+ ex
+ );
+ }
+ }
+ }
+ }
+
+ // Check that we don't have any extra gloda messages. (lacking syn msgs)
+ for (let messageId in indexMessageState._glodaMessagesByMessageId) {
+ let glodaMsg = indexMessageState._glodaMessagesByMessageId[messageId];
+ if (glodaMsg != null) {
+ throw new Error(
+ "Gloda message:\n" +
+ glodaMsg +
+ "\nShould not have been indexed.\n" +
+ "Source header:\n" +
+ glodaMsg.folderMessage
+ );
+ }
+ }
+
+ if (indexMessageState.deletionSynSets()) {
+ for (let msgSet of indexMessageState.deletionSynSets()) {
+ for (let synMsg of msgSet.synMessages) {
+ if (
+ !(synMsg.messageId in indexMessageState._glodaDeletionsByMessageId)
+ ) {
+ throw new Error(
+ "Synthetic message " + synMsg + " did not get deleted!"
+ );
+ }
+
+ indexMessageState._glodaDeletionsByMessageId[synMsg.messageId] = null;
+ }
+ }
+ }
+
+ // Check that we don't have unexpected deletions.
+ for (let messageId in indexMessageState._glodaDeletionsByMessageId) {
+ let glodaMsg = indexMessageState._glodaDeletionsByMessageId[messageId];
+ if (glodaMsg != null) {
+ throw new Error(
+ "Gloda message with message id " +
+ messageId +
+ " was " +
+ "unexpectedly deleted!"
+ );
+ }
+ }
+
+ if (
+ indexMessageState.expectedWorkerRecoveredCount() != null &&
+ indexMessageState.expectedWorkerRecoveredCount() !=
+ indexMessageState._workerRecoveredCount
+ ) {
+ throw new Error(
+ "Expected worker-recovered count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedWorkerRecoveredCount() +
+ "\nActual:\n" +
+ indexMessageState._workerRecoveredCount
+ );
+ }
+ if (
+ indexMessageState.expectedFailedToRecoverCount() != null &&
+ indexMessageState.expectedFailedToRecoverCount() !=
+ indexMessageState._workerFailedToRecoverCount
+ ) {
+ throw new Error(
+ "Expected worker-failed-to-recover count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedFailedToRecoverCount() +
+ "\nActual:\n" +
+ indexMessageState._workerFailedToRecoverCount
+ );
+ }
+ if (
+ indexMessageState.expectedCleanedUpCount() != null &&
+ indexMessageState.expectedCleanedUpCount() !=
+ indexMessageState._workerCleanedUpCount
+ ) {
+ throw new Error(
+ "Expected worker-cleaned-up count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedCleanedUpCount() +
+ "\nActual:\n" +
+ indexMessageState._workerCleanedUpCount
+ );
+ }
+ if (
+ indexMessageState.expectedHadNoCleanUpCount() != null &&
+ indexMessageState.expectedHadNoCleanUpCount() !=
+ indexMessageState._workerHadNoCleanUpCount
+ ) {
+ throw new Error(
+ "Expected worker-had-no-cleanup count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedHadNoCleanUpCount() +
+        "\nActual:\n" +
+ indexMessageState._workerHadNoCleanUpCount
+ );
+ }
+
+ if (
+ indexMessageState.expectedNumFullIndexed() != null &&
+ indexMessageState.expectedNumFullIndexed() !=
+ indexMessageState._numFullIndexed
+ ) {
+ throw new Error(
+ "Expected number of fully indexed messages did not match.\n" +
+ "Expected:\n" +
+ indexMessageState.expectedNumFullIndexed() +
+ "\nActual:\n" +
+ indexMessageState._numFullIndexed
+ );
+ }
+
+  // Clean up the internal tracking values in the IndexMessageState
+  // so new tests start fresh.
+ resetIndexMessageState();
+
+  // If no error has been thrown up to this point, we are fine!
+  // Return values for Assert.ok.
+  // Use it like: Assert.ok(...assertExpectedMessagesIndexed()).
+ return [true, "Expected messages were indexed."];
+}
+
+/**
+ * Resets the IndexMessageState
+ *
+ * @TODO more docs
+ */
+function resetIndexMessageState() {
+ indexMessageState.synMessageSets = [];
+ indexMessageState._glodaMessagesByMessageId = [];
+ indexMessageState._glodaDeletionsByMessageId = [];
+
+ indexMessageState._workerRecoveredCount = 0;
+ indexMessageState._workerFailedToRecoverCount = 0;
+ indexMessageState._workerCleanedUpCount = 0;
+ indexMessageState._workerHadNoCleanUpCount = 0;
+
+ indexMessageState._numFullIndexed = 0;
+ indexMessageState.resetData();
+}
+
+/**
+ * Wipe out almost everything from the clutches of the GlodaCollectionManager.
+ * By default, it is caching things and knows about all the non-GC'ed
+ * collections. Tests may want to ensure that their data is loaded from disk
+ * rather than relying on the cache, and so, we exist.
+ * The exception to everything is that Gloda's concept of myContact and
+ * myIdentities needs to have its collections still be reachable or invariants
+ * are in danger of being "de-invarianted".
+ * The other exception to everything are any catch-all-collections used by our
+ * testing/indexing process. We don't scan for them, we just hard-code their
+ * addition if they exist.
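+ *
+ * @example
+ * // Sketch: force the next query to hit the database rather than the cache,
+ * // as test_index_addressbook.js does before querying identities.
+ * nukeGlodaCachesAndCollections();
+ * let identQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);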
+ */
+function nukeGlodaCachesAndCollections() {
+ // Explode if the GlodaCollectionManager somehow doesn't work like we think it
+ // should. (I am reluctant to put this logic in there, especially because
+ // knowledge of the Gloda contact/identity collections simply can't be known
+  // by the collection manager.)
+ if (
+ GlodaCollectionManager._collectionsByNoun === undefined ||
+ GlodaCollectionManager._cachesByNoun === undefined
+ ) {
+ // We don't check the Gloda contact/identities things because they might not
+ // get initialized if there are no identities, which is the case for our
+ // unit tests right now...
+ throw new Error(
+ "Try and remember to update the testing infrastructure when you " +
+ "change things!"
+ );
+ }
+
+ // We can just blow away the known collections.
+ GlodaCollectionManager._collectionsByNoun = {};
+ // But then we have to put the myContact / myIdentities junk back.
+ if (Gloda._myContactCollection) {
+ GlodaCollectionManager.registerCollection(Gloda._myContactCollection);
+ GlodaCollectionManager.registerCollection(Gloda._myIdentitiesCollection);
+ }
+ // Don't forget our testing catch-all collection.
+ if (collectionListener.catchAllCollection) {
+ // Empty it out in case it has anything in it.
+ collectionListener.catchAllCollection.clear();
+ // And now we can register it.
+ GlodaCollectionManager.registerCollection(
+ collectionListener.catchAllCollection
+ );
+ }
+
+ // Caches aren't intended to be cleared, but we also don't want to lose our
+ // caches, so we need to create new ones from the ashes of the old ones.
+ let oldCaches = GlodaCollectionManager._cachesByNoun;
+ GlodaCollectionManager._cachesByNoun = {};
+ for (let nounId in oldCaches) {
+ let cache = oldCaches[nounId];
+ GlodaCollectionManager.defineCache(cache._nounDef, cache._maxCacheSize);
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
new file mode 100644
index 0000000000..f7a5199ba3
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
@@ -0,0 +1,293 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "configureGlodaIndexing",
+ "waitForGlodaDBFlush",
+ "waitForIndexingHang",
+ "resumeFromSimulatedHang",
+ "permuteMessages",
+ "makeABCardForAddressPair",
+];
+
+/*
+ * This file provides gloda testing infrastructure functions which are not coupled
+ * with the IndexMessageState from GlodaTestHelper.jsm.
+ */
+
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.helperFunctions",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * Resume execution when the db has run all the async statements whose execution
+ * was queued prior to this call. We trigger a commit to accomplish this,
+ * although this could also be accomplished without a commit. (Though we would
+ * have to reach into GlodaDatastore.jsm and get at the raw connection or extend
+ * datastore to provide a way to accomplish this.)
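+ *
+ * @example
+ * // Sketch: make sure pending writes have hit the database before poking at
+ * // it directly with SQL (as test_fts3_tokenizer.js does).
+ * await waitForGlodaDBFlush();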
+ */
+async function waitForGlodaDBFlush() {
+ // We already have a mechanism to do this by forcing a commit. Arguably,
+ // it would be better to use a mechanism that does not induce an fsync.
+ var savedDepth = GlodaDatastore._transactionDepth;
+ if (!savedDepth) {
+ GlodaDatastore._beginTransaction();
+ }
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ GlodaDatastore.runPostCommit(promiseResolve);
+ // We don't actually need to run things to zero. We can just wait for the
+ // outer transaction to close itself.
+ GlodaDatastore._commitTransaction();
+ if (savedDepth) {
+ GlodaDatastore._beginTransaction();
+ }
+ await promise;
+}
+
+/**
+ * An injected fault exception.
+ */
+function InjectedFault(aWhy) {
+ this.message = aWhy;
+}
+InjectedFault.prototype = {
+ toString() {
+ return "[InjectedFault: " + this.message + "]";
+ },
+};
+
+function _inject_failure_on_MsgHdrToMimeMessage() {
+ throw new InjectedFault("MsgHdrToMimeMessage");
+}
+
+let hangResolve;
+let hangPromise = new Promise(resolve => {
+ hangResolve = resolve;
+});
+
+function _simulate_hang_on_MsgHdrToMimeMessage(...aArgs) {
+ hangResolve([MsgHdrToMimeMessage, null, aArgs]);
+}
+
+/**
+ * If you have configured gloda to hang while indexing, this is the thing
+ * you wait on to make sure the indexer actually gets to the point where it
+ * hangs.
+ */
+async function waitForIndexingHang() {
+ await hangPromise;
+}
+
+/**
+ * Configure gloda indexing. For most settings, the settings get clobbered by
+ * the next time this method is called. Omitted settings reset to the defaults.
+ * However, anything labeled as a 'sticky' setting stays that way until
+ * explicitly changed.
+ *
+ * @param {boolean} [aArgs.event=true] Should event-driven indexing be enabled
+ * (true) or disabled (false)? Right now, this actually suppresses
+ * indexing... the semantics will be ironed out as-needed.
+ * @param [aArgs.hangWhile] Must be either omitted (for don't force a hang) or
+ * "streaming" indicating that we should do a no-op instead of performing
+ * the message streaming. This will manifest as a hang until
+ * |resumeFromSimulatedHang| is invoked or the test explicitly causes the
+ * indexer to abort (in which case you do not need to call the resume
+ * function.) You must omit injectFaultIn if you use hangWhile.
+ * @param [aArgs.injectFaultIn=null] Must be omitted (for don't inject a
+ * failure) or "streaming" indicating that we should inject a failure when
+ * the message indexer attempts to stream a message. The fault will be an
+ * appropriate exception. You must omit hangWhile if you use injectFaultIn.
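+ *
+ * @example
+ * // Sketch: inject a streaming failure for one test, then restore defaults.
+ * configureGlodaIndexing({ injectFaultIn: "streaming" });
+ * // ... inject messages and wait for the indexer ...
+ * configureGlodaIndexing({});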
+ */
+function configureGlodaIndexing(aArgs) {
+ let shouldSuppress = "event" in aArgs ? !aArgs.event : false;
+ if (shouldSuppress != GlodaIndexer.suppressIndexing) {
+ log.debug(`Setting suppress indexing to ${shouldSuppress}.`);
+ GlodaIndexer.suppressIndexing = shouldSuppress;
+ }
+
+ if ("hangWhile" in aArgs) {
+ log.debug(`Enabling hang injection in ${aArgs.hangWhile}.`);
+ switch (aArgs.hangWhile) {
+ case "streaming":
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
+ _simulate_hang_on_MsgHdrToMimeMessage;
+ break;
+ default:
+ throw new Error(
+ aArgs.hangWhile + " is not a legal choice for hangWhile"
+ );
+ }
+ } else if ("injectFaultIn" in aArgs) {
+    log.debug(`Enabling fault injection in ${aArgs.injectFaultIn}.`);
+ switch (aArgs.injectFaultIn) {
+ case "streaming":
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
+ _inject_failure_on_MsgHdrToMimeMessage;
+ break;
+ default:
+ throw new Error(
+ aArgs.injectFaultIn + " is not a legal choice for injectFaultIn"
+ );
+ }
+ } else {
+ if (GlodaMsgIndexer._MsgHdrToMimeMessageFunc != MsgHdrToMimeMessage) {
+ log.debug("Clearing hang/fault injection.");
+ }
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc = MsgHdrToMimeMessage;
+ }
+}
+
+/**
+ * Call this to resume from the hang induced by configuring the indexer with
+ * a "hangWhile" argument to |configureGlodaIndexing|.
+ *
+ * @param [aJustResumeExecution=false] Should we just poke the callback driver
+ * for the indexer rather than continuing the call. You would likely want
+ * to do this if you committed a lot of violence while in the simulated
+ * hang and proper resumption would throw exceptions all over the place.
+ * (For example; if you hang before streaming and destroy the message
+ * header while suspended, resuming the attempt to stream will throw.)
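+ *
+ * @example
+ * // Sketch of the hang/resume dance; assumes indexing was configured with
+ * // configureGlodaIndexing({ hangWhile: "streaming" }) beforehand.
+ * await waitForIndexingHang();
+ * // ... poke at the world while the indexer is stuck ...
+ * await resumeFromSimulatedHang();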
+ */
+async function resumeFromSimulatedHang(aJustResumeExecution) {
+ if (aJustResumeExecution) {
+ log.debug("Resuming from simulated hang with direct wrapper callback.");
+ GlodaIndexer._wrapCallbackDriver();
+ } else {
+ let [func, dis, args] = await hangPromise;
+ log.debug(`Resuming from simulated hang with call to: ${func.name}.`);
+ func.apply(dis, args);
+ }
+ // Reset the promise for the hang.
+ hangPromise = new Promise(resolve => {
+ hangResolve = resolve;
+ });
+}
+
+/**
+ * Prepares permutations for messages with aScenarioMaker. Be sure to wait for the indexer
+ * for every permutation and verify the result.
+ *
+ * This process is executed once for each possible permutation of observation
+ * of the synthetic messages. (Well, we cap it; brute-force test your logic
+ * on your own time; you should really only be feeding us minimal scenarios.)
+ *
+ * @param aScenarioMaker A function that, when called, will generate a series
+ * of SyntheticMessage instances. Each call to this method should generate
+ * a new set of conceptually equivalent, but not identical, messages. This
+ * allows us to process without having to reset our state back to nothing each
+ * time. (This is more to try and make sure we run the system with a 'dirty'
+ * state than a bid for efficiency.)
+ * @param {MessageInjection} messageInjection An instance to use for permuting
+ * the messages and creating folders.
+ *
+ * @returns {Array<() => Promise<SyntheticMessageSet>>} An array of async
+ *   permutation functions. Await them sequentially (e.g. with a for...of
+ *   loop), waiting for the indexer and asserting after each one.
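+ *
+ * @example
+ * // Sketch; `scenario` stands for any aScenarioMaker-style function.
+ * let permutations = await permuteMessages(scenario, messageInjection);
+ * for (let permutation of permutations) {
+ *   let synSet = await permutation();
+ *   await waitForGlodaIndexer();
+ *   Assert.ok(...assertExpectedMessagesIndexed([synSet], { augment: true }));
+ * }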
+ */
+async function permuteMessages(aScenarioMaker, messageInjection) {
+ let folder = await messageInjection.makeEmptyFolder();
+
+ // To calculate the permutations, we need to actually see what gets produced.
+ let scenarioMessages = aScenarioMaker();
+ let numPermutations = Math.min(factorial(scenarioMessages.length), 32);
+
+ let permutations = [];
+ for (let iPermutation = 0; iPermutation < numPermutations; iPermutation++) {
+ permutations.push(async () => {
+ log.debug(`Run permutation: ${iPermutation + 1} / ${numPermutations}`);
+ // If this is not the first time through, we need to create a new set.
+ if (iPermutation) {
+ scenarioMessages = aScenarioMaker();
+ }
+ scenarioMessages = permute(scenarioMessages, iPermutation);
+ let scenarioSet = new SyntheticMessageSet(scenarioMessages);
+ await messageInjection.addSetsToFolders([folder], [scenarioSet]);
+ return scenarioSet;
+ });
+ }
+ return permutations;
+}
+
+/**
+ * A simple factorial function used to calculate the number of permutations
+ * possible for a given set of messages.
+ */
+function factorial(i, rv) {
+ if (i <= 1) {
+ return rv || 1;
+ }
+ return factorial(i - 1, (rv || 1) * i); // tail-call capable
+}
+
+/**
+ * Permute an array given a 'permutation id' that is an integer that fully
+ * characterizes the permutation through the decisions that need to be made
+ * at each step.
+ *
+ * @param aArray Source array that is destructively processed.
+ * @param aPermutationId The permutation id. A permutation id of 0 results in
+ * the original array's sequence being maintained.
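+ *
+ * @example
+ * // Worked examples (note the source array is consumed destructively):
+ * permute(["a", "b", "c"], 0); // => ["a", "b", "c"]
+ * permute(["a", "b", "c"], 1); // => ["b", "a", "c"]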
+ */
+function permute(aArray, aPermutationId) {
+ let out = [];
+ for (let i = aArray.length; i > 0; i--) {
+ let offset = aPermutationId % i;
+ out.push(aArray[offset]);
+ aArray.splice(offset, 1);
+ aPermutationId = Math.floor(aPermutationId / i);
+ }
+ return out;
+}
+
+/**
+ * Add a name-and-address pair as generated by `makeNameAndAddress` to the
+ * personal address book.
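+ *
+ * @example
+ * // Sketch; the pair is a [displayName, email] tuple. The values here are
+ * // made up for illustration.
+ * makeABCardForAddressPair(["Tweedle Dee", "dee@example.com"]);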
+ */
+function makeABCardForAddressPair(nameAndAddress) {
+ // XXX bug 314448 demands that we trigger creation of the ABs... If we don't
+ // do this, then the call to addCard will fail if someone else hasn't tickled
+ // this.
+ MailServices.ab.directories;
+
+ // kPABData is copied from abSetup.js
+ let kPABData = {
+ URI: "jsaddrbook://abook.sqlite",
+ };
+ let addressBook = MailServices.ab.getDirectory(kPABData.URI);
+
+ let card = Cc["@mozilla.org/addressbook/cardproperty;1"].createInstance(
+ Ci.nsIAbCard
+ );
+ card.displayName = nameAndAddress[0];
+ card.primaryEmail = nameAndAddress[1];
+
+ // Just save the new node straight away.
+ addressBook.addCard(card);
+
+ log.debug(`Adding address book card for: ${nameAndAddress}`);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js b/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js
new file mode 100644
index 0000000000..ff186e871a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js
@@ -0,0 +1,86 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This test does not use glodaTestHelper because:
+ * 1) We need to do things as part of the test without gloda having remotely
+ * thought about opening the database.
+ * 2) We expect and desire that the logger produce a warning and glodaTestHelper
+ * takes the view that warnings = death.
+ *
+ * We do use the rest of the test infrastructure though.
+ */
+
+// -- Do configure the gloda prefs though...
+// Yes to indexing.
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// No to a sweep we don't control.
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
+// We'll start with this datastore ID, and make sure it gets overwritten
+// when the index is rebuilt.
+var kDatastoreIDPref = "mailnews.database.global.datastore.id";
+var kOriginalDatastoreID = "47e4bad6-fedc-4931-bf3f-d2f4146ac63e";
+Services.prefs.setCharPref(kDatastoreIDPref, kOriginalDatastoreID);
+
+/**
+ * Create an illegal=corrupt database and make sure that we log a message and
+ * still end up happy.
+ */
+add_task(function test_corrupt_databases_get_reported_and_blown_away() {
+ // - Get the file path.
+ let dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ dbFile.append("global-messages-db.sqlite");
+
+ // - Protect dangerous people from themselves.
+ // (There should not be a database at this point; if there is one, we are
+ // not in the sandbox profile we expect. I wouldn't bother except we're
+ // going out of our way to write gibberish whereas gloda accidentally
+ // opening a valid database is bad but not horrible.)
+ if (dbFile.exists()) {
+ do_throw("There should not be a database at this point.");
+ }
+
+ // - Create the file.
+ dump("Creating gibberish file\n");
+ let ostream = Cc["@mozilla.org/network/file-output-stream;1"].createInstance(
+ Ci.nsIFileOutputStream
+ );
+ ostream.init(dbFile, -1, -1, 0);
+ let fileContents = "I'm in ur database not being a database.\n";
+ ostream.write(fileContents, fileContents.length);
+ ostream.close();
+
+ // - Init gloda, get warnings.
+ dump("Init gloda\n");
+ var { Gloda } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaPublic.jsm"
+ );
+ dump("Gloda inited, checking\n");
+
+ // - Make sure the datastore has an actual database.
+ let { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+ );
+
+ // Make sure that the datastoreID was overwritten
+ Assert.notEqual(Gloda.datastoreID, kOriginalDatastoreID);
+ // And for good measure, make sure that the pref was also overwritten
+ let currentDatastoreID = Services.prefs.getCharPref(kDatastoreIDPref);
+ Assert.notEqual(currentDatastoreID, kOriginalDatastoreID);
+ // We'll also ensure that the Gloda.datastoreID matches the one stashed
+ // in prefs...
+ Assert.equal(currentDatastoreID, Gloda.datastoreID);
+ // And finally, we'll make sure that the datastoreID is a string with length
+ // greater than 0.
+ Assert.equal(typeof Gloda.datastoreID, "string");
+ Assert.ok(Gloda.datastoreID.length > 0);
+
+ if (!GlodaDatastore.asyncConnection) {
+ do_throw("No database connection suggests no database!");
+ }
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_folder_logic.js b/comm/mailnews/db/gloda/test/unit/test_folder_logic.js
new file mode 100644
index 0000000000..6625258daa
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_folder_logic.js
@@ -0,0 +1,60 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the gloda folder logic.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ // Tests in this file assume that returned folders are nsIMsgFolders and not
+ // handles which currently only local injection supports.
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Newly created folders should not be filthy (at least as long as they have
+ * nothing in them.)
+ */
+add_task(async function test_newly_created_folders_start_clean() {
+ let msgFolder = await messageInjection.makeEmptyFolder();
+ let glodaFolder = Gloda.getFolderForFolder(msgFolder);
+ Assert.equal(glodaFolder.dirtyStatus, glodaFolder.kFolderClean);
+});
+
+/**
+ * Deleted folders should not leave behind any mapping, and that mapping
+ * definitely should not interfere with a newly created folder of the same
+ * name.
+ */
+add_task(async function test_deleted_folder_tombstones_get_forgotten() {
+ let oldFolder = await messageInjection.makeEmptyFolder("volver");
+ let oldGlodaFolder = Gloda.getFolderForFolder(oldFolder);
+ messageInjection.deleteFolder(oldFolder);
+
+ // The tombstone needs to know it is deleted.
+ Assert.ok(oldGlodaFolder._deleted);
+
+ let newFolder = await messageInjection.makeEmptyFolder("volver");
+ let newGlodaFolder = Gloda.getFolderForFolder(newFolder);
+
+ // This folder better not be the same and better not think it is deleted.
+ Assert.notEqual(oldGlodaFolder, newGlodaFolder);
+ Assert.ok(!newGlodaFolder._deleted);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js b/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js
new file mode 100644
index 0000000000..d938208c9b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js
@@ -0,0 +1,299 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This test file recycles part of test_intl.js. What we do is insert into the
+ * fulltext index two messages:
+ * - one has tokens 'aa' and 'bbb',
+ * - one is from a previous test and has CJK characters in it.
+ *
+ * We want to test that the behavior of the tokenizer is as expected (namely,
+ * that it drops two-letter tokens unless they're CJK bigrams), and that
+ * GlodaMsgSearcher.jsm properly drops two-letter tokens (unless CJK) from the search
+ * terms to avoid issuing a query that will definitely return no results.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { queryExpect, sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { GlodaFolder } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDataModel.jsm"
+);
+var { GlodaMsgSearcher } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaMsgSearcher.jsm"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* ===== Tests ===== */
+
+/**
+ * To make the encoding pairs:
+ * - For the subject bit:
+ * import email
+ * h = email.Header.Header(charset=CHARSET)
+ * h.append(STRING)
+ * h.encode()
+ * - For the body bit
+ * s.encode(CHARSET)
+ */
+var intlPhrases = [
+ // -- CJK case
+ {
+ name: "CJK: Vending Machine",
+ actual: "\u81ea\u52d5\u552e\u8ca8\u6a5f",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?6Ieq5YuV5ZSu6LKo5qmf?=",
+ "\xe8\x87\xaa\xe5\x8b\x95\xe5\x94\xae\xe8\xb2\xa8\xe6\xa9\x9f",
+ ],
+ },
+ searchPhrases: [
+ // Match bi-gram driven matches starting from the front.
+ { body: '"\u81ea\u52d5"', match: true },
+ ],
+ },
+ // -- Regular case. Make sure two-letter tokens do not match, since the
+ // tokenizer is supposed to drop them. Also make sure that a three-letter
+ // token matches.
+ {
+ name: "Boring ASCII",
+ actual: "aa bbb",
+ encodings: {
+ "utf-8": ["=?utf-8?q?aa_bbb?=", "aa bbb"],
+ },
+ searchPhrases: [
+ { body: "aa", match: false },
+ { body: "bbb", match: true },
+ ],
+ },
+];
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_index_cjk() {
+ await indexPhrase(intlPhrases[0]);
+});
+
+add_task(async function test_index_regular() {
+ await indexPhrase(intlPhrases[1]);
+});
+
+/**
+ * - Check that the 'aa' token was never emitted (we don't emit two-letter
+ * tokens unless they're CJK).
+ * - Check that the '\u81ea\u52d5' token was emitted, because it's CJK.
+ * - Check that the 'bbb' token was duly emitted (three letters is more than two
+ * letters so it's tokenized).
+ */
+add_task(async function test_token_count() {
+ // Force a db flush so I can investigate the database if I want.
+ await waitForGlodaDBFlush();
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messagesText where messagesText MATCH 'aa'"
+ );
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM messagesText where messagesText MATCH 'bbb'"
+ );
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM messagesText where messagesText MATCH '\u81ea\u52d5'"
+ );
+});
+
+add_task(async function test_fulltextsearch_cjk() {
+ await test_fulltextsearch(intlPhrases[0]);
+});
+
+add_task(async function test_fulltextsearch_regular() {
+ await test_fulltextsearch(intlPhrases[1]);
+});
+
+/**
+ * We make sure that the Gloda module that builds the query drops two-letter
+ * tokens, otherwise this would result in an empty search (no matches for
+ * two-letter tokens).
+ */
+add_task(async function test_query_builder() {
+ // aa should be dropped, and we have one message containing the bbb token.
+ await msgSearchExpectCount(1, "aa bbb");
+ // The CJK part should not be dropped, and match message 1; the bbb token
+ // should not be dropped, and match message 2; 0 results returned because no
+ // message has the two tokens in it.
+ await msgSearchExpectCount(0, "\u81ea\u52d5 bbb");
+});
+
+/**
+ * For each phrase in the intlPhrases array (the add_task functions above call
+ * us once per phrase), create a message where the
+ * subject, body, and attachment name are populated using the encodings in
+ * the phrase's "encodings" attribute, one encoding per message. Make sure
+ * that the strings as exposed by the gloda representation are equal to the
+ * expected/actual value.
+ * Stash each created synthetic message in a resultList list on the phrase so
+ * that we can use them as expected query results in
+ * |test_fulltextsearch|.
+ */
+async function indexPhrase(aPhrase) {
+ // Create a synthetic message for each of the delightful encoding types.
+ let messages = [];
+ aPhrase.resultList = [];
+ for (let charset in aPhrase.encodings) {
+ let [quoted, bodyEncoded] = aPhrase.encodings[charset];
+
+ let smsg = msgGen.makeMessage({
+ subject: quoted,
+ body: { charset, encoding: "8bit", body: bodyEncoded },
+ attachments: [{ filename: quoted, body: "gabba gabba hey" }],
+ // Save off the actual value for checking.
+ callerData: [charset, aPhrase.actual],
+ });
+
+ messages.push(smsg);
+ aPhrase.resultList.push(smsg);
+ }
+ let synSet = new SyntheticMessageSet(messages);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([synSet], { verifier: verify_index })
+ );
+}
+
+/**
+ * Does the per-message verification for indexPhrase. Knows what is right for
+ * each message because of the callerData attribute on the synthetic message.
+ */
+function verify_index(smsg, gmsg) {
+ let [charset, actual] = smsg.callerData;
+ let subject = gmsg.subject;
+ let indexedBodyText = gmsg.indexedBodyText.trim();
+ let attachmentName = gmsg.attachmentNames[0];
+ dump("Using character set:\n" + charset + "\nActual:\n" + actual + "\n");
+ dump("Subject:\n" + subject + "\nSubject length:\n" + subject.length + "\n");
+ Assert.equal(actual, subject);
+ dump("Body: " + indexedBodyText + " (len: " + indexedBodyText.length + ")\n");
+ Assert.equal(actual, indexedBodyText);
+ dump(
+ "Attachment name:" +
+ attachmentName +
+ " (len: " +
+ attachmentName.length +
+ ")\n"
+ );
+ Assert.equal(actual, attachmentName);
+}
+
+/**
+ * For each phrase, make sure that all of the searchPhrases either match or fail
+ * to match as appropriate.
+ */
+async function test_fulltextsearch(aPhrase) {
+ for (let searchPhrase of aPhrase.searchPhrases) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.bodyMatches(searchPhrase.body);
+ await queryExpect(query, searchPhrase.match ? aPhrase.resultList : []);
+ }
+}
+
+/**
+ * Pass a query string to the GlodaMsgSearcher, run the corresponding SQL query,
+ * and check the resulted count is what we want.
+ *
+ * Use like so:
+ * await msgSearchExpectCount(1, "I like cheese");
+ */
+async function msgSearchExpectCount(aCount, aFulltextStr) {
+ // Let the GlodaMsgSearcher build its query
+ let searcher = new GlodaMsgSearcher(null, aFulltextStr);
+ let conn = GlodaDatastore.asyncConnection;
+ let query = searcher.buildFulltextQuery();
+
+ // Brace yourself, brutal monkey-patching NOW
+ let sql, args;
+ let oldFunc = GlodaDatastore._queryFromSQLString;
+ GlodaDatastore._queryFromSQLString = function (aSql, aArgs) {
+ sql = aSql;
+ args = aArgs;
+ };
+ query.getCollection();
+ GlodaDatastore._queryFromSQLString = oldFunc;
+
+ // Bind the parameters
+ let stmt = conn.createStatement(sql);
+ for (let [iBinding, bindingValue] of args.entries()) {
+ GlodaDatastore._bindVariant(stmt, iBinding, bindingValue);
+ }
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ let i = 0;
+ stmt.executeAsync({
+ handleResult(aResultSet) {
+ for (
+ let row = aResultSet.getNextRow();
+ row;
+ row = aResultSet.getNextRow()
+ ) {
+ i++;
+ }
+ },
+
+ handleError(aError) {
+ do_throw(new Error("Error: " + aError.message));
+ },
+
+ handleCompletion(aReason) {
+ if (aReason != Ci.mozIStorageStatementCallback.REASON_FINISHED) {
+ do_throw(new Error("Query canceled or aborted!"));
+ }
+
+ if (i != aCount) {
+ throw new Error(
+ "Didn't get the expected number of rows: got " +
+ i +
+ " expected " +
+ aCount +
+ " SQL: " +
+ sql
+ );
+ }
+ promiseResolve();
+ },
+ });
+ stmt.finalize();
+ await promise;
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js
new file mode 100644
index 0000000000..3c59de4233
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js
@@ -0,0 +1,34 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent for IMAP messages that are offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_gloda_content.js */
+load("base_gloda_content.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_gloda_content_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js b/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js
new file mode 100644
index 0000000000..f02a6750b4
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent for local messages.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_gloda_content.js */
+load("base_gloda_content.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_gloda_content_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js b/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js
new file mode 100644
index 0000000000..9d0b0d4103
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js
@@ -0,0 +1,139 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Check that events update identity._hasAddressBookCard correctly.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ nukeGlodaCachesAndCollections,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var EMAIL_ADDRESS = "all.over@the.world.invalid";
+var DISPLAY_NAME = "every day";
+
+var messageInjection;
+
+add_setup(function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Create an e-mail so the identity can exist.
+ */
+add_setup(async function () {
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1, from: [DISPLAY_NAME, EMAIL_ADDRESS] }]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+  // Okay, but it already knows it has no card because indexing looked.
+ // So let's flush all caches and create a query that just knows about the
+ // identity.
+ nukeGlodaCachesAndCollections();
+
+ let identQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ identQuery.kind("email");
+ identQuery.value(EMAIL_ADDRESS);
+ await queryExpect(identQuery, [EMAIL_ADDRESS]);
+
+ // Now the identity exists. Make sure it is in cache.
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.notEqual(identity, null);
+
+ // And make sure it has no idea what the current state of the card is.
+ if (identity._hasAddressBookCard !== undefined) {
+ do_throw(
+ "We should have no idea about the state of the ab card, but " +
+ "it's: " +
+ identity._hasAddressBookCard
+ );
+ }
+});
+
+/**
+ * Add a card for that e-mail, make sure we update the cached identity ab
+ * card state.
+ */
+add_task(function test_add_card_cache_indication() {
+ add_card(EMAIL_ADDRESS, DISPLAY_NAME);
+
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.equal(identity._hasAddressBookCard, true);
+});
+
+/**
+ * Remove the card we added in the previous test, make sure we update the
+ * cached identity ab card state.
+ */
+add_task(function test_remove_card_cache_indication() {
+ delete_card(EMAIL_ADDRESS);
+
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.equal(identity._hasAddressBookCard, false);
+});
+
+/**
+ * Add again a card for that e-mail, make sure we update the cached identity ab
+ * card state.
+ */
+add_task(function test_add_card_cache_indication() {
+ add_card(EMAIL_ADDRESS, DISPLAY_NAME);
+
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.equal(identity._hasAddressBookCard, true);
+});
+
+function add_card(aEmailAddress, aDisplayName) {
+ Cc["@mozilla.org/addressbook/services/addressCollector;1"]
+ .getService(Ci.nsIAbAddressCollector)
+ .collectSingleAddress(aEmailAddress, aDisplayName, true, true);
+}
+
+function get_card_for_email(aEmailAddress) {
+ for (let book of MailServices.ab.directories) {
+ let card = book.cardForEmailAddress(aEmailAddress);
+ if (card) {
+ return [book, card];
+ }
+ }
+ return [null, null];
+}
+
+function delete_card(aEmailAddress) {
+ let [book, card] = get_card_for_email(aEmailAddress);
+
+ MailServices.ab.getDirectory(book.URI).deleteCards([card]);
+}
+
+function get_cached_gloda_identity_for_email(aEmailAddress) {
+ return GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaConstants.NOUN_IDENTITY,
+ "email@" + aEmailAddress.toLowerCase()
+ );
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js b/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js
new file mode 100644
index 0000000000..5920ac981e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js
@@ -0,0 +1,210 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that we fail on bad messages by marking the messages as bad rather than
+ * exploding or something bad like that.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { configureGlodaIndexing } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+const GLODA_BAD_MESSAGE_ID = 2;
+
+var illegalMessageTemplates = [
+ // -- Authors
+ {
+ name: "no author",
+ clobberHeaders: {
+ From: "",
+ },
+ },
+ {
+ name: "too many authors (> 1)",
+ clobberHeaders: {
+ From: "Tweedle Dee <dee@example.com>, Tweedle Dum <dum@example.com>",
+ },
+ },
+];
+
+var messageInjection;
+
+add_setup(function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_illegal_message_no_author() {
+ await illegal_message(illegalMessageTemplates[0]);
+});
+add_task(async function test_illegal_message_too_many_authors() {
+ await illegal_message(illegalMessageTemplates[1]);
+});
+
+/**
+ * A byzantine failure to stream should not sink us. Fake a failure.
+ */
+add_task(async function test_streaming_failure() {
+ configureGlodaIndexing({ injectFaultIn: "streaming" });
+
+ // Inject the messages.
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1 }]
+ );
+
+ // Indexing should complete without actually indexing the message.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+
+ // Make sure the header has the expected gloda bad message state.
+ let msgHdr = msgSet.getMsgHdr(0);
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), GLODA_BAD_MESSAGE_ID);
+
+ // Make sure gloda does not think the message is indexed
+ Assert.equal(Gloda.isMessageIndexed(msgHdr), false);
+
+ configureGlodaIndexing({});
+});
+
+/**
+ * If we have one bad message followed by a good message, the good message
+ * should still get indexed. Additionally, if we do a sweep on the folder,
+ * we should not attempt to index the message again.
+ */
+add_task(async function test_recovery_and_no_second_attempts() {
+ let [, goodSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1, clobberHeaders: { From: "" } }, { count: 1 }]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([goodSet], { recovered: 1 }));
+
+ // Index the folder; no messages should get indexed and there should be no
+ // failure things.
+ GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 0,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+});
+
+/**
+ * Make sure that we attempt to reindex a dirty bad message and that when we
+ * fail that we clear the dirty bit.
+ */
+add_task(async function test_reindex_on_dirty_clear_dirty_on_fail() {
+ // Inject a new illegal message
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [
+ {
+ count: 1,
+ clobberHeaders: illegalMessageTemplates[0].clobberHeaders,
+ },
+ ]
+ );
+
+ // Indexing should complete without actually indexing the message.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+
+ // Mark the message dirty, force the folder to be indexed.
+ let msgHdr = msgSet.getMsgHdr(0);
+ msgHdr.setUint32Property("gloda-dirty", 1);
+ GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+ // Now the message should be clean.
+ Assert.equal(msgHdr.getUint32Property("gloda-dirty"), 0);
+
+ // Check again with filthy.
+ msgHdr.setUint32Property("gloda-dirty", 2);
+ GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+ // Now the message should be clean.
+ Assert.equal(msgHdr.getUint32Property("gloda-dirty"), 0);
+});
+
+/**
+ * Using exciting templates from |illegalMessageTemplates|, verify that gloda
+ * fails to index them and marks the messages bad.
+ */
+async function illegal_message(aInfo) {
+ // Inject the messages.
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1, clobberHeaders: aInfo.clobberHeaders }]
+ );
+
+ // Indexing should complete without actually indexing the message.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+
+ // Make sure the header has the expected gloda bad message state.
+ let msgHdr = msgSet.getMsgHdr(0);
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), GLODA_BAD_MESSAGE_ID);
+
+ // Make sure gloda does not think the message is indexed.
+ Assert.equal(Gloda.isMessageIndexed(msgHdr), false);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_compaction.js b/comm/mailnews/db/gloda/test/unit/test_index_compaction.js
new file mode 100644
index 0000000000..7b6923ab61
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_compaction.js
@@ -0,0 +1,395 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that gloda does the right things in terms of compaction. Major cases:
+ *
+ * - Compaction occurs while we are in the process of indexing a folder. We
+ *   want to make sure we stop indexing cleanly.
+ *
+ * - A folder that we have already indexed gets compacted. We want to make sure
+ * that we update the message keys for all involved. This means verifying
+ * that both the on-disk representations and in-memory representations are
+ * correct.
+ *
+ * - Make sure that an indexing sweep performs a compaction pass if we kill the
+ * compaction job automatically scheduled by the conclusion of the
+ * compaction. (Simulating the user quitting before all compactions have
+ * been processed.)
+ *
+ * - Moves/deletes that happen after a compaction but before we process the
+ * compaction generate a special type of edge case that we need to check.
+ *
+ * There is also a less interesting case:
+ *
+ * - Make sure that the indexer does not try to start indexing a folder that is
+ * in the process of being compacted.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var {
+ configureGlodaIndexing,
+ resumeFromSimulatedHang,
+ waitForGlodaDBFlush,
+ waitForIndexingHang,
+} = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { PromiseTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/PromiseTestUtils.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ /*
+ * All the rest of the gloda tests (should) work with maildir, but this test
+ * only works/makes sense with mbox, so force it to always use mbox. This
+ * allows developers to manually change the default to maildir and have the
+ * gloda tests run with that.
+ */
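+  // ("berkeleystore" is the mbox-backed message store implementation.)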
+ Services.prefs.setCharPref(
+ "mail.serverDefaultStoreContractID",
+ "@mozilla.org/msgstore/berkeleystore;1"
+ );
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function compaction_indexing_pass_none_pending_commit() {
+ await compaction_indexing_pass({
+ name: "none pending commit",
+ forceCommit: true,
+ });
+});
+add_task(async function compaction_indexing_pass_all_pending_commit() {
+ await compaction_indexing_pass({
+ name: "all pending commit",
+ forceCommit: false,
+ });
+});
+
+/**
+ * Make sure that an indexing sweep performs a compaction pass if we kill the
+ * compaction job automatically scheduled by the conclusion of the compaction.
+ * (Simulating the user quitting before all compactions have been processed.)
+ */
+add_task(async function test_sweep_performs_compaction() {
+ let [[folder], moveSet, staySet] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 1 }, { count: 1 }]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([moveSet, staySet], { augment: true })
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([moveSet]));
+
+ // Disable event-driven indexing so there is no way the compaction job can
+ // get worked.
+ configureGlodaIndexing({ event: false });
+
+ // Compact.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Erase the compaction job.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // Make sure the folder is marked compacted.
+ let glodaFolder = Gloda.getFolderForFolder(msgFolder);
+ Assert.ok(glodaFolder.compacted);
+
+ // Re-enable indexing and fire up an indexing pass.
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Make sure the compaction happened.
+ verify_message_keys(staySet);
+});
+
+/**
+ * Make sure that if we compact a folder then move messages out of it and/or
+ * delete messages from it before its compaction pass happens that the
+ * compaction pass properly marks the messages deleted.
+ */
+add_task(
+ async function test_moves_and_deletions_on_compacted_folder_edge_case() {
+ let [[folder], compactMoveSet, moveSet, delSet, staySet] =
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ { count: 1 },
+ { count: 1 },
+ { count: 1 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed(
+ [compactMoveSet, moveSet, delSet, staySet],
+ {
+ augment: true,
+ }
+ )
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(compactMoveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactMoveSet]));
+
+ // Disable indexing because we don't want to process the compaction.
+ configureGlodaIndexing({ event: false });
+
+ // Compact the folder.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Erase the compaction job.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // - Delete
+ // Because of the compaction, the PendingCommitTracker forgot that the message
+ // we are deleting got indexed; we will receive no event.
+ await MessageInjection.deleteMessages(delSet);
+
+ // - Move
+ // Same deal on the move, except that it will try and trigger event-based
+ // indexing in the target folder...
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ // Kill the event-based indexing job of the target; we want the indexing sweep
+ // to see it as a move.
+ dump("killing all indexing jobs\n");
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // - Indexing pass
+ // Re-enable indexing so we can do a sweep.
+ configureGlodaIndexing({ event: true });
+
+ // This will trigger compaction (per the previous unit test) which should mark
+    // moveSet and delSet as deleted. Then it should move on to the next
+    // folder and add moveSet again...
+ dump("triggering indexing sweep\n");
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([moveSet], {
+ deleted: [moveSet, delSet],
+ })
+ );
+
+ // Sanity check the compaction for giggles.
+ verify_message_keys(staySet);
+ }
+);
+
+/**
+ * Induce a compaction while we are in the middle of indexing. Make sure we
+ * clean up and that the folder ends up getting indexed afterwards.
+ *
+ * Note that in order for compaction to happen there has to be something for
+ * compaction to do, so our prep involves moving a message to another folder.
+ * (Deletion actually produces more legwork for gloda whereas a local move is
+ * almost entirely free.)
+ */
+add_task(async function test_compaction_interrupting_indexing() {
+ // Create a folder with a message inside.
+ let [[folder], compactionFodderSet] =
+ await messageInjection.makeFoldersWithSets(1, [{ count: 1 }]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactionFodderSet]));
+
+ // Move that message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(compactionFodderSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactionFodderSet]));
+
+ // Configure the gloda indexer to hang while streaming the message.
+ configureGlodaIndexing({ hangWhile: "streaming" });
+
+ // Create a folder with a message inside.
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [folder],
+ [{ count: 1 }]
+ );
+
+ await waitForIndexingHang();
+
+  // Compact! This should kill the job, and it should be killed because of the
+  // compaction; no other reason should be able to do this.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Reset indexing to not hang.
+ configureGlodaIndexing({});
+
+ // Sorta get the event chain going again.
+ await resumeFromSimulatedHang(true);
+
+ // Because the folder was dirty it should actually end up getting indexed,
+ // so in the end the message will get indexed.
+ // Also, make sure a cleanup was observed.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { cleanedUp: 1 }));
+});
+
+/**
+ * Make sure the indexer does not enter (index) a folder that is in the
+ * process of being compacted.
+ */
+add_task(async function test_do_not_enter_compacting_folders() {
+ // Turn off indexing.
+ configureGlodaIndexing({ event: false });
+
+ // Create a folder with a message inside.
+ let [[folder]] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Lie and claim we are compacting that folder.
+ let glodaFolder = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folder)
+ );
+ glodaFolder.compacting = true;
+
+ // Now try and force ourselves to index that folder and its message.
+ // Turn back on indexing.
+ configureGlodaIndexing({ event: true });
+
+ // Verify that the indexer completes without having indexed anything.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+});
+
+/**
+ * Verify that the message keys match between the message headers and the
+ * (augmented on) gloda messages that correspond to the headers.
+ */
+function verify_message_keys(aSynSet) {
+ let iMsg = 0;
+ for (let msgHdr of aSynSet.msgHdrs()) {
+ let glodaMsg = aSynSet.glodaMessages[iMsg++];
+ if (msgHdr.messageKey != glodaMsg.messageKey) {
+ throw new Error(
+ "Message header " +
+ msgHdr +
+ " should have message key " +
+ msgHdr.messageKey +
+ " but has key " +
+ glodaMsg.messageKey +
+ " per gloda msg " +
+ glodaMsg
+ );
+ }
+ }
+ dump("verified message keys after compaction\n");
+}
+
+/**
+ * Compact a folder that we were not indexing. Make sure gloda's representations
+ * get updated to the new message keys.
+ *
+ * This is parameterized because the logic has special cases to deal with
+ * messages that were pending commit that got blown away.
+ */
+async function compaction_indexing_pass(aParam) {
+  // Create 5 messages. We will move just the third message so the first two
+  // messages keep their keys and the last two change. (We want 2 for both
+ // cases to avoid edge cases.)
+ let [[folder], sameSet, moveSet, shiftSet] =
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 2 },
+ { count: 1 },
+ { count: 2 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([sameSet, moveSet, shiftSet], {
+ augment: true,
+ })
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([moveSet]));
+
+ if (aParam.forceCommit) {
+ await waitForGlodaDBFlush();
+ }
+
+ // Compact the folder.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+ // Wait for the compaction job to complete.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ verify_message_keys(sameSet);
+ verify_message_keys(shiftSet);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js
new file mode 100644
index 0000000000..0004373f7a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js
@@ -0,0 +1,49 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for offline IMAP junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(function () {
+  // Set these preferences to stop the cache value "cachePDir" being fetched.
+  // This avoids errors on the JavaScript console, which would otherwise cause
+  // the test to fail.
+ // See bug 903402 for follow-up information.
+ Services.prefs.setComplexValue(
+ "browser.cache.disk.parent_directory",
+ Ci.nsIFile,
+ do_get_profile()
+ );
+ Services.prefs.setComplexValue(
+ "browser.cache.offline.parent_directory",
+ Ci.nsIFile,
+ do_get_profile()
+ );
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js
new file mode 100644
index 0000000000..c144155799
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js
@@ -0,0 +1,36 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for online IMAP junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js
new file mode 100644
index 0000000000..788b630d5b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for local junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js
new file mode 100644
index 0000000000..a340122ef0
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js
@@ -0,0 +1,38 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that are offline from the start.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+var msgGen;
+var scenarios;
+var messageInjection;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js
new file mode 100644
index 0000000000..4977dd5521
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js
@@ -0,0 +1,36 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that aren't offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
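+// Bodies of messages that are not available offline cannot be streamed for
+// indexing, so fulltext matches are not expected in this configuration.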
+expectFulltextResults = false;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js
new file mode 100644
index 0000000000..85031ec0ac
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js
@@ -0,0 +1,42 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that are not offline at first, but
+ * are made offline later.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+// We want to go offline once the messages have already been indexed online.
+goOffline = true;
+
+var msgGen;
+var scenarios;
+var messageInjection;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js
new file mode 100644
index 0000000000..5441a3062c
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js
@@ -0,0 +1,133 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test indexing support for local messages.
+ */
+
+var {
+ glodaTestHelperInitialize,
+ assertExpectedMessagesIndexed,
+ waitForGlodaIndexer,
+ messageInjection,
+ nukeGlodaCachesAndCollections,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Make sure that if we have to reparse a local folder we do not hang or
+ * otherwise misbehave. (We had a regression where we would hang.)
+ */
+add_task(async function test_reparse_of_local_folder_works() {
+ // Index a folder.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Force a db flush so we do not have any outstanding references to the
+ // folder or its headers.
+ await waitForGlodaDBFlush();
+
+ // Mark the summary invalid.
+ folder.msgDatabase.summaryValid = false;
+ // Clear the database so next time we have to reparse.
+ folder.msgDatabase.forceClosed();
+
+ // Force gloda to re-parse the folder again.
+ GlodaMsgIndexer.indexFolder(folder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+});
+
+/**
+ * Ensure that fromJSON for a non-singular attribute properly filters out
+ * "undefined" return values, specifically as it relates to tags. When the
+ * user removes them Gloda doesn't actually re-index the messages so the
+ * values will still be there when we next load the message.
+ *
+ * We directly monkey with the state of NounTag for no really good reason, but
+ * maybe it cuts down on disk I/O because we don't have to touch prefs.
+ */
+add_task(async function test_fromjson_of_removed_tag() {
+ // -- Inject
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+
+ // -- Tag
+ let tag = TagNoun.getTag("$label4");
+ msgSet.addTag(tag.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.tags.length, 1);
+ Assert.equal(gmsg.tags[0].key, tag.key);
+
+ // -- Forget about the tag, TagNoun!
+ delete TagNoun._tagMap[tag.key];
+ // This also means we have to replace the tag service with a liar.
+ let realTagService = TagNoun._msgTagService;
+ TagNoun._msgTagService = {
+ isValidKey() {
+ return false;
+ }, // Lies!
+ };
+
+ // -- Forget about the message, gloda!
+ let glodaId = gmsg.id;
+ nukeGlodaCachesAndCollections();
+
+ // -- Re-load the message.
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.id(glodaId);
+ let coll = await queryExpect(query, msgSet);
+
+ // -- Put the tag back in TagNoun before we check and possibly explode.
+ TagNoun._tagMap[tag.key] = tag;
+ TagNoun._msgTagService = realTagService;
+
+ // -- Verify the message apparently has no tags (despite no reindex).
+ gmsg = coll.items[0];
+ Assert.equal(gmsg.tags.length, 0);
+});
+
+/**
+ * Test that we are using hasOwnProperty or a properly guarded dict for
+ * NounTag so that if someone created a tag called "watch" and then deleted
+ * it, we don't end up exposing the watch function as the tag.
+ *
+ * Strictly speaking, this does not really belong here, but it's a matched set
+ * with the previous test.
+ */
+add_task(
+ function test_nountag_does_not_think_it_has_watch_tag_when_it_does_not() {
+ Assert.equal(TagNoun.fromJSON("watch"), undefined);
+ }
+);
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js b/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js
new file mode 100644
index 0000000000..c3f79f0c21
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js
@@ -0,0 +1,265 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests the folder indexing logic of GlodaMsgIndexer._worker_folderIndex
+ * in the greater context of the sweep indexing mechanism in a whitebox fashion.
+ *
+ * Automated indexing is suppressed for the duration of this file.
+ *
+ * In order to test the phases of the logic we inject failures into
+ * GlodaMsgIndexer._indexerGetEnumerator with a wrapper to control how far
+ * indexing gets. We also clobber or wrap other functions as needed.
+ */
+
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { configureGlodaIndexing } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+
+/**
+ * We want to stop GlodaMsgIndexer._indexerGetEnumerator after a
+ * set amount of folder indexing.
+ */
+const ENUMERATOR_SIGNAL_WORD = "STOP Me!";
+/**
+ * How many more enumerations before we should throw; 0 means don't throw.
+ */
+var stop_enumeration_after = 0;
+/**
+ * We hide the error in the promise chain. But we do have to know if it happens
+ * at another cycle.
+ */
+var error_is_thrown = false;
+/**
+ * Save the original GlodaMsgIndexer._indexerGetEnumerator so our test wrapper can call it.
+ */
+GlodaMsgIndexer._original_indexerGetEnumerator =
+ GlodaMsgIndexer._indexerGetEnumerator;
+/**
+ * Wrapper for GlodaMsgIndexer._indexerGetEnumerator to cause explosions.
+ */
+GlodaMsgIndexer._indexerGetEnumerator = function (...aArgs) {
+ if (stop_enumeration_after && !--stop_enumeration_after) {
+ error_is_thrown = true;
+ throw new Error(ENUMERATOR_SIGNAL_WORD);
+ }
+
+ return GlodaMsgIndexer._original_indexerGetEnumerator(...aArgs);
+};
+
+var messageInjection;
+
+add_setup(function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ // We do not want the event-driven indexer crimping our style.
+ configureGlodaIndexing({ event: false });
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * The value itself does not matter; it just needs to be present and be in a
+ * certain range for our logic testing.
+ */
+var arbitraryGlodaId = 4096;
+
+/**
+ * When we enter a filthy folder we should mark all the messages that have
+ * gloda-id's as filthy and then commit.
+ */
+add_task(async function test_propagate_filthy_from_folder_to_messages() {
+ // Mark the folder as filthy.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 3 },
+ ]);
+ let glodaFolder = Gloda.getFolderForFolder(folder);
+ glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;
+
+ // Mark each header with a gloda-id so they can get marked filthy.
+ for (let msgHdr of msgSet.msgHdrs()) {
+ msgHdr.setUint32Property("gloda-id", arbitraryGlodaId);
+ }
+
+ // Force the database to see it as filthy so we can verify it changes.
+ glodaFolder._datastore.updateFolderDirtyStatus(glodaFolder);
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM folderLocations WHERE id = ? " +
+ "AND dirtyStatus = ?",
+ glodaFolder.id,
+ glodaFolder.kFolderFilthy
+ );
+
+ // Index the folder, aborting at the second get enumerator request.
+ stop_enumeration_after = 2;
+
+ await spin_folder_indexer(folder);
+
+ // The folder should only be dirty.
+ Assert.equal(glodaFolder.dirtyStatus, glodaFolder.kFolderDirty);
+ // Make sure the database sees it as dirty.
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM folderLocations WHERE id = ? " +
+ "AND dirtyStatus = ?",
+ glodaFolder.id,
+ glodaFolder.kFolderDirty
+ );
+
+ // The messages should be filthy per the headers.
+ // We force a commit of the database.
+ for (let msgHdr of msgSet.msgHdrs()) {
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-dirty"),
+ GlodaMsgIndexer.kMessageFilthy
+ );
+ }
+});
+
+/**
+ * Make sure our counting pass and our indexing passes get it right. We test
+ * with 0, 1, and 2 messages matching.
+ */
+add_task(async function test_count_pass() {
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2 },
+ ]);
+
+ let hdrs = msgSet.msgHdrList;
+
+ // - (clean) messages with gloda-id's do not get indexed
+  // Nothing is indexed at this point, so all 2 should be counted.
+ error_is_thrown = false;
+ stop_enumeration_after = 2;
+ await spin_folder_indexer(folder, 2);
+
+ // Pretend the first is indexed, leaving a count of 1.
+ hdrs[0].setUint32Property("gloda-id", arbitraryGlodaId);
+ error_is_thrown = false;
+ stop_enumeration_after = 2;
+ await spin_folder_indexer(folder, 1);
+
+ // Pretend both are indexed, count of 0.
+ hdrs[1].setUint32Property("gloda-id", arbitraryGlodaId);
+ // No explosion should happen since we should never get to the second
+ // enumerator.
+ error_is_thrown = false;
+ await spin_folder_indexer(folder, 0);
+
+ // - Dirty messages get indexed.
+ hdrs[0].setUint32Property("gloda-dirty", GlodaMsgIndexer.kMessageDirty);
+ stop_enumeration_after = 2;
+ error_is_thrown = false;
+ await spin_folder_indexer(folder, 1);
+
+ hdrs[1].setUint32Property("gloda-dirty", GlodaMsgIndexer.kMessageDirty);
+ stop_enumeration_after = 2;
+ error_is_thrown = false;
+ await spin_folder_indexer(folder, 2);
+});
+
+/**
+ * Create a folder indexing job for the given injection folder handle and
+ * run it until completion.
+ *
+ * The folder indexer will continue running on its own if we don't throw an
+ * Error in GlodaMsgIndexer._indexerGetEnumerator.
+ */
+async function spin_folder_indexer(aFolderHandle, aExpectedJobGoal) {
+ let msgFolder = messageInjection.getRealInjectionFolder(aFolderHandle);
+
+ // Cheat and use indexFolder to build the job for us.
+ GlodaMsgIndexer.indexFolder(msgFolder);
+ // Steal that job.
+ let job = GlodaIndexer._indexQueue.pop();
+ GlodaIndexer._indexingJobGoal--;
+
+ // Create the callbackHandle.
+ let callbackHandle = new CallbackHandle();
+ // Create the worker.
+ let worker = GlodaMsgIndexer._worker_folderIndex(job, callbackHandle);
+ try {
+ callbackHandle.pushAndGo(worker, null);
+ await Promise.race([
+ callbackHandle.promise,
+ TestUtils.waitForCondition(() => {
+ return error_is_thrown;
+ }),
+ ]);
+ } catch (ex) {
+ do_throw(ex);
+ }
+
+ if (aExpectedJobGoal !== undefined) {
+ Assert.equal(job.goal, aExpectedJobGoal);
+ }
+}
+
+/**
+ * Implements GlodaIndexer._callbackHandle's interface adapted to our async
+ * test driver. This allows us to run indexing workers directly in tests
+ * or support code.
+ *
+ * We do not do anything with the context stack or recovery. Use the actual
+ * indexer callback handler for that!
+ *
+ * Actually, we do very little at all right now. This will fill out as needs
+ * arise.
+ */
+class CallbackHandle {
+ constructor() {
+ this._promise = new Promise(resolve => {
+ this._resolve = resolve;
+ });
+ }
+
+ pushAndGo(aIterator, aContext) {
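+    // Swallow the error we deliberately inject via the enumerator wrapper;
+    // anything else is a real failure and should propagate.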
+ this.glodaWorkerAdapter(aIterator, this._resolve).catch(reason => {
+ if (!reason.message.match(ENUMERATOR_SIGNAL_WORD)) {
+ throw reason;
+ }
+ });
+ }
+
+ async glodaWorkerAdapter(aIter, resolve) {
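+    // Crank the generator-based worker: kWorkSync means keep going, while
+    // kWorkDone/kWorkDoneWithResult mean the job finished and we can resolve.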
+ while (!error_is_thrown) {
+ switch (aIter.next().value) {
+ case GlodaConstants.kWorkSync:
+ break;
+ case GlodaConstants.kWorkDone:
+ case GlodaConstants.kWorkDoneWithResult:
+ resolve();
+ return;
+ default:
+ break;
+ }
+ }
+ }
+ get promise() {
+ return this._promise;
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_intl.js b/comm/mailnews/db/gloda/test/unit/test_intl.js
new file mode 100644
index 0000000000..e6e9868189
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_intl.js
@@ -0,0 +1,355 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Sanity check our encoding transforms and make sure the mozporter tokenizer
+ * is resulting in the expected fulltext search results. Specifically:
+ * - Check that subject, body, and attachment names are properly indexed;
+ * previously we screwed up at least one of these in terms of handling
+ * encodings properly.
+ * - Check that we can fulltext search on those things afterwards.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/**
+ * To make the encoding pairs:
+ * - For the subject bit:
+ * import email
+ * h = email.Header.Header(charset=CHARSET)
+ * h.append(STRING)
+ * h.encode()
+ * - For the body bit
+ * s.encode(CHARSET)
+ */
+var intlPhrases = [
+ // -- CJK case
+ {
+ name: "CJK: Vending Machine",
+ actual: "\u81ea\u52d5\u552e\u8ca8\u6a5f",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?6Ieq5YuV5ZSu6LKo5qmf?=",
+ "\xe8\x87\xaa\xe5\x8b\x95\xe5\x94\xae\xe8\xb2\xa8\xe6\xa9\x9f",
+ ],
+ "euc-jp": [
+ "=?shift-jis?b?jqmTrppTid2LQA==?=",
+ "\xbc\xab\xc6\xb0\xd3\xb4\xb2\xdf\xb5\xa1",
+ ],
+ "shift-jis": [
+ "=?shift-jis?b?jqmTrppTid2LQA==?=",
+ "\x8e\xa9\x93\xae\x9aS\x89\xdd\x8b@",
+ ],
+ },
+ searchPhrases: [
+ // Match bi-gram driven matches starting from the front.
+ { body: '"\u81ea\u52d5"', match: true },
+ { body: '"\u81ea\u52d5\u552e"', match: true },
+ { body: '"\u81ea\u52d5\u552e\u8ca8"', match: true },
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5f"', match: true },
+ // Now match from the back (bi-gram based).
+ { body: '"\u52d5\u552e\u8ca8\u6a5f"', match: true },
+ { body: '"\u552e\u8ca8\u6a5f"', match: true },
+ { body: '"\u8ca8\u6a5f"', match: true },
+ // Now everybody in the middle!
+ { body: '"\u52d5\u552e\u8ca8"', match: true },
+ { body: '"\u552e\u8ca8"', match: true },
+ { body: '"\u52d5\u552e"', match: true },
+ // -- Now match nobody!
+ // Nothing in common with the right answer.
+ { body: '"\u81eb\u52dc"', match: false },
+ // Too long, no match!
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5f\u6a5f"', match: false },
+ // Minor change at the end.
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5e"', match: false },
+ ],
+ },
+ // Use two words where the last character is a multi-byte sequence and one of
+ // them is the last word in the string. This helps test an off-by-one error
+ // in both the asymmetric case (query's last character is last character in
+ // the tokenized string but it is not the last character in the body string)
+ // and symmetric case (last character in the query and the body).
+ {
+ name: "Czech diacritics",
+ actual: "Slov\u00e1cko Moravsk\u00e9 rodin\u011b",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?U2xvdsOhY2tvIE1vcmF2c2vDqSByb2RpbsSb?=",
+ "Slov\xc3\xa1cko Moravsk\xc3\xa9 rodin\xc4\x9b",
+ ],
+ },
+ searchPhrases: [
+ // -- Desired
+ // Match on exact for either word should work
+ { body: "Slov\u00e1cko", match: true },
+ { body: "Moravsk\u00e9", match: true },
+ { body: "rodin\u011b", match: true },
+ // The ASCII uppercase letters get case-folded
+ { body: "slov\u00e1cko", match: true },
+ { body: "moravsk\u00e9", match: true },
+ { body: "rODIN\u011b", match: true },
+ ],
+ },
+ // Ignore accent search!
+ {
+ name: "having accent: Paris",
+ actual: "Par\u00eds",
+ encodings: {
+ "utf-8": ["=?UTF-8?B?UGFyw61z?=", "Par\xc3\xads"],
+ },
+ searchPhrases: [{ body: "paris", match: true }],
+ },
+ // Case insensitive case for non-ASCII characters.
+ {
+ name: "Russian: new",
+ actual: "\u041d\u043e\u0432\u043e\u0435",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?0J3QvtCy0L7QtQ==?=",
+ "\xd0\x9d\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xb5",
+ ],
+ },
+ searchPhrases: [{ body: "\u043d\u043e\u0432\u043e\u0435", match: true }],
+ },
+ // Case-folding happens after decomposition.
+ {
+ name: "Awesome where A has a bar over it",
+ actual: "\u0100wesome",
+ encodings: {
+ "utf-8": ["=?utf-8?q?=C4=80wesome?=", "\xc4\x80wesome"],
+ },
+ searchPhrases: [
+ { body: "\u0100wesome", match: true }, // Upper A-bar
+ { body: "\u0101wesome", match: true }, // Lower a-bar
+ { body: "Awesome", match: true }, // Upper A
+ { body: "awesome", match: true }, // Lower a
+ ],
+ },
+ // Deep decomposition happens and after that, case folding.
+ {
+ name: "Upper case upsilon with diaeresis and hook goes to small upsilon",
+ actual: "\u03d4esterday",
+ encodings: {
+ "utf-8": ["=?utf-8?q?=CF=94esterday?=", "\xcf\x94esterday"],
+ },
+ searchPhrases: [
+ { body: "\u03d4esterday", match: true }, // Y_: 03d4 => 03d2 (decomposed)
+ { body: "\u03d3esterday", match: true }, // Y_' 03d3 => 03d2 (decomposed)
+ { body: "\u03d2esterday", match: true }, // Y_ 03d2 => 03a5 (decomposed)
+ { body: "\u03a5esterday", match: true }, // Y 03a5 => 03c5 (lowercase)
+ { body: "\u03c5esterday", match: true }, // y 03c5 (final state)
+ ],
+ },
+ // Full-width alphabet.
+ // Even if search phrases are ASCII, it has to hit.
+ {
+ name: "Full-width Thunderbird",
+ actual:
+ "\uff34\uff48\uff55\uff4e\uff44\uff45\uff52\uff42\uff49\uff52\uff44",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?77y0772I772V772O772E772F772S772C772J772S772E?=",
+ "\xef\xbc\xb4\xef\xbd\x88\xef\xbd\x95\xef\xbd\x8e\xef\xbd\x84\xef\xbd\x85\xef\xbd\x92\xef\xbd\x82\xef\xbd\x89\xef\xbd\x92\xef\xbd\x84",
+ ],
+ },
+ searchPhrases: [
+ // Full-width lower.
+ {
+ body: "\uff34\uff28\uff35\uff2e\uff24\uff25\uff32\uff22\uff29\uff32\uff24",
+ match: true,
+ },
+ // Half-width.
+ { body: "Thunderbird", match: true },
+ ],
+ },
+ // Half-width Katakana with voiced sound mark.
+ // Even if search phrases are full-width, it has to hit.
+ {
+ name: "Half-width Katakana: Thunderbird (SANDAABAADO)",
+ actual: "\uff7b\uff9d\uff80\uff9e\uff70\uff8a\uff9e\uff70\uff84\uff9e",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?7727776d776A776e772w776K776e772w776E776e?=",
+ "\xef\xbd\xbb\xef\xbe\x9d\xef\xbe\x80\xef\xbe\x9e\xef\xbd\xb0\xef\xbe\x8a\xef\xbe\x9e\xef\xbd\xb0\xef\xbe\x84\xef\xbe\x9e",
+ ],
+ },
+ searchPhrases: [
+ { body: "\u30b5\u30f3\u30c0\u30fc\u30d0\u30fc\u30c9", match: true },
+ ],
+ },
+ // Thai: Would you like to see the movie?
+ {
+ name: "Thai: query movie word into Thai language content",
+ actual:
+ "\u0e04\u0e38\u0e13\u0e2d\u0e22\u0e32\u0e01\u0e44\u0e1b\u0e14\u0e39\u0e2b\u0e19\u0e31\u0e07",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?4LiE4Li44LiT4Lit4Lii4Liy4LiB4LmE4Lib4LiU4Li54Lir4LiZ4Lix4LiH?=",
+ "\xe0\xb8\x84\xe0\xb8\xb8\xe0\xb8\x93\xe0\xb8\xad\xe0\xb8\xa2\xe0\xb8\xb2\xe0\xb8\x81\xe0\xb9\x84\xe0\xb8\x9b\xe0\xb8\x94\xe0\xb8\xb9\xe0\xb8\xab\xe0\xb8\x99\xe0\xb8\xb1\xe0\xb8\x87",
+ ],
+ },
+ searchPhrases: [{ body: "\u0e2b\u0e19\u0e31\u0e07", match: true }],
+ },
+];
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ // Use mbox injection because the fake server chokes sometimes right now.
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_index_all_phrases() {
+ for (let phrase of intlPhrases) {
+ await indexPhrase(phrase);
+ }
+});
+
+add_task(async function flush_db() {
+ // Force a db flush so I can investigate the database if I want.
+ await waitForGlodaDBFlush();
+});
+
+add_task(async function test_fulltextsearch_all_phrases() {
+ for (let phrase of intlPhrases) {
+ await fulltextsearchPhrase(phrase);
+ }
+});
+
+/**
+ * Names with encoded commas in them can screw up our mail address parsing if
+ * we perform the mime decoding prior to handing the mail address off for
+ * parsing.
+ */
+add_task(async function test_encoding_complications_with_mail_addresses() {
+ let basePair = msgGen.makeNameAndAddress();
+ // The =2C encodes a comma!
+ let encodedCommaPair = ["=?iso-8859-1?Q?=DFnake=2C_=DFammy?=", basePair[1]];
+ // "Snake, Sammy", but with a much cooler looking S-like character!
+ let decodedName = "\u00dfnake, \u00dfammy";
+ // Use the thing with the comma in it for all cases; previously there was an
+ // asymmetry between to and cc...
+ let smsg = msgGen.makeMessage({
+ from: encodedCommaPair,
+ to: [encodedCommaPair],
+ cc: [encodedCommaPair],
+ });
+ function verify_sammy_snake(unused, gmsg) {
+ Assert.equal(gmsg.from.contact.name, decodedName);
+ Assert.equal(gmsg.to.length, 1);
+ Assert.equal(gmsg.to[0].id, gmsg.from.id);
+ Assert.equal(gmsg.cc.length, 1);
+ Assert.equal(gmsg.cc[0].id, gmsg.from.id);
+ }
+
+ let synSet = new SyntheticMessageSet([smsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([synSet], { verifier: verify_sammy_snake })
+ );
+});
+
+/**
+ * For each phrase in the intlPhrases array (test_index_all_phrases iterates
+ * over the array and calls us for each entry), create a message where the
+ * subject, body, and attachment name are populated using the encodings in
+ * the phrase's "encodings" attribute, one encoding per message. Make sure
+ * that the strings as exposed by the gloda representation are equal to the
+ * expected/actual value.
+ * Stash each created synthetic message in a resultList list on the phrase so
+ * that we can use them as expected query results in
+ * |fulltextsearchPhrase|.
+ */
+async function indexPhrase(aPhrase) {
+ // Create a synthetic message for each of the delightful encoding types.
+ let messages = [];
+ aPhrase.resultList = [];
+ for (let charset in aPhrase.encodings) {
+ let [quoted, bodyEncoded] = aPhrase.encodings[charset];
+
+ let smsg = msgGen.makeMessage({
+ subject: quoted,
+ body: { charset, encoding: "8bit", body: bodyEncoded },
+ attachments: [{ filename: quoted, body: "gabba gabba hey" }],
+ // Save off the actual value for checking.
+ callerData: [charset, aPhrase.actual],
+ });
+
+ messages.push(smsg);
+ aPhrase.resultList.push(smsg);
+ }
+ let synSet = new SyntheticMessageSet(messages);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([synSet], { verifier: verify_index })
+ );
+}
+
+/**
+ * Does the per-message verification for indexPhrase. Knows what is right for
+ * each message because of the callerData attribute on the synthetic message.
+ */
+function verify_index(smsg, gmsg) {
+ let [charset, actual] = smsg.callerData;
+ let subject = gmsg.subject;
+ let indexedBodyText = gmsg.indexedBodyText.trim();
+ let attachmentName = gmsg.attachmentNames[0];
+ dump("using character set: " + charset + " actual: " + actual + "\n");
+ dump("subject: " + subject + " (len: " + subject.length + ")\n");
+ Assert.equal(actual, subject);
+ dump("Body: " + indexedBodyText + " (len: " + indexedBodyText.length + ")\n");
+ Assert.equal(actual, indexedBodyText);
+ dump(
+ "Attachment name: " +
+ attachmentName +
+ " (len: " +
+ attachmentName.length +
+ ")\n"
+ );
+ Assert.equal(actual, attachmentName);
+}
+
+/**
+ * For each phrase, make sure that all of the searchPhrases either match or fail
+ * to match as appropriate.
+ */
+async function fulltextsearchPhrase(aPhrase) {
+ for (let searchPhrase of aPhrase.searchPhrases) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.bodyMatches(searchPhrase.body);
+ await queryExpect(query, searchPhrase.match ? aPhrase.resultList : []);
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_migration.js b/comm/mailnews/db/gloda/test/unit/test_migration.js
new file mode 100644
index 0000000000..f7e1bc334d
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_migration.js
@@ -0,0 +1,151 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test migration logic by artificially inducing or simulating the problem, then
+ * triggering the migration logic, then verifying things ended up correct, including
+ * the schema version, so that a second pass of the logic doesn't happen. (As
+ * opposed to checking in an example of a broken database and running against
+ * that.)
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ nukeGlodaCachesAndCollections,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush, makeABCardForAddressPair } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { sqlRun } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
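+// The bad-message marker used by pre-migration schema versions; the migration
+// sweep looks for headers stamped with this value.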
+const GLODA_OLD_BAD_MESSAGE_ID = 1;
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Fix the fallout from bug 732372 (with this patch for bug 734507) which left
+ * identities whose e-mails were in the address book without contacts and then
+ * broke messages involving them.
+ */
+add_task(async function test_fix_missing_contacts_and_fallout() {
+ // -- Setup
+
+ // - Create 4 e-mail addresses, 2 of which are in the address book. (We want
+ // to make sure we have to iterate, hence >1).
+ let abPeeps = msgGen.makeNamesAndAddresses(2);
+ let nonAbPeeps = msgGen.makeNamesAndAddresses(2);
+ makeABCardForAddressPair(abPeeps[0]);
+ makeABCardForAddressPair(abPeeps[1]);
+
+ // - Create messages of the genres [from, to]: [inAB, inAB], [inAB, !inAB],
+ // [!inAB, inAB], [!inAB, !inAB]. The permutations are black box overkill.
+ // Smear the messages over multiple folders for realism.
+ let [, yesyesMsgSet, yesnoMsgSet, noyesMsgSet, nonoMsgSet] =
+ await messageInjection.makeFoldersWithSets(3, [
+ { count: 2, from: abPeeps[0], to: [abPeeps[1]] },
+ { count: 2, from: abPeeps[1], to: nonAbPeeps },
+ { count: 2, from: nonAbPeeps[0], to: abPeeps },
+ { count: 2, from: nonAbPeeps[1], to: [nonAbPeeps[0]] },
+ ]);
+
+ // Union the yeses together; we don't care about their composition.
+ let yesMsgSet = yesyesMsgSet.union(yesnoMsgSet).union(noyesMsgSet),
+ noMsgSet = nonoMsgSet;
+
+ // - Let gloda index the messages so the identities get created.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([yesMsgSet, noMsgSet], { augment: true })
+ );
+ // The messages are now indexed and the contacts created.
+
+ // - Compel an indexing sweep so the folder's dirty statuses get cleared
+ GlodaMsgIndexer.initialSweep();
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([])); // (no new messages to index)
+
+ // - Force a DB commit so the pending commit tracker gets emptied out
+ // (otherwise we need to worry about its state overriding our clobbering)
+ await waitForGlodaDBFlush();
+
+ // - Delete the contact records for the people in the address book.
+ await sqlRun(
+ "DELETE FROM contacts WHERE id IN (" +
+ yesMsgSet.glodaMessages[0].from.contact.id +
+ ", " +
+ yesMsgSet.glodaMessages[0].to[0].contact.id +
+ ")"
+ );
+
+ // - Nuke the gloda caches so we totally forget those contact records.
+ nukeGlodaCachesAndCollections();
+
+ // - Manually mark the messages involving the inAB people with the _old_ bad
+ // id marker so that our scan will see them.
+ for (let msgHdr of yesMsgSet.msgHdrs()) {
+ msgHdr.setUint32Property("gloda-id", GLODA_OLD_BAD_MESSAGE_ID);
+ }
+
+  // - Set the db schema version to the version with the bug (26).
+ // Sanity check that gloda actually populates the value with the current
+ // version correctly.
+ Assert.equal(
+ GlodaDatastore._actualSchemaVersion,
+ GlodaDatastore._schemaVersion
+ );
+ GlodaDatastore._actualSchemaVersion = 26;
+ await sqlRun("PRAGMA user_version = 26");
+ // Make sure that took, since we check it below as a success indicator.
+ let verRows = await sqlRun("PRAGMA user_version");
+ Assert.equal(verRows[0].getInt64(0), 26);
+
+ // -- Test
+ // - Trigger the migration logic and request an indexing sweep.
+ GlodaMsgIndexer.disable();
+ GlodaMsgIndexer.enable();
+ GlodaMsgIndexer.initialSweep();
+
+ // - Wait for the indexer to complete, expecting that the messages that we
+ // marked bad will get indexed but not the good messages.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([yesMsgSet], { augment: true }));
+
+ // - Verify that the identities have contacts again.
+ // Must have the contact object.
+ Assert.notEqual(yesMsgSet.glodaMessages[0].from.contact, undefined);
+ // The contact's name should come from the address book card
+ Assert.equal(yesMsgSet.glodaMessages[0].from.contact.name, abPeeps[0][0]);
+
+ // - Verify that the schema version changed from gloda's perspective and from
+ // the db's perspective.
+ verRows = await sqlRun("PRAGMA user_version");
+ Assert.equal(verRows[0].getInt64(0), GlodaDatastore._schemaVersion);
+ Assert.equal(
+ GlodaDatastore._actualSchemaVersion,
+ GlodaDatastore._schemaVersion
+ );
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js b/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js
new file mode 100644
index 0000000000..2e18fbe11f
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js
@@ -0,0 +1,445 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * General testing of the byte-counting libmime facility, to make sure that what
+ * is streamed to us is actually labeled with the right size.
+ */
+
+/*
+ * Do not include glodaTestHelper because we do not want gloda loaded and it
+ * adds a lot of runtime overhead which makes certain debugging strategies like
+ * using chronicle-recorder impractical.
+ */
+
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var {
+ MessageGenerator,
+ SyntheticPartLeaf,
+ SyntheticPartMultiMixed,
+ SyntheticPartMultiRelated,
+ SyntheticMessageSet,
+} = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen = new MessageGenerator();
+var messageInjection;
+
+add_setup(function () {
+ // Sanity check: make sure the original text occupies originalTextByteCount bytes in UTF-8.
+ Assert.equal(
+ new TextEncoder().encode(originalText).length,
+ originalTextByteCount
+ );
+
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+});
+
+var htmlText = "<html><head></head><body>I am HTML! Woo! </body></html>";
+
+var partHtml = new SyntheticPartLeaf(htmlText, {
+ contentType: "text/html",
+});
+
+// This text is 168 characters long, and occupies 173 bytes when encoded in
+// UTF-8. (We verify this in the add_setup task above.) Note that
+// you cannot use this text directly because it isn't pure ASCII. You must use
+// one of the encoded forms below.
+var originalText =
+ "Longtemps, je me suis couché de bonne heure. Parfois, à " +
+ "peine ma bougie éteinte, mes yeux se fermaient si vite que je n'avais pas le " +
+ "temps de me dire : « Je m'endors. »";
+var originalTextByteCount = 173;
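+// A quick check of that arithmetic: the five non-ASCII characters in the text
+// above (é, à, é, «, ») each take two bytes in UTF-8 and the remaining 163
+// characters take one byte each, so 163 + 5 * 2 = 173 bytes for 168 characters.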
+
+var b64Text =
+ "TG9uZ3RlbXBzLCBqZSBtZSBzdWlzIGNvdWNow6kgZGUgYm9ubmUgaGV1cmUuIFBhcmZvaXMs\n" +
+ "IMOgIHBlaW5lIG1hIGJvdWdpZSDDqXRlaW50ZSwgbWVzIHlldXggc2UgZmVybWFpZW50IHNp\n" +
+ "IHZpdGUgcXVlIGplIG4nYXZhaXMgcGFzIGxlIHRlbXBzIGRlIG1lIGRpcmUgOiDCqyBKZSBt\n" +
+ "J2VuZG9ycy4gwrsK";
+
+var qpText =
+ "Longtemps,=20je=20me=20suis=20couch=C3=A9=20de=20bonne=20heure.=20Parfois,=\n" +
+ "=20=C3=A0=20peine=20ma=20bougie=20=C3=A9teinte,=20mes=20yeux=20se=20fermaie=\n" +
+ "nt=20si=20vite=20que=20je=20n'avais=20pas=20le=20temps=20de=20me=20dire=20:=\n" +
+ "=20=C2=AB=20Je=20m'endors.=20=C2=BB";
+
+var uuText =
+ "begin 666 -\n" +
+ 'M3&]N9W1E;7!S+"!J92!M92!S=6ES(&-O=6-HPZD@9&4@8F]N;F4@:&5U<F4N\n' +
+ "M(%!A<F9O:7,L(,.@('!E:6YE(&UA(&)O=6=I92##J71E:6YT92P@;65S('EE\n" +
+ "M=7@@<V4@9F5R;6%I96YT('-I('9I=&4@<75E(&IE(&XG879A:7,@<&%S(&QE\n" +
+ "G('1E;7!S(&1E(&UE(&1I<F4@.B#\"JR!*92!M)V5N9&]R<RX@PKL*\n" +
+ "\n" +
+ "end";
+
+var yencText =
+ "Hello there --\n" +
+ "=ybegin line=128 size=174 name=jane.doe\n" +
+ "\x76\x99\x98\x91\x9e\x8f\x97\x9a\x9d\x56\x4a\x94\x8f\x4a\x97\x8f" +
+ "\x4a\x9d\x9f\x93\x9d\x4a\x8d\x99\x9f\x8d\x92\xed\xd3\x4a\x8e\x8f" +
+ "\x4a\x8c\x99\x98\x98\x8f\x4a\x92\x8f\x9f\x9c\x8f\x58\x4a\x7a\x8b" +
+ "\x9c\x90\x99\x93\x9d\x56\x4a\xed\xca\x4a\x9a\x8f\x93\x98\x8f\x4a" +
+ "\x97\x8b\x4a\x8c\x99\x9f\x91\x93\x8f\x4a\xed\xd3\x9e\x8f\x93\x98" +
+ "\x9e\x8f\x56\x4a\x97\x8f\x9d\x4a\xa3\x8f\x9f\xa2\x4a\x9d\x8f\x4a" +
+ "\x90\x8f\x9c\x97\x8b\x93\x8f\x98\x9e\x4a\x9d\x93\x4a\xa0\x93\x9e" +
+ "\x8f\x4a\x9b\x9f\x8f\x4a\x94\x8f\x4a\x98\x51\x8b\xa0\x8b\x93\x9d" +
+ "\x0d\x0a\x4a\x9a\x8b\x9d\x4a\x96\x8f\x4a\x9e\x8f\x97\x9a\x9d\x4a" +
+ "\x8e\x8f\x4a\x97\x8f\x4a\x8e\x93\x9c\x8f\x4a\x64\x4a\xec\xd5\x4a" +
+ "\x74\x8f\x4a\x97\x51\x8f\x98\x8e\x99\x9c\x9d\x58\x4a\xec\xe5\x34" +
+ "\x0d\x0a" +
+ "=yend size=174 crc32=7efccd8e\n";
+
+// That completely exotic encoding is only detected if there is no content type
+// on the message, which is usually the case in newsgroups. I hate you yencode!
+// var partYencText = new SyntheticPartLeaf("I am text! Woo!\n\n" + yencText, {
+// contentType: "",
+// charset: "",
+// format: "",
+// });
+
+var partUUText = new SyntheticPartLeaf(
+ "I am text! With uuencode... noes...\n\n" + uuText,
+ {
+ contentType: "",
+ charset: "",
+ format: "",
+ }
+);
+
+var tachText = {
+ filename: "bob.txt",
+ body: qpText,
+ charset: "utf-8",
+ encoding: "quoted-printable",
+};
+
+var tachInlineText = {
+ filename: "foo.txt",
+ body: qpText,
+ format: null,
+ charset: "utf-8",
+ encoding: "quoted-printable",
+ disposition: "inline",
+};
+
+// Images have a different behavior than other attachments: they are displayed
+// inline most of the time, so there are two different code paths that need to
+// enable streaming and byte counting to the JS mime emitter.
+
+var tachImage = {
+ filename: "bob.png",
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: b64Text,
+};
+
+var tachPdf = {
+ filename: "bob.pdf",
+ contentType: "application/pdf",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: b64Text,
+};
+
+var tachUU = {
+ filename: "john.doe",
+ contentType: "application/x-uuencode",
+ encoding: "uuencode",
+ charset: null,
+ format: null,
+ body: uuText,
+};
+
+var tachApplication = {
+ filename: "funky.funk",
+ contentType: "application/x-funky",
+ encoding: "base64",
+ body: b64Text,
+};
+
+var relImage = {
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ contentId: "part1.foo@bar.invalid",
+ body: b64Text,
+};
+
+var tachVCard = {
+ filename: "bob.vcf",
+ contentType: "text/vcard",
+ encoding: "7bit",
+ body: "begin:vcard\nfn:Bob\nend:vcard\n",
+};
+var partTachVCard = new SyntheticPartLeaf(tachVCard.body, tachVCard);
+
+new SyntheticPartLeaf(relImage.body, relImage);
+
+var messageInfos = [
+ {
+ name: "uuencode inline",
+ bodyPart: partUUText,
+ subject: "duh",
+ epsilon: 1,
+ checkTotalSize: false,
+ },
+ // An encoding type specific to newsgroups; we're not interested. Gloda doesn't even
+ // treat this as an attachment (probably because gloda requires an attachment
+ // to have a content-type, which these yencoded parts don't have), but size IS
+ // counted properly nonetheless.
+ /* {
+ name: 'text/plain with yenc inline',
+ bodyPart: partYencText,
+ subject: "yEnc-Prefix: \"jane.doe\" 174 yEnc bytes - yEnc test (1)",
+ },*/
+ // An inline image; we're not interested either. Gloda doesn't keep that as an
+ // attachment (probably a deliberate choice), and the size is NOT counted properly.
+ // (We don't want to investigate; it's doubtful the information is useful anyway.)
+ /* {
+ name: 'multipart/related',
+ bodyPart: new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ },*/
+ // This doesn't really make sense because it returns the length of the
+ // encoded blob without the envelope. Disabling as part of bug 711980.
+ /* {
+ name: '.eml attachment',
+ bodyPart: new SyntheticPartMultiMixed([
+ partHtml,
+ msgGen.makeMessage({ body: { body: qpText,
+ charset: "UTF-8",
+ encoding: "quoted-printable" } }),
+ ]),
+ epsilon: 1,
+ },*/
+ // All of the other common cases work fine.
+ {
+ name: 'all sorts of "real" attachments',
+ bodyPart: partHtml,
+ attachments: [
+ tachImage,
+ tachPdf,
+ tachUU,
+ tachApplication,
+ tachText,
+ tachInlineText,
+ ],
+ epsilon: 2,
+ },
+];
+
+add_task(async function test_message_attachments() {
+ for (let messageInfo of messageInfos) {
+ await message_attachments(messageInfo);
+ }
+});
+
+var bogusMessage = msgGen.makeMessage({ body: { body: originalText } });
+bogusMessage._contentType = "woooooo"; // Breaking abstraction boundaries. Bad.
+
+var bogusMessageInfos = [
+ // In this case, the wooooo part is not an attachment, so its bytes won't be
+ // counted (size will end up being 0 bytes). We don't check the size, but
+ // check_bogus_parts makes sure we're able to come up with a resulting size
+ // for the MimeMessage.
+ //
+ // In that very case, since message M is an attachment, libmime will count M's
+ // bytes, and we could have MimeMessages prefer the size libmime tells them
+ // (when they have it), rather than recursively computing their sizes. I'm not
+ // sure changing jsmimeemitter.js is worth the trouble just for buggy
+ // messages...
+ {
+ name: ".eml attachment with inner MimeUnknown",
+ bodyPart: new SyntheticPartMultiMixed([
+ partHtml,
+ msgGen.makeMessage({
+ // <--- M
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiRelated([
+ partHtml,
+ new SyntheticPartLeaf(htmlText, { contentType: "woooooo" }),
+ ]),
+ ]),
+ }),
+ ]),
+ epsilon: 6,
+ checkSize: false,
+ },
+];
+
+add_task(async function test_bogus_messages(info) {
+ for (let bogusMessageInfo of bogusMessageInfos) {
+ await bogus_messages(bogusMessageInfo);
+ }
+});
+
+add_task(async function test_have_attachments() {
+ // The goal here is to explicitly check that these messages have attachments.
+ let number = 1;
+ let synMsg = msgGen.makeMessage({
+ name: "multipart/related",
+ bodyPart: new SyntheticPartMultiMixed([partHtml, partTachVCard]),
+ number,
+ });
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ Assert.equal(aMimeMsg.allUserAttachments.length, number);
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+});
+
+async function message_attachments(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ check_attachments(
+ aMimeMsg,
+ info.epsilon,
+ "checkTotalSize" in info ? info.checkTotalSize : undefined
+ );
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+}
+
+function check_attachments(aMimeMsg, epsilon, checkTotalSize) {
+ if (aMimeMsg == null) {
+ do_throw("We really should have gotten a result!");
+ }
+
+ /* It is hard to get a byte count that's perfectly accurate. When composing
+ * the message, the MIME structure goes like this (for an encoded attachment):
+ *
+ * XXXXXXXXXX
+ * XXXXXXXXXX <-- encoded block
+ * XXXXXXXXXX
+ * <-- newline
+ * --chopchop <-- MIME separator
+ *
+ * libmime counts bytes all the way up to the separator, which means it counts
+ * the bytes for the extra line. Since newlines in emails are \n, most of the
+ * time we get att.size = 174 instead of 173.
+ *
+ * The good news is, it's just a fixed extra cost. There are no issues with the
+ * inner contents of the attachment: you can add as many newlines as you want
+ * in it, Unix or Windows style, and the count won't drift past the bounds.
+ */
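+ /* As a concrete illustration, using the numbers from the comment above: a
+  * quoted-printable attachment whose decoded payload is 173 bytes may be
+  * reported as att.size == 174, and Math.abs(174 - 173) <= epsilon still
+  * passes with the epsilon of 1 or 2 used by the messageInfos above.
+  */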
+
+ Assert.ok(aMimeMsg.allUserAttachments.length > 0);
+
+ let totalSize = htmlText.length;
+
+ for (let att of aMimeMsg.allUserAttachments) {
+ dump("*** Attachment now is " + att.name + " " + att.size + "\n");
+ Assert.ok(Math.abs(att.size - originalTextByteCount) <= epsilon);
+ totalSize += att.size;
+ }
+
+ // Undefined means true.
+ if (checkTotalSize !== false) {
+ dump(
+ "*** Total size comparison: " + totalSize + " vs " + aMimeMsg.size + "\n"
+ );
+ Assert.ok(Math.abs(aMimeMsg.size - totalSize) <= epsilon);
+ }
+}
+
+function check_bogus_parts(aMimeMsg, { epsilon, checkSize }) {
+ if (aMimeMsg == null) {
+ do_throw("We really should have gotten a result!");
+ }
+
+ // First make sure the size is computed properly.
+ let x = parseInt(aMimeMsg.size);
+ Assert.ok(!isNaN(x));
+
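+ // The windows-registry-key contract is only registered on Windows builds,
+ // which is also where the message text uses CRLF line endings.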
+ let sep = "@mozilla.org/windows-registry-key;1" in Cc ? "\r\n" : "\n";
+
+ if (checkSize) {
+ let partSize = 0;
+ // The attachment, although a MimeUnknown part, is actually plain/text that
+ // contains the whole attached message, including headers. Count them.
+ for (let k in bogusMessage.headers) {
+ let v = bogusMessage.headers[k];
+ partSize += (k + ": " + v + sep).length;
+ }
+ // That's the newline between the headers and the message body.
+ partSize += sep.length;
+ // That's the message body.
+ partSize += originalTextByteCount;
+ // That's the total length that's to be returned by the MimeMessage abstraction.
+ let totalSize = htmlText.length + partSize;
+ dump(totalSize + " vs " + aMimeMsg.size + "\n");
+ Assert.ok(Math.abs(aMimeMsg.size - totalSize) <= epsilon);
+ }
+}
+
+async function bogus_messages(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ check_bogus_parts(aMimeMsg, info);
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js b/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js
new file mode 100644
index 0000000000..3380a0937e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js
@@ -0,0 +1,746 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * General testing of the JS Mime Emitter to make sure it doesn't choke on any
+ * scenarios.
+ *
+ * We do not test, but should consider testing:
+ * - MimeEncryptedPKCS7, whatever that translates to.
+ * - apple double
+ * - sun attachment
+ */
+
+/*
+ * Do not include GlodaTestHelper because we do not want gloda loaded and it
+ * adds a lot of runtime overhead which makes certain debugging strategies like
+ * using chronicle-recorder impractical.
+ */
+
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var {
+ MessageGenerator,
+ SyntheticPartLeaf,
+ SyntheticPartMultiAlternative,
+ SyntheticDegeneratePartEmpty,
+ SyntheticPartMultiSignedSMIME,
+ SyntheticPartMultiMixed,
+ SyntheticPartMultiSignedPGP,
+ SyntheticPartMultiRelated,
+ SyntheticPartMultiDigest,
+ SyntheticPartMultiParallel,
+ SyntheticMessageSet,
+} = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+// While we're at it, we'll also test the correctness of the GlodaAttachment
+// representation, esp. its "I just need the part information to rebuild the
+// URLs" claim.
+var { GlodaFundAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaFundAttr.jsm"
+);
+
+const DEATH_TO_NEWLINE_TYPE_THINGS = /[\r\n]+/g;
+var msgGen = new MessageGenerator();
+var messageInjection;
+
+var partText = new SyntheticPartLeaf("I am text! Woo!");
+var partHtml = new SyntheticPartLeaf(
+ "<html><head></head><body>I am HTML! Woo! </body></html>",
+ {
+ contentType: "text/html",
+ }
+);
+var partEnriched = new SyntheticPartLeaf(
+ "<bold><italic>I am not a popular format! sad woo :(</italic></bold>",
+ {
+ contentType: "text/enriched",
+ }
+);
+var partAlternative = new SyntheticPartMultiAlternative([partText, partHtml]);
+var partMailingListFooter = new SyntheticPartLeaf("I am an annoying footer!");
+
+// We need to make sure a part that has content-disposition: attachment, even
+// though it doesn't have any filename, still is treated as an attachment.
+var tachNoFilename = {
+ body: "I like Bordeaux wine",
+ contentType: "text/plain",
+ disposition: "attachment",
+};
+
+// This is an external attachment, i.e. a mime part that basically says "go find
+// the attachment on disk, assuming it still exists, here's the path to the file
+// on disk". It turns out feed enclosures are presented in the exact same way,
+ // so this covers that case as well.
+var tachExternal = {
+ body:
+ "You deleted an attachment from this message. The original MIME headers for the attachment were:\n" +
+ "Content-Type: image/png;\n" +
+ ' name="conversations-bug1.png"\n' +
+ "Content-Transfer-Encoding: base64\n" +
+ "Content-Disposition: attachment;\n" +
+ ' filename="conversations-bug1.png"',
+ contentType: "image/png",
+ filename: "conversations-bug1.png",
+ charset: null,
+ format: null,
+ encoding: "base64",
+ extraHeaders: {
+ "X-Mozilla-External-Attachment-URL": "file:///tmp/conversations-bug1.png",
+ "X-Mozilla-Altered": 'AttachmentDetached; date="Wed Aug 03 11:11:33 2011"',
+ },
+};
+var tachText = { filename: "bob.txt", body: "I like cheese!" };
+var partTachText = new SyntheticPartLeaf(tachText.body, tachText);
+var tachInlineText = {
+ filename: "foo.txt",
+ body: "Rock the mic",
+ format: null,
+ charset: null,
+ disposition: "inline",
+};
+new SyntheticPartLeaf(tachInlineText.body, tachInlineText);
+
+var tachImage = {
+ filename: "bob.png",
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: "YWJj\n",
+};
+var partTachImage = new SyntheticPartLeaf(tachImage.body, tachImage);
+
+var relImage = {
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ contentId: "part1.foo@bar.invalid",
+ body: "YWJj\n",
+};
+var partRelImage = new SyntheticPartLeaf(relImage.body, relImage);
+
+var tachVCard = {
+ filename: "bob.vcf",
+ contentType: "text/vcard",
+ encoding: "7bit",
+ body: "begin:vcard\nfn:Bob\nend:vcard\n",
+};
+var partTachVCard = new SyntheticPartLeaf(tachVCard.body, tachVCard);
+
+var tachApplication = {
+ filename: "funky.funk",
+ contentType: "application/x-funky",
+ body: "funk!",
+};
+var partTachApplication = new SyntheticPartLeaf(
+ tachApplication.body,
+ tachApplication
+);
+
+var partTachMessages = [msgGen.makeMessage(), msgGen.makeMessage()];
+
+var partEmpty = new SyntheticDegeneratePartEmpty();
+
+var messageInfos = [
+ // -- Simple
+ {
+ name: "text/plain",
+ bodyPart: partText,
+ },
+ {
+ name: "text/html",
+ bodyPart: partHtml,
+ },
+ // -- Simply ugly
+ {
+ name: "text/enriched",
+ bodyPart: partEnriched,
+ },
+ // -- Simple w/attachment
+ {
+ name: "text/plain w/text attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachText],
+ },
+ {
+ name: "text/plain w/image attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachImage],
+ },
+ {
+ name: "text/plain w/vcard attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachVCard],
+ },
+ {
+ name: "text/plain w/app attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachApplication],
+ },
+ {
+ name: "text/html w/text attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachText],
+ },
+ {
+ name: "text/html w/image attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachImage],
+ },
+ {
+ name: "text/html w/vcard attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachVCard],
+ },
+ {
+ name: "text/html w/app attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachApplication],
+ },
+ // -- Alternatives
+ {
+ name: "multipart/alternative: text/plain, text/html",
+ bodyPart: partAlternative,
+ },
+ {
+ name: "multipart/alternative plain/html w/text attachment",
+ bodyPart: partAlternative,
+ attachments: [tachText],
+ },
+ {
+ name: "multipart/alternative plain/html w/image attachment",
+ bodyPart: partAlternative,
+ attachments: [tachImage],
+ },
+ {
+ name: "multipart/alternative plain/html w/vcard attachment",
+ bodyPart: partAlternative,
+ attachments: [tachVCard],
+ },
+ {
+ name: "multipart/alternative plain/html w/app attachment",
+ bodyPart: partAlternative,
+ attachments: [tachApplication],
+ },
+ // -- S/MIME.
+ {
+ name: "S/MIME alternative",
+ bodyPart: new SyntheticPartMultiSignedSMIME(partAlternative),
+ },
+ {
+ name: "S/MIME alternative with text attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachText])
+ ),
+ },
+ {
+ name: "S/MIME alternative with image attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachImage])
+ ),
+ },
+ {
+ name: "S/MIME alternative with image attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachVCard])
+ ),
+ },
+ {
+ name: "S/MIME alternative with app attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachApplication])
+ ),
+ },
+ {
+ name: "S/MIME alternative wrapped in mailing list",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiSignedSMIME(partAlternative),
+ partMailingListFooter,
+ ]),
+ },
+ // -- PGP signature
+ // We mainly care that all the content-type parameters show up.
+ {
+ name: "PGP signed alternative",
+ bodyPart: new SyntheticPartMultiSignedPGP(partAlternative),
+ },
+ // -- Attached RFC822
+ {
+ // Not your average attachment, pack ourselves for now.
+ name: "attached rfc822",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachMessages[0],
+ ]),
+ },
+ // -- Multipart/related
+ {
+ name: "multipart/related",
+ bodyPart: new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ },
+ {
+ name: "multipart/related inside multipart/alternative",
+ bodyPart: new SyntheticPartMultiAlternative([
+ partText,
+ new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ ]),
+ },
+ // -- Multipart/digest
+ {
+ name: "multipart/digest",
+ bodyPart: new SyntheticPartMultiDigest(partTachMessages.concat()),
+ },
+ // -- Multipart/parallel (allegedly the same as mixed)
+ {
+ name: "multipart/parallel",
+ bodyPart: new SyntheticPartMultiParallel([partText, partTachImage]),
+ },
+ // --- Previous bugs
+ // -- Bug 495057, text/enriched was being dumb
+ {
+ name: "text/enriched inside related",
+ bodyPart: new SyntheticPartMultiRelated([partEnriched]),
+ },
+ // -- Empty sections
+ // This was a crasher because the empty part made us try and close the
+ // child preceding the empty part a second time. The nested multipart led
+ // to the crash providing evidence of the double-close bug but there was
+ // nothing inherently nested-multipart-requiring to trigger the double-close
+ // bug.
+ {
+ name: "nested multipart with empty multipart section",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiRelated([partAlternative, partTachText]),
+ partEmpty,
+ ]),
+ },
+ {
+ name: "empty multipart section produces no child",
+ bodyPart: new SyntheticPartMultiMixed([partText, partEmpty, partTachText]),
+ },
+];
+
+add_setup(async function () {
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ registerCleanupFunction(function () {
+ GlodaDatastore.shutdown();
+ });
+});
+
+add_task(async function test_stream_message() {
+ for (let messageInfo of messageInfos) {
+ await stream_message(messageInfo);
+ }
+});
+
+/**
+ * Stream a message with a large body and make sure the saneBodySize option
+ * keeps the streamed body bounded (roughly 20KiB) instead of the full 60k.
+ */
+add_task(async function test_sane_bodies() {
+ // 60 bytes long... (becomes 59 on the other side when \r is dropped)
+ let hugeString =
+ "don't know what you want but I can't stream it anymore...\r\n";
+ const powahsOfTwo = 10;
+ for (let i = 0; i < powahsOfTwo; i++) {
+ hugeString = hugeString + hugeString;
+ }
+ // This will come out to be 60k, of course.
+ Assert.equal(hugeString.length, 60 * Math.pow(2, powahsOfTwo));
+
+ let synMsg = msgGen.makeMessage({
+ body: { body: hugeString, contentType: "text/plain" },
+ });
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ let bodyPart = aMimeMsg.parts[0];
+ // (The \r gets dropped, so it's only 59 per line.)
+ if (bodyPart.body.length > 20 * 1024 + 59) {
+ do_throw(
+ "Mime body length is " +
+ bodyPart.body.length +
+ " bytes long but should not be!"
+ );
+ }
+ promiseResolve();
+ },
+ false,
+ { saneBodySize: true }
+ );
+
+ await promise;
+});
+
+// Additional testing for the correctness of allAttachments and
+// allUserAttachments representation
+
+var partTachNestedMessages = [
+ // Looks like the synthetic part generator appends the charset=ISO-8859-1 part
+ // all by itself. That allows us to create a non-UTF-8 subject, and ensure the
+ // resulting attachment name is indeed São Paulo.eml.
+ msgGen.makeMessage({
+ subject: "S" + String.fromCharCode(0xe3) + "o Paulo",
+ bodyPart: new SyntheticPartLeaf(
+ "<html><head></head><body>I am HTML! Woo! </body></html>",
+ {
+ contentType: "text/html",
+ }
+ ),
+ }),
+ msgGen.makeMessage({
+ attachments: [tachImage],
+ }),
+ msgGen.makeMessage({
+ attachments: [tachImage, tachApplication],
+ }),
+];
+
+var attMessagesParams = [
+ {
+ attachments: [tachNoFilename],
+ },
+ {
+ attachments: [tachExternal],
+ },
+ {
+ name: "attached rfc822",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachNestedMessages[0],
+ ]),
+ },
+ {
+ name: "attached rfc822 w. image inside",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachNestedMessages[1],
+ ]),
+ },
+ {
+ name: "attached x/funky + attached rfc822 w. (image + x/funky) inside",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachApplication,
+ partTachNestedMessages[2],
+ ]),
+ },
+];
+
+var expectedAttachmentsInfo = [
+ {
+ allAttachmentsContentTypes: ["text/plain"],
+ allUserAttachmentsContentTypes: ["text/plain"],
+ },
+ {
+ allAttachmentsContentTypes: ["image/png"],
+ allUserAttachmentsContentTypes: ["image/png"],
+ },
+ {
+ allAttachmentsContentTypes: [],
+ allUserAttachmentsContentTypes: ["message/rfc822"],
+ firstAttachmentName: "S\u00e3o Paulo.eml",
+ },
+ {
+ allAttachmentsContentTypes: ["image/png"],
+ allUserAttachmentsContentTypes: ["message/rfc822"],
+ },
+ {
+ allAttachmentsContentTypes: [
+ "application/x-funky",
+ "image/png",
+ "application/x-funky",
+ ],
+ allUserAttachmentsContentTypes: ["application/x-funky", "message/rfc822"],
+ },
+];
+
+add_task(async function test_attachments_correctness() {
+ for (let [i, params] of attMessagesParams.entries()) {
+ let synMsg = msgGen.makeMessage(params);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ try {
+ let expected = expectedAttachmentsInfo[i];
+ if ("firstAttachmentName" in expected) {
+ let att = aMimeMsg.allUserAttachments[0];
+ Assert.equal(att.name.length, expected.firstAttachmentName.length);
+ for (let j = 0; j < att.name.length; ++j) {
+ Assert.equal(
+ att.name.charCodeAt(j),
+ expected.firstAttachmentName.charCodeAt(j)
+ );
+ }
+ }
+
+ Assert.equal(
+ aMimeMsg.allAttachments.length,
+ expected.allAttachmentsContentTypes.length
+ );
+ for (let [j, att] of aMimeMsg.allAttachments.entries()) {
+ Assert.equal(
+ att.contentType,
+ expected.allAttachmentsContentTypes[j]
+ );
+ }
+
+ Assert.equal(
+ aMimeMsg.allUserAttachments.length,
+ expected.allUserAttachmentsContentTypes.length
+ );
+ for (let [j, att] of aMimeMsg.allUserAttachments.entries()) {
+ Assert.equal(
+ att.contentType,
+ expected.allUserAttachmentsContentTypes[j]
+ );
+ }
+
+ // Test
+ for (let att of aMimeMsg.allUserAttachments) {
+ let uri = aMsgHdr.folder.getUriForMsg(aMsgHdr);
+ let glodaAttachment = GlodaFundAttr.glodaAttFromMimeAtt(
+ { folderMessageURI: uri },
+ att
+ );
+ // The GlodaAttachment appends the filename, which is not always
+ // present.
+ Assert.ok(glodaAttachment.url.startsWith(att.url));
+ }
+ } catch (e) {
+ dump(aMimeMsg.prettyString() + "\n");
+ do_throw(e);
+ }
+
+ promiseResolve();
+ },
+ false
+ );
+
+ await promise;
+ }
+});
+
+var bogusMessage = msgGen.makeMessage({ body: { body: "whatever" } });
+bogusMessage._contentType = "woooooo"; // Breaking abstraction boundaries. Bad.
+
+var weirdMessageInfos = [
+ // This message has an unnamed part as an attachment (with
+ // Content-Disposition: inline and which is displayable inline). Previously,
+ // libmime would emit notifications for this to be treated as an attachment,
+ // named Part 1.2. That is no longer the case, so we should ensure this
+ // message has no attachments.
+ {
+ name: "test message with part 1.2 attachment",
+ attachments: [
+ {
+ body: "attachment",
+ filename: "",
+ format: "",
+ },
+ ],
+ },
+];
+
+add_task(async function test_part12_not_an_attachment() {
+ let synMsg = msgGen.makeMessage(weirdMessageInfos[0]);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ Assert.ok(aMimeMsg.allUserAttachments.length == 0);
+ Assert.ok(aMimeMsg.allAttachments.length == 0);
+ } catch (e) {
+ do_throw(e);
+ }
+ promiseResolve();
+ });
+
+ await promise;
+});
+
+async function stream_message(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ verify_stream_message(info, synMsg, aMsgHdr, aMimeMsg);
+ promiseResolve();
+ });
+
+ await promise;
+}
+/**
+ * Verify the streamed results are what we wanted: we must get a MIME
+ * representation back, and its body-part structure must be equivalent to the
+ * synthetic message we built.
+ */
+function verify_stream_message(aInfo, aSynMsg, aMsgHdr, aMimeMsg) {
+ if (aMimeMsg == null) {
+ do_throw("We really should have gotten a result!");
+ }
+ try {
+ // aMimeMsg is normalized; it only ever actually gets one child.
+ verify_body_part_equivalence(aSynMsg.bodyPart, aMimeMsg.parts[0]);
+ } catch (ex) {
+ dump("Something was wrong with the MIME rep!\n!!!!!!!!\n");
+ dump("Synthetic looks like:\n " + aSynMsg.prettyString() + "\n\n");
+ dump(
+ "MIME looks like: \n" + aMimeMsg.prettyString(true, " ", true) + "\n\n"
+ );
+ do_throw(ex);
+ }
+
+ dump("Everything is just fine.\n");
+ dump("Synthetic looks like:\n " + aSynMsg.prettyString() + "\n\n");
+ dump(
+ "MIME looks like:\n " + aMimeMsg.prettyString(true, " ", false) + "\n\n"
+ );
+}
+
+/**
+ * Applies any transformations to the synthetic body part that we would expect
+ * to happen to a message during its libmime journey. It may be better to
+ * just put the expected translations in the synthetic body part instead of
+ * trying to make this method do anything complex.
+ */
+function synTransformBody(aSynBodyPart) {
+ let text = aSynBodyPart.body.trim();
+ // This transforms things into HTML apparently.
+ if (aSynBodyPart._contentType == "text/enriched") {
+ // Our job here is just to transform just enough for our example above.
+ // We also could have provided a manual translation on the body part.
+ text = text.replace(/bold/g, "B").replace(/italic/g, "I");
+ }
+ return text;
+}
+
+function verify_body_part_equivalence(aSynBodyPart, aMimePart) {
+ // The content-type devoid of parameters should match.
+ Assert.equal(aSynBodyPart._contentType, aMimePart.contentType);
+
+ // The header representation of the content-type should also match unless
+ // this is an rfc822 part, in which case it should only match for the
+ // actual contents.
+ if (aMimePart.contentType != "message/rfc822") {
+ Assert.equal(
+ aSynBodyPart.contentTypeHeaderValue.replace(
+ DEATH_TO_NEWLINE_TYPE_THINGS,
+ ""
+ ),
+ aMimePart.get("content-type").replace(DEATH_TO_NEWLINE_TYPE_THINGS, "")
+ );
+ }
+
+ // XXX body part checking will get brittle if we ever actually encode things!
+ if (
+ aSynBodyPart.body &&
+ !aSynBodyPart._filename &&
+ aSynBodyPart._contentType.startsWith("text/")
+ ) {
+ Assert.equal(
+ synTransformBody(aSynBodyPart),
+ aMimePart.body
+ .trim()
+ .replace(/\r/g, "")
+ // Remove stuff added by libmime for HTML parts.
+ .replace(
+ /[\n]*<meta http-equiv="content-type" content="text\/html; .*">[\n]*/g,
+ ""
+ )
+ .replace(/[\n]+<\/body>/, "</body>")
+ );
+ }
+ if (aSynBodyPart.parts) {
+ let iPart;
+ let realPartOffsetCompensator = 0;
+ for (iPart = 0; iPart < aSynBodyPart.parts.length; iPart++) {
+ let subSyn = aSynBodyPart.parts[iPart];
+ // If this is a degenerate empty, it should not produce output, so
+ // compensate for the offset drift and get on with our lives.
+ if (subSyn instanceof SyntheticDegeneratePartEmpty) {
+ realPartOffsetCompensator--;
+ continue;
+ }
+ let subMime = aMimePart.parts[iPart + realPartOffsetCompensator];
+ // Our special case is the signature, which libmime does not expose to us.
+ // Ignore! (Also, have our too-many-part checker below not trip on this.)
+ if (subSyn._contentType != "application/x-pkcs7-signature") {
+ if (subMime == null) {
+ do_throw(
+ "No MIME part matching " + subSyn.contentTypeHeaderValue + "\n"
+ );
+ }
+ verify_body_part_equivalence(subSyn, subMime);
+ }
+ }
+ // Only check if there are still more mime parts; don't check for a count
+ // mismatch (the PKCS case from above needs to be handled).
+ if (iPart < aMimePart.parts.length) {
+ do_throw("MIME part has more sub-parts than syn part?");
+ }
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_msg_search.js b/comm/mailnews/db/gloda/test/unit/test_msg_search.js
new file mode 100644
index 0000000000..2c8ea1c528
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_msg_search.js
@@ -0,0 +1,155 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test GlodaMsgSearcher.jsm our heuristic-based fulltext search mechanism. Things we
+ * generally want to verify:
+ * - fulltext weighting by where the match happened works.
+ * - static interestingness impacts things appropriately.
+ *
+ * Our general strategy is to create two messages each with a unique string
+ * placed in controlled places and whatever intentional message manipulation
+ * is required to set things up. Then we query using a GlodaMsgSearcher with
+ * the limit set to 1. Only the message we expect should come back.
+ * Keep in mind in all tests that our underlying ranking mechanism is based on
+ * time, so the date of each message is relevant but should not be significant,
+ * because our score boost factor should always be well in excess of the one
+ * hour increment between messages.
+ *
+ * Previously, we relied on the general equivalence of the logic in
+ * test_query_core to our message search logic.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { GlodaMsgSearcher } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaMsgSearcher.jsm"
+);
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var uniqueCounter = 0;
+var messageInjection;
+
+add_setup(async function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Verify that the ranking function is using the weights as expected. We do not
+ * need to test all the permutations
+ */
+add_task(async function test_fulltext_weighting_by_column() {
+ let ustr = unique_string();
+ let [, subjSet, bodySet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: ustr },
+ { count: 1, body: { body: ustr } },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([subjSet, bodySet]));
+ await asyncMsgSearcherExpect(ustr, subjSet);
+});
+
+/**
+ * A term mentioned 3 times in the body is worth more than twice in the subject.
+ * (This is because the subject saturates at one occurrence worth 2.0 and the
+ * body does not saturate until 10, each worth 1.0.)
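+ * Concretely, with those weights the twice-in-subject message scores 2.0
+ * while the thrice-in-body message scores 3 * 1.0 = 3.0, so the body message
+ * should be the one the searcher returns.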
+ */
+add_task(async function test_fulltext_weighting_saturation() {
+ let ustr = unique_string();
+ let double_ustr = ustr + " " + ustr;
+ let thrice_ustr = ustr + " " + ustr + " " + ustr;
+ let [, subjSet, bodySet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: double_ustr },
+ { count: 1, body: { body: thrice_ustr } },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([subjSet, bodySet]));
+ await asyncMsgSearcherExpect(ustr, bodySet);
+});
+
+/**
+ * Use a starred message with the same fulltext match characteristics as another
+ * message to verify the preference goes the right way. Have the starred
+ * message be the older message for safety.
+ */
+add_task(async function test_static_interestingness_boost_works() {
+ let ustr = unique_string();
+ let [, starred, notStarred] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: ustr },
+ { count: 1, subject: ustr },
+ ]);
+ // Index in their native state.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([starred, notStarred]));
+ // Star and index.
+ starred.setStarred(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([starred]));
+ // Stars upon thars wins.
+ await asyncMsgSearcherExpect(ustr, starred);
+});
+
+/**
+ * Make sure that the query does not retrieve more than actually matches.
+ */
+add_task(async function test_joins_do_not_return_everybody() {
+ let ustr = unique_string();
+ let [, subjSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: ustr },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([subjSet]));
+ await asyncMsgSearcherExpect(ustr, subjSet, 2);
+});
+
+/**
+ * Generate strings like "aaaaa", "aabaa", "aacaa", etc. The idea with the
+ * suffix is to keep the porter stemmer from doing something weird that
+ * collapses things.
+ */
+function unique_string() {
+ let uval = uniqueCounter++;
+ let s =
+ String.fromCharCode(97 + Math.floor(uval / (26 * 26))) +
+ String.fromCharCode(97 + (Math.floor(uval / 26) % 26)) +
+ String.fromCharCode(97 + (uval % 26)) +
+ "aa";
+ return s;
+}
+
+/**
+ * Wrap the construction of a GlodaMsgSearcher with a limit of 1 and feed it to
+ * queryExpect.
+ *
+ * @param aFulltextStr The fulltext query string which GlodaMsgSearcher will
+ * parse.
+ * @param aExpectedSet The expected result set. Make sure that the size of the
+ * set is consistent with aLimit.
+ * @param [aLimit=1]
+ *
+ * Use like so:
+ * await asyncMsgSearcherExpect("foo bar", someSynMsgSet);
+ */
+async function asyncMsgSearcherExpect(aFulltextStr, aExpectedSet, aLimit) {
+ let limit = aLimit ? aLimit : 1;
+ Services.prefs.setIntPref("mailnews.database.global.search.msg.limit", limit);
+ let searcher = new GlodaMsgSearcher(null, aFulltextStr);
+ await queryExpect(searcher.buildFulltextQuery(), aExpectedSet);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js b/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js
new file mode 100644
index 0000000000..128720ee76
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js
@@ -0,0 +1,144 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test noun_mimetype. Exists because I just changed its implementation and I'm
+ * afraid I may have damaged it and it's hard to tell, so ironically a unit test
+ * is the easiest solution. (Don't you hate it when the right thing to do is
+ * also the easy thing to do?)
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { MimeTypeNoun } = ChromeUtils.import(
+ "resource:///modules/gloda/NounMimetype.jsm"
+);
+
+var passResults = [];
+var curPassResults;
+
+add_setup(async function () {
+ let msgGen = new MessageGenerator();
+ let messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_new_pass_first_time() {
+ await new_pass();
+});
+
+add_task(function test_basics_first_time() {
+ test_basics();
+});
+
+/**
+ * Do two passes of test_basics making sure that persisted values really
+ * persist.
+ */
+add_task(async function test_new_pass_second_time() {
+ await new_pass();
+});
+
+add_task(function test_basics_second_time() {
+ test_basics();
+});
+
+add_task(function verify_passes_are_the_same() {
+ var firstPassResults = passResults[0];
+ for (let iType = 0; iType < curPassResults.length; iType++) {
+ for (let iPass = 1; iPass < passResults.length; iPass++) {
+ Assert.equal(firstPassResults[iType].id, passResults[iPass][iType].id);
+ }
+ }
+});
+
+add_task(function test_parameters() {
+ let plain = MimeTypeNoun.getMimeType("text/plain");
+ Assert.equal(plain, MimeTypeNoun.getMimeType('text/plain; charset="UTF-8"'));
+});
+
+/**
+ * Set up a new 'pass' by nuking the MimeTypeNoun's state if it has any. The
+ * goal here is to verify that the database persistence is actually working,
+ * and we can only do that if we convince it to nuke its authoritative 'cache'
+ * and grab a new copy.
+ */
+async function new_pass() {
+ // We have to nuke if it has already happened.
+ if (passResults.length) {
+ MimeTypeNoun._mimeTypes = {};
+ MimeTypeNoun._mimeTypesByID = {};
+ MimeTypeNoun._mimeTypeHighID = {};
+ MimeTypeNoun._highID = 0;
+ MimeTypeNoun._init();
+ }
+ curPassResults = [];
+ passResults.push(curPassResults);
+
+ // The mime type does some async stuff... make sure we don't advance until
+ // it is done with said stuff.
+ await waitForGlodaDBFlush();
+}
+
+function test_basics() {
+ let python;
+ // If this is not the first pass, check for python before other things to
+ // make sure we're not just relying on consistent logic rather than actual
+ // persistence.
+ if (passResults.length) {
+ python = MimeTypeNoun.getMimeType("text/x-python");
+ }
+
+ let jpeg = MimeTypeNoun.getMimeType("image/jpeg");
+ curPassResults.push(jpeg);
+
+ let png = MimeTypeNoun.getMimeType("image/png");
+ curPassResults.push(png);
+
+ let html = MimeTypeNoun.getMimeType("text/html");
+ curPassResults.push(html);
+
+ let plain = MimeTypeNoun.getMimeType("text/plain");
+ curPassResults.push(plain);
+
+ // If this is for the first time, check for python now (see above).
+ if (!passResults.length) {
+ python = MimeTypeNoun.getMimeType("text/x-python");
+ }
+ // But always add it to the results now, as we need consistent ordering
+ // since we use a list.
+ curPassResults.push(python);
+
+ // Sanity-checking the parsing.
+ Assert.equal(jpeg.type, "image");
+ Assert.equal(jpeg.subType, "jpeg");
+
+ // - Make sure the numeric trickiness for the block stuff is actually doing
+ // the right thing!
+ const BLOCK_SIZE = MimeTypeNoun.TYPE_BLOCK_SIZE;
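+ // To illustrate with a hypothetical block size of 100: ids 101 and 102 would
+ // share block 1 (Math.floor(id / 100) == 1) while id 201 would land in block
+ // 2, which is the same-block/different-block grouping asserted below.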
+ // Same blocks.
+ Assert.equal(
+ Math.floor(jpeg.id / BLOCK_SIZE),
+ Math.floor(png.id / BLOCK_SIZE)
+ );
+ Assert.equal(
+ Math.floor(html.id / BLOCK_SIZE),
+ Math.floor(plain.id / BLOCK_SIZE)
+ );
+ // Different blocks.
+ Assert.notEqual(
+ Math.floor(jpeg.id / BLOCK_SIZE),
+ Math.floor(html.id / BLOCK_SIZE)
+ );
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js b/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js
new file mode 100644
index 0000000000..e47eac75bc
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js
@@ -0,0 +1,62 @@
+/**
+ * Atypical gloda unit test that tests nuke migration. Gloda is not designed
+ * to be shut down and started up again in the same process lifetime. It tries
+ * to be clever with caching accessors that clobber themselves out of existence
+ * which are hard to make come back to life, and probably other things.
+ *
+ * So what we do is create a global-messages-db.sqlite with an unacceptably
+ * old schema version before tickling gloda to startup. If gloda comes up
+ * with a database connection and it has the right schema version, we declare
+ * that gloda has successfully loaded. Our only historical screw-up here was
+ * very blatant (and was actually a result of trying to avoid complexity in
+ * the nuke path! oh the irony!) so we don't need to get all hardcore.
+ */
+
+/**
+ * The DB version to use. We set this as a non-const variable so that
+ * test_nuke_migration_from_future.js can change it.
+ */
+var BAD_DB_VERSION_TO_USE = 2;
+
+/**
+ * Synchronously create and close the out-of-date database. Because we are
+ * only using synchronous APIs, we know everything is in fact dead. GC being
+ * what it is, the various C++ objects will probably stay alive through the
+ * next test, but will be inert because we have closed the database.
+ */
+function make_out_of_date_database() {
+ // Get the path to our global database
+ var dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ dbFile.append("global-messages-db.sqlite");
+
+ // Create the database
+ var dbConnection = Services.storage.openUnsharedDatabase(dbFile);
+ dbConnection.schemaVersion = BAD_DB_VERSION_TO_USE;
+
+ // Close the database (will throw if there's a problem closing)
+ dbConnection.close();
+}
+
+// some copied and pasted preference setup from glodaTestHelper that is
+// appropriate here.
+// yes to indexing
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// no to a sweep we don't control
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
+function run_test() {
+ // - make the old database
+ make_out_of_date_database();
+
+ // - tickle gloda
+ // GlodaPublic.jsm loads Gloda.jsm which self-initializes and initializes the datastore
+ ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+ let { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+ );
+
+ Assert.notEqual(GlodaDatastore.asyncConnection, null);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js b/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js
new file mode 100644
index 0000000000..f60c1dd29e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js
@@ -0,0 +1,12 @@
+/**
+ * There are actually two ways the nuke migration can be invoked: from a
+ * database too far in the future, and from one too far in the past. This
+ * one covers the future case. We must keep ourselves safe from time-traveling
+ * grandchildren!
+ */
+
+/* import-globals-from test_nuke_migration.js */
+load("test_nuke_migration.js");
+
+// pick something so far forward it will never get used!
+BAD_DB_VERSION_TO_USE = 100000000;
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_core.js b/comm/mailnews/db/gloda/test/unit/test_query_core.js
new file mode 100644
index 0000000000..0849a62d50
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_core.js
@@ -0,0 +1,658 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test the mechanics of our query functionality. Tests in this file are intended
+ * to cover extreme boundary cases and things that are just unlikely to happen
+ * in reasonable message use-cases. (Which is to say, it could be hard to
+ * formulate a set of synthetic messages that result in the situation we want
+ * to test for.)
+ */
+
+var { prepareIndexerForTesting } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaIndexer, IndexingJob } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+
+/* ===== Test Noun ===== */
+/*
+ * Introduce a simple noun type for our testing so that we can avoid having to
+ * deal with the semantics of messages/friends and all their complexity.
+ */
+
+var WidgetProvider = {
+ providerName: "widget",
+ *process() {
+ yield GlodaConstants.kWorkDone;
+ },
+};
+
+add_setup(function () {
+ // Don't initialize the message indexing state.
+ prepareIndexerForTesting();
+ GlodaIndexer.registerIndexer(GenericIndexer);
+ Gloda.addIndexerListener(genericIndexerCallback);
+});
+
+var WidgetNoun;
+add_task(function setup_test_noun_and_attributes() {
+ // --- noun
+ WidgetNoun = Gloda.defineNoun({
+ name: "widget",
+ clazz: Widget,
+ allowsArbitraryAttrs: true,
+ // It is vitally important to our correctness that we allow caching;
+ // otherwise our in-memory representations will not be canonical and the db
+ // will load some. Or we could add things to collections as we index them.
+ cache: true,
+ cacheCost: 32,
+ schema: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["intCol", "NUMBER", "inum"],
+ // datePRTime is special and creates a Date object.
+ ["dateCol", "NUMBER", "datePRTime"],
+ ["strCol", "STRING", "str"],
+ ["notabilityCol", "NUMBER", "notability"],
+ ["textOne", "STRING", "text1"],
+ ["textTwo", "STRING", "text2"],
+ ],
+ indices: {
+ intCol: ["intCol"],
+ strCol: ["strCol"],
+ },
+ fulltextColumns: [
+ ["fulltextOne", "TEXT", "text1"],
+ ["fulltextTwo", "TEXT", "text2"],
+ ],
+ genericAttributes: true,
+ },
+ });
+
+ const EXT_NAME = "test";
+
+ // --- special (on-row) attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "inum",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "intCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "date",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "dateCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_DATE,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "str",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "strCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ });
+
+ // --- fulltext attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "text1",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: "fulltextOne",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "text2",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: "fulltextTwo",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "fulltextAll",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: WidgetNoun.tableName + "Text",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+
+ // --- external (attribute-storage) attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "singleIntAttr",
+ singular: true,
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "multiIntAttr",
+ singular: false,
+ emptySetIsSignificant: true,
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+});
+
+/* ===== Tests ===== */
+
+const ALPHABET = "abcdefghijklmnopqrstuvwxyz";
+add_task(async function test_lots_of_string_constraints() {
+ let stringConstraints = [];
+ for (let i = 0; i < 2049; i++) {
+ stringConstraints.push(
+ ALPHABET[Math.floor(i / (ALPHABET.length * 2)) % ALPHABET.length] +
+ ALPHABET[Math.floor(i / ALPHABET.length) % ALPHABET.length] +
+ ALPHABET[i % ALPHABET.length] +
+ // Throw in something that will explode if not quoted
+ // and use an uneven number of things so if we fail
+ // to quote it won't get quietly eaten.
+ "'\""
+ );
+ }
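+ // For illustration, the first generated constraints are "aaa'\"" and
+ // "aab'\"", and there are 2049 of them in total, which is far more terms
+ // than any realistic query would pass in.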
+
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.str.apply(query, stringConstraints);
+
+ await queryExpect(query, []);
+});
+
+/* === Query === */
+
+/**
+ * Use a counter so that each test can have its own unique value for intCol so
+ * that it can use that as a constraint. Otherwise we would need to purge
+ * between every test. That's not an unreasonable alternative, but this works.
+ * Every test should increment this before using it.
+ */
+var testUnique = 100;
+
+/**
+ * Widgets with multiIntAttr populated with one or more values.
+ */
+var nonSingularWidgets;
+/**
+ * Widgets with multiIntAttr unpopulated.
+ */
+var singularWidgets;
+
+add_task(async function setup_non_singular_values() {
+ testUnique++;
+ let origin = new Date("2007/01/01");
+ nonSingularWidgets = [
+ new Widget(testUnique, origin, "ns1", 0, "", ""),
+ new Widget(testUnique, origin, "ns2", 0, "", ""),
+ ];
+ singularWidgets = [
+ new Widget(testUnique, origin, "s1", 0, "", ""),
+ new Widget(testUnique, origin, "s2", 0, "", ""),
+ ];
+ nonSingularWidgets[0].multiIntAttr = [1, 2];
+ nonSingularWidgets[1].multiIntAttr = [3];
+ singularWidgets[0].multiIntAttr = [];
+ // And don't bother setting it on singularWidgets[1].
+
+ GenericIndexer.indexObjects(nonSingularWidgets.concat(singularWidgets));
+ await promiseGenericIndexerCallback;
+
+ // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+});
+
+add_task(async function test_query_has_value_for_non_singular() {
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr();
+ await queryExpect(query, nonSingularWidgets);
+});
+
+/**
+ * We should find the one singular object where we set the multiIntAttr to an
+ * empty set. We don't find the one without the attribute since that's
+ * actually something different.
+ * We also want to test that re-indexing properly adds/removes the attribute,
+ * so change the object and make sure everything happens correctly.
+ *
+ * @tests gloda.datastore.sqlgen.kConstraintIn.emptySet
+ * @tests gloda.query.test.kConstraintIn.emptySet
+ */
+add_task(async function test_empty_set_logic() {
+ // - Initial query based on the setup previously.
+ dump("Initial index case\n");
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr(null);
+ await queryExpect(query, [singularWidgets[0]]);
+
+  // - Make one of the non-singulars move to empty, and change the widget
+  //   that matched so it no longer matches.
+ dump("Incremental index case\n");
+ nonSingularWidgets[0].multiIntAttr = [];
+ singularWidgets[0].multiIntAttr = [4, 5];
+
+ GenericIndexer.indexObjects([nonSingularWidgets[0], singularWidgets[0]]);
+ await promiseGenericIndexerCallback;
+
+  // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+
+ query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr(null);
+ await queryExpect(query, [nonSingularWidgets[0]]);
+
+ // Make sure that the query doesn't explode when it has to handle a case
+ // that's not supposed to match.
+ Assert.ok(!query.test(singularWidgets[0]));
+});
+
+/* === Search === */
+/*
+ * The conceit of our search is that more recent messages are better than older
+ * messages. But at the same time, we care about some messages more than
+ * others (in general), and we care about messages that match search terms
+ * more strongly too. So we introduce a general 'score' heuristic which we
+ * then apply to message timestamps to make them appear more recent. We
+ * then order by this 'date score' hybrid, which we dub "dascore". Such a
+ * flattening heuristic is over-simple, but believed to be sufficient to
+ * generally get us the messages we want. Post-processing can then be
+ * more multi-dimensional and whatnot, but that is beyond the scope of
+ * this unit test.
+ */
+
+/**
+ * How much time boost should a 'score point' amount to? The authoritative,
+ * incontrovertible answer, across all time and space, is a week.
+ * Gloda and storage like to store things as PRTime and so we do it too,
+ * even though milliseconds are the actual granularity of JS Date instances.
+ */
+const SCORE_TIMESTAMP_FACTOR = 1000 * 1000 * 60 * 60 * 24 * 7;
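+// Worked arithmetic for the constant above (illustrative only):
+//   60 * 60 * 24 * 7 = 604,800 seconds per week
+//   * 1000           = 604,800,000 ms (JS Date granularity)
+//   * 1000           = 604,800,000,000 µs (PRTime granularity)
+// So one score point shifts a message's effective date by one week of PRTime.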
+
+/**
+ * How many score points for each fulltext match?
+ */
+const SCORE_FOR_FULLTEXT_MATCH = 1;
+
+/**
+ * Roughly how many characters are in each offset match.
+ */
+const OFFSET_CHARS_PER_FULLTEXT_MATCH = 8;
+
+var fooWidgets = null;
+var barBazWidgets = null;
+
+add_task(async function setup_search_ranking_idiom() {
+ // --- Build some widgets for testing.
+  // Use inum to represent the expected result sequence.
+  // Set up a base date.
+ let origin = new Date("2008/01/01");
+ let daymore = new Date("2008/01/02");
+ let monthmore = new Date("2008/02/01");
+ fooWidgets = [
+    // -- Set up the term "foo" to do frequency tests.
+ new Widget(5, origin, "", 0, "", "foo"),
+ new Widget(4, origin, "", 0, "", "foo foo"),
+ new Widget(3, origin, "", 0, "foo", "foo foo"),
+ new Widget(2, origin, "", 0, "foo foo", "foo foo"),
+ new Widget(1, origin, "", 0, "foo foo", "foo foo foo"),
+ new Widget(0, origin, "", 0, "foo foo foo", "foo foo foo"),
+ ];
+ barBazWidgets = [
+    // -- Set up score and matches to boost older messages over newer messages.
+ new Widget(7, origin, "", 0, "", "bar"), // score boost: 1 + date: 0
+ new Widget(6, daymore, "", 0, "", "bar"), // 1 + 0+
+ new Widget(5, origin, "", 1, "", "bar"), // 2 + 0
+ new Widget(4, daymore, "", 0, "bar", "bar"), // 2 + 0+
+ new Widget(3, origin, "", 1, "bar", "baz"), // 3 + 0
+ new Widget(2, monthmore, "", 0, "", "bar"), // 1 + 4
+ new Widget(1, origin, "", 0, "bar baz", "bar baz bar bar"), // 6 + 0
+ new Widget(0, origin, "", 1, "bar baz", "bar baz bar bar"), // 7 + 0
+ ];
+
+ GenericIndexer.indexObjects(fooWidgets.concat(barBazWidgets));
+ await promiseGenericIndexerCallback;
+
+ // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+});
+
+// Add one because the last snippet shouldn't have a trailing space.
+const OFFSET_SCORE_SQL_SNIPPET =
+ "(((length(osets) + 1) / " +
+ OFFSET_CHARS_PER_FULLTEXT_MATCH +
+ ") * " +
+ SCORE_FOR_FULLTEXT_MATCH +
+ ")";
+
+const SCORE_SQL_SNIPPET = "(" + OFFSET_SCORE_SQL_SNIPPET + " + notabilityCol)";
+
+const DASCORE_SQL_SNIPPET =
+ "((" + SCORE_SQL_SNIPPET + " * " + SCORE_TIMESTAMP_FACTOR + ") + dateCol)";
+
+const WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL =
+ "SELECT ext_widget.*, offsets(ext_widgetText) AS osets " +
+ "FROM ext_widget, ext_widgetText WHERE ext_widgetText MATCH ?" +
+ " AND ext_widget.id == ext_widgetText.docid";
+
+/**
+ * Used by queryExpect to verify the result ordering and that stashed column
+ * data was provided for each returned widget.
+ */
+function verify_widget_order_and_stashing(
+ aZeroBasedIndex,
+ aWidget,
+ aCollection
+) {
+ Assert.equal(aZeroBasedIndex, aWidget.inum);
+ if (
+ !aCollection.stashedColumns[aWidget.id] ||
+ !aCollection.stashedColumns[aWidget.id].length
+ ) {
+ do_throw("no stashed information for widget: " + aWidget);
+ }
+}
+
+/**
+ * Test the fundamentals of the search ranking idiom we use elsewhere. This
+ * is primarily a simplified check of the offset-based scoring: all the "foo"
+ * widgets share the same date, so ordering is driven by match count alone.
+ */
+add_task(async function test_search_ranking_idiom_offsets() {
+ let query = Gloda.newQuery(WidgetNoun.id, {
+ explicitSQL: WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL,
+ // osets becomes 0-based column number 7.
+ // dascore becomes 0-based column number 8.
+ outerWrapColumns: [DASCORE_SQL_SNIPPET + " AS dascore"],
+ // Save our extra columns for analysis and debugging.
+ stashColumns: [7, 8],
+ });
+ query.fulltextAll("foo");
+ query.orderBy("-dascore");
+ await queryExpect(
+ query,
+ fooWidgets,
+ null,
+ null,
+ verify_widget_order_and_stashing
+ );
+});
+
+add_task(async function test_search_ranking_idiom_score() {
+ let query = Gloda.newQuery(WidgetNoun.id, {
+ explicitSQL: WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL,
+ // osets becomes 0-based column number 7
+ // dascore becomes 0-based column number 8
+ outerWrapColumns: [
+ DASCORE_SQL_SNIPPET + " AS dascore",
+ SCORE_SQL_SNIPPET + " AS dabore",
+ "dateCol",
+ ],
+ // Save our extra columns for analysis and debugging.
+ stashColumns: [7, 8, 9, 10],
+ });
+ query.fulltextAll("bar OR baz");
+ query.orderBy("-dascore");
+ await queryExpect(
+ query,
+ barBazWidgets,
+ null,
+ null,
+ verify_widget_order_and_stashing
+ );
+});
+
+/**
+ * Generic indexing mechanism; does nothing special, just uses
+ * Gloda.grokNounItem. Call GenericIndexer.indexObjects() to queue your
+ * objects for initial indexing.
+ */
+var GenericIndexer = {
+ _log: console.createInstance({
+ prefix: "gloda.test",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.test.loglevel",
+ }),
+ /* public interface */
+ name: "generic_indexer",
+ enable() {
+ this.enabled = true;
+ },
+ disable() {
+ this.enabled = false;
+ },
+ get workers() {
+ return [
+ [
+ "generic",
+ {
+ worker: this._worker_index_generic,
+ },
+ ],
+ ];
+ },
+ initialSweep() {},
+ /* mock interface */
+ enabled: false,
+ initialSweepCalled: false,
+ indexObjects(aObjects) {
+ indexingInProgress = true;
+ this._log.debug(
+ "enqueuing " +
+ aObjects.length +
+ " generic objects with id: " +
+ aObjects[0].NOUN_ID
+ );
+ GlodaIndexer.indexJob(new IndexingJob("generic", null, aObjects.concat()));
+ },
+ /* implementation */
+ *_worker_index_generic(aJob, aCallbackHandle) {
+ this._log.debug(
+ "Beginning indexing " + aJob.items.length + " generic items"
+ );
+ for (let item of aJob.items) {
+ this._log.debug("Indexing: " + item);
+ yield aCallbackHandle.pushAndGo(
+ Gloda.grokNounItem(
+ item,
+ {},
+ item.id === undefined,
+ item.id === undefined,
+ aCallbackHandle,
+ item.NOUN_DEF.cache
+ )
+ );
+ item._stash();
+ }
+
+ yield GlodaConstants.kWorkDone;
+ this._log.debug("Done indexing");
+ },
+};
+
+var indexingInProgress = false;
+var promiseGenericIndexerCallbackResolve;
+var promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+});
+function genericIndexerCallback(aStatus) {
+ // If indexingInProgress is false, we've received the synthetic
+ // notification, so ignore it.
+ if (indexingInProgress && aStatus == GlodaConstants.kIndexerIdle) {
+ indexingInProgress = false;
+ promiseGenericIndexerCallbackResolve();
+ }
+}
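+
+// The "Reset promise." stanzas in the setup/test tasks above all re-arm this
+// same gate. A hypothetical helper expressing that idiom would look like the
+// following (sketch only, not wired into the tasks above):
+function resetGenericIndexerGate() {
+  promiseGenericIndexerCallback = new Promise(resolve => {
+    promiseGenericIndexerCallbackResolve = resolve;
+  });
+}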
+
+/**
+ * Simple test object.
+ *
+ * Has some tricks for gloda indexing to deal with gloda's general belief that
+ * things are immutable. When we get indexed we stash all of our attributes
+ * at that time in _indexStash. Then when we get cloned we propagate our
+ * current attributes over to the cloned object and restore _indexStash. This
+ * sets things up the way gloda expects them as long as we never de-persist
+ * from the db.
+ */
+function Widget(inum, date, str, notability, text1, text2) {
+ this._id = undefined;
+ this._inum = inum;
+ this._date = date;
+ this._str = str;
+ this._notability = notability;
+ this._text1 = text1;
+ this._text2 = text2;
+
+ this._indexStash = null;
+ this._restoreStash = null;
+}
+Widget.prototype = {
+ _clone() {
+ let clonus = new Widget(
+ this._inum,
+ this._date,
+ this._str,
+ this._notability,
+ this._text1,
+ this._text2
+ );
+ clonus._id = this._id;
+ clonus._iAmAClone = true;
+
+ for (let key of Object.keys(this)) {
+ let value = this[key];
+ if (key.startsWith("_")) {
+ continue;
+ }
+ clonus[key] = value;
+ if (key in this._indexStash) {
+ this[key] = this._indexStash[key];
+ }
+ }
+
+ return clonus;
+ },
+ _stash() {
+ this._indexStash = {};
+ for (let key of Object.keys(this)) {
+ let value = this[key];
+      if (key.startsWith("_")) {
+ continue;
+ }
+ this._indexStash[key] = value;
+ }
+ },
+
+ get id() {
+ return this._id;
+ },
+ set id(aVal) {
+ this._id = aVal;
+ },
+
+ // Gloda's attribute idiom demands that row attributes be prefixed with a '_'
+  // (because Gloda.grokNounItem detects attributes just by walking the
+  // object's own properties). This could be resolved by having the special
+  // attributes make these getters moot, but that's not how things are right
+  // now.
+ get inum() {
+ return this._inum;
+ },
+ set inum(aVal) {
+ this._inum = aVal;
+ },
+ get date() {
+ return this._date;
+ },
+ set date(aVal) {
+ this._date = aVal;
+ },
+
+ get datePRTime() {
+ return this._date.valueOf() * 1000;
+ },
+ // We need a special setter to convert back from PRTime to an actual
+ // date object.
+ set datePRTime(aVal) {
+ this._date = new Date(aVal / 1000);
+ },
+
+ get str() {
+ return this._str;
+ },
+ set str(aVal) {
+ this._str = aVal;
+ },
+ get notability() {
+ return this._notability;
+ },
+ set notability(aVal) {
+ this._notability = aVal;
+ },
+ get text1() {
+ return this._text1;
+ },
+ set text1(aVal) {
+ this._text1 = aVal;
+ },
+ get text2() {
+ return this._text2;
+ },
+ set text2(aVal) {
+ this._text2 = aVal;
+ },
+
+ toString() {
+ return "" + this.id;
+ },
+};
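+
+/**
+ * Illustrative sketch (not called by any test above) of the stash/clone
+ * idiom described on Widget: the clone carries the *current* attribute
+ * values while the original rolls back to what was recorded at indexing
+ * time, which is what re-indexing expects to see.
+ */
+function exampleWidgetStashClone() {
+  let w = new Widget(1, new Date("2008/01/01"), "x", 0, "", "");
+  w.multiIntAttr = [1, 2]; // Attribute value as stored at indexing time.
+  w._stash(); // Snapshot taken during indexing.
+  w.multiIntAttr = [3]; // Mutation made before re-indexing.
+  let clone = w._clone(); // clone.multiIntAttr is [3];
+  // w.multiIntAttr has been restored to [1, 2].
+  return clone;
+}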
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js
new file mode 100644
index 0000000000..93b4a9ec34
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js
@@ -0,0 +1,37 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that were offline before they were
+ * indexed.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js
new file mode 100644
index 0000000000..368252a5e6
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js
@@ -0,0 +1,38 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that aren't offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+expectFulltextResults = false;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js
new file mode 100644
index 0000000000..0788c15ff7
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js
@@ -0,0 +1,40 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that were indexed, then made available
+ * offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+// We want to go offline once the messages have already been indexed online.
+goOffline = true;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js
new file mode 100644
index 0000000000..c88fe1aa4e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for local messages.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js b/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js
new file mode 100644
index 0000000000..efe489974e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js
@@ -0,0 +1,894 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that S/MIME messages are properly displayed and that the MimeMessage
+ * representation is correct.
+ */
+
+var { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+function initNSS() {
+ // Copy the NSS database files over.
+ let profile = FileUtils.getDir("ProfD", []);
+ let files = ["cert9.db", "key4.db"];
+ let directory = do_get_file("../../../../data/db-tinderbox-invalid");
+ for (let f of files) {
+ let keydb = directory.clone();
+ keydb.append(f);
+ keydb.copyTo(profile, f);
+ }
+
+ // Ensure NSS is initialized.
+ Cc["@mozilla.org/psm;1"].getService(Ci.nsISupports);
+}
+
+add_setup(async function () {
+ initNSS();
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+});
+
+add_task(async function test_smime_mimemsg() {
+ let msg = msgGen.makeEncryptedSMimeMessage({
+ from: ["Tinderbox", "tinderbox@foo.invalid"],
+ to: [["Tinderbox", "tinderbox@foo.invalid"]],
+ subject: "Albertine disparue (La Fugitive)",
+ body: { body: encrypted_blurb },
+ });
+ let synSet = new SyntheticMessageSet([msg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+  // Make sure that, by default, MimeMessages do not include encrypted parts.
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ // First make sure the MIME structure is as we expect it to be.
+ Assert.equal(aMimeMsg.parts.length, 1);
+ // Then, make sure the MimeUnknown part there has the encrypted flag
+ Assert.ok(aMimeMsg.parts[0].isEncrypted);
+ // And that we can't "see through" the MimeUnknown container
+ Assert.equal(aMimeMsg.parts[0].parts.length, 0);
+ // Make sure we can't see the attachment
+ Assert.equal(aMimeMsg.allUserAttachments.length, 0);
+ promiseResolve();
+ },
+ true,
+ {}
+ );
+
+ await promise;
+
+ // Reset promise.
+ promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+  // Now, what if we specifically ask to "see" the encrypted parts?
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ // First make sure the MIME structure is as we expect it to be.
+ Assert.equal(aMimeMsg.parts.length, 1);
+ // Then, make sure the MimeUnknown part there has the encrypted flag
+ Assert.ok(aMimeMsg.parts[0].isEncrypted);
+ // And that we can "see through" the MimeUnknown container
+ Assert.equal(aMimeMsg.parts[0].parts.length, 1);
+ Assert.equal(aMimeMsg.parts[0].parts[0].parts.length, 1);
+ Assert.equal(aMimeMsg.parts[0].parts[0].parts[0].parts.length, 2);
+ // Make sure we can see the attachment
+ Assert.equal(aMimeMsg.allUserAttachments.length, 1);
+ Assert.equal(aMimeMsg.allUserAttachments[0].contentType, "image/jpeg");
+ promiseResolve();
+ // Extra little bit of testing
+ },
+ true,
+ {
+ examineEncryptedParts: true,
+ }
+ );
+ await promise;
+});
+
+var encrypted_blurb =
+ "MIAGCSqGSIb3DQEHA6CAMIACAQAxgf8wgfwCAQAwZTBgMQswCQYDVQQGEwJTVzETMBEGA1UE\n" +
+ "CBMKVGVzdCBTdGF0ZTERMA8GA1UEBxMIVGVzdCBMb2MxETAPBgNVBAoTCFRlc3QgT3JnMRYw\n" +
+ "FAYDVQQDEw1TTUlNRSBUZXN0IENBAgEFMA0GCSqGSIb3DQEBAQUABIGAJ6gUwBMmtiIIF4ii\n" +
+ "SzkMP5vh6kCztLuF7yy/To27ZUlNOjBZZRuiwcQHiZx0aZXVhtAZcLgQKRcDwwGGd0xGvBIW\n" +
+ "dHO/gJlVX0frePMALZx/NIUtbN1cjtwDAezcTmTshiosYmlzzpPnTkgPDNDezxbN4bdBfWRu\n" +
+ "vA7aVTWGn/YwgAYJKoZIhvcNAQcBMBQGCCqGSIb3DQMHBAgV77BzGUrfiqCABIIgAGLhaWnP\n" +
+ "VOgC/TGjXhAk+kjv2g4Oi8qJIJ9CWXGnBjqMAAkTgUBspqc6rxY23gIrnYbLxX3Ik+YM9je0\n" +
+ "XP/ECiY44C8lGTKIOYAE5S58w9HCrtHn3tWid8h9Yc4TJrlJ8DRv0AnpOIsob1oqkDGuIjSt\n" +
+ "sKkr2tR8t632ARoEqyWdoHIVdKVkCE7gIICHn03e/0e5Aye4dLWttTNcCwqClXR9W6QsNPuA\n" +
+ "ZWvxBCBzN8SmqkdJilFFbFusup2ON69oFTFpX8CzaUYoXI6LgxuX435fWsXJUfDI077NWQrB\n" +
+ "LbnqM6UAoYkLPYRL+hTtYE4Z8o8sU/3n5yaq6WtCRUWz+ukQWKfDq2MDWqTVI12CCy505npv\n" +
+ "2bvNUxZHInfmSzbdmTty2aaSWnuGzWI8jnA/LdPS+0ly8fkZV9tU5n46uAYOFzcVGfA94iIr\n" +
+ "8+ftcVSSLCu5qpjOdYi1iVg/sR2sjhq3gcS+CxOGjdR1s+UWmWdBnulQ0yks7/PTjlztGVvV\n" +
+ "PYkmJQ/1io3whu0UPGdUOINTFKyfca8OHnPtkAqsTBqqxnEaXVsaD4QI859u7ZiKfUL08vC2\n" +
+ "cmwHTN7iVGyMe9IfaKxXPDi3WWbOi5Aafc5KDeX3sgzC01LoIaWqTrm756GEj7dJ9vsKzlxO\n" +
+ "Xfz95oVq1/pDwUcPtTtDLWPtQHRmBl711qzVvUozT9p3GCmvzDHETlMQa45/m5jp4jEHlA1j\n" +
+ "GFX/Y0G8Y5Zziv9JD2sYc+78H5f7IMrHibKRlnsIuCvcxazUB0CfiUO5Q4Xe82bSS09C1IvJ\n" +
+ "/I79HN0KNGN4es+x/0eyIlYD3dcm3uqDpsv0ghMEPBKogqDLMzZUwW3bQxn8bMqB/zL+6hLm\n" +
+ "1197EESFEYrs6yzVnuap+vnfaqk+vprwe2Kasl1vIl1h3K+PZvsjdQHqX1WfZRWQ41eKHX/M\n" +
+ "cR5Kn8fhi/4ddt8IK2i+OeCbkRsRnBIhGpcP2pkVaH0EtZ45nbxbs1qlFbWC4nWAJ3UlmnSe\n" +
+ "eO5QOErFgwJX9W1hUWiAgyDqMWcdWLYPQJ4Gw9yqwrEP6baILArF1oZyc9XgSBzZn/7kTw6h\n" +
+ "TeCSKu0QCK1jQXUKbftl76ftFh6L/mEPWG8CZP02GnDQx5eEoUhEIS4tf3Ltc/8ey6k62R8C\n" +
+ "gMLsUdOusI61w18bNW0ffVc+N+C8j8uWbc8w4dL4DHnfz/oFUjuk0AlpZE8ii7GNqszBgirq\n" +
+ "wQ3WdXwpD4Q/j/hru040ONElMJr7HO6ipL1oP7nbIR7JHoJmht4G39pXJ86XfJmtzMuu0MxC\n" +
+ "UTcLt1Sz87HzrMO9eWdApGo6qvwSwapAQC48nXY/WDRHgxjji6EQLwO0wF4Rlwlo4SsW3nwm\n" +
+ "NtOBsjKsEQ6/WILvRAziAPlp7+v13QfLrrzmnWFwKE6h9KQ/wpLL9/TAoy76FHoRvZgT3x20\n" +
+ "Vo9Fe7nZbc6qEc9/DbwShxWMbsU8vlzrxm4pgOC7I4jftUgolQ+NE78sQHH4XefHDKXWxRvx\n" +
+ "H8HVU/TPsj+2cEHM2WlVOXlYdtlobx20DSiOvhWdkW45Zw+9SaVkGw/IhCVkLi0UKuQV1gou\n" +
+ "lA4FeTVs0WY7jUdZB6c3DYgu4o5gxVvpRKOmwNp7rVIjsGuAjC91FN3DGQYlsyItLlZd8Yli\n" +
+ "FqGL6B2HTehmOGwtc6pfzbUJj9X9biZlQBigS3waDC0ei7HUq5M0ztyZv71dg+ZA39F0ZlVD\n" +
+ "CszjUNp847Lvt91JVmQdH0HTPu7Qfb/l3qX6LARTCgFfLGzjdcthzxyWEU/oCurUj9E1MwxX\n" +
+ "pfr8AX9/ajgCCS9bBvV0luYe/+0xqrzbnZw3m3ljfpxx5k78HFVuYhXt4iEsgtbXhJuLr/EJ\n" +
+ "B+Cu2YaQhXrvtyfi4EkOLoOcIzu5hs8V4hPebDbhDQKDcF3EhzYZ0k2YlfXnUx2Uk1Xw/x7n\n" +
+ "bLKVIpw0xSnVWdj3XeHLwEwh+T6/uthhi99iiXNQikxwbrEU4Y5IVAjh/JfKywIgPcXnaDqR\n" +
+ "1anwP8a+QQcD3U9neOvIZVx4fA/Ide5svJEkJ6gccel7kMAGD3/R14VfasqjBc0XhoEZT4PN\n" +
+ "xuW8fZIKPkxU4KEgM2VlzB9ZgTTcfUbUMmaCWioQEwfF7J2PhmIl6pBUiBFUdPv9+TnE4riG\n" +
+ "Cm5myUQEap9SFIjWRbLidy4+ZOK1rA34zNT4CnknLWFruygn8EzpgQVlru5no+qppchbOjyH\n" +
+ "O+Yz9VGs+SjoQlMl1HjtM2GQeNizP7AsLLd/R+jQ0Al4+KmM0Z8obTtYKUjG5rlwtNzcxyjv\n" +
+ "tvEhXeWjD4xGkWN8Xhf7VQX2dM7APMIiyyMNaNDVZvWxU9DpJjt4F+mhQFk4Yk5ao+Bs23MV\n" +
+ "XI4b0GanjnGzu5bHMUngkHLISMNGcDicT5JzfVYMbiM2pDakaaZWQ/ztQW5gWzjYFpj/Yffg\n" +
+ "ThvYXUi71gTZqHZiybqu6UI4iBOXc3mXbKzN3XwBSfCODFHJj5A9Lzh4pVBrndC7APfgn+Cm\n" +
+ "6ga7DmPZI1igomTOiIcF5+i7AOW/8hnv9hlsxN3D7mrIiJuRAkCD56kGBkCEMnZ1EA5nk49+\n" +
+ "k1s+XKPKPskxz8XrD2vhPL9ToSXQl/i+b+bh7jBIi+2KJh5XoiM9CCqP3B7bjxwx9qvtq7nD\n" +
+ "/+Zn4B2qCxxGI5d92mV4d1KGanbzHSZh1PJyQHrRcMMdoHMEVl1AW+YPffkwQrnRef1AZm9D\n" +
+ "ZB8B5LJvvjyNXsVGicPYM+RZwthk9Eko0W17u8fC3I/TST8c+kNqJihNhJW3y70plmSe/na4\n" +
+ "G4XeSHdbHsOWHq8CkRW83jk+2G0BE+1Y7YQt9jLOgVlIm6qYr1ov629575zV3ebyxXtkQY0g\n" +
+ "mjoal1nGJCrCp7GAl/c5KMK66T03RXEY+sBZZ2sbv6FiB6+xHreUI7k+JCUJ/uoW6c/8ithM\n" +
+ "L0gMRpxZrhksRcaBDXa8Mp4lyrqf3QWiowznSIyKPm7i0FjGGul/SESz7cKe/8RjJbKnx4TP\n" +
+ "dZ5G/+dhOZwXoisiGSj4CdXq6KKY62C1Pfvnf9elYJMo7GT8+6REYXrCQEoTIAw9zkQGD/FJ\n" +
+ "L6PvXunheXSHY454jau9JqqQdYaroYVrIHD9AINJPKluaToyT62oOL2CcG3dB0Yw1SZfUASa\n" +
+ "P36CevQjjs9GhLeFrqXXYx9ItqbYZKMiHDarjf3KgOzRhFS97n4OaZgn7Yc/tOvtXTMlYSAy\n" +
+ "M4pw2vISXcuaSl6mQzbllYuWk2sqt+rpt+/l0Hd/TfLVzp4mMq84cKerXSL271oc/2Sary/l\n" +
+ "wRHj50Wz0gIxjyfg1FgegnDmaeDCuMwSTFjrlUaV7FSKPZqaVr4LBQbyL5fsd2VrO4mQfmdO\n" +
+ "rwd7+CojtVraeyrNcwC6inBoPOa07A1aYB+bGKhwn/7n6YJEdX8AtTtir1u4r9rIPeUyv+nA\n" +
+ "QpPkPie5R481ZEgApFhyvFy6+etmHBPEpr5PguDzX1Una8sOBfBxDMVCLdn6lHA/ebDCDrLn\n" +
+ "JobzOLmW8G8cXwTmgxr1r5KbvoUaWfSZtJYL6M3b4Ix73GfAhbH30eAbgRya+IHrTx2Nhy0q\n" +
+ "pU1mgbM1aV4OhZ3wZXga8tpWnohVcTIXUfQhBYwJXCxVj6lR6mVd+4WKZT5Tz1twrYxI1ZGD\n" +
+ "HRIatLWeshiULj2KNVtTkc0w4HqIw6gVEwYSojzduuhrtXZMsBVImyV9151ZFL/oDgMQEOEm\n" +
+ "qIill8STDIz2bFF+FzkLLW+l5TeJ9rS4mrO1ffKdVWWL/PFlBvP39PHTkSv7+MYKhobbzccA\n" +
+ "ydjzdauQVn28lXVIMpF9UWmMeyWZlogGNECxb7NAPwvzONGvak//dBKEsqnGquNfNHfFJoMZ\n" +
+ "S5Ts8Br8rc0LW0zyLpLls3p+AnyJQQArteqraSodGk6X18BIbJc2avhbzGJnegacFhTr+e6a\n" +
+ "7niVgn1/P9PNo/SfMYZLWTIUKLkHq9GDhuniHqGM7tcdujI+Orit/uLVYaHDEMVKUDvJuJGj\n" +
+ "z+EybiUvIvpWjY7nWRjmtwTzR8JFUnltTGoLbcnA0Fmtu3rQCOuECYbUvH2bbtJBjatmA38+\n" +
+ "AotExnchuqDI13HVm9OY2CjyD4cJonvmjpz60xwFnr3HGp8pZNNFmvY2udGKUYhNF1X8mb9c\n" +
+ "vgs8SiT3Lf1HNXfayy+F+kLkXqBNZLnGfRHWKOAWSEj8dXiJ0ScLmAvoJTbC18s3yYoK3o2X\n" +
+ "z1sY+RERhyJ3UmFHuQ5q75w2mKz4l0kzHA6bfwHvLbTps7sNkkhT403KU8RbxNmsQDgFMCfw\n" +
+ "BaJnTNyQFJTVgljTEnFsaUAhEOgyoCAFvwe7eKTGO2NqqX9hrWcEoXSa6FgnLQvT49SZHrYC\n" +
+ "poVRVZdJ6sqnjSy7OxT+WbuQufc44TEYeGuHjH444yS7ZCMVyjNaQDRvWPYuXmFp8Anw5lO+\n" +
+ "xLb+LMEgeFKcVMjtnYLZTTgY6UtqMr18BzwHKft6+ATzyUc1zsHv9Ap7mmdRakLFa+8QbXvc\n" +
+ "+AfVbOsmcY8Bmin0nKIL9nfOUPahEMQBN1NN3dOWM/5qa3REk1Cx3rIaB/jsU9f9zUztg9MV\n" +
+ "kvplfOVYoxUsBoAhCjjzPmCgVbp6Gnr/Ebd2vFvDsokp0yHw7Cgb0mBznsntRnkb2cEB0tvw\n" +
+ "fBhK7YeETx8W3A+PKSkn1AwvoG6WeiM6CFh2yp/VYjabwfrTrfVbXpk4epzCLU8WTyPvuxv3\n" +
+ "DDH4s/Zl0CMIqL2oydtEN11ARdwXi3AImYsix/cWLzV5r1UN6NN0B0y9zmT5BrCElrJKJxYb\n" +
+ "NmafkxyrCFGnjWFIRzw4s/GGm/dBx0DGBizioedTttqjnF0rfF2pM/MVf/udCdd6uQyYlGZz\n" +
+ "AxW6ZKX0TPj7bvPyRgzqXBXTfd23kYVH/lvHEsKxnMb2F9A9LYun63jPFSiHXCahU4WcuzZK\n" +
+ "aH6h+cnY3xJn8+P2e4m4pTDMHdsgBQs4upMTxrxhH01MnUgbKz6IA2KV9y8H24PzzqJawh02\n" +
+ "xhdMHVuV396LvvjICg4OWzvFdEFdWDEZ4ph4nYTHN62TsQUwa8t3MBbKeW4mlIQXqGNAhfN6\n" +
+ "UR8nqf4H56oAMTvsvNS8EoCgcu/L9C5TrDnldYf3Zhyx51A0ufvpSNR6onWOKzVF/qwtyn/C\n" +
+ "y5l9X4c/0uCbff2nkYUqVAkfgD/hdEXiO0kdku6ptnWbNUPU76pQDQ5vD6sfe/8ZsRF68Eay\n" +
+ "XhvbZYmXCVn7azZeEps3EiOKCL4cazE508fLyjC/fNc1WMdyIve1lhXGI8uJ7/lB6tJ6CucL\n" +
+ "WT4OX6kHZh4I7mXy2+lezAELmrP3eU7YduHemlXqqlOrnw8pwGEVCsxGmCv6DdJNehk3wCJv\n" +
+ "GcdygTynL5d5fGe1mP2zxZjW9kscNX1nwf1+sz6chZ3jXpiBTRXICh66vk3UbyS3eZk8NKYL\n" +
+ "dY+/cN1O4jtipgHGq8EPUefBVRH+DmjTqFA05qHAaV/fZ53xLWm8YVTI/DS9fbbPZprOBeib\n" +
+ "GoMdA+a0Sqh6RdIWlaFXYYJUspp+rI1FlOBZvgy8Z5K5oGajE6RM06EeB7DPtI1/K+jRXa5O\n" +
+ "YXacRu/lgDlZvevVsSj27Oy6A+rbfo5oafhMMCLArtGlY4ENMk+u/ztvoxPlos9vCUV6NSFj\n" +
+ "znenH7iv5TUvv5gm4n1NCSZ9Db+zW5DQS8Gm5iGUsRj6VX5hZ1pMl2df43B6I5BwCKnq2eYn\n" +
+ "mpDzvUXUku9C/RkTxf/xfaIG30+whnY9Id4MWzWNNIJicvEdJkDgE5iRfwsVntbQYGwctmxs\n" +
+ "209aIk/KjeGWPOyg6TFYF5ZJMe/0XVSr2Bci3cj7GWeFc2FrFB/5nfExErrT4+e+9GMCyXcz\n" +
+ "bIbj45WCoA3Lgo2vh7bZV7xy6iXv358kl7bahH2/IvjUPGn3EKQY8ApoTNrRXvKAt7P4Q7zM\n" +
+ "HrRSQ+iDYZ3BCmoWfXMzRmRJbAzvC1akeduykIwQkL8QP7z7n33ntPlP2n1rDLI+LoDSOC3o\n" +
+ "bJzafHOOAH2J/MWOI61Tj7+FWyGIPihUf4rZqFXnoZkBpy/fRb/+qmSmIZ3YPiDdwICnCerU\n" +
+ "0BLeaWRD4aie51FyZ5fR+tXmTu7JDC+GRKp4EARokJgL4CTnuSGY9TaYKsoKrwST/9kKQrlM\n" +
+ "ISOGV8yTnLTzhs01EijkNEJZkJwg7QYxsJ8x9zLDL44fCL+KALLpkHEmUQdkLwy5DQV97qL+\n" +
+ "/6bSyxgLBiEHRJQns3HHGlUvNt2naUPCukRO7ieIlrPPSaL199yPcgjmFIBiXptTm9fZJRzE\n" +
+ "rkwkIeGlXzxhSpLHApOnDZaeNlVE+5NyNHOxbbBxfrc5Xmg68ZESXwxKeZAF4GM11OBLzj/f\n" +
+ "r6iGBayidg/uYZ5D0CCSyTDT1Y5RKFFe1DieQey1bj9oIuE+jo9coYLc7XUK8cnlOqLRl9Kt\n" +
+ "kcB4t5JAqob/ZttXhHnZ8J3QUpprXYYQ9c4NrYf4KEy1+femS8uGnuBZgUM1Tun5EjSeKxMB\n" +
+ "cY8gGkXcsuLzRpAtwifgHM2R6dgOq7g2nwB4wQYiILSqAsSH0QKNb+tS3NKyfNsg1tJK1PSI\n" +
+ "vOjRQCkzaII1IureIWrUikWCbQWqTDW/PazEr3HG9+BMs1JMUbEviA6ljNZz478Xbc+mA9yI\n" +
+ "RsqILUos/MCjKEhYn/qq+BsKtKmSC0nsZ3KXQcLbq7O/RZU85Dr+N+wyhieT8vu+4hb0mqrn\n" +
+ "FZwyMQt2WpnqaNk5tw92/Gw/Ad5q6ACt3PZiG4GrG3NNaKxadwkN9POzyN4zn+7gq3cyF/uN\n" +
+ "imAv6aVHaiD002PMWHIMKUOFwmS9AV3iskmW+swH9UyLPnWDejvUs8jW6mmeD3TOR8sRQv8q\n" +
+ "KwcvrscKtEXmBvFDYh3UcIcu/j5wb7WLwhNi3XOpGHEgg2MjDf5ti0kkrR68VEc+XBvnAYV7\n" +
+ "5EIrxI1qfkNcgXKRdOg6msLv6a9QSgJunwjACXM7Zv96MHMEETgkNr7DO+woHjWcPl4AYV4k\n" +
+ "HgPGUISEGUQr6/c1penqLiExW+iVj8Y5uLj3c/PNQLMhnttckHWVCz6wlqxmvoUQHgEl3Qd5\n" +
+ "pODBWHyC2FZku+Xuyu2o+GHxj10hYfsEl/qoDqqvW4TGlTz16MQrSV3SMs/i6SHmq5eiuhMf\n" +
+ "Hj6nkt3hljgHA1YawbFL58hj4x2DAyeYFfLY1YEBMH3K6JLxUdD0c02lecUDOqUxBrp+/qp2\n" +
+ "4KIqFLZ3+z7Wzx8WI0DzKYyZK79+VV7+Imv+DpOTaLFLu7nymvPeOgbzTsrJbJQo560EXpLl\n" +
+ "wID5Z36x9P/A54q0i/mhTzK/RtYYhqgaV4+GmP7XxA58zulNAJIVcsmgXKiD1GpmOR8c8EDm\n" +
+ "kMGEcrACXBOkpEJHp07J5vD8gfWublIG3MzeoTjeBhUJM7G9H5r6tNHdB4Ak+TMVfjcN0vbZ\n" +
+ "UtVCiQJqR8USTwNCumY3EtcMiXGVM3CRTTLai+IZVmLqED7SL3kpOdFcthMk5K0L0j1Ootan\n" +
+ "wFE2QhcmMVP6x8kH9cJVhbhLHWYbO/vg1AcLE7YOPRD3DVId+3dTZo0JVDC6RQKpOuUBolbH\n" +
+ "P8GpxBg4IcKqyMAA/1+FzaLicvXPzk7rKFkXjL5cgervdWF8Xx6gaihVXRfR7AiWOy38I0GH\n" +
+ "RJI8WC8NruvGHN71Oi0VKiyGD8o4tlGZyQoeRU02Z7cM1X493wCEVUuBEXYI5ax7wIcl25AD\n" +
+ "+WAv2iBZ3gHNNyCSJZM/Tqk2/2B35pfotVMgs67fnUy9tpm3n9nOdm/FgReSu3CBM3JZmYtf\n" +
+ "tOfqq3Xpu/3WnhWjkqDVmgaQ42PWtxYU32ah3M+EHHhkYSIG/csaSkVlyGYul3BsfeZ4jCvK\n" +
+ "MvVFFD2Kzkyt8zKKQlA7Zzyf900aFNhU5SkX70s94Bk3WXHXD5DRQRYHWmruCFVkFJXyaiZj\n" +
+ "qWBVKP3Gv6OXSc9IRimu6p0l0TaDxxjNoPskg6dXHTV5uTcgOKfRohgudjQC20VmamOp8IGd\n" +
+ "1muj9L82CT7elonqA0E6HFZfJqJIfxq/wSFVG7wiB9Gwjoj1xgB7bSzbglpOV/ReBPcv1ivl\n" +
+ "KsJmK9nlmfS4Y9MPWuctSROg9QVEOWq/XowOm6+Y4bpKpDhmmpsUpMsDtOJnrvSWJwcwWRRB\n" +
+ "+2Z3H6kIEUXDq1cjLsrBIWRTwb//h0Sbb2Kb1cUHnQQAjlhkSlOpaEMTzQb7GMojunx8Yeb9\n" +
+ "ff/1l4/1tqVSxX61AJuJyywGyk9AIsDIm1WW6P+P5AVRsy5xu61qrL60GHlMxtfm7ZSLAeR7\n" +
+ "GvBOgDitOE+llhzZSjwdaESxSAvnhFfM5TOCSj5YNBfLaI8bVxn4Br342GV7nufFqOLkp4rr\n" +
+ "3pcNbQvsb+k7kkdyNMNtOQfG/Ojf8YTGoanvDYrtB/0Euu0TXR86ljXPIJOT/4nhue4149SO\n" +
+ "9lboxBH6iaP8AGxn+2/pzCbcOXjDzcD/i1DoQXVcwfniiMf6S+CHb38Os3KTO49YsMYjrDPP\n" +
+ "9L2IurXfUHONlljI1T6GFV1RfRCBfO5XklduPaR4+4B0JLhU6+UKl9vdTphhwrYTuJ8I3wkD\n" +
+ "6DO4hvktTjl/IPLyYPU1w48W3cZ++P/wJNtIYl5I/ZSNfAzefc8SQh7kcnVnDoocElfWHfg6\n" +
+ "oZL0MSe088uFDAxaJTLxDaDIbzjBkwaiRYSBQ+SQVBmUlP1EjLbrwdayi2IidFj2Mr6nv4KZ\n" +
+ "4HUlmmVMSvg4K2Iv5NGgAmAjYngYSveCdDkYXQgOXldxnzVTzRRP+nEAtFepLx6TZjSjawqL\n" +
+ "nZ+N0/BCJ5UkldplLALg+5kdHCLwcdkz+H4YsB2sLE8zULM9JJW88DGBKXKue4J8GkhJlY9i\n" +
+ "1y1pdTW6mvC0J0oMAe2ULkrakIdyGgNghwjnDMaf85niB1A4+qjN0K3uGGjRyWddJH/Pnv+Z\n" +
+ "7A9dmkRNnYMFEkyFYTkbfmE2fHr4MY+YwlwjE7f69LmKEcai/is9L/Lqv5Onb8W6N06l54s1\n" +
+ "iYKzFFqo/gc0UJsiBhPmSKMNvoeoUpi0yUgXDPtw5+9HD/hqFSXqWGh2uR1vOUi85k0f1eOe\n" +
+ "zzkIBzcL3on0y03D74cB1QtjBAS2lwTXzjyEbitB4AxHyp5L13tPJs4l2uo8JXpL8u0HmJVR\n" +
+ "w6AOL/rV6elTYkuxnq5aOq8WQcm+1cYY4fPdT7ZRwVy0ZfHpN6VsqmMNIoAUyRgy86sYU2E+\n" +
+ "UMTeKZzD1+T2LbbV38AQh2kaLlSNuNkoFIjFZZvth/vubqIjHlmsw2MeZqXZIs3dBeA/1GL8\n" +
+ "s0k5ix2Obdy1t+w1e0d+y/ei1IzsxHRdBvrn1YDqdFw4xdUreJ3FSTrsTePlSWVjJXKGm13h\n" +
+ "hFjuCqELnR05+au1dFSbiAlbMPM6W/cebi+/0GmvIvfRaqrbvRJoUWxfgaFcanrlin7a11Pb\n" +
+ "6pFV47mIKHxWQiYq0z3kq+QQ1YqXvxMdM7eIg0PEOygB4Wp2FwIG0ZcEFfdq5CPveormJ/EZ\n" +
+ "NOFrIHZXkFl8fT4x8LFLWNmlQwoVqeQGOs51CYQF7YPXjFx64mV0RXz/umA5/Un6fHjKS5Yq\n" +
+ "7ZIhx+JPX4+s3RrxbUjbq4hCCa2MSBBQONhdmXtKKIf+TNvnimm9je5bt3Nu79A2OYbAzvb3\n" +
+ "cOEcQqieXzqj358oIxwd7BL2xLEMbe2Z+1bDXK+YwyJpXNF0Ech6Vbh4PSLHpW1jCoIn5HCP\n" +
+ "4K28TdrXOwwKkac1WjaQCw0RztZEatpJW1PyhQ0n8xcegTqT+6nyifeTbEKuUYXhCaJa0spg\n" +
+ "Xx1yv6G+ieBg5owSZ3DQSQ4GmaZ4GBgFePkqroihA4C1bs2FbrRWRFVRWAAEZYdcgHOyBWNG\n" +
+ "KLGntWv2VWf7yid8+oSQLExsYHBGdYMTJCbU53fuAnJYE4DJJ15Vztj0TO74KqKrkTtxfog7\n" +
+ "5CdFia/OvcCruLblCFLcrRyhsW3YKUxHmgpAPoSN4/46Bz+ob+CCkd6RJzwjnhfIgbXqKRLE\n" +
+ "8KfsCqksHp1p3hEgvm3iDuqHfBP/7O/T5V753HBhuAzFZlaOzQsjBfzK+BMXP3zp+DEpGwUL\n" +
+ "Pd/DG0fa6odMTqPs/TUblpHeANF88+XRkgB/hucv+K7h13bfRRYPMM4zephlWBzBDIaoazv9\n" +
+ "SvRyy21B4vRTXrwbTkSZXTtEFCb2027l+ycCayD9XXCLQUhjSrsI8SB+9qC/i827HcLF7X20\n" +
+ "L/8Na6qnRTinmwkBUDk+o6APUlR6sDpX+uf1bOyiV6oF0wy59+kXi9oCjupzPBOatSM8ka47\n" +
+ "6tcHJ6na0wJ+Z7EjcaOqy26OYcPT2m3wvquK00JLHCaTDisK3cQ9178FxZmpD8i09AsLVWuz\n" +
+ "r/dmucYAxjKMQzV6+q94S42EThtTbw3LJURF/8QNLk8AZKwVuaw7zz5+8F/bc2qtrUr762t1\n" +
+ "KN+9Ul8Kc2N5IxAS+klFXPfA1isfvbm88737wa3Tk1N54QIvDXVLBJg4OzvjkQAPai9lPqUK\n" +
+ "Tj3LrtYGPDTaRyRXpsH0ehIZ66TRobSaBBrL4VeopHzoWOutlTLlSSjZ1Grn6SFGdH/i998Q\n" +
+ "64ucbkyejUbFT6SgOzDN3rnl9ppqnDPOCk60WAeosAJdf4tndoYGGQQnQpsBh8uLCkyyu4z2\n" +
+ "di/om5c1yNSJsv6j2jQQiPsMX+ef+27mdAj9pUXQSRnl3oZRvQMQ7VmKsa8NBByU05MwSvOn\n" +
+ "vuEKgPq5CL+2Spnjcll+wWQsDF6OZMb2cM7PmLTGTI9LKnPnDPEhz4borQfch3jHR/EVtsmg\n" +
+ "BX6xmoD7gQdXPWBFTvwT7ljRJ4v5O0v/4p56rTneZZwBBIIgAOfncYVNGur0g1ZaFAujgzEG\n" +
+ "/PLpgIqn2rjHU+zmUuf28MvHdWxVNgSar7qMRp67M6UM6RExfuv1vzWw+ogYWeiQOYMYcBqP\n" +
+ "4p1Dm0ZxwWaqgllea7MCmniOrEGNizUMlvYIJoYcKJFVHz4Jbxy9pzGVL58Kbmwa1ZDwSXqC\n" +
+ "YHcVLer9yxoZpuDnIhRXHUnDx6Iw6QDiKpMQqJcFKf0YJTUrhN2M17kUaOD7TY0zHhDznFHY\n" +
+ "Oe0hlEu1y/FEwNxueg8tpjGVivXTX5E/81RMpUHKenlM8WbA7GQepFiIrcZTsnZ9jBCXLPGu\n" +
+ "CI00YwbNnzV/EsYsHAcvwIQBlBDVjSjkxoBaBmDsVpLawCh/SGEAl99Fe1/08OKHceGPDxko\n" +
+ "wZ3Sge2vC2ydyu4+LVnypr29R3sv53cnApKlt0uplnF4rbpBSbTCgH6IR0Aq/aYQUW032HtX\n" +
+ "wuPhxgIp7Yf5mi5rd3MwyLhsTQ7dFhXZT1kecAXAMo2x2BAo98yJfmvXM90hIwlXHwp11ped\n" +
+ "MTzc47I5XC+dR3YTHbxUKC7RCo2OjiLsT6UocM0vqyxkkJrUWHuC9vGHNEA3wmJuj/Tncr+r\n" +
+ "/bLYzx9TWcN0st02kCC4wUjQJuNlCZLjmnCrr6Y8Yrm592pv3ztcVD+cbgjwptpxN4OXTreI\n" +
+ "7Py1P0BRRC7N3I+W8OVsszHpjGsEqxFDdyRL7VtUWMR85c1cJKvmYWeSSVX0YlNsbMtVledB\n" +
+ "ViJg/2Qa6vU5lB3WXIyXOuJVEo9B6ua60Fg+HlKDHEl/5bOqOzW5pgTz2BclmAb+NvhEdl6a\n" +
+ "SzNSHFrCqmmG6Nb9DCT9wcvvs74PN2QFHm1vxPymLoEQYZ1o0oI9puayLFpMykIK4N8Kinp5\n" +
+ "iUWxh3t+V3L/yz6jHXiL2pR3UYBrfzRb+bOumTD5ENLil/3P8BngPSCvYAfRMOrBj6EAIoZi\n" +
+ "HTaCqKN2K7LefPum/AQXfE5oHHJXWkS5Zx+DiKVmwJcQuzqO5j+sJxuUlZXQnSR28g++33WK\n" +
+ "zZsrMU1MolLmEFArfC2Z1o2dxtk2FIQVq/mNhq79sfU+xmCEaGyUV84NCFpXMTe2z0m8gQA9\n" +
+ "/v+Arqi7hCbtq2AyUFNwlUlBjdAxtoPNUj5E9iPfpQVZLUTGM8H4C5kJkOXYtb+XKeoKRLx5\n" +
+ "VCESit4KBnFfx4Egptm58q2CDUOb441YhMQKUR2TCCgLPJFZBKexz0jJpWHoCBBNj5lbAeQk\n" +
+ "3Hrpj2ErGttnVxL/pFEOY0u22FWHeXdELaBs0bvbQ/8WHGUg9THFzhZtvo+utuFGpmU+gK+a\n" +
+ "XCvYMtSxSBFoSSwA4v/YTc12QBO/Dm5xINzupyx9cfkbUgrRRbP/ORXB+KIkL3uQEa6UwRzo\n" +
+ "NdZlGOySsXHLmMkICx1TxWHiTjVbrk0tAvSjIiCgdW3kFVAqGNovgl259anhCkXxbnLUMMsc\n" +
+ "sAVW0cdy3DPLjbab5tCSjbpLE8g7KxGTX6jgwjZVEDEkvhk2JwqaxQdhp6JsZIOMSxOmhhSa\n" +
+ "+zZ2V3amEkQs6Ks+3MOPRF233G33dfkmkaq8oPNOXzROimZod7RaYTJYlfl3kBHx2Gd33ID3\n" +
+ "OR2Z5ZywURCEUZ1tmidgJaChiT42hfkTNI+Y11S0DKHoQZfDQQ4gOpoGo8qn8GntVyVx25nA\n" +
+ "VpxqsbddA6diporOmNx76M7+tuSKN8KpqHpv1K1+Bv180rqa/oZ+PXxO1nu3Iv+drzvMuSXs\n" +
+ "ityJ/DRhzg3Hdz8ZJOUuKb06AfhMDcFGOpCAz5KVN6wr9/bD53Ig0KU/gUKDd2vBsPemKSL5\n" +
+ "FKKKuHf5LYVMELEfgEwhcnen5tT+wvh+UOVit6YLHSQ3uoNW9REzBwEsBcSM2xHRlg+oPw7V\n" +
+ "K6CoW1SZdRt3P6ixVDbU5IAz9oH3owqC27FK1poBSXTEg6+AodSdKD2TOqyAaP3a5+/QoGya\n" +
+ "uQntOxj2mU9rtGP2p7wQuL48ya6waALfx+8N/P18hlILF8x7K+JPBZ+0BWhMNEF9BgPOau//\n" +
+ "THHwFMvjc0yVlRtChlhzEjhAhvcK9WpM7c0R6N5vBm7M9477PbGkNZzMFqduJxTw+hxja2oZ\n" +
+ "gjcm9JXGFbYb1ATE/8WDh5dy4H49azbAb70mf9XxzvllCUCdor8TXkjqTp8qyof7P81BUknL\n" +
+ "g8vYzpY3D8eoKFwyS/f0QQic0t0/wbRVZ/tiW9qzzKaAppKINddPfVXlGUKbSKsXy5rjcg+f\n" +
+ "rD4WKauGPgTs+kOpCOAOxAd46wEP0CoLnjALeVsP6q+yNic2Mxa2FUN2fQ7Am8IWV73cnkP0\n" +
+ "RK/tcmGOmFkg73KJSl/FC3yNxG8HLQmcY/IeW+Z0PVLTj5tzWer2cey9/JTHzzOLvqEjDpZH\n" +
+ "bbsS7lOi+oxEEHHRlOM7PECSsMc9C/AQohyDyHNYPEqo1XjRmTUSU6ozbgcLDucrpAIjvVYm\n" +
+ "8Cz1icS3xZCO97XtqSGd6LsMYWlCHvQ6RJAcuBxL8sasJHkz5QZ4TG1xArSRDdz+bO/4Df+Q\n" +
+ "R5HTXGqY6cFs9CLG6O/vpzGKCaeaIjKVIZTTl93Ql988Y3Rk/NQFpWRoIWtrMC0Lpu04Vmop\n" +
+ "qYLPJCFEdCctbhiD/SXjUR6unYXHPAPGWwpRmUF8gQChRng+R5bzpmMXGAUOP8W7lvthh5+g\n" +
+ "66o+0kvtxImNox3up83hSnsU9xv5n37j9T3pttub3ozQIJTudiHS6uNLbKwDCbCvrdvY9vMu\n" +
+ "8D2LSmNC1b7QHkU7R7Bq6R8DWdvm+T+LKqgqodpoInMsN/p70ShybyVQAOg7RNUzw7k8RJKV\n" +
+ "TdxHFAxEVpS/PiBs3JFwL8QpOMVhmgK3O6Ictn/TW249fQ5qEEA7LLMY6H/TZmYWg/EWfTzL\n" +
+ "wd4bGfdMoY+IRjMsxfX4Z1vLVAo83VbtgvbKFpLb1EO7Kc7zuCS0w1BeQ5++eAnZCy3GaTUk\n" +
+ "vFAkjZkU64NaObuZ1/4hyMMGnzNZYnNraZ0+wNOFdLquhi6F5wjsbep9kf/VZfNJscWNIhsd\n" +
+ "+okxW4QlBC9smcIQJfpYx+ycVttGXQ7acP6U5NmVKf/TCu30Ltev6/SXtLlWVzFMFO6ZgKrG\n" +
+ "4xlUqiSn2L0P8AmjvWEPAyL7f3E8iarGS8mKnAq+h/LeyQPD5M8sDhrBDsBweQJghnRavj5/\n" +
+ "kg9MalKxnbYxB79uzRi3Cqz1nNJxP/sAyUi4/c7+PU4T0xQkoU3BioXURhCXZMcOOBSwSEGy\n" +
+ "LCpJbPMRSnX6gveGth2ba7os14cRSG44LPe9BDjrJwSvVV4Pv12OeNPqwH/tvyaVi5V2UvGn\n" +
+ "J9t8EK+rYLlZJs65g7oxaTIcBpkRIzElLMGNmXsEHkGc5PQeJC48C+yho5cKq84lDq0XHlMv\n" +
+ "atYV/u5N/w7Ta+nOQGn41GTOyZmAqddNwpabhszmzx32klOHwNWdM/xoqXze0SHBEMSYaXfW\n" +
+ "cOecJNbWpmIoFs+gxt6AKnOYWC/UdaBN+NPUmyQh56LNBPXHInMGc+TJpJR2BhLryKYbMRiG\n" +
+ "3KcysiWiSOujHeMhohFMUm/DUfy1LgMT8T+bQGrCIvhAjpQn5uqtB2xBMtnD4Rc6KxTyY/HT\n" +
+ "VhVtQqITCY4wy3yv15lIGxe0LLGGnVtYJqo5EEe6hQg9eXOhH6dhCDKMQ8InV+H55fAB7dnq\n" +
+ "7gZhYwjUh3+cbQHnamh/qovVNY/4sTHOP0i+13ekbw/Q7zTq27bWPGyWrfa1vsMFqBZD4vVQ\n" +
+ "1/dkZvzpdWc0uJqqSw1p0vVaHddjAwaoBqqYLwIbhrhDPYqpkQuBnNLxSoYf296ut3Z6tcxX\n" +
+ "PSOt9Z5XGK0f3XdQQSOyP2ujB9KI8sNgPCC3BpXcqb0shalUXwltnRpAsLzRnxjOujR48rxA\n" +
+ "li/1wGpRxFPNsA0dG9/kGGN/FKdYW9J38fC8YVM1gpFDrvENuiGqKxdTnAQqwNTQ4YMZKgIU\n" +
+ "spsCCOA26YRsJwRYRn3Ajw9wpTR22OG9SwmZlhgsvFxVRiDRa0KlysJVpF4n5C3F7oQtroiD\n" +
+ "86oThYaQN3ylOr8qpf4ks/rl5QHoY7j72FAaqn/9hef1C3kAh6vF85ZliGXKY4tV3gBLMgZX\n" +
+ "L08CCTUsBQG+1qeRY3UKaigBTfsbYfxU/CLayCoEV95Y4j6yFV1GDG/OuYN6hSIjw9hl3p5t\n" +
+ "4iSmAuH4jkdQFWAile59e5ewt9KuJwxjyCFpn2gREx6LBImTDAQ9YW1AManPRtvriv0mnmG/\n" +
+ "x3Pm826Jteq8pd0Vi6pLLATWjzAz+GyrtmMjk1InY0sUXdMzMfWczWBedZKCLzd2WB1tCoUt\n" +
+ "g2ZnQO3nBV/+t3yTH15cNtkG74Kk/3itRBxz1kPvLjMMwQrlErfIF5zOQ/SFXaJoiC18jIFp\n" +
+ "1aDng/elbbjpz8Y6ZQdYlwZAJt14Pgmd9oCiT8nw7cNzJkzhPw1g3MSjHiqndHNeP3J16Rp2\n" +
+ "wGnvYwGTWA2sbPgtPSv61mstrs0ZW8+JbqknLn6lRxfnODqiwH8jR723GrJGHWRwwFOLN0SY\n" +
+ "eKO7T6OPsxdiWSnDb587DzdcPV8UjwU92sdtxJPJTE5AP3ER/GFlrRtJWoJNEc4FPQPEbxSI\n" +
+ "kf8ziZWlEcwztvZyeKv/iOqmGBULuXXjFVRYn+PLXJ7rXIMo/FC4rp8wOpVy1Kr82UoJdriE\n" +
+ "+KRpOMZAqyBoQhnzqT3KSI2fzfKlKLg4XFajzjKgvA25Lt4t0FiTX0oPjT5xXy3nLMPqJkSa\n" +
+ "1xk8jA/WhFzm1H7KPjttN3Cl7Q4II+NnbxXrZ3jxZ0pAQkbR1goH3QrBDkr888Gxp4RpyUqd\n" +
+ "sgplw5FdAIGLuPZD20JkSAtJI9MuYJtndWYm1xO6aIrpCsG05E2NVSr7ziyaEEuiL1Xc8TlT\n" +
+ "//v4JMO9As9x/Pcik1mD8f7a8qLibt4+yboD1/Vra4SgfWyWaniG326q5Upk8Bl1hksCKKTO\n" +
+ "7vSEp32TaP90SOuH054HQc4Ki0ffye0aBJMifV77RVz6GErggO6iyIsFjSVpCi+bwQZ6wrkk\n" +
+ "lV3znF1li5e8dGkfMv8G/F7rCpecpvYQPD4+8PPmIELFAoRXw/PKsFXf2z9Jj3KxCirGmnWa\n" +
+ "6pV7BuKiXH2ir11ZD4zrZ8Qi2SlAJ4VfY3BIgt2nkZ8FRkmT0wroc+Basp8PDcuKzgT2HBgX\n" +
+ "r9ZhanQBsf1OZxaU33jeGUd03f4Kgf22xawruBhcdwlfRybZSUQHGpiTbhflPn6n1L697/xv\n" +
+ "kr4StZ2YIb2UHppAWbDBxZOvBct4tBi7L3A5hr+/TQr2em7kYbyrDn1x8wgNxvk7mJ2s58Dk\n" +
+ "b8Sw+XG0UnmuLhrPBF6Q7juOHN2BTaSn2X8IPtOmf5Md3KCBwb8xoIz1VUMGlgyQpvu6dL6p\n" +
+ "DDFkeCWmZloPz5tlZfwDtvgzrPxykz5sl9nwu3T5nQeufx8z76FmN1ACbxbKP4cUD29WPVRX\n" +
+ "fXQOdkzT2ogLgDkVXvOMZgeiLJ8Ws2nWPXKct4EsrykjhPvkdFLv5D65hvAnWYXBldq4DUfz\n" +
+ "tYYzorGqiyQT0p27FA6z/ohsOzkrYT5DHmOcgMJCItgnifuFh2LnXPpmW+PGPtHY4Ij7hAaC\n" +
+ "XCE++XLdlHsrEpx0Fv2f3zjmdLYRRLFkYq/g5jMWw0xAhTx9MyLBNSOTELeEZ1gOMyEUBMkg\n" +
+ "64uTVRkSZCNjOMj8QuzozG0QT8zKXUPZufka7ltYMt/LrJvUx1PqeX/Hf5hd7ZTj/2xdOZlA\n" +
+ "DcaB5H8jclPjsFn2HoLeVHnaKt1ImdQMmJpktGzC20rT1ZVqg/jIhm1hEC6rhIgXI6UaXxl7\n" +
+ "9sun22kYio6itWgJFtlQvdEgiTHlYF5Agq6Yeiv9/gw2HTnd0BFL1RHrYeUHHBxvM4Nfalu0\n" +
+ "kVRhhnJBpa5kvP74Ck1DpSaSQ6ftLOmbJ0LBZQbWxPuH1bOcztDPxW5s7F5dPqfKRfzD57s+\n" +
+ "CktZTUI5jCkxGUdsLboqCaX/9ne6mr/KQqWNbkJ6Vpl/uBMa3Iuk4UdbVdLPa9QB37vxLChI\n" +
+ "E0iRpbPCa9GBdvyf1iTlvSEAJ+xkKaxKf3DFt4ro2+CcUllEG51wegf09GacjX0vtmrJVsZm\n" +
+ "rMnt96KXdL+DtJicFFovTuu4ssf6lV2cIrKLbHBrcNuHjCAuuhsF/r3p5kewh2ZZFfkqfQ73\n" +
+ "T6XlHrAB7+jVKRPCavljLiiU/mWIZ5caadS1wDlf0Yoor76bIpr5Ifn6QiV7O1zOuboZwL4k\n" +
+ "QhLgRCCD2wn4BkeGccn4quAZSFvEpL4G9vjl5efeEI71WegKBIwEqL4w8eJCitufg1I47Bz7\n" +
+ "k8/tPa2R3qZmoS5pTW4ObX84i+nbTpuVanJ6BmaLqS/Imti8pOnu/+Nk57DYAqz/+PboZNqo\n" +
+ "wQ+d9/s7/ORYYxD085yJvTZdTsldaslunLviDXPE6WUVtt3XzNxCR7cUxNcIh8kOwbxPkwhF\n" +
+ "nLdqQVDHs8KWUb/mJPUkipwWxnwlb/nSjs+6T2P6ansxq4FNFQJeXVCLF3Mnc4ZDeC3GB/KK\n" +
+ "21Z3JYUAynAWuK5y4N3Ed3GUHhJjUReBvW6T+3MsgHapQbzvHvKTmueIuxa9nHXsUaxojV7V\n" +
+ "PNxp9TRvUX5KLJ+OPZsVhut32zpe0/HdSHeUVawdIun1chs73Gb67bZA0vnhirbASCStNnyB\n" +
+ "gTaw4o53N99N/11/i3zurK5bxqnAhEfe+H8cY5qwVOf3zksctxdjBO6OyfG7EyEbFLgxt3MK\n" +
+ "rRzwleobPeYBAp0Lotu+iBngfg9EcoC3kh7XTx2Kqc8OGISjRF7Vsf42AVWxNZc6Y1Z2kfcm\n" +
+ "zJil/iTM8sNSfbhOQ4HDA5Sn+WJXFRkz1fx+7O8bpikDBZAanEUDxO7gsn/VFezgIqJZsJGN\n" +
+ "4U2Y+C3TkRT7jxvYISFJtTr7KzQEJurFvjHUBjf+KcDc4J4CAQdDilAro4auJm9ji1k7+6dn\n" +
+ "rd5iX4Uu1GIs92wWbZ+jI7CwWDCG8GFwaPXa3+rfMgzWQLK5Z5papSZ3HTU2zEFNj4w51M5n\n" +
+ "4N9hmyZolUROZ/Md7gB5lI73EcAxVmbmpSCQ+tTarj3jIfzXU8gx3xrTx/IjhqYFX1jvzf1n\n" +
+ "Q6BNzyctkUAVpilUv8FFdCVl5qVhNHcOzzXGemxUNT/m5e/1P0dAk/dt3bgw1HfGvzvhoXG0\n" +
+ "19OMLCpf64P8uQbq53Dg6dlWXIQt8Bpg61x9z53kdD02AsK8LPy6H9O9HdQIgJX29o3BLwT5\n" +
+ "wMinuRUzgKPscuLOlHS9wCXbTJKa7mAK5gt4wf5Cpks6Ps7TYY2bq5AF0cUlHNnhiJ5XnbiA\n" +
+ "wB87rVdZLJaLHJRkw2P/Fd9xuEAHfmFqHkOHIF4g9dlPOV1nAzetM/B88QTWUta7W6uH9SrP\n" +
+ "wHkvN3D+Dri1KpAyGNauMJTXCl4iyF+9+oCD2IrXYo/imlGiNHvgoiBQeSnG//F5ZV4typ4u\n" +
+ "akQZu4NvOjI7fmkr4JW2w+hAo1zhNGCsEyl7jjU9x//xtfpKT2dZfg7JY6C2LlqyMbDXJFO0\n" +
+ "ru54525F7mHpJD1MG1a58G+bBhVGA1NxB1OSzmC9fdIpkFPsE01/bv0lcM22Shd6Y3jWW+U+\n" +
+ "4KupG6U7+RWwnNfQE8EwYAt3FLhHUz5SfdctalR8W2xG1HaUB911r7dX1/v9Hj617wYsgLwD\n" +
+ "rfQRJiQuMpleYjlsRGW9gonyH0k4WYHb4WbAB74QSkV4NYiqoYh5CRPzfpG3gCosNDw3pbil\n" +
+ "ZmA6MGB7x4EtviOMbyNbHy4SgLXpRxhOrBSFokvLseV9RsNW2xlXbS07zl1IFIo2GqZFvG7I\n" +
+ "RuvREl8D+83OMskSwKltdTIubJlLrFNPKbAXnXk4IIGRykhlkv+68zfP1hVqR2B7CTElHTvs\n" +
+ "VaLMtXKDPRvRae02HpiDCbzVKBMVlyttetXQSXg6d2YY9mT6O3ZlYri5aM47j1vwEnmgurSt\n" +
+ "hwoJF0mVCmbvNWR2JXLZ8IG8LP+xdkop7bBufL3Urt34iRucih0krQMp0txmIp3N9V8Bou5l\n" +
+ "Ce8Hc0J4uvcf5y3UHa28PydhK6XAJP8j4Lfkmkz0XrcXed4Z8psdsN+A78rJUHOsemcz1xmt\n" +
+ "r+qHdvDCW3SJ6vAS1NeaaKE7KepaWGFpIyA7uAegKvVKzSMigJZqF0DVhN6kVo675hBifJsz\n" +
+ "yZ+6douRnIqITYIrT0pF96O0D1totzUJ+zLTH+sOsrVusBDDNrad8ZX/YirSiS5vMDeyPKB5\n" +
+ "DJ6e0LgGhOyVigqNM/EBngFfk4OsKCHNi56KfQ3Egn0LAT7krK72KW2ml287CRJbnSYjLyIl\n" +
+ "PH6Alfa7wje4s48AVM2D2w7sAQl7PNr9fuOFcRnDIfjsWQUMAo/m8jsqKZYeBXy8RNXbeMdh\n" +
+ "KqieZIbWJhLJ85EGwcadWXNF60IeCa/ZXov0emYNMnN7uF1ZR5nIVUyDMV9MzG2RxcpTb1lO\n" +
+ "qaauedNmP0gI7l1OSCNz/Dt1KgzP38dg5YOi71RGrxYyz7Kva5NHiFhI3mWHJEdmRpnx142m\n" +
+ "Zy3MtpIPYoMWOxpyi9oEOPps1VvXxChVO1bePOh3CPdqzONsAXz4+P38R6MMEtiYQ3qOxv/F\n" +
+ "j+bE+UNAIyG2PAfKtaXOJ8rW8qLIMUP5aPL4/gkGDSRuvSBWpo4oWTfLwtI+FLkJSursuOha\n" +
+ "+96QakdwiSJ6p+yWaB/ex3AhULVsYWaBdV71daW9GHsa4tsPReoRcfHYHvXQy6LC6fppPiGV\n" +
+ "9iwhXbbfvuaQhn2Nb7B2j1ovG8wqtfyk+j+39asVFyNTaQiB0kA/KNu/NAi+ZNTtBaskvIjp\n" +
+ "4fFYn3pBV70OIiueCJbQTMzzCCqkPzQXtfcnvBLrDwwl4f8M59elOgPHCOBKOkEsgIf3SbNS\n" +
+ "2DreFkeMpcIed6vDDXIK1PIqmremOmSnJvoa4okRyu2SdXekQWknq2rpm20mySpeJd23/QXG\n" +
+ "gsNPPW8lVUYKDOY/YcjoxFzjRemhDZiivlN+4KBLkATO3x3sU/ZD1EOXSSCk2t8J6nzSCLPk\n" +
+ "JdLhaz/V7Lqt7ML5hmlYO30oF1wUS5U9Sx0vrWO62lvzj6FYiw75er57GfnF9n6RUl1VEwOk\n" +
+ "8NgYy+/XQkXLqExe50ueeKTICtEP0YwNekgKlrgKKwEtM2VGiyzSzZ0PL63yeNixOcVuh5zy\n" +
+ "WmsY1VgPzdZ2FwAzxtvBcYPEpkL0R5U1fmAhLjwAzd4jDG11Uo+bhpwTA+mf9KTlw6hwV83V\n" +
+ "ivNDJ+SXvLG+l2Bbu1dK+CLDB011U4lDV10EfvP+Op5keWlTY8nCozy3SLcm3LYkcnSB8aKg\n" +
+ "bRgOM2ZWO8gxxmYfub5OsOeTWoA8X1OEwOUgIA99KOu1p8PPr/tJuyuQd81KLUsdFUSlqmXs\n" +
+ "vHEpF03T825RTrmyFkusRXUCSgX3dvoxQ+Cgwkac2+Amrs3tz9FhVF3dZbgGuTXEIqb9pheB\n" +
+ "rlLGpmzJc4UXdsFZh2qSpFq5Of8aLgrXDYD2Z4S0oL2qvsjXF9rTTfqnWSTpbdKqj3AavDEO\n" +
+ "yFbhBRSNZhrI5/Fi96CWkx7JDk4boeX1REmRqQr11O+emU2eJq5em156zMBKaY5qeOX6kcyl\n" +
+ "BQrpnwq12Di+tVZ6IHamNEpobZYh7Om7l96FGsPgrCt8k7AAtsDRMbslBotcv+uzIGuiRC6N\n" +
+ "rmn6fLSbkp7M6dqDCXnQYzH1rIIIAhHH8t9kjUmv+QjdbTZV7UM/3mV/U+35i+dUi7uEoq4l\n" +
+ "pIORSDnGYj/mCvqa13pe+HKB3+dvr2G1n0Ouh8zg9weANoXwvRQ1/WQcrZLTS28woIssJgWM\n" +
+ "tRDeNQzpguB7B3GgoLf6N/4/3Nj+S9cPtnrgV3u1i7Cb3tMPOzpPmUjFtahJ09pt/CTnSKvC\n" +
+ "MZt9mu6B71hRdMb7mpwswVv50HwWtBVDIQ/nMwa1UX0iLmxZ0kRRYEzvsqyrcIPrxYzIwdgu\n" +
+ "eaoQXggEqcuEwT2k+rN5l8oBYoW6y4IHaFvuo63keiQRzGyDdtjxwvu7HaUyU0tJbcTBygcs\n" +
+ "TtBOMlVcxHGRrc2R5VD7lZTCWx86ROwI1j8WtpX94HuY+siOqLFonUiEurKss/4ehfbVSfcf\n" +
+ "TyHS4h+6lobZaFfoIkN5rW5iju5rzOWQxbtKfz0Fbl5bbs1qe4tJSHOG+Wsp16gP3W1qIB5L\n" +
+ "EgLc579ve9CFE57TBIR8zGsCRhDarWLRNk786yHSB11td0esF/9bMAA3RMMBe9UVSKIP/mdi\n" +
+ "L6C3XWzOzDWihUAs9VuDOWogl2+PZ0yMp21AjeRvo0afZYLYQwNng9fksmMC3qlD9Z0ssXX1\n" +
+ "109RZsDAo4jMOv4MZV95JTJqq5Ti2TvOf7FtyWubLkHxFTjoaoc5Vi3saXpM051if4pI1JDb\n" +
+ "WFmkxnOcxosVzcGvF36FGWuV3tx3BIPTY9p5Y3h43f8RLE2J0AmZVT9EBbWDEFYcHogjNGfT\n" +
+ "S7psFrm7FOBXwTmNQ70aUL/7sheXyftywrKl89In4E+Qfp7ARoq6hbrTUM4XP2q3Onyl6UVb\n" +
+ "qEZwvX8fzK3XACUcfyHXfunvn8NRGJk0EUgxf+GIXd4h76j8s0vyOhgTV9/m9uOs/SzPaoFg\n" +
+ "QAeiOgVWfaJTO3Ra7Lu931otDh1+e+Km/+kbx1cD55hNuHcS9wlU+ohwKD7d8jMXoLgQvXjz\n" +
+ "uKD/Zn3FMxyNgwhLJPYfD/tDqR6dRVWGQXbiibW7iieYD0IrHfsbUN2KQ/SraDEypcVSxXTh\n" +
+ "5OCJioK8C2p8fUuggfJTRhzUtVhnBwRA/eV44/b99Ifo8o52+n5g7eFo7KAnRAEAjRkHK+Gk\n" +
+ "Tm10mVPIl1Vsrz/j/NT+c6aRjq8RBIIgAEYUdOjDp3jyydz9k44SgJsSdnHnYlSeYwtsxaHI\n" +
+ "zf5Dm+cI324tx6Gdq0t7cxtrlEijUkegAVBSy+PF2kb4aeVc+GjosWjJ9r7yLzV2Lsz6j/Nl\n" +
+ "3vBXXdy9Ho4ZqOrlb7usA8ecbGxZ4wPdNysVRjiNJxDBvs2SV1BJ9HtQ3gUHek1KwrYRq4uF\n" +
+ "oxsmV2J5NV2e6mYCukTCvZyHpUA9ZIuhn/U4llnoOAaJXdgNou6FEUblyBe1QQ6FWmP0xcVq\n" +
+ "MnHKY0FpBYkGX8X6suyD6NdcCPU64wyqmmBX+hfmFEoEUjmSlpQ//au9voQPSUGBk1BHeyi1\n" +
+ "+oD7uy5xesBlcnenbmzAJVk1CWpdbvvII92ZAGLeQlK1JC/xiqvdkfQZ0ifkH56M838ZnPZJ\n" +
+ "rQCW9TB9Gv53QZ09x/P2b3VB+58X+UMxeYl2gU5dZIC1ZABOGihh0zLukCayCj3pgE5u2udu\n" +
+ "ZZjmNYvIx1khp02kWZxBl/R7mLn6sPJp8AJlHZvhg0eJFdnclPzveocADzfHPeTpFn+APBnz\n" +
+ "WPCGh5E04F5mujrGuKeRJaiRwYC8PEEIzrMSTCKthMOHts/xY3Ic26ULTMxPjI11fIZ11R8x\n" +
+ "54UYrrCZo8g7pWrxyBipkjbg85diLCQy5+rAxinVubo+gdSxnbpiVOMwRkY9lNt0/7Vy7c1Y\n" +
+ "yrTOG/2sU3DbopXfwf9JNkwP14Ba5pJXHy1yy3EsM9yR+KPc52dpz33m29BxGmOTTKIVVjov\n" +
+ "w1LFJkFkLuSX46/1bx4CTd/T8+EcGw+LRQYi8qw3xrXclhEJ171ZTp0XpE2ownDUcTwRiUjj\n" +
+ "x/E+pwFrwMnKTWt83ol+xWsU83N7w+DhoQ7jmi4CWK35J5JAh/mk5ofGNozJAmOdgk7xixbV\n" +
+ "uEkvOAGj9kwfFYtR7+N8ab69lAEFNbt/mBbd8ZfPudqi46DdMM94n26nFupuwRaNYEmZbYJf\n" +
+ "xZ1G0fCBgj9dLjw+ZlnC+nVnhkY7ZG9/WtHW1bIorRvJGnbk03MBzydKC150edeW8leqBp+v\n" +
+ "bp07c372rZHCC3G2J33xn36qlOJ3/zWf1wPbuJIrrF8tY9mcttpzzF2Qopb8oCVEECjaRlQU\n" +
+ "EkgAEb1xxIW0syvLQRuKSOKz0DU4kKUxoHbyN4PQ9lw3zYKQFTBh1CeuZqlYyuPuG0Pfj+Jh\n" +
+ "Lj6KVvRjdEgXG366qjEpa74cJ6rvqiTpbkBysvPzNfwA9SPNXUM3S+pAmOmaPwvcPsWVpevn\n" +
+ "/CYtksAPtKW7wO1wj/GmQ2kzDPZ5hKqVUkpvDPwZyLkWwLseiOX95g1/9Zz0UR0tRBnPzW0L\n" +
+ "pLttlajn3kv+zs99yQqjgrIVbuDj2kT222Q5FaiKYCaPGt6XlaHF7krMl6ojU4sLlYY5frME\n" +
+ "jKQBA79r75vHvDyRmgeBr3VstOI+su+pZCNEPlmYOzzWPOFJpZQEoz6wKhRlPMbxCufV79n3\n" +
+ "FmZFk+XIYaIXypz7TMaCiy2M1uzKD3ChuJc2SiKCLF4oGKMPG4iIcYWkKPuqxaBB4MHWSJw7\n" +
+ "FxU0SGBgQTQpuPZNNSygrHOcVThaIhsTIvPEbnYHG5FQvOHa5zuEEaep5Oj4PAy86vIj1SOp\n" +
+ "Y09gNrOt9PJgOvWIVCPYaAWUBONqx8B4SyqsccXpPVzrnTdN3qXlAhnBx6k9qwRCsttgASrx\n" +
+ "M+JSSiSakCBKpsHwuzceG8cG1NbWMYNQZbk8L3ojNVEqOG7awufWqOf1TNQv0A8a7cE6MLwy\n" +
+ "pCKrb+A/oawJq/pU4AUdEDvGKsnq771Ektk8uLXM9nxhM03vsT1Tv0CvIgtWC0DTnbT9DcpQ\n" +
+ "txMcv//WtOhZ2O2OXBhruf20KbdmbehBvYpFmLsfjVH5500MR38FaOfo7MTveaHfXIPK51TM\n" +
+ "OY1JLzLjZNtwrXDSLE2paTokynENryw60MRUnPRbqcImP3Ro5kFM/wQ4QmaHK/P8c9b+S5UY\n" +
+ "nrdDsFOCr65X+8/1DeX+jFHO8TGkZ5/+C3boM9sHEk59GTG8Ly8myYWDSEeNV1QgxAuFsRgi\n" +
+ "iz7aJ5QD2IfRIMGao++g8N7rYZg2GfprfWgBdOXV7A1CTCYmszdQDLxkLmd2uRUgrFOyJBeh\n" +
+ "1d4oAVem9rljEmhKhA9VfYR5GBxjKIauP8wUsE460dxh/Y5dx/UgTcoMM2EOozkZT07KemPY\n" +
+ "NIsLuVcIrpjx+4tIh9Jqxzj96IEf4R36sf0/mAi4Vr5i7ih7hNf9WqBQXJgmxn1jP9zyY/P5\n" +
+ "5Tj4eyAD+N7apNndwWvvWakk+RSSY1wZOs1/8qlNThl+Dx7xbQjXYGJ0l4Y1BrwmBsca/gMk\n" +
+ "hZ3KarNsb2ywJzL5ddbUr3CbSZuVVtzHQTeOEOTAkKgSYemFEwVADepFfP+N+CkQD9l/jPtb\n" +
+ "JtP3NxEne8OWrGZXDfH962jXVoVGo/n7LArJCk2eCBRLu2GeeI5U7t60+D3kvilKm3KzCNCG\n" +
+ "HoSmE3iOzRmCmzBam640WW4L2IEVrhdlYyIcLpHDev+FEIRF58KrTKR3+zsHncst8yHo2SJ+\n" +
+ "nPJor3ow4UxlPi9W5sciz0vqaBUw6GDI+4UssTWZXew5P9KnQXu27QCKFX174ol/Xj7MPK5G\n" +
+ "20QXGuHcUE6WeVbu/R046begqyWmfAIBSfsYJzh9lp/RKTwdbVd+eVI6Q96MXarLb16JdZom\n" +
+ "RCjaZKu96g6xl711JMHqP8ckYgghrqLvg67Fx7b0RmGCmX9d1UjxWWBkKRa1fcCyCNlzGdRD\n" +
+ "QjZ1/+SbAMDDnzuBjPE1r8RSfW1maD11JU506s6/N1U90oXe3jmgPovjyvPUo7Kfu7mdxc7c\n" +
+ "DAPdg2wTq79gcQ2dOKMHYUCBa9zqBAoZlXeDY8MCWBFB2oH1s4ZYVd1ZXOdo880T/QFNKmAt\n" +
+ "fkxFaXKwuVBbz832ntMngm3219LTs7dV5zQj7Ualn7XtHDUoptl14m/7K1Kvp47tW1TNdtNA\n" +
+ "5gcQsxY7enKaL1M/ymQPCB2Vtq88gAS1g81gudXWwSsgR/Ibd4chHXjX0AtLgSzQTO8njFcE\n" +
+ "tPqXcf6jxPH0hy55M1j+uTbCg0+eFHOsWvtak6DHmCXKJEVMtjDK8tu8Aqs3dszlC1vcc9wY\n" +
+ "7Tm6AJPb2NXD+Ly+/b3H2RsyUYU2GmgyKU9DTzm0Mso075CAg5Kijo0KmTbJtOdkqFdpd9rf\n" +
+ "PjA+c/Lt/VFyvyRkUppHyOmecwIqW+N/1gS3ZPWZv3CpO4kBL+I9VszGpe8OxiQBv8PVmWy2\n" +
+ "j5alpanRWLIxJUqxDrR++cwvY+zk06i/cj9PaA7ZS+IjXTgYCEJjJe5gLNikVYtXeh06biiZ\n" +
+ "NKV++wfi7BwbTk/zapIFl/aYPjricw+OioitAvtMlNy9TLfazmpMjepWGu6eyEmsJUu+PyIJ\n" +
+ "qLr4zp6nPL7NDkpC+d+NW+26UKJDFuFu/9G0zzzAFIqHCAtY4iVgHTQBeZjUXhyrkQMIlbhz\n" +
+ "/AmEAYHjNJMOcj/1fNp17bCJKsq2F9PVspsDAFWs5aM3xoiMdPZbacv/J1LnZzxqLTw4ayOq\n" +
+ "xISNpve4zdHKBaMXL711fnZ9Re5dvRxBtsikW76m/GYsZqtCXc4dAgwz5bJZTwCErKv2HEwA\n" +
+ "1czSfxIKtLZfgcn2OpUTu/pCXXwjUgrW76mMxx+Ew5lRzyQuZmh8FEWLVEfl6ZUfWF/uTp9Q\n" +
+ "2pdnrqZSfGa44NCqafNdCoBRA8+gBeAVRb4fDc3gdQMaJvgM/Q05BYk+x/7QSq33aPEteKH8\n" +
+ "WE10HHKeCF/Q72BfWs4fKAe9uLOdCSpxzzbW4ng+XcguAIsKW7BMGtH8mfr7Hx0VONL8o7Jr\n" +
+ "cE035LUC7EedjiZSXBXmIqoZ/WBd4IM3I9w0MCQqORg6eqsjcsYeioF0KEwyu8U64W88KqnV\n" +
+ "9UkUbf1pdO1vV31/r2FAfJL+zj+t2syzcDzaB931FtdFGb4mF9WBQMDAXZ/tRgyh7J/L/n/g\n" +
+ "zy6SMmsqB7XJelHjNXmi0o/Mz4GzneVzYomV5iwRctD6RP8dxhUDFgNdFZKepu5vke5Ly+ot\n" +
+ "mild9RHtQFL8tfoUVfAGdHTFByujfryb3agmM9Z1JccGa0qSXWUVOGLNWHDeBoKglSavkYGo\n" +
+ "60Ik/PVq5rn3Z9BraKsNEM1pa6IE6jyMJLmK3kxzs7F4AFpMV7fpzUMPhEPrQk+PTflWt+L8\n" +
+ "tttXzOOdeFeD0Vm8dqvmye8hhL5KwORhZ5Zj9vdKG9XZwxb3YL+o41AK1K2j5u2M/PzdpgQf\n" +
+ "oX0te214mUo+6MrYi5/2WG1Quzg4ZK3iyGDNKL8ezu380DyoYCXgzBkd3j4FxppxXVi/ARD7\n" +
+ "1mYABAPecrRIxDq9i8wf42ih+nm9M9n4CQ6LCTSLeH/uZPZeIGagFuIhHIeQe8DsQ2G9eH5p\n" +
+ "a7czcNHkWn755Uor3JcR3o+jQ9APDd43W57norYHXnRJomN+45PWIEhGyOMNozh3fZ3ySQ/C\n" +
+ "dDNiReFjxm1gWwSF1C58vZJl65nnup/r7ni6Ht2gia9yxA38EeVg5yELuthsxwTbbjk47VYv\n" +
+ "qRv3B5d2HcjfS0zg4mHjwSZLu+ia5GquBAOPKgotUkti0ianeA6SZ88owXsmqPV/fUwrwFkE\n" +
+ "qC+2TueF4hVHXXrArg+Y8HfFmNdHLFALhnCzpYKIvwAh+3ruQ6degZgl+LYW5gLPoPaZoy10\n" +
+ "c1oVkdvaIrk3WSp1wmg0X7MB5lKQwg/9pH9aZ5Fg66L/NmYHck5SKlnx/Snmc8bSMcBl1m/G\n" +
+ "QLzxOQaFVE7sTRw3hRiOos2mA/13y1pF0/yxT0sICyfy/8JtLXTFYSzIvr1YqdJWPXHRkC3d\n" +
+ "vaS3FJ7sS2zBxy/rqE5Ee7S+nkQH40BFgVBZ1Y1HC9h3PX1YyFgTT7DG884Na8mPbmkBtrw+\n" +
+ "TulraUhj9pk4Hl6sGlDyMYMJBXoNollqyVCzYAlrOWtVKroCYYo3OXV7Doa0cJBmfi6ZCDbB\n" +
+ "3g9DFd48Zn2jn93r7m3TZWlBLj+pGVieBddOhA/sV1RJcWWpojhlks2zjpPsnbg6PLo+w8Jl\n" +
+ "eJXii0rAir5oHxlGZjED7Q0vXfaaAv+eZsNHjzZrYty5sBI/5csCns/RaERoKRWnPjqAihFf\n" +
+ "G4R4n86fpR3qQXS2LCzUipcQ28qydzRGAkzJndFCLtyGhoaVkMaETK8kX/k2iuf471Xxyj0F\n" +
+ "CW8jabKPKeUCAtmGQhbwDFSrJvTmfa1QaUMRiIsNkU7wUnbdh1nEasT55GWVLZdbqWRmJzTj\n" +
+ "YB2jXv4/QN4Iie8EMRCaZyL3NLtFya/Vh2sUbqGsFw16YSZZK2E3fWfM94qcOPjHiJyPxoeT\n" +
+ "CaUdSGuSm+hTFLDoh+LZfwAJEeGMUDAZ/QgiwkunszCNI7FVEhyjcPEOjZ97ijODUhfO1r2T\n" +
+ "neGCYIoNVMzR4n0nGL8fALyxirV9F+dBZlIwCWPDZWwes+8A9buAJhoAZ7umJFYhCQiqXC+q\n" +
+ "ZY0glVYTnaS4fNQ8hTdi7SIN86WUAZiNm5LIlIUHk4ysOonExQ4VB6lCIHJrj+ucK4v4ZthK\n" +
+ "fP9w8kiqpm52fln2dqygroU73DxXsOcMCi90JqUaqAgKiXD82pYVJdGkuOFcsRVKyI3BaN4c\n" +
+ "eaJg61/PBbmGdgUUeubiuG82aGo6nxYGUa7GAD/VEKyyoyl3ba2PvhSXe+Fw5LdsKUs7GM+9\n" +
+ "YJ4gwNW9upKSDoA2ZXubC6XJ/fvAPlkDQj03sBM82GUB6Y2Y30ptrODa2RlQr4/aH1ny0Tt0\n" +
+ "fWv4rEivi8ZsKHwvAyThGW4TQi6Qa6j0d/eabNJgKHnRAPXPZPuYTeudcGg5tbaKn6OeZBoW\n" +
+ "7XzN36cOel46M+3ecBYcYkbGhtjqaMCWa/8iTA5KzXRgK0n29qZZC1sbFECkji4ObMQyjMTO\n" +
+ "sZZyFrJfLo9XUfhj2EoJZxTnF7rIznjSXhZsxN2KcOlqF4hvULDuql/2AiW7nRKa3LnlhYc9\n" +
+ "z5IJ7vyDU/0bMcu6T+bo3qdDNvMbBZ3EZQQzzQqqu0Egx/MEyyXo3UQqlhq2ueZ8VkOIpWjc\n" +
+ "x62QOVhkzn/rb0ASB98UDCMBoAHnu4Vt8HhNVj9gtYzIwsLhCWWZtXe1qslbKHG9RoANsfdk\n" +
+ "utZ/GMSuUXjoV9NyQFAomsKHf30bTAU4nYkxj3RJviAO9aQojVMRqUW/tjZ8k3hb0u1G6WWD\n" +
+ "Wa6IZAhj/WiQTnFkiiY/hSWiAKm0Er+prhJmlLuaGwMjtQIMOyNpjK80TxK1BAgJdGQnreVf\n" +
+ "8HnXz9dHAAiFGaloyC0XtjaadnXdKCW0Kq2DvyPEapfWgKoJnDc94dX80plKfwps/Dg09uoI\n" +
+ "h9ctSWicYy5q2gg/rmy6fBjw3rrv6P63Iu/Yr7d23G1lC8MukkUa8fTB89wZ3n0Eh5+SplvG\n" +
+ "YyVY7kPXxaLS9WPa7GdpEFFsdLoJ5aX0ipGibkTATFWdZEgSCGwCYUpMYJW5K+FvMtzztlCn\n" +
+ "e9ZafDobzb1UFcXTbMcQye2BlFFQl3+qq3AJQi7+OzP8tmprRws5z1WfnUr/8vvps5f1PxpW\n" +
+ "kWcQv1GNlq9ICkiX10mEOq1tVW+nVowf4tw+2y1f9VyuKnKYXWhwlfvuYWfqZG++nXZANhUO\n" +
+ "5CbHpk2IYVk9W1sMNOWq/yIoJPRcVCoNwjz2M0d/ugpAcr7RrPtdG2JWeMPaW2PVkt5UCUy7\n" +
+ "kLjx+EY+EJrSjJfTKNAwNP3zMCPohFuTL7P8zW/qWdoNdr1J6Jn5epvFXxg/4AFPM+LnchuU\n" +
+ "tDxIVNH9If++y1P7wIywzSZVjlCEk+ayGtADUqCvnzwAzlYFCoBEhwTgd+KlEZoyIQYeS8nY\n" +
+ "v6ZkgL0prWsIP5Ctg5cm8yZdXzfyWZ2VQf6qQD0amZ8HjMI1TjdWTtw+sJrTSn6Y/aA9vJeo\n" +
+ "ekNLOBvlYs9UkkxoB2/P+KlWpNswm8ykO4F+kETaPRIj+0Jquc/DFO8loLZJpomo9iyu2+BP\n" +
+ "Yi4cZjRnUNyo+aDK2DXM7wGm5cR+SYHkxPRRYmhrxDdJ8GA8Y3pw+KU+j5DeKgBInnfuZiHV\n" +
+ "sfxmjdhn3OFDW5NZ5Z9PE1M+qDxNkve22sJmiMGPmbBGP8L6icoMyikAJPrvNUll+7qpgt39\n" +
+ "eZE+P3vufj8yXa7STW781cPiWGb9b09nri2b81gwan++4n1UTWRUloMWj4m9TFFY/xw7/1lq\n" +
+ "hP09aPwIZa+sy5m4WP5wD4Hp3l8VcobFeWii2PoU7HCDRyM5J1BFAtPL9l4Mpx4CWTM5OYzB\n" +
+ "H0ihCM8sNWbQFCttc9w4Bh+vzWWpFJPDqikr50aWTEgXB7LwvTx6LiKZBV9cF/MDx/8kn9qN\n" +
+ "geqJobwdsQB8zjVzmLsSEatDHkwpn9owhFV8l+BxavMvBIMGx1lc5zOQ2mXV/n+FVDnEo9fk\n" +
+ "rurpjd663byXVgGtot3dWyr3tUjvARqNjyK7uRUT8O5mK3yDbmtE6+Gtwvwemm7nD0btP1c8\n" +
+ "pY7OaE6MXWioOnzhLH/5spqrbGGV/aP6MeQ+HNKtR4Jx5ujIp/0dVKIvRJ9g+jXSfC8o4j7N\n" +
+ "zw0/+uDrV3Lx0/6IuovFeTYLyKLyuyclVv5hnNZ7msql4Ld/+2tekKGR9Li2tLiVFJ3yfOvy\n" +
+ "YpxDMi//6FgB5tXubPRDLKCP93qurNaCpEICApW5m8NpWvbYcoSSdZKM3YNNaevmP57XVO4D\n" +
+ "IXn2H/7yvA1NdmtFkOFKkkXvhO0JnrNlEa6NMx58WUm+7owtUEv7XT22S1aOpZq4sPky2yxD\n" +
+ "gwXqB5ygbnKbNYQb+mkkLIjiTecgFM72gKmtLDz6huaNvnpxK4uPo0QEqdRUhwD55fcoYiya\n" +
+ "0RsQhNjUAQXC5056WzDuz5xRahSQ2PbrT04pI4hzrlvOdJssi8TtKiL5UFjD67pwIcbmNnps\n" +
+ "1RXo4g2O1nef5/WHe048ZaPdV/pvBdTiEp3bjKFTlD35dUwFcOmq5+W964BmljjQYu/6rGdG\n" +
+ "3Sby2g/B+RCtEz7NB4GA3/5ah7SoJ0cimcA2HRF71Pa5T0cIkyEORSCA9pXrXi3pDz0RrqRQ\n" +
+ "4MsFEiTnJvl7K8MVRfGhVpZSxyvfC1WY5dZ760HKv+fBJAKPZywaIT7wg3Ka58t38u5ZiKFc\n" +
+ "mGzN6M4mvgKTG8EMKgjCcFc9v1IdkWC9vijufVcxfW3rFkPNnakWL0td9qHKq3/mlpxVpBY5\n" +
+ "aDGpdCyzIAmshRa7zXt3LzVWSLmnCzW3aNWd/eLmjLfA05e09lE5ZRF4lOAU5bIC0EB3+iLS\n" +
+ "OSfPE4APylT+7cMlkp/CdBbAfio4xrJbkvSgqwESXWisFgZ9Zih1b/APM68woGmpf5aCY2Wy\n" +
+ "0MqzuOpHerXyh/O8nai1zTyDz0Nqe5Z60ITQR98tV2DHsQDazPSU4Jp1zAA4QuW67i2xps1p\n" +
+ "g0IlOREYGCry/mCh2SPX79USHOq3trmd7OVCaaWHSzzlCuVjm3FCplHq+11/sAw9c7Y9lriS\n" +
+ "zp5li+GbZ1ZVWt38XrVoUkGrexy5Im09C0zNNYMNMMehHkLGXhDBAmBwDWgw8xP+SP8vBa6j\n" +
+ "jCI9AB3AOr7kDL688ts8B+8oYeY9/2UiH7HA7Lb9Lpz06ifrz0/Ojt535D/WPqvJj3r3NgXT\n" +
+ "f0mrcEuuFUfcjnRyKtPQevdgzX5ZRHsvyijAFAt9yUt9AlcTiZOtJerz0RIsTgq8T2tsp0Mx\n" +
+ "vtHsZWqgzKfnW2SGiFhi2aIPZgAZA5FKq5zwT6sWJsaN3iyzqU+4reKwYrx6ZKNu4fT93y23\n" +
+ "Via+Z9s4dy3JFG8hrIY06WG+9XOyFqIoccDiwFfqHGf45mAjuuy9x8SQ5eMWe57tVSFUWxwo\n" +
+ "7zDT80Lh6wWc3cPomT5OWz379x2WXmO4MxXdrx9AKBT2tUXF8aCDbtx3IhG4QRtjQ0STbkjV\n" +
+ "ftV1iQfzx9invUZlUWJOBYYO+ZvnJ8bsS9+ZlNShwxZD3Eq5RfGAyEIF4W+PS2xpZuQxGySZ\n" +
+ "C9iaxZjBGjWJ1N8XwD0c+Vsuyavzgfv3ns7dKSiarIr4znXJaBhS4kJaq7buQ7zVf1iHySHj\n" +
+ "MxkhErY0oZ8DJTxKJDuOYPfW6GtinXZpGXE3KMy1FDXUSH3RC1DdnNtQBpsbVxEjzaYD8Gzj\n" +
+ "d/rWJzE1qtTK/OwlHwZyN/5XDN4Rul76dZbqC5En2jcVo3wlh0wiOQMk5yjycX0exzEJMlU7\n" +
+ "JTlcwkR6zY/Pjgd6l6dvATedIQbS5gxeu7f0ePCbN9coIAEJF+/LtRSeONypYb0MlKxEfena\n" +
+ "LR4XQ4kH3q0ed8jl9E9pXmGJKzEL9RuXiRZw455wx3J/f8ywNWrQ4JWdXKVklTLR1QrBRPSo\n" +
+ "K0qKkC4thqs3dyxgDdywKKq/Yz5pa1KPbp6RL6Pof348nmDbbj8QG59agAaMoRrZnqJmB4DK\n" +
+ "IS2iu+ES6KSmauUTlI+ZRV3HBj5rwu3QDrhQb8w6uC3TY33RcYlFP3MVaHQnlG76tMxkHQ59\n" +
+ "E8WL1dtVTzhOhETiZJZeAgzCqKc9L6aEtHvWZdqnUoWDV0O4UUDMjpNu2o8xYH9S7cFDbrWV\n" +
+ "coBYOkk8H0B4V1toNM8IMSSGs38G0hO0aK9LHyrGfEDO6HCF4qt8K1jcvbZmbUGUvB42a1Hu\n" +
+ "A02aNM7hRsnEOpRCp0l30VSlhdB3tgb6mI1LvNXe3pwSd61Hr+DIx8xDZ0cGA+b2DP7hnYp/\n" +
+ "Z57jk2qNwTYl3Yb/K+QTiv7AN08YDg5pcmkwfR/wuOrwqQp/remhQXUivUu13pMik8YYlwMc\n" +
+ "x0r4r8EmloRkiU8OuIv05EueMspLJItIEnXxchN6BuXdmB1G9C8NN9jl4T2xsmaE0f1vMPRI\n" +
+ "5OOHmPdwoRvGC6qWkY2rpY34haRyTAWrDhELca3kIgIVgsvIikbTkQvhY2+2mTrtlVSDcYhk\n" +
+ "ngvRPIT9Q0je0IK3+3XLog+uLQykqtOYKfsA3hfAAKgnghIQjwx9TO5ys1yR7AIGeYj0fOjI\n" +
+ "+hhwEgeUe3fttRe10FGXei10Z62TXiF1skEL7odnWMpkQ4vN2n4H7LdG+dFkVU1cgJXI16cP\n" +
+ "BKrrknaRXmURgVrJk0sItjxKGzU0OG/U9amKT39LXTc6x8hhAOwNeJLUksGCQpdjsV5XBnw9\n" +
+ "5+1ekWc+MPQK+SSgxWGaNfDPw1IxEWehrIAKjRqFhlsGLY/wbgM0Y9g4XugGMey/Ibbzdvuz\n" +
+ "M6HdffYtEHdzoAAh5CEo1g7jQvzyPHVdEwhxDxV/MmcWB+B3D2AHvgE3EDealVbtp0sOBcKj\n" +
+ "NGXbOPnaI2YzkKKC6Z2DeQBBOLzz3saaoSAi6yy4b+xxK3bcEi64nGZGJKuNi3MpCBL4v71W\n" +
+ "7eUFRwKYyaHhLY3FKwTOs2paQysjDc2NIlOBSrJLfo1wgmU9sQJx7BQdkUkPZn+p7GdfFUoN\n" +
+ "k4akjIewzRwKzPTmIPVhzb4HrmbFp3EtxPlKF7Afzt+6DC1FhzSHVqMyTOL37WBCa+Qthw6l\n" +
+ "lLRtXnwAErLnnaom2qgWl24HFkvaEKu/X98eQqcf/mGcjKgHII6zfdkdHvS+lMlbdo7ATzC4\n" +
+ "Dhx50456H4Q0/4CZ5VE91q1sGglKl7o3KdFiWu+WmgKfJo6/Q9BsHNluLxRPJMA2qEv7/e7o\n" +
+ "JeJ6HBYHHdwB+1DU9VnNXdk8d1SlAGyBjVzE3s28bHRe4pLmwCh3CIbwiNn0NCXavMTMnA7H\n" +
+ "RbrW6eHdJE0AwWs1EX+SPi4PzhFkT5k8iQxQqHbRiKAoqnD5rbhqxifapAf2SA0LNrlbvGY8\n" +
+ "22kE11mwbu3QXvbhD7Ji/1U6E+z/DpYFz9xeXGdcZAFEbo3XfuHh7LQ3FKENTKFQQhVnuX9+\n" +
+ "a301TXP6se2nBIIgALj+F1K0JvkeZE0ZxpXrM+5U3lhSBmPWT8xNBJ7c+EiJtGEhOyQVUZMR\n" +
+ "mOgMJ8sWfEPHQFpgFiRPtw3/Od4vK5IFpQUPqQWCU5wZrp9qrxlwcQAPu+VG2QFbudaIKXJk\n" +
+ "udzf8ltnEc2bjGFh2opSvUsQgh0kOSTnLLVAov9fIf3qKUVeKFcG2xpFIl1BlelOTmKAU7rH\n" +
+ "diRY9ujoLTvkIg/9o+rk83GmPHR3xz3i6RSrOGeiuLZ55PffPNc7aju38GYw0PV02E7Vex6X\n" +
+ "dtmimBHav6a2WvhFZhFzG4O0jr26UKDXYDVHKb1at4ymDgiQ34KAZxT0ZxJmeNAq/KZeXXfZ\n" +
+ "0D7hZ/xhS1+3CohTQM/fG8P6lWa/ohDJirS3tjFqbm4VSjqJaOZqMmTM6SgIeTvypH52i/ZM\n" +
+ "caYsH+/BcGn2W0nv1ZHcjmuMHkrQ5UfFH4AqR99LYahcAMFYE87unzVln/ljrM2hUCkzQjBd\n" +
+ "qeR5Kgfsstnc0O0dcGdmPTRHgJDoZdQzRFN2M6CbwpHl9OO4EselflWw6Z5QwCzBkC/3Hbmp\n" +
+ "wBLZBE45JFiiIqkrxT0t5BAxEYGGyv/JSTygvY6TrsvCoH4AFVIQTi3gsy2/TdcEFU8zwrQ2\n" +
+ "5Pui05SlMlfcccuoRTMMH3qqhuzbuQMz4JgLe7UdIvcQkPGIUdUmqliXOSd1VfSjhVIrqxJe\n" +
+ "4PxKcWNUdGbstdujHh+/KvH4AauRpn9pHw/P/verYdaFFtHpSpADHahd23SGdeWVuhvBGCV1\n" +
+ "/AUb6AoGFXU+m5TV8J+DLH//yvYfzu2ajmTWHpo85/CSnxhdhwF5MWQ2mdIq/x8TC8MwRTDv\n" +
+ "iXs6QCKTGlmSieaQnV1DS6y3np1rJvZodA2/zR6CMNvXoU/R+9aYVVA8jBI4eVeMghn6vp4e\n" +
+ "E+QAlJNU9ji1xLKMzPbWJ5tXryiB+AOF/hH1U31xfFEL/XzDTE+v4rCBpi7xgYLl6CYDIziN\n" +
+ "7AJuq9RdHhLimkxqT7dYH+rPE6BUgoS3wUi1KKy3IfRESuJ3UBPitkCaUvWeE3uZrK40vj81\n" +
+ "VDC3GnXWNXxSRAkx67hi5CBTuWlhFhIrVs9VzTiODlmlf1ln/AcCfwV/xg0QQ0NcuVj6s3a4\n" +
+ "qUm//jigFtxx+AFymf+1ABprCVxD05+eKvH010FgqX5+QPUre2ikKmh9/Cmi5/P6swC1AQgE\n" +
+ "ykXGHatZBjewwuegFIa4fMlqOwQn3VG/JzpsXiQxL5cDpWlT4e58RE8GI9bKJeL5c0ceIxDN\n" +
+ "qQnMgf5HCIUeEhPqskz8Q7nr5T5BRcxQ1oVaVkhbvCAYYJyGE2PgxZbwGcO1qgHVFahWMJnK\n" +
+ "EE2vIcig1OZ4zRdld+3zOdk6q4HExzr/YxllZjFasjr99sDXRnmVTbFQ4qdCwAKtEXfx6dx7\n" +
+ "MnhQ/B/UF3hwl8ODl8uqAu7IhWEYr5LlsOD2rd+T9WiBJW5dyLoBLbhvuVyJzw3dnajipT51\n" +
+ "bDNxbsFft2X1bje54joCjpGpcuIEGntZpU65X4OQiv/cdXI4nV9LaDFvyCsqJ2xQohXSIt2Z\n" +
+ "/pDxDT/ohuCFJDVGItjOcequa/CFwpC+/kH70Pg/84dAFPMug/WgAoIe+cgJ1q5NZSPIBu69\n" +
+ "1bRxZvvaG9cMa/Bs3KLzjWCDzH3zRDUWx+vD0M8gEPjxzF2hFVnwslVPIawHR45fRV3NdDAS\n" +
+ "DMHwVtj4xbFG94OHnBGtEnAH3LTa7dM5CcHZEamHWqnVbASuQkZuiU1xrZEHqtNlNZ+rkO6a\n" +
+ "0m6izOJtlf2Mqw02tIsd0gMD03UOtHC1uie+ZcIiO1bFw6kEoSh9BB3jxt2G2QHf7nJA8o5x\n" +
+ "tJO5Q43AwUIh1evygnVDYSCNtlQ8R2wdCQ6QfUVMhfMxqGajA+SXsCHXPI4YrXGQTawussIN\n" +
+ "E0g63Q/oBxmq+XwarM0+cILrEoq6VfMzz6t5i1DQv/jVmGBlhuKw7V7XbxZV7QKjXhsAhDXq\n" +
+ "sFYxwI/4/AEiPMv2s/p2BNa7WbkgqHrQC3QHrVzwXQglO0x3+iqqSoR2qL0H6TF4QazQiXig\n" +
+ "i3dBIBS8JhdkJFEXY1ylbfSF3xl4DsDHoxHl8KZGYVcH6sThi5aumQLzYxDcstjU26agaSwp\n" +
+ "Uy0HcsZfCK2HVJfBgGJakiEqmjayZKryijz41vqgqqPj1A818TbjUE+SlewGHnnzJY6xDStb\n" +
+ "x2i/Mmu5bvymyiFWaQKKPM5/fOwkUbSO4I+P7JwqFYOIgtuEdKbf2SM9nzatn4FRSCzK9O/E\n" +
+ "pQb5hQCwSONawekPvYfVWHj3WKnUjzuWaUGvCj+h7x1NOgvUvf3P/VrFyUSXQS0zcCiixAJc\n" +
+ "s2S4tbfafNuYSUsSG7DWcastrLHWq8mUkKW/4J/ENONFjzmuXt/iXJt8vSrhWzIx2dMwUYcj\n" +
+ "/9BhwSjTVn4NmMKagxHiOXxwyFer6GbLylVP9+fXXyCt/fODm1lRBPpAdL0ycfrs90GZ1C6q\n" +
+ "gGUvbGHhXlzUmTE8pI5Ao7+m1rm2t2NWWH7IgSK1XggHr9TqGToebgHHbT+peP+7rj50EU0N\n" +
+ "lvGzbpVdoDx8Aj0k4OKDcggHR8vaw8bkuSTNn0yrGN2OlhNrZjzvy1QtH0b1kcVvrVnzJkTs\n" +
+ "gERrq37zfYrZ3nOYegLR1dvuvnl4LScLBVmLzis12XUFoQZ72NMsS4cEVhREkaKkbYrb5kWk\n" +
+ "/nh0ATDW9lC3/yvo/tS8MWsE/MHt5Bhnfb0zH8mYeBIaotjE64S1xwXLr6C+BqO73PlfCeul\n" +
+ "7c7BKZlO8yiQxTPQ1RbWaXqiNT1o/ztvVSYtwFZGWfIdwG7pyG+ewF5aQj2iyxQBiszR0JOL\n" +
+ "KODKBVjKiFqyBjRZ6o9R9orB553QhKbuVC4+vBaGh+P3UwQxlvs3rYE8zInMafcEoSTCoRh3\n" +
+ "x2pFg+mieOPeCXQ2wTSSEd3aF0w7dCNMUv5JKKNPnGgn67sg+2e3s0HoHg6xHvNZ+7FfhDJi\n" +
+ "KydDxAPW8I4f0A6hiQayPN4BavHVIfg5JsAwMkTNbdUBvrTxVLtN1089bPPT3MSEEKf1hNjX\n" +
+ "gb8h7Rgd6zGOv4ovWQHyTncB4d0L4ycP0cBgqi7wh3qhc+FeC9+PCa7DtN6rmxC3knSVOXnY\n" +
+ "8rnDnyA3WN1WgwY/eg0geejJgZglwJU4kb6YpkC3jrZfxgnRETwxmW0ezsHV3jxfTSdntvjl\n" +
+ "EMnrkZTvRX1WWWbjNfGCX6H1qwO0IAWK8PJ6rt1ESOaFGOAQW0d2V2kZpVn/RyuzWtj9VDhf\n" +
+ "ZnpJfh6t46AtbX1eVQx+iE5LhEzxE9keI2vVHTm3m3TVincByj+M7iXz31WNqwPHe11wUgY7\n" +
+ "10q/l6ZcfuJJpv1k+GAbEqkOyMcc5O8TEuGdaVlntU2GFUw56oBYaXuaF5EZ8iu+YnBOXorP\n" +
+ "Byxc+xGM18X5E00NisCWi+Tp6NbK//ig/FHIQDne8qxgBsF2RBiDfBm7TH2i/g49K+FwTEtQ\n" +
+ "dx3Liv5WY/KqTfoK0utGmTt8/HOQmchPrRRv4UaREKFoV6Vq2lBnNsI/SjbJ5E1h4bLNIF5t\n" +
+ "PnxOH9SGzvm3t5VRkyVtWLHn/U92j4mGelwNs0+Su2R3qet6Tjn0NpZI6rkOMN6t/e2+Q/5s\n" +
+ "Ll54iPUt0U7JUiS8ltRQW9pOFLhWnJNImAkHF9CT6ka/QMFk3Q0Gt7RiJDXzHcY3AHmdJ9KU\n" +
+ "b6m8nth3jpLjfbtf0nWNV6MqrsRyPNXpx/Eh6Uu7S+FUAIS+uk9ks6vl6yxStTqFBofoZQqK\n" +
+ "qfTB5MJi+G9XA31vuuYg6V5kyjxuJ2LIYgDuO7tX6Six10eJvjMHqFTdXUekU8JYeucN6o4k\n" +
+ "D0MF0VzTHW3BRCQNJn5w9xAx8KfxB98OArnJjx8KvJ1SQFm4JqpB80bIfC1TIBaArBlN1g7k\n" +
+ "FPsb+JM7YMXrH6Y47u+1ThnmXxZwzsiPwRfD8NcNDGZGcwJvKQdGyd5IMS1db4r8PSMDjB83\n" +
+ "4v+9VOesOI68XrxFvYF49xozS7Uda0lGr3Pz7LFkZTeX+32BfYyMojy7+DrOyUFmUnaaxWpT\n" +
+ "wMp3V6Cj8pm8yGa8OW/ZidqBpMs9cOMy0+ObPvQz5x9p2Fb2yZ833xakHB2pLyNUqrsVzlvW\n" +
+ "CZo2AMGHFZ4Oz58YYWEao0QXWMtRkAEVawYcmkfVocqvuVvVWzh1Z19VujPjsD6pwRbnAGnH\n" +
+ "Gkha1w7GIRsIHvBC+zKJVnPO5VF8O9Vj7cgTgHK529o+w6OgjKrjubcPqopQgSwWAzVS42Xb\n" +
+ "FaFTvYzcdnB5te41pwy7sn3wDQq7fGXFvLfmFJQ3bWlXbc6IXwH6P0DAK8GKU/bp6dv7O+XB\n" +
+ "OBofFA6NRLCbUcBU61GsuNpVIltfLjI90CaGMGwRxGLgpfbTUxNzMBR4qn7E6wb66DR6iQ4Q\n" +
+ "4FyO5TaDHwkZmEgdr2yDWQJx7otQdEc1Gtho3rscsgP3n8wEfGzWCnWLvI4amlpF+lKL8x/I\n" +
+ "lgGUIQgbA/uHzuelF7zxhpXBVYtgiRGLCXkE25foYsTMHXvv51wyrJ+6agLd7ARNL6DVGP3l\n" +
+ "I15G8+ZTwq7ypHdab1IhTLyASjnBZZmPUjGVjC/lCDgc1smm3fFv9ORGpwpdrte9eL3X2Vkf\n" +
+ "D6K4yHuyoVdZN1Br5i1yV1jo884IT+mXgL2CvwONs/flu6cSI91qXgTtXB7m7PzQXARwS+XG\n" +
+ "UTcMaLlxq3Wy04/cg0hM4CiMSQbTcV1vnP1OetmvKXr/qaBhe5guawCfKlJu1vCPUng7Ff2h\n" +
+ "bKi97D/D1x1/ScA8+W5RdxuRLWAE8JFDMA7jHxOYrX21MTTra55pGa/V6i3fJ5NLNAR1aa+z\n" +
+ "RHDNss+/vTdiDYV+ZHkOST+rZE6SAC8KfMZfxrpIyBhaPMB2mZ+iOGi/H4vJS+q/X0COPyU5\n" +
+ "2oEMLyLUnlq+yu9kNskTJNRcn9UCCXEMzUut9/I9dN8JOXxPjF5uHZww0M7qC8DJaLa3tP2P\n" +
+ "QwRADNK1UlDFKmtkg0ZdjeYGpY5Um6sOT4zz7v6TCFLmTCXNiPlrPccvaySLANU3jbQnlJ1E\n" +
+ "ed63D1G+B7TO9IA8cJQZ3Px/H05Wv2ucAc3/rcpumpXRN1RfPKn+XNoglKcd/tM/oJwdCoNI\n" +
+ "iozf5SJmpBYBbOix/AJdb9BkD8shT2IQCevY/wjYJJfmLYA/kVyVDrzJwTYX+9EvgaiF5oNI\n" +
+ "1fBZ55iHr2tG6AdoumK8NpsxxFrOhB+uhl/BfO/YseuGi04rrlfZTn9+cA8vRB0VvFEu5It4\n" +
+ "YVVg4nVsGyoJTCalj/YJb0ZHQzb2Z9qA6l9wRx072k9kUT+iODt/TFmn+D1vVi/YV2ivX79K\n" +
+ "yTyyDprqv+iixouNXwbkqGNWSK6m8DSXfVeyK1vnDTMMUaHwDL3KhDGKIYqU2f1BIiLHNSQw\n" +
+ "XlTIxKYgMShdxCZKxXqPLebDBdkMUGKjIUV+oCryrqlaCMG8nRACZos9rmtTokXdkJA+PXve\n" +
+ "UlqiwJZyMSBjk9qdR+t/Sh0IxUXF0eAtrngJffVEgfQCXdtfUS8YqZeONpDQIYtfCfzjRuoD\n" +
+ "oN1t+FpGXp7M3t0E+CT9ImT1KQsnLAxsqeoJu2NGZVSFXRuba8l2c2tlWfq8o3dNiznWoMxC\n" +
+ "i0B+JLLBIhmzhz2pQOFWHg1FgrKhcqqlm4nnA9scFwP04Ly2uZmpvIBXyf126NMkXky24+mG\n" +
+ "D+BglZabg8au7Ndx0ROpQj6BDc8B7/MZWxDXrNtMYiYgqe1pzAZpK8CketC15t/x7l82BYUX\n" +
+ "hwpAn+Nd760mJjqhC+gzQahH09GqmjDLOe+v13KYUGmCnSEg4+FLXfiN1z9mY9St3DELfjC2\n" +
+ "m+cW3XupwZ8OQ8zErkjzW4zjsvQ4Xhz/6pmpEc3t7OJ1BMc6NhSHIYp98S9615OrfxEPPP6E\n" +
+ "QhR8d8nw0Yzi59bFFsEYRvI0ODqRfQeaM5jgqBooCNrV+KI3qvOmh2CgWg1ma+Verp8VvZNq\n" +
+ "GBnmjw3qQJC2PGGc5ioIVZNbbeZRPXzhrlbk88WaYIgUJ72gsk0Kba3diSqJJ1BuUVBJhakX\n" +
+ "Tx4qxv/seRggUgO3ell5E0e3a5xIEr/DycYI44i6LcYEn1eTCGtfuKHhcKv66nF+8iabaowN\n" +
+ "JIc8fhXO+vXK/tEBHC457Mskn5vSiAeZpWqQHQ8h2xpPTbmPnpYvgSxmmQZBpwv4R8s8PL3i\n" +
+ "XE8gtTyC4/fp8HN6WqG8Zq7wXnrdxyzA8Dw555oRnuJ+WvUXgk19rFm9VdAcXG6vwBhLYMcJ\n" +
+ "ZygnaYviqDmUldjCSZRhEWQNEeg0Xu4a4+lln7W9YeZkvQ3zj692pM5/bxfhRc2KvpfM/zNo\n" +
+ "qCP9ebJbn1vc9nFDDSfK6XAf5XH/7JoEZsXiLC7NN7R9x6RK3Rotupg1qMGtQn/FhJU7vscT\n" +
+ "JMBDfL4acoCpiII/hX54kN1nfQlPxEiVhco7FH3ZcuZ9FFpjy+uIyrsdH4QlyLXWsPq0Dajn\n" +
+ "CAi8om64U7GLayL+Lli72nHt8KWPxrCpDgVkYd1sNp4/QgBNfvsD8dOjAbCe4JzWz5Pr6k2Z\n" +
+ "OGnbXbQbptA/2r8ey/8AMHgUCU7VBZagsrYquYYskylXgtIl4QAoSieXjbsoTKRSjEs4KzUn\n" +
+ "v1C/dA0arWK4e2makIWFVrJH9OmLq7fF4nXsvKwjaz65k2rcHUCg7mQGHC07/9NyQnqE0UUx\n" +
+ "knlHKYvnRu7b5SjLBh6JqN0sbaDdh8vvmZS+zR1TlQ+Uq/ajfpWr1QPfqrgXooTI0KzVJHpw\n" +
+ "ske2e8072lsEW3sIP6WTdv4Q6vJJev7vAKmBOUMLWxtXK56/lH9H+mYlxNpi13NLpN0cNhk6\n" +
+ "1C8buigM8CNd1ePQyxbzAEbVqjP0bMDMI3PxuQBCF6MbDr2/wG6bed/qyYbRYOo4feW0Nsao\n" +
+ "itmFy7s9ZBPXynpAvDqKxSrhW3BNBQIA82KGohQKpRXhi4dr87LJTtu39bin6WLBXredeCH0\n" +
+ "5Jr5jJEdABo7Inkf+wR16svhzJzzpLAuEl8MOUDdZ6PDJS5B2Vnw1zuFbMedhbHz3EWAOkGL\n" +
+ "d35zyMy8TEudBq5lxplIZ0SjPEaJz6wuc2E1Mil2VFIYP5TAPWjFpgr4DB5LIi88aYz74/xd\n" +
+ "lX2VLkWJKuWcXnTaeff46PoXSTpQ+5AGir0fHD2FEhzT5AUx3FF0BKdvXqxeT1QBiYKDMR3p\n" +
+ "MPC0X+3efqz6wAeriiLpIOPauTBtHaaSkSjqOJtoVGkW48Anv7pyMsfwH1U8ayUDmE/6Rz7p\n" +
+ "jKiowjP2aXnGiqIjV18O6zLpW7QHFpvyylda6DrArFbBMIItElZvBmDLafqt/iOT4XKOA92U\n" +
+ "sG3KonD+ZSteuS38MPt4jxYNnnxyBdh5UIpvZ5UhVeHPTt7sjAROdyJnSvuhBUEv/OgDunQl\n" +
+ "+2gsdjn/sTvSCvg2uiXkBIxEm2rXNByEXAzt8eqlNqiNCNN3Z+3Itb8VNQFIWV22BGZWl/+3\n" +
+ "wN2uj/QfcDel5oi22wbhjNkxVfR0BmTefHIuK6yfxE0Gc/om86JLnjT1VaaXYjX8RCd/XRfo\n" +
+ "mkExlaP/JWqK4gpNWStrGHnhN1eqRQiCibAWk2ykzwe0q/QFWcYz9TNGEqbc7tZTg17vSVkX\n" +
+ "+O3FWnofEa4qV8rHBrAGL1mUjYZd8A9LUSQN4K7F+McPSE1vgzRM2146WExBEyx0n7YEtAPJ\n" +
+ "qSrpjQnz5H5TVUYgA7rs0CjQ7nHnGSzyxK+t3GUj4EMzljQO5zwsfBwQTPORvn9Skw46sUx+\n" +
+ "fLbGKt5Fo0RQiUoW6jHMmQ4d/76sBm2PiHfGLAHz5ldeeFvM9MFl1+aMjmZDjhxQkW2uNvd6\n" +
+ "iAPJJvmVf7szHloFgt8Rj2MlBMCnSUnv5cH2RTxRVrBKOuJ9sXHJWjyIABm1n4zYoI8veTzi\n" +
+ "vSjZz0zWaJpYhDC8XB3qaR3Oj29zZiUmCuVND84EVogig7sfjiRDVAmAfvEW21wTAdMwa7MH\n" +
+ "GbmgQs2dzFxfnLpsF8602HEb+41X4W4emzymxQ69YjCjpho27bNo7GM+Im/ye7afFb9dbkKB\n" +
+ "V7f1tn4fv1FkS4fyBqVx+v35rYqjOQFoA9jnFjkx/qwqG9z3MW8D5/zvlaQ7iw8sy9Vki0J3\n" +
+ "E8ge+GvtMaklCAmLsU1OSi5VM46R7h8KlJ9FEnd/ti3QA7DHxrko0gsZXna+fBVGs/wx9dLp\n" +
+ "ZKIrJy35Hi1Jz6ScpFeX3yGT3qo5WKfmLzTxDpVbZ7O06+uidndAsEO6LIEC4s+iTrylvC+4\n" +
+ "RhFt4ECZ0uqP+aOmM/l69K1RLGEAtwvZeo2/3XDyTkEmpa3g9PZtuSrN5QIQk9YKK/JdrskG\n" +
+ "oj7VqUmy3UbWam1xXaPzOF0nU8loT7ibsscCdAp9ePrn8wAJONMOPfIrcOxe+itqALWDl9OS\n" +
+ "tmR+nbLdV/pxDarCeEJphYgNxgLdKwOpN3BlB1EemKkOSwedqBGupAsszVw7uuc15hfOY5z9\n" +
+ "TV6OdnbG8Ne4JML7Iy48hcFG5i1yNk7up0xzvIrqPPOmZiBR5+0d7b5oByfHZBUy6+19ok6K\n" +
+ "0q4bddPWpNIyEErsddXcEoL1Iic2zkAPYB/IbqSyKv5aub1M/kqwvluw4FzZ0dDpJHewrO9/\n" +
+ "8uWwRhlmgHSCTqkJpUx8U2GrmX+Rn992cBFkuKoV+KceuBxwLsg+uG5c1Ml+kam5V3PrjHez\n" +
+ "TY/DoV/VnM8froXvBEaTw3NtdaYMz81+O5wzuYN1D2YnkIbZqESXEstnNna0vezcsOiEmg8Y\n" +
+ "Z/47oz3vU7+g1YXqOtWn6lnxzDTWe5W3FaCtAE4NmMgfXjn0wnIHFEEADRmwGO9+ftxvx8Uy\n" +
+ "wAMQMy753rCu5IebsKpy+Fe5UQAUSy9Xa3OptkgG14EayOCvq6rAyGa5AQdeMKX6PMT+g7co\n" +
+ "hYSVwIXUXzv32q2nV+FpfXPC2DkfgeIlWCWFaaBsSkG6G66JA/IYojkfDJDXYyQV4bjSp6A5\n" +
+ "hI5EzIcajmF45shoBa4wBJ5NrwJx1Mfu7uqfjZCUhP52gD9vIcC3975ReTQIgVfngDwNkcok\n" +
+ "xP7WWPUt/Q+2ZlYEANNgm/XMSgEN63FPvAs62ljNcLp0YCuXpsztDLXsrDKoXkM6LjGhSkXd\n" +
+ "shDR1TQ2GvWb76YWicgbNq5j5FdmWK1GbxxpdzRtkVBaqyHOFB7gAlGYFtF8CXlrhXKxySRZ\n" +
+ "jHWCPbDD0MHSZudWn3tvOoVTxDKG5PA3AqYKl8PoY0vJgNlGR6UrJQ0kDqWyeCODuXkfOVdF\n" +
+ "9ID79DTrNVDMtoQq270z6JSjRXA4VUAaZVzFdqFhsproIY7McL8J4luVfwc2lBhcNwyt0g0r\n" +
+ "M+3zfWELP0e6OxuSt5bsvuB9VtXngtmu4mEXse+oeyiHmuF9kTlsUASB2kne4AsnhqyIklVI\n" +
+ "eaofT3+JgoX4Kpy6vesU7jmUmdDQ5C5d3ccQTLJlNHiFmwQkitO7cpYN+lsLYO57kiWWMjWd\n" +
+ "tL5pg/tOycjsMcqZ+DOPQhcWa7c7WaIJilsqqQA9jKEeurQAr2sxN83BZ0ej1HDEbwA5cpu2\n" +
+ "M1gvCUIgTifKL+EdKzKTleSnMhWSgPgQHGBrkbBXoR5S7XrqCkpCXwpKhwXRCBplzSy8AHuo\n" +
+ "36LK63ofUKWSnrtqQHlLeLGs9k9lSxq5sWELRLhi0vuVe70YSY756VRvsA3V+Rh3h9zOU3mJ\n" +
+ "ob+0WCmzMzrCEkb50qF1mO6nE07gcy2nZ2fZXPlNJLBfPF48kGzNmUUuRG3dRwvGvNt6foMK\n" +
+ "+140jw8Q+/YnwOXahNM23BpkUhvrRaYhLjIOC9ak+uMdIu5ZyjT7CgerH2SDoSOD8CuGYOuI\n" +
+ "Z6vjGBKxEX92UDClwgiIK/2YfgAIpGEAQOWCSRWitU6Jhex/SFi7aVYJ92Biw4wBtcHaHuJo\n" +
+ "4TSSRe67z+LA5X823HG6ibh1nR+u1BIFCgoPKRLpt6w6LUArJZqbYSNyCc/rCynkf5Wz8ZRk\n" +
+ "kC3rDVWmeWAtvLU9k6i1KOUk4vFtaePeGxTNolybo98cOYlj+JFs5mWR+ro6/n4Ryr/IgK0n\n" +
+ "lMvvbIiQM4ckslusg7JOimp+Qvo3hKdbbLLu9ezLZbX6xgT1H5e6Zif3lg8zpbESDCoZ+ZkA\n" +
+ "2X0Q84ofDRRH7beKV+IkG1uIvai+DVlFB1aWvCbV1N2YFX2kdVVYvKuiSxlt66kehSlKsyQg\n" +
+ "U3VnWezaEUhriQrN6u3uqjgAHj0GPhfbtXLNkF7cqb77SreTM1Mkxl/Tx0NHAmPnOvs6DJYO\n" +
+ "goG5W1ywekIpkmDBzXMeFTnjCaXDyBQgpsUklUASySUeJxV39Y2iehjJRiShgyFO1MGF48u2\n" +
+ "ZYZAUN8c1J87DLgy6+pZg5m9eZ/Y5Q1uIP0vnKYA13PmCEvlOdcqb8bgimSixNpWIm58GAc8\n" +
+ "EOtQFCkrwhGq66lDiVBEEhJi7nllTV1WBiZpU//mCqPwV12MYjX45UJlAogpQH6D9rJWEfaC\n" +
+ "xjYyxSpF63jOxkkpcrD89UehYm3bq4eDOGUBW4bFj86iEX0b5Ic3dxMVtK6F/fWGWb823+fF\n" +
+ "mcVKVXV4d8kFAOboGPlC6nJTX9hP6n3CcdHBpU7D4+yamKSPMN0oorOveTkNwofDWwT/xXKC\n" +
+ "Qszrxv56awpebYOByT7CrVnyT1WdsOafrt2r9g7DPUqJwBMPjuuipBNAb5syK90bNWxRwsRz\n" +
+ "+gKSzzg0clu3UfSWof0Kdffclc5FPKPICAcfoVFonUwS2FzmiKpfOI88xVJMv6MjxtxERgiM\n" +
+ "DuBRK/ebHX775Fq/acD6EWAbqN6fysPaBLAoQ0D7RRweEFY8ULWnnVT43OJPO4cP/oYYBIIg\n" +
+ "ANKPCZsvO0TH1Rt1Q7BPtwuOKTt+RBdeXSSF6K3FaLTJH1zCtsVyeRQIjCLZKcssRJy1FGcQ\n" +
+ "OdAnbNIZ68EkC79ESzQ5w/nmXZ85BQBcy5Kez5M0w0f3T2QxBsS7+meWyArZpL1WVJOq+Sca\n" +
+ "6vT+M2Vz09xhBd03Trzyiob/YhmS9UCqlbcGNN6Q61yBT4y0FegjC3Sn5ky7hIP/528rWr4n\n" +
+ "QjjWo2GtcLLoTXRjleIL7VsZPRJ/c5oyWlkwBMX91T3Ta7uhKh85YqChm+6wq++Ov0V9tbxQ\n" +
+ "3JcVjH0lQy0U5dvLWiefkM+AsAJMkKyas+PVuRgIuBFvasILF5dnachcwF7Uunun09hq62nK\n" +
+ "zh3Coy0jSEfcHU92BHoSLisAt/A/ufIMvyqjdLMHnLX6vsWEUj+0XhlqSgAnFED2ngk4sM5q\n" +
+ "/TEH7Z7E/COP9nwJc8HHpIAz50YUgoar77TKZXFYhbc3Zw4Onvl2dYqWOkoTV6qjQ8qOR2Km\n" +
+ "34kCm4PqhHwgJvkMLp7LLX7W+YIg9cqd/rygIxEf6NoIWkp+9DJFfuCMF2qeT8jRnaSHs0To\n" +
+ "OdIrFlUi+V7SGos6AP6R44gkeuXNyon2LD3DnABmmqyjKM9JtqWgxtn/vLNBgOwBcqIp0l3q\n" +
+ "3Znk4QKEIIMCNszdCSZUJwBW5CZDQ4F3ai6X2lxN9g4kmX3n5yKtLkjrWZMfCIwjU51cQYBo\n" +
+ "15Ue0N1+E8WDXHYZ9ahvXWo+dBMmX3bf3SWl+U9xwiljAzZ9DOi/ABrdZsMkVu2N/nm2h7iE\n" +
+ "tf0YqOhwwdenGCNhRJg6MMzoXQLvB68hHO1gZj+Wo6SYLr5DKqdHT/2aXLPiLQtFtWwcwASB\n" +
+ "HGOVqMzMTLa/uqwy9Qr7kF6hddargZwGBozYNCAT1TS/eqGNBm18y7a691qoDg2vzSuHiEAN\n" +
+ "piz/kWW1skxWHFlmdtVT8zHkxg88/h0DAeMiupXvfhr3DygVOih7onC+0L1wgvAOHc/Mcuoq\n" +
+ "qr7rTdm2tdl9sYz9gGKcprVQJ94HW9Io+PJZi/cG1x483V+1K2mWcSDPUImRXNGx/VoSfaJR\n" +
+ "DlMVvNuhBYuVBvjrkx6XDndJPZSjzEDQliwV4rLb8VJJ70faOkI5WKcqesjYzebxqT5J2f48\n" +
+ "KLuZ9rjiW67e3fg8UW5RQyIjJwGwIglPKy+CEuBQaJCZxpzMZ3yzbttiDCkV0NlzuT9exdTr\n" +
+ "ExDsZg6kiZPivYQdQDOJBhhY3LpwZHr7FCjN/QhjC01W5fmbQNTanieL3IU0GjopnjvI748q\n" +
+ "cV2GFVpHWZECq2xGnPBTMfTRkmSQ8WlGcVJ/nMku+Ww0iN4UsWk/m/7ij3JK+KcgVtMHPJxG\n" +
+ "7VbxfnAzdHTSD1K3t1wI3NA5A0Imwd3erO1LPVuMw82PsjM0hUdR4Dtvd/GsCFLlCuUwePBU\n" +
+ "k2ZucLdRPWwdJT/Fy4rX+qbxhfRmvtFw12MzDcLS7sKYvGXIMd129Os6xv8h4Wk6Gag0TrnS\n" +
+ "74sdi8PWo4oK+pDTu8Cb3wZRrTEq9af9XQkFXIWCw8YHgCivEL71XUBAQGPhMF+sdifsgo6I\n" +
+ "ziwheYUtH9pXq2Jo6i4YvfedhJlPssmGento9OHGWTywi3ZHWbAc1h9A4kTivCh2zPO9ee5D\n" +
+ "VTvZuhIbhdA86G0lI+sQRCnLyLjZe7oeK0yOJDzMLUztNHFQhQ+kqQkZvt+bRqvcOYeWn9BI\n" +
+ "d3X+HdyutBWhhMSoknuEByDiK9zo30Wf1yOYt2xqb5p4fvSbFgkP3beD7jcznuEw6TNVApcc\n" +
+ "JpzyqITwXnZo+mJ38CCrPMYEr6RxNTA+XH4uav4BqrSoD5k1IrKqcXUOVoyUk1GOb/fcqUdl\n" +
+ "XJzCb3B0tCKM2qdBrtn7qUc/a7RhzNSwlousUe1OvMeZR3POIQecOjHVH7nD/ihlfhnTc5zl\n" +
+ "s03/ydPiCl5MIHNkaD8ZcIL9s+ejs+g1Mj8r15srXIqW4Q9HXYnwTyWmsTRtKKSKId3IHTmx\n" +
+ "tGycpLLhdZRuUao+lmzvwN6j4C2q3sgiISqnT3Qnti/8ZQxtaJ5yfu8tmGqX9kNlJA5JSzew\n" +
+ "CiEFf2LtG7ZPWoHrleY3zhLrMwbPWdKENohfZuCsZGmhqiqmO0FcOy2NosX3pUjiMrVet/RS\n" +
+ "x24k4Cec2xA6cThnuzBJ5TKxdclLIoNj9tNMsH2sUUEfIY0JcSLntHkdd2S6cb9NyWDCYi4W\n" +
+ "30+ibNY+RYug4Z3AjBMSUqdiKPLO+seP02kHiKm3IzVMQ1zg1abC3dMUgBfxOVOOqHcPaJ0K\n" +
+ "6/hQYhH9CxWggGF5R1yB5Rq6mHw5eD/nnUINjIc8D/dkO0j3hDpOLbtpeAW/O+3RUlAewO+K\n" +
+ "Hqy4B4WkvVTD4estRV8sl0R9hJpSMfXtlGXjkcTujVLcG5XVnzooNYr5QOHoiwS33Lk2aswV\n" +
+ "07ZFgzADntGdBV1oWlX4bEvH4Uhw0UQ3WfSu9Ejv5Lea+Ttp3ygktMGPcrAb1GMUlYBK0twr\n" +
+ "smvHaPAvW0YWN1yFsXEYC8Uhked7n/9IiYBQr1ddUVhjFPYgt9Wb7pbqj0ZXWacLsh8rybGX\n" +
+ "JhsIOKC1Q3ELNoSQU4XR6G3Iq5+sq0YF3R5doJVeqYK4ui4U4uvoqIyOIfAD+Fkd2B5ZedA9\n" +
+ "wR67vjxlsfXISLA5KGFnnFKuAO+k7XcxD5uScCPqz/7WLUl3qSZkL6FdRfJ5hDBh4OSmeqR3\n" +
+ "OkLz4x1PRUjcpcXYhSvnNmsjz88+xZE+uaUASTchKhj3GNvV8tRfXDkgeKOjFnrTCc9ti1vl\n" +
+ "hVfUhFBtVgXcZ6yTWhLTjkZxU6oK3Or1jNfGJ3+8OSGYfRIuSFT0xgi/IND8UYCg2wJVKjhb\n" +
+ "Ysah6CnUeQJuhWlAeHX0avGczd1wuVZVaxbHxtOiwi4IS/qYzTU8R0tFjT6sOOKkC8V1gHca\n" +
+ "AY8DS4uThZj8NZrKCQNRjxjLZvd4O9BqVM4zoVIc0/MumfKAzpj51QXtWsfeL6aUwLcjli4E\n" +
+ "cfk0h3FG0PXw6xmZQMZqRNbVDXydziMXg0tpwHBg9b3zTl5d10DGnMT0mkeVl5j+PhUO0Mmt\n" +
+ "sCXDiaZDVxFXwAIkSz6/5pdn2Iom+8GUe0qCctrEkL6T5hequlQsZAIw4VExd0FdW5zt1lnB\n" +
+ "cBFmUofzV36LG2BHqLXYj9FU+pUiMiOOlP3kPtvFwDmOIMBDHAsUJOIHUX4LHUjR/tAz5+Vn\n" +
+ "cPQVkqqUrps9sQ+syXLHrAPO7qZdRuRyLwjAxARhJozT1rOl39Qv2SnwK/OqP3UzTbA41U5q\n" +
+ "4zcveXZc+C/4zlufru36fLdMtzwnKnumewnUBdGF930V5aD1qsU4UAp/mDnnFZd3yN86ofWM\n" +
+ "fwcX80kZptrl2nxK5Zx3q/u5cPC0uFbbptHHYsPO+AGL5oPo+D6aJXbFh5BFT+od6+f3QFae\n" +
+ "icjnHtPnglfHlDMNCu3pjXDrCX1MpZKaNkfk2mL0rC5kXyOhsbSlZAjb74Xu76VZSIXQ7ad2\n" +
+ "P+c+67bVuG2/eTsiXwGjz/VxTfuQzdORdT6g8IFK9LxYsmeAO6dn9eoKGl4I1V6Dpwa5eLyo\n" +
+ "m6Y7Zi6h3Xe/1y7QQqsdtVRuc6HTSDnS912YqeAMCY1dBuRmRNlnBsVJvpJANAU6l937R52F\n" +
+ "yKZ82C3le/OAPYwJFy7KRpp0OyEwU+DLt7jE47Y5gA+pXJkBNBw2MJoGIKOv8CIXCEg0BPx6\n" +
+ "t7YVvs/H/qKkNLN+2Z1V+u8STlJEq+S2u5jGDBgsJ1JfrXu+difolZkLM32c8TgyplwlPtB0\n" +
+ "uQ6g56Z2Wn0hIcznfpHJLjsAAymRqa6ymEEG+RYGfG//pCyl2IBfx0tNJrdVDLdR0bkSzSEx\n" +
+ "Zmqs3mo05YW08fIaguvZI5TwShuj4VECmS19hvx++dRzg4KPB0nwbUcWXZ4cpXMCGrm37NbR\n" +
+ "wvHfaRByNmVF8e33H9lkMibyT0HMCc29twvMf3EMFSbdKvWlxt3P9hckos4nPKhzhxMKK+x1\n" +
+ "Sk8u9iV2IyNNaTKXhWn/6QdN8+yT8ALuuDyfNmvrnASD6xX1W/qA9i1KFf2ae8S0Y6YeJlgJ\n" +
+ "jq74MN02Z2DD+2EI05a/+fTPDtfnnQ7QZZKcANxXqmhjBBswQpi8fzvG+Zl+CESkJeMnNCaF\n" +
+ "9Pk0DclEeULVWNLZlsPBdfk3zvjCAVPm3L6MoxSKMb40cI5ICmTGpyfouof2YwxW/rvR1P8O\n" +
+ "2Ekkto5tesyxuFLjEkzmMbqDsVe+XBFzTXB+juygxPgkxtEmvw0EAzXp427BmLsPxBdKkA9s\n" +
+ "e5J6xGHkA+3hmOKTWizloRDR+fEQY18QkHp0oj8LYM9mlFhtVu2bbvd9AMJA0F9ypNCDsBOk\n" +
+ "Q9oBsecvfJ4W42uWXsE4jtUCthiJCljRVrNCqT8LS+wohHLKKhr7Ka70umS4PVzYHuvPE97t\n" +
+ "KmfhSJx+oO3yW9feaJQA668/Qc0lyJZ91flDEcKyKoHSWH7gFewPBwuSsU+Tk5wqI15PWf1X\n" +
+ "4lI1mOPaYg31zgckJSvh4YOME3+HTwNAun8gU8h73bMeAO8l6Fu7ijnyin+zBfNCjMgm+tWp\n" +
+ "zvecGCpqbIgbPEMYWqRbo0hvvO6BVQTNWeIMdZY3iZD4PHdHLI76Cuk3jbtvDf44k5m8e6mx\n" +
+ "w+MYOC1R7ep5STedsvmjdW5Fhs0W3oKl9OXk6DkRKFkhhEIZo7LN5KsjIzjkF1Lwj+nzJ3PH\n" +
+ "aPfPq3IAdI25DyKkeZFglrMGDNS8zlNkHzlYohNfBwi15aqwWnT+Us9KogYjpQFqYHt235aI\n" +
+ "JjqPPrQx12lh90DZSuBv00dRsT6nJ6lpAApSj1zfiOUscfc5SgJTV2/WtwmslYy4dQLQMoX3\n" +
+ "JaQwhMlp8ymkKSmTbsLGqJg0PraxzfnpNAsuH/rmr0vmsCVfePbf9ioKlvxJAzqtJ349dSOX\n" +
+ "RgFHcjSuMP/oGV5sL5Q9hKVQ+Lu+iavwR7FP8RVrK+hUzihdeLRueDbCN52UTCkdSrHkynog\n" +
+ "NMKgF/ISuQA3l28NSGdRnpCcKXH7YGbEkpoAd1JoxwtKkoPKsdZaezInM28lFvfY4EIgyeS6\n" +
+ "ec4z6bghHgQIj9CygiWsVeZWm2f8MWf/jHUcp9vZqI0t0hfHtCrsZAWbb68np+3lHhM8CiqA\n" +
+ "KDbW8M3BKeMCWNGMyadYADu+sX3MFjbgHnDncDCfNl1ZhWmtifpIPzjVkmWjwyEgYsSdC7yT\n" +
+ "L09tPrH72poWJRkoWjr0vWqHuqH2dYgUyUw3j63+7Cq6gYcG3ZUvdD8LTVfP6wj53m6GW4/f\n" +
+ "P3la/iS9jSU514dR7/ZsZvxmyxMg7ebjfiyh3oHUJQnJj8xG0gmiys2q7wL+xuKuSZSbOPsy\n" +
+ "z37Rve4xzlSR9ZltIjRx3oL0c+c2VQ2xs1+cIOhiU1udENvjseivKeFOGj+uUrJlCBXIGZep\n" +
+ "gMd6pTHNyLCuwkEnYKc3vxxWe2yeaZxQrBqfI690jq9uGRvmA9JQK9CnSAP9524300JhaGhF\n" +
+ "Mg4J8YmoSv9+gCsbsq90uAiLSrIkeIRpGmg3TAayntJ1lOXnSDZhZAJh3CTk7T8E3zJS+GG6\n" +
+ "mbAvcvo7WRL880W00ZOBtZBEhM7dkIxyqib9zn41SGyAWZAVy6g2G1aRnbz4G+edfwQ1H5jf\n" +
+ "iNGL6KTMioItA8ZpJQQ12aXTqylFi+5wT2N+pdUqBurQWoCnLhY2O1irbIfCUIwnDk5D3a7/\n" +
+ "ySgtiotJLjGkEL+dMcLGqZOo8G1yw0kbjo+iy0mM6MkmM03fTw5KNxjl26UpkjK1Il7vHhtZ\n" +
+ "SN7IqnMQ1gRIYjyIBkS8TRG1z4T/w4Lrh0fqAvy0etZj0Gv38XrlpSI03YrADGD0rI1Z+VZE\n" +
+ "VF0viG+iea7DHg4sP6AtKaHajZcUlinE2/pq9VD03enHxHBqcpo3v34VFlwBVgUfa1Bx5qky\n" +
+ "pOyqJ5XMBjjCcF3UT2GiQ2HigmyFbC8Wx0gAYy7BEmgBVhfqZAUeicrSlY/8hm300j1kkXS1\n" +
+ "QajIqPPWCM9BNqkN1fmsVnL7Npevg8h9zVKoczQH9lZmqnDzW/qckwu4McV8m75LRTNP+ADJ\n" +
+ "KDHREz2Y4VMjGjYoY/xJLwCOZyWd05yqQRiX1ijLjPQtA1BnWVvmtSY7l+V1V+jtjSMwQmNq\n" +
+ "AGirprCNyOBWfUv+lMvxAscCsdMwSWb48bFEQqGW+6onPYHi3QdtJ73U8yGYqCdX2z8Ri5OB\n" +
+ "ceLq8oe11qIIad9IOoklGwE0tvcxl8tZ+uDKV3t/sqCmrFV1/eYdTwaHn0tDVbdjwp78ZvXX\n" +
+ "7r2RgU5ePyP37f05wOgZTfD97KqO2l8oERl6SO9FgsPD7MZqA5MOL9CwLa9kFDe3PRW0OTlm\n" +
+ "5LbCWRNPR+X6qyd7zhWfOsjdyhRa9QYa82q4IOyeUtMHRiy4n9vRSzKVdGlCxbHxkoL8gVzC\n" +
+ "cjhKgnOn1xbMVD9TOPRS4ywu65DiEz3yH5ZUQdeUcxNAIfsyIJF4uLilBctx4QfRg2yk7mYZ\n" +
+ "HN/b5yrCzLIEGT50jbRqQVLdg38ZtnlG/BrvnsQesWqyfw8HQsZgY3Tr50TGxvqBIZpn+ywu\n" +
+ "UADKWcFnnxqggFmlEY/Cnove/yW/6AdZceiq6paNaW2eZY6PKSOgE6LaitOiHw1PCjUCjXVV\n" +
+ "3wts+LSjrFMf4x4QiruEXXu+V5VjX7jatDE+ko9Uz6IU0BkTHi8dublk7fgMq7UopTF/xsnR\n" +
+ "Vwv58q8+YnL+21cF+NQYj2QRPZ+s+xdIcGccrseOILXodpFbVPDGPqkKGIz8qb4STNtM5G4g\n" +
+ "qRTy/lh+oX/8tOll7q2EIYxkUBMUmeFA22S6lmCisiBzwtJT2P6571POxVvG75CvX+6YDUyt\n" +
+ "27K10jYStSrweUrNIO/KjrJ/yb9nSWOaLrni0y/42fa6L1YN9kg3VM4BCMhz9hO9N6gk1jiG\n" +
+ "aSrD6St1f91OLCoYuxjq8aKeo3uRS1mNXdpePIStKTd8ebEE9HMbYCgN9bdnTkmA7KhvQmKU\n" +
+ "5coukIyfFzgST1zngRoNU5HOJTlrb+eMgzdJqciTP64dEPtq5s39kVl18Ks8KsmUiR/eQnqZ\n" +
+ "7wESvxQEPH6JTojnzUrVctzuMWSiAU3o8EK6t7LjyZovKg1Ve1W6FKLerv6PZ+Jbmns1XjqU\n" +
+ "7pJ9pZNoCcjwWPXSx0M97cdEtrcxVMgxB05LEWIyJblPA3flZLpEOVwSAHuJyWXz4PRKJW1k\n" +
+ "56/uSuEAI0DHtfonYz9LQ5zTlmmYAneJTSGU6PZrzzuvGrHegSSZkPRtfVH8C4RmNqUK7iWT\n" +
+ "2MPaHzCAYGXa0AdX98pTh3uPx4LNuL1TrVTrofpypSbatuMVae4588PrAnCcW8yES1wG5Zvz\n" +
+ "tVCzdciw6bZ14dr6EDn9YBDcgqBeFsUpPS8zuQhQeU7/repqjRB8p4KhuZvSb8bMbACYcQhy\n" +
+ "cPvvinsGYHQJ9lrTgEhJetocgrJJxqe1OKnu10uWDg2h/sgaibl0jTmvaQ7Y9FwTZ5NUbNMG\n" +
+ "EeqvVnzsnLsgTAksIRzziIp4ZHFS3INDy+S6VvIDnca/mGTjwlkdgjC75kyihAsmdExopl9R\n" +
+ "W1awVUnXFQXaN7GQYwGApOwZV7VAUxIcfuy+TJjZYg1Fac9mI8RtsipJpfxZ7ZvKKBD7liQl\n" +
+ "kkWUxMdkkYkj0mtIG5Xpiswj/S0gl9wNxzvUNA1tZ/1zXuAmOtL1qGcJF6VBN8+sVS4vxoDb\n" +
+ "xPlWpgVJpZcnuX7qAuehsi70r/51aGCVfcEdVFapntsY8h5X0bbp4F9IcvlUaOqXrMc9IXoq\n" +
+ "nG0jrQ3rLGlYZo2i2gIMBdUCDZkLTlQ/0FGTF2nQi0htDC4cjo++c/y/PVrZ4UPcQdPJaOcE\n" +
+ "z/csRks02TriAc9dspI8dzF7/6qXUgUFPUEcRWv+hS5j2VL3zKlIhRQ14dDS8tHnAUrmoEoK\n" +
+ "YP7GrC/3L7YRaYbom5OrMhz/waZCIz/ZjSwxfd3LvrMMA8fuAKTu1t4qXGdZ7ocYMIQPLxMg\n" +
+ "pUe51B9xwnyiU7Ky0sPBX8s7KDItEO+YXJM32fwB7egddz5qzO09SskraOGloWab8nY0YX0k\n" +
+ "PDtmXUT5J+uzERFrfoZmpnta4qJoE1SKGyS/4L7+30mRrSaD2o+sUyWd+kP6pG23PiGUtqcf\n" +
+ "iQMSoGwehTfSC3cG5XyVbfkXVOukIA+jh5ysABs3KotOPikgVQJCYDS+JnTCkOZKrf0DwRfL\n" +
+ "/roKcv2/ON7W1o25W+QN2yIhNbN88Rjt/5twSB/SDFePFmItzinkibv5y+GZxr9HEaKZ/1jy\n" +
+ "myG1cI8gIUj/nihfEQ/WEjWSyJsO4smvu2Uf7ZN4zTSk/QRukyVrmoOq4dKzStiWssF980Ho\n" +
+ "LCbKzCJFcy13so3MqDtORxtDSp+960XWMOyVHZGUKRgWIKEX2AbJpaEvwCYKdeYAZzIuCwO9\n" +
+ "O1ixtfv1KvzMzCjF4Sk9mucFcVWeEeX7Uw5DDCZ01t8uXxiFkjh+bDal39a7NF+VNDiaAmrZ\n" +
+ "ezysTyjA9h7Su9uizTyeK/eZ+w8hDn1wUe9CgcCGz8PUJxOv14qcUNpZussP0hQVErIWPtH3\n" +
+ "mPRoClx/o+AGTVtLRCR7Fjy6n6Zq8SyGDKf/0xa1QufjHmoqumtnx7tNCsS748+Ys6PFwlVv\n" +
+ "OXcpimPVisL1kfrQHDdBodXnKNZW3rWNtFGYBP1VPVUxqJJ283WRyb/X9bVGx1gPaHzFQj7w\n" +
+ "z7BVhMpPmBt/vAXdIvDGsQGb/16O52bucATPWyOQMLUBVorR07v/5NolSzHYi19ehveceUHb\n" +
+ "omX5uAPXqSR6DvUFbG/n+rOTsYCTlqNIiF4Dm3iAjizMMh15MWvV0+PHyLLl8xcYze9Eb5eO\n" +
+ "5dMRERZCNejwJ6PfFKS46XkfmLCZbWbj30CU6Qb6reC4v6sucId63+TghD3CmbyQ8MZwjB7P\n" +
+ "e4gm1deP0Gw4EqvQvsi7Kq8WQii8OYLO0HSKyF8jzfb8JnIe57A0mge5Ru0KaC8blGuO0e5V\n" +
+ "Mm1SQDXuHJC1dErv4jd3+9Yj4TFgRYumplQR24jYCpC4OyFqcPEBJleAdOkrb775sa48rUqM\n" +
+ "F9dZjP+1MJNhEhgv7g/LLUUuKwwEa6o/Ksbvx6fNikWX+40EyS/wvKzpTZ1vsVxIHMmae2C/\n" +
+ "x3C5YVFN20PUc9VsXjQrFw2T7Q3rtqWgXRgFJrcoY7NDyvJY0UyCBqcWp28N6MKOhXztA6mG\n" +
+ "kS3gZXkpKE+q7yJCByjKLs6D1vTQgj241bswl8KxjlQLw+iC2VYAtZoC4O2BWyD4K9rL7H/I\n" +
+ "i6/ppxf6ofzNv5JHZ/7if19WP/n5XlX9XuMX0ZPU15nRXpapt1hOtT1ER1+bzifyOsywoKYE\n" +
+ "12IgKsb+/LK4k9KlKP+93S+yoFtSWKfunvA0Lyb3Js8h8OY8Kq1Izzw4UjO5npX/uYq18VDr\n" +
+ "LzU6q8d1IX7rYhgrWOmARkzzFbKQ7V9FGyxpbp23Fp4y/GY/F1wnZGRk9CUgdI7ZXIp+pR3R\n" +
+ "eVSECRmLpMEAgKwcoVY5SE7mZYMEh5W3T4GUKgZhIuZPL/8/OoX3HivC2q/A+BMxbJCBL/mx\n" +
+ "bsRTpRvPK8cJgf4QxgE5ylWDpamz52nuNNHxxl+Z7vm0MZ3I7Gj6Fc1pHd+ZhCCjKmIlAseq\n" +
+ "Lofz0yafH0NPd5F8T9onl9Of3ekLWjHSZqNKZVRgLqGPxcM4QMQSQ8vsme/bPABmhrMKZ1YG\n" +
+ "VBpYtJzVTnG55m3r8sFPmPNZ5tdLiLFj5WQvClnVE1Q/eU/1iOjVin9tIfDSf5O+/x6PYs4O\n" +
+ "wHayiyxiAurfPEqlzFcnPznn7R3r+L0mcwGu6YWNOduY3TwoF2NyHnaVzVQrmOQgCE0yD56X\n" +
+ "z2Ur7HwoNYi7Nzfz0CBV2gN3PLGdiYn7J6wyBa5zR5jDhMF0W+oza/+tZFPVjo73PRT4DYc3\n" +
+ "HEHjkHzDJETGdzOHPuQyTIyZGxx2BIyeReQn6oOIREtIub+Ct3KWd7CnW5wMbFaXEYHCagfj\n" +
+ "5/rl0uGBS4KsYrMEXvGCHlJcVDmelx80rBN+Vo6Yrkj8B1LOEgM1lR+9LOBXPrhf9+PRZ4Qp\n" +
+ "9HuoO7Z6x3R4JgERXrEmZUJVj/JVIaF0fjylTj88257mCRWxyO5YpfhBF2s0qLYdMvNKUAzM\n" +
+ "/NjP8oODt3TU0Xt30RTu06amfY6ZnWTX9uzaOffi2BrWrcib97frCqPdOEdPhRcqniIL5U1k\n" +
+ "YnVrSSDUQolLqs32MGdaRCkpGF1lN7YQCBRNXFf0f2KvzC4svuDfsqEnTL7R2Vcu+akAshXQ\n" +
+ "sSjEXmKd49Ky7sZZnEbmfnDaB/+4ZWAYk4gajJib5ewB2pHzp5muKInTyECgYMc2ReVD/tz/\n" +
+ "pd9NG1NzdStchS082PL8DwRJx6HvWjVo9sSd9DGIfVonx6txQ34QCTF9psQ2R0Q1LRtJLyZd\n" +
+ "Dgehsti4GCBZdAQ8dT+2sG4QxKTHwaHCp2mDNWI86eaoZ5q4m0UG+kJrKZM+bbZARbBS5Go6\n" +
+ "8cK2JiiRh465rPEh+CuWJQT12Whk89nbe5nvq+ILez8iYVj3IiBwy2FAVtidPIEgYfVI5TWD\n" +
+ "ElfmXJXR7r2bMCY7RfLR6u8JWLJEoHEGsB2DqUwWixPRaxMbvGMm6t3nwbnhqgeJFNpW+Ntc\n" +
+ "PpZF5XSFj0xTqr6M6alUd26vC7CuXc/MDe29raHZ2k95R8zEnfl/p0HiErPGGA0rA6HmY1L4\n" +
+ "m2yc4wmPwg0cW1m8T9U4bPQaXWQ19wOqrBFety/T+m+3Y/L8aGoHmQlNJpqzbw/DmorcmyjB\n" +
+ "B3EHg5pmn+AwxuQOjTolFP+mmW593LEdkuBpITZHa9mLl5Q2ts3ABIIB4IUmz0F0Z0EOYEjt\n" +
+ "lKryNdgwXjLJF5zLjcs+Rn7FuD9LWs9FRKS8hTYFxGwcnMJLbFgWobGIK8VwXUlhiuj4dlOH\n" +
+ "Llq6eerJUz96gUR5dY0pjci+uVhF9Pr0uJKeGCHJLluqJ8hvE6r0qyXJquWdMgFU480YKlAB\n" +
+ "5XXxVI2geOurRMSSoUXKOk/ZR3i41orN7/gZQPZXvZbNPSVNifbJnqhi0qy9nBsiEtV05tQ4\n" +
+ "kCBnnQmAlNgq//AnuN1H+UNjHxUvtU80yBZMsfbz0BZ6MWF/AlTXEwNnBTXpQI9hYus83AR9\n" +
+ "lht+11eNmwTEVj9VGQVk1S0OTCWe9Gv3mxrPyFGhOJ8vFtBDhpVjSZ5cFCPhGMCZxjrIbzf4\n" +
+ "xjz4fPdSnN3XpBRxuE0FW39coYHX4jNn2FhKtOljHUZjrFL91ZYYo2xdou7VgE7GfVvb7V70\n" +
+ "MiK0OsW8du1c8Iawqmb0H1cWo/GCA8TaFdjfXOWZjEfHpXJvGqW+zcYn2DN0UNYnuP4ITOd4\n" +
+ "A3OQiTaX1XV4M+vKOR1A0OzFty0IxMxcTEwSQM1JQ+zpE11DBMWf4JEo35uAmtvHXPjlyHd2\n" +
+ "YY0ohoV70z8CGMrBN6ws5zIE7n3q7klEWHds5PZMDlzoPZd2rwQIYAM4FwEheYIAAAAAAAAA\n" +
+ "AAAA";
diff --git a/comm/mailnews/db/gloda/test/unit/test_startup_offline.js b/comm/mailnews/db/gloda/test/unit/test_startup_offline.js
new file mode 100644
index 0000000000..d7ba435d76
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_startup_offline.js
@@ -0,0 +1,53 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/*
+ * Test that gloda starts up with indexing suppressed when offline at startup.
+ */
+
+var messageInjection;
+
+add_setup(async function () {
+  // We must do this before the first load, otherwise gloda starts up without
+  // picking up the necessary initialisation.
+ Services.io.manageOfflineStatus = false;
+ Services.io.offline = true;
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Verify that a message added while offline is not indexed, and that the
+ * indexer catches up and indexes it once we go back online.
+ */
+add_task(async function test_gloda_offline_startup() {
+ // Set up a folder for indexing and check the message doesn't get indexed.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Now go online...
+ Services.io.offline = false;
+
+  // ...and check that the indexer has run and indexed the message.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+});
diff --git a/comm/mailnews/db/gloda/test/unit/xpcshell.ini b/comm/mailnews/db/gloda/test/unit/xpcshell.ini
new file mode 100644
index 0000000000..4efbf97583
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/xpcshell.ini
@@ -0,0 +1,38 @@
+[DEFAULT]
+head = head_gloda.js
+tail =
+support-files = base_*.js resources/*
+prefs =
+ gloda.loglevel=Debug
+
+[test_corrupt_database.js]
+[test_folder_logic.js]
+[test_fts3_tokenizer.js]
+[test_gloda_content_imap_offline.js]
+[test_gloda_content_local.js]
+[test_index_addressbook.js]
+[test_index_bad_messages.js]
+[test_index_compaction.js]
+[test_index_junk_imap_offline.js]
+[test_index_junk_imap_online.js]
+[test_index_junk_local.js]
+[test_index_messages_imap_offline.js]
+[test_index_messages_imap_online.js]
+[test_index_messages_imap_online_to_offline.js]
+[test_index_messages_local.js]
+[test_index_sweep_folder.js]
+[test_intl.js]
+[test_migration.js]
+[test_mime_attachments_size.js]
+[test_mime_emitter.js]
+[test_msg_search.js]
+[test_noun_mimetype.js]
+[test_nuke_migration.js]
+[test_nuke_migration_from_future.js]
+[test_query_core.js]
+[test_query_messages_imap_offline.js]
+[test_query_messages_imap_online.js]
+[test_query_messages_imap_online_to_offline.js]
+[test_query_messages_local.js]
+[test_smime_mimemsg_representation.js]
+[test_startup_offline.js]