summaryrefslogtreecommitdiffstats
path: root/comm/mailnews/db/gloda/test/unit/resources
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm431
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm847
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm293
3 files changed, 1571 insertions, 0 deletions
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
new file mode 100644
index 0000000000..e8234f1a97
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
@@ -0,0 +1,431 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["queryExpect", "sqlExpectCount", "sqlRun"];
+
+/*
+ * This file provides gloda query helpers for the test infrastructure.
+ */
+
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.queryHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
/**
 * Default [gloda extractor, expected extractor] pairs keyed by gloda noun id.
 * Each extractor maps its input to a comparable string; queryExpect falls
 * back to these when the caller does not supply explicit extractors.
 */
var _defaultExpectationExtractors = {
  [GlodaConstants.NOUN_MESSAGE]: [
    function expectExtract_message_gloda(aGlodaMessage) {
      return aGlodaMessage.headerMessageID;
    },
    function expectExtract_message_synth(aSynthMessage) {
      return aSynthMessage.messageId;
    },
  ],
  [GlodaConstants.NOUN_CONTACT]: [
    function expectExtract_contact_gloda(aGlodaContact) {
      return aGlodaContact.name;
    },
    function expectExtract_contact_name(aName) {
      return aName;
    },
  ],
  [GlodaConstants.NOUN_IDENTITY]: [
    function expectExtract_identity_gloda(aGlodaIdentity) {
      return aGlodaIdentity.value;
    },
    function expectExtract_identity_address(aAddress) {
      return aAddress;
    },
  ],
};
+
/**
 * Fallback extractor used when no noun-specific extractor exists.
 * Deliberately invokes the item's own toString() (rather than String(aThing))
 * so that null/undefined inputs throw instead of silently becoming "null".
 *
 * @param {*} aThing - Any value implementing toString.
 * @returns {string} The string representation of aThing.
 */
function expectExtract_default_toString(aThing) {
  const stringRep = aThing.toString();
  return stringRep;
}
+
/**
 * Collection listener that checks a gloda query's results against an expected
 * set, settling an internal promise with success or a descriptive Error.
 *
 * @see queryExpect for info on what we do.
 */
class QueryExpectationListener {
  /**
   * @param aExpectedSet Map from expected string representation -> expected
   *   item; entries are deleted as matching results arrive.
   * @param aGlodaExtractor Function mapping a gloda item to its comparable
   *   string representation.
   * @param aOrderVerifier Optional (index, item, collection) order check.
   * @param aCallerStackFrame Stack frame of queryExpect's caller, kept so
   *   failures can be attributed to the originating test.
   */
  constructor(
    aExpectedSet,
    aGlodaExtractor,
    aOrderVerifier,
    aCallerStackFrame
  ) {
    this.expectedSet = aExpectedSet;
    this.glodaExtractor = aGlodaExtractor;
    this.orderVerifier = aOrderVerifier;
    this.completed = false;
    this.callerStackFrame = aCallerStackFrame;
    // Track our current 'index' in the results for the (optional) order verifier,
    // but also so we can provide slightly more useful debug output.
    this.nextIndex = 0;

    // Settled by the collection callbacks below; exposed via `promise`.
    this._promise = new Promise((resolve, reject) => {
      this._resolve = resolve;
      this._reject = reject;
    });
  }
  // Fired by the collection each time the query matches newly indexed items.
  onItemsAdded(aItems, aCollection) {
    log.debug("QueryExpectationListener onItemsAdded received.");
    for (let item of aItems) {
      let glodaStringRep;
      try {
        glodaStringRep = this.glodaExtractor(item);
      } catch (ex) {
        this._reject(
          new Error(
            "Gloda extractor threw during query expectation.\n" +
              "Item:\n" +
              item +
              "\nException:\n" +
              ex
          )
        );
        return; // We don't have to continue for more checks.
      }

      // Make sure we were expecting this guy.
      if (glodaStringRep in this.expectedSet) {
        delete this.expectedSet[glodaStringRep];
      } else {
        this._reject(
          new Error(
            "Query returned unexpected result!\n" +
              "Item:\n" +
              item +
              "\nExpected set:\n" +
              this.expectedSet +
              "\nCaller:\n" +
              this.callerStackFrame
          )
        );
        return; // We don't have to continue for more checks.
      }

      if (this.orderVerifier) {
        try {
          this.orderVerifier(this.nextIndex, item, aCollection);
        } catch (ex) {
          // If the order was wrong, we could probably go for an output of what
          // we actually got...
          dump("Order Problem detected. Dump of data:\n");
          for (let [iThing, thing] of aItems.entries()) {
            dump(
              iThing +
                ": " +
                thing +
                (aCollection.stashedColumns
                  ? ". " + aCollection.stashedColumns[thing.id].join(", ")
                  : "") +
                "\n"
            );
          }
          this._reject(ex);
          return; // We don't have to continue for more checks.
        }
      }
      this.nextIndex++;

      // Make sure the query's test method agrees with the database about this.
      if (!aCollection.query.test(item)) {
        this._reject(
          new Error(
            "Query test returned false when it should have been true on.\n" +
              "Extracted:\n" +
              glodaStringRep +
              "\nItem:\n" +
              item
          )
        );
      }
    }
  }
  // Modifications are irrelevant to expectation checking; log and ignore.
  onItemsModified(aItems, aCollection) {
    log.debug(
      "QueryExpectationListener onItemsModified received. Nothing done."
    );
  }
  // Removals are irrelevant to expectation checking; log and ignore.
  onItemsRemoved(aItems, aCollection) {
    log.debug(
      "QueryExpectationListener onItemsRemoved received. Nothing done."
    );
  }
  // Final callback: verify nothing expected is still outstanding.
  onQueryCompleted(aCollection) {
    log.debug("QueryExpectationListener onQueryCompleted received.");
    // We may continue to match newly added items if we leave our query as it
    // is, so let's become explicit to avoid related troubles.
    aCollection.becomeExplicit();

    // `expectedSet` should now be empty.
    for (let key in this.expectedSet) {
      let value = this.expectedSet[key];
      this._reject(
        new Error(
          "Query should have returned:\n" +
            key +
            " (" +
            value +
            ").\n" +
            "But " +
            this.nextIndex +
            " was seen."
        )
      );
      return; // We don't have to continue for more checks.
    }

    // If no error is thrown then we're fine here.
    this._resolve();
  }

  /** @returns {Promise} Resolves on full match; rejects with a detailed Error. */
  get promise() {
    return this._promise;
  }
}
+
/**
 * Execute the given query, verifying that the result set contains exactly the
 * contents of the expected set; no more, no less. Since we expect that the
 * query will result in gloda objects, but your expectations will not be posed
 * in terms of gloda objects (though they could be), we rely on extractor
 * functions to take the gloda result objects and the expected result objects
 * into the same string.
 * If you don't provide extractor functions, we will use our defaults (based on
 * the query noun type) if available, or assume that calling toString is
 * sufficient.
 *
 * @param aQuery Either a query to execute, or a dict with the following keys:
 *   - queryFunc: The function to call that returns a function.
 *   - queryThis: The 'this' to use for the invocation of queryFunc.
 *   - args: A list (possibly empty) of arguments to precede the traditional
 *     arguments to query.getCollection.
 *   - nounId: The (numeric) noun id of the noun type expected to be returned.
 * @param aExpectedSet The list of expected results from the query where each
 *   item is suitable for extraction using aExpectedExtractor. We have a soft
 *   spot for SyntheticMessageSets and automatically unbox them.
 * @param aGlodaExtractor The extractor function to take an instance of the
 *   gloda representation and return a string for comparison/equivalence
 *   against that returned by the expected extractor (against the input
 *   instance in aExpectedSet.) The value returned must be unique for all
 *   of the expected gloda representations of the expected set. If omitted,
 *   the default extractor for the gloda noun type is used. If no default
 *   extractor exists, toString is called on the item.
 * @param aExpectedExtractor The extractor function to take an instance from the
 *   values in the aExpectedSet and return a string for comparison/equivalence
 *   against that returned by the gloda extractor. The value returned must
 *   be unique for all of the values in the expected set. If omitted, the
 *   default extractor for the presumed input type based on the gloda noun
 *   type used for the query is used, failing over to toString.
 * @param aOrderVerifier Optional function to verify the order the results are
 *   received in. Function signature should be of the form (aZeroBasedIndex,
 *   aItem, aCollectionResultIsFor).
 * @returns The collection returned by the query invocation, after all
 *   expectations have been verified.
 */
async function queryExpect(
  aQuery,
  aExpectedSet,
  aGlodaExtractor,
  aExpectedExtractor,
  aOrderVerifier
) {
  // A real query object exposes `test`; normalize it into the dict form.
  if (aQuery.test) {
    aQuery = {
      queryFunc: aQuery.getCollection,
      queryThis: aQuery,
      args: [],
      nounId: aQuery._nounDef.id,
    };
  }

  // Unbox SyntheticMessageSets into their underlying synthetic messages.
  if ("synMessages" in aExpectedSet) {
    aExpectedSet = aExpectedSet.synMessages;
  }

  // - set extractor functions to defaults if omitted
  if (aGlodaExtractor == null) {
    if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
      aGlodaExtractor = _defaultExpectationExtractors[aQuery.nounId][0];
    } else {
      aGlodaExtractor = expectExtract_default_toString;
    }
  }
  if (aExpectedExtractor == null) {
    if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
      aExpectedExtractor = _defaultExpectationExtractors[aQuery.nounId][1];
    } else {
      aExpectedExtractor = expectExtract_default_toString;
    }
  }

  // - build the expected set
  let expectedSet = {};
  for (let item of aExpectedSet) {
    try {
      expectedSet[aExpectedExtractor(item)] = item;
    } catch (ex) {
      throw new Error(
        "Expected extractor threw during query expectation for item:\n" +
          item +
          "\nException:\n" +
          ex
      );
    }
  }

  // - create the listener...
  let listener = new QueryExpectationListener(
    expectedSet,
    aGlodaExtractor,
    aOrderVerifier,
    Components.stack.caller
  );
  // Note: this appends to the caller-provided args array (mutation).
  aQuery.args.push(listener);
  let queryValue = aQuery.queryFunc.apply(aQuery.queryThis, aQuery.args);
  // Wait for the QueryListener to finish.
  await listener.promise;
  return queryValue;
}
+
/**
 * Asynchronously run a SQL statement against the gloda database. This can grow
 * binding logic and data returning as needed.
 *
 * We run the statement asynchronously to get a consistent view of the database.
 *
 * @param {string} sql - The SQL statement to execute.
 * @returns {Promise<object[]|null>} The accumulated result rows, or null when
 *   the statement produced no result sets.
 */
async function sqlRun(sql) {
  const conn = GlodaDatastore.asyncConnection;
  const stmt = conn.createAsyncStatement(sql);
  let rows = null;

  const completion = new Promise((resolve, reject) => {
    stmt.executeAsync({
      handleResult(aResultSet) {
        if (rows === null) {
          rows = [];
        }
        for (
          let row = aResultSet.getNextRow();
          row;
          row = aResultSet.getNextRow()
        ) {
          rows.push(row);
        }
      },
      handleError(aError) {
        reject(new Error("SQL error!\nResult:\n" + aError + "\nSQL:\n" + sql));
      },
      handleCompletion() {
        resolve(rows);
      },
    });
  });
  // Async execution proceeds independently of the statement object, so it can
  // be finalized immediately.
  stmt.finalize();
  return completion;
}
+
/**
 * Run an (async) SQL statement against the gloda database. The statement
 * should be a SELECT COUNT; we check the count against aExpectedCount.
 * Any additional arguments are positionally bound to the statement.
 *
 * We run the statement asynchronously to get a consistent view of the database.
 *
 * @param {number} aExpectedCount The count the first result column must equal.
 * @param {string} aSQLString The SELECT COUNT statement to execute.
 * @param {...*} params Values positionally bound to the statement.
 * @returns {Promise<void>} Resolves when the count matches; rejects with a
 *   descriptive Error otherwise.
 */
async function sqlExpectCount(aExpectedCount, aSQLString, ...params) {
  let conn = GlodaDatastore.asyncConnection;
  let stmt = conn.createStatement(aSQLString);

  for (let iArg = 0; iArg < params.length; iArg++) {
    GlodaDatastore._bindVariant(stmt, iArg, params[iArg]);
  }

  // Human-readable description used in failure messages.
  let desc = [aSQLString, ...params];
  // Running SQL count.
  let listener = new SqlExpectationListener(
    aExpectedCount,
    desc,
    Components.stack.caller
  );
  stmt.executeAsync(listener);
  // We don't need the statement anymore; async execution proceeds regardless.
  stmt.finalize();

  await listener.promise;
}
+
/**
 * Statement callback for sqlExpectCount: captures the first row's first
 * column as the actual count, then settles `promise` at completion time
 * based on whether it matches the expected count.
 */
class SqlExpectationListener {
  /**
   * @param {number} aExpectedCount Count the query is expected to produce.
   * @param {Array} aDesc SQL string plus bound params, for error messages.
   * @param aCallerStackFrame Stack frame of the test caller, for blame.
   */
  constructor(aExpectedCount, aDesc, aCallerStackFrame) {
    this.actualCount = null;
    this.expectedCount = aExpectedCount;
    this.sqlDesc = aDesc;
    this.callerStackFrame = aCallerStackFrame;

    // Settled by the handle* callbacks below; exposed via `promise`.
    this._promise = new Promise((resolve, reject) => {
      this._resolve = resolve;
      this._reject = reject;
    });
  }
  handleResult(aResultSet) {
    let row = aResultSet.getNextRow();
    if (!row) {
      this._reject(
        new Error(
          "No result row returned from caller:\n" +
            this.callerStackFrame +
            "\nSQL:\n" +
            this.sqlDesc
        )
      );
      return; // We don't have to continue for more checks.
    }
    this.actualCount = row.getInt64(0);
  }

  handleError(aError) {
    this._reject(
      new Error(
        "SQL error from caller:\n" +
          this.callerStackFrame +
          "\nResult:\n" +
          aError +
          "\nSQL:\n" +
          this.sqlDesc
      )
    );
  }

  handleCompletion(aReason) {
    // Loose != is intentional: actualCount may still be null if no result
    // set arrived, which must also count as a mismatch.
    if (this.actualCount != this.expectedCount) {
      this._reject(
        new Error(
          // FIX: the original message lacked separating whitespace ("5does
          // not match" and "From caller:<frame>").
          "Actual count of " +
            this.actualCount +
            " does not match expected count of:\n" +
            this.expectedCount +
            "\nFrom caller:\n" +
            this.callerStackFrame +
            "\nSQL:\n" +
            this.sqlDesc
        )
      );
      return; // We don't have to continue for more checks.
    }
    this._resolve();
  }

  /** @returns {Promise<void>} Resolves on match; rejects with an Error. */
  get promise() {
    return this._promise;
  }
}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
new file mode 100644
index 0000000000..a4c092400b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
@@ -0,0 +1,847 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides gloda testing infrastructure.
+ *
+ * A few words about how tests should expect to interact with indexing:
+ *
+ * By default, we enable only event-driven indexing with an infinite work queue
+ * length. This means that all messages will be queued for indexing as they
+ * are added or modified. You should await to |waitForGlodaIndexer| to wait
+ * until the indexer completes. If you want to assert that certain messages
+ * will have been indexed during that pass, you can pass them as arguments to
+ * |assertExpectedMessagesIndexed|.
+ * There is no need to tell us to expect the messages to be indexed prior to the
+ * waiting as long as nothing spins the event loop after you perform the action
+ * that triggers indexing. None of our existing xpcshell tests do this, but it
+ * is part of the mozmill idiom for its waiting mechanism, so be sure to not
+ * perform a mozmill wait without first telling us to expect the messages.
+ */
+
+const EXPORTED_SYMBOLS = [
+ "assertExpectedMessagesIndexed",
+ "glodaTestHelperInitialize",
+ "nukeGlodaCachesAndCollections",
+ "prepareIndexerForTesting",
+ "waitForGlodaIndexer",
+];
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.testHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+var indexMessageState;
+
/**
 * Create a 'me' identity of "me@localhost" for the benefit of Gloda. At the
 * time of this writing, Gloda only initializes Gloda.myIdentities and
 * Gloda.myContact at startup with no event-driven updates. As such, this
 * function needs to be called prior to gloda startup.
 */
function createMeIdentity() {
  // FIX: createIdentity is a method and must be invoked. The original code
  // referenced it without calling it, so no identity was created (the email
  // and fullName were assigned onto the function object instead).
  let identity = MailServices.accounts.createIdentity();
  identity.email = "me@localhost";
  identity.fullName = "Me";
}
// And run it now.
createMeIdentity();
+
+// Set the gloda prefs.
+// "yes" to indexing.
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// "no" to a sweep we don't control.
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
// Environment variables that, when set, are mirrored into the corresponding
// gloda preference before any indexing starts.
var ENVIRON_MAPPINGS = [
  {
    envVar: "GLODA_DATASTORE_EXPLAIN_TO_PATH",
    prefName: "mailnews.database.global.datastore.explainToPath",
  },
];

// Propagate environment variables to prefs as appropriate:
for (let { envVar, prefName } of ENVIRON_MAPPINGS) {
  if (Services.env.exists(envVar)) {
    Services.prefs.setCharPref(prefName, Services.env.get(envVar));
  }
}
+
/**
 * Side note:
 * Keep them in the global scope so that a Cu.forceGC() call won't purge them.
 */
var collectionListener;

/**
 * Registers MessageInjection listeners and Gloda listeners for our tests.
 * Must be called before tests run; constructs the shared IndexMessageState.
 *
 * @param {MessageInjection} messageInjection Instance of MessageInjection
 *   to register Events to.
 */
function glodaTestHelperInitialize(messageInjection) {
  // Initialize the message state if we are dealing with messages. At some
  // point we probably want to just completely generalize the indexing state.
  // That point is likely when our testing infrastructure needs the support
  // provided by `indexMessageState` for things other than messages.
  indexMessageState = new IndexMessageState();

  // Keep module-scope references so a forced GC cannot collect the listeners.
  collectionListener = new GlodaCollectionListener();
  new TestAttributeProvider();
  new MsgsClassifiedListener();

  // Add a hook that makes folders not filthy when we first see them.
  messageInjection.registerMessageInjectionListener({
    /**
     * By default all folders start out filthy. This is great in the real world
     * but I went and wrote all the unit tests without entirely thinking about
     * how this affected said unit tests. So we add a listener so that we can
     * force the folders to be clean.
     * This is okay and safe because messageInjection always creates the folders
     * without any messages in them.
     */
    onRealFolderCreated(aRealFolder) {
      log.debug(
        `onRealFolderCreated through MessageInjection received. ` +
          `Make folder: ${aRealFolder.name} clean for Gloda.`
      );
      let glodaFolder = Gloda.getFolderForFolder(aRealFolder);
      glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderClean);
    },

    /**
     * Make waitForGlodaIndexer know that it should wait for a msgsClassified
     * event whenever messages have been injected, at least if event-driven
     * indexing is enabled.
     */
    onInjectingMessages() {
      log.debug(
        "onInjectingMessages through MessageInjection received. Pushing to intrestestingEvents."
      );
      indexMessageState.interestingEvents.push("msgsClassified");
    },

    /**
     * This basically translates to "we are triggering an IMAP move" and has
     * the ramification that we should expect a msgsClassified event because
     * the destination will see the header get added at some point.
     */
    onMovingMessagesWithoutDestHeaders() {
      log.debug(
        "onMovingMessagesWithoutDestHeaders through MessageInjection received. Pushing to intrestestingEvents."
      );
      indexMessageState.interestingEvents.push("msgsClassified");
    },
  });
  log.debug("glodaTestHelperInitialize finished.");
}
+
/**
 * Holds all per-run message-indexing state for the tests: the expectation
 * data, worker recovery/cleanup counters, the gloda messages/deletions
 * observed since the last wait, and the pending "interesting events" gate
 * consulted by waitForGlodaIndexer.
 */
class IndexMessageState {
  // Current expectation data; replaced via resetData() between waits.
  data = new GlodaIndexerData();

  constructor() {
    prepareIndexerForTesting();
    // Continue the preparing by assigning the hook recover and hook cleanup.
    GlodaIndexer._unitTestHookRecover = this._testHookRecover;
    GlodaIndexer._unitTestHookCleanup = this._testHookCleanup;
  }

  // Discard accumulated expectations, restoring the defaults.
  resetData() {
    this.data = new GlodaIndexerData();
  }

  // The synthetic message sets passed in to |assertExpectedMessagesIndexed|.
  synMessageSets = [];
  // The user-specified accumulate-style verification function.
  verifier() {
    return this.data.data.verifier;
  }
  // Should we augment the synthetic sets with gloda message info?
  augmentSynSets() {
    return this.data.data.augment;
  }
  // Synthetic sets whose messages should be seen as deletions this pass.
  deletionSynSets() {
    return this.data.data.deleted;
  }

  // Expected value of |_workerRecoveredCount| at assertion time.
  expectedWorkerRecoveredCount() {
    return this.data.data.recovered;
  }
  // Expected value of |_workerFailedToRecoverCount| at assertion time.
  expectedFailedToRecoverCount() {
    return this.data.data.failedToRecover;
  }
  // Expected value of |_workerCleanedUpCount| at assertion time.
  expectedCleanedUpCount() {
    return this.data.data.cleanedUp;
  }
  // Expected value of |_workerHadNoCleanUpCount| at assertion time.
  expectedHadNoCleanUpCount() {
    return this.data.data.hadNoCleanUp;
  }
  /**
   * The number of messages that were fully (re)indexed using
   * Gloda.grokNounItem.
   */
  _numFullIndexed = 0;
  // Expected value of |_numFullIndexed| at assertion time.
  expectedNumFullIndexed() {
    return this.data.data.fullyIndexed;
  }

  // The number of times a worker had a recover helper and it recovered.
  _workerRecoveredCount = 0;
  // The number of times a worker had a recover helper and it did not recover.
  _workerFailedToRecoverCount = 0;
  // The number of times a worker had a cleanup helper and it cleaned up.
  _workerCleanedUpCount = 0;
  // The number of times a worker had no cleanup helper but there was a cleanup.
  _workerHadNoCleanUpCount = 0;

  /**
   * Beware this scoping for this class is lost where _testHookRecover is used.
   * (That is why the body references the module-level `indexMessageState`
   * rather than `this`.)
   *
   * @param aRecoverResult Truthy when the worker's recovery helper succeeded.
   * @param aOriginEx The exception that triggered recovery.
   * @param aActiveJob The indexer job that was active.
   * @param aCallbackHandle The indexer's callback handle (for debug output).
   */
  _testHookRecover(aRecoverResult, aOriginEx, aActiveJob, aCallbackHandle) {
    log.debug(
      "indexer recovery hook fired" +
        "\nrecover result:\n" +
        aRecoverResult +
        "\noriginating exception:\n" +
        aOriginEx +
        "\nactive job:\n" +
        aActiveJob +
        "\ncallbackHandle:\n" +
        indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
    );
    if (aRecoverResult) {
      indexMessageState._workerRecoveredCount++;
    } else {
      indexMessageState._workerFailedToRecoverCount++;
    }
  }

  /**
   * Beware this scoping for this class is lost where _testHookCleanup is used.
   * (That is why the body references the module-level `indexMessageState`
   * rather than `this`.)
   *
   * @param aHadCleanupFunc Truthy when the worker provided a cleanup helper.
   * @param aOriginEx The exception that triggered cleanup.
   * @param aActiveJob The indexer job that was active.
   * @param aCallbackHandle The indexer's callback handle (for debug output).
   */
  _testHookCleanup(aHadCleanupFunc, aOriginEx, aActiveJob, aCallbackHandle) {
    log.debug(
      "indexer cleanup hook fired" +
        "\nhad cleanup?\n" +
        aHadCleanupFunc +
        "\noriginating exception:\n" +
        aOriginEx +
        "\nactive job:\n" +
        aActiveJob +
        "\ncallbackHandle\n" +
        indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
    );
    if (aHadCleanupFunc) {
      indexMessageState._workerCleanedUpCount++;
    } else {
      indexMessageState._workerHadNoCleanUpCount++;
    }
  }
  // Summarize a callback handle into a small plain object for logging.
  _jsonifyCallbackHandleState(aCallbackHandle) {
    return {
      _stringRep: aCallbackHandle.activeStack.length + " active generators",
      activeStackLength: aCallbackHandle.activeStack.length,
      contextStack: aCallbackHandle.contextStack,
    };
  }

  /**
   * The gloda messages indexed since the last call to |waitForGlodaIndexer|.
   * NOTE(review): declared as arrays but used as headerMessageID-keyed maps
   * by GlodaCollectionListener — confirm this is intentional.
   */
  _glodaMessagesByMessageId = [];
  _glodaDeletionsByMessageId = [];

  // Count of onItemsAdded items seen; used to trigger a simulated
  // database-clearing event (see GlodaCollectionListener.onItemsAdded).
  _numItemsAdded = 0;

  // Shallow-merge new expectation data over the current GlodaIndexerData.
  applyGlodaIndexerData(data) {
    this.data.applyData(data);
  }

  /**
   * A list of events that we need to see before we allow ourselves to perform
   * the indexer check. For example, if "msgsClassified" is in here, it means
   * that whether the indexer is active or not is irrelevant until we have
   * seen that msgsClassified event.
   */
  interestingEvents = [];
}
+
/**
 * Configure the gloda indexer for deterministic, test-driven operation:
 * verbose logging, no adaptive scheduling, no idle-service interference,
 * an effectively unbounded event queue, and commits that only happen when
 * tests explicitly trigger them.
 *
 * @throws {Error} If the gloda indexer is not enabled.
 */
function prepareIndexerForTesting() {
  if (!GlodaIndexer.enabled) {
    throw new Error(
      "The gloda indexer is somehow not enabled. This is problematic."
    );
  }
  // Make the indexer be more verbose about indexing for us.
  GlodaIndexer._unitTestSuperVerbose = true;
  GlodaMsgIndexer._unitTestSuperVerbose = true;
  // Lobotomize the adaptive indexer.
  // The indexer doesn't need to worry about load; zero his rescheduling time.
  GlodaIndexer._INDEX_INTERVAL = 0;
  // The indexer already registered for the idle service; we must remove this
  // or "idle" notifications will still get sent via the observer mechanism.
  let realIdleService = GlodaIndexer._idleService;
  realIdleService.removeIdleObserver(
    GlodaIndexer,
    GlodaIndexer._indexIdleThresholdSecs
  );
  // Pretend we are always idle.
  GlodaIndexer._idleService = {
    idleTime: 1000,
    addIdleObserver() {
      // There is no actual need to register with the idle observer, and if
      // we do, the stupid "idle" notification will trigger commits.
    },
    removeIdleObserver() {},
  };
  // We want the event-driven indexer to always handle indexing and never spill
  // to an indexing sweep unless a test intentionally does so.
  GlodaIndexer._indexMaxEventQueueMessages = 10000;
  // Lobotomize the adaptive indexer's constants.
  GlodaIndexer._cpuTargetIndexTime = 10000000;
  GlodaIndexer._CPU_TARGET_INDEX_TIME_ACTIVE = 10000000;
  GlodaIndexer._CPU_TARGET_INDEX_TIME_IDLE = 10000000;
  GlodaIndexer._CPU_IS_BUSY_TIME = 10000000;
  GlodaIndexer._PAUSE_LATE_IS_BUSY_TIME = 10000000;

  // Pin the token count to the per-batch maximum and ignore writes. Uses
  // Object.defineProperty instead of the deprecated __defineGetter__ /
  // __defineSetter__ pair, reproducing the accessor shape those legacy APIs
  // created (enumerable and configurable).
  delete GlodaIndexer._indexTokens;
  Object.defineProperty(GlodaIndexer, "_indexTokens", {
    enumerable: true,
    configurable: true,
    get() {
      return GlodaIndexer._CPU_MAX_TOKENS_PER_BATCH;
    },
    set() {},
  });

  // This includes making commits only happen when the unit tests explicitly
  // tell them to.
  GlodaIndexer._MINIMUM_COMMIT_TIME = 10000000;
  GlodaIndexer._MAXIMUM_COMMIT_TIME = 10000000;
}
+
/**
 * Per-wait expectation data for the gloda indexer. Holds the verifier
 * callback, augmentation/deletion settings, and the expected worker
 * recovery/cleanup counters consulted at assertion time.
 */
class GlodaIndexerData {
  data = {
    verifier: null,
    augment: false,
    deleted: [],
    fullyIndexed: null,

    // Things should not be recovering or failing and cleaning up unless the
    // test explicitly expects it, so all counters default to zero.
    recovered: 0,
    failedToRecover: 0,
    cleanedUp: 0,
    hadNoCleanUp: 0,
  };

  /**
   * Shallow-merge `data` over the current expectations. Only top-level keys
   * are replaced wholesale when present in `data`; there is no deep merge.
   *
   * @param {*} data
   */
  applyData(data) {
    this.data = Object.assign({}, this.data, data);
  }
}
+
/**
 * Wait until both the pending "interesting events" have been observed and the
 * gloda indexer has gone inactive.
 *
 * Note that if the indexer is not currently active we assume it has already
 * completed; we do not entertain the possibility that it has not yet started.
 * Since the indexer is 'active' as soon as it sees an event, this does mean
 * that you need to wait to make sure the indexing event has happened before
 * calling us. This is reasonable.
 *
 * @returns {Promise<void>} Resolves once both conditions hold.
 */
async function waitForGlodaIndexer() {
  let eventsPending = TestUtils.waitForCondition(() => {
    // NOTE(review): this treats a single remaining interesting event as
    // "finished" (length > 1 rather than > 0) — confirm this off-by-one-looking
    // threshold is intentional.
    if (indexMessageState.interestingEvents.length > 1) {
      // Events still pending. See msgClassified event and
      // messageInjection.registerMessageInjectionListener.
      return false;
    }
    // Events finished.
    return true;
  });
  let indexerRunning = TestUtils.waitForCondition(() => {
    if (GlodaIndexer.indexing) {
      // Still indexing.
      return false;
    }
    // Indexing finished.
    return true;
  });

  log.debug(
    "waitForGlodaIndexer waiting for intrestingEvents and GlodaIndexer.indexing."
  );

  // If we are waiting on certain events to occur first, block on those.
  await Promise.all([eventsPending, indexerRunning]);
}
+
/**
 * Consumes "msgsClassified" entries from IndexMessageState.interestingEvents:
 * each time the msgsClassified notification fires and the event is present in
 * the pending list, it is removed.
 */
class MsgsClassifiedListener {
  /**
   * Register for msgsClassified notifications.
   * (We want this to happen after gloda registers its own listener, and it
   * does.)
   */
  constructor() {
    MailServices.mfn.addListener(
      this,
      Ci.nsIMsgFolderNotificationService.msgsClassified
    );
  }

  /**
   * If this was an expected interesting event, remove it from the list.
   * Unexpected events are harmless: anything we care about is added to
   * interestingEvents before it can possibly be fired.
   */
  msgsClassified(aMsgHdrs, aJunkClassified, aTraitClassified) {
    log.debug("MsgsClassifiedListener msgsClassified received.");
    const pending = indexMessageState.interestingEvents;
    const eventIndex = pending.indexOf("msgsClassified");
    if (eventIndex !== -1) {
      log.debug("Remove intrestingEvent through msgsClassified.");
      // Consume the event we just observed.
      pending.splice(eventIndex, 1);
    }
  }
}
+
/**
 * This AttributeProvider helps us testing Gloda.
 * With the `process` method the Collections will be noticed
 * through listeners.
 * (onItemsAdded, onItemsModified, onItemsRemoved, onQueryComplete)
 */
class TestAttributeProvider {
  providerName = "glodaTestHelper:fakeProvider";
  constructor() {
    // Register us with gloda as an attribute provider so that we can
    // distinguish between fully reindexed messages and fastpath indexed
    // messages.
    Gloda._attrProviderOrderByNoun[GlodaConstants.NOUN_MESSAGE].push({
      providerName: this.providerName,
      process: this.process,
    });
  }
  /**
   * Fake attribute provider processing function so we can distinguish
   * between fully reindexed messages and fast-path modified messages.
   * Process has to be invoked for the GlodaCollectionListener.
   *
   * Generator protocol: bump the full-index counter (on the module-level
   * indexMessageState, since `this` is not preserved by the indexer), then
   * yield kWorkDone to signal that this provider is finished with the item.
   */
  *process(aItem, aRawReps, aIsConceptuallyNew, aCallbackHandle) {
    indexMessageState._numFullIndexed++;

    yield GlodaConstants.kWorkDone;
  }
}
+
/**
 * This class tracks a GlodaCollection (created by Gloda._wildcardCollection).
 * The listeners for this collection which will notify our IndexMessageState
 * are defined here.
 */
class GlodaCollectionListener {
  // Our catch-all message collection that nets us all messages passing by.
  catchAllCollection = null;
  constructor() {
    this.catchAllCollection = Gloda._wildcardCollection(
      GlodaConstants.NOUN_MESSAGE
    );
    this.catchAllCollection.listener = this;
  }
  /*
   * Our catch-all collection listener. Any time a new message gets indexed,
   * we should receive an onItemsAdded call. Any time an existing message
   * gets reindexed, we should receive an onItemsModified call. Any time an
   * existing message actually gets purged from the system, we should receive
   * an onItemsRemoved call.
   */
  onItemsAdded(aItems) {
    log.debug("GlodaCollectionListener onItemsAdded received.");
    for (let item of aItems) {
      if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
        // FIX: the original concatenation lacked separating spaces, producing
        // "Gloda messageXalready indexed".
        throw new Error(
          "Gloda message " +
            item.folderMessage +
            " already indexed once since the last waitForGlodaIndexer call!"
        );
      }
      log.debug(
        "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
      );
      indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
    }

    // Simulate some other activity clearing out the current folder's
    // cached database, which used to kill the indexer's enumerator.
    if (++indexMessageState._numItemsAdded == 3) {
      log.debug("GlodaCollectionListener simulate other activity.");
      GlodaMsgIndexer._indexingFolder.msgDatabase = null;
    }
  }

  onItemsModified(aItems) {
    log.debug("GlodaCollectionListener onItemsModified received.");
    for (let item of aItems) {
      if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
        // FIX: same missing-space message bug as onItemsAdded.
        throw new Error(
          "Gloda message " +
            item +
            " already indexed once since the last waitForGlodaIndexer call!"
        );
      }
      log.debug(
        "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
      );
      indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
    }
  }

  onItemsRemoved(aItems) {
    log.debug("GlodaCollectionListener onItemsRemoved received.");
    for (let item of aItems) {
      if (
        item.headerMessageID in indexMessageState._glodaDeletionsByMessageId
      ) {
        // FIX: missing space before "already deleted".
        throw new Error(
          "Gloda message " +
            item +
            " already deleted once since the last waitForGlodaIndexer call!"
        );
      }
      log.debug(
        "GlodaCollectionListener save item to indexMessageState._glodaDeletionsByMessageId."
      );
      indexMessageState._glodaDeletionsByMessageId[item.headerMessageID] = item;
    }
  }
  onQueryComplete(aCollection) {
    log.debug(
      "GlodaCollectionListener onQueryComplete received. Nothing done."
    );
  }
}
+
/**
 * Assert that the set of messages indexed is exactly the set passed in.
 * If a verification function is provided, use it on a per-message basis
 * to make sure the resulting gloda message looks like it should given the
 * synthetic message.
 *
 * Throws Errors if something is not according and returns always [true, string]
 * for `Assert.ok` in your tests. This ensures proper testing output.
 *
 * @param {SyntheticMessageSet[]} aSynMessageSets A list of SyntheticMessageSets
 *   containing exactly the messages we should expect to see.
 * @param [aConfig.verifier] The function to call to verify that the indexing
 *   had the desired result. Takes arguments aSynthMessage (the synthetic
 *   message just indexed), aGlodaMessage (the gloda message representation of
 *   the indexed message), and aPreviousResult (the value last returned by the
 *   verifier function for this given set of messages, or undefined if it is
 *   the first message.)
 * @param [aConfig.augment=false] Should we augment the synthetic message sets
 *   with references to their corresponding gloda messages? The messages
 *   will show up in a 'glodaMessages' list on the syn set.
 * @param {SyntheticMessageSet[]} [aConfig.deleted] A list of SyntheticMessageSets
 *   containing messages that should be recognized as deleted by the gloda
 *   indexer in this pass.
 * @param [aConfig.fullyIndexed] A count of the number of messages we expect
 *   to observe being fully indexed. This is relevant because in the case
 *   of message moves, gloda may generate an onItemsModified notification but
 *   not reindex the message. This attribute allows the tests to distinguish
 *   between the two cases.
 * @returns {[true, string]}
 */
function assertExpectedMessagesIndexed(aSynMessageSets, aConfig) {
  indexMessageState.synMessageSets = aSynMessageSets;

  indexMessageState.applyGlodaIndexerData(aConfig);

  // Check that we have a gloda message for every syn message and verify.
  // The verifier is loop-invariant; look it up once.
  let verifier = indexMessageState.verifier();
  for (let msgSet of indexMessageState.synMessageSets) {
    if (indexMessageState.augmentSynSets()) {
      msgSet.glodaMessages = [];
    }
    // The verifier's last result is threaded through the messages of this
    // set, per the aPreviousResult contract documented above. (Previously
    // it was reset for every message, which broke that contract.)
    let previousValue = undefined;
    for (let [iSynMsg, synMsg] of msgSet.synMessages.entries()) {
      if (!(synMsg.messageId in indexMessageState._glodaMessagesByMessageId)) {
        let msgHdr = msgSet.getMsgHdr(iSynMsg);
        // Only dereference msgHdr when it exists, so we throw the intended
        // diagnostic instead of a TypeError on a missing header.
        throw new Error(
          "Header " +
            (msgHdr ? msgHdr.messageId : "<unknown>") +
            " in folder: " +
            (msgHdr ? msgHdr.folder.name : "no header?") +
            " should have been indexed."
        );
      }

      let glodaMsg =
        indexMessageState._glodaMessagesByMessageId[synMsg.messageId];
      if (indexMessageState.augmentSynSets()) {
        msgSet.glodaMessages.push(glodaMsg);
      }

      // Null out (rather than delete) the consumed entry so the "extra
      // messages" sweep below can distinguish consumed from unexpected.
      indexMessageState._glodaMessagesByMessageId[synMsg.messageId] = null;

      if (verifier) {
        try {
          previousValue = verifier(synMsg, glodaMsg, previousValue);
        } catch (ex) {
          throw new Error(
            "Verification failure: " +
              synMsg +
              " is not close enough to " +
              glodaMsg +
              "; basing this on exception: " +
              ex
          );
        }
      }
    }
  }

  // Check that we don't have any extra gloda messages. (lacking syn msgs)
  for (let messageId in indexMessageState._glodaMessagesByMessageId) {
    let glodaMsg = indexMessageState._glodaMessagesByMessageId[messageId];
    if (glodaMsg != null) {
      throw new Error(
        "Gloda message:\n" +
          glodaMsg +
          "\nShould not have been indexed.\n" +
          "Source header:\n" +
          glodaMsg.folderMessage
      );
    }
  }

  if (indexMessageState.deletionSynSets()) {
    for (let msgSet of indexMessageState.deletionSynSets()) {
      for (let synMsg of msgSet.synMessages) {
        if (
          !(synMsg.messageId in indexMessageState._glodaDeletionsByMessageId)
        ) {
          throw new Error(
            "Synthetic message " + synMsg + " did not get deleted!"
          );
        }

        indexMessageState._glodaDeletionsByMessageId[synMsg.messageId] = null;
      }
    }
  }

  // Check that we don't have unexpected deletions.
  for (let messageId in indexMessageState._glodaDeletionsByMessageId) {
    let glodaMsg = indexMessageState._glodaDeletionsByMessageId[messageId];
    if (glodaMsg != null) {
      throw new Error(
        "Gloda message with message id " +
          messageId +
          " was " +
          "unexpectedly deleted!"
      );
    }
  }

  if (
    indexMessageState.expectedWorkerRecoveredCount() != null &&
    indexMessageState.expectedWorkerRecoveredCount() !=
      indexMessageState._workerRecoveredCount
  ) {
    throw new Error(
      "Expected worker-recovered count did not match actual!\n" +
        "Expected:\n" +
        indexMessageState.expectedWorkerRecoveredCount() +
        "\nActual:\n" +
        indexMessageState._workerRecoveredCount
    );
  }
  if (
    indexMessageState.expectedFailedToRecoverCount() != null &&
    indexMessageState.expectedFailedToRecoverCount() !=
      indexMessageState._workerFailedToRecoverCount
  ) {
    throw new Error(
      "Expected worker-failed-to-recover count did not match actual!\n" +
        "Expected:\n" +
        indexMessageState.expectedFailedToRecoverCount() +
        "\nActual:\n" +
        indexMessageState._workerFailedToRecoverCount
    );
  }
  if (
    indexMessageState.expectedCleanedUpCount() != null &&
    indexMessageState.expectedCleanedUpCount() !=
      indexMessageState._workerCleanedUpCount
  ) {
    throw new Error(
      "Expected worker-cleaned-up count did not match actual!\n" +
        "Expected:\n" +
        indexMessageState.expectedCleanedUpCount() +
        "\nActual:\n" +
        indexMessageState._workerCleanedUpCount
    );
  }
  if (
    indexMessageState.expectedHadNoCleanUpCount() != null &&
    indexMessageState.expectedHadNoCleanUpCount() !=
      indexMessageState._workerHadNoCleanUpCount
  ) {
    throw new Error(
      "Expected worker-had-no-cleanup count did not match actual!\n" +
        "Expected:\n" +
        indexMessageState.expectedHadNoCleanUpCount() +
        "\nActual\n" +
        indexMessageState._workerHadNoCleanUpCount
    );
  }

  if (
    indexMessageState.expectedNumFullIndexed() != null &&
    indexMessageState.expectedNumFullIndexed() !=
      indexMessageState._numFullIndexed
  ) {
    throw new Error(
      "Expected number of fully indexed messages did not match.\n" +
        "Expected:\n" +
        indexMessageState.expectedNumFullIndexed() +
        "\nActual:\n" +
        indexMessageState._numFullIndexed
    );
  }

  // Cleanup of internal tracking values in the IndexMessageState
  // for new tests.
  resetIndexMessageState();

  // If no error has been thrown until here we are fine!
  // Return values for Assert.ok.
  // Using like Assert.ok(...assertExpectedMessagesIndexed()).
  return [true, "Expected messages were indexed."];
}
+
/**
 * Reset the internal tracking state of the IndexMessageState so the next
 * test starts from a clean slate: the expected synthetic message sets, the
 * per-message-id indexing/deletion maps, and every worker / full-index
 * counter are cleared, then resetData() is delegated to for the rest.
 */
function resetIndexMessageState() {
  indexMessageState.synMessageSets = [];
  // These maps are keyed by message-id strings and probed with `in`; use
  // plain objects rather than (ab)using arrays as dictionaries.
  indexMessageState._glodaMessagesByMessageId = {};
  indexMessageState._glodaDeletionsByMessageId = {};

  indexMessageState._workerRecoveredCount = 0;
  indexMessageState._workerFailedToRecoverCount = 0;
  indexMessageState._workerCleanedUpCount = 0;
  indexMessageState._workerHadNoCleanUpCount = 0;

  indexMessageState._numFullIndexed = 0;
  indexMessageState.resetData();
}
+
/**
 * Wipe out almost everything from the clutches of the GlodaCollectionManager.
 * By default it caches things and knows about all the non-GC'ed collections;
 * tests that want their data loaded from disk rather than from cache call us.
 * Two exceptions survive the purge: Gloda's myContact / myIdentities
 * collections (which must stay reachable or invariants break), and any
 * catch-all collection used by the testing/indexing process, which is
 * re-registered by hard-coded reference rather than discovered by scanning.
 */
function nukeGlodaCachesAndCollections() {
  // Sanity-check that GlodaCollectionManager still exposes the internals we
  // are about to manipulate, and explode early otherwise. (The Gloda
  // contact/identity collections are deliberately not checked: with no
  // identities — the situation in our unit tests — they never initialize.)
  if (
    GlodaCollectionManager._collectionsByNoun === undefined ||
    GlodaCollectionManager._cachesByNoun === undefined
  ) {
    throw new Error(
      "Try and remember to update the testing infrastructure when you " +
        "change things!"
    );
  }

  // Drop every known collection.
  GlodaCollectionManager._collectionsByNoun = {};

  // Restore the myContact / myIdentities collections; their reachability is
  // an invariant Gloda depends on.
  if (Gloda._myContactCollection) {
    GlodaCollectionManager.registerCollection(Gloda._myContactCollection);
    GlodaCollectionManager.registerCollection(Gloda._myIdentitiesCollection);
  }

  // The testing catch-all collection is emptied and re-registered too.
  const catchAll = collectionListener.catchAllCollection;
  if (catchAll) {
    catchAll.clear();
    GlodaCollectionManager.registerCollection(catchAll);
  }

  // Caches are not designed to be cleared, but we do not want to lose them
  // either — so define fresh caches from the remains of the old ones.
  const staleCaches = GlodaCollectionManager._cachesByNoun;
  GlodaCollectionManager._cachesByNoun = {};
  for (const staleCache of Object.values(staleCaches)) {
    GlodaCollectionManager.defineCache(
      staleCache._nounDef,
      staleCache._maxCacheSize
    );
  }
}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
new file mode 100644
index 0000000000..f7a5199ba3
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
@@ -0,0 +1,293 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "configureGlodaIndexing",
+ "waitForGlodaDBFlush",
+ "waitForIndexingHang",
+ "resumeFromSimulatedHang",
+ "permuteMessages",
+ "makeABCardForAddressPair",
+];
+
+/*
+ * This file provides gloda testing infrastructure functions which are not coupled
+ * with the IndexMessageState from GlodaTestHelper.jsm
+ */
+
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.helperFunctions",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
/**
 * Resume execution once the database has run every async statement queued
 * before this call. We detect that point by forcing a commit, although it
 * could also be done without one. (That would mean reaching into
 * GlodaDatastore.jsm for the raw connection, or extending the datastore to
 * expose such a mechanism.)
 */
async function waitForGlodaDBFlush() {
  // Forcing a commit is our existing flush mechanism, even though it has the
  // arguable downside of inducing an fsync.
  const wasInTransaction = GlodaDatastore._transactionDepth;
  if (!wasInTransaction) {
    GlodaDatastore._beginTransaction();
  }

  // The executor runs synchronously, so the post-commit hook is registered
  // before the commit below is issued.
  const flushed = new Promise(resolve => {
    GlodaDatastore.runPostCommit(resolve);
  });

  // No need to drive the depth to zero; waiting for the outer transaction to
  // close itself is sufficient.
  GlodaDatastore._commitTransaction();
  if (wasInTransaction) {
    GlodaDatastore._beginTransaction();
  }
  await flushed;
}
+
/**
 * An injected fault exception, thrown by the fault-injection hooks below.
 * Subclasses Error so a stack trace is captured and `instanceof Error`
 * checks hold; the message and bracketed toString() form are unchanged.
 */
class InjectedFault extends Error {
  constructor(aWhy) {
    super(aWhy);
    this.name = "InjectedFault";
  }

  toString() {
    return "[InjectedFault: " + this.message + "]";
  }
}
+
// Replacement for GlodaMsgIndexer._MsgHdrToMimeMessageFunc that always
// throws; installed by configureGlodaIndexing({ injectFaultIn: "streaming" }).
function _inject_failure_on_MsgHdrToMimeMessage() {
  throw new InjectedFault("MsgHdrToMimeMessage");
}
+
// Promise signaling a simulated indexing hang. It resolves with the
// [function, thisArg, args] triple needed to later perform the real
// MsgHdrToMimeMessage call; see resumeFromSimulatedHang().
let hangResolve;
let hangPromise = new Promise(resolve => {
  hangResolve = resolve;
});

// Replacement for GlodaMsgIndexer._MsgHdrToMimeMessageFunc that skips the
// streaming entirely and just records how the real call could be made.
// From the indexer's perspective this manifests as a hang.
function _simulate_hang_on_MsgHdrToMimeMessage(...aArgs) {
  hangResolve([MsgHdrToMimeMessage, null, aArgs]);
}
+
/**
 * If you have configured gloda to hang while indexing (via
 * |configureGlodaIndexing| with a hangWhile argument), this is the thing you
 * wait on to make sure the indexer actually gets to the point where it
 * hangs — i.e. until the hang stub has been invoked.
 */
async function waitForIndexingHang() {
  await hangPromise;
}
+
/**
 * Configure gloda indexing. For most settings, the settings get clobbered by
 * the next time this method is called. Omitted settings reset to the defaults.
 * However, anything labeled as a 'sticky' setting stays that way until
 * explicitly changed.
 *
 * @param {boolean} [aArgs.event=true] Should event-driven indexing be enabled
 *   (true) or disabled (false)? Right now, this actually suppresses
 *   indexing... the semantics will be ironed out as-needed.
 * @param [aArgs.hangWhile] Must be either omitted (for don't force a hang) or
 *   "streaming" indicating that we should do a no-op instead of performing
 *   the message streaming. This will manifest as a hang until
 *   |resumeFromSimulatedHang| is invoked or the test explicitly causes the
 *   indexer to abort (in which case you do not need to call the resume
 *   function.) You must omit injectFaultIn if you use hangWhile.
 * @param [aArgs.injectFaultIn=null] Must be omitted (for don't inject a
 *   failure) or "streaming" indicating that we should inject a failure when
 *   the message indexer attempts to stream a message. The fault will be an
 *   appropriate exception. You must omit hangWhile if you use injectFaultIn.
 * @throws {Error} If hangWhile or injectFaultIn has an unsupported value.
 */
function configureGlodaIndexing(aArgs) {
  let shouldSuppress = "event" in aArgs ? !aArgs.event : false;
  if (shouldSuppress != GlodaIndexer.suppressIndexing) {
    log.debug(`Setting suppress indexing to ${shouldSuppress}.`);
    GlodaIndexer.suppressIndexing = shouldSuppress;
  }

  if ("hangWhile" in aArgs) {
    log.debug(`Enabling hang injection in ${aArgs.hangWhile}.`);
    switch (aArgs.hangWhile) {
      case "streaming":
        GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
          _simulate_hang_on_MsgHdrToMimeMessage;
        break;
      default:
        throw new Error(
          aArgs.hangWhile + " is not a legal choice for hangWhile"
        );
    }
  } else if ("injectFaultIn" in aArgs) {
    // Fixed: this used to log aArgs.hangWhile, which is always undefined in
    // this branch.
    log.debug(`Enabling fault injection in ${aArgs.injectFaultIn}.`);
    switch (aArgs.injectFaultIn) {
      case "streaming":
        GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
          _inject_failure_on_MsgHdrToMimeMessage;
        break;
      default:
        throw new Error(
          aArgs.injectFaultIn + " is not a legal choice for injectFaultIn"
        );
    }
  } else {
    if (GlodaMsgIndexer._MsgHdrToMimeMessageFunc != MsgHdrToMimeMessage) {
      log.debug("Clearing hang/fault injection.");
    }
    GlodaMsgIndexer._MsgHdrToMimeMessageFunc = MsgHdrToMimeMessage;
  }
}
+
/**
 * Call this to resume from the hang induced by configuring the indexer with
 * a "hangWhile" argument to |configureGlodaIndexing|.
 *
 * @param [aJustResumeExecution=false] Should we just poke the callback driver
 *   for the indexer rather than continuing the call? You would likely want
 *   to do this if you committed a lot of violence while in the simulated
 *   hang and proper resumption would throw exceptions all over the place.
 *   (For example; if you hang before streaming and destroy the message
 *   header while suspended, resuming the attempt to stream will throw.)
 */
async function resumeFromSimulatedHang(aJustResumeExecution) {
  if (!aJustResumeExecution) {
    // Replay the call that was captured instead of being performed.
    const [hungFunc, hungThis, hungArgs] = await hangPromise;
    log.debug(`Resuming from simulated hang with call to: ${hungFunc.name}.`);
    hungFunc.apply(hungThis, hungArgs);
  } else {
    log.debug("Resuming from simulated hang with direct wrapper callback.");
    GlodaIndexer._wrapCallbackDriver();
  }
  // Arm a fresh promise so the next simulated hang can be awaited.
  hangPromise = new Promise(resolve => {
    hangResolve = resolve;
  });
}
+
/**
 * Prepares permutations for messages with aScenarioMaker. Be sure to wait for
 * the indexer for every permutation and verify the result.
 *
 * This process is executed once for each possible permutation of observation
 * of the synthetic messages. (Well, we cap it; brute-force test your logic
 * on your own time; you should really only be feeding us minimal scenarios.)
 *
 * @param aScenarioMaker A function that, when called, will generate a series
 *   of SyntheticMessage instances. Each call to this method should generate
 *   a new set of conceptually equivalent, but not identical, messages. This
 *   allows us to process without having to reset our state back to nothing
 *   each time. (This is more to try and make sure we run the system with a
 *   'dirty' state than a bid for efficiency.)
 * @param {MessageInjection} messageInjection An instance to use for permuting
 *   the messages and creating folders.
 *
 * @returns {[async () => SyntheticMessageSet]} Await it sequentially with a
 *   for...of loop. Wait for each element for the Indexer and assert
 *   afterwards.
 */
async function permuteMessages(aScenarioMaker, messageInjection) {
  const targetFolder = await messageInjection.makeEmptyFolder();

  // Generate one scenario up front so we can count its messages and thus the
  // (capped) number of permutations to produce.
  let scenarioMessages = aScenarioMaker();
  const totalPermutations = Math.min(factorial(scenarioMessages.length), 32);

  return Array.from({ length: totalPermutations }, (_, permutationId) => {
    return async () => {
      log.debug(`Run permutation: ${permutationId + 1} / ${totalPermutations}`);
      // Every permutation after the first needs a freshly generated set,
      // since the previous run consumed the last one.
      if (permutationId) {
        scenarioMessages = aScenarioMaker();
      }
      scenarioMessages = permute(scenarioMessages, permutationId);
      const scenarioSet = new SyntheticMessageSet(scenarioMessages);
      await messageInjection.addSetsToFolders([targetFolder], [scenarioSet]);
      return scenarioSet;
    };
  });
}
+
/**
 * A simple factorial used to calculate the number of permutations possible
 * for a given set of messages. An optional accumulator may be supplied, in
 * which case the result is `(rv || 1) * i!` — matching the recursive
 * accumulator form this iterative version replaces.
 */
function factorial(i, rv) {
  let product = rv || 1;
  for (let term = i; term > 1; term--) {
    product *= term;
  }
  return product;
}
+
/**
 * Permute an array given a 'permutation id' — an integer that fully
 * characterizes the permutation through the choice made at each step.
 *
 * @param aArray Source array that is destructively processed (emptied).
 * @param aPermutationId The permutation id. A permutation id of 0 results in
 *   the original array's sequence being maintained.
 */
function permute(aArray, aPermutationId) {
  const picked = [];
  let remainingId = aPermutationId;
  for (let choices = aArray.length; choices > 0; choices--) {
    const pickIndex = remainingId % choices;
    picked.push(aArray.splice(pickIndex, 1)[0]);
    remainingId = Math.floor(remainingId / choices);
  }
  return picked;
}
+
/**
 * Add a name-and-address pair as generated by `makeNameAndAddress` to the
 * personal address book.
 */
function makeABCardForAddressPair(nameAndAddress) {
  const [displayName, primaryEmail] = nameAndAddress;

  // XXX bug 314448 demands that we trigger creation of the ABs... If we don't
  // do this, then the call to addCard will fail if someone else hasn't tickled
  // this.
  MailServices.ab.directories;

  // The personal address book URI, copied from abSetup.js (kPABData).
  const personalAddressBook = MailServices.ab.getDirectory(
    "jsaddrbook://abook.sqlite"
  );

  const card = Cc["@mozilla.org/addressbook/cardproperty;1"].createInstance(
    Ci.nsIAbCard
  );
  card.displayName = displayName;
  card.primaryEmail = primaryEmail;

  // Persist the new card straight away.
  personalAddressBook.addCard(card);

  log.debug(`Adding address book card for: ${nameAndAddress}`);
}