summaryrefslogtreecommitdiffstats
path: root/comm/mailnews/db/gloda
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--comm/mailnews/db/gloda/.project11
-rw-r--r--comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm576
-rw-r--r--comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm501
-rw-r--r--comm/mailnews/db/gloda/components/components.conf25
-rw-r--r--comm/mailnews/db/gloda/components/moz.build13
-rw-r--r--comm/mailnews/db/gloda/content/autocomplete-richlistitem.js644
-rw-r--r--comm/mailnews/db/gloda/content/glodacomplete.js466
-rw-r--r--comm/mailnews/db/gloda/jar.mn8
-rw-r--r--comm/mailnews/db/gloda/modules/Collection.jsm834
-rw-r--r--comm/mailnews/db/gloda/modules/Everybody.jsm23
-rw-r--r--comm/mailnews/db/gloda/modules/Facet.jsm599
-rw-r--r--comm/mailnews/db/gloda/modules/Gloda.jsm2275
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaConstants.jsm250
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaContent.jsm285
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaDataModel.jsm1020
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaDatabind.jsm210
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaDatastore.jsm4402
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm188
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm947
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaIndexer.jsm1491
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm310
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm361
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaPublic.jsm45
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm642
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm175
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaUtils.jsm84
-rw-r--r--comm/mailnews/db/gloda/modules/IndexMsg.jsm3464
-rw-r--r--comm/mailnews/db/gloda/modules/MimeMessage.jsm821
-rw-r--r--comm/mailnews/db/gloda/modules/NounFreetag.jsm91
-rw-r--r--comm/mailnews/db/gloda/modules/NounMimetype.jsm582
-rw-r--r--comm/mailnews/db/gloda/modules/NounTag.jsm97
-rw-r--r--comm/mailnews/db/gloda/modules/SuffixTree.jsm381
-rw-r--r--comm/mailnews/db/gloda/modules/moz.build31
-rw-r--r--comm/mailnews/db/gloda/moz.build13
-rw-r--r--comm/mailnews/db/gloda/test/moz.build12
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_gloda_content.js226
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_index_junk.js217
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_index_messages.js1461
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_query_messages.js729
-rw-r--r--comm/mailnews/db/gloda/test/unit/head_gloda.js19
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm431
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm847
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm293
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_corrupt_database.js86
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_folder_logic.js60
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js299
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js34
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js31
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_addressbook.js139
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js210
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_compaction.js395
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js49
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js36
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_junk_local.js33
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js38
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js36
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js42
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_local.js133
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js265
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_intl.js355
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_migration.js151
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js445
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_mime_emitter.js746
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_msg_search.js155
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js144
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_nuke_migration.js62
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js12
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_core.js658
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js37
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js38
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js40
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_local.js33
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js894
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_startup_offline.js53
-rw-r--r--comm/mailnews/db/gloda/test/unit/xpcshell.ini38
75 files changed, 31847 insertions, 0 deletions
diff --git a/comm/mailnews/db/gloda/.project b/comm/mailnews/db/gloda/.project
new file mode 100644
index 0000000000..08f9557936
--- /dev/null
+++ b/comm/mailnews/db/gloda/.project
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>gloda</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ </buildSpec>
+ <natures>
+ </natures>
+</projectDescription>
diff --git a/comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm b/comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm
new file mode 100644
index 0000000000..98f67eadda
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm
@@ -0,0 +1,576 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * glautocomp.js decides which autocomplete item type to
+ * use when one enters text in global search box. There are
+ * following types of autocomplete item: gloda-contact-chunk-richlistitem,
+ * gloda-fulltext-all-richlistitem, gloda-fulltext-single-richlistitem, gloda-multi-richlistitem,
+ * gloda-single-identity-richlistitem, gloda-single-tag-richlistitem.
+ */
+
+var EXPORTED_SYMBOLS = ["GlodaAutoComplete"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+var Gloda = null;
+var MultiSuffixTree = null;
+var TagNoun = null;
+var FreeTagNoun = null;
+
+function ResultRowFullText(aItem, words, typeForStyle) {
+ this.item = aItem;
+ this.words = words;
+ this.typeForStyle = "gloda-fulltext-" + typeForStyle + "-richlistitem";
+}
+ResultRowFullText.prototype = {
+ multi: false,
+ fullText: true,
+};
+
+function ResultRowSingle(aItem, aCriteriaType, aCriteria, aExplicitNounID) {
+ this.nounID = aExplicitNounID || aItem.NOUN_ID;
+ this.nounDef = Gloda._nounIDToDef[this.nounID];
+ this.criteriaType = aCriteriaType;
+ this.criteria = aCriteria;
+ this.item = aItem;
+ this.typeForStyle = "gloda-single-" + this.nounDef.name + "-richlistitem";
+}
+ResultRowSingle.prototype = {
+ multi: false,
+ fullText: false,
+};
+
+function ResultRowMulti(aNounID, aCriteriaType, aCriteria, aQuery) {
+ this.nounID = aNounID;
+ this.nounDef = Gloda._nounIDToDef[aNounID];
+ this.criteriaType = aCriteriaType;
+ this.criteria = aCriteria;
+ this.collection = aQuery.getCollection(this);
+ this.collection.becomeExplicit();
+ this.renderer = null;
+}
+ResultRowMulti.prototype = {
+ multi: true,
+ typeForStyle: "gloda-multi-richlistitem",
+ fullText: false,
+ onItemsAdded(aItems) {
+ if (this.renderer) {
+ for (let [, item] of aItems.entries()) {
+ this.renderer.renderItem(item);
+ }
+ }
+ },
+ onItemsModified(aItems) {},
+ onItemsRemoved(aItems) {},
+ onQueryCompleted() {},
+};
+
+function nsAutoCompleteGlodaResult(aListener, aCompleter, aString) {
+ this.listener = aListener;
+ this.completer = aCompleter;
+ this.searchString = aString;
+ this._results = [];
+ this._pendingCount = 0;
+ this._problem = false;
+ // Track whether we have reported anything to the complete controller so
+ // that we know not to send notifications to it during calls to addRows
+ // prior to that point.
+ this._initiallyReported = false;
+
+ this.wrappedJSObject = this;
+}
+nsAutoCompleteGlodaResult.prototype = {
+ getObjectAt(aIndex) {
+ return this._results[aIndex] || null;
+ },
+ markPending(aCompleter) {
+ this._pendingCount++;
+ },
+ markCompleted(aCompleter) {
+ if (--this._pendingCount == 0 && this.active) {
+ this.listener.onSearchResult(this.completer, this);
+ }
+ },
+ announceYourself() {
+ this._initiallyReported = true;
+ this.listener.onSearchResult(this.completer, this);
+ },
+ addRows(aRows) {
+ if (!aRows.length) {
+ return;
+ }
+ this._results.push.apply(this._results, aRows);
+ if (this._initiallyReported && this.active) {
+ this.listener.onSearchResult(this.completer, this);
+ }
+ },
+ // ==== nsIAutoCompleteResult
+ searchString: null,
+ get searchResult() {
+ if (this._problem) {
+ return Ci.nsIAutoCompleteResult.RESULT_FAILURE;
+ }
+ if (this._results.length) {
+ return !this._pendingCount
+ ? Ci.nsIAutoCompleteResult.RESULT_SUCCESS
+ : Ci.nsIAutoCompleteResult.RESULT_SUCCESS_ONGOING;
+ }
+ return !this._pendingCount
+ ? Ci.nsIAutoCompleteResult.RESULT_NOMATCH
+ : Ci.nsIAutoCompleteResult.RESULT_NOMATCH_ONGOING;
+ },
+ active: false,
+ defaultIndex: -1,
+ errorDescription: null,
+ get matchCount() {
+ return this._results === null ? 0 : this._results.length;
+ },
+  // This is the lower text (shows the URL in Firefox);
+  // we try to show the contact's name here.
+ getValueAt(aIndex) {
+ let thing = this._results[aIndex];
+ return thing.name || thing.value || thing.subject || null;
+ },
+ getLabelAt(aIndex) {
+ return this.getValueAt(aIndex);
+ },
+  // Rich autocomplete uses this as the "title"; it is the upper text.
+  // We try to show the identity here.
+ getCommentAt(aIndex) {
+ let thing = this._results[aIndex];
+ if (thing.value) {
+ // identity
+ return thing.contact.name;
+ }
+ return thing.name || thing.subject;
+ },
+ // rich uses this to be the "type"
+ getStyleAt(aIndex) {
+ let row = this._results[aIndex];
+ return row.typeForStyle;
+ },
+ // rich uses this to be the icon
+ getImageAt(aIndex) {
+ let thing = this._results[aIndex];
+ if (!thing.value) {
+ return null;
+ }
+
+ return ""; // we don't want to use gravatars as is.
+ /*
+ let md5hash = GlodaUtils.md5HashString(thing.value);
+ let gravURL = "http://www.gravatar.com/avatar/" + md5hash +
+ "?d=identicon&s=32&r=g";
+ return gravURL;
+ */
+ },
+ getFinalCompleteValueAt(aIndex) {
+ return this.getValueAt(aIndex);
+ },
+ removeValueAt() {},
+ _stop() {},
+};
+
+var MAX_POPULAR_CONTACTS = 200;
+
+/**
+ * Complete contacts/identities based on name/email. Instant phase is based on
+ * a suffix-tree built of popular contacts/identities. Delayed phase relies
+ * on a LIKE search of all known contacts.
+ */
+function ContactIdentityCompleter() {
+ // get all the contacts
+ let contactQuery = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+ contactQuery.orderBy("-popularity").limit(MAX_POPULAR_CONTACTS);
+ this.contactCollection = contactQuery.getCollection(this, null);
+ this.contactCollection.becomeExplicit();
+}
+ContactIdentityCompleter.prototype = {
+ _popularitySorter(a, b) {
+ return b.popularity - a.popularity;
+ },
+ complete(aResult, aString) {
+ if (aString.length < 3) {
+      // In CJK, a first name or last name is sometimes used as 1 character only.
+ // So we allow autocompleted search even if 1 character.
+ //
+ // [U+3041 - U+9FFF ... Full-width Katakana, Hiragana
+ // and CJK Ideograph
+ // [U+AC00 - U+D7FF ... Hangul
+ // [U+F900 - U+FFDC ... CJK compatibility ideograph
+ if (!aString.match(/[\u3041-\u9fff\uac00-\ud7ff\uf900-\uffdc]/)) {
+ return false;
+ }
+ }
+
+ let matches;
+ if (this.suffixTree) {
+ matches = this.suffixTree.findMatches(aString.toLowerCase());
+ } else {
+ matches = [];
+ }
+
+ // let's filter out duplicates due to identity/contact double-hits by
+ // establishing a map based on the contact id for these guys.
+ // let's also favor identities as we do it, because that gets us the
+    // most accurate gravatar, potentially.
+ let contactToThing = {};
+ for (let iMatch = 0; iMatch < matches.length; iMatch++) {
+ let thing = matches[iMatch];
+ if (
+ thing.NOUN_ID == GlodaConstants.NOUN_CONTACT &&
+ !(thing.id in contactToThing)
+ ) {
+ contactToThing[thing.id] = thing;
+ } else if (thing.NOUN_ID == GlodaConstants.NOUN_IDENTITY) {
+ contactToThing[thing.contactID] = thing;
+ }
+ }
+ // and since we can now map from contacts down to identities, map contacts
+ // to the first identity for them that we find...
+ matches = Object.keys(contactToThing)
+ .map(id => contactToThing[id])
+ .map(val =>
+ val.NOUN_ID == GlodaConstants.NOUN_IDENTITY ? val : val.identities[0]
+ );
+
+ let rows = matches.map(
+ match => new ResultRowSingle(match, "text", aResult.searchString)
+ );
+ aResult.addRows(rows);
+
+ // - match against database contacts / identities
+ let pending = { contactToThing, pendingCount: 2 };
+
+ let contactQuery = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+ contactQuery.nameLike(
+ contactQuery.WILDCARD,
+ aString,
+ contactQuery.WILDCARD
+ );
+ pending.contactColl = contactQuery.getCollection(this, aResult);
+ pending.contactColl.becomeExplicit();
+
+ let identityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ identityQuery
+ .kind("email")
+ .valueLike(identityQuery.WILDCARD, aString, identityQuery.WILDCARD);
+ pending.identityColl = identityQuery.getCollection(this, aResult);
+ pending.identityColl.becomeExplicit();
+
+ aResult._contactCompleterPending = pending;
+
+ return true;
+ },
+ onItemsAdded(aItems, aCollection) {},
+ onItemsModified(aItems, aCollection) {},
+ onItemsRemoved(aItems, aCollection) {},
+ onQueryCompleted(aCollection) {
+ // handle the initial setup case...
+ if (aCollection.data == null) {
+ // cheat and explicitly add our own contact...
+ if (
+ Gloda.myContact &&
+ !(Gloda.myContact.id in this.contactCollection._idMap)
+ ) {
+ this.contactCollection._onItemsAdded([Gloda.myContact]);
+ }
+
+ // the set of identities owned by the contacts is automatically loaded as part
+ // of the contact loading...
+ // (but only if we actually have any contacts)
+ this.identityCollection =
+ this.contactCollection.subCollections[GlodaConstants.NOUN_IDENTITY];
+
+ let contactNames = this.contactCollection.items.map(
+ c => c.name.replace(" ", "").toLowerCase() || "x"
+ );
+ // if we had no contacts, we will have no identity collection!
+ let identityMails;
+ if (this.identityCollection) {
+ identityMails = this.identityCollection.items.map(i =>
+ i.value.toLowerCase()
+ );
+ }
+
+ // The suffix tree takes two parallel lists; the first contains strings
+ // while the second contains objects that correspond to those strings.
+ // In the degenerate case where identityCollection does not exist, it will
+ // be undefined. Calling concat with an argument of undefined simply
+ // duplicates the list we called concat on, and is thus harmless. Our
+ // use of && on identityCollection allows its undefined value to be
+ // passed through to concat. identityMails will likewise be undefined.
+ this.suffixTree = new MultiSuffixTree(
+ contactNames.concat(identityMails),
+ this.contactCollection.items.concat(
+ this.identityCollection && this.identityCollection.items
+ )
+ );
+
+ return;
+ }
+
+ // handle the completion case
+ let result = aCollection.data;
+ let pending = result._contactCompleterPending;
+
+ if (--pending.pendingCount == 0) {
+ let possibleDudes = [];
+
+ let contactToThing = pending.contactToThing;
+
+ let items;
+
+ // check identities first because they are better than contacts in terms
+ // of display
+ items = pending.identityColl.items;
+ for (let iIdentity = 0; iIdentity < items.length; iIdentity++) {
+ let identity = items[iIdentity];
+ if (!(identity.contactID in contactToThing)) {
+ contactToThing[identity.contactID] = identity;
+ possibleDudes.push(identity);
+ // augment the identity with its contact's popularity
+ identity.popularity = identity.contact.popularity;
+ }
+ }
+ items = pending.contactColl.items;
+ for (let iContact = 0; iContact < items.length; iContact++) {
+ let contact = items[iContact];
+ if (!(contact.id in contactToThing)) {
+ contactToThing[contact.id] = contact;
+ possibleDudes.push(contact.identities[0]);
+ }
+ }
+
+ // sort in order of descending popularity
+ possibleDudes.sort(this._popularitySorter);
+ let rows = possibleDudes.map(
+ dude => new ResultRowSingle(dude, "text", result.searchString)
+ );
+ result.addRows(rows);
+ result.markCompleted(this);
+
+ // the collections no longer care about the result, make it clear.
+ delete pending.identityColl.data;
+ delete pending.contactColl.data;
+ // the result object no longer needs us or our data
+ delete result._contactCompleterPending;
+ }
+ },
+};
+
+/**
+ * Complete tags that are used on contacts.
+ */
+function ContactTagCompleter() {
+ FreeTagNoun.populateKnownFreeTags();
+ this._buildSuffixTree();
+ FreeTagNoun.addListener(this);
+}
+ContactTagCompleter.prototype = {
+ _buildSuffixTree() {
+ let tagNames = [],
+ tags = [];
+ for (let [tagName, tag] of Object.entries(FreeTagNoun.knownFreeTags)) {
+ tagNames.push(tagName.toLowerCase());
+ tags.push(tag);
+ }
+ this._suffixTree = new MultiSuffixTree(tagNames, tags);
+ this._suffixTreeDirty = false;
+ },
+ onFreeTagAdded(aTag) {
+ this._suffixTreeDirty = true;
+ },
+ complete(aResult, aString) {
+ // now is not the best time to do this; have onFreeTagAdded use a timer.
+ if (this._suffixTreeDirty) {
+ this._buildSuffixTree();
+ }
+
+ if (aString.length < 2) {
+ // No async mechanism that will add new rows.
+ return false;
+ }
+
+ let tags = this._suffixTree.findMatches(aString.toLowerCase());
+ let rows = [];
+ for (let tag of tags) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+ query.freeTags(tag);
+ let resRow = new ResultRowMulti(
+ GlodaConstants.NOUN_CONTACT,
+ "tag",
+ tag.name,
+ query
+ );
+ rows.push(resRow);
+ }
+ aResult.addRows(rows);
+
+ return false; // no async mechanism that will add new rows
+ },
+};
+
+/**
+ * Complete tags that are used on messages
+ */
+function MessageTagCompleter() {
+ this._buildSuffixTree();
+}
+MessageTagCompleter.prototype = {
+ _buildSuffixTree() {
+ let tagNames = [],
+ tags = [];
+ let tagArray = TagNoun.getAllTags();
+ for (let iTag = 0; iTag < tagArray.length; iTag++) {
+ let tag = tagArray[iTag];
+ tagNames.push(tag.tag.toLowerCase());
+ tags.push(tag);
+ }
+ this._suffixTree = new MultiSuffixTree(tagNames, tags);
+ this._suffixTreeDirty = false;
+ },
+ complete(aResult, aString) {
+ if (aString.length < 2) {
+ return false;
+ }
+
+ let tags = this._suffixTree.findMatches(aString.toLowerCase());
+ let rows = [];
+ for (let tag of tags) {
+ let resRow = new ResultRowSingle(tag, "tag", tag.tag, TagNoun.id);
+ rows.push(resRow);
+ }
+ aResult.addRows(rows);
+
+ return false; // no async mechanism that will add new rows
+ },
+};
+
+/**
+ * Complete with helpful hints about full-text search
+ */
+function FullTextCompleter() {}
+FullTextCompleter.prototype = {
+ complete(aResult, aSearchString) {
+ if (aSearchString.length < 4) {
+ return false;
+ }
+ // We use code very similar to that in GlodaMsgSearcher.jsm, except that we
+ // need to detect when we found phrases, as well as strip commas.
+ aSearchString = aSearchString.trim();
+ let terms = [];
+ let phraseFound = false;
+ while (aSearchString) {
+ let term = "";
+ if (aSearchString.startsWith('"')) {
+ let endIndex = aSearchString.indexOf(aSearchString[0], 1);
+ // eat the quote if it has no friend
+ if (endIndex == -1) {
+ aSearchString = aSearchString.substring(1);
+ continue;
+ }
+ phraseFound = true;
+ term = aSearchString.substring(1, endIndex).trim();
+ if (term) {
+ terms.push(term);
+ }
+ aSearchString = aSearchString.substring(endIndex + 1);
+ continue;
+ }
+
+ let spaceIndex = aSearchString.indexOf(" ");
+ if (spaceIndex == -1) {
+ terms.push(aSearchString.replace(/,/g, ""));
+ break;
+ }
+
+ term = aSearchString.substring(0, spaceIndex).replace(/,/g, "");
+ if (term) {
+ terms.push(term);
+ }
+ aSearchString = aSearchString.substring(spaceIndex + 1);
+ }
+
+ if (terms.length == 1 && !phraseFound) {
+ aResult.addRows([new ResultRowFullText(aSearchString, terms, "single")]);
+ } else {
+ aResult.addRows([new ResultRowFullText(aSearchString, terms, "all")]);
+ }
+
+ return false; // no async mechanism that will add new rows
+ },
+};
+
+function GlodaAutoComplete() {
+ this.wrappedJSObject = this;
+ try {
+ // set up our awesome globals!
+ if (Gloda === null) {
+ let loadNS = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaPublic.jsm"
+ );
+ Gloda = loadNS.Gloda;
+
+ loadNS = ChromeUtils.import("resource:///modules/gloda/GlodaUtils.jsm");
+ loadNS = ChromeUtils.import("resource:///modules/gloda/SuffixTree.jsm");
+ MultiSuffixTree = loadNS.MultiSuffixTree;
+ loadNS = ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+ TagNoun = loadNS.TagNoun;
+ loadNS = ChromeUtils.import("resource:///modules/gloda/NounFreetag.jsm");
+ FreeTagNoun = loadNS.FreeTagNoun;
+ }
+
+ this.completers = [];
+ this.curResult = null;
+
+ this.completers.push(new FullTextCompleter()); // not async.
+ this.completers.push(new ContactIdentityCompleter()); // potentially async.
+ this.completers.push(new ContactTagCompleter()); // not async.
+ this.completers.push(new MessageTagCompleter()); // not async.
+ } catch (e) {
+ console.error(e);
+ }
+}
+
+GlodaAutoComplete.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsIAutoCompleteSearch"]),
+
+ startSearch(aString, aParam, aResult, aListener) {
+ try {
+ let result = new nsAutoCompleteGlodaResult(aListener, this, aString);
+ // save this for hacky access to the search. I somewhat suspect we simply
+ // should not be using the formal autocomplete mechanism at all.
+ // Used in glodacomplete.xml.
+ this.curResult = result;
+
+ // Guard against late async results being sent.
+ this.curResult.active = true;
+
+ if (aParam == "global") {
+ for (let completer of this.completers) {
+ // they will return true if they have something pending.
+ if (completer.complete(result, aString)) {
+ result.markPending(completer);
+ }
+ }
+ // } else {
+ // It'd be nice to do autocomplete in the quicksearch modes based
+ // on the specific values for that mode in the current view.
+ // But we don't do that yet.
+ }
+
+ result.announceYourself();
+ } catch (e) {
+ console.error(e);
+ }
+ },
+
+ stopSearch() {
+ this.curResult.active = false;
+ },
+};
diff --git a/comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm b/comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm
new file mode 100644
index 0000000000..0ee1737f16
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm
@@ -0,0 +1,501 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var EXPORTED_SYMBOLS = ["MimeMessageEmitter"];
+
+var kStateUnknown = 0;
+var kStateInHeaders = 1;
+var kStateInBody = 2;
+var kStateInAttachment = 3;
+
+/**
+ * When the saneBodySize flag is active, limit body parts to at most this many
+ * bytes. See |MsgHdrToMimeMessage| for more information on the flag.
+ *
+ * The choice of 20k was made on the very scientific basis of running a query
+ * against my indexed e-mail and finding the point where these things taper
+ * off. I chose 20 because things had tapered off pretty firmly by 16, so
+ * 20 gave it some space and it was also the end of a mini-plateau.
+ */
+var MAX_SANE_BODY_PART_SIZE = 20 * 1024;
+
+/**
+ * Custom nsIMimeEmitter to build a sub-optimal javascript representation of a
+ * MIME message. The intent is that a better mechanism than this is evolved to
+ * provide a javascript-accessible representation of the message.
+ *
+ * Processing occurs in two passes. During the first pass, libmime is parsing
+ * the stream it is receiving, and generating header and body events for all
+ * MimeMessage instances it encounters. This provides us with the knowledge
+ * of each nested message in addition to the top level message, their headers
+ * and sort-of their bodies. The sort-of is that we may get more than
+ * would normally be displayed in cases involving multipart/alternatives.
+ * We have augmented libmime to have a notify_nested_options parameter which
+ * is enabled when we are the consumer. This option causes MimeMultipart to
+ * always emit a content-type header (via addHeaderField), defaulting to
+ * text/plain when an explicit value is not present. Additionally,
+ * addHeaderField is called with a custom "x-jsemitter-part-path" header with
+ * the value being the part path (ex: 1.2.2). Having the part path greatly
+ * simplifies our life for building the part hierarchy.
+ * During the second pass, the libmime object model is traversed, generating
+ * attachment notifications for all leaf nodes. From our perspective, this
+ * means file attachments and embedded messages (message/rfc822). We use this
+ * pass to create the attachment objects proper, which we then substitute into
+ * the part tree we have already built.
+ */
+function MimeMessageEmitter() {
+ this._mimeMsg = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+ );
+ this._utils = ChromeUtils.import("resource:///modules/gloda/GlodaUtils.jsm");
+
+ this._url = null;
+ this._partRE = this._utils.GlodaUtils.PART_RE;
+
+ this._outputListener = null;
+
+ this._curPart = null;
+ this._curAttachment = null;
+ this._partMap = {};
+ this._bogusPartTranslation = {};
+
+ this._state = kStateUnknown;
+
+ this._writeBody = false;
+}
+
+var deathToNewlines = /\n/g;
+
+MimeMessageEmitter.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsIMimeEmitter"]),
+
+ initialize(aUrl, aChannel, aFormat) {
+ this._url = aUrl;
+ this._curPart = new this._mimeMsg.MimeMessage();
+ // the partName is intentionally ""! not a place-holder!
+ this._curPart.partName = "";
+ this._curAttachment = "";
+ this._partMap[""] = this._curPart;
+
+ // pull options across...
+ let options = this._mimeMsg.MsgHdrToMimeMessage.OPTION_TUNNEL;
+ this._saneBodySize =
+ options && "saneBodySize" in options ? options.saneBodySize : false;
+
+ this._mimeMsg.MsgHdrToMimeMessage.RESULT_RENDEVOUZ[aUrl.spec] =
+ this._curPart;
+ },
+
+ complete() {
+ this._url = null;
+
+ this._outputListener = null;
+
+ this._curPart = null;
+ this._curAttachment = null;
+ this._partMap = null;
+ this._bogusPartTranslation = null;
+ },
+
+ setPipe(aInputStream, aOutputStream) {
+ // we do not care about these
+ },
+ set outputListener(aListener) {
+ this._outputListener = aListener;
+ },
+ get outputListener() {
+ return this._outputListener;
+ },
+
+ _stripParams(aValue) {
+ let indexSemi = aValue.indexOf(";");
+ if (indexSemi >= 0) {
+ aValue = aValue.substring(0, indexSemi);
+ }
+ return aValue;
+ },
+
+ _beginPayload(aContentType) {
+ let contentTypeNoParams = this._stripParams(aContentType).toLowerCase();
+ if (
+ contentTypeNoParams == "text/plain" ||
+ contentTypeNoParams == "text/html" ||
+ contentTypeNoParams == "text/enriched"
+ ) {
+ this._curPart = new this._mimeMsg.MimeBody(contentTypeNoParams);
+ this._writeBody = true;
+ } else if (contentTypeNoParams == "message/rfc822") {
+ // startHeader will take care of this
+ this._curPart = new this._mimeMsg.MimeMessage();
+ // do not fall through into the content-type setting case; this
+ // content-type needs to get clobbered by the actual content-type of
+ // the enclosed message.
+ this._writeBody = false;
+ return;
+ } else if (contentTypeNoParams.startsWith("multipart/")) {
+ // this is going to fall-down with TNEF encapsulation and such, we really
+ // need to just be consuming the object model.
+ this._curPart = new this._mimeMsg.MimeContainer(contentTypeNoParams);
+ this._writeBody = false;
+ } else {
+ this._curPart = new this._mimeMsg.MimeUnknown(contentTypeNoParams);
+ this._writeBody = false;
+ }
+ // put the full content-type in the headers and normalize out any newlines
+ this._curPart.headers["content-type"] = [
+ aContentType.replace(deathToNewlines, ""),
+ ];
+ },
+
+ // ----- Header Routines
+ /**
+ * StartHeader provides the base case for our processing. It is the first
+ * notification we receive when processing begins on the outer rfc822
+ * message. We do not receive an x-jsemitter-part-path notification for the
+ * message, but the aIsRootMailHeader tells us everything we need to know.
+ * (Or it would if we hadn't already set everything up in initialize.)
+ *
+ * When dealing with nested RFC822 messages, we will receive the
+ * addHeaderFields for the content-type and the x-jsemitter-part-path
+ * prior to the startHeader call. This is because the MIME multipart
+ * container that holds the message is the one generating the notification.
+ * For that reason, we do not process them here, but instead in
+ * addHeaderField and _beginPayload.
+ *
+ * We do need to track our state for addHeaderField's benefit though.
+ */
+ startHeader(aIsRootMailHeader, aIsHeaderOnly, aMsgID, aOutputCharset) {
+ this._state = kStateInHeaders;
+ },
+ /**
+ * Receives a header field name and value for the current MIME part, which
+ * can be an rfc822/message or one of its sub-parts.
+ *
+ * The emitter architecture treats rfc822/messages as special because it was
+ * architected around presentation. In that case, the organizing concept
+ * is the single top-level rfc822/message. (It did not 'look into' nested
+ * messages in most cases.)
+ * As a result the interface is biased towards being 'in the headers' or
+ * 'in the body', corresponding to calls to startHeader and startBody,
+ * respectively.
+ * This information is interesting to us because the message itself is an
+ * odd pseudo-mime-part. Because it has only one child, its headers are,
+ * in a way, its payload, but they also serve as the description of its
+ * MIME child part. This introduces a complication in that we see the
+ * content-type for the message's "body" part before we actually see any
+ * of the headers. To deal with this, we punt on the construction of the
+ * body part to the call to startBody() and predicate our logic on the
+ * _state field.
+ */
+ addHeaderField(aField, aValue) {
+ if (this._state == kStateInBody) {
+ aField = aField.toLowerCase();
+ if (aField == "content-type") {
+ this._beginPayload(aValue, true);
+ } else if (aField == "x-jsemitter-part-path") {
+ // This is either naming the current part, or referring to an already
+ // existing part (in the case of multipart/related on its second pass).
+ // As such, check if the name already exists in our part map.
+ let partName = this._stripParams(aValue);
+ // if it does, then make the already-existing part at that path current
+ if (partName in this._partMap) {
+ this._curPart = this._partMap[partName];
+ this._writeBody = "body" in this._curPart;
+ } else {
+ // otherwise, name the part we are holding onto and place it.
+ this._curPart.partName = partName;
+ this._placePart(this._curPart);
+ }
+ } else if (aField == "x-jsemitter-encrypted" && aValue == "1") {
+ this._curPart.isEncrypted = true;
+ }
+ // There is no other field to be emitted in the body case other than the
+ // ones we just handled. (They were explicitly added for the js
+ // emitter.)
+ } else if (this._state == kStateInHeaders) {
+ let lowerField = aField.toLowerCase();
+ if (lowerField in this._curPart.headers) {
+ this._curPart.headers[lowerField].push(aValue);
+ } else {
+ this._curPart.headers[lowerField] = [aValue];
+ }
+ }
+ },
+  // The following nsIMimeEmitter callbacks require no work from us; they are
+  // kept as explicit no-ops so the interface contract is visibly satisfied.
+  addAllHeaders(aAllHeaders, aHeaderSize) {
+    // This is called by the parsing code after the calls to AddHeaderField (or
+    // AddAttachmentField if the part is an attachment), and seems to serve
+    // a specialized, quasi-redundant purpose. (nsMimeBaseEmitter creates a
+    // nsIMimeHeaders instance and hands it to the nsIMsgMailNewsUrl.)
+    // nop
+  },
+  writeHTMLHeaders(aName) {
+    // It doesn't look like this should even be part of the interface; I think
+    // only the nsMimeHtmlDisplayEmitter::EndHeader call calls this signature.
+    // nop
+  },
+  // End of a part's header run; our state transition happens in startBody.
+  endHeader(aName) {},
+  updateCharacterSet(aCharset) {
+    // we do not need to worry about this. it turns out this notification is
+    // exclusively for the benefit of the UI. libmime, believe it or not,
+    // is actually doing the right thing under the hood and handles all the
+    // encoding issues for us.
+    // so, get ready for the only time you will ever hear this:
+    // three cheers for libmime!
+  },
+
+ /**
+ * Place a part in its proper location; requires the parent to be present.
+ * However, we no longer require in-order addition of children. (This is
+ * currently a hedge against extension code doing wacky things. Our
+ * motivating use-case is multipart/related which actually does generate
+ * everything in order on its first pass, but has a wacky second pass. It
+ * does not actually trigger the out-of-order code because we have
+ * augmented the libmime code to generate its x-jsemitter-part-path info
+ * a second time, in which case we reuse the part we already created.)
+ *
+ * @param aPart Part to place.
+ */
+ _placePart(aPart) {
+ let partName = aPart.partName;
+ this._partMap[partName] = aPart;
+
+ let [storagePartName, , parentPart] = this._findOrCreateParent(partName);
+ let lastDotIndex = storagePartName.lastIndexOf(".");
+ if (parentPart !== undefined) {
+ let indexInParent =
+ parseInt(storagePartName.substring(lastDotIndex + 1)) - 1;
+ // handle out-of-order notification...
+ if (indexInParent < parentPart.parts.length) {
+ parentPart.parts[indexInParent] = aPart;
+ } else {
+ while (indexInParent > parentPart.parts.length) {
+ parentPart.parts.push(null);
+ }
+ parentPart.parts.push(aPart);
+ }
+ }
+ },
+
+ /**
+ * In case the MIME structure is wrong, (i.e. we have no parent to add the
+ * current part to), this function recursively makes sure we create the
+ * missing bits in the hierarchy.
+ * What happens in the case of encrypted emails (mimecryp.cpp):
+ * 1. is the message
+ * 1.1 doesn't exist
+ * 1.1.1 is the multipart/alternative that holds the text/plain and text/html
+ * 1.1.1.1 is text/plain
+ * 1.1.1.2 is text/html
+ * This function fills the missing bits.
+ */
+ _findOrCreateParent(aPartName) {
+ let partName = aPartName + "";
+ let parentName = partName.substring(0, partName.lastIndexOf("."));
+ let parentPart;
+ if (parentName in this._partMap) {
+ parentPart = this._partMap[parentName];
+ let lastDotIndex = partName.lastIndexOf(".");
+ let indexInParent = parseInt(partName.substring(lastDotIndex + 1)) - 1;
+ if (
+ "parts" in parentPart &&
+ indexInParent == parentPart.parts.length - 1
+ ) {
+ return [partName, parentName, parentPart];
+ }
+ return this._findAnotherContainer(aPartName);
+ }
+
+ // Find the grandparent
+ let [, , grandParentPart] = this._findOrCreateParent(parentName);
+ // Create the missing part.
+ parentPart = new this._mimeMsg.MimeContainer("multipart/fake-container");
+ // Add it to the grandparent, remember we added it in the hierarchy.
+ grandParentPart.parts.push(parentPart);
+ this._partMap[parentName] = parentPart;
+ return [partName, parentName, parentPart];
+ },
+
+ /**
+ * In the case of UUEncoded attachments, libmime tells us about the attachment
+ * as a child of a MimeBody. This obviously doesn't make us happy, so in case
+ * libmime wants us to attach an attachment to something that's not a
+ * container, we walk up the mime tree to find a suitable container to hold
+ * the attachment.
+ * The results are cached so that they're consistent across calls — this
+ * ensures the call to _replacePart works fine.
+ */
+  _findAnotherContainer(aPartName) {
+    // Return the cached answer so repeated queries stay consistent (required
+    // by _replacePart, per the doc comment above).
+    if (aPartName in this._bogusPartTranslation) {
+      return this._bogusPartTranslation[aPartName];
+    }
+
+    let parentName = aPartName + "";
+    let parentPart;
+    // Walk up the dotted-name hierarchy until we reach an ancestor that can
+    // hold children (has a `parts` array), or run out of name segments.
+    // Assumes the root message is registered in _partMap -- TODO confirm.
+    while (!(parentPart && "parts" in parentPart) && parentName.length) {
+      parentName = parentName.substring(0, parentName.lastIndexOf("."));
+      parentPart = this._partMap[parentName];
+    }
+    // Synthesize a fallback 1-based child name at the end of that container.
+    let childIndex = parentPart.parts.length;
+    let fallbackPartName =
+      (parentName ? parentName + "." : "") + (childIndex + 1);
+    return (this._bogusPartTranslation[aPartName] = [
+      fallbackPartName,
+      parentName,
+      parentPart,
+    ]);
+  },
+
+ /**
+ * In the case of attachments, we need to replace an existing part with a
+ * more representative part...
+ *
+ * @param aPart Part to place.
+ */
+ _replacePart(aPart) {
+ // _partMap always maps the libmime names to parts
+ let partName = aPart.partName;
+ this._partMap[partName] = aPart;
+
+ let [storagePartName, , parentPart] = this._findOrCreateParent(partName);
+
+ let childNamePart = storagePartName.substring(
+ storagePartName.lastIndexOf(".") + 1
+ );
+ let childIndex = parseInt(childNamePart) - 1;
+
+ // The attachment has been encapsulated properly in a MIME part (most of
+ // the cases). This does not hold for UUencoded-parts for instance (see
+ // test_mime_attachments_size.js for instance).
+ if (childIndex < parentPart.parts.length) {
+ let oldPart = parentPart.parts[childIndex];
+ parentPart.parts[childIndex] = aPart;
+ // copy over information from the original part
+ aPart.parts = oldPart.parts;
+ aPart.headers = oldPart.headers;
+ aPart.isEncrypted = oldPart.isEncrypted;
+ } else {
+ parentPart.parts[childIndex] = aPart;
+ }
+ },
+
+ // ----- Attachment Routines
+ // The attachment processing happens after the initial streaming phase (during
+ // which time we receive the messages, both bodies and headers). Our caller
+ // traverses the libmime child object hierarchy, emitting an attachment for
+ // each leaf object or sub-message.
+  startAttachment(aName, aContentType, aUrl, aIsExternalAttachment) {
+    this._state = kStateInAttachment;
+
+    // we need to strip our magic flags from the URL; this regexp matches all
+    // the specific flags that the jsmimeemitter understands (we abuse the URL
+    // parameters to pass information all the way to here)
+    aUrl = aUrl.replace(
+      /((header=filter|emitter=js|examineEncryptedParts=(true|false)))&?/g,
+      ""
+    );
+    // the url should contain a part= piece that tells us the part name, which
+    // we then use to figure out where to place that part if it's a real
+    // attachment.
+    let partMatch, partName;
+    if (aUrl.startsWith("http") || aUrl.startsWith("file")) {
+      // if we have a remote url, unlike non external mail part urls, it may also
+      // contain query strings starting with ?; PART_RE does not handle this.
+      partMatch = aUrl.match(/[?&]part=[^&]+$/);
+      partMatch = partMatch && partMatch[0];
+      partName = partMatch && partMatch.split("part=")[1];
+    } else {
+      partMatch = this._partRE.exec(aUrl);
+      partName = partMatch && partMatch[1];
+    }
+    // May remain null/undefined if no part name was found; addAttachmentField
+    // guards on membership in _partMap before using it.
+    this._curAttachment = partName;
+
+    if (aContentType == "message/rfc822") {
+      // we want to offer extension authors a way to see attachments as the
+      // message readers sees them, which means attaching an extra url property
+      // to the part that was already created before
+      if (partName) {
+        // we disguise this MimeMessage into something that can be used as a
+        // MimeAttachment so that it is transparent for the user code
+        this._partMap[partName].url = aUrl;
+        this._partMap[partName].isExternal = aIsExternalAttachment;
+        this._partMap[partName].name = aName;
+      }
+    } else if (partName) {
+      let part = new this._mimeMsg.MimeMessageAttachment(
+        partName,
+        aName,
+        aContentType,
+        aUrl,
+        aIsExternalAttachment
+      );
+      // replace the existing part with the attachment...
+      this._replacePart(part);
+    }
+  },
+ addAttachmentField(aField, aValue) {
+ // What gets passed in here is X-Mozilla-PartURL with a value that
+ // is completely identical to aUrl from the call to startAttachment.
+ // (it's the same variable they use in each case). As such, there is
+ // no reason to handle that here.
+ // However, we also pass information about the size of the attachment, and
+ // that we want to handle
+ if (
+ aField == "X-Mozilla-PartSize" &&
+ this._curAttachment in this._partMap
+ ) {
+ this._partMap[this._curAttachment].size = parseInt(aValue);
+ }
+ },
+  endAttachment() {
+    // don't need to do anything here, since we don't care about the headers.
+  },
+  endAllAttachments() {
+    // nop; attachment placement already happened in startAttachment.
+  },
+
+ // ----- Body Routines
+ /**
+ * We don't get an x-jsemitter-part-path for the message body, and we ignored
+ * our body part's content-type in addHeaderField, so this serves as our
+ * notice to set up the part (giving it a name).
+ */
+  startBody(aIsBodyOnly, aMsgID, aOutCharset) {
+    this._state = kStateInBody;
+
+    // The body is an implicit child of the current part: "1" for the
+    // top-level message, otherwise "<parent>.1".
+    let subPartName =
+      this._curPart.partName == "" ? "1" : this._curPart.partName + ".1";
+    // Use the content-type whose handling we deferred in addHeaderField,
+    // defaulting to text/plain when it was never seen.
+    this._beginPayload(this._curPart.get("content-type", "text/plain"));
+    this._curPart.partName = subPartName;
+    this._placePart(this._curPart);
+  },
+
+ /**
+ * Write to the body. When saneBodySize is active, we stop adding if we are
+ * already at the limit for this body part.
+ */
+ writeBody(aBuf, aSize, aOutAmountWritten) {
+ if (
+ this._writeBody &&
+ (!this._saneBodySize || this._curPart.size < MAX_SANE_BODY_PART_SIZE)
+ ) {
+ this._curPart.appendBody(aBuf);
+ }
+ },
+
+ endBody() {},
+
+  // ----- Generic Write
+  // (binary data writing)
+  write(aBuf, aSize, aOutAmountWritten) {
+    // we don't actually ever get called because we don't have the attachment
+    // binary payloads pass through us, but we do the following just in case
+    // we did get called (otherwise the caller gets mad and throws exceptions).
+    aOutAmountWritten.value = aSize;
+  },
+
+  // (string writing) -- delegates to write() so the out-param contract above
+  // is honored in one place.
+  utilityWrite(aBuf) {
+    this.write(aBuf, aBuf.length, {});
+  },
+};
diff --git a/comm/mailnews/db/gloda/components/components.conf b/comm/mailnews/db/gloda/components/components.conf
new file mode 100644
index 0000000000..52d2739bcd
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/components.conf
@@ -0,0 +1,25 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# XPCOM registrations for gloda's JS components.
+Classes = [
+    # Autocomplete backend for the "gloda" search (global search bar).
+    {
+        "cid": "{3bbe4d77-3f70-4252-9500-bc00c26f476d}",
+        "contract_ids": ["@mozilla.org/autocomplete/search;1?name=gloda"],
+        "jsm": "resource:///modules/GlodaAutoComplete.jsm",
+        "constructor": "GlodaAutoComplete",
+    },
+    # JS MIME emitter; registered in the mime-emitter category so libmime
+    # selects it for the application/x-js-mime-message output type.
+    {
+        "cid": "{8cddbbbc-7ced-46b0-a936-8cddd1928c24}",
+        "contract_ids": [
+            "@mozilla.org/gloda/jsmimeemitter;1",
+        ],
+        "jsm": "resource:///modules/MimeMessageEmitter.jsm",
+        "constructor": "MimeMessageEmitter",
+        "categories": {
+            "mime-emitter": "@mozilla.org/messenger/mimeemitter;1?type=application/x-js-mime-message"
+        },
+    },
+]
diff --git a/comm/mailnews/db/gloda/components/moz.build b/comm/mailnews/db/gloda/components/moz.build
new file mode 100644
index 0000000000..c2f151a815
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/moz.build
@@ -0,0 +1,13 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# JS modules shipped from this directory (packaged under resource:///modules).
+EXTRA_JS_MODULES += [
+    "GlodaAutoComplete.jsm",
+    "MimeMessageEmitter.jsm",
+]
+
+# Component registration manifest for the classes defined above.
+XPCOM_MANIFESTS += [
+    "components.conf",
+]
diff --git a/comm/mailnews/db/gloda/content/autocomplete-richlistitem.js b/comm/mailnews/db/gloda/content/autocomplete-richlistitem.js
new file mode 100644
index 0000000000..916c6ef5d5
--- /dev/null
+++ b/comm/mailnews/db/gloda/content/autocomplete-richlistitem.js
@@ -0,0 +1,644 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* global MozXULElement, MozElements */
+
+// Wrap in a block to prevent leaking to window scope.
+{
+  // Localized strings shared by all gloda autocomplete richlist items.
+  const gGlodaCompleteStrings = Services.strings.createBundle(
+    "chrome://messenger/locale/glodaComplete.properties"
+  );
+
+ /**
+ * The MozGlodacompleteBaseRichlistitem widget is the
+ * abstract base class for all the gloda autocomplete items.
+ *
+ * @abstract
+ * @augments {MozElements.MozRichlistitem}
+ */
+  class MozGlodacompleteBaseRichlistitem extends MozElements.MozRichlistitem {
+    connectedCallback() {
+      if (this.delayConnectedCallback()) {
+        return;
+      }
+      // Lazily populated from prefs by the boundaryCutoff getter below.
+      this._boundaryCutoff = null;
+    }
+
+    // Maximum number of characters scanned when locating match boundaries.
+    get boundaryCutoff() {
+      if (!this._boundaryCutoff) {
+        this._boundaryCutoff = Services.prefs.getIntPref(
+          "toolkit.autocomplete.richBoundaryCutoff"
+        );
+      }
+      return this._boundaryCutoff;
+    }
+
+    /**
+     * Compute the indices in aText that alternate between matching and
+     * non-matching regions for the given search tokens. The implicit leading
+     * 0 is dropped; _setUpDescription consumes the result.
+     */
+    _getBoundaryIndices(aText, aSearchTokens) {
+      // Short circuit for empty search ([""] == "")
+      // NOTE: deliberate loose equality; the array [""] coerces to "".
+      if (aSearchTokens == "") {
+        return [0, aText.length];
+      }
+
+      // Find which regions of text match the search terms.
+      let regions = [];
+      for (let search of aSearchTokens) {
+        let matchIndex;
+        let startIndex = 0;
+        let searchLen = search.length;
+
+        // Find all matches of the search terms, but stop early for perf.
+        let lowerText = aText.toLowerCase().substr(0, this.boundaryCutoff);
+        while ((matchIndex = lowerText.indexOf(search, startIndex)) >= 0) {
+          // Start the next search from where this one finished.
+          startIndex = matchIndex + searchLen;
+          regions.push([matchIndex, startIndex]);
+        }
+      }
+
+      // Sort the regions by start position then end position.
+      regions = regions.sort(function (a, b) {
+        let start = a[0] - b[0];
+        return start == 0 ? a[1] - b[1] : start;
+      });
+
+      // Generate the boundary indices from each region.
+      let start = 0;
+      let end = 0;
+      let boundaries = [];
+      for (let i = 0; i < regions.length; i++) {
+        // We have a new boundary if the start of the next is past the end.
+        let region = regions[i];
+        if (region[0] > end) {
+          // First index is the beginning of match.
+          boundaries.push(start);
+          // Second index is the beginning of non-match.
+          boundaries.push(end);
+
+          // Track the new region now that we've stored the previous one.
+          start = region[0];
+        }
+
+        // Push back the end index for the current or new region.
+        end = Math.max(end, region[1]);
+      }
+
+      // Add the last region.
+      boundaries.push(start);
+      boundaries.push(end);
+
+      // Put on the end boundary if necessary.
+      if (end < aText.length) {
+        boundaries.push(aText.length);
+      }
+
+      // Skip the first item because it's always 0.
+      return boundaries.slice(1);
+    }
+
+    // Lowercase and whitespace-split the search string into tokens.
+    _getSearchTokens(aSearch) {
+      let search = aSearch.toLowerCase();
+      return search.split(/\s+/);
+    }
+
+    /**
+     * True when aText contains characters (U+0600..U+109F) whose scripts are
+     * likely to use ligatures broken by the main emphasis styling.
+     */
+    _needsAlternateEmphasis(aText) {
+      for (let i = aText.length - 1; i >= 0; i--) {
+        let charCode = aText.charCodeAt(i);
+        // Arabic, Syriac, Indic languages are likely to have ligatures
+        // that are broken when using the main emphasis styling.
+        if (0x0600 <= charCode && charCode <= 0x109f) {
+          return true;
+        }
+      }
+
+      return false;
+    }
+
+    /**
+     * Rebuild aDescriptionElement's content from aText, wrapping the spans
+     * that match the current search (the "text" attribute) in emphasis spans.
+     */
+    _setUpDescription(aDescriptionElement, aText) {
+      // Get rid of all previous text.
+      while (aDescriptionElement.hasChildNodes()) {
+        aDescriptionElement.lastChild.remove();
+      }
+
+      // Get the indices that separate match and non-match text.
+      let search = this.getAttribute("text");
+      let tokens = this._getSearchTokens(search);
+      let indices = this._getBoundaryIndices(aText, tokens);
+
+      // If we're searching for something that needs alternate emphasis,
+      // we'll need to check the text that we match.
+      let checkAlt = this._needsAlternateEmphasis(search);
+
+      let next;
+      let start = 0;
+      let len = indices.length;
+      // Even indexed boundaries are matches, so skip the 0th if it's empty.
+      for (let i = indices[0] == 0 ? 1 : 0; i < len; i++) {
+        next = indices[i];
+        let text = aText.substr(start, next - start);
+        start = next;
+
+        if (i % 2 == 0) {
+          // Emphasize the text for even indices
+          let span = aDescriptionElement.appendChild(
+            document.createElementNS("http://www.w3.org/1999/xhtml", "span")
+          );
+          span.className =
+            checkAlt && this._needsAlternateEmphasis(text)
+              ? "ac-emphasize-alt"
+              : "ac-emphasize-text";
+          span.textContent = text;
+        } else {
+          // Otherwise, it's plain text
+          aDescriptionElement.appendChild(document.createTextNode(text));
+        }
+      }
+    }
+
+    /**
+     * Measure aParentBox's children against its width; when they overflow,
+     * show aEllipsis and set a tooltip joining the children's text.
+     */
+    _setUpOverflow(aParentBox, aEllipsis) {
+      // Hide the ellipsis in case there's just enough to not underflow.
+      aEllipsis.hidden = true;
+
+      // Start with the parent's width and subtract off its children.
+      let tooltip = [];
+      let children = aParentBox.children;
+      let widthDiff = aParentBox.getBoundingClientRect().width;
+
+      for (let i = 0; i < children.length; i++) {
+        // Only consider a child if it actually takes up space.
+        let childWidth = children[i].getBoundingClientRect().width;
+        if (childWidth > 0) {
+          // Subtract a little less to account for subpixel rounding.
+          widthDiff -= childWidth - 0.5;
+
+          // Add to the tooltip if it's not hidden and has text.
+          let childText = children[i].textContent;
+          if (childText) {
+            tooltip.push(childText);
+          }
+        }
+      }
+
+      // If the children take up more space than the parent.. overflow!
+      if (widthDiff < 0) {
+        // Re-show the ellipsis now that we know it's needed.
+        aEllipsis.hidden = false;
+
+        // Separate text components with a ndash --
+        aParentBox.tooltipText = tooltip.join(" \u2013 ");
+      }
+    }
+
+    // Underflow handler wired from subclass markup (onunderflow=...).
+    _doUnderflow(aName) {
+      // Hide the ellipsis right when we know we're underflowing instead of
+      // waiting for the timeout to trigger the _setUpOverflow calculations.
+      this[aName + "Box"].tooltipText = "";
+      this[aName + "OverflowEllipsis"].hidden = true;
+    }
+  }
+
+  MozXULElement.implementCustomInterface(MozGlodacompleteBaseRichlistitem, [
+    Ci.nsIDOMXULSelectControlItemElement,
+  ]);
+
+ /**
+ * The MozGlodaContactChunkRichlistitem widget displays an autocomplete item with
+ * contact chunk: e.g. image, name and description of the contact.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+  class MozGlodaContactChunkRichlistitem extends MozGlodacompleteBaseRichlistitem {
+    static get inheritedAttributes() {
+      return {
+        "description.ac-comment": "selected",
+        "label.ac-comment": "selected",
+        "description.ac-url-text": "selected",
+        "label.ac-url-text": "selected",
+      };
+    }
+
+    connectedCallback() {
+      super.connectedCallback();
+      if (this.delayConnectedCallback() || this.hasChildNodes()) {
+        return;
+      }
+      this.setAttribute("is", "gloda-contact-chunk-richlistitem");
+      // Two stacked rows: contact name on top, identity (address) below.
+      this.appendChild(
+        MozXULElement.parseXULToFragment(`
+        <vbox>
+          <hbox>
+            <hbox class="ac-title"
+                  flex="1"
+                  onunderflow="_doUnderflow('_name');">
+              <description class="ac-normal-text ac-comment"></description>
+            </hbox>
+            <label class="ac-ellipsis-after ac-comment"
+                  hidden="true"></label>
+          </hbox>
+          <hbox>
+            <hbox class="ac-url"
+                  flex="1"
+                  onunderflow="_doUnderflow('_identity');">
+              <description class="ac-normal-text ac-url-text"></description>
+            </hbox>
+            <label class="ac-ellipsis-after ac-url-text"
+                  hidden="true"></label>
+          </hbox>
+        </vbox>
+        `)
+      );
+
+      // Locale-appropriate ellipsis, defaulting to U+2026.
+      let ellipsis = "\u2026";
+      try {
+        ellipsis = Services.prefs.getComplexValue(
+          "intl.ellipsis",
+          Ci.nsIPrefLocalizedString
+        ).data;
+      } catch (ex) {
+        // Do nothing.. we already have a default.
+      }
+
+      // Cache sub-elements; the *Box/*OverflowEllipsis names are relied on by
+      // the base class _doUnderflow handler.
+      this._identityOverflowEllipsis = this.querySelector("label.ac-url-text");
+      this._nameOverflowEllipsis = this.querySelector("label.ac-comment");
+
+      this._identityOverflowEllipsis.value = ellipsis;
+      this._nameOverflowEllipsis.value = ellipsis;
+
+      this._identityBox = this.querySelector(".ac-url");
+      this._identity = this.querySelector("description.ac-url-text");
+
+      this._nameBox = this.querySelector(".ac-title");
+      this._name = this.querySelector("description.ac-comment");
+
+      this._adjustAcItem();
+
+      this.initializeAttributeInheritance();
+    }
+
+    get label() {
+      let identity = this.obj;
+      return identity.accessibleLabel;
+    }
+
+    _adjustAcItem() {
+      let contact = this.obj;
+
+      if (contact == null) {
+        return;
+      }
+
+      // Display the contact's first identity -- assumes at least one exists.
+      let identity = contact.identities[0];
+
+      // Emphasize the matching search terms for the description.
+      this._setUpDescription(this._name, contact.name);
+      this._setUpDescription(this._identity, identity.value);
+
+      // Set up overflow on a timeout because the contents of the box
+      // might not have a width yet even though we just changed them.
+      setTimeout(
+        this._setUpOverflow,
+        0,
+        this._nameBox,
+        this._nameOverflowEllipsis
+      );
+      setTimeout(
+        this._setUpOverflow,
+        0,
+        this._identityBox,
+        this._identityOverflowEllipsis
+      );
+    }
+  }
+
+  customElements.define(
+    "gloda-contact-chunk-richlistitem",
+    MozGlodaContactChunkRichlistitem,
+    {
+      extends: "richlistitem",
+    }
+  );
+
+ /**
+ * The MozGlodaFulltextAllRichlistitem widget displays an autocomplete full text of
+ * all the items: e.g. full text explanation of the item.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+ class MozGlodaFulltextAllRichlistitem extends MozGlodacompleteBaseRichlistitem {
+ connectedCallback() {
+ super.connectedCallback();
+ if (this.delayConnectedCallback() || this.hasChildNodes()) {
+ return;
+ }
+ this.setAttribute("is", "gloda-fulltext-all-richlistitem");
+ this._explanation = document.createXULElement("description");
+ this._explanation.classList.add("explanation");
+ let label = gGlodaCompleteStrings.GetStringFromName(
+ "glodaComplete.messagesMentioningMany.label"
+ );
+ this._explanation.setAttribute(
+ "value",
+ label.replace("#1", this.row.words.join(", "))
+ );
+ this.appendChild(this._explanation);
+ }
+
+ get label() {
+ return "full text search: " + this.row.item; // what is this for? l10n?
+ }
+ }
+
+ MozXULElement.implementCustomInterface(MozGlodaFulltextAllRichlistitem, [
+ Ci.nsIDOMXULSelectControlItemElement,
+ ]);
+
+ customElements.define(
+ "gloda-fulltext-all-richlistitem",
+ MozGlodaFulltextAllRichlistitem,
+ {
+ extends: "richlistitem",
+ }
+ );
+
+ /**
+ * The MozGlodaFulltextAllRichlistitem widget displays an autocomplete full text
+ * of single item: e.g. full text explanation of the item.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+ class MozGlodaFulltextSingleRichlistitem extends MozGlodacompleteBaseRichlistitem {
+ connectedCallback() {
+ super.connectedCallback();
+ if (this.delayConnectedCallback() || this.hasChildNodes()) {
+ return;
+ }
+ this.setAttribute("is", "gloda-fulltext-single-richlistitem");
+ this._explanation = document.createXULElement("description");
+ this._explanation.classList.add("explanation", "gloda-fulltext-single");
+ this._parameters = document.createXULElement("description");
+
+ this.appendChild(this._explanation);
+ this.appendChild(this._parameters);
+
+ let label = gGlodaCompleteStrings.GetStringFromName(
+ "glodaComplete.messagesMentioning.label"
+ );
+ this._explanation.setAttribute(
+ "value",
+ label.replace("#1", this.row.item)
+ );
+ }
+
+ get label() {
+ return "full text search: " + this.row.item;
+ }
+ }
+
+ MozXULElement.implementCustomInterface(MozGlodaFulltextSingleRichlistitem, [
+ Ci.nsIDOMXULSelectControlItemElement,
+ ]);
+
+ customElements.define(
+ "gloda-fulltext-single-richlistitem",
+ MozGlodaFulltextSingleRichlistitem,
+ {
+ extends: "richlistitem",
+ }
+ );
+
+ /**
+ * The MozGlodaMultiRichlistitem widget displays an autocomplete description of multiple
+ * type items: e.g. explanation of the items.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+  class MozGlodaMultiRichlistitem extends MozGlodacompleteBaseRichlistitem {
+    connectedCallback() {
+      super.connectedCallback();
+      if (this.delayConnectedCallback() || this.hasChildNodes()) {
+        return;
+      }
+      this.setAttribute("is", "gloda-multi-richlistitem");
+      this._explanation = document.createXULElement("description");
+      this._identityHolder = document.createXULElement("hbox");
+      this._identityHolder.setAttribute("flex", "1");
+
+      this.appendChild(this._explanation);
+      this.appendChild(this._identityHolder);
+      this._adjustAcItem();
+    }
+
+    get label() {
+      return this._explanation.value;
+    }
+
+    /**
+     * Append a richlistitem rendering aObj into the identity holder.
+     * NOTE(review): setting a "type" attribute looks like legacy XBL-style
+     * binding selection -- confirm it still instantiates the chunk element.
+     */
+    renderItem(aObj) {
+      let node = document.createXULElement("richlistitem");
+
+      node.obj = aObj;
+      node.setAttribute(
+        "type",
+        "gloda-" + this.row.nounDef.name + "-chunk-richlistitem"
+      );
+
+      this._identityHolder.appendChild(node);
+    }
+
+    // Rebuild the explanation text and re-render the row's collection items;
+    // registers this element as the row's renderer for later additions.
+    _adjustAcItem() {
+      // clear out any lingering children.
+      while (this._identityHolder.hasChildNodes()) {
+        this._identityHolder.lastChild.remove();
+      }
+
+      let row = this.row;
+      if (row == null) {
+        return;
+      }
+
+      this._explanation.value =
+        row.nounDef.name + "s " + row.criteriaType + "ed " + row.criteria;
+
+      // render anyone already in there.
+      for (let item of row.collection.items) {
+        this.renderItem(item);
+      }
+      // listen up, yo.
+      row.renderer = this;
+    }
+  }
+
+  MozXULElement.implementCustomInterface(MozGlodaMultiRichlistitem, [
+    Ci.nsIDOMXULSelectControlItemElement,
+  ]);
+
+  customElements.define("gloda-multi-richlistitem", MozGlodaMultiRichlistitem, {
+    extends: "richlistitem",
+  });
+
+ /**
+ * The MozGlodaSingleIdentityRichlistitem widget displays an autocomplete item with
+ * single identity: e.g. image, name and description of the item.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+  class MozGlodaSingleIdentityRichlistitem extends MozGlodacompleteBaseRichlistitem {
+    static get inheritedAttributes() {
+      return {
+        "description.ac-comment": "selected",
+        "label.ac-comment": "selected",
+        "description.ac-url-text": "selected",
+        "label.ac-url-text": "selected",
+      };
+    }
+
+    connectedCallback() {
+      super.connectedCallback();
+      if (this.delayConnectedCallback() || this.hasChildNodes()) {
+        return;
+      }
+
+      this.setAttribute("is", "gloda-single-identity-richlistitem");
+      // Two stacked rows: contact name on top, identity (address) below.
+      this.appendChild(
+        MozXULElement.parseXULToFragment(`
+          <hbox class="gloda-single-identity">
+            <vbox>
+              <hbox>
+                <hbox class="ac-title"
+                      flex="1"
+                      onunderflow="_doUnderflow('_name');">
+                  <description class="ac-normal-text ac-comment"></description>
+                </hbox>
+                <label class="ac-ellipsis-after ac-comment"
+                      hidden="true"></label>
+              </hbox>
+              <hbox>
+                <hbox class="ac-url"
+                      flex="1"
+                      onunderflow="_doUnderflow('_identity');">
+                  <description class="ac-normal-text ac-url-text"
+                               inherits="selected"></description>
+                </hbox>
+                <label class="ac-ellipsis-after ac-url-text"
+                      hidden="true"></label>
+              </hbox>
+            </vbox>
+          </hbox>
+        `)
+      );
+
+      // Locale-appropriate ellipsis, defaulting to U+2026.
+      let ellipsis = "\u2026";
+      try {
+        ellipsis = Services.prefs.getComplexValue(
+          "intl.ellipsis",
+          Ci.nsIPrefLocalizedString
+        ).data;
+      } catch (ex) {
+        // Do nothing.. we already have a default.
+      }
+
+      // Cache sub-elements; the *Box/*OverflowEllipsis names are relied on by
+      // the base class _doUnderflow handler.
+      this._identityOverflowEllipsis = this.querySelector("label.ac-url-text");
+      this._nameOverflowEllipsis = this.querySelector("label.ac-comment");
+
+      this._identityOverflowEllipsis.value = ellipsis;
+      this._nameOverflowEllipsis.value = ellipsis;
+
+      this._identityBox = this.querySelector(".ac-url");
+      this._identity = this.querySelector("description.ac-url-text");
+
+      this._nameBox = this.querySelector(".ac-title");
+      this._name = this.querySelector("description.ac-comment");
+
+      this._adjustAcItem();
+
+      this.initializeAttributeInheritance();
+    }
+
+    get label() {
+      let identity = this.row.item;
+      return identity.accessibleLabel;
+    }
+
+    _adjustAcItem() {
+      let identity = this.row.item;
+
+      if (identity == null) {
+        return;
+      }
+
+      // Emphasize the matching search terms for the description.
+      this._setUpDescription(this._name, identity.contact.name);
+      this._setUpDescription(this._identity, identity.value);
+
+      // Set up overflow on a timeout because the contents of the box
+      // might not have a width yet even though we just changed them.
+      setTimeout(
+        this._setUpOverflow,
+        0,
+        this._nameBox,
+        this._nameOverflowEllipsis
+      );
+      setTimeout(
+        this._setUpOverflow,
+        0,
+        this._identityBox,
+        this._identityOverflowEllipsis
+      );
+    }
+  }
+
+  MozXULElement.implementCustomInterface(MozGlodaSingleIdentityRichlistitem, [
+    Ci.nsIDOMXULSelectControlItemElement,
+  ]);
+
+  customElements.define(
+    "gloda-single-identity-richlistitem",
+    MozGlodaSingleIdentityRichlistitem,
+    {
+      extends: "richlistitem",
+    }
+  );
+
+ /**
+ * The MozGlodaSingleTagRichlistitem widget displays an autocomplete item with
+ * single tag: e.g. explanation of the item.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+ class MozGlodaSingleTagRichlistitem extends MozGlodacompleteBaseRichlistitem {
+ connectedCallback() {
+ super.connectedCallback();
+ if (this.delayConnectedCallback() || this.hasChildNodes()) {
+ return;
+ }
+ this.setAttribute("is", "gloda-single-tag-richlistitem");
+ this._explanation = document.createXULElement("description");
+ this._explanation.classList.add("explanation", "gloda-single");
+ this.appendChild(this._explanation);
+ let label = gGlodaCompleteStrings.GetStringFromName(
+ "glodaComplete.messagesTagged.label"
+ );
+ this._explanation.setAttribute(
+ "value",
+ label.replace("#1", this.row.item.tag)
+ );
+ }
+
+ get label() {
+ return "tag " + this.row.item.tag;
+ }
+ }
+
+ MozXULElement.implementCustomInterface(MozGlodaSingleTagRichlistitem, [
+ Ci.nsIDOMXULSelectControlItemElement,
+ ]);
+
+ customElements.define(
+ "gloda-single-tag-richlistitem",
+ MozGlodaSingleTagRichlistitem,
+ {
+ extends: "richlistitem",
+ }
+ );
+}
diff --git a/comm/mailnews/db/gloda/content/glodacomplete.js b/comm/mailnews/db/gloda/content/glodacomplete.js
new file mode 100644
index 0000000000..64578d4143
--- /dev/null
+++ b/comm/mailnews/db/gloda/content/glodacomplete.js
@@ -0,0 +1,466 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* globals MozElements, MozXULElement */
+
+"use strict";
+
+// Wrap in a block to prevent leaking to window scope.
+{
  // Mix the MozElements helpers into XULPopupElement so the popup class
  // below can extend a MozElement-enabled <panel> base class.
  const MozPopupElement = MozElements.MozElementMixin(XULPopupElement);
+
+ /**
+ * The MozGlodacompleteRichResultPopup class creates the panel
+ * to append all the results for the gloda search autocomplete.
+ *
+ * @augments {MozPopupElement}
+ */
  class MozGlodacompleteRichResultPopup extends MozPopupElement {
    constructor() {
      super();

      this.addEventListener("popupshowing", event => {
        // If normalMaxRows wasn't already set by the input, then set it here
        // so that we restore the correct number when the popup is hidden.

        // Null-check this.mInput; see bug 1017914
        if (this._normalMaxRows < 0 && this.mInput) {
          this._normalMaxRows = this.mInput.maxRows;
        }

        this.mPopupOpen = true;
      });

      this.addEventListener("popupshown", event => {
        // adjustHeight() may have been deferred because rows had no layout
        // while the popup was closed; run it now that sizes are available.
        if (this._adjustHeightOnPopupShown) {
          delete this._adjustHeightOnPopupShown;
          this.adjustHeight();
        }
      });

      this.addEventListener("popuphiding", event => {
        let isListActive = true;
        if (this.selectedIndex == -1) {
          isListActive = false;
        }
        // NOTE(review): this dereferences this.mInput unconditionally even
        // though the checks below guard against a null mInput (bug 1017914)
        // -- TODO confirm mInput is always non-null while the popup hides.
        this.mInput.controller.stopSearch();
        this.mPopupOpen = false;

        // Reset the maxRows property to the cached "normal" value (if there's
        // any), and reset normalMaxRows so that we can detect whether it was
        // set by the input when the popupshowing handler runs.

        // Null-check this.mInput; see bug 1017914
        if (this.mInput && this._normalMaxRows > 0) {
          this.mInput.maxRows = this._normalMaxRows;
        }
        this._normalMaxRows = -1;
        // If the list was being navigated and then closed, make sure
        // we fire accessible focus event back to textbox

        // Null-check this.mInput; see bug 1017914
        if (isListActive && this.mInput) {
          this.mInput.mIgnoreFocus = true;
          this.mInput._focus();
          this.mInput.mIgnoreFocus = false;
        }
      });

      // The shadow root hosts a single slot so light-DOM children (the
      // richlistbox built in connectedCallback) render inside the panel.
      this.attachShadow({ mode: "open" });

      let slot = document.createElement("slot");
      slot.part = "content";
      this.shadowRoot.appendChild(slot);
    }

    connectedCallback() {
      if (this.delayConnectedCallback()) {
        return;
      }
      this.textContent = "";

      // The nsIAutoCompleteInput currently driving this popup, if any.
      this.mInput = null;

      this.mPopupOpen = false;

      // Index of the next result row _appendCurrentResult() will append.
      this._currentIndex = 0;

      /**
       * This is the default number of rows that we give the autocomplete
       * popup when the textbox doesn't have a "maxrows" attribute
       * for us to use.
       */
      this.defaultMaxRows = 6;

      /**
       * In some cases (e.g. when the input's dropmarker button is clicked),
       * the input wants to display a popup with more rows. In that case, it
       * should increase its maxRows property and store the "normal" maxRows
       * in this field. When the popup is hidden, we restore the input's
       * maxRows to the value stored in this field.
       *
       * This field is set to -1 between uses so that we can tell when it's
       * been set by the input and when we need to set it in the popupshowing
       * handler.
       */
      this._normalMaxRows = -1;

      this._previousSelectedIndex = -1;

      // Timestamp of the last processed mousemove; used to throttle below.
      this.mLastMoveTime = Date.now();

      this.mousedOverIndex = -1;

      this.richlistbox = document.createXULElement("richlistbox");
      this.richlistbox.setAttribute("flex", "1");
      this.richlistbox.classList.add("autocomplete-richlistbox");

      this.appendChild(this.richlistbox);

      if (!this.listEvents) {
        this.listEvents = {
          handleEvent: event => {
            if (!this.parentNode) {
              return;
            }

            switch (event.type) {
              case "mouseup":
                // Don't call onPopupClick for the scrollbar buttons, thumb,
                // slider, etc. If we hit the richlistbox and not a
                // richlistitem, we ignore the event.
                // NOTE(review): closest() returns null when the target
                // matches neither selector -- TODO confirm that can't happen
                // for events delivered to the richlistbox.
                if (
                  event.target.closest("richlistbox, richlistitem")
                    .localName == "richlistitem"
                ) {
                  this.onPopupClick(event);
                }
                break;
              case "mousemove":
                // Throttle mousemove handling to at most once per 30ms.
                if (Date.now() - this.mLastMoveTime <= 30) {
                  return;
                }

                let item = event.target.closest("richlistbox, richlistitem");

                // If we hit the richlistbox and not a richlistitem, we ignore
                // the event.
                if (item.localName == "richlistbox") {
                  return;
                }

                let index = this.richlistbox.getIndexOfItem(item);

                this.mousedOverIndex = index;

                if (item.selectedByMouseOver) {
                  this.richlistbox.selectedIndex = index;
                }

                this.mLastMoveTime = Date.now();
                break;
            }
          },
        };
        this.richlistbox.addEventListener("mouseup", this.listEvents);
        this.richlistbox.addEventListener("mousemove", this.listEvents);
      }
    }

    // nsIAutoCompletePopup
    get input() {
      return this.mInput;
    }

    get overrideValue() {
      return null;
    }

    get popupOpen() {
      return this.mPopupOpen;
    }

    get maxRows() {
      return (this.mInput && this.mInput.maxRows) || this.defaultMaxRows;
    }

    set selectedIndex(val) {
      if (val != this.richlistbox.selectedIndex) {
        this._previousSelectedIndex = this.richlistbox.selectedIndex;
      }
      this.richlistbox.selectedIndex = val;
      // Since ensureElementIsVisible may cause an expensive Layout flush,
      // invoke it only if there may be a scrollbar, so if we could fetch
      // more results than we can show at once.
      // maxResults is the maximum number of fetched results, maxRows is the
      // maximum number of rows we show at once, without a scrollbar.
      if (this.mPopupOpen && this.maxResults > this.maxRows) {
        // when clearing the selection (val == -1, so selectedItem will be
        // null), we want to scroll back to the top. see bug #406194
        this.richlistbox.ensureElementIsVisible(
          this.richlistbox.selectedItem || this.richlistbox.firstElementChild
        );
      }
    }

    get selectedIndex() {
      return this.richlistbox.selectedIndex;
    }

    get maxResults() {
      // This is how many richlistitems will be kept around.
      // Note, this getter may be overridden, or instances
      // can have the nomaxresults attribute set to have no
      // limit.
      if (this.getAttribute("nomaxresults") == "true") {
        return Infinity;
      }

      return 20;
    }

    get matchCount() {
      return Math.min(this.mInput.controller.matchCount, this.maxResults);
    }

    get overflowPadding() {
      return Number(this.getAttribute("overflowpadding"));
    }

    // The controller doubles as the nsIAutoCompletePopup view; assigning the
    // view is deliberately a no-op.
    set view(val) {}

    get view() {
      return this.mInput.controller;
    }

    closePopup() {
      if (this.mPopupOpen) {
        this.hidePopup();
        this.style.removeProperty("--panel-width");
      }
    }

    /**
     * Compute the next selection index when stepping aAmount rows from
     * aIndex, clamping at the ends and wrapping to -1 (no selection) when
     * stepping past an end that was already selected.
     */
    getNextIndex(aReverse, aAmount, aIndex, aMaxRow) {
      if (aMaxRow < 0) {
        return -1;
      }

      let newIdx = aIndex + (aReverse ? -1 : 1) * aAmount;
      if (
        (aReverse && aIndex == -1) ||
        (newIdx > aMaxRow && aIndex != aMaxRow)
      ) {
        newIdx = aMaxRow;
      } else if ((!aReverse && aIndex == -1) || (newIdx < 0 && aIndex != 0)) {
        newIdx = 0;
      }

      if (
        (newIdx < 0 && aIndex == 0) ||
        (newIdx > aMaxRow && aIndex == aMaxRow)
      ) {
        aIndex = -1;
      } else {
        aIndex = newIdx;
      }

      return aIndex;
    }

    onPopupClick(aEvent) {
      this.input.controller.handleEnter(true, aEvent);
    }

    onSearchBegin() {
      this.mousedOverIndex = -1;

      if (typeof this._onSearchBegin == "function") {
        this._onSearchBegin();
      }
    }

    openAutocompletePopup(aInput, aElement) {
      // until we have "baseBinding", (see bug #373652) this allows
      // us to override openAutocompletePopup(), but still call
      // the method on the base class
      this._openAutocompletePopup(aInput, aElement);
    }

    _openAutocompletePopup(aInput, aElement) {
      if (!this.mPopupOpen) {
        // It's possible that the panel is hidden initially
        // to avoid impacting startup / new window performance
        aInput.popup.hidden = false;

        this.mInput = aInput;
        // clear any previous selection, see bugs 400671 and 488357
        this.selectedIndex = -1;

        // The panel is at least 100px wide, otherwise as wide as the input.
        let width = aElement.getBoundingClientRect().width;
        this.style.setProperty(
          "--panel-width",
          (width > 100 ? width : 100) + "px"
        );
        // invalidate() depends on the width attribute
        this._invalidate();

        this.openPopup(aElement, "after_start", 0, 0, false, false);
      }
    }

    invalidate(reason) {
      // Don't bother doing work if we're not even showing
      if (!this.mPopupOpen) {
        return;
      }

      this._invalidate(reason);
    }

    _invalidate(reason) {
      // NOTE(review): "reason" is accepted but unused; kept for interface
      // compatibility with nsIAutoCompletePopup.invalidate.
      setTimeout(() => this.adjustHeight(), 0);

      // remove all child nodes because we never want to reuse them.
      while (this.richlistbox.hasChildNodes()) {
        this.richlistbox.lastChild.remove();
      }

      this._currentIndex = 0;
      this._appendCurrentResult();
    }

    // Collapse any rows beyond matchCount instead of removing them.
    _collapseUnusedItems() {
      let existingItemsCount = this.richlistbox.children.length;
      for (let i = this.matchCount; i < existingItemsCount; ++i) {
        let item = this.richlistbox.children[i];

        item.collapsed = true;
        if (typeof item._onCollapse == "function") {
          item._onCollapse();
        }
      }
    }

    adjustHeight() {
      // Figure out how many rows to show
      let rows = this.richlistbox.children;
      let numRows = Math.min(this.matchCount, this.maxRows, rows.length);

      // Default the height to 0 if we have no rows to show
      let height = 0;
      if (numRows) {
        let firstRowRect = rows[0].getBoundingClientRect();
        // Lazily compute and memoize the richlistbox vertical padding.
        if (this._rlbPadding == undefined) {
          let style = window.getComputedStyle(this.richlistbox);
          let paddingTop = parseInt(style.paddingTop) || 0;
          let paddingBottom = parseInt(style.paddingBottom) || 0;
          this._rlbPadding = paddingTop + paddingBottom;
        }

        // The class `forceHandleUnderflow` is for the item might need to
        // handle OverUnderflow or Overflow when the height of an item will
        // be changed dynamically.
        for (let i = 0; i < numRows; i++) {
          if (rows[i].classList.contains("forceHandleUnderflow")) {
            rows[i].handleOverUnderflow();
          }
        }

        let lastRowRect = rows[numRows - 1].getBoundingClientRect();
        // Calculate the height to have the first row to last row shown
        height = lastRowRect.bottom - firstRowRect.top + this._rlbPadding;
      }

      let currentHeight = this.richlistbox.getBoundingClientRect().height;
      if (height <= currentHeight) {
        this._collapseUnusedItems();
      }
      this.richlistbox.style.removeProperty("height");
      // We need to get the ceiling of the calculated value to ensure that the
      // box fully contains all of its contents and doesn't cause a scrollbar
      // since nsIBoxObject only expects a `long`. e.g. if `height` is 99.5
      // the richlistbox would render at height 99px with a scrollbar for the
      // extra 0.5px.
      this.richlistbox.height = Math.ceil(height);
    }

    // Append up to maxRows result rows, then reschedule itself via a 0ms
    // timeout until matchCount rows exist; the follow-up call returns early
    // once done.
    _appendCurrentResult() {
      let controller = this.mInput.controller;
      let glodaCompleter = Cc[
        "@mozilla.org/autocomplete/search;1?name=gloda"
      ].getService(Ci.nsIAutoCompleteSearch).wrappedJSObject;

      // Process maxRows per chunk to improve performance and user experience
      for (let i = 0; i < this.maxRows; i++) {
        if (this._currentIndex >= this.matchCount) {
          return;
        }

        let item;

        // trim the leading/trailing whitespace
        let trimmedSearchString = controller.searchString.trim();
        let result = glodaCompleter.curResult;

        // The result style name doubles as the custom-element "is" value,
        // e.g. "gloda-single-tag-richlistitem".
        item = document.createXULElement("richlistitem", {
          is: result.getStyleAt(this._currentIndex),
        });

        // set these attributes before we set the class
        // so that we can use them from the constructor
        let row = result.getObjectAt(this._currentIndex);
        item.setAttribute("text", trimmedSearchString);
        item.setAttribute("type", result.getStyleAt(this._currentIndex));

        item.row = row;

        // set the class at the end so we can use the attributes
        // in the xbl constructor
        item.className = "autocomplete-richlistitem";
        this.richlistbox.appendChild(item);
        this._currentIndex++;
      }

      // yield after each batch of items so that typing the url bar is
      // responsive
      setTimeout(() => this._appendCurrentResult(), 0);
    }

    selectBy(aReverse, aPage) {
      try {
        // Page moves step five rows at a time.
        let amount = aPage ? 5 : 1;

        // because we collapsed unused items, we can't use
        // this.richlistbox.getRowCount(), we need to use the matchCount
        this.selectedIndex = this.getNextIndex(
          aReverse,
          amount,
          this.selectedIndex,
          this.matchCount - 1
        );
        if (this.selectedIndex == -1) {
          this.input._focus();
        }
      } catch (ex) {
        // do nothing - occasionally timer-related js errors happen here
        // e.g. "this.selectedIndex has no properties", when you type fast
        // and hit a navigation key before this popup has opened
      }
    }

    disconnectedCallback() {
      // Drop the mouse listeners registered in connectedCallback().
      if (this.listEvents) {
        this.richlistbox.removeEventListener("mouseup", this.listEvents);
        this.richlistbox.removeEventListener("mousemove", this.listEvents);
        delete this.listEvents;
      }
    }
  }
+
  // Expose nsIAutoCompletePopup on the class and register it as a customized
  // built-in <panel> element.
  MozXULElement.implementCustomInterface(MozGlodacompleteRichResultPopup, [
    Ci.nsIAutoCompletePopup,
  ]);
  customElements.define(
    "glodacomplete-rich-result-popup",
    MozGlodacompleteRichResultPopup,
    { extends: "panel" }
  );
+}
diff --git a/comm/mailnews/db/gloda/jar.mn b/comm/mailnews/db/gloda/jar.mn
new file mode 100644
index 0000000000..6dbf20d9c3
--- /dev/null
+++ b/comm/mailnews/db/gloda/jar.mn
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+gloda.jar:
+% content gloda %content/
+ content/glodacomplete.js (content/glodacomplete.js)
+ content/autocomplete-richlistitem.js (content/autocomplete-richlistitem.js)
diff --git a/comm/mailnews/db/gloda/modules/Collection.jsm b/comm/mailnews/db/gloda/modules/Collection.jsm
new file mode 100644
index 0000000000..e229161fc9
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Collection.jsm
@@ -0,0 +1,834 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
// Symbols this JSM exposes to ChromeUtils.import callers.
const EXPORTED_SYMBOLS = ["GlodaCollection", "GlodaCollectionManager"];

// Console logger for collection bookkeeping; verbosity is controlled at
// runtime by the "gloda.loglevel" pref.
var LOG = console.createInstance({
  prefix: "gloda.collection",
  maxLogLevel: "Warn",
  maxLogLevelPref: "gloda.loglevel",
});
+
+/**
+ * @namespace Central registry and logic for all collections.
+ *
+ * The collection manager is a singleton that has the following tasks:
+ * - Let views of objects (nouns) know when their objects have changed. For
+ * example, an attribute has changed due to user action.
+ * - Let views of objects based on queries know when new objects match their
+ * query, or when their existing objects no longer match due to changes.
+ * - Caching/object-identity maintenance. It is ideal if we only ever have
+ * one instance of an object at a time. (More specifically, only one instance
+ * per database row 'id'.) The collection mechanism lets us find existing
+ * instances to this end. Caching can be directly integrated by being treated
+ * as a special collection.
+ */
var GlodaCollectionManager = {
  // Per-noun-ID lists of weak references to registered collections.
  _collectionsByNoun: {},
  // Per-noun-ID LRU caches, created via defineCache().
  _cachesByNoun: {},

  /**
   * Registers the existence of a collection with the collection manager. This
   * is done using a weak reference so that the collection can go away if it
   * wants to.
   */
  registerCollection(aCollection) {
    let collections;
    let nounID = aCollection.query._nounDef.id;
    if (!(nounID in this._collectionsByNoun)) {
      collections = this._collectionsByNoun[nounID] = [];
    } else {
      // purge dead weak references while we're at it
      collections = this._collectionsByNoun[nounID].filter(aRef => aRef.get());
      this._collectionsByNoun[nounID] = collections;
    }
    collections.push(Cu.getWeakReference(aCollection));
  },

  /**
   * Returns the still-alive collections registered for the given noun type.
   */
  getCollectionsForNounID(aNounID) {
    if (!(aNounID in this._collectionsByNoun)) {
      return [];
    }

    // generator would be nice, but I suspect get() is too expensive to use
    // twice (guard/predicate and value)
    let weakCollections = this._collectionsByNoun[aNounID];
    let collections = [];
    for (let iColl = 0; iColl < weakCollections.length; iColl++) {
      let collection = weakCollections[iColl].get();
      if (collection) {
        collections.push(collection);
      }
    }
    return collections;
  },

  /**
   * Creates (or replaces) the LRU cache used for the given noun type.
   */
  defineCache(aNounDef, aCacheSize) {
    this._cachesByNoun[aNounDef.id] = new GlodaLRUCacheCollection(
      aNounDef,
      aCacheSize
    );
  },

  /**
   * Attempt to locate an instance of the object of the given noun type with
   * the given id. Counts as a cache hit if found. (And if it wasn't in a
   * cache, but rather a collection, it is added to the cache.)
   */
  cacheLookupOne(aNounID, aID, aDoCache) {
    let cache = this._cachesByNoun[aNounID];

    if (cache) {
      if (aID in cache._idMap) {
        let item = cache._idMap[aID];
        return cache.hit(item);
      }
    }

    // aDoCache === false means "look, but do not promote into the cache".
    if (aDoCache === false) {
      cache = null;
    }

    for (let collection of this.getCollectionsForNounID(aNounID)) {
      if (aID in collection._idMap) {
        let item = collection._idMap[aID];
        if (cache) {
          cache.add([item]);
        }
        return item;
      }
    }

    LOG.debug("cacheLookupOne:\nhit null");
    return null;
  },

  /**
   * Lookup multiple nouns by ID from the cache/existing collections.
   *
   * @param aNounID The kind of noun identified by its ID.
   * @param aIDMap A dictionary/map whose keys must be gloda noun ids for the
   *     given noun type and whose values are ignored.
   * @param aTargetMap An object to hold the noun id's (key) and noun instances
   *     (value) for the noun instances that were found available in memory
   *     because they were cached or in existing query collections.
   * @param [aDoCache=true] Should we add any items to the cache that we found
   *     in collections that were in memory but not in the cache? You would
   *     likely want to pass false if you are only updating in-memory
   *     representations rather than performing a new query.
   *
   * @returns [The number that were found, the number that were not found,
   *     a dictionary whose keys are the ids of noun instances that
   *     were not found.]
   */
  cacheLookupMany(aNounID, aIDMap, aTargetMap, aDoCache) {
    let foundCount = 0,
      notFoundCount = 0,
      notFound = {};

    let cache = this._cachesByNoun[aNounID];

    // NOTE(review): if no cache is defined for this noun, aIDMap's keys are
    // never copied into notFound, so the collection scan below sees nothing
    // and the ids are reported neither found nor not-found -- TODO confirm
    // every noun reaching this path has a cache defined.
    if (cache) {
      for (let key in aIDMap) {
        let cacheValue = cache._idMap[key];
        if (cacheValue === undefined) {
          notFoundCount++;
          notFound[key] = null;
        } else {
          foundCount++;
          aTargetMap[key] = cacheValue;
          cache.hit(cacheValue);
        }
      }
    }

    if (aDoCache === false) {
      cache = null;
    }

    for (let collection of this.getCollectionsForNounID(aNounID)) {
      for (let key in notFound) {
        let collValue = collection._idMap[key];
        if (collValue !== undefined) {
          aTargetMap[key] = collValue;
          delete notFound[key];
          foundCount++;
          notFoundCount--;
          if (cache) {
            cache.add([collValue]);
          }
        }
      }
    }

    return [foundCount, notFoundCount, notFound];
  },

  /**
   * Friendlier version of |cacheLookupMany|; takes a list of ids and returns
   * an object whose keys and values are the gloda id's and instances of the
   * instances that were found. We don't tell you who we didn't find. The
   * assumption is this is being used for in-memory updates where we only need
   * to tweak what is in memory.
   */
  cacheLookupManyList(aNounID, aIds) {
    let checkMap = {},
      targetMap = {};
    for (let id of aIds) {
      checkMap[id] = null;
    }
    // do not promote found items into the cache
    this.cacheLookupMany(aNounID, checkMap, targetMap, false);
    return targetMap;
  },

  /**
   * Attempt to locate an instance of the object of the given noun type with
   * the given unique value. Counts as a cache hit if found. (And if it wasn't
   * in a cache, but rather a collection, it is added to the cache.)
   */
  cacheLookupOneByUniqueValue(aNounID, aUniqueValue, aDoCache) {
    let cache = this._cachesByNoun[aNounID];

    if (cache) {
      if (aUniqueValue in cache._uniqueValueMap) {
        let item = cache._uniqueValueMap[aUniqueValue];
        return cache.hit(item);
      }
    }

    // aDoCache === false means "look, but do not promote into the cache".
    if (aDoCache === false) {
      cache = null;
    }

    for (let collection of this.getCollectionsForNounID(aNounID)) {
      if (aUniqueValue in collection._uniqueValueMap) {
        let item = collection._uniqueValueMap[aUniqueValue];
        if (cache) {
          cache.add([item]);
        }
        return item;
      }
    }

    return null;
  },

  /**
   * Checks whether the provided item with the given id is actually a duplicate
   * of an instance that already exists in the cache/a collection. If it is,
   * the pre-existing instance is returned and counts as a cache hit. If it
   * is not, the passed-in instance is added to the cache and returned.
   */
  cacheLoadUnifyOne(aItem) {
    let items = [aItem];
    this.cacheLoadUnify(aItem.NOUN_ID, items);
    return items[0];
  },

  /**
   * Given a list of items, check if any of them already have duplicate,
   * canonical, instances in the cache or collections. Items with pre-existing
   * instances are replaced by those instances in the provided list, and each
   * counts as a cache hit. Items without pre-existing instances are added
   * to the cache and left intact.
   */
  cacheLoadUnify(aNounID, aItems, aCacheIfMissing) {
    let cache = this._cachesByNoun[aNounID];
    if (aCacheIfMissing === undefined) {
      aCacheIfMissing = true;
    }

    // track the items we haven't yet found in a cache/collection (value) and
    // their index in aItems (key). We're somewhat abusing the dictionary
    // metaphor with the intent of storing tuples here. We also do it because
    // it allows random-access deletion theoretically without cost. (Since
    // we delete during iteration, that may be wrong, but it sounds like the
    // semantics still work?)
    let unresolvedIndexToItem = {};
    let numUnresolved = 0;

    if (cache) {
      for (let iItem = 0; iItem < aItems.length; iItem++) {
        let item = aItems[iItem];

        if (item.id in cache._idMap) {
          let realItem = cache._idMap[item.id];
          // update the caller's array with the reference to the 'real' item
          aItems[iItem] = realItem;
          cache.hit(realItem);
        } else {
          unresolvedIndexToItem[iItem] = item;
          numUnresolved++;
        }
      }

      // we're done if everyone was a hit.
      if (numUnresolved == 0) {
        return;
      }
    } else {
      for (let iItem = 0; iItem < aItems.length; iItem++) {
        unresolvedIndexToItem[iItem] = aItems[iItem];
      }
      numUnresolved = aItems.length;
    }

    let needToCache = [];
    // next, let's fall back to our collections
    for (let collection of this.getCollectionsForNounID(aNounID)) {
      // Object.entries yields string keys; array assignment below still
      // works because JS coerces the index.
      for (let [iItem, item] of Object.entries(unresolvedIndexToItem)) {
        if (item.id in collection._idMap) {
          let realItem = collection._idMap[item.id];
          // update the caller's array to now have the 'real' object
          aItems[iItem] = realItem;
          // flag that we need to cache this guy (we use an inclusive cache)
          needToCache.push(realItem);
          // we no longer need to resolve this item...
          delete unresolvedIndexToItem[iItem];
          // stop checking collections if we got everybody
          if (--numUnresolved == 0) {
            break;
          }
        }
      }
    }

    // anything left in unresolvedIndexToItem should be added to the cache
    // unless !aCacheIfMissing. plus, we already have 'needToCache'
    if (cache && aCacheIfMissing) {
      cache.add(
        needToCache.concat(
          Object.keys(unresolvedIndexToItem).map(
            key => unresolvedIndexToItem[key]
          )
        )
      );
    }
  },

  /** Flush dirty cached items to disk for every noun cache. */
  cacheCommitDirty() {
    for (let id in this._cachesByNoun) {
      let cache = this._cachesByNoun[id];
      cache.commitDirty();
    }
  },

  /**
   * Notifies the collection manager that an item has been loaded and should
   * be cached, assuming caching is active.
   */
  itemLoaded(aItem) {
    let cache = this._cachesByNoun[aItem.NOUN_ID];
    if (cache) {
      cache.add([aItem]);
    }
  },

  /**
   * Notifies the collection manager that multiple items have been loaded and
   * should be cached, assuming caching is active.
   */
  itemsLoaded(aNounID, aItems) {
    let cache = this._cachesByNoun[aNounID];
    if (cache) {
      cache.add(aItems);
    }
  },

  /**
   * This should be called when items are added to the global database. This
   * should generally mean during indexing by indexers or an attribute
   * provider.
   * We walk all existing collections for the given noun type and add the items
   * to the collection if the item meets the query that defines the collection.
   */
  itemsAdded(aNounID, aItems) {
    let cache = this._cachesByNoun[aNounID];
    if (cache) {
      cache.add(aItems);
    }

    for (let collection of this.getCollectionsForNounID(aNounID)) {
      let addItems = aItems.filter(item => collection.query.test(item));
      if (addItems.length) {
        collection._onItemsAdded(addItems);
      }
    }
  },
  /**
   * This should be called when items in the global database are modified. For
   * example, as a result of indexing. This should generally only be called
   * by indexers or by attribute providers.
   * We walk all existing collections for the given noun type. For items
   * currently included in each collection but should no longer be (per the
   * collection's defining query) we generate onItemsRemoved events. For items
   * not currently included in the collection but should now be, we generate
   * onItemsAdded events. For items included that still match the query, we
   * generate onItemsModified events.
   */
  itemsModified(aNounID, aItems) {
    for (let collection of this.getCollectionsForNounID(aNounID)) {
      let added = [],
        modified = [],
        removed = [];
      for (let item of aItems) {
        if (item.id in collection._idMap) {
          // currently in... but should it still be there?
          if (collection.query.test(item)) {
            modified.push(item); // yes, keep it
          } else if (!collection.query.frozen) {
            // oy, so null queries really don't want any notifications, and
            // they sorta fit into our existing model, except for the removal
            // bit. so we need a specialized check for them, and we're using
            // the frozen attribute to this end.
            removed.push(item); // no, bin it
          }
        } else if (collection.query.test(item)) {
          // not in, should it be?
          added.push(item); // yep, add it
        }
      }
      if (added.length) {
        collection._onItemsAdded(added);
      }
      if (modified.length) {
        collection._onItemsModified(modified);
      }
      if (removed.length) {
        collection._onItemsRemoved(removed);
      }
    }
  },
  /**
   * This should be called when items in the global database are
   * permanently-ish deleted. (This is distinct from concepts like message
   * deletion which may involved trash folders or other modified forms of
   * existence. Deleted means the data is gone and if it were to come back,
   * it would come back via an itemsAdded event.)
   * We walk all existing collections for the given noun type. For items
   * currently in the collection, we generate onItemsRemoved events.
   *
   * @param aItemIds A list of item ids that are being deleted.
   */
  itemsDeleted(aNounID, aItemIds) {
    // cache
    let cache = this._cachesByNoun[aNounID];
    if (cache) {
      for (let itemId of aItemIds) {
        if (itemId in cache._idMap) {
          cache.deleted(cache._idMap[itemId]);
        }
      }
    }

    // collections
    for (let collection of this.getCollectionsForNounID(aNounID)) {
      let removeItems = aItemIds
        .filter(itemId => itemId in collection._idMap)
        .map(itemId => collection._idMap[itemId]);
      if (removeItems.length) {
        collection._onItemsRemoved(removeItems);
      }
    }
  },
  /**
   * Like |itemsDeleted| but for the case where the deletion is based on an
   * attribute that SQLite can more efficiently check than we can and where
   * the cost of scanning the in-memory items is presumably much cheaper than
   * trying to figure out what actually got deleted.
   *
   * Since we are doing an in-memory walk, this is obviously O(n) where n is
   * the number of noun instances of a given type in-memory. We are assuming
   * this is a reasonable number of things and that this type of deletion call
   * is not going to happen all that frequently. If these assumptions are
   * wrong, callers are advised to re-think the whole situation.
   *
   * @param aNounID Type of noun we are talking about here.
   * @param aFilter A filter function that returns true when the item should
   *     be thought of as deleted, or false if the item is still good. Screw
   *     this up and you will get some seriously wacky bugs, yo.
   */
  itemsDeletedByAttribute(aNounID, aFilter) {
    // cache
    let cache = this._cachesByNoun[aNounID];
    if (cache) {
      for (let id in cache._idMap) {
        let item = cache._idMap[id];
        if (aFilter(item)) {
          cache.deleted(item);
        }
      }
    }

    // collections
    for (let collection of this.getCollectionsForNounID(aNounID)) {
      let removeItems = collection.items.filter(aFilter);
      if (removeItems.length) {
        collection._onItemsRemoved(removeItems);
      }
    }
  },
};
+
/**
 * A live view of the set of first-class noun instances matching a query.
 * While a listener is attached, events are generated when new objects meet
 * the query, existing objects no longer meet it, or existing objects change
 * attributes that do not affect membership (but that the listener may care
 * about because it exposes those attributes).
 *
 * @param {object} aNounDef - Definition of the noun type held here. Pass
 *   undefined when invoking purely for prototype-chain subclassing; the
 *   constructor then leaves the instance untouched.
 * @param {object[]} [aItems] - Initial items, loaded without firing events.
 * @param {object} [aQuery] - The query defining collection membership.
 * @param {object} [aListener] - Receives onItemsAdded/Modified/Removed.
 * @param {GlodaCollection} [aMasterCollection] - Master collection whose
 *   reference bookkeeping this sub-collection shares.
 * @class
 */
function GlodaCollection(
  aNounDef,
  aItems,
  aQuery,
  aListener,
  aMasterCollection
) {
  if (aNounDef === undefined) {
    // Subclassing invocation; do not initialize anything.
    return;
  }

  this._nounDef = aNounDef;
  // Only nouns with unique values get a unique-value lookup table.
  if (this._nounDef.usesUniqueValue) {
    this._uniqueValueMap = {};
  }

  this.pendingItems = [];
  this._pendingIdMap = {};
  this.items = [];
  this._idMap = {};

  // Keep the listener null during the initial load-out so _onItemsAdded
  // fires no events for the items we were constructed with.
  this._listener = null;
  if (aItems && aItems.length) {
    this._onItemsAdded(aItems);
  }

  this.query = aQuery ? aQuery : null;
  if (this.query) {
    this.query.collection = this;
    if (this.query.options.stashColumns) {
      this.stashedColumns = {};
    }
  }
  this._listener = aListener ? aListener : null;

  this.deferredCount = 0;
  this.resolvedCount = 0;

  if (aMasterCollection) {
    // Sub-collections delegate reference bookkeeping to their master.
    this.masterCollection = aMasterCollection.masterCollection;
  } else {
    this.masterCollection = this;
    /** a dictionary of dictionaries. at the top level, the keys are noun IDs.
     * each of these sub-dictionaries maps the IDs of desired noun instances
     * to the actual instance, or null if it has not yet been loaded.
     */
    this.referencesByNounID = {};
    /**
     * a dictionary of dictionaries. at the top level, the keys are noun IDs.
     * each of the sub-dictionaries maps the IDs of the _recognized parent
     * noun_ to the list of children, or null if the list has not yet been
     * populated.
     *
     * So if we have a noun definition A with ID 1 who is the recognized
     * parent noun of noun definition B with ID 2, AND we have an instance
     * A(1) with two children B(10), B(11), then an example might be:
     * {2: {1: [10, 11]}}.
     */
    this.inverseReferencesByNounID = {};
    this.subCollections = {};
  }
}
+
GlodaCollection.prototype = {
  /** The listener notified about item additions/modifications/removals. */
  get listener() {
    return this._listener;
  },
  set listener(aListener) {
    this._listener = aListener;
  },

  /**
   * If this collection still has a query associated with it, drop the query
   * and replace it with an 'explicit query'. This means that the Collection
   * Manager will not attempt to match new items indexed to the system against
   * our query criteria.
   * Once you call this method, your collection's listener will no longer
   * receive onItemsAdded notifications that are not the result of your
   * initial database query. It will, however, receive onItemsModified
   * notifications if items in the collection are re-indexed.
   */
  becomeExplicit() {
    if (!(this.query instanceof this._nounDef.explicitQueryClass)) {
      this.query = new this._nounDef.explicitQueryClass(this);
    }
  },

  /**
   * Clear the contents of this collection. This only makes sense for explicit
   * collections or wildcard collections. (Actual query-based collections
   * should represent the state of the query, so unless we're going to delete
   * all the items, clearing the collection would violate that constraint.)
   */
  clear() {
    this._idMap = {};
    if (this._uniqueValueMap) {
      this._uniqueValueMap = {};
    }
    this.items = [];
  },

  /**
   * Append the items to the collection, index them in our id map (and unique
   * value map, when present), then notify the listener, logging (but
   * swallowing) any exception it throws.
   */
  _onItemsAdded(aItems) {
    this.items.push.apply(this.items, aItems);
    // Only the newly added items need indexing; previously added items are
    // already in the maps. (Iterating this.items here instead would be
    // accidentally O(n^2) across repeated additions.)
    if (this._uniqueValueMap) {
      for (let item of aItems) {
        this._idMap[item.id] = item;
        this._uniqueValueMap[item.uniqueValue] = item;
      }
    } else {
      for (let item of aItems) {
        this._idMap[item.id] = item;
      }
    }
    if (this._listener) {
      try {
        this._listener.onItemsAdded(aItems, this);
      } catch (ex) {
        LOG.error(
          "caught exception from listener in onItemsAdded: " +
            ex.fileName +
            ":" +
            ex.lineNumber +
            ": " +
            ex
        );
      }
    }
  },

  /**
   * Notify the listener that items already in the collection were modified
   * (e.g. re-indexed), logging (but swallowing) any exception it throws.
   */
  _onItemsModified(aItems) {
    if (this._listener) {
      try {
        this._listener.onItemsModified(aItems, this);
      } catch (ex) {
        LOG.error(
          "caught exception from listener in onItemsModified: " +
            ex.fileName +
            ":" +
            ex.lineNumber +
            ": " +
            ex
        );
      }
    }
  },

  /**
   * Given a list of items that definitely no longer belong in this collection,
   * remove them from the collection and notify the listener. The 'tricky'
   * part is that we need to remove the deleted items from our list of items.
   */
  _onItemsRemoved(aItems) {
    // we want to avoid the O(n^2) deletion performance case, and deletion
    // should be rare enough that the extra cost of building the deletion map
    // should never be a real problem.
    let deleteMap = {};
    // build the delete map while also nuking from our id map/unique value map
    for (let item of aItems) {
      deleteMap[item.id] = true;
      delete this._idMap[item.id];
      if (this._uniqueValueMap) {
        delete this._uniqueValueMap[item.uniqueValue];
      }
    }
    let items = this.items;
    // in-place filter. probably needless optimization.
    let iWrite = 0;
    for (let iRead = 0; iRead < items.length; iRead++) {
      let item = items[iRead];
      if (!(item.id in deleteMap)) {
        items[iWrite++] = item;
      }
    }
    items.splice(iWrite);

    if (this._listener) {
      try {
        this._listener.onItemsRemoved(aItems, this);
      } catch (ex) {
        LOG.error(
          "caught exception from listener in onItemsRemoved: " +
            ex.fileName +
            ":" +
            ex.lineNumber +
            ": " +
            ex
        );
      }
    }
  },

  /** Mark the query as completed and notify the listener, if it cares. */
  _onQueryCompleted() {
    this.query.completed = true;
    if (this._listener && this._listener.onQueryCompleted) {
      this._listener.onQueryCompleted(this);
    }
  },
};
+
/**
 * Create an LRU cache collection for the given noun with the given size.
 *
 * @class
 */
function GlodaLRUCacheCollection(aNounDef, aCacheSize) {
  GlodaCollection.call(this, aNounDef, null, null, null);

  this._head = null; // oldest entry / least recently used
  this._tail = null; // newest entry / most recently used
  this._size = 0;
  // Clamp to a sane minimum so the eviction logic stays simple.
  this._maxCacheSize = aCacheSize < 32 ? 32 : aCacheSize;
}
/**
 * @class A LRU-discard cache. We use a doubly linked-list for the eviction
 * tracking. Since we require that there is at most one LRU-discard cache per
 * noun class, we simplify our lives by adding our own attributes to the
 * cached objects.
 * @augments GlodaCollection
 */
// Legacy prototype-chain inheritance. NOTE(review): this runs the
// GlodaCollection constructor with no arguments for the shared prototype;
// presumably per-instance state is (re)established by the GlodaCollection.call
// in our own constructor -- confirm before modernizing to Object.create.
GlodaLRUCacheCollection.prototype = new GlodaCollection();
/**
 * Add items to the cache: record them in the id map (and unique value map,
 * when present) and link each onto the tail (most-recently-used end) of the
 * LRU list. Afterwards, evict from the head until we are back within
 * _maxCacheSize, flushing any dirty evictee via the noun's objUpdate before
 * discarding it.
 */
GlodaLRUCacheCollection.prototype.add = function (aItems) {
  for (let item of aItems) {
    if (item.id in this._idMap) {
      // DEBUGME so, we're dealing with this, but it shouldn't happen. need
      // trace-debuggage.
      continue;
    }
    this._idMap[item.id] = item;
    if (this._uniqueValueMap) {
      this._uniqueValueMap[item.uniqueValue] = item;
    }

    // Link the item in as the new tail (most recently used).
    item._lruPrev = this._tail;
    // we do have to make sure that we will set _head the first time we insert
    // something
    if (this._tail !== null) {
      this._tail._lruNext = item;
    } else {
      this._head = item;
    }
    item._lruNext = null;
    this._tail = item;

    this._size++;
  }

  // Evict least-recently-used entries from _head until we fit again.
  while (this._size > this._maxCacheSize) {
    let item = this._head;

    // we never have to deal with the possibility of needing to make _head/_tail
    // null.
    this._head = item._lruNext;
    this._head._lruPrev = null;
    // (because we are nice, we will delete the properties...)
    delete item._lruNext;
    delete item._lruPrev;

    // nuke from our id map
    delete this._idMap[item.id];
    if (this._uniqueValueMap) {
      delete this._uniqueValueMap[item.uniqueValue];
    }

    // flush dirty items to disk (they may not have this attribute, in which
    // case, this returns false, which is fine.)
    if (item.dirty) {
      this._nounDef.objUpdate.call(this._nounDef.datastore, item);
      delete item.dirty;
    }

    this._size--;
  }
};
+
/**
 * Mark an item as just-used by moving it to the tail (most-recently-used
 * end) of the eviction list. Returns the item for caller convenience.
 */
GlodaLRUCacheCollection.prototype.hit = function (aItem) {
  // Nothing to do for a 0/1-entry list, or when the item is already the
  // most recently used (tail) entry.
  if (this._tail === aItem || this._head === this._tail) {
    return aItem;
  }

  // Unlink the item from its current position.
  let prev = aItem._lruPrev;
  let next = aItem._lruNext;
  if (prev === null) {
    this._head = next;
  } else {
    prev._lruNext = next;
  }
  // next cannot be null here, since aItem is not the tail.
  next._lruPrev = prev;

  // Re-link the item as the new tail.
  this._tail._lruNext = aItem;
  aItem._lruPrev = this._tail;
  aItem._lruNext = null;
  this._tail = aItem;

  return aItem;
};
+
/**
 * Remove a deleted item from the cache: splice it out of the eviction list,
 * strip our bookkeeping properties, and drop it from the lookup maps.
 */
GlodaLRUCacheCollection.prototype.deleted = function (aItem) {
  // Splice the item out of the doubly-linked eviction list, updating
  // _head/_tail when the item sat at either end.
  let prev = aItem._lruPrev;
  let next = aItem._lruNext;
  if (prev === null) {
    this._head = next;
  } else {
    prev._lruNext = next;
  }
  if (next === null) {
    this._tail = prev;
  } else {
    next._lruPrev = prev;
  }

  // (because we are nice, we will delete the properties...)
  delete aItem._lruNext;
  delete aItem._lruPrev;

  // Remove the item from the lookup maps.
  delete this._idMap[aItem.id];
  if (this._uniqueValueMap) {
    delete this._uniqueValueMap[aItem.uniqueValue];
  }

  this._size--;
};
+
/**
 * If any of the cached items are dirty, commit them, and make them no longer
 * dirty.
 */
GlodaLRUCacheCollection.prototype.commitDirty = function () {
  // Without an update method on the noun there is nothing we can do.
  if (!this._nounDef.objUpdate) {
    return;
  }

  for (let key in this._idMap) {
    let item = this._idMap[key];
    if (!item.dirty) {
      continue;
    }
    LOG.debug("flushing dirty: " + item);
    this._nounDef.objUpdate.call(this._nounDef.datastore, item);
    delete item.dirty;
  }
};
diff --git a/comm/mailnews/db/gloda/modules/Everybody.jsm b/comm/mailnews/db/gloda/modules/Everybody.jsm
new file mode 100644
index 0000000000..4f33134ef9
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Everybody.jsm
@@ -0,0 +1,23 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
// This module exports nothing; importing it exists purely to pull in and
// initialize all of the built-in gloda attribute providers and noun modules.
const EXPORTED_SYMBOLS = [];

// Fundamental attributes (from/to/date/...).
const { GlodaFundAttr } = ChromeUtils.import(
  "resource:///modules/gloda/GlodaFundAttr.jsm"
);
GlodaFundAttr.init();
// Explicitly-set attributes (tags, star, read, ...).
const { GlodaExplicitAttr } = ChromeUtils.import(
  "resource:///modules/gloda/GlodaExplicitAttr.jsm"
);
GlodaExplicitAttr.init();

// These are imported solely for their module-load side effects; no bindings
// are taken from them.
ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
ChromeUtils.import("resource:///modules/gloda/NounFreetag.jsm");
ChromeUtils.import("resource:///modules/gloda/NounMimetype.jsm");
ChromeUtils.import("resource:///modules/gloda/IndexMsg.jsm");
// Address-book related attributes.
const { GlodaABAttrs } = ChromeUtils.import(
  "resource:///modules/gloda/GlodaMsgIndexer.jsm"
);
GlodaABAttrs.init();
diff --git a/comm/mailnews/db/gloda/modules/Facet.jsm b/comm/mailnews/db/gloda/modules/Facet.jsm
new file mode 100644
index 0000000000..96425b8838
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Facet.jsm
@@ -0,0 +1,599 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides faceting logic.
+ */
+
+var EXPORTED_SYMBOLS = ["FacetDriver", "FacetUtils"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+const lazy = {};
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "Gloda",
+ "resource:///modules/gloda/GlodaPublic.jsm"
+);
+
+/**
+ * Decides the appropriate faceters for the noun type and drives the faceting
+ * process. This class and the faceters are intended to be reusable so that
+ * you only need one instance per faceting session. (Although each faceting
+ * pass is accordingly destructive to previous results.)
+ *
+ * Our strategy for faceting is to process one attribute at a time across all
+ * the items in the provided set. The alternative would be to iterate over
+ * the items and then iterate over the attributes on each item.  While both
+ * approaches have caching downsides, the attribute-at-a-time approach keeps
+ * each faceter's logic simple and self-contained.
+ */
function FacetDriver(aNounDef, aWindow) {
  // The noun definition whose facet-capable attributes we process.
  this.nounDef = aNounDef;
  // Window providing setTimeout, used to yield between faceting timeslices.
  this._window = aWindow;

  this._makeFaceters();
}
FacetDriver.prototype = {
  /**
   * Populate |this.faceters| with a set of faceters appropriate to the noun
   * definition associated with this instance.
   */
  _makeFaceters() {
    let faceters = (this.faceters = []);

    // Select the faceter implementation from the attribute's cardinality
    // (singular vs. set-valued) and the facet definition's type.
    function makeFaceter(aAttrDef, aFacetDef) {
      let facetType = aFacetDef.type;

      if (aAttrDef.singular) {
        if (facetType == "date") {
          faceters.push(new DateFaceter(aAttrDef, aFacetDef));
        } else {
          faceters.push(new DiscreteFaceter(aAttrDef, aFacetDef));
        }
      } else if (facetType == "nonempty?") {
        faceters.push(new NonEmptySetFaceter(aAttrDef, aFacetDef));
      } else {
        faceters.push(new DiscreteSetFaceter(aAttrDef, aFacetDef));
      }
    }

    for (let key in this.nounDef.attribsByBoundName) {
      let attrDef = this.nounDef.attribsByBoundName[key];
      // ignore attributes that do not want to be faceted
      if (!attrDef.facet) {
        continue;
      }

      makeFaceter(attrDef, attrDef.facet);

      // An attribute may contribute extra facets beyond its primary one.
      if ("extraFacets" in attrDef) {
        for (let facetDef of attrDef.extraFacets) {
          makeFaceter(attrDef, facetDef);
        }
      }
    }
  },
  /**
   * Asynchronously facet the provided items, calling the provided callback when
   * completed.
   */
  go(aItems, aCallback, aCallbackThis) {
    this.items = aItems;
    this.callback = aCallback;
    this.callbackThis = aCallbackThis;

    this._nextFaceter = 0;
    this._drive();
  },

  // Maximum time to spend faceting before yielding to the event loop.
  _MAX_FACETING_TIMESLICE_MS: 100,
  // How long to yield between timeslices (0 = next event-loop turn).
  _FACETING_YIELD_DURATION_MS: 0,
  // setTimeout trampoline; |this| is passed explicitly as an argument.
  _driveWrapper(aThis) {
    aThis._drive();
  },
  // Run faceters until finished or the timeslice is exhausted; in the latter
  // case, reschedule via setTimeout on the provided window and return.
  _drive() {
    let start = Date.now();

    while (this._nextFaceter < this.faceters.length) {
      let faceter = this.faceters[this._nextFaceter++];
      // for now we facet in one go, but the long-term plan allows for them to
      // be generators.
      faceter.facetItems(this.items);

      let delta = Date.now() - start;
      if (delta > this._MAX_FACETING_TIMESLICE_MS) {
        this._window.setTimeout(
          this._driveWrapper,
          this._FACETING_YIELD_DURATION_MS,
          this
        );
        return;
      }
    }

    // we only get here once we are done with the faceters
    this.callback.call(this.callbackThis);
  },
};
+
var FacetUtils = {
  /** Sort comparator: orders [group, items] tuples by descending item count. */
  _groupSizeComparator(a, b) {
    return b[1].length - a[1].length;
  },

  /**
   * Given a list where each entry is a tuple of [group object, list of items
   * belonging to that group], produce a new list containing only the largest
   * groups, re-ordered by the noun's own comparator. (An "other" aggregation
   * used to be produced as well, but proved conceptually difficult and was
   * removed.)
   *
   * @param aAttrDef The attribute for the facet we are working with.
   * @param aGroups The list of groups built for the facet.
   * @param aMaxCount The number of result rows you want back.
   */
  makeTopGroups(aAttrDef, aGroups, aMaxCount) {
    // Take the aMaxCount groups with the most items...
    let bySize = aGroups.concat();
    bySize.sort(this._groupSizeComparator);
    let topGroups = bySize.slice(0, aMaxCount);

    // ...then order the winners using the attribute noun's comparator.
    let comparator = aAttrDef.objectNounDef.comparator;
    topGroups.sort((a, b) => comparator(a[0], b[0]));

    return topGroups;
  },
};
+
+/**
+ * Facet discrete things like message authors, boolean values, etc. Only
+ * appropriate for use on singular values. Use |DiscreteSetFaceter| for
+ * non-singular values.
+ */
function DiscreteFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
DiscreteFaceter.prototype = {
  type: "discrete",
  /**
   * Facet the given set of items, dispatching on whether the attribute's
   * values are primitives or complex noun instances.
   */
  facetItems(aItems) {
    return this.attrDef.objectNounDef.isPrimitive
      ? this.facetPrimitiveItems(aItems)
      : this.facetComplexItems(aItems);
  },
  /**
   * Facet an attribute whose value is a primitive (raw number/string);
   * missing attributes group under null. Populates |this.groups|,
   * |this.groupCount| and |this.orderedGroups|.
   */
  facetPrimitiveItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;

    const keyToValue = {};
    const groups = (this.groups = {});
    this.groupCount = 0;

    for (const item of aItems) {
      const value = attrKey in item ? item[attrKey] : null;
      if (value === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      // Skip values the facet definition's filter rejects.
      if (filter && !filter(value)) {
        continue;
      }

      // hasOwnProperty guards against value strings that collide with
      // members of Object.prototype.
      if (groups.hasOwnProperty(value)) {
        groups[value].push(item);
      } else {
        groups[value] = [item];
        keyToValue[value] = value;
        this.groupCount++;
      }
    }

    // Emit [value, items] tuples ordered by the facet's group comparator.
    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [keyToValue[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
  /**
   * Facet an attribute whose value is a complex noun instance identified by
   * the facet definition's groupIdAttr. Populates |this.groups|,
   * |this.groupMap|, |this.groupCount| and |this.orderedGroups|.
   */
  facetComplexItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;
    const idAttr = this.facetDef.groupIdAttr;

    const groups = (this.groups = {});
    const groupMap = (this.groupMap = {});
    this.groupCount = 0;

    for (const item of aItems) {
      const value = attrKey in item ? item[attrKey] : null;
      if (value === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      // Skip values the facet definition's filter rejects.
      if (filter && !filter(value)) {
        continue;
      }

      const valueId = value == null ? null : value[idAttr];
      // hasOwnProperty guards against non-numeric ids (e.g. tag nouns)
      // colliding with members of Object.prototype. (Note: the "tags"
      // attribute is actually handled by the DiscreteSetFaceter.)
      if (groupMap.hasOwnProperty(valueId)) {
        groups[valueId].push(item);
      } else {
        groupMap[valueId] = value;
        groups[valueId] = [item];
        this.groupCount++;
      }
    }

    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [groupMap[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
};
+
+/**
+ * Facet sets of discrete items. For example, tags applied to messages.
+ *
+ * The main differences between us and |DiscreteFaceter| are:
+ * - The empty set is notable.
+ * - Specific set configurations could be interesting, but are not low-hanging
+ * fruit.
+ */
function DiscreteSetFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
DiscreteSetFaceter.prototype = {
  type: "discrete",
  /**
   * Facet the given set of items, dispatching on whether the attribute's
   * values are primitives or complex noun instances.
   */
  facetItems(aItems) {
    return this.attrDef.objectNounDef.isPrimitive
      ? this.facetPrimitiveItems(aItems)
      : this.facetComplexItems(aItems);
  },
  /**
   * Facet an attribute whose values are primitives (raw numbers/strings).
   * An empty or missing list is treated as the singleton set {null}.
   * Populates |this.groups|, |this.groupCount| and |this.orderedGroups|.
   */
  facetPrimitiveItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;

    const groups = (this.groups = {});
    const keyToValue = {};
    this.groupCount = 0;

    for (const item of aItems) {
      let values = attrKey in item ? item[attrKey] : null;
      if (values === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      if (values == null || values.length == 0) {
        values = [null];
      }
      for (const value of values) {
        // Skip values the facet definition's filter rejects.
        if (filter && !filter(value)) {
          continue;
        }

        // hasOwnProperty guards against value strings that collide with
        // members of Object.prototype.
        if (groups.hasOwnProperty(value)) {
          groups[value].push(item);
        } else {
          groups[value] = [item];
          keyToValue[value] = value;
          this.groupCount++;
        }
      }
    }

    // Emit [value, items] tuples ordered by the facet's group comparator.
    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [keyToValue[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
  /**
   * Facet an attribute whose values are complex noun instances identified by
   * the facet definition's groupIdAttr. An empty or missing list is treated
   * as the singleton set {null}. Populates |this.groups|, |this.groupMap|,
   * |this.groupCount| and |this.orderedGroups|.
   */
  facetComplexItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;
    const idAttr = this.facetDef.groupIdAttr;

    const groups = (this.groups = {});
    const groupMap = (this.groupMap = {});
    this.groupCount = 0;

    for (const item of aItems) {
      let values = attrKey in item ? item[attrKey] : null;
      if (values === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      if (values == null || values.length == 0) {
        values = [null];
      }
      for (const value of values) {
        // Skip values the facet definition's filter rejects.
        if (filter && !filter(value)) {
          continue;
        }

        const valueId = value == null ? null : value[idAttr];
        // hasOwnProperty guards against non-numeric ids (e.g. tag nouns)
        // colliding with members of Object.prototype.
        if (groupMap.hasOwnProperty(valueId)) {
          groups[valueId].push(item);
        } else {
          groupMap[valueId] = value;
          groups[valueId] = [item];
          this.groupCount++;
        }
      }
    }

    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [groupMap[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
};
+
+/**
+ * Given a non-singular attribute, facet it as if it were a boolean based on
+ * whether there is anything in the list (set).
+ */
function NonEmptySetFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
NonEmptySetFaceter.prototype = {
  type: "boolean",
  /**
   * Partition the items into a "has values" (true) group and an
   * "empty/missing" (false) group based on the non-singular attribute's
   * list; only non-empty groups appear in |this.orderedGroups|.
   */
  facetItems(aItems) {
    const attrKey = this.attrDef.boundName;

    const nonEmpty = [];
    const empty = [];

    this.groupCount = 0;

    for (const item of aItems) {
      const values = attrKey in item ? item[attrKey] : null;
      if (values == null || values.length == 0) {
        empty.push(item);
      } else {
        nonEmpty.push(item);
      }
    }

    this.orderedGroups = [];
    if (nonEmpty.length) {
      this.orderedGroups.push([true, nonEmpty]);
    }
    if (empty.length) {
      this.orderedGroups.push([false, empty]);
    }
    this.groupCount = this.orderedGroups.length;
  },
  makeQuery(aGroupValues, aInclusive) {
    const query = (this.query = lazy.Gloda.newQuery(
      GlodaConstants.NOUN_MESSAGE
    ));

    // Apply the boolean "non-empty" constraint for our attribute.
    query[this.attrDef.boundName].call(query);

    // Our query is always for non-empty lists (at this time), so we want to
    // invert if they're excluding 'true' or including 'false', which means !=.
    const invert = aGroupValues[0] != aInclusive;

    return [query, invert];
  },
};
+
+/**
+ * Facet dates. We build a hierarchical nested structure of year, month, and
+ * day nesting levels. This decision was made speculatively in the hopes that
+ * it would allow us to do clustered analysis and that there might be a benefit
+ * for that. For example, if you search for "Christmas", we might notice
+ * clusters of messages around December of each year. We could then present
+ * these in a list as likely candidates, rather than a graphical timeline.
+ * Alternately, it could be used to inform a non-linear visualization. As it
+ * stands (as of this writing), it's just a complicating factor.
+ */
function DateFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
DateFaceter.prototype = {
  type: "date",
  /**
   * Bucket the items into a year/month/day hierarchy stored in |this.years|,
   * tracking the overall time range in |this.oldest|/|this.newest| and
   * counting items whose date is missing (|this.missing|) or implausible
   * (|this.unreasonable|). Valid items are also collected in
   * |this.validItems|.
   */
  facetItems(aItems) {
    let attrKey = this.attrDef.boundName;

    // _subCount at each level tracks how many child buckets exist there.
    let years = (this.years = { _subCount: 0 });
    // generally track the time range
    let oldest = null,
      newest = null;

    this.validItems = [];

    // just cheat and put us at the front...
    this.groupCount = aItems.length ? 1000 : 0;
    this.orderedGroups = null;

    /** The number of items with a null/missing attribute. */
    this.missing = 0;

    /**
     * The number of items with a date that is unreasonably far in the past or
     * in the future.  Old-wise, we are concerned about incorrectly formatted
     * messages (spam) that end up placed around the UNIX epoch.  New-wise,
     * we are concerned about messages that can't be explained by users who
     * don't know how to set their clocks (both the current user and people
     * sending them mail), mainly meaning spam.
     * We want to avoid having our clever time-scale logic being made useless by
     * these unreasonable messages.
     */
    this.unreasonable = 0;
    // feb 1, 1970
    let tooOld = new Date(1970, 1, 1);
    // 3 days from now
    let tooNew = new Date(Date.now() + 3 * 24 * 60 * 60 * 1000);

    for (let item of aItems) {
      let val = attrKey in item ? item[attrKey] : null;
      // -- missing
      if (val == null) {
        this.missing++;
        continue;
      }

      // -- unreasonable
      if (val < tooOld || val > tooNew) {
        this.unreasonable++;
        continue;
      }

      this.validItems.push(item);

      // -- time range
      if (oldest == null) {
        oldest = newest = val;
      } else if (val < oldest) {
        oldest = val;
      } else if (val > newest) {
        newest = val;
      }

      // -- bucket
      // - year
      // NOTE(review): getYear() is deprecated and returns the year minus
      // 1900 (e.g. 109 for 2009), so the year buckets are keyed by that
      // offset rather than the full year. Consumers of |this.years| appear
      // to rely on these keys as-is -- confirm before switching to
      // getFullYear().
      let year,
        valYear = val.getYear();
      if (valYear in years) {
        year = years[valYear];
        year._dateCount++;
      } else {
        year = years[valYear] = {
          _dateCount: 1,
          _subCount: 0,
        };
        years._subCount++;
      }

      // - month (0-based, per Date.getMonth)
      let month,
        valMonth = val.getMonth();
      if (valMonth in year) {
        month = year[valMonth];
        month._dateCount++;
      } else {
        month = year[valMonth] = {
          _dateCount: 1,
          _subCount: 0,
        };
        year._subCount++;
      }

      // - day: leaf level; each day bucket is simply the list of items.
      let valDate = val.getDate();
      if (valDate in month) {
        month[valDate].push(item);
      } else {
        month[valDate] = [item];
      }
    }

    this.oldest = oldest;
    this.newest = newest;
  },

  /**
   * Collect the per-day item lists of a month bucket into an array,
   * skipping the "_dateCount"/"_subCount" bookkeeping keys.
   */
  _unionMonth(aMonthObj) {
    let dayItemLists = [];
    for (let key in aMonthObj) {
      let dayItemList = aMonthObj[key];
      if (typeof key == "string" && key.startsWith("_")) {
        continue;
      }
      dayItemLists.push(dayItemList);
    }
    return dayItemLists;
  },

  /**
   * Collect the per-month results of |_unionMonth| for every month bucket in
   * a year bucket, skipping the bookkeeping keys.
   */
  _unionYear(aYearObj) {
    let monthItemLists = [];
    for (let key in aYearObj) {
      let monthObj = aYearObj[key];
      if (typeof key == "string" && key.startsWith("_")) {
        continue;
      }
      monthItemLists.push(this._unionMonth(monthObj));
    }
    return monthItemLists;
  },
};
diff --git a/comm/mailnews/db/gloda/modules/Gloda.jsm b/comm/mailnews/db/gloda/modules/Gloda.jsm
new file mode 100644
index 0000000000..77b2288e53
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Gloda.jsm
@@ -0,0 +1,2275 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["Gloda"];
+
+const { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+const {
+ GlodaAttributeDBDef,
+ GlodaAccount,
+ GlodaConversation,
+ GlodaFolder,
+ GlodaMessage,
+ GlodaContact,
+ GlodaIdentity,
+ GlodaAttachment,
+} = ChromeUtils.import("resource:///modules/gloda/GlodaDataModel.jsm");
+const { GlodaCollection, GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { whittlerRegistry, mimeMsgToContentAndMeta } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaContent.jsm"
+);
+const { GlodaQueryClassFactory } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaQueryClassFactory.jsm"
+);
+const { GlodaUtils } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaUtils.jsm"
+);
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
/**
 * Error-like object presumably used to signal that an item's contents were
 * unacceptable; it stringifies to the message it was constructed with.
 *
 * @see |Gloda.BadItemContentsError|
 */
function BadItemContentsError(aMessage) {
  this.message = aMessage;
}
// Note: deliberately replaces the whole prototype, matching the original
// object-literal form.
BadItemContentsError.prototype = {
  toString() {
    return this.message;
  },
};
+
+/**
+ * Provides the user-visible (and extension visible) global database
+ * functionality. There is currently a dependency/ordering
+ * problem in that the concept of 'gloda' also includes some logic that is
+ * contributed by built-in extensions, if you will. Those built-in extensions
+ * (GlodaFundAttr.jsm, GlodaExplicitAttr.jsm) also import this file. To avoid a circular
+ * dependency, those built-in extensions are loaded by Everybody.jsm. The
+ * simplest/best solution is probably to move Everybody.jsm to be Gloda.jsm and
+ * have it re-export only 'Gloda'. Gloda.jsm (this file) can then move to be
+ * gloda_int.js (or whatever our eventual naming scheme is), which built-in
+ * extensions can explicitly rely upon.
+ *
+ * === Concepts
+ *
+ * == Nouns
+ *
+ * Inspired by reasonable uses of triple-stores, I have tried to leverage
+ * existing model and terminology rather than rolling our own for everything.
+ * The idea with triple-stores is that you have a subject, a predicate, and an
+ * object. For example, if we are talking about a message, that is the
+ * subject, the predicate could roughly be sent-by, and the object a person.
+ * We can generalize this idea to say that the subject and objects are nouns.
+ * Since we want to be more flexible than only dealing with messages, we
+ * therefore introduce the concept of nouns as an organizing principle.
+ *
+ * == Attributes
+ *
+ * Our attributes definitions are basically our predicates. When we define
+ * an attribute, it's a label with a bunch of meta-data. Our attribute
+ * instances are basically a 'triple' in a triple-store. The attributes
+ * are stored in database rows that imply a specific noun-type (ex: the
+ * messageAttributes table), with an ID identifying the message which is our
+ * subject, an attribute ID which identifies the attribute definition in use
+ * (and therefore the predicate), plus an object ID (given context aka the
+ * noun type by the attribute's meta-data) which identifies the 'object'.
+ *
+ * == But...
+ *
+ * Things aren't entirely as clear as they could be right now, terminology/
+ * concept/implementation-wise. Some work is probably still in order.
+ *
+ * === Implementation
+ *
+ * == Nouns
+ *
+ * So, we go and define the nouns that are roughly the classes in our data
+ * model. Every 'class' we define in GlodaDataModel.jsm is a noun that gets defined
+ * here in the Gloda core. We provide sufficient meta-data about the noun to
+ * serialize/deserialize its representation from our database representation.
+ * Nouns do not have to be defined in this class, but can also be contributed
+ * by external code.
+ * We have a concept of 'first class' nouns versus non-first class nouns. The
+ * distinction is meant to be whether we can store meta-information about those
+ * nouns using attributes.  Right now, only messages are real first-class nouns,
+ * but we want to expand that to include contacts and eventually events and
+ * tasks as lightning-integration occurs. In practice, we are stretching the
+ * definition of first-class nouns slightly to include things we can't store
+ * meta-data about, but want to be able to query about. We do want to resolve
+ * this.
+ *
+ * == Attributes
+ *
+ * Attributes are defined by "attribute providers" who are responsible for
+ * taking an instance of a first-class noun (for which they are registered)
+ * plus perhaps some other meta-data, and returning a list of attributes
+ * extracted from that noun. For now, this means messages. Attribute
+ * providers may create new data records as a side-effect of the indexing
+ * process, although we have not yet fully dealt with the problem of deleting
+ * these records should they become orphaned in the database due to the
+ * purging of a message and its attributes.
+ * All of the 'core' gloda attributes are provided by the GlodaFundAttr.jsm and
+ * GlodaExplicitAttr.jsm providers.
+ *
+ * === (Notable) Future Work
+ *
+ * == Attributes
+ *
+ * Attribute mechanisms currently lack any support for 'overriding' attributes
+ * provided by other attribute providers. For example, the fundattr provider
+ * tells us who a message is 'from' based on the e-mail address present.
+ * However, other plugins may actually know better. For example, the bugzilla
+ * daemon e-mails based on bug activity although the daemon gets the credit
+ * as the official sender. A bugzilla plugin can easily extract the actual
+ * person/e-mail addressed who did something on the bug to cause the
+ * notification to be sent. In practice, we would like that person to be
+ * the 'sender' of the bugmail. But we can't really do that right, yet.
+ *
+ * @namespace
+ */
+var Gloda = {
+ /**
+ * Initialize logging, the datastore (SQLite database), the core nouns and
+ * attributes, and the contact and identities that belong to the presumed
+ * current user (based on accounts).
+ *
+ * Additional nouns and the core attribute providers are initialized by the
+ * Everybody.jsm module which ensures all of those dependencies are loaded
+ * (and initialized).
+ */
+ _init() {
+ this._initLogging();
+ GlodaDatastore._init(this._nounIDToDef);
+ this._initAttributes();
+ this._initMyIdentities();
+ },
+
+ _log: null,
+ /**
+ * Initialize logging; the error console window gets Warning/Error, and stdout
+ * (via dump) gets everything.
+ */
+ _initLogging() {
+ this._log = console.createInstance({
+ prefix: "gloda",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+ this._log.info("Logging Initialized");
+ },
+
+ /**
+ * Callers should access the unique ID for the GlodaDatastore
+ * with this getter. If the GlodaDatastore has not been
+ * initialized, this value is null.
+ *
+ * @returns a UUID as a string, ex: "c4dd0159-9287-480f-a648-a4613e147fdb"
+ */
+ get datastoreID() {
+ return GlodaDatastore._datastoreID;
+ },
+
+ /**
+ * Lookup a gloda message from an nsIMsgDBHdr, with the result returned as a
+ * collection. Keep in mind that the message may not be indexed, so you
+ * may end up with an empty collection. (Also keep in mind that this query
+ * is asynchronous, so you will want your action-taking logic to be found
+ * in your listener's onQueryCompleted method; the result will not be in
+ * the collection when this method returns.)
+ *
+ * @param aMsgHdr The header of the message you want the gloda message for.
+ * @param aListener The listener that should be registered with the collection
+ * @param aData The (optional) value to set as the data attribute on the
+ * collection.
+ *
+ * @returns The collection that will receive the results.
+ *
+ * @testpoint gloda.ns.getMessageCollectionForHeader()
+ */
+ getMessageCollectionForHeader(aMsgHdr, aListener, aData) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.folder(aMsgHdr.folder).messageKey(aMsgHdr.messageKey);
+ return query.getCollection(aListener, aData);
+ },
+
+ /**
+ * Given a list of message headers, return a collection containing the gloda
+ * messages that correspond to those headers. Keep in mind that gloda may
+ * not have indexed all the messages, so the returned collection may not have
+ * a message for each header you provide. (Also keep in mind that this query
+ * is asynchronous, so you will want your action-taking logic to be found
+ * in your listener's onQueryCompleted method; no results will be present in
+ * the collection when this method returns.)
+ *
+ * @param aHeaders An array of headers
+ * @param aListener The listener that should be registered with the collection
+ * @param aData The (optional) value to set as the data attribute on the
+ * collection.
+ *
+ * @returns The collection that will receive the results.
+ *
+ * @testpoint gloda.ns.getMessageCollectionForHeaders()
+ */
+ getMessageCollectionForHeaders(aHeaders, aListener, aData) {
+ // group the headers by the folder they are found in
+ let headersByFolder = {};
+ for (let header of aHeaders) {
+ let folderURI = header.folder.URI;
+ let headersForFolder = headersByFolder[folderURI];
+ if (headersForFolder === undefined) {
+ headersByFolder[folderURI] = [header];
+ } else {
+ headersForFolder.push(header);
+ }
+ }
+
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ let clause;
+ // build a query, using a separate union clause for each folder.
+ for (let folderURI in headersByFolder) {
+ let headersForFolder = headersByFolder[folderURI];
+ let folder = this.getFolderForFolder(headersForFolder[0].folder);
+ // if this is the first or clause, just use the query itself
+ if (!clause) {
+ clause = query;
+ } else {
+ // Create a new query clause via the 'or' command.
+ clause = query.or();
+ }
+
+ clause.folder(folder);
+ let messageKeys = headersForFolder.map(hdr => hdr.messageKey);
+ clause.messageKey.apply(clause, messageKeys);
+ }
+
+ return query.getCollection(aListener, aData);
+ },
+
  /**
   * Derive the gloda content object for a message from its MIME
   * representation. Delegates to mimeMsgToContentAndMeta and returns only
   * the content half of its [content, meta] result pair.
   *
   * @testpoint gloda.ns.getMessageContent
   */
  getMessageContent(aGlodaMessage, aMimeMsg) {
    return mimeMsgToContentAndMeta(
      aMimeMsg,
      aGlodaMessage.folderMessage.folder
    )[0];
  },
+
  /**
   * Map an nsIMsgFolder to its GlodaFolder representation via the
   * datastore's folder mapping.
   *
   * @param aMsgFolder The nsIMsgFolder to map.
   * @returns The GlodaFolder corresponding to the given folder.
   */
  getFolderForFolder(aMsgFolder) {
    return GlodaDatastore._mapFolder(aMsgFolder);
  },
+
  /**
   * Takes one or more strings containing lists of comma-delimited e-mail
   * addresses with optional display names, and returns a list of sub-lists of
   * identities, where each sub-list corresponds to each of the strings passed
   * as arguments. These identities are loaded from the database if they
   * already exist, or created if they do not yet exist.
   * If the identities need to be created, they will also result in the
   * creation of a gloda contact. If a display name was provided with the
   * e-mail address, it will become the name of the gloda contact. If a
   * display name was not provided, the e-mail address will also serve as the
   * contact name.
   * This method uses the indexer's callback handle mechanism, and does not
   * obey traditional return semantics.
   *
   * We normalize all e-mail addresses to be lowercase as a normative measure.
   *
   * @param aCallbackHandle The GlodaIndexer callback handle (or equivalent)
   *   that you are operating under.
   * @param aAddrGroups... One or more strings. Each string can contain zero or more
   *   e-mail addresses with display name. If more than one address is given,
   *   they should be comma-delimited. For example
   *   '"Bob Smith" <bob@example.com>' is an address with display name. Mime
   *   header decoding is performed, but is ignorant of any folder-level
   *   character set overrides.
   * @returns via the callback handle mechanism, a list containing one sub-list
   *   for each string argument passed. Each sub-list contains zero or more
   *   GlodaIdentity instances corresponding to the addresses provided.
   */
  *getOrCreateMailIdentities(aCallbackHandle, ...aAddrGroups) {
    // Maps normalized (lower-cased) address -> [displayName, resultList...]:
    // the first element is the display name, every later element is a result
    // list that the resolved identity must be pushed onto.
    let addresses = {};
    let resultLists = [];

    // parse the strings
    for (let aMailAddresses of aAddrGroups) {
      let parsed = GlodaUtils.parseMailAddresses(aMailAddresses);

      let resultList = [];
      resultLists.push(resultList);

      for (let iAddress = 0; iAddress < parsed.count; iAddress++) {
        let address = parsed.addresses[iAddress].toLowerCase();
        if (address in addresses) {
          // Already seen this address; just note that another result list
          // wants the identity. (The first-seen display name wins.)
          addresses[address].push(resultList);
        } else {
          addresses[address] = [parsed.names[iAddress], resultList];
        }
      }
    }

    let addressList = Object.keys(addresses);
    if (addressList.length == 0) {
      yield aCallbackHandle.doneWithResult(resultLists);
      // we should be stopped before we reach this point, but safety first.
      return;
    }

    // Issue a single identity query covering all of the addresses; the
    // callback handle is the collection listener, and yielding kWorkAsync
    // suspends us until the query completes.
    let query = this.newQuery(GlodaConstants.NOUN_IDENTITY);
    query.kind("email");
    query.value.apply(query, addressList);
    let collection = query.getCollection(aCallbackHandle);
    yield GlodaConstants.kWorkAsync;

    // put the identities in the appropriate result lists
    for (let identity of collection.items) {
      let nameAndResultLists = addresses[identity.value];
      this._log.debug(
        " found identity for '" +
          nameAndResultLists[0] +
          "' (" +
          identity.value +
          ")"
      );
      // index 0 is the name, skip it
      for (let iResList = 1; iResList < nameAndResultLists.length; iResList++) {
        nameAndResultLists[iResList].push(identity);
      }
      // Remove the found address so only the not-yet-existing ones remain
      // for the creation loop below.
      delete addresses[identity.value];
    }

    // create the identities that did not exist yet
    for (let address in addresses) {
      let nameAndResultLists = addresses[address];
      let name = nameAndResultLists[0];

      this._log.debug(" creating contact for '" + name + "' (" + address + ")");

      // try and find an existing address book contact.
      let card = MailServices.ab.cardForEmailAddress(address);
      // XXX when we have the address book GUID stuff, we need to use that to
      // find existing contacts... (this will introduce a new query phase
      // where we batch all the GUIDs for an async query)
      // XXX when the address book supports multiple e-mail addresses, we
      // should also just create identities for any that don't yet exist

      // if there is no name, just use the e-mail (the ab indexer actually
      // processes the card's displayName for synchronization, so we don't
      // need to do that.)
      if (!name) {
        name = address;
      }

      let contact = GlodaDatastore.createContact(null, null, name, 0, 0);

      // we must create the identity. use a blank description because there's
      // nothing to differentiate it from other identities, as this contact
      // only has one initially (us).
      // XXX when we have multiple e-mails and there is a meaning associated
      // with each e-mail, try and use that to populate the description.
      // XXX we are creating the identity here before we insert the contact.
      // conceptually it is good for us to be creating the identity before
      // exposing it to the address-book indexer, but we could get our id's
      // in a bad way from not deferring the identity insertion until after
      // the contact insertion.
      let identity = GlodaDatastore.createIdentity(
        contact.id,
        contact,
        "email",
        address,
        /* description */ "",
        /* relay? */ false
      );
      contact._identities = [identity];

      // give the address book indexer a chance if we have a card.
      // (it will fix-up the name based on the card as appropriate)
      if (card) {
        yield aCallbackHandle.pushAndGo(
          Gloda.grokNounItem(contact, { card }, true, true, aCallbackHandle)
        );
      } else {
        // grokNounItem will issue the insert for us...
        GlodaDatastore.insertContact(contact);
      }

      // Push the freshly created identity onto every result list that
      // referenced this address.
      for (let iResList = 1; iResList < nameAndResultLists.length; iResList++) {
        nameAndResultLists[iResList].push(identity);
      }
    }

    yield aCallbackHandle.doneWithResult(resultLists);
  },
+
  /**
   * Dictionary of the user's known identities; key is the identity id (a
   * number), value is the actual GlodaIdentity. This is populated by
   * _initMyIdentities based on the accounts defined.
   */
  myIdentities: {},
  /**
   * The contact corresponding to the current user. We are assuming that only
   * a single user/human being uses the current profile. This is known to be
   * a flawed assumption, but is the best first approximation available.
   * The contact is based on the default account's default identity. The user
   * can change both, if desired, in Account Settings.
   *
   * @TODO attempt to deal with multiple people using the same profile
   */
  myContact: null,
  /**
   * Populate myIdentities with all of our identities. Currently we do this
   * by assuming that there is one human/user per profile, and that all of the
   * accounts defined in the profile belong to them. The single contact is
   * stored on myContact.
   *
   * @TODO deal with account addition/modification/removal
   * @TODO attempt to deal with multiple people using the same profile
   */
  _initMyIdentities() {
    let myContact = null;
    let myIdentities = {};
    // Process each email at most once; stored here.
    let myEmailAddresses = new Set();

    // Candidate names for the contact; the default identity's values win,
    // otherwise the first identity seen supplies them.
    let fullName, fallbackName;
    let existingIdentities = [];
    let identitiesToCreate = [];

    let allIdentities = MailServices.accounts.allIdentities;
    let defaultMsgIdentity = MailServices.accounts.defaultAccount
      ? MailServices.accounts.defaultAccount.defaultIdentity
      : null;
    let defaultMsgIdentityKey = defaultMsgIdentity
      ? defaultMsgIdentity.key
      : null;
    let defaultIdentity;

    // Nothing to do if there are no accounts/identities.
    if (allIdentities.length == 0) {
      return;
    }

    for (let msgIdentity of allIdentities) {
      let emailAddress = msgIdentity.email;
      let replyTo = msgIdentity.replyTo;
      let msgIdentityDescription = msgIdentity.fullName || msgIdentity.email;
      let isDefaultMsgIdentity = msgIdentity.key == defaultMsgIdentityKey;

      if (!fullName || isDefaultMsgIdentity) {
        fullName = msgIdentity.fullName;
      }
      if (!fallbackName || isDefaultMsgIdentity) {
        fallbackName = msgIdentity.email;
      }

      // Find the identities if they exist, flag to create them if they don't.
      // Both the primary address and the reply-to address are considered.
      for (let address of [emailAddress, replyTo]) {
        if (!address) {
          continue;
        }
        let parsed = GlodaUtils.parseMailAddresses(address);
        if (myEmailAddresses.has(parsed.addresses[0])) {
          continue;
        }
        let identity = GlodaDatastore.getIdentity("email", parsed.addresses[0]);
        if (identity) {
          if (identity.description != msgIdentityDescription) {
            // If the user changed the identity name, update the db.
            identity._description = msgIdentityDescription;
            GlodaDatastore.updateIdentity(identity);
          }
          existingIdentities.push(identity);
          if (isDefaultMsgIdentity) {
            defaultIdentity = identity;
          }
        } else {
          identitiesToCreate.push([
            parsed.addresses[0],
            msgIdentityDescription,
          ]);
        }
        myEmailAddresses.add(parsed.addresses[0]);
      }
    }
    // We need to establish the identity.contact portions of the relationship.
    for (let identity of existingIdentities) {
      identity._contact = GlodaDatastore.getContactByID(identity.contactID);
      if (defaultIdentity && defaultIdentity.id == identity.id) {
        if (identity.contact.name != (fullName || fallbackName)) {
          // If the user changed the default identity, update the db.
          identity.contact.name = fullName || fallbackName;
          GlodaDatastore.updateContact(identity.contact);
        }
        defaultIdentity._contact = identity.contact;
      }
    }

    if (defaultIdentity) {
      // The contact is based on the default account's default identity.
      myContact = defaultIdentity.contact;
    } else if (existingIdentities.length) {
      // Just use the first guy's contact.
      myContact = existingIdentities[0].contact;
    } else {
      // Create a new contact.
      myContact = GlodaDatastore.createContact(
        null,
        null,
        fullName || fallbackName,
        0,
        0
      );
      GlodaDatastore.insertContact(myContact);
    }

    // All newly-created identities hang off the single contact chosen above.
    for (let emailAndDescription of identitiesToCreate) {
      // XXX This won't always be of type "email" as we add new account types.
      let identity = GlodaDatastore.createIdentity(
        myContact.id,
        myContact,
        "email",
        emailAndDescription[0],
        emailAndDescription[1],
        false
      );
      existingIdentities.push(identity);
    }

    for (let identity of existingIdentities) {
      myIdentities[identity.id] = identity;
    }

    this.myContact = myContact;
    this.myIdentities = myIdentities;
    myContact._identities = Object.keys(myIdentities).map(
      id => myIdentities[id]
    );

    // We need contacts to make these objects reachable via the collection
    // manager.
    this._myContactCollection = this.explicitCollection(
      GlodaConstants.NOUN_CONTACT,
      [this.myContact]
    );
    this._myIdentitiesCollection = this.explicitCollection(
      GlodaConstants.NOUN_IDENTITY,
      this.myContact._identities
    );
  },
+
  /** Next Noun ID to hand out, these don't need to be persisted (for now). */
  _nextNounID: 1000,

  /**
   * Maps noun names to noun IDs.
   */
  _nounNameToNounID: {},
  /**
   * Maps noun IDs to noun definition dictionaries. (Noun definition
   * dictionaries provided to us at the time a noun was defined, plus some
   * additional stuff we put in there.)
   */
  _nounIDToDef: {},

  /**
   * Shared toJSON implementation installed on table-backed noun definitions
   * by defineNoun; such nouns serialize as just their numeric id.
   */
  _managedToJSON(aItem) {
    return aItem.id;
  },
+
  /**
   * Define a noun. Takes a dictionary with the following keys/values:
   *
   * @param aNounDef.name The name of the noun. This is not a display name
   *   (anything being displayed needs to be localized, after all), but simply
   *   the canonical name for debugging purposes and for people to pass to
   *   lookupNoun. The suggested convention is lower-case-dash-delimited,
   *   with names being singular (since it's a single noun we are referring
   *   to.)
   * @param aNounDef.class The 'class' to which an instance of the noun will
   *   belong (aka will pass an instanceof test). You may also provide this
   *   as 'clazz' if the keyword makes your IDE angry.
   * @param aNounDef.allowsArbitraryAttrs Is this a 'first class noun'/can it be
   *   a subject, AKA can this noun have attributes stored on it that relate
   *   it to other things? For example, a message is first-class; we store
   *   attributes of messages. A date is not first-class now, nor is it
   *   likely to be; we will not store attributes about a date, although dates
   *   will be the objects of other subjects. (For example: we might
   *   associate a date with a calendar event, but the date is an attribute of
   *   the calendar event and not vice versa.)
   * @param aNounDef.usesParameter A boolean indicating whether this noun
   *   requires use of the 'parameter' BLOB storage field on the attribute
   *   bindings in the database to persist itself. Use of parameters should
   *   be limited to a reasonable number of values (16-32 is okay, more than
   *   that is pushing it and 256 should be considered an absolute upper
   *   bound) because of the database organization. When false, your
   *   toParamAndValue function is expected to return null for the parameter
   *   and likewise your fromParamAndValue should expect ignore and generally
   *   ignore the argument.
   * @param aNounDef.toParamAndValue A function that takes an instantiated noun
   *   instance and returns a 2-element list of [parameter, value] where
   *   parameter may only be non-null if you passed a usesParameter of true.
   *   Parameter may be of any type (BLOB), and value must be numeric (pass
   *   0 if you don't need the value).
   *
   * @param aNounDef.isPrimitive True when the noun instance is a raw numeric
   *   value/string/boolean. False when the instance is an object. When
   *   false, it is assumed the attribute that serves as a unique identifier
   *   for the value is "id" unless 'idAttr' is provided.
   * @param [aNounDef.idAttr="id"] For non-primitive nouns, this is the
   *   attribute on the object that uniquely identifies it.
   *
   * @param aNounDef.schema Unsupported mechanism by which you can define a
   *   table that corresponds to this noun. The table will be created if it
   *   does not exist.
   *   - name The table name; don't conflict with other things!
   *   - columns A list of [column name, sqlite type] tuples. You should
   *     always include a definition like ["id", "INTEGER PRIMARY KEY"] for
   *     now (and it should be the first column name too.) If you care about
   *     how the attributes are poked into your object (for example, you want
   *     underscores used for some of them because the attributes should be
   *     immutable), then you can include a third string that is the name of
   *     the attribute to use.
   *   - indices A dictionary of lists of column names, where the key name
   *     becomes the index name. Ex: {foo: ["bar"]} results in an index on
   *     the column "bar" where the index is named "foo".
   * @param aNounDef.cache Truthy if instances of this noun should be cached
   *   by the collection manager. The optional cacheCost (estimated bytes per
   *   instance, default 1024) and cacheBudget (total byte budget, default
   *   128k) determine how many instances the cache may hold.
   * @param [aNounID] Explicit noun ID to assign (used by core nouns with
   *   reserved IDs); when omitted the next dynamically-allocated ID is used.
   */
  defineNoun(aNounDef, aNounID) {
    this._log.info("Defining noun: " + aNounDef.name);
    if (aNounID === undefined) {
      aNounID = this._nextNounID++;
    }
    aNounDef.id = aNounID;

    // Let people whose editors get angry about illegal attribute names use
    // clazz instead of class.
    if (aNounDef.clazz) {
      aNounDef.class = aNounDef.clazz;
    }

    if (!("idAttr" in aNounDef)) {
      aNounDef.idAttr = "id";
    }
    if (!("comparator" in aNounDef)) {
      // Default comparator throws so that accidental sorting of an
      // un-comparable noun fails loudly rather than silently mis-sorting.
      aNounDef.comparator = function () {
        throw new Error(
          "Noun type '" + aNounDef.name + "' lacks a real comparator."
        );
      };
    }

    // We allow nouns to have data tables associated with them where we do all
    // the legwork. The schema attribute is the gateway to this magical world
    // of functionality. Said door is officially unsupported.
    if (aNounDef.schema) {
      if (!aNounDef.tableName) {
        if (aNounDef.schema.name) {
          aNounDef.tableName = "ext_" + aNounDef.schema.name;
        } else {
          aNounDef.tableName = "ext_" + aNounDef.name;
        }
      }
      // this creates the data table and binder and hooks everything up
      GlodaDatastore.createNounTable(aNounDef);

      if (!aNounDef.toParamAndValue) {
        aNounDef.toParamAndValue = function (aThing) {
          if (aThing instanceof aNounDef.class) {
            return [null, aThing.id];
          }
          // assume they're just passing the id directly
          return [null, aThing];
        };
      }
    }

    // if it has a table, you can query on it. seems straight-forward.
    if (aNounDef.tableName) {
      [
        aNounDef.queryClass,
        aNounDef.nullQueryClass,
        aNounDef.explicitQueryClass,
        aNounDef.wildcardQueryClass,
      ] = GlodaQueryClassFactory(aNounDef);
      aNounDef._dbMeta = {};
      aNounDef.class.prototype.NOUN_ID = aNounDef.id;
      aNounDef.class.prototype.NOUN_DEF = aNounDef;
      aNounDef.toJSON = this._managedToJSON;

      aNounDef.specialLoadAttribs = [];

      // - define the 'id' constrainer
      let idConstrainer = function (...aArgs) {
        let constraint = [GlodaConstants.kConstraintIdIn, null, ...aArgs];
        this._constraints.push(constraint);
        return this;
      };
      aNounDef.queryClass.prototype.id = idConstrainer;
    }
    // Size the collection-manager cache from the per-instance cost estimate
    // and the overall byte budget.
    if (aNounDef.cache) {
      let cacheCost = aNounDef.cacheCost || 1024;
      let cacheBudget = aNounDef.cacheBudget || 128 * 1024;
      let cacheSize = Math.floor(cacheBudget / cacheCost);
      if (cacheSize) {
        GlodaCollectionManager.defineCache(aNounDef, cacheSize);
      }
    }
    aNounDef.attribsByBoundName = {};
    aNounDef.domExposeAttribsByBoundName = {};

    aNounDef.objectNounOfAttributes = [];

    // Register the noun in the name/id maps and set up the per-noun
    // attribute-provider bookkeeping.
    this._nounNameToNounID[aNounDef.name] = aNounID;
    this._nounIDToDef[aNounID] = aNounDef;
    aNounDef.actions = [];

    this._attrProviderOrderByNoun[aNounDef.id] = [];
    this._attrOptimizerOrderByNoun[aNounDef.id] = [];
    this._attrProvidersByNoun[aNounDef.id] = {};

    return aNounDef;
  },
+
+ /**
+ * Lookup a noun (ID) suitable for passing to defineAttribute's various
+ * noun arguments. Throws an exception if the noun with the given name
+ * cannot be found; the assumption is that you can't live without the noun.
+ */
+ lookupNoun(aNounName) {
+ if (aNounName in this._nounNameToNounID) {
+ return this._nounNameToNounID[aNounName];
+ }
+
+ throw Error(
+ "Unable to locate noun with name '" +
+ aNounName +
+ "', but I " +
+ "do know about: " +
+ Object.keys(this._nounNameToNounID).join(", ")
+ );
+ },
+
  /**
   * Lookup a noun def given a name. Throws (via lookupNoun) if no noun with
   * that name has been defined.
   */
  lookupNounDef(aNounName) {
    return this._nounIDToDef[this.lookupNoun(aNounName)];
  },
+
  /**
   * Define an action on a noun. During the prototype stage, this was conceived
   * of as a way to expose all the constraints possible given a noun. For
   * example, if you have an identity or a contact, you could use this to
   * see all the messages sent from/to a given contact. It was likewise
   * thought potentially usable for future expansion. For example, you could
   * also decide to send an e-mail to a contact when you have the contact
   * instance available.
   * Outside of the 'expmess' checkbox-happy prototype, this functionality is
   * not used. As such, this functionality should be considered in flux and
   * subject to changes. Also, very open to specific suggestions motivated
   * by use cases.
   * One conceptual issue raised by this mechanism is the interaction of actions
   * with facts like "this message is read". We currently implement the 'fact'
   * by defining an attribute with a 'boolean' noun type. To deal with this,
   * in various places we pass-in the attribute as well as the noun value.
   * Since the relationships for booleans and integers in these cases is
   * standard and well-defined, this works out pretty well, but suggests we
   * need to think things through.
   *
   * @param aNounID The ID of the noun you want to define an action on.
   *   The noun must already have been defined via defineNoun; no validation
   *   is performed here.
   * @param aActionMeta The dictionary describing the noun. The dictionary
   *   should have the following fields:
   *   - actionType: a string indicating the type of action. Currently, only
   *     "filter" is a legal value.
   *   - actionTarget: the noun ID of the noun type on which this action is
   *     applicable. For example,
   *
   *   The following should be present for actionType=="filter";
   *   - shortName: The name that should be used to display this constraint. For
   *     example, a checkbox-heavy UI might display a checkbox for each constraint
   *     using shortName as the label.
   *   - makeConstraint: A function that takes the attribute that is the source
   *     of the noun and the noun instance as arguments, and returns APV-style
   *     constraints. Since the APV-style query mechanism is now deprecated,
   *     this signature is deprecated. Probably the way to update this would be
   *     to pass in the query instance that constraints should be contributed to.
   */
  defineNounAction(aNounID, aActionMeta) {
    let nounDef = this._nounIDToDef[aNounID];
    nounDef.actions.push(aActionMeta);
  },
+
+ /**
+ * Retrieve all of the actions (as defined using defineNounAction) for the
+ * given noun type (via noun ID) with the given action type (ex: filter).
+ */
+ getNounActions(aNounID, aActionType) {
+ let nounDef = this._nounIDToDef[aNounID];
+ if (!nounDef) {
+ return [];
+ }
+ return nounDef.actions.filter(
+ action => !aActionType || action.actionType == aActionType
+ );
+ },
+
  /** Attribute providers in the sequence to process them. */
  _attrProviderOrderByNoun: {},
  /** Attribute providers that provide optimizers, in the sequence to process them. */
  _attrOptimizerOrderByNoun: {},
  /** Maps attribute providers to the list of attributes they provide. */
  _attrProviders: {},
  /**
   * Maps nouns to their attribute providers to a list of the attributes they
   * provide for the noun.
   */
  _attrProvidersByNoun: {},
+
+ /**
+ * Define the core nouns (that are not defined elsewhere) and a few noun
+ * actions. Core nouns could be defined in other files, assuming dependency
+ * issues are resolved via the Everybody.jsm mechanism or something else.
+ * Right now, noun_tag defines the tag noun. If we broke more of these out,
+ * we would probably want to move the 'class' code from GlodaDataModel.jsm, the
+ * SQL table def and helper code from GlodaDatastore.jsm (and this code) to their
+ * own noun_*.js files. There are some trade-offs to be made, and I think
+ * we can deal with those once we start to integrate lightning/calendar and
+ * our noun space gets large and more heterogeneous.
+ */
+ _initAttributes() {
+ this.defineNoun(
+ {
+ name: "bool",
+ clazz: Boolean,
+ allowsArbitraryAttrs: false,
+ isPrimitive: true,
+ // favor true before false
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return b - a;
+ },
+ toParamAndValue(aBool) {
+ return [null, aBool ? 1 : 0];
+ },
+ },
+ GlodaConstants.NOUN_BOOLEAN
+ );
+ this.defineNoun(
+ {
+ name: "number",
+ clazz: Number,
+ allowsArbitraryAttrs: false,
+ continuous: true,
+ isPrimitive: true,
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return a - b;
+ },
+ toParamAndValue(aNum) {
+ return [null, aNum];
+ },
+ },
+ GlodaConstants.NOUN_NUMBER
+ );
+ this.defineNoun(
+ {
+ name: "string",
+ clazz: String,
+ allowsArbitraryAttrs: false,
+ isPrimitive: true,
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return a.localeCompare(b);
+ },
+ toParamAndValue(aString) {
+ return [null, aString];
+ },
+ },
+ GlodaConstants.NOUN_STRING
+ );
+ this.defineNoun(
+ {
+ name: "date",
+ clazz: Date,
+ allowsArbitraryAttrs: false,
+ continuous: true,
+ isPrimitive: true,
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return a - b;
+ },
+ toParamAndValue(aDate) {
+ return [null, aDate.valueOf() * 1000];
+ },
+ },
+ GlodaConstants.NOUN_DATE
+ );
+ this.defineNoun(
+ {
+ name: "fulltext",
+ clazz: String,
+ allowsArbitraryAttrs: false,
+ continuous: false,
+ isPrimitive: true,
+ comparator(a, b) {
+ throw new Error("Fulltext nouns are not comparable!");
+ },
+ // as noted on NOUN_FULLTEXT, we just pass the string around. it never
+ // hits the database, so it's okay.
+ toParamAndValue(aString) {
+ return [null, aString];
+ },
+ },
+ GlodaConstants.NOUN_FULLTEXT
+ );
+
+ this.defineNoun(
+ {
+ name: "folder",
+ clazz: GlodaFolder,
+ allowsArbitraryAttrs: false,
+ isPrimitive: false,
+ queryHelpers: {
+ /**
+ * Query for accounts based on the account associated with folders. We
+ * walk all of the folders associated with an account and put them in
+ * the list of folders that match if gloda would index them. This is
+ * unsuitable for producing a persistable constraint since it does not
+ * adapt for added/deleted folders. However, it is sufficient for
+ * faceting. Also, we don't persist constraints yet.
+ *
+ * @TODO The long-term solution is to move towards using arithmetic
+ * encoding on folder-id's like we use for MIME types and friends.
+ */
+ Account(aAttrDef, aArguments) {
+ let folderValues = [];
+ let seenRootFolders = {};
+ for (let iArg = 0; iArg < aArguments.length; iArg++) {
+ let givenFolder = aArguments[iArg];
+ let givenMsgFolder = givenFolder.getXPCOMFolder(
+ givenFolder.kActivityFolderOnlyNoData
+ );
+ let rootFolder = givenMsgFolder.rootFolder;
+
+ // skip processing this folder if we have already processed its
+ // root folder.
+ if (rootFolder.URI in seenRootFolders) {
+ continue;
+ }
+ seenRootFolders[rootFolder.URI] = true;
+
+ for (let folder of rootFolder.descendants) {
+ let folderFlags = folder.flags;
+
+ // Ignore virtual folders, non-mail folders.
+ // XXX this is derived from GlodaIndexer's shouldIndexFolder.
+ // This should probably just use centralized code or the like.
+ if (
+ !(folderFlags & Ci.nsMsgFolderFlags.Mail) ||
+ folderFlags & Ci.nsMsgFolderFlags.Virtual
+ ) {
+ continue;
+ }
+ // we only index local or IMAP folders
+ if (
+ !(folder instanceof Ci.nsIMsgLocalMailFolder) &&
+ !(folder instanceof Ci.nsIMsgImapMailFolder)
+ ) {
+ continue;
+ }
+
+ let glodaFolder = Gloda.getFolderForFolder(folder);
+ folderValues.push(glodaFolder);
+ }
+ }
+ return this._inConstraintHelper(aAttrDef, folderValues);
+ },
+ },
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return a.name.localeCompare(b.name);
+ },
+ toParamAndValue(aFolderOrGlodaFolder) {
+ if (aFolderOrGlodaFolder instanceof GlodaFolder) {
+ return [null, aFolderOrGlodaFolder.id];
+ }
+ return [null, GlodaDatastore._mapFolder(aFolderOrGlodaFolder).id];
+ },
+ },
+ GlodaConstants.NOUN_FOLDER
+ );
+ this.defineNoun(
+ {
+ name: "account",
+ clazz: GlodaAccount,
+ allowsArbitraryAttrs: false,
+ isPrimitive: false,
+ equals(a, b) {
+ if ((a && !b) || (!a && b)) {
+ return false;
+ }
+ if (!a && !b) {
+ return true;
+ }
+ return a.id == b.id;
+ },
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return a.name.localeCompare(b.name);
+ },
+ },
+ GlodaConstants.NOUN_ACCOUNT
+ );
+ this.defineNoun(
+ {
+ name: "conversation",
+ clazz: GlodaConversation,
+ allowsArbitraryAttrs: false,
+ isPrimitive: false,
+ cache: true,
+ cacheCost: 512,
+ tableName: "conversations",
+ attrTableName: "messageAttributes",
+ attrIDColumnName: "conversationID",
+ datastore: GlodaDatastore,
+ objFromRow: GlodaDatastore._conversationFromRow,
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return a.subject.localeCompare(b.subject);
+ },
+ toParamAndValue(aConversation) {
+ if (aConversation instanceof GlodaConversation) {
+ return [null, aConversation.id];
+ }
+ // assume they're just passing the id directly
+ return [null, aConversation];
+ },
+ },
+ GlodaConstants.NOUN_CONVERSATION
+ );
+ this.defineNoun(
+ {
+ name: "message",
+ clazz: GlodaMessage,
+ allowsArbitraryAttrs: true,
+ isPrimitive: false,
+ cache: true,
+ cacheCost: 2048,
+ tableName: "messages",
+ // we will always have a fulltext row, even for messages where we don't
+ // have the body available. this is because we want the subject indexed.
+ dbQueryJoinMagic:
+ " INNER JOIN messagesText ON messages.id = messagesText.rowid",
+ attrTableName: "messageAttributes",
+ attrIDColumnName: "messageID",
+ datastore: GlodaDatastore,
+ objFromRow: GlodaDatastore._messageFromRow,
+ dbAttribAdjuster: GlodaDatastore.adjustMessageAttributes,
+ dbQueryValidityConstraintSuffix:
+ " AND +deleted = 0 AND +folderID IS NOT NULL AND +messageKey IS NOT NULL",
+ // This is what's used when we have no validity constraints, i.e. we allow
+ // for ghost messages, which do not have a row in the messagesText table.
+ dbQueryJoinMagicWithNoValidityConstraints:
+ " LEFT JOIN messagesText ON messages.id = messagesText.rowid",
+ objInsert: GlodaDatastore.insertMessage,
+ objUpdate: GlodaDatastore.updateMessage,
+ toParamAndValue(aMessage) {
+ if (aMessage instanceof GlodaMessage) {
+ return [null, aMessage.id];
+ }
+ // assume they're just passing the id directly
+ return [null, aMessage];
+ },
+ },
+ GlodaConstants.NOUN_MESSAGE
+ );
+ this.defineNoun(
+ {
+ name: "contact",
+ clazz: GlodaContact,
+ allowsArbitraryAttrs: true,
+ isPrimitive: false,
+ cache: true,
+ cacheCost: 128,
+ tableName: "contacts",
+ attrTableName: "contactAttributes",
+ attrIDColumnName: "contactID",
+ datastore: GlodaDatastore,
+ objFromRow: GlodaDatastore._contactFromRow,
+ dbAttribAdjuster: GlodaDatastore.adjustAttributes,
+ objInsert: GlodaDatastore.insertContact,
+ objUpdate: GlodaDatastore.updateContact,
+ comparator(a, b) {
+ if (a == null) {
+ if (b == null) {
+ return 0;
+ }
+ return 1;
+ } else if (b == null) {
+ return -1;
+ }
+ return a.name.localeCompare(b.name);
+ },
+ toParamAndValue(aContact) {
+ if (aContact instanceof GlodaContact) {
+ return [null, aContact.id];
+ }
+ // assume they're just passing the id directly
+ return [null, aContact];
+ },
+ },
+ GlodaConstants.NOUN_CONTACT
+ );
// Define the "identity" noun: cached, table-backed ("identities"), does not
// accept arbitrary attributes, and each identity value is unique
// (usesUniqueValue).
this.defineNoun(
  {
    name: "identity",
    clazz: GlodaIdentity,
    allowsArbitraryAttrs: false,
    isPrimitive: false,
    cache: true,
    cacheCost: 128,
    usesUniqueValue: true,
    tableName: "identities",
    datastore: GlodaDatastore,
    objFromRow: GlodaDatastore._identityFromRow,
    /**
     * Short string is the contact name, long string includes the identity
     * value too, delimited by a colon. Not tremendously localizable.
     */
    userVisibleString(aIdentity, aLong) {
      if (!aLong) {
        return aIdentity.contact.name;
      }
      if (aIdentity.contact.name == aIdentity.value) {
        return aIdentity.value;
      }
      return aIdentity.contact.name + " (" + aIdentity.value + ")";
    },
    // Sort by the owning contact's name; null identities sort last.
    comparator(a, b) {
      if (a == null) {
        if (b == null) {
          return 0;
        }
        return 1;
      } else if (b == null) {
        return -1;
      }
      return a.contact.name.localeCompare(b.contact.name);
    },
    // Identities are identified in the database purely by id; no parameter.
    toParamAndValue(aIdentity) {
      if (aIdentity instanceof GlodaIdentity) {
        return [null, aIdentity.id];
      }
      // assume they're just passing the id directly
      return [null, aIdentity];
    },
  },
  GlodaConstants.NOUN_IDENTITY
);
// Define the "attachment-infos" noun. It has no backing table; instances are
// round-tripped through the item's JSON blob via toJSON/fromJSON. The
// positional tuple produced by toJSON MUST stay in sync with the
// destructuring order in fromJSON (and with the GlodaAttachment constructor
// argument order).
this.defineNoun(
  {
    name: "attachment-infos",
    clazz: GlodaAttachment,
    allowsArbitraryAttrs: false,
    isPrimitive: false,
    // Serialize as a compact positional array: [name, contentType, size,
    // part, externalUrl, isExternal].
    toJSON(x) {
      return [
        x._name,
        x._contentType,
        x._size,
        x._part,
        x._externalUrl,
        x._isExternal,
      ];
    },
    // Rehydrate from the positional array; the owning message is supplied so
    // the attachment can reference back to it.
    fromJSON(x, aGlodaMessage) {
      let [name, contentType, size, _part, _externalUrl, isExternal] = x;
      return new GlodaAttachment(
        aGlodaMessage,
        name,
        contentType,
        size,
        _part,
        _externalUrl,
        isExternal
      );
    },
  },
  GlodaConstants.NOUN_ATTACHMENT
);
+
+ // parameterized identity is just two identities; we store the first one
// (whose value set must be very constrained, like the 'me' identities)
+ // as the parameter, the second (which does not need to be constrained)
+ // as the value.
// Define the "parameterized-identity" noun: a tuple of two identities, stored
// with the first identity's id as the database parameter and the second's id
// as the value (see toParamAndValue).
this.defineNoun(
  {
    name: "parameterized-identity",
    clazz: null,
    allowsArbitraryAttrs: false,
    // Sort primarily by the first identity's raw value, then by the second
    // identity's contact name; null tuples sort last.
    comparator(a, b) {
      if (a == null) {
        if (b == null) {
          return 0;
        }
        return 1;
      } else if (b == null) {
        return -1;
      }
      // First sort by the first identity in the tuple
      // Since our general use-case is for the first guy to be "me", we only
      // compare the identity value, not the name.
      let fic = a[0].value.localeCompare(b[0].value);
      if (fic) {
        return fic;
      }
      // Next compare the second identity in the tuple, but use the contact
      // this time to be consistent with our identity comparator.
      return a[1].contact.name.localeCompare(b[1].contact.name);
    },
    // Compute the [added, removed] tuples between the current and old value
    // sets. NOTE: the map keys are the identities' toString() forms; tuples
    // are considered equal when both stringified identities match.
    computeDelta(aCurValues, aOldValues) {
      let oldMap = {};
      for (let tupe of aOldValues) {
        let [originIdentity, targetIdentity] = tupe;
        let targets = oldMap[originIdentity];
        if (targets === undefined) {
          targets = oldMap[originIdentity] = {};
        }
        targets[targetIdentity] = true;
      }

      let added = [],
        removed = [];
      for (let tupe of aCurValues) {
        let [originIdentity, targetIdentity] = tupe;
        let targets = oldMap[originIdentity];
        if (targets === undefined || !(targetIdentity in targets)) {
          added.push(tupe);
        } else {
          // Seen in both sets; delete so whatever remains in oldMap at the
          // end is exactly the removed set.
          delete targets[targetIdentity];
        }
      }

      for (let originIdentity in oldMap) {
        let targets = oldMap[originIdentity];
        for (let targetIdentity in targets) {
          removed.push([originIdentity, targetIdentity]);
        }
      }

      return [added, removed];
    },
    // Register both identity ids of every tuple as identity-noun references
    // that need loading. Returns true if any references were contributed.
    contributeObjDependencies(
      aJsonValues,
      aReferencesByNounID,
      aInverseReferencesByNounID
    ) {
      // nothing to do with a zero-length list
      if (aJsonValues.length == 0) {
        return false;
      }

      let nounIdentityDef =
        Gloda._nounIDToDef[GlodaConstants.NOUN_IDENTITY];
      let references = aReferencesByNounID[nounIdentityDef.id];
      if (references === undefined) {
        references = aReferencesByNounID[nounIdentityDef.id] = {};
      }

      for (let tupe of aJsonValues) {
        let [originIdentityID, targetIdentityID] = tupe;
        if (!(originIdentityID in references)) {
          references[originIdentityID] = null;
        }
        if (!(targetIdentityID in references)) {
          references[targetIdentityID] = null;
        }
      }

      return true;
    },
    // Map the stored id pairs back to the loaded identity objects that
    // contributeObjDependencies requested.
    resolveObjDependencies(
      aJsonValues,
      aReferencesByNounID,
      aInverseReferencesByNounID
    ) {
      let references = aReferencesByNounID[GlodaConstants.NOUN_IDENTITY];

      let results = [];
      for (let tupe of aJsonValues) {
        let [originIdentityID, targetIdentityID] = tupe;
        results.push([
          references[originIdentityID],
          references[targetIdentityID],
        ]);
      }

      return results;
    },
    toJSON(aIdentityTuple) {
      return [aIdentityTuple[0].id, aIdentityTuple[1].id];
    },
    // First identity's id is the db parameter, second's id is the value.
    toParamAndValue(aIdentityTuple) {
      return [aIdentityTuple[0].id, aIdentityTuple[1].id];
    },
  },
  GlodaConstants.NOUN_PARAM_IDENTITY
);

// Load all attribute definitions already persisted in the database so they
// can be bound to providers as defineAttribute is called.
GlodaDatastore.getAllAttributes();
+ },
+
+ /**
+ * Create accessor functions to 'bind' an attribute to underlying normalized
+ * attribute storage, as well as creating the appropriate query object
+ * constraint helper functions. This name is somewhat of a misnomer because
+ * special attributes are not 'bound' (because specific/non-generic per-class
+ * code provides the properties) but still depend on this method to
+ * establish their constraint helper methods.
+ *
+ * @XXX potentially rename to not suggest binding is required.
+ */
_bindAttribute(aAttrDef, aSubjectNounDef) {
  let objectNounDef = aAttrDef.objectNounDef;

  // -- the query constraint helpers
  // Only nouns that are queryable (have had a query class created by
  // defineNoun) get constraint helpers on their query prototype.
  if (aSubjectNounDef.queryClass !== undefined) {
    let constrainer;
    let canQuery = true;
    if (
      "special" in aAttrDef &&
      aAttrDef.special == GlodaConstants.kSpecialFulltext
    ) {
      // Fulltext attributes get a MATCH-style constraint.
      constrainer = function (...aArgs) {
        let constraint = [
          GlodaConstants.kConstraintFulltext,
          aAttrDef,
          ...aArgs,
        ];
        this._constraints.push(constraint);
        return this;
      };
    } else if (aAttrDef.canQuery || aAttrDef.attributeName.startsWith("_")) {
      // Normal query-able attribute (underscore-prefixed internal attributes
      // are always allowed): an IN-set constraint over the given values.
      constrainer = function (...aArgs) {
        let constraint = [GlodaConstants.kConstraintIn, aAttrDef, ...aArgs];
        this._constraints.push(constraint);
        return this;
      };
    } else {
      // Not query-able: install a helper that throws a descriptive error so
      // misuse fails loudly rather than silently returning nothing.
      constrainer = function () {
        throw new Error(
          "Cannot query on attribute " +
            aAttrDef.attributeName +
            " because its canQuery parameter hasn't been set to true." +
            " Reading the comments about Gloda.defineAttribute may be a" +
            " sensible thing to do now."
        );
      };
      canQuery = false;
    }

    aSubjectNounDef.queryClass.prototype[aAttrDef.boundName] = constrainer;

    // Don't bind extra query-able attributes if we're unable to perform a
    // search on the attribute.
    if (!canQuery) {
      return;
    }

    // - ranged value helper: fooRange
    if (objectNounDef.continuous) {
      // takes one or more tuples of [lower bound, upper bound]
      let rangedConstrainer = function (...aArgs) {
        let constraint = [
          GlodaConstants.kConstraintRanges,
          aAttrDef,
          ...aArgs,
        ];
        this._constraints.push(constraint);
        return this;
      };

      aSubjectNounDef.queryClass.prototype[aAttrDef.boundName + "Range"] =
        rangedConstrainer;
    }

    // - string LIKE helper for special on-row attributes: fooLike
    // (it is impossible to store a string as an indexed attribute, which is
    // why we do this for on-row only.)
    if (
      "special" in aAttrDef &&
      aAttrDef.special == GlodaConstants.kSpecialString
    ) {
      let likeConstrainer = function (...aArgs) {
        let constraint = [
          GlodaConstants.kConstraintStringLike,
          aAttrDef,
          ...aArgs,
        ];
        this._constraints.push(constraint);
        return this;
      };

      aSubjectNounDef.queryClass.prototype[aAttrDef.boundName + "Like"] =
        likeConstrainer;
    }

    // - Custom helpers provided by the noun type...
    // Each helper is exposed as boundName + helperName and receives the
    // attribute definition as its first argument.
    if ("queryHelpers" in objectNounDef) {
      for (let name in objectNounDef.queryHelpers) {
        let helper = objectNounDef.queryHelpers[name];
        // we need a new closure...
        let helperFunc = helper;
        aSubjectNounDef.queryClass.prototype[aAttrDef.boundName + name] =
          function (...aArgs) {
            return helperFunc.call(this, aAttrDef, ...aArgs);
          };
      }
    }
  }
},
+
+ /**
+ * Names of attribute-specific localized strings and the JS attribute they are
+ * exposed as in the attribute's "strings" attribute (if the provider has a
+ * string bundle exposed on its "strings" attribute). They are rooted at
+ * "gloda.SUBJECT-NOUN-NAME.attr.ATTR-NAME.*".
+ *
+ * Please consult the localization notes in gloda.properties to understand
+ * what these are used for.
+ */
// Maps string-bundle property-name suffixes to the JS attribute name they are
// exposed under on a facet's "strings" object (consumed by
// gatherLocalizedStrings in defineAttribute). Currently all entries map to
// themselves.
_ATTR_LOCALIZED_STRINGS: {
  /* - Faceting */
  facetNameLabel: "facetNameLabel",
  noneLabel: "noneLabel",
  includeLabel: "includeLabel",
  excludeLabel: "excludeLabel",
  remainderLabel: "remainderLabel",
  mustMatchLabel: "mustMatchLabel",
  cantMatchLabel: "cantMatchLabel",
  mayMatchLabel: "mayMatchLabel",
  mustMatchNoneLabel: "mustMatchNoneLabel",
  mustMatchSomeLabel: "mustMatchSomeLabel",
  mayMatchAnyLabel: "mayMatchAnyLabel",
},
+ /**
+ * Define an attribute and all its meta-data. Takes a single dictionary as
+ * its argument, with the following required properties:
+ *
+ * @param aAttrDef.provider The object instance providing a 'process' method.
+ * @param aAttrDef.extensionName The name of the extension providing these
+ * attributes.
+ * @param aAttrDef.attributeType The type of attribute, one of the values from
+ * the kAttr* enumeration.
+ * @param aAttrDef.attributeName The name of the attribute, which also doubles
+ * as the bound property name if you pass 'bind' a value of true. You are
+ * responsible for avoiding collisions, which presumably will mean
+ * checking/updating a wiki page in the future, or just prefixing your
+ * attribute name with your extension name or something like that.
+ * @param aAttrDef.bind Should this attribute be 'bound' as a convenience
+ * attribute on the subject's object (true/false)? For example, with an
+ * attributeName of "foo" and passing true for 'bind' with a subject noun
+ * of NOUN_MESSAGE, GlodaMessage instances will expose a "foo" getter that
+ * returns the value of the attribute. If 'singular' is true, this means
+ * an instance of the object class corresponding to the noun type or null
+ * if the attribute does not exist. If 'singular' is false, this means a
+ * list of instances of the object class corresponding to the noun type,
+ * where the list may be empty if no instances of the attribute are
+ * present.
+ * @param aAttrDef.bindName Optional override of attributeName for purposes of
+ * the binding property's name.
+ * @param aAttrDef.singular Is the attribute going to happen at most once
+ * (true), or potentially multiple times (false). This affects whether
+ * the binding returns a list or just a single item (which is null when
+ * the attribute is not present).
* @param [aAttrDef.emptySetIsSignificant=false] Should the complete absence
*     of values for this attribute be recorded in the database (as an
*     empty-set marker row) so that it can be queried on?
+ * @param aAttrDef.subjectNouns A list of object types (NOUNs) that this
+ * attribute can be set on. Each element in the list should be one of the
+ * NOUN_* constants or a dynamically registered noun type.
+ * @param aAttrDef.objectNoun The object type (one of the NOUN_* constants or
+ * a dynamically registered noun types) that is the 'object' in the
+ * traditional RDF triple. More pragmatically, in the database row used
+ * to represent an attribute, we store the subject (ex: message ID),
+ * attribute ID, and an integer which is the integer representation of the
+ * 'object' whose type you are defining right here.
+ */
defineAttribute(aAttrDef) {
  // ensure required properties exist on aAttrDef
  if (
    !("provider" in aAttrDef) ||
    !("extensionName" in aAttrDef) ||
    !("attributeType" in aAttrDef) ||
    !("attributeName" in aAttrDef) ||
    !("singular" in aAttrDef) ||
    !("subjectNouns" in aAttrDef) ||
    !("objectNoun" in aAttrDef)
  ) {
    // perhaps we should have a list of required attributes, perchance with
    // an explanation of what it holds, and use that to be friendlier?
    throw Error(
      "You omitted a required attribute defining property, please" +
        " consult the documentation as penance."
    );
  }

  // -- Fill in defaults
  if (!("emptySetIsSignificant" in aAttrDef)) {
    aAttrDef.emptySetIsSignificant = false;
  }

  // Faceted attributes are query-able by default; everything else must
  // opt in explicitly.
  if (!("canQuery" in aAttrDef)) {
    aAttrDef.canQuery = !!aAttrDef.facet;
  }

  // return if the attribute has already been defined
  if (aAttrDef.dbDef) {
    return aAttrDef;
  }

  // - first time we've seen a provider init logic
  if (!(aAttrDef.provider.providerName in this._attrProviders)) {
    this._attrProviders[aAttrDef.provider.providerName] = [];
    // Providers that whittle message content register with the whittler
    // registry exactly once.
    if (aAttrDef.provider.contentWhittle) {
      whittlerRegistry.registerWhittler(aAttrDef.provider);
    }
  }

  // Attributes are namespaced by extension: "extensionName:attributeName".
  let compoundName = aAttrDef.extensionName + ":" + aAttrDef.attributeName;
  // -- Database Definition
  let attrDBDef;
  if (compoundName in GlodaDatastore._attributeDBDefs) {
    // the existence of the GlodaAttributeDBDef means that either it has
    // already been fully defined, or has been loaded from the database but
    // not yet 'bound' to a provider (and had important meta-info that
    // doesn't go in the db copied over)
    attrDBDef = GlodaDatastore._attributeDBDefs[compoundName];
  } else {
    // we need to create the attribute definition in the database
    let attrID = null;
    attrID = GlodaDatastore._createAttributeDef(
      aAttrDef.attributeType,
      aAttrDef.extensionName,
      aAttrDef.attributeName,
      null
    );

    attrDBDef = new GlodaAttributeDBDef(
      GlodaDatastore,
      attrID,
      compoundName,
      aAttrDef.attributeType,
      aAttrDef.extensionName,
      aAttrDef.attributeName
    );
    GlodaDatastore._attributeDBDefs[compoundName] = attrDBDef;
    GlodaDatastore._attributeIDToDBDefAndParam[attrID] = [attrDBDef, null];
  }

  // Cross-link the in-memory definition and the database definition.
  aAttrDef.dbDef = attrDBDef;
  attrDBDef.attrDef = aAttrDef;

  aAttrDef.id = aAttrDef.dbDef.id;

  if ("bindName" in aAttrDef) {
    aAttrDef.boundName = aAttrDef.bindName;
  } else {
    aAttrDef.boundName = aAttrDef.attributeName;
  }

  aAttrDef.objectNounDef = this._nounIDToDef[aAttrDef.objectNoun];
  aAttrDef.objectNounDef.objectNounOfAttributes.push(aAttrDef);

  // -- Facets
  // Fill any missing facet-definition fields from the object noun's
  // defaults.
  function normalizeFacetDef(aFacetDef) {
    if (!("groupIdAttr" in aFacetDef)) {
      aFacetDef.groupIdAttr = aAttrDef.objectNounDef.idAttr;
    }
    if (!("groupComparator" in aFacetDef)) {
      aFacetDef.groupComparator = aAttrDef.objectNounDef.comparator;
    }
    if (!("filter" in aFacetDef)) {
      aFacetDef.filter = null;
    }
  }
  // No facet attribute means no facet desired; set an explicit null so that
  // code can check without doing an "in" check.
  if (!("facet" in aAttrDef)) {
    aAttrDef.facet = null;
  } else if (aAttrDef.facet === true) {
    // Promote "true" facet values to the defaults. Where attributes have
    // specified values, make sure we fill in any missing defaults.
    aAttrDef.facet = {
      type: "default",
      groupIdAttr: aAttrDef.objectNounDef.idAttr,
      groupComparator: aAttrDef.objectNounDef.comparator,
      filter: null,
    };
  } else {
    normalizeFacetDef(aAttrDef.facet);
  }
  if ("extraFacets" in aAttrDef) {
    for (let facetDef of aAttrDef.extraFacets) {
      normalizeFacetDef(facetDef);
    }
  }

  // Copy every _ATTR_LOCALIZED_STRINGS entry found under aPropRoot in the
  // bundle onto aStickIn; missing strings are simply skipped.
  function gatherLocalizedStrings(aBundle, aPropRoot, aStickIn) {
    for (let propName in Gloda._ATTR_LOCALIZED_STRINGS) {
      let attrName = Gloda._ATTR_LOCALIZED_STRINGS[propName];
      try {
        aStickIn[attrName] = aBundle.GetStringFromName(aPropRoot + propName);
      } catch (ex) {
        // do nothing. nsIStringBundle throws exceptions when not found
      }
    }
  }

  // -- L10n.
  // If the provider has a string bundle, populate a "strings" attribute with
  // our standard attribute strings that can be UI exposed.
  if ("strings" in aAttrDef.provider && aAttrDef.facet) {
    let bundle = aAttrDef.provider.strings;

    // -- attribute strings
    let attrStrings = (aAttrDef.facet.strings = {});
    // we use the first subject the attribute applies to as the basis of
    // where to get the string from. Mainly because we currently don't have
    // any attributes with multiple subjects nor a use-case where we expose
    // multiple noun types via the UI. (Just messages right now.)
    let canonicalSubject = this._nounIDToDef[aAttrDef.subjectNouns[0]];
    let propRoot =
      "gloda." +
      canonicalSubject.name +
      ".attr." +
      aAttrDef.attributeName +
      ".";
    gatherLocalizedStrings(bundle, propRoot, attrStrings);

    // -- alias strings for synthetic facets
    if ("extraFacets" in aAttrDef) {
      for (let facetDef of aAttrDef.extraFacets) {
        facetDef.strings = {};
        let aliasPropRoot =
          "gloda." + canonicalSubject.name + ".attr." + facetDef.alias + ".";
        gatherLocalizedStrings(bundle, aliasPropRoot, facetDef.strings);
      }
    }
  }

  // -- Subject Noun Binding
  for (
    let iSubject = 0;
    iSubject < aAttrDef.subjectNouns.length;
    iSubject++
  ) {
    let subjectType = aAttrDef.subjectNouns[iSubject];
    let subjectNounDef = this._nounIDToDef[subjectType];
    this._bindAttribute(aAttrDef, subjectNounDef);

    // update the provider maps...
    if (
      !this._attrProviderOrderByNoun[subjectType].includes(aAttrDef.provider)
    ) {
      this._attrProviderOrderByNoun[subjectType].push(aAttrDef.provider);
      if (aAttrDef.provider.optimize) {
        this._attrOptimizerOrderByNoun[subjectType].push(aAttrDef.provider);
      }
      this._attrProvidersByNoun[subjectType][aAttrDef.provider.providerName] =
        [];
    }
    this._attrProvidersByNoun[subjectType][
      aAttrDef.provider.providerName
    ].push(aAttrDef);

    subjectNounDef.attribsByBoundName[aAttrDef.boundName] = aAttrDef;
    if (aAttrDef.domExpose) {
      subjectNounDef.domExposeAttribsByBoundName[aAttrDef.boundName] =
        aAttrDef;
    }

    // NOTE: bitwise & here — kSpecialColumn appears to be a flag that other
    // special values can include; the parent-column check below uses strict
    // equality instead.
    if (
      "special" in aAttrDef &&
      aAttrDef.special & GlodaConstants.kSpecialColumn
    ) {
      subjectNounDef.specialLoadAttribs.push(aAttrDef);
    }

    // if this is a parent column attribute, make note of it so that if we
    // need to do an inverse references lookup, we know what column we are
    // issuing against.
    if (
      "special" in aAttrDef &&
      aAttrDef.special === GlodaConstants.kSpecialColumnParent
    ) {
      subjectNounDef.parentColumnAttr = aAttrDef;
    }

    if (
      aAttrDef.objectNounDef.tableName ||
      aAttrDef.objectNounDef.contributeObjDependencies
    ) {
      subjectNounDef.hasObjDependencies = true;
    }
  }

  this._attrProviders[aAttrDef.provider.providerName].push(aAttrDef);
  return aAttrDef;
},
+
+ /**
+ * Retrieve the attribute provided by the given extension with the given
+ * attribute name. The original idea was that plugins would effectively
+ * name-space attributes, helping avoid collisions. Since we are leaning
+ * towards using binding heavily, this doesn't really help, as the collisions
+ * will just occur on the attribute name instead. Also, this can turn
+ * extensions into liars as name changes/moves to core/etc. happen.
+ *
+ * @TODO consider removing the extension name argument parameter requirement
+ */
+ getAttrDef(aPluginName, aAttrName) {
+ let compoundName = aPluginName + ":" + aAttrName;
+ return GlodaDatastore._attributeDBDefs[compoundName];
+ },
+
+ /**
+ * Create a new query instance for the given noun-type. This provides
+ * a generic way to provide constraint-based queries of any first-class
+ * nouns supported by the system.
+ *
+ * The idea is that every attribute on an object can be used to express
+ * a constraint on the query object. Constraints implicitly 'AND' together,
+ * but providing multiple arguments to a constraint function results in an
+ * 'OR'ing of those values. Additionally, you can call or() on the returned
+ * query to create an alternate query that is effectively a giant OR against
+ * all the constraints you create on the main query object (or any other
+ * alternate queries returned by or()). (Note: there is no nesting of these
+ * alternate queries. query.or().or() is equivalent to query.or())
+ * For each attribute, there is a constraint with the same name that takes
+ * one or more arguments. The arguments represent a set of OR values that
+ * objects matching the query can have. (If you want the constraint
+ * effectively ANDed together, just invoke the constraint function
+ * multiple times.) For example, newQuery(NOUN_PERSON).age(25) would
* constrain to all the people aged 25, while age(25, 26) would constrain
+ * to all the people age 25 or 26.
+ * For each attribute with a 'continuous' noun, there is a constraint with the
+ * attribute name with "Range" appended. It takes two arguments which are an
* inclusive lower bound and an inclusive upper bound for values in the
+ * range. If you would like an open-ended range on either side, pass null
+ * for that argument. If you would like to specify multiple ranges that
+ * should be ORed together, simply pass additional (pairs of) arguments.
* For example, newQuery(NOUN_PERSON).age(25,100) would constrain to all
* the people who are >= 25 and <= 100. Likewise age(25, null) would just
* return all the people who are 25 or older. And age(25,30,35,40) would
* return people who are either 25-30 or 35-40.
+ * There are also full-text constraint columns. In a nutshell, their
+ * arguments are the strings that should be passed to the SQLite FTS3
+ * MATCH clause.
+ *
+ * @param aNounID The (integer) noun-id of the noun you want to query on.
+ * @param aOptions an optional dictionary of query options, see the GlodaQuery
+ * class documentation.
+ */
+ newQuery(aNounID, aOptions) {
+ let nounDef = this._nounIDToDef[aNounID];
+ return new nounDef.queryClass(aOptions);
+ },
+
+ /**
+ * Create a collection/query for the given noun-type that only matches the
+ * provided items. This is to be used when you have an explicit set of items
+ * that you would still like to receive updates for.
+ */
+ explicitCollection(aNounID, aItems) {
+ let nounDef = this._nounIDToDef[aNounID];
+ let collection = new GlodaCollection(nounDef, aItems, null, null);
+ let query = new nounDef.explicitQueryClass(collection);
+ collection.query = query;
+ GlodaCollectionManager.registerCollection(collection);
+ return collection;
+ },
+
+ /**
+ * Debugging 'wildcard' collection creation support. A wildcard collection
+ * will 'accept' any new item instances presented to the collection manager
+ * as new. The result is that it allows you to be notified as new items
+ * as they are indexed, existing items as they are loaded from the database,
+ * etc.
+ * Because the items are added to the collection without limit, this will
+ * result in a leak if you don't do something to clean up after the
+ * collection. (Forgetting about the collection will suffice, as it is still
+ * weakly held.)
+ */
+ _wildcardCollection(aNounID, aItems) {
+ let nounDef = this._nounIDToDef[aNounID];
+ let collection = new GlodaCollection(nounDef, aItems, null, null);
+ let query = new nounDef.wildcardQueryClass(collection);
+ collection.query = query;
+ GlodaCollectionManager.registerCollection(collection);
+ return collection;
+ },
+
+ /**
+ * Attribute providers attempting to index something that experience a fatal
+ * problem should throw one of these. For example:
+ * "throw new Gloda.BadItemContentsError('Message lacks an author.');".
+ *
+ * We're not really taking advantage of this yet, but it's a good idea.
+ */
+ BadItemContentsError,
+
+ /* eslint-disable complexity */
+ /**
+ * Populate a gloda representation of an item given the thus-far built
+ * representation, the previous representation, and one or more raw
+ * representations. The attribute providers/optimizers for the given noun
+ * type are invoked, allowing them to contribute/alter things. Following
+ * that, we build and persist our attribute representations.
+ *
+ * The result of the processing ends up with attributes in 3 different forms:
+ * - Database attribute rows (to be added and removed).
+ * - In-memory representation.
+ * - JSON-able representation.
+ *
+ * @param aItem The noun instance you want processed.
+ * @param aRawReps A dictionary that we pass to the attribute providers.
+ * There is a(n implied) contract between the caller of grokNounItem for a
+ * given noun type and the attribute providers for that noun type, and we
+ * have nothing to do with it OTHER THAN inserting a 'trueGlodaRep'
+ * value into it. In the event of reindexing an existing object, the
+ * gloda representation we pass to the indexers is actually a clone that
+ * allows the asynchronous indexers to mutate the object without
+ * causing visible changes in the existing representation of the gloda
+ * object. We patch the changes back onto the original item atomically
+ * once indexing completes. The 'trueGlodaRep' is then useful for
+ * objects that hang off of the gloda instance that need a reference
+ * back to their containing object for API convenience purposes.
+ * @param aIsConceptuallyNew Is the item "new" in the sense that it would
+ * never have been visible from within user code? This translates into
+ * whether this should trigger an itemAdded notification or an
+ * itemModified notification.
+ * @param aIsRecordNew Is the item "new" in the sense that we should INSERT
+ * a record rather than UPDATE-ing a record. For example, when dealing
+ * with messages where we may have a ghost, the ghost message is not a
+ * new record, but is conceptually new.
+ * @param aCallbackHandle The GlodaIndexer-style callback handle that is being
+ * used to drive this processing in an async fashion. (See
+ * GlodaIndexer._callbackHandle).
+ * @param aDoCache Should we allow this item to be contributed to its noun
+ * cache?
+ */
*grokNounItem(
  aItem,
  aRawReps,
  aIsConceptuallyNew,
  aIsRecordNew,
  aCallbackHandle,
  aDoCache
) {
  let itemNounDef = aItem.NOUN_DEF;
  let attribsByBoundName = itemNounDef.attribsByBoundName;

  this._log.info(" ** grokNounItem: " + itemNounDef.name);

  // Database attribute rows to be added/removed once all deltas are known.
  let addDBAttribs = [];
  let removeDBAttribs = [];

  // Accumulates the JSON-able representation, keyed by attribute id.
  let jsonDict = {};

  let aOldItem;
  aRawReps.trueGlodaRep = aItem;
  if (aIsConceptuallyNew) {
    // there is no old item if we are new.
    aOldItem = {};
  } else {
    aOldItem = aItem;
    // we want to create a clone of the existing item so that we can know the
    // deltas that happened for indexing purposes
    aItem = aItem._clone();
  }

  // Have the attribute providers directly set properties on the aItem
  let attrProviders = this._attrProviderOrderByNoun[itemNounDef.id];
  for (let iProvider = 0; iProvider < attrProviders.length; iProvider++) {
    this._log.info(" * provider: " + attrProviders[iProvider].providerName);
    yield aCallbackHandle.pushAndGo(
      attrProviders[iProvider].process(
        aItem,
        aRawReps,
        aIsConceptuallyNew,
        aCallbackHandle
      )
    );
  }

  // Optimizers run after all providers have populated the item.
  let attrOptimizers = this._attrOptimizerOrderByNoun[itemNounDef.id];
  for (let iProvider = 0; iProvider < attrOptimizers.length; iProvider++) {
    this._log.info(
      " * optimizer: " + attrOptimizers[iProvider].providerName
    );
    yield aCallbackHandle.pushAndGo(
      attrOptimizers[iProvider].optimize(
        aItem,
        aRawReps,
        aIsConceptuallyNew,
        aCallbackHandle
      )
    );
  }
  this._log.info(" ** done with providers.");

  // Iterate over the attributes on the item
  for (let key of Object.keys(aItem)) {
    let value = aItem[key];
    // ignore keys that start with underscores, they are private and not
    // persisted by our attribute mechanism. (they are directly handled by
    // the object implementation.)
    if (key.startsWith("_")) {
      continue;
    }
    // find the attribute definition that corresponds to this key
    let attrib = attribsByBoundName[key];
    // if there's no attribute, that's not good, but not horrible.
    if (attrib === undefined) {
      this._log.warn("new proc ignoring attrib: " + key);
      continue;
    }

    let attribDB = attrib.dbDef;
    let objectNounDef = attrib.objectNounDef;

    // - translate for our JSON rep
    if (attrib.singular) {
      if (objectNounDef.toJSON) {
        jsonDict[attrib.id] = objectNounDef.toJSON(value);
      } else {
        jsonDict[attrib.id] = value;
      }
    } else if (objectNounDef.toJSON) {
      let toJSON = objectNounDef.toJSON;
      jsonDict[attrib.id] = [];
      for (let subValue of value) {
        jsonDict[attrib.id].push(toJSON(subValue));
      }
    } else {
      jsonDict[attrib.id] = value;
    }

    let oldValue = aOldItem[key];

    // the 'old' item is still the canonical one; update it
    // do the update now, because we may skip operations on addDBAttribs and
    // removeDBattribs, if the attribute is not to generate entries in
    // messageAttributes
    if (oldValue !== undefined || !aIsConceptuallyNew) {
      aOldItem[key] = value;
    }

    // the new canQuery property has to be set to true to generate entries
    // in the messageAttributes table. Any other truthy value (like a non
    // empty string), will still make the message query-able but without
    // using the database.
    if (attrib.canQuery !== true) {
      continue;
    }

    // - database index attributes

    // perform a delta analysis against the old value, if we have one
    if (oldValue !== undefined) {
      // in the singular case if they don't match, it's one add and one remove
      if (attrib.singular) {
        // test for identicality, failing that, see if they have explicit
        // equals support.
        if (
          value !== oldValue &&
          (!value.equals || !value.equals(oldValue))
        ) {
          addDBAttribs.push(attribDB.convertValuesToDBAttributes([value])[0]);
          removeDBAttribs.push(
            attribDB.convertValuesToDBAttributes([oldValue])[0]
          );
        }
      } else if (objectNounDef.computeDelta) {
        // in the plural case, we have to figure the deltas accounting for
        // possible changes in ordering (which is insignificant from an
        // indexing perspective)
        // some nouns may not meet === equivalence needs, so must provide a
        // custom computeDelta method to help us out
        let [valuesAdded, valuesRemoved] = objectNounDef.computeDelta(
          value,
          oldValue
        );
        // convert the values to database-style attribute rows
        addDBAttribs.push.apply(
          addDBAttribs,
          attribDB.convertValuesToDBAttributes(valuesAdded)
        );
        removeDBAttribs.push.apply(
          removeDBAttribs,
          attribDB.convertValuesToDBAttributes(valuesRemoved)
        );
      } else {
        // build a map of the previous values; we will delete the values as
        // we see them so that we will know what old values are no longer
        // present in the current set of values.
        let oldValueMap = {};
        for (let anOldValue of oldValue) {
          // remember, the key is just the toString'ed value, so we need to
          // store and use the actual value as the value!
          oldValueMap[anOldValue] = anOldValue;
        }
        // traverse the current values...
        let valuesAdded = [];
        for (let curValue of value) {
          if (curValue in oldValueMap) {
            delete oldValueMap[curValue];
          } else {
            valuesAdded.push(curValue);
          }
        }
        // anything still on oldValueMap was removed.
        let valuesRemoved = Object.keys(oldValueMap).map(
          key => oldValueMap[key]
        );
        // convert the values to database-style attribute rows
        addDBAttribs.push.apply(
          addDBAttribs,
          attribDB.convertValuesToDBAttributes(valuesAdded)
        );
        removeDBAttribs.push.apply(
          removeDBAttribs,
          attribDB.convertValuesToDBAttributes(valuesRemoved)
        );
      }

      // Add/remove the empty set indicator as appropriate.
      if (attrib.emptySetIsSignificant) {
        // if we are now non-zero but previously were zero, remove.
        if (value.length && !oldValue.length) {
          removeDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
        } else if (!value.length && oldValue.length) {
          // We are now zero length but previously were not, add.
          addDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
        }
      }
    } else {
      // no old value, all values are new
      // add the db reps on the new values
      if (attrib.singular) {
        value = [value];
      }
      addDBAttribs.push.apply(
        addDBAttribs,
        attribDB.convertValuesToDBAttributes(value)
      );
      // Add the empty set indicator for the attribute id if appropriate.
      if (!value.length && attrib.emptySetIsSignificant) {
        addDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
      }
    }
  }

  // Iterate over any remaining values in old items for purge purposes.
  for (let key of Object.keys(aOldItem)) {
    let value = aOldItem[key];
    // ignore keys that start with underscores, they are private and not
    // persisted by our attribute mechanism. (they are directly handled by
    // the object implementation.)
    if (key.startsWith("_")) {
      continue;
    }
    // ignore things we saw in the new guy
    if (key in aItem) {
      continue;
    }

    // find the attribute definition that corresponds to this key
    let attrib = attribsByBoundName[key];
    // if there's no attribute, that's not good, but not horrible.
    if (attrib === undefined) {
      continue;
    }

    // delete these from the old item, as the old item is canonical, and
    // should no longer have these values
    delete aOldItem[key];

    if (attrib.canQuery !== true) {
      this._log.debug(
        "Not inserting attribute " +
          attrib.attributeName +
          " into the db, since we don't plan on querying on it"
      );
      continue;
    }

    if (attrib.singular) {
      value = [value];
    }
    let attribDB = attrib.dbDef;
    removeDBAttribs.push.apply(
      removeDBAttribs,
      attribDB.convertValuesToDBAttributes(value)
    );
    // remove the empty set marker if there should have been one
    if (!value.length && attrib.emptySetIsSignificant) {
      removeDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
    }
  }

  // Persist the JSON representation on the item, then insert or update the
  // backing row depending on aIsRecordNew.
  aItem._jsonText = JSON.stringify(jsonDict);
  this._log.debug(" json text: " + aItem._jsonText);

  if (aIsRecordNew) {
    this._log.debug(" inserting item");
    itemNounDef.objInsert.call(itemNounDef.datastore, aItem);
  } else {
    this._log.debug(" updating item");
    itemNounDef.objUpdate.call(itemNounDef.datastore, aItem);
  }

  this._log.debug(
    " adjusting attributes, add: " + addDBAttribs + " rem: " + removeDBAttribs
  );
  itemNounDef.dbAttribAdjuster.call(
    itemNounDef.datastore,
    aItem,
    addDBAttribs,
    removeDBAttribs
  );

  // Patch the clone's changes back onto the canonical (old) item atomically,
  // per the contract described in the method documentation.
  if (!aIsConceptuallyNew && "_declone" in aOldItem) {
    aOldItem._declone(aItem);
  }

  // Cache ramifications...
  if (aDoCache === undefined || aDoCache) {
    if (aIsConceptuallyNew) {
      GlodaCollectionManager.itemsAdded(aItem.NOUN_ID, [aItem]);
    } else {
      GlodaCollectionManager.itemsModified(aOldItem.NOUN_ID, [aOldItem]);
    }
  }

  this._log.debug(" done grokking.");

  yield GlodaConstants.kWorkDone;
},
+ /* eslint-enable complexity */
+
+ /**
+ * Processes a list of noun instances for their score within a given context.
+ * This is primarily intended for use by search ranking mechanisms, but could
+ * be used elsewhere too. (It does, however, depend on the complicity of the
+ * score method implementations to not get confused.)
+ *
+ * @param aItems The non-empty list of items to score.
+ * @param aContext A noun-specific dictionary that we just pass to the funcs.
+ * @param aExtraScoreFuncs A list of extra scoring functions to apply.
+ * @returns A list of integer scores equal in length to aItems.
+ */
+ scoreNounItems(aItems, aContext, aExtraScoreFuncs) {
+ let scores = [];
+ // bail if there is nothing to score
+ if (!aItems.length) {
+ return scores;
+ }
+
+ let itemNounDef = aItems[0].NOUN_DEF;
+ if (aExtraScoreFuncs == null) {
+ aExtraScoreFuncs = [];
+ }
+
+ for (let item of aItems) {
+ let score = 0;
+ let attrProviders = this._attrProviderOrderByNoun[itemNounDef.id];
+ for (let iProvider = 0; iProvider < attrProviders.length; iProvider++) {
+ let provider = attrProviders[iProvider];
+ if (provider.score) {
+ score += provider.score(item);
+ }
+ }
+ for (let extraScoreFunc of aExtraScoreFuncs) {
+ score += extraScoreFunc(item, aContext);
+ }
+ scores.push(score);
+ }
+
+ return scores;
+ },
+};
+
+/* and initialize the Gloda object/NS before we return... */
try {
  Gloda._init();
} catch (ex) {
  // Initialization failures are deliberately non-fatal at import time: log
  // the origin (file:line) and the exception, then let module load proceed.
  Gloda._log.debug(
    "Exception during Gloda init (" +
      ex.fileName +
      ":" +
      ex.lineNumber +
      "): " +
      ex
  );
}
+/* but don't forget that we effectively depend on Everybody.jsm too, and
+ currently on our importer to be importing that if they need us fully armed
+ and operational. */
diff --git a/comm/mailnews/db/gloda/modules/GlodaConstants.jsm b/comm/mailnews/db/gloda/modules/GlodaConstants.jsm
new file mode 100644
index 0000000000..1e6d253f09
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaConstants.jsm
@@ -0,0 +1,250 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * The constants used by Gloda files. Avoid importing anything into this file.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaConstants"];
+
+var GlodaConstants = {
+ /**
+ * The indexer is idle.
+ */
+ kIndexerIdle: 0,
+ /**
+ * The indexer is doing something. We used to have other specific states, but
+ * they have been rendered irrelevant and wiped from existence.
+ */
+ kIndexerIndexing: 1,
+
+ /**
+ * Synchronous activities performed that can be thought of as one processing
+ * token. Potentially yield the event-loop and re-schedule for later based
+ * on how long we've actually taken/etc. The goal here is that code that
+ * is doing stuff synchronously yields with kWorkSync periodically to make
+ * sure that it doesn't dominate the event-loop. Unless the processing
+ * in question is particularly intensive, it should be reasonable to apply
+ * some decimation factor (ex: 32 or 64) with the general goal of yielding
+ * every 3-10 milliseconds.
+ */
+ kWorkSync: 0,
+ /**
+ * Asynchronous activity performed, you need to relinquish flow control and
+ * trust us to call callbackDriver later.
+ */
+ kWorkAsync: 1,
+ /**
+ * We are all done with our task, close us and figure out something else to do.
+ */
+ kWorkDone: 2,
+ /**
+ * We are not done with our task, but we think it's a good idea to take a
+ * breather because we believe we have tied up the event loop for a
+ * non-trivial amount of time. So please re-schedule us in the future.
+ *
+ * This is currently only used internally by the indexer's batching logic;
+ * minor changes may be required if used by actual indexers.
+ */
+ kWorkPause: 3,
+ /**
+ * We are done with our task, and have a result that we are returning. This
+ * should only be used by your callback handler's doneWithResult method.
+ * Ex: you are passed aCallbackHandle, and you do
+ * "yield aCallbackHandle.doneWithResult(myResult);".
+ */
+ kWorkDoneWithResult: 4,
+
+ /**
+ * An attribute that is a defining characteristic of the subject.
+ */
+ kAttrFundamental: 0,
+ /**
+ * An attribute that is an optimization derived from two or more fundamental
+ * attributes and exists solely to improve database query performance.
+ */
+ kAttrOptimization: 1,
+ /**
+ * An attribute that is derived from the content of the subject. For example,
+ * a message that references a bugzilla bug could have a "derived" attribute
+ * that captures the bugzilla reference. This is not a fundamental attribute.
+ */
+ kAttrDerived: 2,
+ /**
+ * An attribute that is the result of an explicit and intentional user action
+ * upon the subject. For example, a tag placed on a message by a user (or
+ * at the user's request by a filter) is explicit.
+ */
+ kAttrExplicit: 3,
+ /**
+ * An attribute that is indirectly the result of a user's behaviour. For
+ * example, if a user consults a message multiple times, we may conclude that
+ * the user finds the message interesting. It is "implied", if you will,
+ * that the message is interesting.
+ */
+ kAttrImplicit: 4,
+
+ /**
+ * This attribute is not 'special'; it is stored as a (thing id, attribute id,
+ * attribute id) tuple in the database rather than on thing's row or on
+ * thing's fulltext row. (Where "thing" could be a message or any other
+ * first class noun.)
+ */
+ kSpecialNotAtAll: 0,
+ /**
+ * This attribute is stored as a numeric column on the row for the noun. The
+ * attribute definition should include this value as 'special' and the
+ * column name that stores the attribute as 'specialColumnName'.
+ */
+ kSpecialColumn: 16,
+ kSpecialColumnChildren: 16 | 1,
+ kSpecialColumnParent: 16 | 2,
+ /**
+ * This attribute is stored as a string column on the row for the noun. It
+ * differs from kSpecialColumn in that it is a string, which once had
+ * query ramifications and one day may have them again.
+ */
+ kSpecialString: 32,
+ /**
+ * This attribute is stored as a fulltext column on the fulltext table for
+ * the noun. The attribute definition should include this value as 'special'
+ * and the column name that stores the table as 'specialColumnName'.
+ */
+ kSpecialFulltext: 64,
+
+ /**
+ * The extensionName used for the attributes defined by core gloda plugins
+ * such as GlodaFundAttr.jsm and GlodaExplicitAttr.jsm.
+ */
+ BUILT_IN: "built-in",
+
+ /**
+ * Special sentinel value that will cause facets to skip a noun instance
+ * when an attribute has this value.
+ */
+ IGNORE_FACET: "ignore-facet",
+
+ /*
+ * The following are explicit noun IDs. While most extension-provided nouns
+ * will have dynamically allocated id's that are looked up by name, these
+ * id's can be relied upon to exist and be accessible via these
+ * pseudo-constants. It's not really clear that we need these, although it
+ * does potentially simplify code to not have to look up all of their nouns
+ * at initialization time.
+ */
+ /**
+ * Boolean values, expressed as 0/1 in the database and non-continuous for
+ * constraint purposes. Like numbers, such nouns require their attributes
+ * to provide them with context, lacking any of their own.
+ * Having this as a noun type may be a bad idea; a change of nomenclature
+ * (so that we are not claiming a boolean value is a noun, but still using
+ * it in the same way) or implementation to require each boolean noun
+ * actually be its own noun may be in order.
+ */
+ NOUN_BOOLEAN: 1,
+ /**
+ * A number, which could mean an integer or floating point values. We treat
+ * these as continuous, meaning that queries on them can have ranged
+ * constraints expressed on them. Lacking any inherent context, numbers
+ * depend on their attributes to parameterize them as required.
+ * Same deal as with NOUN_BOOLEAN, we may need to change this up conceptually.
+ */
+ NOUN_NUMBER: 2,
+ /**
+ * A (non-fulltext) string.
+ * Same deal as with NOUN_BOOLEAN, we may need to change this up conceptually.
+ */
+ NOUN_STRING: 3,
+ /** A date, encoded as a PRTime, represented as a js Date object. */
+ NOUN_DATE: 10,
+ /**
+ * Fulltext search support, somewhat magical. This is only intended to be
+ * used for kSpecialFulltext attributes, and exclusively as a constraint
+ * mechanism. The values are always represented as strings. It is presumed
+ * that the user of this functionality knows how to generate SQLite FTS3
+ * style MATCH queries, or is okay with us just gluing them together with
+ * " OR " when used in an or-constraint case. Gloda's query mechanism
+ * currently lacks the ability to compile Gloda-style and-constraints
+ * into a single MATCH query, but it will turn out okay, just less
+ * efficiently than it could.
+ */
+ NOUN_FULLTEXT: 20,
+ /**
+ * Represents a MIME Type. We currently lack any human-intelligible
+ * descriptions of mime types.
+ */
+ NOUN_MIME_TYPE: 40,
+ /**
+ * Captures a message tag as well as when the tag's presence was observed,
+ * hoping to approximate when the tag was applied. It's a somewhat dubious
+ * attempt to not waste our opportunity to store a value along with the tag.
+ * (The tag is actually stored as an attribute parameter on the attribute
+ * definition, rather than a value in the attribute 'instance' for the
+ * message.)
+ */
+ NOUN_TAG: 50,
+ /**
+ * Doesn't actually work owing to a lack of an object to represent a folder.
+ * We do expose the folderURI and folderID of a message, but need to map that
+ * to a good abstraction. Probably something thin around a SteelFolder or
+ * the like; we would contribute the functionality to easily move from a
+ * folder to the list of gloda messages in that folder, as well as the
+ * indexing preferences for that folder.
+ *
+ * @TODO folder noun and related abstraction
+ */
+ NOUN_FOLDER: 100,
+ /**
+ * All messages belong to a conversation. See GlodaDataModel.jsm for the
+ * definition of the GlodaConversation class.
+ */
+ NOUN_CONVERSATION: 101,
+ /**
+ * A one-to-one correspondence with underlying (indexed) nsIMsgDBHdr
+ * instances. See GlodaDataModel.jsm for the definition of the GlodaMessage class.
+ */
+ NOUN_MESSAGE: 102,
+ /**
+ * Corresponds to a human being, who may have multiple electronic identities
+ * (a la NOUN_IDENTITY). There is no requirement for association with an
+ * address book contact, although when the address book contact exists,
+ * we want to be associated with it. See GlodaDataModel.jsm for the definition
+ * of the GlodaContact class.
+ */
+ NOUN_CONTACT: 103,
+ /**
+ * A single identity of a contact, who may have one or more. E-mail accounts,
+ * instant messaging accounts, social network site accounts, etc. are each
+ * identities. See GlodaDataModel.jsm for the definition of the GlodaIdentity
+ * class.
+ */
+ NOUN_IDENTITY: 104,
+ /**
+ * An attachment to a message. A message may have many different attachments.
+ */
+ NOUN_ATTACHMENT: 105,
+ /**
+ * An account related to a message. A message can have only one account.
+ */
+ NOUN_ACCOUNT: 106,
+
+ /**
+ * Parameterized identities, for use in the from-me, to-me, cc-me optimization
+ * cases. Not for reuse without some thought. These nouns use the parameter
+ * to store the 'me' identity that we are talking about, and the value to
+ * store the identity of the other party. So in both the from-me and to-me
+ * cases involving 'me' and 'foo@bar', the 'me' identity is always stored via
+ * the attribute parameter, and the 'foo@bar' identity is always stored as
+ * the attribute value. See GlodaFundAttr.jsm for more information on this, but
+ * you probably shouldn't be touching this unless you are fundattr.
+ */
+ NOUN_PARAM_IDENTITY: 200,
+
+ kConstraintIdIn: 0,
+ kConstraintIn: 1,
+ kConstraintRanges: 2,
+ kConstraintEquals: 3,
+ kConstraintStringLike: 4,
+ kConstraintFulltext: 5,
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaContent.jsm b/comm/mailnews/db/gloda/modules/GlodaContent.jsm
new file mode 100644
index 0000000000..5f1daf5e9c
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaContent.jsm
@@ -0,0 +1,285 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "GlodaContent",
+ "whittlerRegistry",
+ "mimeMsgToContentAndMeta",
+ "mimeMsgToContentSnippetAndMeta",
+];
+
+/**
+ * Given a MimeMsg and the corresponding folder, return the GlodaContent object.
+ *
+ * @param aMimeMsg: the MimeMessage instance
+ * @param folder: the nsIMsgDBFolder
+ * @returns an array containing the GlodaContent instance, and the meta dictionary
+ * that the Gloda content providers may have filled with useful data.
+ */
+
+function mimeMsgToContentAndMeta(aMimeMsg, folder) {
+ let content = new GlodaContent();
+ let meta = { subject: aMimeMsg.get("subject") };
+ let bodyLines = aMimeMsg.coerceBodyToPlaintext(folder).split(/\r?\n/);
+
+ for (let whittler of whittlerRegistry.getWhittlers()) {
+ whittler.contentWhittle(meta, bodyLines, content);
+ }
+
+ return [content, meta];
+}
+
+/**
+ * Given a MimeMsg, return the whittled content string, suitable for summarizing
+ * a message.
+ *
+ * @param aMimeMsg: the MimeMessage instance
+ * @param folder: the nsIMsgDBFolder
+ * @param length: optional number of characters to trim the whittled content.
+ * If the actual length of the message is greater than |length|, then the return
+ * value is the first (length-1) characters with an ellipsis appended.
+ * @returns an array containing the text of the snippet, and the meta dictionary
+ * that the Gloda content providers may have filled with useful data.
+ */
+
+function mimeMsgToContentSnippetAndMeta(aMimeMsg, folder, length) {
+ let [content, meta] = mimeMsgToContentAndMeta(aMimeMsg, folder);
+
+ let text = content.getContentSnippet(length + 1);
+ if (length && text.length > length) {
+ text = text.substring(0, length - 1) + "\u2026"; // ellipsis
+ }
+ return [text, meta];
+}
+
+/**
+ * A registry of gloda providers that have contentWhittle() functions.
+ * Used by mimeMsgToContentSnippetAndMeta(), but populated by the Gloda
+ * object as it's processing providers.
+ */
+function WhittlerRegistry() {
+ this._whittlers = [];
+}
+
+WhittlerRegistry.prototype = {
+ /**
+ * Add a provider as a content whittler.
+ */
+ registerWhittler(provider) {
+ this._whittlers.push(provider);
+ },
+ /**
+ * get the list of content whittlers, sorted from the most specific to
+ * the most generic
+ */
+ getWhittlers() {
+ // Use the concat() trick to avoid mutating the internal object and
+ // leaking an internal representation.
+ return this._whittlers.concat().reverse();
+ },
+};
+
+const whittlerRegistry = new WhittlerRegistry();
+
+function GlodaContent() {
+ this._contentPriority = null;
+ this._producing = false;
+ this._hunks = [];
+}
+
+GlodaContent.prototype = {
+ kPriorityBase: 0,
+ kPriorityPerfect: 100,
+
+ kHunkMeta: 1,
+ kHunkQuoted: 2,
+ kHunkContent: 3,
+
+ _resetContent() {
+ this._keysAndValues = [];
+ this._keysAndDeltaValues = [];
+ this._hunks = [];
+ this._curHunk = null;
+ },
+
+ /* ===== Consumer API ===== */
+ hasContent() {
+ return this._contentPriority != null;
+ },
+
+ /**
+ * Return content suitable for snippet display. This means that no quoting
+ * or meta-data should be returned.
+ *
+ * @param aMaxLength The maximum snippet length desired.
+ */
+ getContentSnippet(aMaxLength) {
+ let content = this.getContentString();
+ if (aMaxLength) {
+ content = content.substring(0, aMaxLength);
+ }
+ return content;
+ },
+
+ getContentString(aIndexingPurposes) {
+ let data = "";
+ for (let hunk of this._hunks) {
+ if (hunk.hunkType == this.kHunkContent) {
+ if (data) {
+ data += "\n" + hunk.data;
+ } else {
+ data = hunk.data;
+ }
+ }
+ }
+
+ if (aIndexingPurposes) {
+ // append the values for indexing. we assume the keywords are cruft.
+ // this may be crazy, but things that aren't a science aren't an exact
+ // science.
+ for (let kv of this._keysAndValues) {
+ data += "\n" + kv[1];
+ }
+ for (let kon of this._keysAndValues) {
+ data += "\n" + kon[1] + "\n" + kon[2];
+ }
+ }
+
+ return data;
+ },
+
+ /* ===== Producer API ===== */
+ /**
+ * Called by a producer with the priority they believe their interpretation
+ * of the content comes in at.
+ *
+ * @returns true if we believe the producer's interpretation will be
+ * interesting and they should go ahead and generate events. We return
+ * false if we don't think they are interesting, in which case they should
+ * probably not issue calls to us, although we don't care. (We will
+ * ignore their calls if we return false, this allows the simplification
+ * of code that needs to run anyways.)
+ */
+ volunteerContent(aPriority) {
+ if (this._contentPriority === null || this._contentPriority < aPriority) {
+ this._contentPriority = aPriority;
+ this._resetContent();
+ this._producing = true;
+ return true;
+ }
+ this._producing = false;
+ return false;
+ },
+
+ keyValue(aKey, aValue) {
+ if (!this._producing) {
+ return;
+ }
+
+ this._keysAndValues.push([aKey, aValue]);
+ },
+ keyValueDelta(aKey, aOldValue, aNewValue) {
+ if (!this._producing) {
+ return;
+ }
+
+ this._keysAndDeltaValues.push([aKey, aOldValue, aNewValue]);
+ },
+
+ /**
+ * Meta lines are lines that have to do with the content but are not the
+ * content and can generally be related to an attribute that has been derived
+ * and stored on the item.
+ * For example, a bugzilla bug may note that an attachment was created; this
+ * is not content and wouldn't be desired in a snippet, but is still
+ * potentially interesting meta-data.
+ *
+ * @param aLineOrLines The line or list of lines that are meta-data.
+ * @param aAttr The attribute this meta-data is associated with.
+ * @param aIndex If the attribute is non-singular, indicate the specific
+ * index of the item in the attribute's bound list that the meta-data
+ * is associated with.
+ */
+ meta(aLineOrLines, aAttr, aIndex) {
+ if (!this._producing) {
+ return;
+ }
+
+ let data;
+ if (typeof aLineOrLines == "string") {
+ data = aLineOrLines;
+ } else {
+ data = aLineOrLines.join("\n");
+ }
+
+ this._curHunk = {
+ hunkType: this.kHunkMeta,
+ attr: aAttr,
+ index: aIndex,
+ data,
+ };
+ this._hunks.push(this._curHunk);
+ },
+ /**
+ * Quoted lines reference previous messages or what not.
+ *
+ * @param aLineOrLines The line or list of lines that are quoted.
+ * @param aDepth The depth of the quoting.
+ * @param aOrigin The item that originated the original content, if known.
+ * For example, perhaps a GlodaMessage?
+ * @param aTarget A reference to the location in the original content, if
+ * known. For example, the index of a line in a message or something?
+ */
+ quoted(aLineOrLines, aDepth, aOrigin, aTarget) {
+ if (!this._producing) {
+ return;
+ }
+
+ let data;
+ if (typeof aLineOrLines == "string") {
+ data = aLineOrLines;
+ } else {
+ data = aLineOrLines.join("\n");
+ }
+
+ if (
+ !this._curHunk ||
+ this._curHunk.hunkType != this.kHunkQuoted ||
+ this._curHunk.depth != aDepth ||
+ this._curHunk.origin != aOrigin ||
+ this._curHunk.target != aTarget
+ ) {
+ this._curHunk = {
+ hunkType: this.kHunkQuoted,
+ data,
+ depth: aDepth,
+ origin: aOrigin,
+ target: aTarget,
+ };
+ this._hunks.push(this._curHunk);
+ } else {
+ this._curHunk.data += "\n" + data;
+ }
+ },
+
+ content(aLineOrLines) {
+ if (!this._producing) {
+ return;
+ }
+
+ let data;
+ if (typeof aLineOrLines == "string") {
+ data = aLineOrLines;
+ } else {
+ data = aLineOrLines.join("\n");
+ }
+
+ if (!this._curHunk || this._curHunk.hunkType != this.kHunkContent) {
+ this._curHunk = { hunkType: this.kHunkContent, data };
+ this._hunks.push(this._curHunk);
+ } else {
+ this._curHunk.data += "\n" + data;
+ }
+ },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaDataModel.jsm b/comm/mailnews/db/gloda/modules/GlodaDataModel.jsm
new file mode 100644
index 0000000000..d9361c079c
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaDataModel.jsm
@@ -0,0 +1,1020 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "GlodaAttributeDBDef",
+ "GlodaAccount",
+ "GlodaConversation",
+ "GlodaFolder",
+ "GlodaMessage",
+ "GlodaContact",
+ "GlodaIdentity",
+ "GlodaAttachment",
+];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var LOG = console.createInstance({
+ prefix: "gloda.datamodel",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * @class Represents a gloda attribute definition's DB form. This class
+ * stores the information in the database relating to this attribute
+ * definition. Access its attrDef attribute to get at the really juicy data.
+ * This main interesting thing this class does is serve as the keeper of the
+ * mapping from parameters to attribute ids in the database if this is a
+ * parameterized attribute.
+ */
+function GlodaAttributeDBDef(
+ aDatastore,
+ aID,
+ aCompoundName,
+ aAttrType,
+ aPluginName,
+ aAttrName
+) {
+ // _datastore is now set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._compoundName = aCompoundName;
+ this._attrType = aAttrType;
+ this._pluginName = aPluginName;
+ this._attrName = aAttrName;
+
+ this.attrDef = null;
+
+ /** Map parameter values to the underlying database id. */
+ this._parameterBindings = {};
+}
+
+GlodaAttributeDBDef.prototype = {
+ // set by GlodaDatastore
+ _datastore: null,
+ get id() {
+ return this._id;
+ },
+ get attributeName() {
+ return this._attrName;
+ },
+
+ get parameterBindings() {
+ return this._parameterBindings;
+ },
+
+ /**
+ * Bind a parameter value to the attribute definition, allowing use of the
+ * attribute-parameter as an attribute.
+ *
+ * @returns
+ */
+ bindParameter(aValue) {
+ // people probably shouldn't call us with null, but handle it
+ if (aValue == null) {
+ return this._id;
+ }
+ if (aValue in this._parameterBindings) {
+ return this._parameterBindings[aValue];
+ }
+ // no database entry exists if we are here, so we must create it...
+ let id = this._datastore._createAttributeDef(
+ this._attrType,
+ this._pluginName,
+ this._attrName,
+ aValue
+ );
+ this._parameterBindings[aValue] = id;
+ this._datastore.reportBinding(id, this, aValue);
+ return id;
+ },
+
+ /**
+ * Given a list of values, return a list (regardless of plurality) of
+ * database-ready [attribute id, value] tuples. This is intended to be used
+ * to directly convert the value of a property on an object that corresponds
+ * to a bound attribute.
+ *
+ * @param {Array} aInstanceValues An array of instance values regardless of
+ * whether or not the attribute is singular.
+ */
+ convertValuesToDBAttributes(aInstanceValues) {
+ let nounDef = this.attrDef.objectNounDef;
+ let dbAttributes = [];
+ if (nounDef.usesParameter) {
+ for (let instanceValue of aInstanceValues) {
+ let [param, dbValue] = nounDef.toParamAndValue(instanceValue);
+ dbAttributes.push([this.bindParameter(param), dbValue]);
+ }
+ } else if ("toParamAndValue" in nounDef) {
+ // Not generating any attributes is ok. This basically means the noun is
+ // just an informative property on the Gloda Message and has no real
+ // indexing purposes.
+ for (let instanceValue of aInstanceValues) {
+ dbAttributes.push([
+ this._id,
+ nounDef.toParamAndValue(instanceValue)[1],
+ ]);
+ }
+ }
+ return dbAttributes;
+ },
+
+ toString() {
+ return this._compoundName;
+ },
+};
+
+var GlodaHasAttributesMixIn = {
+ *enumerateAttributes() {
+ let nounDef = this.NOUN_DEF;
+ for (let key in this) {
+ let value = this[key];
+ let attrDef = nounDef.attribsByBoundName[key];
+ // we expect to not have attributes for underscore prefixed values (those
+ // are managed by the instance's logic. we also want to not explode
+ // should someone crap other values in there, we get both birds with this
+ // one stone.
+ if (attrDef === undefined) {
+ continue;
+ }
+ if (attrDef.singular) {
+ // ignore attributes with null values
+ if (value != null) {
+ yield [attrDef, [value]];
+ }
+ } else if (value.length) {
+ // ignore attributes with no values
+ yield [attrDef, value];
+ }
+ }
+ },
+
+ domContribute(aDomNode) {
+ let nounDef = this.NOUN_DEF;
+ for (let attrName in nounDef.domExposeAttribsByBoundName) {
+ let attr = nounDef.domExposeAttribsByBoundName[attrName];
+ if (this[attrName]) {
+ aDomNode.setAttribute(attr.domExpose, this[attrName]);
+ }
+ }
+ },
+};
+
+function MixIn(aConstructor, aMixIn) {
+ let proto = aConstructor.prototype;
+ for (let [name, func] of Object.entries(aMixIn)) {
+ if (name.startsWith("get_")) {
+ proto.__defineGetter__(name.substring(4), func);
+ } else {
+ proto[name] = func;
+ }
+ }
+}
+
+/**
+ * @class A gloda wrapper around nsIMsgIncomingServer.
+ */
+function GlodaAccount(aIncomingServer) {
+ this._incomingServer = aIncomingServer;
+}
+
+GlodaAccount.prototype = {
+ NOUN_ID: 106,
+ get id() {
+ return this._incomingServer.key;
+ },
+ get name() {
+ return this._incomingServer.prettyName;
+ },
+ get incomingServer() {
+ return this._incomingServer;
+ },
+ toString() {
+ return "Account: " + this.id;
+ },
+
+ toLocaleString() {
+ return this.name;
+ },
+};
+
+/**
+ * @class A gloda conversation (thread) exists so that messages can belong.
+ */
+function GlodaConversation(
+ aDatastore,
+ aID,
+ aSubject,
+ aOldestMessageDate,
+ aNewestMessageDate
+) {
+ // _datastore is now set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._subject = aSubject;
+ this._oldestMessageDate = aOldestMessageDate;
+ this._newestMessageDate = aNewestMessageDate;
+}
+
+GlodaConversation.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_CONVERSATION,
+ // set by GlodaDatastore
+ _datastore: null,
+ get id() {
+ return this._id;
+ },
+ get subject() {
+ return this._subject;
+ },
+ get oldestMessageDate() {
+ return this._oldestMessageDate;
+ },
+ get newestMessageDate() {
+ return this._newestMessageDate;
+ },
+
+ getMessagesCollection(aListener, aData) {
+ let query = new GlodaMessage.prototype.NOUN_DEF.queryClass();
+ query.conversation(this._id).orderBy("date");
+ return query.getCollection(aListener, aData);
+ },
+
+ toString() {
+ return "Conversation:" + this._id;
+ },
+
+ toLocaleString() {
+ return this._subject;
+ },
+};
+
+function GlodaFolder(
+ aDatastore,
+ aID,
+ aURI,
+ aDirtyStatus,
+ aPrettyName,
+ aIndexingPriority
+) {
+ // _datastore is now set by GlodaDatastore
+ this._id = aID;
+ this._uri = aURI;
+ this._dirtyStatus = aDirtyStatus;
+ this._prettyName = aPrettyName;
+ this._account = null;
+ this._activeIndexing = false;
+ this._indexingPriority = aIndexingPriority;
+ this._deleted = false;
+ this._compacting = false;
+}
+
+GlodaFolder.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_FOLDER,
+ // set by GlodaDatastore
+ _datastore: null,
+
+ /** The folder is believed to be up-to-date */
+ kFolderClean: 0,
+ /** The folder has some un-indexed or dirty messages */
+ kFolderDirty: 1,
+ /** The folder needs to be entirely re-indexed, regardless of the flags on
+ * the messages in the folder. This state will be downgraded to dirty */
+ kFolderFilthy: 2,
+
+ _kFolderDirtyStatusMask: 0x7,
+ /**
+ * The (local) folder has been compacted and all of its message keys are
+ * potentially incorrect. This is not a possible state for IMAP folders
+ * because their message keys are based on UIDs rather than offsets into
+ * the mbox file.
+ */
+ _kFolderCompactedFlag: 0x8,
+
+ /** The folder should never be indexed. */
+ kIndexingNeverPriority: -1,
+ /** The lowest priority assigned to a folder. */
+ kIndexingLowestPriority: 0,
+ /** The highest priority assigned to a folder. */
+ kIndexingHighestPriority: 100,
+
+ /** The indexing priority for a folder if no other priority is assigned. */
+ kIndexingDefaultPriority: 20,
+ /** Folders marked check new are slightly more important I guess. */
+ kIndexingCheckNewPriority: 30,
+ /** Favorite folders are more interesting to the user, presumably. */
+ kIndexingFavoritePriority: 40,
+ /** The indexing priority for inboxes. */
+ kIndexingInboxPriority: 50,
+ /** The indexing priority for sent mail folders. */
+ kIndexingSentMailPriority: 60,
+
+ get id() {
+ return this._id;
+ },
+ get uri() {
+ return this._uri;
+ },
+ get dirtyStatus() {
+ return this._dirtyStatus & this._kFolderDirtyStatusMask;
+ },
+ /**
+ * Mark a folder as dirty if it was clean. Do nothing if it was already dirty
+ * or filthy. For use by GlodaMsgIndexer only. And maybe rkent and his
+ * marvelous extensions.
+ */
+ _ensureFolderDirty() {
+ if (this.dirtyStatus == this.kFolderClean) {
+ this._dirtyStatus =
+ (this.kFolderDirty & this._kFolderDirtyStatusMask) |
+ (this._dirtyStatus & ~this._kFolderDirtyStatusMask);
+ this._datastore.updateFolderDirtyStatus(this);
+ }
+ },
+ /**
+ * Definitely for use only by GlodaMsgIndexer to downgrade the dirty status of
+ * a folder.
+ */
+ _downgradeDirtyStatus(aNewStatus) {
+ if (this.dirtyStatus != aNewStatus) {
+ this._dirtyStatus =
+ (aNewStatus & this._kFolderDirtyStatusMask) |
+ (this._dirtyStatus & ~this._kFolderDirtyStatusMask);
+ this._datastore.updateFolderDirtyStatus(this);
+ }
+ },
+ /**
+ * Indicate whether this folder is currently being compacted. The
+ * |GlodaMsgIndexer| keeps this in-memory-only value up-to-date.
+ */
+ get compacting() {
+ return this._compacting;
+ },
+ /**
+ * Set whether this folder is currently being compacted. This is really only
+ * for the |GlodaMsgIndexer| to set.
+ */
+ set compacting(aCompacting) {
+ this._compacting = aCompacting;
+ },
+ /**
+ * Indicate whether this folder was compacted and has not yet been
+ * compaction processed.
+ */
+ get compacted() {
+ return Boolean(this._dirtyStatus & this._kFolderCompactedFlag);
+ },
+ /**
+ * For use only by GlodaMsgIndexer to set/clear the compaction state of this
+ * folder.
+ */
+ _setCompactedState(aCompacted) {
+ if (this.compacted != aCompacted) {
+ if (aCompacted) {
+ this._dirtyStatus |= this._kFolderCompactedFlag;
+ } else {
+ this._dirtyStatus &= ~this._kFolderCompactedFlag;
+ }
+ this._datastore.updateFolderDirtyStatus(this);
+ }
+ },
+
+ get name() {
+ return this._prettyName;
+ },
+ toString() {
+ return "Folder:" + this._id;
+ },
+
+ toLocaleString() {
+ let xpcomFolder = this.getXPCOMFolder(this.kActivityFolderOnlyNoData);
+ if (!xpcomFolder) {
+ return this._prettyName;
+ }
+ return (
+ xpcomFolder.prettyName + " (" + xpcomFolder.rootFolder.prettyName + ")"
+ );
+ },
+
+ get indexingPriority() {
+ return this._indexingPriority;
+ },
+
+ /** We are going to index this folder. */
+ kActivityIndexing: 0,
+ /** Asking for the folder to perform header retrievals. */
+ kActivityHeaderRetrieval: 1,
+ /** We only want the folder for its metadata but are not going to open it. */
+ kActivityFolderOnlyNoData: 2,
+
+ /** Is this folder known to be actively used for indexing? */
+ _activeIndexing: false,
+ /** Get our indexing status. */
+ get indexing() {
+ return this._activeIndexing;
+ },
+ /**
+ * Set our indexing status. Normally, this will be enabled through passing
+ * an activity type of kActivityIndexing (which will set us), but we will
+ * still need to be explicitly disabled by the indexing code.
+ * When disabling indexing, we will call forgetFolderIfUnused to take care of
+ * shutting things down.
+ * We are not responsible for committing changes to the message database!
+ * That is on you!
+ */
+ set indexing(aIndexing) {
+ this._activeIndexing = aIndexing;
+ },
+
+ /**
+ * Retrieve the nsIMsgFolder instance corresponding to this folder, providing
+ * an explanation of why you are requesting it for tracking/cleanup purposes.
+ *
+ * @param aActivity One of the kActivity* constants. If you pass
+ * kActivityIndexing, we will set indexing for you, but you will need to
+ * clear it when you are done.
+ * @returns The nsIMsgFolder if available, null on failure.
+ */
+ getXPCOMFolder(aActivity) {
+ switch (aActivity) {
+ case this.kActivityIndexing:
+ // mark us as indexing, but don't bother with live tracking. we do
+ // that independently and only for header retrieval.
+ this.indexing = true;
+ break;
+ case this.kActivityHeaderRetrieval:
+ case this.kActivityFolderOnlyNoData:
+ // we don't have to do anything here.
+ break;
+ }
+
+ return MailServices.folderLookup.getFolderForURL(this.uri);
+ },
+
+ /**
+ * Retrieve a GlodaAccount instance corresponding to this folder.
+ *
+ * @returns The GlodaAccount instance.
+ */
+ getAccount() {
+ if (!this._account) {
+ let msgFolder = this.getXPCOMFolder(this.kActivityFolderOnlyNoData);
+ this._account = new GlodaAccount(msgFolder.server);
+ }
+ return this._account;
+ },
+};
+
+/**
+ * @class A message representation.
+ */
+function GlodaMessage(
+ aDatastore,
+ aID,
+ aFolderID,
+ aMessageKey,
+ aConversationID,
+ aConversation,
+ aDate,
+ aHeaderMessageID,
+ aDeleted,
+ aJsonText,
+ aNotability,
+ aSubject,
+ aIndexedBodyText,
+ aAttachmentNames
+) {
+ // _datastore is now set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._folderID = aFolderID;
+ this._messageKey = aMessageKey;
+ this._conversationID = aConversationID;
+ this._conversation = aConversation;
+ this._date = aDate;
+ this._headerMessageID = aHeaderMessageID;
+ this._jsonText = aJsonText;
+ this._notability = aNotability;
+ this._subject = aSubject;
+ this._indexedBodyText = aIndexedBodyText;
+ this._attachmentNames = aAttachmentNames;
+
+ // only set _deleted if we're deleted, otherwise the undefined does our
+ // speaking for us.
+ if (aDeleted) {
+ this._deleted = aDeleted;
+ }
+}
+
GlodaMessage.prototype = {
  NOUN_ID: GlodaConstants.NOUN_MESSAGE,
  // set by GlodaDatastore
  _datastore: null,
  // Database row id of this message.
  get id() {
    return this._id;
  },
  // Numeric id of the owning folder row; null when we are a ghost.
  get folderID() {
    return this._folderID;
  },
  // Message key within the folder's storage; null when unknown/ghost.
  get messageKey() {
    return this._messageKey;
  },
  get conversationID() {
    return this._conversationID;
  },
  // conversation is special
  get headerMessageID() {
    return this._headerMessageID;
  },
  get notability() {
    return this._notability;
  },
  set notability(aNotability) {
    this._notability = aNotability;
  },

  get subject() {
    return this._subject;
  },
  get indexedBodyText() {
    return this._indexedBodyText;
  },
  get attachmentNames() {
    return this._attachmentNames;
  },

  get date() {
    return this._date;
  },
  set date(aNewDate) {
    this._date = aNewDate;
  },

  // The GlodaFolder for our folder id, or null if we have no folder id or the
  // mapping fails.
  get folder() {
    // XXX due to a deletion bug it is currently possible to get in a state
    // where we have an illegal folderID value. This will result in an
    // exception. As a workaround, let's just return null in that case.
    try {
      if (this._folderID != null) {
        return this._datastore._mapFolderID(this._folderID);
      }
    } catch (ex) {}
    return null;
  },
  get folderURI() {
    // XXX just like for folder, handle mapping failures and return null
    try {
      if (this._folderID != null) {
        return this._datastore._mapFolderID(this._folderID).uri;
      }
    } catch (ex) {}
    return null;
  },
  // The GlodaAccount owning our folder, or null if unmapped/ghost.
  get account() {
    // XXX due to a deletion bug it is currently possible to get in a state
    // where we have an illegal folderID value. This will result in an
    // exception. As a workaround, let's just return null in that case.
    try {
      if (this._folderID == null) {
        return null;
      }
      let folder = this._datastore._mapFolderID(this._folderID);
      return folder.getAccount();
    } catch (ex) {}
    return null;
  },
  get conversation() {
    return this._conversation;
  },

  toString() {
    // uh, this is a tough one...
    return "Message:" + this._id;
  },

  // Produce a detached copy used during (re-)indexing; note that _deleted and
  // _jsonText are only carried over when present on this instance.
  _clone() {
    return new GlodaMessage(
      /* datastore */ null,
      this._id,
      this._folderID,
      this._messageKey,
      this._conversationID,
      this._conversation,
      this._date,
      this._headerMessageID,
      "_deleted" in this ? this._deleted : undefined,
      "_jsonText" in this ? this._jsonText : undefined,
      this._notability,
      this._subject,
      this._indexedBodyText,
      this._attachmentNames
    );
  },

  /**
   * Provide a means of propagating changed values on our clone back to
   * ourselves. This is required because of an object identity trick gloda
   * does; when indexing an already existing object, all mutations happen on
   * a clone of the existing object so that the canonical instance is only
   * updated once indexing completes.
   * (NOTE(review): the original comment was truncated after "so that"; the
   * completion above is inferred from _clone/_declone usage — confirm.)
   */
  _declone(aOther) {
    if ("_content" in aOther) {
      this._content = aOther._content;
    }

    // The _indexedAuthor/_indexedRecipients fields don't get updated on
    // fulltext update so we don't need to propagate.
    this._indexedBodyText = aOther._indexedBodyText;
    this._attachmentNames = aOther._attachmentNames;
  },

  /**
   * Mark this message as a ghost. Ghosts are characterized by having no folder
   * id and no message key. They also are not deleted or they would be of
   * absolutely no use to us.
   *
   * These changes are suitable for persistence.
   */
  _ghost() {
    this._folderID = null;
    this._messageKey = null;
    if ("_deleted" in this) {
      delete this._deleted;
    }
  },

  /**
   * Are we a ghost (which implies not deleted)? We are not a ghost if we have
   * a definite folder location (we may not know our message key in the case
   * of IMAP moves not fully completed) and are not deleted.
   */
  get _isGhost() {
    return this._folderID == null && !this._isDeleted;
  },

  /**
   * If we were dead, un-dead us.
   */
  _ensureNotDeleted() {
    if ("_deleted" in this) {
      delete this._deleted;
    }
  },

  /**
   * Are we deleted? This is private because deleted gloda messages are not
   * visible to non-core-gloda code.
   */
  get _isDeleted() {
    return "_deleted" in this && this._deleted;
  },

  /**
   * Trash this message's in-memory representation because it should no longer
   * be reachable by any code. The database record is gone, it's not coming
   * back.
   */
  _objectPurgedMakeYourselfUnpleasant() {
    this._id = null;
    this._folderID = null;
    this._messageKey = null;
    this._conversationID = null;
    this._conversation = null;
    // note: this goes through the |date| setter (clears _date).
    this.date = null;
    this._headerMessageID = null;
  },

  /**
   * Return the underlying nsIMsgDBHdr from the folder storage for this, or
   * null if the message does not exist for one reason or another. We may log
   * to our logger in the failure cases.
   *
   * This method no longer caches the result, so if you need to hold onto it,
   * hold onto it.
   *
   * In the process of retrieving the underlying message header, we may have to
   * open the message header database associated with the folder. This may
   * result in blocking while the load happens, so you may want to try and find
   * an alternate way to initiate the load before calling us.
   * We provide hinting to the GlodaDatastore via the GlodaFolder so that it
   * knows when it's a good time for it to go and detach from the database.
   *
   * @returns The nsIMsgDBHdr associated with this message if available, null on
   *     failure.
   */
  get folderMessage() {
    if (this._folderID === null || this._messageKey === null) {
      return null;
    }

    // XXX like for folder and folderURI, return null if we can't map the folder
    let glodaFolder;
    try {
      glodaFolder = this._datastore._mapFolderID(this._folderID);
    } catch (ex) {
      return null;
    }
    let folder = glodaFolder.getXPCOMFolder(
      glodaFolder.kActivityHeaderRetrieval
    );
    if (folder) {
      let folderMessage;
      try {
        folderMessage = folder.GetMessageHeader(this._messageKey);
      } catch (ex) {
        folderMessage = null;
      }
      if (folderMessage !== null) {
        // verify the message-id header matches what we expect...
        if (folderMessage.messageId != this._headerMessageID) {
          LOG.info(
            "Message with message key " +
              this._messageKey +
              " in folder '" +
              folder.URI +
              "' does not match expected " +
              "header! (" +
              this._headerMessageID +
              " expected, got " +
              folderMessage.messageId +
              ")"
          );
          folderMessage = null;
        }
      }
      return folderMessage;
    }

    // this only gets logged if things have gone very wrong. we used to throw
    // here, but it's unlikely our caller can do anything more meaningful than
    // treating this as a disappeared message.
    LOG.info(
      "Unable to locate folder message for: " +
        this._folderID +
        ":" +
        this._messageKey
    );
    return null;
  },
  // The nsIMsgDBHdr-derived URI of this message, or null if the header is gone.
  get folderMessageURI() {
    let folderMessage = this.folderMessage;
    if (folderMessage) {
      return folderMessage.folder.getUriForMsg(folderMessage);
    }
    return null;
  },
};
// Mix in the shared attribute-manipulation helpers.
MixIn(GlodaMessage, GlodaHasAttributesMixIn);
+
/**
 * @class Contacts correspond to people (one per person), and may own multiple
 * identities (e-mail address, IM account, etc.)
 */
function GlodaContact(
  aDatastore,
  aID,
  aDirectoryUUID,
  aContactUUID,
  aName,
  aPopularity,
  aFrecency,
  aJsonText
) {
  // _datastore set on the prototype by GlodaDatastore; the argument exists
  // only for signature compatibility.
  Object.assign(this, {
    _id: aID,
    _directoryUUID: aDirectoryUUID,
    _contactUUID: aContactUUID,
    _name: aName,
    _popularity: aPopularity,
    _frecency: aFrecency,
  });
  // Only materialize _jsonText when a blob was actually provided; its
  // absence is meaningful to persistence code.
  if (aJsonText) {
    this._jsonText = aJsonText;
  }

  this._identities = null;
}
+
GlodaContact.prototype = {
  NOUN_ID: GlodaConstants.NOUN_CONTACT,
  // set by GlodaDatastore
  _datastore: null,

  get id() {
    return this._id;
  },
  get directoryUUID() {
    return this._directoryUUID;
  },
  get contactUUID() {
    return this._contactUUID;
  },
  get name() {
    return this._name;
  },
  set name(aName) {
    // NOTE(review): unlike the popularity/frecency setters below, this does
    // not set this.dirty — confirm whether that is intentional.
    this._name = aName;
  },

  get popularity() {
    return this._popularity;
  },
  set popularity(aPopularity) {
    this._popularity = aPopularity;
    // flag for persistence.
    this.dirty = true;
  },

  get frecency() {
    return this._frecency;
  },
  set frecency(aFrecency) {
    this._frecency = aFrecency;
    this.dirty = true;
  },

  get identities() {
    return this._identities;
  },

  toString() {
    return "Contact:" + this._id;
  },

  get accessibleLabel() {
    return "Contact: " + this._name;
  },

  // Produce a detached copy for indexing mutation.
  // NOTE(review): unlike GlodaMessage._clone this does not carry _jsonText
  // over to the clone — presumably the JSON blob is regenerated on write;
  // confirm.
  _clone() {
    return new GlodaContact(
      /* datastore */ null,
      this._id,
      this._directoryUUID,
      this._contactUUID,
      this._name,
      this._popularity,
      this._frecency
    );
  },
};
// Mix in the shared attribute-manipulation helpers.
MixIn(GlodaContact, GlodaHasAttributesMixIn);
+
/**
 * @class A specific means of communication for a contact.
 */
function GlodaIdentity(
  aDatastore,
  aID,
  aContactID,
  aContact,
  aKind,
  aValue,
  aDescription,
  aIsRelay
) {
  // _datastore set on the prototype by GlodaDatastore; the argument exists
  // only for signature compatibility.
  Object.assign(this, {
    _id: aID,
    _contactID: aContactID,
    _contact: aContact,
    _kind: aKind,
    _value: aValue,
    _description: aDescription,
    _isRelay: aIsRelay,
    // Cached indication of whether there is an address book card for this
    // identity. We keep this up-to-date via address book listener
    // notifications in |GlodaABIndexer|.
    _hasAddressBookCard: undefined,
  });
}
+
GlodaIdentity.prototype = {
  NOUN_ID: GlodaConstants.NOUN_IDENTITY,
  // set by GlodaDatastore
  _datastore: null,
  get id() {
    return this._id;
  },
  get contactID() {
    return this._contactID;
  },
  get contact() {
    return this._contact;
  },
  // The communication mechanism, e.g. "email".
  get kind() {
    return this._kind;
  },
  // The address within that mechanism, e.g. the e-mail address.
  get value() {
    return this._value;
  },
  get description() {
    return this._description;
  },
  get isRelay() {
    return this._isRelay;
  },

  // Kind-prefixed key, e.g. "email@user@example.com"; disambiguates the same
  // value across different kinds.
  get uniqueValue() {
    return this._kind + "@" + this._value;
  },

  toString() {
    return "Identity:" + this._kind + ":" + this._value;
  },

  // Human-readable form; avoids "x : x" when the contact name is the value.
  toLocaleString() {
    if (this.contact.name == this.value) {
      return this.value;
    }
    return this.contact.name + " : " + this.value;
  },

  // The address book card for this identity, updating the cached
  // _hasAddressBookCard flag as a side effect.
  // NOTE(review): returns |false| (not null) for non-email kinds but a
  // card-or-null otherwise — callers appear to rely only on truthiness;
  // confirm before changing.
  get abCard() {
    // for our purposes, the address book only speaks email
    if (this._kind != "email") {
      return false;
    }
    let card = MailServices.ab.cardForEmailAddress(this._value);
    this._hasAddressBookCard = card != null;
    return card;
  },

  /**
   * Indicates whether we have an address book card for this identity. This
   * value is cached once looked-up and kept up-to-date by |GlodaABIndexer|
   * and its notifications.
   */
  get inAddressBook() {
    if (this._hasAddressBookCard !== undefined) {
      return this._hasAddressBookCard;
    }
    // Fall back to an actual lookup, coerced to a boolean.
    return (this.abCard && true) || false;
  },
};
+
/**
 * An attachment, with as much information as we can gather on it.
 */
function GlodaAttachment(
  aGlodaMessage,
  aName,
  aContentType,
  aSize,
  aPart,
  aExternalUrl,
  aIsExternal
) {
  // _datastore set on the prototype by GlodaDatastore.
  Object.assign(this, {
    _glodaMessage: aGlodaMessage,
    _name: aName,
    _contentType: aContentType,
    _size: aSize,
    _part: aPart,
    _externalUrl: aExternalUrl,
    _isExternal: aIsExternal,
  });
}
+
GlodaAttachment.prototype = {
  NOUN_ID: GlodaConstants.NOUN_ATTACHMENT,
  // set by GlodaDatastore
  get name() {
    return this._name;
  },
  get contentType() {
    return this._contentType;
  },
  get size() {
    return this._size;
  },
  // URL from which the attachment can be fetched. For external attachments
  // this is the stored URL; otherwise it is rebuilt from the owning message's
  // URI plus part/filename query parameters.
  //
  // @throws Error if the underlying folder message no longer exists.
  get url() {
    if (this.isExternal) {
      return this._externalUrl;
    }

    let uri = this._glodaMessage.folderMessageURI;
    if (!uri) {
      throw new Error(
        "The message doesn't exist anymore, unable to rebuild attachment URL"
      );
    }
    let msgService = MailServices.messageServiceFromURI(uri);
    let neckoURL = msgService.getUrlForUri(uri);
    let url = neckoURL.spec;
    // If the URL already ends in a "?key=value" query, append with "&".
    let hasParamAlready = url.match(/\?[a-z]+=[^\/]+$/);
    let sep = hasParamAlready ? "&" : "?";
    return (
      url +
      sep +
      "part=" +
      this._part +
      "&filename=" +
      encodeURIComponent(this._name)
    );
  },
  get isExternal() {
    return this._isExternal;
  },

  toString() {
    return "attachment: " + this._name + ":" + this._contentType;
  },
};
diff --git a/comm/mailnews/db/gloda/modules/GlodaDatabind.jsm b/comm/mailnews/db/gloda/modules/GlodaDatabind.jsm
new file mode 100644
index 0000000000..eda41cb91a
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaDatabind.jsm
@@ -0,0 +1,210 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaDatabind"];
+
/**
 * Simple database binding layer for a noun definition: normalizes the
 * schema's column definitions, determines the next available row id, and
 * pre-builds async INSERT/UPDATE statements (plus fulltext variants when the
 * schema defines fulltextColumns).
 *
 * Fixes relative to the previous revision: the fulltext placeholder loop used
 * a `var` destructuring declaration (inconsistent with the `let` used by
 * every other loop), and its locals shadowed the outer
 * insertColumns/insertValues/updateItems; both are cleaned up with no
 * behavior change.
 *
 * @param aNounDef The noun definition; must provide tableName and schema.
 * @param aDatastore The GlodaDatastore used to create SQL statements.
 */
function GlodaDatabind(aNounDef, aDatastore) {
  this._nounDef = aNounDef;
  this._tableName = aNounDef.tableName;
  this._tableDef = aNounDef.schema;
  this._datastore = aDatastore;
  this._log = console.createInstance({
    prefix: `gloda.databind.${this._tableName}`,
    maxLogLevel: "Warn",
    maxLogLevelPref: "gloda.loglevel",
  });

  // process the column definitions and make sure they have an attribute
  // mapping: coldef[2] (the attribute name) defaults to coldef[0] (the
  // column name).
  for (let [iColDef, coldef] of this._tableDef.columns.entries()) {
    // default to the other dude's thing.
    if (coldef.length < 3) {
      coldef[2] = coldef[0];
    }
    if (coldef[0] == "id") {
      this._idAttr = coldef[2];
    }
    // colDef[3] is the index of us in our SQL bindings, storage-numbering
    coldef[3] = iColDef;
  }

  // XXX This is obviously synchronous and not perfectly async. Since we are
  // doing this, we don't actually need to move to ordinal binding below
  // since we could just as well compel creation of the name map and thereby
  // avoid ever acquiring the mutex after bootstrap.
  // However, this specific check can be cleverly avoided with future work.
  // Namely, at startup we can scan for extension-defined tables and get their
  // maximum id so that we don't need to do it here. The table will either
  // be brand new and thus have a maximum id of 1 or we will already know it
  // because of that scan.
  this._nextId = 1;
  let stmt = this._datastore._createSyncStatement(
    "SELECT MAX(id) FROM " + this._tableName,
    true
  );
  if (stmt.executeStep()) {
    // no chance of this SQLITE_BUSY on this call
    this._nextId = stmt.getInt64(0) + 1;
  }
  stmt.finalize();

  // Build the main-table INSERT/UPDATE SQL with positional (?N) placeholders
  // whose numbering matches coldef[3] + 1.
  let insertColumns = [];
  let insertValues = [];
  let updateItems = [];
  for (let [iColDef, coldef] of this._tableDef.columns.entries()) {
    let column = coldef[0];
    let placeholder = "?" + (iColDef + 1);
    insertColumns.push(column);
    insertValues.push(placeholder);
    if (column != "id") {
      updateItems.push(column + " = " + placeholder);
    }
  }

  let insertSql =
    "INSERT INTO " +
    this._tableName +
    " (" +
    insertColumns.join(", ") +
    ") VALUES (" +
    insertValues.join(", ") +
    ")";

  // For the update, we want the 'id' to be a constraint and not a value
  // that gets set...
  let updateSql =
    "UPDATE " +
    this._tableName +
    " SET " +
    updateItems.join(", ") +
    " WHERE id = ?1";
  this._insertStmt = aDatastore._createAsyncStatement(insertSql);
  this._updateStmt = aDatastore._createAsyncStatement(updateSql);

  if (this._tableDef.fulltextColumns) {
    for (let [iColDef, coldef] of this._tableDef.fulltextColumns.entries()) {
      if (coldef.length < 3) {
        coldef[2] = coldef[0];
      }
      // colDef[3] is the index of us in our SQL bindings, storage-numbering
      coldef[3] = iColDef + 1;
    }

    // Distinct names (previously these shadowed the outer lists above).
    let ftInsertColumns = [];
    let ftInsertValues = [];
    let ftUpdateItems = [];
    for (let [iColDef, coldef] of this._tableDef.fulltextColumns.entries()) {
      let column = coldef[0];
      // +2 instead of +1 because docid is implied
      let placeholder = "?" + (iColDef + 2);
      ftInsertColumns.push(column);
      ftInsertValues.push(placeholder);
      if (column != "id") {
        ftUpdateItems.push(column + " = " + placeholder);
      }
    }

    let insertFulltextSql =
      "INSERT INTO " +
      this._tableName +
      "Text (docid," +
      ftInsertColumns.join(", ") +
      ") VALUES (?1," +
      ftInsertValues.join(", ") +
      ")";

    // For the update, we want the 'id' to be a constraint and not a value
    // that gets set...
    let updateFulltextSql =
      "UPDATE " +
      this._tableName +
      "Text SET " +
      ftUpdateItems.join(", ") +
      " WHERE docid = ?1";

    this._insertFulltextStmt =
      aDatastore._createAsyncStatement(insertFulltextSql);
    this._updateFulltextStmt =
      aDatastore._createAsyncStatement(updateFulltextSql);
  }
}
+
GlodaDatabind.prototype = {
  /**
   * Perform appropriate binding coercion based on the schema provided to us.
   * Although we end up effectively coercing JS Date objects to numeric values,
   * we should not be provided with JS Date objects! There is no way for us
   * to know to turn them back into JS Date objects on the way out.
   * Additionally, there is the small matter of storage's bias towards
   * PRTime representations which may not always be desirable.
   */
  bindByType(aStmt, aColDef, aValue) {
    // aColDef[3] holds the statement's 0-based binding index for the column.
    aStmt.bindByIndex(aColDef[3], aValue);
  },

  /**
   * Materialize a noun instance from a database row, assigning each column
   * value to the attribute named by colDef[2].
   */
  objFromRow(aRow) {
    let getVariant = this._datastore._getVariant;
    let obj = new this._nounDef.class();
    for (let [iCol, colDef] of this._tableDef.columns.entries()) {
      obj[colDef[2]] = getVariant(aRow, iCol);
    }
    return obj;
  },

  /**
   * Asynchronously INSERT aThing, assigning it the next available id when it
   * lacks one, and inserting its fulltext row when the schema has one.
   */
  objInsert(aThing) {
    // bindByType does not use |this|, so the unbound alias is safe.
    let bindByType = this.bindByType;
    if (!aThing[this._idAttr]) {
      aThing[this._idAttr] = this._nextId++;
    }

    let stmt = this._insertStmt;
    for (let colDef of this._tableDef.columns) {
      bindByType(stmt, colDef, aThing[colDef[2]]);
    }

    stmt.executeAsync(this._datastore.trackAsync());

    if (this._insertFulltextStmt) {
      stmt = this._insertFulltextStmt;
      stmt.bindByIndex(0, aThing[this._idAttr]);
      for (let colDef of this._tableDef.fulltextColumns) {
        bindByType(stmt, colDef, aThing[colDef[2]]);
      }
      stmt.executeAsync(this._datastore.trackAsync());
    }
  },

  /**
   * Asynchronously UPDATE the row (and fulltext row, if any) backing aThing.
   */
  objUpdate(aThing) {
    let bindByType = this.bindByType;
    let stmt = this._updateStmt;
    // Bindings are positional (?N); the update SQL was generated with the
    // same placeholder numbering as the insert, so binding the id column
    // writes ?1 — which doubles as the "WHERE id = ?1" constraint — and
    // needs no special handling here.
    for (let colDef of this._tableDef.columns) {
      bindByType(stmt, colDef, aThing[colDef[2]]);
    }
    stmt.executeAsync(this._datastore.trackAsync());

    if (this._updateFulltextStmt) {
      stmt = this._updateFulltextStmt;
      // fulltextColumns doesn't include id/docid, need to explicitly set it
      stmt.bindByIndex(0, aThing[this._idAttr]);
      for (let colDef of this._tableDef.fulltextColumns) {
        bindByType(stmt, colDef, aThing[colDef[2]]);
      }
      stmt.executeAsync(this._datastore.trackAsync());
    }
  },

  adjustAttributes(...aArgs) {
    // just proxy the call over to the datastore... we have to do this for
    // 'this' reasons. we don't refactor things to avoid this because it does
    // make some sense to have all the methods exposed from a single object,
    // even if the implementation does live elsewhere.
    return this._datastore.adjustAttributes(...aArgs);
  },

  // also proxied...
  queryFromQuery(...aArgs) {
    return this._datastore.queryFromQuery(...aArgs);
  },
};
diff --git a/comm/mailnews/db/gloda/modules/GlodaDatastore.jsm b/comm/mailnews/db/gloda/modules/GlodaDatastore.jsm
new file mode 100644
index 0000000000..1391ceaaf2
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaDatastore.jsm
@@ -0,0 +1,4402 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This file looks to Myk Melez <myk@mozilla.org>'s Mozilla Labs snowl
+ * project's (https://hg.mozilla.org/labs/snowl/) modules/GlodaDatastore.jsm
+ * for inspiration and idioms (and also a name :).
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaDatastore"];
+
+const {
+ GlodaAttributeDBDef,
+ GlodaConversation,
+ GlodaFolder,
+ GlodaMessage,
+ GlodaContact,
+ GlodaIdentity,
+} = ChromeUtils.import("resource:///modules/gloda/GlodaDataModel.jsm");
+const { GlodaDatabind } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatabind.jsm"
+);
+const { GlodaCollection, GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
// SQLite page-cache sizing bounds, in bytes (8 MiB .. 64 MiB).
// NOTE(review): the code consuming these is outside this excerpt — confirm.
var MIN_CACHE_SIZE = 8 * 1048576;
var MAX_CACHE_SIZE = 64 * 1048576;
// Assumed physical memory (bytes) when the real amount cannot be queried.
var MEMSIZE_FALLBACK_BYTES = 256 * 1048576;

// Logger for the PostCommitHandler ("pch") machinery below.
var PCH_LOG = console.createInstance({
  prefix: "gloda.ds.pch",
  maxLogLevel: "Warn",
  maxLogLevelPref: "gloda.loglevel",
});
+
/**
 * Commit async handler: counts itself as a pending async statement and, on
 * completion, runs its queued callbacks before handing the notification to
 * |GlodaDatastore._asyncCompleted|.
 *
 * @param aCallbacks Zero-argument functions invoked when the commit finishes.
 */
function PostCommitHandler(aCallbacks) {
  this.callbacks = aCallbacks;
  // Register as an outstanding async statement so shutdown can wait on us.
  GlodaDatastore._pendingAsyncStatements += 1;
}
+
PostCommitHandler.prototype = {
  /** No row results are expected from a commit statement. */
  handleResult(aResultSet) {},

  /** Log storage errors; handleCompletion still fires afterwards. */
  handleError(aError) {
    PCH_LOG.error("database error:" + aError);
  },

  /**
   * Run the queued callbacks (only when the statement finished normally),
   * then decrement the datastore's pending async statement count. Bails
   * outright when the datastore has already shut down.
   */
  handleCompletion(aReason) {
    // just outright bail if we are shutdown
    if (GlodaDatastore.datastoreIsShutdown) {
      return;
    }

    const finishedNormally =
      aReason == Ci.mozIStorageStatementCallback.REASON_FINISHED;
    if (finishedNormally) {
      for (const callback of this.callbacks) {
        try {
          callback();
        } catch (ex) {
          // A broken callback must not prevent the remaining ones (or the
          // async-completed bookkeeping) from running.
          PCH_LOG.error(
            "PostCommitHandler callback (" +
              ex.fileName +
              ":" +
              ex.lineNumber +
              ") threw: " +
              ex
          );
        }
      }
    }
    try {
      GlodaDatastore._asyncCompleted();
    } catch (e) {
      PCH_LOG.error("Exception in handleCompletion:", e);
    }
  },
};
+
// Logger for the queryFromQuery ("QFQ") machinery below.
var QFQ_LOG = console.createInstance({
  prefix: "gloda.ds.qfq",
  maxLogLevel: "Warn",
  maxLogLevelPref: "gloda.loglevel",
});
+
/**
 * Singleton collection listener used by |QueryFromQueryCallback| to assist in
 * the loading of referenced noun instances. Which is to say, messages have
 * identities (specific e-mail addresses) associated with them via attributes.
 * And these identities in turn reference / are referenced by contacts (the
 * notion of a person).
 *
 * This listener is primarily concerned with fixing up the references in each
 * noun instance to its referenced instances once they have been loaded. It
 * also deals with caching so that our identity invariant is maintained: user
 * code should only ever see one distinct instance of a thing at a time.
 */
var QueryFromQueryResolver = {
  // aFake is true when invoked synthetically by QueryFromQueryCallback (no
  // real sub-load completed, so the deferred/resolved counters are untouched).
  onItemsAdded(aIgnoredItems, aCollection, aFake) {
    // The originating collection rides along on the sub-collection's data
    // slot (or a stack of them when several loads are in flight).
    let originColl = aCollection.dataStack
      ? aCollection.dataStack.pop()
      : aCollection.data;
    // QFQ_LOG.debug("QFQR: originColl: " + originColl);
    if (aCollection.completionShifter) {
      aCollection.completionShifter.push(originColl);
    } else {
      aCollection.completionShifter = [originColl];
    }

    if (!aFake) {
      originColl.deferredCount--;
      originColl.resolvedCount++;
    }

    // bail if we are still pending on some other load completion
    if (originColl.deferredCount > 0) {
      // QFQ_LOG.debug("QFQR: bailing " + originColl._nounDef.name);
      return;
    }

    let referencesByNounID = originColl.masterCollection.referencesByNounID;
    let inverseReferencesByNounID =
      originColl.masterCollection.inverseReferencesByNounID;

    if (originColl.pendingItems) {
      for (let item of originColl.pendingItems) {
        // QFQ_LOG.debug("QFQR: loading deferred " + item.NOUN_ID + ":" + item.id);
        GlodaDatastore.loadNounDeferredDeps(
          item,
          referencesByNounID,
          inverseReferencesByNounID
        );
      }

      // we need to consider the possibility that we are racing a collection very
      // much like our own. as such, this means we need to perform cache
      // unification as our last step.
      GlodaCollectionManager.cacheLoadUnify(
        originColl._nounDef.id,
        originColl.pendingItems,
        false
      );

      // just directly tell the collection about the items. we know the query
      // matches (at least until we introduce predicates that we cannot express
      // in SQL.)
      // QFQ_LOG.debug(" QFQR: about to trigger listener: " + originColl._listener +
      // "with collection: " + originColl._nounDef.name);
      originColl._onItemsAdded(originColl.pendingItems);
      delete originColl.pendingItems;
      delete originColl._pendingIdMap;
    }
  },
  onItemsModified() {},
  onItemsRemoved() {},
  // Fires completion on the originating collection once every deferred
  // sub-load it was waiting on has resolved.
  onQueryCompleted(aCollection) {
    let originColl = aCollection.completionShifter
      ? aCollection.completionShifter.shift()
      : aCollection.data;
    // QFQ_LOG.debug(" QFQR about to trigger completion with collection: " +
    // originColl._nounDef.name);
    if (originColl.deferredCount <= 0) {
      originColl._onQueryCompleted();
    }
  },
};
+
/**
 * Receives the rows for a GlodaDatastore.queryFromQuery call and, in
 * cooperation with the |QueryFromQueryResolver| collection listener, stitches
 * referenced noun instances back together (messages reference identities,
 * identities reference contacts, and so on), spinning up follow-on queries
 * for anything not already loaded.
 *
 * While the code is generally up to the existing set of tasks it is called to
 * handle, some of the logic here 'evolved' a bit and could benefit from
 * additional documentation and a fresh go-through.
 */
function QueryFromQueryCallback(aStatement, aNounDef, aCollection) {
  this.statement = aStatement;
  this.nounDef = aNounDef;
  this.collection = aCollection;

  // QFQ_LOG.debug("Creating QFQCallback for noun: " + aNounDef.name);

  // The master collection owns the canonical reference maps; we keep local
  // maps for what this pass loads and contribute into the master's as we go.
  // This matters because circular dependencies mean items cannot enter the
  // caching layer (or the collection's _idMap) until fully resolved.
  this.referencesByNounID = {};
  const masterRefs = this.collection.masterCollection.referencesByNounID;
  this.masterReferencesByNounID = masterRefs;
  this.inverseReferencesByNounID = {};
  const masterInverseRefs =
    this.collection.masterCollection.inverseReferencesByNounID;
  this.masterInverseReferencesByNounID = masterInverseRefs;

  if (!(this.nounDef.id in masterRefs)) {
    masterRefs[this.nounDef.id] = {};
  }
  this.selfReferences = masterRefs[this.nounDef.id];

  if (this.nounDef.parentColumnAttr) {
    if (!(this.nounDef.id in masterInverseRefs)) {
      masterInverseRefs[this.nounDef.id] = {};
    }
    this.selfInverseReferences = masterInverseRefs[this.nounDef.id];
  }

  this.needsLoads = false;

  GlodaDatastore._pendingAsyncStatements++;
}
+
QueryFromQueryCallback.prototype = {
  // Materialize each row into a noun instance, preferring an existing cached
  // or already-loaded instance (identity invariant), and record what still
  // needs dependent loads.
  handleResult(aResultSet) {
    try {
      // just outright bail if we are shutdown
      if (GlodaDatastore.datastoreIsShutdown) {
        return;
      }

      let pendingItems = this.collection.pendingItems;
      let pendingIdMap = this.collection._pendingIdMap;
      let row;
      let nounDef = this.nounDef;
      let nounID = nounDef.id;
      while ((row = aResultSet.getNextRow())) {
        let item = nounDef.objFromRow.call(nounDef.datastore, row);
        if (this.collection.stashedColumns) {
          // The query asked for extra columns to be stashed alongside items.
          let stashed = (this.collection.stashedColumns[item.id] = []);
          for (let iCol of this.collection.query.options.stashColumns) {
            stashed.push(GlodaDatastore._getVariant(row, iCol));
          }
        }
        // try and replace the item with one from the cache, if we can
        let cachedItem = GlodaCollectionManager.cacheLookupOne(
          nounID,
          item.id,
          false
        );

        // if we already have a copy in the pending id map, skip it
        if (item.id in pendingIdMap) {
          continue;
        }

        // QFQ_LOG.debug("loading item " + nounDef.id + ":" + item.id + " existing: " +
        // this.selfReferences[item.id] + " cached: " + cachedItem);
        if (cachedItem) {
          item = cachedItem;
        } else if (this.selfReferences[item.id] != null) {
          // We may already have been loaded by this process.
          item = this.selfReferences[item.id];
        } else {
          // Perform loading logic which may produce reference dependencies.
          this.needsLoads =
            GlodaDatastore.loadNounItem(
              item,
              this.referencesByNounID,
              this.inverseReferencesByNounID
            ) || this.needsLoads;
        }

        // add ourself to the references by our id
        // QFQ_LOG.debug("saving item " + nounDef.id + ":" + item.id + " to self-refs");
        this.selfReferences[item.id] = item;

        // if we're tracking it, add ourselves to our parent's list of children
        // too
        if (this.selfInverseReferences) {
          let parentID = item[nounDef.parentColumnAttr.idStorageAttributeName];
          let childrenList = this.selfInverseReferences[parentID];
          if (childrenList === undefined) {
            childrenList = this.selfInverseReferences[parentID] = [];
          }
          childrenList.push(item);
        }

        pendingItems.push(item);
        pendingIdMap[item.id] = item;
      }
    } catch (e) {
      GlodaDatastore._log.error("Exception in handleResult:", e);
    }
  },

  handleError(aError) {
    GlodaDatastore._log.error(
      "Async queryFromQuery error: " + aError.result + ": " + aError.message
    );
  },

  // Once all rows are in: issue sub-queries for referenced nouns that were
  // not found in the cache, or — when nothing further is needed — resolve
  // immediately through QueryFromQueryResolver.
  handleCompletion(aReason) {
    try {
      try {
        this.statement.finalize();
        this.statement = null;

        // just outright bail if we are shutdown
        if (GlodaDatastore.datastoreIsShutdown) {
          return;
        }

        // QFQ_LOG.debug("handleCompletion: " + this.collection._nounDef.name);

        if (this.needsLoads) {
          for (let nounID in this.referencesByNounID) {
            let references = this.referencesByNounID[nounID];
            if (nounID == this.nounDef.id) {
              continue;
            }
            let nounDef = GlodaDatastore._nounIDToDef[nounID];
            // QFQ_LOG.debug(" have references for noun: " + nounDef.name);
            // try and load them out of the cache/existing collections. items in the
            // cache will be fully formed, which is nice for us.
            // XXX this mechanism will get dubious when we have multiple paths to a
            // single noun-type. For example, a -> b -> c, a-> c; two paths to c
            // and we're looking at issuing two requests to c, the latter of which
            // will be a superset of the first one. This does not currently pose
            // a problem because we only have a -> b -> c -> b, and sequential
            // processing means no alarms and no surprises.
            let masterReferences = this.masterReferencesByNounID[nounID];
            if (masterReferences === undefined) {
              masterReferences = this.masterReferencesByNounID[nounID] = {};
            }
            let outReferences;
            if (nounDef.parentColumnAttr) {
              outReferences = {};
            } else {
              outReferences = masterReferences;
            }
            let [, notFoundCount, notFound] =
              GlodaCollectionManager.cacheLookupMany(
                nounDef.id,
                references,
                outReferences
              );

            if (nounDef.parentColumnAttr) {
              // Merge the cache hits into the master maps and index them by
              // their parent id for inverse (parent -> children) lookups.
              let inverseReferences;
              if (nounDef.id in this.masterInverseReferencesByNounID) {
                inverseReferences =
                  this.masterInverseReferencesByNounID[nounDef.id];
              } else {
                inverseReferences = this.masterInverseReferencesByNounID[
                  nounDef.id
                ] = {};
              }

              for (let key in outReferences) {
                let item = outReferences[key];
                masterReferences[item.id] = item;
                let parentID =
                  item[nounDef.parentColumnAttr.idStorageAttributeName];
                let childrenList = inverseReferences[parentID];
                if (childrenList === undefined) {
                  childrenList = inverseReferences[parentID] = [];
                }
                childrenList.push(item);
              }
            }

            // QFQ_LOG.debug(" found: " + foundCount + " not found: " + notFoundCount);
            if (notFoundCount === 0) {
              this.collection.resolvedCount++;
            } else {
              // Cache misses: spin up a by-id sub-query whose completion is
              // routed back through QueryFromQueryResolver.
              this.collection.deferredCount++;
              let query = new nounDef.queryClass();
              query.id.apply(query, Object.keys(notFound));

              // we fully expect/allow for there being no such subcollection yet.
              let subCollection =
                nounDef.id in this.collection.masterCollection.subCollections
                  ? this.collection.masterCollection.subCollections[nounDef.id]
                  : undefined;
              this.collection.masterCollection.subCollections[nounDef.id] =
                GlodaDatastore.queryFromQuery(
                  query,
                  QueryFromQueryResolver,
                  this.collection,
                  subCollection,
                  this.collection.masterCollection,
                  { becomeExplicit: true }
                );
            }
          }

          for (let nounID in this.inverseReferencesByNounID) {
            let inverseReferences = this.inverseReferencesByNounID[nounID];
            this.collection.deferredCount++;
            let nounDef = GlodaDatastore._nounIDToDef[nounID];

            // QFQ_LOG.debug("Want to load inverse via " + nounDef.parentColumnAttr.boundName);

            let query = new nounDef.queryClass();
            // we want to constrain using the parent column
            let queryConstrainer = query[nounDef.parentColumnAttr.boundName];
            queryConstrainer.apply(query, Object.keys(inverseReferences));
            // we fully expect/allow for there being no such subcollection yet.
            let subCollection =
              nounDef.id in this.collection.masterCollection.subCollections
                ? this.collection.masterCollection.subCollections[nounDef.id]
                : undefined;
            this.collection.masterCollection.subCollections[nounDef.id] =
              GlodaDatastore.queryFromQuery(
                query,
                QueryFromQueryResolver,
                this.collection,
                subCollection,
                this.collection.masterCollection,
                { becomeExplicit: true }
              );
          }
        } else {
          this.collection.deferredCount--;
          this.collection.resolvedCount++;
        }

        // QFQ_LOG.debug(" defer: " + this.collection.deferredCount +
        // " resolved: " + this.collection.resolvedCount);

        // process immediately and kick-up to the master collection...
        if (this.collection.deferredCount <= 0) {
          // this guy will resolve everyone using referencesByNounID and issue the
          // call to this.collection._onItemsAdded to propagate things to the
          // next concerned subCollection or the actual listener if this is the
          // master collection. (Also, call _onQueryCompleted).
          QueryFromQueryResolver.onItemsAdded(
            null,
            { data: this.collection },
            true
          );
          QueryFromQueryResolver.onQueryCompleted({ data: this.collection });
        }
      } catch (e) {
        console.error(e);
        QFQ_LOG.error("Exception:", e);
      }
    } finally {
      GlodaDatastore._asyncCompleted();
    }
  },
};
+
/**
 * Used by |GlodaDatastore.folderCompactionPassBlockFetch| to accumulate
 * [id, messageKey, headerMessageID] triples from an async SQL query and hand
 * the complete list back to the compaction process in
 * |GlodaMsgIndexer._worker_folderCompactionPass|. Registers itself as a
 * pending async statement so datastore shutdown waits for it.
 */
function CompactionBlockFetcherHandler(aCallback) {
  this.callback = aCallback;
  this.idsAndMessageKeys = [];
  GlodaDatastore._pendingAsyncStatements++;
}
CompactionBlockFetcherHandler.prototype = {
  handleResult(aResultSet) {
    for (
      let row = aResultSet.getNextRow();
      row;
      row = aResultSet.getNextRow()
    ) {
      // Column order: id, messageKey, headerMessageID.
      this.idsAndMessageKeys.push([
        row.getInt64(0),
        row.getInt64(1),
        row.getString(2),
      ]);
    }
  },
  handleError(aError) {
    GlodaDatastore._log.error(
      `CompactionBlockFetcherHandler error: ${aError.result}: ${aError.message}`
    );
  },
  handleCompletion(aReason) {
    // Mark the async statement complete before invoking the callback so the
    // datastore's pending-statement bookkeeping stays accurate.
    GlodaDatastore._asyncCompleted();
    this.callback(this.idsAndMessageKeys);
  },
};
+
/**
 * Callback handler for SQL queries expected to yield a single row with a
 * single integer column, like a COUNT() query. The value (or null if no row
 * arrived) is forwarded to the supplied callback on completion. Registers
 * itself as a pending async statement so datastore shutdown waits for it.
 */
function SingletonResultValueHandler(aCallback) {
  this.callback = aCallback;
  this.result = null;
  GlodaDatastore._pendingAsyncStatements++;
}
SingletonResultValueHandler.prototype = {
  handleResult(aResultSet) {
    for (
      let row = aResultSet.getNextRow();
      row;
      row = aResultSet.getNextRow()
    ) {
      // Should only ever see one row; if more arrive, the last one wins.
      this.result = row.getInt64(0);
    }
  },
  handleError(aError) {
    GlodaDatastore._log.error(
      `SingletonResultValueHandler error: ${aError.result}: ${aError.message}`
    );
  },
  handleCompletion(aReason) {
    GlodaDatastore._asyncCompleted();
    this.callback(this.result);
  },
};
+
/**
 * Mirrors actions taken on a real async statement onto a companion EXPLAIN
 * statement. The EXPLAIN statement fires only once — on the first
 * executeAsync call — after which only the real statement is driven.
 */
function ExplainedStatementWrapper(
  aRealStatement,
  aExplainStatement,
  aSQLString,
  aExplainHandler
) {
  this.real = aRealStatement;
  this.explain = aExplainStatement;
  this.sqlString = aSQLString;
  this.explainHandler = aExplainHandler;
  this.done = false;
}
ExplainedStatementWrapper.prototype = {
  bindByIndex(aColIndex, aValue) {
    this.real.bindByIndex(aColIndex, aValue);
    if (this.done) {
      return;
    }
    // Keep the EXPLAIN statement's bindings in sync until it has fired.
    this.explain.bindByIndex(aColIndex, aValue);
  },
  executeAsync(aCallback) {
    this._fireExplainOnce();
    return this.real.executeAsync(aCallback);
  },
  /** Run and finalize the EXPLAIN statement the first time we execute. */
  _fireExplainOnce() {
    if (this.done) {
      return;
    }
    this.explainHandler.sqlEnRoute(this.sqlString);
    this.explain.executeAsync(this.explainHandler);
    this.explain.finalize();
    this.done = true;
  },
  finalize() {
    // If the EXPLAIN statement never ran, it was never finalized either.
    if (!this.done) {
      this.explain.finalize();
    }
    this.real.finalize();
  },
};
+
/**
 * Writes a single JSON document to the provided file path in a streaming
 * fashion: the "queries" array is opened at construction and closed (via the
 * "quit-application" observer) at shutdown. Each completed EXPLAIN statement
 * contributes one {sql, operations} record.
 */
function ExplainedStatementProcessor(aDumpPath) {
  Services.obs.addObserver(this, "quit-application");

  this._sqlStack = [];
  this._curOps = [];
  this._objsWritten = 0;

  const filePath = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
  filePath.initWithPath(aDumpPath);

  this._ostream = Cc[
    "@mozilla.org/network/file-output-stream;1"
  ].createInstance(Ci.nsIFileOutputStream);
  this._ostream.init(filePath, -1, -1, 0);

  this._writeString('{"queries": [');
}
ExplainedStatementProcessor.prototype = {
  /** Write a raw string to the dump file's output stream. */
  _writeString(aString) {
    this._ostream.write(aString, aString.length);
  },
  sqlEnRoute(aSQLString) {
    this._sqlStack.push(aSQLString);
  },
  handleResult(aResultSet) {
    // EXPLAIN columns: addr, opcode, p1, p2, p3, p4, p5, comment.
    for (
      let row = aResultSet.getNextRow();
      row;
      row = aResultSet.getNextRow()
    ) {
      this._curOps.push([
        row.getInt64(0),
        row.getString(1),
        row.getInt64(2),
        row.getInt64(3),
        row.getInt64(4),
        row.getString(5),
        row.getString(6),
        row.getString(7),
      ]);
    }
  },
  handleError(aError) {
    console.error("Unexpected error in EXPLAIN handler: " + aError);
  },
  handleCompletion(aReason) {
    const record = {
      sql: this._sqlStack.shift(),
      operations: this._curOps,
    };
    // Comma-separate records after the first one.
    const separator = this._objsWritten++ ? ", " : "";
    this._writeString(separator + JSON.stringify(record, null, 2));
    this._curOps = [];
  },

  observe(aSubject, aTopic, aData) {
    if (aTopic == "quit-application") {
      this.shutdown();
    }
  },

  shutdown() {
    // Close the JSON array/object and release the stream and observer.
    this._writeString("]}");
    this._ostream.close();

    Services.obs.removeObserver(this, "quit-application");
  },
};
+
// See the documentation on GlodaDatastore._schemaVersion to understand these.
// Versions in [ACCEPT_LEAVE_LOW, ACCEPT_LEAVE_HIGH] are accepted and left
// intact; versions in [ACCEPT_DOWNGRADE_LOW, ACCEPT_DOWNGRADE_HIGH] are
// accepted but downgraded by DOWNGRADE_DELTA; anything newer gets nuked.
var DB_SCHEMA_ACCEPT_LEAVE_LOW = 31;
var DB_SCHEMA_ACCEPT_LEAVE_HIGH = 34;
var DB_SCHEMA_ACCEPT_DOWNGRADE_LOW = 35;
var DB_SCHEMA_ACCEPT_DOWNGRADE_HIGH = 39;
var DB_SCHEMA_DOWNGRADE_DELTA = 5;
+
+/**
+ * Database abstraction layer. Contains explicit SQL schemas for our
+ * fundamental representations (core 'nouns', if you will) as well as
+ * specialized functions for then dealing with each type of object. At the
+ * same time, we are beginning to support extension-provided tables, which
+ * call into question whether we really need our hand-rolled code, or could
+ * simply improve the extension-provided table case to work for most of our
+ * hand-rolled cases.
+ * For now, the argument can probably be made that our explicit schemas and code
+ * is readable/intuitive (not magic) and efficient (although generic stuff
+ * could also be made efficient, if slightly evil through use of eval or some
+ * other code generation mechanism.)
+ *
+ * === Data Model Interaction / Dependencies
+ *
+ * Dependent on and assumes limited knowledge of the GlodaDataModel.jsm
+ * implementations. GlodaDataModel.jsm actually has an implicit dependency on
+ * our implementation, reaching back into the datastore via the _datastore
+ * attribute which we pass into every instance we create.
+ * We pass a reference to ourself as we create the GlodaDataModel.jsm instances (and
+ * they store it as _datastore) because of a half-implemented attempt to make
+ * it possible to live in a world where we have multiple datastores. This
+ * would be desirable in the cases where we are dealing with multiple SQLite
+ * databases. This could be because of per-account global databases or
+ * some other segmentation. This was abandoned when the importance of
+ * per-account databases was diminished following public discussion, at least
 * for the short-term, but no attempt was made to excise the feature or
+ * preclude it. (Merely a recognition that it's too much to try and implement
 * correctly right now, especially because our solution might just be another
+ * (aggregating) layer on top of things, rather than complicating the lower
+ * levels.)
+ *
+ * === Object Identity / Caching
+ *
+ * The issue of object identity is handled by integration with the Collection.jsm
+ * provided GlodaCollectionManager. By "Object Identity", I mean that we only
+ * should ever have one object instance alive at a time that corresponds to
+ * an underlying database row in the database. Where possible we avoid
+ * performing database look-ups when we can check if the object is already
+ * present in memory; in practice, this means when we are asking for an object
+ * by ID. When we cannot avoid a database query, we attempt to make sure that
+ * we do not return a duplicate object instance, instead replacing it with the
+ * 'live' copy of the object. (Ideally, we would avoid any redundant
+ * construction costs, but that is not currently the case.)
+ * Although you should consult the GlodaCollectionManager for details, the
+ * general idea is that we have 'collections' which represent views of the
+ * database (based on a query) which use a single mechanism for double duty.
+ * The collections are registered with the collection manager via weak
+ * reference. The first 'duty' is that since the collections may be desired
+ * to be 'live views' of the data, we want them to update as changes occur.
+ * The weak reference allows the collection manager to track the 'live'
+ * collections and update them. The second 'duty' is the caching/object
+ * identity duty. In theory, every live item should be referenced by at least
+ * one collection, making it reachable for object identity/caching purposes.
+ * There is also an explicit (inclusive) caching layer present to both try and
+ * avoid poor performance from some of the costs of this strategy, as well as
+ * to try and keep track of objects that are being worked with that are not
+ * (yet) tracked by a collection. Using a size-bounded cache is clearly not
+ * a guarantee of correctness for this, but is suspected will work quite well.
+ * (Well enough to be dangerous because the inevitable failure case will not be
+ * expected.)
+ *
+ * The current strategy may not be the optimal one, feel free to propose and/or
+ * implement better ones, especially if you have numbers.
+ * The current strategy is not fully implemented in this file, but the common
+ * cases are believed to be covered. (Namely, we fail to purge items from the
+ * cache as they are purged from the database.)
+ *
+ * === Things That May Not Be Obvious (Gotchas)
+ *
+ * Although the schema includes "triggers", they are currently not used
+ * and were added when thinking about implementing the feature. We will
+ * probably implement this feature at some point, which is why they are still
+ * in there.
+ *
+ * We, and the layers above us, are not sufficiently thorough at cleaning out
+ * data from the database, and may potentially orphan it _as new functionality
+ * is added in the future at layers above us_. That is, currently we should
+ * not be leaking database rows, but we may in the future. This is because
+ * we/the layers above us lack a mechanism to track dependencies based on
+ * attributes. Say a plugin exists that extracts recipes from messages and
+ * relates them via an attribute. To do so, it must create new recipe rows
+ * in its own table as new recipes are discovered. No automatic mechanism
+ * will purge recipes as their source messages are purged, nor does any
+ * event-driven mechanism explicitly inform the plugin. (It could infer
+ * such an event from the indexing/attribute-providing process, or poll the
+ * states of attributes to accomplish this, but that is not desirable.) This
+ * needs to be addressed, and may be best addressed at layers above
+ * GlodaDatastore.jsm.
+ *
+ * @namespace
+ */
+var GlodaDatastore = {
+ _log: null,
+
+ /* ******************* SCHEMA ******************* */
+
+ /**
+ * Schema version policy. IMPORTANT! We expect the following potential things
+ * to happen in the life of gloda that can impact our schema and the ability
+ * to move between different versions of Thunderbird:
+ *
+ * - Fundamental changes to the schema so that two versions of Thunderbird
+ * cannot use the same global database. To wit, Thunderbird N+1 needs to
+ * blow away the database of Thunderbird N and reindex from scratch.
+ * Likewise, Thunderbird N will need to blow away Thunderbird N+1's
+ * database because it can't understand it. And we can't simply use a
+ * different file because there would be fatal bookkeeping losses.
+ *
+ * - Bidirectional minor schema changes (rare).
+ * Thunderbird N+1 does something that does not affect Thunderbird N's use
+ * of the database, and a user switching back to Thunderbird N will not be
+ * negatively impacted. It will also be fine when they go back to N+1 and
+ * N+1 will not be missing any vital data. The historic example of this is
+ * when we added a missing index that was important for performance. In
+ * that case, Thunderbird N could have potentially left the schema revision
+ * intact (if there was a safe revision), rather than swapping it on the
+ * downgrade, compelling N+1 to redo the transform on upgrade.
+ *
+ * - Backwards compatible, upgrade-transition minor schema changes.
+ * Thunderbird N+1 does something that does not require nuking the
+ * database / a full re-index, but does require processing on upgrade from
+ * a version of the database previously used by Thunderbird. These changes
+ * do not impact N's ability to use the database. For example, adding a
+ * new indexed attribute that affects a small number of messages could be
+ * handled by issuing a query on upgrade to dirty/index those messages.
+ * However, if the user goes back to N from N+1, when they upgrade to N+1
+ * again, we need to re-index. In this case N would need to have downgrade
+ * the schema revision.
+ *
+ * - Backwards incompatible, minor schema changes.
+ * Thunderbird N+1 does something that does not require nuking the database
+ * but will break Thunderbird N's ability to use the database.
+ *
+ * - Regression fixes. Sometimes we may land something that screws up
+ * databases, or the platform changes in a way that breaks our code and we
+ * had insufficient unit test coverage and so don't detect it until some
+ * databases have gotten messed up.
+ *
+ * Accordingly, every version of Thunderbird has a concept of potential schema
+ * versions with associated semantics to prepare for the minor schema upgrade
   * cases where inter-op is possible. These ranges and their semantics are:
+ * - accepts and leaves intact. Covers:
+ * - regression fixes that no longer exist with the landing of the upgrade
+ * code as long as users never go back a build in the given channel.
+ * - bidirectional minor schema changes.
+ * - accepts but downgrades version to self. Covers:
+ * - backwards compatible, upgrade-transition minor schema changes.
+ * - nuke range (anything beyond a specific revision needs to be nuked):
   *   - backwards incompatible, minor schema changes
+ * - fundamental changes
+ *
+ *
+ * SO, YOU WANT TO CHANGE THE SCHEMA?
+ *
+ * Use the ranges below for Thunderbird 11 as a guide, bumping things as little
+ * as possible. If we start to use up the "accepts and leaves intact" range
+ * without majorly changing things up, re-do the numbering acceptance range
+ * to give us additional runway.
+ *
+ * Also, if we keep needing non-nuking upgrades, consider adding an additional
+ * table to the database that can tell older versions of Thunderbird what to
+ * do when confronted with a newer database and where it can set flags to tell
+ * the newer Thunderbird what the older Thunderbird got up to. For example,
+ * it would be much easier if we just tell Thunderbird N what to do when it's
+ * confronted with the database.
+ *
+ *
+ * CURRENT STATE OF THE MIGRATION LOGIC:
+ *
+ * Thunderbird 11: uses 30 (regression fix from 26)
+ * - accepts and leaves intact: 31-34
+ * - accepts and downgrades by 5: 35-39
+ * - nukes: 40+
+ */
+ _schemaVersion: 30,
+ // what is the schema in the database right now?
+ _actualSchemaVersion: 0,
+ _schema: {
+ tables: {
+ // ----- Messages
+ folderLocations: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["folderURI", "TEXT NOT NULL"],
+ ["dirtyStatus", "INTEGER NOT NULL"],
+ ["name", "TEXT NOT NULL"],
+ ["indexingPriority", "INTEGER NOT NULL"],
+ ],
+
+ triggers: {
+ delete: "DELETE from messages WHERE folderID = OLD.id",
+ },
+ },
+
+ conversations: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["subject", "TEXT NOT NULL"],
+ ["oldestMessageDate", "INTEGER"],
+ ["newestMessageDate", "INTEGER"],
+ ],
+
+ indices: {
+ subject: ["subject"],
+ oldestMessageDate: ["oldestMessageDate"],
+ newestMessageDate: ["newestMessageDate"],
+ },
+
+ fulltextColumns: [["subject", "TEXT"]],
+
+ triggers: {
+ delete: "DELETE from messages WHERE conversationID = OLD.id",
+ },
+ },
+
+ /**
       * A message record corresponds to an actual message stored in a folder
+ * somewhere, or is a ghost record indicating a message that we know
+ * should exist, but which we have not seen (and which we may never see).
+ * We represent these ghost messages by storing NULL values in the
+ * folderID and messageKey fields; this may need to change to other
+ * sentinel values if this somehow impacts performance.
+ */
+ messages: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["folderID", "INTEGER"],
+ ["messageKey", "INTEGER"],
+ // conversationID used to have a REFERENCES but I'm losing it for
+ // presumed performance reasons and it doesn't do anything for us.
+ ["conversationID", "INTEGER NOT NULL"],
+ ["date", "INTEGER"],
+ // we used to have the parentID, but because of the very real
+ // possibility of multiple copies of a message with a given
+ // message-id, the parentID concept is unreliable.
+ ["headerMessageID", "TEXT"],
+ ["deleted", "INTEGER NOT NULL default 0"],
+ ["jsonAttributes", "TEXT"],
+ // Notability attempts to capture the static 'interestingness' of a
+ // message as a result of being starred/flagged, labeled, read
+ // multiple times, authored by someone in your address book or that
+ // you converse with a lot, etc.
+ ["notability", "INTEGER NOT NULL default 0"],
+ ],
+
+ indices: {
+ messageLocation: ["folderID", "messageKey"],
+ headerMessageID: ["headerMessageID"],
+ conversationID: ["conversationID"],
+ date: ["date"],
+ deleted: ["deleted"],
+ },
+
+ // note: if reordering the columns, you need to change this file's
+ // row-loading logic, GlodaMsgSearcher.jsm's ranking usages and also the
+ // column saturations in nsGlodaRankerFunction
+ fulltextColumns: [
+ ["body", "TEXT"],
+ ["subject", "TEXT"],
+ ["attachmentNames", "TEXT"],
+ ["author", "TEXT"],
+ ["recipients", "TEXT"],
+ ],
+
+ triggers: {
+ delete: "DELETE FROM messageAttributes WHERE messageID = OLD.id",
+ },
+ },
+
+ // ----- Attributes
+ attributeDefinitions: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["attributeType", "INTEGER NOT NULL"],
+ ["extensionName", "TEXT NOT NULL"],
+ ["name", "TEXT NOT NULL"],
+ ["parameter", "BLOB"],
+ ],
+
+ triggers: {
+ delete: "DELETE FROM messageAttributes WHERE attributeID = OLD.id",
+ },
+ },
+
+ messageAttributes: {
+ columns: [
+ // conversationID and messageID used to have REFERENCES back to their
+ // appropriate types. I removed it when removing attributeID for
+ // better reasons and because the code is not capable of violating
+ // this constraint, so the check is just added cost. (And we have
+ // unit tests that sanity check my assertions.)
+ ["conversationID", "INTEGER NOT NULL"],
+ ["messageID", "INTEGER NOT NULL"],
+ // This used to be REFERENCES attributeDefinitions(id) but then we
+ // introduced sentinel values and it's hard to justify the effort
+ // to compel injection of the record or the overhead to do the
+ // references checking.
+ ["attributeID", "INTEGER NOT NULL"],
+ ["value", "NUMERIC"],
+ ],
+
+ indices: {
+ attribQuery: [
+ "attributeID",
+ "value",
+ /* covering: */ "conversationID",
+ "messageID",
+ ],
+ // This is required for deletion of a message's attributes to be
+ // performant. We could optimize this index away if we changed our
+ // deletion logic to issue specific attribute deletions based on the
+ // information it already has available in the message's JSON blob.
+ // The rub there is that if we screwed up we could end up leaking
+ // attributes and there is a non-trivial performance overhead to
+ // the many requests it would cause (which can also be reduced in
+ // the future by changing our SQL dispatch code.)
+ messageAttribFastDeletion: ["messageID"],
+ },
+ },
+
+ // ----- Contacts / Identities
+
+ /**
+ * Corresponds to a human being and roughly to an address book entry.
+ * Contrast with an identity, which is a specific e-mail address, IRC
+ * nick, etc. Identities belong to contacts, and this relationship is
+ * expressed on the identityAttributes table.
+ */
+ contacts: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["directoryUUID", "TEXT"],
+ ["contactUUID", "TEXT"],
+ ["popularity", "INTEGER"],
+ ["frecency", "INTEGER"],
+ ["name", "TEXT"],
+ ["jsonAttributes", "TEXT"],
+ ],
+ indices: {
+ popularity: ["popularity"],
+ frecency: ["frecency"],
+ },
+ },
+
+ contactAttributes: {
+ columns: [
+ ["contactID", "INTEGER NOT NULL"],
+ ["attributeID", "INTEGER NOT NULL"],
+ ["value", "NUMERIC"],
+ ],
+ indices: {
+ contactAttribQuery: [
+ "attributeID",
+ "value",
+ /* covering: */ "contactID",
+ ],
+ },
+ },
+
+ /**
+ * Identities correspond to specific e-mail addresses, IRC nicks, etc.
+ */
+ identities: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["contactID", "INTEGER NOT NULL"],
+ ["kind", "TEXT NOT NULL"], // ex: email, irc, etc.
+ ["value", "TEXT NOT NULL"], // ex: e-mail address, irc nick/handle...
+ ["description", "NOT NULL"], // what makes this identity different
+ // from the others? (ex: home, work, etc.)
+ ["relay", "INTEGER NOT NULL"], // is the identity just a relay
+ // mechanism? (ex: mailing list, twitter 'bouncer', IRC gateway, etc.)
+ ],
+
+ indices: {
+ contactQuery: ["contactID"],
+ valueQuery: ["kind", "value"],
+ },
+ },
+ },
+ },
+
+ /* ******************* LOGIC ******************* */
+ /**
+ * We only have one connection; this name exists for legacy reasons but helps
+ * track when we are intentionally doing synchronous things during startup.
+ * We do nothing synchronous once our setup has completed.
+ */
+ syncConnection: null,
+ /**
+ * We only have one connection and we only do asynchronous things after setup;
+ * this name still exists mainly for legacy reasons.
+ */
+ asyncConnection: null,
+
+ /**
+ * Our "mailnews.database.global.datastore." preferences branch for debug
+ * notification handling. We register as an observer against this.
+ */
+ _prefBranch: null,
+
+ /**
+ * The unique ID assigned to an index when it has been built. This value
+ * changes once the index has been rebuilt.
+ */
+ _datastoreID: null,
+
  /**
   * Initialize logging, create the database if it doesn't exist, "upgrade" it
   * if it does and it's not up-to-date, fill our authoritative folder uri/id
   * mapping.
   *
   * @param aNounIDToDef Map from noun ID to noun definition, stored on
   *     this._nounIDToDef for later row-to-object resolution.
   */
  _init(aNounIDToDef) {
    this._log = console.createInstance({
      prefix: "gloda.datastore",
      maxLogLevel: "Warn",
      maxLogLevelPref: "gloda.loglevel",
    });
    this._log.debug("Beginning datastore initialization.");

    this._nounIDToDef = aNounIDToDef;

    let branch = Services.prefs.getBranch(
      "mailnews.database.global.datastore."
    );
    this._prefBranch = branch;

    // Not sure the weak reference really makes a difference given that we are a
    // GC root.
    branch.addObserver("", this);
    // claim the pref changed so we can centralize our logic there.
    // (This routes through observe() to install/remove the EXPLAIN-logging
    // statement path based on the "explainToPath" pref.)
    this.observe(null, "nsPref:changed", "explainToPath");

    // Get the path to our global database
    var dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
    dbFile.append("global-messages-db.sqlite");

    var dbConnection;

    // Report about the size of the database through telemetry (if there's a
    // database, naturally).
    if (dbFile.exists()) {
      try {
        let h = Services.telemetry.getHistogramById(
          "THUNDERBIRD_GLODA_SIZE_MB"
        );
        // fileSize is in bytes; convert to MiB for the histogram.
        h.add(dbFile.fileSize / 1048576);
      } catch (e) {
        // Telemetry failure must never block datastore startup.
        this._log.warn("Couldn't report telemetry", e);
      }
    }

    // Create the file if it does not exist
    if (!dbFile.exists()) {
      this._log.debug("Creating database because it doesn't exist.");
      dbConnection = this._createDB(dbFile);
    } else {
      // It does exist, but we (someday) might need to upgrade the schema
      // (Exceptions may be thrown if the database is corrupt)
      try {
        dbConnection = Services.storage.openUnsharedDatabase(dbFile);
        let cacheSize = this._determineCachePages(dbConnection);
        // see _createDB...
        dbConnection.executeSimpleSQL("PRAGMA cache_size = " + cacheSize);
        dbConnection.executeSimpleSQL("PRAGMA synchronous = FULL");

        // Register custom tokenizer to index all language text
        var tokenizer = Cc["@mozilla.org/messenger/fts3tokenizer;1"].getService(
          Ci.nsIFts3Tokenizer
        );
        tokenizer.registerTokenizer(dbConnection);

        // -- database schema changes
        // See the _schemaVersion doc comment for the version-range policy
        // applied below.
        let dbSchemaVersion = (this._actualSchemaVersion =
          dbConnection.schemaVersion);
        // - database from the future!
        if (dbSchemaVersion > this._schemaVersion) {
          if (
            dbSchemaVersion >= DB_SCHEMA_ACCEPT_LEAVE_LOW &&
            dbSchemaVersion <= DB_SCHEMA_ACCEPT_LEAVE_HIGH
          ) {
            this._log.debug(
              "db from the future in acceptable range; leaving " +
                "version at: " +
                dbSchemaVersion
            );
          } else if (
            dbSchemaVersion >= DB_SCHEMA_ACCEPT_DOWNGRADE_LOW &&
            dbSchemaVersion <= DB_SCHEMA_ACCEPT_DOWNGRADE_HIGH
          ) {
            let newVersion = dbSchemaVersion - DB_SCHEMA_DOWNGRADE_DELTA;
            this._log.debug(
              "db from the future in downgrade range; setting " +
                "version to " +
                newVersion +
                " down from " +
                dbSchemaVersion
            );
            dbConnection.schemaVersion = this._actualSchemaVersion = newVersion;
          } else {
            // too far from the future, nuke it.
            dbConnection = this._nukeMigration(dbFile, dbConnection);
          }
        } else if (dbSchemaVersion < this._schemaVersion) {
          // - database from the past! migrate it, possibly.
          this._log.debug(
            "Need to migrate database. (DB version: " +
              this._actualSchemaVersion +
              " desired version: " +
              this._schemaVersion
          );
          dbConnection = this._migrate(
            dbFile,
            dbConnection,
            this._actualSchemaVersion,
            this._schemaVersion
          );
          this._log.debug("Migration call completed.");
        }
        // else: this database is juuust right.

        // If we never had a datastore ID, make sure to create one now.
        if (!this._prefBranch.prefHasUserValue("id")) {
          this._datastoreID = this._generateDatastoreID();
          this._prefBranch.setCharPref("id", this._datastoreID);
        } else {
          this._datastoreID = this._prefBranch.getCharPref("id");
        }
      } catch (ex) {
        // Handle corrupt databases, other oddities
        if (ex.result == Cr.NS_ERROR_FILE_CORRUPTED) {
          // A corrupt gloda database is recoverable: delete it and rebuild
          // the index from scratch rather than failing startup.
          this._log.warn("Database was corrupt, removing the old one.");
          dbFile.remove(false);
          this._log.warn("Removed old database, creating a new one.");
          dbConnection = this._createDB(dbFile);
        } else {
          this._log.error(
            "Unexpected error when trying to open the database:",
            ex
          );
          throw ex;
        }
      }
    }

    // Both names refer to the same single connection; the split exists for
    // legacy reasons (see their doc comments above).
    this.syncConnection = dbConnection;
    this.asyncConnection = dbConnection;

    this._log.debug("Initializing folder mappings.");
    this._getAllFolderMappings();
    // we need to figure out the next id's for all of the tables where we
    // manage that.
    this._log.debug("Populating managed id counters.");
    this._populateAttributeDefManagedId();
    this._populateConversationManagedId();
    this._populateMessageManagedId();
    this._populateContactManagedId();
    this._populateIdentityManagedId();

    this._log.debug("Completed datastore initialization.");
  },
+
+ observe(aSubject, aTopic, aData) {
+ if (aTopic != "nsPref:changed") {
+ return;
+ }
+
+ if (aData == "explainToPath") {
+ let explainToPath = null;
+ try {
+ explainToPath = this._prefBranch.getCharPref("explainToPath");
+ if (explainToPath.trim() == "") {
+ explainToPath = null;
+ }
+ } catch (ex) {
+ // don't care if the pref is not there.
+ }
+
+ // It is conceivable that the name is changing and this isn't a boolean
+ // toggle, so always clean out the explain processor.
+ if (this._explainProcessor) {
+ this._explainProcessor.shutdown();
+ this._explainProcessor = null;
+ }
+
+ if (explainToPath) {
+ this._createAsyncStatement = this._createExplainedAsyncStatement;
+ this._explainProcessor = new ExplainedStatementProcessor(explainToPath);
+ } else {
+ this._createAsyncStatement = this._realCreateAsyncStatement;
+ }
+ }
+ },
+
  // True once shutdown() has run; other code can check this to avoid touching
  // the (now closed/closing) connection.
  datastoreIsShutdown: false,

  /**
   * Perform datastore shutdown: commit any open transaction, mark the
   * datastore shut down, finalize all cached statements, and asynchronously
   * close the connection (which drains any still-pending async statements).
   */
  shutdown() {
    // Clear out any pending transaction by committing it.
    // The indexer has been shutdown by this point; it no longer has any active
    // indexing logic and it no longer has active event listeners capable of
    // generating new activity.
    // Semantic consistency of the database is guaranteed by the indexer's
    // strategy of only yielding control at coherent times. Although it takes
    // multiple calls and multiple SQL operations to update the state of our
    // database representations, the generator does not yield until it has
    // issued all the database statements required for said update. As such,
    // this commit will leave us in a good way (and the commit will happen
    // because closing the connection will drain the async execution queue.)
    while (this._transactionDepth) {
      this._log.info("Closing pending transaction out for shutdown.");
      // just schedule this function to be run again once the transaction has
      // been closed out.
      this._commitTransaction();
    }

    this.datastoreIsShutdown = true;

    this._log.info("Closing db connection");

    // we do not expect exceptions, but it's a good idea to avoid having our
    // shutdown process explode.
    try {
      this._cleanupAsyncStatements();
      this._cleanupSyncStatements();
    } catch (ex) {
      this._log.debug("Unexpected exception during statement cleanup: " + ex);
    }

    // it's conceivable we might get a spurious exception here, but we really
    // shouldn't get one. again, we want to ensure shutdown runs to completion
    // and doesn't break our caller.
    try {
      // This currently causes all pending asynchronous operations to be run to
      // completion. this simplifies things from a correctness perspective,
      // and, honestly, is a lot easier than us tracking all of the async
      // event tasks so that we can explicitly cancel them.
      // This is a reasonable thing to do because we don't actually ever have
      // a huge number of statements outstanding. The indexing process needs
      // to issue async requests periodically, so the most we have in-flight
      // from a write perspective is strictly less than the work required to
      // update the database state for a single message.
      // However, the potential for multiple pending expensive queries does
      // exist, and it may be advisable to attempt to track and cancel those.
      // For simplicity we don't currently do this, and I expect this should
      // not pose a major problem, but those are famous last words.
      // Note: asyncClose does not spin a nested event loop, but the thread
      // manager shutdown code will spin the async thread's event loop, so it
      // nets out to be the same.
      this.asyncConnection.asyncClose();
    } catch (ex) {
      this._log.debug(
        "Potentially expected exception during connection closure: " + ex
      );
    }

    this.asyncConnection = null;
    this.syncConnection = null;
  },
+
+ /**
+ * Generates and returns a UUID.
+ *
+ * @returns a UUID as a string, ex: "c4dd0159-9287-480f-a648-a4613e147fdb"
+ */
+ _generateDatastoreID() {
+ let uuid = Services.uuid.generateUUID().toString();
+ // We snip off the { and } from each end of the UUID.
+ return uuid.substring(1, uuid.length - 2);
+ },
+
  /**
   * Compute how many SQLite cache pages to request for a connection: the
   * "cache_to_memory_permillage" pref (clamped to [0, 50] per-mille) of
   * physical memory, with the resulting byte count clamped to
   * [MIN_CACHE_SIZE, MAX_CACHE_SIZE] and divided by the database's page size.
   *
   * @param aDBConn An open mozIStorage connection, queried for its page size.
   * @returns {number} The number of cache pages, or the fallback 1000 if any
   *     step fails (missing pref, sysinfo failure, etc.).
   */
  _determineCachePages(aDBConn) {
    try {
      // For the details of the computations, one should read
      // nsNavHistory::InitDB. We're slightly diverging from them in the sense
      // that we won't allow gloda to use insane amounts of memory cache, and
      // we start with 1% instead of 6% like them.
      let pageStmt = aDBConn.createStatement("PRAGMA page_size");
      pageStmt.executeStep();
      let pageSize = pageStmt.row.page_size;
      pageStmt.finalize();
      let cachePermillage = this._prefBranch.getIntPref(
        "cache_to_memory_permillage"
      );
      // Clamp the pref to a sane [0, 50] per-mille range.
      cachePermillage = Math.min(cachePermillage, 50);
      cachePermillage = Math.max(cachePermillage, 0);
      let physMem = Services.sysinfo.getPropertyAsInt64("memsize");
      if (physMem == 0) {
        // "memsize" can be unavailable; fall back to a constant defined
        // elsewhere in this file.
        physMem = MEMSIZE_FALLBACK_BYTES;
      }
      let cacheSize = Math.round((physMem * cachePermillage) / 1000);
      cacheSize = Math.max(cacheSize, MIN_CACHE_SIZE);
      cacheSize = Math.min(cacheSize, MAX_CACHE_SIZE);
      let cachePages = Math.round(cacheSize / pageSize);
      return cachePages;
    } catch (ex) {
      this._log.warn("Error determining cache size: " + ex);
      // A little bit lower than on my personal machine, will result in ~40M.
      return 1000;
    }
  },
+
+  /**
+   * Create our database; basically a wrapper around _createSchema.
+   *
+   * @param aDBFile The nsIFile at which the database should be created.
+   * @returns The newly opened connection, with pragmas applied, the fulltext
+   *     tokenizer registered, and the full schema created in a transaction.
+   */
+  _createDB(aDBFile) {
+    var dbConnection = Services.storage.openUnsharedDatabase(aDBFile);
+    // We now follow the Firefox strategy for places, which mainly consists in
+    // picking a default 32k page size, and then figuring out the amount of
+    // cache accordingly. The default 32k come from mozilla/toolkit/storage,
+    // but let's get it directly from sqlite in case they change it.
+    let cachePages = this._determineCachePages(dbConnection);
+    // This is a maximum number of pages to be used. If the database does not
+    // get this large, then the memory does not get used.
+    // Do not forget to update the code in _init if you change this value.
+    dbConnection.executeSimpleSQL("PRAGMA cache_size = " + cachePages);
+    // The mozStorage default is NORMAL which shaves off some fsyncs in the
+    // interest of performance. Since everything we do after bootstrap is
+    // async, we do not care about the performance, but we really want the
+    // correctness. Bug reports and support avenues indicate a non-zero number
+    // of corrupt databases. Note that this may not fix everything; OS X
+    // also supports an F_FULLSYNC flag enabled by PRAGMA fullfsync that we are
+    // not enabling that is much more comprehensive. We can think about
+    // turning that on after we've seen how this reduces our corruption count.
+    dbConnection.executeSimpleSQL("PRAGMA synchronous = FULL");
+    // Register custom tokenizer to index all language text
+    var tokenizer = Cc["@mozilla.org/messenger/fts3tokenizer;1"].getService(
+      Ci.nsIFts3Tokenizer
+    );
+    tokenizer.registerTokenizer(dbConnection);
+
+    // We're creating a new database, so let's generate a new ID for this
+    // version of the datastore. This way, indexers can know when the index
+    // has been rebuilt in the event that they need to rebuild dependent data.
+    this._datastoreID = this._generateDatastoreID();
+    this._prefBranch.setCharPref("id", this._datastoreID);
+
+    // Create every table inside one transaction so a half-built schema never
+    // reaches disk; on failure we roll back and rethrow to the caller.
+    dbConnection.beginTransaction();
+    try {
+      this._createSchema(dbConnection);
+      dbConnection.commitTransaction();
+    } catch (ex) {
+      dbConnection.rollbackTransaction();
+      throw ex;
+    }
+
+    return dbConnection;
+  },
+
+ _createTableSchema(aDBConnection, aTableName, aTableDef) {
+ // - Create the table
+ this._log.info("Creating table: " + aTableName);
+ let columnDefs = [];
+ for (let [column, type] of aTableDef.columns) {
+ columnDefs.push(column + " " + type);
+ }
+ aDBConnection.createTable(aTableName, columnDefs.join(", "));
+
+ // - Create the fulltext table if applicable
+ if (aTableDef.fulltextColumns) {
+ let columnDefs = [];
+ for (let [column, type] of aTableDef.fulltextColumns) {
+ columnDefs.push(column + " " + type);
+ }
+ let createFulltextSQL =
+ "CREATE VIRTUAL TABLE " +
+ aTableName +
+ "Text" +
+ " USING fts3(tokenize mozporter, " +
+ columnDefs.join(", ") +
+ ")";
+ this._log.info("Creating fulltext table: " + createFulltextSQL);
+ aDBConnection.executeSimpleSQL(createFulltextSQL);
+ }
+
+ // - Create its indices
+ if (aTableDef.indices) {
+ for (let indexName in aTableDef.indices) {
+ let indexColumns = aTableDef.indices[indexName];
+ aDBConnection.executeSimpleSQL(
+ "CREATE INDEX " +
+ indexName +
+ " ON " +
+ aTableName +
+ "(" +
+ indexColumns.join(", ") +
+ ")"
+ );
+ }
+ }
+
+ // - Create the attributes table if applicable
+ if (aTableDef.genericAttributes) {
+ aTableDef.genericAttributes = {
+ columns: [
+ ["nounID", "INTEGER NOT NULL"],
+ ["attributeID", "INTEGER NOT NULL"],
+ ["value", "NUMERIC"],
+ ],
+ indices: {},
+ };
+ aTableDef.genericAttributes.indices[aTableName + "AttribQuery"] = [
+ "attributeID",
+ "value",
+ /* covering: */ "nounID",
+ ];
+ // let's use this very function! (since we created genericAttributes,
+ // explodey recursion is avoided.)
+ this._createTableSchema(
+ aDBConnection,
+ aTableName + "Attributes",
+ aTableDef.genericAttributes
+ );
+ }
+ },
+
+ /**
+ * Create our database schema assuming a newly created database. This
+ * comes down to creating normal tables, their full-text variants (if
+ * applicable), and their indices.
+ */
+ _createSchema(aDBConnection) {
+ // -- For each table...
+ for (let tableName in this._schema.tables) {
+ let tableDef = this._schema.tables[tableName];
+ this._createTableSchema(aDBConnection, tableName, tableDef);
+ }
+
+ aDBConnection.schemaVersion = this._actualSchemaVersion =
+ this._schemaVersion;
+ },
+
+ /**
+ * Create a table for a noun, replete with data binding.
+ */
+ createNounTable(aNounDef) {
+ // give it a _jsonText attribute if appropriate...
+ if (aNounDef.allowsArbitraryAttrs) {
+ aNounDef.schema.columns.push(["jsonAttributes", "STRING", "_jsonText"]);
+ }
+ // check if the table exists
+ if (!this.asyncConnection.tableExists(aNounDef.tableName)) {
+ // it doesn't! create it (and its potentially many variants)
+ try {
+ this._createTableSchema(
+ this.asyncConnection,
+ aNounDef.tableName,
+ aNounDef.schema
+ );
+ } catch (ex) {
+ this._log.error(
+ "Problem creating table " +
+ aNounDef.tableName +
+ " " +
+ "because: " +
+ ex +
+ " at " +
+ ex.fileName +
+ ":" +
+ ex.lineNumber
+ );
+ return;
+ }
+ }
+
+ aNounDef._dataBinder = new GlodaDatabind(aNounDef, this);
+ aNounDef.datastore = aNounDef._dataBinder;
+ aNounDef.objFromRow = aNounDef._dataBinder.objFromRow;
+ aNounDef.objInsert = aNounDef._dataBinder.objInsert;
+ aNounDef.objUpdate = aNounDef._dataBinder.objUpdate;
+ aNounDef.dbAttribAdjuster = aNounDef._dataBinder.adjustAttributes;
+
+ if (aNounDef.schema.genericAttributes) {
+ aNounDef.attrTableName = aNounDef.tableName + "Attributes";
+ aNounDef.attrIDColumnName = "nounID";
+ }
+ },
+
+ _nukeMigration(aDBFile, aDBConnection) {
+ aDBConnection.close();
+ aDBFile.remove(false);
+ this._log.warn(
+ "Global database has been purged due to schema change. " +
+ "old version was " +
+ this._actualSchemaVersion +
+ ", new version is: " +
+ this._schemaVersion
+ );
+ return this._createDB(aDBFile);
+ },
+
+  /**
+   * Migrate the database _to the latest version_ from an older version. We
+   * only keep enough logic around to get us to the recent version. This code
+   * is not a time machine! If we need to blow away the database to get to the
+   * most recent version, then that's the sum total of the migration!
+   *
+   * @param aDBFile The database file, needed if we have to nuke and recreate.
+   * @param aDBConnection The currently open connection.
+   * @param aCurVersion The schema version found on disk.
+   * @param aNewVersion The schema version we want to reach.
+   * @returns A connection at (or being brought to) the latest schema version;
+   *     either aDBConnection itself or a freshly created replacement.
+   */
+  _migrate(aDBFile, aDBConnection, aCurVersion, aNewVersion) {
+    // version 12:
+    // - notability column added
+    // version 13:
+    // - we are adding a new fulltext index column. blow away!
+    // - note that I screwed up and failed to mark the schema change; apparently
+    //   no database will claim to be version 13...
+    // version 14ish, still labeled 13?:
+    // - new attributes: forwarded, repliedTo, bcc, recipients
+    // - altered fromMeTo and fromMeCc to fromMe
+    // - altered toMe and ccMe to just be toMe
+    // - exposes bcc to cc-related attributes
+    // - MIME type DB schema overhaul
+    // version 15ish, still labeled 13:
+    // - change tokenizer to mozporter to support CJK
+    //   (We are slip-streaming this so that only people who want to test CJK
+    //   have to test it. We will properly bump the schema revision when the
+    //   gloda correctness patch lands.)
+    // version 16ish, labeled 14 and now 16
+    // - gloda message id's start from 32 now
+    // - all kinds of correctness changes (blow away)
+    // version 17
+    // - more correctness fixes. (blow away)
+    // version 18
+    // - significant empty set support (blow away)
+    // version 19
+    // - there was a typo that was resulting in deleted getting set to the
+    //   numeric value of the javascript undefined value. (migrate-able)
+    // version 20
+    // - tokenizer changes to provide for case/accent-folding. (blow away)
+    // version 21
+    // - add the messagesAttribFastDeletion index we thought was already covered
+    //   by an index we removed a while ago (migrate-able)
+    // version 26
+    // - bump page size and also cache size (blow away)
+    // version 30
+    // - recover from bug 732372 that affected TB 11 beta / TB 12 alpha / TB 13
+    //   trunk. The fix is bug 734507. The revision bump happens
+    //   asynchronously. (migrate-able)
+
+    // nuke if prior to 26
+    if (aCurVersion < 26) {
+      return this._nukeMigration(aDBFile, aDBConnection);
+    }
+
+    // They must be desiring our "a.contact is undefined" fix!
+    // This fix runs asynchronously as the first indexing job the indexer ever
+    // performs. It is scheduled by the enabling of the message indexer and
+    // it is the one that updates the schema version when done.
+
+    // return the same DB connection since we didn't create a new one or do
+    // anything.
+    return aDBConnection;
+  },
+
+ /**
+ * Asynchronously update the schema version; only for use by in-tree callers
+ * who asynchronously perform migration work triggered by their initial
+ * indexing sweep and who have properly updated the schema version in all
+ * the appropriate locations in this file.
+ *
+ * This is done without doing anything about the current transaction state,
+ * which is desired.
+ */
+ _updateSchemaVersion(newSchemaVersion) {
+ this._actualSchemaVersion = newSchemaVersion;
+ let stmt = this._createAsyncStatement(
+ // we need to concat; pragmas don't like "?1" binds
+ "PRAGMA user_version = " + newSchemaVersion,
+ true
+ );
+ stmt.executeAsync(this.trackAsync());
+ stmt.finalize();
+ },
+
+  // Async statements created without aWillFinalize; finalized en masse by
+  // _cleanupAsyncStatements during shutdown.
+  _outstandingAsyncStatements: [],
+
+  /**
+   * Unless debugging, this is just _realCreateAsyncStatement, but in some
+   * debugging modes this is instead the helpful wrapper
+   * _createExplainedAsyncStatement.
+   */
+  _createAsyncStatement: null,
+
+ _realCreateAsyncStatement(aSQLString, aWillFinalize) {
+ let statement = null;
+ try {
+ statement = this.asyncConnection.createAsyncStatement(aSQLString);
+ } catch (ex) {
+ throw new Error(
+ "error creating async statement " +
+ aSQLString +
+ " - " +
+ this.asyncConnection.lastError +
+ ": " +
+ this.asyncConnection.lastErrorString +
+ " - " +
+ ex
+ );
+ }
+
+ if (!aWillFinalize) {
+ this._outstandingAsyncStatements.push(statement);
+ }
+
+ return statement;
+ },
+
+ /**
+ * The ExplainedStatementProcessor instance used by
+ * _createExplainedAsyncStatement. This will be null if
+ * _createExplainedAsyncStatement is not being used as _createAsyncStatement.
+ */
+ _explainProcessor: null,
+
+ /**
+ * Wrapped version of _createAsyncStatement that EXPLAINs the statement. When
+ * used this decorates _createAsyncStatement, in which case we are found at
+ * that name and the original is at _orig_createAsyncStatement. This is
+ * controlled by the explainToPath preference (see |_init|).
+ */
+ _createExplainedAsyncStatement(aSQLString, aWillFinalize) {
+ let realStatement = this._realCreateAsyncStatement(
+ aSQLString,
+ aWillFinalize
+ );
+ // don't wrap transaction control statements.
+ if (
+ aSQLString == "COMMIT" ||
+ aSQLString == "BEGIN TRANSACTION" ||
+ aSQLString == "ROLLBACK"
+ ) {
+ return realStatement;
+ }
+
+ let explainSQL = "EXPLAIN " + aSQLString;
+ let explainStatement = this._realCreateAsyncStatement(explainSQL);
+
+ return new ExplainedStatementWrapper(
+ realStatement,
+ explainStatement,
+ aSQLString,
+ this._explainProcessor
+ );
+ },
+
+ _cleanupAsyncStatements() {
+ this._outstandingAsyncStatements.forEach(stmt => stmt.finalize());
+ },
+
+ _outstandingSyncStatements: [],
+
+ _createSyncStatement(aSQLString, aWillFinalize) {
+ let statement = null;
+ try {
+ statement = this.syncConnection.createStatement(aSQLString);
+ } catch (ex) {
+ throw new Error(
+ "error creating sync statement " +
+ aSQLString +
+ " - " +
+ this.syncConnection.lastError +
+ ": " +
+ this.syncConnection.lastErrorString +
+ " - " +
+ ex
+ );
+ }
+
+ if (!aWillFinalize) {
+ this._outstandingSyncStatements.push(statement);
+ }
+
+ return statement;
+ },
+
+ _cleanupSyncStatements() {
+ this._outstandingSyncStatements.forEach(stmt => stmt.finalize());
+ },
+
+ /**
+ * Perform a synchronous executeStep on the statement, handling any
+ * SQLITE_BUSY fallout that could conceivably happen from a collision on our
+ * read with the async writes.
+ * Basically we keep trying until we succeed or run out of tries.
+ * We believe this to be a reasonable course of action because we don't
+ * expect this to happen much.
+ */
+ _syncStep(aStatement) {
+ let tries = 0;
+ while (tries < 32000) {
+ try {
+ return aStatement.executeStep();
+ } catch (e) {
+ // SQLITE_BUSY becomes NS_ERROR_FAILURE
+ if (e.result == Cr.NS_ERROR_FAILURE) {
+ tries++;
+ // we really need to delay here, somehow. unfortunately, we can't
+ // allow event processing to happen, and most of the things we could
+ // do to delay ourselves result in event processing happening. (Use
+ // of a timer, a synchronous dispatch, etc.)
+ // in theory, nsIThreadEventFilter could allow us to stop other events
+ // that aren't our timer from happening, but it seems slightly
+ // dangerous and 'notxpcom' suggests it ain't happening anyways...
+ // so, let's just be dumb and hope that the underlying file I/O going
+ // on makes us more likely to yield to the other thread so it can
+ // finish what it is doing...
+ } else {
+ throw e;
+ }
+ }
+ }
+ this._log.error("Synchronous step gave up after " + tries + " tries.");
+ return false;
+ },
+
+  /**
+   * Bind a value to a statement parameter by index, relying on bindByIndex's
+   * variant support to pick the appropriate storage type for the value.
+   */
+  _bindVariant(aStatement, aIndex, aVariant) {
+    aStatement.bindByIndex(aIndex, aVariant);
+  },
+
+ /**
+ * Helper that uses the appropriate getter given the data type; should be
+ * mooted once we move to 1.9.2 and can use built-in variant support.
+ */
+ _getVariant(aRow, aIndex) {
+ let typeOfIndex = aRow.getTypeOfIndex(aIndex);
+ if (typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ // XPConnect would just end up going through an intermediary double stage
+ // for the int64 case anyways...
+ return null;
+ }
+ if (
+ typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_INTEGER ||
+ typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_DOUBLE
+ ) {
+ return aRow.getDouble(aIndex);
+ }
+ // typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_TEXT
+ return aRow.getString(aIndex);
+ },
+
+  /** Simple nested transaction support as a performance optimization. */
+  // Current nesting depth; 0 means no transaction is open.
+  _transactionDepth: 0,
+  // Set false when any nested transaction rolls back, forcing the outermost
+  // commit to issue a ROLLBACK instead.
+  _transactionGood: false,
+
+  /**
+   * Self-memoizing BEGIN TRANSACTION statement. (The getter replaces itself
+   * with a constant-returning getter on first use, so the statement is only
+   * compiled once.)
+   */
+  get _beginTransactionStatement() {
+    let statement = this._createAsyncStatement("BEGIN TRANSACTION");
+    this.__defineGetter__("_beginTransactionStatement", () => statement);
+    return this._beginTransactionStatement;
+  },
+
+  /**
+   * Self-memoizing COMMIT statement.
+   */
+  get _commitTransactionStatement() {
+    let statement = this._createAsyncStatement("COMMIT");
+    this.__defineGetter__("_commitTransactionStatement", () => statement);
+    return this._commitTransactionStatement;
+  },
+
+  /**
+   * Self-memoizing ROLLBACK statement.
+   */
+  get _rollbackTransactionStatement() {
+    let statement = this._createAsyncStatement("ROLLBACK");
+    this.__defineGetter__("_rollbackTransactionStatement", () => statement);
+    return this._rollbackTransactionStatement;
+  },
+
+  // Callbacks registered via runPostCommit for the currently open outermost
+  // transaction; reset by _beginTransaction and consumed at commit time.
+  _pendingPostCommitCallbacks: null,
+  /**
+   * Register a callback to be invoked when the current transaction's commit
+   * completes.
+   */
+  runPostCommit(aCallback) {
+    this._pendingPostCommitCallbacks.push(aCallback);
+  },
+
+ /**
+ * Begin a potentially nested transaction; only the outermost transaction gets
+ * to be an actual transaction, and the failure of any nested transaction
+ * results in a rollback of the entire outer transaction. If you really
+ * need an atomic transaction
+ */
+ _beginTransaction() {
+ if (this._transactionDepth == 0) {
+ this._pendingPostCommitCallbacks = [];
+ this._beginTransactionStatement.executeAsync(this.trackAsync());
+ this._transactionGood = true;
+ }
+ this._transactionDepth++;
+ },
+ /**
+ * Commit a potentially nested transaction; if we are the outer-most
+ * transaction and no sub-transaction issues a rollback
+ * (via _rollbackTransaction) then we commit, otherwise we rollback.
+ */
+ _commitTransaction() {
+ this._transactionDepth--;
+ if (this._transactionDepth == 0) {
+ try {
+ if (this._transactionGood) {
+ this._commitTransactionStatement.executeAsync(
+ new PostCommitHandler(this._pendingPostCommitCallbacks)
+ );
+ } else {
+ this._rollbackTransactionStatement.executeAsync(this.trackAsync());
+ }
+ } catch (ex) {
+ this._log.error("Commit problem:", ex);
+ }
+ this._pendingPostCommitCallbacks = [];
+ }
+ },
+ /**
+ * Abort the commit of the potentially nested transaction. If we are not the
+ * outermost transaction, we set a flag that tells the outermost transaction
+ * that it must roll back.
+ */
+ _rollbackTransaction() {
+ this._transactionDepth--;
+ this._transactionGood = false;
+ if (this._transactionDepth == 0) {
+ try {
+ this._rollbackTransactionStatement.executeAsync(this.trackAsync());
+ } catch (ex) {
+ this._log.error("Rollback problem:", ex);
+ }
+ }
+ },
+
+  // Count of async statements whose completion we are still waiting on.
+  _pendingAsyncStatements: 0,
+  /**
+   * The function to call, if any, when we hit 0 pending async statements.
+   */
+  _pendingAsyncCompletedListener: null,
+  // Decrement the pending count; when it reaches zero, invoke (and clear)
+  // the completion listener. Reached via _asyncTrackerListener.
+  _asyncCompleted() {
+    if (--this._pendingAsyncStatements == 0) {
+      if (this._pendingAsyncCompletedListener !== null) {
+        this._pendingAsyncCompletedListener();
+        this._pendingAsyncCompletedListener = null;
+      }
+    }
+  },
+ _asyncTrackerListener: {
+ handleResult() {},
+ handleError(aError) {
+ GlodaDatastore._log.error(
+ "got error in _asyncTrackerListener.handleError(): " +
+ aError.result +
+ ": " +
+ aError.message
+ );
+ },
+ handleCompletion() {
+ try {
+ // the helper method exists because the other classes need to call it too
+ GlodaDatastore._asyncCompleted();
+ } catch (e) {
+ this._log.error("Exception in handleCompletion:", e);
+ }
+ },
+ },
+  /**
+   * Increments _pendingAsyncStatements and returns a listener that will
+   * decrement the value when the statement completes.
+   * Pass the returned listener to executeAsync so shutdown can wait for all
+   * in-flight statements via _pendingAsyncCompletedListener.
+   */
+  trackAsync() {
+    this._pendingAsyncStatements++;
+    return this._asyncTrackerListener;
+  },
+
+  /* ********** Attribute Definitions ********** */
+  /** Maps (attribute def) compound names to the GlodaAttributeDBDef objects. */
+  _attributeDBDefs: {},
+  /** Map attribute ID to the definition and parameter value that produce it. */
+  _attributeIDToDBDefAndParam: {},
+
+  /**
+   * This attribute id indicates that we are encoding that a non-singular
+   * attribute has an empty set. The value payload that goes with this should
+   * be the attribute id of the attribute we are talking about.
+   */
+  kEmptySetAttrId: 1,
+
+  /**
+   * We maintain the attributeDefinitions next id counter mainly because we can.
+   * Since we mediate the access, there's no real risk to doing so, and it
+   * allows us to keep the writes on the async connection without having to
+   * wait for a completion notification.
+   *
+   * Start from 32 so we can have a number of sentinel values.
+   */
+  _nextAttributeId: 32,
+
+ _populateAttributeDefManagedId() {
+ let stmt = this._createSyncStatement(
+ "SELECT MAX(id) FROM attributeDefinitions",
+ true
+ );
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ // 0 gets returned even if there are no messages...
+ let highestSeen = stmt.getInt64(0);
+ if (highestSeen != 0) {
+ this._nextAttributeId = highestSeen + 1;
+ }
+ }
+ stmt.finalize();
+ },
+
+  /** Self-memoizing INSERT statement for attributeDefinitions rows. */
+  get _insertAttributeDefStatement() {
+    let statement = this._createAsyncStatement(
+      "INSERT INTO attributeDefinitions (id, attributeType, extensionName, \
+                             name, parameter) \
+              VALUES (?1, ?2, ?3, ?4, ?5)"
+    );
+    this.__defineGetter__("_insertAttributeDefStatement", () => statement);
+    return this._insertAttributeDefStatement;
+  },
+
+ /**
+ * Create an attribute definition and return the row ID. Special/atypical
+ * in that it doesn't directly return a GlodaAttributeDBDef; we leave that up
+ * to the caller since they know much more than actually needs to go in the
+ * database.
+ *
+ * @returns The attribute id allocated to this attribute.
+ */
+ _createAttributeDef(aAttrType, aExtensionName, aAttrName, aParameter) {
+ let attributeId = this._nextAttributeId++;
+
+ let iads = this._insertAttributeDefStatement;
+ iads.bindByIndex(0, attributeId);
+ iads.bindByIndex(1, aAttrType);
+ iads.bindByIndex(2, aExtensionName);
+ iads.bindByIndex(3, aAttrName);
+ this._bindVariant(iads, 4, aParameter);
+
+ iads.executeAsync(this.trackAsync());
+
+ return attributeId;
+ },
+
+ /**
+ * Sync-ly look-up all the attribute definitions, populating our authoritative
+ * _attributeDBDefss and _attributeIDToDBDefAndParam maps. (In other words,
+ * once this method is called, those maps should always be in sync with the
+ * underlying database.)
+ */
+ getAllAttributes() {
+ let stmt = this._createSyncStatement(
+ "SELECT id, attributeType, extensionName, name, parameter \
+ FROM attributeDefinitions",
+ true
+ );
+
+ // map compound name to the attribute
+ let attribs = {};
+ // map the attribute id to [attribute, parameter] where parameter is null
+ // in cases where parameter is unused.
+ let idToAttribAndParam = {};
+
+ this._log.info("loading all attribute defs");
+
+ while (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ let rowId = stmt.getInt64(0);
+ let rowAttributeType = stmt.getInt64(1);
+ let rowExtensionName = stmt.getString(2);
+ let rowName = stmt.getString(3);
+ let rowParameter = this._getVariant(stmt, 4);
+
+ let compoundName = rowExtensionName + ":" + rowName;
+
+ let attrib;
+ if (compoundName in attribs) {
+ attrib = attribs[compoundName];
+ } else {
+ attrib = new GlodaAttributeDBDef(
+ this,
+ /* aID */ null,
+ compoundName,
+ rowAttributeType,
+ rowExtensionName,
+ rowName
+ );
+ attribs[compoundName] = attrib;
+ }
+ // if the parameter is null, the id goes on the attribute def, otherwise
+ // it is a parameter binding and goes in the binding map.
+ if (rowParameter == null) {
+ this._log.debug(compoundName + " primary: " + rowId);
+ attrib._id = rowId;
+ idToAttribAndParam[rowId] = [attrib, null];
+ } else {
+ this._log.debug(
+ compoundName + " binding: " + rowParameter + " = " + rowId
+ );
+ attrib._parameterBindings[rowParameter] = rowId;
+ idToAttribAndParam[rowId] = [attrib, rowParameter];
+ }
+ }
+ stmt.finalize();
+
+ this._log.info("done loading all attribute defs");
+
+ this._attributeDBDefs = attribs;
+ this._attributeIDToDBDefAndParam = idToAttribAndParam;
+ },
+
+  /**
+   * Helper method for GlodaAttributeDBDef to tell us when their bindParameter
+   * method is called and they have created a new binding (using
+   * GlodaDatastore._createAttributeDef). In theory, that method could take
+   * an additional argument and obviate the need for this method.
+   *
+   * @param aID The attribute id allocated for the new binding.
+   * @param aAttrDef The GlodaAttributeDBDef owning the binding.
+   * @param aParamValue The parameter value the binding was created for.
+   */
+  reportBinding(aID, aAttrDef, aParamValue) {
+    this._attributeIDToDBDefAndParam[aID] = [aAttrDef, aParamValue];
+  },
+
+  /* ********** Folders ********** */
+  /** next folder (row) id to issue, populated by _getAllFolderMappings. */
+  _nextFolderId: 1,
+
+  /** Self-memoizing INSERT statement for folderLocations rows. */
+  get _insertFolderLocationStatement() {
+    let statement = this._createAsyncStatement(
+      "INSERT INTO folderLocations (id, folderURI, dirtyStatus, name, \
+                                    indexingPriority) VALUES \
+                                   (?1, ?2, ?3, ?4, ?5)"
+    );
+    this.__defineGetter__("_insertFolderLocationStatement", () => statement);
+    return this._insertFolderLocationStatement;
+  },
+
+  /**
+   * Authoritative map from folder URI to folder ID. (Authoritative in the
+   * sense that this map exactly represents the state of the underlying
+   * database. If it does not, it's a bug in updating the database.)
+   */
+  _folderByURI: {},
+  /** Authoritative map from folder ID to folder URI */
+  _folderByID: {},
+
+ /** Initialize our _folderByURI/_folderByID mappings, called by _init(). */
+ _getAllFolderMappings() {
+ let stmt = this._createSyncStatement(
+ "SELECT id, folderURI, dirtyStatus, name, indexingPriority \
+ FROM folderLocations",
+ true
+ );
+
+ while (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ let folderID = stmt.getInt64(0);
+ let folderURI = stmt.getString(1);
+ let dirtyStatus = stmt.getInt32(2);
+ let folderName = stmt.getString(3);
+ let indexingPriority = stmt.getInt32(4);
+
+ let folder = new GlodaFolder(
+ this,
+ folderID,
+ folderURI,
+ dirtyStatus,
+ folderName,
+ indexingPriority
+ );
+
+ this._folderByURI[folderURI] = folder;
+ this._folderByID[folderID] = folder;
+
+ if (folderID >= this._nextFolderId) {
+ this._nextFolderId = folderID + 1;
+ }
+ }
+ stmt.finalize();
+ },
+
+ _folderKnown(aFolder) {
+ let folderURI = aFolder.URI;
+ return folderURI in this._folderByURI;
+ },
+
+  /** @returns {boolean} Whether the given gloda folder id is mapped. */
+  _folderIdKnown(aFolderID) {
+    return aFolderID in this._folderByID;
+  },
+
+ /**
+ * Return the default messaging priority for a folder of this type, based
+ * on the folder's flags. If aAllowSpecialFolderIndexing is true, then
+ * folders suchs as Trash and Junk will be indexed.
+ *
+ * @param {nsIMsgFolder} aFolder
+ * @param {boolean} aAllowSpecialFolderIndexing
+ * @returns {number}
+ */
+ getDefaultIndexingPriority(aFolder, aAllowSpecialFolderIndexing) {
+ let indexingPriority = GlodaFolder.prototype.kIndexingDefaultPriority;
+ // Do not walk into trash/junk folders, unless the user is explicitly
+ // telling us to do so.
+ let specialFolderFlags =
+ Ci.nsMsgFolderFlags.Trash | Ci.nsMsgFolderFlags.Junk;
+ if (aFolder.isSpecialFolder(specialFolderFlags, true)) {
+ indexingPriority = aAllowSpecialFolderIndexing
+ ? GlodaFolder.prototype.kIndexingDefaultPriority
+ : GlodaFolder.prototype.kIndexingNeverPriority;
+ } else if (
+ aFolder.flags &
+ (Ci.nsMsgFolderFlags.Queue | Ci.nsMsgFolderFlags.Newsgroup)
+ // In unit testing at least folders can be
+ // confusingly labeled ImapPublic when they
+ // should not be. Or at least I don't think they
+ // should be. So they're legit for now.
+ // | Ci.nsMsgFolderFlags.ImapPublic
+ // | Ci.nsMsgFolderFlags.ImapOtherUser
+ ) {
+ // Queue folders should always be ignored just because messages should not
+ // spend much time in there.
+ // We hate newsgroups, and public IMAP folders are similar.
+ // Other user IMAP folders should be ignored because it's not this user's
+ // mail.
+ indexingPriority = GlodaFolder.prototype.kIndexingNeverPriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.Inbox) {
+ indexingPriority = GlodaFolder.prototype.kIndexingInboxPriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.SentMail) {
+ indexingPriority = GlodaFolder.prototype.kIndexingSentMailPriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.Favorite) {
+ indexingPriority = GlodaFolder.prototype.kIndexingFavoritePriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.CheckNew) {
+ indexingPriority = GlodaFolder.prototype.kIndexingCheckNewPriority;
+ }
+
+ return indexingPriority;
+ },
+
+ /**
+ * Map a folder URI to a GlodaFolder instance, creating the mapping if it does
+ * not yet exist.
+ *
+ * @param aFolder The nsIMsgFolder instance you would like the GlodaFolder
+ * instance for.
+ * @returns The existing or newly created GlodaFolder instance.
+ */
+ _mapFolder(aFolder) {
+ let folderURI = aFolder.URI;
+ if (folderURI in this._folderByURI) {
+ return this._folderByURI[folderURI];
+ }
+
+ let folderID = this._nextFolderId++;
+
+ // If there's an indexingPriority stored on the folder, just use that.
+ // Otherwise, fall back to the default for folders of this type.
+ let indexingPriority = NaN;
+ try {
+ let pri = aFolder.getStringProperty("indexingPriority"); // Might throw.
+ indexingPriority = parseInt(pri); // Might return NaN.
+ } catch (ex) {}
+ if (isNaN(indexingPriority)) {
+ indexingPriority = this.getDefaultIndexingPriority(aFolder);
+ }
+
+ // If there are messages in the folder, it is filthy. If there are no
+ // messages, it can be clean.
+ let dirtyStatus = aFolder.getTotalMessages(false)
+ ? GlodaFolder.prototype.kFolderFilthy
+ : GlodaFolder.prototype.kFolderClean;
+ let folder = new GlodaFolder(
+ this,
+ folderID,
+ folderURI,
+ dirtyStatus,
+ aFolder.prettyName,
+ indexingPriority
+ );
+
+ this._insertFolderLocationStatement.bindByIndex(0, folder.id);
+ this._insertFolderLocationStatement.bindByIndex(1, folder.uri);
+ this._insertFolderLocationStatement.bindByIndex(2, folder.dirtyStatus);
+ this._insertFolderLocationStatement.bindByIndex(3, folder.name);
+ this._insertFolderLocationStatement.bindByIndex(4, folder.indexingPriority);
+ this._insertFolderLocationStatement.executeAsync(this.trackAsync());
+
+ this._folderByURI[folderURI] = folder;
+ this._folderByID[folderID] = folder;
+ this._log.debug("!! mapped " + folder.id + " from " + folderURI);
+ return folder;
+ },
+
+ /**
+ * Map an integer gloda folder ID to the corresponding GlodaFolder instance.
+ *
+ * @param aFolderID The known valid gloda folder ID for which you would like
+ * a GlodaFolder instance.
+ * @returns The GlodaFolder instance with the given id. If no such instance
+ * exists, we will throw an exception.
+ */
+ _mapFolderID(aFolderID) {
+ if (aFolderID === null) {
+ return null;
+ }
+ if (aFolderID in this._folderByID) {
+ return this._folderByID[aFolderID];
+ }
+ throw new Error("Got impossible folder ID: " + aFolderID);
+ },
+
+ /**
+ * Mark the gloda folder as deleted for any outstanding references to it and
+ * remove it from our tables so we don't hand out any new references. The
+ * latter is especially important in the case a folder with the same name
+ * is created afterwards; we don't want to confuse the new one with the old
+ * one!
+ */
+ _killGlodaFolderIntoTombstone(aGlodaFolder) {
+ aGlodaFolder._deleted = true;
+ delete this._folderByURI[aGlodaFolder.uri];
+ delete this._folderByID[aGlodaFolder.id];
+ },
+
+  /** Self-memoizing UPDATE statement for a folder row's dirtyStatus. */
+  get _updateFolderDirtyStatusStatement() {
+    let statement = this._createAsyncStatement(
+      "UPDATE folderLocations SET dirtyStatus = ?1 \
+                            WHERE id = ?2"
+    );
+    this.__defineGetter__("_updateFolderDirtyStatusStatement", () => statement);
+    return this._updateFolderDirtyStatusStatement;
+  },
+
+  /** Asynchronously persist aFolder.dirtyStatus to its folderLocations row. */
+  updateFolderDirtyStatus(aFolder) {
+    let ufds = this._updateFolderDirtyStatusStatement;
+    ufds.bindByIndex(1, aFolder.id);
+    ufds.bindByIndex(0, aFolder.dirtyStatus);
+    ufds.executeAsync(this.trackAsync());
+  },
+
+  /** Self-memoizing UPDATE statement for a folder row's indexingPriority. */
+  get _updateFolderIndexingPriorityStatement() {
+    let statement = this._createAsyncStatement(
+      "UPDATE folderLocations SET indexingPriority = ?1 \
+                            WHERE id = ?2"
+    );
+    this.__defineGetter__(
+      "_updateFolderIndexingPriorityStatement",
+      () => statement
+    );
+    return this._updateFolderIndexingPriorityStatement;
+  },
+
+  /** Asynchronously persist aFolder.indexingPriority to its row. */
+  updateFolderIndexingPriority(aFolder) {
+    let ufip = this._updateFolderIndexingPriorityStatement;
+    ufip.bindByIndex(1, aFolder.id);
+    ufip.bindByIndex(0, aFolder.indexingPriority);
+    ufip.executeAsync(this.trackAsync());
+  },
+
+  /** Self-memoizing UPDATE statement for a folder row's folderURI. */
+  get _updateFolderLocationStatement() {
+    let statement = this._createAsyncStatement(
+      "UPDATE folderLocations SET folderURI = ?1 \
+                            WHERE id = ?2"
+    );
+    this.__defineGetter__("_updateFolderLocationStatement", () => statement);
+    return this._updateFolderLocationStatement;
+  },
+
+  /**
+   * Non-recursive asynchronous folder renaming based on the URI.
+   *
+   * @TODO provide a mechanism for recursive folder renames or have a higher
+   *     layer deal with it and remove this note.
+   */
+  renameFolder(aOldFolder, aNewURI) {
+    if (!(aOldFolder.URI in this._folderByURI)) {
+      // We never mapped this folder, so there is nothing to rename.
+      return;
+    }
+    let folder = this._mapFolder(aOldFolder); // ensure the folder is mapped
+    let oldURI = folder.uri;
+    // Register the new URI before removing the old mapping so the folder is
+    // always reachable by at least one key.
+    this._folderByURI[aNewURI] = folder;
+    folder._uri = aNewURI;
+    this._log.info("renaming folder URI " + oldURI + " to " + aNewURI);
+    this._updateFolderLocationStatement.bindByIndex(1, folder.id);
+    this._updateFolderLocationStatement.bindByIndex(0, aNewURI);
+    this._updateFolderLocationStatement.executeAsync(this.trackAsync());
+
+    delete this._folderByURI[oldURI];
+  },
+
  // Lazily-built, memoized async statement for deleteFolderByID.
  get _deleteFolderByIDStatement() {
    let statement = this._createAsyncStatement(
      "DELETE FROM folderLocations WHERE id = ?1"
    );
    this.__defineGetter__("_deleteFolderByIDStatement", () => statement);
    return this._deleteFolderByIDStatement;
  },

  /**
   * Asynchronously delete the folderLocations row with the given id.
   */
  deleteFolderByID(aFolderID) {
    let dfbis = this._deleteFolderByIDStatement;
    dfbis.bindByIndex(0, aFolderID);
    dfbis.executeAsync(this.trackAsync());
  },
+
  /* ********** Conversation ********** */
  /** The next conversation id to allocate.  Initialize at startup. */
  _nextConversationId: 1,

  /**
   * Seed _nextConversationId from MAX(id) in the conversations table so that
   * ids we hand out never collide with rows already on disk.  Uses a
   * synchronous statement; only intended to run during startup.
   */
  _populateConversationManagedId() {
    let stmt = this._createSyncStatement(
      "SELECT MAX(id) FROM conversations",
      true
    );
    if (stmt.executeStep()) {
      // no chance of this SQLITE_BUSY on this call
      this._nextConversationId = stmt.getInt64(0) + 1;
    }
    stmt.finalize();
  },
+
  // Memoized INSERT for the conversations data row.
  get _insertConversationStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO conversations (id, subject, oldestMessageDate, \
                                  newestMessageDate) \
              VALUES (?1, ?2, ?3, ?4)"
    );
    this.__defineGetter__("_insertConversationStatement", () => statement);
    return this._insertConversationStatement;
  },

  // Memoized INSERT for the conversationsText full-text row; docid is the
  // same value as the conversations row id.
  get _insertConversationTextStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO conversationsText (docid, subject) \
              VALUES (?1, ?2)"
    );
    this.__defineGetter__("_insertConversationTextStatement", () => statement);
    return this._insertConversationTextStatement;
  },
+
  /**
   * Asynchronously create a conversation.
   *
   * Allocates the id synchronously from _nextConversationId, issues the
   * data-row and full-text-row INSERTs asynchronously, and returns the new
   * in-memory GlodaConversation immediately (before the writes complete).
   */
  createConversation(aSubject, aOldestMessageDate, aNewestMessageDate) {
    // create the data row
    let conversationID = this._nextConversationId++;
    let ics = this._insertConversationStatement;
    ics.bindByIndex(0, conversationID);
    ics.bindByIndex(1, aSubject);
    // Either date may legitimately be absent; bind SQL NULL in that case.
    if (aOldestMessageDate == null) {
      ics.bindByIndex(2, null);
    } else {
      ics.bindByIndex(2, aOldestMessageDate);
    }
    if (aNewestMessageDate == null) {
      ics.bindByIndex(3, null);
    } else {
      ics.bindByIndex(3, aNewestMessageDate);
    }
    ics.executeAsync(this.trackAsync());

    // create the fulltext row, using the same rowid/docid
    let icts = this._insertConversationTextStatement;
    icts.bindByIndex(0, conversationID);
    icts.bindByIndex(1, aSubject);
    icts.executeAsync(this.trackAsync());

    // create it
    let conversation = new GlodaConversation(
      this,
      conversationID,
      aSubject,
      aOldestMessageDate,
      aNewestMessageDate
    );
    // it's new! let the collection manager know about it.
    GlodaCollectionManager.itemsAdded(conversation.NOUN_ID, [conversation]);
    // return it
    return conversation;
  },
+
  // Memoized DELETE of a conversations row by id.
  get _deleteConversationByIDStatement() {
    let statement = this._createAsyncStatement(
      "DELETE FROM conversations WHERE id = ?1"
    );
    this.__defineGetter__("_deleteConversationByIDStatement", () => statement);
    return this._deleteConversationByIDStatement;
  },

  /**
   * Asynchronously delete a conversation given its ID.
   */
  deleteConversationByID(aConversationID) {
    let dcbids = this._deleteConversationByIDStatement;
    dcbids.bindByIndex(0, aConversationID);
    dcbids.executeAsync(this.trackAsync());

    // Tell the collection manager so live collections drop the item.
    GlodaCollectionManager.itemsDeleted(GlodaConversation.prototype.NOUN_ID, [
      aConversationID,
    ]);
  },
+
  /**
   * Materialize a GlodaConversation from a result row laid out as:
   * 0=id, 1=subject, 2=oldestMessageDate, 3=newestMessageDate.
   */
  _conversationFromRow(aStmt) {
    let oldestMessageDate, newestMessageDate;
    // SQL NULL dates become JS null rather than 0.
    if (aStmt.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      oldestMessageDate = null;
    } else {
      oldestMessageDate = aStmt.getInt64(2);
    }
    if (aStmt.getTypeOfIndex(3) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      newestMessageDate = null;
    } else {
      newestMessageDate = aStmt.getInt64(3);
    }
    return new GlodaConversation(
      this,
      aStmt.getInt64(0),
      aStmt.getString(1),
      oldestMessageDate,
      newestMessageDate
    );
  },
+
  /* ********** Message ********** */
  /**
   * Next message id, managed because of our use of asynchronous inserts.
   * Initialized by _populateMessageManagedId called by _init.
   *
   * Start from 32 to leave us all kinds of magical sentinel values at the
   * bottom.
   */
  _nextMessageId: 32,

  // Seed _nextMessageId from MAX(id); leave the sentinel floor of 32 in
  // place when the table is empty (MAX(id) then reads as 0).
  _populateMessageManagedId() {
    let stmt = this._createSyncStatement("SELECT MAX(id) FROM messages", true);
    if (stmt.executeStep()) {
      // no chance of this SQLITE_BUSY on this call
      // 0 gets returned even if there are no messages...
      let highestSeen = stmt.getInt64(0);
      if (highestSeen != 0) {
        this._nextMessageId = highestSeen + 1;
      }
    }
    stmt.finalize();
  },
+
  // Memoized INSERT for the messages data row.
  get _insertMessageStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO messages (id, folderID, messageKey, conversationID, date, \
                             headerMessageID, jsonAttributes, notability) \
              VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"
    );
    this.__defineGetter__("_insertMessageStatement", () => statement);
    return this._insertMessageStatement;
  },

  // Memoized INSERT for the messagesText full-text row; docid is the same
  // value as the messages row id.
  get _insertMessageTextStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO messagesText (docid, subject, body, attachmentNames, \
                                 author, recipients) \
              VALUES (?1, ?2, ?3, ?4, ?5, ?6)"
    );
    this.__defineGetter__("_insertMessageTextStatement", () => statement);
    return this._insertMessageTextStatement;
  },
+
  /**
   * Create a GlodaMessage with the given properties.  Because this is only half
   * of the process of creating a message (the attributes still need to be
   * completed), it's on the caller's head to call GlodaCollectionManager's
   * itemAdded method once the message is fully created.
   *
   * This method uses the async connection, any downstream logic that depends on
   * this message actually existing in the database must be done using an
   * async query.
   *
   * @param aFolder nsIMsgFolder the message lives in, or null for a ghost.
   * @param aDatePRTime message date in PRTime (microseconds); converted to a
   *     JS Date (milliseconds) for the in-memory object.
   */
  createMessage(
    aFolder,
    aMessageKey,
    aConversationID,
    aDatePRTime,
    aHeaderMessageID
  ) {
    let folderID;
    if (aFolder != null) {
      folderID = this._mapFolder(aFolder).id;
    } else {
      folderID = null;
    }

    // Allocate the id synchronously even though the insert is async.
    let messageID = this._nextMessageId++;

    let message = new GlodaMessage(
      this,
      messageID,
      folderID,
      aMessageKey,
      aConversationID,
      /* conversation */ null,
      aDatePRTime ? new Date(aDatePRTime / 1000) : null,
      aHeaderMessageID,
      /* deleted */ false,
      /* jsonText */ undefined,
      /* notability*/ 0
    );

    // We would love to notify the collection manager about the message at this
    // point (at least if it's not a ghost), but we can't yet.  We need to wait
    // until the attributes have been indexed, which means it's out of our
    // hands.  (Gloda.processMessage does it.)

    return message;
  },
+
  /**
   * Asynchronously insert the messages row for aMessage, plus the
   * messagesText full-text row if the message is not a ghost (a ghost has a
   * null folderID).  Nullable columns (folderID, messageKey, date,
   * jsonAttributes) are bound to SQL NULL explicitly when absent.
   */
  insertMessage(aMessage) {
    this._log.debug("insertMessage " + aMessage);
    let ims = this._insertMessageStatement;
    ims.bindByIndex(0, aMessage.id);
    if (aMessage.folderID == null) {
      ims.bindByIndex(1, null);
    } else {
      ims.bindByIndex(1, aMessage.folderID);
    }
    if (aMessage.messageKey == null) {
      ims.bindByIndex(2, null);
    } else {
      ims.bindByIndex(2, aMessage.messageKey);
    }
    ims.bindByIndex(3, aMessage.conversationID);
    if (aMessage.date == null) {
      ims.bindByIndex(4, null);
    } else {
      // JS Date milliseconds back to PRTime microseconds for storage.
      ims.bindByIndex(4, aMessage.date * 1000);
    }
    ims.bindByIndex(5, aMessage.headerMessageID);
    if (aMessage._jsonText) {
      ims.bindByIndex(6, aMessage._jsonText);
    } else {
      ims.bindByIndex(6, null);
    }
    ims.bindByIndex(7, aMessage.notability);

    try {
      ims.executeAsync(this.trackAsync());
    } catch (ex) {
      throw new Error(
        "error executing statement... " +
          this.asyncConnection.lastError +
          ": " +
          this.asyncConnection.lastErrorString +
          " - " +
          ex
      );
    }

    // we create the full-text row for any message that isn't a ghost,
    // whether we have the body or not
    if (aMessage.folderID !== null) {
      this._insertMessageText(aMessage);
    }
  },
+
  /**
   * Inserts a full-text row.  This should only be called if you're sure you want
   * to insert a row into the table.
   *
   * Side effect: computes and caches aMessage._indexedBodyText from the
   * message's content/body lines (or null when neither is available).
   */
  _insertMessageText(aMessage) {
    if (aMessage._content && aMessage._content.hasContent()) {
      aMessage._indexedBodyText = aMessage._content.getContentString(true);
    } else if (aMessage._bodyLines) {
      aMessage._indexedBodyText = aMessage._bodyLines.join("\n");
    } else {
      aMessage._indexedBodyText = null;
    }

    let imts = this._insertMessageTextStatement;
    imts.bindByIndex(0, aMessage.id);
    imts.bindByIndex(1, aMessage._subject);
    if (aMessage._indexedBodyText == null) {
      imts.bindByIndex(2, null);
    } else {
      imts.bindByIndex(2, aMessage._indexedBodyText);
    }
    if (aMessage._attachmentNames === null) {
      imts.bindByIndex(3, null);
    } else {
      imts.bindByIndex(3, aMessage._attachmentNames.join("\n"));
    }

    // if (aMessage._indexAuthor)
    imts.bindByIndex(4, aMessage._indexAuthor);
    // if (aMessage._indexRecipients)
    imts.bindByIndex(5, aMessage._indexRecipients);

    try {
      imts.executeAsync(this.trackAsync());
    } catch (ex) {
      throw new Error(
        "error executing fulltext statement... " +
          this.asyncConnection.lastError +
          ": " +
          this.asyncConnection.lastErrorString +
          " - " +
          ex
      );
    }
  },
+
  // Memoized UPDATE of every mutable column of a messages row.
  get _updateMessageStatement() {
    let statement = this._createAsyncStatement(
      "UPDATE messages SET folderID = ?1, \
                           messageKey = ?2, \
                           conversationID = ?3, \
                           date = ?4, \
                           headerMessageID = ?5, \
                           jsonAttributes = ?6, \
                           notability = ?7, \
                           deleted = ?8 \
              WHERE id = ?9"
    );
    this.__defineGetter__("_updateMessageStatement", () => statement);
    return this._updateMessageStatement;
  },

  // Memoized UPDATE of the body/attachmentNames full-text columns.
  get _updateMessageTextStatement() {
    let statement = this._createAsyncStatement(
      "UPDATE messagesText SET body = ?1, \
                               attachmentNames = ?2 \
              WHERE docid = ?3"
    );

    this.__defineGetter__("_updateMessageTextStatement", () => statement);
    return this._updateMessageTextStatement;
  },
+
  /**
   * Update the database row associated with the message.  If the message is
   * not a ghost and has _isNew defined, messagesText is affected.
   *
   * aMessage._isNew is currently equivalent to the fact that there is no
   * full-text row associated with this message, and we work with this
   * assumption here.  Note that if aMessage._isNew is not defined, then
   * we don't do anything.
   */
  updateMessage(aMessage) {
    this._log.debug("updateMessage " + aMessage);
    let ums = this._updateMessageStatement;
    // bindByIndex is 0-based, so index 8 is placeholder ?9 (the id).
    ums.bindByIndex(8, aMessage.id);
    if (aMessage.folderID === null) {
      ums.bindByIndex(0, null);
    } else {
      ums.bindByIndex(0, aMessage.folderID);
    }
    if (aMessage.messageKey === null) {
      ums.bindByIndex(1, null);
    } else {
      ums.bindByIndex(1, aMessage.messageKey);
    }
    ums.bindByIndex(2, aMessage.conversationID);
    if (aMessage.date === null) {
      ums.bindByIndex(3, null);
    } else {
      // JS Date milliseconds back to PRTime microseconds for storage.
      ums.bindByIndex(3, aMessage.date * 1000);
    }
    ums.bindByIndex(4, aMessage.headerMessageID);
    if (aMessage._jsonText) {
      ums.bindByIndex(5, aMessage._jsonText);
    } else {
      ums.bindByIndex(5, null);
    }
    ums.bindByIndex(6, aMessage.notability);
    ums.bindByIndex(7, aMessage._isDeleted ? 1 : 0);

    ums.executeAsync(this.trackAsync());

    // Ghosts (null folderID) get no full-text row at all.
    if (aMessage.folderID !== null) {
      if ("_isNew" in aMessage && aMessage._isNew === true) {
        this._insertMessageText(aMessage);
      } else {
        this._updateMessageText(aMessage);
      }
    }
  },
+
  /**
   * Updates the full-text row associated with this message.  This only performs
   * the UPDATE query if the indexed body text has changed, which means that if
   * the body hasn't changed but the attachments have, we don't update.
   */
  _updateMessageText(aMessage) {
    let newIndexedBodyText;
    if (aMessage._content && aMessage._content.hasContent()) {
      newIndexedBodyText = aMessage._content.getContentString(true);
    } else if (aMessage._bodyLines) {
      newIndexedBodyText = aMessage._bodyLines.join("\n");
    } else {
      newIndexedBodyText = null;
    }

    // If the body text matches, don't perform an update
    if (newIndexedBodyText == aMessage._indexedBodyText) {
      this._log.debug(
        "in _updateMessageText, skipping update because body matches"
      );
      return;
    }

    aMessage._indexedBodyText = newIndexedBodyText;
    let umts = this._updateMessageTextStatement;
    // Index 2 is placeholder ?3, the docid.
    umts.bindByIndex(2, aMessage.id);

    if (aMessage._indexedBodyText == null) {
      umts.bindByIndex(0, null);
    } else {
      umts.bindByIndex(0, aMessage._indexedBodyText);
    }

    if (aMessage._attachmentNames == null) {
      umts.bindByIndex(1, null);
    } else {
      umts.bindByIndex(1, aMessage._attachmentNames.join("\n"));
    }

    try {
      umts.executeAsync(this.trackAsync());
    } catch (ex) {
      throw new Error(
        "error executing fulltext statement... " +
          this.asyncConnection.lastError +
          ": " +
          this.asyncConnection.lastErrorString +
          " - " +
          ex
      );
    }
  },
+
  // Memoized UPDATE of a message's folderID/messageKey location columns.
  get _updateMessageLocationStatement() {
    let statement = this._createAsyncStatement(
      "UPDATE messages SET folderID = ?1, messageKey = ?2 WHERE id = ?3"
    );
    this.__defineGetter__("_updateMessageLocationStatement", () => statement);
    return this._updateMessageLocationStatement;
  },
+
  /**
   * Given a list of gloda message ids, and a list of their new message keys in
   * the given new folder location, asynchronously update the message's
   * database locations.  Also, update the in-memory representations.
   *
   * @param aDestFolder either a numeric folder id or a folder that
   *     _mapFolder can resolve.
   * @param aDoNotNotify when true, suppress the itemsModified notification.
   */
  updateMessageLocations(
    aMessageIds,
    aNewMessageKeys,
    aDestFolder,
    aDoNotNotify
  ) {
    this._log.debug(
      "updateMessageLocations:\n" +
        "ids: " +
        aMessageIds +
        "\n" +
        "keys: " +
        aNewMessageKeys +
        "\n" +
        "dest folder: " +
        aDestFolder +
        "\n" +
        "do not notify?" +
        aDoNotNotify +
        "\n"
    );
    let statement = this._updateMessageLocationStatement;
    let destFolderID =
      typeof aDestFolder == "number"
        ? aDestFolder
        : this._mapFolder(aDestFolder).id;

    // map gloda id to the new message key for in-memory rep transform below
    let cacheLookupMap = {};

    // aMessageIds and aNewMessageKeys are parallel arrays.
    for (let iMsg = 0; iMsg < aMessageIds.length; iMsg++) {
      let id = aMessageIds[iMsg],
        msgKey = aNewMessageKeys[iMsg];
      statement.bindByIndex(0, destFolderID);
      statement.bindByIndex(1, msgKey);
      statement.bindByIndex(2, id);
      statement.executeAsync(this.trackAsync());

      cacheLookupMap[id] = msgKey;
    }

    // - perform the cache lookup so we can update in-memory representations
    // found in memory items, and converted to list form for notification
    let inMemoryItems = {},
      modifiedItems = [];
    GlodaCollectionManager.cacheLookupMany(
      GlodaMessage.prototype.NOUN_ID,
      cacheLookupMap,
      inMemoryItems,
      /* do not cache */ false
    );
    for (let glodaId in inMemoryItems) {
      let glodaMsg = inMemoryItems[glodaId];
      glodaMsg._folderID = destFolderID;
      glodaMsg._messageKey = cacheLookupMap[glodaId];
      modifiedItems.push(glodaMsg);
    }

    // tell the collection manager about the modified messages so it can update
    // any existing views...
    if (!aDoNotNotify && modifiedItems.length) {
      GlodaCollectionManager.itemsModified(
        GlodaMessage.prototype.NOUN_ID,
        modifiedItems
      );
    }
  },
+
  // Memoized UPDATE of a message's messageKey only (folder unchanged).
  get _updateMessageKeyStatement() {
    let statement = this._createAsyncStatement(
      "UPDATE messages SET messageKey = ?1 WHERE id = ?2"
    );
    this.__defineGetter__("_updateMessageKeyStatement", () => statement);
    return this._updateMessageKeyStatement;
  },
+
  /**
   * Update the message keys for the gloda messages with the given id's.  This
   * is to be used in response to msgKeyChanged notifications and is similar to
   * `updateMessageLocations` except that we do not update the folder and we
   * do not perform itemsModified notifications (because message keys are not
   * intended to be relevant to the gloda message abstraction).
   */
  updateMessageKeys(aMessageIds, aNewMessageKeys) {
    this._log.debug(
      "updateMessageKeys:\n" +
        "ids: " +
        aMessageIds +
        "\n" +
        "keys:" +
        aNewMessageKeys +
        "\n"
    );
    let statement = this._updateMessageKeyStatement;

    // map gloda id to the new message key for in-memory rep transform below
    let cacheLookupMap = {};

    // aMessageIds and aNewMessageKeys are parallel arrays.
    for (let iMsg = 0; iMsg < aMessageIds.length; iMsg++) {
      let id = aMessageIds[iMsg],
        msgKey = aNewMessageKeys[iMsg];
      statement.bindByIndex(0, msgKey);
      statement.bindByIndex(1, id);
      statement.executeAsync(this.trackAsync());

      cacheLookupMap[id] = msgKey;
    }

    // - perform the cache lookup so we can update in-memory representations
    let inMemoryItems = {};
    GlodaCollectionManager.cacheLookupMany(
      GlodaMessage.prototype.NOUN_ID,
      cacheLookupMap,
      inMemoryItems,
      /* do not cache */ false
    );
    for (let glodaId in inMemoryItems) {
      let glodaMsg = inMemoryItems[glodaId];
      glodaMsg._messageKey = cacheLookupMap[glodaId];
    }
  },
+
  /**
   * Asynchronously mutate message folder id/message keys for the given
   * messages, indicating that we are moving them to the target folder, but
   * don't yet know their target message keys.
   *
   * Updates in-memory representations too.
   */
  updateMessageFoldersByKeyPurging(aGlodaIds, aDestFolder) {
    let destFolderID = this._mapFolder(aDestFolder).id;

    // The id list is inlined into the SQL rather than bound; the ids are
    // numeric gloda ids, not user input.
    let sqlStr =
      "UPDATE messages SET folderID = ?1, \
                           messageKey = ?2 \
                       WHERE id IN (" +
      aGlodaIds.join(", ") +
      ")";
    // One-off statement: finalize after dispatch rather than memoizing.
    let statement = this._createAsyncStatement(sqlStr, true);
    statement.bindByIndex(0, destFolderID);
    statement.bindByIndex(1, null);
    statement.executeAsync(this.trackAsync());
    statement.finalize();

    // Mirror the change onto any cached in-memory messages.
    let cached = GlodaCollectionManager.cacheLookupManyList(
      GlodaMessage.prototype.NOUN_ID,
      aGlodaIds
    );
    for (let id in cached) {
      let glodaMsg = cached[id];
      glodaMsg._folderID = destFolderID;
      glodaMsg._messageKey = null;
    }
  },
+
  /**
   * Materialize a GlodaMessage from a result row.  Column layout:
   * 0=id, 1=folderID, 2=messageKey, 3=conversationID, 4=date (PRTime),
   * 5=headerMessageID, 6=deleted, 7=jsonAttributes, 8=notability; rows from
   * queryFromQuery additionally carry 9=body, 10=subject,
   * 11=attachmentNames, 12=author, 13=recipients.
   */
  _messageFromRow(aRow) {
    this._log.debug("_messageFromRow " + aRow);
    let folderId,
      messageKey,
      date,
      jsonText,
      subject,
      indexedBodyText,
      attachmentNames;
    if (aRow.getTypeOfIndex(1) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      folderId = null;
    } else {
      folderId = aRow.getInt64(1);
    }
    if (aRow.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      messageKey = null;
    } else {
      messageKey = aRow.getInt64(2);
    }
    if (aRow.getTypeOfIndex(4) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      date = null;
    } else {
      // Stored PRTime microseconds to JS Date milliseconds.
      date = new Date(aRow.getInt64(4) / 1000);
    }
    if (aRow.getTypeOfIndex(7) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      jsonText = undefined;
    } else {
      jsonText = aRow.getString(7);
    }
    // only queryFromQuery queries will have these columns
    if (aRow.numEntries >= 14) {
      if (aRow.getTypeOfIndex(10) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
        subject = undefined;
      } else {
        subject = aRow.getString(10);
      }
      if (aRow.getTypeOfIndex(9) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
        indexedBodyText = undefined;
      } else {
        indexedBodyText = aRow.getString(9);
      }
      if (aRow.getTypeOfIndex(11) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
        attachmentNames = null;
      } else {
        attachmentNames = aRow.getString(11);
        // Stored as a newline-delimited list; empty string means none.
        if (attachmentNames) {
          attachmentNames = attachmentNames.split("\n");
        } else {
          attachmentNames = null;
        }
      }
      // we ignore 12, author
      // we ignore 13, recipients
    }
    return new GlodaMessage(
      this,
      aRow.getInt64(0),
      folderId,
      messageKey,
      aRow.getInt64(3),
      null,
      date,
      aRow.getString(5),
      aRow.getInt64(6),
      jsonText,
      aRow.getInt64(8),
      subject,
      indexedBodyText,
      attachmentNames
    );
  },
+
  get _updateMessagesMarkDeletedByFolderID() {
    // When marking deleted clear the folderID and messageKey so that the
    // indexing process can reuse it without any location constraints.
    let statement = this._createAsyncStatement(
      "UPDATE messages SET folderID = NULL, messageKey = NULL, \
              deleted = 1 WHERE folderID = ?1"
    );
    this.__defineGetter__(
      "_updateMessagesMarkDeletedByFolderID",
      () => statement
    );
    return this._updateMessagesMarkDeletedByFolderID;
  },
+
  /**
   * Efficiently mark all the messages in a folder as deleted.  Unfortunately,
   * we obviously do not know the id's of the messages affected by this which
   * complicates in-memory updates.  The options are sending out to the SQL
   * database for a list of the message id's or some form of in-memory
   * traversal.  I/O costs being what they are, users having a propensity to
   * have folders with tens of thousands of messages, and the unlikeliness
   * of all of those messages being gloda-memory-resident, we go with the
   * in-memory traversal.
   */
  markMessagesDeletedByFolderID(aFolderID) {
    let statement = this._updateMessagesMarkDeletedByFolderID;
    statement.bindByIndex(0, aFolderID);
    statement.executeAsync(this.trackAsync());

    // Have the collection manager generate itemsRemoved events for any
    // in-memory messages in that folder.
    GlodaCollectionManager.itemsDeletedByAttribute(
      GlodaMessage.prototype.NOUN_ID,
      aMsg => aMsg._folderID == aFolderID
    );
  },
+
  /**
   * Mark all the gloda messages as deleted blind-fire.  Check if any of the
   * messages are known to the collection manager and update them to be deleted
   * along with the requisite collection notifications.
   */
  markMessagesDeletedByIDs(aMessageIDs) {
    // When marking deleted clear the folderID and messageKey so that the
    // indexing process can reuse it without any location constraints.
    // The ids are numeric gloda ids, so inlining them into the SQL is safe.
    let sqlString =
      "UPDATE messages SET folderID = NULL, messageKey = NULL, " +
      "deleted = 1 WHERE id IN (" +
      aMessageIDs.join(",") +
      ")";

    // One-off statement: finalize after dispatch rather than memoizing.
    let statement = this._createAsyncStatement(sqlString, true);
    statement.executeAsync(this.trackAsync());
    statement.finalize();

    GlodaCollectionManager.itemsDeleted(
      GlodaMessage.prototype.NOUN_ID,
      aMessageIDs
    );
  },
+
  // Memoized COUNT of rows currently flagged deleted.
  get _countDeletedMessagesStatement() {
    let statement = this._createAsyncStatement(
      "SELECT COUNT(*) FROM messages WHERE deleted = 1"
    );
    this.__defineGetter__("_countDeletedMessagesStatement", () => statement);
    return this._countDeletedMessagesStatement;
  },

  /**
   * Count how many messages are currently marked as deleted in the database.
   * The single scalar result is delivered to aCallback.
   */
  countDeletedMessages(aCallback) {
    let cms = this._countDeletedMessagesStatement;
    cms.executeAsync(new SingletonResultValueHandler(aCallback));
  },
+
  // Memoized DELETE of a messages row by id.
  get _deleteMessageByIDStatement() {
    let statement = this._createAsyncStatement(
      "DELETE FROM messages WHERE id = ?1"
    );
    this.__defineGetter__("_deleteMessageByIDStatement", () => statement);
    return this._deleteMessageByIDStatement;
  },

  // Memoized DELETE of a messagesText full-text row by docid.
  get _deleteMessageTextByIDStatement() {
    let statement = this._createAsyncStatement(
      "DELETE FROM messagesText WHERE docid = ?1"
    );
    this.__defineGetter__("_deleteMessageTextByIDStatement", () => statement);
    return this._deleteMessageTextByIDStatement;
  },
+
  /**
   * Delete a message and its fulltext from the database.  It is assumed that
   * the message was already marked as deleted and so is not visible to the
   * collection manager and so nothing needs to be done about that.
   */
  deleteMessageByID(aMessageID) {
    let dmbids = this._deleteMessageByIDStatement;
    dmbids.bindByIndex(0, aMessageID);
    dmbids.executeAsync(this.trackAsync());

    this.deleteMessageTextByID(aMessageID);
  },

  // Delete just the messagesText full-text row for the given gloda id.
  deleteMessageTextByID(aMessageID) {
    let dmt = this._deleteMessageTextByIDStatement;
    dmt.bindByIndex(0, aMessageID);
    dmt.executeAsync(this.trackAsync());
  },
+
  // Memoized block-fetch of (id, messageKey, headerMessageID) tuples for a
  // folder, ordered by messageKey.  The "+deleted" forces SQLite to skip
  // the deleted-column index and use the folderID/messageKey ordering.
  get _folderCompactionStatement() {
    let statement = this._createAsyncStatement(
      "SELECT id, messageKey, headerMessageID FROM messages \
        WHERE folderID = ?1 AND \
          messageKey >= ?2 AND +deleted = 0 ORDER BY messageKey LIMIT ?3"
    );
    this.__defineGetter__("_folderCompactionStatement", () => statement);
    return this._folderCompactionStatement;
  },

  /**
   * Fetch up to aLimit non-deleted messages in aFolderID whose keys are
   * >= aStartingMessageKey, delivering the tuples to aCallback via a
   * CompactionBlockFetcherHandler.
   */
  folderCompactionPassBlockFetch(
    aFolderID,
    aStartingMessageKey,
    aLimit,
    aCallback
  ) {
    let fcs = this._folderCompactionStatement;
    fcs.bindByIndex(0, aFolderID);
    fcs.bindByIndex(1, aStartingMessageKey);
    fcs.bindByIndex(2, aLimit);
    fcs.executeAsync(new CompactionBlockFetcherHandler(aCallback));
  },
+
  /* ********** Message Attributes ********** */
  // Memoized INSERT of one messageAttributes row.
  get _insertMessageAttributeStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO messageAttributes (conversationID, messageID, attributeID, \
                                      value) \
              VALUES (?1, ?2, ?3, ?4)"
    );
    this.__defineGetter__("_insertMessageAttributeStatement", () => statement);
    return this._insertMessageAttributeStatement;
  },

  // Memoized DELETE of one specific attribute/value pairing on a message.
  get _deleteMessageAttributeStatement() {
    let statement = this._createAsyncStatement(
      "DELETE FROM messageAttributes WHERE attributeID = ?1 AND value = ?2 \
         AND conversationID = ?3 AND messageID = ?4"
    );
    this.__defineGetter__("_deleteMessageAttributeStatement", () => statement);
    return this._deleteMessageAttributeStatement;
  },
+
+ /**
+ * Insert and remove attributes relating to a GlodaMessage. This is performed
+ * inside a pseudo-transaction (we create one if we aren't in one, using
+ * our _beginTransaction wrapper, but if we are in one, no additional
+ * meaningful semantics are added).
+ * No attempt is made to verify uniqueness of inserted attributes, either
+ * against the current database or within the provided list of attributes.
+ * The caller is responsible for ensuring that unwanted duplicates are
+ * avoided.
+ *
+ * @param aMessage The GlodaMessage the attributes belong to. This is used
+ * to provide the message id and conversation id.
+ * @param aAddDBAttributes A list of attribute tuples to add, where each tuple
+ * contains an attribute ID and a value. Lest you forget, an attribute ID
+ * corresponds to a row in the attribute definition table. The attribute
+ * definition table stores the 'parameter' for the attribute, if any.
+ * (Which is to say, our frequent Attribute-Parameter-Value triple has
+ * the Attribute-Parameter part distilled to a single attribute id.)
+ * @param aRemoveDBAttributes A list of attribute tuples to remove.
+ */
+ adjustMessageAttributes(aMessage, aAddDBAttributes, aRemoveDBAttributes) {
+ let imas = this._insertMessageAttributeStatement;
+ let dmas = this._deleteMessageAttributeStatement;
+ this._beginTransaction();
+ try {
+ for (let iAttrib = 0; iAttrib < aAddDBAttributes.length; iAttrib++) {
+ let attribValueTuple = aAddDBAttributes[iAttrib];
+
+ imas.bindByIndex(0, aMessage.conversationID);
+ imas.bindByIndex(1, aMessage.id);
+ imas.bindByIndex(2, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ imas.bindByIndex(3, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ imas.bindByIndex(3, attribValueTuple[1]);
+ } else {
+ imas.bindByIndex(3, attribValueTuple[1]);
+ }
+ imas.executeAsync(this.trackAsync());
+ }
+
+ for (let iAttrib = 0; iAttrib < aRemoveDBAttributes.length; iAttrib++) {
+ let attribValueTuple = aRemoveDBAttributes[iAttrib];
+
+ dmas.bindByIndex(0, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ dmas.bindByIndex(1, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ dmas.bindByIndex(1, attribValueTuple[1]);
+ } else {
+ dmas.bindByIndex(1, attribValueTuple[1]);
+ }
+ dmas.bindByIndex(2, aMessage.conversationID);
+ dmas.bindByIndex(3, aMessage.id);
+ dmas.executeAsync(this.trackAsync());
+ }
+
+ this._commitTransaction();
+ } catch (ex) {
+ this._log.error("adjustMessageAttributes:", ex);
+ this._rollbackTransaction();
+ throw ex;
+ }
+ },
+
  // Memoized DELETE of all messageAttributes rows for one message.
  get _deleteMessageAttributesByMessageIDStatement() {
    let statement = this._createAsyncStatement(
      "DELETE FROM messageAttributes WHERE messageID = ?1"
    );
    this.__defineGetter__(
      "_deleteMessageAttributesByMessageIDStatement",
      () => statement
    );
    return this._deleteMessageAttributesByMessageIDStatement;
  },
+
  /**
   * Clear all the message attributes for a given GlodaMessage.  No changes
   * are made to the in-memory representation of the message; it is up to the
   * caller to ensure that it handles things correctly.
   *
   * @param aMessage The GlodaMessage whose database attributes should be
   *     purged.
   */
  clearMessageAttributes(aMessage) {
    // A message with no id has no rows to purge.
    if (aMessage.id != null) {
      this._deleteMessageAttributesByMessageIDStatement.bindByIndex(
        0,
        aMessage.id
      );
      this._deleteMessageAttributesByMessageIDStatement.executeAsync(
        this.trackAsync()
      );
    }
  },
+
  // Quote a string for direct inlining into SQL text by doubling any
  // single quotes and wrapping the result in single quotes.
  _stringSQLQuoter(aString) {
    return "'" + aString.replace(/\'/g, "''") + "'";
  },
  // Numbers pass through unquoted.
  _numberQuoter(aNum) {
    return aNum;
  },
+
+ /* ===== Generic Attribute Support ===== */
+ adjustAttributes(aItem, aAddDBAttributes, aRemoveDBAttributes) {
+ let nounDef = aItem.NOUN_DEF;
+ let dbMeta = nounDef._dbMeta;
+ if (dbMeta.insertAttrStatement === undefined) {
+ dbMeta.insertAttrStatement = this._createAsyncStatement(
+ "INSERT INTO " +
+ nounDef.attrTableName +
+ " (" +
+ nounDef.attrIDColumnName +
+ ", attributeID, value) " +
+ " VALUES (?1, ?2, ?3)"
+ );
+ // we always create this at the same time (right here), no need to check
+ dbMeta.deleteAttrStatement = this._createAsyncStatement(
+ "DELETE FROM " +
+ nounDef.attrTableName +
+ " WHERE " +
+ " attributeID = ?1 AND value = ?2 AND " +
+ nounDef.attrIDColumnName +
+ " = ?3"
+ );
+ }
+
+ let ias = dbMeta.insertAttrStatement;
+ let das = dbMeta.deleteAttrStatement;
+ this._beginTransaction();
+ try {
+ for (let iAttr = 0; iAttr < aAddDBAttributes.length; iAttr++) {
+ let attribValueTuple = aAddDBAttributes[iAttr];
+
+ ias.bindByIndex(0, aItem.id);
+ ias.bindByIndex(1, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ ias.bindByIndex(2, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ ias.bindByIndex(2, attribValueTuple[1]);
+ } else {
+ ias.bindByIndex(2, attribValueTuple[1]);
+ }
+ ias.executeAsync(this.trackAsync());
+ }
+
+ for (let iAttr = 0; iAttr < aRemoveDBAttributes.length; iAttr++) {
+ let attribValueTuple = aRemoveDBAttributes[iAttr];
+
+ das.bindByIndex(0, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ das.bindByIndex(1, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ das.bindByIndex(1, attribValueTuple[1]);
+ } else {
+ das.bindByIndex(1, attribValueTuple[1]);
+ }
+ das.bindByIndex(2, aItem.id);
+ das.executeAsync(this.trackAsync());
+ }
+
+ this._commitTransaction();
+ } catch (ex) {
+ this._log.error("adjustAttributes:", ex);
+ this._rollbackTransaction();
+ throw ex;
+ }
+ },
+
+ clearAttributes(aItem) {
+ let nounDef = aItem.NOUN_DEF;
+ let dbMeta = nounDef._dbMeta;
+ if (dbMeta.clearAttrStatement === undefined) {
+ dbMeta.clearAttrStatement = this._createAsyncStatement(
+ "DELETE FROM " +
+ nounDef.attrTableName +
+ " WHERE " +
+ nounDef.attrIDColumnName +
+ " = ?1"
+ );
+ }
+
+ if (aItem.id != null) {
+ dbMeta.clearAttrstatement.bindByIndex(0, aItem.id);
+ dbMeta.clearAttrStatement.executeAsync(this.trackAsync());
+ }
+ },
+
  /**
   * escapeStringForLIKE is only available on statements, and sometimes we want
   * to use it before we create our statement, so we create a statement just
   * for this reason.
   */
  get _escapeLikeStatement() {
    // The SQL is a no-op; only the statement object's helper methods matter.
    let statement = this._createAsyncStatement("SELECT 0");
    this.__defineGetter__("_escapeLikeStatement", () => statement);
    return this._escapeLikeStatement;
  },
+
  /**
   * Generator that converts attribute values into database values and groups
   * them by the attribute id they should be queried under, yielding
   * [attributeID, dbValues] pairs.
   */
  *_convertToDBValuesAndGroupByAttributeID(aAttrDef, aValues) {
    let objectNounDef = aAttrDef.objectNounDef;
    // Parameter-less nouns map everything to a single attribute id, so all
    // values can be emitted as one group.
    if (!objectNounDef.usesParameter) {
      let dbValues = [];
      for (let iValue = 0; iValue < aValues.length; iValue++) {
        let value = aValues[iValue];
        // If the empty set is significant and it's an empty signifier, emit
        // the appropriate dbvalue.
        if (value == null && aAttrDef.emptySetIsSignificant) {
          yield [this.kEmptySetAttrId, [aAttrDef.id]];
          // Bail if the only value was us; we don't want to add a
          // value-posessing wildcard into the mix.
          if (aValues.length == 1) {
            return;
          }
          continue;
        }
        let dbValue = objectNounDef.toParamAndValue(value)[1];
        if (dbValue != null) {
          dbValues.push(dbValue);
        }
      }
      yield [aAttrDef.special ? undefined : aAttrDef.id, dbValues];
      return;
    }

    // Parameterized nouns: each distinct parameter binds to its own
    // attribute id, so group runs of values sharing a parameter.
    let curParam, attrID, dbValues;
    let attrDBDef = aAttrDef.dbDef;
    for (let iValue = 0; iValue < aValues.length; iValue++) {
      let value = aValues[iValue];
      // If the empty set is significant and it's an empty signifier, emit
      // the appropriate dbvalue.
      if (value == null && aAttrDef.emptySetIsSignificant) {
        yield [this.kEmptySetAttrId, [aAttrDef.id]];
        // Bail if the only value was us; we don't want to add a
        // value-posessing wildcard into the mix.
        if (aValues.length == 1) {
          return;
        }
        continue;
      }
      let [dbParam, dbValue] = objectNounDef.toParamAndValue(value);
      if (curParam === undefined) {
        // First (non-empty) value establishes the current group.
        curParam = dbParam;
        attrID = attrDBDef.bindParameter(curParam);
        if (dbValue != null) {
          dbValues = [dbValue];
        } else {
          dbValues = [];
        }
      } else if (curParam == dbParam) {
        if (dbValue != null) {
          dbValues.push(dbValue);
        }
      } else {
        // Parameter changed: flush the previous group and start a new one.
        yield [attrID, dbValues];
        curParam = dbParam;
        attrID = attrDBDef.bindParameter(curParam);
        if (dbValue != null) {
          dbValues = [dbValue];
        } else {
          dbValues = [];
        }
      }
    }
    // Flush the final group, if any values made it past the empty check.
    if (dbValues !== undefined) {
      yield [attrID, dbValues];
    }
  },
+
+  /**
+   * Convert a list of [lowerBound, upperBound] ranges into SQL condition
+   * strings on aValueColumnName, grouped by the attribute id under which
+   * they apply.  Yields [attributeID, dbStrings] pairs; a null bound becomes
+   * a one-sided <= / >= comparison, otherwise BETWEEN is used.
+   */
+  *_convertRangesToDBStringsAndGroupByAttributeID(
+    aAttrDef,
+    aValues,
+    aValueColumnName
+  ) {
+    let objectNounDef = aAttrDef.objectNounDef;
+    if (!objectNounDef.usesParameter) {
+      let dbStrings = [];
+      for (let iValue = 0; iValue < aValues.length; iValue++) {
+        let [lowerVal, upperVal] = aValues[iValue];
+        // they both can't be null. that is the law.
+        if (lowerVal == null) {
+          dbStrings.push(
+            aValueColumnName +
+              " <= " +
+              objectNounDef.toParamAndValue(upperVal)[1]
+          );
+        } else if (upperVal == null) {
+          dbStrings.push(
+            aValueColumnName +
+              " >= " +
+              objectNounDef.toParamAndValue(lowerVal)[1]
+          );
+        } else {
+          // No one is null!
+          dbStrings.push(
+            aValueColumnName +
+              " BETWEEN " +
+              objectNounDef.toParamAndValue(lowerVal)[1] +
+              " AND " +
+              objectNounDef.toParamAndValue(upperVal)[1]
+          );
+        }
+      }
+      yield [aAttrDef.special ? undefined : aAttrDef.id, dbStrings];
+      return;
+    }
+
+    let curParam, attrID, dbStrings;
+    let attrDBDef = aAttrDef.dbDef;
+    for (let iValue = 0; iValue < aValues.length; iValue++) {
+      let [lowerVal, upperVal] = aValues[iValue];
+
+      let dbString, dbParam, lowerDBVal, upperDBVal;
+      // they both can't be null. that is the law.
+      if (lowerVal == null) {
+        [dbParam, upperDBVal] = objectNounDef.toParamAndValue(upperVal);
+        dbString = aValueColumnName + " <= " + upperDBVal;
+      } else if (upperVal == null) {
+        [dbParam, lowerDBVal] = objectNounDef.toParamAndValue(lowerVal);
+        dbString = aValueColumnName + " >= " + lowerDBVal;
+      } else {
+        // no one is null!
+        [dbParam, lowerDBVal] = objectNounDef.toParamAndValue(lowerVal);
+        dbString =
+          aValueColumnName +
+          " BETWEEN " +
+          lowerDBVal +
+          " AND " +
+          objectNounDef.toParamAndValue(upperVal)[1];
+      }
+
+      if (curParam === undefined) {
+        curParam = dbParam;
+        attrID = attrDBDef.bindParameter(curParam);
+        dbStrings = [dbString];
+      } else if (curParam === dbParam) {
+        dbStrings.push(dbString);
+      } else {
+        // Parameter changed: flush the accumulated run and start a new one.
+        yield [attrID, dbStrings];
+        curParam = dbParam;
+        attrID = attrDBDef.bindParameter(curParam);
+        dbStrings = [dbString];
+      }
+    }
+    // Flush the trailing run, if any ranges were seen at all.
+    if (dbStrings !== undefined) {
+      yield [attrID, dbStrings];
+    }
+  },
+
+  /* eslint-disable complexity */
+  /**
+   * Perform a database query given a GlodaQueryClass instance that specifies
+   * a set of constraints relating to the noun type associated with the query.
+   * A GlodaCollection is returned containing the results of the look-up.
+   * By default the collection is "live", and will mutate (generating events to
+   * its listener) as the state of the database changes.
+   * This functionality is made user/extension visible by the Query's
+   * getCollection (asynchronous).
+   *
+   * @param aQuery The query whose constraints/unions drive the generated SQL.
+   * @param aListener Collection listener to notify of results and changes.
+   * @param aListenerData Opaque data to associate with the collection.
+   * @param aExistingCollection Optional collection to reuse rather than
+   *     creating and registering a new one.
+   * @param aMasterCollection Master collection when this is a subordinate
+   *     look-up.
+   * @param [aArgs] See |GlodaQuery.getCollection| for info.
+   */
+  queryFromQuery(
+    aQuery,
+    aListener,
+    aListenerData,
+    aExistingCollection,
+    aMasterCollection,
+    aArgs
+  ) {
+    // when changing this method, be sure that GlodaQuery's testMatch function
+    // likewise has its changes made.
+    let nounDef = aQuery._nounDef;
+
+    let whereClauses = [];
+    let unionQueries = [aQuery].concat(aQuery._unions);
+    let boundArgs = [];
+
+    // Use the dbQueryValidityConstraintSuffix to provide constraints that
+    // filter items down to those that are valid for the query mechanism to
+    // return.  For example, in the case of messages, deleted or ghost
+    // messages should not be returned by this query layer.  We require
+    // hand-rolled SQL to do that for now.
+    let validityConstraintSuffix;
+    if (
+      nounDef.dbQueryValidityConstraintSuffix &&
+      !aQuery.options.noDbQueryValidityConstraints
+    ) {
+      validityConstraintSuffix = nounDef.dbQueryValidityConstraintSuffix;
+    } else {
+      validityConstraintSuffix = "";
+    }
+
+    // Each union contributes one "id IN (...)" WHERE clause; constraints
+    // within a union are INTERSECTed together below.
+    for (let iUnion = 0; iUnion < unionQueries.length; iUnion++) {
+      let curQuery = unionQueries[iUnion];
+      let selects = [];
+
+      let lastConstraintWasSpecial = false;
+      let curConstraintIsSpecial;
+
+      for (
+        let iConstraint = 0;
+        iConstraint < curQuery._constraints.length;
+        iConstraint++
+      ) {
+        let constraint = curQuery._constraints[iConstraint];
+        let [constraintType, attrDef] = constraint;
+        let constraintValues = constraint.slice(2);
+
+        let tableName, idColumnName, valueColumnName;
+        if (constraintType == GlodaConstants.kConstraintIdIn) {
+          // we don't need any of the next cases' setup code, and we especially
+          // would prefer that attrDef isn't accessed since it's null for us.
+        } else if (attrDef.special) {
+          tableName = nounDef.tableName;
+          idColumnName = "id"; // canonical id for a table is "id".
+          valueColumnName = attrDef.specialColumnName;
+          curConstraintIsSpecial = true;
+        } else {
+          tableName = nounDef.attrTableName;
+          idColumnName = nounDef.attrIDColumnName;
+          valueColumnName = "value";
+          curConstraintIsSpecial = false;
+        }
+
+        let select = null,
+          test = null;
+        if (constraintType === GlodaConstants.kConstraintIdIn) {
+          // this is somewhat of a trick. this does mean that this can be the
+          // only constraint. Namely, our idiom is:
+          // SELECT * FROM blah WHERE id IN (a INTERSECT b INTERSECT c)
+          // but if we only have 'a', then that becomes "...IN (a)", and if
+          // 'a' is not a select but a list of id's... tricky, no?
+          select = constraintValues.join(",");
+        } else if (constraintType === GlodaConstants.kConstraintIn) {
+          // @testpoint gloda.datastore.sqlgen.kConstraintIn
+          let clauses = [];
+          for (let [
+            attrID,
+            values,
+          ] of this._convertToDBValuesAndGroupByAttributeID(
+            attrDef,
+            constraintValues
+          )) {
+            let clausePart;
+            if (attrID !== undefined) {
+              clausePart =
+                "(attributeID = " + attrID + (values.length ? " AND " : "");
+            } else {
+              clausePart = "(";
+            }
+            if (values.length) {
+              // strings need to be escaped, we would use ? binding, except
+              // that gets mad if we have too many strings... so we use our
+              // own escaping logic. correctly escaping is easy, but it still
+              // feels wrong to do it. (just double the quote character...)
+              if (
+                "special" in attrDef &&
+                attrDef.special == GlodaConstants.kSpecialString
+              ) {
+                clausePart +=
+                  valueColumnName +
+                  " IN (" +
+                  values
+                    .map(v => "'" + v.replace(/\'/g, "''") + "'")
+                    .join(",") +
+                  "))";
+              } else {
+                clausePart +=
+                  valueColumnName + " IN (" + values.join(",") + "))";
+              }
+            } else {
+              clausePart += ")";
+            }
+            clauses.push(clausePart);
+          }
+          test = clauses.join(" OR ");
+        } else if (constraintType === GlodaConstants.kConstraintRanges) {
+          // @testpoint gloda.datastore.sqlgen.kConstraintRanges
+          let clauses = [];
+          for (let [
+            attrID,
+            dbStrings,
+          ] of this._convertRangesToDBStringsAndGroupByAttributeID(
+            attrDef,
+            constraintValues,
+            valueColumnName
+          )) {
+            if (attrID !== undefined) {
+              clauses.push(
+                "(attributeID = " +
+                  attrID +
+                  " AND (" +
+                  dbStrings.join(" OR ") +
+                  "))"
+              );
+            } else {
+              clauses.push("(" + dbStrings.join(" OR ") + ")");
+            }
+          }
+          test = clauses.join(" OR ");
+        } else if (constraintType === GlodaConstants.kConstraintEquals) {
+          // @testpoint gloda.datastore.sqlgen.kConstraintEquals
+          let clauses = [];
+          for (let [
+            attrID,
+            values,
+          ] of this._convertToDBValuesAndGroupByAttributeID(
+            attrDef,
+            constraintValues
+          )) {
+            if (attrID !== undefined) {
+              clauses.push(
+                "(attributeID = " +
+                  attrID +
+                  " AND (" +
+                  values.map(_ => valueColumnName + " = ?").join(" OR ") +
+                  "))"
+              );
+            } else {
+              clauses.push(
+                "(" +
+                  values.map(_ => valueColumnName + " = ?").join(" OR ") +
+                  ")"
+              );
+            }
+            boundArgs.push.apply(boundArgs, values);
+          }
+          test = clauses.join(" OR ");
+        } else if (constraintType === GlodaConstants.kConstraintStringLike) {
+          // @testpoint gloda.datastore.sqlgen.kConstraintStringLike
+          let likePayload = "";
+          for (let valuePart of constraintValues) {
+            if (typeof valuePart == "string") {
+              likePayload += this._escapeLikeStatement.escapeStringForLIKE(
+                valuePart,
+                "/"
+              );
+            } else {
+              // Non-string parts act as wildcards.
+              likePayload += "%";
+            }
+          }
+          test = valueColumnName + " LIKE ? ESCAPE '/'";
+          boundArgs.push(likePayload);
+        } else if (constraintType === GlodaConstants.kConstraintFulltext) {
+          // @testpoint gloda.datastore.sqlgen.kConstraintFulltext
+          let matchStr = constraintValues[0];
+          select =
+            "SELECT docid FROM " +
+            nounDef.tableName +
+            "Text" +
+            " WHERE " +
+            attrDef.specialColumnName +
+            " MATCH ?";
+          boundArgs.push(matchStr);
+        }
+
+        // Consecutive "special" (same-table column) constraints share a
+        // single SELECT by ANDing their tests together.
+        if (curConstraintIsSpecial && lastConstraintWasSpecial && test) {
+          selects[selects.length - 1] += " AND " + test;
+        } else if (select) {
+          selects.push(select);
+        } else if (test) {
+          select =
+            "SELECT " + idColumnName + " FROM " + tableName + " WHERE " + test;
+          selects.push(select);
+        } else {
+          this._log.warn(
+            "Unable to translate constraint of type " +
+              constraintType +
+              " on attribute bound as " +
+              nounDef.name
+          );
+        }
+
+        lastConstraintWasSpecial = curConstraintIsSpecial;
+      }
+
+      if (selects.length) {
+        whereClauses.push(
+          "id IN (" +
+            selects.join(" INTERSECT ") +
+            ")" +
+            validityConstraintSuffix
+        );
+      }
+    }
+
+    let sqlString = "SELECT * FROM " + nounDef.tableName;
+    if (!aQuery.options.noMagic) {
+      if (
+        aQuery.options.noDbQueryValidityConstraints &&
+        nounDef.dbQueryJoinMagicWithNoValidityConstraints
+      ) {
+        sqlString += nounDef.dbQueryJoinMagicWithNoValidityConstraints;
+      } else if (nounDef.dbQueryJoinMagic) {
+        sqlString += nounDef.dbQueryJoinMagic;
+      }
+    }
+
+    if (whereClauses.length) {
+      sqlString += " WHERE (" + whereClauses.join(") OR (") + ")";
+    }
+
+    if (aQuery.options.explicitSQL) {
+      sqlString = aQuery.options.explicitSQL;
+    }
+
+    if (aQuery.options.outerWrapColumns) {
+      sqlString =
+        "SELECT *, " +
+        aQuery.options.outerWrapColumns.join(", ") +
+        " FROM (" +
+        sqlString +
+        ")";
+    }
+
+    if (aQuery._order.length) {
+      let orderClauses = [];
+      for (let colName of aQuery._order) {
+        // A leading "-" requests descending order on that column.
+        if (colName.startsWith("-")) {
+          orderClauses.push(colName.substring(1) + " DESC");
+        } else {
+          orderClauses.push(colName + " ASC");
+        }
+      }
+      sqlString += " ORDER BY " + orderClauses.join(", ");
+    }
+
+    if (aQuery._limit) {
+      if (!("limitClauseAlreadyIncluded" in aQuery.options)) {
+        sqlString += " LIMIT ?";
+      }
+      boundArgs.push(aQuery._limit);
+    }
+
+    this._log.debug("QUERY FROM QUERY: " + sqlString + " ARGS: " + boundArgs);
+
+    // if we want to become explicit, replace the query (which has already
+    // provided our actual SQL query) with an explicit query. This will be
+    // what gets attached to the collection in the event we create a new
+    // collection. If we are reusing one, we assume that the explicitness,
+    // if desired, already happened.
+    // (we do not need to pass an argument to the explicitQueryClass constructor
+    // because it will be passed in to the collection's constructor, which will
+    // ensure that the collection attribute gets set.)
+    if (aArgs && "becomeExplicit" in aArgs && aArgs.becomeExplicit) {
+      aQuery = new nounDef.explicitQueryClass();
+    } else if (aArgs && "becomeNull" in aArgs && aArgs.becomeNull) {
+      aQuery = new nounDef.nullQueryClass();
+    }
+
+    return this._queryFromSQLString(
+      sqlString,
+      boundArgs,
+      nounDef,
+      aQuery,
+      aListener,
+      aListenerData,
+      aExistingCollection,
+      aMasterCollection
+    );
+  },
+  /* eslint-enable complexity */
+
+  /**
+   * Create an async statement from the generated SQL, bind its arguments,
+   * execute it into a (possibly pre-existing) GlodaCollection, and return
+   * that collection.  Results stream in asynchronously via
+   * QueryFromQueryCallback.
+   */
+  _queryFromSQLString(
+    aSqlString,
+    aBoundArgs,
+    aNounDef,
+    aQuery,
+    aListener,
+    aListenerData,
+    aExistingCollection,
+    aMasterCollection
+  ) {
+    let statement = this._createAsyncStatement(aSqlString, true);
+    for (let [iBinding, bindingValue] of aBoundArgs.entries()) {
+      this._bindVariant(statement, iBinding, bindingValue);
+    }
+
+    let collection;
+    if (aExistingCollection) {
+      collection = aExistingCollection;
+    } else {
+      collection = new GlodaCollection(
+        aNounDef,
+        [],
+        aQuery,
+        aListener,
+        aMasterCollection
+      );
+      GlodaCollectionManager.registerCollection(collection);
+      // we don't want to overwrite the existing listener or its data, but this
+      // does raise the question about what should happen if we get passed in
+      // a different listener and/or data.
+      if (aListenerData !== undefined) {
+        collection.data = aListenerData;
+      }
+    }
+    if (aListenerData) {
+      if (collection.dataStack) {
+        collection.dataStack.push(aListenerData);
+      } else {
+        collection.dataStack = [aListenerData];
+      }
+    }
+
+    statement.executeAsync(
+      new QueryFromQueryCallback(statement, aNounDef, collection)
+    );
+    // Finalizing immediately is safe; the pending async execution keeps the
+    // underlying statement alive until it completes.
+    statement.finalize();
+    return collection;
+  },
+
+  /* eslint-disable complexity */
+  /**
+   * Populate a noun instance's attributes from its persisted JSON blob and
+   * register any referenced objects that must be loaded before the item is
+   * complete.  Pending dependencies are recorded on aItem._deps for
+   * loadNounDeferredDeps to resolve once the referenced objects are loaded.
+   *
+   * @param aItem The freshly-loaded noun instance (may carry _jsonText).
+   * @param aReferencesByNounID Accumulator: noun id -> {itemID: null} of
+   *     referenced items that need loading.
+   * @param aInverseReferencesByNounID Accumulator for parent -> children
+   *     inverse references, keyed the same way.
+   * @returns {boolean} true if the item has dependencies needing resolution.
+   */
+  loadNounItem(aItem, aReferencesByNounID, aInverseReferencesByNounID) {
+    let attribIDToDBDefAndParam = this._attributeIDToDBDefAndParam;
+
+    let hadDeps = aItem._deps != null;
+    let deps = aItem._deps || {};
+    let hasDeps = false;
+
+    // First, the "special" column-backed attributes (parent/children links).
+    for (let attrib of aItem.NOUN_DEF.specialLoadAttribs) {
+      let objectNounDef = attrib.objectNounDef;
+
+      if (
+        "special" in attrib &&
+        attrib.special === GlodaConstants.kSpecialColumnChildren
+      ) {
+        let invReferences = aInverseReferencesByNounID[objectNounDef.id];
+        if (invReferences === undefined) {
+          invReferences = aInverseReferencesByNounID[objectNounDef.id] = {};
+        }
+        // only contribute if it's not already pending or there
+        if (
+          !(attrib.id in deps) &&
+          aItem[attrib.storageAttributeName] == null
+        ) {
+          // this._log.debug(" Adding inv ref for: " + aItem.id);
+          if (!(aItem.id in invReferences)) {
+            invReferences[aItem.id] = null;
+          }
+          deps[attrib.id] = null;
+          hasDeps = true;
+        }
+      } else if (
+        "special" in attrib &&
+        attrib.special === GlodaConstants.kSpecialColumnParent
+      ) {
+        let references = aReferencesByNounID[objectNounDef.id];
+        if (references === undefined) {
+          references = aReferencesByNounID[objectNounDef.id] = {};
+        }
+        // nothing to contribute if it's already there
+        if (
+          !(attrib.id in deps) &&
+          aItem[attrib.valueStorageAttributeName] == null
+        ) {
+          let parentID = aItem[attrib.idStorageAttributeName];
+          if (!(parentID in references)) {
+            references[parentID] = null;
+          }
+          // this._log.debug(" Adding parent ref for: " +
+          //  aItem[attrib.idStorageAttributeName]);
+          deps[attrib.id] = null;
+          hasDeps = true;
+        } else {
+          this._log.debug(
+            " paranoia value storage: " +
+              aItem[attrib.valueStorageAttributeName]
+          );
+        }
+      }
+    }
+
+    // bail here if arbitrary values are not allowed, there just is no
+    // encoded json, or we already had dependencies for this guy, implying
+    // the json pass has already been performed
+    if (!aItem.NOUN_DEF.allowsArbitraryAttrs || !aItem._jsonText || hadDeps) {
+      if (hasDeps) {
+        aItem._deps = deps;
+      }
+      return hasDeps;
+    }
+
+    // this._log.debug(" load json: " + aItem._jsonText);
+    let jsonDict = JSON.parse(aItem._jsonText);
+    delete aItem._jsonText;
+
+    // Iterate over the attributes on the item
+    for (let attribId in jsonDict) {
+      let jsonValue = jsonDict[attribId];
+      // It is technically impossible for attribute ids to go away at this
+      // point in time.  This would require someone to monkey around with
+      // our schema.  But we will introduce this functionality one day, so
+      // prepare for it now.
+      if (!(attribId in attribIDToDBDefAndParam)) {
+        continue;
+      }
+      // find the attribute definition that corresponds to this key
+      let dbAttrib = attribIDToDBDefAndParam[attribId][0];
+
+      let attrib = dbAttrib.attrDef;
+      // The attribute definition will fail to exist if no one defines the
+      // attribute anymore.  This can happen for many reasons: an extension
+      // was uninstalled, an extension was changed and no longer defines the
+      // attribute, or patches are being applied/unapplied.  Ignore this
+      // attribute if missing.
+      if (attrib == null) {
+        continue;
+      }
+      let objectNounDef = attrib.objectNounDef;
+
+      // If it has a tableName member but no fromJSON, then it's a persistent
+      // object that needs to be loaded, which also means we need to hold it in
+      // a collection owned by our collection.
+      // (If it has a fromJSON method, then it's a special case like
+      // MimeTypeNoun where it is authoritatively backed by a table but caches
+      // everything into memory.  There is no case where fromJSON would be
+      // implemented but we should still be doing database lookups.)
+      if (objectNounDef.tableName && !objectNounDef.fromJSON) {
+        let references = aReferencesByNounID[objectNounDef.id];
+        if (references === undefined) {
+          references = aReferencesByNounID[objectNounDef.id] = {};
+        }
+
+        if (attrib.singular) {
+          if (!(jsonValue in references)) {
+            references[jsonValue] = null;
+          }
+        } else {
+          for (let key in jsonValue) {
+            let anID = jsonValue[key];
+            if (!(anID in references)) {
+              references[anID] = null;
+            }
+          }
+        }
+
+        deps[attribId] = jsonValue;
+        hasDeps = true;
+      } else if (objectNounDef.contributeObjDependencies) {
+        /* if it has custom contribution logic, use it */
+        if (
+          objectNounDef.contributeObjDependencies(
+            jsonValue,
+            aReferencesByNounID,
+            aInverseReferencesByNounID
+          )
+        ) {
+          deps[attribId] = jsonValue;
+          hasDeps = true;
+        } else {
+          // just propagate the value, it's some form of simple sentinel
+          aItem[attrib.boundName] = jsonValue;
+        }
+      } else if (objectNounDef.fromJSON) {
+        // otherwise, the value just needs to be de-persisted, or...
+        if (attrib.singular) {
+          // For consistency with the non-singular case, we don't assign the
+          // attribute if undefined is returned.
+          let deserialized = objectNounDef.fromJSON(jsonValue, aItem);
+          if (deserialized !== undefined) {
+            aItem[attrib.boundName] = deserialized;
+          }
+        } else {
+          // Convert all the entries in the list filtering out any undefined
+          // values. (TagNoun will do this if the tag is now dead.)
+          let outList = [];
+          for (let key in jsonValue) {
+            let val = jsonValue[key];
+            let deserialized = objectNounDef.fromJSON(val, aItem);
+            if (deserialized !== undefined) {
+              outList.push(deserialized);
+            }
+          }
+          // Note: It's possible if we filtered things out that this is an empty
+          // list.  This is acceptable because this is somewhat of an unusual
+          // case and I don't think we want to further complicate our
+          // semantics.
+          aItem[attrib.boundName] = outList;
+        }
+      } else {
+        // it's fine as is
+        aItem[attrib.boundName] = jsonValue;
+      }
+    }
+
+    if (hasDeps) {
+      aItem._deps = deps;
+    }
+    return hasDeps;
+  },
+  /* eslint-enable complexity */
+
+  /**
+   * Resolve the dependencies recorded by loadNounItem once the referenced
+   * objects have been loaded, assigning them onto the item and deleting
+   * aItem._deps when done.  No-op if the item had no recorded dependencies.
+   */
+  loadNounDeferredDeps(aItem, aReferencesByNounID, aInverseReferencesByNounID) {
+    if (aItem._deps === undefined) {
+      return;
+    }
+
+    let attribIDToDBDefAndParam = this._attributeIDToDBDefAndParam;
+
+    for (let [attribId, jsonValue] of Object.entries(aItem._deps)) {
+      let dbAttrib = attribIDToDBDefAndParam[attribId][0];
+      let attrib = dbAttrib.attrDef;
+
+      let objectNounDef = attrib.objectNounDef;
+      let references = aReferencesByNounID[objectNounDef.id];
+      if (attrib.special) {
+        if (attrib.special === GlodaConstants.kSpecialColumnChildren) {
+          let inverseReferences = aInverseReferencesByNounID[objectNounDef.id];
+          // this._log.info("inverse assignment: " + objectNounDef.id +
+          //    " of " + aItem.id)
+          aItem[attrib.storageAttributeName] = inverseReferences[aItem.id];
+        } else if (attrib.special === GlodaConstants.kSpecialColumnParent) {
+          // this._log.info("parent column load: " + objectNounDef.id +
+          //    " storage value: " + aItem[attrib.idStorageAttributeName]);
+          aItem[attrib.valueStorageAttributeName] =
+            references[aItem[attrib.idStorageAttributeName]];
+        }
+      } else if (objectNounDef.tableName) {
+        // Table-backed values resolve through the loaded references map.
+        if (attrib.singular) {
+          aItem[attrib.boundName] = references[jsonValue];
+        } else {
+          aItem[attrib.boundName] = Object.keys(jsonValue).map(
+            key => references[jsonValue[key]]
+          );
+        }
+      } else if (objectNounDef.contributeObjDependencies) {
+        aItem[attrib.boundName] = objectNounDef.resolveObjDependencies(
+          jsonValue,
+          aReferencesByNounID,
+          aInverseReferencesByNounID
+        );
+      }
+      // there is no other case
+    }
+
+    delete aItem._deps;
+  },
+
+ /* ********** Contact ********** */
+ _nextContactId: 1,
+
+ _populateContactManagedId() {
+ let stmt = this._createSyncStatement("SELECT MAX(id) FROM contacts", true);
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ this._nextContactId = stmt.getInt64(0) + 1;
+ }
+ stmt.finalize();
+ },
+
+  /**
+   * Lazily-created async INSERT statement for the contacts table; the getter
+   * replaces itself with one returning the cached statement on first use.
+   */
+  get _insertContactStatement() {
+    let statement = this._createAsyncStatement(
+      "INSERT INTO contacts (id, directoryUUID, contactUUID, name, popularity,\
+                           frecency, jsonAttributes) \
+              VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
+    );
+    this.__defineGetter__("_insertContactStatement", () => statement);
+    return this._insertContactStatement;
+  },
+
+ createContact(aDirectoryUUID, aContactUUID, aName, aPopularity, aFrecency) {
+ let contactID = this._nextContactId++;
+ let contact = new GlodaContact(
+ this,
+ contactID,
+ aDirectoryUUID,
+ aContactUUID,
+ aName,
+ aPopularity,
+ aFrecency
+ );
+ return contact;
+ },
+
+ insertContact(aContact) {
+ let ics = this._insertContactStatement;
+ ics.bindByIndex(0, aContact.id);
+ if (aContact.directoryUUID == null) {
+ ics.bindByIndex(1, null);
+ } else {
+ ics.bindByIndex(1, aContact.directoryUUID);
+ }
+ if (aContact.contactUUID == null) {
+ ics.bindByIndex(2, null);
+ } else {
+ ics.bindByIndex(2, aContact.contactUUID);
+ }
+ ics.bindByIndex(3, aContact.name);
+ ics.bindByIndex(4, aContact.popularity);
+ ics.bindByIndex(5, aContact.frecency);
+ if (aContact._jsonText) {
+ ics.bindByIndex(6, aContact._jsonText);
+ } else {
+ ics.bindByIndex(6, null);
+ }
+
+ ics.executeAsync(this.trackAsync());
+
+ return aContact;
+ },
+
+  /**
+   * Lazily-created async UPDATE statement for the contacts table; the getter
+   * replaces itself with one returning the cached statement on first use.
+   * ?7 is the WHERE-clause id.
+   */
+  get _updateContactStatement() {
+    let statement = this._createAsyncStatement(
+      "UPDATE contacts SET directoryUUID = ?1, \
+                           contactUUID = ?2, \
+                           name = ?3, \
+                           popularity = ?4, \
+                           frecency = ?5, \
+                           jsonAttributes = ?6 \
+                       WHERE id = ?7"
+    );
+    this.__defineGetter__("_updateContactStatement", () => statement);
+    return this._updateContactStatement;
+  },
+
+ updateContact(aContact) {
+ let ucs = this._updateContactStatement;
+ ucs.bindByIndex(6, aContact.id);
+ ucs.bindByIndex(0, aContact.directoryUUID);
+ ucs.bindByIndex(1, aContact.contactUUID);
+ ucs.bindByIndex(2, aContact.name);
+ ucs.bindByIndex(3, aContact.popularity);
+ ucs.bindByIndex(4, aContact.frecency);
+ if (aContact._jsonText) {
+ ucs.bindByIndex(5, aContact._jsonText);
+ } else {
+ ucs.bindByIndex(5, null);
+ }
+
+ ucs.executeAsync(this.trackAsync());
+ },
+
+  /**
+   * Construct a GlodaContact from a contacts-table row.
+   * NOTE(review): the column indices assume the table's physical order is
+   * (id, directoryUUID, contactUUID, popularity, frecency, name,
+   * jsonAttributes) -- name is read from index 5 while popularity/frecency
+   * come from 3/4.  Verify against the schema definition before changing.
+   */
+  _contactFromRow(aRow) {
+    let directoryUUID, contactUUID, jsonText;
+    // NULL-able columns are mapped to null (jsonText to undefined).
+    if (aRow.getTypeOfIndex(1) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+      directoryUUID = null;
+    } else {
+      directoryUUID = aRow.getString(1);
+    }
+    if (aRow.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+      contactUUID = null;
+    } else {
+      contactUUID = aRow.getString(2);
+    }
+    if (aRow.getTypeOfIndex(6) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+      jsonText = undefined;
+    } else {
+      jsonText = aRow.getString(6);
+    }
+
+    return new GlodaContact(
+      this,
+      aRow.getInt64(0),
+      directoryUUID,
+      contactUUID,
+      aRow.getString(5),
+      aRow.getInt64(3),
+      aRow.getInt64(4),
+      jsonText
+    );
+  },
+
+  /**
+   * Lazily-created synchronous statement for id-based contact lookup; the
+   * getter replaces itself with one returning the cached statement.
+   */
+  get _selectContactByIDStatement() {
+    let statement = this._createSyncStatement(
+      "SELECT * FROM contacts WHERE id = ?1"
+    );
+    this.__defineGetter__("_selectContactByIDStatement", () => statement);
+    return this._selectContactByIDStatement;
+  },
+
+  /**
+   * Synchronous contact lookup currently only for use by gloda's creation
+   * of the concept of "me". It is okay for it to be doing synchronous work
+   * because it is part of the startup process before any user code could
+   * have gotten a reference to Gloda, but no one else should do this.
+   */
+  getContactByID(aContactID) {
+    // Consult the collection manager's cache before hitting the database.
+    let contact = GlodaCollectionManager.cacheLookupOne(
+      GlodaContact.prototype.NOUN_ID,
+      aContactID
+    );
+
+    if (contact === null) {
+      let scbi = this._selectContactByIDStatement;
+      scbi.bindByIndex(0, aContactID);
+      if (this._syncStep(scbi)) {
+        contact = this._contactFromRow(scbi);
+        GlodaCollectionManager.itemLoaded(contact);
+      }
+      scbi.reset();
+    }
+
+    return contact;
+  },
+
+ /* ********** Identity ********** */
+ /** next identity id, managed for async use reasons. */
+ _nextIdentityId: 1,
+ _populateIdentityManagedId() {
+ let stmt = this._createSyncStatement(
+ "SELECT MAX(id) FROM identities",
+ true
+ );
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ this._nextIdentityId = stmt.getInt64(0) + 1;
+ }
+ stmt.finalize();
+ },
+
+  /**
+   * Lazily-created async INSERT statement for the identities table; the
+   * getter replaces itself with one returning the cached statement.
+   */
+  get _insertIdentityStatement() {
+    let statement = this._createAsyncStatement(
+      "INSERT INTO identities (id, contactID, kind, value, description, relay) \
+              VALUES (?1, ?2, ?3, ?4, ?5, ?6)"
+    );
+    this.__defineGetter__("_insertIdentityStatement", () => statement);
+    return this._insertIdentityStatement;
+  },
+
+ createIdentity(aContactID, aContact, aKind, aValue, aDescription, aIsRelay) {
+ let identityID = this._nextIdentityId++;
+ let iis = this._insertIdentityStatement;
+ iis.bindByIndex(0, identityID);
+ iis.bindByIndex(1, aContactID);
+ iis.bindByIndex(2, aKind);
+ iis.bindByIndex(3, aValue);
+ iis.bindByIndex(4, aDescription);
+ iis.bindByIndex(5, aIsRelay ? 1 : 0);
+ iis.executeAsync(this.trackAsync());
+
+ let identity = new GlodaIdentity(
+ this,
+ identityID,
+ aContactID,
+ aContact,
+ aKind,
+ aValue,
+ aDescription,
+ aIsRelay
+ );
+ GlodaCollectionManager.itemsAdded(identity.NOUN_ID, [identity]);
+ return identity;
+ },
+
+  /**
+   * Lazily-created async UPDATE statement for the identities table; the
+   * getter replaces itself with one returning the cached statement.
+   * ?6 is the WHERE-clause id.
+   */
+  get _updateIdentityStatement() {
+    let statement = this._createAsyncStatement(
+      "UPDATE identities SET contactID = ?1, \
+                             kind = ?2, \
+                             value = ?3, \
+                             description = ?4, \
+                             relay = ?5 \
+                         WHERE id = ?6"
+    );
+    this.__defineGetter__("_updateIdentityStatement", () => statement);
+    return this._updateIdentityStatement;
+  },
+
+ updateIdentity(aIdentity) {
+ let ucs = this._updateIdentityStatement;
+ ucs.bindByIndex(5, aIdentity.id);
+ ucs.bindByIndex(0, aIdentity.contactID);
+ ucs.bindByIndex(1, aIdentity.kind);
+ ucs.bindByIndex(2, aIdentity.value);
+ ucs.bindByIndex(3, aIdentity.description);
+ ucs.bindByIndex(4, aIdentity.relay ? 1 : 0);
+
+ ucs.executeAsync(this.trackAsync());
+ },
+
+  /**
+   * Construct a GlodaIdentity from an identities-table row
+   * (id, contactID, kind, value, description, relay).  The contact object
+   * reference is left null for the caller to resolve.
+   */
+  _identityFromRow(aRow) {
+    return new GlodaIdentity(
+      this,
+      aRow.getInt64(0),
+      aRow.getInt64(1),
+      null,
+      aRow.getString(2),
+      aRow.getString(3),
+      aRow.getString(4),
+      !!aRow.getInt32(5)
+    );
+  },
+
+  /**
+   * Lazily-created synchronous statement for (kind, value) identity lookup;
+   * the getter replaces itself with one returning the cached statement.
+   */
+  get _selectIdentityByKindValueStatement() {
+    let statement = this._createSyncStatement(
+      "SELECT * FROM identities WHERE kind = ?1 AND value = ?2"
+    );
+    this.__defineGetter__(
+      "_selectIdentityByKindValueStatement",
+      () => statement
+    );
+    return this._selectIdentityByKindValueStatement;
+  },
+
+ /**
+ * Synchronous lookup of an identity by kind and value, only for use by
+ * the legacy gloda core code that creates a concept of "me".
+ * Ex: (email, foo@example.com)
+ */
+ getIdentity(aKind, aValue) {
+ let identity = GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaIdentity.prototype.NOUN_ID,
+ aKind + "@" + aValue
+ );
+
+ let ibkv = this._selectIdentityByKindValueStatement;
+ ibkv.bindByIndex(0, aKind);
+ ibkv.bindByIndex(1, aValue);
+ if (this._syncStep(ibkv)) {
+ identity = this._identityFromRow(ibkv);
+ GlodaCollectionManager.itemLoaded(identity);
+ }
+ ibkv.reset();
+
+ return identity;
+ },
+};
+GlodaAttributeDBDef.prototype._datastore = GlodaDatastore;
+GlodaConversation.prototype._datastore = GlodaDatastore;
+GlodaFolder.prototype._datastore = GlodaDatastore;
+GlodaMessage.prototype._datastore = GlodaDatastore;
+GlodaContact.prototype._datastore = GlodaDatastore;
+GlodaIdentity.prototype._datastore = GlodaDatastore;
diff --git a/comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm b/comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm
new file mode 100644
index 0000000000..7a10b4112e
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm
@@ -0,0 +1,188 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides the "explicit attribute" provider for messages. It is
+ * concerned with attributes that are the result of user actions. For example,
+ * whether a message is starred (flagged), message tags, whether it is
+ * read/unread, etc.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaExplicitAttr"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { TagNoun } = ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+
+/**
+ * @namespace Explicit attribute provider. Indexes/defines attributes that are
+ * explicitly a result of user action. This dubiously includes marking a
+ * message as read.
+ */
+var GlodaExplicitAttr = {
+  providerName: "gloda.explattr",
+  // Bundle for localized strings; uses the chrome-global Services object
+  // (available in mozilla .jsm contexts without an explicit import).
+  strings: Services.strings.createBundle(
+    "chrome://messenger/locale/gloda.properties"
+  ),
+  _log: null,
+
+  /**
+   * Create our logger and define our attributes; failures in attribute
+   * definition are logged and re-thrown (gloda cannot run without them).
+   */
+  init() {
+    this._log = console.createInstance({
+      prefix: "gloda.explattr",
+      maxLogLevel: "Warn",
+      maxLogLevelPref: "gloda.loglevel",
+    });
+
+    try {
+      this.defineAttributes();
+    } catch (ex) {
+      this._log.error("Error in init: " + ex);
+      throw ex;
+    }
+  },
+
+  /** Boost for starred messages. */
+  NOTABILITY_STARRED: 16,
+  /** Boost for tagged messages, first tag. */
+  NOTABILITY_TAGGED_FIRST: 8,
+  /** Boost for tagged messages, each additional tag. */
+  NOTABILITY_TAGGED_ADDL: 1,
+
+  /**
+   * Register the tag/star/read/repliedTo/forwarded attributes with Gloda.
+   */
+  defineAttributes() {
+    // Tag
+    this._attrTag = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "tag",
+      bindName: "tags",
+      singular: false,
+      emptySetIsSignificant: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_TAG,
+      parameterNoun: null,
+      // Property change notifications that we care about:
+      propertyChanges: ["keywords"],
+    }); // not-tested
+
+    // Star
+    this._attrStar = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "star",
+      bindName: "starred",
+      singular: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+    // Read/Unread
+    this._attrRead = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "read",
+      // Make the message query-able but without using the database.
+      canQuery: "truthy-but-not-true",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+
+    /**
+     * Has this message been replied to by the user.
+     */
+    this._attrRepliedTo = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "repliedTo",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+
+    /**
+     * Has this user forwarded this message to someone.
+     */
+    this._attrForwarded = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "forwarded",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+  },
+
+  /**
+   * Indexing worker: annotate aGlodaMessage with starred/read/repliedTo/
+   * forwarded state and tags from the message header, accumulating
+   * notability boosts along the way.  Generator protocol: yields kWorkDone
+   * when complete.
+   */
+  *process(aGlodaMessage, aRawReps, aIsNew, aCallbackHandle) {
+    let aMsgHdr = aRawReps.header;
+
+    aGlodaMessage.starred = aMsgHdr.isFlagged;
+    if (aGlodaMessage.starred) {
+      aGlodaMessage.notability += this.NOTABILITY_STARRED;
+    }
+
+    aGlodaMessage.read = aMsgHdr.isRead;
+
+    let flags = aMsgHdr.flags;
+    aGlodaMessage.repliedTo = Boolean(flags & Ci.nsMsgMessageFlags.Replied);
+    aGlodaMessage.forwarded = Boolean(flags & Ci.nsMsgMessageFlags.Forwarded);
+
+    let tags = (aGlodaMessage.tags = []);
+
+    // -- Tag
+    // build a map of the keywords
+    let keywords = aMsgHdr.getStringProperty("keywords");
+    let keywordList = keywords.split(" ");
+    let keywordMap = {};
+    for (let iKeyword = 0; iKeyword < keywordList.length; iKeyword++) {
+      let keyword = keywordList[iKeyword];
+      keywordMap[keyword] = true;
+    }
+
+    // Keep only the tags whose key appears in the header's keyword list.
+    let tagArray = TagNoun.getAllTags();
+    for (let iTag = 0; iTag < tagArray.length; iTag++) {
+      let tag = tagArray[iTag];
+      if (tag.key in keywordMap) {
+        tags.push(tag);
+      }
+    }
+
+    if (tags.length) {
+      aGlodaMessage.notability +=
+        this.NOTABILITY_TAGGED_FIRST +
+        (tags.length - 1) * this.NOTABILITY_TAGGED_ADDL;
+    }
+
+    yield GlodaConstants.kWorkDone;
+  },
+
+  /**
+   * Duplicates the notability logic from process(). Arguably process should
+   * be factored to call us, grokNounItem should be factored to call us, or we
+   * should get sufficiently fancy that our code wildly diverges.
+   */
+  score(aMessage, aContext) {
+    let score = 0;
+    if (aMessage.starred) {
+      score += this.NOTABILITY_STARRED;
+    }
+    if (aMessage.tags.length) {
+      score +=
+        this.NOTABILITY_TAGGED_FIRST +
+        (aMessage.tags.length - 1) * this.NOTABILITY_TAGGED_ADDL;
+    }
+    return score;
+  },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm b/comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm
new file mode 100644
index 0000000000..364ea61bb0
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm
@@ -0,0 +1,947 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaFundAttr"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+const { GlodaUtils } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaUtils.jsm"
+);
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaAttachment } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDataModel.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { MimeTypeNoun } = ChromeUtils.import(
+ "resource:///modules/gloda/NounMimetype.jsm"
+);
+const { GlodaContent } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaContent.jsm"
+);
+
+/**
+ * @namespace The Gloda Fundamental Attribute provider is a special attribute
+ * provider; it provides attributes that the rest of the providers should be
+ * able to assume exist. Also, it may end up accessing things at a lower level
+ * than most extension providers should do. In summary, don't mimic this code
+ * unless you won't complain when your code breaks.
+ */
+var GlodaFundAttr = {
+  providerName: "gloda.fundattr",
+  // Localized string bundle for gloda (used for user-visible labels).
+  strings: Services.strings.createBundle(
+    "chrome://messenger/locale/gloda.properties"
+  ),
+  // Console logger; created in init().
+  _log: null,
+
+ init() {
+ this._log = console.createInstance({
+ prefix: "gloda.fundattr",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+
+ try {
+ this.defineAttributes();
+ } catch (ex) {
+ this._log.error("Error in init: " + ex);
+ throw ex;
+ }
+ },
+
+  // Contact-popularity increments, applied (for newly indexed messages only,
+  // see optimize()) when 'I' address someone or someone addresses 'me'.
+  POPULARITY_FROM_ME_TO: 10,
+  POPULARITY_FROM_ME_CC: 4,
+  POPULARITY_FROM_ME_BCC: 3,
+  POPULARITY_TO_ME: 5,
+  POPULARITY_CC_ME: 1,
+  POPULARITY_BCC_ME: 1,
+
+  /** Boost for messages 'I' sent */
+  NOTABILITY_FROM_ME: 10,
+  /** Boost for messages involving 'me'. */
+  NOTABILITY_INVOLVING_ME: 1,
+  /** Boost for message from someone in 'my' address book. */
+  NOTABILITY_FROM_IN_ADDR_BOOK: 10,
+  /** Boost for the first person involved in my address book. */
+  NOTABILITY_INVOLVING_ADDR_BOOK_FIRST: 8,
+  /** Boost for each additional person involved in my address book. */
+  NOTABILITY_INVOLVING_ADDR_BOOK_ADDL: 2,
+
+  /**
+   * Register every fundamental (and a few derived/optimization) attribute
+   * this provider contributes: conversation/message fulltext helpers,
+   * storage-column specials (folder, messageKey, date, ...), the identity
+   * attributes (from/to/cc/bcc), and the involves/recipients/fromMe/toMe
+   * optimization attributes.  Called once from init(); the resulting
+   * attribute definition objects are cached on `this` for later use.
+   */
+  defineAttributes() {
+    /* ***** Conversations ***** */
+    // conversation: subjectMatches
+    this._attrConvSubject = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "subjectMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "subject",
+      subjectNouns: [GlodaConstants.NOUN_CONVERSATION],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    /* ***** Messages ***** */
+    // folder
+    this._attrFolder = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "folder",
+      singular: true,
+      facet: true,
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "folderID",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FOLDER,
+    }); // tested-by: test_attributes_fundamental
+    // account: derived from the folder, queryable only from memory
+    this._attrAccount = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "account",
+      canQuery: "memory",
+      singular: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_ACCOUNT,
+    });
+    this._attrMessageKey = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "messageKey",
+      singular: true,
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "messageKey",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_NUMBER,
+      canQuery: true,
+    }); // tested-by: test_attributes_fundamental
+
+    // We need to surface the deleted attribute for querying, but there is no
+    // reason for user code, so let's call it "_deleted" rather than deleted.
+    // (In fact, our validity constraints require a special query formulation
+    // that user code should have no clue exists.  That's right user code,
+    // that's a dare.)
+    Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "_deleted",
+      singular: true,
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "deleted",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_NUMBER,
+    });
+
+    // -- fulltext search helpers
+    // fulltextMatches.  Match over message subject, body, and attachments
+    // @testpoint gloda.noun.message.attr.fulltextMatches
+    this._attrFulltext = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "fulltextMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "messagesText",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // subjectMatches.  Fulltext match on subject
+    // @testpoint gloda.noun.message.attr.subjectMatches
+    this._attrSubjectText = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "subjectMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "subject",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // bodyMatches.  super-synthetic full-text matching...
+    // @testpoint gloda.noun.message.attr.bodyMatches
+    this._attrBody = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "bodyMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "body",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // attachmentNamesMatch
+    // @testpoint gloda.noun.message.attr.attachmentNamesMatch
+    this._attrAttachmentNames = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "attachmentNamesMatch",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "attachmentNames",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // @testpoint gloda.noun.message.attr.authorMatches
+    this._attrAuthorFulltext = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "authorMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "author",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // @testpoint gloda.noun.message.attr.recipientsMatch
+    this._attrRecipientsFulltext = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "recipientsMatch",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "recipients",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // --- synthetic stuff for some reason
+    // conversation
+    // @testpoint gloda.noun.message.attr.conversation
+    this._attrConversation = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "conversation",
+      singular: true,
+      special: GlodaConstants.kSpecialColumnParent,
+      specialColumnName: "conversationID",
+      idStorageAttributeName: "_conversationID",
+      valueStorageAttributeName: "_conversation",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_CONVERSATION,
+      canQuery: true,
+    });
+
+    // --- Fundamental
+    // From
+    this._attrFrom = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "from",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // tested-by: test_attributes_fundamental
+    // To
+    this._attrTo = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "to",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // tested-by: test_attributes_fundamental
+    // Cc
+    this._attrCc = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "cc",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+    /**
+     * Bcc'ed recipients; only makes sense for sent messages.
+     */
+    this._attrBcc = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "bcc",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+
+    // Date.  now lives on the row.
+    this._attrDate = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "date",
+      singular: true,
+      facet: {
+        type: "date",
+      },
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "date",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_DATE,
+    }); // tested-by: test_attributes_fundamental
+
+    // Header message ID.
+    this._attrHeaderMessageID = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "headerMessageID",
+      singular: true,
+      special: GlodaConstants.kSpecialString,
+      specialColumnName: "headerMessageID",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_STRING,
+      canQuery: true,
+    }); // tested-by: test_attributes_fundamental
+
+    // Attachment MIME Types
+    this._attrAttachmentTypes = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "attachmentTypes",
+      singular: false,
+      emptySetIsSignificant: true,
+      facet: {
+        type: "default",
+        // This will group the MIME types by their category.
+        groupIdAttr: "category",
+        queryHelper: "Category",
+      },
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_MIME_TYPE,
+    });
+
+    // Whether the message (or any sub-part) is encrypted.
+    this._attrIsEncrypted = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "isEncrypted",
+      singular: true,
+      emptySetIsSignificant: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_NUMBER,
+    });
+
+    // Attachment infos
+    this._attrAttachmentInfos = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "attachmentInfos",
+      singular: false,
+      emptySetIsSignificant: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_ATTACHMENT,
+    });
+
+    // --- Optimization
+    /**
+     * Involves means any of from/to/cc/bcc.  The queries get ugly enough
+     * without this that it seems to justify the cost, especially given the
+     * frequent use case.  (In fact, post-filtering for the specific from/to/cc
+     * is probably justifiable rather than losing this attribute...)
+     */
+    this._attrInvolves = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrOptimization,
+      attributeName: "involves",
+      singular: false,
+      facet: {
+        type: "default",
+        /**
+         * Filter out 'me', as we have other facets that deal with that, and the
+         * 'me' identities are so likely that they distort things.
+         *
+         * @returns true if the identity is not one of my identities, false if it
+         *     is.
+         */
+        filter(aItem) {
+          return !(aItem.id in Gloda.myIdentities);
+        },
+      },
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+
+    /**
+     * Any of to/cc/bcc.
+     */
+    this._attrRecipients = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrOptimization,
+      attributeName: "recipients",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+
+    // From Me (To/Cc/Bcc)
+    this._attrFromMe = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrOptimization,
+      attributeName: "fromMe",
+      singular: false,
+      // The interesting thing to a facet is whether the message is from me.
+      facet: {
+        type: "nonempty?",
+      },
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_PARAM_IDENTITY,
+    }); // not-tested
+    // To/Cc/Bcc Me
+    this._attrToMe = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "toMe",
+      // The interesting thing to a facet is whether the message is to me.
+      facet: {
+        type: "nonempty?",
+      },
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_PARAM_IDENTITY,
+    }); // not-tested
+
+    // -- Mailing List
+    // Non-singular, but a hard call.  Namely, it is obvious that a message can
+    // be addressed to multiple mailing lists.  However, I don't see how you
+    // could receive a message with more than one set of List-* headers,
+    // since each list-serve would each send you a copy.  Based on our current
+    // decision to treat each physical message as separate, it almost seems
+    // right to limit the list attribute to the copy that originated at the
+    // list.  That may sound entirely wrong, but keep in mind that until we
+    // have seen a message from the list with the List headers, we can't
+    // definitely know it's a mailing list (although heuristics could take us
+    // pretty far).  As such, the quasi-singular thing is appealing.
+    // Of course, the reality is that we really want to know if a message was
+    // sent to multiple mailing lists and be able to query on that.
+    // Additionally, our implicit-to logic needs to work on messages that
+    // weren't relayed by the list-serve, especially messages sent to the list
+    // by the user.
+    this._attrList = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "mailing-list",
+      bindName: "mailingLists",
+      singular: false,
+      emptySetIsSignificant: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested, not-implemented
+  },
+
+  /** Extracts the address from a "<mailto:addr>" List-Post header value. */
+  RE_LIST_POST: /<mailto:([^>]+)>/,
+
+  /**
+   * Indexer worker (generator protocol): resolve the message's identities
+   * (from/to/cc/bcc/mailing-list), populate the fundamental attributes, and
+   * derive encryption/attachment state from the MIME representation.
+   *
+   * Specializations:
+   * - Mailing Lists.  Replies to a message on a mailing list frequently only
+   *   have the list-serve as the 'to', so we try to generate a synthetic 'to'
+   *   based on the author of the parent message when possible.  (The 'possible'
+   *   part is that we may not have a copy of the parent message at the time of
+   *   processing.)
+   * - Newsgroups.  Same deal as mailing lists.
+   *
+   * @param aGlodaMessage The gloda message being indexed; mutated in place.
+   * @param aRawReps Raw representations: .header (nsIMsgDBHdr) and .mime
+   *     (the streamed MIME message, possibly null).
+   * @param aIsNew Unused here (part of the provider interface).
+   * @param aCallbackHandle Used via pushAndGo to resolve identities async.
+   */
+  *process(aGlodaMessage, aRawReps, aIsNew, aCallbackHandle) {
+    let aMsgHdr = aRawReps.header;
+    let aMimeMsg = aRawReps.mime;
+
+    // -- From
+    // Let's use replyTo if available.
+    // er, since we are just dealing with mailing lists for now, forget the
+    //  reply-to...
+    // TODO: deal with default charset issues
+    let author = null;
+    /*
+    try {
+      author = aMsgHdr.getStringProperty("replyTo");
+    }
+    catch (ex) {
+    }
+    */
+    if (author == null || author == "") {
+      author = aMsgHdr.author;
+    }
+
+    let normalizedListPost = "";
+    if (aMimeMsg && aMimeMsg.has("list-post")) {
+      let match = this.RE_LIST_POST.exec(aMimeMsg.get("list-post"));
+      if (match) {
+        normalizedListPost = "<" + match[1] + ">";
+      }
+    }
+
+    // Do not use the MIME decoded variants of any of the email addresses
+    // because if name is encoded and has a comma in it, it will break the
+    // address parser (which already knows how to do the decoding anyways).
+    let [
+      authorIdentities,
+      toIdentities,
+      ccIdentities,
+      bccIdentities,
+      listIdentities,
+    ] = yield aCallbackHandle.pushAndGo(
+      Gloda.getOrCreateMailIdentities(
+        aCallbackHandle,
+        author,
+        aMsgHdr.recipients,
+        aMsgHdr.ccList,
+        aMsgHdr.bccList,
+        normalizedListPost
+      )
+    );
+
+    // A message without exactly one author identity is malformed; bail so the
+    // indexer can mark it as bad rather than storing inconsistent data.
+    if (authorIdentities.length != 1) {
+      throw new Gloda.BadItemContentsError(
+        "Message with subject '" +
+          aMsgHdr.mime2DecodedSubject +
+          "' somehow lacks a valid author.  Bailing."
+      );
+    }
+    let authorIdentity = authorIdentities[0];
+    aGlodaMessage.from = authorIdentity;
+
+    // -- To, Cc, Bcc
+    aGlodaMessage.to = toIdentities;
+    aGlodaMessage.cc = ccIdentities;
+    aGlodaMessage.bcc = bccIdentities;
+
+    // -- Mailing List
+    if (listIdentities.length) {
+      aGlodaMessage.mailingLists = listIdentities;
+    }
+
+    // True if this part or any (recursively) nested part is encrypted.
+    let findIsEncrypted = x =>
+      x.isEncrypted || (x.parts ? x.parts.some(findIsEncrypted) : false);
+
+    // -- Encryption
+    aGlodaMessage.isEncrypted = false;
+    if (aMimeMsg) {
+      aGlodaMessage.isEncrypted = findIsEncrypted(aMimeMsg);
+    }
+
+    // -- Attachments
+    if (aMimeMsg) {
+      // nsParseMailbox.cpp puts the attachment flag on msgHdrs as soon as it
+      // finds a multipart/mixed part. This is a good heuristic, but if it turns
+      // out the part has no filename, then we don't treat it as an attachment.
+      // We just streamed the message, and we have all the information to figure
+      // that out, so now is a good place to clear the flag if needed.
+      let attachmentTypes = new Set();
+      for (let attachment of aMimeMsg.allAttachments) {
+        // getMimeType expects the content type to contain at least a "/".
+        if (!attachment.contentType.includes("/")) {
+          continue;
+        }
+        attachmentTypes.add(MimeTypeNoun.getMimeType(attachment.contentType));
+      }
+      if (attachmentTypes.size) {
+        aGlodaMessage.attachmentTypes = Array.from(attachmentTypes);
+      }
+
+      // NOTE(review): this re-declaration shadows the outer aMsgHdr with the
+      // same value (aRawReps.header); it looks redundant — confirm before
+      // simplifying.
+      let aMsgHdr = aRawReps.header;
+      let wasStreamed =
+        aMsgHdr &&
+        !aGlodaMessage.isEncrypted &&
+        (aMsgHdr.flags & Ci.nsMsgMessageFlags.Offline ||
+          aMsgHdr.folder instanceof Ci.nsIMsgLocalMailFolder);
+
+      // Clear the flag if it turns out there's no attachment after all and we
+      // streamed completely the message (if we didn't, then we have no
+      // knowledge of attachments, unless bug 673370 is fixed).
+      if (wasStreamed && !aMimeMsg.allAttachments.length) {
+        aMsgHdr.markHasAttachments(false);
+      }
+
+      // This is not the same kind of attachments as above. Now, we want to
+      // provide convenience attributes to Gloda consumers, so that they can run
+      // through the list of attachments of a given message, to possibly build a
+      // visualization on top of it. We still reject bogus mime types, which
+      // means yencode won't be supported. Oh, I feel really bad.
+      let attachmentInfos = [];
+      for (let att of aMimeMsg.allUserAttachments) {
+        attachmentInfos.push(
+          this.glodaAttFromMimeAtt(aRawReps.trueGlodaRep, att)
+        );
+      }
+      aGlodaMessage.attachmentInfos = attachmentInfos;
+    }
+
+    // TODO: deal with mailing lists, including implicit-to.  this will require
+    //  convincing the indexer to pass us in the previous message if it is
+    //  available.  (which we'll simply pass to everyone... it can help body
+    //  logic for quoting purposes, etc. too.)
+
+    yield GlodaConstants.kWorkDone;
+  },
+
+ glodaAttFromMimeAtt(aGlodaMessage, aAtt) {
+ // So we don't want to store the URL because it can change over time if
+ // the message is moved. What we do is store the full URL if it's a
+ // detached attachment, otherwise just keep the part information, and
+ // rebuild the URL according to where the message is sitting.
+ let part, externalUrl;
+ if (aAtt.isExternal) {
+ externalUrl = aAtt.url;
+ } else {
+ let matches = aAtt.url.match(GlodaUtils.PART_RE);
+ if (matches && matches.length) {
+ part = matches[1];
+ } else {
+ this._log.error("Error processing attachment: " + aAtt.url);
+ }
+ }
+ return new GlodaAttachment(
+ aGlodaMessage,
+ aAtt.name,
+ aAtt.contentType,
+ aAtt.size,
+ part,
+ externalUrl,
+ aAtt.isExternal
+ );
+ },
+
+ *optimize(aGlodaMessage, aRawReps, aIsNew, aCallbackHandle) {
+ let aMsgHdr = aRawReps.header;
+
+ // for simplicity this is used for both involves and recipients
+ let involvesIdentities = {};
+ let involves = aGlodaMessage.involves || [];
+ let recipients = aGlodaMessage.recipients || [];
+
+ // 'me' specialization optimizations
+ let toMe = aGlodaMessage.toMe || [];
+ let fromMe = aGlodaMessage.fromMe || [];
+
+ let myIdentities = Gloda.myIdentities; // needless optimization?
+ let authorIdentity = aGlodaMessage.from;
+ let isFromMe = authorIdentity.id in myIdentities;
+
+ // The fulltext search column for the author. We want to have in here:
+ // - The e-mail address and display name as enclosed on the message.
+ // - The name per the address book card for this e-mail address, if we have
+ // one.
+ aGlodaMessage._indexAuthor = aMsgHdr.mime2DecodedAuthor;
+ // The fulltext search column for the recipients. (same deal)
+ aGlodaMessage._indexRecipients = aMsgHdr.mime2DecodedRecipients;
+
+ if (isFromMe) {
+ aGlodaMessage.notability += this.NOTABILITY_FROM_ME;
+ } else {
+ let authorDisplayName = MailServices.ab.cardForEmailAddress(
+ authorIdentity.value
+ )?.displayName;
+ if (authorDisplayName !== null) {
+ aGlodaMessage.notability += this.NOTABILITY_FROM_IN_ADDR_BOOK;
+ // @testpoint gloda.noun.message.attr.authorMatches
+ aGlodaMessage._indexAuthor += " " + authorDisplayName;
+ }
+ }
+
+ involves.push(authorIdentity);
+ involvesIdentities[authorIdentity.id] = true;
+
+ let involvedAddrBookCount = 0;
+
+ for (let toIdentity of aGlodaMessage.to) {
+ if (!(toIdentity.id in involvesIdentities)) {
+ involves.push(toIdentity);
+ recipients.push(toIdentity);
+ involvesIdentities[toIdentity.id] = true;
+ let toDisplayName = MailServices.ab.cardForEmailAddress(
+ toIdentity.value
+ )?.displayName;
+ if (toDisplayName !== null) {
+ involvedAddrBookCount++;
+ // @testpoint gloda.noun.message.attr.recipientsMatch
+ aGlodaMessage._indexRecipients += " " + toDisplayName;
+ }
+ }
+
+ // optimization attribute to-me ('I' am the parameter)
+ if (toIdentity.id in myIdentities) {
+ toMe.push([toIdentity, authorIdentity]);
+ if (aIsNew) {
+ authorIdentity.contact.popularity += this.POPULARITY_TO_ME;
+ }
+ }
+ // optimization attribute from-me-to ('I' am the parameter)
+ if (isFromMe) {
+ fromMe.push([authorIdentity, toIdentity]);
+ // also, popularity
+ if (aIsNew) {
+ toIdentity.contact.popularity += this.POPULARITY_FROM_ME_TO;
+ }
+ }
+ }
+ for (let ccIdentity of aGlodaMessage.cc) {
+ if (!(ccIdentity.id in involvesIdentities)) {
+ involves.push(ccIdentity);
+ recipients.push(ccIdentity);
+ involvesIdentities[ccIdentity.id] = true;
+ let ccDisplayName = MailServices.ab.cardForEmailAddress(
+ ccIdentity.value
+ )?.displayName;
+ if (ccDisplayName !== null) {
+ involvedAddrBookCount++;
+ // @testpoint gloda.noun.message.attr.recipientsMatch
+ aGlodaMessage._indexRecipients += " " + ccDisplayName;
+ }
+ }
+ // optimization attribute cc-me ('I' am the parameter)
+ if (ccIdentity.id in myIdentities) {
+ toMe.push([ccIdentity, authorIdentity]);
+ if (aIsNew) {
+ authorIdentity.contact.popularity += this.POPULARITY_CC_ME;
+ }
+ }
+ // optimization attribute from-me-to ('I' am the parameter)
+ if (isFromMe) {
+ fromMe.push([authorIdentity, ccIdentity]);
+ // also, popularity
+ if (aIsNew) {
+ ccIdentity.contact.popularity += this.POPULARITY_FROM_ME_CC;
+ }
+ }
+ }
+ // just treat bcc like cc; the intent is the same although the exact
+ // semantics differ.
+ for (let bccIdentity of aGlodaMessage.bcc) {
+ if (!(bccIdentity.id in involvesIdentities)) {
+ involves.push(bccIdentity);
+ recipients.push(bccIdentity);
+ involvesIdentities[bccIdentity.id] = true;
+ let bccDisplayName = MailServices.ab.cardForEmailAddress(
+ bccIdentity.value
+ )?.displayName;
+ if (bccDisplayName !== null) {
+ involvedAddrBookCount++;
+ // @testpoint gloda.noun.message.attr.recipientsMatch
+ aGlodaMessage._indexRecipients += " " + bccDisplayName;
+ }
+ }
+ // optimization attribute cc-me ('I' am the parameter)
+ if (bccIdentity.id in myIdentities) {
+ toMe.push([bccIdentity, authorIdentity]);
+ if (aIsNew) {
+ authorIdentity.contact.popularity += this.POPULARITY_BCC_ME;
+ }
+ }
+ // optimization attribute from-me-to ('I' am the parameter)
+ if (isFromMe) {
+ fromMe.push([authorIdentity, bccIdentity]);
+ // also, popularity
+ if (aIsNew) {
+ bccIdentity.contact.popularity += this.POPULARITY_FROM_ME_BCC;
+ }
+ }
+ }
+
+ if (involvedAddrBookCount) {
+ aGlodaMessage.notability +=
+ this.NOTABILITY_INVOLVING_ADDR_BOOK_FIRST +
+ (involvedAddrBookCount - 1) * this.NOTABILITY_INVOLVING_ADDR_BOOK_ADDL;
+ }
+
+ aGlodaMessage.involves = involves;
+ aGlodaMessage.recipients = recipients;
+ if (toMe.length) {
+ aGlodaMessage.toMe = toMe;
+ aGlodaMessage.notability += this.NOTABILITY_INVOLVING_ME;
+ }
+ if (fromMe.length) {
+ aGlodaMessage.fromMe = fromMe;
+ }
+
+ // Content
+ if (aRawReps.bodyLines) {
+ aGlodaMessage._content = aRawReps.content = new GlodaContent();
+ if (this.contentWhittle({}, aRawReps.bodyLines, aGlodaMessage._content)) {
+ // we were going to do something here?
+ }
+ } else {
+ aRawReps.content = null;
+ }
+
+ yield GlodaConstants.kWorkDone;
+ },
+
+ /**
+ * Duplicates the notability logic from optimize(). Arguably optimize should
+ * be factored to call us, grokNounItem should be factored to call us, or we
+ * should get sufficiently fancy that our code wildly diverges.
+ */
+ score(aMessage, aContext) {
+ let score = 0;
+
+ let authorIdentity = aMessage.from;
+ if (authorIdentity.id in Gloda.myIdentities) {
+ score += this.NOTABILITY_FROM_ME;
+ } else if (authorIdentity.inAddressBook) {
+ score += this.NOTABILITY_FROM_IN_ADDR_BOOK;
+ }
+ if (aMessage.toMe) {
+ score += this.NOTABILITY_INVOLVING_ME;
+ }
+
+ let involvedAddrBookCount = 0;
+ for (let identity of aMessage.to) {
+ if (identity.inAddressBook) {
+ involvedAddrBookCount++;
+ }
+ }
+ for (let identity of aMessage.cc) {
+ if (identity.inAddressBook) {
+ involvedAddrBookCount++;
+ }
+ }
+ if (involvedAddrBookCount) {
+ score +=
+ this.NOTABILITY_INVOLVING_ADDR_BOOK_FIRST +
+ (involvedAddrBookCount - 1) * this.NOTABILITY_INVOLVING_ADDR_BOOK_ADDL;
+ }
+ return score;
+ },
+
+ _countQuoteDepthAndNormalize(aLine) {
+ let count = 0;
+ let lastStartOffset = 0;
+
+ for (let i = 0; i < aLine.length; i++) {
+ let c = aLine[i];
+ if (c == ">") {
+ count++;
+ lastStartOffset = i + 1;
+ } else if (c != " ") {
+ return [
+ count,
+ lastStartOffset ? aLine.substring(lastStartOffset) : aLine,
+ ];
+ }
+ }
+
+ return [count, lastStartOffset ? aLine.substring(lastStartOffset) : aLine];
+ },
+
+  /**
+   * Attempt to understand simple quoting constructs that use ">" with
+   * obvious phrases to enter the quoting block.  No support for other types
+   * of quoting at this time.  Also no support for piercing the wrapper of
+   * forwarded messages to actually be the content of the forwarded message.
+   *
+   * @param aMeta Unused metadata object (reserved for future use).
+   * @param aBodyLines The message body as an array of lines; not mutated (we
+   *     work on a copy when normalizing quote markers).
+   * @param aContent The GlodaContent to fill with content()/quoted() ranges.
+   * @returns {boolean} false if aContent declined our priority (someone else
+   *     already volunteered better content), true once whittling completes.
+   */
+  contentWhittle(aMeta, aBodyLines, aContent) {
+    if (!aContent.volunteerContent(aContent.kPriorityBase)) {
+      return false;
+    }
+
+    // duplicate the list; we mutate somewhat...
+    let bodyLines = aBodyLines.concat();
+
+    // lastNonBlankLine originally was just for detecting quoting idioms where
+    //  the "wrote" line was separated from the quoted block by a blank line.
+    // Now we also use it for whitespace suppression at the boundaries of
+    //  quoted and un-quoted text.  (We keep blank lines within the same
+    //  'block' of quoted or non-quoted text.)
+    // Because we now have two goals for it, and we still want to suppress blank
+    //  lines when there is a 'wrote' line involved, we introduce...
+    //  prevLastNonBlankLine!  This arguably suggests refactoring should be the
+    //  next step, but things work for now.
+    let rangeStart = 0,
+      lastNonBlankLine = null,
+      prevLastNonBlankLine = null;
+    let inQuoteDepth = 0;
+    for (let [iLine, line] of bodyLines.entries()) {
+      // Skip blank lines (and non-breaking-space-only lines); they never
+      // change quote state on their own.
+      if (!line || line == "\xa0") {
+        /* unicode non breaking space */
+        continue;
+      }
+
+      if (line.startsWith(">")) {
+        if (!inQuoteDepth) {
+          // Transition: unquoted -> quoted.  Flush the pending content range.
+          let rangeEnd = iLine - 1;
+          let quoteRangeStart = iLine;
+          // see if the last non-blank-line was a lead-in...
+          if (lastNonBlankLine != null) {
+            // TODO: localize quote range start detection
+            if (aBodyLines[lastNonBlankLine].includes("wrote")) {
+              quoteRangeStart = lastNonBlankLine;
+              rangeEnd = lastNonBlankLine - 1;
+              // we 'used up' lastNonBlankLine, let's promote the prev guy to
+              //  be the new lastNonBlankLine for the next logic block
+              lastNonBlankLine = prevLastNonBlankLine;
+            }
+            // eat the trailing whitespace...
+            if (lastNonBlankLine != null) {
+              rangeEnd = Math.min(rangeEnd, lastNonBlankLine);
+            }
+          }
+          if (rangeEnd >= rangeStart) {
+            aContent.content(aBodyLines.slice(rangeStart, rangeEnd + 1));
+          }
+
+          [inQuoteDepth, line] = this._countQuoteDepthAndNormalize(line);
+          bodyLines[iLine] = line;
+          rangeStart = quoteRangeStart;
+        } else {
+          // Already quoted; check whether the nesting depth changed.
+          let curQuoteDepth;
+          [curQuoteDepth, line] = this._countQuoteDepthAndNormalize(line);
+          bodyLines[iLine] = line;
+
+          if (curQuoteDepth != inQuoteDepth) {
+            // we could do some "wrote" compensation here, but it's not really
+            //  as important.  let's wait for a more clever algorithm.
+            aContent.quoted(aBodyLines.slice(rangeStart, iLine), inQuoteDepth);
+            inQuoteDepth = curQuoteDepth;
+            rangeStart = iLine;
+          }
+        }
+      } else if (inQuoteDepth) {
+        // Transition: quoted -> unquoted.  Flush the pending quoted range.
+        aContent.quoted(aBodyLines.slice(rangeStart, iLine), inQuoteDepth);
+        inQuoteDepth = 0;
+        rangeStart = iLine;
+      }
+
+      prevLastNonBlankLine = lastNonBlankLine;
+      lastNonBlankLine = iLine;
+    }
+
+    // Flush whatever range was still open when the body ended.
+    if (inQuoteDepth) {
+      aContent.quoted(aBodyLines.slice(rangeStart), inQuoteDepth);
+    } else {
+      aContent.content(aBodyLines.slice(rangeStart, lastNonBlankLine + 1));
+    }
+
+    return true;
+  },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaIndexer.jsm b/comm/mailnews/db/gloda/modules/GlodaIndexer.jsm
new file mode 100644
index 0000000000..05919e4d67
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaIndexer.jsm
@@ -0,0 +1,1491 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file currently contains a fairly general implementation of asynchronous
+ * indexing with a very explicit message indexing implementation. As gloda
+ * will eventually want to index more than just messages, the message-specific
+ * things should ideally lose their special hold on this file. This will
+ * benefit readability/size as well.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaIndexer", "IndexingJob"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+const lazy = {};
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "GlodaCollectionManager",
+ "resource:///modules/gloda/Collection.jsm"
+);
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "GlodaDatastore",
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+
+/**
+ * @class Capture the indexing batch concept explicitly.
+ *
+ * @param aJobType The type of thing we are indexing. Current choices are:
+ * "folder" and "message". Previous choices included "account". The indexer
+ * currently knows too much about these; they should be de-coupled.
+ * @param aID Specific to the job type, but for now only used to hold folder
+ * IDs.
+ *
+ * @ivar items The list of items to process during this job/batch. (For
+ * example, if this is a "messages" job, this would be the list of messages
+ * to process, although the specific representation is determined by the
+ * job.) The list will only be mutated through the addition of extra items.
+ * @ivar offset The current offset into the 'items' list (if used), updated as
+ * processing occurs. If 'items' is not used, the processing code can also
+ * update this in a similar fashion. This is used by the status
+ * notification code in conjunction with goal.
+ * @ivar goal The total number of items to index/actions to perform in this job.
+ * This number may increase during the life of the job, but should not
+ * decrease. This is used by the status notification code in conjunction
+ * with the goal.
+ */
+function IndexingJob(aJobType, aID, aItems) {
+ this.jobType = aJobType;
+ this.id = aID;
+ this.items = aItems != null ? aItems : [];
+ this.offset = 0;
+ this.goal = null;
+ this.callback = null;
+ this.callbackThis = null;
+}
+IndexingJob.prototype = {
+ /**
+ * Invoke the callback associated with this job, passing through all arguments
+ * received by this function to the callback function.
+ */
+ safelyInvokeCallback(...aArgs) {
+ if (!this.callback) {
+ return;
+ }
+ try {
+ this.callback.apply(this.callbackThis, aArgs);
+ } catch (ex) {
+ GlodaIndexer._log.warn("job callback invocation problem:", ex);
+ }
+ },
+ toString() {
+ return (
+ "[job:" +
+ this.jobType +
+ " id:" +
+ this.id +
+ " items:" +
+ (this.items ? this.items.length : "no") +
+ " offset:" +
+ this.offset +
+ " goal:" +
+ this.goal +
+ "]"
+ );
+ },
+};
+
+/**
+ * @namespace Core indexing logic, plus message-specific indexing logic.
+ *
+ * === Indexing Goals
+ * We have the following goals:
+ *
+ * Responsiveness
+ * - When the user wants to quit, we should be able to stop and quit in a timely
+ * fashion.
+ * - We should not interfere with the user's thunderbird usage.
+ *
+ * Correctness
+ * - Quitting should not result in any information loss; we should (eventually)
+ * end up at the same indexed state regardless of whether a user lets
+ * indexing run to completion or restarts thunderbird in the middle of the
+ * process. (It is okay to take slightly longer in the latter case.)
+ *
+ * Worst Case Scenario Avoidance
+ * - We should try to be O(1) memory-wise regardless of what notifications
+ * are thrown at us.
+ *
+ * === Indexing Throttling
+ *
+ * Adaptive Indexing
+ * - The indexer tries to stay out of the way of other running code in
+ * Thunderbird (autosync) and other code on the system. We try and target
+ * some number of milliseconds of activity between intentional inactive
+ * periods. The number of milliseconds of activity varies based on whether we
+ * believe the user to be actively using the computer or idle. We use our
+ * inactive periods as a way to measure system load; if we receive our
+ * notification promptly at the end of our inactive period, we believe the
+ * system is not heavily loaded. If we do not get notified promptly, we
+ * assume there is other stuff going on and back off.
+ *
+ */
+var GlodaIndexer = {
+ /**
+ * A partial attempt to generalize to support multiple databases. Each
 + * database would have its own datastore, and each datastore would have its
 + * own indexer. But we rather inter-mingle our use of this field with the
 + * singleton global GlodaDatastore.
+ */
+ _log: console.createInstance({
+ prefix: "gloda.indexer",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ }),
+ /**
+ * Our nsITimer that we use to schedule ourselves on the main thread
+ * intermittently. The timer always exists but may not always be active.
+ */
+ _timer: null,
+ /**
+ * Our nsITimer that we use to schedule events in the "far" future. For now,
+ * this means not compelling an initial indexing sweep until some number of
+ * seconds after startup.
+ */
+ _longTimer: null,
+
+ /**
+ * Periodic performance adjustment parameters: The overall goal is to adjust
+ * our rate of work so that we don't interfere with the user's activities
+ * when they are around (non-idle), and the system in general (when idle).
+ * Being nice when idle isn't quite as important, but is a good idea so that
+ * when the user un-idles we are able to back off nicely. Also, we give
+ * other processes on the system a chance to do something.
+ *
+ * We do this by organizing our work into discrete "tokens" of activity,
+ * then processing the number of tokens that we have determined will
+ * not impact the UI. Then we pause to give other activities a chance to get
+ * some work done, and we measure whether anything happened during our pause.
+ * If something else is going on in our application during that pause, we
+ * give it priority (up to a point) by delaying further indexing.
+ *
+ * Keep in mind that many of our operations are actually asynchronous, so we
+ * aren't entirely starving the event queue. However, a lot of the async
+ * stuff can end up not having any actual delay between events. For
+ * example, we only index offline message bodies, so there's no network
+ * latency involved, just disk IO; the only meaningful latency will be the
+ * initial disk seek (if there is one... pre-fetching may seriously be our
+ * friend).
+ *
+ * In order to maintain responsiveness, I assert that we want to minimize the
+ * length of the time we are dominating the event queue. This suggests
+ * that we want break up our blocks of work frequently. But not so
+ * frequently that there is a lot of waste. Accordingly our algorithm is
+ * basically:
+ *
+ * - Estimate the time that it takes to process a token, and schedule the
+ * number of tokens that should fit into that time.
+ * - Detect user activity, and back off immediately if found.
+ * - Try to delay commits and garbage collection until the user is inactive,
+ * as these tend to cause a brief pause in the UI.
+ */
+
+ /**
+ * The number of milliseconds before we declare the user idle and step up our
+ * indexing.
+ */
+ _INDEX_IDLE_ADJUSTMENT_TIME: 5000,
+
+ /**
+ * The time delay in milliseconds before we should schedule our initial sweep.
+ */
+ _INITIAL_SWEEP_DELAY: 10000,
+
+ /**
+ * How many milliseconds in the future should we schedule indexing to start
+ * when turning on indexing (and it was not previously active).
+ */
+ _INDEX_KICKOFF_DELAY: 200,
+
+ /**
+ * The time interval, in milliseconds, of pause between indexing batches. The
+ * maximum processor consumption is determined by this constant and the
+ * active |_cpuTargetIndexTime|.
+ *
+ * For current constants, that puts us at 50% while the user is active and 83%
+ * when idle.
+ */
+ _INDEX_INTERVAL: 32,
+
+ /**
+ * Number of indexing 'tokens' we are allowed to consume before yielding for
+ * each incremental pass. Consider a single token equal to indexing a single
+ * medium-sized message. This may be altered by user session (in)activity.
+ * Because we fetch message bodies, which is potentially asynchronous, this
+ * is not a precise knob to twiddle.
+ */
+ _indexTokens: 2,
+
+ /**
+ * Stopwatches used to measure performance during indexing, and during
+ * pauses between indexing. These help us adapt our indexing constants so
+ * as to not explode your computer. Kind of us, no?
+ */
+ _perfIndexStopwatch: null,
+ _perfPauseStopwatch: null,
+ /**
+ * Do we have an uncommitted indexer transaction that idle callback should commit?
+ */
+ _idleToCommit: false,
+ /**
+ * Target CPU time per batch of tokens, current value (milliseconds).
+ */
+ _cpuTargetIndexTime: 32,
+ /**
+ * Target CPU time per batch of tokens, during non-idle (milliseconds).
+ */
+ _CPU_TARGET_INDEX_TIME_ACTIVE: 32,
+ /**
+ * Target CPU time per batch of tokens, during idle (milliseconds).
+ */
+ _CPU_TARGET_INDEX_TIME_IDLE: 160,
+ /**
+ * Average CPU time per processed token (milliseconds).
+ */
+ _cpuAverageTimePerToken: 16,
+ /**
+ * Damping factor for _cpuAverageTimePerToken, as an approximate
+ * number of tokens to include in the average time.
+ */
+ _CPU_AVERAGE_TIME_DAMPING: 200,
+ /**
+ * Maximum tokens per batch. This is normally just a sanity check.
+ */
+ _CPU_MAX_TOKENS_PER_BATCH: 100,
+ /**
+ * CPU usage during a pause to declare that system was busy (milliseconds).
+ * This is typically set as 1.5 times the minimum resolution of the cpu
+ * usage clock, which is 16 milliseconds on Windows systems, and (I think)
+ * smaller on other systems, so we take the worst case.
+ */
+ _CPU_IS_BUSY_TIME: 24,
+ /**
+ * Time that return from pause may be late before the system is declared
+ * busy, in milliseconds. (Same issues as _CPU_IS_BUSY_TIME).
+ */
+ _PAUSE_LATE_IS_BUSY_TIME: 24,
+ /**
+ * Number of times that we will repeat a pause while waiting for a
+ * free CPU.
+ */
+ _PAUSE_REPEAT_LIMIT: 10,
+ /**
+ * Minimum time delay between commits, in milliseconds.
+ */
+ _MINIMUM_COMMIT_TIME: 5000,
+ /**
+ * Maximum time delay between commits, in milliseconds.
+ */
+ _MAXIMUM_COMMIT_TIME: 20000,
+
+ /**
+ * Unit testing hook to get us to emit additional logging that verges on
+ * inane for general usage but is helpful in unit test output to get a lay
+ * of the land and for paranoia reasons.
+ */
+ _unitTestSuperVerbose: false,
+ /**
+ * Unit test vector to get notified when a worker has a problem and it has
+ * a recover helper associated. This gets called with an argument
+ * indicating whether the recovery helper indicates recovery was possible.
+ */
+ _unitTestHookRecover: null,
+ /**
+ * Unit test vector to get notified when a worker runs into an exceptional
+ * situation (an exception propagates or gets explicitly killed) and needs
+ * to be cleaned up. This gets called with an argument indicating if there
+ * was a helper that was used or if we just did the default cleanup thing.
+ */
+ _unitTestHookCleanup: null,
+
+ /**
+ * Last commit time. Tracked to try and only commit at reasonable intervals.
+ */
+ _lastCommitTime: Date.now(),
+
+ _inited: false,
+ /**
+ * Initialize the indexer.
+ */
+ _init() {
+ if (this._inited) {
+ return;
+ }
+
+ this._inited = true;
+
+ this._callbackHandle.init();
+
+ if (Services.io.offline) {
+ this._suppressIndexing = true;
+ }
+
+ // create the timer that drives our intermittent indexing
+ this._timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+ // create the timer for larger offsets independent of indexing
+ this._longTimer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+
+ this._idleService = Cc["@mozilla.org/widget/useridleservice;1"].getService(
+ Ci.nsIUserIdleService
+ );
+
+ // create our performance stopwatches
+ try {
+ this._perfIndexStopwatch = Cc["@mozilla.org/stopwatch;1"].createInstance(
+ Ci.nsIStopwatch
+ );
+ this._perfPauseStopwatch = Cc["@mozilla.org/stopwatch;1"].createInstance(
+ Ci.nsIStopwatch
+ );
+ } catch (ex) {
+ this._log.error("problem creating stopwatch!: " + ex);
+ }
+
+ // register for shutdown notifications
+ Services.obs.addObserver(this, "quit-application");
+
+ // figure out if event-driven indexing should be enabled...
+ let branch = Services.prefs.getBranch("mailnews.database.global.indexer.");
+ let eventDrivenEnabled = branch.getBoolPref("enabled", false);
+ let performInitialSweep = branch.getBoolPref("perform_initial_sweep", true);
+ // pretend we have already performed an initial sweep...
+ if (!performInitialSweep) {
+ this._initialSweepPerformed = true;
+ }
+
+ this.enabled = eventDrivenEnabled;
+ },
+
+ /**
+ * When shutdown, indexing immediately ceases and no further progress should
+ * be made. This flag goes true once, and never returns to false. Being
+ * in this state is a destructive thing from whence we cannot recover.
+ */
+ _indexerIsShutdown: false,
+
+ /**
+ * Shutdown the indexing process and datastore as quickly as possible in
+ * a synchronous fashion.
+ */
+ _shutdown() {
+ // no more timer events, please
+ try {
+ this._timer.cancel();
+ } catch (ex) {}
+ this._timer = null;
+ try {
+ this._longTimer.cancel();
+ } catch (ex) {}
+ this._longTimer = null;
+
+ this._perfIndexStopwatch = null;
+ this._perfPauseStopwatch = null;
+
+ // Remove listeners to avoid reference cycles on the off chance one of them
+ // holds a reference to the indexer object.
+ this._indexListeners = [];
+
+ this._indexerIsShutdown = true;
+
+ if (this.enabled) {
+ this._log.info("Shutting Down");
+ }
+
+ // don't let anything try and convince us to start indexing again
+ this.suppressIndexing = true;
+
+ // If there is an active job and it has a cleanup handler, run it.
+ if (this._curIndexingJob) {
+ let workerDef = this._curIndexingJob._workerDef;
+ try {
+ if (workerDef.cleanup) {
+ workerDef.cleanup.call(workerDef.indexer, this._curIndexingJob);
+ }
+ } catch (ex) {
+ this._log.error("problem during worker cleanup during shutdown.");
+ }
+ }
+ // Definitely clean out the async call stack and any associated data
+ this._callbackHandle.cleanup();
+ this._workBatchData = undefined;
+
+ // disable ourselves and all of the specific indexers
+ this.enabled = false;
+
+ lazy.GlodaDatastore.shutdown();
+ },
+
+ /**
+ * The list of indexers registered with us. If you are a core gloda indexer
+ * (you ship with gloda), then you can import this file directly and should
+ * make sure your indexer is imported in 'Everybody.jsm' in the right order.
+ * If you are not core gloda, then you should import 'GlodaPublic.jsm' and only
+ * then should you import 'GlodaIndexer.jsm' to get at GlodaIndexer.
+ */
+ _indexers: [],
+ /**
+ * Register an indexer with the Gloda indexing mechanism.
+ *
+ * @param aIndexer.name The name of your indexer.
+ * @param aIndexer.enable Your enable function. This will be called during
+ * the call to registerIndexer if Gloda indexing is already enabled. If
 + * indexing is not yet enabled, you will be called when it becomes enabled.
+ * @param aIndexer.disable Your disable function. This will be called when
+ * indexing is disabled or we are shutting down. This will only be called
+ * if enable has already been called.
+ * @param aIndexer.workers A list of tuples of the form [worker type code,
+ * worker generator function, optional scheduling trigger function]. The
+ * type code is the string used to uniquely identify the job type. If you
+ * are not core gloda, your job type must start with your extension's name
 + * and a colon; you can follow that with anything you want. The worker
+ * generator is not easily explained in here. The trigger function is
+ * invoked immediately prior to calling the generator to create it. The
+ * trigger function takes the job as an argument and should perform any
+ * finalization required on the job. Most workers should not need to use
+ * the trigger function.
+ * @param aIndexer.initialSweep We call this to tell each indexer when it is
+ * its turn to run its indexing sweep. The idea of the indexing sweep is
+ * that this is when you traverse things eligible for indexing to make
+ * sure they are indexed. Right now we just call everyone at the same
+ * time and hope that their jobs don't fight too much.
+ */
+ registerIndexer(aIndexer) {
+ this._log.info("Registering indexer: " + aIndexer.name);
+ this._indexers.push(aIndexer);
+
+ try {
+ for (let workerInfo of aIndexer.workers) {
+ let workerCode = workerInfo[0];
+ let workerDef = workerInfo[1];
+ workerDef.name = workerCode;
+ workerDef.indexer = aIndexer;
+ this._indexerWorkerDefs[workerCode] = workerDef;
+ if (!("recover" in workerDef)) {
+ workerDef.recover = null;
+ }
+ if (!("cleanup" in workerDef)) {
+ workerDef.cleanup = null;
+ }
+ if (!("onSchedule" in workerDef)) {
+ workerDef.onSchedule = null;
+ }
+ if (!("jobCanceled" in workerDef)) {
+ workerDef.jobCanceled = null;
+ }
+ }
+ } catch (ex) {
+ this._log.warn("Helper indexer threw exception on worker enum.");
+ }
+
+ if (this._enabled) {
+ try {
+ aIndexer.enable();
+ } catch (ex) {
+ this._log.warn("Helper indexer threw exception on enable: " + ex);
+ }
+ }
+ },
+
+ /**
+ * Are we enabled, read: are we processing change events?
+ */
+ _enabled: false,
+ get enabled() {
+ return this._enabled;
+ },
+ set enabled(aEnable) {
+ if (!this._enabled && aEnable) {
+ // register for offline notifications
+ Services.obs.addObserver(this, "network:offline-status-changed");
+
+ // register for idle notification
+ this._idleService.addIdleObserver(this, this._indexIdleThresholdSecs);
+
+ this._enabled = true;
+
+ for (let indexer of this._indexers) {
+ try {
+ indexer.enable();
+ } catch (ex) {
+ this._log.warn("Helper indexer threw exception on enable: " + ex);
+ }
+ }
+
+ // if we have an accumulated desire to index things, kick it off again.
+ if (this._indexingDesired) {
+ this._indexingDesired = false; // it's edge-triggered for now
+ this.indexing = true;
+ }
+
+ // if we have not done an initial sweep, schedule scheduling one.
+ if (!this._initialSweepPerformed) {
+ this._longTimer.initWithCallback(
+ this._scheduleInitialSweep,
+ this._INITIAL_SWEEP_DELAY,
+ Ci.nsITimer.TYPE_ONE_SHOT
+ );
+ }
+ } else if (this._enabled && !aEnable) {
+ for (let indexer of this._indexers) {
+ try {
+ indexer.disable();
+ } catch (ex) {
+ this._log.warn("Helper indexer threw exception on disable: " + ex);
+ }
+ }
+
+ // remove offline observer
+ Services.obs.removeObserver(this, "network:offline-status-changed");
+
+ // remove idle
+ this._idleService.removeIdleObserver(this, this._indexIdleThresholdSecs);
+
+ this._enabled = false;
+ }
+ },
+
+ /** Track whether indexing is desired (we have jobs to prosecute). */
+ _indexingDesired: false,
+ /**
+ * Track whether we have an actively pending callback or timer event. We do
+ * this so we don't experience a transient suppression and accidentally
+ * get multiple event-chains driving indexing at the same time (which the
+ * code will not handle correctly).
+ */
+ _indexingActive: false,
+ /**
+ * Indicates whether indexing is currently ongoing. This may return false
+ * while indexing activities are still active, but they will quiesce shortly.
+ */
+ get indexing() {
+ return this._indexingDesired && !this._suppressIndexing;
+ },
+ /** Indicates whether indexing is desired. */
+ get indexingDesired() {
+ return this._indexingDesired;
+ },
+ /**
+ * Set this to true to indicate there is indexing work to perform. This does
+ * not mean indexing will begin immediately (if it wasn't active), however.
+ * If suppressIndexing has been set, we won't do anything until indexing is
+ * no longer suppressed.
+ */
+ set indexing(aShouldIndex) {
+ if (!this._indexingDesired && aShouldIndex) {
+ this._indexingDesired = true;
+ if (this.enabled && !this._indexingActive && !this._suppressIndexing) {
+ this._log.info("+++ Indexing Queue Processing Commencing");
+ this._indexingActive = true;
+ this._timer.initWithCallback(
+ this._timerCallbackDriver,
+ this._INDEX_KICKOFF_DELAY,
+ Ci.nsITimer.TYPE_ONE_SHOT
+ );
+ }
+ }
+ },
+
+ _suppressIndexing: false,
+ /**
+ * Set whether or not indexing should be suppressed. This is to allow us to
+ * avoid running down a laptop's battery when it is not on AC. Only code
+ * in charge of regulating that tracking should be setting this variable; if
+ * other factors want to contribute to such a decision, this logic needs to
+ * be changed to track that, since last-write currently wins.
+ */
+ set suppressIndexing(aShouldSuppress) {
+ this._suppressIndexing = aShouldSuppress;
+
+ // re-start processing if we are no longer suppressing, there is work yet
+ // to do, and the indexing process had actually stopped.
+ if (
+ !this._suppressIndexing &&
+ this._indexingDesired &&
+ !this._indexingActive
+ ) {
+ this._log.info("+++ Indexing Queue Processing Resuming");
+ this._indexingActive = true;
+ this._timer.initWithCallback(
+ this._timerCallbackDriver,
+ this._INDEX_KICKOFF_DELAY,
+ Ci.nsITimer.TYPE_ONE_SHOT
+ );
+ }
+ },
+
+ /**
+ * Track whether an initial sweep has been performed. This mainly exists so
+ * that unit testing can stop us from performing an initial sweep.
+ */
+ _initialSweepPerformed: false,
+ /**
+ * Our timer-driven callback to schedule our first initial indexing sweep.
+ * Because it is invoked by an nsITimer it operates without the benefit of
+ * a 'this' context and must use GlodaIndexer instead of this.
+ * Since an initial sweep could have been performed before we get invoked,
+ * we need to check whether an initial sweep is still desired before trying
+ * to schedule one. We don't need to worry about whether one is active
+ * because the indexingSweepNeeded takes care of that.
+ */
+ _scheduleInitialSweep() {
+ if (GlodaIndexer._initialSweepPerformed) {
+ return;
+ }
+ GlodaIndexer._initialSweepPerformed = true;
+ for (let indexer of GlodaIndexer._indexers) {
+ indexer.initialSweep();
+ }
+ },
+
+ /**
+ * Our current job number. Meaningless value that increments with every job
+ * we process that resets to 0 when we run out of jobs. Currently used by
+ * the activity manager's gloda listener to tell when we have changed jobs.
+ * We really need a better listener mechanism.
+ */
+ _indexingJobCount: 0,
+
+ /**
+ * A list of IndexingJob instances to process.
+ */
+ _indexQueue: [],
+
+ /**
+ * The current indexing job.
+ */
+ _curIndexingJob: null,
+
+ /**
+ * The number of seconds before we declare the user idle and commit if
+ * needed.
+ */
+ _indexIdleThresholdSecs: 3,
+
+ _indexListeners: [],
+ /**
+ * Add an indexing progress listener. The listener will be notified of at
+ * least all major status changes (idle -> indexing, indexing -> idle), plus
+ * arbitrary progress updates during the indexing process.
+ * If indexing is not active when the listener is added, a synthetic idle
+ * notification will be generated.
+ *
+ * @param aListener A listener function, taking arguments: status (Gloda.
+ * kIndexer*), the folder name if a folder is involved (string or null),
+ * current zero-based job number (int),
+ * current item number being indexed in this job (int), total number
+ * of items in this job to be indexed (int).
+ *
+ * @TODO should probably allow for a 'this' value to be provided
+ * @TODO generalize to not be folder/message specific. use nouns!
+ */
+ addListener(aListener) {
+ // should we weakify?
+ if (!this._indexListeners.includes(aListener)) {
+ this._indexListeners.push(aListener);
+ }
+ // if we aren't indexing, give them an idle indicator, otherwise they can
+ // just be happy when we hit the next actual status point.
+ if (!this.indexing) {
+ aListener(GlodaConstants.kIndexerIdle, null, 0, 0, 1);
+ }
+ return aListener;
+ },
+ /**
+ * Remove the given listener so that it no longer receives indexing progress
+ * updates.
+ */
+ removeListener(aListener) {
+ let index = this._indexListeners.indexOf(aListener);
+ if (index != -1) {
+ this._indexListeners.splice(index, 1);
+ }
+ },
+ /**
+ * Helper method to tell listeners what we're up to. For code simplicity,
+ * the caller is just deciding when to send this update (preferably at
+ * reasonable intervals), and doesn't need to provide any indication of
+ * state... we figure that out ourselves.
+ *
+ * This was not pretty but got ugly once we moved the message indexing out
+ * to its own indexer. Some generalization is required but will likely
+ * require string hooks.
+ */
+ _notifyListeners() {
+ let status, prettyName, jobIndex, jobItemIndex, jobItemGoal, jobType;
+
+ if (this.indexing && this._curIndexingJob) {
+ let job = this._curIndexingJob;
+ status = GlodaConstants.kIndexerIndexing;
+
+ let indexer = this._indexerWorkerDefs[job.jobType].indexer;
+ if ("_indexingFolder" in indexer) {
+ prettyName =
+ indexer._indexingFolder != null
+ ? indexer._indexingFolder.prettyName
+ : null;
+ } else {
+ prettyName = null;
+ }
+
+ jobIndex = this._indexingJobCount - 1;
+ jobItemIndex = job.offset;
+ jobItemGoal = job.goal;
+ jobType = job.jobType;
+ } else {
+ status = GlodaConstants.kIndexerIdle;
+ prettyName = null;
+ jobIndex = 0;
+ jobItemIndex = 0;
+ jobItemGoal = 1;
+ jobType = null;
+ }
+
+ // Some people ascribe to the belief that the most you can give is 100%.
+ // We know better, but let's humor them.
+ if (jobItemIndex > jobItemGoal) {
+ jobItemGoal = jobItemIndex;
+ }
+
+ for (
+ let iListener = this._indexListeners.length - 1;
+ iListener >= 0;
+ iListener--
+ ) {
+ let listener = this._indexListeners[iListener];
+ try {
+ listener(
+ status,
+ prettyName,
+ jobIndex,
+ jobItemIndex,
+ jobItemGoal,
+ jobType
+ );
+ } catch (ex) {
+ this._log.error(ex);
+ }
+ }
+ },
+
+ /**
+ * A wrapped callback driver intended to be used by timers that provide
+ * arguments we really do not care about.
+ */
+ _timerCallbackDriver() {
+ GlodaIndexer.callbackDriver();
+ },
+
+ /**
+ * A simple callback driver wrapper to provide 'this'.
+ */
+ _wrapCallbackDriver(...aArgs) {
+ GlodaIndexer.callbackDriver(...aArgs);
+ },
+
+ /**
+ * The current processing 'batch' generator, produced by a call to workBatch()
+ * and used by callbackDriver to drive execution.
+ */
+ _batch: null,
+ _inCallback: false,
+ _savedCallbackArgs: null,
+ /**
+ * The root work-driver. callbackDriver creates workBatch generator instances
+ * (stored in _batch) which run until they are done (kWorkDone) or they
+ * (really the embedded activeIterator) encounter something asynchronous.
+ * The convention is that all the callback handlers end up calling us,
+ * ensuring that control-flow properly resumes. If the batch completes,
+ * we re-schedule ourselves after a time delay (controlled by _INDEX_INTERVAL)
+ * and return. (We use one-shot timers because repeating-slack does not
+ * know enough to deal with our (current) asynchronous nature.)
+ */
+ callbackDriver(...aArgs) {
+ // just bail if we are shutdown
+ if (this._indexerIsShutdown) {
+ return;
+ }
+
+ // it is conceivable that someone we call will call something that in some
+ // cases might be asynchronous, and in other cases immediately generate
+ // events without returning. In the interest of (stack-depth) sanity,
+ // let's handle this by performing a minimal time-delay callback.
+ // this is also now a good thing sequencing-wise. if we get our callback
+ // with data before the underlying function has yielded, we obviously can't
+ // cram the data in yet. Our options in this case are to either mark the
+ // fact that the callback has already happened and immediately return to
+ // the iterator when it does bubble up the kWorkAsync, or we can do as we
 + // have been doing, but save the callback arguments (which is what we do).
+ if (this._inCallback) {
+ this._savedCallbackArgs = aArgs;
+ this._timer.initWithCallback(
+ this._timerCallbackDriver,
+ 0,
+ Ci.nsITimer.TYPE_ONE_SHOT
+ );
+ return;
+ }
+ this._inCallback = true;
+
+ try {
+ if (this._batch === null) {
+ this._batch = this.workBatch();
+ }
+
+ // kWorkAsync, kWorkDone, kWorkPause are allowed out; kWorkSync is not
+ // On kWorkDone, we want to schedule another timer to fire on us if we are
+ // not done indexing. (On kWorkAsync, we don't care what happens, because
+ // someone else will be receiving the callback, and they will call us when
+ // they are done doing their thing.
+ let args;
+ if (this._savedCallbackArgs != null) {
+ args = this._savedCallbackArgs;
+ this._savedCallbackArgs = null;
+ } else {
+ args = aArgs;
+ }
+
+ let result;
+ if (args.length == 0) {
+ result = this._batch.next().value;
+ } else if (args.length == 1) {
+ result = this._batch.next(args[0]).value;
+ } else {
+ // Arguments works with destructuring assignment.
+ result = this._batch.next(args).value;
+ }
+ switch (result) {
+ // job's done, close the batch and re-schedule ourselves if there's more
+ // to do.
+ case GlodaConstants.kWorkDone:
+ this._batch.return();
+ this._batch = null;
+ // the batch wants to get re-scheduled, do so.
+ // (intentional fall-through to re-scheduling logic)
+ case GlodaConstants.kWorkPause:
+ if (this.indexing) {
+ this._timer.initWithCallback(
+ this._timerCallbackDriver,
+ this._INDEX_INTERVAL,
+ Ci.nsITimer.TYPE_ONE_SHOT
+ );
+ } else {
+ // it's important to indicate no more callbacks are in flight
+ this._indexingActive = false;
+ }
+ break;
+ case GlodaConstants.kWorkAsync:
+ // there is nothing to do. some other code is now responsible for
+ // calling us.
+ break;
+ }
+ } finally {
+ this._inCallback = false;
+ }
+ },
+
+ _callbackHandle: {
+ init() {
+ this.wrappedCallback = GlodaIndexer._wrapCallbackDriver;
+ this.callbackThis = GlodaIndexer;
+ this.callback = GlodaIndexer.callbackDriver;
+ },
+ /**
+ * The stack of generators we are processing. The (numerically) last one is
+ * also the |activeIterator|.
+ */
+ activeStack: [],
+ /**
+ * The generator at the top of the |activeStack| and that we will call next
+ * or send on next if nothing changes.
+ */
+ activeIterator: null,
+ /**
+ * Meta-information about the generators at each level of the stack.
+ */
+ contextStack: [],
+ /**
+ * Push a new generator onto the stack. It becomes the active generator.
+ */
+ push(aIterator, aContext) {
+ this.activeStack.push(aIterator);
+ this.contextStack.push(aContext);
+ this.activeIterator = aIterator;
+ },
+ /**
+ * For use by generators that want to call another asynchronous process
+ * implemented as a generator. They should do
+ * "yield aCallbackHandle.pushAndGo(someGenerator(arg1, arg2));".
+ *
+ * @public
+ */
+ pushAndGo(aIterator, aContext) {
+ this.push(aIterator, aContext);
+ return GlodaConstants.kWorkSync;
+ },
+ /**
+ * Pop the active generator off the stack.
+ */
+ pop() {
+ this.activeIterator.return();
+ this.activeStack.pop();
+ this.contextStack.pop();
+ if (this.activeStack.length) {
+ this.activeIterator = this.activeStack[this.activeStack.length - 1];
+ } else {
+ this.activeIterator = null;
+ }
+ },
+ /**
+ * Someone propagated an exception and we need to clean-up all the active
+ * logic as best we can. Which is not really all that well.
+ *
+ * @param [aOptionalStopAtDepth=0] The length the stack should be when this
+ * method completes. Pass 0 or omit for us to clear everything out.
+ * Pass 1 to leave just the top-level generator intact.
+ */
+ cleanup(aOptionalStopAtDepth) {
+ if (aOptionalStopAtDepth === undefined) {
+ aOptionalStopAtDepth = 0;
+ }
+ while (this.activeStack.length > aOptionalStopAtDepth) {
+ this.pop();
+ }
+ },
+ /**
+ * For use when a generator finishes up by calling |doneWithResult| on us;
+ * the async driver calls this to pop that generator off the stack
+ * and get the result it passed in to its call to |doneWithResult|.
+ *
+ * @protected
+ */
+ popWithResult() {
+ this.pop();
+ let result = this._result;
+ this._result = null;
+ return result;
+ },
+ _result: null,
+ /**
+ * For use by generators that want to return a result to the calling
+ * asynchronous generator. Specifically, they should do
+ * "yield aCallbackHandle.doneWithResult(RESULT);".
+ *
+ * @public
+ */
+ doneWithResult(aResult) {
+ this._result = aResult;
+ return GlodaConstants.kWorkDoneWithResult;
+ },
+
+ /* be able to serve as a collection listener, resuming the active iterator's
+ last yield kWorkAsync */
+ onItemsAdded() {},
+ onItemsModified() {},
+ onItemsRemoved() {},
+ onQueryCompleted(aCollection) {
+ GlodaIndexer.callbackDriver();
+ },
+ },
+ _workBatchData: undefined,
+ /* eslint-disable complexity */
+ /**
+ * The workBatch generator handles a single 'batch' of processing, managing
+ * the database transaction and keeping track of "tokens". It drives the
+ * activeIterator generator which is doing the work.
+ * workBatch will only produce kWorkAsync, kWorkPause, and kWorkDone
+ * notifications. If activeIterator returns kWorkSync and there are still
+ * tokens available, workBatch will keep driving the activeIterator until it
+ * encounters a kWorkAsync (which workBatch will yield to callbackDriver), or
+ * it runs out of tokens and yields a kWorkPause or kWorkDone.
+ */
+ *workBatch() {
+ // Do we still have an open transaction? If not, start a new one.
+ if (!this._idleToCommit) {
+ lazy.GlodaDatastore._beginTransaction();
+ } else {
+ // We'll manage commit ourself while this routine is active.
+ this._idleToCommit = false;
+ }
+
+ this._perfIndexStopwatch.start();
+ let batchCount;
+ let haveMoreWork = true;
+ let transactionToCommit = true;
+ let inIdle;
+
+ let notifyDecimator = 0;
+
+ while (haveMoreWork) {
+ // Both explicit work activity points (sync + async) and transfer of
+ // control return (via kWorkDone*) results in a token being eaten. The
+ // idea now is to make tokens less precious so that the adaptive logic
+ // can adjust them with less impact. (Before this change, doing 1
+ // token's work per cycle ended up being an entire non-idle time-slice's
+ // work.)
+ // During this loop we track the clock real-time used even though we
+ // frequently yield to asynchronous operations. These asynchronous
+ // operations are either database queries or message streaming requests.
+ // Both may involve disk I/O but no network I/O (since we only stream
+ // messages that are already available offline), but in an ideal
+ // situation will come from cache and so the work this function kicks off
+ // will dominate.
+ // We do not use the CPU time to this end because...
+ // 1) Our timer granularity on linux is worse for CPU than for wall time.
+ // 2) That can fail to account for our I/O cost.
+ // 3) If something with a high priority / low latency need (like playing
+ // a video) is fighting us, although using CPU time will accurately
+ // express how much time we are actually spending to index, our goal
+ // is to control the duration of our time slices, not be "right" about
+ // the actual CPU cost. In that case, if we attempted to take on more
+ // work, we would likely interfere with the higher priority process or
+ // make ourselves less responsive by drawing out the period of time we
+ // are dominating the main thread.
+ this._perfIndexStopwatch.start();
+ // For telemetry purposes, we want to know how many messages we've been
+ // processing during that batch, and how long it took, pauses included.
+ let t0 = Date.now();
+ this._indexedMessageCount = 0;
+ batchCount = 0;
+ while (batchCount < this._indexTokens) {
+ if (
+ this._callbackHandle.activeIterator === null &&
+ !this._hireJobWorker()
+ ) {
+ haveMoreWork = false;
+ break;
+ }
+ batchCount++;
+
+ // XXX for performance, we may want to move the try outside the for loop
+ // with a quasi-redundant outer loop that shunts control back inside
+ // if we left the loop due to an exception (without consuming all the
+ // tokens.)
+ try {
+ switch (
+ this._callbackHandle.activeIterator.next(this._workBatchData).value
+ ) {
+ case GlodaConstants.kWorkSync:
+ this._workBatchData = undefined;
+ break;
+ case GlodaConstants.kWorkAsync:
+ this._workBatchData = yield GlodaConstants.kWorkAsync;
+ break;
+ case GlodaConstants.kWorkDone:
+ this._callbackHandle.pop();
+ this._workBatchData = undefined;
+ break;
+ case GlodaConstants.kWorkDoneWithResult:
+ this._workBatchData = this._callbackHandle.popWithResult();
+ break;
+ default:
+ break;
+ }
+ } catch (ex) {
+ this._log.debug("Exception in batch processing:", ex);
+ let workerDef = this._curIndexingJob._workerDef;
+ if (workerDef.recover) {
+ let recoverToDepth;
+ try {
+ recoverToDepth = workerDef.recover.call(
+ workerDef.indexer,
+ this._curIndexingJob,
+ this._callbackHandle.contextStack,
+ ex
+ );
+ } catch (ex2) {
+ this._log.error(
+ "Worker '" +
+ workerDef.name +
+ "' recovery function itself failed:",
+ ex2
+ );
+ }
+ if (this._unitTestHookRecover) {
+ this._unitTestHookRecover(
+ recoverToDepth,
+ ex,
+ this._curIndexingJob,
+ this._callbackHandle
+ );
+ }
+
+ if (recoverToDepth) {
+ this._callbackHandle.cleanup(recoverToDepth);
+ continue;
+ }
+ }
+ // (we either did not have a recover handler or it couldn't recover)
+ // call the cleanup helper if there is one
+ if (workerDef.cleanup) {
+ try {
+ workerDef.cleanup.call(workerDef.indexer, this._curIndexingJob);
+ } catch (ex2) {
+ this._log.error(
+ "Worker '" +
+ workerDef.name +
+ "' cleanup function itself failed:",
+ ex2
+ );
+ }
+ if (this._unitTestHookCleanup) {
+ this._unitTestHookCleanup(
+ true,
+ ex,
+ this._curIndexingJob,
+ this._callbackHandle
+ );
+ }
+ } else if (this._unitTestHookCleanup) {
+ this._unitTestHookCleanup(
+ false,
+ ex,
+ this._curIndexingJob,
+ this._callbackHandle
+ );
+ }
+
+ // Clean out everything on the async stack, warn about the job, kill.
+ // We do not log this warning lightly; it will break unit tests and
+ // be visible to users. Anything expected should likely have a
+ // recovery function or the cleanup logic should be extended to
+ // indicate that the failure is acceptable.
+ this._callbackHandle.cleanup();
+ this._log.warn(
+ "Problem during " + this._curIndexingJob + ", bailing:",
+ ex
+ );
+ this._curIndexingJob = null;
+ // the data must now be invalid
+ this._workBatchData = undefined;
+ }
+ }
+ this._perfIndexStopwatch.stop();
+
+ // idleTime can throw if there is no idle-provider available, such as an
+ // X session without the relevant extensions available. In this case
+ // we assume that the user is never idle.
+ try {
+ // We want to stop ASAP when leaving idle, so we can't rely on the
+ // standard polled callback. We do the polling ourselves.
+ if (this._idleService.idleTime < this._INDEX_IDLE_ADJUSTMENT_TIME) {
+ inIdle = false;
+ this._cpuTargetIndexTime = this._CPU_TARGET_INDEX_TIME_ACTIVE;
+ } else {
+ inIdle = true;
+ this._cpuTargetIndexTime = this._CPU_TARGET_INDEX_TIME_IDLE;
+ }
+ } catch (ex) {
+ inIdle = false;
+ }
+
+ // take a breather by having the caller re-schedule us sometime in the
+ // future, but only if we're going to perform another loop iteration.
+ if (haveMoreWork) {
+ notifyDecimator = (notifyDecimator + 1) % 32;
+ if (!notifyDecimator) {
+ this._notifyListeners();
+ }
+
+ for (
+ let pauseCount = 0;
+ pauseCount < this._PAUSE_REPEAT_LIMIT;
+ pauseCount++
+ ) {
+ this._perfPauseStopwatch.start();
+
+ yield GlodaConstants.kWorkPause;
+
+ this._perfPauseStopwatch.stop();
+ // We repeat the pause if the pause was longer than
+ // we expected, or if it used a significant amount
+ // of cpu, either of which indicate significant other
+ // activity.
+ if (
+ this._perfPauseStopwatch.cpuTimeSeconds * 1000 <
+ this._CPU_IS_BUSY_TIME &&
+ this._perfPauseStopwatch.realTimeSeconds * 1000 -
+ this._INDEX_INTERVAL <
+ this._PAUSE_LATE_IS_BUSY_TIME
+ ) {
+ break;
+ }
+ }
+ }
+
+ // All pauses have been taken, how effective were we? Report!
+ // XXX: there's possibly a lot of fluctuation since we go through here
+ // every 5 messages or even less
+ if (this._indexedMessageCount > 0) {
+ let delta = (Date.now() - t0) / 1000; // in seconds
+ let v = Math.round(this._indexedMessageCount / delta);
+ try {
+ let h = Services.telemetry.getHistogramById(
+ "THUNDERBIRD_INDEXING_RATE_MSG_PER_S"
+ );
+ h.add(v);
+ } catch (e) {
+ this._log.warn("Couldn't report telemetry", e, v);
+ }
+ }
+
+ if (batchCount > 0) {
+ let totalTime = this._perfIndexStopwatch.realTimeSeconds * 1000;
+ let timePerToken = totalTime / batchCount;
+ // Damp the average time since it is a rough estimate only.
+ this._cpuAverageTimePerToken =
+ (totalTime +
+ this._CPU_AVERAGE_TIME_DAMPING * this._cpuAverageTimePerToken) /
+ (batchCount + this._CPU_AVERAGE_TIME_DAMPING);
+ // We use the larger of the recent or the average time per token, so
+ // that we can respond quickly to slow down indexing if there
+ // is a sudden increase in time per token.
+ let bestTimePerToken = Math.max(
+ timePerToken,
+ this._cpuAverageTimePerToken
+ );
+ // Always index at least one token!
+ this._indexTokens = Math.max(
+ 1,
+ this._cpuTargetIndexTime / bestTimePerToken
+ );
+      // But no more than a maximum limit, just for sanity's sake.
+ this._indexTokens = Math.min(
+ this._CPU_MAX_TOKENS_PER_BATCH,
+ this._indexTokens
+ );
+ this._indexTokens = Math.ceil(this._indexTokens);
+ }
+
+ // Should we try to commit now?
+ let elapsed = Date.now() - this._lastCommitTime;
+ // Commit tends to cause a brief UI pause, so we try to delay it (but not
+ // forever) if the user is active. If we're done and idling, we'll also
+ // commit, otherwise we'll let the idle callback do it.
+ let doCommit =
+ transactionToCommit &&
+ (elapsed > this._MAXIMUM_COMMIT_TIME ||
+ (inIdle && (elapsed > this._MINIMUM_COMMIT_TIME || !haveMoreWork)));
+ if (doCommit) {
+ lazy.GlodaCollectionManager.cacheCommitDirty();
+ // Set up an async notification to happen after the commit completes so
+ // that we can avoid the indexer doing something with the database that
+ // causes the main thread to block against the completion of the commit
+ // (which can be a while) on 1.9.1.
+ lazy.GlodaDatastore.runPostCommit(this._callbackHandle.wrappedCallback);
+ // kick off the commit
+ lazy.GlodaDatastore._commitTransaction();
+ yield GlodaConstants.kWorkAsync;
+ this._lastCommitTime = Date.now();
+ // Restart the transaction if we still have work.
+ if (haveMoreWork) {
+ lazy.GlodaDatastore._beginTransaction();
+ } else {
+ transactionToCommit = false;
+ }
+ }
+ }
+
+ this._notifyListeners();
+
+ // If we still have a transaction to commit, tell idle to do the commit
+ // when it gets around to it.
+ if (transactionToCommit) {
+ this._idleToCommit = true;
+ }
+
+ yield GlodaConstants.kWorkDone;
+ },
+ /* eslint-enable complexity */
+
+ /**
+ * Maps indexing job type names to a worker definition.
+ * The worker definition is an object with the following attributes where
+ * only worker is required:
+ * - worker:
+ * - onSchedule: A function to be invoked when the worker is scheduled. The
+ * job is passed as an argument.
+ * - recover:
+ * - cleanup:
+ */
+ _indexerWorkerDefs: {},
+ /**
+ * Perform the initialization step and return a generator if there is any
+ * steady-state processing to be had.
+ */
+ _hireJobWorker() {
+ // In no circumstances should there be data bouncing around from previous
+ // calls if we are here. |killActiveJob| depends on this.
+ this._workBatchData = undefined;
+
+ if (this._indexQueue.length == 0) {
+ this._log.info("--- Done indexing, disabling timer renewal.");
+
+ this._curIndexingJob = null;
+ this._indexingDesired = false;
+ this._indexingJobCount = 0;
+ return false;
+ }
+
+ let job = (this._curIndexingJob = this._indexQueue.shift());
+ this._indexingJobCount++;
+
+ let generator = null;
+
+ if (job.jobType in this._indexerWorkerDefs) {
+ let workerDef = this._indexerWorkerDefs[job.jobType];
+ job._workerDef = workerDef;
+
+ // Prior to creating the worker, call the scheduling trigger function
+ // if there is one. This is so that jobs can be finalized. The
+ // initial use case is event-driven message indexing that accumulates
+ // a list of messages to index but wants it locked down once we start
+ // processing the list.
+ if (workerDef.onSchedule) {
+ workerDef.onSchedule.call(workerDef.indexer, job);
+ }
+
+ generator = workerDef.worker.call(
+ workerDef.indexer,
+ job,
+ this._callbackHandle
+ );
+ } else {
+ // Nothing we can do about this. Be loud about it and try to schedule
+ // something else.
+ this._log.error("Unknown job type: " + job.jobType);
+ return this._hireJobWorker();
+ }
+
+ if (this._unitTestSuperVerbose) {
+ this._log.debug("Hired job of type: " + job.jobType);
+ }
+
+ this._notifyListeners();
+
+ if (generator) {
+ this._callbackHandle.push(generator);
+ return true;
+ }
+ return false;
+ },
+
+ /**
+ * Schedule a job for indexing.
+ */
+ indexJob(aJob) {
+ this._log.info("Queue-ing job for indexing: " + aJob.jobType);
+
+ this._indexQueue.push(aJob);
+ this.indexing = true;
+ },
+
+ /**
+ * Kill the active job. This means a few things:
+ * - Kill all the generators in the callbackHandle stack.
+ * - If we are currently waiting on an async return, we need to make sure it
+ * does not screw us up.
+ * - Make sure the job's cleanup function gets called if appropriate.
+ *
+ * The async return case is actually not too troublesome. Since there is an
+ * active indexing job and we are not (by fiat) in that call stack, we know
+ * that the callback driver is guaranteed to get triggered again somehow.
+ * The only issue is to make sure that _workBatchData does not end up with
+ * the data. We compel |_hireJobWorker| to erase it to this end.
+ *
+ * @note You MUST NOT call this function from inside a job or an async function
+ * on the callbackHandle's stack of generators. If you are in that
+ * situation, you should just throw an exception. At the very least,
+ * use a timeout to trigger us.
+ */
+ killActiveJob() {
+ // There is nothing to do if we have no job
+ if (!this._curIndexingJob) {
+ return;
+ }
+
+ // -- Blow away the stack with cleanup.
+ let workerDef = this._curIndexingJob._workerDef;
+ if (this._unitTestSuperVerbose) {
+ this._log.debug("Killing job of type: " + this._curIndexingJob.jobType);
+ }
+ if (this._unitTestHookCleanup) {
+ this._unitTestHookCleanup(
+ !!workerDef.cleanup,
+ "no exception, this was killActiveJob",
+ this._curIndexingJob,
+ this._callbackHandle
+ );
+ }
+ this._callbackHandle.cleanup();
+ if (workerDef.cleanup) {
+ workerDef.cleanup.call(workerDef.indexer, this._curIndexingJob);
+ }
+
+ // Eliminate the job.
+ this._curIndexingJob = null;
+ },
+
+ /**
+ * Purge all jobs that the filter function returns true for. This does not
+ * kill the active job, use |killActiveJob| to do that.
+ *
+ * Make sure to call this function before killActiveJob
+ *
+ * @param aFilterElimFunc A filter function that takes an |IndexingJob| and
+ * returns true if the job should be purged, false if it should not be.
+ * The filter sees the jobs in the order they are scheduled.
+ */
+ purgeJobsUsingFilter(aFilterElimFunc) {
+ for (let iJob = 0; iJob < this._indexQueue.length; iJob++) {
+ let job = this._indexQueue[iJob];
+
+ // If the filter says to, splice the job out of existence (and make sure
+ // to fixup iJob to compensate.)
+ if (aFilterElimFunc(job)) {
+ if (this._unitTestSuperVerbose) {
+ this._log.debug("Purging job of type: " + job.jobType);
+ }
+ this._indexQueue.splice(iJob--, 1);
+ let workerDef = this._indexerWorkerDefs[job.jobType];
+ if (workerDef.jobCanceled) {
+ workerDef.jobCanceled.call(workerDef.indexer, job);
+ }
+ }
+ }
+ },
+
+ /* *********** Event Processing *********** */
+ observe(aSubject, aTopic, aData) {
+ // idle
+ if (aTopic == "idle") {
+ // Do we need to commit an indexer transaction?
+ if (this._idleToCommit) {
+ this._idleToCommit = false;
+ lazy.GlodaCollectionManager.cacheCommitDirty();
+ lazy.GlodaDatastore._commitTransaction();
+ this._lastCommitTime = Date.now();
+ this._notifyListeners();
+ }
+ } else if (aTopic == "network:offline-status-changed") {
+ // offline status
+ if (aData == "offline") {
+ this.suppressIndexing = true;
+ } else {
+ // online
+ this.suppressIndexing = false;
+ }
+ } else if (aTopic == "quit-application") {
+ // shutdown fallback
+ this._shutdown();
+ }
+ },
+};
+// we used to initialize here; now we have GlodaPublic.jsm do it for us after the
+// indexers register themselves so we know about all our built-in indexers
+// at init-time.
diff --git a/comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm b/comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm
new file mode 100644
index 0000000000..54ceacb59a
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm
@@ -0,0 +1,310 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaABIndexer", "GlodaABAttrs"];
+
+const { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { GlodaIndexer, IndexingJob } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+const { FreeTagNoun } = ChromeUtils.import(
+ "resource:///modules/gloda/NounFreetag.jsm"
+);
+
+var GlodaABIndexer = {
+ _log: null,
+ _notifications: [
+ "addrbook-contact-created",
+ "addrbook-contact-updated",
+ "addrbook-contact-deleted",
+ ],
+
+ name: "index_ab",
+ enable() {
+ if (this._log == null) {
+ this._log = console.createInstance({
+ prefix: "gloda.index_ab",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+ }
+
+ for (let topic of this._notifications) {
+ Services.obs.addObserver(this, topic);
+ }
+ },
+
+ disable() {
+ for (let topic of this._notifications) {
+ Services.obs.removeObserver(this, topic);
+ }
+ },
+
+ // it's a getter so we can reference 'this'
+ get workers() {
+ return [
+ [
+ "ab-card",
+ {
+ worker: this._worker_index_card,
+ },
+ ],
+ ];
+ },
+
+ *_worker_index_card(aJob, aCallbackHandle) {
+ let card = aJob.id;
+
+ if (card.primaryEmail) {
+ // load the identity
+ let query = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ query.kind("email");
+ // we currently normalize all e-mail addresses to be lowercase
+ query.value(card.primaryEmail.toLowerCase());
+ let identityCollection = query.getCollection(aCallbackHandle);
+ yield GlodaConstants.kWorkAsync;
+
+ if (identityCollection.items.length) {
+ let identity = identityCollection.items[0];
+ // force the identity to know it has an associated ab card.
+ identity._hasAddressBookCard = true;
+
+ this._log.debug("Found identity, processing card.");
+ yield aCallbackHandle.pushAndGo(
+ Gloda.grokNounItem(
+ identity.contact,
+ { card },
+ false,
+ false,
+ aCallbackHandle
+ )
+ );
+ this._log.debug("Done processing card.");
+ }
+ }
+
+ yield GlodaConstants.kWorkDone;
+ },
+
+ initialSweep() {},
+
+ observe(subject, topic, data) {
+ subject.QueryInterface(Ci.nsIAbCard);
+
+ switch (topic) {
+ case "addrbook-contact-created": {
+ // When an address book card is added, update the cached GlodaIdentity
+ // object's cached idea of whether the identity has an ab card.
+ this._log.debug("Received Card Add Notification");
+
+ let identity = GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaConstants.NOUN_IDENTITY,
+ "email@" + subject.primaryEmail.toLowerCase()
+ );
+ if (identity) {
+ identity._hasAddressBookCard = true;
+ }
+ break;
+ }
+ case "addrbook-contact-updated": {
+ this._log.debug("Received Card Change Notification");
+
+ let job = new IndexingJob("ab-card", subject);
+ GlodaIndexer.indexJob(job);
+ break;
+ }
+ case "addrbook-contact-deleted": {
+        // When an address book card is removed, update the cached GlodaIdentity
+ // object's cached idea of whether the identity has an ab card.
+ this._log.debug("Received Card Removal Notification");
+
+ let identity = GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaConstants.NOUN_IDENTITY,
+ "email@" + subject.primaryEmail.toLowerCase()
+ );
+ if (identity) {
+ identity._hasAddressBookCard = false;
+ }
+ break;
+ }
+ }
+ },
+};
+GlodaIndexer.registerIndexer(GlodaABIndexer);
+
+var GlodaABAttrs = {
+ providerName: "gloda.ab_attr",
+ _log: null,
+
+ init() {
+ this._log = console.createInstance({
+ prefix: "gloda.abattrs",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+
+ try {
+ this.defineAttributes();
+ } catch (ex) {
+ this._log.error("Error in init: " + ex);
+ throw ex;
+ }
+ },
+
+ defineAttributes() {
+ /* ***** Contacts ***** */
+ this._attrIdentityContact = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "identities",
+ singular: false,
+ special: GlodaConstants.kSpecialColumnChildren,
+ // specialColumnName: "contactID",
+ storageAttributeName: "_identities",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_IDENTITY,
+ }); // tested-by: test_attributes_fundamental
+ this._attrContactName = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "name",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "name",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+ this._attrContactPopularity = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "popularity",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "popularity",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ }); // not-tested
+ this._attrContactFrecency = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "frecency",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "frecency",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ }); // not-tested
+
+ /* ***** Identities ***** */
+ this._attrIdentityContact = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "contact",
+ singular: true,
+ special: GlodaConstants.kSpecialColumnParent,
+ specialColumnName: "contactID", // the column in the db
+ idStorageAttributeName: "_contactID",
+ valueStorageAttributeName: "_contact",
+ subjectNouns: [GlodaConstants.NOUN_IDENTITY],
+ objectNoun: GlodaConstants.NOUN_CONTACT,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+ this._attrIdentityKind = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "kind",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "kind",
+ subjectNouns: [GlodaConstants.NOUN_IDENTITY],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+ this._attrIdentityValue = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "value",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "value",
+ subjectNouns: [GlodaConstants.NOUN_IDENTITY],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+
+ /* ***** Contact Meta ***** */
+ // Freeform tags; not explicit like thunderbird's fundamental tags.
+ // we differentiate for now because of fundamental implementation
+ // differences.
+ this._attrFreeTag = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrExplicit,
+ attributeName: "freetag",
+ bind: true,
+ bindName: "freeTags",
+ singular: false,
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: Gloda.lookupNoun("freetag"),
+ parameterNoun: null,
+ canQuery: true,
+ }); // not-tested
+ // we need to find any existing bound freetag attributes, and use them to
+ // populate to FreeTagNoun's understanding
+ if ("parameterBindings" in this._attrFreeTag) {
+ for (let freeTagName in this._attrFreeTag.parameterBindings) {
+ this._log.debug("Telling FreeTagNoun about: " + freeTagName);
+ FreeTagNoun.getFreeTag(freeTagName);
+ }
+ }
+ },
+
+ *process(aContact, aRawReps, aIsNew, aCallbackHandle) {
+ let card = aRawReps.card;
+ if (aContact.NOUN_ID != GlodaConstants.NOUN_CONTACT) {
+ this._log.warn("Somehow got a non-contact: " + aContact);
+ return; // this will produce an exception; we like.
+ }
+
+ // update the name
+ if (card.displayName && card.displayName != aContact.name) {
+ aContact.name = card.displayName;
+ }
+
+ aContact.freeTags = [];
+
+ let tags = null;
+ try {
+ tags = card.getProperty("Categories", null);
+ } catch (ex) {
+ this._log.error("Problem accessing property: " + ex);
+ }
+ if (tags) {
+ for (let tagName of tags.split(",")) {
+ tagName = tagName.trim();
+ if (tagName) {
+ aContact.freeTags.push(FreeTagNoun.getFreeTag(tagName));
+ }
+ }
+ }
+
+ yield GlodaConstants.kWorkDone;
+ },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm b/comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm
new file mode 100644
index 0000000000..f81def2560
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm
@@ -0,0 +1,361 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaMsgSearcher"];
+
+const { Gloda } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaPublic.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+/**
+ * How much time boost should a 'score point' amount to? The authoritative,
+ * incontrivertible answer, across all time and space, is a week.
+ * Note that gloda stores timestamps as PRTimes for no exceedingly good
+ * reason.
+ */
+var FUZZSCORE_TIMESTAMP_FACTOR = 1000 * 1000 * 60 * 60 * 24 * 7;
+
+var RANK_USAGE = "glodaRank(matchinfo(messagesText), 1.0, 2.0, 2.0, 1.5, 1.5)";
+
+var DASCORE =
+ "(((" +
+ RANK_USAGE +
+ " + messages.notability) * " +
+ FUZZSCORE_TIMESTAMP_FACTOR +
+ ") + messages.date)";
+
+/**
+ * A new optimization decision we are making is that we do not want to carry
+ * around any data in our ephemeral tables that is not used for whittling the
+ * result set. The idea is that the btree page cache or OS cache is going to
+ * save us from the disk seeks and carrying around the extra data is just going
+ * to be CPU/memory churn that slows us down.
+ *
+ * Additionally, we try and avoid row lookups that would have their results
+ * discarded by the LIMIT. Because of limitations in FTS3 (which might
+ * be addressed in FTS4 by a feature request), we can't avoid the 'messages'
+ * lookup since that has the message's date and static notability but we can
+ * defer the 'messagesText' lookup.
+ *
+ * This is the access pattern we are after here:
+ * 1) Order the matches with minimized lookup and result storage costs.
+ * - The innermost MATCH does the doclist magic and provides us with
+ * matchinfo() support which does not require content row retrieval
+ * from messagesText. Unfortunately, this is not enough to whittle anything
+ * because we still need static interestingness, so...
+ * - Based on the match we retrieve the date and notability for that row from
+ * 'messages' using this in conjunction with matchinfo() to provide a score
+ * that we can then use to LIMIT our results.
+ * 2) We reissue the MATCH query so that we will be able to use offsets(), but
+ * we intersect the results of this MATCH against our LIMITed results from
+ * step 1.
+ * - We use 'docid IN (phase 1 query)' to accomplish this because it results in
+ * efficient lookup. If we just use a join, we get O(mn) performance because
+ * a cartesian join ends up being performed where either we end up performing
+ * the fulltext query M times and table scan intersect with the results from
+ * phase 1 or we do the fulltext once but traverse the entire result set from
+ * phase 1 N times.
+ * - We believe that the re-execution of the MATCH query should have no disk
+ * costs because it should still be cached by SQLite or the OS. In the case
+ * where memory is so constrained this is not true our behavior is still
+ * probably preferable than the old way because that would have caused lots
+ * of swapping.
+ * - This part of the query otherwise resembles the basic gloda query but with
+ * the inclusion of the offsets() invocation. The messages table lookup
+ * should not involve any disk traffic because the pages should still be
+ * cached (SQLite or OS) from phase 1. The messagesText lookup is new, and
+ * this is the major disk-seek reduction optimization we are making. (Since
+ * we avoid this lookup for all of the documents that were excluded by the
+ * LIMIT.) Since offsets() also needs to retrieve the row from messagesText
+ * there is a nice synergy there.
+ */
+var NUEVO_FULLTEXT_SQL =
+ "SELECT messages.*, messagesText.*, offsets(messagesText) AS osets " +
+ "FROM messagesText, messages " +
+ "WHERE" +
+ " messagesText MATCH ?1 " +
+ " AND messagesText.docid IN (" +
+ "SELECT docid " +
+ "FROM messagesText JOIN messages ON messagesText.docid = messages.id " +
+ "WHERE messagesText MATCH ?1 " +
+ "ORDER BY " +
+ DASCORE +
+ " DESC " +
+ "LIMIT ?2" +
+ " )" +
+ " AND messages.id = messagesText.docid " +
+ " AND +messages.deleted = 0" +
+ " AND +messages.folderID IS NOT NULL" +
+ " AND +messages.messageKey IS NOT NULL";
+
/**
 * Identity function; used below as a truthiness predicate for
 *  every()/some()/filter() over term-incidence counters.
 */
function identityFunc(x) {
  return x;
}
+
/**
 * Clamp (x - 1) at zero; computes the "matches beyond the first" count for a
 *  term when awarding multiple-match credit.
 */
function oneLessMaxZero(x) {
  return x <= 1 ? 0 : x - 1;
}
+
/**
 * Addition reducer for Array.prototype.reduce; sums numeric incidence counts.
 */
function reduceSum(total, value) {
  return total + value;
}
+
+/*
+ * Columns are: body, subject, attachment names, author, recipients
+ */
+
+/**
+ * Scores if all search terms match in a column. We bias against author
+ * slightly and recipient a bit more in this case because a search that
+ * entirely matches just on a person should give a mention of that person
+ * in the subject or attachment a fighting chance.
+ * Keep in mind that because of our indexing in the face of address book
+ * contacts (namely, we index the name used in the e-mail as well as the
+ * display name on the address book card associated with the e-mail address)
+ * a contact is going to bias towards matching multiple times.
+ */
+var COLUMN_ALL_MATCH_SCORES = [4, 20, 20, 16, 12];
+/**
+ * Score for each distinct term that matches in the column. This is capped
+ * by COLUMN_ALL_SCORES.
+ */
+var COLUMN_PARTIAL_PER_MATCH_SCORES = [1, 4, 4, 4, 3];
+/**
+ * If a term matches multiple times, what is the marginal score for each
+ * additional match. We count the total number of matches beyond the
+ * first match for each term. In other words, if we have 3 terms which
+ * matched 5, 3, and 0 times, then the total from our perspective is
+ * (5 - 1) + (3 - 1) + 0 = 4 + 2 + 0 = 6. We take the minimum of that value
+ * and the value in COLUMN_MULTIPLE_MATCH_LIMIT and multiply by the value in
+ * COLUMN_MULTIPLE_MATCH_SCORES.
+ */
+var COLUMN_MULTIPLE_MATCH_SCORES = [1, 0, 0, 0, 0];
+var COLUMN_MULTIPLE_MATCH_LIMIT = [10, 0, 0, 0, 0];
+
+/**
+ * Score the message on its offsets (from stashedColumns).
+ */
+function scoreOffsets(aMessage, aContext) {
+ let score = 0;
+
+ let termTemplate = aContext.terms.map(_ => 0);
+ // for each column, a list of the incidence of each term
+ let columnTermIncidence = [
+ termTemplate.concat(),
+ termTemplate.concat(),
+ termTemplate.concat(),
+ termTemplate.concat(),
+ termTemplate.concat(),
+ ];
+
+ // we need a friendlyParseInt because otherwise the radix stuff happens
+ // because of the extra arguments map parses. curse you, map!
+ let offsetNums = aContext.stashedColumns[aMessage.id][0]
+ .split(" ")
+ .map(x => parseInt(x));
+ for (let i = 0; i < offsetNums.length; i += 4) {
+ let columnIndex = offsetNums[i];
+ let termIndex = offsetNums[i + 1];
+ columnTermIncidence[columnIndex][termIndex]++;
+ }
+
+ for (let iColumn = 0; iColumn < COLUMN_ALL_MATCH_SCORES.length; iColumn++) {
+ let termIncidence = columnTermIncidence[iColumn];
+ if (termIncidence.every(identityFunc)) {
+ // Bestow all match credit.
+ score += COLUMN_ALL_MATCH_SCORES[iColumn];
+ } else if (termIncidence.some(identityFunc)) {
+ // Bestow partial match credit.
+ score += Math.min(
+ COLUMN_ALL_MATCH_SCORES[iColumn],
+ COLUMN_PARTIAL_PER_MATCH_SCORES[iColumn] *
+ termIncidence.filter(identityFunc).length
+ );
+ }
+ // Bestow multiple match credit.
+ score +=
+ Math.min(
+ termIncidence.map(oneLessMaxZero).reduce(reduceSum, 0),
+ COLUMN_MULTIPLE_MATCH_LIMIT[iColumn]
+ ) * COLUMN_MULTIPLE_MATCH_SCORES[iColumn];
+ }
+
+ return score;
+}
+
+/**
+ * The searcher basically looks like a query, but is specialized for fulltext
+ * search against messages. Most of the explicit specialization involves
+ * crafting a SQL query that attempts to order the matches by likelihood that
+ * the user was looking for it. This is based on full-text matches combined
+ * with an explicit (generic) interest score value placed on the message at
+ * indexing time. This is followed by using the more generic gloda scoring
+ * mechanism to explicitly score the messages given the search context in
+ * addition to the more generic score adjusting rules.
+ */
+function GlodaMsgSearcher(aListener, aSearchString, aAndTerms) {
+ this.listener = aListener;
+
+ this.searchString = aSearchString;
+ this.fulltextTerms = this.parseSearchString(aSearchString);
+ this.andTerms = aAndTerms != null ? aAndTerms : true;
+
+ this.query = null;
+ this.collection = null;
+
+ this.scores = null;
+}
GlodaMsgSearcher.prototype = {
  /**
   * Number of messages to retrieve initially.
   * Read from preferences on every access so user changes apply immediately.
   * NOTE(review): `Services` is used as the system-module global rather than
   *  being imported in this file — confirm this module only loads where that
   *  global is defined.
   */
  get retrievalLimit() {
    return Services.prefs.getIntPref(
      "mailnews.database.global.search.msg.limit"
    );
  },

  /**
   * Parse the string into terms/phrases by finding matching double-quotes.
   * Quoted runs become a single phrase term; everything else splits on
   *  spaces. A double-quote with no closing partner is discarded.
   *
   * @param aSearchString The raw search string.
   * @returns {string[]} The list of non-empty terms/phrases.
   */
  parseSearchString(aSearchString) {
    aSearchString = aSearchString.trim();
    let terms = [];

    /*
     * Add the term as long as the trim on the way in didn't obliterate it.
     *
     * In the future this might have other helper logic; it did once before.
     */
    function addTerm(aTerm) {
      if (aTerm) {
        terms.push(aTerm);
      }
    }

    while (aSearchString) {
      if (aSearchString.startsWith('"')) {
        let endIndex = aSearchString.indexOf(aSearchString[0], 1);
        // eat the quote if it has no friend
        if (endIndex == -1) {
          aSearchString = aSearchString.substring(1);
          continue;
        }

        addTerm(aSearchString.substring(1, endIndex).trim());
        aSearchString = aSearchString.substring(endIndex + 1);
        continue;
      }

      let spaceIndex = aSearchString.indexOf(" ");
      if (spaceIndex == -1) {
        addTerm(aSearchString);
        break;
      }

      addTerm(aSearchString.substring(0, spaceIndex));
      aSearchString = aSearchString.substring(spaceIndex + 1);
    }

    return terms;
  },

  /**
   * Build the gloda message query that runs NUEVO_FULLTEXT_SQL with our
   *  parsed terms as the MATCH string and retrievalLimit as the row limit.
   * Note: non-CJK terms shorter than 3 characters fall through every branch
   *  below and contribute nothing to the MATCH string.
   */
  buildFulltextQuery() {
    let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
      noMagic: true,
      explicitSQL: NUEVO_FULLTEXT_SQL,
      limitClauseAlreadyIncluded: true,
      // osets is 0-based column number 14 (volatile to column changes)
      // save the offset column for extra analysis
      stashColumns: [14],
    });

    let fulltextQueryString = "";

    for (let [iTerm, term] of this.fulltextTerms.entries()) {
      // Separator before every term but the first; bare space means AND.
      if (iTerm) {
        fulltextQueryString += this.andTerms ? " " : " OR ";
      }

      // Put our term in quotes. This is needed for the tokenizer to be able
      // to do useful things. The exception is people clever enough to use
      // NEAR.
      if (/^NEAR(\/\d+)?$/.test(term)) {
        fulltextQueryString += term;
      } else if (term.length == 1 && term.charCodeAt(0) >= 0x2000) {
        // This is a single-character CJK search query, so add a wildcard.
        // Our tokenizer treats anything at/above 0x2000 as CJK for now.
        fulltextQueryString += term + "*";
      } else if (
        (term.length == 2 &&
          term.charCodeAt(0) >= 0x2000 &&
          term.charCodeAt(1) >= 0x2000) ||
        term.length >= 3
      ) {
        fulltextQueryString += '"' + term + '"';
      }
    }

    query.fulltextMatches(fulltextQueryString);
    query.limit(this.retrievalLimit);

    return query;
  },

  /**
   * Kick off the search: build the fulltext query and request its collection,
   *  interposing ourselves as listener so results get scored before being
   *  forwarded to the real listener.
   *
   * @param [aListenerOverride] Replaces the listener given at construction.
   * @param [aData] Opaque data attribute for the collection.
   * @returns The (asynchronously populated) collection.
   */
  getCollection(aListenerOverride, aData) {
    if (aListenerOverride) {
      this.listener = aListenerOverride;
    }

    this.query = this.buildFulltextQuery();
    this.collection = this.query.getCollection(this, aData);
    this.completed = false;

    return this.collection;
  },

  // Descending "dascore" sort key; presumably consumed by the facet/search UI
  // to order results by the dynamic score — confirm against callers.
  sortBy: "-dascore",

  /**
   * Collection listener hook: score the newly added items via offsets-based
   *  heuristics, accumulate the scores, then forward to the real listener.
   */
  onItemsAdded(aItems, aCollection) {
    let newScores = Gloda.scoreNounItems(
      aItems,
      {
        terms: this.fulltextTerms,
        stashedColumns: aCollection.stashedColumns,
      },
      [scoreOffsets]
    );
    if (this.scores) {
      this.scores = this.scores.concat(newScores);
    } else {
      this.scores = newScores;
    }

    if (this.listener) {
      this.listener.onItemsAdded(aItems, aCollection);
    }
  },
  /** Forward modification events to the real listener. */
  onItemsModified(aItems, aCollection) {
    if (this.listener) {
      this.listener.onItemsModified(aItems, aCollection);
    }
  },
  /** Forward removal events to the real listener. */
  onItemsRemoved(aItems, aCollection) {
    if (this.listener) {
      this.listener.onItemsRemoved(aItems, aCollection);
    }
  },
  /** Mark the search complete and forward to the real listener. */
  onQueryCompleted(aCollection) {
    this.completed = true;
    if (this.listener) {
      this.listener.onQueryCompleted(aCollection);
    }
  },
};
diff --git a/comm/mailnews/db/gloda/modules/GlodaPublic.jsm b/comm/mailnews/db/gloda/modules/GlodaPublic.jsm
new file mode 100644
index 0000000000..555a6d8921
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaPublic.jsm
@@ -0,0 +1,45 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["Gloda"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+/* nothing to import, just run some code */ ChromeUtils.import(
+ "resource:///modules/gloda/Everybody.jsm"
+);
+const { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+// initialize the indexer! (who was actually imported as a nested dep by the
+// things Everybody.jsm imported.) We waited until now so it could know about
+// its indexers.
+GlodaIndexer._init();
+const { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+/**
+ * Expose some junk
+ */
+function proxy(aSourceObj, aSourceAttr, aDestObj, aDestAttr) {
+ aDestObj[aDestAttr] = function (...aArgs) {
+ return aSourceObj[aSourceAttr](...aArgs);
+ };
+}
+
// Forward indexer listener management through the public Gloda object.
proxy(GlodaIndexer, "addListener", Gloda, "addIndexerListener");
proxy(GlodaIndexer, "removeListener", Gloda, "removeIndexerListener");
// Message-indexer conveniences re-published on Gloda.
proxy(GlodaMsgIndexer, "isMessageIndexed", Gloda, "isMessageIndexed");
proxy(
  GlodaMsgIndexer,
  "setFolderIndexingPriority",
  Gloda,
  "setFolderIndexingPriority"
);
proxy(
  GlodaMsgIndexer,
  "resetFolderIndexingPriority",
  Gloda,
  "resetFolderIndexingPriority"
);
diff --git a/comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm b/comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm
new file mode 100644
index 0000000000..2e53cf5925
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm
@@ -0,0 +1,642 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaQueryClassFactory"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+/**
+ * @class Query class core; each noun gets its own sub-class where attributes
+ * have helper methods bound.
+ *
+ * @param aOptions A dictionary of options. Current legal options are:
+ * - noMagic: Indicates that the noun's dbQueryJoinMagic should be ignored.
+ * Currently, this means that messages will not have their
+ * full-text indexed values re-attached. This is planned to be
+ * offset by having queries/cache lookups that do not request
+ * noMagic to ensure that their data does get loaded.
+ * - explicitSQL: A hand-rolled alternate representation for the core
+ * SELECT portion of the SQL query. The queryFromQuery logic still
+ * generates its normal query, we just ignore its result in favor of
+ * your provided value. This means that the positional parameter
+ * list is still built and you should/must rely on those bound
+ * parameters (using '?'). The replacement occurs prior to the
+ * outerWrapColumns, ORDER BY, and LIMIT contributions to the query.
+ * - outerWrapColumns: If provided, wraps the query in a "SELECT *,blah
+ * FROM (actual query)" where blah is your list of outerWrapColumns
+ * made comma-delimited. The idea is that this allows you to
+ * reference the result of expressions inside the query using their
+ * names rather than having to duplicate the logic. In practice,
+ * this makes things more readable but is unlikely to improve
+ * performance. (Namely, my use of 'offsets' for full-text stuff
+ * ends up in the EXPLAIN plan twice despite this.)
+ * - noDbQueryValidityConstraints: Indicates that any validity constraints
+ * should be ignored. This should be used when you need to get every
+ * match regardless of whether it's valid.
+ *
+ * @property _owner The query instance that holds the list of unions...
+ * @property _constraints A list of (lists of OR constraints) that are ANDed
+ * together. For example [[FROM bob, FROM jim], [DATE last week]] would
+ * be requesting us to find all the messages from either bob or jim, and
+ * sent in the last week.
+ * @property _unions A list of other queries whose results are unioned with our
+ * own. There is no concept of nesting or sub-queries apart from this
+ * mechanism.
+ */
function GlodaQueryClass(aOptions) {
  this.options = aOptions ?? {};

  // When this query is an OR-branch, the root query that owns the union list;
  //  null for the root itself.
  this._owner = null;
  // The AND-ed constraint groups local to this query.
  this._constraints = [];
  // Sibling queries whose results are unioned with ours.
  this._unions = [];

  // Requested sort keys and result cap (0 = unset).
  this._order = [];
  this._limit = 0;
}
+
GlodaQueryClass.prototype = {
  // Shared sentinel object, compared by identity. NOTE(review): presumably
  //  used by attribute query helpers to mean "any value" — confirm against
  //  the helper generation code.
  WILDCARD: {},

  /** Number of AND-ed constraint groups on this query (unions not counted). */
  get constraintCount() {
    return this._constraints.length;
  },

  /**
   * Spawn a new query that is OR-ed (unioned) with this one. All OR branches
   *  hang off the root query (the one with no _owner).
   *
   * @returns the new (empty) OR-branch query.
   */
  or() {
    let owner = this._owner || this;
    let orQuery = new this._queryClass();
    orQuery._owner = owner;
    owner._unions.push(orQuery);
    return orQuery;
  },

  /** Append sort keys; returns this for chaining. */
  orderBy(...aArgs) {
    this._order.push(...aArgs);
    return this;
  },

  /**
   * Cap the number of results. Returns this for chaining. (0, the
   *  constructor default, presumably means unlimited — confirm in
   *  GlodaDatastore.queryFromQuery.)
   */
  limit(aLimit) {
    this._limit = aLimit;
    return this;
  },
+
+ /**
+ * Return a collection asynchronously populated by this collection. You must
+ * provide a listener to receive notifications from the collection as it
+ * receives updates. The listener object should implement onItemsAdded,
+ * onItemsModified, and onItemsRemoved methods, all of which take a single
+ * argument which is the list of items which have been added, modified, or
+ * removed respectively.
+ *
+ * @param aListener The collection listener.
+ * @param [aData] The data attribute to set on the collection.
+ * @param [aArgs.becomeExplicit] Make the collection explicit so that the
+ * collection will only ever contain results found from the database
+ * query and the query will not be updated as new items are indexed that
+ * also match the query.
+ * @param [aArgs.becomeNull] Change the collection's query to a null query so
+ * that it will never receive any additional added/modified/removed events
+ * apart from the underlying database query. This is really only intended
+ * for gloda internal use but may be acceptable for non-gloda use. Please
+ * ask on mozilla.dev.apps.thunderbird first to make sure there isn't a
+ * better solution for your use-case. (Note: removals will still happen
+ * when things get fully deleted.)
+ */
+ getCollection(aListener, aData, aArgs) {
+ this.completed = false;
+ return this._nounDef.datastore.queryFromQuery(
+ this,
+ aListener,
+ aData,
+ /* aExistingCollection */ null,
+ /* aMasterCollection */ null,
+ aArgs
+ );
+ },
+
  /* eslint-disable complexity */
  /**
   * Test whether the given first-class noun instance satisfies this query.
   * A query matches when any of its unioned branches has all of its AND-ed
   *  constraint groups satisfied.
   *
   * @param aObj The noun instance to test.
   * @returns {boolean} true when aObj satisfies this query.
   * @testpoint gloda.query.test
   */
  test(aObj) {
    // when changing this method, be sure that GlodaDatastore's queryFromQuery
    // method likewise has any required changes made.
    let unionQueries = [this].concat(this._unions);

    for (let iUnion = 0; iUnion < unionQueries.length; iUnion++) {
      let curQuery = unionQueries[iUnion];

      // assume success until a specific (or) constraint proves us wrong
      let querySatisfied = true;
      for (
        let iConstraint = 0;
        iConstraint < curQuery._constraints.length;
        iConstraint++
      ) {
        let constraint = curQuery._constraints[iConstraint];
        // Constraint layout: [type, attrDef, ...values]; a falsy attrDef
        //  means the constraint is on the object's id.
        let [constraintType, attrDef] = constraint;
        let boundName = attrDef ? attrDef.boundName : "id";
        // An attribute marked IGNORE_FACET on the object disqualifies any
        //  constraint against it. (Presumably set by faceting — confirm.)
        if (
          boundName in aObj &&
          aObj[boundName] === GlodaConstants.IGNORE_FACET
        ) {
          querySatisfied = false;
          break;
        }

        let constraintValues = constraint.slice(2);

        if (constraintType === GlodaConstants.kConstraintIdIn) {
          if (!constraintValues.includes(aObj.id)) {
            querySatisfied = false;
            break;
          }
        } else if (
          constraintType === GlodaConstants.kConstraintIn ||
          constraintType === GlodaConstants.kConstraintEquals
        ) {
          // @testpoint gloda.query.test.kConstraintIn
          let objectNounDef = attrDef.objectNounDef;

          // if they provide an equals comparator, use that.
          // (note: the next case has better optimization possibilities than
          // this mechanism, but of course has higher initialization costs or
          // code complexity costs...)
          if (objectNounDef.equals) {
            let testValues;
            if (!(boundName in aObj)) {
              testValues = [];
            } else if (attrDef.singular) {
              testValues = [aObj[boundName]];
            } else {
              testValues = aObj[boundName];
            }

            // If there are no constraints, then we are just testing for there
            // being a value. Succeed (continue) in that case.
            if (
              constraintValues.length == 0 &&
              testValues.length &&
              testValues[0] != null
            ) {
              continue;
            }

            // If there are no test values and the empty set is significant,
            // then check if any of the constraint values are null (our
            // empty indicator.)
            if (testValues.length == 0 && attrDef.emptySetIsSignificant) {
              let foundEmptySetSignifier = false;
              for (let constraintValue of constraintValues) {
                if (constraintValue == null) {
                  foundEmptySetSignifier = true;
                  break;
                }
              }
              if (foundEmptySetSignifier) {
                continue;
              }
            }

            let foundMatch = false;
            for (let testValue of testValues) {
              for (let value of constraintValues) {
                if (objectNounDef.equals(testValue, value)) {
                  foundMatch = true;
                  break;
                }
              }
              if (foundMatch) {
                break;
              }
            }
            if (!foundMatch) {
              querySatisfied = false;
              break;
            }
          } else {
            // otherwise, we need to convert everyone to their param/value form
            // in order to test for equality
            // let's just do the simple, obvious thing for now. which is
            // what we did in the prior case but exploding values using
            // toParamAndValue, and then comparing.
            let testValues;
            if (!(boundName in aObj)) {
              testValues = [];
            } else if (attrDef.singular) {
              testValues = [aObj[boundName]];
            } else {
              testValues = aObj[boundName];
            }

            // If there are no constraints, then we are just testing for there
            // being a value. Succeed (continue) in that case.
            if (
              constraintValues.length == 0 &&
              testValues.length &&
              testValues[0] != null
            ) {
              continue;
            }
            // If there are no test values and the empty set is significant,
            // then check if any of the constraint values are null (our
            // empty indicator.)
            if (testValues.length == 0 && attrDef.emptySetIsSignificant) {
              let foundEmptySetSignifier = false;
              for (let constraintValue of constraintValues) {
                if (constraintValue == null) {
                  foundEmptySetSignifier = true;
                  break;
                }
              }
              if (foundEmptySetSignifier) {
                continue;
              }
            }

            let foundMatch = false;
            for (let testValue of testValues) {
              let [aParam, aValue] = objectNounDef.toParamAndValue(testValue);
              for (let value of constraintValues) {
                // skip empty set check sentinel values
                if (value == null && attrDef.emptySetIsSignificant) {
                  continue;
                }
                let [bParam, bValue] = objectNounDef.toParamAndValue(value);
                if (aParam == bParam && aValue == bValue) {
                  foundMatch = true;
                  break;
                }
              }
              if (foundMatch) {
                break;
              }
            }
            if (!foundMatch) {
              querySatisfied = false;
              break;
            }
          }
        } else if (constraintType === GlodaConstants.kConstraintRanges) {
          // @testpoint gloda.query.test.kConstraintRanges
          // Range tuples are [lower, upper]; null at either end means
          //  open-ended on that side.
          let objectNounDef = attrDef.objectNounDef;

          let testValues;
          if (!(boundName in aObj)) {
            testValues = [];
          } else if (attrDef.singular) {
            testValues = [aObj[boundName]];
          } else {
            testValues = aObj[boundName];
          }

          let foundMatch = false;
          for (let testValue of testValues) {
            let [tParam, tValue] = objectNounDef.toParamAndValue(testValue);
            for (let rangeTuple of constraintValues) {
              let [lowerRValue, upperRValue] = rangeTuple;
              if (lowerRValue == null) {
                let [upperParam, upperValue] =
                  objectNounDef.toParamAndValue(upperRValue);
                if (tParam == upperParam && tValue <= upperValue) {
                  foundMatch = true;
                  break;
                }
              } else if (upperRValue == null) {
                let [lowerParam, lowerValue] =
                  objectNounDef.toParamAndValue(lowerRValue);
                if (tParam == lowerParam && tValue >= lowerValue) {
                  foundMatch = true;
                  break;
                }
              } else {
                // no one is null
                let [upperParam, upperValue] =
                  objectNounDef.toParamAndValue(upperRValue);
                let [lowerParam, lowerValue] =
                  objectNounDef.toParamAndValue(lowerRValue);
                if (
                  tParam == lowerParam &&
                  tValue >= lowerValue &&
                  tParam == upperParam &&
                  tValue <= upperValue
                ) {
                  foundMatch = true;
                  break;
                }
              }
            }
            if (foundMatch) {
              break;
            }
          }
          if (!foundMatch) {
            querySatisfied = false;
            break;
          }
        } else if (constraintType === GlodaConstants.kConstraintStringLike) {
          // @testpoint gloda.query.test.kConstraintStringLike
          // constraintValues is a LIKE-style pattern: string parts must match
          //  in order, non-string parts act as wildcards.
          let curIndex = 0;
          let value = boundName in aObj ? aObj[boundName] : "";
          // the attribute must be singular, we don't support arrays of strings.
          for (let valuePart of constraintValues) {
            if (typeof valuePart == "string") {
              let index = value.indexOf(valuePart);
              // if curIndex is null, we just need any match
              // if it's not null, it must match the offset of our found match
              if (curIndex === null) {
                if (index == -1) {
                  querySatisfied = false;
                } else {
                  curIndex = index + valuePart.length;
                }
              } else if (index != curIndex) {
                querySatisfied = false;
              } else {
                curIndex = index + valuePart.length;
              }
              if (!querySatisfied) {
                break;
              }
            } else {
              // wild!
              curIndex = null;
            }
          }
          // curIndex must be null or equal to the length of the string
          if (querySatisfied && curIndex !== null && curIndex != value.length) {
            querySatisfied = false;
          }
        } else if (constraintType === GlodaConstants.kConstraintFulltext) {
          // @testpoint gloda.query.test.kConstraintFulltext
          // this is beyond our powers. Even if we have the fulltext content in
          // memory, which we may not, the tokenization and such to perform
          // the testing gets very complicated in the face of i18n, etc.
          // so, let's fail if the item is not already in the collection, and
          // let the testing continue if it is. (some other constraint may no
          // longer apply...)
          if (!(aObj.id in this.collection._idMap)) {
            querySatisfied = false;
          }
        }

        if (!querySatisfied) {
          break;
        }
      }

      if (querySatisfied) {
        return true;
      }
    }
    return false;
  },
  /* eslint-enable complexity */
+
+ /**
+ * Helper code for noun definitions of queryHelpers that want to build a
+ * traditional in/equals constraint. The goal is to let them build a range
+ * without having to know how we structure |_constraints|.
+ *
+ * @protected
+ */
+ _inConstraintHelper(aAttrDef, aValues) {
+ let constraint = [GlodaConstants.kConstraintIn, aAttrDef].concat(aValues);
+ this._constraints.push(constraint);
+ return this;
+ },
+
+ /**
+ * Helper code for noun definitions of queryHelpers that want to build a
+ * range. The goal is to let them build a range without having to know how
+ * we structure |_constraints| or requiring them to mark themselves as
+ * continuous to get a "Range".
+ *
+ * @protected
+ */
+ _rangedConstraintHelper(aAttrDef, aRanges) {
+ let constraint = [GlodaConstants.kConstraintRanges, aAttrDef].concat(
+ aRanges
+ );
+ this._constraints.push(constraint);
+ return this;
+ },
+};
+
+/**
+ * @class A query that never matches anything.
+ *
+ * Collections corresponding to this query are intentionally frozen in time and
+ * do not want to be notified of any updates. We need the collection to be
+ * registered with the collection manager so that the noun instances in the
+ * collection are always 'reachable' via the collection for as long as we might
+ * be handing out references to the instances. (The other way to avoid updates
+ * would be to not register the collection, but then items might not be
+ * reachable.)
+ * This is intended to be used in implementation details behind the gloda
+ * abstraction barrier. For example, the message indexer likes to be able
+ * to represent 'ghost' and deleted messages, but these should never be exposed
+ * to the user. For code simplicity, it wants to be able to use the query
+ * mechanism. But it doesn't want updates that are effectively
+ * nonsensical. For example, a ghost message that is reused by message
+ * indexing may already be present in a collection; when the collection manager
+ * receives an itemsAdded event, a GlodaExplicitQueryClass would result in
+ * an item added notification in that case, which would wildly not be desired.
+ */
function GlodaNullQueryClass() {}

GlodaNullQueryClass.prototype = {
  /**
   * No options; only SQL query generation needs them, and a null query never
   *  generates SQL.
   */
  options: {},

  /**
   * Duck-type marker telling GlodaCollectionManager that the associated
   *  collection is frozen in time and must never be mutated by indexing
   *  traffic. The test() method conveys most of this, but special-casing has
   *  to live somewhere; it lives here.
   */
  frozen: true,

  /** Boolean OR is meaningless on a query that matches nothing. */
  or() {
    return null;
  },

  /**
   * A null query cannot sensibly populate a collection; callers doing this
   *  are making a mistake, so hand back null and let them fail loudly.
   */
  getCollection() {
    return null;
  },

  /**
   * Nothing ever matches a null query.
   *
   * @param aObj Ignored entirely.
   * @returns false, always.
   */
  test(aObj) {
    return false;
  },
};
+
+/**
+ * @class A query that only 'tests' for already belonging to the collection.
+ *
+ * This type of collection is useful for when you (or rather your listener)
+ * are interested in hearing about modifications to your collection or removals
+ * from your collection because of deletion, but do not want to be notified
+ * about newly indexed items matching your normal query constraints.
+ *
+ * @param aCollection The collection this query belongs to. This needs to be
+ * passed-in here or the collection should set the attribute directly when
+ * the query is passed in to a collection's constructor.
+ */
function GlodaExplicitQueryClass(aCollection) {
  this.collection = aCollection;
}

GlodaExplicitQueryClass.prototype = {
  /**
   * No options; only SQL query generation needs them, and explicit queries
   *  never generate SQL.
   */
  options: {},

  /**
   * OR-composition is meaningless for a membership-only query; refuse by
   *  returning null.
   */
  or() {
    return null;
  },

  /**
   * Explicit queries are born from existing collections (hand-created or
   *  converted from a normal query's results); asking one to create a
   *  collection is almost certainly a bug, so return null and let the caller
   *  error hard.
   */
  getCollection() {
    return null;
  },

  /**
   * An item matches iff its id is already present in the associated
   *  collection's id map.
   *
   * @param aObj The object/item to test for membership.
   * @returns {boolean} true when aObj.id is in the collection.
   */
  test(aObj) {
    return aObj.id in this.collection._idMap;
  },
};
+
+/**
+ * @class A query that 'tests' true for everything. Intended for debugging purposes
+ * only.
+ */
function GlodaWildcardQueryClass() {}

GlodaWildcardQueryClass.prototype = {
  /** No options; only SQL query generation needs them. */
  options: {},

  /** Refuse OR-composition; nobody gets to mess with the wildcard. */
  or() {
    return null;
  },

  /** Refuse collection creation until a real use case shows up. */
  getCollection() {
    return null;
  },

  /**
   * Everything matches; debugging convenience only.
   *
   * @param aObj Ignored.
   * @returns true, always.
   */
  test(aObj) {
    return true;
  },
};
+
+/**
+ * Factory method to effectively create per-noun subclasses of GlodaQueryClass,
+ * GlodaNullQueryClass, GlodaExplicitQueryClass, and GlodaWildcardQueryClass.
+ * For GlodaQueryClass this allows us to add per-noun helpers. For the others,
+ * this is merely a means of allowing us to attach the (per-noun) nounDef to
+ * the 'class'.
+ */
+function GlodaQueryClassFactory(aNounDef) {
+ let newQueryClass = function (aOptions) {
+ GlodaQueryClass.call(this, aOptions);
+ };
+ newQueryClass.prototype = new GlodaQueryClass();
+ newQueryClass.prototype._queryClass = newQueryClass;
+ newQueryClass.prototype._nounDef = aNounDef;
+
+ let newNullClass = function (aCollection) {
+ GlodaNullQueryClass.call(this);
+ this.collection = aCollection;
+ };
+ newNullClass.prototype = new GlodaNullQueryClass();
+ newNullClass.prototype._queryClass = newNullClass;
+ newNullClass.prototype._nounDef = aNounDef;
+
+ let newExplicitClass = function (aCollection) {
+ GlodaExplicitQueryClass.call(this);
+ this.collection = aCollection;
+ };
+ newExplicitClass.prototype = new GlodaExplicitQueryClass();
+ newExplicitClass.prototype._queryClass = newExplicitClass;
+ newExplicitClass.prototype._nounDef = aNounDef;
+
+ let newWildcardClass = function (aCollection) {
+ GlodaWildcardQueryClass.call(this);
+ this.collection = aCollection;
+ };
+ newWildcardClass.prototype = new GlodaWildcardQueryClass();
+ newWildcardClass.prototype._queryClass = newWildcardClass;
+ newWildcardClass.prototype._nounDef = aNounDef;
+
+ return [newQueryClass, newNullClass, newExplicitClass, newWildcardClass];
+}
diff --git a/comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm b/comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm
new file mode 100644
index 0000000000..2e0fb7b5be
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm
@@ -0,0 +1,175 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file is charged with providing you a way to have a pretty gloda-backed
+ * nsIMsgDBView.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaSyntheticView"];
+
+/**
+ * Create a synthetic view suitable for passing to |FolderDisplayWidget.show|.
+ * You must pass a query, collection, or conversation in.
+ *
+ * @param {GlodaQuery} [aArgs.query] A gloda query to run.
+ * @param {GlodaCollection} [aArgs.collection] An already-populated collection
+ * to display. Do not call getCollection on a query and hand us that. We
+ * will not register ourselves as a listener and things will not work.
+ * @param {GlodaConversation} [aArgs.conversation] A conversation whose messages
+ * you want to display.
+ */
function GlodaSyntheticView(aArgs) {
  if ("query" in aArgs) {
    // A query we must run ourselves; results stream in via the collection
    // listener methods on our prototype.
    this.query = aArgs.query;
    this.collection = this.query.getCollection(this);
    this.completed = false;
    this.viewType = "global";
  } else if ("collection" in aArgs) {
    // An already-populated collection; we are not registered as its listener,
    // so it is considered complete from the start.
    this.query = null;
    this.collection = aArgs.collection;
    this.completed = true;
    this.viewType = "global";
  } else if ("conversation" in aArgs) {
    this.collection = aArgs.conversation.getMessagesCollection(this);
    this.query = this.collection.query;
    this.completed = false;
    this.viewType = "conversation";
    // NOTE(review): this assumes aArgs.message is always provided alongside
    // aArgs.conversation -- confirm with callers.
    this.selectedMessage = aArgs.message.folderMessage;
  } else {
    // (Previous message omitted the conversation option even though it is
    // accepted above.)
    throw new Error("You need to pass a query, collection, or conversation");
  }

  this.customColumns = [];
}
GlodaSyntheticView.prototype = {
  defaultSort: [
    [Ci.nsMsgViewSortType.byDate, Ci.nsMsgViewSortOrder.descending],
  ],

  /**
   * Start the search, streaming hits to aSearchListener.  If the backing
   * collection is already fully populated, replay its items synchronously
   * and close out immediately instead of re-running anything.
   */
  search(aSearchListener, aCompletionCallback) {
    this.searchListener = aSearchListener;
    this.completionCallback = aCompletionCallback;

    this.searchListener.onNewSearch();
    if (!this.completed) {
      return;
    }
    this.reportResults(this.collection.items);
    // we're not really aborting, but it closes things out nicely
    this.abortSearch();
  },

  /**
   * Notify the listener/callback (if any) that we are done and drop our
   * references to them.
   */
  abortSearch() {
    this.searchListener?.onSearchDone(Cr.NS_OK);
    this.completionCallback?.();
    this.searchListener = null;
    this.completionCallback = null;
  },

  /**
   * Feed every item that has a backing message header to the listener.
   */
  reportResults(aItems) {
    for (let glodaMsg of aItems) {
      let msgHdr = glodaMsg.folderMessage;
      if (msgHdr) {
        this.searchListener.onSearchHit(msgHdr, msgHdr.folder);
      }
    }
  },

  /**
   * Helper function used by |DBViewWrapper.getMsgHdrForMessageID| since there
   * are no actual backing folders for it to check.
   */
  getMsgHdrForMessageID(aMessageId) {
    for (let glodaMsg of this.collection.items) {
      if (glodaMsg.headerMessageID != aMessageId) {
        continue;
      }
      // Skip matches that have no backing header; keep scanning.
      let msgHdr = glodaMsg.folderMessage;
      if (msgHdr) {
        return msgHdr;
      }
    }
    return null;
  },

  /**
   * The default set of columns to show.
   */
  DEFAULT_COLUMN_STATES: {
    threadCol: {
      visible: true,
    },
    flaggedCol: {
      visible: true,
    },
    subjectCol: {
      visible: true,
    },
    correspondentCol: {
      visible: Services.prefs.getBoolPref("mail.threadpane.use_correspondents"),
    },
    senderCol: {
      visible: !Services.prefs.getBoolPref(
        "mail.threadpane.use_correspondents"
      ),
    },
    dateCol: {
      visible: true,
    },
    locationCol: {
      visible: true,
    },
  },

  // --- settings persistence
  getPersistedSetting(aSetting) {
    let prefName =
      "mailnews.database.global.views." + this.viewType + "." + aSetting;
    try {
      return JSON.parse(Services.prefs.getCharPref(prefName));
    } catch (e) {
      // Missing or malformed pref: fall back to the built-in default.
      return this.getDefaultSetting(aSetting);
    }
  },
  setPersistedSetting(aSetting, aValue) {
    let prefName =
      "mailnews.database.global.views." + this.viewType + "." + aSetting;
    Services.prefs.setCharPref(prefName, JSON.stringify(aValue));
  },
  getDefaultSetting(aSetting) {
    return aSetting == "columns" ? this.DEFAULT_COLUMN_STATES : undefined;
  },

  // --- collection listener
  onItemsAdded(aItems, aCollection) {
    if (this.searchListener) {
      this.reportResults(aItems);
    }
  },
  onItemsModified(aItems, aCollection) {},
  onItemsRemoved(aItems, aCollection) {},
  onQueryCompleted(aCollection) {
    this.completed = true;
    this.searchListener?.onSearchDone(Cr.NS_OK);
    this.completionCallback?.();
  },
};
diff --git a/comm/mailnews/db/gloda/modules/GlodaUtils.jsm b/comm/mailnews/db/gloda/modules/GlodaUtils.jsm
new file mode 100644
index 0000000000..a2b7fe4174
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaUtils.jsm
@@ -0,0 +1,84 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaUtils"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+/**
+ * @namespace A holding place for logic that is not gloda-specific and should
+ * reside elsewhere.
+ */
var GlodaUtils = {
  /**
   * This Regexp is super-complicated and used at least in two different parts
   * of the code, so let's expose it from one single location.
   */
  PART_RE: new RegExp(
    "^[^?]+\\?(?:/;section=\\d+\\?)?(?:[^&]+&)*part=([^&]+)(?:&[^&]+)*$"
  ),

  /** Decode a MIME-encoded header value into a plain string. */
  deMime(aString) {
    return MailServices.mimeConverter.decodeMimeHeader(
      aString,
      null,
      false,
      true
    );
  },

  _headerParser: MailServices.headerParser,

  /**
   * Parses an RFC 2822 list of e-mail addresses and returns an object with
   * 4 attributes, as described below. We will use the example of the user
   * passing an argument of '"Bob Smith" <bob@example.com>'.
   *
   * This method (by way of nsIMsgHeaderParser) takes care of decoding mime
   * headers, but is not aware of folder-level character set overrides.
   *
   * count: the number of addresses parsed. (ex: 1)
   * addresses: a list of e-mail addresses (ex: ["bob@example.com"])
   * names: a list of names (ex: ["Bob Smith"])
   * fullAddresses: aka the list of name and e-mail together (ex: ['"Bob Smith"
   * <bob@example.com>']).
   *
   * This method is a convenience wrapper around nsIMsgHeaderParser.
   */
  parseMailAddresses(aMailAddresses) {
    let parsed = this._headerParser.parseEncodedHeader(aMailAddresses);
    let names = [];
    let emails = [];
    let fullForms = [];
    for (let addr of parsed) {
      names.push(addr.name || null);
      emails.push(addr.email);
      fullForms.push(addr.toString());
    }
    return {
      names,
      addresses: emails,
      fullAddresses: fullForms,
      count: parsed.length,
    };
  },

  /**
   * MD5 hash a string and return the hex-string result. Impl from nsICryptoHash
   * docs.
   */
  md5HashString(aString) {
    let bytes = [...new TextEncoder().encode(aString)];

    let hasher = Cc["@mozilla.org/security/hash;1"].createInstance(
      Ci.nsICryptoHash
    );
    hasher.init(Ci.nsICryptoHash.MD5);
    hasher.update(bytes, bytes.length);
    let binaryDigest = hasher.finish(false);

    // Each character of the binary digest carries one byte (0-255); render
    // each as a two-digit lowercase hex code and join them.
    return Array.from(binaryDigest, ch =>
      ch.charCodeAt(0).toString(16).padStart(2, "0")
    ).join("");
  },
};
diff --git a/comm/mailnews/db/gloda/modules/IndexMsg.jsm b/comm/mailnews/db/gloda/modules/IndexMsg.jsm
new file mode 100644
index 0000000000..9a4add589e
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/IndexMsg.jsm
@@ -0,0 +1,3464 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/*
+ * This file currently contains a fairly general implementation of asynchronous
+ * indexing with a very explicit message indexing implementation. As gloda
+ * will eventually want to index more than just messages, the message-specific
+ * things should ideally lose their special hold on this file. This will
+ * benefit readability/size as well.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaMsgIndexer"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+const { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+const { GlodaContact, GlodaFolder } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDataModel.jsm"
+);
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { GlodaIndexer, IndexingJob } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+const { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+
+const lazy = {};
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "MailUtils",
+ "resource:///modules/MailUtils.jsm"
+);
+
+// Cr does not have mailnews error codes!
+var NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE = 0x80550005;
+
+var GLODA_MESSAGE_ID_PROPERTY = "gloda-id";
+/**
+ * Message header property to track dirty status; one of
+ * |GlodaIndexer.kMessageClean|, |GlodaIndexer.kMessageDirty|,
+ * |GlodaIndexer.kMessageFilthy|.
+ */
+var GLODA_DIRTY_PROPERTY = "gloda-dirty";
+
+/**
+ * The sentinel GLODA_MESSAGE_ID_PROPERTY value indicating that a message fails
+ * to index and we should not bother trying again, at least not until a new
+ * release is made.
+ *
+ * This should ideally just flip between 1 and 2, with GLODA_OLD_BAD_MESSAGE_ID
+ * flipping in the other direction. If we start having more trailing badness,
+ * _indexerGetEnumerator and GLODA_OLD_BAD_MESSAGE_ID will need to be altered.
+ *
+ * When flipping this, be sure to update glodaTestHelper.js's copy.
+ */
+var GLODA_BAD_MESSAGE_ID = 2;
+/**
+ * The gloda id we used to use to mark messages as bad, but now should be
+ * treated as eligible for indexing. This is only ever used for consideration
+ * when creating msg header enumerators with `_indexerGetEnumerator` which
+ * means we only will re-index such messages in an indexing sweep. Accordingly
+ * event-driven indexing will still treat such messages as unindexed (and
+ * unindexable) until an indexing sweep picks them up.
+ */
+var GLODA_OLD_BAD_MESSAGE_ID = 1;
+var GLODA_FIRST_VALID_MESSAGE_ID = 32;
+
+var JUNK_SCORE_PROPERTY = "junkscore";
+var JUNK_SPAM_SCORE_STR = Ci.nsIJunkMailPlugin.IS_SPAM_SCORE.toString();
+
+/**
+ * The processing flags that tell us that a message header has not yet been
+ * reported to us via msgsClassified. If it has one of these flags, it is
+ * still being processed.
+ */
+var NOT_YET_REPORTED_PROCESSING_FLAGS =
+ Ci.nsMsgProcessingFlags.NotReportedClassified |
+ Ci.nsMsgProcessingFlags.ClassifyJunk;
+
+// for list comprehension fun
/**
 * Generator yielding the integers begin, begin+1, ..., end-1 (half-open,
 * like a classic for-loop bound).
 */
function* range(begin, end) {
  let cursor = begin;
  while (cursor < end) {
    yield cursor;
    cursor += 1;
  }
}
+
+/**
+ * We do not set properties on the messages until we perform a DB commit; this
+ * helper class tracks messages that we have indexed but are not yet marked
+ * as such on their header.
+ */
var PendingCommitTracker = {
  /**
   * Maps message URIs to their gloda ids.
   *
   * I am not entirely sure why I chose the URI for the key rather than
   * gloda folder ID + message key.  Most likely it was to simplify debugging
   * since the gloda folder ID is opaque while the URI is very informative.  It
   * is also possible I was afraid of IMAP folder renaming triggering a UID
   * renumbering?
   */
  _indexedMessagesPendingCommitByKey: {},
  /**
   * Map from the pending commit gloda id to a tuple of [the corresponding
   * message header, dirtyState].
   */
  _indexedMessagesPendingCommitByGlodaId: {},
  /**
   * Do we have a post-commit handler registered with this transaction yet?
   */
  _pendingCommit: false,

  /**
   * The function gets called when the commit actually happens to flush our
   * message id's.
   *
   * It is very possible that by the time this call happens we have left the
   * folder and nulled out msgDatabase on the folder.  Since nulling it out
   * is what causes the commit, if we set the headers here without somehow
   * forcing a commit, we will lose.  Badly.
   * Accordingly, we make a list of all the folders that the headers belong to
   * as we iterate, make sure to re-attach their msgDatabase before forgetting
   * the headers, then make sure to zero the msgDatabase again, triggering a
   * commit.  If there were a way to directly get the nsIMsgDatabase from the
   * header we could do that and call commit directly.  We don't track
   * databases along with the headers since the headers can change because of
   * moves and that would increase the number of moving parts.
   */
  _commitCallback() {
    let foldersByURI = {};
    let lastFolder = null;

    for (let glodaId in PendingCommitTracker._indexedMessagesPendingCommitByGlodaId) {
      let [msgHdr, dirtyState] =
        PendingCommitTracker._indexedMessagesPendingCommitByGlodaId[glodaId];
      // Mark this message as indexed.
      // It's conceivable the database could have gotten blown away, in which
      // case the message headers are going to throw exceptions when we try
      // and touch them.  So we wrap this in a try block that complains about
      // this unforeseen circumstance.  (noteFolderDatabaseGettingBlownAway
      // should have been called and avoided this situation in all known
      // situations.)
      try {
        let curGlodaId = msgHdr.getUint32Property(GLODA_MESSAGE_ID_PROPERTY);
        if (curGlodaId != glodaId) {
          msgHdr.setUint32Property(GLODA_MESSAGE_ID_PROPERTY, glodaId);
        }
        let headerDirty = msgHdr.getUint32Property(GLODA_DIRTY_PROPERTY);
        if (headerDirty != dirtyState) {
          msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, dirtyState);
        }

        // Make sure this folder is in our foldersByURI map.
        if (lastFolder == msgHdr.folder) {
          continue;
        }
        lastFolder = msgHdr.folder;
        let folderURI = lastFolder.URI;
        if (!(folderURI in foldersByURI)) {
          foldersByURI[folderURI] = lastFolder;
        }
      } catch (ex) {
        // (fixed: the two concatenated literals previously joined as
        // "afterdb commit" -- missing separator space.)
        GlodaMsgIndexer._log.error(
          "Exception while attempting to mark message with gloda state after " +
            "db commit",
          ex
        );
      }
    }

    // it is vitally important to do this before we forget about the headers!
    for (let uri in foldersByURI) {
      let folder = foldersByURI[uri];
      // This will not cause a parse.  The database is in-memory since we have
      // a header that belongs to it.  This just causes the folder to
      // re-acquire a reference from the database manager.
      folder.msgDatabase;
      // And this will cause a commit.  (And must be done since we don't want
      // to cause a leak.)
      folder.msgDatabase = null;
    }

    PendingCommitTracker._indexedMessagesPendingCommitByGlodaId = {};
    PendingCommitTracker._indexedMessagesPendingCommitByKey = {};

    PendingCommitTracker._pendingCommit = false;
  },

  /**
   * Track a message header that should be marked with the given gloda id when
   * the database commits.
   */
  track(aMsgHdr, aGlodaId) {
    let pendingKey = aMsgHdr.folder.URI + "#" + aMsgHdr.messageKey;
    this._indexedMessagesPendingCommitByKey[pendingKey] = aGlodaId;
    this._indexedMessagesPendingCommitByGlodaId[aGlodaId] = [
      aMsgHdr,
      GlodaMsgIndexer.kMessageClean,
    ];

    // Lazily register a single post-commit flush per transaction.
    if (!this._pendingCommit) {
      GlodaDatastore.runPostCommit(this._commitCallback);
      this._pendingCommit = true;
    }
  },

  /**
   * Get the current state of a message header given that we cannot rely on just
   * looking at the header's properties because we defer setting those
   * until the SQLite commit happens.
   *
   * @returns Tuple of [gloda id, dirty status].
   */
  getGlodaState(aMsgHdr) {
    // If it's in the pending commit table, then the message is basically
    // clean.  Return that info.
    let pendingKey = aMsgHdr.folder.URI + "#" + aMsgHdr.messageKey;
    if (pendingKey in this._indexedMessagesPendingCommitByKey) {
      let glodaId =
        PendingCommitTracker._indexedMessagesPendingCommitByKey[pendingKey];
      return [glodaId, this._indexedMessagesPendingCommitByGlodaId[glodaId][1]];
    }

    // Otherwise the header's concept of state is correct.
    let glodaId = aMsgHdr.getUint32Property(GLODA_MESSAGE_ID_PROPERTY);
    let glodaDirty = aMsgHdr.getUint32Property(GLODA_DIRTY_PROPERTY);
    return [glodaId, glodaDirty];
  },

  /**
   * Update our structure to reflect moved headers.  Moves are currently
   * treated as weakly interesting and do not require a reindexing
   * although collections will get notified.  So our job is to fix-up
   * the pending commit information if the message has a pending commit.
   */
  noteMove(aOldHdr, aNewHdr) {
    let oldKey = aOldHdr.folder.URI + "#" + aOldHdr.messageKey;
    if (!(oldKey in this._indexedMessagesPendingCommitByKey)) {
      return;
    }

    let glodaId = this._indexedMessagesPendingCommitByKey[oldKey];
    delete this._indexedMessagesPendingCommitByKey[oldKey];

    let newKey = aNewHdr.folder.URI + "#" + aNewHdr.messageKey;
    this._indexedMessagesPendingCommitByKey[newKey] = glodaId;

    // only clobber the header, not the dirty state
    this._indexedMessagesPendingCommitByGlodaId[glodaId][0] = aNewHdr;
  },

  /**
   * A blind move is one where we have the source header but not the destination
   * header.  This happens for IMAP messages that do not involve offline fake
   * headers.
   * XXX Since IMAP moves will propagate the gloda-id/gloda-dirty bits for us,
   * we could detect the other side of the move when it shows up as a
   * msgsClassified event and restore the mapping information.  Since the
   * offline fake header case should now cover the bulk of IMAP move
   * operations, we probably do not need to pursue this.
   *
   * We just re-dispatch to noteDirtyHeader because we can't do anything more
   * clever.
   */
  noteBlindMove(aOldHdr) {
    this.noteDirtyHeader(aOldHdr);
  },

  /**
   * If a message is dirty we should stop tracking it for post-commit
   * purposes.  This is not because we don't want to write to its header
   * when we commit as much as that we want to avoid |getHeaderGlodaState|
   * reporting that the message is clean.  We could complicate our state
   * by storing that information, but this is easier and ends up the same
   * in the end.
   */
  noteDirtyHeader(aMsgHdr) {
    let pendingKey = aMsgHdr.folder.URI + "#" + aMsgHdr.messageKey;
    if (!(pendingKey in this._indexedMessagesPendingCommitByKey)) {
      return;
    }

    // (It is important that we get the gloda id from our own structure!)
    let glodaId = this._indexedMessagesPendingCommitByKey[pendingKey];
    this._indexedMessagesPendingCommitByGlodaId[glodaId][1] =
      GlodaMsgIndexer.kMessageDirty;
  },

  /**
   * Sometimes a folder database gets blown away.  This happens for one of two
   * expected reasons right now:
   * - Folder compaction.
   * - Explicit reindexing of a folder via the folder properties "rebuild index"
   *   button.
   *
   * When this happens, we are basically out of luck and need to discard
   * everything about the folder.  The good news is that the folder compaction
   * pass is clever enough to re-establish the linkages that are being lost
   * when we drop these things on the floor.  Reindexing of a folder is not
   * clever enough to deal with this but is an exceptional case of last resort
   * (the user should not normally be performing a reindex as part of daily
   * operation), so we accept that messages may be redundantly indexed.
   */
  noteFolderDatabaseGettingBlownAway(aMsgFolder) {
    let uri = aMsgFolder.URI + "#";
    for (let key of Object.keys(this._indexedMessagesPendingCommitByKey)) {
      // this is not as efficient as it could be, but compaction is relatively
      // rare and the number of pending headers is generally going to be
      // small.
      if (key.startsWith(uri)) {
        delete this._indexedMessagesPendingCommitByKey[key];
      }
    }
  },
};
+
+/**
+ * This callback handles processing the asynchronous query results of
+ * |GlodaMsgIndexer.getMessagesByMessageID|.
+ */
function MessagesByMessageIdCallback(
  aMsgIDToIndex,
  aResults,
  aCallback,
  aCallbackThis
) {
  // Stash everything we need to route collection results back to the caller.
  Object.assign(this, {
    msgIDToIndex: aMsgIDToIndex,
    results: aResults,
    callback: aCallback,
    callbackThis: aCallbackThis,
  });
}
+
MessagesByMessageIdCallback.prototype = {
  _log: console.createInstance({
    prefix: "gloda.index_msg.mbm",
    maxLogLevel: "Warn",
    maxLogLevelPref: "gloda.loglevel",
  }),

  /**
   * Route each arriving gloda message into the results bucket for its
   * message-id (per the msgIDToIndex mapping supplied at construction).
   */
  onItemsAdded(aItems, aCollection) {
    // Ignore everything once the datastore has shut down.
    if (GlodaDatastore.datastoreIsShutdown) {
      return;
    }

    this._log.debug("getting results...");
    for (let glodaMsg of aItems) {
      let bucket = this.results[this.msgIDToIndex[glodaMsg.headerMessageID]];
      bucket.push(glodaMsg);
    }
  },
  onItemsModified() {},
  onItemsRemoved() {},
  onQueryCompleted(aCollection) {
    // Ignore everything once the datastore has shut down.
    if (GlodaDatastore.datastoreIsShutdown) {
      return;
    }

    this._log.debug("query completed, notifying... " + this.results);

    this.callback.call(this.callbackThis, this.results);
  },
};
+
+/**
+ * The message indexer!
+ *
+ * === Message Indexing Strategy
+ * To these ends, we implement things like so:
+ *
+ * Message State Tracking
+ * - We store a property on all indexed headers indicating their gloda message
+ * id. This allows us to tell whether a message is indexed from the header,
+ * without having to consult the SQL database.
+ * - When we receive an event that indicates that a message's meta-data has
+ * changed and gloda needs to re-index the message, we set a property on the
+ * header that indicates the message is dirty. This property can indicate
+ * that the message needs to be re-indexed but the gloda-id is valid (dirty)
+ * or that the message's gloda-id is invalid (filthy) because the gloda
+ * database has been blown away.
+ * - We track whether a folder is up-to-date on our GlodaFolder representation
+ * using a concept of dirtiness, just like messages. Like messages, a folder
+ * can be dirty or filthy. A dirty folder has at least one dirty message in
+ * it which means we should scan the folder. A filthy folder means that
+ * every message in the folder should be considered filthy. Folders start
+ * out filthy when Gloda is first told about them indicating we cannot
+ * trust any of the gloda-id's in the folders. Filthy folders are downgraded
+ * to dirty folders after we mark all of the headers with gloda-id's filthy.
+ *
+ * Indexing Message Control
+ * - We index the headers of all IMAP messages. We index the bodies of all IMAP
+ * messages that are offline. We index all local messages. We plan to avoid
+ * indexing news messages.
+ * - We would like a way to express desires about indexing that either don't
+ * confound offline storage with indexing, or actually allow some choice.
+ *
+ * Indexing Messages
+ * - We have two major modes of indexing: sweep and event-driven. When we
+ * start up we kick off an indexing sweep. We use event-driven indexing
+ * as we receive events for eligible messages, but if we get too many
+ * events we start dropping them on the floor and just flag that an indexing
+ * sweep is required.
+ * - The sweep initiates folder indexing jobs based on the priorities assigned
+ * to folders. Folder indexing uses a filtered message enumerator to find
+ * messages that need to be indexed, minimizing wasteful exposure of message
+ * headers to XPConnect that we would not end up indexing.
+ * - For local folders, we use GetDatabaseWithReparse to ensure that the .msf
+ * file exists. For IMAP folders, we simply use GetDatabase because we know
+ * the auto-sync logic will make sure that the folder is up-to-date and we
+ * want to avoid creating problems through use of updateFolder.
+ *
+ * Junk Mail
+ * - We do not index junk. We do not index messages until the junk/non-junk
+ * determination has been made. If a message gets marked as junk, we act like
+ * it was deleted.
+ * - We know when a message is actively queued for junk processing thanks to
+ * folder processing flags. nsMsgDBFolder::CallFilterPlugins does this
+ * prior to initiating spam processing. Unfortunately, this method does not
+ * get called until after we receive the notification about the existence of
+ * the header. How long after can vary on different factors. The longest
+ * delay is in the IMAP case where there is a filter that requires the
+ * message body to be present; the method does not get called until all the
+ * bodies are downloaded.
+ *
+ */
+var GlodaMsgIndexer = {
+ /**
+ * A partial attempt to generalize to support multiple databases. Each
+ * database would have its own datastore would have its own indexer. But
+ * we rather inter-mingle our use of this field with the singleton global
+ * GlodaDatastore.
+ */
+ _datastore: GlodaDatastore,
+ _log: console.createInstance({
+ prefix: "gloda.index_msg",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ }),
+
+ _junkService: MailServices.junk,
+
+ name: "index_msg",
+ /**
+ * Are we enabled, read: are we processing change events?
+ */
+ _enabled: false,
+ get enabled() {
+ return this._enabled;
+ },
+
+ enable() {
+ // initialize our listeners' this pointers
+ this._databaseAnnouncerListener.indexer = this;
+ this._msgFolderListener.indexer = this;
+
+ // register for:
+ // - folder loaded events, so we know when getDatabaseWithReparse has
+ // finished updating the index/what not (if it wasn't immediately
+ // available)
+ // - property changes (so we know when a message's read/starred state have
+ // changed.)
+ this._folderListener._init(this);
+ MailServices.mailSession.AddFolderListener(
+ this._folderListener,
+ Ci.nsIFolderListener.intPropertyChanged |
+ Ci.nsIFolderListener.propertyFlagChanged |
+ Ci.nsIFolderListener.event
+ );
+
+ MailServices.mfn.addListener(
+ this._msgFolderListener,
+ // note: intentionally no msgAdded or msgUnincorporatedMoved.
+ Ci.nsIMsgFolderNotificationService.msgsClassified |
+ Ci.nsIMsgFolderNotificationService.msgsJunkStatusChanged |
+ Ci.nsIMsgFolderNotificationService.msgsDeleted |
+ Ci.nsIMsgFolderNotificationService.msgsMoveCopyCompleted |
+ Ci.nsIMsgFolderNotificationService.msgKeyChanged |
+ Ci.nsIMsgFolderNotificationService.folderAdded |
+ Ci.nsIMsgFolderNotificationService.folderDeleted |
+ Ci.nsIMsgFolderNotificationService.folderMoveCopyCompleted |
+ Ci.nsIMsgFolderNotificationService.folderRenamed |
+ Ci.nsIMsgFolderNotificationService.folderCompactStart |
+ Ci.nsIMsgFolderNotificationService.folderCompactFinish |
+ Ci.nsIMsgFolderNotificationService.folderReindexTriggered
+ );
+
+ this._enabled = true;
+
+ this._considerSchemaMigration();
+
+ this._log.info("Event-Driven Indexing is now " + this._enabled);
+ },
+ disable() {
+ // remove FolderLoaded notification listener
+ MailServices.mailSession.RemoveFolderListener(this._folderListener);
+
+ MailServices.mfn.removeListener(this._msgFolderListener);
+
+ this._indexerLeaveFolder(); // nop if we aren't "in" a folder
+
+ this._enabled = false;
+
+ this._log.info("Event-Driven Indexing is now " + this._enabled);
+ },
+
+ /**
+ * Indicates that we have pending deletions to process, meaning that there
+ * are gloda message rows flagged for deletion. If this value is a boolean,
+ * it means the value is known reliably. If this value is null, it means
+ * that we don't know, likely because we have started up and have not checked
+ * the database.
+ */
+ pendingDeletions: null,
+
+ /**
+ * The message (or folder state) is believed up-to-date.
+ */
+ kMessageClean: 0,
+ /**
+ * The message (or folder) is known to not be up-to-date. In the case of
+ * folders, this means that some of the messages in the folder may be dirty.
+ * However, because of the way our indexing works, it is possible there may
+ * actually be no dirty messages in a folder. (We attempt to process
+ * messages in an event-driven fashion for a finite number of messages, but
+ * because we can quit without completing processing of the queue, we need to
+ * mark the folder dirty, just-in-case.) (We could do some extra leg-work
+ * and do a better job of marking the folder clean again.)
+ */
+ kMessageDirty: 1,
+ /**
+ * We have not indexed the folder at all, but messages in the folder think
+ * they are indexed. We downgrade the folder to just kMessageDirty after
+ * marking all the messages in the folder as dirty. We do this so that if we
+ * have to stop indexing the folder we can still build on our progress next
+ * time we enter the folder.
+ * We mark all folders filthy when (re-)creating the database because there
+ * may be previous state left over from an earlier database.
+ */
+ kMessageFilthy: 2,
+
+ /**
+ * A message addition job yet to be (completely) processed. Since message
+ * addition events come to us one-by-one, in order to aggregate them into a
+ * job, we need something like this. It's up to the indexing loop to
+ * decide when to null this out; it can either do it when it first starts
+ * processing it, or when it has processed the last thing. It's really a
+ * question of whether we want retrograde motion in the folder progress bar
+ * or the message progress bar.
+ */
+ _pendingAddJob: null,
+
+ /**
+ * The number of messages that we should queue for processing before letting
+ * them fall on the floor and relying on our folder-walking logic to ensure
+ * that the messages are indexed.
+ * The reason we allow for queueing messages in an event-driven fashion is
+ * that once we have reached a steady-state, it is preferable to be able to
+ * deal with new messages and modified meta-data in a prompt fashion rather
+ * than having to (potentially) walk every folder in the system just to find
+ * the message that the user changed the tag on.
+ */
+ _indexMaxEventQueueMessages: 20,
+
+ /**
+ * Unit testing hook to get us to emit additional logging that verges on
+ * inane for general usage but is helpful in unit test output to get a lay
+ * of the land and for paranoia reasons.
+ */
+ _unitTestSuperVerbose: false,
+
+ /** The GlodaFolder corresponding to the folder we are indexing. */
+ _indexingGlodaFolder: null,
+ /** The nsIMsgFolder we are currently indexing. */
+ _indexingFolder: null,
+ /** The nsIMsgDatabase we are currently indexing. */
+ _indexingDatabase: null,
+ /**
+ * The iterator we are using to iterate over the headers in
+ * this._indexingDatabase.
+ */
+ _indexingIterator: null,
+
+ /** folder whose entry we are pending on */
+ _pendingFolderEntry: null,
+
+ /**
+ * Async common logic that we want to deal with the given folder ID. Besides
+ * cutting down on duplicate code, this ensures that we are listening on
+ * the folder in case it tries to go away when we are using it.
+ *
+ * @returns true when the folder was successfully entered, false when we need
+ * to pend on notification of updating of the folder (due to re-parsing
+ * or what have you). In the event of an actual problem, an exception
+ * will escape.
+ */
  _indexerEnterFolder(aFolderID) {
    // leave the folder if we haven't explicitly left it.
    if (this._indexingFolder !== null) {
      this._indexerLeaveFolder();
    }

    // Resolve the gloda folder and its XPCOM folder; getXPCOMFolder also
    // flags the gloda folder as busy with indexing activity.
    this._indexingGlodaFolder = GlodaDatastore._mapFolderID(aFolderID);
    this._indexingFolder = this._indexingGlodaFolder.getXPCOMFolder(
      this._indexingGlodaFolder.kActivityIndexing
    );

    if (this._indexingFolder) {
      this._log.debug("Entering folder: " + this._indexingFolder.URI);
    }

    try {
      // The msf may need to be created or otherwise updated for local folders.
      // This may require yielding until such time as the msf has been created.
      try {
        if (this._indexingFolder instanceof Ci.nsIMsgLocalMailFolder) {
          this._indexingDatabase = this._indexingFolder.getDatabaseWithReparse(
            null,
            null
          );
        }
        // we need do nothing special for IMAP, news, or other
      } catch (e) {
        // getDatabaseWithReparse can return either NS_ERROR_NOT_INITIALIZED or
        // NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE if the net result is that it
        // is going to send us a notification when the reparse has completed.
        // (note that although internally NS_MSG_ERROR_FOLDER_SUMMARY_MISSING
        // might get flung around, it won't make it out to us, and will instead
        // be permuted into an NS_ERROR_NOT_INITIALIZED.)
        if (
          e.result == Cr.NS_ERROR_NOT_INITIALIZED ||
          e.result == NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE
        ) {
          // this means that we need to pend on the update; the listener for
          // FolderLoaded events will call _indexerCompletePendingFolderEntry.
          this._log.debug("Pending on folder load...");
          this._pendingFolderEntry = this._indexingFolder;
          return GlodaConstants.kWorkAsync;
        }
        // Any other failure is handled by the outer catch below.
        throw e;
      }
      // we get an nsIMsgDatabase out of this (unsurprisingly) which
      // explicitly inherits from nsIDBChangeAnnouncer, which has the
      // addListener call we want.
      if (this._indexingDatabase == null) {
        this._indexingDatabase = this._indexingFolder.msgDatabase;
      }
      this._indexingDatabase.addListener(this._databaseAnnouncerListener);
    } catch (ex) {
      this._log.error(
        "Problem entering folder: " +
          (this._indexingFolder ? this._indexingFolder.prettyName : "unknown") +
          ", skipping. Error was: " +
          ex.fileName +
          ":" +
          ex.lineNumber +
          ": " +
          ex
      );
      // Reset all per-folder indexing state before re-throwing.
      this._indexingGlodaFolder.indexing = false;
      this._indexingFolder = null;
      this._indexingGlodaFolder = null;
      this._indexingDatabase = null;
      // NOTE(review): this clears `_indexingEnumerator` while the member
      // documented above is `_indexingIterator` -- confirm which field the
      // enumerator actually lives in.
      this._indexingEnumerator = null;

      // re-throw, we just wanted to make sure this junk is cleaned up and
      // get localized error logging...
      throw ex;
    }

    return GlodaConstants.kWorkSync;
  },
+
  /**
   * If the folder was still parsing/updating when we tried to enter, then this
   * handler will get called by the listener who got the FolderLoaded message.
   * All we need to do is get the database reference, register a listener on
   * the db, and retrieve an iterator if desired.
   */
  _indexerCompletePendingFolderEntry() {
    // The reparse is done, so the folder's msgDatabase is now available.
    this._indexingDatabase = this._indexingFolder.msgDatabase;
    this._indexingDatabase.addListener(this._databaseAnnouncerListener);
    this._log.debug("...Folder Loaded!");

    // the load is no longer pending; we certainly don't want more notifications
    this._pendingFolderEntry = null;
    // indexerEnterFolder returned kWorkAsync, which means we need to notify
    // the callback driver to get things going again.
    GlodaIndexer.callbackDriver();
  },
+
  /**
   * Enumerate all messages in the folder.
   */
  kEnumAllMsgs: 0,
  /**
   * Enumerate messages that look like they need to be indexed.
   */
  kEnumMsgsToIndex: 1,
  /**
   * Enumerate messages that are already indexed.
   */
  kEnumIndexedMsgs: 2,
+
  /**
   * Synchronous helper to get an enumerator for the current folder (as found
   * in |_indexingFolder|).  The resulting enumerator is stored in
   * |this._indexingEnumerator|; nothing is returned.
   *
   * @param aEnumKind One of |kEnumAllMsgs|, |kEnumMsgsToIndex|, or
   *     |kEnumIndexedMsgs|.
   * @param [aAllowPreBadIds=false] Only valid for |kEnumIndexedMsgs|, tells us
   *     that we should treat message with any gloda-id as dirty, not just
   *     messages that have non-bad message id's.
   * @throws Error if aEnumKind is not one of the three known kinds.
   */
  _indexerGetEnumerator(aEnumKind, aAllowPreBadIds) {
    if (aEnumKind == this.kEnumMsgsToIndex) {
      // We need to create search terms for messages to index. Messages should
      // be indexed if they're indexable (local or offline and not expunged)
      // and either: haven't been indexed, are dirty, or are marked with
      // a former GLODA_BAD_MESSAGE_ID that is no longer our bad marker. (Our
      // bad marker can change on minor schema revs so that we can try and
      // reindex those messages exactly once and without needing to go through
      // a pass to mark them as needing one more try.)
      // The basic search expression is:
      //  ((GLODA_MESSAGE_ID_PROPERTY Is 0) ||
      //   (GLODA_MESSAGE_ID_PROPERTY Is GLODA_OLD_BAD_MESSAGE_ID) ||
      //   (GLODA_DIRTY_PROPERTY Isnt 0)) &&
      //  (JUNK_SCORE_PROPERTY Isnt 100)
      // If the folder !isLocal we add the terms:
      //  - if the folder is offline -- && (Status Is nsMsgMessageFlags.Offline)
      //  - && (Status Isnt nsMsgMessageFlags.Expunged)

      let searchSession = Cc[
        "@mozilla.org/messenger/searchSession;1"
      ].createInstance(Ci.nsIMsgSearchSession);
      let searchTerms = [];
      let isLocal = this._indexingFolder instanceof Ci.nsIMsgLocalMailFolder;

      searchSession.addScopeTerm(
        Ci.nsMsgSearchScope.offlineMail,
        this._indexingFolder
      );
      let nsMsgSearchAttrib = Ci.nsMsgSearchAttrib;
      let nsMsgSearchOp = Ci.nsMsgSearchOp;

      // first term: (GLODA_MESSAGE_ID_PROPERTY Is 0
      let searchTerm = searchSession.createTerm();
      searchTerm.booleanAnd = false; // actually don't care here
      searchTerm.beginsGrouping = true;
      searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
      searchTerm.op = nsMsgSearchOp.Is;
      let value = searchTerm.value;
      value.attrib = searchTerm.attrib;
      value.status = 0;
      searchTerm.value = value;
      searchTerm.hdrProperty = GLODA_MESSAGE_ID_PROPERTY;
      searchTerms.push(searchTerm);

      // second term: || GLODA_MESSAGE_ID_PROPERTY Is GLODA_OLD_BAD_MESSAGE_ID
      searchTerm = searchSession.createTerm();
      searchTerm.booleanAnd = false; // OR
      searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
      searchTerm.op = nsMsgSearchOp.Is;
      value = searchTerm.value;
      value.attrib = searchTerm.attrib;
      value.status = GLODA_OLD_BAD_MESSAGE_ID;
      searchTerm.value = value;
      searchTerm.hdrProperty = GLODA_MESSAGE_ID_PROPERTY;
      searchTerms.push(searchTerm);

      // third term: || GLODA_DIRTY_PROPERTY Isnt 0 )
      searchTerm = searchSession.createTerm();
      searchTerm.booleanAnd = false;
      searchTerm.endsGrouping = true;
      searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
      searchTerm.op = nsMsgSearchOp.Isnt;
      value = searchTerm.value;
      value.attrib = searchTerm.attrib;
      value.status = 0;
      searchTerm.value = value;
      searchTerm.hdrProperty = GLODA_DIRTY_PROPERTY;
      searchTerms.push(searchTerm);

      // fourth term: && JUNK_SCORE_PROPERTY Isnt 100
      // For symmetry with our event-driven stuff, we just directly deal with
      // the header property.
      searchTerm = searchSession.createTerm();
      searchTerm.booleanAnd = true;
      searchTerm.attrib = nsMsgSearchAttrib.HdrProperty;
      searchTerm.op = nsMsgSearchOp.Isnt;
      value = searchTerm.value;
      value.attrib = searchTerm.attrib;
      value.str = JUNK_SPAM_SCORE_STR;
      searchTerm.value = value;
      searchTerm.hdrProperty = JUNK_SCORE_PROPERTY;
      searchTerms.push(searchTerm);

      if (!isLocal) {
        // If the folder is offline, then the message should be too
        if (this._indexingFolder.getFlag(Ci.nsMsgFolderFlags.Offline)) {
          // additional term: && Status Is nsMsgMessageFlags.Offline
          searchTerm = searchSession.createTerm();
          searchTerm.booleanAnd = true;
          searchTerm.attrib = nsMsgSearchAttrib.MsgStatus;
          searchTerm.op = nsMsgSearchOp.Is;
          value = searchTerm.value;
          value.attrib = searchTerm.attrib;
          value.status = Ci.nsMsgMessageFlags.Offline;
          searchTerm.value = value;
          searchTerms.push(searchTerm);
        }

        // additional term: && Status Isnt nsMsgMessageFlags.Expunged
        searchTerm = searchSession.createTerm();
        searchTerm.booleanAnd = true;
        searchTerm.attrib = nsMsgSearchAttrib.MsgStatus;
        searchTerm.op = nsMsgSearchOp.Isnt;
        value = searchTerm.value;
        value.attrib = searchTerm.attrib;
        value.status = Ci.nsMsgMessageFlags.Expunged;
        searchTerm.value = value;
        searchTerms.push(searchTerm);
      }

      // true => reverse enumeration; see the kEnumIndexedMsgs case for the
      // forward-order rationale there.
      this._indexingEnumerator = this._indexingDatabase.getFilterEnumerator(
        searchTerms,
        true
      );
    } else if (aEnumKind == this.kEnumIndexedMsgs) {
      // Enumerate only messages that are already indexed. This comes out to:
      //  ((GLODA_MESSAGE_ID_PROPERTY > GLODA_FIRST_VALID_MESSAGE_ID-1) &&
      //   (GLODA_DIRTY_PROPERTY Isnt kMessageFilthy))
      // In English, a message is indexed if (by clause):
      // 1) The message has a gloda-id and that gloda-id is in the valid range
      //    (and not in the bad message marker range).
      // 2) The message has not been marked filthy (which invalidates the
      //    gloda-id.)  We also assume that the folder would not have been
      //    entered at all if it was marked filthy.
      let searchSession = Cc[
        "@mozilla.org/messenger/searchSession;1"
      ].createInstance(Ci.nsIMsgSearchSession);
      let searchTerms = [];

      searchSession.addScopeTerm(
        Ci.nsMsgSearchScope.offlineMail,
        this._indexingFolder
      );
      let nsMsgSearchAttrib = Ci.nsMsgSearchAttrib;
      let nsMsgSearchOp = Ci.nsMsgSearchOp;

      // first term: (GLODA_MESSAGE_ID_PROPERTY > GLODA_FIRST_VALID_MESSAGE_ID-1
      let searchTerm = searchSession.createTerm();
      searchTerm.booleanAnd = false; // actually don't care here
      searchTerm.beginsGrouping = true;
      searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
      // use != 0 if we're allow pre-bad ids.
      searchTerm.op = aAllowPreBadIds
        ? nsMsgSearchOp.Isnt
        : nsMsgSearchOp.IsGreaterThan;
      let value = searchTerm.value;
      value.attrib = searchTerm.attrib;
      value.status = aAllowPreBadIds ? 0 : GLODA_FIRST_VALID_MESSAGE_ID - 1;
      searchTerm.value = value;
      searchTerm.hdrProperty = GLODA_MESSAGE_ID_PROPERTY;
      searchTerms.push(searchTerm);

      // second term: && GLODA_DIRTY_PROPERTY Isnt kMessageFilthy)
      searchTerm = searchSession.createTerm();
      searchTerm.booleanAnd = true;
      searchTerm.endsGrouping = true;
      searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
      searchTerm.op = nsMsgSearchOp.Isnt;
      value = searchTerm.value;
      value.attrib = searchTerm.attrib;
      value.status = this.kMessageFilthy;
      searchTerm.value = value;
      searchTerm.hdrProperty = GLODA_DIRTY_PROPERTY;
      searchTerms.push(searchTerm);

      // The use-case of already indexed messages does not want them reversed;
      // we care about seeing the message keys in order.
      this._indexingEnumerator = this._indexingDatabase.getFilterEnumerator(
        searchTerms,
        false
      );
    } else if (aEnumKind == this.kEnumAllMsgs) {
      this._indexingEnumerator =
        this._indexingDatabase.reverseEnumerateMessages();
    } else {
      throw new Error("Unknown enumerator type requested:" + aEnumKind);
    }
  },
+
  /**
   * Leave the folder entered by _indexerEnterFolder: commit the folder's
   * message database, detach our database listener, clear the GlodaFolder's
   * indexing flag, and null out all folder-entry state.  Safe to call when
   * no folder is entered (it is a no-op then).
   */
  _indexerLeaveFolder() {
    if (this._indexingFolder !== null) {
      if (this._indexingDatabase) {
        this._indexingDatabase.commit(Ci.nsMsgDBCommitType.kLargeCommit);
        // remove our listener!
        this._indexingDatabase.removeListener(this._databaseAnnouncerListener);
      }
      // let the gloda folder know we are done indexing
      this._indexingGlodaFolder.indexing = false;
      // null everyone out
      this._indexingFolder = null;
      this._indexingGlodaFolder = null;
      this._indexingDatabase = null;
      this._indexingEnumerator = null;
    }
  },
+
  /**
   * Event fed to us by our nsIFolderListener when a folder is loaded.  We use
   * this event to know when a folder we were trying to open to index is
   * actually ready to be indexed.  (The summary may have not existed, may have
   * been out of date, or otherwise.)
   *
   * @param aFolder An nsIMsgFolder, already QI'd.
   */
  _onFolderLoaded(aFolder) {
    // Only react if this is the folder we are pending on (matched by URI).
    if (
      this._pendingFolderEntry !== null &&
      aFolder.URI == this._pendingFolderEntry.URI
    ) {
      this._indexerCompletePendingFolderEntry();
    }
  },
+
  /**
   * Worker definitions registered with GlodaIndexer: [job type name, handler
   * functions] pairs.  Handlers per entry: |worker| (the generator doing the
   * work), plus optional |onSchedule|, |jobCanceled|, |recover|, |cleanup|.
   */
  // it's a getter so we can reference 'this'. we could memoize.
  get workers() {
    return [
      [
        "folderSweep",
        {
          worker: this._worker_indexingSweep,
          jobCanceled: this._cleanup_indexingSweep,
          cleanup: this._cleanup_indexingSweep,
        },
      ],
      [
        "folder",
        {
          worker: this._worker_folderIndex,
          recover: this._recover_indexMessage,
          cleanup: this._cleanup_indexing,
        },
      ],
      [
        "folderCompact",
        {
          worker: this._worker_folderCompactionPass,
          // compaction enters the folder so needs to know how to leave
          cleanup: this._cleanup_indexing,
        },
      ],
      [
        "message",
        {
          worker: this._worker_messageIndex,
          onSchedule: this._schedule_messageIndex,
          jobCanceled: this._canceled_messageIndex,
          recover: this._recover_indexMessage,
          cleanup: this._cleanup_indexing,
        },
      ],
      [
        "delete",
        {
          worker: this._worker_processDeletes,
        },
      ],

      [
        "fixMissingContacts",
        {
          worker: this._worker_fixMissingContacts,
        },
      ],
    ];
  },
+
  /** Guard so we only ever schedule the migration job once per session. */
  _schemaMigrationInitiated: false,
  /**
   * Schedule a one-shot "fixMissingContacts" job if the datastore's actual
   * schema version is exactly 26 (the revision this migration targets —
   * see _worker_fixMissingContacts).
   */
  _considerSchemaMigration() {
    if (
      !this._schemaMigrationInitiated &&
      GlodaDatastore._actualSchemaVersion === 26
    ) {
      let job = new IndexingJob("fixMissingContacts", null);
      GlodaIndexer.indexJob(job);
      this._schemaMigrationInitiated = true;
    }
  },
+
  /**
   * Kick off the start-up indexing sweep; all the actual scheduling logic
   * lives in the indexingSweepNeeded setter.
   */
  initialSweep() {
    this.indexingSweepNeeded = true;
  },
+
  /** True while a "folderSweep" job is scheduled or running. */
  _indexingSweepActive: false,
  /**
   * Indicate that an indexing sweep is desired.  We kick-off an indexing
   * sweep at start-up and whenever we receive an event-based notification
   * that we either can't process as an event or that we normally handle
   * during the sweep pass anyways.  Setting this while a sweep is already
   * active is a no-op.
   */
  set indexingSweepNeeded(aNeeded) {
    if (!this._indexingSweepActive && aNeeded) {
      let job = new IndexingJob("folderSweep", null);
      // mappedFolders=false tells the worker to (re-)build its folder list.
      job.mappedFolders = false;
      GlodaIndexer.indexJob(job);
      this._indexingSweepActive = true;
    }
  },
+
  /**
   * Performs the folder sweep, locating folders that should be indexed, and
   * creating a folder indexing job for them, and rescheduling itself for
   * execution after that job is completed.  Once it indexes all the folders,
   * if we believe we have deletions to process (or just don't know), it kicks
   * off a deletion processing job.
   *
   * Folder traversal logic is based off the spotlight/vista indexer code; we
   * retrieve the list of servers and folders each time we want to find a new
   * folder to index.  This avoids needing to maintain a perfect model of the
   * folder hierarchy at all times.  (We may eventually want to do that, but
   * this is sufficient and safe for now.)  Although our use of dirty flags on
   * the folders allows us to avoid tracking the 'last folder' we processed,
   * we do so to avoid getting 'trapped' in a folder with a high rate of
   * changes.
   *
   * @param aJob The "folderSweep" IndexingJob; carries |mappedFolders| and
   *     |foldersToProcess| state across re-schedules.
   */
  *_worker_indexingSweep(aJob) {
    if (!aJob.mappedFolders) {
      // Walk the folders and make sure all the folders we would want to index
      // are mapped.  Build up a list of GlodaFolders as we go, so that we can
      // sort them by their indexing priority.
      let foldersToProcess = (aJob.foldersToProcess = []);

      for (let folder of MailServices.accounts.allFolders) {
        if (this.shouldIndexFolder(folder)) {
          foldersToProcess.push(Gloda.getFolderForFolder(folder));
        }
      }

      // sort the folders by priority (descending)
      foldersToProcess.sort(function (a, b) {
        return b.indexingPriority - a.indexingPriority;
      });

      aJob.mappedFolders = true;
    }

    // -- process the folders (in sorted order)
    while (aJob.foldersToProcess.length) {
      let glodaFolder = aJob.foldersToProcess.shift();
      // ignore folders that:
      // - have been deleted out of existence!
      // - are not dirty/have not been compacted
      // - are actively being compacted
      if (
        glodaFolder._deleted ||
        (!glodaFolder.dirtyStatus && !glodaFolder.compacted) ||
        glodaFolder.compacting
      ) {
        continue;
      }

      // If the folder is marked as compacted, give it a compaction job.
      if (glodaFolder.compacted) {
        GlodaIndexer.indexJob(new IndexingJob("folderCompact", glodaFolder.id));
      }

      // add a job for the folder indexing if it was dirty
      if (glodaFolder.dirtyStatus) {
        GlodaIndexer.indexJob(new IndexingJob("folder", glodaFolder.id));
      }

      // re-schedule this job (although this worker will die); the queued
      // compact/index jobs run first, then the sweep resumes with the
      // remaining foldersToProcess.
      GlodaIndexer.indexJob(aJob);
      yield GlodaConstants.kWorkDone;
    }

    // consider deletion; pendingDeletions === null means "don't know".
    if (this.pendingDeletions || this.pendingDeletions === null) {
      GlodaIndexer.indexJob(new IndexingJob("delete", null));
    }

    // we don't have any more work to do...
    this._indexingSweepActive = false;
    yield GlodaConstants.kWorkDone;
  },
+
  /**
   * The only state we need to cleanup is that there is no longer an active
   * indexing sweep.  (Used as both the jobCanceled and cleanup handler for
   * "folderSweep" jobs; see the |workers| getter.)
   *
   * @param aJob The canceled/completed "folderSweep" job (unused).
   */
  _cleanup_indexingSweep(aJob) {
    this._indexingSweepActive = false;
  },
+
  /**
   * The number of headers to look at before yielding with kWorkSync.  This
   * is for time-slicing purposes so we still yield to the UI periodically.
   */
  HEADER_CHECK_SYNC_BLOCK_SIZE: 25,

  /**
   * Number of [gloda id, message key, message-id header] tuples fetched per
   * folderCompactionPassBlockFetch query in _worker_folderCompactionPass.
   */
  FOLDER_COMPACTION_PASS_BATCH_SIZE: 512,
  /**
   * Special indexing pass for (local) folders that have been compacted.  The
   * compaction can cause message keys to change because message keys in local
   * folders are simply offsets into the mbox file.  Accordingly, we need to
   * update the gloda records/objects to point them at the new message key.
   *
   * Our general algorithm is to perform two traversals in parallel.  The first
   * is a straightforward enumeration of the message headers in the folder that
   * apparently have been already indexed.  These provide us with the message
   * key and the "gloda-id" property.
   * The second is a list of tuples containing a gloda message id, its current
   * message key per the gloda database, and the message-id header.  We re-fill
   * the list with batches on-demand.  This allows us to both avoid dispatching
   * needless UPDATEs as well as deal with messages that were tracked by the
   * PendingCommitTracker but were discarded by the compaction notification.
   *
   * We end up processing two streams of gloda-id's and some extra info.  In
   * the normal case we expect these two streams to line up exactly and all
   * we need to do is update the message key if it has changed.
   *
   * There are a few exceptional cases where things do not line up:
   * 1) The gloda database knows about a message that the enumerator does not
   *    know about...
   *   a) This message exists in the folder (identified using its message-id
   *      header).  This means the message got indexed but PendingCommitTracker
   *      had to forget about the info when the compaction happened.  We
   *      re-establish the link and track the message in PendingCommitTracker
   *      again.
   *   b) The message does not exist in the folder.  This means the message got
   *      indexed, PendingCommitTracker had to forget about the info, and
   *      then the message either got moved or deleted before now.  We mark
   *      the message as deleted; this allows the gloda message to be reused
   *      if the move target has not yet been indexed or purged if it already
   *      has been and the gloda message is a duplicate.  And obviously, if the
   *      event that happened was actually a delete, then the delete is the
   *      right thing to do.
   * 2) The enumerator knows about a message that the gloda database does not
   *    know about.  This is unexpected and should not happen.  We log a
   *    warning.  We are able to differentiate this case from case #1a by
   *    retrieving the message header associated with the next gloda message
   *    (using the message-id header per 1a again).  If the gloda message's
   *    message key is after the enumerator's message key then we know this is
   *    case #2.  (It implies an insertion in the enumerator stream which is how
   *    we define the unexpected case.)
   *
   * Besides updating the database rows, we also need to make sure that
   * in-memory representations are updated.  Immediately after dispatching
   * UPDATE changes to the database we use the same set of data to walk the
   * live collections and update any affected messages.  We are then able to
   * discard the information.  Although this means that we will have to
   * potentially walk the live collections multiple times, unless something
   * has gone horribly wrong, the number of collections should be reasonable
   * and the lookups are cheap.  We bias batch sizes accordingly.
   *
   * Because we operate based on chunks we need to make sure that when we
   * actually deal with multiple chunks that we don't step on our own feet with
   * our database updates.  Since compaction of message key K results in a new
   * message key K' such that K' <= K, we can reliably issue database
   * updates for all values <= K.  Which means our feet are safe no matter
   * when we issue the update command.  For maximum cache benefit, we issue
   * our updates prior to our new query since they should still be maximally
   * hot at that point.
   *
   * @param aJob The "folderCompact" job; aJob.id is the gloda folder id.
   * @param aCallbackHandle Indexer callback handle used to make the async
   *     block-fetch query resumable.
   */
  *_worker_folderCompactionPass(aJob, aCallbackHandle) {
    yield this._indexerEnterFolder(aJob.id);

    // It's conceivable that with a folder sweep we might end up trying to
    // compact a folder twice.  Bail early in this case.
    if (!this._indexingGlodaFolder.compacted) {
      yield GlodaConstants.kWorkDone;
    }

    // this is a forward enumeration (sometimes we reverse enumerate; not here)
    this._indexerGetEnumerator(this.kEnumIndexedMsgs);

    const HEADER_CHECK_SYNC_BLOCK_SIZE = this.HEADER_CHECK_SYNC_BLOCK_SIZE;
    const FOLDER_COMPACTION_PASS_BATCH_SIZE =
      this.FOLDER_COMPACTION_PASS_BATCH_SIZE;

    // Tuples of [gloda id, message key, message-id header] from
    // folderCompactionPassBlockFetch
    let glodaIdsMsgKeysHeaderIds = [];
    // Unpack each tuple from glodaIdsMsgKeysHeaderIds into these guys.
    // (Initialize oldMessageKey because we use it to kickstart our query.)
    let oldGlodaId,
      oldMessageKey = -1,
      oldHeaderMessageId;
    // parallel lists of gloda ids and message keys to pass to
    // GlodaDatastore.updateMessageLocations
    let updateGlodaIds = [];
    let updateMessageKeys = [];
    // list of gloda id's to mark deleted
    let deleteGlodaIds = [];

    // for GC reasons we need to track the number of headers seen
    let numHeadersSeen = 0;

    // We are consuming two lists; our loop structure has to reflect that.
    let headerIter = this._indexingEnumerator[Symbol.iterator]();
    let mayHaveMoreGlodaMessages = true;
    // When true, re-process the current header / gloda tuple instead of
    // advancing the corresponding stream on the next loop iteration.
    let keepIterHeader = false;
    let keepGlodaTuple = false;
    let msgHdr = null;
    while (headerIter || mayHaveMoreGlodaMessages) {
      let glodaId;
      if (headerIter) {
        if (!keepIterHeader) {
          let result = headerIter.next();
          if (result.done) {
            headerIter = null;
            msgHdr = null;
            // do the loop check again
            continue;
          }
          msgHdr = result.value;
        } else {
          keepIterHeader = false;
        }
      }

      if (msgHdr) {
        numHeadersSeen++;
        if (numHeadersSeen % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
          yield GlodaConstants.kWorkSync;
        }

        // There is no need to check with PendingCommitTracker.  If a message
        // somehow got indexed between the time the compaction killed
        // everything and the time we run, that is a bug.
        glodaId = msgHdr.getUint32Property(GLODA_MESSAGE_ID_PROPERTY);
        // (there is also no need to check for gloda dirty since the enumerator
        // filtered that for us.)
      }

      // get more [gloda id, message key, message-id header] tuples if out
      if (!glodaIdsMsgKeysHeaderIds.length && mayHaveMoreGlodaMessages) {
        // Since we operate on blocks, getting a new block implies we should
        // flush the last block if applicable.
        if (updateGlodaIds.length) {
          GlodaDatastore.updateMessageLocations(
            updateGlodaIds,
            updateMessageKeys,
            aJob.id,
            true
          );
          updateGlodaIds = [];
          updateMessageKeys = [];
        }

        if (deleteGlodaIds.length) {
          GlodaDatastore.markMessagesDeletedByIDs(deleteGlodaIds);
          deleteGlodaIds = [];
        }

        GlodaDatastore.folderCompactionPassBlockFetch(
          aJob.id,
          oldMessageKey + 1,
          FOLDER_COMPACTION_PASS_BATCH_SIZE,
          aCallbackHandle.wrappedCallback
        );
        glodaIdsMsgKeysHeaderIds = yield GlodaConstants.kWorkAsync;
        // Reverse so we can use pop instead of shift and I don't need to be
        // paranoid about performance.
        glodaIdsMsgKeysHeaderIds.reverse();

        if (!glodaIdsMsgKeysHeaderIds.length) {
          mayHaveMoreGlodaMessages = false;

          // We shouldn't be in the loop anymore if headerIter is dead now.
          if (!headerIter) {
            break;
          }
        }
      }

      if (!keepGlodaTuple) {
        if (mayHaveMoreGlodaMessages) {
          [oldGlodaId, oldMessageKey, oldHeaderMessageId] =
            glodaIdsMsgKeysHeaderIds.pop();
        } else {
          oldGlodaId = oldMessageKey = oldHeaderMessageId = null;
        }
      } else {
        keepGlodaTuple = false;
      }

      // -- normal expected case
      if (glodaId == oldGlodaId) {
        // only need to do something if the key is not right
        if (msgHdr.messageKey != oldMessageKey) {
          updateGlodaIds.push(glodaId);
          updateMessageKeys.push(msgHdr.messageKey);
        }
      } else {
        // -- exceptional cases
        // This should always return a value unless something is very wrong.
        // We do not want to catch the exception if one happens.
        let idBasedHeader = oldHeaderMessageId
          ? this._indexingDatabase.getMsgHdrForMessageID(oldHeaderMessageId)
          : false;
        // - Case 1b.
        // We want to mark the message as deleted.
        if (idBasedHeader == null) {
          deleteGlodaIds.push(oldGlodaId);
        } else if (
          idBasedHeader &&
          ((msgHdr && idBasedHeader.messageKey < msgHdr.messageKey) || !msgHdr)
        ) {
          // - Case 1a
          // The expected case is that the message referenced by the gloda
          // database precedes the header the enumerator told us about.  This
          // is expected because if PendingCommitTracker did not mark the
          // message as indexed/clean then the enumerator would not tell us
          // about it.
          // Also, if we ran out of headers from the enumerator, this is a dead
          // giveaway that this is the expected case.
          // tell the pending commit tracker about the gloda database one
          PendingCommitTracker.track(idBasedHeader, oldGlodaId);
          // and we might need to update the message key too
          if (idBasedHeader.messageKey != oldMessageKey) {
            updateGlodaIds.push(oldGlodaId);
            updateMessageKeys.push(idBasedHeader.messageKey);
          }
          // Take another pass through the loop so that we check the
          // enumerator header against the next message in the gloda
          // database.
          keepIterHeader = true;
        } else if (msgHdr) {
          // - Case 2
          // Whereas if the message referenced by gloda has a message key
          // greater than the one returned by the enumerator, then we have a
          // header claiming to be indexed by gloda that gloda does not
          // actually know about.  This is exceptional and gets a warning.
          this._log.warn(
            "Observed header that claims to be gloda indexed " +
              "but that gloda has never heard of during " +
              "compaction." +
              " In folder: " +
              msgHdr.folder.URI +
              " sketchy key: " +
              msgHdr.messageKey +
              " subject: " +
              msgHdr.mime2DecodedSubject
          );
          // Keep this tuple around for the next enumerator provided header
          keepGlodaTuple = true;
        }
      }
    }
    // If we don't flush the update, no one will!
    if (updateGlodaIds.length) {
      GlodaDatastore.updateMessageLocations(
        updateGlodaIds,
        updateMessageKeys,
        aJob.id,
        true
      );
    }
    if (deleteGlodaIds.length) {
      GlodaDatastore.markMessagesDeletedByIDs(deleteGlodaIds);
    }

    // The folder is no longer compacted-and-unprocessed.
    this._indexingGlodaFolder._setCompactedState(false);

    this._indexerLeaveFolder();
    yield GlodaConstants.kWorkDone;
  },
+
  /**
   * Index the contents of a folder.
   *
   * Runs in (up to) three phases after entering the folder:
   * 0) if the folder is filthy, mark every header bearing a gloda-id as
   *    filthy and downgrade the folder to plain dirty;
   * 1) count the messages to index (so progress can be reported);
   * 2) actually index each message via _indexMessage.
   *
   * @param aJob The "folder" job; aJob.id is the gloda folder id and
   *     aJob.force requests re-indexing of everything.
   * @param aCallbackHandle Indexer callback handle for nested async work.
   */
  *_worker_folderIndex(aJob, aCallbackHandle) {
    yield this._indexerEnterFolder(aJob.id);

    if (!this.shouldIndexFolder(this._indexingFolder)) {
      aJob.safelyInvokeCallback(true);
      yield GlodaConstants.kWorkDone;
    }

    // Make sure listeners get notified about this job.
    GlodaIndexer._notifyListeners();

    // there is of course a cost to all this header investigation even if we
    // don't do something.  so we will yield with kWorkSync for every block.
    const HEADER_CHECK_SYNC_BLOCK_SIZE = this.HEADER_CHECK_SYNC_BLOCK_SIZE;

    // we can safely presume if we are here that this folder has been selected
    // for offline processing...

    // -- Filthy Folder
    // A filthy folder may have misleading properties on the message that claim
    // the message is indexed.  They are misleading because the database, for
    // whatever reason, does not have the messages (accurately) indexed.
    // We need to walk all the messages and mark them filthy if they have a
    // dirty property.  Once we have done this, we can downgrade the folder's
    // dirty status to plain dirty.  We do this rather than trying to process
    // everyone in one go in a filthy context because if we have to terminate
    // indexing before we quit, we don't want to have to re-index messages next
    // time.  (This could even lead to never completing indexing in a
    // pathological situation.)
    let glodaFolder = GlodaDatastore._mapFolder(this._indexingFolder);
    if (glodaFolder.dirtyStatus == glodaFolder.kFolderFilthy) {
      // aAllowPreBadIds=true: any gloda-id at all counts as "indexed" here.
      this._indexerGetEnumerator(this.kEnumIndexedMsgs, true);
      let count = 0;
      for (let msgHdr of this._indexingEnumerator) {
        // we still need to avoid locking up the UI, pause periodically...
        if (++count % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
          yield GlodaConstants.kWorkSync;
        }

        let glodaMessageId = msgHdr.getUint32Property(
          GLODA_MESSAGE_ID_PROPERTY
        );
        // if it has a gloda message id, we need to mark it filthy
        if (glodaMessageId != 0) {
          msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, this.kMessageFilthy);
        }
        // if it doesn't have a gloda message id, we will definitely index it,
        // so no action is required.
      }
      // Commit the filthy status changes to the message database.
      this._indexingDatabase.commit(Ci.nsMsgDBCommitType.kLargeCommit);

      // this will automatically persist to the database
      glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderDirty);
    }

    // Figure out whether we're supposed to index _everything_ or just what
    // has not yet been indexed.
    let force = "force" in aJob && aJob.force;
    let enumeratorType = force ? this.kEnumAllMsgs : this.kEnumMsgsToIndex;

    // Pass 1: count the number of messages to index.
    //   We do this in order to be able to report to the user what we're doing.
    // TODO: give up after reaching a certain number of messages in folders
    //   with ridiculous numbers of messages and make the interface just say
    //   something like "over N messages to go."

    this._indexerGetEnumerator(enumeratorType);

    let numMessagesToIndex = 0;
    // eslint-disable-next-line no-unused-vars
    for (let ignore of this._indexingEnumerator) {
      // We're only counting, so do bigger chunks on this pass.
      ++numMessagesToIndex;
      if (numMessagesToIndex % (HEADER_CHECK_SYNC_BLOCK_SIZE * 8) == 0) {
        yield GlodaConstants.kWorkSync;
      }
    }

    aJob.goal = numMessagesToIndex;

    if (numMessagesToIndex > 0) {
      // We used up the iterator, get a new one.
      this._indexerGetEnumerator(enumeratorType);

      // Pass 2: index the messages.
      let count = 0;
      for (let msgHdr of this._indexingEnumerator) {
        // per above, we want to periodically release control while doing all
        // this header traversal/investigation.
        if (++count % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
          yield GlodaConstants.kWorkSync;
        }

        // To keep our counts more accurate, increment the offset before
        // potentially skipping any messages.
        ++aJob.offset;

        // Skip messages that have not yet been reported to us as existing via
        // msgsClassified.
        if (
          this._indexingFolder.getProcessingFlags(msgHdr.messageKey) &
          NOT_YET_REPORTED_PROCESSING_FLAGS
        ) {
          continue;
        }

        // Because the gloda id could be in-flight, we need to double-check the
        // enumerator here since it can't know about our in-memory stuff.
        let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(msgHdr);
        // if the message seems valid and we are not forcing indexing, skip it.
        // (that means good gloda id and not dirty)
        if (
          !force &&
          glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
          glodaDirty == this.kMessageClean
        ) {
          continue;
        }

        this._log.debug(">>> calling _indexMessage");
        yield aCallbackHandle.pushAndGo(
          this._indexMessage(msgHdr, aCallbackHandle),
          { what: "indexMessage", msgHdr }
        );
        GlodaIndexer._indexedMessageCount++;
        this._log.debug("<<< back from _indexMessage");
      }
    }

    // This will trigger an (async) db update which cannot hit the disk prior to
    // the actual database records that constitute the clean state.
    // XXX There is the slight possibility that, in the event of a crash, this
    //  will hit the disk but the gloda-id properties on the headers will not
    //  get set.  This should ideally be resolved by detecting a non-clean
    //  shutdown and marking all folders as dirty.
    glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderClean);

    // by definition, it's not likely we'll visit this folder again anytime soon
    this._indexerLeaveFolder();

    aJob.safelyInvokeCallback(true);

    yield GlodaConstants.kWorkDone;
  },
+
  /**
   * Invoked when a "message" job is scheduled so that we can clear
   * _pendingAddJob if that is the job.  We do this so that work items are not
   * added to _pendingAddJob while it is being processed.
   *
   * @param aJob The scheduled "message" job.
   * @param aCallbackHandle Indexer callback handle (unused here).
   */
  _schedule_messageIndex(aJob, aCallbackHandle) {
    // we do not want new work items to be added as we are processing, so
    // clear _pendingAddJob.  A new job will be created as needed.
    if (aJob === this._pendingAddJob) {
      this._pendingAddJob = null;
    }
    // update our goal from the items length
    aJob.goal = aJob.items.length;
  },
+ /**
+ * If the job gets canceled, we need to make sure that we clear out pending
+ * add job or our state will get wonky.
+ */
+ _canceled_messageIndex(aJob) {
+ if (aJob === this._pendingAddJob) {
+ this._pendingAddJob = null;
+ }
+ },
+
+  /**
+   * Index a specific list of messages that we know to index from
+   * event-notification hints.
+   *
+   * @param aJob Job whose items are [folder ID, message key] or
+   *     [folder ID, message ID] tuples; aJob.offset tracks our progress.
+   * @param aCallbackHandle The indexer driver's callback handle; used for
+   *     pushAndGo into per-message indexing.
+   */
+  *_worker_messageIndex(aJob, aCallbackHandle) {
+    // if we are already in the correct folder, our "get in the folder" clause
+    // will not execute, so we need to make sure this value is accurate in
+    // that case. (and we want to avoid multiple checks...)
+    for (; aJob.offset < aJob.items.length; aJob.offset++) {
+      let item = aJob.items[aJob.offset];
+      // item is either [folder ID, message key] or
+      // [folder ID, message ID]
+
+      let glodaFolderId = item[0];
+      // If the folder has been deleted since we queued, skip this message
+      if (!GlodaDatastore._folderIdKnown(glodaFolderId)) {
+        continue;
+      }
+      let glodaFolder = GlodaDatastore._mapFolderID(glodaFolderId);
+
+      // Stay out of folders that:
+      // - are compacting / compacted and not yet processed
+      // - got deleted (this would be redundant if we had a stance on id nukage)
+      // (these things could have changed since we queued the event)
+      if (
+        glodaFolder.compacting ||
+        glodaFolder.compacted ||
+        glodaFolder._deleted
+      ) {
+        continue;
+      }
+
+      // get in the folder
+      if (this._indexingGlodaFolder != glodaFolder) {
+        yield this._indexerEnterFolder(glodaFolderId);
+
+        // Now that we have the real nsIMsgFolder, sanity-check that we should
+        // be indexing it. (There are some checks that require the
+        // nsIMsgFolder.)
+        if (!this.shouldIndexFolder(this._indexingFolder)) {
+          continue;
+        }
+      }
+
+      let msgHdr;
+      // GetMessageHeader can be affected by the use cache, so we need to check
+      // ContainsKey first to see if the header is really actually there.
+      // (A numeric item[1] is a message key; otherwise it is a message ID.)
+      if (typeof item[1] == "number") {
+        // NOTE: the && chain means msgHdr can end up `false` rather than null
+        // when the key is absent; the falsy check below covers both.
+        msgHdr =
+          this._indexingDatabase.containsKey(item[1]) &&
+          this._indexingFolder.GetMessageHeader(item[1]);
+      } else {
+        // Same deal as in move processing.
+        // TODO fixme to not assume singular message-id's.
+        msgHdr = this._indexingDatabase.getMsgHdrForMessageID(item[1]);
+      }
+
+      if (msgHdr) {
+        yield aCallbackHandle.pushAndGo(
+          this._indexMessage(msgHdr, aCallbackHandle),
+          { what: "indexMessage", msgHdr }
+        );
+      } else {
+        // Header vanished; still yield a sync work unit so the driver can
+        // account progress and keep the UI responsive.
+        yield GlodaConstants.kWorkSync;
+      }
+    }
+
+    // There is no real reason to stay 'in' the folder. If we are going to get
+    // more events from the folder, its database would have to be open for us
+    // to get the events, so it's not like we're creating an efficiency
+    // problem where we unload a folder just to load it again in 2 seconds.
+    // (Well, at least assuming the views are good about holding onto the
+    // database references even though they go out of their way to avoid
+    // holding onto message header references.)
+    this._indexerLeaveFolder();
+
+    yield GlodaConstants.kWorkDone;
+  },
+
+  /**
+   * Recover from a "folder" or "message" job failing inside a call to
+   * |_indexMessage|, marking the message bad. If we were not in an
+   * |_indexMessage| call, then fail to recover.
+   *
+   * @param aJob The job that was being worked. We ignore this for now.
+   * @param aContextStack The callbackHandle mechanism's context stack. When we
+   *     invoke pushAndGo for _indexMessage we put something in so we can
+   *     detect when it is on the async stack.
+   * @param aException The exception that is necessitating we attempt to
+   *     recover.
+   *
+   * @returns 1 if we were able to recover (because we want the call stack
+   *     popped down to our worker), false if we can't.
+   */
+  _recover_indexMessage(aJob, aContextStack, aException) {
+    // See if indexMessage is on the stack...
+    // (We only look at frame 1: the marker object pushAndGo stored there.)
+    if (
+      aContextStack.length >= 2 &&
+      aContextStack[1] &&
+      "what" in aContextStack[1] &&
+      aContextStack[1].what == "indexMessage"
+    ) {
+      // it is, so this is probably recoverable.
+
+      this._log.debug(
+        "Exception while indexing message, marking it bad (gloda id of 1)."
+      );
+
+      // -- Mark the message as bad
+      let msgHdr = aContextStack[1].msgHdr;
+      // (In the worst case, the header is no longer valid, which will result in
+      // exceptions. We need to be prepared for that.)
+      try {
+        msgHdr.setUint32Property(
+          GLODA_MESSAGE_ID_PROPERTY,
+          GLODA_BAD_MESSAGE_ID
+        );
+        // clear the dirty bit if it has one
+        if (msgHdr.getUint32Property(GLODA_DIRTY_PROPERTY)) {
+          msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, 0);
+        }
+      } catch (ex) {
+        // If we are indexing a folder and the message header is no longer
+        // valid, then it's quite likely the whole folder is no longer valid.
+        // But since in the event-driven message indexing case we could have
+        // other valid things to look at, let's try and recover. The folder
+        // indexing case will come back to us shortly and we will indicate
+        // recovery is not possible at that point.
+        // So do nothing here since by popping the indexing of the specific
+        // message out of existence we are recovering.
+      }
+      // 1 = "pop the async stack down to our worker"; presumably the driver
+      // only distinguishes truthy vs false here -- TODO confirm at call site.
+      return 1;
+    }
+    return false;
+  },
+
+ /**
+ * Cleanup after an aborted "folder" or "message" job.
+ */
+ _cleanup_indexing(aJob) {
+ this._indexerLeaveFolder();
+ aJob.safelyInvokeCallback(false);
+ },
+
+  /**
+   * Maximum number of deleted messages to process at a time. Arbitrary; there
+   * are no real known performance constraints at this point.
+   */
+  DELETED_MESSAGE_BLOCK_SIZE: 32,
+
+  /**
+   * Process pending deletes...
+   *
+   * Repeatedly pulls blocks of up to DELETED_MESSAGE_BLOCK_SIZE messages that
+   * are marked deleted in the gloda database and pushes each one through
+   * |_deleteMessage| until no deleted messages remain, then clears
+   * this.pendingDeletions.
+   */
+  *_worker_processDeletes(aJob, aCallbackHandle) {
+    // Count the number of messages we will eventually process. People freak
+    // out when the number is constantly increasing because they think gloda
+    // has gone rogue. (Note: new deletions can still accumulate during
+    // our execution, so we may 'expand' our count a little still.)
+    this._datastore.countDeletedMessages(aCallbackHandle.wrappedCallback);
+    aJob.goal = yield GlodaConstants.kWorkAsync;
+    this._log.debug(
+      "There are currently " +
+        aJob.goal +
+        " messages awaiting" +
+        " deletion processing."
+    );
+
+    // get a block of messages to delete.
+    let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+      noDbQueryValidityConstraints: true,
+    });
+    query._deleted(1);
+    query.limit(this.DELETED_MESSAGE_BLOCK_SIZE);
+    let deletedCollection = query.getCollection(aCallbackHandle);
+    yield GlodaConstants.kWorkAsync;
+
+    while (deletedCollection.items.length) {
+      for (let message of deletedCollection.items) {
+        // If it turns out our count is wrong (because some new deletions
+        // happened since we entered this worker), let's issue a new count
+        // and use that to accurately update our goal.
+        // (The fresh count covers what is still pending, so it is folded
+        // into the existing goal rather than replacing it.)
+        if (aJob.offset >= aJob.goal) {
+          this._datastore.countDeletedMessages(aCallbackHandle.wrappedCallback);
+          aJob.goal += yield GlodaConstants.kWorkAsync;
+        }
+
+        yield aCallbackHandle.pushAndGo(
+          this._deleteMessage(message, aCallbackHandle)
+        );
+        aJob.offset++;
+        yield GlodaConstants.kWorkSync;
+      }
+
+      // Fetch the next block; the loop ends when a fetch comes back empty.
+      deletedCollection = query.getCollection(aCallbackHandle);
+      yield GlodaConstants.kWorkAsync;
+    }
+    this.pendingDeletions = false;
+
+    yield GlodaConstants.kWorkDone;
+  },
+
+  /**
+   * One-off schema-upgrade worker: find email identities whose contactID
+   * points at a contact row that no longer exists, create replacement
+   * contact rows for them (named from the address book card when one is
+   * found), patch any cached in-memory identity objects, then mark all
+   * folders dirty and request an indexing sweep.
+   */
+  *_worker_fixMissingContacts(aJob, aCallbackHandle) {
+    let identityContactInfos = [];
+
+    // -- asynchronously get a list of all identities without contacts
+    // The upper bound on the number of messed up contacts is the number of
+    // contacts in the user's address book. This should be small enough
+    // (and the data size small enough) that this won't explode thunderbird.
+    let queryStmt = GlodaDatastore._createAsyncStatement(
+      "SELECT identities.id, identities.contactID, identities.value " +
+        "FROM identities " +
+        "LEFT JOIN contacts ON identities.contactID = contacts.id " +
+        "WHERE identities.kind = 'email' AND contacts.id IS NULL",
+      true
+    );
+    queryStmt.executeAsync({
+      handleResult(aResultSet) {
+        let row;
+        while ((row = aResultSet.getNextRow())) {
+          identityContactInfos.push({
+            identityId: row.getInt64(0),
+            contactId: row.getInt64(1),
+            email: row.getString(2),
+          });
+        }
+      },
+      handleError(aError) {},
+      handleCompletion(aReason) {
+        GlodaDatastore._asyncCompleted();
+        aCallbackHandle.wrappedCallback();
+      },
+    });
+    // Finalizing after executeAsync is fine; the statement keeps running
+    // asynchronously. The manual _pendingAsyncStatements++ below is paired
+    // with the _asyncCompleted() call in handleCompletion -- presumably
+    // because this statement bypasses the datastore's usual execution
+    // wrapper; confirm against GlodaDatastore if touching this.
+    queryStmt.finalize();
+    GlodaDatastore._pendingAsyncStatements++;
+    yield GlodaConstants.kWorkAsync;
+
+    // -- perform fixes only if there were missing contacts
+    if (identityContactInfos.length) {
+      const yieldEvery = 64;
+      // - create the missing contacts
+      for (let i = 0; i < identityContactInfos.length; i++) {
+        // Periodically cede control so we don't hang the UI.
+        if (i % yieldEvery === 0) {
+          yield GlodaConstants.kWorkSync;
+        }
+
+        let info = identityContactInfos[i],
+          card = MailServices.ab.cardForEmailAddress(info.email),
+          contact = new GlodaContact(
+            GlodaDatastore,
+            info.contactId,
+            null,
+            null,
+            card ? card.displayName || info.email : info.email,
+            0,
+            0
+          );
+        GlodaDatastore.insertContact(contact);
+
+        // update the in-memory rep of the identity to know about the contact
+        // if there is one.
+        let identity = GlodaCollectionManager.cacheLookupOne(
+          GlodaConstants.NOUN_IDENTITY,
+          info.identityId,
+          false
+        );
+        if (identity) {
+          // Unfortunately, although this fixes the (reachable) Identity and
+          // exposes the Contact, it does not make the Contact reachable from
+          // the collection manager. This will make explicit queries that look
+          // up the contact potentially see the case where
+          // contact.identities[0].contact !== contact. Alternately, that
+          // may not happen and instead the "contact" object we created above
+          // may become unlinked. (I'd have to trace some logic I don't feel
+          // like tracing.) Either way, The potential fallout is minimal
+          // since the object identity invariant will just lapse and popularity
+          // on the contact may become stale, and neither of those meaningfully
+          // affect the operation of anything in Thunderbird.
+          // If we really cared, we could find all the dominant collections
+          // that reference the identity and update their corresponding
+          // contact collection to make it reachable. That use-case does not
+          // exist outside of here, which is why we're punting.
+          identity._contact = contact;
+          contact._identities = [identity];
+        }
+
+        // NOTE: If the addressbook indexer did anything useful other than
+        // adapting to name changes, we could schedule indexing of the cards at
+        // this time. However, as of this writing, it doesn't, and this task
+        // is a one-off relevant only to the time of this writing.
+      }
+
+      // - mark all folders as dirty, initiate indexing sweep
+      this.dirtyAllKnownFolders();
+      this.indexingSweepNeeded = true;
+    }
+
+    // -- mark the schema upgrade, be done
+    GlodaDatastore._updateSchemaVersion(GlodaDatastore._schemaVersion);
+    yield GlodaConstants.kWorkDone;
+  },
+
+ /**
+ * Determine whether a folder is suitable for indexing.
+ *
+ * @param aMsgFolder An nsIMsgFolder you want to see if we should index.
+ *
+ * @returns true if we want to index messages in this type of folder, false if
+ * we do not.
+ */
+ shouldIndexFolder(aMsgFolder) {
+ let folderFlags = aMsgFolder.flags;
+ // Completely ignore non-mail and virtual folders. They should never even
+ // get to be GlodaFolder instances.
+ if (
+ !(folderFlags & Ci.nsMsgFolderFlags.Mail) ||
+ folderFlags & Ci.nsMsgFolderFlags.Virtual
+ ) {
+ return false;
+ }
+
+ // Some folders do not really exist; we can detect this by getStringProperty
+ // exploding when we call it. This is primarily a concern because
+ // _mapFolder calls said exploding method, but we also don't want to
+ // even think about indexing folders that don't exist. (Such folders are
+ // likely the result of a messed up profile.)
+ try {
+ // flags is used because it should always be in the cache avoiding a miss
+ // which would compel an msf open.
+ aMsgFolder.getStringProperty("flags");
+ } catch (ex) {
+ return false;
+ }
+
+ // Now see what our gloda folder information has to say about the folder.
+ let glodaFolder = GlodaDatastore._mapFolder(aMsgFolder);
+ return glodaFolder.indexingPriority != glodaFolder.kIndexingNeverPriority;
+ },
+
+  /**
+   * Sets the indexing priority for this folder and persists it both to Gloda,
+   * and, for backup purposes, to the nsIMsgFolder via string property as well.
+   *
+   * Setting this priority may cause the indexer to either reindex this folder,
+   * or remove this folder from the existing index.
+   *
+   * @param {nsIMsgFolder} aFolder
+   * @param {number} aPriority (one of the priority constants from GlodaFolder)
+   */
+  setFolderIndexingPriority(aFolder, aPriority) {
+    let glodaFolder = GlodaDatastore._mapFolder(aFolder);
+
+    // if there's been no change, we're done
+    if (aPriority == glodaFolder.indexingPriority) {
+      return;
+    }
+
+    // save off the old priority, and set the new one
+    let previousPrio = glodaFolder.indexingPriority;
+    glodaFolder._indexingPriority = aPriority;
+
+    // persist the new priority (datastore first, then the folder's own
+    // string property as the backup copy)
+    GlodaDatastore.updateFolderIndexingPriority(glodaFolder);
+    aFolder.setStringProperty("indexingPriority", Number(aPriority).toString());
+
+    // if we've been told never to index this folder...
+    if (aPriority == glodaFolder.kIndexingNeverPriority) {
+      // stop doing so, if we are indexing it right now
+      if (this._indexingFolder == aFolder) {
+        GlodaIndexer.killActiveJob();
+      }
+
+      // mark all existing messages as deleted
+      GlodaDatastore.markMessagesDeletedByFolderID(glodaFolder.id);
+
+      // re-index; the sweep will process the deletion markings above
+      GlodaMsgIndexer.indexingSweepNeeded = true;
+    } else if (previousPrio == glodaFolder.kIndexingNeverPriority) {
+      // there's no existing index, but the user now wants one; mark the
+      // folder filthy so the sweep rebuilds it from scratch
+      glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;
+      GlodaDatastore.updateFolderDirtyStatus(glodaFolder);
+      GlodaMsgIndexer.indexingSweepNeeded = true;
+    }
+  },
+
+ /**
+ * Resets the indexing priority on the given folder to whatever the default
+ * is for folders of that type.
+ *
+ * @note Calls setFolderIndexingPriority under the hood, so has identical
+ * potential reindexing side-effects
+ *
+ * @param {nsIMsgFolder} aFolder
+ * @param {boolean} aAllowSpecialFolderIndexing
+ */
+ resetFolderIndexingPriority(aFolder, aAllowSpecialFolderIndexing) {
+ this.setFolderIndexingPriority(
+ aFolder,
+ GlodaDatastore.getDefaultIndexingPriority(
+ aFolder,
+ aAllowSpecialFolderIndexing
+ )
+ );
+ },
+
+ /**
+ * Queue all of the folders of all of the accounts of the current profile
+ * for indexing. We traverse all folders and queue them immediately to try
+ * and have an accurate estimate of the number of folders that need to be
+ * indexed. (We previously queued accounts rather than immediately
+ * walking their list of folders.)
+ */
+ indexEverything() {
+ this._log.info("Queueing all accounts for indexing.");
+
+ GlodaDatastore._beginTransaction();
+ for (let account of MailServices.accounts.accounts) {
+ this.indexAccount(account);
+ }
+ GlodaDatastore._commitTransaction();
+ },
+
+ /**
+ * Queue all of the folders belonging to an account for indexing.
+ */
+ indexAccount(aAccount) {
+ let rootFolder = aAccount.incomingServer.rootFolder;
+ if (rootFolder instanceof Ci.nsIMsgFolder) {
+ this._log.info("Queueing account folders for indexing: " + aAccount.key);
+
+ for (let folder of rootFolder.descendants) {
+ if (this.shouldIndexFolder(folder)) {
+ GlodaIndexer.indexJob(
+ new IndexingJob("folder", GlodaDatastore._mapFolder(folder).id)
+ );
+ }
+ }
+ } else {
+ this._log.info("Skipping Account, root folder not nsIMsgFolder");
+ }
+ },
+
+ /**
+ * Queue a single folder for indexing given an nsIMsgFolder.
+ *
+ * @param [aOptions.callback] A callback to invoke when the folder finishes
+ * indexing. First argument is true if the task ran to completion
+ * successfully, false if we had to abort for some reason.
+ * @param [aOptions.force=false] Should we force the indexing of all messages
+ * in the folder (true) or just index what hasn't been indexed (false).
+ * @returns true if we are going to index the folder, false if not.
+ */
+ indexFolder(aMsgFolder, aOptions) {
+ if (!this.shouldIndexFolder(aMsgFolder)) {
+ return false;
+ }
+ let glodaFolder = GlodaDatastore._mapFolder(aMsgFolder);
+ // stay out of compacting/compacted folders
+ if (glodaFolder.compacting || glodaFolder.compacted) {
+ return false;
+ }
+
+ this._log.info("Queue-ing folder for indexing: " + aMsgFolder.prettyName);
+ let job = new IndexingJob("folder", glodaFolder.id);
+ if (aOptions) {
+ if ("callback" in aOptions) {
+ job.callback = aOptions.callback;
+ }
+ if ("force" in aOptions) {
+ job.force = true;
+ }
+ }
+ GlodaIndexer.indexJob(job);
+ return true;
+ },
+
+ /**
+ * Queue a list of messages for indexing.
+ *
+ * @param aFoldersAndMessages List of [nsIMsgFolder, message key] tuples.
+ */
+ indexMessages(aFoldersAndMessages) {
+ let job = new IndexingJob("message", null);
+ job.items = aFoldersAndMessages.map(fm => [
+ GlodaDatastore._mapFolder(fm[0]).id,
+ fm[1],
+ ]);
+ GlodaIndexer.indexJob(job);
+ },
+
+ /**
+ * Mark all known folders as dirty so that the next indexing sweep goes
+ * into all folders and checks their contents to see if they need to be
+ * indexed.
+ *
+ * This is being added for the migration case where we want to try and reindex
+ * all of the messages that had been marked with GLODA_BAD_MESSAGE_ID but
+ * which is now GLODA_OLD_BAD_MESSAGE_ID and so we should attempt to reindex
+ * them.
+ */
+ dirtyAllKnownFolders() {
+ // Just iterate over the datastore's folder map and tell each folder to
+ // be dirty if its priority is not disabled.
+ for (let folderID in GlodaDatastore._folderByID) {
+ let glodaFolder = GlodaDatastore._folderByID[folderID];
+ if (glodaFolder.indexingPriority !== glodaFolder.kIndexingNeverPriority) {
+ glodaFolder._ensureFolderDirty();
+ }
+ }
+ },
+
+ /**
+ * Given a message header, return whether this message is likely to have
+ * been indexed or not.
+ *
+ * This means the message must:
+ * - Be in a folder eligible for gloda indexing. (Not News, etc.)
+ * - Be in a non-filthy folder.
+ * - Be gloda-indexed and non-filthy.
+ *
+ * @param aMsgHdr A message header.
+ * @returns true if the message is likely to have been indexed.
+ */
+ isMessageIndexed(aMsgHdr) {
+ // If it's in a folder that we flat out do not index, say no.
+ if (!this.shouldIndexFolder(aMsgHdr.folder)) {
+ return false;
+ }
+ let glodaFolder = GlodaDatastore._mapFolder(aMsgHdr.folder);
+ let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(aMsgHdr);
+ return (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ glodaDirty != GlodaMsgIndexer.kMessageFilthy &&
+ glodaFolder &&
+ glodaFolder.dirtyStatus != glodaFolder.kFolderFilthy
+ );
+ },
+
+ /* *********** Event Processing *********** */
+
+ /**
+ * Tracks messages we have received msgKeyChanged notifications for in order
+ * to provide batching and to suppress needless reindexing when we receive
+ * the expected follow-up msgsClassified notification.
+ *
+ * The entries in this dictionary should be extremely short-lived as we
+ * receive the msgKeyChanged notification as the offline fake header is
+ * converted into a real header (which is accompanied by a msgAdded
+ * notification we don't pay attention to). Once the headers finish
+ * updating, the message classifier will get its at-bat and should likely
+ * find that the messages have already been classified and so fast-path
+ * them.
+ *
+ * The keys in this dictionary are chosen to be consistent with those of
+ * PendingCommitTracker: the folder.URI + "#" + the (new) message key.
+ * The values in the dictionary are either an object with "id" (the gloda
+ * id), "key" (the new message key), and "dirty" (is it dirty and so
+ * should still be queued for indexing) attributes, or null indicating that
+ * no change in message key occurred and so no database changes are required.
+ */
+  // Short-lived batch state; see the preceding comment for the exact
+  // key ("folder.URI#messageKey") and value contract.
+  _keyChangedBatchInfo: {},
+
+  /**
+   * Common logic for things that want to feed event-driven indexing. This gets
+   * called by both |_msgFolderListener.msgsClassified| when we are first
+   * seeing a message as well as by |_folderListener| when things happen to
+   * existing messages. Although we could slightly specialize for the
+   * new-to-us case, it works out to be cleaner to just treat them the same
+   * and take a very small performance hit.
+   *
+   * @param aMsgHdrs array of messages to treat as potentially changed.
+   * @param aDirtyingEvent Is this event inherently dirtying? Receiving a
+   *     msgsClassified notification is not inherently dirtying because it is
+   *     just telling us that a message exists. We use this knowledge to
+   *     ignore the msgsClassified notifications for messages we have received
+   *     msgKeyChanged notifications for and fast-pathed. Since it is possible
+   *     for user action to do something that dirties the message between the
+   *     time we get the msgKeyChanged notification and when we receive the
+   *     msgsClassified notification, we want to make sure we don't get
+   *     confused. (Although since we remove the message from our ignore-set
+   *     after the first notification, we would likely just mistakenly treat
+   *     the msgsClassified notification as something dirtying, so it would
+   *     still work out...)
+   */
+  _reindexChangedMessages(aMsgHdrs, aDirtyingEvent) {
+    // Accumulators, lazily allocated only when first needed.
+    let glodaIdsNeedingDeletion = null;
+    let messageKeyChangedIds = null,
+      messageKeyChangedNewKeys = null;
+    for (let msgHdr of aMsgHdrs) {
+      // -- Index this folder?
+      let msgFolder = msgHdr.folder;
+      if (!this.shouldIndexFolder(msgFolder)) {
+        continue;
+      }
+      // -- Ignore messages in filthy folders!
+      // A filthy folder can only be processed by an indexing sweep, and at
+      // that point the message will get indexed.
+      let glodaFolder = GlodaDatastore._mapFolder(msgHdr.folder);
+      if (glodaFolder.dirtyStatus == glodaFolder.kFolderFilthy) {
+        continue;
+      }
+
+      // -- msgKeyChanged event follow-up
+      if (!aDirtyingEvent) {
+        let keyChangedKey = msgHdr.folder.URI + "#" + msgHdr.messageKey;
+        if (keyChangedKey in this._keyChangedBatchInfo) {
+          var keyChangedInfo = this._keyChangedBatchInfo[keyChangedKey];
+          delete this._keyChangedBatchInfo[keyChangedKey];
+
+          // Null means to ignore this message because the key did not change
+          // (and the message was not dirty so it is safe to ignore.)
+          if (keyChangedInfo == null) {
+            continue;
+          }
+          // (the key may be null if we only generated the entry because the
+          // message was dirty)
+          if (keyChangedInfo.key !== null) {
+            if (messageKeyChangedIds == null) {
+              messageKeyChangedIds = [];
+              messageKeyChangedNewKeys = [];
+            }
+            messageKeyChangedIds.push(keyChangedInfo.id);
+            messageKeyChangedNewKeys.push(keyChangedInfo.key);
+          }
+          // ignore the message because it was not dirty
+          // NOTE(review): the _keyChangedBatchInfo doc above speaks of a
+          // "dirty" attribute but we read "isDirty" here -- confirm against
+          // the msgKeyChanged producer which name is correct.
+          if (!keyChangedInfo.isDirty) {
+            continue;
+          }
+        }
+      }
+
+      // -- Index this message?
+      // We index local messages, IMAP messages that are offline, and IMAP
+      // messages that aren't offline but whose folders aren't offline either
+      let isFolderLocal = msgFolder instanceof Ci.nsIMsgLocalMailFolder;
+      if (!isFolderLocal) {
+        // (skip: message lacks an offline copy but its folder is flagged
+        // offline, so presumably the body will arrive later -- wait for it)
+        if (
+          !(msgHdr.flags & Ci.nsMsgMessageFlags.Offline) &&
+          msgFolder.getFlag(Ci.nsMsgFolderFlags.Offline)
+        ) {
+          continue;
+        }
+      }
+      // Ignore messages whose processing flags indicate it has not yet been
+      // classified. In the IMAP case if the Offline flag is going to get set
+      // we are going to see it before the msgsClassified event so this is
+      // very important.
+      if (
+        msgFolder.getProcessingFlags(msgHdr.messageKey) &
+        NOT_YET_REPORTED_PROCESSING_FLAGS
+      ) {
+        continue;
+      }
+
+      let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(msgHdr);
+
+      let isSpam =
+        msgHdr.getStringProperty(JUNK_SCORE_PROPERTY) == JUNK_SPAM_SCORE_STR;
+
+      // -- Is the message currently gloda indexed?
+      if (
+        glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+        glodaDirty != this.kMessageFilthy
+      ) {
+        // - Is the message spam?
+        if (isSpam) {
+          // Treat this as a deletion...
+          if (!glodaIdsNeedingDeletion) {
+            glodaIdsNeedingDeletion = [];
+          }
+          glodaIdsNeedingDeletion.push(glodaId);
+          // and skip to the next message
+          continue;
+        }
+
+        // - Mark the message dirty if it is clean.
+        // (This is the only case in which we need to mark dirty so that the
+        // indexing sweep takes care of things if we don't process this in
+        // an event-driven fashion. If the message has no gloda-id or does
+        // and it's already dirty or filthy, it is already marked for
+        // indexing.)
+        if (glodaDirty == this.kMessageClean) {
+          msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, this.kMessageDirty);
+        }
+        // if the message is pending clean, this change invalidates that.
+        PendingCommitTracker.noteDirtyHeader(msgHdr);
+      } else if (isSpam) {
+        // If it's not indexed but is spam, ignore it.
+        continue;
+      }
+      // (we want to index the message if we are here)
+
+      // mark the folder dirty too, so we know to look inside
+      glodaFolder._ensureFolderDirty();
+
+      if (this._pendingAddJob == null) {
+        this._pendingAddJob = new IndexingJob("message", null);
+        GlodaIndexer.indexJob(this._pendingAddJob);
+      }
+      // only queue the message if we haven't overflowed our event-driven budget
+      if (this._pendingAddJob.items.length < this._indexMaxEventQueueMessages) {
+        this._pendingAddJob.items.push([
+          GlodaDatastore._mapFolder(msgFolder).id,
+          msgHdr.messageKey,
+        ]);
+      } else {
+        // over budget: fall back to a full sweep to pick the rest up
+        this.indexingSweepNeeded = true;
+      }
+    }
+
+    // Process any message key changes (from earlier msgKeyChanged events)
+    if (messageKeyChangedIds != null) {
+      GlodaDatastore.updateMessageKeys(
+        messageKeyChangedIds,
+        messageKeyChangedNewKeys
+      );
+    }
+
+    // If we accumulated any deletions in there, batch them off now.
+    if (glodaIdsNeedingDeletion) {
+      GlodaDatastore.markMessagesDeletedByIDs(glodaIdsNeedingDeletion);
+      this.pendingDeletions = true;
+    }
+  },
+
+ /* ***** Folder Changes ***** */
+ /**
+ * All additions and removals are queued for processing. Indexing messages
+ * is potentially phenomenally expensive, and deletion can still be
+ * relatively expensive due to our need to delete the message, its
+ * attributes, and all attributes that reference it. Additionally,
+ * attribute deletion costs are higher than attribute look-up because
+ * there is the actual row plus its 3 indices, and our covering indices are
+ * no help there.
+ *
+ */
+ _msgFolderListener: {
+ indexer: null,
+
+    /**
+     * We no longer use the msgAdded notification, instead opting to wait until
+     * junk/trait classification has run (or decided not to run) and all
+     * filters have run. The msgsClassified notification provides that for us.
+     *
+     * NOTE(review): presumably the listener is registered without the
+     * msgAdded flag bit -- confirm at the registration site.
+     */
+    msgAdded(aMsgHdr) {
+      // we are never called! we do not enable this bit!
+    },
+
+ /**
+ * Process (apparently newly added) messages that have been looked at by
+ * the message classifier. This ensures that if the message was going
+ * to get marked as spam, this will have already happened.
+ *
+ * Besides truly new (to us) messages, We will also receive this event for
+ * messages that are the result of IMAP message move/copy operations,
+ * including both moves that generated offline fake headers and those that
+ * did not. In the offline fake header case, however, we are able to
+ * ignore their msgsClassified events because we will have received a
+ * msgKeyChanged notification sometime in the recent past.
+ */
+ msgsClassified(aMsgHdrs, aJunkClassified, aTraitClassified) {
+ this.indexer._log.debug("msgsClassified notification");
+ try {
+ GlodaMsgIndexer._reindexChangedMessages(aMsgHdrs, false);
+ } catch (ex) {
+ this.indexer._log.error("Explosion in msgsClassified handling:", ex);
+ }
+ },
+
+ /**
+ * Any messages which have had their junk state changed are marked for
+ * reindexing.
+ */
+ msgsJunkStatusChanged(messages) {
+ this.indexer._log.debug("JunkStatusChanged notification");
+ GlodaMsgIndexer._reindexChangedMessages(messages, true);
+ },
+
+    /**
+     * Handle real, actual deletion (move to trash and IMAP deletion model
+     * don't count); we only see the deletion here when it becomes forever,
+     * or rather _just before_ it becomes forever. Because the header is
+     * going away, we need to either process things immediately or extract the
+     * information required to purge it later without the header.
+     * To this end, we mark all messages that were indexed in the gloda message
+     * database as deleted. We set our pending deletions flag to let our
+     * indexing logic know that after its next wave of folder traversal, it
+     * should perform a deletion pass. If it turns out the messages are coming
+     * back, the fact that deletion is thus deferred can be handy, as we can
+     * reuse the existing gloda message.
+     */
+    msgsDeleted(aMsgHdrs) {
+      this.indexer._log.debug("msgsDeleted notification");
+      let glodaMessageIds = [];
+
+      for (let msgHdr of aMsgHdrs) {
+        let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(msgHdr);
+        // Only messages with a valid, non-filthy gloda state have database
+        // records worth marking. (A filthy message's folder presumably gets
+        // wholly reprocessed by a sweep anyway -- confirm if touching this.)
+        if (
+          glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+          glodaDirty != GlodaMsgIndexer.kMessageFilthy
+        ) {
+          glodaMessageIds.push(glodaId);
+        }
+      }
+
+      if (glodaMessageIds.length) {
+        GlodaMsgIndexer._datastore.markMessagesDeletedByIDs(glodaMessageIds);
+        GlodaMsgIndexer.pendingDeletions = true;
+      }
+    },
+
+ /**
+ * Process a move or copy.
+ *
+ * Moves to a local folder or an IMAP folder where we are generating offline
+ * fake headers are dealt with efficiently because we get both the source
+ * and destination headers. The main ingredient to having offline fake
+ * headers is that allowUndo was true when the operation was performance.
+ * The only non-obvious thing is that we need to make sure that we deal
+ * with the impact of filthy folders and messages on gloda-id's (they
+ * invalidate the gloda-id).
+ *
+ * Moves to an IMAP folder that do not generate offline fake headers do not
+ * provide us with the target header, but the IMAP SetPendingAttributes
+ * logic will still attempt to propagate the properties on the message
+ * header so when we eventually see it in the msgsClassified notification,
+ * it should have the properties of the source message copied over.
+ * We make sure that gloda-id's do not get propagated when messages are
+ * moved from IMAP folders that are marked filthy or are marked as not
+ * supposed to be indexed by clearing the pending attributes for the header
+ * being tracked by the destination IMAP folder.
+ * We could fast-path the IMAP move case in msgsClassified by noticing that
+ * a message is showing up with a gloda-id header already and just
+ * performing an async location update.
+ *
+ * Moves that occur involving 'compacted' folders are fine and do not
+ * require special handling here. The one tricky super-edge-case that
+ * can happen (and gets handled by the compaction pass) is the move of a
+ * message that got gloda indexed that did not already have a gloda-id and
+ * PendingCommitTracker did not get to flush the gloda-id before the
+ * compaction happened. In that case our move logic cannot know to do
+ * anything and the gloda database still thinks the message lives in our
+ * folder. The compaction pass will deal with this by marking the message
+ * as deleted. The rationale being that marking it deleted allows the
+ * message to be re-used if it gets indexed in the target location, or if
+ * the target location has already been indexed, we no longer need the
+ * duplicate and it should be deleted. (Also, it is unable to distinguish
+ * between a case where the message got deleted versus moved.)
+ *
+ * Because copied messages are, by their nature, duplicate messages, we
+ * do not particularly care about them. As such, we defer their processing
+ * to the automatic sync logic that will happen much later on. This is
+ * potentially desirable in case the user deletes some of the original
+ * messages, allowing us to reuse the gloda message representations when
+ * we finally get around to indexing the messages. We do need to mark the
+ * folder as dirty, though, to clue in the sync logic.
+ */
+ msgsMoveCopyCompleted(aMove, aSrcMsgHdrs, aDestFolder, aDestMsgHdrs) {
+ this.indexer._log.debug("MoveCopy notification. Move: " + aMove);
+ try {
+ // ---- Move
+ if (aMove) {
+ // -- Effectively a deletion?
+ // If the destination folder is not indexed, it's like these messages
+ // are being deleted.
+ if (!GlodaMsgIndexer.shouldIndexFolder(aDestFolder)) {
+ this.msgsDeleted(aSrcMsgHdrs);
+ return;
+ }
+
+ // -- Avoid propagation of filthy gloda-id's.
+ // If the source folder is filthy or should not be indexed (and so
+ // any gloda-id's found in there are gibberish), our only job is to
+ // strip the gloda-id's off of all the destination headers because
+ // none of the gloda-id's are valid (and so we certainly don't want
+ // to try and use them as a basis for updating message keys.)
+ // (All source headers are taken to live in the same folder; only
+ // aSrcMsgHdrs[0].folder is consulted.)
+ let srcMsgFolder = aSrcMsgHdrs[0].folder;
+ if (
+ !this.indexer.shouldIndexFolder(srcMsgFolder) ||
+ GlodaDatastore._mapFolder(srcMsgFolder).dirtyStatus ==
+ GlodaFolder.prototype.kFolderFilthy
+ ) {
+ // Local case, just modify the destination headers directly.
+ if (aDestMsgHdrs.length > 0) {
+ for (let destMsgHdr of aDestMsgHdrs) {
+ // zero it out if it exists
+ // (no need to deal with pending commit issues here; a filthy
+ // folder by definition has nothing indexed in it.)
+ let glodaId = destMsgHdr.getUint32Property(
+ GLODA_MESSAGE_ID_PROPERTY
+ );
+ if (glodaId) {
+ destMsgHdr.setUint32Property(GLODA_MESSAGE_ID_PROPERTY, 0);
+ }
+ }
+
+ // Since we are moving messages from a folder where they were
+ // effectively not indexed, it is up to us to make sure the
+ // messages now get indexed.
+ this.indexer._reindexChangedMessages(aDestMsgHdrs);
+ return;
+ }
+
+ // IMAP move case, we need to operate on the pending headers using
+ // the source header to get the pending header and as the
+ // indication of what has been already set on the pending header.
+ let destDb;
+ // so, this can fail, and there's not much we can do about it.
+ try {
+ destDb = aDestFolder.msgDatabase;
+ } catch (ex) {
+ this.indexer._log.warn(
+ "Destination database for " +
+ aDestFolder.prettyName +
+ " not ready on IMAP move." +
+ " Gloda corruption possible."
+ );
+ return;
+ }
+ for (let srcMsgHdr of aSrcMsgHdrs) {
+ // Zero out the gloda-id on the destination's *pending* header
+ // (keyed by the source header) so the invalid id is not carried
+ // over to the destination folder.
+ // (no need to deal with pending commit issues here; a filthy
+ // folder by definition has nothing indexed in it.)
+ let glodaId = srcMsgHdr.getUint32Property(
+ GLODA_MESSAGE_ID_PROPERTY
+ );
+ if (glodaId) {
+ destDb.setUint32AttributeOnPendingHdr(
+ srcMsgHdr,
+ GLODA_MESSAGE_ID_PROPERTY,
+ 0
+ );
+ }
+ }
+
+ // Nothing remains to be done. The msgClassified event will take
+ // care of making sure the message gets indexed.
+ return;
+ }
+
+ // --- Have destination headers (local case):
+ if (aDestMsgHdrs.length > 0) {
+ // -- Update message keys for valid gloda-id's.
+ // (Which means ignore filthy gloda-id's.)
+ let glodaIds = [];
+ let newMessageKeys = [];
+ // Track whether we see any messages that are not gloda indexed so
+ // we know if we have to mark the destination folder dirty.
+ let sawNonGlodaMessage = false;
+ // Source and destination header lists correspond pairwise by index.
+ for (let iMsg = 0; iMsg < aSrcMsgHdrs.length; iMsg++) {
+ let srcMsgHdr = aSrcMsgHdrs[iMsg];
+ let destMsgHdr = aDestMsgHdrs[iMsg];
+
+ let [glodaId, dirtyStatus] =
+ PendingCommitTracker.getGlodaState(srcMsgHdr);
+ if (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ dirtyStatus != GlodaMsgIndexer.kMessageFilthy
+ ) {
+ // we may need to update the pending commit map (it checks)
+ PendingCommitTracker.noteMove(srcMsgHdr, destMsgHdr);
+ // but we always need to update our database
+ glodaIds.push(glodaId);
+ newMessageKeys.push(destMsgHdr.messageKey);
+ } else {
+ sawNonGlodaMessage = true;
+ }
+ }
+
+ // this method takes care to update the in-memory representations
+ // too; we don't need to do anything
+ if (glodaIds.length) {
+ GlodaDatastore.updateMessageLocations(
+ glodaIds,
+ newMessageKeys,
+ aDestFolder
+ );
+ }
+
+ // Mark the destination folder dirty if we saw any messages that
+ // were not already gloda indexed.
+ if (sawNonGlodaMessage) {
+ let destGlodaFolder = GlodaDatastore._mapFolder(aDestFolder);
+ destGlodaFolder._ensureFolderDirty();
+ this.indexer.indexingSweepNeeded = true;
+ }
+ } else {
+ // --- No dest headers (IMAP case):
+ // Update any valid gloda indexed messages into their new folder to
+ // make the indexer's life easier when it sees the messages in their
+ // new folder.
+ let glodaIds = [];
+
+ let srcFolderIsLocal =
+ srcMsgFolder instanceof Ci.nsIMsgLocalMailFolder;
+ for (let msgHdr of aSrcMsgHdrs) {
+ let [glodaId, dirtyStatus] =
+ PendingCommitTracker.getGlodaState(msgHdr);
+ if (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ dirtyStatus != GlodaMsgIndexer.kMessageFilthy
+ ) {
+ // we may need to update the pending commit map (it checks)
+ PendingCommitTracker.noteBlindMove(msgHdr);
+ // but we always need to update our database
+ glodaIds.push(glodaId);
+
+ // XXX UNDO WORKAROUND
+ // This constitutes a move from a local folder to an IMAP
+ // folder. Undo does not currently do the right thing for us,
+ // but we have a chance of not orphaning the message if we
+ // mark the source header as dirty so that when the message
+ // gets re-added we see it. (This does require that we enter
+ // the folder; we set the folder dirty after the loop to
+ // increase the probability of this but it's not foolproof
+ // depending on when the next indexing sweep happens and when
+ // the user performs an undo.)
+ msgHdr.setUint32Property(
+ GLODA_DIRTY_PROPERTY,
+ GlodaMsgIndexer.kMessageDirty
+ );
+ }
+ }
+ // XXX ALSO UNDO WORKAROUND
+ if (srcFolderIsLocal) {
+ let srcGlodaFolder = GlodaDatastore._mapFolder(srcMsgFolder);
+ srcGlodaFolder._ensureFolderDirty();
+ }
+
+ // quickly move them to the right folder, zeroing their message keys
+ GlodaDatastore.updateMessageFoldersByKeyPurging(
+ glodaIds,
+ aDestFolder
+ );
+ // we _do not_ need to mark the folder as dirty, because the
+ // message added events will cause that to happen.
+ }
+ } else {
+ // ---- Copy case
+ // -- Do not propagate gloda-id's for copies
+ // (Only applies if we have the destination header, which means local)
+ for (let destMsgHdr of aDestMsgHdrs) {
+ let glodaId = destMsgHdr.getUint32Property(
+ GLODA_MESSAGE_ID_PROPERTY
+ );
+ if (glodaId) {
+ destMsgHdr.setUint32Property(GLODA_MESSAGE_ID_PROPERTY, 0);
+ }
+ }
+
+ // mark the folder as dirty; we'll get to it later.
+ let destGlodaFolder = GlodaDatastore._mapFolder(aDestFolder);
+ destGlodaFolder._ensureFolderDirty();
+ this.indexer.indexingSweepNeeded = true;
+ }
+ } catch (ex) {
+ this.indexer._log.error(
+ "Problem encountered during message move/copy:",
+ ex.stack
+ );
+ }
+ },
+
+ /**
+ * Queue up message key changes that are a result of offline fake headers
+ * being made real for the actual update during the msgsClassified
+ * notification that is expected after this. We defer the
+ * actual work (if there is any to be done; the fake header might have
+ * guessed the right UID correctly) so that we can batch our work.
+ *
+ * The expectation is that there will be no meaningful time window between
+ * this notification and the msgsClassified notification since the message
+ * classifier should not actually need to classify the messages (they
+ * should already have been classified) and so can fast-path them.
+ *
+ * @param aOldMsgKey The message key the header had before the change.
+ * @param aNewMsgHdr The now-real header bearing the new message key.
+ */
+ msgKeyChanged(aOldMsgKey, aNewMsgHdr) {
+ try {
+ let val = null,
+ newKey = aNewMsgHdr.messageKey;
+ let [glodaId, glodaDirty] =
+ PendingCommitTracker.getGlodaState(aNewMsgHdr);
+ // If we haven't indexed this message yet, take no action, and leave it
+ // up to msgsClassified to take proper action.
+ if (glodaId < GLODA_FIRST_VALID_MESSAGE_ID) {
+ return;
+ }
+ // take no action on filthy messages,
+ // generate an entry if dirty or the keys don't match.
+ if (
+ glodaDirty !== GlodaMsgIndexer.kMessageFilthy &&
+ (glodaDirty === GlodaMsgIndexer.kMessageDirty ||
+ aOldMsgKey !== newKey)
+ ) {
+ val = {
+ id: glodaId,
+ key: aOldMsgKey !== newKey ? newKey : null,
+ isDirty: glodaDirty === GlodaMsgIndexer.kMessageDirty,
+ };
+ }
+
+ // Record the batch entry keyed by the message's *new* location; a
+ // null value records that no gloda update is needed for this header.
+ let key = aNewMsgHdr.folder.URI + "#" + aNewMsgHdr.messageKey;
+ this.indexer._keyChangedBatchInfo[key] = val;
+ } catch (ex) {
+ // this is more for the unit test to fail rather than user error reporting
+ this.indexer._log.error(
+ "Problem encountered during msgKeyChanged" +
+ " notification handling: " +
+ ex +
+ "\n\n" +
+ ex.stack +
+ " \n\n"
+ );
+ }
+ },
+
+ /**
+ * Detect newly added folders before they get messages so we map them before
+ * they get any messages added to them. If we only hear about them after
+ * they get their 1st message, then we will mark them filthy, but if we mark
+ * them before that, they get marked clean.
+ */
+ folderAdded(aMsgFolder) {
+ // This is invoked for its side-effect of invoking _mapFolder and doing so
+ // only after filtering out folders we don't care about. The return value
+ // is deliberately ignored.
+ GlodaMsgIndexer.shouldIndexFolder(aMsgFolder);
+ },
+
+ /**
+ * Handles folder no-longer-exists-ence. We mark all messages as deleted
+ * and remove the folder from our URI table. Currently, if a folder that
+ * contains other folders is deleted, we may either receive one
+ * notification for the folder that is deleted, or a notification for the
+ * folder and one for each of its descendants. This depends upon the
+ * underlying account implementation, so we explicitly handle each case.
+ * Namely, we treat it as if we're only planning on getting one, but we
+ * handle if the children are already gone for some reason.
+ */
+ folderDeleted(aFolder) {
+ this.indexer._log.debug("folderDeleted notification");
+ try {
+ // Marks the folder's messages deleted, removes the folder's row, and
+ // tombstones the in-memory gloda folder; a no-op for folders gloda
+ // has never seen.
+ let delFunc = function (aFolder, indexer) {
+ if (indexer._datastore._folderKnown(aFolder)) {
+ indexer._log.info(
+ "Processing deletion of folder " + aFolder.prettyName + "."
+ );
+ let glodaFolder = GlodaDatastore._mapFolder(aFolder);
+ indexer._datastore.markMessagesDeletedByFolderID(glodaFolder.id);
+ indexer._datastore.deleteFolderByID(glodaFolder.id);
+ GlodaDatastore._killGlodaFolderIntoTombstone(glodaFolder);
+ } else {
+ indexer._log.info(
+ "Ignoring deletion of folder " +
+ aFolder.prettyName +
+ " because it is unknown to gloda."
+ );
+ }
+ };
+
+ let descendentFolders = aFolder.descendants;
+ // (the order of operations does not matter; child, non-child, whatever.)
+ // delete the parent
+ delFunc(aFolder, this.indexer);
+ // delete all its descendants
+ for (let folder of descendentFolders) {
+ delFunc(folder, this.indexer);
+ }
+
+ this.indexer.pendingDeletions = true;
+ } catch (ex) {
+ this.indexer._log.error(
+ "Problem encountered during folder deletion" +
+ ": " +
+ ex +
+ "\n\n" +
+ ex.stack +
+ "\n\n"
+ );
+ }
+ },
+
+ /**
+ * Handle a folder being copied or moved.
+ * Moves are handled by a helper function shared with _folderRenameHelper
+ * (which takes care of any nesting involved).
+ * Copies are actually ignored, because our periodic indexing traversal
+ * should discover these automatically. We could hint ourselves into
+ * action, but arguably a set of completely duplicate messages is not
+ * a high priority for indexing.
+ */
+ folderMoveCopyCompleted(aMove, aSrcFolder, aDestFolder) {
+ this.indexer._log.debug(
+ "folderMoveCopy notification (Move: " + aMove + ")"
+ );
+ if (aMove) {
+ // Derive the post-move URI: destination folder URI plus the moved
+ // folder's leaf name (everything from the last '/' of the source URI).
+ let srcURI = aSrcFolder.URI;
+ let targetURI =
+ aDestFolder.URI + srcURI.substring(srcURI.lastIndexOf("/"));
+ this._folderRenameHelper(aSrcFolder, targetURI);
+ } else {
+ // Copy: just schedule a sweep; the traversal will find the new copies.
+ this.indexer.indexingSweepNeeded = true;
+ }
+ },
+
+ /**
+ * We just need to update the URI <-> ID maps and the row in the database,
+ * all of which is actually done by the datastore for us.
+ * This method needs to deal with the complexity where local folders will
+ * generate a rename notification for each sub-folder, but IMAP folders
+ * will generate only a single notification. Our logic primarily handles
+ * this by not exploding if the original folder no longer exists.
+ *
+ * @param aOrigFolder The folder's pre-rename nsIMsgFolder.
+ * @param aNewURI The URI the folder now lives at.
+ */
+ _folderRenameHelper(aOrigFolder, aNewURI) {
+ let newFolder = lazy.MailUtils.getOrCreateFolder(aNewURI);
+ let specialFolderFlags =
+ Ci.nsMsgFolderFlags.Trash | Ci.nsMsgFolderFlags.Junk;
+ // If the folder became (or moved under) Trash/Junk, stop indexing it
+ // and purge what we had rather than renaming in place.
+ if (newFolder.isSpecialFolder(specialFolderFlags, true)) {
+ let descendentFolders = newFolder.descendants;
+
+ // First thing to do: make sure we don't index the resulting folder and
+ // its descendants.
+ GlodaMsgIndexer.resetFolderIndexingPriority(newFolder);
+ for (let folder of descendentFolders) {
+ GlodaMsgIndexer.resetFolderIndexingPriority(folder);
+ }
+
+ // Remove from the index messages from the original folder
+ this.folderDeleted(aOrigFolder);
+ } else {
+ let descendentFolders = aOrigFolder.descendants;
+
+ let origURI = aOrigFolder.URI;
+ // this rename is straightforward.
+ GlodaDatastore.renameFolder(aOrigFolder, aNewURI);
+
+ for (let folder of descendentFolders) {
+ let oldSubURI = folder.URI;
+ // mangle a new URI from the old URI. we could also try and do a
+ // parallel traversal of the new folder hierarchy, but that seems like
+ // more work.
+ let newSubURI = aNewURI + oldSubURI.substring(origURI.length);
+ this.indexer._datastore.renameFolder(oldSubURI, newSubURI);
+ }
+
+ this.indexer._log.debug(
+ "folder renamed: " + origURI + " to " + aNewURI
+ );
+ }
+ },
+
+ /**
+ * Handle folder renames, dispatching to our rename helper (which also
+ * takes care of any nested folder issues.)
+ *
+ * @param aOrigFolder The folder's pre-rename nsIMsgFolder.
+ * @param aNewFolder The post-rename nsIMsgFolder; only its URI is used.
+ */
+ folderRenamed(aOrigFolder, aNewFolder) {
+ this._folderRenameHelper(aOrigFolder, aNewFolder.URI);
+ },
+
+ /**
+ * Helper used by folderCompactStart/folderReindexTriggered.
+ *
+ * @param folder The nsIMsgFolder being compacted or reindexed.
+ * @param isCompacting True when invoked for compaction; marks the
+ * in-memory gloda folder as compacting.
+ */
+ _reindexFolderHelper(folder, isCompacting) {
+ // ignore folders we ignore...
+ if (!GlodaMsgIndexer.shouldIndexFolder(folder)) {
+ return;
+ }
+
+ let glodaFolder = GlodaDatastore._mapFolder(folder);
+ if (isCompacting) {
+ glodaFolder.compacting = true;
+ }
+
+ // Purge any explicit indexing of said folder.
+ GlodaIndexer.purgeJobsUsingFilter(function (aJob) {
+ return aJob.jobType == "folder" && aJob.id == folder.id;
+ });
+
+ // Abort the active job if it's in the folder (this covers both
+ // event-driven indexing that happens to be in the folder as well
+ // explicit folder indexing of the folder).
+ if (GlodaMsgIndexer._indexingFolder == folder) {
+ GlodaIndexer.killActiveJob();
+ }
+
+ // Tell the PendingCommitTracker to throw away anything it is tracking
+ // about the folder. We will pick up the pieces in the compaction
+ // pass.
+ PendingCommitTracker.noteFolderDatabaseGettingBlownAway(folder);
+
+ // (We do not need to mark the folder dirty because if we were indexing
+ // it, it already must have been marked dirty.)
+ },
+
+ /**
+ * folderCompactStart: Mark the folder as compacting in our in-memory
+ * representation. This should keep any new indexing out of the folder
+ * until it is done compacting. Also, kill any active or existing jobs
+ * to index the folder.
+ *
+ * @param folder The nsIMsgFolder being compacted.
+ */
+ folderCompactStart(folder) {
+ this._reindexFolderHelper(folder, true);
+ },
+
+ /**
+ * folderReindexTriggered: We do the same thing as folderCompactStart
+ * but don't mark the folder as compacting.
+ *
+ * @param folder The nsIMsgFolder being reindexed.
+ */
+ folderReindexTriggered(folder) {
+ this._reindexFolderHelper(folder, false);
+ },
+
+ /**
+ * folderCompactFinish: Mark the folder as done compacting in our
+ * in-memory representation. Assuming the folder was known to us and
+ * not marked filthy, queue a compaction job.
+ *
+ * @param folder The nsIMsgFolder whose compaction just completed.
+ */
+ folderCompactFinish(folder) {
+ // ignore folders we ignore...
+ if (!GlodaMsgIndexer.shouldIndexFolder(folder)) {
+ return;
+ }
+
+ let glodaFolder = GlodaDatastore._mapFolder(folder);
+ glodaFolder.compacting = false;
+ glodaFolder._setCompactedState(true);
+
+ // Queue compaction unless the folder was filthy (in which case there
+ // are no valid gloda-id's to update.)
+ if (glodaFolder.dirtyStatus != glodaFolder.kFolderFilthy) {
+ GlodaIndexer.indexJob(new IndexingJob("folderCompact", glodaFolder.id));
+ }
+
+ // Queue indexing of the folder if it is dirty. We are doing this
+ // mainly in case we were indexing it before the compaction started.
+ // It should be reasonably harmless if we weren't.
+ // (It would probably be better to just make sure that there is an
+ // indexing sweep queued or active, and if it's already active that
+ // this folder is in the queue to be processed.)
+ if (glodaFolder.dirtyStatus == glodaFolder.kFolderDirty) {
+ GlodaIndexer.indexJob(new IndexingJob("folder", glodaFolder.id));
+ }
+ },
+ },
+
+ /**
+ * A nsIFolderListener (listening on nsIMsgMailSession so we get all of
+ * these events) PRIMARILY to get folder loaded notifications. Because of
+ * deficiencies in the nsIMsgFolderListener's events at this time, we also
+ * get our folder-added and newsgroup notifications from here for now. (This
+ * will be rectified.)
+ */
+ _folderListener: {
+ indexer: null,
+
+ _init(aIndexer) {
+ this.indexer = aIndexer;
+ },
+
+ // The following notifications are intentionally ignored (no-op stubs).
+ onFolderAdded(parentFolder, child) {},
+ onMessageAdded(parentFolder, msg) {},
+ onFolderRemoved(parentFolder, child) {},
+ onMessageRemoved(parentFolder, msg) {},
+ onFolderPropertyChanged(aItem, aProperty, aOldValue, aNewValue) {},
+ /**
+ * Detect changes to folder flags and reset our indexing priority. This
+ * is important because (all?) folders start out without any flags and
+ * then get their flags added to them.
+ */
+ onFolderIntPropertyChanged(aFolderItem, aProperty, aOldValue, aNewValue) {
+ if (aProperty !== "FolderFlag") {
+ return;
+ }
+ if (!GlodaMsgIndexer.shouldIndexFolder(aFolderItem)) {
+ return;
+ }
+ // Only reset priority if folder Special Use changes.
+ if (
+ (aOldValue & Ci.nsMsgFolderFlags.SpecialUse) ==
+ (aNewValue & Ci.nsMsgFolderFlags.SpecialUse)
+ ) {
+ return;
+ }
+ GlodaMsgIndexer.resetFolderIndexingPriority(aFolderItem);
+ },
+ onFolderBoolPropertyChanged(aItem, aProperty, aOldValue, aNewValue) {},
+ onFolderUnicharPropertyChanged(aItem, aProperty, aOldValue, aNewValue) {},
+ /**
+ * Notice when user activity adds/removes tags or changes a message's
+ * status.
+ */
+ onFolderPropertyFlagChanged(aMsgHdr, aProperty, aOldValue, aNewValue) {
+ // (aOldValue ^ aNewValue isolates the single flag bit that changed.)
+ if (
+ aProperty == "Keywords" ||
+ // We don't care about the New flag changing.
+ (aProperty == "Status" &&
+ (aOldValue ^ aNewValue) != Ci.nsMsgMessageFlags.New &&
+ // We do care about IMAP deletion, but msgsDeleted tells us that, so
+ // ignore IMAPDeleted too...
+ (aOldValue ^ aNewValue) != Ci.nsMsgMessageFlags.IMAPDeleted) ||
+ aProperty == "Flagged"
+ ) {
+ GlodaMsgIndexer._reindexChangedMessages([aMsgHdr], true);
+ }
+ },
+
+ /**
+ * Get folder loaded notifications for folders that had to do some
+ * (asynchronous) processing before they could be opened.
+ */
+ onFolderEvent(aFolder, aEvent) {
+ if (aEvent == "FolderLoaded") {
+ this.indexer._onFolderLoaded(aFolder);
+ }
+ },
+ },
+
+ /* ***** Rebuilding / Reindexing ***** */
+ /**
+ * Allow us to invalidate an outstanding folder traversal because the
+ * underlying database is going away. We use other means for detecting
+ * modifications of the message (labeling, marked (un)read, starred, etc.)
+ *
+ * This is an nsIDBChangeListener listening to an nsIDBChangeAnnouncer. To
+ * add ourselves, we get us a nice nsMsgDatabase, query it to the announcer,
+ * then call addListener.
+ */
+ _databaseAnnouncerListener: {
+ indexer: null,
+ /**
+ * XXX We really should define the operations under which we expect this to
+ * occur. While we know this must be happening as the result of a
+ * ForceClosed call, we don't have a comprehensive list of when this is
+ * expected to occur. Some reasons:
+ * - Compaction (although we should already have killed the job thanks to
+ * our compaction notification)
+ * - UID validity rolls.
+ * - Folder Rename
+ * - Folder Delete
+ * The fact that we already have the database open when getting this means
+ * that it had to be valid before we opened it, which hopefully rules out
+ * modification of the mbox file by an external process (since that is
+ * forbidden when we are running) and many other exotic things.
+ *
+ * So this really ends up just being a correctness / safety protection
+ * mechanism. At least now that we have better compaction support.
+ */
+ onAnnouncerGoingAway(aDBChangeAnnouncer) {
+ // The fact that we are getting called means we have an active folder and
+ // that we therefore are the active job. As such, we must kill the
+ // active job.
+ // XXX In the future, when we support interleaved event-driven indexing
+ // that bumps long-running indexing tasks, the semantics of this will
+ // have to change a bit since we will want to maintain being active in a
+ // folder even when bumped. However, we will probably have a more
+ // complex notion of indexing contexts on a per-job basis.
+ GlodaIndexer.killActiveJob();
+ },
+
+ // The remaining nsIDBChangeListener notifications are intentionally
+ // ignored (no-op stubs).
+ onHdrFlagsChanged(aHdrChanged, aOldFlags, aNewFlags, aInstigator) {},
+ onHdrDeleted(aHdrChanged, aParentKey, aFlags, aInstigator) {},
+ onHdrAdded(aHdrChanged, aParentKey, aFlags, aInstigator) {},
+ onParentChanged(aKeyChanged, aOldParent, aNewParent, aInstigator) {},
+ onReadChanged(aInstigator) {},
+ onJunkScoreChanged(aInstigator) {},
+ onHdrPropertyChanged(aHdrToChange, aPreChange, aStatus, aInstigator) {},
+ onEvent(aDB, aEvent) {},
+ },
+
+ /**
+ * Given a list of Message-ID's, return a matching list of lists of messages
+ * matching those Message-ID's. So if you pass an array with three
+ * Message-ID's ["a", "b", "c"], you would get back an array containing
+ * 3 lists, where the first list contains all the messages with a message-id
+ * of "a", and so forth. The reason a list is returned rather than null/a
+ * message is that we accept the reality that we have multiple copies of
+ * messages with the same ID.
+ * This call is asynchronous because it depends on previously created messages
+ * to be reflected in our results, which requires us to execute on the async
+ * thread where all our writes happen. This also turns out to be a
+ * reasonable thing because we could imagine pathological cases where there
+ * could be a lot of message-id's and/or a lot of messages with those
+ * message-id's.
+ *
+ * The returned collection will include both 'ghost' messages (messages
+ * that exist for conversation-threading purposes only) as well as deleted
+ * messages in addition to the normal 'live' messages that non-privileged
+ * queries might return.
+ *
+ * @param aMessageIDs Array of Message-ID strings to look up.
+ * @param aCallback Invoked (via the collection listener) with the
+ * list-of-lists once the query completes.
+ * @param aCallbackThis Optional |this| for aCallback.
+ * @returns The gloda collection backing the query.
+ */
+ getMessagesByMessageID(aMessageIDs, aCallback, aCallbackThis) {
+ // Map each Message-ID to its index so results can be routed back to
+ // the right output list.
+ let msgIDToIndex = {};
+ let results = [];
+ for (let iID = 0; iID < aMessageIDs.length; ++iID) {
+ let msgID = aMessageIDs[iID];
+ results.push([]);
+ msgIDToIndex[msgID] = iID;
+ }
+
+ // (Note: although we are performing a lookup with no validity constraints
+ // and using the same object-relational-mapper-ish layer used by things
+ // that do have constraints, we are not at risk of exposing deleted
+ // messages to other code and getting it confused. The only way code
+ // can find a message is if it shows up in their queries or gets announced
+ // via GlodaCollectionManager.itemsAdded, neither of which will happen.)
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ query.headerMessageID.apply(query, aMessageIDs);
+ query.frozen = true;
+
+ let listener = new MessagesByMessageIdCallback(
+ msgIDToIndex,
+ results,
+ aCallback,
+ aCallbackThis
+ );
+ return query.getCollection(listener, null, { becomeNull: true });
+ },
+
+ /**
+ * A reference to MsgHdrToMimeMessage that unit testing can clobber when it
+ * wants to cause us to hang or inject a fault. If you are not
+ * glodaTestHelper.js then _do not touch this_.
+ * (Any replacement must accept the same arguments as MsgHdrToMimeMessage;
+ * see the call in _indexMessage.)
+ */
+ _MsgHdrToMimeMessageFunc: MsgHdrToMimeMessage,
+ /**
+ * Primary message indexing logic. This method is mainly concerned with
+ * getting all the information about the message required for threading /
+ * conversation building and subsequent processing. It is responsible for
+ * determining whether to reuse existing gloda messages or whether a new one
+ * should be created. Most attribute stuff happens in fund_attr.js or
+ * expl_attr.js.
+ *
+ * Prior to calling this method, the caller must have invoked
+ * |_indexerEnterFolder|, leaving us with the following true invariants
+ * below.
+ *
+ * @param aMsgHdr The nsIMsgDBHdr of the message to index.
+ * @param aCallbackHandle Generator callback handle used to suspend on
+ * asynchronous work (MIME streaming, Message-ID lookups).
+ *
+ * @pre aMsgHdr.folder == this._indexingFolder
+ * @pre aMsgHdr.folder.msgDatabase == this._indexingDatabase
+ */
+ *_indexMessage(aMsgHdr, aCallbackHandle) {
+ this._log.debug(
+ "*** Indexing message: " + aMsgHdr.messageKey + " : " + aMsgHdr.subject
+ );
+
+ // If the message is offline, then get the message body as well
+ // (aMimeMsg stays undefined when the body is not available, i.e. the
+ // message is neither offline nor in a local folder.)
+ let aMimeMsg;
+ if (
+ aMsgHdr.flags & Ci.nsMsgMessageFlags.Offline ||
+ aMsgHdr.folder instanceof Ci.nsIMsgLocalMailFolder
+ ) {
+ this._MsgHdrToMimeMessageFunc(
+ aMsgHdr,
+ aCallbackHandle.callbackThis,
+ aCallbackHandle.callback,
+ false,
+ {
+ saneBodySize: true,
+ }
+ );
+ aMimeMsg = (yield GlodaConstants.kWorkAsync)[1];
+ } else {
+ this._log.debug(" * Message is not offline -- only headers indexed");
+ }
+
+ this._log.debug(" * Got message, subject " + aMsgHdr.subject);
+
+ if (this._unitTestSuperVerbose) {
+ if (aMimeMsg) {
+ this._log.debug(" * Got Mime " + aMimeMsg.prettyString());
+ } else {
+ this._log.debug(" * NO MIME MESSAGE!!!\n");
+ }
+ }
+
+ // -- Find/create the conversation the message belongs to.
+ // Our invariant is that all messages that exist in the database belong to
+ // a conversation.
+
+ // - See if any of the ancestors exist and have a conversationID...
+ // (references are ordered from old [0] to new [n-1])
+ let references = Array.from(range(0, aMsgHdr.numReferences)).map(i =>
+ aMsgHdr.getStringReference(i)
+ );
+ // also see if we already know about the message...
+ references.push(aMsgHdr.messageId);
+
+ this.getMessagesByMessageID(
+ references,
+ aCallbackHandle.callback,
+ aCallbackHandle.callbackThis
+ );
+ // (ancestorLists has a direct correspondence to the message ids)
+ let ancestorLists = yield GlodaConstants.kWorkAsync;
+
+ this._log.debug("ancestors raw: " + ancestorLists);
+ this._log.debug(
+ "ref len: " + references.length + " anc len: " + ancestorLists.length
+ );
+ this._log.debug("references: " + references);
+ this._log.debug("ancestors: " + ancestorLists);
+
+ // pull our current message lookup results off
+ references.pop();
+ let candidateCurMsgs = ancestorLists.pop();
+
+ let conversationID = null;
+ let conversation = null;
+ // -- figure out the conversation ID
+ // if we have a clone/already exist, just use his conversation ID
+ if (candidateCurMsgs.length > 0) {
+ conversationID = candidateCurMsgs[0].conversationID;
+ conversation = candidateCurMsgs[0].conversation;
+ } else {
+ // otherwise check out our ancestors
+ // (walk from closest to furthest ancestor)
+ for (
+ let iAncestor = ancestorLists.length - 1;
+ iAncestor >= 0;
+ --iAncestor
+ ) {
+ let ancestorList = ancestorLists[iAncestor];
+
+ if (ancestorList.length > 0) {
+ // we only care about the first instance of the message because we are
+ // able to guarantee the invariant that all messages with the same
+ // message id belong to the same conversation.
+ let ancestor = ancestorList[0];
+ if (conversationID === null) {
+ conversationID = ancestor.conversationID;
+ conversation = ancestor.conversation;
+ } else if (conversationID != ancestor.conversationID) {
+ // XXX this inconsistency is known and understood and tracked by
+ // bug 478162 https://bugzilla.mozilla.org/show_bug.cgi?id=478162
+ // this._log.error("Inconsistency in conversations invariant on " +
+ // ancestor.headerMessageID + ". It has conv id " +
+ // ancestor.conversationID + " but expected " +
+ // conversationID + ". ID: " + ancestor.id);
+ }
+ }
+ }
+ }
+
+ // nobody had one? create a new conversation
+ if (conversationID === null) {
+ // (the create method could issue the id, making the call return
+ // without waiting for the database...)
+ conversation = this._datastore.createConversation(
+ aMsgHdr.mime2DecodedSubject,
+ null,
+ null
+ );
+ conversationID = conversation.id;
+ }
+
+ // Walk from furthest to closest ancestor, creating the ancestors that don't
+ // exist. (This is possible if previous messages that were consumed in this
+ // thread only had an in-reply-to or for some reason did not otherwise
+ // provide the full references chain.)
+ for (let iAncestor = 0; iAncestor < ancestorLists.length; ++iAncestor) {
+ let ancestorList = ancestorLists[iAncestor];
+
+ if (ancestorList.length == 0) {
+ this._log.debug(
+ "creating message with: null, " +
+ conversationID +
+ ", " +
+ references[iAncestor] +
+ ", null."
+ );
+ let ancestor = this._datastore.createMessage(
+ null,
+ null, // ghost
+ conversationID,
+ null,
+ references[iAncestor],
+ null, // no subject
+ null, // no body
+ null
+ ); // no attachments
+ this._datastore.insertMessage(ancestor);
+ ancestorLists[iAncestor].push(ancestor);
+ }
+ }
+ // now all our ancestors exist, though they may be ghost-like...
+
+ // find if there's a ghost version of our message or we already have indexed
+ // this message.
+ let curMsg = null;
+ this._log.debug(candidateCurMsgs.length + " candidate messages");
+ for (let iCurCand = 0; iCurCand < candidateCurMsgs.length; iCurCand++) {
+ let candMsg = candidateCurMsgs[iCurCand];
+
+ this._log.debug(
+ "candidate folderID: " +
+ candMsg.folderID +
+ " messageKey: " +
+ candMsg.messageKey
+ );
+
+ if (candMsg.folderURI == this._indexingFolder.URI) {
+ // if we are in the same folder and we have the same message key, we
+ // are definitely the same, stop looking.
+ if (candMsg.messageKey == aMsgHdr.messageKey) {
+ curMsg = candMsg;
+ break;
+ }
+ // if (we are in the same folder and) the candidate message has a null
+ // message key, we treat it as our best option unless we find an exact
+ // key match. (this would happen because the 'move' notification case
+ // has to deal with not knowing the target message key. this case
+ // will hopefully be somewhat improved in the future to not go through
+ // this path which mandates re-indexing of the message in its entirety)
+ if (candMsg.messageKey === null) {
+ curMsg = candMsg;
+ } else if (
+ curMsg === null &&
+ !this._indexingDatabase.containsKey(candMsg.messageKey)
+ ) {
+ // (We are in the same folder and) the candidate message's underlying
+ // message no longer exists/matches. Assume we are the same but
+ // were betrayed by a re-indexing or something, but we have to make
+ // sure a perfect match doesn't turn up.
+ curMsg = candMsg;
+ }
+ } else if (curMsg === null && candMsg.folderID === null) {
+ // a ghost/deleted message is fine
+ curMsg = candMsg;
+ }
+ }
+
+ let attachmentNames = aMimeMsg?.allAttachments.map(att => att.name) || null;
+
+ let isConceptuallyNew, isRecordNew, insertFulltext;
+ if (curMsg === null) {
+ // No prior record at all: create a brand-new gloda message.
+ curMsg = this._datastore.createMessage(
+ aMsgHdr.folder,
+ aMsgHdr.messageKey,
+ conversationID,
+ aMsgHdr.date,
+ aMsgHdr.messageId
+ );
+ curMsg._conversation = conversation;
+ isConceptuallyNew = isRecordNew = insertFulltext = true;
+ } else {
+ isRecordNew = false;
+ // the message is conceptually new if it was a ghost or dead.
+ isConceptuallyNew = curMsg._isGhost || curMsg._isDeleted;
+ // insert fulltext if it was a ghost
+ insertFulltext = curMsg._isGhost;
+ curMsg._folderID = this._datastore._mapFolder(aMsgHdr.folder).id;
+ curMsg._messageKey = aMsgHdr.messageKey;
+ curMsg.date = new Date(aMsgHdr.date / 1000);
+ // the message may have been deleted; tell it to make sure it's not.
+ curMsg._ensureNotDeleted();
+ // note: we are assuming that our matching logic is flawless in that
+ // if this message was not a ghost, we are assuming the 'body'
+ // associated with the id is still exactly the same. It is conceivable
+ // that there are cases where this is not true.
+ }
+
+ if (aMimeMsg) {
+ let bodyPlain = aMimeMsg.coerceBodyToPlaintext(aMsgHdr.folder);
+ if (bodyPlain) {
+ curMsg._bodyLines = bodyPlain.split(/\r?\n/);
+ // curMsg._content gets set by GlodaFundAttr.jsm
+ }
+ }
+
+ // Mark the message as new (for the purposes of fulltext insertion)
+ if (insertFulltext) {
+ curMsg._isNew = true;
+ }
+
+ curMsg._subject = aMsgHdr.mime2DecodedSubject;
+ curMsg._attachmentNames = attachmentNames;
+
+ // curMsg._indexAuthor gets set by GlodaFundAttr.jsm
+ // curMsg._indexRecipients gets set by GlodaFundAttr.jsm
+
+ // zero the notability so everything in grokNounItem can just increment
+ curMsg.notability = 0;
+
+ yield aCallbackHandle.pushAndGo(
+ Gloda.grokNounItem(
+ curMsg,
+ { header: aMsgHdr, mime: aMimeMsg, bodyLines: curMsg._bodyLines },
+ isConceptuallyNew,
+ isRecordNew,
+ aCallbackHandle
+ )
+ );
+
+ // Drop indexing-only scratch state now that attribute processing is done.
+ delete curMsg._bodyLines;
+ delete curMsg._content;
+ delete curMsg._isNew;
+ delete curMsg._indexAuthor;
+ delete curMsg._indexRecipients;
+
+ // we want to update the header for messages only after the transaction
+ // irrevocably hits the disk. otherwise we could get confused if the
+ // transaction rolls back or what not.
+ PendingCommitTracker.track(aMsgHdr, curMsg.id);
+
+ yield GlodaConstants.kWorkDone;
+ },
+
  /**
   * Wipe a message out of existence from our index. This is slightly more
   * tricky than one would first expect because there are potentially
   * attributes not immediately associated with this message that reference
   * the message. Not only that, but deletion of messages may leave a
   * conversation possessing only ghost messages, which we don't want, so we
   * need to nuke the moot conversation and its moot ghost messages.
   * For now, we are actually punting on that trickiness, and the exact
   * nuances aren't defined yet because we have not decided whether to store
   * such attributes redundantly. For example, if we have subject-pred-object,
   * we could actually store this as attributes (subject, id, object) and
   * (object, id, subject). In such a case, we could query on (subject, *)
   * and use the results to delete the (object, id, subject) case. If we
   * don't redundantly store attributes, we can deal with the problem by
   * collecting up all the attributes that accept a message as their object
   * type and issuing a delete against that. For example, delete (*, [1,2,3],
   * message id).
   * (We are punting because we haven't implemented support for generating
   * attributes like that yet.)
   *
   * @param aMessage The message to remove; the code below relies on its
   *   conversation/conversationID accessors, its _isGhost/_isDeleted flags,
   *   and its headerMessageID.
   * @param aCallbackHandle Generator callback handle used to run the async
   *   conversation query; this worker yields kWorkAsync while waiting and
   *   kWorkDone when finished.
   *
   * @TODO: implement deletion of attributes that reference (deleted) messages
   */
  *_deleteMessage(aMessage, aCallbackHandle) {
    this._log.debug("*** Deleting message: " + aMessage);

    // -- delete our attributes
    // delete the message's attributes (if we implement the cascade delete, that
    // could do the honors for us... right now we define the trigger in our
    // schema but the back-end ignores it)
    GlodaDatastore.clearMessageAttributes(aMessage);

    // -- delete our message or ghost us, and maybe nuke the whole conversation
    // Look at the other messages in the conversation.
    // (Note: although we are performing a lookup with no validity constraints
    // and using the same object-relational-mapper-ish layer used by things
    // that do have constraints, we are not at risk of exposing deleted
    // messages to other code and getting it confused. The only way code
    // can find a message is if it shows up in their queries or gets announced
    // via GlodaCollectionManager.itemsAdded, neither of which will happen.)
    let convPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
      noDbQueryValidityConstraints: true,
    });
    convPrivQuery.conversation(aMessage.conversation);
    let conversationCollection = convPrivQuery.getCollection(aCallbackHandle);
    // Suspend this generator until the conversation query completes and the
    // callback handle resumes us.
    yield GlodaConstants.kWorkAsync;

    let conversationMsgs = conversationCollection.items;

    // Count the number of ghosts messages we see to determine if we are
    // the last message alive.
    let ghostCount = 0;
    let twinMessageExists = false;
    for (let convMsg of conversationMsgs) {
      // ignore our own message
      if (convMsg.id == aMessage.id) {
        continue;
      }

      if (convMsg._isGhost) {
        ghostCount++;
      } else if (
        // This message is our (living) twin if it is not a ghost, not deleted,
        // and has the same message-id header.
        !convMsg._isDeleted &&
        convMsg.headerMessageID == aMessage.headerMessageID
      ) {
        twinMessageExists = true;
      }
    }

    // -- If everyone else is a ghost, blow away the conversation.
    // If there are messages still alive or deleted but we have not yet gotten
    // to them yet _deleteMessage, then do not do this. (We will eventually
    // hit this case if they are all deleted.)
    // (The "- 1" excludes this message itself from the comparison.)
    if (conversationMsgs.length - 1 == ghostCount) {
      // - Obliterate each message
      for (let msg of conversationMsgs) {
        GlodaDatastore.deleteMessageByID(msg.id);
      }
      // - Obliterate the conversation
      GlodaDatastore.deleteConversationByID(aMessage.conversationID);
      // *no one* should hold a reference or use aMessage after this point,
      // trash it so such ne'er do'wells are made plain.
      aMessage._objectPurgedMakeYourselfUnpleasant();
    } else if (twinMessageExists) {
      // -- Ghost or purge us as appropriate
      // Purge us if we have a (living) twin; no ghost required.
      GlodaDatastore.deleteMessageByID(aMessage.id);
      // *no one* should hold a reference or use aMessage after this point,
      // trash it so such ne'er do'wells are made plain.
      aMessage._objectPurgedMakeYourselfUnpleasant();
    } else {
      // No twin, a ghost is required, we become the ghost.
      aMessage._ghost();
      GlodaDatastore.updateMessage(aMessage);
      // ghosts don't have fulltext. purge it.
      GlodaDatastore.deleteMessageTextByID(aMessage.id);
    }

    yield GlodaConstants.kWorkDone;
  },
+};
+GlodaIndexer.registerIndexer(GlodaMsgIndexer);
diff --git a/comm/mailnews/db/gloda/modules/MimeMessage.jsm b/comm/mailnews/db/gloda/modules/MimeMessage.jsm
new file mode 100644
index 0000000000..8859f10877
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/MimeMessage.jsm
@@ -0,0 +1,821 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "MsgHdrToMimeMessage",
+ "MimeMessage",
+ "MimeContainer",
+ "MimeBody",
+ "MimeUnknown",
+ "MimeMessageAttachment",
+];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
/**
 * A deliberately inert nsIUrlListener. The CallbackStreamListener receives an
 * equivalent set of start/stop events, so these notifications carry no extra
 * information for us and are ignored.
 */
var dumbUrlListener = {
  OnStartRunningUrl(aUrl) {
    // Intentionally empty.
  },
  OnStopRunningUrl(aUrl, aExitCode) {
    // Intentionally empty.
  },
};
+
/**
 * Map of message URI -> active CallbackStreamListener, maintained so that we
 * can cancel them all during shutdown. If we don't cancel them, we risk calls
 * into javascript from C++ after the various XPConnect contexts have already
 * begun their teardown process. Entries are added by the
 * CallbackStreamListener constructor and removed in its onStopRequest.
 */
var activeStreamListeners = {};
+
/**
 * Singleton observer that, on "quit-application", cancels every request still
 * tracked in activeStreamListeners so libmime cannot call back into JS during
 * XPConnect teardown. Registration with the observer service is deferred
 * until the first streaming request via ensureInitialized().
 */
var shutdownCleanupObserver = {
  // True once we have registered with the observer service.
  _initialized: false,
  ensureInitialized() {
    if (this._initialized) {
      return;
    }

    Services.obs.addObserver(this, "quit-application");

    this._initialized = true;
  },

  observe(aSubject, aTopic, aData) {
    if (aTopic == "quit-application") {
      Services.obs.removeObserver(this, "quit-application");

      // Abort every in-flight streaming request; each listener's
      // onStopRequest then performs its own cleanup.
      for (let uri in activeStreamListeners) {
        let streamListener = activeStreamListeners[uri];
        if (streamListener._request) {
          streamListener._request.cancel(Cr.NS_BINDING_ABORTED);
        }
      }
    }
  },
};
+
/**
 * Stream listener that registers itself in activeStreamListeners and fans the
 * parsed result out to one or more callbacks when streaming completes.
 *
 * @param aMsgHdr The nsIMsgDBHdr being streamed.
 * @param aCallbackThis The |this| for the callback, or — in the legacy
 *   two-argument form — the callback itself.
 * @param aCallback The callback to invoke on completion (optional; see
 *   above).
 */
function CallbackStreamListener(aMsgHdr, aCallbackThis, aCallback) {
  this._msgHdr = aMsgHdr;
  this._request = null;
  this._stream = null;

  // Legacy calling convention: with only two arguments, the second argument
  // is the callback itself and there is no explicit |this|.
  const legacyForm = aCallback === undefined;
  this._callbacksThis = [legacyForm ? null : aCallbackThis];
  this._callbacks = [legacyForm ? aCallbackThis : aCallback];

  // Messages opened from file or attachments do not have a folder property,
  // but have their url stored as a string property.
  const hdrURI = aMsgHdr.folder
    ? aMsgHdr.folder.getUriForMsg(aMsgHdr)
    : aMsgHdr.getStringProperty("dummyMsgUrl");
  activeStreamListeners[hdrURI] = this;
}
+
/**
 * Drives MsgHdrToMimeMessage: waits for libmime's JS emitter to finish,
 * fetches the parsed MimeMessage from the RESULT_RENDEVOUZ side-channel, and
 * invokes the registered callbacks.
 *
 * @implements {nsIRequestObserver}
 * @implements {nsIStreamListener}
 */
CallbackStreamListener.prototype = {
  QueryInterface: ChromeUtils.generateQI(["nsIStreamListener"]),

  // nsIRequestObserver part
  onStartRequest(aRequest) {
    // Remember the request so shutdownCleanupObserver can cancel it if the
    // application quits mid-stream.
    this._request = aRequest;
  },
  onStopRequest(aRequest, aStatusCode) {
    // Messages opened from file or attachments do not have a folder property,
    // but have their url stored as a string property.
    let msgURI = this._msgHdr.folder
      ? this._msgHdr.folder.getUriForMsg(this._msgHdr)
      : this._msgHdr.getStringProperty("dummyMsgUrl");
    delete activeStreamListeners[msgURI];

    // The emitter deposited the parsed message keyed by the request URI spec;
    // claim it (null when parsing produced nothing) and clear the slot.
    aRequest.QueryInterface(Ci.nsIChannel);
    let message = MsgHdrToMimeMessage.RESULT_RENDEVOUZ[aRequest.URI.spec];
    if (message === undefined) {
      message = null;
    }

    delete MsgHdrToMimeMessage.RESULT_RENDEVOUZ[aRequest.URI.spec];

    for (let i = 0; i < this._callbacksThis.length; i++) {
      try {
        this._callbacks[i].call(this._callbacksThis[i], this._msgHdr, message);
      } catch (e) {
        // Most of the time, exceptions will silently disappear into the endless
        // deeps of XPConnect, and never reach the surface ever again. At least
        // warn the user if he has dump enabled.
        dump(
          "The MsgHdrToMimeMessage callback threw an exception: " + e + "\n"
        );
        // That one will probably never make it to the original caller.
        throw e;
      }
    }

    // Drop every reference so neither the header nor the callbacks are kept
    // alive by this listener after completion.
    this._msgHdr = null;
    this._request = null;
    this._stream = null;
    this._callbacksThis = null;
    this._callbacks = null;
  },

  // nsIStreamListener part

  /**
   * Our onDataAvailable should actually never be called. The stream converter
   * is actually eating everything except the start and stop notification.
   */
  onDataAvailable(aRequest, aInputStream, aOffset, aCount) {
    throw new Error(
      `The stream converter should have grabbed the data for ${aRequest?.URI.spec}`
    );
  },
};
+
/**
 * Recursively replace the children of any encrypted container with an empty
 * list so that encrypted content is not exposed to consumers that did not
 * explicitly ask for it. The part tree is modified in place and returned for
 * convenience; leaf parts (no |parts| array) are left untouched.
 */
function stripEncryptedParts(aPart) {
  if (aPart.parts) {
    aPart.parts = aPart.isEncrypted
      ? [] // Present encrypted containers as empty.
      : aPart.parts.map(stripEncryptedParts);
  }
  return aPart;
}
+
/**
 * Starts retrieval of a MimeMessage instance for the given message header.
 * Your callback will be called with the message header you provide and the
 * resulting MimeMessage instance (or null if message parsing failed).
 *
 * @param aMsgHdr The message header to retrieve the body for and build a MIME
 *     representation of the message.
 * @param aCallbackThis The (optional) 'this' to use for your callback function.
 * @param aCallback The callback function to invoke on completion of message
 *     parsing or failure. The first argument passed will be the nsIMsgDBHdr
 *     you passed to this function. The second argument will be the MimeMessage
 *     instance resulting from the processing on success, and null on failure.
 * @param [aAllowDownload=false] Should we allow the message to be downloaded
 *     for this streaming request? The default is false, which means that we
 *     require that the message be available offline. If false is passed and
 *     the message is not available offline, we will propagate an exception
 *     thrown by the underlying code.
 * @param [aOptions] Optional options.
 * @param [aOptions.saneBodySize] Limit body sizes to a 'reasonable' size in
 *     order to combat corrupt offline/message stores creating pathological
 *     situations where we have erroneously multi-megabyte messages. This
 *     also likely reduces the impact of legitimately ridiculously large
 *     messages.
 * @param [aOptions.examineEncryptedParts] By default, we won't reveal the
 *     contents of multipart/encrypted parts to the consumers, unless
 *     explicitly requested. In the case of MIME/PGP messages, for instance,
 *     the message will appear as an empty multipart/encrypted container,
 *     unless this option is used.
 */
function MsgHdrToMimeMessage(
  aMsgHdr,
  aCallbackThis,
  aCallback,
  aAllowDownload,
  aOptions
) {
  // Lazily register the quit observer so in-flight streams can be aborted.
  shutdownCleanupObserver.ensureInitialized();

  let requireOffline = !aAllowDownload;
  // Messages opened from file or attachments do not have a folder property, but
  // have their url stored as a string property.
  let msgURI = aMsgHdr.folder
    ? aMsgHdr.folder.getUriForMsg(aMsgHdr)
    : aMsgHdr.getStringProperty("dummyMsgUrl");

  let msgService = MailServices.messageServiceFromURI(msgURI);

  // Expose aOptions to the MimeMessageEmitter for the (synchronous) duration
  // of this call; see the OPTION_TUNNEL documentation below.
  MsgHdrToMimeMessage.OPTION_TUNNEL = aOptions;
  // By default, Enigmail only decrypts a message streamed via libmime if it's
  // the one currently on display in the message reader. With this option, we're
  // letting Enigmail know that it should decrypt the message since the client
  // explicitly asked for it.
  let encryptedStr =
    aOptions && aOptions.examineEncryptedParts
      ? "&examineEncryptedParts=true"
      : "";

  // S/MIME, our other encryption backend, is not that smart, and always
  // decrypts data. In order to protect sensitive data (e.g. not index it in
  // Gloda), unless the client asked for encrypted data, we pass to the client
  // callback a stripped-down version of the MIME structure where encrypted
  // parts have been removed.
  let wrapCallback = function (aCallback, aCallbackThis) {
    if (aOptions && aOptions.examineEncryptedParts) {
      return aCallback;
    }
    return (aMsgHdr, aMimeMsg) =>
      aCallback.call(aCallbackThis, aMsgHdr, stripEncryptedParts(aMimeMsg));
  };

  // Apparently there used to be an old syntax where the callback was the second
  // argument...
  let callback = aCallback ? aCallback : aCallbackThis;
  let callbackThis = aCallback ? aCallbackThis : null;

  // if we're already streaming this msg, just add the callback
  // to the listener.
  let listenerForURI = activeStreamListeners[msgURI];
  if (listenerForURI != undefined) {
    listenerForURI._callbacks.push(wrapCallback(callback, callbackThis));
    listenerForURI._callbacksThis.push(callbackThis);
    return;
  }
  let streamListener = new CallbackStreamListener(
    aMsgHdr,
    callbackThis,
    wrapCallback(callback, callbackThis)
  );

  try {
    msgService.streamMessage(
      msgURI,
      streamListener, // consumer
      null, // nsIMsgWindow
      dumbUrlListener, // nsIUrlListener
      true, // have them create the converter
      // additional uri payload, note that "header=" is prepended automatically
      "filter&emitter=js" + encryptedStr,
      requireOffline
    );
  } catch (ex) {
    // If streamMessage throws an exception, we should make sure to clear the
    // activeStreamListener, or any subsequent attempt at streaming this URI
    // will silently fail
    if (activeStreamListeners[msgURI]) {
      delete activeStreamListeners[msgURI];
    }
    MsgHdrToMimeMessage.OPTION_TUNNEL = null;
    throw ex;
  }

  MsgHdrToMimeMessage.OPTION_TUNNEL = null;
}
+
/**
 * Let the jsmimeemitter provide us with results. The poor emitter (if I am
 * understanding things correctly) is evaluated outside of the C.u.import
 * world, so if we were to import him, we would not see him, but rather a new
 * copy of him. This goes for his globals, etc. (and is why we live in this
 * file right here). Also, it appears that the XPCOM JS wrappers aren't
 * magically unified so that we can try and pass data as expando properties
 * on things like the nsIUri instances either. So we have the jsmimeemitter
 * import us and poke things into RESULT_RENDEVOUZ. We put it here on this
 * function to try and be stealthy and avoid polluting the namespaces (or
 * encouraging bad behaviour) of our importers.
 *
 * Keyed by request URI spec; read and cleared by
 * CallbackStreamListener.onStopRequest. (The misspelling of "rendezvous" is
 * part of the public name and must be preserved.)
 *
 * If you can come up with a prettier way to shuttle this data, please do.
 */
MsgHdrToMimeMessage.RESULT_RENDEVOUZ = {};
/**
 * Cram rich options here for the MimeMessageEmitter to grab from. We
 * leverage the known control-flow to avoid needing a whole dictionary here.
 * We set this immediately before constructing the emitter and clear it
 * afterwards. Control flow is never yielded during the process and reentrancy
 * cannot happen via any other means.
 */
MsgHdrToMimeMessage.OPTION_TUNNEL = null;
+
/**
 * Mixin providing case-insensitive access to a |headers| map whose keys are
 * lower-cased header names and whose values are non-empty arrays of the
 * observed header values.
 */
var HeaderHandlerBase = {
  /**
   * Look-up a header that should be present at most once.
   *
   * @param aHeaderName The header name to retrieve, case does not matter.
   * @param aDefaultValue The value to return if the header was not found,
   *     null if left unspecified.
   * @returns the first observed value of the header if present, otherwise the
   *     default value. Use getAll if you want every value of a
   *     multiply-defined header.
   */
  get(aHeaderName, aDefaultValue) {
    if (aDefaultValue === undefined) {
      aDefaultValue = null;
    }
    const key = aHeaderName.toLowerCase();
    // Stored value lists are never empty when present, so [0] is safe.
    return key in this.headers ? this.headers[key][0] : aDefaultValue;
  },
  /**
   * Look-up a header that can be present multiple times. Use get for headers
   * that you only expect to be present at most once.
   *
   * @param aHeaderName The header name to retrieve, case does not matter.
   * @returns An array of the observed values; empty if the header is absent.
   */
  getAll(aHeaderName) {
    const key = aHeaderName.toLowerCase();
    return key in this.headers ? this.headers[key] : [];
  },
  /**
   * @param aHeaderName Header name to test for its presence.
   * @returns true if the message has (at least one value for) the given header
   *     name.
   */
  has(aHeaderName) {
    return aHeaderName.toLowerCase() in this.headers;
  },
  /**
   * Render "\n <indent>name: values" for every header; used by the various
   * prettyString implementations.
   */
  _prettyHeaderString(aIndent) {
    if (aIndent === undefined) {
      aIndent = "";
    }
    let out = "";
    for (const [header, values] of Object.entries(this.headers)) {
      out += "\n " + aIndent + header + ": " + values;
    }
    return out;
  },
};
+
/**
 * Representation of an RFC 2822 message (top-level or attached).
 *
 * @ivar partName The MIME part, ex "1.2.2.1". The partName of a (top-level)
 *     message is "1", its first child is "1.1", its second child is "1.2",
 *     its first child's first child is "1.1.1", etc.
 * @ivar headers Maps lower-cased header field names to a list of the values
 *     seen for the given header. Use get or getAll as convenience helpers.
 * @ivar parts The list of the MIME part children of this message. Children
 *     will be either MimeMessage instances, MimeMessageAttachment instances,
 *     MimeContainer instances, or MimeUnknown instances. The latter two are
 *     the result of limitations in the Javascript representation generation
 *     at this time, combined with the need to most accurately represent the
 *     MIME structure.
 * @ivar isEncrypted Whether this part is an encrypted container.
 */
function MimeMessage() {
  this.partName = null;
  this.headers = {};
  this.parts = [];
  this.isEncrypted = false;
}
+
MimeMessage.prototype = {
  __proto__: HeaderHandlerBase,
  contentType: "message/rfc822",

  /**
   * @returns a list of all attachments contained in this message and all its
   *  sub-messages. Only MimeMessageAttachment instances will be present in
   *  the list (no sub-messages).
   */
  get allAttachments() {
    let results = []; // messages are not attachments, don't include self
    for (let iChild = 0; iChild < this.parts.length; iChild++) {
      let child = this.parts[iChild];
      results = results.concat(child.allAttachments);
    }
    return results;
  },

  /**
   * @returns a list of all attachments contained in this message and all its
   *  sub-messages, including the sub-messages.
   */
  get allInlineAttachments() {
    // Do not include the top message, but only sub-messages; a sub-message
    // has a non-null partName.
    let results = this.partName ? [this] : [];
    for (let iChild = 0; iChild < this.parts.length; iChild++) {
      let child = this.parts[iChild];
      results = results.concat(child.allInlineAttachments);
    }
    return results;
  },

  /**
   * @returns a list of all attachments contained in this message, with
   *  included/forwarded messages treated as real attachments. Attachments
   *  contained in inner messages won't be shown.
   */
  get allUserAttachments() {
    if (this.url) {
      // The jsmimeemitter camouflaged us as a MimeAttachment
      return [this];
    }
    return this.parts
      .map(child => child.allUserAttachments)
      .reduce((a, b) => a.concat(b), []);
  },

  /**
   * @returns the total size of this message, that is, the size of all
   *  subparts (negative child sizes count as zero).
   */
  get size() {
    return this.parts
      .map(child => child.size)
      .reduce((a, b) => a + Math.max(b, 0), 0);
  },

  /**
   * In the case of attached messages, libmime considers them as attachments,
   * and if the body is, say, quoted-printable encoded, then libmime will start
   * counting bytes and notify the js mime emitter about it. The JS mime emitter
   * being a nice guy, it will try to set a size on us. While this is the
   * expected behavior for MimeMsgAttachments, we must make sure we can handle
   * that (failing to write a setter results in exceptions being thrown).
   */
  set size(whatever) {
    // nop
  },

  /**
   * @param aMsgFolder A message folder, any message folder. Because this is
   *  a hack.
   * @returns The concatenation of all of the body parts where parts
   *  available as text/plain are pulled as-is, and parts only available
   *  as text/html are converted to plaintext form first. In other words,
   *  if we see a multipart/alternative with a text/plain, we take the
   *  text/plain. If we see a text/html without an alternative, we convert
   *  that to text.
   */
  coerceBodyToPlaintext(aMsgFolder) {
    let bodies = [];
    for (let part of this.parts) {
      // an undefined value for something not having the method is fine
      let body =
        part.coerceBodyToPlaintext && part.coerceBodyToPlaintext(aMsgFolder);
      if (body) {
        bodies.push(body);
      }
    }
    // Test the length, not the array itself — an array is always truthy.
    if (bodies.length) {
      return bodies.join("");
    }
    return "";
  },

  /**
   * Convert the message and its hierarchy into a "pretty string". The message
   * and each MIME part get their own line. The string never ends with a
   * newline. For a non-multi-part message, only a single line will be
   * returned.
   * Messages have their subject displayed, attachments have their filename and
   * content-type (ex: image/jpeg) displayed. "Filler" classes simply have
   * their class displayed.
   */
  prettyString(aVerbose, aIndent, aDumpBody) {
    if (aIndent === undefined) {
      aIndent = "";
    }
    let nextIndent = aIndent + "  ";

    // The subject lookup must be parenthesized: |in| binds more loosely than
    // |+|, so without the parens the entire concatenated prefix string was
    // used as the property name and the "Message (N bytes): " prefix was
    // silently discarded.
    let s =
      "Message " +
      (this.isEncrypted ? "[encrypted] " : "") +
      "(" +
      this.size +
      " bytes): " +
      ("subject" in this.headers ? this.headers.subject : "");
    if (aVerbose) {
      s += this._prettyHeaderString(nextIndent);
    }

    for (let iPart = 0; iPart < this.parts.length; iPart++) {
      let part = this.parts[iPart];
      s +=
        "\n" +
        nextIndent +
        (iPart + 1) +
        " " +
        part.prettyString(aVerbose, nextIndent, aDumpBody);
    }

    return s;
  },
};
+
/**
 * A non-leaf MIME part that merely holds children.
 *
 * @ivar contentType The content-type of this container.
 * @ivar parts The parts held by this container. These can be instances of any
 *  of the classes found in this file.
 */
function MimeContainer(aContentType) {
  this.partName = null;
  this.contentType = aContentType;
  this.headers = {};
  this.parts = [];
  this.isEncrypted = false;
}
+
MimeContainer.prototype = {
  __proto__: HeaderHandlerBase,
  /**
   * @returns all MimeMessageAttachment instances found beneath this container.
   */
  get allAttachments() {
    let results = [];
    for (let iChild = 0; iChild < this.parts.length; iChild++) {
      let child = this.parts[iChild];
      results = results.concat(child.allAttachments);
    }
    return results;
  },
  /**
   * @returns all inline attachments (including sub-messages) found beneath
   *  this container.
   */
  get allInlineAttachments() {
    let results = [];
    for (let iChild = 0; iChild < this.parts.length; iChild++) {
      let child = this.parts[iChild];
      results = results.concat(child.allInlineAttachments);
    }
    return results;
  },
  /**
   * @returns all user-visible attachments found beneath this container.
   */
  get allUserAttachments() {
    return this.parts
      .map(child => child.allUserAttachments)
      .reduce((a, b) => a.concat(b), []);
  },
  /**
   * @returns the sum of the children's sizes; negative child sizes count as
   *  zero.
   */
  get size() {
    return this.parts
      .map(child => child.size)
      .reduce((a, b) => a + Math.max(b, 0), 0);
  },
  // libmime may assign a size to any part; containers ignore it.
  set size(whatever) {
    // nop
  },
  /**
   * @param aMsgFolder Any message folder; used for its HTML-to-plaintext
   *  snippet conversion helper.
   * @returns a plaintext rendering of this container's bodies. For
   *  multipart/alternative we prefer an explicit text/plain part, otherwise
   *  we convert the remembered HTML (or text/enriched) part; any other
   *  container type aggregates its children like MimeMessage does.
   */
  coerceBodyToPlaintext(aMsgFolder) {
    if (this.contentType == "multipart/alternative") {
      let htmlPart;
      // pick the text/plain if we can find one, otherwise remember the HTML one
      for (let part of this.parts) {
        if (part.contentType == "text/plain") {
          return part.body;
        }
        if (part.contentType == "text/html") {
          htmlPart = part;
        } else if (!htmlPart && part.contentType == "text/enriched") {
          // text/enriched gets transformed into HTML, so use it if we don't
          // already have an HTML part.
          htmlPart = part;
        }
      }
      // convert the HTML part if we have one
      if (htmlPart) {
        return aMsgFolder.convertMsgSnippetToPlainText(htmlPart.body);
      }
    }
    // if it's not alternative, recurse/aggregate using MimeMessage logic
    return MimeMessage.prototype.coerceBodyToPlaintext.call(this, aMsgFolder);
  },
  /**
   * Render this container and its children as an indented multi-line string;
   * see MimeMessage.prettyString.
   */
  prettyString(aVerbose, aIndent, aDumpBody) {
    // Default the indent like MimeMessage.prettyString does; previously an
    // omitted aIndent leaked the literal string "undefined" into the output.
    if (aIndent === undefined) {
      aIndent = "";
    }
    let nextIndent = aIndent + "  ";

    let s =
      "Container " +
      (this.isEncrypted ? "[encrypted] " : "") +
      "(" +
      this.size +
      " bytes): " +
      this.contentType;
    if (aVerbose) {
      s += this._prettyHeaderString(nextIndent);
    }

    for (let iPart = 0; iPart < this.parts.length; iPart++) {
      let part = this.parts[iPart];
      s +=
        "\n" +
        nextIndent +
        (iPart + 1) +
        " " +
        part.prettyString(aVerbose, nextIndent, aDumpBody);
    }

    return s;
  },
  toString() {
    return "Container: " + this.contentType;
  },
};
+
/**
 * @class Represents a body portion that we understand and do not believe to
 *  be a proper attachment: text/plain or text/html with no filename (a
 *  filename would suggest an attachment).
 *
 * @ivar contentType The content type of this body material; text/plain or
 *  text/html.
 * @ivar body The actual body content, accumulated via appendBody.
 */
function MimeBody(aContentType) {
  this.partName = null;
  this.contentType = aContentType;
  this.headers = {};
  this.body = "";
  this.isEncrypted = false;
}
+
MimeBody.prototype = {
  __proto__: HeaderHandlerBase,
  // A body is a leaf node: it never contributes attachments of any kind.
  get allAttachments() {
    return [];
  },
  get allInlineAttachments() {
    return [];
  },
  get allUserAttachments() {
    return [];
  },
  get size() {
    return this.body.length;
  },
  // libmime may try to assign a size to us; accept and ignore it.
  set size(whatever) {
    // nop
  },
  /** Accumulate another chunk of body text. */
  appendBody(aBuf) {
    this.body += aBuf;
  },
  /**
   * Return this body as plain text: text/plain is passed through as-is,
   * while text/html (and text/enriched, which libmime transforms into HTML)
   * is converted via the folder's snippet helper. Anything else yields "".
   */
  coerceBodyToPlaintext(aMsgFolder) {
    switch (this.contentType) {
      case "text/plain":
        return this.body;
      case "text/html":
      case "text/enriched":
        return aMsgFolder.convertMsgSnippetToPlainText(this.body);
      default:
        return "";
    }
  },
  /** One-line description: content type, length, optionally the body text. */
  prettyString(aVerbose, aIndent, aDumpBody) {
    let desc =
      "Body: " +
      (this.isEncrypted ? "[encrypted] " : "") +
      "" +
      this.contentType +
      " (" +
      this.body.length +
      " bytes" +
      (aDumpBody ? ": '" + this.body + "'" : "") +
      ")";
    if (aVerbose) {
      desc += this._prettyHeaderString(aIndent + "  ");
    }
    return desc;
  },
  toString() {
    return "Body: " + this.contentType + " (" + this.body.length + " bytes)";
  },
};
+
/**
 * @class A MIME leaf node that doesn't have a filename, so we assume it's not
 *  intended to be an attachment proper. This is probably meant for inline
 *  display or is the result of someone amusing themselves by composing
 *  messages by hand or a bad client. This class should probably be renamed or
 *  we should introduce a better named class that we try and use in preference
 *  to this class.
 *
 * @ivar contentType The content type of this part.
 */
function MimeUnknown(aContentType) {
  this.partName = null;
  this.contentType = aContentType;
  this.headers = {};
  // libmime does not always report a size for us, so start from zero rather
  // than undefined (which would poison the recursive size computations).
  this._size = 0;
  this.isEncrypted = false;
  // S/MIME encrypted messages have a topmost MimeUnknown part (with the
  // encrypted bit set), and all other encrypted parts must be able to live
  // beneath it — hence a parts array even for an "unknown" part.
  this.parts = [];
}
+
MimeUnknown.prototype = {
  __proto__: HeaderHandlerBase,
  // Aggregate the corresponding collections of all child parts.
  get allAttachments() {
    return this.parts.flatMap(child => child.allAttachments);
  },
  get allInlineAttachments() {
    return this.parts.flatMap(child => child.allInlineAttachments);
  },
  get allUserAttachments() {
    return this.parts.flatMap(child => child.allUserAttachments);
  },
  // Our own libmime-reported size plus that of our children; negative child
  // sizes count as zero.
  get size() {
    let total = this._size;
    for (const child of this.parts) {
      total += Math.max(child.size, 0);
    }
    return total;
  },
  set size(aSize) {
    this._size = aSize;
  },
  /**
   * Render this part and its children as an indented multi-line string; a
   * missing child slot is shown as "NULL".
   */
  prettyString(aVerbose, aIndent, aDumpBody) {
    const nextIndent = aIndent + "  ";

    let desc =
      "Unknown: " +
      (this.isEncrypted ? "[encrypted] " : "") +
      "" +
      this.contentType +
      " (" +
      this.size +
      " bytes)";
    if (aVerbose) {
      desc += this._prettyHeaderString(aIndent + "  ");
    }

    let childNumber = 0;
    for (const part of this.parts) {
      childNumber++;
      desc +=
        "\n" +
        nextIndent +
        childNumber +
        " " +
        (part ? part.prettyString(aVerbose, nextIndent, aDumpBody) : "NULL");
    }
    return desc;
  },
  toString() {
    return "Unknown: " + this.contentType;
  },
};
+
/**
 * @class An attachment proper: a part for which libmime was able to figure
 *  out a filename.
 *
 * @ivar partName @see{MimeMessage.partName}
 * @ivar name The filename of this attachment.
 * @ivar contentType The MIME content type of this part.
 * @ivar url The URL to stream if you want the contents of this part.
 * @ivar isExternal Is the attachment stored someplace else than in the
 *  message?
 * @ivar size The size of the attachment if available, -1 otherwise (size is
 *  set after initialization by jsmimeemitter.js).
 */
function MimeMessageAttachment(
  aPartName,
  aName,
  aContentType,
  aUrl,
  aIsExternal
) {
  this.partName = aPartName;
  this.name = aName;
  this.contentType = aContentType;
  this.url = aUrl;
  this.isExternal = aIsExternal;
  this.headers = {};
  this.isEncrypted = false;
  // parts/headers/isEncrypted are later copied over from the part instance
  // that preceded us during parsing.
}
+
MimeMessageAttachment.prototype = {
  __proto__: HeaderHandlerBase,
  // An attachment is a leaf; every attachment enumeration yields just itself.
  get allAttachments() {
    return [this];
  },
  get allInlineAttachments() {
    return [this];
  },
  get allUserAttachments() {
    return [this];
  },
  /**
   * One-line description: size, filename and content-type, with headers
   * appended when aVerbose is set.
   */
  prettyString(aVerbose, aIndent, aDumpBody) {
    let desc =
      "Attachment " +
      (this.isEncrypted ? "[encrypted] " : "") +
      "(" +
      this.size +
      " bytes): " +
      this.name +
      ", " +
      this.contentType;
    if (aVerbose) {
      desc += this._prettyHeaderString(aIndent + "  ");
    }
    return desc;
  },
  toString() {
    return this.prettyString(false, "");
  },
};
diff --git a/comm/mailnews/db/gloda/modules/NounFreetag.jsm b/comm/mailnews/db/gloda/modules/NounFreetag.jsm
new file mode 100644
index 0000000000..cb169645f1
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/NounFreetag.jsm
@@ -0,0 +1,91 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["FreeTag", "FreeTagNoun"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+
+/**
+ * A free-form tag: a lightweight wrapper around a tag-name string so free
+ * tags can be handed around as noun instances with object identity.
+ * Instances should be obtained via FreeTagNoun.getFreeTag, which interns
+ * them (one object per tag name).
+ */
+function FreeTag(aTagName) {
+  this.name = aTagName;
+}
+
+FreeTag.prototype = {
+  // Display/serialize as the raw tag name.
+  toString() {
+    return this.name;
+  },
+};
+
+/**
+ * @namespace Tag noun provider. Since the tag unique value is stored as a
+ *  parameter, we are an odd case and semantically confused.
+ */
+var FreeTagNoun = {
+  _log: console.createInstance({
+    prefix: "gloda.noun.freetag",
+    maxLogLevel: "Warn",
+    maxLogLevelPref: "gloda.loglevel",
+  }),
+
+  name: "freetag",
+  clazz: FreeTag,
+  allowsArbitraryAttrs: false,
+  // The tag name is stored as the attribute *parameter*, not as a value
+  // (see toParamAndValue below).
+  usesParameter: true,
+
+  // Listeners notified (via onFreeTagAdded) whenever a previously-unknown
+  // tag is first seen by getFreeTag.
+  _listeners: [],
+  addListener(aListener) {
+    this._listeners.push(aListener);
+  },
+  removeListener(aListener) {
+    let index = this._listeners.indexOf(aListener);
+    if (index >= 0) {
+      this._listeners.splice(index, 1);
+    }
+  },
+
+  /**
+   * Instantiate FreeTag objects for every tag name already recorded in the
+   * database as a parameter binding of an attribute that uses this noun.
+   * NOTE(review): objectNounOfAttributes is not defined in this file;
+   * presumably Gloda attaches it when attributes are registered against
+   * this noun type — confirm against Gloda.defineAttribute.
+   */
+  populateKnownFreeTags() {
+    for (let attr of this.objectNounOfAttributes) {
+      let attrDB = attr.dbDef;
+      for (let param in attrDB.parameterBindings) {
+        this.getFreeTag(param);
+      }
+    }
+  },
+
+  // Interning map: tag name -> FreeTag instance.
+  knownFreeTags: {},
+  /**
+   * Return the (interned) FreeTag for aTagName, creating it — and notifying
+   * listeners — if this is the first time we see the name.
+   */
+  getFreeTag(aTagName) {
+    let tag = this.knownFreeTags[aTagName];
+    if (!tag) {
+      tag = this.knownFreeTags[aTagName] = new FreeTag(aTagName);
+      for (let listener of this._listeners) {
+        listener.onFreeTagAdded(tag);
+      }
+    }
+    return tag;
+  },
+
+  /**
+   * Locale-aware ordering by tag name; null/undefined sort after non-null
+   * values.
+   */
+  comparator(a, b) {
+    if (a == null) {
+      if (b == null) {
+        return 0;
+      }
+      return 1;
+    } else if (b == null) {
+      return -1;
+    }
+    return a.name.localeCompare(b.name);
+  },
+
+  // Persist as [parameter, value]: the tag name is the parameter and there
+  // is no value component.
+  toParamAndValue(aTag) {
+    return [aTag.name, null];
+  },
+
+  toJSON(aTag) {
+    return aTag.name;
+  },
+  fromJSON(aTagName) {
+    return this.getFreeTag(aTagName);
+  },
+};
+
+Gloda.defineNoun(FreeTagNoun);
diff --git a/comm/mailnews/db/gloda/modules/NounMimetype.jsm b/comm/mailnews/db/gloda/modules/NounMimetype.jsm
new file mode 100644
index 0000000000..fef1a33bc7
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/NounMimetype.jsm
@@ -0,0 +1,582 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["MimeType", "MimeTypeNoun"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+var LOG = console.createInstance({
+ prefix: "gloda.noun.mimetype",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+var CategoryStringMap = {};
+
+/**
+ * Input data structure to allow us to build a fast mapping from mime type to
+ * category name. The keys in MimeCategoryMapping are the top-level
+ * categories. Each value can either be a list of MIME types or a nested
+ * object which recursively defines sub-categories. We currently do not use
+ * the sub-categories. They are just there to try and organize the MIME types
+ * a little and open the door to future enhancements.
+ *
+ * Do _not_ add additional top-level categories unless you have added
+ * corresponding entries to gloda.properties under the
+ * "gloda.mimetype.category" branch and are making sure localizers are aware
+ * of the change and have time to localize it.
+ *
+ * Entries with wildcards in them are part of a fallback strategy by the
+ * |mimeTypeNoun| and do not actually use regular expressions or anything like
+ * that. Everything is a straight string lookup. Given "foo/bar" we look for
+ * "foo/bar", then "foo/*", and finally "*".
+ */
+var MimeCategoryMapping = {
+ archives: [
+ "application/java-archive",
+ "application/x-java-archive",
+ "application/x-jar",
+ "application/x-java-jnlp-file",
+
+ "application/mac-binhex40",
+ "application/vnd.ms-cab-compressed",
+
+ "application/x-arc",
+ "application/x-arj",
+ "application/x-compress",
+ "application/x-compressed-tar",
+ "application/x-cpio",
+ "application/x-cpio-compressed",
+ "application/x-deb",
+
+ "application/x-bittorrent",
+
+ "application/x-rar",
+ "application/x-rar-compressed",
+ "application/x-7z-compressed",
+ "application/zip",
+ "application/x-zip-compressed",
+ "application/x-zip",
+
+ "application/x-bzip",
+ "application/x-bzip-compressed-tar",
+ "application/x-bzip2",
+ "application/x-gzip",
+ "application/x-tar",
+ "application/x-tar-gz",
+ "application/x-tarz",
+ ],
+ documents: {
+ database: [
+ "application/vnd.ms-access",
+ "application/x-msaccess",
+ "application/msaccess",
+ "application/vnd.msaccess",
+ "application/x-msaccess",
+ "application/mdb",
+ "application/x-mdb",
+
+ "application/vnd.oasis.opendocument.database",
+ ],
+ graphics: [
+ "application/postscript",
+ "application/x-bzpostscript",
+ "application/x-dvi",
+ "application/x-gzdvi",
+
+ "application/illustrator",
+
+ "application/vnd.corel-draw",
+ "application/cdr",
+ "application/coreldraw",
+ "application/x-cdr",
+ "application/x-coreldraw",
+ "image/cdr",
+ "image/x-cdr",
+ "zz-application/zz-winassoc-cdr",
+
+ "application/vnd.oasis.opendocument.graphics",
+ "application/vnd.oasis.opendocument.graphics-template",
+ "application/vnd.oasis.opendocument.image",
+
+ "application/x-dia-diagram",
+ ],
+ presentation: [
+ "application/vnd.ms-powerpoint.presentation.macroenabled.12",
+ "application/vnd.ms-powerpoint.template.macroenabled.12",
+ "application/vnd.ms-powerpoint",
+ "application/powerpoint",
+ "application/mspowerpoint",
+ "application/x-mspowerpoint",
+ "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ "application/vnd.openxmlformats-officedocument.presentationml.template",
+
+ "application/vnd.oasis.opendocument.presentation",
+ "application/vnd.oasis.opendocument.presentation-template",
+ ],
+ spreadsheet: [
+ "application/vnd.lotus-1-2-3",
+ "application/x-lotus123",
+ "application/x-123",
+ "application/lotus123",
+ "application/wk1",
+
+ "application/x-quattropro",
+
+ "application/vnd.ms-excel.sheet.binary.macroenabled.12",
+ "application/vnd.ms-excel.sheet.macroenabled.12",
+ "application/vnd.ms-excel.template.macroenabled.12",
+ "application/vnd.ms-excel",
+ "application/msexcel",
+ "application/x-msexcel",
+ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
+
+ "application/vnd.oasis.opendocument.formula",
+ "application/vnd.oasis.opendocument.formula-template",
+ "application/vnd.oasis.opendocument.chart",
+ "application/vnd.oasis.opendocument.chart-template",
+ "application/vnd.oasis.opendocument.spreadsheet",
+ "application/vnd.oasis.opendocument.spreadsheet-template",
+
+ "application/x-gnumeric",
+ ],
+ wordProcessor: [
+ "application/msword",
+ "application/vnd.ms-word",
+ "application/x-msword",
+ "application/msword-template",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
+ "application/vnd.ms-word.document.macroenabled.12",
+ "application/vnd.ms-word.template.macroenabled.12",
+ "application/x-mswrite",
+ "application/x-pocket-word",
+
+ "application/rtf",
+ "text/rtf",
+
+ "application/vnd.oasis.opendocument.text",
+ "application/vnd.oasis.opendocument.text-master",
+ "application/vnd.oasis.opendocument.text-template",
+ "application/vnd.oasis.opendocument.text-web",
+
+ "application/vnd.wordperfect",
+
+ "application/x-abiword",
+ "application/x-amipro",
+ ],
+ suite: ["application/vnd.ms-works"],
+ },
+ images: ["image/*"],
+ media: {
+ audio: ["audio/*"],
+ video: ["video/*"],
+ container: [
+ "application/ogg",
+
+ "application/smil",
+ "application/vnd.ms-asf",
+ "application/vnd.rn-realmedia",
+ "application/x-matroska",
+ "application/x-quicktime-media-link",
+ "application/x-quicktimeplayer",
+ ],
+ },
+ other: ["*"],
+ pdf: [
+ "application/pdf",
+ "application/x-pdf",
+ "image/pdf",
+ "file/pdf",
+ "application/x-bzpdf",
+ "application/x-gzpdf",
+ ],
+};
+
+/**
+ * Mime type abstraction that exists primarily so we can map mime types to
+ * integer id's.
+ *
+ * Instances of this class should only be retrieved via |MimeTypeNoun|; no one
+ * should ever create an instance directly.
+ */
+function MimeType(aID, aType, aSubType, aFullType, aCategory) {
+  this._id = aID;
+  this._type = aType;
+  this._subType = aSubType;
+  this._fullType = aFullType;
+  this._category = aCategory;
+}
+
+MimeType.prototype = {
+  /**
+   * The integer id we have associated with the mime type. This is stable for
+   * the lifetime of the database, which means that anything in the Gloda
+   * database can use this without fear. Things not persisted in the database
+   * should use the actual string mime type, retrieval via |fullType|.
+   */
+  get id() {
+    return this._id;
+  },
+  /**
+   * The first part of the MIME type; "text/plain" gets you "text".
+   */
+  get type() {
+    return this._type;
+  },
+  /**
+   * One-shot setter: populates _fullType and derives _type/_subType/_category
+   * from it, but only when _fullType is not yet set. This is how instances
+   * get fleshed out when rows are loaded from the mimeTypes table — the
+   * schema maps the "mimeType" column onto "fullType" (see
+   * MimeTypeNoun.schema). Subsequent assignments are ignored.
+   */
+  set fullType(aFullType) {
+    if (!this._fullType) {
+      this._fullType = aFullType;
+      [this._type, this._subType] = this._fullType.split("/");
+      this._category = MimeTypeNoun._getCategoryForMimeType(
+        aFullType,
+        this._type
+      );
+    }
+  },
+  /**
+   * If the |fullType| is "text/plain", subType is "plain".
+   */
+  get subType() {
+    return this._subType;
+  },
+  /**
+   * The full MIME type; "text/plain" returns "text/plain".
+   */
+  get fullType() {
+    return this._fullType;
+  },
+  toString() {
+    // NOTE: MimeTypeNoun._processMimeTypes relies on this returning the full
+    // type string when a MimeType is used as an object key.
+    return this.fullType;
+  },
+
+  /**
+   * @returns the category we believe this mime type belongs to. This category
+   *     name should never be shown directly to the user. Instead, use
+   *     |categoryLabel| to get the localized name for the category. The
+   *     category mapping comes from the MimeCategoryMapping table defined
+   *     above in this file.
+   */
+  get category() {
+    return this._category;
+  },
+  /**
+   * @returns The localized label for the category from gloda.properties in the
+   *     "gloda.mimetype.category.CATEGORY.label" definition using the value
+   *     from |category|. (CategoryStringMap is populated by
+   *     MimeTypeNoun._loadCategoryMapping.)
+   */
+  get categoryLabel() {
+    return CategoryStringMap[this._category];
+  },
+};
+
+/**
+ * Mime type noun provider.
+ *
+ * The set of MIME Types is sufficiently limited that we can keep them all in
+ * memory. In theory it is also sufficiently limited that we could use the
+ * parameter mechanism in the database. However, it is more efficient, for
+ * both space and performance reasons, to store the specific mime type as a
+ * value. For future-proofing reasons, we opt to use a database table to
+ * persist the mapping rather than a hard-coded list. A preferences file or
+ * other text file would arguably suffice, but for consistency reasons, the
+ * database is not a bad thing.
+ */
+var MimeTypeNoun = {
+  name: "mime-type",
+  clazz: MimeType, // gloda supports clazz as well as class
+  allowsArbitraryAttrs: false,
+
+  _strings: Services.strings.createBundle(
+    "chrome://messenger/locale/gloda.properties"
+  ),
+
+  // note! update test_noun_mimetype if you change our internals!
+  // Map: full MIME type string -> MimeType instance.
+  _mimeTypes: {},
+  // Map: integer id -> MimeType instance.
+  _mimeTypesByID: {},
+  // Each category gets a contiguous block of this many integer ids; the
+  // block boundaries let queryHelpers.Category do ranged id queries.
+  TYPE_BLOCK_SIZE: 16384,
+  // Map: category name -> highest id allocated in that category's block.
+  _mimeTypeHighID: {},
+  // Map: category name -> [low, high] dummy MimeType pair (see
+  // _createCategoryDummies).
+  _mimeTypeRangeDummyObjects: {},
+  // Highest id seen/allocated across all categories.
+  _highID: 0,
+
+  // we now use the exciting 'schema' mechanism of defineNoun to get our table
+  // created for us, plus some helper methods that we simply don't use.
+  schema: {
+    name: "mimeTypes",
+    columns: [
+      ["id", "INTEGER PRIMARY KEY", "_id"],
+      ["mimeType", "TEXT", "fullType"],
+    ],
+  },
+
+  _init() {
+    LOG.debug("loading MIME types");
+    this._loadCategoryMapping();
+    this._loadMimeTypes();
+  },
+
+  /**
+   * A map from MIME type to category name.
+   */
+  _mimeTypeToCategory: {},
+  /**
+   * Load the contents of MimeCategoryMapping and populate
+   * _mimeTypeToCategory (each value is the list of nested category names,
+   * outermost first) and CategoryStringMap (top-level category name ->
+   * localized label).
+   */
+  _loadCategoryMapping() {
+    let mimeTypeToCategory = this._mimeTypeToCategory;
+
+    // Recursive walk over the (possibly nested) mapping structure.
+    function procMapObj(aSubTree, aCategories) {
+      for (let key in aSubTree) {
+        let value = aSubTree[key];
+        // Add this category to our nested categories list. Use concat since
+        // the list will be long-lived and each list needs to be distinct.
+        let categories = aCategories.concat();
+        categories.push(key);
+
+        // Only top-level categories get localized labels.
+        if (categories.length == 1) {
+          CategoryStringMap[key] = MimeTypeNoun._strings.GetStringFromName(
+            "gloda.mimetype.category." + key + ".label"
+          );
+        }
+
+        // Is it an array? If so, just process this depth
+        if (Array.isArray(value)) {
+          for (let mimeTypeStr of value) {
+            mimeTypeToCategory[mimeTypeStr] = categories;
+          }
+        } else {
+          // it's yet another sub-tree branch
+          procMapObj(value, categories);
+        }
+      }
+    }
+    procMapObj(MimeCategoryMapping, []);
+  },
+
+  /**
+   * Lookup the category associated with a MIME type given its full type and
+   *  type. (So, "foo/bar" and "foo" for "foo/bar".) Falls back from exact
+   *  match to "type/*" and finally to the "*" catch-all category.
+   */
+  _getCategoryForMimeType(aFullType, aType) {
+    if (aFullType in this._mimeTypeToCategory) {
+      return this._mimeTypeToCategory[aFullType][0];
+    }
+    let wildType = aType + "/*";
+    if (wildType in this._mimeTypeToCategory) {
+      return this._mimeTypeToCategory[wildType][0];
+    }
+    return this._mimeTypeToCategory["*"][0];
+  },
+
+  /**
+   * In order to allow the gloda query mechanism to avoid hitting the database,
+   * we need to either define the noun type as cacheable and have a super-large
+   * cache or simply have a collection with every MIME type in it that stays
+   * alive forever.
+   * This is that collection. It is initialized by |_loadMimeTypes|. As new
+   * MIME types are created, we add them to the collection.
+   */
+  _universalCollection: null,
+
+  /**
+   * Kick off a query of all the mime types in our database, leaving
+   * |_processMimeTypes| to actually do the legwork.
+   * NOTE(review): this.id is not defined in this file; presumably assigned
+   * by Gloda.defineNoun before _init runs — confirm.
+   */
+  _loadMimeTypes() {
+    // get all the existing mime types!
+    let query = Gloda.newQuery(this.id);
+    let nullFunc = function () {};
+    this._universalCollection = query.getCollection(
+      {
+        onItemsAdded: nullFunc,
+        onItemsModified: nullFunc,
+        onItemsRemoved: nullFunc,
+        onQueryCompleted(aCollection) {
+          MimeTypeNoun._processMimeTypes(aCollection.items);
+        },
+      },
+      null
+    );
+  },
+
+  /**
+   * For the benefit of our Category queryHelper, we need dummy ranged objects
+   * that cover the numerical address space allocated to the category. We
+   * can't use a real object for the upper-bound because the upper-bound is
+   * constantly growing and there is the chance the query might get persisted,
+   * which means these values need to be long-lived. Unfortunately, our
+   * solution to this problem (dummy objects) complicates the second case,
+   * should it ever occur. (Because the dummy objects cannot be persisted
+   * on their own... but there are other issues that will come up that we will
+   * just have to deal with then.)
+   */
+  _createCategoryDummies(aId, aCategory) {
+    // Round aId down to the start of its TYPE_BLOCK_SIZE block; the pair
+    // spans [blockBottom, blockTop] inclusive.
+    let blockBottom = aId - (aId % this.TYPE_BLOCK_SIZE);
+    let blockTop = blockBottom + this.TYPE_BLOCK_SIZE - 1;
+    this._mimeTypeRangeDummyObjects[aCategory] = [
+      new MimeType(
+        blockBottom,
+        "!category-dummy!",
+        aCategory,
+        "!category-dummy!/" + aCategory,
+        aCategory
+      ),
+      new MimeType(
+        blockTop,
+        "!category-dummy!",
+        aCategory,
+        "!category-dummy!/" + aCategory,
+        aCategory
+      ),
+    ];
+  },
+
+  /**
+   * Index the MimeType instances loaded from the database and track each
+   * category's highest allocated id (creating the range dummies the first
+   * time a category is seen).
+   */
+  _processMimeTypes(aMimeTypes) {
+    for (let mimeType of aMimeTypes) {
+      if (mimeType.id > this._highID) {
+        this._highID = mimeType.id;
+      }
+      // Object keys are strings, so this relies on MimeType.toString()
+      // returning fullType; the key ends up being the full type string.
+      this._mimeTypes[mimeType] = mimeType;
+      this._mimeTypesByID[mimeType.id] = mimeType;
+
+      let blockHighID =
+        mimeType.category in this._mimeTypeHighID
+          ? this._mimeTypeHighID[mimeType.category]
+          : undefined;
+      // create the dummy range objects
+      if (blockHighID === undefined) {
+        this._createCategoryDummies(mimeType.id, mimeType.category);
+      }
+      if (blockHighID === undefined || mimeType.id > blockHighID) {
+        this._mimeTypeHighID[mimeType.category] = mimeType.id;
+      }
+    }
+  },
+
+  /**
+   * Create, index, and persist a MimeType for a full type string we have not
+   * seen before. If the type's category has no id block yet, a fresh
+   * TYPE_BLOCK_SIZE-aligned block past _highID is allocated for it.
+   */
+  _addNewMimeType(aMimeTypeName) {
+    let [typeName, subTypeName] = aMimeTypeName.split("/");
+    let category = this._getCategoryForMimeType(aMimeTypeName, typeName);
+
+    if (!(category in this._mimeTypeHighID)) {
+      let nextID =
+        this._highID -
+        (this._highID % this.TYPE_BLOCK_SIZE) +
+        this.TYPE_BLOCK_SIZE;
+      this._mimeTypeHighID[category] = nextID;
+      this._createCategoryDummies(nextID, category);
+    }
+
+    let nextID = ++this._mimeTypeHighID[category];
+
+    let mimeType = new MimeType(
+      nextID,
+      typeName,
+      subTypeName,
+      aMimeTypeName,
+      category
+    );
+    if (mimeType.id > this._highID) {
+      this._highID = mimeType.id;
+    }
+
+    this._mimeTypes[aMimeTypeName] = mimeType;
+    this._mimeTypesByID[nextID] = mimeType;
+
+    // As great as the gloda extension mechanisms are, we don't think it makes
+    // a lot of sense to use them in this case. So we directly trigger object
+    // insertion without any of the grokNounItem stuff.
+    // NOTE(review): objInsert and datastore are not defined in this file;
+    // presumably attached by defineNoun's schema machinery — confirm.
+    this.objInsert.call(this.datastore, mimeType);
+    // Since we bypass grokNounItem and its fun, we need to explicitly add the
+    // new MIME-type to _universalCollection ourselves. Don't try this at
+    // home, kids.
+    this._universalCollection._onItemsAdded([mimeType]);
+
+    return mimeType;
+  },
+
+  /**
+   * Map a mime type to a |MimeType| instance, creating it if necessary.
+   *
+   * @param aMimeTypeName The mime type. It may optionally include parameters
+   *     (which will be ignored). A mime type is of the form "type/subtype".
+   *     A type with parameters would look like 'type/subtype; param="value"'.
+   */
+  getMimeType(aMimeTypeName) {
+    // first, lose any parameters
+    let semiIndex = aMimeTypeName.indexOf(";");
+    if (semiIndex >= 0) {
+      aMimeTypeName = aMimeTypeName.substring(0, semiIndex);
+    }
+    // Normalize: MIME types are matched case-insensitively.
+    aMimeTypeName = aMimeTypeName.trim().toLowerCase();
+
+    if (aMimeTypeName in this._mimeTypes) {
+      return this._mimeTypes[aMimeTypeName];
+    }
+    return this._addNewMimeType(aMimeTypeName);
+  },
+
+  /**
+   * Query helpers contribute additional functions to the query object for the
+   * attributes that use the noun type. For example, we define Category, so
+   * for the "attachmentTypes" attribute, "attachmentTypesCategory" would be
+   * exposed.
+   */
+  queryHelpers: {
+    /**
+     * Query for MIME type categories based on one or more MIME type objects
+     * passed in. We want the range to span the entire block allocated to the
+     * category.
+     *
+     * @param aAttrDef The attribute that is using us.
+     * @param aArguments The actual arguments object; each argument's
+     *     .category selects the id block to range over.
+     */
+    Category(aAttrDef, aArguments) {
+      let rangePairs = [];
+      // If there are no arguments then we want to fall back to the 'in'
+      // constraint which matches on any attachment.
+      if (!aArguments || aArguments.length == 0) {
+        return this._inConstraintHelper(aAttrDef, []);
+      }
+
+      for (let iArg = 0; iArg < aArguments.length; iArg++) {
+        let arg = aArguments[iArg];
+        rangePairs.push(MimeTypeNoun._mimeTypeRangeDummyObjects[arg.category]);
+      }
+      return this._rangedConstraintHelper(aAttrDef, rangePairs);
+    },
+  },
+
+  /**
+   * Locale-aware ordering by full type string; null/undefined sort after
+   * non-null values.
+   */
+  comparator(a, b) {
+    if (a == null) {
+      if (b == null) {
+        return 0;
+      }
+      return 1;
+    } else if (b == null) {
+      return -1;
+    }
+    return a.fullType.localeCompare(b.fullType);
+  },
+
+  // Persist as [parameter, value]: no parameter; the integer id is the value.
+  toParamAndValue(aMimeType) {
+    return [null, aMimeType.id];
+  },
+  toJSON(aMimeType) {
+    return aMimeType.id;
+  },
+  fromJSON(aMimeTypeID) {
+    return this._mimeTypesByID[aMimeTypeID];
+  },
+};
+Gloda.defineNoun(MimeTypeNoun, GlodaConstants.NOUN_MIME_TYPE);
+try {
+  MimeTypeNoun._init();
+} catch (ex) {
+  LOG.error(
+    "problem init-ing: " + ex.fileName + ":" + ex.lineNumber + ": " + ex
+  );
+}
diff --git a/comm/mailnews/db/gloda/modules/NounTag.jsm b/comm/mailnews/db/gloda/modules/NounTag.jsm
new file mode 100644
index 0000000000..1e5db85a42
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/NounTag.jsm
@@ -0,0 +1,97 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["TagNoun"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+/**
+ * @namespace Tag noun provider for built-in message tags (nsIMsgTag), backed
+ *  by the message tag service. The tag *key* is the stable identifier; the
+ *  user-visible name is the tag's |tag| property.
+ */
+var TagNoun = {
+  name: "tag",
+  // Note: clazz here is an XPCOM interface rather than a JS constructor.
+  clazz: Ci.nsIMsgTag,
+  // The tag key is stored as the attribute *parameter* (see toParamAndValue).
+  usesParameter: true,
+  allowsArbitraryAttrs: false,
+  idAttr: "key",
+  _msgTagService: null,
+  // Map: tag key -> nsIMsgTag; rebuilt by _updateTagMap.
+  _tagMap: null,
+  // Cached array of all tags, in the service's order.
+  _tagList: null,
+
+  _init() {
+    // This reference can be substituted for testing purposes.
+    this._msgTagService = MailServices.tags;
+    this._updateTagMap();
+  },
+
+  getAllTags() {
+    if (this._tagList == null) {
+      this._updateTagMap();
+    }
+    return this._tagList;
+  },
+
+  /**
+   * Re-fetch the tag list from the tag service and rebuild the key->tag map.
+   */
+  _updateTagMap() {
+    this._tagMap = {};
+    let tagArray = (this._tagList = this._msgTagService.getAllTags());
+    for (let iTag = 0; iTag < tagArray.length; iTag++) {
+      let tag = tagArray[iTag];
+      this._tagMap[tag.key] = tag;
+    }
+  },
+
+  /**
+   * Locale-aware ordering by user-visible tag name; null/undefined sort
+   * after non-null values.
+   */
+  comparator(a, b) {
+    if (a == null) {
+      if (b == null) {
+        return 0;
+      }
+      return 1;
+    } else if (b == null) {
+      return -1;
+    }
+    return a.tag.localeCompare(b.tag);
+  },
+  userVisibleString(aTag) {
+    return aTag.tag;
+  },
+
+  // we cannot be an attribute value
+
+  // Persist as [parameter, value]: the tag key is the parameter; no value.
+  toParamAndValue(aTag) {
+    return [aTag.key, null];
+  },
+  toJSON(aTag) {
+    return aTag.key;
+  },
+  fromJSON(aTagKey, aIgnored) {
+    let tag = this._tagMap.hasOwnProperty(aTagKey)
+      ? this._tagMap[aTagKey]
+      : undefined;
+    // you will note that if a tag is removed, we are unable to aggressively
+    // deal with this. we are okay with this, but it would be nice to be able
+    // to listen to the message tag service to know when we should rebuild.
+    // If the key is valid but unknown, a tag was likely added since our last
+    // snapshot: refresh the map lazily and retry.
+    if (tag === undefined && this._msgTagService.isValidKey(aTagKey)) {
+      this._updateTagMap();
+      tag = this._tagMap[aTagKey];
+    }
+    // we intentionally are returning undefined if the tag doesn't exist
+    return tag;
+  },
+  /**
+   * Convenience helper to turn a tag key into a tag name.
+   */
+  getTag(aTagKey) {
+    return this.fromJSON(aTagKey);
+  },
+};
+
+TagNoun._init();
+Gloda.defineNoun(TagNoun, GlodaConstants.NOUN_TAG);
diff --git a/comm/mailnews/db/gloda/modules/SuffixTree.jsm b/comm/mailnews/db/gloda/modules/SuffixTree.jsm
new file mode 100644
index 0000000000..239993e180
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/SuffixTree.jsm
@@ -0,0 +1,381 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["SuffixTree", "MultiSuffixTree"];
+
+/**
+ * Given a list of strings and a corresponding map of items that those strings
+ * correspond to, build a suffix tree.
+ */
+function MultiSuffixTree(aStrings, aItems) {
+  if (aStrings.length != aItems.length) {
+    throw new Error("Array lengths need to be the same.");
+  }
+
+  // Concatenate all the strings into one big string (no separator; matches
+  // that would span two source strings are filtered out at query time by
+  // _resultGather's boundary check) and record, per source string, the
+  // flattened triple [start offset, end offset (exclusive), item].
+  let s = "";
+  let offsetsToItems = [];
+  let lastLength = 0;
+  for (let i = 0; i < aStrings.length; i++) {
+    s += aStrings[i];
+    offsetsToItems.push(lastLength, s.length, aItems[i]);
+    lastLength = s.length;
+  }
+
+  // Shares SuffixTree.prototype (assigned at the bottom of this file).
+  this._construct(s);
+  this._offsetsToItems = offsetsToItems;
+  this._numItems = aItems.length;
+}
+
+/**
+ * @class A suffix-tree node. |start|/|end| describe the substring of the
+ *  source string labeling the edge that leads *into* this state (end is
+ *  exclusive); |suffix| is the Ukkonen suffix link.
+ */
+function State(aStartIndex, aEndIndex, aSuffix) {
+  this.start = aStartIndex;
+  this.end = aEndIndex;
+  this.suffix = aSuffix;
+}
+
+/**
+ * Since objects are basically hash-tables anyways, we simply create an
+ * attribute whose name is the first letter of the edge string. (So, the
+ * edge string can conceptually be a multi-letter string, but since we would
+ * split it were there any ambiguity, it's okay to just use the single letter.)
+ * This avoids having to update the attribute name or worry about tripping our
+ * implementation up.
+ */
+State.prototype = {
+  // An explicit state has an empty (zero-length) incoming edge label.
+  get isExplicit() {
+    // our end is not inclusive...
+    return this.end <= this.start;
+  },
+  // An implicit state sits part-way along a non-empty edge label.
+  get isImplicit() {
+    // our end is not inclusive...
+    return this.end > this.start;
+  },
+
+  // Length of the incoming edge label.
+  get length() {
+    return this.end - this.start;
+  },
+
+  toString() {
+    return (
+      "[Start: " +
+      this.start +
+      " End: " +
+      this.end +
+      (this.suffix ? " non-null suffix]" : " null suffix]")
+    );
+  },
+};
+
+/**
+ * Suffix tree implemented using Ukkonen's algorithm (online, linear-time
+ * construction).
+ *
+ * @class
+ */
+function SuffixTree(aStr) {
+  this._construct(aStr);
+}
+
+/**
+ * States are nodes of the tree; each state's single-character properties are
+ * its outgoing edges (see the State.prototype comment). The methods below
+ * implement Ukkonen's construction (_update/_canonize/_testAndSplit), plus
+ * substring search (findMatches/_resultGather) and a debug dump.
+ */
+SuffixTree.prototype = {
+  /**
+   * Find all items matching the provided substring.
+   * Walks edges from the root consuming aSubstring; if the walk survives,
+   * every leaf below the final state corresponds to a suffix containing the
+   * pattern, and _resultGather maps those back to items.
+   */
+  findMatches(aSubstring) {
+    let results = [];
+    let state = this._root;
+    let index = 0;
+    let end = aSubstring.length;
+    while (index < end) {
+      state = state[aSubstring[index]];
+      // bail if there was no edge
+      if (state === undefined) {
+        return results;
+      }
+      // bail if the portion of the edge we traversed is not equal to that
+      // portion of our pattern
+      let actualTraverseLength = Math.min(state.length, end - index);
+      if (
+        this._str.substring(state.start, state.start + actualTraverseLength) !=
+        aSubstring.substring(index, index + actualTraverseLength)
+      ) {
+        return results;
+      }
+      index += state.length;
+    }
+
+    // state should now be the node which itself and all its children match...
+    // The delta is to adjust us to the offset of the last letter of our match;
+    // the edge we traversed to get here may have found us traversing more
+    // than we wanted.
+    // index - end captures the over-shoot of the edge traversal,
+    // index - end + 1 captures the fact that we want to find the last letter
+    // that matched, not just the first letter beyond it
+    // However, if this state is a leaf node (end == 'infinity'), then 'end'
+    // isn't describing an edge at all and we want to avoid accounting for it.
+    let delta;
+    /*
+    if (state.end != this._infinity)
+      //delta = index - end + 1;
+      delta = end - (index - state.length);
+    else */
+    delta = index - state.length - end + 1;
+
+    this._resultGather(state, results, {}, end, delta, true);
+    return results;
+  },
+
+  /**
+   * Recursively collect the items whose source strings fully contain the
+   * matched pattern, de-duplicating via aPresence (keyed by source-string
+   * start offset).
+   *
+   * @param aState Subtree root to gather from.
+   * @param aResults Output array of items.
+   * @param aPresence Set of source-string start offsets already reported.
+   * @param aPatLength Length of the matched pattern.
+   * @param aDelta Offset correction accumulated along the path walked so far.
+   * @param alreadyAdjusted Currently unused (see the commented-out
+   *  adjustment in the recursive call below).
+   */
+  _resultGather(
+    aState,
+    aResults,
+    aPresence,
+    aPatLength,
+    aDelta,
+    alreadyAdjusted
+  ) {
+    // find the item that this state originated from based on the state's
+    // start character. offsetToItem holds [string start index, string end
+    // index (exclusive), item reference]. So we want to binary search to
+    // find the string whose start/end index contains the state's start index.
+    let low = 0;
+    let high = this._numItems - 1;
+    let mid, stringStart, stringEnd;
+
+    let patternLast = aState.start - aDelta;
+    while (low <= high) {
+      mid = low + Math.floor((high - low) / 2); // excessive, especially with js nums
+      stringStart = this._offsetsToItems[mid * 3];
+      let startDelta = stringStart - patternLast;
+      stringEnd = this._offsetsToItems[mid * 3 + 1];
+      let endDelta = stringEnd - patternLast;
+      if (startDelta > 0) {
+        high = mid - 1;
+      } else if (endDelta <= 0) {
+        low = mid + 1;
+      } else {
+        break;
+      }
+    }
+
+    // - The match occurred completely inside a source string.  Success.
+    // - The match spans more than one source strings, and is therefore not
+    //   a match.
+
+    // at this point, we have located the origin string that corresponds to the
+    // start index of this state.
+    // - The match terminated with the end of the preceding string, and does
+    //   not match us at all.  We, and potentially our children, are merely
+    //   serving as a unique terminal.
+    // - The
+
+    let patternFirst = patternLast - (aPatLength - 1);
+
+    // Only report when the whole pattern lies inside this source string
+    // (filters out matches spanning the separator-less concatenation).
+    if (patternFirst >= stringStart) {
+      if (!(stringStart in aPresence)) {
+        aPresence[stringStart] = true;
+        aResults.push(this._offsetsToItems[mid * 3 + 2]);
+      }
+    }
+
+    // bail if we had it coming OR
+    // if the result terminates at/part-way through this state, meaning any
+    //  of its children are not going to be actual results, just hangers
+    //  on.
+    /*
+    if (bail || (end <= aState.end)) {
+dump("  bailing! (bail was: " + bail + ")\n");
+      return;
+    }
+*/
+    // process our children...
+    for (let key in aState) {
+      // edges have attributes of length 1...
+      if (key.length == 1) {
+        let statePrime = aState[key];
+        this._resultGather(
+          statePrime,
+          aResults,
+          aPresence,
+          aPatLength,
+          aDelta + aState.length, // (alreadyAdjusted ? 0 : aState.length),
+          false
+        );
+      }
+    }
+  },
+
+  /**
+   * Given a reference 'pair' of a state and a string (may be 'empty'=explicit,
+   * which means no work to do and we return immediately) follow that state
+   * (and then the successive states)'s transitions until we run out of
+   * transitions.  This happens either when we find an explicit state, or
+   * find ourselves partially along an edge (conceptually speaking).  In
+   * the partial case, we return the state prior to the edge traversal.
+   * (The information about the 'edge' is contained on its target State;
+   * we can do this because a state is only referenced by one other state.)
+   */
+  _canonize(aState, aStart, aEnd) {
+    if (aEnd <= aStart) {
+      return [aState, aStart];
+    }
+
+    let statePrime;
+    // we treat an aState of null as 'bottom', which has transitions for every
+    // letter in the alphabet to 'root'.  rather than create all those
+    // transitions, we special-case here.
+    if (aState === null) {
+      statePrime = this._root;
+    } else {
+      statePrime = aState[this._str[aStart]];
+    }
+    while (statePrime.length <= aEnd - aStart) {
+      // (no 1 adjustment required)
+      aStart += statePrime.length;
+      aState = statePrime;
+      if (aStart < aEnd) {
+        statePrime = aState[this._str[aStart]];
+      }
+    }
+    return [aState, aStart];
+  },
+
+  /**
+   * Given a reference 'pair' whose state may or may not be explicit (and for
+   * which we will perform the required splitting to make it explicit), test
+   * whether it already possesses a transition corresponding to the provided
+   * character.
+   *
+   * @returns A list of: whether we had to make it explicit, the (potentially)
+   *    new explicit state.
+   */
+  _testAndSplit(aState, aStart, aEnd, aChar) {
+    if (aStart < aEnd) {
+      // it's not explicit
+      let statePrime = aState[this._str[aStart]];
+      let length = aEnd - aStart;
+      if (aChar == this._str[statePrime.start + length]) {
+        return [true, aState];
+      }
+
+      // do splitting... aState -> rState -> statePrime
+      let rState = new State(statePrime.start, statePrime.start + length);
+      aState[this._str[statePrime.start]] = rState;
+      statePrime.start += length;
+      rState[this._str[statePrime.start]] = statePrime;
+      return [false, rState];
+    }
+
+    // it's already explicit
+    if (aState === null) {
+      // bottom case... shouldn't happen, but hey.
+      return [true, aState];
+    }
+    return [aChar in aState, aState];
+  },
+
+  /**
+   * Ukkonen's update step: extend the tree with the character at aIndex,
+   * creating new leaf states and maintaining suffix links along the
+   * boundary path.
+   */
+  _update(aState, aStart, aIndex) {
+    let oldR = this._root;
+    let textAtIndex = this._str[aIndex]; // T sub i (0-based corrected...)
+    // because of the way we store the 'end' value as a one-past form, we do
+    // not need to subtract 1 off of aIndex.
+    let [endPoint, rState] = this._testAndSplit(
+      aState,
+      aStart,
+      aIndex, // no -1
+      textAtIndex
+    );
+    while (!endPoint) {
+      let rPrime = new State(aIndex, this._infinity);
+      rState[textAtIndex] = rPrime;
+      if (oldR !== this._root) {
+        oldR.suffix = rState;
+      }
+      oldR = rState;
+      [aState, aStart] = this._canonize(aState.suffix, aStart, aIndex); // no -1
+      [endPoint, rState] = this._testAndSplit(
+        aState,
+        aStart,
+        aIndex, // no -1
+        textAtIndex
+      );
+    }
+    if (oldR !== this._root) {
+      oldR.suffix = aState;
+    }
+
+    return [aState, aStart];
+  },
+
+  /**
+   * Build the tree for aStr by feeding it one character at a time through
+   * _update/_canonize (standard Ukkonen main loop).
+   */
+  _construct(aStr) {
+    this._str = aStr;
+    // just needs to be longer than the string.
+    this._infinity = aStr.length + 1;
+
+    // this._bottom = new State(0, -1, null);
+    this._root = new State(-1, 0, null); // null === bottom
+    let state = this._root;
+    let start = 0;
+
+    for (let i = 0; i < aStr.length; i++) {
+      [state, start] = this._update(state, start, i); // treat as flowing -1...
+      [state, start] = this._canonize(state, start, i + 1); // 1-length string
+    }
+  },
+
+  /**
+   * Debug helper: recursively print the tree via the Mozilla-global dump().
+   * Long edge labels are truncated to 10 characters.
+   */
+  dump(aState, aIndent, aKey) {
+    if (aState === undefined) {
+      aState = this._root;
+    }
+    if (aIndent === undefined) {
+      aIndent = "";
+      aKey = ".";
+    }
+
+    if (aState.isImplicit) {
+      let snip;
+      if (aState.length > 10) {
+        snip =
+          this._str.slice(
+            aState.start,
+            Math.min(aState.start + 10, this._str.length)
+          ) + "...";
+      } else {
+        snip = this._str.slice(
+          aState.start,
+          Math.min(aState.end, this._str.length)
+        );
+      }
+      dump(
+        aIndent +
+          aKey +
+          ":" +
+          snip +
+          "(" +
+          aState.start +
+          ":" +
+          aState.end +
+          ")\n"
+      );
+    } else {
+      dump(
+        aIndent +
+          aKey +
+          ": (explicit:" +
+          aState.start +
+          ":" +
+          aState.end +
+          ")\n"
+      );
+    }
+    let nextIndent = aIndent + "  ";
+    let keys = Object.keys(aState).filter(c => c.length == 1);
+    for (let key of keys) {
+      this.dump(aState[key], nextIndent, key);
+    }
+  },
+};
+// MultiSuffixTree reuses the exact same machinery; only its constructor
+// (which records _offsetsToItems/_numItems) differs.
+MultiSuffixTree.prototype = SuffixTree.prototype;
diff --git a/comm/mailnews/db/gloda/modules/moz.build b/comm/mailnews/db/gloda/modules/moz.build
new file mode 100644
index 0000000000..54978c24ea
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/moz.build
@@ -0,0 +1,31 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXTRA_JS_MODULES.gloda += [
+ "Collection.jsm",
+ "Everybody.jsm",
+ "Facet.jsm",
+ "Gloda.jsm",
+ "GlodaConstants.jsm",
+ "GlodaContent.jsm",
+ "GlodaDatabind.jsm",
+ "GlodaDataModel.jsm",
+ "GlodaDatastore.jsm",
+ "GlodaExplicitAttr.jsm",
+ "GlodaFundAttr.jsm",
+ "GlodaIndexer.jsm",
+ "GlodaMsgIndexer.jsm",
+ "GlodaMsgSearcher.jsm",
+ "GlodaPublic.jsm",
+ "GlodaQueryClassFactory.jsm",
+ "GlodaSyntheticView.jsm",
+ "GlodaUtils.jsm",
+ "IndexMsg.jsm",
+ "MimeMessage.jsm",
+ "NounFreetag.jsm",
+ "NounMimetype.jsm",
+ "NounTag.jsm",
+ "SuffixTree.jsm",
+]
diff --git a/comm/mailnews/db/gloda/moz.build b/comm/mailnews/db/gloda/moz.build
new file mode 100644
index 0000000000..4c7d35cca3
--- /dev/null
+++ b/comm/mailnews/db/gloda/moz.build
@@ -0,0 +1,13 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DIRS += [
+ "modules",
+ "components",
+]
+
+TEST_DIRS += ["test"]
+
+JAR_MANIFESTS += ["jar.mn"]
diff --git a/comm/mailnews/db/gloda/test/moz.build b/comm/mailnews/db/gloda/test/moz.build
new file mode 100644
index 0000000000..c16fdd2b6c
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/moz.build
@@ -0,0 +1,12 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+XPCSHELL_TESTS_MANIFESTS += ["unit/xpcshell.ini"]
+
+TESTING_JS_MODULES.gloda += [
+ "unit/resources/GlodaQueryHelper.jsm",
+ "unit/resources/GlodaTestHelper.jsm",
+ "unit/resources/GlodaTestHelperFunctions.jsm",
+]
diff --git a/comm/mailnews/db/gloda/test/unit/base_gloda_content.js b/comm/mailnews/db/gloda/test/unit/base_gloda_content.js
new file mode 100644
index 0000000000..d106015b48
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_gloda_content.js
@@ -0,0 +1,226 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent. This may also be implicitly tested by indexing
+ * and fulltext query tests (on messages), but the buck stops here for the
+ * content stuff.
+ *
+ * Currently, we just test quoting removal and that the content turns out right.
+ * We do not actually verify that the quoted blocks are correct (aka we might
+ * screw up eating the greater-than signs). (We have no known consumers who
+ * care about the quoted blocks.)
+ */
+
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+// We need to be able to get at GlodaFundAttr to check the number of whittler
+// invocations.
+var { GlodaFundAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaFundAttr.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* ===== Data ===== */
+var messageInfos = [
+ {
+ name: "no quoting",
+ bode: [
+ [true, "I like hats"],
+ [true, "yes I do!"],
+ [true, "I like hats!"],
+ [true, "How bout you?"],
+ ],
+ },
+ {
+ name: "no quoting, whitespace removal",
+ bode: [
+ [true, "robots are nice..."],
+ [true, ""],
+ [true, "except for the bloodlust"],
+ ],
+ },
+ {
+ name: "bottom posting",
+ bode: [
+ [false, "John wrote:"],
+ [false, "> I like hats"],
+ [false, ">"], // This quoted blank line is significant! no lose!
+ [false, "> yes I do!"],
+ [false, ""],
+ [true, "I do enjoy them as well."],
+ [true, ""],
+ [true, "Bob"],
+ ],
+ },
+ {
+ name: "top posting",
+ bode: [
+ [true, "Hats are where it's at."],
+ [false, ""],
+ [false, "John wrote:"],
+ [false, "> I like hats"],
+ [false, "> yes I do!"],
+ ],
+ },
+ {
+ name: "top posting with trailing whitespace, no intro",
+ bode: [
+ [true, "Hats are where it's at."],
+ [false, ""],
+ [false, "> I like hats"],
+ [false, "> yes I do!"],
+ [false, ""],
+ [false, ""],
+ ],
+ },
+ {
+ name: "interspersed quoting",
+ bode: [
+ [false, "John wrote:"],
+ [false, "> I like hats"],
+ [true, "I concur with this point."],
+ [false, "> yes I do!"],
+ [false, ""],
+ [true, "this point also resonates with me."],
+ [false, ""],
+ [false, "> I like hats!"],
+ [false, "> How bout you?"],
+ [false, ""],
+ [true, "Verily!"],
+ ],
+ },
+ {
+ name: "german style",
+ bode: [
+ [false, "Mark Banner <bugzilla@standard8.plus.invalid> wrote:"],
+ [false, "\xa0"],
+ [
+ false,
+ "> We haven't nailed anything down in detail yet, depending on how we are ",
+ ],
+ [
+ true,
+ "That sounds great and would definitely be appreciated by localizers.",
+ ],
+ [false, ""],
+ ],
+ },
+ {
+ name: "tortuous interference",
+ bode: [
+ [false, "> wrote"],
+ [true, "running all the time"],
+ [false, "> wrote"],
+ [true, "cheese"],
+ [false, ""],
+ ],
+ },
+];
+
+function setup_create_message(info) {
+ info.body = { body: info.bode.map(tupe => tupe[1]).join("\r\n") };
+ info.expected = info.bode
+ .filter(tupe => tupe[0])
+ .map(tupe => tupe[1])
+ .join("\n");
+
+ info._synMsg = msgGen.makeMessage(info);
+}
+
+/**
+ * To save ourselves some lookup trouble, pretend to be a verification
+ * function so we get easy access to the gloda translations of the messages so
+ * we can cram this in various places.
+ */
+function glodaInfoStasher(aSynthMessage, aGlodaMessage) {
+ // Let's not assume an ordering.
+ for (let iMsg = 0; iMsg < messageInfos.length; iMsg++) {
+ if (messageInfos[iMsg]._synMsg == aSynthMessage) {
+ messageInfos[iMsg]._glodaMsg = aGlodaMessage;
+ }
+ }
+}
+
+/**
+ * Actually inject all the messages we created above.
+ */
+async function setup_inject_messages() {
+ // Create the messages from messageInfo.
+ messageInfos.forEach(info => {
+ setup_create_message(info);
+ });
+ let msgSet = new SyntheticMessageSet(messageInfos.map(info => info._synMsg));
+ let folder = await messageInjection.makeEmptyFolder();
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], { verifier: glodaInfoStasher })
+ );
+}
+
+function test_stream_message(info) {
+ // Currying the function for simpler usage with `base_gloda_content_tests`.
+ return () => {
+ let msgHdr = info._glodaMsg.folderMessage;
+
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ verify_message_content(
+ info,
+ info._synMsg,
+ info._glodaMsg,
+ aMsgHdr,
+ aMimeMsg
+ );
+ });
+ };
+}
+
+// Instrument GlodaFundAttr so we can check the count.
+var originalWhittler = GlodaFundAttr.contentWhittle;
+var whittleCount = 0;
+GlodaFundAttr.contentWhittle = function (...aArgs) {
+ whittleCount++;
+ return originalWhittler.apply(this, aArgs);
+};
+
+function verify_message_content(aInfo, aSynMsg, aGlodaMsg, aMsgHdr, aMimeMsg) {
+ if (aMimeMsg == null) {
+ throw new Error(
+ "Message streaming should work; check test_mime_emitter.js first"
+ );
+ }
+
+ whittleCount = 0;
+ let content = Gloda.getMessageContent(aGlodaMsg, aMimeMsg);
+ if (whittleCount != 1) {
+ throw new Error("Whittle count is " + whittleCount + " but should be 1!");
+ }
+
+ Assert.equal(content.getContentString(), aInfo.expected, "Message streamed");
+}
+
+function test_sanity_test_environment() {
+ Assert.ok(msgGen, "Sanity that msgGen is set.");
+ Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+}
+
+var base_gloda_content_tests = [
+ test_sanity_test_environment,
+ setup_inject_messages,
+ ...messageInfos.map(e => {
+ return test_stream_message(e);
+ }),
+];
diff --git a/comm/mailnews/db/gloda/test/unit/base_index_junk.js b/comm/mailnews/db/gloda/test/unit/base_index_junk.js
new file mode 100644
index 0000000000..8529f24a56
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_index_junk.js
@@ -0,0 +1,217 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test indexing in the face of junk classification and junk folders. It is
+ * gloda policy not to index junk mail.
+ *
+ * A similar test that moving things to the trash folder is deletion happens in
+ * base_index_messages.js.
+ */
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var messageInjection;
+
+const SPAM_BODY = { body: "superspam superspam superspam eevil eevil eevil" };
+const HAM_BODY = { body: "ham ham ham nice nice nice happy happy happy" };
+
+/**
+ * Make SPAM_BODY be known as spammy and HAM_BODY be known as hammy.
+ */
+async function setup_spam_filter() {
+ let [, spamSet, hamSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, body: SPAM_BODY },
+ { count: 1, body: HAM_BODY },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([spamSet, hamSet], []));
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ let junkListener = {
+ onMessageClassified() {
+ promiseResolve();
+ },
+ };
+
+ // Ham.
+ dump(`Marking message: ${hamSet.getMsgHdr(0)} as ham.`);
+ MailServices.junk.setMessageClassification(
+ hamSet.getMsgURI(0),
+ null, // no old classification
+ MailServices.junk.GOOD,
+ null,
+ junkListener
+ );
+ await promise;
+
+ // Reset promise for junkListener.
+ promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ // Spam.
+ dump(`Marking message: ${spamSet.getMsgHdr(0)} as spam.`);
+ MailServices.junk.setMessageClassification(
+ spamSet.getMsgURI(0),
+ null, // No old classification.
+ MailServices.junk.JUNK,
+ null,
+ junkListener
+ );
+ await promise;
+}
+
+/**
+ * Because gloda defers indexing until after junk, we should never index a
+ * message that gets marked as junk. So if we inject a message that will
+ * definitely be marked as junk (thanks to use of terms that guarantee it),
+ * the indexer should never index it.
+ *
+ * ONLY THIS TEST ACTUALLY RELIES ON THE BAYESIAN CLASSIFIER.
+ */
+async function test_never_indexes_a_message_marked_as_junk() {
+ // Event-driven does not index junk.
+
+ // Make a message that will be marked as junk from the get-go.
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, body: SPAM_BODY },
+ ]);
+ // Since the message is junk, gloda should not index it!
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Folder sweep does not index junk.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+/**
+ * Reset the training data so the bayesian classifier stops doing things.
+ */
+function reset_spam_filter() {
+ MailServices.junk.resetTrainingData();
+}
+
+/**
+ * Marking a message as junk is equivalent to deleting the message, un-mark it
+ * and it should go back to being a happy message (with the same gloda-id!).
+ *
+ * THIS TEST DOES NOT RELY ON THE BAYESIAN CLASSIFIER.
+ */
+
+async function test_mark_as_junk_is_deletion_mark_as_not_junk_is_exposure() {
+ // Mark as junk is deletion.
+ // Create a message; it should get indexed.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let glodaId = msgSet.glodaMessages[0].id;
+ // Mark it as junk.
+ msgSet.setJunk(true);
+ // It will appear deleted after the event.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+ // Mark as non-junk gets indexed.
+ msgSet.setJunk(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ // We should have reused the existing gloda message so it should keep the id.
+ Assert.equal(glodaId, msgSet.glodaMessages[0].id);
+}
+
+/**
+ * Moving a message to the junk folder is equivalent to deletion. Gloda does
+ * not index junk folders at all, which is why this is an important and
+ * independent determination from marking a message directly as junk.
+ *
+ * The move to the junk folder is performed without using any explicit junk
+ * support code. This ends up being effectively the same underlying logic test
+ * as base_index_messages' test of moving a message to the trash folder.
+ */
+async function test_message_moving_to_junk_folder_is_deletion() {
+ // Create and index two messages in a conversation.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ let convId = msgSet.glodaMessages[0].conversation.id;
+ let firstGlodaId = msgSet.glodaMessages[0].id;
+ let secondGlodaId = msgSet.glodaMessages[1].id;
+
+ // Move them to the junk folder.
+ await messageInjection.moveMessages(
+ msgSet,
+ await messageInjection.getJunkFolder()
+ );
+
+ // They will appear deleted after the events.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // We do not index the junk folder so this should actually make them appear
+ // deleted to an unprivileged query.
+ let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ msgQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgQuery, []);
+
+ // Force a sweep.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // There should be no apparent change as the result of this pass.
+ // (Well, the conversation will die, but we can't see that.)
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // The conversation should be gone.
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(convId);
+ await queryExpect(convQuery, []);
+
+ // The messages should be entirely gone.
+ let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ msgPrivQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgPrivQuery, []);
+}
+
+function test_sanity_test_environment() {
+ Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+ Assert.ok(messageInjection.messageGenerator, "Sanity that msgGen is set.");
+}
+
+/* exported tests */
+var base_index_junk_tests = [
+ test_sanity_test_environment,
+ setup_spam_filter,
+ test_never_indexes_a_message_marked_as_junk,
+ reset_spam_filter,
+ test_mark_as_junk_is_deletion_mark_as_not_junk_is_exposure,
+ test_message_moving_to_junk_folder_is_deletion,
+];
diff --git a/comm/mailnews/db/gloda/test/unit/base_index_messages.js b/comm/mailnews/db/gloda/test/unit/base_index_messages.js
new file mode 100644
index 0000000000..bea2337d7f
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_index_messages.js
@@ -0,0 +1,1461 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests our indexing prowess. This includes both our ability to
+ * properly be triggered by events taking place in thunderbird as well as our
+ * ability to correctly extract/index the right data.
+ * In general, if these tests pass, things are probably working quite well.
+ *
+ * This test has local, IMAP online, IMAP offline, and IMAP online-become-offline
+ * variants. See the test_index_messages_*.js files.
+ *
+ * Things we don't test that you think we might test:
+ * - Full-text search. Happens in query testing.
+ */
+
+var { MailUtils } = ChromeUtils.import("resource:///modules/MailUtils.jsm");
+var { NetUtil } = ChromeUtils.import("resource://gre/modules/NetUtil.jsm");
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { queryExpect, sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var {
+ assertExpectedMessagesIndexed,
+ waitForGlodaIndexer,
+ nukeGlodaCachesAndCollections,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var {
+ configureGlodaIndexing,
+ waitForGlodaDBFlush,
+ waitForIndexingHang,
+ resumeFromSimulatedHang,
+ permuteMessages,
+ makeABCardForAddressPair,
+} = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { PromiseTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/PromiseTestUtils.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { SyntheticMessageSet, SyntheticPartMultiMixed, SyntheticPartLeaf } =
+ ChromeUtils.import("resource://testing-common/mailnews/MessageGenerator.jsm");
+var { TagNoun } = ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+
+// Whether we can expect fulltext results
+var expectFulltextResults = true;
+
+/**
+ * Should we force our folders offline after we have indexed them once. We do
+ * this in the online_to_offline test variant.
+ */
+var goOffline = false;
+
+var messageInjection;
+var msgGen;
+var scenarios;
+
+/* ===== Indexing Basics ===== */
+
+/**
+ * Index a message, wait for a commit, make sure the header gets the property
+ * set correctly. Then modify the message, verify the dirty property shows
+ * up, flush again, and make sure the dirty property goes clean again.
+ */
+async function test_pending_commit_tracker_flushes_correctly() {
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ // Before the flush, there should be no gloda-id property.
+ let msgHdr = msgSet.getMsgHdr(0);
+ // Get it as a string to make sure it's empty rather than possessing a value.
+ Assert.equal(msgHdr.getStringProperty("gloda-id"), "");
+
+ await waitForGlodaDBFlush();
+
+ // After the flush there should be a gloda-id property and it should
+ // equal the gloda id.
+ let gmsg = msgSet.glodaMessages[0];
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), gmsg.id);
+
+ // Make sure no dirty property was written.
+ Assert.equal(msgHdr.getStringProperty("gloda-dirty"), "");
+
+ // Modify the message.
+ msgSet.setRead(true);
+ await waitForGlodaIndexer(msgSet);
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Now there should be a dirty property and it should be 1.
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-dirty"),
+ GlodaMsgIndexer.kMessageDirty
+ );
+
+ // Flush.
+ await waitForGlodaDBFlush();
+
+ // Now dirty should be 0 and the gloda id should still be the same.
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-dirty"),
+ GlodaMsgIndexer.kMessageClean
+ );
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), gmsg.id);
+}
+
+/**
+ * Make sure that PendingCommitTracker causes a msgdb commit to occur so that
+ * if the nsIMsgFolder's msgDatabase attribute has already been nulled
+ * (which is normally how we force a msgdb commit), that the changes to the
+ * header actually hit the disk.
+ */
+async function test_pending_commit_causes_msgdb_commit() {
+ // New message, index it.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ // Force the msgDatabase closed; the sqlite commit will not yet have occurred.
+ messageInjection.getRealInjectionFolder(folder).msgDatabase = null;
+ // Make the commit happen, this causes the header to get set.
+ await waitForGlodaDBFlush();
+
+ // Force a GC. This will kill off the header and the database, losing data
+ // if we are not protecting it.
+ Cu.forceGC();
+
+ // Now retrieve the header and make sure it has the gloda id set!
+ let msgHdr = msgSet.getMsgHdr(0);
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-id"),
+ msgSet.glodaMessages[0].id
+ );
+}
+
+/**
+ * Give the indexing sweep a workout.
+ *
+ * This includes:
+ * - Basic indexing sweep across never-before-indexed folders.
+ * - Indexing sweep across folders with just some changes.
+ * - Filthy pass.
+ */
+async function test_indexing_sweep() {
+ // -- Never-before-indexed folders.
+ // Turn off event-driven indexing.
+ configureGlodaIndexing({ event: false });
+
+ let [[folderA], setA1, setA2] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 3 }, { count: 2 }]
+ );
+ let [, setB1, setB2] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 3 },
+ { count: 2 },
+ ]);
+ let [[folderC], setC1, setC2] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 3 }, { count: 2 }]
+ );
+
+ // Make sure that event-driven job gets nuked out of existence
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // Turn on event-driven indexing again; this will trigger a sweep.
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([setA1, setA2, setB1, setB2, setC1, setC2])
+ );
+
+ // -- Folders with some changes, pending commits.
+ // Indexing off.
+ configureGlodaIndexing({ event: false });
+
+ setA1.setRead(true);
+ setB2.setRead(true);
+
+ // Indexing on, killing all outstanding jobs, trigger sweep.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA1, setB2]));
+
+ // -- Folders with some changes, no pending commits.
+ // Force a commit to clear out our pending commits.
+ await waitForGlodaDBFlush();
+ // Indexing off.
+ configureGlodaIndexing({ event: false });
+
+ setA2.setRead(true);
+ setB1.setRead(true);
+
+ // Indexing on, killing all outstanding jobs, trigger sweep.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA2, setB1]));
+
+ // -- Filthy foldering indexing.
+ // Just mark the folder filthy and make sure that we reindex everyone.
+ // IMPORTANT! The trick of marking the folder filthy only works because
+ // we flushed/committed the database above; the PendingCommitTracker
+ // is not aware of bogus filthy-marking of folders.
+ // We leave the verification of the implementation details to
+ // test_index_sweep_folder.js.
+ let glodaFolderC = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folderC)
+ );
+ // Marked gloda folder dirty.
+ glodaFolderC._dirtyStatus = glodaFolderC.kFolderFilthy;
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setC1, setC2]));
+
+ // -- Forced folder indexing.
+ var callbackInvoked = false;
+ GlodaMsgIndexer.indexFolder(
+ messageInjection.getRealInjectionFolder(folderA),
+ {
+ force: true,
+ callback() {
+ callbackInvoked = true;
+ },
+ }
+ );
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA1, setA2]));
+ Assert.ok(callbackInvoked);
+}
+
+/**
+ * We used to screw up and downgrade filthy folders to dirty if we saw an event
+ * happen in the folder before we got to the folder; this tests that we no
+ * longer do that.
+ */
+async function test_event_driven_indexing_does_not_mess_with_filthy_folders() {
+ // Add a folder with a message.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Fake marking the folder filthy.
+ let glodaFolder = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folder)
+ );
+ glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;
+
+ // Generate an event in the folder.
+ msgSet.setRead(true);
+ // Make sure the indexer did not do anything and the folder is still filthy.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+ Assert.equal(glodaFolder._dirtyStatus, glodaFolder.kFolderFilthy);
+ // Also, the message should not have actually gotten marked dirty.
+ Assert.equal(msgSet.getMsgHdr(0).getUint32Property("gloda-dirty"), 0);
+
+ // Let's make the message un-read again for consistency with the gloda state.
+ msgSet.setRead(false);
+ // Make the folder dirty and let an indexing sweep take care of this so we
+ // don't get extra events in subsequent tests.
+ glodaFolder._dirtyStatus = glodaFolder.kFolderDirty;
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // The message won't get indexed though.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+async function test_indexing_never_priority() {
+ // Add a folder with a bunch of messages.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Index it, and augment the msgSet with the glodaMessages array
+ // for later use by sqlExpectCount.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ // Explicitly tell gloda to never index this folder.
+ let XPCOMFolder = messageInjection.getRealInjectionFolder(folder);
+ let glodaFolder = Gloda.getFolderForFolder(XPCOMFolder);
+ GlodaMsgIndexer.setFolderIndexingPriority(
+ XPCOMFolder,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Verify that the setter and getter do the right thing.
+ Assert.equal(
+ glodaFolder.indexingPriority,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Check that existing message is marked as deleted.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // Make sure the deletion hit the database.
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
+ glodaFolder.id,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Add another message.
+ await messageInjection.makeNewSetsInFolders([folder], [{ count: 1 }]);
+
+ // Make sure that indexing returns nothing.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+async function test_setting_indexing_priority_never_while_indexing() {
+ if (!messageInjection.messageInjectionIsLocal()) {
+ return;
+ }
+
+ // Configure the gloda indexer to hang while streaming the message.
+ configureGlodaIndexing({ hangWhile: "streaming" });
+
+ // Create a folder with a message inside.
+ let [[folder]] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ await waitForIndexingHang();
+
+ // Explicitly tell gloda to never index this folder.
+ let XPCOMFolder = messageInjection.getRealInjectionFolder(folder);
+ let glodaFolder = Gloda.getFolderForFolder(XPCOMFolder);
+ GlodaMsgIndexer.setFolderIndexingPriority(
+ XPCOMFolder,
+ glodaFolder.kIndexingNeverPriority
+ );
+
+ // Reset indexing to not hang.
+ configureGlodaIndexing({});
+
+ // Sorta get the event chain going again.
+ await resumeFromSimulatedHang(true);
+
+ // Because the folder was dirty it should actually end up getting indexed,
+ // so in the end the message will get indexed. Also, make sure a cleanup
+ // was observed.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { cleanedUp: 1 }));
+}
+
+/* ===== Threading / Conversation Grouping ===== */
+
+var gSynMessages = [];
+function allMessageInSameConversation(aSynthMessage, aGlodaMessage, aConvID) {
+ if (aConvID === undefined) {
+ return aGlodaMessage.conversationID;
+ }
+ Assert.equal(aConvID, aGlodaMessage.conversationID);
+ // Cheat and stash the synthetic message (we need them for one of the IMAP
+ // tests).
+ gSynMessages.push(aSynthMessage);
+ return aConvID;
+}
+
+/**
+ * Test our conversation/threading logic in the straight-forward direct
+ * reply case, the missing intermediary case, and the siblings with missing
+ * parent case. We also test all permutations of receipt of those messages.
+ * (Also tests that we index new messages.)
+ */
+async function test_threading_direct_reply() {
+ let permutationMessages = await permuteMessages(
+ scenarios.directReply,
+ messageInjection
+ );
+ for (const preparedMessage of permutationMessages) {
+ let message = await preparedMessage();
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([message], allMessageInSameConversation)
+ );
+ }
+}
+
+async function test_threading_missing_intermediary() {
+ let permutationMessages = await permuteMessages(
+ scenarios.missingIntermediary,
+ messageInjection
+ );
+ for (const preparedMessage of permutationMessages) {
+ let message = await preparedMessage();
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([message], allMessageInSameConversation)
+ );
+ }
+}
+async function test_threading_siblings_missing_parent() {
+ let permutationMessages = await permuteMessages(
+ scenarios.siblingsMissingParent,
+ messageInjection
+ );
+ for (const preparedMessage of permutationMessages) {
+ let message = await preparedMessage();
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([message], allMessageInSameConversation)
+ );
+ }
+}
+
+/**
+ * Test the bit that says "if we're fulltext-indexing the message and we
+ * discover it didn't have any attachments, clear the attachment bit from the
+ * message header".
+ */
+async function test_attachment_flag() {
+ // Create a synthetic message with an attachment that won't normally be listed
+ // in the attachment pane (Content-Disposition: inline, no filename, and
+ // displayable inline).
+ let smsg = msgGen.makeMessage({
+ name: "test message with part 1.2 attachment",
+ attachments: [
+ {
+ body: "attachment",
+ filename: "",
+ format: "",
+ },
+ ],
+ });
+ // Save it off for test_attributes_fundamental_from_disk.
+ let msgSet = new SyntheticMessageSet([smsg]);
+ let folder = (fundamentalFolderHandle =
+ await messageInjection.makeEmptyFolder());
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+
+ // If we need to go offline, let the indexing pass run, then force us offline.
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ await messageInjection.makeFolderAndContentsOffline(folder);
+ // Now the next indexer wait will wait for the next indexing pass.
+ }
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], {
+ verifier: verify_attachment_flag,
+ })
+ );
+}
+
+function verify_attachment_flag(smsg, gmsg) {
+ // -- Attachments. We won't have these if we don't have fulltext results.
+ if (expectFulltextResults) {
+ Assert.equal(gmsg.attachmentNames.length, 0);
+ Assert.equal(gmsg.attachmentInfos.length, 0);
+ Assert.equal(
+ false,
+ gmsg.folderMessage.flags & Ci.nsMsgMessageFlags.Attachment
+ );
+ }
+}
+/* ===== Fundamental Attributes (per GlodaFundAttr.jsm) ===== */
+
+/**
+ * Save the synthetic message created in test_attributes_fundamental for the
+ * benefit of test_attributes_fundamental_from_disk.
+ */
+var fundamentalSyntheticMessage;
+var fundamentalFolderHandle;
+/**
+ * We're saving this one so that we can move the message later and verify that
+ * the attributes are consistent.
+ */
+var fundamentalMsgSet;
+var fundamentalGlodaMsgAttachmentUrls;
+/**
+ * Save the resulting gloda message id corresponding to the
+ * fundamentalSyntheticMessage so we can use it to query the message from disk.
+ */
+var fundamentalGlodaMessageId;
+
+/**
+ * Test that we extract the 'fundamental attributes' of a message properly
+ * 'Fundamental' in this case is talking about the attributes defined/extracted
+ * by gloda's GlodaFundAttr.jsm and perhaps the core message indexing logic itself
+ * (which show up as kSpecial* attributes in GlodaFundAttr.jsm anyways.)
+ */
+async function test_attributes_fundamental() {
+ // Create a synthetic message with attachment.
+ let smsg = msgGen.makeMessage({
+ name: "test message",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartLeaf({ body: "I like cheese!" }),
+ msgGen.makeMessage({ body: { body: "I like wine!" } }), // That's one attachment.
+ ]),
+ attachments: [
+ { filename: "bob.txt", body: "I like bread!" }, // And that's another one.
+ ],
+ });
+ // Save it off for test_attributes_fundamental_from_disk.
+ fundamentalSyntheticMessage = smsg;
+ let msgSet = new SyntheticMessageSet([smsg]);
+ fundamentalMsgSet = msgSet;
+ let folder = (fundamentalFolderHandle =
+ await messageInjection.makeEmptyFolder());
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+
+ // If we need to go offline, let the indexing pass run, then force us offline.
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ await messageInjection.makeFolderAndContentsOffline(folder);
+ // Now the next indexer wait will wait for the next indexing pass.
+ }
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], {
+ verifier: verify_attributes_fundamental,
+ })
+ );
+}
+
+function verify_attributes_fundamental(smsg, gmsg) {
+ // Save off the message id for test_attributes_fundamental_from_disk.
+ fundamentalGlodaMessageId = gmsg.id;
+ if (gmsg.attachmentInfos) {
+ fundamentalGlodaMsgAttachmentUrls = gmsg.attachmentInfos.map(
+ att => att.url
+ );
+ } else {
+ fundamentalGlodaMsgAttachmentUrls = [];
+ }
+
+ Assert.equal(
+ gmsg.folderURI,
+ messageInjection.getRealInjectionFolder(fundamentalFolderHandle).URI
+ );
+
+ // -- Subject
+ Assert.equal(smsg.subject, gmsg.conversation.subject);
+ Assert.equal(smsg.subject, gmsg.subject);
+
+ // -- Contact/identity information.
+ // - From
+ // Check the e-mail address.
+ Assert.equal(gmsg.from.kind, "email");
+ Assert.equal(smsg.fromAddress, gmsg.from.value);
+ // Check the name.
+ Assert.equal(smsg.fromName, gmsg.from.contact.name);
+
+ // - To
+ Assert.equal(smsg.toAddress, gmsg.to[0].value);
+ Assert.equal(smsg.toName, gmsg.to[0].contact.name);
+
+ // Date
+ Assert.equal(smsg.date.valueOf(), gmsg.date.valueOf());
+
+ // -- Message ID
+ Assert.equal(smsg.messageId, gmsg.headerMessageID);
+
+ // -- Attachments. We won't have these if we don't have fulltext results.
+ if (expectFulltextResults) {
+ Assert.equal(gmsg.attachmentTypes.length, 1);
+ Assert.equal(gmsg.attachmentTypes[0], "text/plain");
+ Assert.equal(gmsg.attachmentNames.length, 1);
+ Assert.equal(gmsg.attachmentNames[0], "bob.txt");
+
+ let expectedInfos = [
+ // The name for that one is generated randomly.
+ { contentType: "message/rfc822" },
+ { name: "bob.txt", contentType: "text/plain" },
+ ];
+ let expectedSize = 14;
+ Assert.equal(gmsg.attachmentInfos.length, 2);
+ for (let [i, attInfos] of gmsg.attachmentInfos.entries()) {
+ for (let k in expectedInfos[i]) {
+ Assert.equal(attInfos[k], expectedInfos[i][k]);
+ }
+ // Because it's unreliable and depends on the platform.
+ Assert.ok(Math.abs(attInfos.size - expectedSize) <= 2);
+ // Check that the attachment URLs are correct.
+ let channel = NetUtil.newChannel({
+ uri: attInfos.url,
+ loadingPrincipal: Services.scriptSecurityManager.getSystemPrincipal(),
+ securityFlags:
+ Ci.nsILoadInfo.SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL,
+ contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER,
+ });
+
+ try {
+ // Will throw if the URL is invalid.
+ channel.asyncOpen(new PromiseTestUtils.PromiseStreamListener());
+ } catch (e) {
+ do_throw(new Error("Invalid attachment URL"));
+ }
+ }
+ } else {
+ // Make sure we don't actually get attachments!
+ Assert.equal(gmsg.attachmentTypes, null);
+ Assert.equal(gmsg.attachmentNames, null);
+ }
+}
+
+/**
+ * We now move the message into another folder, wait for it to be indexed,
+ * and make sure the magic url getter for GlodaAttachment returns a proper
+ * URL.
+ */
+async function test_moved_message_attributes() {
+ if (!expectFulltextResults) {
+ return;
+ }
+
+ // Don't ask me why, let destFolder = MessageInjection.make_empty_folder would result in a
+ // random error when running test_index_messages_imap_offline.js ...
+ let [[destFolder], ignoreSet] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 2 }]
+ );
+ fundamentalFolderHandle = destFolder;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([ignoreSet]));
+
+ // This is a fast move (third parameter set to true).
+ await messageInjection.moveMessages(fundamentalMsgSet, destFolder, true);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([fundamentalMsgSet], {
+ verifier(newSynMsg, newGlodaMsg) {
+ // Verify we still have the same number of attachments.
+ Assert.equal(
+ fundamentalGlodaMsgAttachmentUrls.length,
+ newGlodaMsg.attachmentInfos.length
+ );
+ for (let [i, attInfos] of newGlodaMsg.attachmentInfos.entries()) {
+ // Verify the url has changed.
+ Assert.notEqual(fundamentalGlodaMsgAttachmentUrls[i], attInfos.url);
+ // And verify that the new url is still valid.
+ let channel = NetUtil.newChannel({
+ uri: attInfos.url,
+ loadingPrincipal:
+ Services.scriptSecurityManager.getSystemPrincipal(),
+ securityFlags:
+ Ci.nsILoadInfo.SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL,
+ contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER,
+ });
+ try {
+ channel.asyncOpen(new PromiseTestUtils.PromiseStreamListener());
+ } catch (e) {
+ new Error("Invalid attachment URL");
+ }
+ }
+ },
+ fullyIndexed: 0,
+ })
+ );
+}
+
+/**
+ * We want to make sure that all of the fundamental properties also are there
+ * when we load them from disk. Nuke our cache, query the message back up.
+ * We previously used getMessagesByMessageID to get the message back, but he
+ * does not perform a full load-out like a query does, so we need to use our
+ * query mechanism for this.
+ */
+async function test_attributes_fundamental_from_disk() {
+ nukeGlodaCachesAndCollections();
+
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE).id(
+ fundamentalGlodaMessageId
+ );
+ await queryExpect(
+ query,
+ [fundamentalSyntheticMessage],
+ verify_attributes_fundamental_from_disk,
+ function (smsg) {
+ return smsg.messageId;
+ }
+ );
+}
+
+/**
+ * We are just a wrapper around verify_attributes_fundamental, adapting the
+ * return callback from getMessagesByMessageID.
+ *
+ * @param aGlodaMessageLists This should be [[theGlodaMessage]].
+ */
+function verify_attributes_fundamental_from_disk(aGlodaMessage) {
+  // Return the message id for test_attributes_fundamental_from_disk's benefit.
+ verify_attributes_fundamental(fundamentalSyntheticMessage, aGlodaMessage);
+ return aGlodaMessage.headerMessageID;
+}
+
+/* ===== Explicit Attributes (per GlodaExplicitAttr.jsm) ===== */
+
+/**
+ * Test the attributes defined by GlodaExplicitAttr.jsm.
+ */
+async function test_attributes_explicit() {
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+
+ // -- Star
+ msgSet.setStarred(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, true);
+
+ msgSet.setStarred(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, false);
+
+ // -- Read / Unread
+ msgSet.setRead(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, true);
+
+ msgSet.setRead(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, false);
+
+ // -- Tags
+ // Note that the tag service does not guarantee stable nsIMsgTag references,
+ // nor does noun_tag go too far out of its way to provide stability.
+ // However, it is stable as long as we don't spook it by bringing new tags
+ // into the equation.
+ let tagOne = TagNoun.getTag("$label1");
+ let tagTwo = TagNoun.getTag("$label2");
+
+ msgSet.addTag(tagOne.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.notEqual(gmsg.tags.indexOf(tagOne), -1);
+
+ msgSet.addTag(tagTwo.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.notEqual(gmsg.tags.indexOf(tagOne), -1);
+ Assert.notEqual(gmsg.tags.indexOf(tagTwo), -1);
+
+ msgSet.removeTag(tagOne.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.tags.indexOf(tagOne), -1);
+ Assert.notEqual(gmsg.tags.indexOf(tagTwo), -1);
+
+ msgSet.removeTag(tagTwo.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.tags.indexOf(tagOne), -1);
+ Assert.equal(gmsg.tags.indexOf(tagTwo), -1);
+
+ // -- Replied To
+
+ // -- Forwarded
+}
+
+/**
+ * Test non-query-able attributes
+ */
+async function test_attributes_cant_query() {
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+
+ // -- Star
+ msgSet.setStarred(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, true);
+
+ msgSet.setStarred(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.starred, false);
+
+ // -- Read / Unread
+ msgSet.setRead(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, true);
+
+ msgSet.setRead(false);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.read, false);
+
+ let readDbAttr = Gloda.getAttrDef(GlodaConstants.BUILT_IN, "read");
+ let readId = readDbAttr.id;
+
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messageAttributes WHERE attributeID = ?1",
+ readId
+ );
+
+ // -- Replied To
+
+ // -- Forwarded
+}
+
+/**
+ * Have the participants be in our addressbook prior to indexing so that we can
+ * verify that the hand-off to the addressbook indexer does not cause breakage.
+ */
+async function test_people_in_addressbook() {
+ var senderPair = msgGen.makeNameAndAddress(),
+ recipPair = msgGen.makeNameAndAddress();
+
+ // - Add both people to the address book.
+ makeABCardForAddressPair(senderPair);
+ makeABCardForAddressPair(recipPair);
+
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, to: [recipPair], from: senderPair },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0],
+ senderIdentity = gmsg.from,
+ recipIdentity = gmsg.to[0];
+
+ Assert.notEqual(senderIdentity.contact, null);
+ Assert.ok(senderIdentity.inAddressBook);
+
+ Assert.notEqual(recipIdentity.contact, null);
+ Assert.ok(recipIdentity.inAddressBook);
+}
+
+/* ===== Fulltexts Indexing ===== */
+
+/**
+ * Make sure that we are using the saneBodySize flag. This is basically the
+ * test_sane_bodies test from test_mime_emitter but we pull the indexedBodyText
+ * off the message to check and also make sure that the text contents slice
+ * off the end rather than the beginning.
+ */
+async function test_streamed_bodies_are_size_capped() {
+ if (!expectFulltextResults) {
+ return;
+ }
+
+ let hugeString =
+ "qqqqxxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx \r\n";
+ const powahsOfTwo = 10;
+ for (let i = 0; i < powahsOfTwo; i++) {
+ hugeString = hugeString + hugeString;
+ }
+ let bodyString = "aabb" + hugeString + "xxyy";
+
+ let synMsg = msgGen.makeMessage({
+ body: { body: bodyString, contentType: "text/plain" },
+ });
+ let msgSet = new SyntheticMessageSet([synMsg]);
+ let folder = await messageInjection.makeEmptyFolder();
+ await messageInjection.addSetsToFolders([folder], [msgSet]);
+
+ if (goOffline) {
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ await messageInjection.makeFolderAndContentsOffline(folder);
+ }
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+ Assert.ok(gmsg.indexedBodyText.startsWith("aabb"));
+ Assert.ok(!gmsg.indexedBodyText.includes("xxyy"));
+
+ if (gmsg.indexedBodyText.length > 20 * 1024 + 58 + 10) {
+ do_throw(
+ "Indexed body text is too big! (" + gmsg.indexedBodyText.length + ")"
+ );
+ }
+}
+
+/* ===== Message Deletion ===== */
+/**
+ * Test actually deleting a message on a per-message basis (not just nuking the
+ * folder like emptying the trash does.)
+ *
+ * Logic situations:
+ * - Non-last message in a conversation, twin.
+ * - Non-last message in a conversation, not a twin.
+ * - Last message in a conversation
+ */
+async function test_message_deletion() {
+ // Non-last message in conv, twin.
+ // Create and index two messages in a conversation.
+ let [, convSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([convSet], { augment: true }));
+
+ // Twin the first message in a different folder owing to our reliance on
+ // message-id's in the SyntheticMessageSet logic. (This is also why we broke
+ // up the indexing waits too.)
+ let twinFolder = await messageInjection.makeEmptyFolder();
+ let twinSet = new SyntheticMessageSet([convSet.synMessages[0]]);
+ await messageInjection.addSetsToFolders([twinFolder], [twinSet]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([twinSet], { augment: true }));
+
+ // Split the conv set into two helper sets.
+ let firstSet = convSet.slice(0, 1); // The twinned first message in the thread.
+ let secondSet = convSet.slice(1, 2); // The un-twinned second thread message.
+
+ // Make sure we can find the message (paranoia).
+ let firstQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ firstQuery.id(firstSet.glodaMessages[0].id);
+ let firstColl = await queryExpect(firstQuery, firstSet);
+
+ // Delete it (not trash! delete!).
+ await MessageInjection.deleteMessages(firstSet);
+ // Which should result in an apparent deletion.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [firstSet] }));
+ // And our collection from that query should now be empty.
+ Assert.equal(firstColl.items.length, 0);
+
+ // Make sure it no longer shows up in a standard query.
+ firstColl = await queryExpect(firstQuery, []);
+
+ // Make sure it shows up in a privileged query.
+ let privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ let firstGlodaId = firstSet.glodaMessages[0].id;
+ privQuery.id(firstGlodaId);
+ await queryExpect(privQuery, firstSet);
+
+ // Force a deletion pass.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Make sure it no longer shows up in a privileged query; since it has a twin
+ // we don't need to leave it as a ghost.
+ await queryExpect(privQuery, []);
+
+ // Make sure that the messagesText entry got blown away.
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messagesText WHERE docid = ?1",
+ firstGlodaId
+ );
+
+ // Make sure the conversation still exists.
+ let conv = twinSet.glodaMessages[0].conversation;
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(conv.id);
+ let convColl = await queryExpect(convQuery, [conv]);
+
+ // -- Non-last message, no longer a twin => ghost.
+
+ // Make sure nuking the twin didn't somehow kill them both.
+ let twinQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ // Let's search on the message-id now that there is no ambiguity.
+ twinQuery.headerMessageID(twinSet.synMessages[0].messageId);
+ let twinColl = await queryExpect(twinQuery, twinSet);
+
+ // Delete the twin.
+ await MessageInjection.deleteMessages(twinSet);
+ // Which should result in an apparent deletion.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [twinSet] }));
+ // It should disappear from the collection.
+ Assert.equal(twinColl.items.length, 0);
+
+ // No longer show up in the standard query.
+ twinColl = await queryExpect(twinQuery, []);
+
+ // Still show up in a privileged query.
+ privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ privQuery.headerMessageID(twinSet.synMessages[0].messageId);
+ await queryExpect(privQuery, twinSet);
+
+ // Force a deletion pass.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+  // The message should be marked as a ghost now that the deletion pass has run.
+ // Ghosts have no fulltext rows, so check for that.
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messagesText WHERE docid = ?1",
+ twinSet.glodaMessages[0].id
+ );
+
+ // It still should show up in the privileged query; it's a ghost!
+ let privColl = await queryExpect(privQuery, twinSet);
+ // Make sure it looks like a ghost.
+ let twinGhost = privColl.items[0];
+ Assert.equal(twinGhost._folderID, null);
+ Assert.equal(twinGhost._messageKey, null);
+
+ // Make sure the conversation still exists.
+ await queryExpect(convQuery, [conv]);
+
+  // -- Last message in the conversation, not a twin.
+ // This should blow away the message, the ghosts, and the conversation.
+
+ // Second message should still be around.
+ let secondQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ secondQuery.headerMessageID(secondSet.synMessages[0].messageId);
+ let secondColl = await queryExpect(secondQuery, secondSet);
+
+ // Delete it and make sure it gets marked deleted appropriately.
+ await MessageInjection.deleteMessages(secondSet);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [secondSet] }));
+ Assert.equal(secondColl.items.length, 0);
+
+ // Still show up in a privileged query.
+ privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ privQuery.headerMessageID(secondSet.synMessages[0].messageId);
+ await queryExpect(privQuery, secondSet);
+
+ // Force a deletion pass.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // It should no longer show up in a privileged query; we killed the ghosts.
+ await queryExpect(privQuery, []);
+
+ // - The conversation should have disappeared too.
+ // (we have no listener to watch for it to have disappeared from convQuery but
+ // this is basically how glodaTestHelper does its thing anyways.)
+ Assert.equal(convColl.items.length, 0);
+
+ // Make sure the query fails to find it too.
+ await queryExpect(convQuery, []);
+
+ // -- Identity culling verification.
+ // The identities associated with that message should no longer exist, nor
+ // should their contacts.
+}
+
+async function test_moving_to_trash_marks_deletion() {
+ // Create and index two messages in a conversation.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ let convId = msgSet.glodaMessages[0].conversation.id;
+ let firstGlodaId = msgSet.glodaMessages[0].id;
+ let secondGlodaId = msgSet.glodaMessages[1].id;
+
+ // Move them to the trash.
+ await messageInjection.trashMessages(msgSet);
+
+ // We do not index the trash folder so this should actually make them appear
+ // deleted to an unprivileged query.
+ let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ msgQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgQuery, []);
+
+ // They will appear deleted after the events.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // Force a sweep.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // There should be no apparent change as the result of this pass.
+ // Well, the conversation will die, but we can't see that.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // The conversation should be gone.
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(convId);
+ await queryExpect(convQuery, []);
+
+ // The messages should be entirely gone.
+ let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ msgPrivQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgPrivQuery, []);
+}
+
+/**
+ * Deletion that occurs because a folder got deleted.
+ * There is no hand-holding involving the headers that were in the folder.
+ */
+async function test_folder_nuking_message_deletion() {
+ // Create and index two messages in a conversation.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2, msgsPerThread: 2 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+
+ let convId = msgSet.glodaMessages[0].conversation.id;
+ let firstGlodaId = msgSet.glodaMessages[0].id;
+ let secondGlodaId = msgSet.glodaMessages[1].id;
+
+ // Delete the folder.
+ messageInjection.deleteFolder(folder);
+ // That does generate the deletion events if the messages were in-memory,
+ // which these are.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
+
+ // This should have caused us to mark all the messages as deleted; the
+ // messages should no longer show up in an unprivileged query.
+ let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ msgQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgQuery, []);
+
+ // Force a sweep.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ // There should be no apparent change as the result of this pass.
+ // Well, the conversation will die, but we can't see that.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // The conversation should be gone.
+ let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
+ convQuery.id(convId);
+ await queryExpect(convQuery, []);
+
+ // The messages should be entirely gone.
+ let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noDbQueryValidityConstraints: true,
+ });
+ msgPrivQuery.id(firstGlodaId, secondGlodaId);
+ await queryExpect(msgPrivQuery, []);
+}
+
+/* ===== Folder Move/Rename/Copy (Single and Nested) ===== */
+
+async function test_folder_deletion_nested() {
+ // Add a folder with a bunch of messages.
+ let [[folder1], msgSet1] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ let [[folder2], msgSet2] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Index these folders, and augment the msgSet with the glodaMessages array
+ // for later use by sqlExpectCount.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet1, msgSet2], { augment: true })
+ );
+  // The move has to be performed after the indexing, because otherwise, on
+  // IMAP, the moved message headers are different entities and it's not
+  // msgSet2 that ends up indexed, but the fresh headers.
+ await MessageInjection.moveFolder(folder2, folder1);
+
+ // Add a trash folder, and move folder1 into it.
+ let trash = await messageInjection.makeEmptyFolder(null, [
+ Ci.nsMsgFolderFlags.Trash,
+ ]);
+ await MessageInjection.moveFolder(folder1, trash);
+
+ let folders = MessageInjection.get_nsIMsgFolder(trash).descendants;
+ Assert.equal(folders.length, 2);
+ let [newFolder1, newFolder2] = folders;
+
+ let glodaFolder1 = Gloda.getFolderForFolder(newFolder1);
+ let glodaFolder2 = Gloda.getFolderForFolder(newFolder2);
+
+ // Verify that Gloda properly marked this folder as not to be indexed anymore.
+ Assert.equal(
+ glodaFolder1.indexingPriority,
+ glodaFolder1.kIndexingNeverPriority
+ );
+
+ // Check that existing message is marked as deleted.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], { deleted: [msgSet1, msgSet2] })
+ );
+
+ // Make sure the deletion hit the database.
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
+ glodaFolder1.id,
+ glodaFolder1.kIndexingNeverPriority
+ );
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
+ glodaFolder2.id,
+ glodaFolder2.kIndexingNeverPriority
+ );
+
+ if (messageInjection.messageInjectionIsLocal()) {
+ // Add another message.
+ await messageInjection.makeNewSetsInFolders([newFolder1], [{ count: 1 }]);
+ await messageInjection.makeNewSetsInFolders([newFolder2], [{ count: 1 }]);
+
+ // Make sure that indexing returns nothing.
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+ }
+}
+
+/* ===== IMAP Nuances ===== */
+
+/**
+ * Verify that for IMAP folders we still index a message that is added
+ * as read.
+ */
+async function test_imap_add_unread_to_folder() {
+ if (messageInjection.messageInjectionIsLocal()) {
+ return;
+ }
+
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, read: true },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+}
+
+/* ===== Message Moving ===== */
+
+/**
+ * Moving a message between folders should result in us knowing that the message
+ * is in the target location.
+ */
+async function test_message_moving() {
+ // - Inject and insert.
+ // Source folder with the message we care about.
+ let [[srcFolder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ // Dest folder with some messages in it to test some wacky local folder moving
+ // logic. (Local moves try and update the correspondence immediately.)
+ let [[destFolder], ignoreSet] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 2 }]
+ );
+
+ // We want the gloda message mapping.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet, ignoreSet], { augment: true })
+ );
+ let gmsg = msgSet.glodaMessages[0];
+ // Save off the message key so we can make sure it changes.
+ let oldMessageKey = msgSet.getMsgHdr(0).messageKey;
+
+ // - Fastpath (offline) move it to a new folder.
+ // Initial move.
+ await messageInjection.moveMessages(msgSet, destFolder, true);
+
+ // - Make sure gloda sees it in the new folder.
+ // Since we are doing offline IMAP moves, the fast-path should be taken and
+ // so we should receive an itemsModified notification without a call to
+ // Gloda.grokNounItem.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { fullyIndexed: 0 }));
+
+ Assert.equal(
+ gmsg.folderURI,
+ messageInjection.getRealInjectionFolder(destFolder).URI
+ );
+
+ // - Make sure the message key is correct!
+ Assert.equal(gmsg.messageKey, msgSet.getMsgHdr(0).messageKey);
+ // Sanity check that the messageKey actually changed for the message.
+ Assert.notEqual(gmsg.messageKey, oldMessageKey);
+
+ // - Make sure the indexer's _keyChangedBatchInfo dict is empty.
+ for (let evilKey in GlodaMsgIndexer._keyChangedBatchInfo) {
+ let evilValue = GlodaMsgIndexer._keyChangedBatchInfo[evilKey];
+ throw new Error(
+ "GlodaMsgIndexer._keyChangedBatchInfo should be empty but" +
+ "has key:\n" +
+ evilKey +
+ "\nAnd value:\n",
+ evilValue + "."
+ );
+ }
+
+ // - Slowpath (IMAP online) move it back to its origin folder.
+ // Move it back.
+ await messageInjection.moveMessages(msgSet, srcFolder, false);
+ // In the IMAP case we will end up reindexing the message because we will
+ // not be able to fast-path, but the local case will still be fast-pathed.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([msgSet], {
+ fullyIndexed: messageInjection.messageInjectionIsLocal() ? 0 : 1,
+ })
+ );
+ Assert.equal(
+ gmsg.folderURI,
+ messageInjection.getRealInjectionFolder(srcFolder).URI
+ );
+ Assert.equal(gmsg.messageKey, msgSet.getMsgHdr(0).messageKey);
+}
+
+/**
+ * Moving a gloda-indexed message out of a filthy folder should result in the
+ * destination message not having a gloda-id.
+ */
+
+/* ===== Message Copying ===== */
+
+/* ===== Sweep Complications ==== */
+
+/**
+ * Make sure that a message indexed by event-driven indexing does not
+ * get reindexed by sweep indexing that follows.
+ */
+async function test_sweep_indexing_does_not_reindex_event_indexed() {
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Wait for the event sweep to complete.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Force a sweep of the folder.
+ GlodaMsgIndexer.indexFolder(messageInjection.getRealInjectionFolder(folder));
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+}
+
+/**
+ * Verify that moving apparently gloda-indexed messages from a filthy folder or
+ * one that simply should not be gloda indexed does not result in the target
+ * messages having the gloda-id property on them. To avoid messing with too
+ * many invariants we do the 'folder should not be gloda indexed' case.
+ * Uh, and of course, the message should still get indexed once we clear the
+ * filthy gloda-id off of it given that it is moving from a folder that is not
+ * indexed to one that is indexed.
+ */
+async function test_filthy_moves_slash_move_from_unindexed_to_indexed() {
+ // - Inject.
+ // The source folder needs a flag so we don't index it.
+ let srcFolder = await messageInjection.makeEmptyFolder(null, [
+ Ci.nsMsgFolderFlags.Junk,
+ ]);
+ // The destination folder has to be something we want to index though.
+ let destFolder = await messageInjection.makeEmptyFolder();
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [srcFolder],
+ [{ count: 1 }]
+ );
+
+ // - Mark with a bogus gloda-id.
+ msgSet.getMsgHdr(0).setUint32Property("gloda-id", 9999);
+
+ // - Disable event driven indexing so we don't get interference from indexing.
+ configureGlodaIndexing({ event: false });
+
+ // - Move.
+ await messageInjection.moveMessages(msgSet, destFolder);
+
+ // - Verify the target has no gloda-id!
+ dump(`checking ${msgSet.getMsgHdr(0)}`);
+ Assert.equal(msgSet.getMsgHdr(0).getUint32Property("gloda-id"), 0);
+
+ // - Re-enable indexing and let the indexer run.
+ // We don't want to affect other tests.
+ configureGlodaIndexing({});
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+}
+
+function test_sanity_test_environment() {
+ Assert.ok(msgGen, "Sanity that msgGen is set.");
+ Assert.ok(scenarios, "Sanity that scenarios is set");
+ Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+}
+
+var base_index_messages_tests = [
+ test_sanity_test_environment,
+ test_pending_commit_tracker_flushes_correctly,
+ test_pending_commit_causes_msgdb_commit,
+ test_indexing_sweep,
+ test_event_driven_indexing_does_not_mess_with_filthy_folders,
+
+ test_threading_direct_reply,
+ test_threading_missing_intermediary,
+ test_threading_siblings_missing_parent,
+ test_attachment_flag,
+ test_attributes_fundamental,
+ test_moved_message_attributes,
+ test_attributes_fundamental_from_disk,
+ test_attributes_explicit,
+ test_attributes_cant_query,
+
+ test_people_in_addressbook,
+
+ test_streamed_bodies_are_size_capped,
+
+ test_imap_add_unread_to_folder,
+ test_message_moving,
+
+ test_message_deletion,
+ test_moving_to_trash_marks_deletion,
+ test_folder_nuking_message_deletion,
+
+ test_sweep_indexing_does_not_reindex_event_indexed,
+
+ test_filthy_moves_slash_move_from_unindexed_to_indexed,
+
+ test_indexing_never_priority,
+ test_setting_indexing_priority_never_while_indexing,
+
+ test_folder_deletion_nested,
+];
diff --git a/comm/mailnews/db/gloda/test/unit/base_query_messages.js b/comm/mailnews/db/gloda/test/unit/base_query_messages.js
new file mode 100644
index 0000000000..02b8cceb1a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_query_messages.js
@@ -0,0 +1,729 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests our querying support. We build up a deterministic little
+ * 'world' of messages spread across multiple conversations, multiple folders
+ * and multiple authors. To verify expected negative results, in addition to
+ * the 'peoples' in our world clique, we also have 'outlier' contacts that do
+ * not communicate with the others (but are also spread across folders).
+ *
+ * This is broadly intended to test all of our query features and mechanisms
+ * (apart from our specialized search implementation, which is tested by
+ * test_search_messages.js), but is probably not the place to test specific
+ * edge-cases if they do not easily fit into the 'world' data set.
+ *
+ * I feel like having the 'world' mishmash as a data source may muddle things
+ * more than it should, but it is hard to deny the benefit of not having to
+ * define a bunch of message corpuses entirely specialized for each test.
+ */
+
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+// Initialized by the per-transport test driver before the suite runs; the
+// sanity test below asserts they are set.
+var msgGen;
+var messageInjection;
+
+/**
+ * Whether we expect fulltext results. IMAP folders that are offline shouldn't
+ * have their bodies indexed.
+ */
+var expectFulltextResults = true;
+
+/**
+ * Should we force our folders offline after we have indexed them once. We do
+ * this in the online_to_offline test variant.
+ */
+var goOffline = false;
+
+/* ===== Populate ===== */
+var world = {
+  phase: 0,
+
+  // A list of tuples of [name, email] of length NUM_AUTHORS.
+  peoples: null,
+  NUM_AUTHORS: 5,
+  // Maps each author (as defined by their email address) to the list of
+  // (synthetic) messages they have 'authored'.
+  authorGroups: {},
+
+  NUM_CONVERSATIONS: 3,
+  // The last message (so far) in each conversation.
+  lastMessagesInConvos: [],
+  // Maps the message-id of the root message in a conversation to the list of
+  // synthetic messages in the conversation.
+  conversationGroups: {},
+  // A list of lists of synthetic messages, organized by the conversation they
+  // belong to.
+  conversationLists: [],
+  // A list of gloda conversation id's, each corresponding to the entries in
+  // conversationLists.
+  glodaConversationIds: [],
+
+  NUM_FOLDERS: 2,
+  MESSAGES_PER_FOLDER: 11,
+  // A list of lists of synthetic messages, one list per folder.
+  folderClumps: [],
+  // A list of nsIMsgFolders, with each folder containing the messages in the
+  // corresponding list in folderClumps.
+  glodaFolders: [],
+
+  outlierAuthor: null,
+  outlierFriend: null,
+
+  // Messages authored by contacts in the "peoples" group.
+  peoplesMessages: [],
+  // Messages authored by outlierAuthor and outlierFriend.
+  outlierMessages: [],
+};
+
+/**
+ * Given a number, provide a unique term. This is for the benefit of the search
+ * logic. This entails using a unique prefix to avoid accidental collision
+ * with terms outside our control and then just generating unique character
+ * strings in a vaguely base-26 style. To avoid the porter stemmer causing odd
+ * things to happen we actually double every numerically driven character.
+ *
+ * @param aNum Non-negative integer to encode.
+ * @returns A string of the form "uniq" followed by doubled letters.
+ */
+function uniqueTermGenerator(aNum) {
+  let s = "uniq";
+  do {
+    let l = String.fromCharCode(97 + (aNum % 26));
+    s += l + l;
+    aNum = Math.floor(aNum / 26);
+  } while (aNum);
+  return s;
+}
+
+// Offsets fed to uniqueTermGenerator so that the conversation, author,
+// subject, body and attachment term namespaces stay disjoint.
+var UNIQUE_OFFSET_CONV = 0;
+var UNIQUE_OFFSET_AUTHOR = 26;
+var UNIQUE_OFFSET_BODY = 0;
+var UNIQUE_OFFSET_SUBJECT = 26 * 26;
+var UNIQUE_OFFSET_ATTACHMENT = 26 * 26 * 26;
+
+/**
+ * Categorize a synthetic message by conversation/folder/people in the 'world'
+ * structure. This is then used by the test code to generate and verify query
+ * data.
+ *
+ * @param aSynthMessage The synthetic message; its iConvo property must have
+ *     been set by generateFolderMessages.
+ */
+function categorizeMessage(aSynthMessage) {
+  // Lump by author.
+  let author = aSynthMessage.fromAddress;
+  if (!(author in world.authorGroups)) {
+    world.authorGroups[author] = [];
+  }
+  world.authorGroups[author].push(aSynthMessage);
+
+  // Lump by conversation, keying off of the originator's message id.
+  // Walk parent links up to the root of the (synthetic) thread.
+  let originator = aSynthMessage;
+  while (originator.parent) {
+    originator = originator.parent;
+  }
+  if (!(originator.messageId in world.conversationGroups)) {
+    world.conversationGroups[originator.messageId] = [];
+  }
+  world.conversationGroups[originator.messageId].push(aSynthMessage);
+  world.conversationLists[aSynthMessage.iConvo].push(aSynthMessage);
+
+  // Folder lumping happens in a big glob.
+}
+
+/**
+ * Generate messages in a single folder, categorizing them as we go.
+ *
+ * Key message characteristics:
+ * - Whenever a 'peoples' sends a message, they send it to all 'peoples',
+ *   including themselves.
+ *
+ * @returns {SyntheticMessageSet} The set of messages for the new folder,
+ *     including one uncategorized outlier message.
+ */
+function generateFolderMessages() {
+  let messages = [],
+    smsg;
+
+  let iAuthor = 0;
+  for (let iMessage = 0; iMessage < world.MESSAGES_PER_FOLDER; iMessage++) {
+    let iConvo = iMessage % world.NUM_CONVERSATIONS;
+
+    // We need missing messages to create ghosts, so periodically add an extra
+    // unknown into the equation. We do this prior to the below step because
+    // then we don't hose up all the fancy body creation the next step does.
+    if (iMessage % 3 == 1) {
+      smsg = msgGen.makeMessage({ inReplyTo: smsg });
+    }
+
+    let convUniqueSubject = uniqueTermGenerator(
+      UNIQUE_OFFSET_SUBJECT + UNIQUE_OFFSET_CONV + iConvo
+    );
+    let convUniqueBody = uniqueTermGenerator(
+      UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_CONV + iConvo
+    );
+    let authorUniqueBody = uniqueTermGenerator(
+      UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_AUTHOR + iAuthor
+    );
+    let convUniqueAttachment = uniqueTermGenerator(
+      UNIQUE_OFFSET_ATTACHMENT + UNIQUE_OFFSET_CONV + iConvo
+    );
+    smsg = msgGen.makeMessage({
+      inReplyTo: world.lastMessagesInConvos[iConvo],
+      // Note that the reply-logic will ignore our subject, luckily that does
+      // not matter! (since it will just copy the subject)
+      subject: convUniqueSubject,
+      body: {
+        body: convUniqueBody + " " + authorUniqueBody,
+      },
+      attachments: [
+        {
+          filename: convUniqueAttachment + ".conv",
+          body: "content does not matter. only life matters.",
+          contentType: "application/x-test",
+        },
+      ],
+    });
+
+    // MakeMessage is not exceedingly clever right now, we need to overwrite
+    // From and To.
+    smsg.from = world.peoples[iAuthor];
+    iAuthor = (iAuthor + iConvo + 1) % world.NUM_AUTHORS;
+    // So, everyone is talking to everyone for this stuff.
+    smsg.to = world.peoples;
+    world.lastMessagesInConvos[iConvo] = smsg;
+    // Simplify categorizeMessage and glodaInfoStasher's life.
+    smsg.iConvo = iConvo;
+
+    categorizeMessage(smsg);
+    messages.push(smsg);
+    world.peoplesMessages.push(smsg);
+  }
+
+  // One outlier message per folder; it is kept out of the 'peoples' buckets.
+  smsg = msgGen.makeMessage();
+  smsg.from = world.outlierAuthor;
+  smsg.to = [world.outlierFriend];
+  // Do not lump it.
+  messages.push(smsg);
+  world.outlierMessages.push(smsg);
+
+  world.folderClumps.push(messages);
+
+  return new SyntheticMessageSet(messages);
+}
+
+/**
+ * To save ourselves some lookup trouble, pretend to be a verification
+ * function so we get easy access to the gloda translations of the messages so
+ * we can cram this in various places.
+ *
+ * @param aSynthMessage The synthetic message being verified.
+ * @param aGlodaMessage Its indexed gloda representation.
+ */
+function glodaInfoStasher(aSynthMessage, aGlodaMessage) {
+  if (aSynthMessage.iConvo !== undefined) {
+    world.glodaConversationIds[aSynthMessage.iConvo] =
+      aGlodaMessage.conversation.id;
+  }
+  // Record each folder once; world.phase advances per populated folder.
+  if (world.glodaFolders.length <= world.phase) {
+    world.glodaFolders.push(aGlodaMessage.folder);
+  }
+}
+
+// We override these for the IMAP tests.
+var pre_setup_populate_hook = function default_pre_setup_populate_hook() {};
+var post_setup_populate_hook = function default_post_setup_populate_hook() {};
+
+// First, we must populate our message store with delicious messages.
+async function setup_populate() {
+  // NOTE(review): presumably an explicit collection is created here to keep
+  // the gloda objects alive for the rest of the suite — confirm.
+  world.glodaHolderCollection = Gloda.explicitCollection(
+    GlodaConstants.NOUN_MESSAGE,
+    []
+  );
+
+  world.peoples = msgGen.makeNamesAndAddresses(world.NUM_AUTHORS);
+  world.outlierAuthor = msgGen.makeNameAndAddress();
+  world.outlierFriend = msgGen.makeNameAndAddress();
+  // Set up the per-conversation values with blanks initially.
+  for (let iConvo = 0; iConvo < world.NUM_CONVERSATIONS; iConvo++) {
+    world.lastMessagesInConvos.push(null);
+    world.conversationLists.push([]);
+    world.glodaConversationIds.push(null);
+  }
+
+  // First folder of messages.
+  let setOne = generateFolderMessages();
+  let folderOne = await messageInjection.makeEmptyFolder();
+  await messageInjection.addSetsToFolders([folderOne], [setOne]);
+  // If this is the online_to_offline variant (indicated by goOffline) we want
+  // to make the messages available offline. This should trigger an event
+  // driven re-indexing of the messages which should make the body available
+  // for fulltext queries.
+  if (goOffline) {
+    await waitForGlodaIndexer();
+    Assert.ok(...assertExpectedMessagesIndexed([setOne]));
+    await messageInjection.makeFolderAndContentsOffline(folderOne);
+  }
+  await waitForGlodaIndexer();
+  Assert.ok(
+    ...assertExpectedMessagesIndexed([setOne], { verifier: glodaInfoStasher })
+  );
+
+  // Second folder; bump the phase so glodaInfoStasher records its folder.
+  world.phase++;
+  let setTwo = generateFolderMessages();
+  let folderTwo = await messageInjection.makeEmptyFolder();
+  await messageInjection.addSetsToFolders([folderTwo], [setTwo]);
+  if (goOffline) {
+    await waitForGlodaIndexer();
+    Assert.ok(...assertExpectedMessagesIndexed([setTwo]));
+    await messageInjection.makeFolderAndContentsOffline(folderTwo);
+  }
+  await waitForGlodaIndexer();
+  Assert.ok(
+    ...assertExpectedMessagesIndexed([setTwo], { verifier: glodaInfoStasher })
+  );
+}
+
+/* ===== Non-text queries ===== */
+
+/* === messages === */
+
+/**
+ * Takes a list of mutually exclusive queries and a list of the resulting
+ * collections and ensures that the collections from one query do not pass the
+ * query.test() method of one of the other queries. To restate, the queries
+ * must not have any overlapping results, or we will get angry without
+ * justification.
+ *
+ * @param aQueries List of gloda queries, parallel to aCollections.
+ * @param aCollections The collections produced by running those queries.
+ */
+function verify_nonMatches(aQueries, aCollections) {
+  for (let i = 0; i < aCollections.length; i++) {
+    let testQuery = aQueries[i];
+    // Check this query against the items of the *next* collection (wrapping).
+    let nonmatches = aCollections[(i + 1) % aCollections.length].items;
+
+    for (let item of nonmatches) {
+      if (testQuery.test(item)) {
+        dump("item: " + JSON.stringify(item) + "\n");
+        dump("constraints: " + JSON.stringify(testQuery._constraints) + "\n");
+        do_throw(
+          "Something should not match query.test(), but it does: " + item
+        );
+      }
+    }
+  }
+}
+
+// Index of the next conversation to query; bumped per invocation so the two
+// scheduled runs of test_query_messages_by_conversation cover different
+// conversations.
+var ts_convNum = 0;
+/* preserved state for the non-match testing performed by
+ * test_query_messages_by_conversation_nonmatches.
+ */
+var ts_convQueries = [];
+var ts_convCollections = [];
+/**
+ * Query conversations by gloda conversation-id, saving the queries and
+ * resulting collections in ts_convQueries and ts_convCollections for the
+ * use of test_query_messages_by_conversation_nonmatches who verifies the
+ * query.test() logic doesn't match on things it should not match on.
+ *
+ * @tests gloda.noun.message.attr.conversation
+ * @tests gloda.datastore.sqlgen.kConstraintIn
+ */
+async function test_query_messages_by_conversation() {
+  let convNum = ts_convNum++;
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.conversation(world.glodaConversationIds[convNum]);
+
+  ts_convQueries.push(query);
+  ts_convCollections.push(
+    await queryExpect(query, world.conversationLists[convNum])
+  );
+}
+
+/**
+ * @tests gloda.query.test.kConstraintIn
+ */
+function test_query_messages_by_conversation_nonmatches() {
+  verify_nonMatches(ts_convQueries, ts_convCollections);
+}
+
+// Index of the next folder to query; bumped per invocation so the two
+// scheduled runs of test_query_messages_by_folder cover different folders.
+var ts_folderNum = 0;
+var ts_folderQueries = [];
+var ts_folderCollections = [];
+/**
+ * Query messages by gloda folder, saving state for the non-match test below.
+ *
+ * @tests gloda.noun.message.attr.folder
+ * @tests gloda.datastore.sqlgen.kConstraintIn
+ */
+async function test_query_messages_by_folder() {
+  let folderNum = ts_folderNum++;
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.folder(world.glodaFolders[folderNum]);
+
+  ts_folderQueries.push(query);
+  ts_folderCollections.push(
+    await queryExpect(query, world.folderClumps[folderNum])
+  );
+}
+
+/**
+ * @tests gloda.query.test.kConstraintIn
+ */
+function test_query_messages_by_folder_nonmatches() {
+  verify_nonMatches(ts_folderQueries, ts_folderCollections);
+}
+
+/**
+ * Look up the gloda message for a single nsIMsgDBHdr.
+ *
+ * @tests Gloda.ns.getMessageCollectionForHeader()
+ */
+async function test_get_message_for_header() {
+  // Pick an arbitrary message.
+  let glodaMessage = ts_convCollections[1].items[0];
+  // Find the synthetic message that matches (ordering must not be assumed).
+  let synthMessage = world.conversationLists[1].find(
+    sm => sm.messageId == glodaMessage.headerMessageID
+  );
+  await queryExpect(
+    {
+      queryFunc: Gloda.getMessageCollectionForHeader,
+      queryThis: Gloda,
+      args: [glodaMessage.folderMessage],
+      nounId: GlodaConstants.NOUN_MESSAGE,
+    },
+    [synthMessage]
+  );
+}
+
+/**
+ * Look up the gloda messages for a list of headers in one go.
+ *
+ * @tests Gloda.ns.getMessageCollectionForHeaders()
+ */
+async function test_get_messages_for_headers() {
+  let messageCollection = ts_convCollections[0];
+  let headers = messageCollection.items.map(m => m.folderMessage);
+  await queryExpect(
+    {
+      queryFunc: Gloda.getMessageCollectionForHeaders,
+      queryThis: Gloda,
+      args: [headers],
+      nounId: GlodaConstants.NOUN_MESSAGE,
+    },
+    world.conversationLists[0]
+  );
+}
+
+// At this point we go run the identity and contact tests for side-effects.
+
+// Preserved query/collection state for the identity non-match test below.
+var ts_messageIdentityQueries = [];
+var ts_messageIdentityCollections = [];
+/**
+ * @tests gloda.noun.message.attr.involves
+ * @tests gloda.datastore.sqlgen.kConstraintIn
+ */
+async function test_query_messages_by_identity_peoples() {
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.involves(peoplesIdentityCollection.items[0]);
+
+  ts_messageIdentityQueries.push(query);
+  ts_messageIdentityCollections.push(
+    await queryExpect(query, world.peoplesMessages)
+  );
+}
+
+/**
+ * @tests gloda.noun.message.attr.involves
+ */
+async function test_query_messages_by_identity_outlier() {
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.involves(outlierIdentityCollection.items[0]);
+  // This also tests our ability to have two intersecting constraints! hooray!
+  query.involves(outlierIdentityCollection.items[1]);
+
+  ts_messageIdentityQueries.push(query);
+  ts_messageIdentityCollections.push(
+    await queryExpect(query, world.outlierMessages)
+  );
+}
+
+/**
+ * @tests gloda.query.test.kConstraintIn
+ */
+function test_query_messages_by_identity_nonmatches() {
+  verify_nonMatches(ts_messageIdentityQueries, ts_messageIdentityCollections);
+}
+
+/* exported test_query_messages_by_contact */
+function test_query_messages_by_contact() {
+  // IOU
+}
+
+// Preserved so the non-match test can exercise the same range constraint.
+var ts_messagesDateQuery;
+/**
+ * @tests gloda.noun.message.attr.date
+ * @tests gloda.datastore.sqlgen.kConstraintRanges
+ */
+async function test_query_messages_by_date() {
+  ts_messagesDateQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  // We are clearly relying on knowing the generation sequence here,
+  // fuggedaboutit.
+  ts_messagesDateQuery.dateRange([
+    world.peoplesMessages[1].date,
+    world.peoplesMessages[2].date,
+  ]);
+  await queryExpect(ts_messagesDateQuery, world.peoplesMessages.slice(1, 3));
+}
+
+/**
+ * Messages just outside the date range must fail query.test().
+ *
+ * @tests gloda.query.test.kConstraintRanges
+ */
+function test_query_messages_by_date_nonmatches() {
+  if (
+    ts_messagesDateQuery.test(world.peoplesMessages[0]) ||
+    ts_messagesDateQuery.test(world.peoplesMessages[3])
+  ) {
+    do_throw("The date testing mechanism is busted.");
+  }
+}
+
+/* === contacts === */
+/* exported test_query_contacts_by_popularity */
+function test_query_contacts_by_popularity() {
+  // IOU
+}
+
+/* === identities === */
+
+/* ===== Text-based queries ===== */
+
+/* === conversations === */
+
+/* exported test_query_conversations_by_subject_text */
+// Placeholder; intentionally empty for now.
+function test_query_conversations_by_subject_text() {}
+
+/* === messages === */
+
+/**
+ * Test subject searching using the conversation unique subject term.
+ *
+ * @tests gloda.noun.message.attr.subjectMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_subject_text() {
+  // We only need to use one conversation.
+  let convNum = 0;
+
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  let convSubjectTerm = uniqueTermGenerator(
+    UNIQUE_OFFSET_SUBJECT + UNIQUE_OFFSET_CONV + convNum
+  );
+  query.subjectMatches(convSubjectTerm);
+  await queryExpect(query, world.conversationLists[convNum]);
+}
+
+/**
+ * Test body searching using the conversation unique body term.
+ *
+ * @tests gloda.noun.message.attr.bodyMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_body_text() {
+  // We only need to use one conversation.
+  let convNum = 0;
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  let convBodyTerm = uniqueTermGenerator(
+    UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_CONV + convNum
+  );
+  query.bodyMatches(convBodyTerm);
+  // Variants that don't index bodies (see expectFulltextResults) expect [].
+  await queryExpect(
+    query,
+    expectFulltextResults ? world.conversationLists[convNum] : []
+  );
+}
+
+/**
+ * Test attachment name searching using the conversation unique attachment term.
+ *
+ * @tests gloda.noun.message.attr.attachmentNamesMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_attachment_names() {
+  let convNum = 0;
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  let convUniqueAttachment = uniqueTermGenerator(
+    UNIQUE_OFFSET_ATTACHMENT + UNIQUE_OFFSET_CONV + convNum
+  );
+  query.attachmentNamesMatch(convUniqueAttachment);
+  // Variants that don't index bodies (see expectFulltextResults) expect [].
+  await queryExpect(
+    query,
+    expectFulltextResults ? world.conversationLists[convNum] : []
+  );
+}
+
+/**
+ * Test author name fulltext searching using an arbitrary author.
+ *
+ * @tests gloda.noun.message.attr.authorMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_authorMatches_name() {
+  let [authorName, authorMail] = world.peoples[0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.authorMatches(authorName);
+  await queryExpect(query, world.authorGroups[authorMail]);
+}
+
+/**
+ * Test author mail address fulltext searching using an arbitrary author.
+ *
+ * @tests gloda.noun.message.attr.authorMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_authorMatches_email() {
+  let [, authorMail] = world.peoples[0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.authorMatches(authorMail);
+  await queryExpect(query, world.authorGroups[authorMail]);
+}
+
+/**
+ * Test recipient name fulltext searching using an arbitrary recipient. Since
+ * all 'peoples' messages are sent to all of them, any choice from peoples
+ * gets us all 'peoplesMessages'.
+ *
+ * @tests gloda.noun.message.attr.recipientsMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_recipients_name() {
+  let name = world.peoples[0][0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.recipientsMatch(name);
+  await queryExpect(query, world.peoplesMessages);
+}
+
+/**
+ * Test recipient mail fulltext searching using an arbitrary recipient. Since
+ * all 'peoples' messages are sent to all of them, any choice from peoples
+ * gets us all 'peoplesMessages'.
+ *
+ * @tests gloda.noun.message.attr.recipientsMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_recipients_email() {
+  let [, mail] = world.peoples[0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.recipientsMatch(mail);
+  await queryExpect(query, world.peoplesMessages);
+}
+
+/* === contacts === */
+
+// Preserved so the non-match test can reuse the LIKE constraint.
+var contactLikeQuery;
+/**
+ * @tests gloda.noun.contact.attr.name
+ * @tests gloda.datastore.sqlgen.kConstraintStringLike
+ */
+async function test_query_contacts_by_name() {
+  // Let's use like... we need to test that.
+  contactLikeQuery = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+  let personName = world.peoples[0][0];
+  // Chop off the first and last letter... this isn't the most edge-case
+  // handling way to roll, but LOOK OVER THERE? IS THAT ELVIS?
+  let personNameSubstring = personName.substring(1, personName.length - 1);
+  contactLikeQuery.nameLike(
+    contactLikeQuery.WILDCARD,
+    personNameSubstring,
+    contactLikeQuery.WILDCARD
+  );
+
+  await queryExpect(contactLikeQuery, [personName]);
+}
+
+/**
+ * @tests gloda.query.test.kConstraintStringLike
+ */
+function test_query_contacts_by_name_nonmatch() {
+  let otherContact = outlierIdentityCollection.items[0].contact;
+  if (contactLikeQuery.test(otherContact)) {
+    do_throw("The string LIKE mechanism as applied to contacts does not work.");
+  }
+}
+
+/* === identities === */
+
+var peoplesIdentityQuery;
+var peoplesIdentityCollection;
+// Query identities for all 'peoples' addresses; the collection is reused by
+// the involves() message queries above.
+async function test_query_identities_for_peoples() {
+  peoplesIdentityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+  peoplesIdentityQuery.kind("email");
+  let peopleAddrs = world.peoples.map(nameAndAddr => nameAndAddr[1]);
+  peoplesIdentityQuery.value.apply(peoplesIdentityQuery, peopleAddrs);
+  peoplesIdentityCollection = await queryExpect(
+    peoplesIdentityQuery,
+    peopleAddrs
+  );
+}
+
+var outlierIdentityQuery;
+var outlierIdentityCollection;
+// Same as above, but for the two outlier addresses.
+async function test_query_identities_for_outliers() {
+  outlierIdentityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+  outlierIdentityQuery.kind("email");
+  let outlierAddrs = [world.outlierAuthor[1], world.outlierFriend[1]];
+  outlierIdentityQuery.value.apply(outlierIdentityQuery, outlierAddrs);
+  outlierIdentityCollection = await queryExpect(
+    outlierIdentityQuery,
+    outlierAddrs
+  );
+}
+
+function test_query_identities_by_kind_and_value_nonmatches() {
+  verify_nonMatches(
+    [peoplesIdentityQuery, outlierIdentityQuery],
+    [peoplesIdentityCollection, outlierIdentityCollection]
+  );
+}
+
+// Sanity-check the driver-provided state before anything else runs.
+function test_sanity_test_environment() {
+  Assert.ok(msgGen, "Sanity that msgGen is set.");
+  Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+}
+
+// Ordered query-test suite shared by the per-transport test drivers. Ordering
+// matters: identity lookups run before the message queries that consume them.
+var base_query_messages_tests = [
+  test_sanity_test_environment,
+  function pre_setup_populate() {
+    pre_setup_populate_hook();
+  },
+  setup_populate,
+  function post_setup_populate() {
+    post_setup_populate_hook();
+  },
+  test_query_messages_by_conversation,
+  test_query_messages_by_conversation,
+  test_query_messages_by_conversation_nonmatches,
+  test_query_messages_by_folder,
+  test_query_messages_by_folder,
+  test_query_messages_by_folder_nonmatches,
+  test_get_message_for_header,
+  test_get_messages_for_headers,
+  // Need to do the identity and contact lookups so we can have their results
+  // for the other message-related queries.
+  test_query_identities_for_peoples,
+  test_query_identities_for_outliers,
+  test_query_identities_by_kind_and_value_nonmatches,
+  // Back to messages!
+  test_query_messages_by_identity_peoples,
+  test_query_messages_by_identity_outlier,
+  test_query_messages_by_identity_nonmatches,
+  test_query_messages_by_date,
+  test_query_messages_by_date_nonmatches,
+  // Fulltext
+  test_query_messages_by_subject_text,
+  test_query_messages_by_body_text,
+  test_query_messages_by_attachment_names,
+  test_query_messages_by_authorMatches_name,
+  test_query_messages_by_authorMatches_email,
+  test_query_messages_by_recipients_name,
+  test_query_messages_by_recipients_email,
+  // Like
+  test_query_contacts_by_name,
+  test_query_contacts_by_name_nonmatch,
+];
diff --git a/comm/mailnews/db/gloda/test/unit/head_gloda.js b/comm/mailnews/db/gloda/test/unit/head_gloda.js
new file mode 100644
index 0000000000..fb8edbd24e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/head_gloda.js
@@ -0,0 +1,19 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { mailTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MailTestUtils.jsm"
+);
+
+// Ensure the profile directory is set up
+do_get_profile();
+
+// Relative path from this test directory up to the tree root; used below to
+// load shared mailnews resources.
+var gDEPTH = "../../../../../";
+
+// Run shared shutdown/cleanup logic when the test finishes.
+registerCleanupFunction(function () {
+  load(gDEPTH + "mailnews/resources/mailShutdown.js");
+});
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
new file mode 100644
index 0000000000..e8234f1a97
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
@@ -0,0 +1,431 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["queryExpect", "sqlExpectCount", "sqlRun"];
+
+/*
+ * This file provides gloda query helpers for the test infrastructure.
+ */
+
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.queryHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+// Default [glodaExtractor, expectedExtractor] pairs keyed by noun id. See
+// queryExpect for how these reduce gloda results and expected items to
+// comparable strings.
+var _defaultExpectationExtractors = {};
+_defaultExpectationExtractors[GlodaConstants.NOUN_MESSAGE] = [
+  function expectExtract_message_gloda(aGlodaMessage) {
+    return aGlodaMessage.headerMessageID;
+  },
+  function expectExtract_message_synth(aSynthMessage) {
+    return aSynthMessage.messageId;
+  },
+];
+_defaultExpectationExtractors[GlodaConstants.NOUN_CONTACT] = [
+  function expectExtract_contact_gloda(aGlodaContact) {
+    return aGlodaContact.name;
+  },
+  function expectExtract_contact_name(aName) {
+    return aName;
+  },
+];
+_defaultExpectationExtractors[GlodaConstants.NOUN_IDENTITY] = [
+  function expectExtract_identity_gloda(aGlodaIdentity) {
+    return aGlodaIdentity.value;
+  },
+  function expectExtract_identity_address(aAddress) {
+    return aAddress;
+  },
+];
+
+// Fallback extractor when no noun-specific default exists.
+function expectExtract_default_toString(aThing) {
+  return aThing.toString();
+}
+
+/**
+ * Gloda collection listener that checks incoming results against an expected
+ * set and exposes a promise that resolves on success or rejects with a
+ * descriptive Error on any mismatch.
+ *
+ * @see queryExpect for info on what we do.
+ */
+class QueryExpectationListener {
+  /**
+   * @param aExpectedSet Object mapping expected string rep -> expected item;
+   *     entries are deleted as matching results arrive.
+   * @param aGlodaExtractor Function reducing a gloda item to its string rep.
+   * @param aOrderVerifier Optional (index, item, collection) order checker.
+   * @param aCallerStackFrame Caller stack frame, for error messages.
+   */
+  constructor(
+    aExpectedSet,
+    aGlodaExtractor,
+    aOrderVerifier,
+    aCallerStackFrame
+  ) {
+    this.expectedSet = aExpectedSet;
+    this.glodaExtractor = aGlodaExtractor;
+    this.orderVerifier = aOrderVerifier;
+    this.completed = false;
+    this.callerStackFrame = aCallerStackFrame;
+    // Track our current 'index' in the results for the (optional) order verifier,
+    // but also so we can provide slightly more useful debug output.
+    this.nextIndex = 0;
+
+    // Settled by the listener callbacks below; awaited by queryExpect.
+    this._promise = new Promise((resolve, reject) => {
+      this._resolve = resolve;
+      this._reject = reject;
+    });
+  }
+  // Check each newly arrived item against the expected set, the (optional)
+  // order verifier, and the query's own test() predicate.
+  onItemsAdded(aItems, aCollection) {
+    log.debug("QueryExpectationListener onItemsAdded received.");
+    for (let item of aItems) {
+      let glodaStringRep;
+      try {
+        glodaStringRep = this.glodaExtractor(item);
+      } catch (ex) {
+        this._reject(
+          new Error(
+            "Gloda extractor threw during query expectation.\n" +
+              "Item:\n" +
+              item +
+              "\nException:\n" +
+              ex
+          )
+        );
+        return; // We don't have to continue for more checks.
+      }
+
+      // Make sure we were expecting this guy.
+      if (glodaStringRep in this.expectedSet) {
+        delete this.expectedSet[glodaStringRep];
+      } else {
+        this._reject(
+          new Error(
+            "Query returned unexpected result!\n" +
+              "Item:\n" +
+              item +
+              "\nExpected set:\n" +
+              this.expectedSet +
+              "\nCaller:\n" +
+              this.callerStackFrame
+          )
+        );
+        return; // We don't have to continue for more checks.
+      }
+
+      if (this.orderVerifier) {
+        try {
+          this.orderVerifier(this.nextIndex, item, aCollection);
+        } catch (ex) {
+          // If the order was wrong, we could probably go for an output of what
+          // we actually got...
+          dump("Order Problem detected. Dump of data:\n");
+          for (let [iThing, thing] of aItems.entries()) {
+            dump(
+              iThing +
+                ": " +
+                thing +
+                (aCollection.stashedColumns
+                  ? ". " + aCollection.stashedColumns[thing.id].join(", ")
+                  : "") +
+                "\n"
+            );
+          }
+          this._reject(ex);
+          return; // We don't have to continue for more checks.
+        }
+      }
+      this.nextIndex++;
+
+      // Make sure the query's test method agrees with the database about this.
+      if (!aCollection.query.test(item)) {
+        this._reject(
+          new Error(
+            "Query test returned false when it should have been true on.\n" +
+              "Extracted:\n" +
+              glodaStringRep +
+              "\nItem:\n" +
+              item
+          )
+        );
+      }
+    }
+  }
+  onItemsModified(aItems, aCollection) {
+    log.debug(
+      "QueryExpectationListener onItemsModified received. Nothing done."
+    );
+  }
+  onItemsRemoved(aItems, aCollection) {
+    log.debug(
+      "QueryExpectationListener onItemsRemoved received. Nothing done."
+    );
+  }
+  // Final accounting: reject if anything expected never arrived, else resolve.
+  onQueryCompleted(aCollection) {
+    log.debug("QueryExpectationListener onQueryCompleted received.");
+    // We may continue to match newly added items if we leave our query as it
+    // is, so let's become explicit to avoid related troubles.
+    aCollection.becomeExplicit();
+
+    // `expectedSet` should now be empty.
+    for (let key in this.expectedSet) {
+      let value = this.expectedSet[key];
+      this._reject(
+        new Error(
+          "Query should have returned:\n" +
+            key +
+            " (" +
+            value +
+            ").\n" +
+            "But " +
+            this.nextIndex +
+            " was seen."
+        )
+      );
+      return; // We don't have to continue for more checks.
+    }
+
+    // If no error is thrown then we're fine here.
+    this._resolve();
+  }
+
+  get promise() {
+    return this._promise;
+  }
+}
+
+/**
+ * Execute the given query, verifying that the result set contains exactly the
+ * contents of the expected set; no more, no less. Since we expect that the
+ * query will result in gloda objects, but your expectations will not be posed
+ * in terms of gloda objects (though they could be), we rely on extractor
+ * functions to take the gloda result objects and the expected result objects
+ * into the same string.
+ * If you don't provide extractor functions, we will use our defaults (based on
+ * the query noun type) if available, or assume that calling toString is
+ * sufficient.
+ *
+ * @param aQuery Either a query to execute, or a dict with the following keys:
+ *     - queryFunc: The function to call that returns a function.
+ *     - queryThis: The 'this' to use for the invocation of queryFunc.
+ *     - args: A list (possibly empty) of arguments to precede the traditional
+ *         arguments to query.getCollection.
+ *     - nounId: The (numeric) noun id of the noun type expected to be returned.
+ * @param aExpectedSet The list of expected results from the query where each
+ *     item is suitable for extraction using aExpectedExtractor. We have a soft
+ *     spot for SyntheticMessageSets and automatically unbox them.
+ * @param aGlodaExtractor The extractor function to take an instance of the
+ *     gloda representation and return a string for comparison/equivalence
+ *     against that returned by the expected extractor (against the input
+ *     instance in aExpectedSet.) The value returned must be unique for all
+ *     of the expected gloda representations of the expected set. If omitted,
+ *     the default extractor for the gloda noun type is used. If no default
+ *     extractor exists, toString is called on the item.
+ * @param aExpectedExtractor The extractor function to take an instance from the
+ *     values in the aExpectedSet and return a string for comparison/equivalence
+ *     against that returned by the gloda extractor. The value returned must
+ *     be unique for all of the values in the expected set. If omitted, the
+ *     default extractor for the presumed input type based on the gloda noun
+ *     type used for the query is used, failing over to toString.
+ * @param aOrderVerifier Optional function to verify the order the results are
+ *     received in. Function signature should be of the form (aZeroBasedIndex,
+ *     aItem, aCollectionResultIsFor).
+ * @returns Whatever the query function returned (typically the collection),
+ *     once the expectation listener has completed successfully.
+ */
+async function queryExpect(
+  aQuery,
+  aExpectedSet,
+  aGlodaExtractor,
+  aExpectedExtractor,
+  aOrderVerifier
+) {
+  // A real query object has a test() method; normalize it to the dict form.
+  if (aQuery.test) {
+    aQuery = {
+      queryFunc: aQuery.getCollection,
+      queryThis: aQuery,
+      args: [],
+      nounId: aQuery._nounDef.id,
+    };
+  }
+
+  // Unbox SyntheticMessageSets into their list of synthetic messages.
+  if ("synMessages" in aExpectedSet) {
+    aExpectedSet = aExpectedSet.synMessages;
+  }
+
+  // - set extractor functions to defaults if omitted
+  if (aGlodaExtractor == null) {
+    if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
+      aGlodaExtractor = _defaultExpectationExtractors[aQuery.nounId][0];
+    } else {
+      aGlodaExtractor = expectExtract_default_toString;
+    }
+  }
+  if (aExpectedExtractor == null) {
+    if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
+      aExpectedExtractor = _defaultExpectationExtractors[aQuery.nounId][1];
+    } else {
+      aExpectedExtractor = expectExtract_default_toString;
+    }
+  }
+
+  // - build the expected set
+  let expectedSet = {};
+  for (let item of aExpectedSet) {
+    try {
+      expectedSet[aExpectedExtractor(item)] = item;
+    } catch (ex) {
+      throw new Error(
+        "Expected extractor threw during query expectation for item:\n" +
+          item +
+          "\nException:\n" +
+          ex
+      );
+    }
+  }
+
+  // - create the listener...
+  let listener = new QueryExpectationListener(
+    expectedSet,
+    aGlodaExtractor,
+    aOrderVerifier,
+    Components.stack.caller
+  );
+  // NOTE(review): this mutates the caller-supplied args array — confirm all
+  // callers pass a fresh list.
+  aQuery.args.push(listener);
+  let queryValue = aQuery.queryFunc.apply(aQuery.queryThis, aQuery.args);
+  // Wait for the QueryListener to finish.
+  await listener.promise;
+  return queryValue;
+}
+
+/**
+ * Asynchronously run a SQL statement against the gloda database. This can grow
+ * binding logic and data returning as needed.
+ *
+ * We run the statement asynchronously to get a consistent view of the database.
+ */
+async function sqlRun(sql) {
+ let conn = GlodaDatastore.asyncConnection;
+ let stmt = conn.createAsyncStatement(sql);
+ let rows = null;
+
+ let promiseResolve;
+ let promiseReject;
+ let promise = new Promise((resolve, reject) => {
+ promiseResolve = resolve;
+ promiseReject = reject;
+ });
+ // Running SQL.
+ stmt.executeAsync({
+ handleResult(aResultSet) {
+ if (!rows) {
+ rows = [];
+ }
+ let row;
+ while ((row = aResultSet.getNextRow())) {
+ rows.push(row);
+ }
+ },
+ handleError(aError) {
+ promiseReject(
+ new Error("SQL error!\nResult:\n" + aError + "\nSQL:\n" + sql)
+ );
+ },
+ handleCompletion() {
+ promiseResolve(rows);
+ },
+ });
+ stmt.finalize();
+ return promise;
+}
+
+/**
+ * Run an (async) SQL statement against the gloda database. The statement
+ * should be a SELECT COUNT; we check the count against aExpectedCount.
+ * Any additional arguments are positionally bound to the statement.
+ *
+ * We run the statement asynchronously to get a consistent view of the database.
+ */
+async function sqlExpectCount(aExpectedCount, aSQLString, ...params) {
+ let conn = GlodaDatastore.asyncConnection;
+ let stmt = conn.createStatement(aSQLString);
+
+ for (let iArg = 0; iArg < params.length; iArg++) {
+ GlodaDatastore._bindVariant(stmt, iArg, params[iArg]);
+ }
+
+ let desc = [aSQLString, ...params];
+ // Running SQL count.
+ let listener = new SqlExpectationListener(
+ aExpectedCount,
+ desc,
+ Components.stack.caller
+ );
+ stmt.executeAsync(listener);
+ // We don't need the statement anymore.
+ stmt.finalize();
+
+ await listener.promise;
+}
+
+class SqlExpectationListener {
+  constructor(aExpectedCount, aDesc, aCallerStackFrame) {
+    this.actualCount = null;
+    this.expectedCount = aExpectedCount;
+    this.sqlDesc = aDesc;
+    this.callerStackFrame = aCallerStackFrame;
+
+    this._promise = new Promise((resolve, reject) => {
+      this._resolve = resolve;
+      this._reject = reject;
+    });
+  }
+  handleResult(aResultSet) {
+    let row = aResultSet.getNextRow();
+    if (!row) {
+      this._reject(
+        new Error(
+          "No result row returned from caller:\n" +
+            this.callerStackFrame +
+            "\nSQL:\n" +
+            this.sqlDesc
+        )
+      );
+      return; // We don't have to continue for more checks.
+    }
+    this.actualCount = row.getInt64(0);
+  }
+
+  handleError(aError) {
+    this._reject(
+      new Error(
+        "SQL error from caller:\n" +
+          this.callerStackFrame +
+          "\nResult:\n" +
+          aError +
+          "\nSQL:\n" +
+          this.sqlDesc
+      )
+    );
+  }
+
+  handleCompletion(aReason) {
+    if (this.actualCount != this.expectedCount) {
+      this._reject(
+        new Error(
+          "Actual count of " +
+            this.actualCount +
+            " does not match expected count of:\n" +
+            this.expectedCount +
+            "\nFrom caller:\n" +
+            this.callerStackFrame +
+            "\nSQL:\n" +
+            this.sqlDesc
+        )
+      );
+      return; // We don't have to continue for more checks.
+    }
+    this._resolve();
+  }
+
+  get promise() {
+    return this._promise;
+  }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
new file mode 100644
index 0000000000..a4c092400b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
@@ -0,0 +1,847 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides gloda testing infrastructure.
+ *
+ * A few words about how tests should expect to interact with indexing:
+ *
+ * By default, we enable only event-driven indexing with an infinite work queue
+ * length. This means that all messages will be queued for indexing as they
+ * are added or modified. You should await |waitForGlodaIndexer| to wait
+ * until the indexer completes. If you want to assert that certain messages
+ * will have been indexed during that pass, you can pass them as arguments to
+ * |assertExpectedMessagesIndexed|.
+ * There is no need to tell us to expect the messages to be indexed prior to the
+ * waiting as long as nothing spins the event loop after you perform the action
+ * that triggers indexing. None of our existing xpcshell tests do this, but it
+ * is part of the mozmill idiom for its waiting mechanism, so be sure to not
+ * perform a mozmill wait without first telling us to expect the messages.
+ */
+
+const EXPORTED_SYMBOLS = [
+ "assertExpectedMessagesIndexed",
+ "glodaTestHelperInitialize",
+ "nukeGlodaCachesAndCollections",
+ "prepareIndexerForTesting",
+ "waitForGlodaIndexer",
+];
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.testHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+var indexMessageState;
+
+/**
+ * Create a 'me' identity of "me@localhost" for the benefit of Gloda. At the
+ * time of this writing, Gloda only initializes Gloda.myIdentities and
+ * Gloda.myContact at startup with no event-driven updates. As such, this
+ * function needs to be called prior to gloda startup.
+ */
+function createMeIdentity() {
+  let identity = MailServices.accounts.createIdentity();
+  identity.email = "me@localhost";
+  identity.fullName = "Me";
+}
+// And run it now.
+createMeIdentity();
+
+// Set the gloda prefs.
+// "yes" to indexing.
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// "no" to a sweep we don't control.
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
+var ENVIRON_MAPPINGS = [
+ {
+ envVar: "GLODA_DATASTORE_EXPLAIN_TO_PATH",
+ prefName: "mailnews.database.global.datastore.explainToPath",
+ },
+];
+
+// Propagate environment variables to prefs as appropriate:
+for (let { envVar, prefName } of ENVIRON_MAPPINGS) {
+ if (Services.env.exists(envVar)) {
+ Services.prefs.setCharPref(prefName, Services.env.get(envVar));
+ }
+}
+
+/**
+ * Side note:
+ * Keep them in the global scope so that a Cu.forceGC() call won't purge them.
+ */
+var collectionListener;
+
+/**
+ * Registers MessageInjection listeners and Gloda listeners for our tests.
+ *
+ * @param {MessageInjection} messageInjection Instance of MessageInjection
+ * to register Events to.
+ */
+function glodaTestHelperInitialize(messageInjection) {
+  // Initialize the message state if we are dealing with messages. At some
+  // point we probably want to just completely generalize the indexing state.
+  // That point is likely when our testing infrastructure needs the support
+  // provided by `indexMessageState` for things other than messages.
+  indexMessageState = new IndexMessageState();
+
+  collectionListener = new GlodaCollectionListener();
+  new TestAttributeProvider();
+  new MsgsClassifiedListener();
+
+  // Add a hook that makes folders not filthy when we first see them.
+  messageInjection.registerMessageInjectionListener({
+    /**
+     * By default all folders start out filthy. This is great in the real world
+     * but I went and wrote all the unit tests without entirely thinking about
+     * how this affected said unit tests. So we add a listener so that we can
+     * force the folders to be clean.
+     * This is okay and safe because messageInjection always creates the folders
+     * without any messages in them.
+     */
+    onRealFolderCreated(aRealFolder) {
+      log.debug(
+        `onRealFolderCreated through MessageInjection received. ` +
+          `Make folder: ${aRealFolder.name} clean for Gloda.`
+      );
+      let glodaFolder = Gloda.getFolderForFolder(aRealFolder);
+      glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderClean);
+    },
+
+    /**
+     * Make waitForGlodaIndexer know that it should wait for a msgsClassified
+     * event whenever messages have been injected, at least if event-driven
+     * indexing is enabled.
+     */
+    onInjectingMessages() {
+      log.debug(
+        "onInjectingMessages through MessageInjection received. Pushing to interestingEvents."
+      );
+      indexMessageState.interestingEvents.push("msgsClassified");
+    },
+
+    /**
+     * This basically translates to "we are triggering an IMAP move" and has
+     * the ramification that we should expect a msgsClassified event because
+     * the destination will see the header get added at some point.
+     */
+    onMovingMessagesWithoutDestHeaders() {
+      log.debug(
+        "onMovingMessagesWithoutDestHeaders through MessageInjection received. Pushing to interestingEvents."
+      );
+      indexMessageState.interestingEvents.push("msgsClassified");
+    },
+  });
+  log.debug("glodaTestHelperInitialize finished.");
+}
+
+class IndexMessageState {
+ data = new GlodaIndexerData();
+
+ constructor() {
+ prepareIndexerForTesting();
+ // Continue the preparing by assigning the hook recover and hook cleanup.
+ GlodaIndexer._unitTestHookRecover = this._testHookRecover;
+ GlodaIndexer._unitTestHookCleanup = this._testHookCleanup;
+ }
+
+ resetData() {
+ this.data = new GlodaIndexerData();
+ }
+
+ // The synthetic message sets passed in to |assertExpectedMessagesIndexed|.
+ synMessageSets = [];
+ // The user-specified accumulate-style verification function.
+ verifier() {
+ return this.data.data.verifier;
+ }
+ // Should we augment the synthetic sets with gloda message info?
+ augmentSynSets() {
+ return this.data.data.augment;
+ }
+ deletionSynSets() {
+ return this.data.data.deleted;
+ }
+
+ // Expected value of |_workerRecoveredCount| at assertion time.
+ expectedWorkerRecoveredCount() {
+ return this.data.data.recovered;
+ }
+ // Expected value of |_workerFailedToRecoverCount| at assertion time.
+ expectedFailedToRecoverCount() {
+ return this.data.data.failedToRecover;
+ }
+ // Expected value of |_workerCleanedUpCount| at assertion time.
+ expectedCleanedUpCount() {
+ return this.data.data.cleanedUp;
+ }
+ // Expected value of |_workerHadNoCleanUpCount| at assertion time.
+ expectedHadNoCleanUpCount() {
+ return this.data.data.hadNoCleanUp;
+ }
+ /**
+ * The number of messages that were fully (re)indexed using
+ * Gloda.grokNounItem.
+ */
+ _numFullIndexed = 0;
+ // Expected value of |_numFullIndexed| at assertion time.
+ expectedNumFullIndexed() {
+ return this.data.data.fullyIndexed;
+ }
+
+ // The number of times a worker had a recover helper and it recovered.
+ _workerRecoveredCount = 0;
+ // The number of times a worker had a recover helper and it did not recover.
+ _workerFailedToRecoverCount = 0;
+ // The number of times a worker had a cleanup helper and it cleaned up.
+ _workerCleanedUpCount = 0;
+ // The number of times a worker had no cleanup helper but there was a cleanup.
+ _workerHadNoCleanUpCount = 0;
+
+ /**
+ * Beware this scoping for this class is lost where _testHookRecover is used.
+ *
+ * @param aRecoverResult
+ * @param aOriginEx
+ * @param aActiveJob
+ * @param aCallbackHandle
+ */
+ _testHookRecover(aRecoverResult, aOriginEx, aActiveJob, aCallbackHandle) {
+ log.debug(
+ "indexer recovery hook fired" +
+ "\nrecover result:\n" +
+ aRecoverResult +
+ "\noriginating exception:\n" +
+ aOriginEx +
+ "\nactive job:\n" +
+ aActiveJob +
+ "\ncallbackHandle:\n" +
+ indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
+ );
+ if (aRecoverResult) {
+ indexMessageState._workerRecoveredCount++;
+ } else {
+ indexMessageState._workerFailedToRecoverCount++;
+ }
+ }
+
+ /**
+ * Beware this scoping for this class is lost where _testHookCleanup is used.
+ *
+ * @param aHadCleanupFunc
+ * @param aOriginEx
+ * @param aActiveJob
+ * @param aCallbackHandle
+ */
+ _testHookCleanup(aHadCleanupFunc, aOriginEx, aActiveJob, aCallbackHandle) {
+ log.debug(
+ "indexer cleanup hook fired" +
+ "\nhad cleanup?\n" +
+ aHadCleanupFunc +
+ "\noriginating exception:\n" +
+ aOriginEx +
+ "\nactive job:\n" +
+ aActiveJob +
+ "\ncallbackHandle\n" +
+ indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
+ );
+ if (aHadCleanupFunc) {
+ indexMessageState._workerCleanedUpCount++;
+ } else {
+ indexMessageState._workerHadNoCleanUpCount++;
+ }
+ }
+ _jsonifyCallbackHandleState(aCallbackHandle) {
+ return {
+ _stringRep: aCallbackHandle.activeStack.length + " active generators",
+ activeStackLength: aCallbackHandle.activeStack.length,
+ contextStack: aCallbackHandle.contextStack,
+ };
+ }
+
+ /**
+ * The gloda messages indexed since the last call to |waitForGlodaIndexer|.
+ */
+ _glodaMessagesByMessageId = [];
+ _glodaDeletionsByMessageId = [];
+
+ _numItemsAdded = 0;
+
+ applyGlodaIndexerData(data) {
+ this.data.applyData(data);
+ }
+
+ /**
+ * A list of events that we need to see before we allow ourselves to perform
+ * the indexer check. For example, if "msgsClassified" is in here, it means
+ * that whether the indexer is active or not is irrelevant until we have
+ * seen that msgsClassified event.
+ */
+ interestingEvents = [];
+}
+
+function prepareIndexerForTesting() {
+ if (!GlodaIndexer.enabled) {
+ throw new Error(
+ "The gloda indexer is somehow not enabled. This is problematic."
+ );
+ }
+ // Make the indexer be more verbose about indexing for us.
+ GlodaIndexer._unitTestSuperVerbose = true;
+ GlodaMsgIndexer._unitTestSuperVerbose = true;
+ // Lobotomize the adaptive indexer.
+ // The indexer doesn't need to worry about load; zero his rescheduling time.
+ GlodaIndexer._INDEX_INTERVAL = 0;
+ // The indexer already registered for the idle service; we must remove this
+ // or "idle" notifications will still get sent via the observer mechanism.
+ let realIdleService = GlodaIndexer._idleService;
+ realIdleService.removeIdleObserver(
+ GlodaIndexer,
+ GlodaIndexer._indexIdleThresholdSecs
+ );
+ // Pretend we are always idle.
+ GlodaIndexer._idleService = {
+ idleTime: 1000,
+ addIdleObserver() {
+ // There is no actual need to register with the idle observer, and if
+ // we do, the stupid "idle" notification will trigger commits.
+ },
+ removeIdleObserver() {},
+ };
+ // We want the event-driven indexer to always handle indexing and never spill
+ // to an indexing sweep unless a test intentionally does so.
+ GlodaIndexer._indexMaxEventQueueMessages = 10000;
+ // Lobotomize the adaptive indexer's constants.
+ GlodaIndexer._cpuTargetIndexTime = 10000000;
+ GlodaIndexer._CPU_TARGET_INDEX_TIME_ACTIVE = 10000000;
+ GlodaIndexer._CPU_TARGET_INDEX_TIME_IDLE = 10000000;
+ GlodaIndexer._CPU_IS_BUSY_TIME = 10000000;
+ GlodaIndexer._PAUSE_LATE_IS_BUSY_TIME = 10000000;
+
+ delete GlodaIndexer._indexTokens;
+ GlodaIndexer.__defineGetter__("_indexTokens", function () {
+ return GlodaIndexer._CPU_MAX_TOKENS_PER_BATCH;
+ });
+ GlodaIndexer.__defineSetter__("_indexTokens", function () {});
+
+  // This includes making commits only happen when the unit tests explicitly
+ // tell them to.
+ GlodaIndexer._MINIMUM_COMMIT_TIME = 10000000;
+ GlodaIndexer._MAXIMUM_COMMIT_TIME = 10000000;
+}
+
+class GlodaIndexerData {
+ data = {
+ verifier: null,
+ augment: false,
+ deleted: [],
+ fullyIndexed: null,
+
+ // Things should not be recovering or failing and cleaning up unless the test
+ // is expecting it.
+ recovered: 0,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ };
+
+ /**
+ * Applies data shallow.
+ * Only the first level of keys are applied and replaced complete
+ * if given via param data. No deep merge.
+ *
+ * @param {*} data
+ */
+ applyData(data) {
+ this.data = {
+ ...this.data,
+ ...data,
+ };
+ }
+}
+
+/**
+ * Note that if the indexer is not currently active we assume it has already
+ * completed; we do not entertain the possibility that it has not yet started.
+ * Since the indexer is 'active' as soon as it sees an event, this does mean
+ * that you need to wait to make sure the indexing event has happened before
+ * calling us. This is reasonable.
+ */
+async function waitForGlodaIndexer() {
+  let eventsPending = TestUtils.waitForCondition(() => {
+    if (indexMessageState.interestingEvents.length > 1) {
+      // Events still pending. See msgClassified event and
+      // messageInjection.registerMessageInjectionListener.
+      return false;
+    }
+    // Events finished.
+    return true;
+  });
+  let indexerRunning = TestUtils.waitForCondition(() => {
+    if (GlodaIndexer.indexing) {
+      // Still indexing.
+      return false;
+    }
+    // Indexing finished.
+    return true;
+  });
+
+  log.debug(
+    "waitForGlodaIndexer waiting for interestingEvents and GlodaIndexer.indexing."
+  );
+
+  // If we are waiting on certain events to occur first, block on those.
+  await Promise.all([eventsPending, indexerRunning]);
+}
+
+/**
+ * Each time a msgClassified Event is fired and it is present
+ * in IndexMessageState.interestingEvents it will be removed.
+ */
+class MsgsClassifiedListener {
+  /**
+   * Events pending for the tests.
+   * (we want this to happen after gloda registers its own listener, and it
+   * does.)
+   */
+  constructor() {
+    MailServices.mfn.addListener(
+      this,
+      Ci.nsIMsgFolderNotificationService.msgsClassified
+    );
+  }
+  /**
+   * If this was an expected interesting event, remove it from the list.
+   * If an event happens that we did not expect, it does not matter. We know
+   * this because we add events we care about to interestingEvents before they
+   * can possibly be fired.
+   */
+  msgsClassified(aMsgHdrs, aJunkClassified, aTraitClassified) {
+    log.debug("MsgsClassifiedListener msgsClassified received.");
+    let idx = indexMessageState.interestingEvents.indexOf("msgsClassified");
+    if (idx != -1) {
+      log.debug("Remove interestingEvent through msgsClassified.");
+      // Remove the interesting Event as we received it here.
+      indexMessageState.interestingEvents.splice(idx, 1);
+    }
+  }
+}
+
+/**
+ * This AttributeProvider helps us testing Gloda.
+ * With the `process` method the Collections will be noticed
+ * through listeners.
+ * (onItemsAdded, onItemsModified, onItemsRemoved, onQueryComplete)
+ */
+class TestAttributeProvider {
+ providerName = "glodaTestHelper:fakeProvider";
+ constructor() {
+ // Register us with gloda as an attribute provider so that we can
+ // distinguish between fully reindexed messages and fastpath indexed
+ // messages.
+ Gloda._attrProviderOrderByNoun[GlodaConstants.NOUN_MESSAGE].push({
+ providerName: this.providerName,
+ process: this.process,
+ });
+ }
+ /**
+ * Fake attribute provider processing function so we can distinguish
+ * between fully reindexed messages and fast-path modified messages.
+ * Process has to be invoked for the GlodaCollectionListener
+ */
+ *process(aItem, aRawReps, aIsConceptuallyNew, aCallbackHandle) {
+ indexMessageState._numFullIndexed++;
+
+ yield GlodaConstants.kWorkDone;
+ }
+}
+
+/**
+ * This class tracks a GlodaCollection (created by Gloda._wildcardCollection).
+ * The listeners for this collection which will notify our IndexMessageState
+ * are defined here.
+ */
+class GlodaCollectionListener {
+  // Our catch-all message collection that nets us all messages passing by.
+  catchAllCollection = null;
+  constructor() {
+    this.catchAllCollection = Gloda._wildcardCollection(
+      GlodaConstants.NOUN_MESSAGE
+    );
+    this.catchAllCollection.listener = this;
+  }
+  /*
+   * Our catch-all collection listener. Any time a new message gets indexed,
+   * we should receive an onItemsAdded call. Any time an existing message
+   * gets reindexed, we should receive an onItemsModified call. Any time an
+   * existing message actually gets purged from the system, we should receive
+   * an onItemsRemoved call.
+   */
+  onItemsAdded(aItems) {
+    log.debug("GlodaCollectionListener onItemsAdded received.");
+    for (let item of aItems) {
+      if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
+        throw new Error(
+          "Gloda message " +
+            item.folderMessage +
+            " already indexed once since the last waitForGlodaIndexer call!"
+        );
+      }
+      log.debug(
+        "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
+      );
+      indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
+    }
+
+    // Simulate some other activity clearing out the current folder's
+    // cached database, which used to kill the indexer's enumerator.
+    if (++indexMessageState._numItemsAdded == 3) {
+      log.debug("GlodaCollectionListener simulate other activity.");
+      GlodaMsgIndexer._indexingFolder.msgDatabase = null;
+    }
+  }
+
+  onItemsModified(aItems) {
+    log.debug("GlodaCollectionListener onItemsModified received.");
+    for (let item of aItems) {
+      if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
+        throw new Error(
+          "Gloda message " +
+            item +
+            " already indexed once since the last waitForGlodaIndexer call!"
+        );
+      }
+      log.debug(
+        "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
+      );
+      indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
+    }
+  }
+
+  onItemsRemoved(aItems) {
+    log.debug("GlodaCollectionListener onItemsRemoved received.");
+    for (let item of aItems) {
+      if (
+        item.headerMessageID in indexMessageState._glodaDeletionsByMessageId
+      ) {
+        throw new Error(
+          "Gloda message " +
+            item +
+            " already deleted once since the last waitForGlodaIndexer call!"
+        );
+      }
+      log.debug(
+        "GlodaCollectionListener save item to indexMessageState._glodaDeletionsByMessageId."
+      );
+      indexMessageState._glodaDeletionsByMessageId[item.headerMessageID] = item;
+    }
+  }
+  onQueryComplete(aCollection) {
+    log.debug(
+      "GlodaCollectionListener onQueryComplete received. Nothing done."
+    );
+  }
+}
+
+/**
+ * Assert that the set of messages indexed is exactly the set passed in.
+ * If a verification function is provided, use it on a per-message basis
+ * to make sure the resulting gloda message looks like it should given the
+ * synthetic message.
+ *
+ * Throws Errors if something is not according and returns always [true, string]
+ * for `Assert.ok` in your tests. This ensures proper testing output.
+ *
+ * @param {SyntheticMessage[]} aSynMessageSets A list of SyntheticMessageSets
+ * containing exactly the messages we should expect to see.
+ * @param [aConfig.verifier] The function to call to verify that the indexing
+ * had the desired result. Takes arguments aSynthMessage (the synthetic
+ * message just indexed), aGlodaMessage (the gloda message representation of
+ * the indexed message), and aPreviousResult (the value last returned by the
+ * verifier function for this given set of messages, or undefined if it is
+ * the first message.)
+ * @param [aConfig.augment=false] Should we augment the synthetic message sets
+ * with references to their corresponding gloda messages? The messages
+ * will show up in a 'glodaMessages' list on the syn set.
+ * @param {SyntheticMessageSet[]} [aConfig.deleted] A list of SyntheticMessageSets
+ * containing messages that should be recognized as deleted by the gloda
+ * indexer in this pass.
+ * @param [aConfig.fullyIndexed] A count of the number of messages we expect
+ * to observe being fully indexed. This is relevant because in the case
+ * of message moves, gloda may generate an onItemsModified notification but
+ * not reindex the message. This attribute allows the tests to distinguish
+ * between the two cases.
+ * @returns {[true, string]}
+ */
+function assertExpectedMessagesIndexed(aSynMessageSets, aConfig) {
+ indexMessageState.synMessageSets = aSynMessageSets;
+
+ indexMessageState.applyGlodaIndexerData(aConfig);
+
+ // Check that we have a gloda message for every syn message and verify.
+ for (let msgSet of indexMessageState.synMessageSets) {
+ if (indexMessageState.augmentSynSets()) {
+ msgSet.glodaMessages = [];
+ }
+ for (let [iSynMsg, synMsg] of msgSet.synMessages.entries()) {
+ if (!(synMsg.messageId in indexMessageState._glodaMessagesByMessageId)) {
+ let msgHdr = msgSet.getMsgHdr(iSynMsg);
+ throw new Error(
+ "Header " +
+ msgHdr.messageId +
+ " in folder: " +
+ (msgHdr ? msgHdr.folder.name : "no header?") +
+ " should have been indexed."
+ );
+ }
+
+ let glodaMsg =
+ indexMessageState._glodaMessagesByMessageId[synMsg.messageId];
+ if (indexMessageState.augmentSynSets()) {
+ msgSet.glodaMessages.push(glodaMsg);
+ }
+
+ indexMessageState._glodaMessagesByMessageId[synMsg.messageId] = null;
+
+ let verifier = indexMessageState.verifier();
+ let previousValue = undefined;
+ if (verifier) {
+ try {
+ // Looking if a previous value have been present.
+ previousValue = verifier(synMsg, glodaMsg, previousValue);
+ } catch (ex) {
+ throw new Error(
+ "Verification failure: " +
+ synMsg +
+ " is not close enough to " +
+ glodaMsg +
+ "; basing this on exception: " +
+ ex
+ );
+ }
+ }
+ }
+ }
+
+ // Check that we don't have any extra gloda messages. (lacking syn msgs)
+ for (let messageId in indexMessageState._glodaMessagesByMessageId) {
+ let glodaMsg = indexMessageState._glodaMessagesByMessageId[messageId];
+ if (glodaMsg != null) {
+ throw new Error(
+ "Gloda message:\n" +
+ glodaMsg +
+ "\nShould not have been indexed.\n" +
+ "Source header:\n" +
+ glodaMsg.folderMessage
+ );
+ }
+ }
+
+ if (indexMessageState.deletionSynSets()) {
+ for (let msgSet of indexMessageState.deletionSynSets()) {
+ for (let synMsg of msgSet.synMessages) {
+ if (
+ !(synMsg.messageId in indexMessageState._glodaDeletionsByMessageId)
+ ) {
+ throw new Error(
+ "Synthetic message " + synMsg + " did not get deleted!"
+ );
+ }
+
+ indexMessageState._glodaDeletionsByMessageId[synMsg.messageId] = null;
+ }
+ }
+ }
+
+ // Check that we don't have unexpected deletions.
+ for (let messageId in indexMessageState._glodaDeletionsByMessageId) {
+ let glodaMsg = indexMessageState._glodaDeletionsByMessageId[messageId];
+ if (glodaMsg != null) {
+ throw new Error(
+ "Gloda message with message id " +
+ messageId +
+ " was " +
+ "unexpectedly deleted!"
+ );
+ }
+ }
+
+ if (
+ indexMessageState.expectedWorkerRecoveredCount() != null &&
+ indexMessageState.expectedWorkerRecoveredCount() !=
+ indexMessageState._workerRecoveredCount
+ ) {
+ throw new Error(
+ "Expected worker-recovered count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedWorkerRecoveredCount() +
+ "\nActual:\n" +
+ indexMessageState._workerRecoveredCount
+ );
+ }
+ if (
+ indexMessageState.expectedFailedToRecoverCount() != null &&
+ indexMessageState.expectedFailedToRecoverCount() !=
+ indexMessageState._workerFailedToRecoverCount
+ ) {
+ throw new Error(
+ "Expected worker-failed-to-recover count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedFailedToRecoverCount() +
+ "\nActual:\n" +
+ indexMessageState._workerFailedToRecoverCount
+ );
+ }
+ if (
+ indexMessageState.expectedCleanedUpCount() != null &&
+ indexMessageState.expectedCleanedUpCount() !=
+ indexMessageState._workerCleanedUpCount
+ ) {
+ throw new Error(
+ "Expected worker-cleaned-up count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedCleanedUpCount() +
+ "\nActual:\n" +
+ indexMessageState._workerCleanedUpCount
+ );
+ }
+ if (
+ indexMessageState.expectedHadNoCleanUpCount() != null &&
+ indexMessageState.expectedHadNoCleanUpCount() !=
+ indexMessageState._workerHadNoCleanUpCount
+ ) {
+ throw new Error(
+ "Expected worker-had-no-cleanup count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedHadNoCleanUpCount() +
+ "\nActual\n" +
+ indexMessageState._workerHadNoCleanUpCount
+ );
+ }
+
+ if (
+ indexMessageState.expectedNumFullIndexed() != null &&
+ indexMessageState.expectedNumFullIndexed() !=
+ indexMessageState._numFullIndexed
+ ) {
+ throw new Error(
+ "Expected number of fully indexed messages did not match.\n" +
+ "Expected:\n" +
+ indexMessageState.expectedNumFullIndexed() +
+ "\nActual:\n" +
+ indexMessageState._numFullIndexed
+ );
+ }
+
+ // Cleanup of internal tracking values in the IndexMessageState
+ // for new tests.
+ resetIndexMessageState();
+
+  // If no error has been thrown up to this point, we're fine!
+ // Return values for Assert.ok.
+ // Using like Assert.ok(...assertExpectedMessagesIndexed()).
+ return [true, "Expected messages were indexed."];
+}
+
+/**
+ * Resets the IndexMessageState
+ *
+ * @TODO more docs
+ */
+function resetIndexMessageState() {
+ indexMessageState.synMessageSets = [];
+ indexMessageState._glodaMessagesByMessageId = [];
+ indexMessageState._glodaDeletionsByMessageId = [];
+
+ indexMessageState._workerRecoveredCount = 0;
+ indexMessageState._workerFailedToRecoverCount = 0;
+ indexMessageState._workerCleanedUpCount = 0;
+ indexMessageState._workerHadNoCleanUpCount = 0;
+
+ indexMessageState._numFullIndexed = 0;
+ indexMessageState.resetData();
+}
+
+/**
+ * Wipe out almost everything from the clutches of the GlodaCollectionManager.
+ * By default, it is caching things and knows about all the non-GC'ed
+ * collections. Tests may want to ensure that their data is loaded from disk
+ * rather than relying on the cache, and so, we exist.
+ * The exception to everything is that Gloda's concept of myContact and
+ * myIdentities needs to have its collections still be reachable or invariants
+ * are in danger of being "de-invarianted".
+ * The other exception to everything are any catch-all-collections used by our
+ * testing/indexing process. We don't scan for them, we just hard-code their
+ * addition if they exist.
+ */
+function nukeGlodaCachesAndCollections() {
+ // Explode if the GlodaCollectionManager somehow doesn't work like we think it
+ // should. (I am reluctant to put this logic in there, especially because
+ // knowledge of the Gloda contact/identity collections simply can't be known
+ // by the colleciton manager.)
+ if (
+ GlodaCollectionManager._collectionsByNoun === undefined ||
+ GlodaCollectionManager._cachesByNoun === undefined
+ ) {
+ // We don't check the Gloda contact/identities things because they might not
+ // get initialized if there are no identities, which is the case for our
+ // unit tests right now...
+ throw new Error(
+ "Try and remember to update the testing infrastructure when you " +
+ "change things!"
+ );
+ }
+
+ // We can just blow away the known collections.
+ GlodaCollectionManager._collectionsByNoun = {};
+ // But then we have to put the myContact / myIdentities junk back.
+ if (Gloda._myContactCollection) {
+ GlodaCollectionManager.registerCollection(Gloda._myContactCollection);
+ GlodaCollectionManager.registerCollection(Gloda._myIdentitiesCollection);
+ }
+ // Don't forget our testing catch-all collection.
+ if (collectionListener.catchAllCollection) {
+ // Empty it out in case it has anything in it.
+ collectionListener.catchAllCollection.clear();
+ // And now we can register it.
+ GlodaCollectionManager.registerCollection(
+ collectionListener.catchAllCollection
+ );
+ }
+
+ // Caches aren't intended to be cleared, but we also don't want to lose our
+ // caches, so we need to create new ones from the ashes of the old ones.
+ let oldCaches = GlodaCollectionManager._cachesByNoun;
+ GlodaCollectionManager._cachesByNoun = {};
+ for (let nounId in oldCaches) {
+ let cache = oldCaches[nounId];
+ GlodaCollectionManager.defineCache(cache._nounDef, cache._maxCacheSize);
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
new file mode 100644
index 0000000000..f7a5199ba3
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
@@ -0,0 +1,293 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "configureGlodaIndexing",
+ "waitForGlodaDBFlush",
+ "waitForIndexingHang",
+ "resumeFromSimulatedHang",
+ "permuteMessages",
+ "makeABCardForAddressPair",
+];
+
+/*
+ * This file provides gloda testing infrastructure functions which are not coupled
+ * with the IndexMessageState from GlodaTestHelper.jsm
+ */
+
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.helperFunctions",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * Resume execution when the db has run all the async statements whose execution
+ * was queued prior to this call. We trigger a commit to accomplish this,
+ * although this could also be accomplished without a commit. (Though we would
+ * have to reach into GlodaDatastore.jsm and get at the raw connection or extend
+ * datastore to provide a way to accomplish this.)
+ */
+async function waitForGlodaDBFlush() {
+  // We already have a mechanism to do this by forcing a commit. Arguably,
+  // it would be better to use a mechanism that does not induce an fsync.
+  var savedDepth = GlodaDatastore._transactionDepth;
+  if (!savedDepth) {
+    // No transaction is currently open; open one so there is something to
+    // commit below.
+    GlodaDatastore._beginTransaction();
+  }
+
+  let promiseResolve;
+  let promise = new Promise(resolve => {
+    promiseResolve = resolve;
+  });
+  // The resolver runs once the commit has actually hit the database.
+  GlodaDatastore.runPostCommit(promiseResolve);
+  // We don't actually need to run things to zero. We can just wait for the
+  // outer transaction to close itself.
+  GlodaDatastore._commitTransaction();
+  if (savedDepth) {
+    // Restore the transaction nesting the caller had on entry.
+    GlodaDatastore._beginTransaction();
+  }
+  await promise;
+}
+
+/**
+ * An injected fault exception, thrown by the fault-injection hook installed
+ * via |configureGlodaIndexing|'s injectFaultIn setting.
+ *
+ * @param aWhy Short string naming the operation the fault was injected into.
+ */
+function InjectedFault(aWhy) {
+  this.message = aWhy;
+}
+InjectedFault.prototype = {
+  toString() {
+    return "[InjectedFault: " + this.message + "]";
+  },
+};
+
+// Replacement for GlodaMsgIndexer._MsgHdrToMimeMessageFunc that always throws.
+function _inject_failure_on_MsgHdrToMimeMessage() {
+  throw new InjectedFault("MsgHdrToMimeMessage");
+}
+
+// Module-level hang state. hangPromise resolves with a [function, thisObj,
+// args] triple so resumeFromSimulatedHang can replay the suspended call.
+let hangResolve;
+let hangPromise = new Promise(resolve => {
+  hangResolve = resolve;
+});
+
+// Replacement for GlodaMsgIndexer._MsgHdrToMimeMessageFunc that records the
+// call instead of performing it, manifesting as an indexing hang.
+function _simulate_hang_on_MsgHdrToMimeMessage(...aArgs) {
+  hangResolve([MsgHdrToMimeMessage, null, aArgs]);
+}
+
+/**
+ * If you have configured gloda to hang while indexing, this is the thing
+ * you wait on to make sure the indexer actually gets to the point where it
+ * hangs.
+ */
+async function waitForIndexingHang() {
+  await hangPromise;
+}
+
+/**
+ * Configure gloda indexing. For most settings, the settings get clobbered by
+ * the next time this method is called. Omitted settings reset to the defaults.
+ * However, anything labeled as a 'sticky' setting stays that way until
+ * explicitly changed.
+ *
+ * @param {boolean} [aArgs.event=true] Should event-driven indexing be enabled
+ * (true) or disabled (false)? Right now, this actually suppresses
+ * indexing... the semantics will be ironed out as-needed.
+ * @param [aArgs.hangWhile] Must be either omitted (for don't force a hang) or
+ * "streaming" indicating that we should do a no-op instead of performing
+ * the message streaming. This will manifest as a hang until
+ * |resumeFromSimulatedHang| is invoked or the test explicitly causes the
+ * indexer to abort (in which case you do not need to call the resume
+ * function.) You must omit injectFaultIn if you use hangWhile.
+ * @param [aArgs.injectFaultIn=null] Must be omitted (for don't inject a
+ * failure) or "streaming" indicating that we should inject a failure when
+ * the message indexer attempts to stream a message. The fault will be an
+ * appropriate exception. You must omit hangWhile if you use injectFaultIn.
+ */
+function configureGlodaIndexing(aArgs) {
+ let shouldSuppress = "event" in aArgs ? !aArgs.event : false;
+ if (shouldSuppress != GlodaIndexer.suppressIndexing) {
+ log.debug(`Setting suppress indexing to ${shouldSuppress}.`);
+ GlodaIndexer.suppressIndexing = shouldSuppress;
+ }
+
+ if ("hangWhile" in aArgs) {
+ log.debug(`Enabling hang injection in ${aArgs.hangWhile}.`);
+ switch (aArgs.hangWhile) {
+ case "streaming":
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
+ _simulate_hang_on_MsgHdrToMimeMessage;
+ break;
+ default:
+ throw new Error(
+ aArgs.hangWhile + " is not a legal choice for hangWhile"
+ );
+ }
+ } else if ("injectFaultIn" in aArgs) {
+ log.debug(`Enabling fault injection in ${aArgs.hangWhile}.`);
+ switch (aArgs.injectFaultIn) {
+ case "streaming":
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
+ _inject_failure_on_MsgHdrToMimeMessage;
+ break;
+ default:
+ throw new Error(
+ aArgs.injectFaultIn + " is not a legal choice for injectFaultIn"
+ );
+ }
+ } else {
+ if (GlodaMsgIndexer._MsgHdrToMimeMessageFunc != MsgHdrToMimeMessage) {
+ log.debug("Clearing hang/fault injection.");
+ }
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc = MsgHdrToMimeMessage;
+ }
+}
+
+/**
+ * Call this to resume from the hang induced by configuring the indexer with
+ * a "hangWhile" argument to |configureGlodaIndexing|.
+ *
+ * @param [aJustResumeExecution=false] Should we just poke the callback driver
+ *    for the indexer rather than continuing the call. You would likely want
+ *    to do this if you committed a lot of violence while in the simulated
+ *    hang and proper resumption would throw exceptions all over the place.
+ *    (For example; if you hang before streaming and destroy the message
+ *    header while suspended, resuming the attempt to stream will throw.)
+ */
+async function resumeFromSimulatedHang(aJustResumeExecution) {
+  if (aJustResumeExecution) {
+    log.debug("Resuming from simulated hang with direct wrapper callback.");
+    GlodaIndexer._wrapCallbackDriver();
+  } else {
+    // hangPromise resolves with the [function, thisObj, args] triple recorded
+    // by _simulate_hang_on_MsgHdrToMimeMessage; replay the suspended call.
+    let [func, dis, args] = await hangPromise;
+    log.debug(`Resuming from simulated hang with call to: ${func.name}.`);
+    func.apply(dis, args);
+  }
+  // Reset the promise (and its resolver) so a subsequent hang can be awaited.
+  hangPromise = new Promise(resolve => {
+    hangResolve = resolve;
+  });
+}
+
+/**
+ * Prepares permutations for messages with aScenarioMaker. Be sure to wait for
+ * the indexer for every permutation and verify the result.
+ *
+ * This process is executed once for each possible permutation of observation
+ * of the synthetic messages. (Well, we cap it; brute-force test your logic
+ * on your own time; you should really only be feeding us minimal scenarios.)
+ *
+ * @param aScenarioMaker A function that, when called, will generate a series
+ *   of SyntheticMessage instances. Each call to this method should generate
+ *   a new set of conceptually equivalent, but not identical, messages. This
+ *   allows us to process without having to reset our state back to nothing each
+ *   time. (This is more to try and make sure we run the system with a 'dirty'
+ *   state than a bid for efficiency.)
+ * @param {MessageInjection} messageInjection An instance to use for permuting
+ *   the messages and creating folders.
+ *
+ * @returns {Promise<Array<function(): Promise<SyntheticMessageSet>>>} An array
+ *   of async functions. Await them sequentially with a for...of loop; each one
+ *   injects one permutation into the shared folder and resolves to the
+ *   injected SyntheticMessageSet. Wait for the indexer and assert after each.
+ */
+async function permuteMessages(aScenarioMaker, messageInjection) {
+  // All permutations are injected into this single shared folder.
+  let folder = await messageInjection.makeEmptyFolder();
+
+  // To calculate the permutations, we need to actually see what gets produced.
+  let scenarioMessages = aScenarioMaker();
+  let numPermutations = Math.min(factorial(scenarioMessages.length), 32);
+
+  let permutations = [];
+  for (let iPermutation = 0; iPermutation < numPermutations; iPermutation++) {
+    permutations.push(async () => {
+      log.debug(`Run permutation: ${iPermutation + 1} / ${numPermutations}`);
+      // If this is not the first time through, we need to create a new set.
+      if (iPermutation) {
+        scenarioMessages = aScenarioMaker();
+      }
+      scenarioMessages = permute(scenarioMessages, iPermutation);
+      let scenarioSet = new SyntheticMessageSet(scenarioMessages);
+      await messageInjection.addSetsToFolders([folder], [scenarioSet]);
+      return scenarioSet;
+    });
+  }
+  return permutations;
+}
+
+/**
+ * A simple factorial function used to calculate the number of permutations
+ * possible for a given set of messages.
+ *
+ * @param i The value whose factorial to compute.
+ * @param rv Internal accumulator for the tail-recursive form; callers should
+ *   omit it.
+ * @returns i! (1 when i <= 1).
+ */
+function factorial(i, rv) {
+  if (i <= 1) {
+    return rv || 1;
+  }
+  return factorial(i - 1, (rv || 1) * i); // tail-call capable
+}
+
+/**
+ * Permute an array given a 'permutation id' that is an integer that fully
+ * characterizes the permutation through the decisions that need to be made
+ * at each step.
+ *
+ * @param aArray Source array that is destructively processed; it is emptied
+ *   by the repeated splice calls below.
+ * @param aPermutationId The permutation id. A permutation id of 0 results in
+ *   the original array's sequence being maintained.
+ * @returns A new array containing the permuted elements.
+ */
+function permute(aArray, aPermutationId) {
+  let out = [];
+  for (let i = aArray.length; i > 0; i--) {
+    // Choose the element at (id mod remaining-count), then shrink the pool
+    // and consume that decision digit from the permutation id.
+    let offset = aPermutationId % i;
+    out.push(aArray[offset]);
+    aArray.splice(offset, 1);
+    aPermutationId = Math.floor(aPermutationId / i);
+  }
+  return out;
+}
+
+/**
+ * Add a name-and-address pair as generated by `makeNameAndAddress` to the
+ * personal address book.
+ *
+ * @param nameAndAddress A two-element array of [display name, email address].
+ */
+function makeABCardForAddressPair(nameAndAddress) {
+  // XXX bug 314448 demands that we trigger creation of the ABs... If we don't
+  // do this, then the call to addCard will fail if someone else hasn't tickled
+  // this.
+  MailServices.ab.directories;
+
+  // kPABData is copied from abSetup.js
+  let kPABData = {
+    URI: "jsaddrbook://abook.sqlite",
+  };
+  let addressBook = MailServices.ab.getDirectory(kPABData.URI);
+
+  let card = Cc["@mozilla.org/addressbook/cardproperty;1"].createInstance(
+    Ci.nsIAbCard
+  );
+  card.displayName = nameAndAddress[0];
+  card.primaryEmail = nameAndAddress[1];
+
+  // Just save the new node straight away.
+  addressBook.addCard(card);
+
+  log.debug(`Adding address book card for: ${nameAndAddress}`);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js b/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js
new file mode 100644
index 0000000000..ff186e871a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js
@@ -0,0 +1,86 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This test does not use glodaTestHelper because:
+ * 1) We need to do things as part of the test without gloda having remotely
+ * thought about opening the database.
+ * 2) We expect and desire that the logger produce a warning and glodaTestHelper
+ * takes the view that warnings = death.
+ *
+ * We do use the rest of the test infrastructure though.
+ */
+
+// -- Do configure the gloda prefs though...
+// Yes to indexing.
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// No to a sweep we don't control.
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
+// We'll start with this datastore ID, and make sure it gets overwritten
+// when the index is rebuilt.
+var kDatastoreIDPref = "mailnews.database.global.datastore.id";
+var kOriginalDatastoreID = "47e4bad6-fedc-4931-bf3f-d2f4146ac63e";
+Services.prefs.setCharPref(kDatastoreIDPref, kOriginalDatastoreID);
+
+/**
+ * Create an illegal=corrupt database and make sure that we log a message and
+ * still end up happy.
+ */
+add_task(function test_corrupt_databases_get_reported_and_blown_away() {
+ // - Get the file path.
+ let dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ dbFile.append("global-messages-db.sqlite");
+
+ // - Protect dangerous people from themselves.
+ // (There should not be a database at this point; if there is one, we are
+ // not in the sandbox profile we expect. I wouldn't bother except we're
+ // going out of our way to write gibberish whereas gloda accidentally
+ // opening a valid database is bad but not horrible.)
+ if (dbFile.exists()) {
+ do_throw("There should not be a database at this point.");
+ }
+
+ // - Create the file.
+ dump("Creating gibberish file\n");
+ let ostream = Cc["@mozilla.org/network/file-output-stream;1"].createInstance(
+ Ci.nsIFileOutputStream
+ );
+ ostream.init(dbFile, -1, -1, 0);
+ let fileContents = "I'm in ur database not being a database.\n";
+ ostream.write(fileContents, fileContents.length);
+ ostream.close();
+
+ // - Init gloda, get warnings.
+ dump("Init gloda\n");
+ var { Gloda } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaPublic.jsm"
+ );
+ dump("Gloda inited, checking\n");
+
+ // - Make sure the datastore has an actual database.
+ let { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+ );
+
+ // Make sure that the datastoreID was overwritten
+ Assert.notEqual(Gloda.datastoreID, kOriginalDatastoreID);
+ // And for good measure, make sure that the pref was also overwritten
+ let currentDatastoreID = Services.prefs.getCharPref(kDatastoreIDPref);
+ Assert.notEqual(currentDatastoreID, kOriginalDatastoreID);
+ // We'll also ensure that the Gloda.datastoreID matches the one stashed
+ // in prefs...
+ Assert.equal(currentDatastoreID, Gloda.datastoreID);
+ // And finally, we'll make sure that the datastoreID is a string with length
+ // greater than 0.
+ Assert.equal(typeof Gloda.datastoreID, "string");
+ Assert.ok(Gloda.datastoreID.length > 0);
+
+ if (!GlodaDatastore.asyncConnection) {
+ do_throw("No database connection suggests no database!");
+ }
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_folder_logic.js b/comm/mailnews/db/gloda/test/unit/test_folder_logic.js
new file mode 100644
index 0000000000..6625258daa
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_folder_logic.js
@@ -0,0 +1,60 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the gloda folder logic.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ // Tests in this file assume that returned folders are nsIMsgFolders and not
+ // handles which currently only local injection supports.
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Newly created folders should not be filthy (at least as long as they have
+ * nothing in them.)
+ */
+add_task(async function test_newly_created_folders_start_clean() {
+ let msgFolder = await messageInjection.makeEmptyFolder();
+ let glodaFolder = Gloda.getFolderForFolder(msgFolder);
+ Assert.equal(glodaFolder.dirtyStatus, glodaFolder.kFolderClean);
+});
+
+/**
+ * Deleted folders should not leave behind any mapping, and that mapping
+ * definitely should not interfere with a newly created folder of the same
+ * name.
+ */
+add_task(async function test_deleted_folder_tombstones_get_forgotten() {
+ let oldFolder = await messageInjection.makeEmptyFolder("volver");
+ let oldGlodaFolder = Gloda.getFolderForFolder(oldFolder);
+ messageInjection.deleteFolder(oldFolder);
+
+ // The tombstone needs to know it is deleted.
+ Assert.ok(oldGlodaFolder._deleted);
+
+ let newFolder = await messageInjection.makeEmptyFolder("volver");
+ let newGlodaFolder = Gloda.getFolderForFolder(newFolder);
+
+ // This folder better not be the same and better not think it is deleted.
+ Assert.notEqual(oldGlodaFolder, newGlodaFolder);
+ Assert.ok(!newGlodaFolder._deleted);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js b/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js
new file mode 100644
index 0000000000..d938208c9b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js
@@ -0,0 +1,299 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This test file recycles part of test_intl.js. What we do is insert into the
+ * fulltext index two messages:
+ * - one has tokens 'aa' and 'bbb',
+ * - one is from a previous test and has CJK characters in it.
+ *
+ * We want to test that the behavior of the tokenizer is as expected (namely,
+ * that it drops two-letter tokens unless they're CJK bigrams), and that
+ * GlodaMsgSearcher.jsm properly drops two-letter tokens (unless CJK) from the search
+ * terms to avoid issuing a query that will definitely return no results.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { queryExpect, sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { GlodaFolder } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDataModel.jsm"
+);
+var { GlodaMsgSearcher } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaMsgSearcher.jsm"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* ===== Tests ===== */
+
+/**
+ * To make the encoding pairs:
+ * - For the subject bit:
+ * import email
+ * h = email.Header.Header(charset=CHARSET)
+ * h.append(STRING)
+ * h.encode()
+ * - For the body bit
+ * s.encode(CHARSET)
+ */
+var intlPhrases = [
+ // -- CJK case
+ {
+ name: "CJK: Vending Machine",
+ actual: "\u81ea\u52d5\u552e\u8ca8\u6a5f",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?6Ieq5YuV5ZSu6LKo5qmf?=",
+ "\xe8\x87\xaa\xe5\x8b\x95\xe5\x94\xae\xe8\xb2\xa8\xe6\xa9\x9f",
+ ],
+ },
+ searchPhrases: [
+ // Match bi-gram driven matches starting from the front.
+ { body: '"\u81ea\u52d5"', match: true },
+ ],
+ },
+ // -- Regular case. Make sure two-letter tokens do not match, since the
+ // tokenizer is supposed to drop them. Also make sure that a three-letter
+ // token matches.
+ {
+ name: "Boring ASCII",
+ actual: "aa bbb",
+ encodings: {
+ "utf-8": ["=?utf-8?q?aa_bbb?=", "aa bbb"],
+ },
+ searchPhrases: [
+ { body: "aa", match: false },
+ { body: "bbb", match: true },
+ ],
+ },
+];
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_index_cjk() {
+ await indexPhrase(intlPhrases[0]);
+});
+
+add_task(async function test_index_regular() {
+ await indexPhrase(intlPhrases[1]);
+});
+
+/**
+ * - Check that the 'aa' token was never emitted (we don't emit two-letter
+ * tokens unless they're CJK).
+ * - Check that the '\u81ea\u52d5' token was emitted, because it's CJK.
+ * - Check that the 'bbb' token was duly emitted (three letters is more than two
+ * letters so it's tokenized).
+ */
+add_task(async function test_token_count() {
+ // Force a db flush so I can investigate the database if I want.
+ await waitForGlodaDBFlush();
+ await sqlExpectCount(
+ 0,
+ "SELECT COUNT(*) FROM messagesText where messagesText MATCH 'aa'"
+ );
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM messagesText where messagesText MATCH 'bbb'"
+ );
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM messagesText where messagesText MATCH '\u81ea\u52d5'"
+ );
+});
+
+add_task(async function test_fulltextsearch_cjk() {
+ await test_fulltextsearch(intlPhrases[0]);
+});
+
+add_task(async function test_fulltextsearch_regular() {
+ await test_fulltextsearch(intlPhrases[1]);
+});
+
+/**
+ * We make sure that the Gloda module that builds the query drops two-letter
+ * tokens, otherwise this would result in an empty search (no matches for
+ * two-letter tokens).
+ */
+add_task(async function test_query_builder() {
+ // aa should be dropped, and we have one message containing the bbb token.
+ await msgSearchExpectCount(1, "aa bbb");
+ // The CJK part should not be dropped, and match message 1; the bbb token
+ // should not be dropped, and match message 2; 0 results returned because no
+ // message has the two tokens in it.
+ await msgSearchExpectCount(0, "\u81ea\u52d5 bbb");
+});
+
+/**
+ * For each phrase in the intlPhrases array (we are parameterized over it using
+ * parameterizeTest in the 'tests' declaration), create a message where the
+ * subject, body, and attachment name are populated using the encodings in
+ * the phrase's "encodings" attribute, one encoding per message. Make sure
+ * that the strings as exposed by the gloda representation are equal to the
+ * expected/actual value.
+ * Stash each created synthetic message in a resultList list on the phrase so
+ * that we can use them as expected query results in
+ * |test_fulltextsearch|.
+ */
+async function indexPhrase(aPhrase) {
+ // Create a synthetic message for each of the delightful encoding types.
+ let messages = [];
+ aPhrase.resultList = [];
+ for (let charset in aPhrase.encodings) {
+ let [quoted, bodyEncoded] = aPhrase.encodings[charset];
+
+ let smsg = msgGen.makeMessage({
+ subject: quoted,
+ body: { charset, encoding: "8bit", body: bodyEncoded },
+ attachments: [{ filename: quoted, body: "gabba gabba hey" }],
+ // Save off the actual value for checking.
+ callerData: [charset, aPhrase.actual],
+ });
+
+ messages.push(smsg);
+ aPhrase.resultList.push(smsg);
+ }
+ let synSet = new SyntheticMessageSet(messages);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([synSet], { verifier: verify_index })
+ );
+}
+
+/**
+ * Does the per-message verification for indexPhrase. Knows what is right for
+ * each message because of the callerData attribute on the synthetic message.
+ */
+function verify_index(smsg, gmsg) {
+ let [charset, actual] = smsg.callerData;
+ let subject = gmsg.subject;
+ let indexedBodyText = gmsg.indexedBodyText.trim();
+ let attachmentName = gmsg.attachmentNames[0];
+ dump("Using character set:\n" + charset + "\nActual:\n" + actual + "\n");
+ dump("Subject:\n" + subject + "\nSubject length:\n" + subject.length + "\n");
+ Assert.equal(actual, subject);
+ dump("Body: " + indexedBodyText + " (len: " + indexedBodyText.length + ")\n");
+ Assert.equal(actual, indexedBodyText);
+ dump(
+ "Attachment name:" +
+ attachmentName +
+ " (len: " +
+ attachmentName.length +
+ ")\n"
+ );
+ Assert.equal(actual, attachmentName);
+}
+
+/**
+ * For each phrase, make sure that all of the searchPhrases either match or fail
+ * to match as appropriate.
+ */
+async function test_fulltextsearch(aPhrase) {
+ for (let searchPhrase of aPhrase.searchPhrases) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.bodyMatches(searchPhrase.body);
+ await queryExpect(query, searchPhrase.match ? aPhrase.resultList : []);
+ }
+}
+
+/**
+ * Pass a query string to the GlodaMsgSearcher, run the corresponding SQL query,
+ * and check the resulted count is what we want.
+ *
+ * Use like so:
+ *  await msgSearchExpectCount(1, "I like cheese");
+ *
+ * @param aCount The number of result rows the fulltext query must return.
+ * @param aFulltextStr The user-style search string fed to GlodaMsgSearcher.
+ */
+async function msgSearchExpectCount(aCount, aFulltextStr) {
+  // Let the GlodaMsgSearcher build its query
+  let searcher = new GlodaMsgSearcher(null, aFulltextStr);
+  let conn = GlodaDatastore.asyncConnection;
+  let query = searcher.buildFulltextQuery();
+
+  // Brace yourself, brutal monkey-patching NOW
+  // We temporarily swap out _queryFromSQLString so getCollection() hands us
+  // the SQL text and bind parameters instead of actually running the query.
+  let sql, args;
+  let oldFunc = GlodaDatastore._queryFromSQLString;
+  GlodaDatastore._queryFromSQLString = function (aSql, aArgs) {
+    sql = aSql;
+    args = aArgs;
+  };
+  query.getCollection();
+  GlodaDatastore._queryFromSQLString = oldFunc;
+
+  // Bind the parameters
+  let stmt = conn.createStatement(sql);
+  for (let [iBinding, bindingValue] of args.entries()) {
+    GlodaDatastore._bindVariant(stmt, iBinding, bindingValue);
+  }
+
+  let promiseResolve;
+  let promise = new Promise(resolve => {
+    promiseResolve = resolve;
+  });
+
+  // Count the rows the statement yields asynchronously.
+  let i = 0;
+  stmt.executeAsync({
+    handleResult(aResultSet) {
+      for (
+        let row = aResultSet.getNextRow();
+        row;
+        row = aResultSet.getNextRow()
+      ) {
+        i++;
+      }
+    },
+
+    handleError(aError) {
+      do_throw(new Error("Error: " + aError.message));
+    },
+
+    handleCompletion(aReason) {
+      if (aReason != Ci.mozIStorageStatementCallback.REASON_FINISHED) {
+        do_throw(new Error("Query canceled or aborted!"));
+      }
+
+      if (i != aCount) {
+        throw new Error(
+          "Didn't get the expected number of rows: got " +
+            i +
+            " expected " +
+            aCount +
+            " SQL: " +
+            sql
+        );
+      }
+      promiseResolve();
+    },
+  });
+  // NOTE(review): finalize() is called right after executeAsync() rather than
+  // in handleCompletion; mozStorage documents this as safe for async
+  // statements, but confirm this ordering is intended.
+  stmt.finalize();
+  await promise;
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js
new file mode 100644
index 0000000000..3c59de4233
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js
@@ -0,0 +1,34 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent for IMAP messages that are offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_gloda_content.js */
+load("base_gloda_content.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_gloda_content_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js b/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js
new file mode 100644
index 0000000000..f02a6750b4
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent for local messages.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_gloda_content.js */
+load("base_gloda_content.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_gloda_content_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js b/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js
new file mode 100644
index 0000000000..9d0b0d4103
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js
@@ -0,0 +1,139 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Check that events update identity._hasAddressBookCard correctly.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ nukeGlodaCachesAndCollections,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var EMAIL_ADDRESS = "all.over@the.world.invalid";
+var DISPLAY_NAME = "every day";
+
+var messageInjection;
+
+add_setup(function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Create an e-mail so the identity can exist.
+ */
+add_setup(async function () {
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1, from: [DISPLAY_NAME, EMAIL_ADDRESS] }]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Okay, but it knows it has no card because indexing thinks stuff.
+ // So let's flush all caches and create a query that just knows about the
+ // identity.
+ nukeGlodaCachesAndCollections();
+
+ let identQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ identQuery.kind("email");
+ identQuery.value(EMAIL_ADDRESS);
+ await queryExpect(identQuery, [EMAIL_ADDRESS]);
+
+ // Now the identity exists. Make sure it is in cache.
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.notEqual(identity, null);
+
+ // And make sure it has no idea what the current state of the card is.
+ if (identity._hasAddressBookCard !== undefined) {
+ do_throw(
+ "We should have no idea about the state of the ab card, but " +
+ "it's: " +
+ identity._hasAddressBookCard
+ );
+ }
+});
+
+/**
+ * Add a card for that e-mail, make sure we update the cached identity ab
+ * card state.
+ */
+add_task(function test_add_card_cache_indication() {
+ add_card(EMAIL_ADDRESS, DISPLAY_NAME);
+
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.equal(identity._hasAddressBookCard, true);
+});
+
+/**
+ * Remove the card we added in setup, make sure we update the cached identity
+ * ab card state.
+ */
+add_task(function test_remove_card_cache_indication() {
+ delete_card(EMAIL_ADDRESS);
+
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.equal(identity._hasAddressBookCard, false);
+});
+
+/**
+ * Add again a card for that e-mail, make sure we update the cached identity ab
+ * card state.
+ */
+add_task(function test_add_card_cache_indication() {
+ add_card(EMAIL_ADDRESS, DISPLAY_NAME);
+
+ let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
+ Assert.equal(identity._hasAddressBookCard, true);
+});
+
+function add_card(aEmailAddress, aDisplayName) {
+ Cc["@mozilla.org/addressbook/services/addressCollector;1"]
+ .getService(Ci.nsIAbAddressCollector)
+ .collectSingleAddress(aEmailAddress, aDisplayName, true, true);
+}
+
+function get_card_for_email(aEmailAddress) {
+ for (let book of MailServices.ab.directories) {
+ let card = book.cardForEmailAddress(aEmailAddress);
+ if (card) {
+ return [book, card];
+ }
+ }
+ return [null, null];
+}
+
+function delete_card(aEmailAddress) {
+ let [book, card] = get_card_for_email(aEmailAddress);
+
+ MailServices.ab.getDirectory(book.URI).deleteCards([card]);
+}
+
+function get_cached_gloda_identity_for_email(aEmailAddress) {
+ return GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaConstants.NOUN_IDENTITY,
+ "email@" + aEmailAddress.toLowerCase()
+ );
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js b/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js
new file mode 100644
index 0000000000..5920ac981e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js
@@ -0,0 +1,210 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that we fail on bad messages by marking the messages as bad rather than
+ * exploding or something bad like that.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { configureGlodaIndexing } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+const GLODA_BAD_MESSAGE_ID = 2;
+
+var illegalMessageTemplates = [
+ // -- Authors
+ {
+ name: "no author",
+ clobberHeaders: {
+ From: "",
+ },
+ },
+ {
+ name: "too many authors (> 1)",
+ clobberHeaders: {
+ From: "Tweedle Dee <dee@example.com>, Tweedle Dum <dum@example.com>",
+ },
+ },
+];
+
+var messageInjection;
+
+add_setup(function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_illegal_message_no_author() {
+ await illegal_message(illegalMessageTemplates[0]);
+});
+add_task(async function test_illegal_message_too_many_authors() {
+ await illegal_message(illegalMessageTemplates[1]);
+});
+
+/**
+ * A byzantine failure to stream should not sink us. Fake a failure.
+ */
+add_task(async function test_streaming_failure() {
+ configureGlodaIndexing({ injectFaultIn: "streaming" });
+
+ // Inject the messages.
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1 }]
+ );
+
+ // Indexing should complete without actually indexing the message.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+
+ // Make sure the header has the expected gloda bad message state.
+ let msgHdr = msgSet.getMsgHdr(0);
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), GLODA_BAD_MESSAGE_ID);
+
+  // Make sure gloda does not think the message is indexed.
+ Assert.equal(Gloda.isMessageIndexed(msgHdr), false);
+
+ configureGlodaIndexing({});
+});
+
+/**
+ * If we have one bad message followed by a good message, the good message
+ * should still get indexed. Additionally, if we do a sweep on the folder,
+ * we should not attempt to index the message again.
+ */
+add_task(async function test_recovery_and_no_second_attempts() {
+ let [, goodSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1, clobberHeaders: { From: "" } }, { count: 1 }]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([goodSet], { recovered: 1 }));
+
+ // Index the folder; no messages should get indexed and there should be no
+ // failure things.
+ GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 0,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+});
+
+/**
+ * Make sure that we attempt to reindex a dirty bad message and that when we
+ * fail that we clear the dirty bit.
+ */
+add_task(async function test_reindex_on_dirty_clear_dirty_on_fail() {
+ // Inject a new illegal message
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [
+ {
+ count: 1,
+ clobberHeaders: illegalMessageTemplates[0].clobberHeaders,
+ },
+ ]
+ );
+
+ // Indexing should complete without actually indexing the message.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+
+ // Mark the message dirty, force the folder to be indexed.
+ let msgHdr = msgSet.getMsgHdr(0);
+ msgHdr.setUint32Property("gloda-dirty", 1);
+ GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+ // Now the message should be clean.
+ Assert.equal(msgHdr.getUint32Property("gloda-dirty"), 0);
+
+ // Check again with filthy.
+ msgHdr.setUint32Property("gloda-dirty", 2);
+ GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+ // Now the message should be clean.
+ Assert.equal(msgHdr.getUint32Property("gloda-dirty"), 0);
+});
+
+/**
+ * Using exciting templates from |illegalMessageTemplates|, verify that gloda
+ * fails to index them and marks the messages bad.
+ */
+async function illegal_message(aInfo) {
+ // Inject the messages.
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [messageInjection.getInboxFolder()],
+ [{ count: 1, clobberHeaders: aInfo.clobberHeaders }]
+ );
+
+ // Indexing should complete without actually indexing the message.
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([], {
+ recovered: 1,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ })
+ );
+
+ // Make sure the header has the expected gloda bad message state.
+ let msgHdr = msgSet.getMsgHdr(0);
+ Assert.equal(msgHdr.getUint32Property("gloda-id"), GLODA_BAD_MESSAGE_ID);
+
+ // Make sure gloda does not think the message is indexed.
+ Assert.equal(Gloda.isMessageIndexed(msgHdr), false);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_compaction.js b/comm/mailnews/db/gloda/test/unit/test_index_compaction.js
new file mode 100644
index 0000000000..7b6923ab61
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_compaction.js
@@ -0,0 +1,395 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that gloda does the right things in terms of compaction. Major cases:
+ *
+ * - Compaction occurs while we are in the process of indexing a folder. We
+ * want to make sure we stop indexing cleanly
+ *
+ * - A folder that we have already indexed gets compacted. We want to make sure
+ * that we update the message keys for all involved. This means verifying
+ * that both the on-disk representations and in-memory representations are
+ * correct.
+ *
+ * - Make sure that an indexing sweep performs a compaction pass if we kill the
+ * compaction job automatically scheduled by the conclusion of the
+ * compaction. (Simulating the user quitting before all compactions have
+ * been processed.)
+ *
+ * - Moves/deletes that happen after a compaction but before we process the
+ * compaction generate a special type of edge case that we need to check.
+ *
+ * There is also a less interesting case:
+ *
+ * - Make sure that the indexer does not try and start indexing a folder that is
+ * in the process of being compacted.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var {
+ configureGlodaIndexing,
+ resumeFromSimulatedHang,
+ waitForGlodaDBFlush,
+ waitForIndexingHang,
+} = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { PromiseTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/PromiseTestUtils.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ /*
+ * All the rest of the gloda tests (should) work with maildir, but this test
+ * only works/makes sense with mbox, so force it to always use mbox. This
+ * allows developers to manually change the default to maildir and have the
+ * gloda tests run with that.
+ */
+ Services.prefs.setCharPref(
+ "mail.serverDefaultStoreContractID",
+ "@mozilla.org/msgstore/berkeleystore;1"
+ );
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function compaction_indexing_pass_none_pending_commit() {
+ await compaction_indexing_pass({
+ name: "none pending commit",
+ forceCommit: true,
+ });
+});
+add_task(async function compaction_indexing_pass_all_pending_commit() {
+ await compaction_indexing_pass({
+ name: "all pending commit",
+ forceCommit: false,
+ });
+});
+
+/**
+ * Make sure that an indexing sweep performs a compaction pass if we kill the
+ * compaction job automatically scheduled by the conclusion of the compaction.
+ * (Simulating the user quitting before all compactions have been processed.)
+ */
+add_task(async function test_sweep_performs_compaction() {
+ let [[folder], moveSet, staySet] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 1 }, { count: 1 }]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([moveSet, staySet], { augment: true })
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([moveSet]));
+
+ // Disable event-driven indexing so there is no way the compaction job can
+ // get worked.
+ configureGlodaIndexing({ event: false });
+
+ // Compact.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Erase the compaction job.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // Make sure the folder is marked compacted.
+ let glodaFolder = Gloda.getFolderForFolder(msgFolder);
+ Assert.ok(glodaFolder.compacted);
+
+ // Re-enable indexing and fire up an indexing pass.
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Make sure the compaction happened.
+ verify_message_keys(staySet);
+});
+
+/**
+ * Make sure that if we compact a folder then move messages out of it and/or
+ * delete messages from it before its compaction pass happens that the
+ * compaction pass properly marks the messages deleted.
+ */
+add_task(
+ async function test_moves_and_deletions_on_compacted_folder_edge_case() {
+ let [[folder], compactMoveSet, moveSet, delSet, staySet] =
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ { count: 1 },
+ { count: 1 },
+ { count: 1 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed(
+ [compactMoveSet, moveSet, delSet, staySet],
+ {
+ augment: true,
+ }
+ )
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(compactMoveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactMoveSet]));
+
+ // Disable indexing because we don't want to process the compaction.
+ configureGlodaIndexing({ event: false });
+
+ // Compact the folder.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Erase the compaction job.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // - Delete
+ // Because of the compaction, the PendingCommitTracker forgot that the message
+ // we are deleting got indexed; we will receive no event.
+ await MessageInjection.deleteMessages(delSet);
+
+ // - Move
+ // Same deal on the move, except that it will try and trigger event-based
+ // indexing in the target folder...
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ // Kill the event-based indexing job of the target; we want the indexing sweep
+ // to see it as a move.
+ dump("killing all indexing jobs\n");
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // - Indexing pass
+ // Re-enable indexing so we can do a sweep.
+ configureGlodaIndexing({ event: true });
+
+ // This will trigger compaction (per the previous unit test) which should mark
+  // moveSet and delSet as deleted. Then it should move on to the next
+ // folder and add moveSet again...
+ dump("triggering indexing sweep\n");
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([moveSet], {
+ deleted: [moveSet, delSet],
+ })
+ );
+
+ // Sanity check the compaction for giggles.
+ verify_message_keys(staySet);
+ }
+);
+
+/**
+ * Induce a compaction while we are in the middle of indexing. Make sure we
+ * clean up and that the folder ends up getting indexed once we resume.
+ *
+ * Note that in order for compaction to happen there has to be something for
+ * compaction to do, so our prep involves moving a message to another folder.
+ * (Deletion actually produces more legwork for gloda whereas a local move is
+ * almost entirely free.)
+ */
+add_task(async function test_compaction_interrupting_indexing() {
+ // Create a folder with a message inside.
+ let [[folder], compactionFodderSet] =
+ await messageInjection.makeFoldersWithSets(1, [{ count: 1 }]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactionFodderSet]));
+
+ // Move that message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(compactionFodderSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactionFodderSet]));
+
+ // Configure the gloda indexer to hang while streaming the message.
+ configureGlodaIndexing({ hangWhile: "streaming" });
+
+ // Create a folder with a message inside.
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [folder],
+ [{ count: 1 }]
+ );
+
+ await waitForIndexingHang();
+
+  // Compact! This should kill the indexing job because of the compaction; no
+  // other mechanism should be able to do this.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Reset indexing to not hang.
+ configureGlodaIndexing({});
+
+ // Sorta get the event chain going again.
+ await resumeFromSimulatedHang(true);
+
+ // Because the folder was dirty it should actually end up getting indexed,
+ // so in the end the message will get indexed.
+ // Also, make sure a cleanup was observed.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { cleanedUp: 1 }));
+});
+
+/**
+ * Make sure the indexer does not enter a folder that is marked as compacting.
+ */
+add_task(async function test_do_not_enter_compacting_folders() {
+ // Turn off indexing.
+ configureGlodaIndexing({ event: false });
+
+ // Create a folder with a message inside.
+ let [[folder]] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Lie and claim we are compacting that folder.
+ let glodaFolder = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folder)
+ );
+ glodaFolder.compacting = true;
+
+ // Now try and force ourselves to index that folder and its message.
+ // Turn back on indexing.
+ configureGlodaIndexing({ event: true });
+
+ // Verify that the indexer completes without having indexed anything.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+});
+
+/**
+ * Verify that the message keys match between the message headers and the
+ * (augmented on) gloda messages that correspond to the headers.
+ */
+function verify_message_keys(aSynSet) {
+ let iMsg = 0;
+ for (let msgHdr of aSynSet.msgHdrs()) {
+ let glodaMsg = aSynSet.glodaMessages[iMsg++];
+ if (msgHdr.messageKey != glodaMsg.messageKey) {
+ throw new Error(
+ "Message header " +
+ msgHdr +
+ " should have message key " +
+ msgHdr.messageKey +
+ " but has key " +
+ glodaMsg.messageKey +
+ " per gloda msg " +
+ glodaMsg
+ );
+ }
+ }
+ dump("verified message keys after compaction\n");
+}
+
+/**
+ * Compact a folder that we were not indexing. Make sure gloda's representations
+ * get updated to the new message keys.
+ *
+ * This is parameterized because the logic has special cases to deal with
+ * messages that were pending commit that got blown away.
+ */
+async function compaction_indexing_pass(aParam) {
+ // Create 5 messages. We will move just the third message so the first two
+ // message keep their keys and the last two change. (We want 2 for both
+ // cases to avoid edge cases.)
+ let [[folder], sameSet, moveSet, shiftSet] =
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 2 },
+ { count: 1 },
+ { count: 2 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([sameSet, moveSet, shiftSet], {
+ augment: true,
+ })
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([moveSet]));
+
+ if (aParam.forceCommit) {
+ await waitForGlodaDBFlush();
+ }
+
+ // Compact the folder.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+ // Wait for the compaction job to complete.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ verify_message_keys(sameSet);
+ verify_message_keys(shiftSet);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js
new file mode 100644
index 0000000000..0004373f7a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js
@@ -0,0 +1,49 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for offline IMAP junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(function () {
+ // Set these preferences to stop the cache value "cachePDir" being fetched. This
+ // avoids errors on the javascript console, for which the test would otherwise fail.
+ // See bug 903402 for follow-up information.
+ Services.prefs.setComplexValue(
+ "browser.cache.disk.parent_directory",
+ Ci.nsIFile,
+ do_get_profile()
+ );
+ Services.prefs.setComplexValue(
+ "browser.cache.offline.parent_directory",
+ Ci.nsIFile,
+ do_get_profile()
+ );
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js
new file mode 100644
index 0000000000..c144155799
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js
@@ -0,0 +1,36 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for online IMAP junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js
new file mode 100644
index 0000000000..788b630d5b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for local junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js
new file mode 100644
index 0000000000..a340122ef0
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js
@@ -0,0 +1,38 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that are offline from the start.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+var msgGen;
+var scenarios;
+var messageInjection;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js
new file mode 100644
index 0000000000..4977dd5521
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js
@@ -0,0 +1,36 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that aren't offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+expectFulltextResults = false;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js
new file mode 100644
index 0000000000..85031ec0ac
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js
@@ -0,0 +1,42 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that are not offline at first, but
+ * are made offline later.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+// We want to go offline once the messages have already been indexed online.
+goOffline = true;
+
+var msgGen;
+var scenarios;
+var messageInjection;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js
new file mode 100644
index 0000000000..5441a3062c
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js
@@ -0,0 +1,133 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test indexing support for local messages.
+ */
+
+var {
+ glodaTestHelperInitialize,
+ assertExpectedMessagesIndexed,
+ waitForGlodaIndexer,
+ messageInjection,
+ nukeGlodaCachesAndCollections,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Make sure that if we have to reparse a local folder we do not hang or
+ * anything. (We had a regression where we would hang.)
+ */
+add_task(async function test_reparse_of_local_folder_works() {
+ // Index a folder.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Force a db flush so we do not have any outstanding references to the
+ // folder or its headers.
+ await waitForGlodaDBFlush();
+
+ // Mark the summary invalid.
+ folder.msgDatabase.summaryValid = false;
+ // Clear the database so next time we have to reparse.
+ folder.msgDatabase.forceClosed();
+
+ // Force gloda to re-parse the folder again.
+ GlodaMsgIndexer.indexFolder(folder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+});
+
+/**
+ * Ensure that fromJSON for a non-singular attribute properly filters out
+ * "undefined" return values, specifically as it relates to tags. When the
+ * user removes them Gloda doesn't actually re-index the messages so the
+ * values will still be there when we next load the message.
+ *
+ * We directly monkey with the state of NounTag for no really good reason, but
+ * maybe it cuts down on disk I/O because we don't have to touch prefs.
+ */
+add_task(async function test_fromjson_of_removed_tag() {
+ // -- Inject
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+
+ // -- Tag
+ let tag = TagNoun.getTag("$label4");
+ msgSet.addTag(tag.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.tags.length, 1);
+ Assert.equal(gmsg.tags[0].key, tag.key);
+
+ // -- Forget about the tag, TagNoun!
+ delete TagNoun._tagMap[tag.key];
+ // This also means we have to replace the tag service with a liar.
+ let realTagService = TagNoun._msgTagService;
+ TagNoun._msgTagService = {
+ isValidKey() {
+ return false;
+ }, // Lies!
+ };
+
+ // -- Forget about the message, gloda!
+ let glodaId = gmsg.id;
+ nukeGlodaCachesAndCollections();
+
+ // -- Re-load the message.
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.id(glodaId);
+ let coll = await queryExpect(query, msgSet);
+
+ // -- Put the tag back in TagNoun before we check and possibly explode.
+ TagNoun._tagMap[tag.key] = tag;
+ TagNoun._msgTagService = realTagService;
+
+ // -- Verify the message apparently has no tags (despite no reindex).
+ gmsg = coll.items[0];
+ Assert.equal(gmsg.tags.length, 0);
+});
+
+/**
+ * Test that we are using hasOwnProperty or a properly guarding dict for
+ * NounTag so that if someone created a tag called "watch" and then deleted
+ * it, we don't end up exposing the watch function as the tag.
+ *
+ * Strictly speaking, this does not really belong here, but it's a matched set
+ * with the previous test.
+ */
+add_task(
+ function test_nountag_does_not_think_it_has_watch_tag_when_it_does_not() {
+ Assert.equal(TagNoun.fromJSON("watch"), undefined);
+ }
+);
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js b/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js
new file mode 100644
index 0000000000..c3f79f0c21
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js
@@ -0,0 +1,265 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests the folder indexing logic of Gloda._worker_folderIndex in
+ * the greater context of the sweep indexing mechanism in a whitebox fashion.
+ *
+ * Automated indexing is suppressed for the duration of this file.
+ *
+ * In order to test the phases of the logic we inject failures into
+ * GlodaIndexer._indexerGetEnumerator with a wrapper to control how far
+ * indexing gets. We also clobber or wrap other functions as needed.
+ */
+
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { configureGlodaIndexing } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+
+/**
+ * We want to stop the GlodaMsgIndexer._indexerGetEnumerator after a
+ * set amount of folder indexing.
+ */
+const ENUMERATOR_SIGNAL_WORD = "STOP Me!";
+/**
+ * How many more enumerations before we should throw; 0 means don't throw.
+ */
+var stop_enumeration_after = 0;
+/**
+ * We hide the error in the promise chain. But we do have to know if it happens
+ * at another cycle.
+ */
+var error_is_thrown = false;
+/**
+ * Inject GlodaMsgIndexer._indexerGetEnumerator with our test indexerGetEnumerator.
+ */
+// Keep a reference to the real implementation so the wrapper below can
+// delegate to it when no failure has been scheduled.
+GlodaMsgIndexer._original_indexerGetEnumerator =
+  GlodaMsgIndexer._indexerGetEnumerator;
+/**
+ * Wrapper for GlodaMsgIndexer._indexerGetEnumerator to cause explosions.
+ *
+ * While stop_enumeration_after is non-zero, each call decrements it; the call
+ * that brings it to zero sets error_is_thrown and throws an Error carrying
+ * ENUMERATOR_SIGNAL_WORD instead of returning an enumerator. When the counter
+ * is 0 (the default), calls pass straight through to the original.
+ */
+GlodaMsgIndexer._indexerGetEnumerator = function (...aArgs) {
+  if (stop_enumeration_after && !--stop_enumeration_after) {
+    error_is_thrown = true;
+    throw new Error(ENUMERATOR_SIGNAL_WORD);
+  }
+
+  return GlodaMsgIndexer._original_indexerGetEnumerator(...aArgs);
+};
+
+var messageInjection;
+
+add_setup(function () {
+  // Local-folder message injection; each test drives the folder-index worker
+  // by hand via spin_folder_indexer rather than relying on automatic indexing.
+  let msgGen = new MessageGenerator();
+  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+  // We do not want the event-driven indexer crimping our style.
+  configureGlodaIndexing({ event: false });
+  glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * The value itself does not matter; it just needs to be present and be in a
+ * certain range for our logic testing.
+ */
+var arbitraryGlodaId = 4096;
+
+/**
+ * When we enter a filthy folder we should be marking all the messages as filthy
+ * that have gloda-id's and committing.
+ */
+add_task(async function test_propagate_filthy_from_folder_to_messages() {
+  // Mark the folder as filthy.
+  let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+    { count: 3 },
+  ]);
+  let glodaFolder = Gloda.getFolderForFolder(folder);
+  glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;
+
+  // Mark each header with a gloda-id so they can get marked filthy.
+  for (let msgHdr of msgSet.msgHdrs()) {
+    msgHdr.setUint32Property("gloda-id", arbitraryGlodaId);
+  }
+
+  // Force the database to see it as filthy so we can verify it changes.
+  glodaFolder._datastore.updateFolderDirtyStatus(glodaFolder);
+  await sqlExpectCount(
+    1,
+    "SELECT COUNT(*) FROM folderLocations WHERE id = ? " +
+      "AND dirtyStatus = ?",
+    glodaFolder.id,
+    glodaFolder.kFolderFilthy
+  );
+
+  // Index the folder, aborting at the second get enumerator request.
+  // (The first enumerator request is the counting pass; aborting on the
+  // second stops the actual indexing pass before any message is processed —
+  // see test_count_pass below.)
+  stop_enumeration_after = 2;
+
+  await spin_folder_indexer(folder);
+
+  // The folder should only be dirty.
+  Assert.equal(glodaFolder.dirtyStatus, glodaFolder.kFolderDirty);
+  // Make sure the database sees it as dirty.
+  await sqlExpectCount(
+    1,
+    "SELECT COUNT(*) FROM folderLocations WHERE id = ? " +
+      "AND dirtyStatus = ?",
+    glodaFolder.id,
+    glodaFolder.kFolderDirty
+  );
+
+  // The messages should be filthy per the headers.
+  // We force a commit of the database.
+  for (let msgHdr of msgSet.msgHdrs()) {
+    Assert.equal(
+      msgHdr.getUint32Property("gloda-dirty"),
+      GlodaMsgIndexer.kMessageFilthy
+    );
+  }
+});
+
+/**
+ * Make sure our counting pass and our indexing passes get it right. We test
+ * with 0, 1, and 2 messages matching.
+ */
+add_task(async function test_count_pass() {
+  let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+    { count: 2 },
+  ]);
+
+  let hdrs = msgSet.msgHdrList;
+
+  // Throughout this test, stop_enumeration_after = 2 lets the counting pass
+  // (first enumerator request) complete, then aborts at the second request,
+  // so job.goal reflects only the counting result.
+
+  // - (clean) messages with gloda-id's do not get indexed
+  // Nothing is indexed at this point, so all 2.
+  error_is_thrown = false;
+  stop_enumeration_after = 2;
+  await spin_folder_indexer(folder, 2);
+
+  // Pretend the first is indexed, leaving a count of 1.
+  hdrs[0].setUint32Property("gloda-id", arbitraryGlodaId);
+  error_is_thrown = false;
+  stop_enumeration_after = 2;
+  await spin_folder_indexer(folder, 1);
+
+  // Pretend both are indexed, count of 0.
+  hdrs[1].setUint32Property("gloda-id", arbitraryGlodaId);
+  // No explosion should happen since we should never get to the second
+  // enumerator.
+  error_is_thrown = false;
+  await spin_folder_indexer(folder, 0);
+
+  // - Dirty messages get indexed.
+  // A gloda-id plus a dirty marker means "needs re-indexing", so the counts
+  // climb back up as we dirty each header.
+  hdrs[0].setUint32Property("gloda-dirty", GlodaMsgIndexer.kMessageDirty);
+  stop_enumeration_after = 2;
+  error_is_thrown = false;
+  await spin_folder_indexer(folder, 1);
+
+  hdrs[1].setUint32Property("gloda-dirty", GlodaMsgIndexer.kMessageDirty);
+  stop_enumeration_after = 2;
+  error_is_thrown = false;
+  await spin_folder_indexer(folder, 2);
+});
+
+/**
+ * Create a folder indexing job for the given injection folder handle and
+ * run it until completion.
+ *
+ * The folder indexer will continue running on its own if we don't throw an
+ * Error in GlodaMsgIndexer._indexerGetEnumerator.
+ */
+async function spin_folder_indexer(aFolderHandle, aExpectedJobGoal) {
+  let msgFolder = messageInjection.getRealInjectionFolder(aFolderHandle);
+
+  // Cheat and use indexFolder to build the job for us.
+  GlodaMsgIndexer.indexFolder(msgFolder);
+  // Steal that job.
+  let job = GlodaIndexer._indexQueue.pop();
+  GlodaIndexer._indexingJobGoal--;
+
+  // Create the callbackHandle.
+  let callbackHandle = new CallbackHandle();
+  // Create the worker.
+  let worker = GlodaMsgIndexer._worker_folderIndex(job, callbackHandle);
+  try {
+    callbackHandle.pushAndGo(worker, null);
+    // Finish either when the worker reports completion or when our injected
+    // enumerator failure fires (the signal error itself is swallowed inside
+    // CallbackHandle.pushAndGo; error_is_thrown tells us it happened).
+    await Promise.race([
+      callbackHandle.promise,
+      TestUtils.waitForCondition(() => {
+        return error_is_thrown;
+      }),
+    ]);
+  } catch (ex) {
+    do_throw(ex);
+  }
+
+  // job.goal is populated by the worker's counting pass — presumably the
+  // number of messages deemed in need of indexing; callers pass the expected
+  // value to verify the count logic.
+  if (aExpectedJobGoal !== undefined) {
+    Assert.equal(job.goal, aExpectedJobGoal);
+  }
+}
+
+/**
+ * Implements GlodaIndexer._callbackHandle's interface adapted to our async
+ * test driver. This allows us to run indexing workers directly in tests
+ * or support code.
+ *
+ * We do not do anything with the context stack or recovery. Use the actual
+ * indexer callback handler for that!
+ *
+ * Actually, we do very little at all right now. This will fill out as needs
+ * arise.
+ */
+class CallbackHandle {
+  constructor() {
+    // Resolved (via this._resolve) when the wrapped worker reports done.
+    this._promise = new Promise(resolve => {
+      this._resolve = resolve;
+    });
+  }
+
+  // Start driving the worker generator. The injected ENUMERATOR_SIGNAL_WORD
+  // error is expected and swallowed; anything else is rethrown so the test
+  // fails loudly. aContext is accepted for interface compatibility but unused.
+  pushAndGo(aIterator, aContext) {
+    this.glodaWorkerAdapter(aIterator, this._resolve).catch(reason => {
+      if (!reason.message.match(ENUMERATOR_SIGNAL_WORD)) {
+        throw reason;
+      }
+    });
+  }
+
+  // Pump the worker generator until it signals completion or until the
+  // injected enumerator error flips error_is_thrown.
+  // NOTE(review): GlodaConstants is referenced here but is not among this
+  // file's imports above — confirm it is provided by a shared head file.
+  async glodaWorkerAdapter(aIter, resolve) {
+    while (!error_is_thrown) {
+      switch (aIter.next().value) {
+        case GlodaConstants.kWorkSync:
+          break;
+        case GlodaConstants.kWorkDone:
+        case GlodaConstants.kWorkDoneWithResult:
+          resolve();
+          return;
+        default:
+          break;
+      }
+    }
+  }
+  get promise() {
+    return this._promise;
+  }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_intl.js b/comm/mailnews/db/gloda/test/unit/test_intl.js
new file mode 100644
index 0000000000..e6e9868189
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_intl.js
@@ -0,0 +1,355 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Sanity check our encoding transforms and make sure the mozporter tokenizer
+ * is resulting in the expected fulltext search results. Specifically:
+ * - Check that subject, body, and attachment names are properly indexed;
+ * previously we screwed up at least one of these in terms of handling
+ * encodings properly.
+ * - Check that we can fulltext search on those things afterwards.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/**
+ * To make the encoding pairs:
+ * - For the subject bit:
+ * import email
+ * h = email.Header.Header(charset=CHARSET)
+ * h.append(STRING)
+ * h.encode()
+ * - For the body bit
+ * s.encode(CHARSET)
+ */
+var intlPhrases = [
+ // -- CJK case
+ {
+ name: "CJK: Vending Machine",
+ actual: "\u81ea\u52d5\u552e\u8ca8\u6a5f",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?6Ieq5YuV5ZSu6LKo5qmf?=",
+ "\xe8\x87\xaa\xe5\x8b\x95\xe5\x94\xae\xe8\xb2\xa8\xe6\xa9\x9f",
+ ],
+ "euc-jp": [
+ "=?shift-jis?b?jqmTrppTid2LQA==?=",
+ "\xbc\xab\xc6\xb0\xd3\xb4\xb2\xdf\xb5\xa1",
+ ],
+ "shift-jis": [
+ "=?shift-jis?b?jqmTrppTid2LQA==?=",
+ "\x8e\xa9\x93\xae\x9aS\x89\xdd\x8b@",
+ ],
+ },
+ searchPhrases: [
+ // Match bi-gram driven matches starting from the front.
+ { body: '"\u81ea\u52d5"', match: true },
+ { body: '"\u81ea\u52d5\u552e"', match: true },
+ { body: '"\u81ea\u52d5\u552e\u8ca8"', match: true },
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5f"', match: true },
+ // Now match from the back (bi-gram based).
+ { body: '"\u52d5\u552e\u8ca8\u6a5f"', match: true },
+ { body: '"\u552e\u8ca8\u6a5f"', match: true },
+ { body: '"\u8ca8\u6a5f"', match: true },
+ // Now everybody in the middle!
+ { body: '"\u52d5\u552e\u8ca8"', match: true },
+ { body: '"\u552e\u8ca8"', match: true },
+ { body: '"\u52d5\u552e"', match: true },
+ // -- Now match nobody!
+ // Nothing in common with the right answer.
+ { body: '"\u81eb\u52dc"', match: false },
+ // Too long, no match!
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5f\u6a5f"', match: false },
+ // Minor change at the end.
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5e"', match: false },
+ ],
+ },
+ // Use two words where the last character is a multi-byte sequence and one of
+ // them is the last word in the string. This helps test an off-by-one error
+ // in both the asymmetric case (query's last character is last character in
+ // the tokenized string but it is not the last character in the body string)
+ // and symmetric case (last character in the query and the body).
+ {
+ name: "Czech diacritics",
+ actual: "Slov\u00e1cko Moravsk\u00e9 rodin\u011b",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?U2xvdsOhY2tvIE1vcmF2c2vDqSByb2RpbsSb?=",
+ "Slov\xc3\xa1cko Moravsk\xc3\xa9 rodin\xc4\x9b",
+ ],
+ },
+ searchPhrases: [
+ // -- Desired
+ // Match on exact for either word should work
+ { body: "Slov\u00e1cko", match: true },
+ { body: "Moravsk\u00e9", match: true },
+ { body: "rodin\u011b", match: true },
+ // The ASCII uppercase letters get case-folded
+ { body: "slov\u00e1cko", match: true },
+ { body: "moravsk\u00e9", match: true },
+ { body: "rODIN\u011b", match: true },
+ ],
+ },
+ // Ignore accent search!
+ {
+ name: "having accent: Paris",
+ actual: "Par\u00eds",
+ encodings: {
+ "utf-8": ["=?UTF-8?B?UGFyw61z?=", "Par\xc3\xads"],
+ },
+ searchPhrases: [{ body: "paris", match: true }],
+ },
+ // Case insensitive case for non-ASCII characters.
+ {
+ name: "Russian: new",
+ actual: "\u041d\u043e\u0432\u043e\u0435",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?0J3QvtCy0L7QtQ==?=",
+ "\xd0\x9d\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xb5",
+ ],
+ },
+ searchPhrases: [{ body: "\u043d\u043e\u0432\u043e\u0435", match: true }],
+ },
+ // Case-folding happens after decomposition.
+ {
+ name: "Awesome where A has a bar over it",
+ actual: "\u0100wesome",
+ encodings: {
+ "utf-8": ["=?utf-8?q?=C4=80wesome?=", "\xc4\x80wesome"],
+ },
+ searchPhrases: [
+ { body: "\u0100wesome", match: true }, // Upper A-bar
+ { body: "\u0101wesome", match: true }, // Lower a-bar
+ { body: "Awesome", match: true }, // Upper A
+ { body: "awesome", match: true }, // Lower a
+ ],
+ },
+ // Deep decomposition happens and after that, case folding.
+ {
+ name: "Upper case upsilon with diaeresis and hook goes to small upsilon",
+ actual: "\u03d4esterday",
+ encodings: {
+ "utf-8": ["=?utf-8?q?=CF=94esterday?=", "\xcf\x94esterday"],
+ },
+ searchPhrases: [
+ { body: "\u03d4esterday", match: true }, // Y_: 03d4 => 03d2 (decomposed)
+ { body: "\u03d3esterday", match: true }, // Y_' 03d3 => 03d2 (decomposed)
+ { body: "\u03d2esterday", match: true }, // Y_ 03d2 => 03a5 (decomposed)
+ { body: "\u03a5esterday", match: true }, // Y 03a5 => 03c5 (lowercase)
+ { body: "\u03c5esterday", match: true }, // y 03c5 (final state)
+ ],
+ },
+ // Full-width alphabet.
+ // Even if search phrases are ASCII, it has to hit.
+ {
+ name: "Full-width Thunderbird",
+ actual:
+ "\uff34\uff48\uff55\uff4e\uff44\uff45\uff52\uff42\uff49\uff52\uff44",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?77y0772I772V772O772E772F772S772C772J772S772E?=",
+ "\xef\xbc\xb4\xef\xbd\x88\xef\xbd\x95\xef\xbd\x8e\xef\xbd\x84\xef\xbd\x85\xef\xbd\x92\xef\xbd\x82\xef\xbd\x89\xef\xbd\x92\xef\xbd\x84",
+ ],
+ },
+ searchPhrases: [
+ // Full-width lower.
+ {
+ body: "\uff34\uff28\uff35\uff2e\uff24\uff25\uff32\uff22\uff29\uff32\uff24",
+ match: true,
+ },
+ // Half-width.
+ { body: "Thunderbird", match: true },
+ ],
+ },
+ // Half-width Katakana with voiced sound mark.
+ // Even if search phrases are full-width, it has to hit.
+ {
+ name: "Half-width Katakana: Thunderbird (SANDAABAADO)",
+ actual: "\uff7b\uff9d\uff80\uff9e\uff70\uff8a\uff9e\uff70\uff84\uff9e",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?7727776d776A776e772w776K776e772w776E776e?=",
+ "\xef\xbd\xbb\xef\xbe\x9d\xef\xbe\x80\xef\xbe\x9e\xef\xbd\xb0\xef\xbe\x8a\xef\xbe\x9e\xef\xbd\xb0\xef\xbe\x84\xef\xbe\x9e",
+ ],
+ },
+ searchPhrases: [
+ { body: "\u30b5\u30f3\u30c0\u30fc\u30d0\u30fc\u30c9", match: true },
+ ],
+ },
+ // Thai: Would you like to see the movie?
+ {
+ name: "Thai: query movie word into Thai language content",
+ actual:
+ "\u0e04\u0e38\u0e13\u0e2d\u0e22\u0e32\u0e01\u0e44\u0e1b\u0e14\u0e39\u0e2b\u0e19\u0e31\u0e07",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?4LiE4Li44LiT4Lit4Lii4Liy4LiB4LmE4Lib4LiU4Li54Lir4LiZ4Lix4LiH?=",
+ "\xe0\xb8\x84\xe0\xb8\xb8\xe0\xb8\x93\xe0\xb8\xad\xe0\xb8\xa2\xe0\xb8\xb2\xe0\xb8\x81\xe0\xb9\x84\xe0\xb8\x9b\xe0\xb8\x94\xe0\xb8\xb9\xe0\xb8\xab\xe0\xb8\x99\xe0\xb8\xb1\xe0\xb8\x87",
+ ],
+ },
+ searchPhrases: [{ body: "\u0e2b\u0e19\u0e31\u0e07", match: true }],
+ },
+];
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+  msgGen = new MessageGenerator();
+  // Use mbox injection because the fake server chokes sometimes right now.
+  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+  glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_index_all_phrases() {
+  // Index one message per encoding for every phrase; indexPhrase stashes the
+  // created messages on each phrase for the later fulltext-search task.
+  for (let phrase of intlPhrases) {
+    await indexPhrase(phrase);
+  }
+});
+
+add_task(async function flush_db() {
+  // Force a db flush so I can investigate the database if I want.
+  // Runs between the indexing and searching tasks above/below.
+  await waitForGlodaDBFlush();
+});
+
+add_task(async function test_fulltextsearch_all_phrases() {
+  // Verify every searchPhrase matches (or fails to match) as annotated.
+  for (let phrase of intlPhrases) {
+    await fulltextsearchPhrase(phrase);
+  }
+});
+
+/**
+ * Names with encoded commas in them can screw up our mail address parsing if
+ * we perform the mime decoding prior to handing the mail address off for
+ * parsing.
+ */
+add_task(async function test_encoding_complications_with_mail_addresses() {
+  let basePair = msgGen.makeNameAndAddress();
+  // The =2C encodes a comma!
+  let encodedCommaPair = ["=?iso-8859-1?Q?=DFnake=2C_=DFammy?=", basePair[1]];
+  // "Snake, Sammy", but with a much cooler looking S-like character!
+  let decodedName = "\u00dfnake, \u00dfammy";
+  // Use the thing with the comma in it for all cases; previously there was an
+  // asymmetry between to and cc...
+  let smsg = msgGen.makeMessage({
+    from: encodedCommaPair,
+    to: [encodedCommaPair],
+    cc: [encodedCommaPair],
+  });
+  // Per-message verifier: the decoded display name must survive with its
+  // comma intact, and from/to/cc must all resolve to the same identity.
+  function verify_sammy_snake(unused, gmsg) {
+    Assert.equal(gmsg.from.contact.name, decodedName);
+    Assert.equal(gmsg.to.length, 1);
+    Assert.equal(gmsg.to[0].id, gmsg.from.id);
+    Assert.equal(gmsg.cc.length, 1);
+    Assert.equal(gmsg.cc[0].id, gmsg.from.id);
+  }
+
+  let synSet = new SyntheticMessageSet([smsg]);
+  await messageInjection.addSetsToFolders(
+    [messageInjection.getInboxFolder()],
+    [synSet]
+  );
+  await waitForGlodaIndexer();
+  Assert.ok(
+    ...assertExpectedMessagesIndexed([synSet], { verifier: verify_sammy_snake })
+  );
+});
+
+/**
+ * For each phrase in the intlPhrases array (we are parameterized over it using
+ * parameterizeTest in the 'tests' declaration), create a message where the
+ * subject, body, and attachment name are populated using the encodings in
+ * the phrase's "encodings" attribute, one encoding per message. Make sure
+ * that the strings as exposed by the gloda representation are equal to the
+ * expected/actual value.
+ * Stash each created synthetic message in a resultList list on the phrase so
+ * that we can use them as expected query results in
+ * |fulltextsearchPhrase|.
+ */
+async function indexPhrase(aPhrase) {
+  // Create a synthetic message for each of the delightful encoding types.
+  let messages = [];
+  aPhrase.resultList = [];
+  for (let charset in aPhrase.encodings) {
+    let [quoted, bodyEncoded] = aPhrase.encodings[charset];
+
+    // The same phrase goes into subject (RFC 2047 encoded-word), body (8-bit
+    // in the declared charset), and attachment filename, so all three
+    // decoding paths get exercised.
+    let smsg = msgGen.makeMessage({
+      subject: quoted,
+      body: { charset, encoding: "8bit", body: bodyEncoded },
+      attachments: [{ filename: quoted, body: "gabba gabba hey" }],
+      // Save off the actual value for checking.
+      callerData: [charset, aPhrase.actual],
+    });
+
+    messages.push(smsg);
+    aPhrase.resultList.push(smsg);
+  }
+  let synSet = new SyntheticMessageSet(messages);
+  await messageInjection.addSetsToFolders(
+    [messageInjection.getInboxFolder()],
+    [synSet]
+  );
+
+  await waitForGlodaIndexer();
+  Assert.ok(
+    ...assertExpectedMessagesIndexed([synSet], { verifier: verify_index })
+  );
+}
+
+/**
+ * Does the per-message verification for indexPhrase. Knows what is right for
+ * each message because of the callerData attribute on the synthetic message.
+ */
+function verify_index(smsg, gmsg) {
+  let [charset, actual] = smsg.callerData;
+  let subject = gmsg.subject;
+  let indexedBodyText = gmsg.indexedBodyText.trim();
+  let attachmentName = gmsg.attachmentNames[0];
+  // The dump() calls aid debugging when an encoding round-trip fails.
+  dump("using character set: " + charset + " actual: " + actual + "\n");
+  dump("subject: " + subject + " (len: " + subject.length + ")\n");
+  Assert.equal(actual, subject);
+  dump("Body: " + indexedBodyText + " (len: " + indexedBodyText.length + ")\n");
+  Assert.equal(actual, indexedBodyText);
+  dump(
+    "Attachment name: " +
+      attachmentName +
+      " (len: " +
+      attachmentName.length +
+      ")\n"
+  );
+  Assert.equal(actual, attachmentName);
+}
+
+/**
+ * For each phrase, make sure that all of the searchPhrases either match or fail
+ * to match as appropriate.
+ */
+async function fulltextsearchPhrase(aPhrase) {
+  // NOTE(review): GlodaConstants is referenced here but is not among this
+  // file's imports above — confirm it is provided by a shared head file.
+  for (let searchPhrase of aPhrase.searchPhrases) {
+    let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+    query.bodyMatches(searchPhrase.body);
+    // A matching phrase must return exactly the messages indexed for it;
+    // a non-matching phrase must return nothing.
+    await queryExpect(query, searchPhrase.match ? aPhrase.resultList : []);
+  }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_migration.js b/comm/mailnews/db/gloda/test/unit/test_migration.js
new file mode 100644
index 0000000000..f7e1bc334d
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_migration.js
@@ -0,0 +1,151 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test migration logic by artificially inducing or simulating the problem, then
+ * trigger the migration logic, then verify things ended up correct, including
+ * the schema version so a second pass of the logic doesn't happen. (As
+ * opposed to checking in an example of a broken database and running against
+ * that.)
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ nukeGlodaCachesAndCollections,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush, makeABCardForAddressPair } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { sqlRun } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+const GLODA_OLD_BAD_MESSAGE_ID = 1;
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+  msgGen = new MessageGenerator();
+  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+  glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Fix the fallout from bug 732372 (with this patch for bug 734507) which left
+ * identities whose e-mails were in the address book without contacts and then
+ * broke messages involving them.
+ */
+add_task(async function test_fix_missing_contacts_and_fallout() {
+  // -- Setup
+
+  // - Create 4 e-mail addresses, 2 of which are in the address book. (We want
+  // to make sure we have to iterate, hence >1).
+  let abPeeps = msgGen.makeNamesAndAddresses(2);
+  let nonAbPeeps = msgGen.makeNamesAndAddresses(2);
+  makeABCardForAddressPair(abPeeps[0]);
+  makeABCardForAddressPair(abPeeps[1]);
+
+  // - Create messages of the genres [from, to]: [inAB, inAB], [inAB, !inAB],
+  // [!inAB, inAB], [!inAB, !inAB]. The permutations are black box overkill.
+  // Smear the messages over multiple folders for realism.
+  let [, yesyesMsgSet, yesnoMsgSet, noyesMsgSet, nonoMsgSet] =
+    await messageInjection.makeFoldersWithSets(3, [
+      { count: 2, from: abPeeps[0], to: [abPeeps[1]] },
+      { count: 2, from: abPeeps[1], to: nonAbPeeps },
+      { count: 2, from: nonAbPeeps[0], to: abPeeps },
+      { count: 2, from: nonAbPeeps[1], to: [nonAbPeeps[0]] },
+    ]);
+
+  // Union the yeses together; we don't care about their composition.
+  let yesMsgSet = yesyesMsgSet.union(yesnoMsgSet).union(noyesMsgSet),
+    noMsgSet = nonoMsgSet;
+
+  // - Let gloda index the messages so the identities get created.
+  await waitForGlodaIndexer();
+  Assert.ok(
+    ...assertExpectedMessagesIndexed([yesMsgSet, noMsgSet], { augment: true })
+  );
+  // The messages are now indexed and the contacts created.
+
+  // - Compel an indexing sweep so the folder's dirty statuses get cleared.
+  GlodaMsgIndexer.initialSweep();
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([])); // (no new messages to index)
+
+  // - Force a DB commit so the pending commit tracker gets emptied out.
+  // (otherwise we need to worry about its state overriding our clobbering)
+  await waitForGlodaDBFlush();
+
+  // - Delete the contact records for the people in the address book.
+  // This simulates the bug-732372 breakage: identities left without contacts.
+  await sqlRun(
+    "DELETE FROM contacts WHERE id IN (" +
+      yesMsgSet.glodaMessages[0].from.contact.id +
+      ", " +
+      yesMsgSet.glodaMessages[0].to[0].contact.id +
+      ")"
+  );
+
+  // - Nuke the gloda caches so we totally forget those contact records.
+  nukeGlodaCachesAndCollections();
+
+  // - Manually mark the messages involving the inAB people with the _old_ bad
+  // id marker so that our scan will see them.
+  for (let msgHdr of yesMsgSet.msgHdrs()) {
+    msgHdr.setUint32Property("gloda-id", GLODA_OLD_BAD_MESSAGE_ID);
+  }
+
+  // - Mark the db schema version to the version with the bug (26).
+  // Sanity check that gloda actually populates the value with the current
+  // version correctly.
+  Assert.equal(
+    GlodaDatastore._actualSchemaVersion,
+    GlodaDatastore._schemaVersion
+  );
+  GlodaDatastore._actualSchemaVersion = 26;
+  await sqlRun("PRAGMA user_version = 26");
+  // Make sure that took, since we check it below as a success indicator.
+  let verRows = await sqlRun("PRAGMA user_version");
+  Assert.equal(verRows[0].getInt64(0), 26);
+
+  // -- Test
+  // - Trigger the migration logic and request an indexing sweep.
+  // (Cycling disable/enable presumably re-runs the indexer startup path
+  // where the schema-version check lives — confirm in GlodaMsgIndexer.)
+  GlodaMsgIndexer.disable();
+  GlodaMsgIndexer.enable();
+  GlodaMsgIndexer.initialSweep();
+
+  // - Wait for the indexer to complete, expecting that the messages that we
+  // marked bad will get indexed but not the good messages.
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([yesMsgSet], { augment: true }));
+
+  // - Verify that the identities have contacts again.
+  // Must have the contact object.
+  Assert.notEqual(yesMsgSet.glodaMessages[0].from.contact, undefined);
+  // The contact's name should come from the address book card.
+  Assert.equal(yesMsgSet.glodaMessages[0].from.contact.name, abPeeps[0][0]);
+
+  // - Verify that the schema version changed from gloda's perspective and from
+  // the db's perspective.
+  verRows = await sqlRun("PRAGMA user_version");
+  Assert.equal(verRows[0].getInt64(0), GlodaDatastore._schemaVersion);
+  Assert.equal(
+    GlodaDatastore._actualSchemaVersion,
+    GlodaDatastore._schemaVersion
+  );
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js b/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js
new file mode 100644
index 0000000000..2e18fbe11f
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js
@@ -0,0 +1,445 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * General testing of the byte-counting libmime facility, to make sure that what
+ * is streamed to us is actually labeled with the right size.
+ */
+
+/*
+ * Do not include glodaTestHelper because we do not want gloda loaded and it
+ * adds a lot of runtime overhead which makes certain debugging strategies like
+ * using chronicle-recorder impractical.
+ */
+
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var {
+ MessageGenerator,
+ SyntheticPartLeaf,
+ SyntheticPartMultiMixed,
+ SyntheticPartMultiRelated,
+ SyntheticMessageSet,
+} = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen = new MessageGenerator();
+var messageInjection;
+
+add_setup(function () {
+ // Sanity check: figure out how many bytes the original text occupies in UTF-8 encoding
+ Assert.equal(
+ new TextEncoder().encode(originalText).length,
+ originalTextByteCount
+ );
+
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+});
+
+var htmlText = "<html><head></head><body>I am HTML! Woo! </body></html>";
+
+var partHtml = new SyntheticPartLeaf(htmlText, {
+ contentType: "text/html",
+});
+
+// This text is 168 characters long, and occupies 173 bytes when encoded in
+// UTF-8. (We make sure it occupies 173 bytes in add_setup above). Note that
+// you cannot use this text directly because it isn't pure ASCII. You must use
+// one of the encoded forms below.
+var originalText =
+ "Longtemps, je me suis couché de bonne heure. Parfois, à " +
+ "peine ma bougie éteinte, mes yeux se fermaient si vite que je n'avais pas le " +
+ "temps de me dire : « Je m'endors. »";
+var originalTextByteCount = 173;
+
+var b64Text =
+ "TG9uZ3RlbXBzLCBqZSBtZSBzdWlzIGNvdWNow6kgZGUgYm9ubmUgaGV1cmUuIFBhcmZvaXMs\n" +
+ "IMOgIHBlaW5lIG1hIGJvdWdpZSDDqXRlaW50ZSwgbWVzIHlldXggc2UgZmVybWFpZW50IHNp\n" +
+ "IHZpdGUgcXVlIGplIG4nYXZhaXMgcGFzIGxlIHRlbXBzIGRlIG1lIGRpcmUgOiDCqyBKZSBt\n" +
+ "J2VuZG9ycy4gwrsK";
+
+var qpText =
+ "Longtemps,=20je=20me=20suis=20couch=C3=A9=20de=20bonne=20heure.=20Parfois,=\n" +
+ "=20=C3=A0=20peine=20ma=20bougie=20=C3=A9teinte,=20mes=20yeux=20se=20fermaie=\n" +
+ "nt=20si=20vite=20que=20je=20n'avais=20pas=20le=20temps=20de=20me=20dire=20:=\n" +
+ "=20=C2=AB=20Je=20m'endors.=20=C2=BB";
+
+var uuText =
+ "begin 666 -\n" +
+ 'M3&]N9W1E;7!S+"!J92!M92!S=6ES(&-O=6-HPZD@9&4@8F]N;F4@:&5U<F4N\n' +
+ "M(%!A<F9O:7,L(,.@('!E:6YE(&UA(&)O=6=I92##J71E:6YT92P@;65S('EE\n" +
+ "M=7@@<V4@9F5R;6%I96YT('-I('9I=&4@<75E(&IE(&XG879A:7,@<&%S(&QE\n" +
+ "G('1E;7!S(&1E(&UE(&1I<F4@.B#\"JR!*92!M)V5N9&]R<RX@PKL*\n" +
+ "\n" +
+ "end";
+
+var yencText =
+ "Hello there --\n" +
+ "=ybegin line=128 size=174 name=jane.doe\n" +
+ "\x76\x99\x98\x91\x9e\x8f\x97\x9a\x9d\x56\x4a\x94\x8f\x4a\x97\x8f" +
+ "\x4a\x9d\x9f\x93\x9d\x4a\x8d\x99\x9f\x8d\x92\xed\xd3\x4a\x8e\x8f" +
+ "\x4a\x8c\x99\x98\x98\x8f\x4a\x92\x8f\x9f\x9c\x8f\x58\x4a\x7a\x8b" +
+ "\x9c\x90\x99\x93\x9d\x56\x4a\xed\xca\x4a\x9a\x8f\x93\x98\x8f\x4a" +
+ "\x97\x8b\x4a\x8c\x99\x9f\x91\x93\x8f\x4a\xed\xd3\x9e\x8f\x93\x98" +
+ "\x9e\x8f\x56\x4a\x97\x8f\x9d\x4a\xa3\x8f\x9f\xa2\x4a\x9d\x8f\x4a" +
+ "\x90\x8f\x9c\x97\x8b\x93\x8f\x98\x9e\x4a\x9d\x93\x4a\xa0\x93\x9e" +
+ "\x8f\x4a\x9b\x9f\x8f\x4a\x94\x8f\x4a\x98\x51\x8b\xa0\x8b\x93\x9d" +
+ "\x0d\x0a\x4a\x9a\x8b\x9d\x4a\x96\x8f\x4a\x9e\x8f\x97\x9a\x9d\x4a" +
+ "\x8e\x8f\x4a\x97\x8f\x4a\x8e\x93\x9c\x8f\x4a\x64\x4a\xec\xd5\x4a" +
+ "\x74\x8f\x4a\x97\x51\x8f\x98\x8e\x99\x9c\x9d\x58\x4a\xec\xe5\x34" +
+ "\x0d\x0a" +
+ "=yend size=174 crc32=7efccd8e\n";
+
+// That completely exotic encoding is only detected if there is no content type
+// on the message, which is usually the case in newsgroups. I hate you yencode!
+// var partYencText = new SyntheticPartLeaf("I am text! Woo!\n\n" + yencText, {
+// contentType: "",
+// charset: "",
+// format: "",
+// });
+
+var partUUText = new SyntheticPartLeaf(
+ "I am text! With uuencode... noes...\n\n" + uuText,
+ {
+ contentType: "",
+ charset: "",
+ format: "",
+ }
+);
+
+var tachText = {
+ filename: "bob.txt",
+ body: qpText,
+ charset: "utf-8",
+ encoding: "quoted-printable",
+};
+
+var tachInlineText = {
+ filename: "foo.txt",
+ body: qpText,
+ format: null,
+ charset: "utf-8",
+ encoding: "quoted-printable",
+ disposition: "inline",
+};
+
+// Images have a different behavior than other attachments: they are displayed
+// inline most of the time, so there are two different code paths that need to
+// enable streaming and byte counting to the JS mime emitter.
+
+var tachImage = {
+ filename: "bob.png",
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: b64Text,
+};
+
+var tachPdf = {
+ filename: "bob.pdf",
+ contentType: "application/pdf",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: b64Text,
+};
+
+var tachUU = {
+ filename: "john.doe",
+ contentType: "application/x-uuencode",
+ encoding: "uuencode",
+ charset: null,
+ format: null,
+ body: uuText,
+};
+
+var tachApplication = {
+ filename: "funky.funk",
+ contentType: "application/x-funky",
+ encoding: "base64",
+ body: b64Text,
+};
+
+var relImage = {
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ contentId: "part1.foo@bar.invalid",
+ body: b64Text,
+};
+
+var tachVCard = {
+ filename: "bob.vcf",
+ contentType: "text/vcard",
+ encoding: "7bit",
+ body: "begin:vcard\nfn:Bob\nend:vcard\n",
+};
+var partTachVCard = new SyntheticPartLeaf(tachVCard.body, tachVCard);
+
+new SyntheticPartLeaf(relImage.body, relImage);
+
+var messageInfos = [
+ {
+ name: "uuencode inline",
+ bodyPart: partUUText,
+ subject: "duh",
+ epsilon: 1,
+ checkTotalSize: false,
+ },
+ // Encoding type specific to newsgroups, not interested, gloda doesn't even
+ // treat this as an attachment (probably because gloda requires an attachment
+ // to have a content-type, which these yencoded parts don't have), but size IS
+ // counted properly nonetheless.
+ /* {
+ name: 'text/plain with yenc inline',
+ bodyPart: partYencText,
+ subject: "yEnc-Prefix: \"jane.doe\" 174 yEnc bytes - yEnc test (1)",
+ },*/
+ // Inline image, not interested either, gloda doesn't keep that as an
+ // attachment (probably a deliberate choice), size is NOT counted properly.
+  // (don't want to investigate, I doubt it's useful information anyway.)
+ /* {
+ name: 'multipart/related',
+ bodyPart: new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ },*/
+ // This doesn't really make sense because it returns the length of the
+ // encoded blob without the envelope. Disabling as part of bug 711980.
+ /* {
+ name: '.eml attachment',
+ bodyPart: new SyntheticPartMultiMixed([
+ partHtml,
+ msgGen.makeMessage({ body: { body: qpText,
+ charset: "UTF-8",
+ encoding: "quoted-printable" } }),
+ ]),
+ epsilon: 1,
+ },*/
+ // All of the other common cases work fine.
+ {
+ name: 'all sorts of "real" attachments',
+ bodyPart: partHtml,
+ attachments: [
+ tachImage,
+ tachPdf,
+ tachUU,
+ tachApplication,
+ tachText,
+ tachInlineText,
+ ],
+ epsilon: 2,
+ },
+];
+
+add_task(async function test_message_attachments() {
+ for (let messageInfo of messageInfos) {
+ await message_attachments(messageInfo);
+ }
+});
+
+var bogusMessage = msgGen.makeMessage({ body: { body: originalText } });
+bogusMessage._contentType = "woooooo"; // Breaking abstraction boundaries. Bad.
+
+var bogusMessageInfos = [
+ // In this case, the wooooo part is not an attachment, so its bytes won't be
+ // counted (size will end up being 0 bytes). We don't check the size, but
+ // check_bogus_parts makes sure we're able to come up with a resulting size
+ // for the MimeMessage.
+ //
+ // In that very case, since message M is an attachment, libmime will count M's
+ // bytes, and we could have MimeMessages prefer the size libmime tells them
+ // (when they have it), rather than recursively computing their sizes. I'm not
+ // sure changing jsmimeemitter.js is worth the trouble just for buggy
+ // messages...
+ {
+ name: ".eml attachment with inner MimeUnknown",
+ bodyPart: new SyntheticPartMultiMixed([
+ partHtml,
+ msgGen.makeMessage({
+ // <--- M
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiRelated([
+ partHtml,
+ new SyntheticPartLeaf(htmlText, { contentType: "woooooo" }),
+ ]),
+ ]),
+ }),
+ ]),
+ epsilon: 6,
+ checkSize: false,
+ },
+];
+
+add_task(async function test_bogus_messages(info) {
+ for (let bogusMessageInfo of bogusMessageInfos) {
+ await bogus_messages(bogusMessageInfo);
+ }
+});
+
+add_task(async function test_have_attachments() {
+ // The goal here is to explicitly check that these messages have attachments.
+ let number = 1;
+ let synMsg = msgGen.makeMessage({
+ name: "multipart/related",
+ bodyPart: new SyntheticPartMultiMixed([partHtml, partTachVCard]),
+ number,
+ });
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ Assert.equal(aMimeMsg.allUserAttachments.length, number);
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+});
+
+async function message_attachments(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ check_attachments(
+ aMimeMsg,
+ info.epsilon,
+ "checkTotalSize" in info ? info.checkTotalSize : undefined
+ );
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+}
+
+function check_attachments(aMimeMsg, epsilon, checkTotalSize) {
+ if (aMimeMsg == null) {
+ do_throw("We really should have gotten a result!");
+ }
+
+ /* It is hard to get a byte count that's perfectly accurate. When composing
+ * the message, the MIME structure goes like this (for an encoded attachment):
+ *
+ * XXXXXXXXXX
+ * XXXXXXXXXX <-- encoded block
+ * XXXXXXXXXX
+ * <-- newline
+ * --chopchop <-- MIME separator
+ *
+ * libmime counts bytes all the way up to the separator, which means it counts
+ * the bytes for the extra line. Since newlines in emails are \n, most of the
+ * time we get att.size = 174 instead of 173.
+ *
+   * The good news is, it's just a fixed extra cost. There are no issues with the
+ * inner contents of the attachment, you can add as many newlines as you want
+ * in it, Unix or Windows, the count won't get past the bounds.
+ */
+
+ Assert.ok(aMimeMsg.allUserAttachments.length > 0);
+
+ let totalSize = htmlText.length;
+
+ for (let att of aMimeMsg.allUserAttachments) {
+ dump("*** Attachment now is " + att.name + " " + att.size + "\n");
+ Assert.ok(Math.abs(att.size - originalTextByteCount) <= epsilon);
+ totalSize += att.size;
+ }
+
+ // Undefined means true.
+ if (checkTotalSize !== false) {
+ dump(
+ "*** Total size comparison: " + totalSize + " vs " + aMimeMsg.size + "\n"
+ );
+ Assert.ok(Math.abs(aMimeMsg.size - totalSize) <= epsilon);
+ }
+}
+
+function check_bogus_parts(aMimeMsg, { epsilon, checkSize }) {
+ if (aMimeMsg == null) {
+ do_throw("We really should have gotten a result!");
+ }
+
+ // First make sure the size is computed properly
+ let x = parseInt(aMimeMsg.size);
+ Assert.ok(!isNaN(x));
+
+ let sep = "@mozilla.org/windows-registry-key;1" in Cc ? "\r\n" : "\n";
+
+ if (checkSize) {
+ let partSize = 0;
+ // The attachment, although a MimeUnknown part, is actually plain/text that
+ // contains the whole attached message, including headers. Count them.
+ for (let k in bogusMessage.headers) {
+ let v = bogusMessage.headers[k];
+ partSize += (k + ": " + v + sep).length;
+ }
+ // That's the newline between the headers and the message body.
+ partSize += sep.length;
+ // That's the message body.
+ partSize += originalTextByteCount;
+ // That's the total length that's to be returned by the MimeMessage abstraction.
+ let totalSize = htmlText.length + partSize;
+ dump(totalSize + " vs " + aMimeMsg.size + "\n");
+ Assert.ok(Math.abs(aMimeMsg.size - totalSize) <= epsilon);
+ }
+}
+
+async function bogus_messages(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ check_bogus_parts(aMimeMsg, info);
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js b/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js
new file mode 100644
index 0000000000..3380a0937e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js
@@ -0,0 +1,746 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * General testing of the JS Mime Emitter to make sure it doesn't choke on any
+ * scenarios.
+ *
+ * We do not test, but should consider testing:
+ * - MimeEncryptedPKCS7, whatever that translates to.
+ * - apple double
+ * - sun attachment
+ */
+
+/*
+ * Do not include GlodaTestHelper because we do not want gloda loaded and it
+ * adds a lot of runtime overhead which makes certain debugging strategies like
+ * using chronicle-recorder impractical.
+ */
+
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var {
+ MessageGenerator,
+ SyntheticPartLeaf,
+ SyntheticPartMultiAlternative,
+ SyntheticDegeneratePartEmpty,
+ SyntheticPartMultiSignedSMIME,
+ SyntheticPartMultiMixed,
+ SyntheticPartMultiSignedPGP,
+ SyntheticPartMultiRelated,
+ SyntheticPartMultiDigest,
+ SyntheticPartMultiParallel,
+ SyntheticMessageSet,
+} = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+// While we're at it, we'll also test the correctness of the GlodaAttachment
+// representation, esp. its "I just need the part information to rebuild the
+// URLs" claim.
+var { GlodaFundAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaFundAttr.jsm"
+);
+
+const DEATH_TO_NEWLINE_TYPE_THINGS = /[\r\n]+/g;
+var msgGen = new MessageGenerator();
+var messageInjection;
+
+var partText = new SyntheticPartLeaf("I am text! Woo!");
+var partHtml = new SyntheticPartLeaf(
+ "<html><head></head><body>I am HTML! Woo! </body></html>",
+ {
+ contentType: "text/html",
+ }
+);
+var partEnriched = new SyntheticPartLeaf(
+ "<bold><italic>I am not a popular format! sad woo :(</italic></bold>",
+ {
+ contentType: "text/enriched",
+ }
+);
+var partAlternative = new SyntheticPartMultiAlternative([partText, partHtml]);
+var partMailingListFooter = new SyntheticPartLeaf("I am an annoying footer!");
+
+// We need to make sure a part that has content-disposition: attachment, even
+// though it doesn't have any filename, still is treated as an attachment.
+var tachNoFilename = {
+ body: "I like Bordeaux wine",
+ contentType: "text/plain",
+ disposition: "attachment",
+};
+
+// This is an external attachment, i.e. a mime part that basically says "go find
+// the attachment on disk, assuming it still exists, here's the path to the file
+// on disk". It turns out feed enclosures are presented in the exact same way,
+// so this covers this case as well.
+var tachExternal = {
+ body:
+ "You deleted an attachment from this message. The original MIME headers for the attachment were:\n" +
+ "Content-Type: image/png;\n" +
+ ' name="conversations-bug1.png"\n' +
+ "Content-Transfer-Encoding: base64\n" +
+ "Content-Disposition: attachment;\n" +
+ ' filename="conversations-bug1.png"',
+ contentType: "image/png",
+ filename: "conversations-bug1.png",
+ charset: null,
+ format: null,
+ encoding: "base64",
+ extraHeaders: {
+ "X-Mozilla-External-Attachment-URL": "file:///tmp/conversations-bug1.png",
+ "X-Mozilla-Altered": 'AttachmentDetached; date="Wed Aug 03 11:11:33 2011"',
+ },
+};
+var tachText = { filename: "bob.txt", body: "I like cheese!" };
+var partTachText = new SyntheticPartLeaf(tachText.body, tachText);
+var tachInlineText = {
+ filename: "foo.txt",
+ body: "Rock the mic",
+ format: null,
+ charset: null,
+ disposition: "inline",
+};
+new SyntheticPartLeaf(tachInlineText.body, tachInlineText);
+
+var tachImage = {
+ filename: "bob.png",
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: "YWJj\n",
+};
+var partTachImage = new SyntheticPartLeaf(tachImage.body, tachImage);
+
+var relImage = {
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ contentId: "part1.foo@bar.invalid",
+ body: "YWJj\n",
+};
+var partRelImage = new SyntheticPartLeaf(relImage.body, relImage);
+
+var tachVCard = {
+ filename: "bob.vcf",
+ contentType: "text/vcard",
+ encoding: "7bit",
+ body: "begin:vcard\nfn:Bob\nend:vcard\n",
+};
+var partTachVCard = new SyntheticPartLeaf(tachVCard.body, tachVCard);
+
+var tachApplication = {
+ filename: "funky.funk",
+ contentType: "application/x-funky",
+ body: "funk!",
+};
+var partTachApplication = new SyntheticPartLeaf(
+ tachApplication.body,
+ tachApplication
+);
+
+var partTachMessages = [msgGen.makeMessage(), msgGen.makeMessage()];
+
+var partEmpty = new SyntheticDegeneratePartEmpty();
+
+var messageInfos = [
+ // -- Simple
+ {
+ name: "text/plain",
+ bodyPart: partText,
+ },
+ {
+ name: "text/html",
+ bodyPart: partHtml,
+ },
+ // -- Simply ugly
+ {
+ name: "text/enriched",
+ bodyPart: partEnriched,
+ },
+ // -- Simple w/attachment
+ {
+ name: "text/plain w/text attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachText],
+ },
+ {
+ name: "text/plain w/image attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachImage],
+ },
+ {
+ name: "text/plain w/vcard attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachVCard],
+ },
+ {
+ name: "text/plain w/app attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachApplication],
+ },
+ {
+ name: "text/html w/text attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachText],
+ },
+ {
+ name: "text/html w/image attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachImage],
+ },
+ {
+ name: "text/html w/vcard attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachVCard],
+ },
+ {
+ name: "text/html w/app attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachApplication],
+ },
+ // -- Alternatives
+ {
+ name: "multipart/alternative: text/plain, text/html",
+ bodyPart: partAlternative,
+ },
+ {
+ name: "multipart/alternative plain/html w/text attachment",
+ bodyPart: partAlternative,
+ attachments: [tachText],
+ },
+ {
+ name: "multipart/alternative plain/html w/image attachment",
+ bodyPart: partAlternative,
+ attachments: [tachImage],
+ },
+ {
+ name: "multipart/alternative plain/html w/vcard attachment",
+ bodyPart: partAlternative,
+ attachments: [tachVCard],
+ },
+ {
+ name: "multipart/alternative plain/html w/app attachment",
+ bodyPart: partAlternative,
+ attachments: [tachApplication],
+ },
+ // -- S/MIME.
+ {
+ name: "S/MIME alternative",
+ bodyPart: new SyntheticPartMultiSignedSMIME(partAlternative),
+ },
+ {
+ name: "S/MIME alternative with text attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachText])
+ ),
+ },
+ {
+ name: "S/MIME alternative with image attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachImage])
+ ),
+ },
+ {
+ name: "S/MIME alternative with image attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachVCard])
+ ),
+ },
+ {
+ name: "S/MIME alternative with app attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachApplication])
+ ),
+ },
+ {
+ name: "S/MIME alternative wrapped in mailing list",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiSignedSMIME(partAlternative),
+ partMailingListFooter,
+ ]),
+ },
+ // -- PGP signature
+ // We mainly care that all the content-type parameters show up.
+ {
+ name: "PGP signed alternative",
+ bodyPart: new SyntheticPartMultiSignedPGP(partAlternative),
+ },
+ // -- Attached RFC822
+ {
+ // Not your average attachment, pack ourselves for now.
+ name: "attached rfc822",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachMessages[0],
+ ]),
+ },
+ // -- Multipart/related
+ {
+ name: "multipart/related",
+ bodyPart: new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ },
+ {
+ name: "multipart/related inside multipart/alternative",
+ bodyPart: new SyntheticPartMultiAlternative([
+ partText,
+ new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ ]),
+ },
+ // -- Multipart/digest
+ {
+ name: "multipart/digest",
+ bodyPart: new SyntheticPartMultiDigest(partTachMessages.concat()),
+ },
+ // -- Multipart/parallel (allegedly the same as mixed)
+ {
+ name: "multipart/parallel",
+ bodyPart: new SyntheticPartMultiParallel([partText, partTachImage]),
+ },
+ // --- Previous bugs
+ // -- Bug 495057, text/enriched was being dumb
+ {
+ name: "text/enriched inside related",
+ bodyPart: new SyntheticPartMultiRelated([partEnriched]),
+ },
+ // -- Empty sections
+ // This was a crasher because the empty part made us try and close the
+ // child preceding the empty part a second time. The nested multipart led
+ // to the crash providing evidence of the double-close bug but there was
+ // nothing inherently nested-multipart-requiring to trigger the double-close
+ // bug.
+ {
+ name: "nested multipart with empty multipart section",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiRelated([partAlternative, partTachText]),
+ partEmpty,
+ ]),
+ },
+ {
+ name: "empty multipart section produces no child",
+ bodyPart: new SyntheticPartMultiMixed([partText, partEmpty, partTachText]),
+ },
+];
+
+add_setup(async function () {
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ registerCleanupFunction(function () {
+ GlodaDatastore.shutdown();
+ });
+});
+
+add_task(async function test_stream_message() {
+ for (let messageInfo of messageInfos) {
+ await stream_message(messageInfo);
+ }
+});
+
+/**
+ * Stream
+ */
+add_task(async function test_sane_bodies() {
+ // 60 bytes long... (becomes 59 on the other side when \r is dropped)
+ let hugeString =
+ "don't know what you want but I can't stream it anymore...\r\n";
+ const powahsOfTwo = 10;
+ for (let i = 0; i < powahsOfTwo; i++) {
+ hugeString = hugeString + hugeString;
+ }
+ // This will come out to be 60k, of course.
+ Assert.equal(hugeString.length, 60 * Math.pow(2, powahsOfTwo));
+
+ let synMsg = msgGen.makeMessage({
+ body: { body: hugeString, contentType: "text/plain" },
+ });
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ let bodyPart = aMimeMsg.parts[0];
+ // (the \r gets gone, so it's only 59 per line)
+ if (bodyPart.body.length > 20 * 1024 + 59) {
+ do_throw(
+ "Mime body length is " +
+ bodyPart.body.length +
+ " bytes long but should not be!"
+ );
+ }
+ promiseResolve();
+ },
+ false,
+ { saneBodySize: true }
+ );
+
+ await promise;
+});
+
+// Additional testing for the correctness of allAttachments and
+// allUserAttachments representation
+
+var partTachNestedMessages = [
+ // Looks like the synthetic part generator appends the charset=ISO-8859-1 part
+ // all by itself. That allows us to create a non-UTF-8 subject, and ensure the
+ // resulting attachment name is indeed São Paulo.eml.
+ msgGen.makeMessage({
+ subject: "S" + String.fromCharCode(0xe3) + "o Paulo",
+ bodyPart: new SyntheticPartLeaf(
+ "<html><head></head><body>I am HTML! Woo! </body></html>",
+ {
+ contentType: "text/html",
+ }
+ ),
+ }),
+ msgGen.makeMessage({
+ attachments: [tachImage],
+ }),
+ msgGen.makeMessage({
+ attachments: [tachImage, tachApplication],
+ }),
+];
+
+var attMessagesParams = [
+ {
+ attachments: [tachNoFilename],
+ },
+ {
+ attachments: [tachExternal],
+ },
+ {
+ name: "attached rfc822",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachNestedMessages[0],
+ ]),
+ },
+ {
+ name: "attached rfc822 w. image inside",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachNestedMessages[1],
+ ]),
+ },
+ {
+ name: "attached x/funky + attached rfc822 w. (image + x/funky) inside",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachApplication,
+ partTachNestedMessages[2],
+ ]),
+ },
+];
+
+var expectedAttachmentsInfo = [
+ {
+ allAttachmentsContentTypes: ["text/plain"],
+ allUserAttachmentsContentTypes: ["text/plain"],
+ },
+ {
+ allAttachmentsContentTypes: ["image/png"],
+ allUserAttachmentsContentTypes: ["image/png"],
+ },
+ {
+ allAttachmentsContentTypes: [],
+ allUserAttachmentsContentTypes: ["message/rfc822"],
+ firstAttachmentName: "S\u00e3o Paulo.eml",
+ },
+ {
+ allAttachmentsContentTypes: ["image/png"],
+ allUserAttachmentsContentTypes: ["message/rfc822"],
+ },
+ {
+ allAttachmentsContentTypes: [
+ "application/x-funky",
+ "image/png",
+ "application/x-funky",
+ ],
+ allUserAttachmentsContentTypes: ["application/x-funky", "message/rfc822"],
+ },
+];
+
+add_task(async function test_attachments_correctness() {
+ for (let [i, params] of attMessagesParams.entries()) {
+ let synMsg = msgGen.makeMessage(params);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ try {
+ let expected = expectedAttachmentsInfo[i];
+ if ("firstAttachmentName" in expected) {
+ let att = aMimeMsg.allUserAttachments[0];
+ Assert.equal(att.name.length, expected.firstAttachmentName.length);
+ for (let j = 0; j < att.name.length; ++j) {
+ Assert.equal(
+ att.name.charCodeAt(j),
+ expected.firstAttachmentName.charCodeAt(j)
+ );
+ }
+ }
+
+ Assert.equal(
+ aMimeMsg.allAttachments.length,
+ expected.allAttachmentsContentTypes.length
+ );
+ for (let [j, att] of aMimeMsg.allAttachments.entries()) {
+ Assert.equal(
+ att.contentType,
+ expected.allAttachmentsContentTypes[j]
+ );
+ }
+
+ Assert.equal(
+ aMimeMsg.allUserAttachments.length,
+ expected.allUserAttachmentsContentTypes.length
+ );
+ for (let [j, att] of aMimeMsg.allUserAttachments.entries()) {
+ Assert.equal(
+ att.contentType,
+ expected.allUserAttachmentsContentTypes[j]
+ );
+ }
+
+ // Test
+ for (let att of aMimeMsg.allUserAttachments) {
+ let uri = aMsgHdr.folder.getUriForMsg(aMsgHdr);
+ let glodaAttachment = GlodaFundAttr.glodaAttFromMimeAtt(
+ { folderMessageURI: uri },
+ att
+ );
+ // The GlodaAttachment appends the filename, which is not always
+ // present
+ Assert.ok(glodaAttachment.url.startsWith(att.url));
+ }
+ } catch (e) {
+ dump(aMimeMsg.prettyString() + "\n");
+ do_throw(e);
+ }
+
+ promiseResolve();
+ },
+ false
+ );
+
+ await promise;
+ }
+});
+
+var bogusMessage = msgGen.makeMessage({ body: { body: "whatever" } });
+bogusMessage._contentType = "woooooo"; // Breaking abstraction boundaries. Bad.
+
+var weirdMessageInfos = [
+ // This message has an unnamed part as an attachment (with
+ // Content-Disposition: inline and which is displayable inline). Previously,
+ // libmime would emit notifications for this to be treated as an attachment,
+  // named Part 1.2. Now it's not the case anymore, so we should ensure this
+ // message has no attachments.
+ {
+ name: "test message with part 1.2 attachment",
+ attachments: [
+ {
+ body: "attachment",
+ filename: "",
+ format: "",
+ },
+ ],
+ },
+];
+
+add_task(async function test_part12_not_an_attachment() {
+ let synMsg = msgGen.makeMessage(weirdMessageInfos[0]);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ Assert.ok(aMimeMsg.allUserAttachments.length == 0);
+ Assert.ok(aMimeMsg.allAttachments.length == 0);
+ } catch (e) {
+ do_throw(e);
+ }
+ promiseResolve();
+ });
+
+ await promise;
+});
+
+async function stream_message(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ verify_stream_message(info, synMsg, aMsgHdr, aMimeMsg);
+ promiseResolve();
+ });
+
+ await promise;
+}
+/**
+ * Verify the streamed results are what we wanted. For now, this just means
+ * receiving a representation; we don't check it for correctness.
+ */
+function verify_stream_message(aInfo, aSynMsg, aMsgHdr, aMimeMsg) {
+ if (aMimeMsg == null) {
+ do_throw("We really should have gotten a result!");
+ }
+ try {
+ // aMimeMsg is normalized; it only ever actually gets one child.
+ verify_body_part_equivalence(aSynMsg.bodyPart, aMimeMsg.parts[0]);
+ } catch (ex) {
+ dump("Something was wrong with the MIME rep!\n!!!!!!!!\n");
+ dump("Synthetic looks like:\n " + aSynMsg.prettyString() + "\n\n");
+ dump(
+ "MIME looks like: \n" + aMimeMsg.prettyString(true, " ", true) + "\n\n"
+ );
+ do_throw(ex);
+ }
+
+ dump("Everything is just fine.\n");
+ dump("Synthetic looks like:\n " + aSynMsg.prettyString() + "\n\n");
+ dump(
+ "MIME looks like:\n " + aMimeMsg.prettyString(true, " ", false) + "\n\n"
+ );
+}
+
+/**
+ * Applies any transformations to the synthetic body part that we would expect
+ * to happen to a message during its libmime journey. It may be better to
+ * just put the expected translations in the synthetic body part instead of
+ * trying to make this method do anything complex.
+ */
+function synTransformBody(aSynBodyPart) {
+ let text = aSynBodyPart.body.trim();
+ // This transforms things into HTML apparently.
+ if (aSynBodyPart._contentType == "text/enriched") {
+ // Our job here is just to transform just enough for our example above.
+ // We also could have provided a manual translation on the body part.
+ text = text.replace(/bold/g, "B").replace(/italic/g, "I");
+ }
+ return text;
+}
+
+function verify_body_part_equivalence(aSynBodyPart, aMimePart) {
+ // The content-type devoid of parameters should match.
+ Assert.equal(aSynBodyPart._contentType, aMimePart.contentType);
+
+ // The header representation of the content-type should also match unless
+ // this is an rfc822 part, in which case it should only match for the
+ // actual contents.
+ if (aMimePart.contentType != "message/rfc822") {
+ Assert.equal(
+ aSynBodyPart.contentTypeHeaderValue.replace(
+ DEATH_TO_NEWLINE_TYPE_THINGS,
+ ""
+ ),
+ aMimePart.get("content-type").replace(DEATH_TO_NEWLINE_TYPE_THINGS, "")
+ );
+ }
+
+ // XXX body part checking will get brittle if we ever actually encode things!
+ if (
+ aSynBodyPart.body &&
+ !aSynBodyPart._filename &&
+ aSynBodyPart._contentType.startsWith("text/")
+ ) {
+ Assert.equal(
+ synTransformBody(aSynBodyPart),
+ aMimePart.body
+ .trim()
+ .replace(/\r/g, "")
+ // Remove stuff added by libmime for HTML parts.
+ .replace(
+ /[\n]*<meta http-equiv="content-type" content="text\/html; .*">[\n]*/g,
+ ""
+ )
+ .replace(/[\n]+<\/body>/, "</body>")
+ );
+ }
+ if (aSynBodyPart.parts) {
+ let iPart;
+ let realPartOffsetCompensator = 0;
+ for (iPart = 0; iPart < aSynBodyPart.parts.length; iPart++) {
+ let subSyn = aSynBodyPart.parts[iPart];
+ // If this is a degenerate empty, it should not produce output, so
+ // compensate for the offset drift and get on with our lives.
+ if (subSyn instanceof SyntheticDegeneratePartEmpty) {
+ realPartOffsetCompensator--;
+ continue;
+ }
+ let subMime = aMimePart.parts[iPart + realPartOffsetCompensator];
+ // Our special case is the signature, which libmime does not expose to us.
+ // Ignore! (Also, have our too-many-part checker below not trip on this.)
+ if (subSyn._contentType != "application/x-pkcs7-signature") {
+ if (subMime == null) {
+ do_throw(
+ "No MIME part matching " + subSyn.contentTypeHeaderValue + "\n"
+ );
+ }
+ verify_body_part_equivalence(subSyn, subMime);
+ }
+ }
+ // Only check if there are still more mime parts; don't check for a count
+ // mismatch (the PKCS case from above needs to be handled).
+ if (iPart < aMimePart.parts.length) {
+ do_throw("MIME part has more sub-parts than syn part?");
+ }
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_msg_search.js b/comm/mailnews/db/gloda/test/unit/test_msg_search.js
new file mode 100644
index 0000000000..2c8ea1c528
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_msg_search.js
@@ -0,0 +1,155 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test GlodaMsgSearcher.jsm, our heuristic-based fulltext search mechanism. Things we
+ * generally want to verify:
+ * - fulltext weighting by where the match happened works.
+ * - static interestingness impacts things appropriately.
+ *
+ * Our general strategy is to create two messages each with a unique string
+ * placed in controlled places and whatever intentional message manipulation
+ * is required to set things up. Then we query using a GlodaMsgSearcher with
+ * the limit set to 1. Only the message we expect should come back.
+ * Keep in mind in all tests that our underlying ranking mechanism is based on
+ * time so the date of each message is relevant but should not be significant
+ * because our score boost factor should always be well in excess of the one
+ * hour increment between messages.
+ *
+ * Previously, we relied on the general equivalence of the logic in
+ * test_query_core to our message search logic.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { GlodaMsgSearcher } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaMsgSearcher.jsm"
+);
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var uniqueCounter = 0;
+var messageInjection;
+
+add_setup(async function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Verify that the ranking function is using the weights as expected. We do not
+ * need to test all the permutations.
+ */
+add_task(async function test_fulltext_weighting_by_column() {
+ let ustr = unique_string();
+ let [, subjSet, bodySet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: ustr },
+ { count: 1, body: { body: ustr } },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([subjSet, bodySet]));
+ await asyncMsgSearcherExpect(ustr, subjSet);
+});
+
+/**
+ * A term mentioned 3 times in the body is worth more than twice in the subject.
+ * (This is because the subject saturates at one occurrence worth 2.0 and the
+ * body does not saturate until 10, each worth 1.0.)
+ */
+add_task(async function test_fulltext_weighting_saturation() {
+ let ustr = unique_string();
+ let double_ustr = ustr + " " + ustr;
+ let thrice_ustr = ustr + " " + ustr + " " + ustr;
+ let [, subjSet, bodySet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: double_ustr },
+ { count: 1, body: { body: thrice_ustr } },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([subjSet, bodySet]));
+ await asyncMsgSearcherExpect(ustr, bodySet);
+});
+
+/**
+ * Use a starred message with the same fulltext match characteristics as another
+ * message to verify the preference goes the right way. Have the starred
+ * message be the older message for safety.
+ */
+add_task(async function test_static_interestingness_boost_works() {
+ let ustr = unique_string();
+ let [, starred, notStarred] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: ustr },
+ { count: 1, subject: ustr },
+ ]);
+ // Index in their native state.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([starred, notStarred]));
+ // Star and index.
+ starred.setStarred(true);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([starred]));
+ // Stars upon thars wins.
+ await asyncMsgSearcherExpect(ustr, starred);
+});
+
+/**
+ * Make sure that the query does not retrieve more than actually matches.
+ */
+add_task(async function test_joins_do_not_return_everybody() {
+ let ustr = unique_string();
+ let [, subjSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1, subject: ustr },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([subjSet]));
+ await asyncMsgSearcherExpect(ustr, subjSet, 2);
+});
+
+/**
+ * Generate strings like "aaaaa", "aabaa", "aacaa", etc. The idea with the
+ * suffix is to avoid the porter stemmer from doing something weird that
+ * collapses things.
+ */
+function unique_string() {
+ let uval = uniqueCounter++;
+ let s =
+ String.fromCharCode(97 + Math.floor(uval / (26 * 26))) +
+ String.fromCharCode(97 + (Math.floor(uval / 26) % 26)) +
+ String.fromCharCode(97 + (uval % 26)) +
+ "aa";
+ return s;
+}
+
+/**
+ * Wrap the construction of a GlodaMsgSearcher with a limit of 1 and feed it to
+ * queryExpect.
+ *
+ * @param aFulltextStr The fulltext query string which GlodaMsgSearcher will
+ * parse.
+ * @param aExpectedSet The expected result set. Make sure that the size of the
+ * set is consistent with aLimit.
+ * @param [aLimit=1]
+ *
+ * Use like so:
+ *   await asyncMsgSearcherExpect("foo bar", someSynMsgSet);
+ */
+async function asyncMsgSearcherExpect(aFulltextStr, aExpectedSet, aLimit) {
+ let limit = aLimit ? aLimit : 1;
+ Services.prefs.setIntPref("mailnews.database.global.search.msg.limit", limit);
+ let searcher = new GlodaMsgSearcher(null, aFulltextStr);
+ await queryExpect(searcher.buildFulltextQuery(), aExpectedSet);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js b/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js
new file mode 100644
index 0000000000..128720ee76
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js
@@ -0,0 +1,144 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test noun_mimetype. Exists because I just changed its implementation and I'm
+ * afraid I may have damaged it and it's hard to tell, so ironically a unit test
+ * is the easiest solution. (Don't you hate it when the right thing to do is
+ * also the easy thing to do?)
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { MimeTypeNoun } = ChromeUtils.import(
+ "resource:///modules/gloda/NounMimetype.jsm"
+);
+
+var passResults = [];
+var curPassResults;
+
+add_setup(async function () {
+ let msgGen = new MessageGenerator();
+ let messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_new_pass_first_time() {
+ await new_pass();
+});
+
+add_task(function test_basics_first_time() {
+ test_basics();
+});
+
+/**
+ * Do two passes of test_basics making sure that persisted values really
+ * persist.
+ */
+add_task(async function test_new_pass_second_time() {
+ await new_pass();
+});
+
+add_task(function test_basics_second_time() {
+ test_basics();
+});
+
+add_task(function verify_passes_are_the_same() {
+ var firstPassResults = passResults[0];
+ for (let iType = 0; iType < curPassResults.length; iType++) {
+ for (let iPass = 1; iPass < passResults.length; iPass++) {
+ Assert.equal(firstPassResults[iType].id, passResults[iPass][iType].id);
+ }
+ }
+});
+
+add_task(function test_parameters() {
+ let plain = MimeTypeNoun.getMimeType("text/plain");
+ Assert.equal(plain, MimeTypeNoun.getMimeType('text/plain; charset="UTF-8"'));
+});
+
+/**
+ * Setup a new 'pass' by nuking the MimeTypeNoun's state if it has any. The
+ * goal here is to verify that the database persistence is actually working,
+ * and we can only do that if we convince it to nuke its authoritative 'cache'
+ * and grab a new copy.
+ */
+async function new_pass() {
+ // We have to nuke if it has already happened.
+ if (passResults.length) {
+ MimeTypeNoun._mimeTypes = {};
+ MimeTypeNoun._mimeTypesByID = {};
+ MimeTypeNoun._mimeTypeHighID = {};
+ MimeTypeNoun._highID = 0;
+ MimeTypeNoun._init();
+ }
+ curPassResults = [];
+ passResults.push(curPassResults);
+
+ // The mime type does some async stuff... make sure we don't advance until
+ // it is done with said stuff.
+ await waitForGlodaDBFlush();
+}
+
+function test_basics() {
+ let python;
+ // If this is not the first pass, check for python before other things to
+ // make sure we're not just relying on consistent logic rather than actual
+ // persistence.
+ if (passResults.length) {
+ python = MimeTypeNoun.getMimeType("text/x-python");
+ }
+
+ let jpeg = MimeTypeNoun.getMimeType("image/jpeg");
+ curPassResults.push(jpeg);
+
+ let png = MimeTypeNoun.getMimeType("image/png");
+ curPassResults.push(png);
+
+ let html = MimeTypeNoun.getMimeType("text/html");
+ curPassResults.push(html);
+
+ let plain = MimeTypeNoun.getMimeType("text/plain");
+ curPassResults.push(plain);
+
+ // If this is for the first time, check for python now (see above).
+ if (!passResults.length) {
+ python = MimeTypeNoun.getMimeType("text/x-python");
+ }
+ // But always add it to the results now, as we need consistent ordering
+ // since we use a list.
+ curPassResults.push(python);
+
+ // Sanity-checking the parsing.
+ Assert.equal(jpeg.type, "image");
+ Assert.equal(jpeg.subType, "jpeg");
+
+ // - Make sure the numeric trickiness for the block stuff is actually doing
+ // the right thing!
+ const BLOCK_SIZE = MimeTypeNoun.TYPE_BLOCK_SIZE;
+ // Same blocks.
+ Assert.equal(
+ Math.floor(jpeg.id / BLOCK_SIZE),
+ Math.floor(png.id / BLOCK_SIZE)
+ );
+ Assert.equal(
+ Math.floor(html.id / BLOCK_SIZE),
+ Math.floor(plain.id / BLOCK_SIZE)
+ );
+ // Different blocks.
+ Assert.notEqual(
+ Math.floor(jpeg.id / BLOCK_SIZE),
+ Math.floor(html.id / BLOCK_SIZE)
+ );
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js b/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js
new file mode 100644
index 0000000000..e47eac75bc
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js
@@ -0,0 +1,62 @@
+/**
+ * Atypical gloda unit test that tests nuke migration. Gloda is not designed
+ * to be shutdown and started up again in the same process lifetime. It tries
+ * to be clever with caching accessors that clobber themselves out of existence
+ * which are hard to make come back to life, and probably other things.
+ *
+ * So what we do is create a global-messages-db.sqlite with an unacceptably
+ * old schema version before tickling gloda to startup. If gloda comes up
+ * with a database connection and it has the right schema version, we declare
+ * that gloda has successfully loaded. Our only historical screw-up here was
+ * very blatant (and was actually a result of trying to avoid complexity in
+ * the nuke path! oh the irony!) so we don't need to get all hardcore.
+ */
+
+/**
+ * The DB version to use. We set this as a non-const variable so that
+ * test_nuke_migration_from_future.js can change it.
+ */
+var BAD_DB_VERSION_TO_USE = 2;
+
+/**
+ * Synchronously create and close the out-of-date database. Because we are
+ * only using synchronous APIs, we know everything is in fact dead. GC being
+ * what it is, the various C++ objects will probably stay alive through the
+ * next test, but will be inert because we have closed the database.
+ */
+function make_out_of_date_database() {
+ // Get the path to our global database
+ var dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ dbFile.append("global-messages-db.sqlite");
+
+ // Create the database
+ var dbConnection = Services.storage.openUnsharedDatabase(dbFile);
+ dbConnection.schemaVersion = BAD_DB_VERSION_TO_USE;
+
+ // Close the database (will throw if there's a problem closing)
+ dbConnection.close();
+}
+
+// some copied and pasted preference setup from glodaTestHelper that is
+// appropriate here.
+// yes to indexing
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// no to a sweep we don't control
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
+function run_test() {
+ // - make the old database
+ make_out_of_date_database();
+
+ // - tickle gloda
+ // GlodaPublic.jsm loads Gloda.jsm which self-initializes and initializes the datastore
+ ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+ let { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+ );
+
+ Assert.notEqual(GlodaDatastore.asyncConnection, null);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js b/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js
new file mode 100644
index 0000000000..f60c1dd29e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js
@@ -0,0 +1,12 @@
+/**
+ * There are actually two ways the nuke migration can be invoked. From
+ * a database too far from the future, and too far from the past. This
+ * one is the future one. We must keep ourselves safe from time-traveling
+ * grandchildren!
+ */
+
+/* import-globals-from test_nuke_migration.js */
+load("test_nuke_migration.js");
+
+// pick something so far forward it will never get used!
+BAD_DB_VERSION_TO_USE = 100000000;
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_core.js b/comm/mailnews/db/gloda/test/unit/test_query_core.js
new file mode 100644
index 0000000000..0849a62d50
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_core.js
@@ -0,0 +1,658 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test the mechanics our query functionality. Tests in this file are intended
+ * to cover extreme boundary cases and things that are just unlikely to happen
+ * in reasonable message use-cases. (Which is to say, it could be hard to
+ * formulate a set of synthetic messages that result in the situation we want
+ * to test for.)
+ */
+
+var { prepareIndexerForTesting } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaIndexer, IndexingJob } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+
+/* ===== Test Noun ===== */
+/*
+ * Introduce a simple noun type for our testing so that we can avoid having to
+ * deal with the semantics of messages/friends and all their complexity.
+ */
+
+var WidgetProvider = {
+ providerName: "widget",
+ *process() {
+ yield GlodaConstants.kWorkDone;
+ },
+};
+
+add_setup(function () {
+ // Don't initialize the index message state
+ prepareIndexerForTesting();
+ GlodaIndexer.registerIndexer(GenericIndexer);
+ Gloda.addIndexerListener(genericIndexerCallback);
+});
+
+var WidgetNoun;
+add_task(function setup_test_noun_and_attributes() {
+ // --- noun
+ WidgetNoun = Gloda.defineNoun({
+ name: "widget",
+ clazz: Widget,
+ allowsArbitraryAttrs: true,
+ // It is vitally important to our correctness that we allow caching
+ // otherwise our in-memory representations will not be canonical and the db
+ // will load some. Or we could add things to collections as we index them.
+ cache: true,
+ cacheCost: 32,
+ schema: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["intCol", "NUMBER", "inum"],
+ // datePRTime is special and creates a Date object.
+ ["dateCol", "NUMBER", "datePRTime"],
+ ["strCol", "STRING", "str"],
+ ["notabilityCol", "NUMBER", "notability"],
+ ["textOne", "STRING", "text1"],
+ ["textTwo", "STRING", "text2"],
+ ],
+ indices: {
+ intCol: ["intCol"],
+ strCol: ["strCol"],
+ },
+ fulltextColumns: [
+ ["fulltextOne", "TEXT", "text1"],
+ ["fulltextTwo", "TEXT", "text2"],
+ ],
+ genericAttributes: true,
+ },
+ });
+
+ const EXT_NAME = "test";
+
+ // --- special (on-row) attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "inum",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "intCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "date",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "dateCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_DATE,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "str",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "strCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ });
+
+ // --- fulltext attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "text1",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: "fulltextOne",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "text2",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: "fulltextTwo",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "fulltextAll",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: WidgetNoun.tableName + "Text",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+
+ // --- external (attribute-storage) attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "singleIntAttr",
+ singular: true,
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "multiIntAttr",
+ singular: false,
+ emptySetIsSignificant: true,
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+});
+
+/* ===== Tests ===== */
+
+const ALPHABET = "abcdefghijklmnopqrstuvwxyz";
+add_task(async function test_lots_of_string_constraints() {
+ let stringConstraints = [];
+ for (let i = 0; i < 2049; i++) {
+ stringConstraints.push(
+ ALPHABET[Math.floor(i / (ALPHABET.length * 2)) % ALPHABET.length] +
+ ALPHABET[Math.floor(i / ALPHABET.length) % ALPHABET.length] +
+ ALPHABET[i % ALPHABET.length] +
+ // Throw in something that will explode if not quoted
+ // and use an uneven number of things so if we fail
+ // to quote it won't get quietly eaten.
+ "'\""
+ );
+ }
+
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.str.apply(query, stringConstraints);
+
+ await queryExpect(query, []);
+});
+
+/* === Query === */
+
+/**
+ * Use a counter so that each test can have its own unique value for intCol so
+ * that it can use that as a constraint. Otherwise we would need to purge
+ * between every test. That's not an unreasonable alternative, but this works.
+ * Every test should increment this before using it.
+ */
+var testUnique = 100;
+
+/**
+ * Widgets with multiIntAttr populated with one or more values.
+ */
+var nonSingularWidgets;
+/**
+ * Widgets with multiIntAttr unpopulated.
+ */
+var singularWidgets;
+
+add_task(async function setup_non_singular_values() {
+ testUnique++;
+ let origin = new Date("2007/01/01");
+ nonSingularWidgets = [
+ new Widget(testUnique, origin, "ns1", 0, "", ""),
+ new Widget(testUnique, origin, "ns2", 0, "", ""),
+ ];
+ singularWidgets = [
+ new Widget(testUnique, origin, "s1", 0, "", ""),
+ new Widget(testUnique, origin, "s2", 0, "", ""),
+ ];
+ nonSingularWidgets[0].multiIntAttr = [1, 2];
+ nonSingularWidgets[1].multiIntAttr = [3];
+ singularWidgets[0].multiIntAttr = [];
+ // And don't bother setting it on singularWidgets[1].
+
+ GenericIndexer.indexObjects(nonSingularWidgets.concat(singularWidgets));
+ await promiseGenericIndexerCallback;
+
+ // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+});
+
+add_task(async function test_query_has_value_for_non_singular() {
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr();
+ await queryExpect(query, nonSingularWidgets);
+});
+
+/**
+ * We should find the one singular object where we set the multiIntAttr to an
+ * empty set. We don't find the one without the attribute since that's
+ * actually something different.
+ * We also want to test that re-indexing properly adds/removes the attribute
+ * so change the object and make sure everything happens correctly.
+ *
+ * @tests gloda.datastore.sqlgen.kConstraintIn.emptySet
+ * @tests gloda.query.test.kConstraintIn.emptySet
+ */
+add_task(async function test_empty_set_logic() {
+ // - Initial query based on the setup previously.
+ dump("Initial index case\n");
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr(null);
+ await queryExpect(query, [singularWidgets[0]]);
+
+ // - Make one of the non-singulars move to empty and move the guy who matched
+ // to no longer match.
+ dump("Incremental index case\n");
+ nonSingularWidgets[0].multiIntAttr = [];
+ singularWidgets[0].multiIntAttr = [4, 5];
+
+ GenericIndexer.indexObjects([nonSingularWidgets[0], singularWidgets[0]]);
+ await promiseGenericIndexerCallback;
+
+ // Reset promise;
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+
+ query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr(null);
+ await queryExpect(query, [nonSingularWidgets[0]]);
+
+ // Make sure that the query doesn't explode when it has to handle a case
+ // that's not supposed to match.
+ Assert.ok(!query.test(singularWidgets[0]));
+});
+
+/* === Search === */
+/*
+ * The conceit of our search is that more recent messages are better than older
+ * messages. But at the same time, we care about some messages more than
+ * others (in general), and we care about messages that match search terms
+ * more strongly too. So we introduce a general 'score' heuristic which we
+ * then apply to message timestamps to make them appear more recent. We
+ * then order by this 'date score' hybrid, which we dub "dascore". Such a
+ * flattening heuristic is over-simple, but believed to be sufficient to
+ * generally get us the messages we want. Post-processing based can then
+ * be more multi-dimensional and what not, but that is beyond the scope of
+ * this unit test.
+ */
+
+/**
+ * How much time boost should a 'score point' amount to? The authoritative,
+ * incontrovertible answer, across all time and space, is a week.
+ * Gloda and storage like to store things as PRTime and so we do it too,
+ * even though milliseconds are the actual granularity of JS Date instances.
+ */
+const SCORE_TIMESTAMP_FACTOR = 1000 * 1000 * 60 * 60 * 24 * 7;
+
+/**
+ * How many score points for each fulltext match?
+ */
+const SCORE_FOR_FULLTEXT_MATCH = 1;
+
+/**
+ * Roughly how many characters are in each offset match.
+ */
+const OFFSET_CHARS_PER_FULLTEXT_MATCH = 8;
+
+var fooWidgets = null;
+var barBazWidgets = null;
+
+add_task(async function setup_search_ranking_idiom() {
+ // --- Build some widgets for testing.
+ // Use inum to represent the expected result sequence
+ // Setup a base date.
+ let origin = new Date("2008/01/01");
+ let daymore = new Date("2008/01/02");
+ let monthmore = new Date("2008/02/01");
+ fooWidgets = [
+ // -- Setup the term "foo" to do frequency tests.
+ new Widget(5, origin, "", 0, "", "foo"),
+ new Widget(4, origin, "", 0, "", "foo foo"),
+ new Widget(3, origin, "", 0, "foo", "foo foo"),
+ new Widget(2, origin, "", 0, "foo foo", "foo foo"),
+ new Widget(1, origin, "", 0, "foo foo", "foo foo foo"),
+ new Widget(0, origin, "", 0, "foo foo foo", "foo foo foo"),
+ ];
+ barBazWidgets = [
+ // -- Setup score and matches to boost older messages over newer messages.
+ new Widget(7, origin, "", 0, "", "bar"), // score boost: 1 + date: 0
+ new Widget(6, daymore, "", 0, "", "bar"), // 1 + 0+
+ new Widget(5, origin, "", 1, "", "bar"), // 2 + 0
+ new Widget(4, daymore, "", 0, "bar", "bar"), // 2 + 0+
+ new Widget(3, origin, "", 1, "bar", "baz"), // 3 + 0
+ new Widget(2, monthmore, "", 0, "", "bar"), // 1 + 4
+ new Widget(1, origin, "", 0, "bar baz", "bar baz bar bar"), // 6 + 0
+ new Widget(0, origin, "", 1, "bar baz", "bar baz bar bar"), // 7 + 0
+ ];
+
+ GenericIndexer.indexObjects(fooWidgets.concat(barBazWidgets));
+ await promiseGenericIndexerCallback;
+
+ // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+});
+
+// Add one because the last snippet shouldn't have a trailing space.
+const OFFSET_SCORE_SQL_SNIPPET =
+ "(((length(osets) + 1) / " +
+ OFFSET_CHARS_PER_FULLTEXT_MATCH +
+ ") * " +
+ SCORE_FOR_FULLTEXT_MATCH +
+ ")";
+
+const SCORE_SQL_SNIPPET = "(" + OFFSET_SCORE_SQL_SNIPPET + " + notabilityCol)";
+
+const DASCORE_SQL_SNIPPET =
+ "((" + SCORE_SQL_SNIPPET + " * " + SCORE_TIMESTAMP_FACTOR + ") + dateCol)";
+
+const WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL =
+ "SELECT ext_widget.*, offsets(ext_widgetText) AS osets " +
+ "FROM ext_widget, ext_widgetText WHERE ext_widgetText MATCH ?" +
+ " AND ext_widget.id == ext_widgetText.docid";
+
+/**
+ * Used by queryExpect to verify widget ordering and stashed column data.
+ */
+function verify_widget_order_and_stashing(
+ aZeroBasedIndex,
+ aWidget,
+ aCollection
+) {
+ Assert.equal(aZeroBasedIndex, aWidget.inum);
+ if (
+ !aCollection.stashedColumns[aWidget.id] ||
+ !aCollection.stashedColumns[aWidget.id].length
+ ) {
+ do_throw("no stashed information for widget: " + aWidget);
+ }
+}
+
+/**
+ * Test the fundamentals of the search ranking idiom we use elsewhere. This
+ * is primarily a simplified version of that ranking logic.
+ */
+add_task(async function test_search_ranking_idiom_offsets() {
+ let query = Gloda.newQuery(WidgetNoun.id, {
+ explicitSQL: WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL,
+ // osets becomes 0-based column number 7.
+ // dascore becomes 0-based column number 8.
+ outerWrapColumns: [DASCORE_SQL_SNIPPET + " AS dascore"],
+ // Save our extra columns for analysis and debugging.
+ stashColumns: [7, 8],
+ });
+ query.fulltextAll("foo");
+ query.orderBy("-dascore");
+ await queryExpect(
+ query,
+ fooWidgets,
+ null,
+ null,
+ verify_widget_order_and_stashing
+ );
+});
+
+add_task(async function test_search_ranking_idiom_score() {
+ let query = Gloda.newQuery(WidgetNoun.id, {
+ explicitSQL: WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL,
+ // osets becomes 0-based column number 7
+ // dascore becomes 0-based column number 8
+ outerWrapColumns: [
+ DASCORE_SQL_SNIPPET + " AS dascore",
+ SCORE_SQL_SNIPPET + " AS dabore",
+ "dateCol",
+ ],
+ // Save our extra columns for analysis and debugging.
+ stashColumns: [7, 8, 9, 10],
+ });
+ query.fulltextAll("bar OR baz");
+ query.orderBy("-dascore");
+ await queryExpect(
+ query,
+ barBazWidgets,
+ null,
+ null,
+ verify_widget_order_and_stashing
+ );
+});
+
+/**
+ * Generic indexing mechanism; does nothing special, just uses
+ * Gloda.grokNounItem. Call GenericIndexer.indexObjects() to queue
+ * your objects for initial indexing.
+ */
+var GenericIndexer = {
+ _log: console.createInstance({
+ prefix: "gloda.test",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.test.loglevel",
+ }),
+ /* public interface */
+ name: "generic_indexer",
+ enable() {
+ this.enabled = true;
+ },
+ disable() {
+ this.enabled = false;
+ },
+ get workers() {
+ return [
+ [
+ "generic",
+ {
+ worker: this._worker_index_generic,
+ },
+ ],
+ ];
+ },
+ initialSweep() {},
+ /* mock interface */
+ enabled: false,
+ initialSweepCalled: false,
+ indexObjects(aObjects) {
+ indexingInProgress = true;
+ this._log.debug(
+ "enqueuing " +
+ aObjects.length +
+ " generic objects with id: " +
+ aObjects[0].NOUN_ID
+ );
+ GlodaIndexer.indexJob(new IndexingJob("generic", null, aObjects.concat()));
+ },
+ /* implementation */
+ *_worker_index_generic(aJob, aCallbackHandle) {
+ this._log.debug(
+ "Beginning indexing " + aJob.items.length + " generic items"
+ );
+ for (let item of aJob.items) {
+ this._log.debug("Indexing: " + item);
+ yield aCallbackHandle.pushAndGo(
+ Gloda.grokNounItem(
+ item,
+ {},
+ item.id === undefined,
+ item.id === undefined,
+ aCallbackHandle,
+ item.NOUN_DEF.cache
+ )
+ );
+ item._stash();
+ }
+
+ yield GlodaConstants.kWorkDone;
+ this._log.debug("Done indexing");
+ },
+};
+
+var indexingInProgress = false;
+var promiseGenericIndexerCallbackResolve;
+var promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+});
+function genericIndexerCallback(aStatus) {
+ // If indexingInProgress is false, we've received the synthetic
+ // notification, so ignore it.
+ if (indexingInProgress && aStatus == GlodaConstants.kIndexerIdle) {
+ indexingInProgress = false;
+ promiseGenericIndexerCallbackResolve();
+ }
+}
+
+/**
+ * Simple test object.
+ *
+ * Has some tricks for gloda indexing to deal with gloda's general belief that
+ * things are immutable. When we get indexed we stash all of our attributes
+ * at that time in _indexStash. Then when we get cloned we propagate our
+ * current attributes over to the cloned object and restore _indexStash. This
+ * sets things up the way gloda expects them as long as we never de-persist
+ * from the db.
+ */
+function Widget(inum, date, str, notability, text1, text2) {
+ this._id = undefined;
+ this._inum = inum;
+ this._date = date;
+ this._str = str;
+ this._notability = notability;
+ this._text1 = text1;
+ this._text2 = text2;
+
+ this._indexStash = null;
+ this._restoreStash = null;
+}
+Widget.prototype = {
+ _clone() {
+ let clonus = new Widget(
+ this._inum,
+ this._date,
+ this._str,
+ this._notability,
+ this._text1,
+ this._text2
+ );
+ clonus._id = this._id;
+ clonus._iAmAClone = true;
+
+ for (let key of Object.keys(this)) {
+ let value = this[key];
+ if (key.startsWith("_")) {
+ continue;
+ }
+ clonus[key] = value;
+ if (key in this._indexStash) {
+ this[key] = this._indexStash[key];
+ }
+ }
+
+ return clonus;
+ },
+ _stash() {
+ this._indexStash = {};
+ for (let key of Object.keys(this)) {
+ let value = this[key];
+ if (key[0].startsWith("_")) {
+ continue;
+ }
+ this._indexStash[key] = value;
+ }
+ },
+
+ get id() {
+ return this._id;
+ },
+ set id(aVal) {
+ this._id = aVal;
+ },
+
+ // Gloda's attribute idiom demands that row attributes be prefixed with a '_'
+ // (Because Gloda.grokNounItem detects attributes by just walking.). This
+ // could be resolved by having the special attributes moot these dudes, but
+ // that's not how things are right now.
+ get inum() {
+ return this._inum;
+ },
+ set inum(aVal) {
+ this._inum = aVal;
+ },
+ get date() {
+ return this._date;
+ },
+ set date(aVal) {
+ this._date = aVal;
+ },
+
+ get datePRTime() {
+ return this._date.valueOf() * 1000;
+ },
+ // We need a special setter to convert back from PRTime to an actual
+ // date object.
+ set datePRTime(aVal) {
+ this._date = new Date(aVal / 1000);
+ },
+
+ get str() {
+ return this._str;
+ },
+ set str(aVal) {
+ this._str = aVal;
+ },
+ get notability() {
+ return this._notability;
+ },
+ set notability(aVal) {
+ this._notability = aVal;
+ },
+ get text1() {
+ return this._text1;
+ },
+ set text1(aVal) {
+ this._text1 = aVal;
+ },
+ get text2() {
+ return this._text2;
+ },
+ set text2(aVal) {
+ this._text2 = aVal;
+ },
+
+ toString() {
+ return "" + this.id;
+ },
+};
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js
new file mode 100644
index 0000000000..93b4a9ec34
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js
@@ -0,0 +1,37 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that were offline before they were
+ * indexed.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js
new file mode 100644
index 0000000000..368252a5e6
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js
@@ -0,0 +1,38 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that aren't offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+expectFulltextResults = false;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js
new file mode 100644
index 0000000000..0788c15ff7
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js
@@ -0,0 +1,40 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that were indexed, then made available
+ * offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+// We want to go offline once the messages have already been indexed online.
+goOffline = true;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js
new file mode 100644
index 0000000000..c88fe1aa4e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for local messages.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js b/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js
new file mode 100644
index 0000000000..efe489974e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js
@@ -0,0 +1,894 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that S/MIME messages are properly displayed and that the MimeMessage
+ * representation is correct.
+ */
+
+var { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+function initNSS() {
+ // Copy the NSS database files over.
+ let profile = FileUtils.getDir("ProfD", []);
+ let files = ["cert9.db", "key4.db"];
+ let directory = do_get_file("../../../../data/db-tinderbox-invalid");
+ for (let f of files) {
+ let keydb = directory.clone();
+ keydb.append(f);
+ keydb.copyTo(profile, f);
+ }
+
+ // Ensure NSS is initialized.
+ Cc["@mozilla.org/psm;1"].getService(Ci.nsISupports);
+}
+
+add_setup(async function () {
+ initNSS();
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+});
+
+add_task(async function test_smime_mimemsg() {
+ let msg = msgGen.makeEncryptedSMimeMessage({
+ from: ["Tinderbox", "tinderbox@foo.invalid"],
+ to: [["Tinderbox", "tinderbox@foo.invalid"]],
+ subject: "Albertine disparue (La Fugitive)",
+ body: { body: encrypted_blurb },
+ });
+ let synSet = new SyntheticMessageSet([msg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ // Make sure by default, MimeMessages do not include encrypted parts
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ // First make sure the MIME structure is as we expect it to be.
+ Assert.equal(aMimeMsg.parts.length, 1);
+ // Then, make sure the MimeUnknown part there has the encrypted flag
+ Assert.ok(aMimeMsg.parts[0].isEncrypted);
+ // And that we can't "see through" the MimeUnknown container
+ Assert.equal(aMimeMsg.parts[0].parts.length, 0);
+ // Make sure we can't see the attachment
+ Assert.equal(aMimeMsg.allUserAttachments.length, 0);
+ promiseResolve();
+ },
+ true,
+ {}
+ );
+
+ await promise;
+
+ // Reset promise.
+ promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ // Now what about we specifically ask to "see" the encrypted parts?
+ MsgHdrToMimeMessage(
+ msgHdr,
+ null,
+ function (aMsgHdr, aMimeMsg) {
+ // First make sure the MIME structure is as we expect it to be.
+ Assert.equal(aMimeMsg.parts.length, 1);
+ // Then, make sure the MimeUnknown part there has the encrypted flag
+ Assert.ok(aMimeMsg.parts[0].isEncrypted);
+ // And that we can "see through" the MimeUnknown container
+ Assert.equal(aMimeMsg.parts[0].parts.length, 1);
+ Assert.equal(aMimeMsg.parts[0].parts[0].parts.length, 1);
+ Assert.equal(aMimeMsg.parts[0].parts[0].parts[0].parts.length, 2);
+ // Make sure we can see the attachment
+ Assert.equal(aMimeMsg.allUserAttachments.length, 1);
+ Assert.equal(aMimeMsg.allUserAttachments[0].contentType, "image/jpeg");
+ promiseResolve();
+ // Extra little bit of testing
+ },
+ true,
+ {
+ examineEncryptedParts: true,
+ }
+ );
+ await promise;
+});
+
+var encrypted_blurb =
+ "MIAGCSqGSIb3DQEHA6CAMIACAQAxgf8wgfwCAQAwZTBgMQswCQYDVQQGEwJTVzETMBEGA1UE\n" +
+ "CBMKVGVzdCBTdGF0ZTERMA8GA1UEBxMIVGVzdCBMb2MxETAPBgNVBAoTCFRlc3QgT3JnMRYw\n" +
+ "FAYDVQQDEw1TTUlNRSBUZXN0IENBAgEFMA0GCSqGSIb3DQEBAQUABIGAJ6gUwBMmtiIIF4ii\n" +
+ "SzkMP5vh6kCztLuF7yy/To27ZUlNOjBZZRuiwcQHiZx0aZXVhtAZcLgQKRcDwwGGd0xGvBIW\n" +
+ "dHO/gJlVX0frePMALZx/NIUtbN1cjtwDAezcTmTshiosYmlzzpPnTkgPDNDezxbN4bdBfWRu\n" +
+ "vA7aVTWGn/YwgAYJKoZIhvcNAQcBMBQGCCqGSIb3DQMHBAgV77BzGUrfiqCABIIgAGLhaWnP\n" +
+ "VOgC/TGjXhAk+kjv2g4Oi8qJIJ9CWXGnBjqMAAkTgUBspqc6rxY23gIrnYbLxX3Ik+YM9je0\n" +
+ "XP/ECiY44C8lGTKIOYAE5S58w9HCrtHn3tWid8h9Yc4TJrlJ8DRv0AnpOIsob1oqkDGuIjSt\n" +
+ "sKkr2tR8t632ARoEqyWdoHIVdKVkCE7gIICHn03e/0e5Aye4dLWttTNcCwqClXR9W6QsNPuA\n" +
+ "ZWvxBCBzN8SmqkdJilFFbFusup2ON69oFTFpX8CzaUYoXI6LgxuX435fWsXJUfDI077NWQrB\n" +
+ "LbnqM6UAoYkLPYRL+hTtYE4Z8o8sU/3n5yaq6WtCRUWz+ukQWKfDq2MDWqTVI12CCy505npv\n" +
+ "2bvNUxZHInfmSzbdmTty2aaSWnuGzWI8jnA/LdPS+0ly8fkZV9tU5n46uAYOFzcVGfA94iIr\n" +
+ "8+ftcVSSLCu5qpjOdYi1iVg/sR2sjhq3gcS+CxOGjdR1s+UWmWdBnulQ0yks7/PTjlztGVvV\n" +
+ "PYkmJQ/1io3whu0UPGdUOINTFKyfca8OHnPtkAqsTBqqxnEaXVsaD4QI859u7ZiKfUL08vC2\n" +
+ "cmwHTN7iVGyMe9IfaKxXPDi3WWbOi5Aafc5KDeX3sgzC01LoIaWqTrm756GEj7dJ9vsKzlxO\n" +
+ "Xfz95oVq1/pDwUcPtTtDLWPtQHRmBl711qzVvUozT9p3GCmvzDHETlMQa45/m5jp4jEHlA1j\n" +
+ "GFX/Y0G8Y5Zziv9JD2sYc+78H5f7IMrHibKRlnsIuCvcxazUB0CfiUO5Q4Xe82bSS09C1IvJ\n" +
+ "/I79HN0KNGN4es+x/0eyIlYD3dcm3uqDpsv0ghMEPBKogqDLMzZUwW3bQxn8bMqB/zL+6hLm\n" +
+ "1197EESFEYrs6yzVnuap+vnfaqk+vprwe2Kasl1vIl1h3K+PZvsjdQHqX1WfZRWQ41eKHX/M\n" +
+ "cR5Kn8fhi/4ddt8IK2i+OeCbkRsRnBIhGpcP2pkVaH0EtZ45nbxbs1qlFbWC4nWAJ3UlmnSe\n" +
+ "eO5QOErFgwJX9W1hUWiAgyDqMWcdWLYPQJ4Gw9yqwrEP6baILArF1oZyc9XgSBzZn/7kTw6h\n" +
+ "TeCSKu0QCK1jQXUKbftl76ftFh6L/mEPWG8CZP02GnDQx5eEoUhEIS4tf3Ltc/8ey6k62R8C\n" +
+ "gMLsUdOusI61w18bNW0ffVc+N+C8j8uWbc8w4dL4DHnfz/oFUjuk0AlpZE8ii7GNqszBgirq\n" +
+ "wQ3WdXwpD4Q/j/hru040ONElMJr7HO6ipL1oP7nbIR7JHoJmht4G39pXJ86XfJmtzMuu0MxC\n" +
+ "UTcLt1Sz87HzrMO9eWdApGo6qvwSwapAQC48nXY/WDRHgxjji6EQLwO0wF4Rlwlo4SsW3nwm\n" +
+ "NtOBsjKsEQ6/WILvRAziAPlp7+v13QfLrrzmnWFwKE6h9KQ/wpLL9/TAoy76FHoRvZgT3x20\n" +
+ "Vo9Fe7nZbc6qEc9/DbwShxWMbsU8vlzrxm4pgOC7I4jftUgolQ+NE78sQHH4XefHDKXWxRvx\n" +
+ "H8HVU/TPsj+2cEHM2WlVOXlYdtlobx20DSiOvhWdkW45Zw+9SaVkGw/IhCVkLi0UKuQV1gou\n" +
+ "lA4FeTVs0WY7jUdZB6c3DYgu4o5gxVvpRKOmwNp7rVIjsGuAjC91FN3DGQYlsyItLlZd8Yli\n" +
+ "FqGL6B2HTehmOGwtc6pfzbUJj9X9biZlQBigS3waDC0ei7HUq5M0ztyZv71dg+ZA39F0ZlVD\n" +
+ "CszjUNp847Lvt91JVmQdH0HTPu7Qfb/l3qX6LARTCgFfLGzjdcthzxyWEU/oCurUj9E1MwxX\n" +
+ "pfr8AX9/ajgCCS9bBvV0luYe/+0xqrzbnZw3m3ljfpxx5k78HFVuYhXt4iEsgtbXhJuLr/EJ\n" +
+ "B+Cu2YaQhXrvtyfi4EkOLoOcIzu5hs8V4hPebDbhDQKDcF3EhzYZ0k2YlfXnUx2Uk1Xw/x7n\n" +
+ "bLKVIpw0xSnVWdj3XeHLwEwh+T6/uthhi99iiXNQikxwbrEU4Y5IVAjh/JfKywIgPcXnaDqR\n" +
+ "1anwP8a+QQcD3U9neOvIZVx4fA/Ide5svJEkJ6gccel7kMAGD3/R14VfasqjBc0XhoEZT4PN\n" +
+ "xuW8fZIKPkxU4KEgM2VlzB9ZgTTcfUbUMmaCWioQEwfF7J2PhmIl6pBUiBFUdPv9+TnE4riG\n" +
+ "Cm5myUQEap9SFIjWRbLidy4+ZOK1rA34zNT4CnknLWFruygn8EzpgQVlru5no+qppchbOjyH\n" +
+ "O+Yz9VGs+SjoQlMl1HjtM2GQeNizP7AsLLd/R+jQ0Al4+KmM0Z8obTtYKUjG5rlwtNzcxyjv\n" +
+ "tvEhXeWjD4xGkWN8Xhf7VQX2dM7APMIiyyMNaNDVZvWxU9DpJjt4F+mhQFk4Yk5ao+Bs23MV\n" +
+ "XI4b0GanjnGzu5bHMUngkHLISMNGcDicT5JzfVYMbiM2pDakaaZWQ/ztQW5gWzjYFpj/Yffg\n" +
+ "ThvYXUi71gTZqHZiybqu6UI4iBOXc3mXbKzN3XwBSfCODFHJj5A9Lzh4pVBrndC7APfgn+Cm\n" +
+ "6ga7DmPZI1igomTOiIcF5+i7AOW/8hnv9hlsxN3D7mrIiJuRAkCD56kGBkCEMnZ1EA5nk49+\n" +
+ "k1s+XKPKPskxz8XrD2vhPL9ToSXQl/i+b+bh7jBIi+2KJh5XoiM9CCqP3B7bjxwx9qvtq7nD\n" +
+ "/+Zn4B2qCxxGI5d92mV4d1KGanbzHSZh1PJyQHrRcMMdoHMEVl1AW+YPffkwQrnRef1AZm9D\n" +
+ "ZB8B5LJvvjyNXsVGicPYM+RZwthk9Eko0W17u8fC3I/TST8c+kNqJihNhJW3y70plmSe/na4\n" +
+ "G4XeSHdbHsOWHq8CkRW83jk+2G0BE+1Y7YQt9jLOgVlIm6qYr1ov629575zV3ebyxXtkQY0g\n" +
+ "mjoal1nGJCrCp7GAl/c5KMK66T03RXEY+sBZZ2sbv6FiB6+xHreUI7k+JCUJ/uoW6c/8ithM\n" +
+ "L0gMRpxZrhksRcaBDXa8Mp4lyrqf3QWiowznSIyKPm7i0FjGGul/SESz7cKe/8RjJbKnx4TP\n" +
+ "dZ5G/+dhOZwXoisiGSj4CdXq6KKY62C1Pfvnf9elYJMo7GT8+6REYXrCQEoTIAw9zkQGD/FJ\n" +
+ "L6PvXunheXSHY454jau9JqqQdYaroYVrIHD9AINJPKluaToyT62oOL2CcG3dB0Yw1SZfUASa\n" +
+ "P36CevQjjs9GhLeFrqXXYx9ItqbYZKMiHDarjf3KgOzRhFS97n4OaZgn7Yc/tOvtXTMlYSAy\n" +
+ "M4pw2vISXcuaSl6mQzbllYuWk2sqt+rpt+/l0Hd/TfLVzp4mMq84cKerXSL271oc/2Sary/l\n" +
+ "wRHj50Wz0gIxjyfg1FgegnDmaeDCuMwSTFjrlUaV7FSKPZqaVr4LBQbyL5fsd2VrO4mQfmdO\n" +
+ "rwd7+CojtVraeyrNcwC6inBoPOa07A1aYB+bGKhwn/7n6YJEdX8AtTtir1u4r9rIPeUyv+nA\n" +
+ "QpPkPie5R481ZEgApFhyvFy6+etmHBPEpr5PguDzX1Una8sOBfBxDMVCLdn6lHA/ebDCDrLn\n" +
+ "JobzOLmW8G8cXwTmgxr1r5KbvoUaWfSZtJYL6M3b4Ix73GfAhbH30eAbgRya+IHrTx2Nhy0q\n" +
+ "pU1mgbM1aV4OhZ3wZXga8tpWnohVcTIXUfQhBYwJXCxVj6lR6mVd+4WKZT5Tz1twrYxI1ZGD\n" +
+ "HRIatLWeshiULj2KNVtTkc0w4HqIw6gVEwYSojzduuhrtXZMsBVImyV9151ZFL/oDgMQEOEm\n" +
+ "qIill8STDIz2bFF+FzkLLW+l5TeJ9rS4mrO1ffKdVWWL/PFlBvP39PHTkSv7+MYKhobbzccA\n" +
+ "ydjzdauQVn28lXVIMpF9UWmMeyWZlogGNECxb7NAPwvzONGvak//dBKEsqnGquNfNHfFJoMZ\n" +
+ "S5Ts8Br8rc0LW0zyLpLls3p+AnyJQQArteqraSodGk6X18BIbJc2avhbzGJnegacFhTr+e6a\n" +
+ "7niVgn1/P9PNo/SfMYZLWTIUKLkHq9GDhuniHqGM7tcdujI+Orit/uLVYaHDEMVKUDvJuJGj\n" +
+ "z+EybiUvIvpWjY7nWRjmtwTzR8JFUnltTGoLbcnA0Fmtu3rQCOuECYbUvH2bbtJBjatmA38+\n" +
+ "AotExnchuqDI13HVm9OY2CjyD4cJonvmjpz60xwFnr3HGp8pZNNFmvY2udGKUYhNF1X8mb9c\n" +
+ "vgs8SiT3Lf1HNXfayy+F+kLkXqBNZLnGfRHWKOAWSEj8dXiJ0ScLmAvoJTbC18s3yYoK3o2X\n" +
+ "z1sY+RERhyJ3UmFHuQ5q75w2mKz4l0kzHA6bfwHvLbTps7sNkkhT403KU8RbxNmsQDgFMCfw\n" +
+ "BaJnTNyQFJTVgljTEnFsaUAhEOgyoCAFvwe7eKTGO2NqqX9hrWcEoXSa6FgnLQvT49SZHrYC\n" +
+ "poVRVZdJ6sqnjSy7OxT+WbuQufc44TEYeGuHjH444yS7ZCMVyjNaQDRvWPYuXmFp8Anw5lO+\n" +
+ "xLb+LMEgeFKcVMjtnYLZTTgY6UtqMr18BzwHKft6+ATzyUc1zsHv9Ap7mmdRakLFa+8QbXvc\n" +
+ "+AfVbOsmcY8Bmin0nKIL9nfOUPahEMQBN1NN3dOWM/5qa3REk1Cx3rIaB/jsU9f9zUztg9MV\n" +
+ "kvplfOVYoxUsBoAhCjjzPmCgVbp6Gnr/Ebd2vFvDsokp0yHw7Cgb0mBznsntRnkb2cEB0tvw\n" +
+ "fBhK7YeETx8W3A+PKSkn1AwvoG6WeiM6CFh2yp/VYjabwfrTrfVbXpk4epzCLU8WTyPvuxv3\n" +
+ "DDH4s/Zl0CMIqL2oydtEN11ARdwXi3AImYsix/cWLzV5r1UN6NN0B0y9zmT5BrCElrJKJxYb\n" +
+ "NmafkxyrCFGnjWFIRzw4s/GGm/dBx0DGBizioedTttqjnF0rfF2pM/MVf/udCdd6uQyYlGZz\n" +
+ "AxW6ZKX0TPj7bvPyRgzqXBXTfd23kYVH/lvHEsKxnMb2F9A9LYun63jPFSiHXCahU4WcuzZK\n" +
+ "aH6h+cnY3xJn8+P2e4m4pTDMHdsgBQs4upMTxrxhH01MnUgbKz6IA2KV9y8H24PzzqJawh02\n" +
+ "xhdMHVuV396LvvjICg4OWzvFdEFdWDEZ4ph4nYTHN62TsQUwa8t3MBbKeW4mlIQXqGNAhfN6\n" +
+ "UR8nqf4H56oAMTvsvNS8EoCgcu/L9C5TrDnldYf3Zhyx51A0ufvpSNR6onWOKzVF/qwtyn/C\n" +
+ "y5l9X4c/0uCbff2nkYUqVAkfgD/hdEXiO0kdku6ptnWbNUPU76pQDQ5vD6sfe/8ZsRF68Eay\n" +
+ "XhvbZYmXCVn7azZeEps3EiOKCL4cazE508fLyjC/fNc1WMdyIve1lhXGI8uJ7/lB6tJ6CucL\n" +
+ "WT4OX6kHZh4I7mXy2+lezAELmrP3eU7YduHemlXqqlOrnw8pwGEVCsxGmCv6DdJNehk3wCJv\n" +
+ "GcdygTynL5d5fGe1mP2zxZjW9kscNX1nwf1+sz6chZ3jXpiBTRXICh66vk3UbyS3eZk8NKYL\n" +
+ "dY+/cN1O4jtipgHGq8EPUefBVRH+DmjTqFA05qHAaV/fZ53xLWm8YVTI/DS9fbbPZprOBeib\n" +
+ "GoMdA+a0Sqh6RdIWlaFXYYJUspp+rI1FlOBZvgy8Z5K5oGajE6RM06EeB7DPtI1/K+jRXa5O\n" +
+ "YXacRu/lgDlZvevVsSj27Oy6A+rbfo5oafhMMCLArtGlY4ENMk+u/ztvoxPlos9vCUV6NSFj\n" +
+ "znenH7iv5TUvv5gm4n1NCSZ9Db+zW5DQS8Gm5iGUsRj6VX5hZ1pMl2df43B6I5BwCKnq2eYn\n" +
+ "mpDzvUXUku9C/RkTxf/xfaIG30+whnY9Id4MWzWNNIJicvEdJkDgE5iRfwsVntbQYGwctmxs\n" +
+ "209aIk/KjeGWPOyg6TFYF5ZJMe/0XVSr2Bci3cj7GWeFc2FrFB/5nfExErrT4+e+9GMCyXcz\n" +
+ "bIbj45WCoA3Lgo2vh7bZV7xy6iXv358kl7bahH2/IvjUPGn3EKQY8ApoTNrRXvKAt7P4Q7zM\n" +
+ "HrRSQ+iDYZ3BCmoWfXMzRmRJbAzvC1akeduykIwQkL8QP7z7n33ntPlP2n1rDLI+LoDSOC3o\n" +
+ "bJzafHOOAH2J/MWOI61Tj7+FWyGIPihUf4rZqFXnoZkBpy/fRb/+qmSmIZ3YPiDdwICnCerU\n" +
+ "0BLeaWRD4aie51FyZ5fR+tXmTu7JDC+GRKp4EARokJgL4CTnuSGY9TaYKsoKrwST/9kKQrlM\n" +
+ "ISOGV8yTnLTzhs01EijkNEJZkJwg7QYxsJ8x9zLDL44fCL+KALLpkHEmUQdkLwy5DQV97qL+\n" +
+ "/6bSyxgLBiEHRJQns3HHGlUvNt2naUPCukRO7ieIlrPPSaL199yPcgjmFIBiXptTm9fZJRzE\n" +
+ "rkwkIeGlXzxhSpLHApOnDZaeNlVE+5NyNHOxbbBxfrc5Xmg68ZESXwxKeZAF4GM11OBLzj/f\n" +
+ "r6iGBayidg/uYZ5D0CCSyTDT1Y5RKFFe1DieQey1bj9oIuE+jo9coYLc7XUK8cnlOqLRl9Kt\n" +
+ "kcB4t5JAqob/ZttXhHnZ8J3QUpprXYYQ9c4NrYf4KEy1+femS8uGnuBZgUM1Tun5EjSeKxMB\n" +
+ "cY8gGkXcsuLzRpAtwifgHM2R6dgOq7g2nwB4wQYiILSqAsSH0QKNb+tS3NKyfNsg1tJK1PSI\n" +
+ "vOjRQCkzaII1IureIWrUikWCbQWqTDW/PazEr3HG9+BMs1JMUbEviA6ljNZz478Xbc+mA9yI\n" +
+ "RsqILUos/MCjKEhYn/qq+BsKtKmSC0nsZ3KXQcLbq7O/RZU85Dr+N+wyhieT8vu+4hb0mqrn\n" +
+ "FZwyMQt2WpnqaNk5tw92/Gw/Ad5q6ACt3PZiG4GrG3NNaKxadwkN9POzyN4zn+7gq3cyF/uN\n" +
+ "imAv6aVHaiD002PMWHIMKUOFwmS9AV3iskmW+swH9UyLPnWDejvUs8jW6mmeD3TOR8sRQv8q\n" +
+ "KwcvrscKtEXmBvFDYh3UcIcu/j5wb7WLwhNi3XOpGHEgg2MjDf5ti0kkrR68VEc+XBvnAYV7\n" +
+ "5EIrxI1qfkNcgXKRdOg6msLv6a9QSgJunwjACXM7Zv96MHMEETgkNr7DO+woHjWcPl4AYV4k\n" +
+ "HgPGUISEGUQr6/c1penqLiExW+iVj8Y5uLj3c/PNQLMhnttckHWVCz6wlqxmvoUQHgEl3Qd5\n" +
+ "pODBWHyC2FZku+Xuyu2o+GHxj10hYfsEl/qoDqqvW4TGlTz16MQrSV3SMs/i6SHmq5eiuhMf\n" +
+ "Hj6nkt3hljgHA1YawbFL58hj4x2DAyeYFfLY1YEBMH3K6JLxUdD0c02lecUDOqUxBrp+/qp2\n" +
+ "4KIqFLZ3+z7Wzx8WI0DzKYyZK79+VV7+Imv+DpOTaLFLu7nymvPeOgbzTsrJbJQo560EXpLl\n" +
+ "wID5Z36x9P/A54q0i/mhTzK/RtYYhqgaV4+GmP7XxA58zulNAJIVcsmgXKiD1GpmOR8c8EDm\n" +
+ "kMGEcrACXBOkpEJHp07J5vD8gfWublIG3MzeoTjeBhUJM7G9H5r6tNHdB4Ak+TMVfjcN0vbZ\n" +
+ "UtVCiQJqR8USTwNCumY3EtcMiXGVM3CRTTLai+IZVmLqED7SL3kpOdFcthMk5K0L0j1Ootan\n" +
+ "wFE2QhcmMVP6x8kH9cJVhbhLHWYbO/vg1AcLE7YOPRD3DVId+3dTZo0JVDC6RQKpOuUBolbH\n" +
+ "P8GpxBg4IcKqyMAA/1+FzaLicvXPzk7rKFkXjL5cgervdWF8Xx6gaihVXRfR7AiWOy38I0GH\n" +
+ "RJI8WC8NruvGHN71Oi0VKiyGD8o4tlGZyQoeRU02Z7cM1X493wCEVUuBEXYI5ax7wIcl25AD\n" +
+ "+WAv2iBZ3gHNNyCSJZM/Tqk2/2B35pfotVMgs67fnUy9tpm3n9nOdm/FgReSu3CBM3JZmYtf\n" +
+ "tOfqq3Xpu/3WnhWjkqDVmgaQ42PWtxYU32ah3M+EHHhkYSIG/csaSkVlyGYul3BsfeZ4jCvK\n" +
+ "MvVFFD2Kzkyt8zKKQlA7Zzyf900aFNhU5SkX70s94Bk3WXHXD5DRQRYHWmruCFVkFJXyaiZj\n" +
+ "qWBVKP3Gv6OXSc9IRimu6p0l0TaDxxjNoPskg6dXHTV5uTcgOKfRohgudjQC20VmamOp8IGd\n" +
+ "1muj9L82CT7elonqA0E6HFZfJqJIfxq/wSFVG7wiB9Gwjoj1xgB7bSzbglpOV/ReBPcv1ivl\n" +
+ "KsJmK9nlmfS4Y9MPWuctSROg9QVEOWq/XowOm6+Y4bpKpDhmmpsUpMsDtOJnrvSWJwcwWRRB\n" +
+ "+2Z3H6kIEUXDq1cjLsrBIWRTwb//h0Sbb2Kb1cUHnQQAjlhkSlOpaEMTzQb7GMojunx8Yeb9\n" +
+ "ff/1l4/1tqVSxX61AJuJyywGyk9AIsDIm1WW6P+P5AVRsy5xu61qrL60GHlMxtfm7ZSLAeR7\n" +
+ "GvBOgDitOE+llhzZSjwdaESxSAvnhFfM5TOCSj5YNBfLaI8bVxn4Br342GV7nufFqOLkp4rr\n" +
+ "3pcNbQvsb+k7kkdyNMNtOQfG/Ojf8YTGoanvDYrtB/0Euu0TXR86ljXPIJOT/4nhue4149SO\n" +
+ "9lboxBH6iaP8AGxn+2/pzCbcOXjDzcD/i1DoQXVcwfniiMf6S+CHb38Os3KTO49YsMYjrDPP\n" +
+ "9L2IurXfUHONlljI1T6GFV1RfRCBfO5XklduPaR4+4B0JLhU6+UKl9vdTphhwrYTuJ8I3wkD\n" +
+ "6DO4hvktTjl/IPLyYPU1w48W3cZ++P/wJNtIYl5I/ZSNfAzefc8SQh7kcnVnDoocElfWHfg6\n" +
+ "oZL0MSe088uFDAxaJTLxDaDIbzjBkwaiRYSBQ+SQVBmUlP1EjLbrwdayi2IidFj2Mr6nv4KZ\n" +
+ "4HUlmmVMSvg4K2Iv5NGgAmAjYngYSveCdDkYXQgOXldxnzVTzRRP+nEAtFepLx6TZjSjawqL\n" +
+ "nZ+N0/BCJ5UkldplLALg+5kdHCLwcdkz+H4YsB2sLE8zULM9JJW88DGBKXKue4J8GkhJlY9i\n" +
+ "1y1pdTW6mvC0J0oMAe2ULkrakIdyGgNghwjnDMaf85niB1A4+qjN0K3uGGjRyWddJH/Pnv+Z\n" +
+ "7A9dmkRNnYMFEkyFYTkbfmE2fHr4MY+YwlwjE7f69LmKEcai/is9L/Lqv5Onb8W6N06l54s1\n" +
+ "iYKzFFqo/gc0UJsiBhPmSKMNvoeoUpi0yUgXDPtw5+9HD/hqFSXqWGh2uR1vOUi85k0f1eOe\n" +
+ "zzkIBzcL3on0y03D74cB1QtjBAS2lwTXzjyEbitB4AxHyp5L13tPJs4l2uo8JXpL8u0HmJVR\n" +
+ "w6AOL/rV6elTYkuxnq5aOq8WQcm+1cYY4fPdT7ZRwVy0ZfHpN6VsqmMNIoAUyRgy86sYU2E+\n" +
+ "UMTeKZzD1+T2LbbV38AQh2kaLlSNuNkoFIjFZZvth/vubqIjHlmsw2MeZqXZIs3dBeA/1GL8\n" +
+ "s0k5ix2Obdy1t+w1e0d+y/ei1IzsxHRdBvrn1YDqdFw4xdUreJ3FSTrsTePlSWVjJXKGm13h\n" +
+ "hFjuCqELnR05+au1dFSbiAlbMPM6W/cebi+/0GmvIvfRaqrbvRJoUWxfgaFcanrlin7a11Pb\n" +
+ "6pFV47mIKHxWQiYq0z3kq+QQ1YqXvxMdM7eIg0PEOygB4Wp2FwIG0ZcEFfdq5CPveormJ/EZ\n" +
+ "NOFrIHZXkFl8fT4x8LFLWNmlQwoVqeQGOs51CYQF7YPXjFx64mV0RXz/umA5/Un6fHjKS5Yq\n" +
+ "7ZIhx+JPX4+s3RrxbUjbq4hCCa2MSBBQONhdmXtKKIf+TNvnimm9je5bt3Nu79A2OYbAzvb3\n" +
+ "cOEcQqieXzqj358oIxwd7BL2xLEMbe2Z+1bDXK+YwyJpXNF0Ech6Vbh4PSLHpW1jCoIn5HCP\n" +
+ "4K28TdrXOwwKkac1WjaQCw0RztZEatpJW1PyhQ0n8xcegTqT+6nyifeTbEKuUYXhCaJa0spg\n" +
+ "Xx1yv6G+ieBg5owSZ3DQSQ4GmaZ4GBgFePkqroihA4C1bs2FbrRWRFVRWAAEZYdcgHOyBWNG\n" +
+ "KLGntWv2VWf7yid8+oSQLExsYHBGdYMTJCbU53fuAnJYE4DJJ15Vztj0TO74KqKrkTtxfog7\n" +
+ "5CdFia/OvcCruLblCFLcrRyhsW3YKUxHmgpAPoSN4/46Bz+ob+CCkd6RJzwjnhfIgbXqKRLE\n" +
+ "8KfsCqksHp1p3hEgvm3iDuqHfBP/7O/T5V753HBhuAzFZlaOzQsjBfzK+BMXP3zp+DEpGwUL\n" +
+ "Pd/DG0fa6odMTqPs/TUblpHeANF88+XRkgB/hucv+K7h13bfRRYPMM4zephlWBzBDIaoazv9\n" +
+ "SvRyy21B4vRTXrwbTkSZXTtEFCb2027l+ycCayD9XXCLQUhjSrsI8SB+9qC/i827HcLF7X20\n" +
+ "L/8Na6qnRTinmwkBUDk+o6APUlR6sDpX+uf1bOyiV6oF0wy59+kXi9oCjupzPBOatSM8ka47\n" +
+ "6tcHJ6na0wJ+Z7EjcaOqy26OYcPT2m3wvquK00JLHCaTDisK3cQ9178FxZmpD8i09AsLVWuz\n" +
+ "r/dmucYAxjKMQzV6+q94S42EThtTbw3LJURF/8QNLk8AZKwVuaw7zz5+8F/bc2qtrUr762t1\n" +
+ "KN+9Ul8Kc2N5IxAS+klFXPfA1isfvbm88737wa3Tk1N54QIvDXVLBJg4OzvjkQAPai9lPqUK\n" +
+ "Tj3LrtYGPDTaRyRXpsH0ehIZ66TRobSaBBrL4VeopHzoWOutlTLlSSjZ1Grn6SFGdH/i998Q\n" +
+ "64ucbkyejUbFT6SgOzDN3rnl9ppqnDPOCk60WAeosAJdf4tndoYGGQQnQpsBh8uLCkyyu4z2\n" +
+ "di/om5c1yNSJsv6j2jQQiPsMX+ef+27mdAj9pUXQSRnl3oZRvQMQ7VmKsa8NBByU05MwSvOn\n" +
+ "vuEKgPq5CL+2Spnjcll+wWQsDF6OZMb2cM7PmLTGTI9LKnPnDPEhz4borQfch3jHR/EVtsmg\n" +
+ "BX6xmoD7gQdXPWBFTvwT7ljRJ4v5O0v/4p56rTneZZwBBIIgAOfncYVNGur0g1ZaFAujgzEG\n" +
+ "/PLpgIqn2rjHU+zmUuf28MvHdWxVNgSar7qMRp67M6UM6RExfuv1vzWw+ogYWeiQOYMYcBqP\n" +
+ "4p1Dm0ZxwWaqgllea7MCmniOrEGNizUMlvYIJoYcKJFVHz4Jbxy9pzGVL58Kbmwa1ZDwSXqC\n" +
+ "YHcVLer9yxoZpuDnIhRXHUnDx6Iw6QDiKpMQqJcFKf0YJTUrhN2M17kUaOD7TY0zHhDznFHY\n" +
+ "Oe0hlEu1y/FEwNxueg8tpjGVivXTX5E/81RMpUHKenlM8WbA7GQepFiIrcZTsnZ9jBCXLPGu\n" +
+ "CI00YwbNnzV/EsYsHAcvwIQBlBDVjSjkxoBaBmDsVpLawCh/SGEAl99Fe1/08OKHceGPDxko\n" +
+ "wZ3Sge2vC2ydyu4+LVnypr29R3sv53cnApKlt0uplnF4rbpBSbTCgH6IR0Aq/aYQUW032HtX\n" +
+ "wuPhxgIp7Yf5mi5rd3MwyLhsTQ7dFhXZT1kecAXAMo2x2BAo98yJfmvXM90hIwlXHwp11ped\n" +
+ "MTzc47I5XC+dR3YTHbxUKC7RCo2OjiLsT6UocM0vqyxkkJrUWHuC9vGHNEA3wmJuj/Tncr+r\n" +
+ "/bLYzx9TWcN0st02kCC4wUjQJuNlCZLjmnCrr6Y8Yrm592pv3ztcVD+cbgjwptpxN4OXTreI\n" +
+ "7Py1P0BRRC7N3I+W8OVsszHpjGsEqxFDdyRL7VtUWMR85c1cJKvmYWeSSVX0YlNsbMtVledB\n" +
+ "ViJg/2Qa6vU5lB3WXIyXOuJVEo9B6ua60Fg+HlKDHEl/5bOqOzW5pgTz2BclmAb+NvhEdl6a\n" +
+ "SzNSHFrCqmmG6Nb9DCT9wcvvs74PN2QFHm1vxPymLoEQYZ1o0oI9puayLFpMykIK4N8Kinp5\n" +
+ "iUWxh3t+V3L/yz6jHXiL2pR3UYBrfzRb+bOumTD5ENLil/3P8BngPSCvYAfRMOrBj6EAIoZi\n" +
+ "HTaCqKN2K7LefPum/AQXfE5oHHJXWkS5Zx+DiKVmwJcQuzqO5j+sJxuUlZXQnSR28g++33WK\n" +
+ "zZsrMU1MolLmEFArfC2Z1o2dxtk2FIQVq/mNhq79sfU+xmCEaGyUV84NCFpXMTe2z0m8gQA9\n" +
+ "/v+Arqi7hCbtq2AyUFNwlUlBjdAxtoPNUj5E9iPfpQVZLUTGM8H4C5kJkOXYtb+XKeoKRLx5\n" +
+ "VCESit4KBnFfx4Egptm58q2CDUOb441YhMQKUR2TCCgLPJFZBKexz0jJpWHoCBBNj5lbAeQk\n" +
+ "3Hrpj2ErGttnVxL/pFEOY0u22FWHeXdELaBs0bvbQ/8WHGUg9THFzhZtvo+utuFGpmU+gK+a\n" +
+ "XCvYMtSxSBFoSSwA4v/YTc12QBO/Dm5xINzupyx9cfkbUgrRRbP/ORXB+KIkL3uQEa6UwRzo\n" +
+ "NdZlGOySsXHLmMkICx1TxWHiTjVbrk0tAvSjIiCgdW3kFVAqGNovgl259anhCkXxbnLUMMsc\n" +
+ "sAVW0cdy3DPLjbab5tCSjbpLE8g7KxGTX6jgwjZVEDEkvhk2JwqaxQdhp6JsZIOMSxOmhhSa\n" +
+ "+zZ2V3amEkQs6Ks+3MOPRF233G33dfkmkaq8oPNOXzROimZod7RaYTJYlfl3kBHx2Gd33ID3\n" +
+ "OR2Z5ZywURCEUZ1tmidgJaChiT42hfkTNI+Y11S0DKHoQZfDQQ4gOpoGo8qn8GntVyVx25nA\n" +
+ "VpxqsbddA6diporOmNx76M7+tuSKN8KpqHpv1K1+Bv180rqa/oZ+PXxO1nu3Iv+drzvMuSXs\n" +
+ "ityJ/DRhzg3Hdz8ZJOUuKb06AfhMDcFGOpCAz5KVN6wr9/bD53Ig0KU/gUKDd2vBsPemKSL5\n" +
+ "FKKKuHf5LYVMELEfgEwhcnen5tT+wvh+UOVit6YLHSQ3uoNW9REzBwEsBcSM2xHRlg+oPw7V\n" +
+ "K6CoW1SZdRt3P6ixVDbU5IAz9oH3owqC27FK1poBSXTEg6+AodSdKD2TOqyAaP3a5+/QoGya\n" +
+ "uQntOxj2mU9rtGP2p7wQuL48ya6waALfx+8N/P18hlILF8x7K+JPBZ+0BWhMNEF9BgPOau//\n" +
+ "THHwFMvjc0yVlRtChlhzEjhAhvcK9WpM7c0R6N5vBm7M9477PbGkNZzMFqduJxTw+hxja2oZ\n" +
+ "gjcm9JXGFbYb1ATE/8WDh5dy4H49azbAb70mf9XxzvllCUCdor8TXkjqTp8qyof7P81BUknL\n" +
+ "g8vYzpY3D8eoKFwyS/f0QQic0t0/wbRVZ/tiW9qzzKaAppKINddPfVXlGUKbSKsXy5rjcg+f\n" +
+ "rD4WKauGPgTs+kOpCOAOxAd46wEP0CoLnjALeVsP6q+yNic2Mxa2FUN2fQ7Am8IWV73cnkP0\n" +
+ "RK/tcmGOmFkg73KJSl/FC3yNxG8HLQmcY/IeW+Z0PVLTj5tzWer2cey9/JTHzzOLvqEjDpZH\n" +
+ "bbsS7lOi+oxEEHHRlOM7PECSsMc9C/AQohyDyHNYPEqo1XjRmTUSU6ozbgcLDucrpAIjvVYm\n" +
+ "8Cz1icS3xZCO97XtqSGd6LsMYWlCHvQ6RJAcuBxL8sasJHkz5QZ4TG1xArSRDdz+bO/4Df+Q\n" +
+ "R5HTXGqY6cFs9CLG6O/vpzGKCaeaIjKVIZTTl93Ql988Y3Rk/NQFpWRoIWtrMC0Lpu04Vmop\n" +
+ "qYLPJCFEdCctbhiD/SXjUR6unYXHPAPGWwpRmUF8gQChRng+R5bzpmMXGAUOP8W7lvthh5+g\n" +
+ "66o+0kvtxImNox3up83hSnsU9xv5n37j9T3pttub3ozQIJTudiHS6uNLbKwDCbCvrdvY9vMu\n" +
+ "8D2LSmNC1b7QHkU7R7Bq6R8DWdvm+T+LKqgqodpoInMsN/p70ShybyVQAOg7RNUzw7k8RJKV\n" +
+ "TdxHFAxEVpS/PiBs3JFwL8QpOMVhmgK3O6Ictn/TW249fQ5qEEA7LLMY6H/TZmYWg/EWfTzL\n" +
+ "wd4bGfdMoY+IRjMsxfX4Z1vLVAo83VbtgvbKFpLb1EO7Kc7zuCS0w1BeQ5++eAnZCy3GaTUk\n" +
+ "vFAkjZkU64NaObuZ1/4hyMMGnzNZYnNraZ0+wNOFdLquhi6F5wjsbep9kf/VZfNJscWNIhsd\n" +
+ "+okxW4QlBC9smcIQJfpYx+ycVttGXQ7acP6U5NmVKf/TCu30Ltev6/SXtLlWVzFMFO6ZgKrG\n" +
+ "4xlUqiSn2L0P8AmjvWEPAyL7f3E8iarGS8mKnAq+h/LeyQPD5M8sDhrBDsBweQJghnRavj5/\n" +
+ "kg9MalKxnbYxB79uzRi3Cqz1nNJxP/sAyUi4/c7+PU4T0xQkoU3BioXURhCXZMcOOBSwSEGy\n" +
+ "LCpJbPMRSnX6gveGth2ba7os14cRSG44LPe9BDjrJwSvVV4Pv12OeNPqwH/tvyaVi5V2UvGn\n" +
+ "J9t8EK+rYLlZJs65g7oxaTIcBpkRIzElLMGNmXsEHkGc5PQeJC48C+yho5cKq84lDq0XHlMv\n" +
+ "atYV/u5N/w7Ta+nOQGn41GTOyZmAqddNwpabhszmzx32klOHwNWdM/xoqXze0SHBEMSYaXfW\n" +
+ "cOecJNbWpmIoFs+gxt6AKnOYWC/UdaBN+NPUmyQh56LNBPXHInMGc+TJpJR2BhLryKYbMRiG\n" +
+ "3KcysiWiSOujHeMhohFMUm/DUfy1LgMT8T+bQGrCIvhAjpQn5uqtB2xBMtnD4Rc6KxTyY/HT\n" +
+ "VhVtQqITCY4wy3yv15lIGxe0LLGGnVtYJqo5EEe6hQg9eXOhH6dhCDKMQ8InV+H55fAB7dnq\n" +
+ "7gZhYwjUh3+cbQHnamh/qovVNY/4sTHOP0i+13ekbw/Q7zTq27bWPGyWrfa1vsMFqBZD4vVQ\n" +
+ "1/dkZvzpdWc0uJqqSw1p0vVaHddjAwaoBqqYLwIbhrhDPYqpkQuBnNLxSoYf296ut3Z6tcxX\n" +
+ "PSOt9Z5XGK0f3XdQQSOyP2ujB9KI8sNgPCC3BpXcqb0shalUXwltnRpAsLzRnxjOujR48rxA\n" +
+ "li/1wGpRxFPNsA0dG9/kGGN/FKdYW9J38fC8YVM1gpFDrvENuiGqKxdTnAQqwNTQ4YMZKgIU\n" +
+ "spsCCOA26YRsJwRYRn3Ajw9wpTR22OG9SwmZlhgsvFxVRiDRa0KlysJVpF4n5C3F7oQtroiD\n" +
+ "86oThYaQN3ylOr8qpf4ks/rl5QHoY7j72FAaqn/9hef1C3kAh6vF85ZliGXKY4tV3gBLMgZX\n" +
+ "L08CCTUsBQG+1qeRY3UKaigBTfsbYfxU/CLayCoEV95Y4j6yFV1GDG/OuYN6hSIjw9hl3p5t\n" +
+ "4iSmAuH4jkdQFWAile59e5ewt9KuJwxjyCFpn2gREx6LBImTDAQ9YW1AManPRtvriv0mnmG/\n" +
+ "x3Pm826Jteq8pd0Vi6pLLATWjzAz+GyrtmMjk1InY0sUXdMzMfWczWBedZKCLzd2WB1tCoUt\n" +
+ "g2ZnQO3nBV/+t3yTH15cNtkG74Kk/3itRBxz1kPvLjMMwQrlErfIF5zOQ/SFXaJoiC18jIFp\n" +
+ "1aDng/elbbjpz8Y6ZQdYlwZAJt14Pgmd9oCiT8nw7cNzJkzhPw1g3MSjHiqndHNeP3J16Rp2\n" +
+ "wGnvYwGTWA2sbPgtPSv61mstrs0ZW8+JbqknLn6lRxfnODqiwH8jR723GrJGHWRwwFOLN0SY\n" +
+ "eKO7T6OPsxdiWSnDb587DzdcPV8UjwU92sdtxJPJTE5AP3ER/GFlrRtJWoJNEc4FPQPEbxSI\n" +
+ "kf8ziZWlEcwztvZyeKv/iOqmGBULuXXjFVRYn+PLXJ7rXIMo/FC4rp8wOpVy1Kr82UoJdriE\n" +
+ "+KRpOMZAqyBoQhnzqT3KSI2fzfKlKLg4XFajzjKgvA25Lt4t0FiTX0oPjT5xXy3nLMPqJkSa\n" +
+ "1xk8jA/WhFzm1H7KPjttN3Cl7Q4II+NnbxXrZ3jxZ0pAQkbR1goH3QrBDkr888Gxp4RpyUqd\n" +
+ "sgplw5FdAIGLuPZD20JkSAtJI9MuYJtndWYm1xO6aIrpCsG05E2NVSr7ziyaEEuiL1Xc8TlT\n" +
+ "//v4JMO9As9x/Pcik1mD8f7a8qLibt4+yboD1/Vra4SgfWyWaniG326q5Upk8Bl1hksCKKTO\n" +
+ "7vSEp32TaP90SOuH054HQc4Ki0ffye0aBJMifV77RVz6GErggO6iyIsFjSVpCi+bwQZ6wrkk\n" +
+ "lV3znF1li5e8dGkfMv8G/F7rCpecpvYQPD4+8PPmIELFAoRXw/PKsFXf2z9Jj3KxCirGmnWa\n" +
+ "6pV7BuKiXH2ir11ZD4zrZ8Qi2SlAJ4VfY3BIgt2nkZ8FRkmT0wroc+Basp8PDcuKzgT2HBgX\n" +
+ "r9ZhanQBsf1OZxaU33jeGUd03f4Kgf22xawruBhcdwlfRybZSUQHGpiTbhflPn6n1L697/xv\n" +
+ "kr4StZ2YIb2UHppAWbDBxZOvBct4tBi7L3A5hr+/TQr2em7kYbyrDn1x8wgNxvk7mJ2s58Dk\n" +
+ "b8Sw+XG0UnmuLhrPBF6Q7juOHN2BTaSn2X8IPtOmf5Md3KCBwb8xoIz1VUMGlgyQpvu6dL6p\n" +
+ "DDFkeCWmZloPz5tlZfwDtvgzrPxykz5sl9nwu3T5nQeufx8z76FmN1ACbxbKP4cUD29WPVRX\n" +
+ "fXQOdkzT2ogLgDkVXvOMZgeiLJ8Ws2nWPXKct4EsrykjhPvkdFLv5D65hvAnWYXBldq4DUfz\n" +
+ "tYYzorGqiyQT0p27FA6z/ohsOzkrYT5DHmOcgMJCItgnifuFh2LnXPpmW+PGPtHY4Ij7hAaC\n" +
+ "XCE++XLdlHsrEpx0Fv2f3zjmdLYRRLFkYq/g5jMWw0xAhTx9MyLBNSOTELeEZ1gOMyEUBMkg\n" +
+ "64uTVRkSZCNjOMj8QuzozG0QT8zKXUPZufka7ltYMt/LrJvUx1PqeX/Hf5hd7ZTj/2xdOZlA\n" +
+ "DcaB5H8jclPjsFn2HoLeVHnaKt1ImdQMmJpktGzC20rT1ZVqg/jIhm1hEC6rhIgXI6UaXxl7\n" +
+ "9sun22kYio6itWgJFtlQvdEgiTHlYF5Agq6Yeiv9/gw2HTnd0BFL1RHrYeUHHBxvM4Nfalu0\n" +
+ "kVRhhnJBpa5kvP74Ck1DpSaSQ6ftLOmbJ0LBZQbWxPuH1bOcztDPxW5s7F5dPqfKRfzD57s+\n" +
+ "CktZTUI5jCkxGUdsLboqCaX/9ne6mr/KQqWNbkJ6Vpl/uBMa3Iuk4UdbVdLPa9QB37vxLChI\n" +
+ "E0iRpbPCa9GBdvyf1iTlvSEAJ+xkKaxKf3DFt4ro2+CcUllEG51wegf09GacjX0vtmrJVsZm\n" +
+ "rMnt96KXdL+DtJicFFovTuu4ssf6lV2cIrKLbHBrcNuHjCAuuhsF/r3p5kewh2ZZFfkqfQ73\n" +
+ "T6XlHrAB7+jVKRPCavljLiiU/mWIZ5caadS1wDlf0Yoor76bIpr5Ifn6QiV7O1zOuboZwL4k\n" +
+ "QhLgRCCD2wn4BkeGccn4quAZSFvEpL4G9vjl5efeEI71WegKBIwEqL4w8eJCitufg1I47Bz7\n" +
+ "k8/tPa2R3qZmoS5pTW4ObX84i+nbTpuVanJ6BmaLqS/Imti8pOnu/+Nk57DYAqz/+PboZNqo\n" +
+ "wQ+d9/s7/ORYYxD085yJvTZdTsldaslunLviDXPE6WUVtt3XzNxCR7cUxNcIh8kOwbxPkwhF\n" +
+ "nLdqQVDHs8KWUb/mJPUkipwWxnwlb/nSjs+6T2P6ansxq4FNFQJeXVCLF3Mnc4ZDeC3GB/KK\n" +
+ "21Z3JYUAynAWuK5y4N3Ed3GUHhJjUReBvW6T+3MsgHapQbzvHvKTmueIuxa9nHXsUaxojV7V\n" +
+ "PNxp9TRvUX5KLJ+OPZsVhut32zpe0/HdSHeUVawdIun1chs73Gb67bZA0vnhirbASCStNnyB\n" +
+ "gTaw4o53N99N/11/i3zurK5bxqnAhEfe+H8cY5qwVOf3zksctxdjBO6OyfG7EyEbFLgxt3MK\n" +
+ "rRzwleobPeYBAp0Lotu+iBngfg9EcoC3kh7XTx2Kqc8OGISjRF7Vsf42AVWxNZc6Y1Z2kfcm\n" +
+ "zJil/iTM8sNSfbhOQ4HDA5Sn+WJXFRkz1fx+7O8bpikDBZAanEUDxO7gsn/VFezgIqJZsJGN\n" +
+ "4U2Y+C3TkRT7jxvYISFJtTr7KzQEJurFvjHUBjf+KcDc4J4CAQdDilAro4auJm9ji1k7+6dn\n" +
+ "rd5iX4Uu1GIs92wWbZ+jI7CwWDCG8GFwaPXa3+rfMgzWQLK5Z5papSZ3HTU2zEFNj4w51M5n\n" +
+ "4N9hmyZolUROZ/Md7gB5lI73EcAxVmbmpSCQ+tTarj3jIfzXU8gx3xrTx/IjhqYFX1jvzf1n\n" +
+ "Q6BNzyctkUAVpilUv8FFdCVl5qVhNHcOzzXGemxUNT/m5e/1P0dAk/dt3bgw1HfGvzvhoXG0\n" +
+ "19OMLCpf64P8uQbq53Dg6dlWXIQt8Bpg61x9z53kdD02AsK8LPy6H9O9HdQIgJX29o3BLwT5\n" +
+ "wMinuRUzgKPscuLOlHS9wCXbTJKa7mAK5gt4wf5Cpks6Ps7TYY2bq5AF0cUlHNnhiJ5XnbiA\n" +
+ "wB87rVdZLJaLHJRkw2P/Fd9xuEAHfmFqHkOHIF4g9dlPOV1nAzetM/B88QTWUta7W6uH9SrP\n" +
+ "wHkvN3D+Dri1KpAyGNauMJTXCl4iyF+9+oCD2IrXYo/imlGiNHvgoiBQeSnG//F5ZV4typ4u\n" +
+ "akQZu4NvOjI7fmkr4JW2w+hAo1zhNGCsEyl7jjU9x//xtfpKT2dZfg7JY6C2LlqyMbDXJFO0\n" +
+ "ru54525F7mHpJD1MG1a58G+bBhVGA1NxB1OSzmC9fdIpkFPsE01/bv0lcM22Shd6Y3jWW+U+\n" +
+ "4KupG6U7+RWwnNfQE8EwYAt3FLhHUz5SfdctalR8W2xG1HaUB911r7dX1/v9Hj617wYsgLwD\n" +
+ "rfQRJiQuMpleYjlsRGW9gonyH0k4WYHb4WbAB74QSkV4NYiqoYh5CRPzfpG3gCosNDw3pbil\n" +
+ "ZmA6MGB7x4EtviOMbyNbHy4SgLXpRxhOrBSFokvLseV9RsNW2xlXbS07zl1IFIo2GqZFvG7I\n" +
+ "RuvREl8D+83OMskSwKltdTIubJlLrFNPKbAXnXk4IIGRykhlkv+68zfP1hVqR2B7CTElHTvs\n" +
+ "VaLMtXKDPRvRae02HpiDCbzVKBMVlyttetXQSXg6d2YY9mT6O3ZlYri5aM47j1vwEnmgurSt\n" +
+ "hwoJF0mVCmbvNWR2JXLZ8IG8LP+xdkop7bBufL3Urt34iRucih0krQMp0txmIp3N9V8Bou5l\n" +
+ "Ce8Hc0J4uvcf5y3UHa28PydhK6XAJP8j4Lfkmkz0XrcXed4Z8psdsN+A78rJUHOsemcz1xmt\n" +
+ "r+qHdvDCW3SJ6vAS1NeaaKE7KepaWGFpIyA7uAegKvVKzSMigJZqF0DVhN6kVo675hBifJsz\n" +
+ "yZ+6douRnIqITYIrT0pF96O0D1totzUJ+zLTH+sOsrVusBDDNrad8ZX/YirSiS5vMDeyPKB5\n" +
+ "DJ6e0LgGhOyVigqNM/EBngFfk4OsKCHNi56KfQ3Egn0LAT7krK72KW2ml287CRJbnSYjLyIl\n" +
+ "PH6Alfa7wje4s48AVM2D2w7sAQl7PNr9fuOFcRnDIfjsWQUMAo/m8jsqKZYeBXy8RNXbeMdh\n" +
+ "KqieZIbWJhLJ85EGwcadWXNF60IeCa/ZXov0emYNMnN7uF1ZR5nIVUyDMV9MzG2RxcpTb1lO\n" +
+ "qaauedNmP0gI7l1OSCNz/Dt1KgzP38dg5YOi71RGrxYyz7Kva5NHiFhI3mWHJEdmRpnx142m\n" +
+ "Zy3MtpIPYoMWOxpyi9oEOPps1VvXxChVO1bePOh3CPdqzONsAXz4+P38R6MMEtiYQ3qOxv/F\n" +
+ "j+bE+UNAIyG2PAfKtaXOJ8rW8qLIMUP5aPL4/gkGDSRuvSBWpo4oWTfLwtI+FLkJSursuOha\n" +
+ "+96QakdwiSJ6p+yWaB/ex3AhULVsYWaBdV71daW9GHsa4tsPReoRcfHYHvXQy6LC6fppPiGV\n" +
+ "9iwhXbbfvuaQhn2Nb7B2j1ovG8wqtfyk+j+39asVFyNTaQiB0kA/KNu/NAi+ZNTtBaskvIjp\n" +
+ "4fFYn3pBV70OIiueCJbQTMzzCCqkPzQXtfcnvBLrDwwl4f8M59elOgPHCOBKOkEsgIf3SbNS\n" +
+ "2DreFkeMpcIed6vDDXIK1PIqmremOmSnJvoa4okRyu2SdXekQWknq2rpm20mySpeJd23/QXG\n" +
+ "gsNPPW8lVUYKDOY/YcjoxFzjRemhDZiivlN+4KBLkATO3x3sU/ZD1EOXSSCk2t8J6nzSCLPk\n" +
+ "JdLhaz/V7Lqt7ML5hmlYO30oF1wUS5U9Sx0vrWO62lvzj6FYiw75er57GfnF9n6RUl1VEwOk\n" +
+ "8NgYy+/XQkXLqExe50ueeKTICtEP0YwNekgKlrgKKwEtM2VGiyzSzZ0PL63yeNixOcVuh5zy\n" +
+ "WmsY1VgPzdZ2FwAzxtvBcYPEpkL0R5U1fmAhLjwAzd4jDG11Uo+bhpwTA+mf9KTlw6hwV83V\n" +
+ "ivNDJ+SXvLG+l2Bbu1dK+CLDB011U4lDV10EfvP+Op5keWlTY8nCozy3SLcm3LYkcnSB8aKg\n" +
+ "bRgOM2ZWO8gxxmYfub5OsOeTWoA8X1OEwOUgIA99KOu1p8PPr/tJuyuQd81KLUsdFUSlqmXs\n" +
+ "vHEpF03T825RTrmyFkusRXUCSgX3dvoxQ+Cgwkac2+Amrs3tz9FhVF3dZbgGuTXEIqb9pheB\n" +
+ "rlLGpmzJc4UXdsFZh2qSpFq5Of8aLgrXDYD2Z4S0oL2qvsjXF9rTTfqnWSTpbdKqj3AavDEO\n" +
+ "yFbhBRSNZhrI5/Fi96CWkx7JDk4boeX1REmRqQr11O+emU2eJq5em156zMBKaY5qeOX6kcyl\n" +
+ "BQrpnwq12Di+tVZ6IHamNEpobZYh7Om7l96FGsPgrCt8k7AAtsDRMbslBotcv+uzIGuiRC6N\n" +
+ "rmn6fLSbkp7M6dqDCXnQYzH1rIIIAhHH8t9kjUmv+QjdbTZV7UM/3mV/U+35i+dUi7uEoq4l\n" +
+ "pIORSDnGYj/mCvqa13pe+HKB3+dvr2G1n0Ouh8zg9weANoXwvRQ1/WQcrZLTS28woIssJgWM\n" +
+ "tRDeNQzpguB7B3GgoLf6N/4/3Nj+S9cPtnrgV3u1i7Cb3tMPOzpPmUjFtahJ09pt/CTnSKvC\n" +
+ "MZt9mu6B71hRdMb7mpwswVv50HwWtBVDIQ/nMwa1UX0iLmxZ0kRRYEzvsqyrcIPrxYzIwdgu\n" +
+ "eaoQXggEqcuEwT2k+rN5l8oBYoW6y4IHaFvuo63keiQRzGyDdtjxwvu7HaUyU0tJbcTBygcs\n" +
+ "TtBOMlVcxHGRrc2R5VD7lZTCWx86ROwI1j8WtpX94HuY+siOqLFonUiEurKss/4ehfbVSfcf\n" +
+ "TyHS4h+6lobZaFfoIkN5rW5iju5rzOWQxbtKfz0Fbl5bbs1qe4tJSHOG+Wsp16gP3W1qIB5L\n" +
+ "EgLc579ve9CFE57TBIR8zGsCRhDarWLRNk786yHSB11td0esF/9bMAA3RMMBe9UVSKIP/mdi\n" +
+ "L6C3XWzOzDWihUAs9VuDOWogl2+PZ0yMp21AjeRvo0afZYLYQwNng9fksmMC3qlD9Z0ssXX1\n" +
+ "109RZsDAo4jMOv4MZV95JTJqq5Ti2TvOf7FtyWubLkHxFTjoaoc5Vi3saXpM051if4pI1JDb\n" +
+ "WFmkxnOcxosVzcGvF36FGWuV3tx3BIPTY9p5Y3h43f8RLE2J0AmZVT9EBbWDEFYcHogjNGfT\n" +
+ "S7psFrm7FOBXwTmNQ70aUL/7sheXyftywrKl89In4E+Qfp7ARoq6hbrTUM4XP2q3Onyl6UVb\n" +
+ "qEZwvX8fzK3XACUcfyHXfunvn8NRGJk0EUgxf+GIXd4h76j8s0vyOhgTV9/m9uOs/SzPaoFg\n" +
+ "QAeiOgVWfaJTO3Ra7Lu931otDh1+e+Km/+kbx1cD55hNuHcS9wlU+ohwKD7d8jMXoLgQvXjz\n" +
+ "uKD/Zn3FMxyNgwhLJPYfD/tDqR6dRVWGQXbiibW7iieYD0IrHfsbUN2KQ/SraDEypcVSxXTh\n" +
+ "5OCJioK8C2p8fUuggfJTRhzUtVhnBwRA/eV44/b99Ifo8o52+n5g7eFo7KAnRAEAjRkHK+Gk\n" +
+ "Tm10mVPIl1Vsrz/j/NT+c6aRjq8RBIIgAEYUdOjDp3jyydz9k44SgJsSdnHnYlSeYwtsxaHI\n" +
+ "zf5Dm+cI324tx6Gdq0t7cxtrlEijUkegAVBSy+PF2kb4aeVc+GjosWjJ9r7yLzV2Lsz6j/Nl\n" +
+ "3vBXXdy9Ho4ZqOrlb7usA8ecbGxZ4wPdNysVRjiNJxDBvs2SV1BJ9HtQ3gUHek1KwrYRq4uF\n" +
+ "oxsmV2J5NV2e6mYCukTCvZyHpUA9ZIuhn/U4llnoOAaJXdgNou6FEUblyBe1QQ6FWmP0xcVq\n" +
+ "MnHKY0FpBYkGX8X6suyD6NdcCPU64wyqmmBX+hfmFEoEUjmSlpQ//au9voQPSUGBk1BHeyi1\n" +
+ "+oD7uy5xesBlcnenbmzAJVk1CWpdbvvII92ZAGLeQlK1JC/xiqvdkfQZ0ifkH56M838ZnPZJ\n" +
+ "rQCW9TB9Gv53QZ09x/P2b3VB+58X+UMxeYl2gU5dZIC1ZABOGihh0zLukCayCj3pgE5u2udu\n" +
+ "ZZjmNYvIx1khp02kWZxBl/R7mLn6sPJp8AJlHZvhg0eJFdnclPzveocADzfHPeTpFn+APBnz\n" +
+ "WPCGh5E04F5mujrGuKeRJaiRwYC8PEEIzrMSTCKthMOHts/xY3Ic26ULTMxPjI11fIZ11R8x\n" +
+ "54UYrrCZo8g7pWrxyBipkjbg85diLCQy5+rAxinVubo+gdSxnbpiVOMwRkY9lNt0/7Vy7c1Y\n" +
+ "yrTOG/2sU3DbopXfwf9JNkwP14Ba5pJXHy1yy3EsM9yR+KPc52dpz33m29BxGmOTTKIVVjov\n" +
+ "w1LFJkFkLuSX46/1bx4CTd/T8+EcGw+LRQYi8qw3xrXclhEJ171ZTp0XpE2ownDUcTwRiUjj\n" +
+ "x/E+pwFrwMnKTWt83ol+xWsU83N7w+DhoQ7jmi4CWK35J5JAh/mk5ofGNozJAmOdgk7xixbV\n" +
+ "uEkvOAGj9kwfFYtR7+N8ab69lAEFNbt/mBbd8ZfPudqi46DdMM94n26nFupuwRaNYEmZbYJf\n" +
+ "xZ1G0fCBgj9dLjw+ZlnC+nVnhkY7ZG9/WtHW1bIorRvJGnbk03MBzydKC150edeW8leqBp+v\n" +
+ "bp07c372rZHCC3G2J33xn36qlOJ3/zWf1wPbuJIrrF8tY9mcttpzzF2Qopb8oCVEECjaRlQU\n" +
+ "EkgAEb1xxIW0syvLQRuKSOKz0DU4kKUxoHbyN4PQ9lw3zYKQFTBh1CeuZqlYyuPuG0Pfj+Jh\n" +
+ "Lj6KVvRjdEgXG366qjEpa74cJ6rvqiTpbkBysvPzNfwA9SPNXUM3S+pAmOmaPwvcPsWVpevn\n" +
+ "/CYtksAPtKW7wO1wj/GmQ2kzDPZ5hKqVUkpvDPwZyLkWwLseiOX95g1/9Zz0UR0tRBnPzW0L\n" +
+ "pLttlajn3kv+zs99yQqjgrIVbuDj2kT222Q5FaiKYCaPGt6XlaHF7krMl6ojU4sLlYY5frME\n" +
+ "jKQBA79r75vHvDyRmgeBr3VstOI+su+pZCNEPlmYOzzWPOFJpZQEoz6wKhRlPMbxCufV79n3\n" +
+ "FmZFk+XIYaIXypz7TMaCiy2M1uzKD3ChuJc2SiKCLF4oGKMPG4iIcYWkKPuqxaBB4MHWSJw7\n" +
+ "FxU0SGBgQTQpuPZNNSygrHOcVThaIhsTIvPEbnYHG5FQvOHa5zuEEaep5Oj4PAy86vIj1SOp\n" +
+ "Y09gNrOt9PJgOvWIVCPYaAWUBONqx8B4SyqsccXpPVzrnTdN3qXlAhnBx6k9qwRCsttgASrx\n" +
+ "M+JSSiSakCBKpsHwuzceG8cG1NbWMYNQZbk8L3ojNVEqOG7awufWqOf1TNQv0A8a7cE6MLwy\n" +
+ "pCKrb+A/oawJq/pU4AUdEDvGKsnq771Ektk8uLXM9nxhM03vsT1Tv0CvIgtWC0DTnbT9DcpQ\n" +
+ "txMcv//WtOhZ2O2OXBhruf20KbdmbehBvYpFmLsfjVH5500MR38FaOfo7MTveaHfXIPK51TM\n" +
+ "OY1JLzLjZNtwrXDSLE2paTokynENryw60MRUnPRbqcImP3Ro5kFM/wQ4QmaHK/P8c9b+S5UY\n" +
+ "nrdDsFOCr65X+8/1DeX+jFHO8TGkZ5/+C3boM9sHEk59GTG8Ly8myYWDSEeNV1QgxAuFsRgi\n" +
+ "iz7aJ5QD2IfRIMGao++g8N7rYZg2GfprfWgBdOXV7A1CTCYmszdQDLxkLmd2uRUgrFOyJBeh\n" +
+ "1d4oAVem9rljEmhKhA9VfYR5GBxjKIauP8wUsE460dxh/Y5dx/UgTcoMM2EOozkZT07KemPY\n" +
+ "NIsLuVcIrpjx+4tIh9Jqxzj96IEf4R36sf0/mAi4Vr5i7ih7hNf9WqBQXJgmxn1jP9zyY/P5\n" +
+ "5Tj4eyAD+N7apNndwWvvWakk+RSSY1wZOs1/8qlNThl+Dx7xbQjXYGJ0l4Y1BrwmBsca/gMk\n" +
+ "hZ3KarNsb2ywJzL5ddbUr3CbSZuVVtzHQTeOEOTAkKgSYemFEwVADepFfP+N+CkQD9l/jPtb\n" +
+ "JtP3NxEne8OWrGZXDfH962jXVoVGo/n7LArJCk2eCBRLu2GeeI5U7t60+D3kvilKm3KzCNCG\n" +
+ "HoSmE3iOzRmCmzBam640WW4L2IEVrhdlYyIcLpHDev+FEIRF58KrTKR3+zsHncst8yHo2SJ+\n" +
+ "nPJor3ow4UxlPi9W5sciz0vqaBUw6GDI+4UssTWZXew5P9KnQXu27QCKFX174ol/Xj7MPK5G\n" +
+ "20QXGuHcUE6WeVbu/R046begqyWmfAIBSfsYJzh9lp/RKTwdbVd+eVI6Q96MXarLb16JdZom\n" +
+ "RCjaZKu96g6xl711JMHqP8ckYgghrqLvg67Fx7b0RmGCmX9d1UjxWWBkKRa1fcCyCNlzGdRD\n" +
+ "QjZ1/+SbAMDDnzuBjPE1r8RSfW1maD11JU506s6/N1U90oXe3jmgPovjyvPUo7Kfu7mdxc7c\n" +
+ "DAPdg2wTq79gcQ2dOKMHYUCBa9zqBAoZlXeDY8MCWBFB2oH1s4ZYVd1ZXOdo880T/QFNKmAt\n" +
+ "fkxFaXKwuVBbz832ntMngm3219LTs7dV5zQj7Ualn7XtHDUoptl14m/7K1Kvp47tW1TNdtNA\n" +
+ "5gcQsxY7enKaL1M/ymQPCB2Vtq88gAS1g81gudXWwSsgR/Ibd4chHXjX0AtLgSzQTO8njFcE\n" +
+ "tPqXcf6jxPH0hy55M1j+uTbCg0+eFHOsWvtak6DHmCXKJEVMtjDK8tu8Aqs3dszlC1vcc9wY\n" +
+ "7Tm6AJPb2NXD+Ly+/b3H2RsyUYU2GmgyKU9DTzm0Mso075CAg5Kijo0KmTbJtOdkqFdpd9rf\n" +
+ "PjA+c/Lt/VFyvyRkUppHyOmecwIqW+N/1gS3ZPWZv3CpO4kBL+I9VszGpe8OxiQBv8PVmWy2\n" +
+ "j5alpanRWLIxJUqxDrR++cwvY+zk06i/cj9PaA7ZS+IjXTgYCEJjJe5gLNikVYtXeh06biiZ\n" +
+ "NKV++wfi7BwbTk/zapIFl/aYPjricw+OioitAvtMlNy9TLfazmpMjepWGu6eyEmsJUu+PyIJ\n" +
+ "qLr4zp6nPL7NDkpC+d+NW+26UKJDFuFu/9G0zzzAFIqHCAtY4iVgHTQBeZjUXhyrkQMIlbhz\n" +
+ "/AmEAYHjNJMOcj/1fNp17bCJKsq2F9PVspsDAFWs5aM3xoiMdPZbacv/J1LnZzxqLTw4ayOq\n" +
+ "xISNpve4zdHKBaMXL711fnZ9Re5dvRxBtsikW76m/GYsZqtCXc4dAgwz5bJZTwCErKv2HEwA\n" +
+ "1czSfxIKtLZfgcn2OpUTu/pCXXwjUgrW76mMxx+Ew5lRzyQuZmh8FEWLVEfl6ZUfWF/uTp9Q\n" +
+ "2pdnrqZSfGa44NCqafNdCoBRA8+gBeAVRb4fDc3gdQMaJvgM/Q05BYk+x/7QSq33aPEteKH8\n" +
+ "WE10HHKeCF/Q72BfWs4fKAe9uLOdCSpxzzbW4ng+XcguAIsKW7BMGtH8mfr7Hx0VONL8o7Jr\n" +
+ "cE035LUC7EedjiZSXBXmIqoZ/WBd4IM3I9w0MCQqORg6eqsjcsYeioF0KEwyu8U64W88KqnV\n" +
+ "9UkUbf1pdO1vV31/r2FAfJL+zj+t2syzcDzaB931FtdFGb4mF9WBQMDAXZ/tRgyh7J/L/n/g\n" +
+ "zy6SMmsqB7XJelHjNXmi0o/Mz4GzneVzYomV5iwRctD6RP8dxhUDFgNdFZKepu5vke5Ly+ot\n" +
+ "mild9RHtQFL8tfoUVfAGdHTFByujfryb3agmM9Z1JccGa0qSXWUVOGLNWHDeBoKglSavkYGo\n" +
+ "60Ik/PVq5rn3Z9BraKsNEM1pa6IE6jyMJLmK3kxzs7F4AFpMV7fpzUMPhEPrQk+PTflWt+L8\n" +
+ "tttXzOOdeFeD0Vm8dqvmye8hhL5KwORhZ5Zj9vdKG9XZwxb3YL+o41AK1K2j5u2M/PzdpgQf\n" +
+ "oX0te214mUo+6MrYi5/2WG1Quzg4ZK3iyGDNKL8ezu380DyoYCXgzBkd3j4FxppxXVi/ARD7\n" +
+ "1mYABAPecrRIxDq9i8wf42ih+nm9M9n4CQ6LCTSLeH/uZPZeIGagFuIhHIeQe8DsQ2G9eH5p\n" +
+ "a7czcNHkWn755Uor3JcR3o+jQ9APDd43W57norYHXnRJomN+45PWIEhGyOMNozh3fZ3ySQ/C\n" +
+ "dDNiReFjxm1gWwSF1C58vZJl65nnup/r7ni6Ht2gia9yxA38EeVg5yELuthsxwTbbjk47VYv\n" +
+ "qRv3B5d2HcjfS0zg4mHjwSZLu+ia5GquBAOPKgotUkti0ianeA6SZ88owXsmqPV/fUwrwFkE\n" +
+ "qC+2TueF4hVHXXrArg+Y8HfFmNdHLFALhnCzpYKIvwAh+3ruQ6degZgl+LYW5gLPoPaZoy10\n" +
+ "c1oVkdvaIrk3WSp1wmg0X7MB5lKQwg/9pH9aZ5Fg66L/NmYHck5SKlnx/Snmc8bSMcBl1m/G\n" +
+ "QLzxOQaFVE7sTRw3hRiOos2mA/13y1pF0/yxT0sICyfy/8JtLXTFYSzIvr1YqdJWPXHRkC3d\n" +
+ "vaS3FJ7sS2zBxy/rqE5Ee7S+nkQH40BFgVBZ1Y1HC9h3PX1YyFgTT7DG884Na8mPbmkBtrw+\n" +
+ "TulraUhj9pk4Hl6sGlDyMYMJBXoNollqyVCzYAlrOWtVKroCYYo3OXV7Doa0cJBmfi6ZCDbB\n" +
+ "3g9DFd48Zn2jn93r7m3TZWlBLj+pGVieBddOhA/sV1RJcWWpojhlks2zjpPsnbg6PLo+w8Jl\n" +
+ "eJXii0rAir5oHxlGZjED7Q0vXfaaAv+eZsNHjzZrYty5sBI/5csCns/RaERoKRWnPjqAihFf\n" +
+ "G4R4n86fpR3qQXS2LCzUipcQ28qydzRGAkzJndFCLtyGhoaVkMaETK8kX/k2iuf471Xxyj0F\n" +
+ "CW8jabKPKeUCAtmGQhbwDFSrJvTmfa1QaUMRiIsNkU7wUnbdh1nEasT55GWVLZdbqWRmJzTj\n" +
+ "YB2jXv4/QN4Iie8EMRCaZyL3NLtFya/Vh2sUbqGsFw16YSZZK2E3fWfM94qcOPjHiJyPxoeT\n" +
+ "CaUdSGuSm+hTFLDoh+LZfwAJEeGMUDAZ/QgiwkunszCNI7FVEhyjcPEOjZ97ijODUhfO1r2T\n" +
+ "neGCYIoNVMzR4n0nGL8fALyxirV9F+dBZlIwCWPDZWwes+8A9buAJhoAZ7umJFYhCQiqXC+q\n" +
+ "ZY0glVYTnaS4fNQ8hTdi7SIN86WUAZiNm5LIlIUHk4ysOonExQ4VB6lCIHJrj+ucK4v4ZthK\n" +
+ "fP9w8kiqpm52fln2dqygroU73DxXsOcMCi90JqUaqAgKiXD82pYVJdGkuOFcsRVKyI3BaN4c\n" +
+ "eaJg61/PBbmGdgUUeubiuG82aGo6nxYGUa7GAD/VEKyyoyl3ba2PvhSXe+Fw5LdsKUs7GM+9\n" +
+ "YJ4gwNW9upKSDoA2ZXubC6XJ/fvAPlkDQj03sBM82GUB6Y2Y30ptrODa2RlQr4/aH1ny0Tt0\n" +
+ "fWv4rEivi8ZsKHwvAyThGW4TQi6Qa6j0d/eabNJgKHnRAPXPZPuYTeudcGg5tbaKn6OeZBoW\n" +
+ "7XzN36cOel46M+3ecBYcYkbGhtjqaMCWa/8iTA5KzXRgK0n29qZZC1sbFECkji4ObMQyjMTO\n" +
+ "sZZyFrJfLo9XUfhj2EoJZxTnF7rIznjSXhZsxN2KcOlqF4hvULDuql/2AiW7nRKa3LnlhYc9\n" +
+ "z5IJ7vyDU/0bMcu6T+bo3qdDNvMbBZ3EZQQzzQqqu0Egx/MEyyXo3UQqlhq2ueZ8VkOIpWjc\n" +
+ "x62QOVhkzn/rb0ASB98UDCMBoAHnu4Vt8HhNVj9gtYzIwsLhCWWZtXe1qslbKHG9RoANsfdk\n" +
+ "utZ/GMSuUXjoV9NyQFAomsKHf30bTAU4nYkxj3RJviAO9aQojVMRqUW/tjZ8k3hb0u1G6WWD\n" +
+ "Wa6IZAhj/WiQTnFkiiY/hSWiAKm0Er+prhJmlLuaGwMjtQIMOyNpjK80TxK1BAgJdGQnreVf\n" +
+ "8HnXz9dHAAiFGaloyC0XtjaadnXdKCW0Kq2DvyPEapfWgKoJnDc94dX80plKfwps/Dg09uoI\n" +
+ "h9ctSWicYy5q2gg/rmy6fBjw3rrv6P63Iu/Yr7d23G1lC8MukkUa8fTB89wZ3n0Eh5+SplvG\n" +
+ "YyVY7kPXxaLS9WPa7GdpEFFsdLoJ5aX0ipGibkTATFWdZEgSCGwCYUpMYJW5K+FvMtzztlCn\n" +
+ "e9ZafDobzb1UFcXTbMcQye2BlFFQl3+qq3AJQi7+OzP8tmprRws5z1WfnUr/8vvps5f1PxpW\n" +
+ "kWcQv1GNlq9ICkiX10mEOq1tVW+nVowf4tw+2y1f9VyuKnKYXWhwlfvuYWfqZG++nXZANhUO\n" +
+ "5CbHpk2IYVk9W1sMNOWq/yIoJPRcVCoNwjz2M0d/ugpAcr7RrPtdG2JWeMPaW2PVkt5UCUy7\n" +
+ "kLjx+EY+EJrSjJfTKNAwNP3zMCPohFuTL7P8zW/qWdoNdr1J6Jn5epvFXxg/4AFPM+LnchuU\n" +
+ "tDxIVNH9If++y1P7wIywzSZVjlCEk+ayGtADUqCvnzwAzlYFCoBEhwTgd+KlEZoyIQYeS8nY\n" +
+ "v6ZkgL0prWsIP5Ctg5cm8yZdXzfyWZ2VQf6qQD0amZ8HjMI1TjdWTtw+sJrTSn6Y/aA9vJeo\n" +
+ "ekNLOBvlYs9UkkxoB2/P+KlWpNswm8ykO4F+kETaPRIj+0Jquc/DFO8loLZJpomo9iyu2+BP\n" +
+ "Yi4cZjRnUNyo+aDK2DXM7wGm5cR+SYHkxPRRYmhrxDdJ8GA8Y3pw+KU+j5DeKgBInnfuZiHV\n" +
+ "sfxmjdhn3OFDW5NZ5Z9PE1M+qDxNkve22sJmiMGPmbBGP8L6icoMyikAJPrvNUll+7qpgt39\n" +
+ "eZE+P3vufj8yXa7STW781cPiWGb9b09nri2b81gwan++4n1UTWRUloMWj4m9TFFY/xw7/1lq\n" +
+ "hP09aPwIZa+sy5m4WP5wD4Hp3l8VcobFeWii2PoU7HCDRyM5J1BFAtPL9l4Mpx4CWTM5OYzB\n" +
+ "H0ihCM8sNWbQFCttc9w4Bh+vzWWpFJPDqikr50aWTEgXB7LwvTx6LiKZBV9cF/MDx/8kn9qN\n" +
+ "geqJobwdsQB8zjVzmLsSEatDHkwpn9owhFV8l+BxavMvBIMGx1lc5zOQ2mXV/n+FVDnEo9fk\n" +
+ "rurpjd663byXVgGtot3dWyr3tUjvARqNjyK7uRUT8O5mK3yDbmtE6+Gtwvwemm7nD0btP1c8\n" +
+ "pY7OaE6MXWioOnzhLH/5spqrbGGV/aP6MeQ+HNKtR4Jx5ujIp/0dVKIvRJ9g+jXSfC8o4j7N\n" +
+ "zw0/+uDrV3Lx0/6IuovFeTYLyKLyuyclVv5hnNZ7msql4Ld/+2tekKGR9Li2tLiVFJ3yfOvy\n" +
+ "YpxDMi//6FgB5tXubPRDLKCP93qurNaCpEICApW5m8NpWvbYcoSSdZKM3YNNaevmP57XVO4D\n" +
+ "IXn2H/7yvA1NdmtFkOFKkkXvhO0JnrNlEa6NMx58WUm+7owtUEv7XT22S1aOpZq4sPky2yxD\n" +
+ "gwXqB5ygbnKbNYQb+mkkLIjiTecgFM72gKmtLDz6huaNvnpxK4uPo0QEqdRUhwD55fcoYiya\n" +
+ "0RsQhNjUAQXC5056WzDuz5xRahSQ2PbrT04pI4hzrlvOdJssi8TtKiL5UFjD67pwIcbmNnps\n" +
+ "1RXo4g2O1nef5/WHe048ZaPdV/pvBdTiEp3bjKFTlD35dUwFcOmq5+W964BmljjQYu/6rGdG\n" +
+ "3Sby2g/B+RCtEz7NB4GA3/5ah7SoJ0cimcA2HRF71Pa5T0cIkyEORSCA9pXrXi3pDz0RrqRQ\n" +
+ "4MsFEiTnJvl7K8MVRfGhVpZSxyvfC1WY5dZ760HKv+fBJAKPZywaIT7wg3Ka58t38u5ZiKFc\n" +
+ "mGzN6M4mvgKTG8EMKgjCcFc9v1IdkWC9vijufVcxfW3rFkPNnakWL0td9qHKq3/mlpxVpBY5\n" +
+ "aDGpdCyzIAmshRa7zXt3LzVWSLmnCzW3aNWd/eLmjLfA05e09lE5ZRF4lOAU5bIC0EB3+iLS\n" +
+ "OSfPE4APylT+7cMlkp/CdBbAfio4xrJbkvSgqwESXWisFgZ9Zih1b/APM68woGmpf5aCY2Wy\n" +
+ "0MqzuOpHerXyh/O8nai1zTyDz0Nqe5Z60ITQR98tV2DHsQDazPSU4Jp1zAA4QuW67i2xps1p\n" +
+ "g0IlOREYGCry/mCh2SPX79USHOq3trmd7OVCaaWHSzzlCuVjm3FCplHq+11/sAw9c7Y9lriS\n" +
+ "zp5li+GbZ1ZVWt38XrVoUkGrexy5Im09C0zNNYMNMMehHkLGXhDBAmBwDWgw8xP+SP8vBa6j\n" +
+ "jCI9AB3AOr7kDL688ts8B+8oYeY9/2UiH7HA7Lb9Lpz06ifrz0/Ojt535D/WPqvJj3r3NgXT\n" +
+ "f0mrcEuuFUfcjnRyKtPQevdgzX5ZRHsvyijAFAt9yUt9AlcTiZOtJerz0RIsTgq8T2tsp0Mx\n" +
+ "vtHsZWqgzKfnW2SGiFhi2aIPZgAZA5FKq5zwT6sWJsaN3iyzqU+4reKwYrx6ZKNu4fT93y23\n" +
+ "Via+Z9s4dy3JFG8hrIY06WG+9XOyFqIoccDiwFfqHGf45mAjuuy9x8SQ5eMWe57tVSFUWxwo\n" +
+ "7zDT80Lh6wWc3cPomT5OWz379x2WXmO4MxXdrx9AKBT2tUXF8aCDbtx3IhG4QRtjQ0STbkjV\n" +
+ "ftV1iQfzx9invUZlUWJOBYYO+ZvnJ8bsS9+ZlNShwxZD3Eq5RfGAyEIF4W+PS2xpZuQxGySZ\n" +
+ "C9iaxZjBGjWJ1N8XwD0c+Vsuyavzgfv3ns7dKSiarIr4znXJaBhS4kJaq7buQ7zVf1iHySHj\n" +
+ "MxkhErY0oZ8DJTxKJDuOYPfW6GtinXZpGXE3KMy1FDXUSH3RC1DdnNtQBpsbVxEjzaYD8Gzj\n" +
+ "d/rWJzE1qtTK/OwlHwZyN/5XDN4Rul76dZbqC5En2jcVo3wlh0wiOQMk5yjycX0exzEJMlU7\n" +
+ "JTlcwkR6zY/Pjgd6l6dvATedIQbS5gxeu7f0ePCbN9coIAEJF+/LtRSeONypYb0MlKxEfena\n" +
+ "LR4XQ4kH3q0ed8jl9E9pXmGJKzEL9RuXiRZw455wx3J/f8ywNWrQ4JWdXKVklTLR1QrBRPSo\n" +
+ "K0qKkC4thqs3dyxgDdywKKq/Yz5pa1KPbp6RL6Pof348nmDbbj8QG59agAaMoRrZnqJmB4DK\n" +
+ "IS2iu+ES6KSmauUTlI+ZRV3HBj5rwu3QDrhQb8w6uC3TY33RcYlFP3MVaHQnlG76tMxkHQ59\n" +
+ "E8WL1dtVTzhOhETiZJZeAgzCqKc9L6aEtHvWZdqnUoWDV0O4UUDMjpNu2o8xYH9S7cFDbrWV\n" +
+ "coBYOkk8H0B4V1toNM8IMSSGs38G0hO0aK9LHyrGfEDO6HCF4qt8K1jcvbZmbUGUvB42a1Hu\n" +
+ "A02aNM7hRsnEOpRCp0l30VSlhdB3tgb6mI1LvNXe3pwSd61Hr+DIx8xDZ0cGA+b2DP7hnYp/\n" +
+ "Z57jk2qNwTYl3Yb/K+QTiv7AN08YDg5pcmkwfR/wuOrwqQp/remhQXUivUu13pMik8YYlwMc\n" +
+ "x0r4r8EmloRkiU8OuIv05EueMspLJItIEnXxchN6BuXdmB1G9C8NN9jl4T2xsmaE0f1vMPRI\n" +
+ "5OOHmPdwoRvGC6qWkY2rpY34haRyTAWrDhELca3kIgIVgsvIikbTkQvhY2+2mTrtlVSDcYhk\n" +
+ "ngvRPIT9Q0je0IK3+3XLog+uLQykqtOYKfsA3hfAAKgnghIQjwx9TO5ys1yR7AIGeYj0fOjI\n" +
+ "+hhwEgeUe3fttRe10FGXei10Z62TXiF1skEL7odnWMpkQ4vN2n4H7LdG+dFkVU1cgJXI16cP\n" +
+ "BKrrknaRXmURgVrJk0sItjxKGzU0OG/U9amKT39LXTc6x8hhAOwNeJLUksGCQpdjsV5XBnw9\n" +
+ "5+1ekWc+MPQK+SSgxWGaNfDPw1IxEWehrIAKjRqFhlsGLY/wbgM0Y9g4XugGMey/Ibbzdvuz\n" +
+ "M6HdffYtEHdzoAAh5CEo1g7jQvzyPHVdEwhxDxV/MmcWB+B3D2AHvgE3EDealVbtp0sOBcKj\n" +
+ "NGXbOPnaI2YzkKKC6Z2DeQBBOLzz3saaoSAi6yy4b+xxK3bcEi64nGZGJKuNi3MpCBL4v71W\n" +
+ "7eUFRwKYyaHhLY3FKwTOs2paQysjDc2NIlOBSrJLfo1wgmU9sQJx7BQdkUkPZn+p7GdfFUoN\n" +
+ "k4akjIewzRwKzPTmIPVhzb4HrmbFp3EtxPlKF7Afzt+6DC1FhzSHVqMyTOL37WBCa+Qthw6l\n" +
+ "lLRtXnwAErLnnaom2qgWl24HFkvaEKu/X98eQqcf/mGcjKgHII6zfdkdHvS+lMlbdo7ATzC4\n" +
+ "Dhx50456H4Q0/4CZ5VE91q1sGglKl7o3KdFiWu+WmgKfJo6/Q9BsHNluLxRPJMA2qEv7/e7o\n" +
+ "JeJ6HBYHHdwB+1DU9VnNXdk8d1SlAGyBjVzE3s28bHRe4pLmwCh3CIbwiNn0NCXavMTMnA7H\n" +
+ "RbrW6eHdJE0AwWs1EX+SPi4PzhFkT5k8iQxQqHbRiKAoqnD5rbhqxifapAf2SA0LNrlbvGY8\n" +
+ "22kE11mwbu3QXvbhD7Ji/1U6E+z/DpYFz9xeXGdcZAFEbo3XfuHh7LQ3FKENTKFQQhVnuX9+\n" +
+ "a301TXP6se2nBIIgALj+F1K0JvkeZE0ZxpXrM+5U3lhSBmPWT8xNBJ7c+EiJtGEhOyQVUZMR\n" +
+ "mOgMJ8sWfEPHQFpgFiRPtw3/Od4vK5IFpQUPqQWCU5wZrp9qrxlwcQAPu+VG2QFbudaIKXJk\n" +
+ "udzf8ltnEc2bjGFh2opSvUsQgh0kOSTnLLVAov9fIf3qKUVeKFcG2xpFIl1BlelOTmKAU7rH\n" +
+ "diRY9ujoLTvkIg/9o+rk83GmPHR3xz3i6RSrOGeiuLZ55PffPNc7aju38GYw0PV02E7Vex6X\n" +
+ "dtmimBHav6a2WvhFZhFzG4O0jr26UKDXYDVHKb1at4ymDgiQ34KAZxT0ZxJmeNAq/KZeXXfZ\n" +
+ "0D7hZ/xhS1+3CohTQM/fG8P6lWa/ohDJirS3tjFqbm4VSjqJaOZqMmTM6SgIeTvypH52i/ZM\n" +
+ "caYsH+/BcGn2W0nv1ZHcjmuMHkrQ5UfFH4AqR99LYahcAMFYE87unzVln/ljrM2hUCkzQjBd\n" +
+ "qeR5Kgfsstnc0O0dcGdmPTRHgJDoZdQzRFN2M6CbwpHl9OO4EselflWw6Z5QwCzBkC/3Hbmp\n" +
+ "wBLZBE45JFiiIqkrxT0t5BAxEYGGyv/JSTygvY6TrsvCoH4AFVIQTi3gsy2/TdcEFU8zwrQ2\n" +
+ "5Pui05SlMlfcccuoRTMMH3qqhuzbuQMz4JgLe7UdIvcQkPGIUdUmqliXOSd1VfSjhVIrqxJe\n" +
+ "4PxKcWNUdGbstdujHh+/KvH4AauRpn9pHw/P/verYdaFFtHpSpADHahd23SGdeWVuhvBGCV1\n" +
+ "/AUb6AoGFXU+m5TV8J+DLH//yvYfzu2ajmTWHpo85/CSnxhdhwF5MWQ2mdIq/x8TC8MwRTDv\n" +
+ "iXs6QCKTGlmSieaQnV1DS6y3np1rJvZodA2/zR6CMNvXoU/R+9aYVVA8jBI4eVeMghn6vp4e\n" +
+ "E+QAlJNU9ji1xLKMzPbWJ5tXryiB+AOF/hH1U31xfFEL/XzDTE+v4rCBpi7xgYLl6CYDIziN\n" +
+ "7AJuq9RdHhLimkxqT7dYH+rPE6BUgoS3wUi1KKy3IfRESuJ3UBPitkCaUvWeE3uZrK40vj81\n" +
+ "VDC3GnXWNXxSRAkx67hi5CBTuWlhFhIrVs9VzTiODlmlf1ln/AcCfwV/xg0QQ0NcuVj6s3a4\n" +
+ "qUm//jigFtxx+AFymf+1ABprCVxD05+eKvH010FgqX5+QPUre2ikKmh9/Cmi5/P6swC1AQgE\n" +
+ "ykXGHatZBjewwuegFIa4fMlqOwQn3VG/JzpsXiQxL5cDpWlT4e58RE8GI9bKJeL5c0ceIxDN\n" +
+ "qQnMgf5HCIUeEhPqskz8Q7nr5T5BRcxQ1oVaVkhbvCAYYJyGE2PgxZbwGcO1qgHVFahWMJnK\n" +
+ "EE2vIcig1OZ4zRdld+3zOdk6q4HExzr/YxllZjFasjr99sDXRnmVTbFQ4qdCwAKtEXfx6dx7\n" +
+ "MnhQ/B/UF3hwl8ODl8uqAu7IhWEYr5LlsOD2rd+T9WiBJW5dyLoBLbhvuVyJzw3dnajipT51\n" +
+ "bDNxbsFft2X1bje54joCjpGpcuIEGntZpU65X4OQiv/cdXI4nV9LaDFvyCsqJ2xQohXSIt2Z\n" +
+ "/pDxDT/ohuCFJDVGItjOcequa/CFwpC+/kH70Pg/84dAFPMug/WgAoIe+cgJ1q5NZSPIBu69\n" +
+ "1bRxZvvaG9cMa/Bs3KLzjWCDzH3zRDUWx+vD0M8gEPjxzF2hFVnwslVPIawHR45fRV3NdDAS\n" +
+ "DMHwVtj4xbFG94OHnBGtEnAH3LTa7dM5CcHZEamHWqnVbASuQkZuiU1xrZEHqtNlNZ+rkO6a\n" +
+ "0m6izOJtlf2Mqw02tIsd0gMD03UOtHC1uie+ZcIiO1bFw6kEoSh9BB3jxt2G2QHf7nJA8o5x\n" +
+ "tJO5Q43AwUIh1evygnVDYSCNtlQ8R2wdCQ6QfUVMhfMxqGajA+SXsCHXPI4YrXGQTawussIN\n" +
+ "E0g63Q/oBxmq+XwarM0+cILrEoq6VfMzz6t5i1DQv/jVmGBlhuKw7V7XbxZV7QKjXhsAhDXq\n" +
+ "sFYxwI/4/AEiPMv2s/p2BNa7WbkgqHrQC3QHrVzwXQglO0x3+iqqSoR2qL0H6TF4QazQiXig\n" +
+ "i3dBIBS8JhdkJFEXY1ylbfSF3xl4DsDHoxHl8KZGYVcH6sThi5aumQLzYxDcstjU26agaSwp\n" +
+ "Uy0HcsZfCK2HVJfBgGJakiEqmjayZKryijz41vqgqqPj1A818TbjUE+SlewGHnnzJY6xDStb\n" +
+ "x2i/Mmu5bvymyiFWaQKKPM5/fOwkUbSO4I+P7JwqFYOIgtuEdKbf2SM9nzatn4FRSCzK9O/E\n" +
+ "pQb5hQCwSONawekPvYfVWHj3WKnUjzuWaUGvCj+h7x1NOgvUvf3P/VrFyUSXQS0zcCiixAJc\n" +
+ "s2S4tbfafNuYSUsSG7DWcastrLHWq8mUkKW/4J/ENONFjzmuXt/iXJt8vSrhWzIx2dMwUYcj\n" +
+ "/9BhwSjTVn4NmMKagxHiOXxwyFer6GbLylVP9+fXXyCt/fODm1lRBPpAdL0ycfrs90GZ1C6q\n" +
+ "gGUvbGHhXlzUmTE8pI5Ao7+m1rm2t2NWWH7IgSK1XggHr9TqGToebgHHbT+peP+7rj50EU0N\n" +
+ "lvGzbpVdoDx8Aj0k4OKDcggHR8vaw8bkuSTNn0yrGN2OlhNrZjzvy1QtH0b1kcVvrVnzJkTs\n" +
+ "gERrq37zfYrZ3nOYegLR1dvuvnl4LScLBVmLzis12XUFoQZ72NMsS4cEVhREkaKkbYrb5kWk\n" +
+ "/nh0ATDW9lC3/yvo/tS8MWsE/MHt5Bhnfb0zH8mYeBIaotjE64S1xwXLr6C+BqO73PlfCeul\n" +
+ "7c7BKZlO8yiQxTPQ1RbWaXqiNT1o/ztvVSYtwFZGWfIdwG7pyG+ewF5aQj2iyxQBiszR0JOL\n" +
+ "KODKBVjKiFqyBjRZ6o9R9orB553QhKbuVC4+vBaGh+P3UwQxlvs3rYE8zInMafcEoSTCoRh3\n" +
+ "x2pFg+mieOPeCXQ2wTSSEd3aF0w7dCNMUv5JKKNPnGgn67sg+2e3s0HoHg6xHvNZ+7FfhDJi\n" +
+ "KydDxAPW8I4f0A6hiQayPN4BavHVIfg5JsAwMkTNbdUBvrTxVLtN1089bPPT3MSEEKf1hNjX\n" +
+ "gb8h7Rgd6zGOv4ovWQHyTncB4d0L4ycP0cBgqi7wh3qhc+FeC9+PCa7DtN6rmxC3knSVOXnY\n" +
+ "8rnDnyA3WN1WgwY/eg0geejJgZglwJU4kb6YpkC3jrZfxgnRETwxmW0ezsHV3jxfTSdntvjl\n" +
+ "EMnrkZTvRX1WWWbjNfGCX6H1qwO0IAWK8PJ6rt1ESOaFGOAQW0d2V2kZpVn/RyuzWtj9VDhf\n" +
+ "ZnpJfh6t46AtbX1eVQx+iE5LhEzxE9keI2vVHTm3m3TVincByj+M7iXz31WNqwPHe11wUgY7\n" +
+ "10q/l6ZcfuJJpv1k+GAbEqkOyMcc5O8TEuGdaVlntU2GFUw56oBYaXuaF5EZ8iu+YnBOXorP\n" +
+ "Byxc+xGM18X5E00NisCWi+Tp6NbK//ig/FHIQDne8qxgBsF2RBiDfBm7TH2i/g49K+FwTEtQ\n" +
+ "dx3Liv5WY/KqTfoK0utGmTt8/HOQmchPrRRv4UaREKFoV6Vq2lBnNsI/SjbJ5E1h4bLNIF5t\n" +
+ "PnxOH9SGzvm3t5VRkyVtWLHn/U92j4mGelwNs0+Su2R3qet6Tjn0NpZI6rkOMN6t/e2+Q/5s\n" +
+ "Ll54iPUt0U7JUiS8ltRQW9pOFLhWnJNImAkHF9CT6ka/QMFk3Q0Gt7RiJDXzHcY3AHmdJ9KU\n" +
+ "b6m8nth3jpLjfbtf0nWNV6MqrsRyPNXpx/Eh6Uu7S+FUAIS+uk9ks6vl6yxStTqFBofoZQqK\n" +
+ "qfTB5MJi+G9XA31vuuYg6V5kyjxuJ2LIYgDuO7tX6Six10eJvjMHqFTdXUekU8JYeucN6o4k\n" +
+ "D0MF0VzTHW3BRCQNJn5w9xAx8KfxB98OArnJjx8KvJ1SQFm4JqpB80bIfC1TIBaArBlN1g7k\n" +
+ "FPsb+JM7YMXrH6Y47u+1ThnmXxZwzsiPwRfD8NcNDGZGcwJvKQdGyd5IMS1db4r8PSMDjB83\n" +
+ "4v+9VOesOI68XrxFvYF49xozS7Uda0lGr3Pz7LFkZTeX+32BfYyMojy7+DrOyUFmUnaaxWpT\n" +
+ "wMp3V6Cj8pm8yGa8OW/ZidqBpMs9cOMy0+ObPvQz5x9p2Fb2yZ833xakHB2pLyNUqrsVzlvW\n" +
+ "CZo2AMGHFZ4Oz58YYWEao0QXWMtRkAEVawYcmkfVocqvuVvVWzh1Z19VujPjsD6pwRbnAGnH\n" +
+ "Gkha1w7GIRsIHvBC+zKJVnPO5VF8O9Vj7cgTgHK529o+w6OgjKrjubcPqopQgSwWAzVS42Xb\n" +
+ "FaFTvYzcdnB5te41pwy7sn3wDQq7fGXFvLfmFJQ3bWlXbc6IXwH6P0DAK8GKU/bp6dv7O+XB\n" +
+ "OBofFA6NRLCbUcBU61GsuNpVIltfLjI90CaGMGwRxGLgpfbTUxNzMBR4qn7E6wb66DR6iQ4Q\n" +
+ "4FyO5TaDHwkZmEgdr2yDWQJx7otQdEc1Gtho3rscsgP3n8wEfGzWCnWLvI4amlpF+lKL8x/I\n" +
+ "lgGUIQgbA/uHzuelF7zxhpXBVYtgiRGLCXkE25foYsTMHXvv51wyrJ+6agLd7ARNL6DVGP3l\n" +
+ "I15G8+ZTwq7ypHdab1IhTLyASjnBZZmPUjGVjC/lCDgc1smm3fFv9ORGpwpdrte9eL3X2Vkf\n" +
+ "D6K4yHuyoVdZN1Br5i1yV1jo884IT+mXgL2CvwONs/flu6cSI91qXgTtXB7m7PzQXARwS+XG\n" +
+ "UTcMaLlxq3Wy04/cg0hM4CiMSQbTcV1vnP1OetmvKXr/qaBhe5guawCfKlJu1vCPUng7Ff2h\n" +
+ "bKi97D/D1x1/ScA8+W5RdxuRLWAE8JFDMA7jHxOYrX21MTTra55pGa/V6i3fJ5NLNAR1aa+z\n" +
+ "RHDNss+/vTdiDYV+ZHkOST+rZE6SAC8KfMZfxrpIyBhaPMB2mZ+iOGi/H4vJS+q/X0COPyU5\n" +
+ "2oEMLyLUnlq+yu9kNskTJNRcn9UCCXEMzUut9/I9dN8JOXxPjF5uHZww0M7qC8DJaLa3tP2P\n" +
+ "QwRADNK1UlDFKmtkg0ZdjeYGpY5Um6sOT4zz7v6TCFLmTCXNiPlrPccvaySLANU3jbQnlJ1E\n" +
+ "ed63D1G+B7TO9IA8cJQZ3Px/H05Wv2ucAc3/rcpumpXRN1RfPKn+XNoglKcd/tM/oJwdCoNI\n" +
+ "iozf5SJmpBYBbOix/AJdb9BkD8shT2IQCevY/wjYJJfmLYA/kVyVDrzJwTYX+9EvgaiF5oNI\n" +
+ "1fBZ55iHr2tG6AdoumK8NpsxxFrOhB+uhl/BfO/YseuGi04rrlfZTn9+cA8vRB0VvFEu5It4\n" +
+ "YVVg4nVsGyoJTCalj/YJb0ZHQzb2Z9qA6l9wRx072k9kUT+iODt/TFmn+D1vVi/YV2ivX79K\n" +
+ "yTyyDprqv+iixouNXwbkqGNWSK6m8DSXfVeyK1vnDTMMUaHwDL3KhDGKIYqU2f1BIiLHNSQw\n" +
+ "XlTIxKYgMShdxCZKxXqPLebDBdkMUGKjIUV+oCryrqlaCMG8nRACZos9rmtTokXdkJA+PXve\n" +
+ "UlqiwJZyMSBjk9qdR+t/Sh0IxUXF0eAtrngJffVEgfQCXdtfUS8YqZeONpDQIYtfCfzjRuoD\n" +
+ "oN1t+FpGXp7M3t0E+CT9ImT1KQsnLAxsqeoJu2NGZVSFXRuba8l2c2tlWfq8o3dNiznWoMxC\n" +
+ "i0B+JLLBIhmzhz2pQOFWHg1FgrKhcqqlm4nnA9scFwP04Ly2uZmpvIBXyf126NMkXky24+mG\n" +
+ "D+BglZabg8au7Ndx0ROpQj6BDc8B7/MZWxDXrNtMYiYgqe1pzAZpK8CketC15t/x7l82BYUX\n" +
+ "hwpAn+Nd760mJjqhC+gzQahH09GqmjDLOe+v13KYUGmCnSEg4+FLXfiN1z9mY9St3DELfjC2\n" +
+ "m+cW3XupwZ8OQ8zErkjzW4zjsvQ4Xhz/6pmpEc3t7OJ1BMc6NhSHIYp98S9615OrfxEPPP6E\n" +
+ "QhR8d8nw0Yzi59bFFsEYRvI0ODqRfQeaM5jgqBooCNrV+KI3qvOmh2CgWg1ma+Verp8VvZNq\n" +
+ "GBnmjw3qQJC2PGGc5ioIVZNbbeZRPXzhrlbk88WaYIgUJ72gsk0Kba3diSqJJ1BuUVBJhakX\n" +
+ "Tx4qxv/seRggUgO3ell5E0e3a5xIEr/DycYI44i6LcYEn1eTCGtfuKHhcKv66nF+8iabaowN\n" +
+ "JIc8fhXO+vXK/tEBHC457Mskn5vSiAeZpWqQHQ8h2xpPTbmPnpYvgSxmmQZBpwv4R8s8PL3i\n" +
+ "XE8gtTyC4/fp8HN6WqG8Zq7wXnrdxyzA8Dw555oRnuJ+WvUXgk19rFm9VdAcXG6vwBhLYMcJ\n" +
+ "ZygnaYviqDmUldjCSZRhEWQNEeg0Xu4a4+lln7W9YeZkvQ3zj692pM5/bxfhRc2KvpfM/zNo\n" +
+ "qCP9ebJbn1vc9nFDDSfK6XAf5XH/7JoEZsXiLC7NN7R9x6RK3Rotupg1qMGtQn/FhJU7vscT\n" +
+ "JMBDfL4acoCpiII/hX54kN1nfQlPxEiVhco7FH3ZcuZ9FFpjy+uIyrsdH4QlyLXWsPq0Dajn\n" +
+ "CAi8om64U7GLayL+Lli72nHt8KWPxrCpDgVkYd1sNp4/QgBNfvsD8dOjAbCe4JzWz5Pr6k2Z\n" +
+ "OGnbXbQbptA/2r8ey/8AMHgUCU7VBZagsrYquYYskylXgtIl4QAoSieXjbsoTKRSjEs4KzUn\n" +
+ "v1C/dA0arWK4e2makIWFVrJH9OmLq7fF4nXsvKwjaz65k2rcHUCg7mQGHC07/9NyQnqE0UUx\n" +
+ "knlHKYvnRu7b5SjLBh6JqN0sbaDdh8vvmZS+zR1TlQ+Uq/ajfpWr1QPfqrgXooTI0KzVJHpw\n" +
+ "ske2e8072lsEW3sIP6WTdv4Q6vJJev7vAKmBOUMLWxtXK56/lH9H+mYlxNpi13NLpN0cNhk6\n" +
+ "1C8buigM8CNd1ePQyxbzAEbVqjP0bMDMI3PxuQBCF6MbDr2/wG6bed/qyYbRYOo4feW0Nsao\n" +
+ "itmFy7s9ZBPXynpAvDqKxSrhW3BNBQIA82KGohQKpRXhi4dr87LJTtu39bin6WLBXredeCH0\n" +
+ "5Jr5jJEdABo7Inkf+wR16svhzJzzpLAuEl8MOUDdZ6PDJS5B2Vnw1zuFbMedhbHz3EWAOkGL\n" +
+ "d35zyMy8TEudBq5lxplIZ0SjPEaJz6wuc2E1Mil2VFIYP5TAPWjFpgr4DB5LIi88aYz74/xd\n" +
+ "lX2VLkWJKuWcXnTaeff46PoXSTpQ+5AGir0fHD2FEhzT5AUx3FF0BKdvXqxeT1QBiYKDMR3p\n" +
+ "MPC0X+3efqz6wAeriiLpIOPauTBtHaaSkSjqOJtoVGkW48Anv7pyMsfwH1U8ayUDmE/6Rz7p\n" +
+ "jKiowjP2aXnGiqIjV18O6zLpW7QHFpvyylda6DrArFbBMIItElZvBmDLafqt/iOT4XKOA92U\n" +
+ "sG3KonD+ZSteuS38MPt4jxYNnnxyBdh5UIpvZ5UhVeHPTt7sjAROdyJnSvuhBUEv/OgDunQl\n" +
+ "+2gsdjn/sTvSCvg2uiXkBIxEm2rXNByEXAzt8eqlNqiNCNN3Z+3Itb8VNQFIWV22BGZWl/+3\n" +
+ "wN2uj/QfcDel5oi22wbhjNkxVfR0BmTefHIuK6yfxE0Gc/om86JLnjT1VaaXYjX8RCd/XRfo\n" +
+ "mkExlaP/JWqK4gpNWStrGHnhN1eqRQiCibAWk2ykzwe0q/QFWcYz9TNGEqbc7tZTg17vSVkX\n" +
+ "+O3FWnofEa4qV8rHBrAGL1mUjYZd8A9LUSQN4K7F+McPSE1vgzRM2146WExBEyx0n7YEtAPJ\n" +
+ "qSrpjQnz5H5TVUYgA7rs0CjQ7nHnGSzyxK+t3GUj4EMzljQO5zwsfBwQTPORvn9Skw46sUx+\n" +
+ "fLbGKt5Fo0RQiUoW6jHMmQ4d/76sBm2PiHfGLAHz5ldeeFvM9MFl1+aMjmZDjhxQkW2uNvd6\n" +
+ "iAPJJvmVf7szHloFgt8Rj2MlBMCnSUnv5cH2RTxRVrBKOuJ9sXHJWjyIABm1n4zYoI8veTzi\n" +
+ "vSjZz0zWaJpYhDC8XB3qaR3Oj29zZiUmCuVND84EVogig7sfjiRDVAmAfvEW21wTAdMwa7MH\n" +
+ "GbmgQs2dzFxfnLpsF8602HEb+41X4W4emzymxQ69YjCjpho27bNo7GM+Im/ye7afFb9dbkKB\n" +
+ "V7f1tn4fv1FkS4fyBqVx+v35rYqjOQFoA9jnFjkx/qwqG9z3MW8D5/zvlaQ7iw8sy9Vki0J3\n" +
+ "E8ge+GvtMaklCAmLsU1OSi5VM46R7h8KlJ9FEnd/ti3QA7DHxrko0gsZXna+fBVGs/wx9dLp\n" +
+ "ZKIrJy35Hi1Jz6ScpFeX3yGT3qo5WKfmLzTxDpVbZ7O06+uidndAsEO6LIEC4s+iTrylvC+4\n" +
+ "RhFt4ECZ0uqP+aOmM/l69K1RLGEAtwvZeo2/3XDyTkEmpa3g9PZtuSrN5QIQk9YKK/JdrskG\n" +
+ "oj7VqUmy3UbWam1xXaPzOF0nU8loT7ibsscCdAp9ePrn8wAJONMOPfIrcOxe+itqALWDl9OS\n" +
+ "tmR+nbLdV/pxDarCeEJphYgNxgLdKwOpN3BlB1EemKkOSwedqBGupAsszVw7uuc15hfOY5z9\n" +
+ "TV6OdnbG8Ne4JML7Iy48hcFG5i1yNk7up0xzvIrqPPOmZiBR5+0d7b5oByfHZBUy6+19ok6K\n" +
+ "0q4bddPWpNIyEErsddXcEoL1Iic2zkAPYB/IbqSyKv5aub1M/kqwvluw4FzZ0dDpJHewrO9/\n" +
+ "8uWwRhlmgHSCTqkJpUx8U2GrmX+Rn992cBFkuKoV+KceuBxwLsg+uG5c1Ml+kam5V3PrjHez\n" +
+ "TY/DoV/VnM8froXvBEaTw3NtdaYMz81+O5wzuYN1D2YnkIbZqESXEstnNna0vezcsOiEmg8Y\n" +
+ "Z/47oz3vU7+g1YXqOtWn6lnxzDTWe5W3FaCtAE4NmMgfXjn0wnIHFEEADRmwGO9+ftxvx8Uy\n" +
+ "wAMQMy753rCu5IebsKpy+Fe5UQAUSy9Xa3OptkgG14EayOCvq6rAyGa5AQdeMKX6PMT+g7co\n" +
+ "hYSVwIXUXzv32q2nV+FpfXPC2DkfgeIlWCWFaaBsSkG6G66JA/IYojkfDJDXYyQV4bjSp6A5\n" +
+ "hI5EzIcajmF45shoBa4wBJ5NrwJx1Mfu7uqfjZCUhP52gD9vIcC3975ReTQIgVfngDwNkcok\n" +
+ "xP7WWPUt/Q+2ZlYEANNgm/XMSgEN63FPvAs62ljNcLp0YCuXpsztDLXsrDKoXkM6LjGhSkXd\n" +
+ "shDR1TQ2GvWb76YWicgbNq5j5FdmWK1GbxxpdzRtkVBaqyHOFB7gAlGYFtF8CXlrhXKxySRZ\n" +
+ "jHWCPbDD0MHSZudWn3tvOoVTxDKG5PA3AqYKl8PoY0vJgNlGR6UrJQ0kDqWyeCODuXkfOVdF\n" +
+ "9ID79DTrNVDMtoQq270z6JSjRXA4VUAaZVzFdqFhsproIY7McL8J4luVfwc2lBhcNwyt0g0r\n" +
+ "M+3zfWELP0e6OxuSt5bsvuB9VtXngtmu4mEXse+oeyiHmuF9kTlsUASB2kne4AsnhqyIklVI\n" +
+ "eaofT3+JgoX4Kpy6vesU7jmUmdDQ5C5d3ccQTLJlNHiFmwQkitO7cpYN+lsLYO57kiWWMjWd\n" +
+ "tL5pg/tOycjsMcqZ+DOPQhcWa7c7WaIJilsqqQA9jKEeurQAr2sxN83BZ0ej1HDEbwA5cpu2\n" +
+ "M1gvCUIgTifKL+EdKzKTleSnMhWSgPgQHGBrkbBXoR5S7XrqCkpCXwpKhwXRCBplzSy8AHuo\n" +
+ "36LK63ofUKWSnrtqQHlLeLGs9k9lSxq5sWELRLhi0vuVe70YSY756VRvsA3V+Rh3h9zOU3mJ\n" +
+ "ob+0WCmzMzrCEkb50qF1mO6nE07gcy2nZ2fZXPlNJLBfPF48kGzNmUUuRG3dRwvGvNt6foMK\n" +
+ "+140jw8Q+/YnwOXahNM23BpkUhvrRaYhLjIOC9ak+uMdIu5ZyjT7CgerH2SDoSOD8CuGYOuI\n" +
+ "Z6vjGBKxEX92UDClwgiIK/2YfgAIpGEAQOWCSRWitU6Jhex/SFi7aVYJ92Biw4wBtcHaHuJo\n" +
+ "4TSSRe67z+LA5X823HG6ibh1nR+u1BIFCgoPKRLpt6w6LUArJZqbYSNyCc/rCynkf5Wz8ZRk\n" +
+ "kC3rDVWmeWAtvLU9k6i1KOUk4vFtaePeGxTNolybo98cOYlj+JFs5mWR+ro6/n4Ryr/IgK0n\n" +
+ "lMvvbIiQM4ckslusg7JOimp+Qvo3hKdbbLLu9ezLZbX6xgT1H5e6Zif3lg8zpbESDCoZ+ZkA\n" +
+ "2X0Q84ofDRRH7beKV+IkG1uIvai+DVlFB1aWvCbV1N2YFX2kdVVYvKuiSxlt66kehSlKsyQg\n" +
+ "U3VnWezaEUhriQrN6u3uqjgAHj0GPhfbtXLNkF7cqb77SreTM1Mkxl/Tx0NHAmPnOvs6DJYO\n" +
+ "goG5W1ywekIpkmDBzXMeFTnjCaXDyBQgpsUklUASySUeJxV39Y2iehjJRiShgyFO1MGF48u2\n" +
+ "ZYZAUN8c1J87DLgy6+pZg5m9eZ/Y5Q1uIP0vnKYA13PmCEvlOdcqb8bgimSixNpWIm58GAc8\n" +
+ "EOtQFCkrwhGq66lDiVBEEhJi7nllTV1WBiZpU//mCqPwV12MYjX45UJlAogpQH6D9rJWEfaC\n" +
+ "xjYyxSpF63jOxkkpcrD89UehYm3bq4eDOGUBW4bFj86iEX0b5Ic3dxMVtK6F/fWGWb823+fF\n" +
+ "mcVKVXV4d8kFAOboGPlC6nJTX9hP6n3CcdHBpU7D4+yamKSPMN0oorOveTkNwofDWwT/xXKC\n" +
+ "Qszrxv56awpebYOByT7CrVnyT1WdsOafrt2r9g7DPUqJwBMPjuuipBNAb5syK90bNWxRwsRz\n" +
+ "+gKSzzg0clu3UfSWof0Kdffclc5FPKPICAcfoVFonUwS2FzmiKpfOI88xVJMv6MjxtxERgiM\n" +
+ "DuBRK/ebHX775Fq/acD6EWAbqN6fysPaBLAoQ0D7RRweEFY8ULWnnVT43OJPO4cP/oYYBIIg\n" +
+ "ANKPCZsvO0TH1Rt1Q7BPtwuOKTt+RBdeXSSF6K3FaLTJH1zCtsVyeRQIjCLZKcssRJy1FGcQ\n" +
+ "OdAnbNIZ68EkC79ESzQ5w/nmXZ85BQBcy5Kez5M0w0f3T2QxBsS7+meWyArZpL1WVJOq+Sca\n" +
+ "6vT+M2Vz09xhBd03Trzyiob/YhmS9UCqlbcGNN6Q61yBT4y0FegjC3Sn5ky7hIP/528rWr4n\n" +
+ "QjjWo2GtcLLoTXRjleIL7VsZPRJ/c5oyWlkwBMX91T3Ta7uhKh85YqChm+6wq++Ov0V9tbxQ\n" +
+ "3JcVjH0lQy0U5dvLWiefkM+AsAJMkKyas+PVuRgIuBFvasILF5dnachcwF7Uunun09hq62nK\n" +
+ "zh3Coy0jSEfcHU92BHoSLisAt/A/ufIMvyqjdLMHnLX6vsWEUj+0XhlqSgAnFED2ngk4sM5q\n" +
+ "/TEH7Z7E/COP9nwJc8HHpIAz50YUgoar77TKZXFYhbc3Zw4Onvl2dYqWOkoTV6qjQ8qOR2Km\n" +
+ "34kCm4PqhHwgJvkMLp7LLX7W+YIg9cqd/rygIxEf6NoIWkp+9DJFfuCMF2qeT8jRnaSHs0To\n" +
+ "OdIrFlUi+V7SGos6AP6R44gkeuXNyon2LD3DnABmmqyjKM9JtqWgxtn/vLNBgOwBcqIp0l3q\n" +
+ "3Znk4QKEIIMCNszdCSZUJwBW5CZDQ4F3ai6X2lxN9g4kmX3n5yKtLkjrWZMfCIwjU51cQYBo\n" +
+ "15Ue0N1+E8WDXHYZ9ahvXWo+dBMmX3bf3SWl+U9xwiljAzZ9DOi/ABrdZsMkVu2N/nm2h7iE\n" +
+ "tf0YqOhwwdenGCNhRJg6MMzoXQLvB68hHO1gZj+Wo6SYLr5DKqdHT/2aXLPiLQtFtWwcwASB\n" +
+ "HGOVqMzMTLa/uqwy9Qr7kF6hddargZwGBozYNCAT1TS/eqGNBm18y7a691qoDg2vzSuHiEAN\n" +
+ "piz/kWW1skxWHFlmdtVT8zHkxg88/h0DAeMiupXvfhr3DygVOih7onC+0L1wgvAOHc/Mcuoq\n" +
+ "qr7rTdm2tdl9sYz9gGKcprVQJ94HW9Io+PJZi/cG1x483V+1K2mWcSDPUImRXNGx/VoSfaJR\n" +
+ "DlMVvNuhBYuVBvjrkx6XDndJPZSjzEDQliwV4rLb8VJJ70faOkI5WKcqesjYzebxqT5J2f48\n" +
+ "KLuZ9rjiW67e3fg8UW5RQyIjJwGwIglPKy+CEuBQaJCZxpzMZ3yzbttiDCkV0NlzuT9exdTr\n" +
+ "ExDsZg6kiZPivYQdQDOJBhhY3LpwZHr7FCjN/QhjC01W5fmbQNTanieL3IU0GjopnjvI748q\n" +
+ "cV2GFVpHWZECq2xGnPBTMfTRkmSQ8WlGcVJ/nMku+Ww0iN4UsWk/m/7ij3JK+KcgVtMHPJxG\n" +
+ "7VbxfnAzdHTSD1K3t1wI3NA5A0Imwd3erO1LPVuMw82PsjM0hUdR4Dtvd/GsCFLlCuUwePBU\n" +
+ "k2ZucLdRPWwdJT/Fy4rX+qbxhfRmvtFw12MzDcLS7sKYvGXIMd129Os6xv8h4Wk6Gag0TrnS\n" +
+ "74sdi8PWo4oK+pDTu8Cb3wZRrTEq9af9XQkFXIWCw8YHgCivEL71XUBAQGPhMF+sdifsgo6I\n" +
+ "ziwheYUtH9pXq2Jo6i4YvfedhJlPssmGento9OHGWTywi3ZHWbAc1h9A4kTivCh2zPO9ee5D\n" +
+ "VTvZuhIbhdA86G0lI+sQRCnLyLjZe7oeK0yOJDzMLUztNHFQhQ+kqQkZvt+bRqvcOYeWn9BI\n" +
+ "d3X+HdyutBWhhMSoknuEByDiK9zo30Wf1yOYt2xqb5p4fvSbFgkP3beD7jcznuEw6TNVApcc\n" +
+ "JpzyqITwXnZo+mJ38CCrPMYEr6RxNTA+XH4uav4BqrSoD5k1IrKqcXUOVoyUk1GOb/fcqUdl\n" +
+ "XJzCb3B0tCKM2qdBrtn7qUc/a7RhzNSwlousUe1OvMeZR3POIQecOjHVH7nD/ihlfhnTc5zl\n" +
+ "s03/ydPiCl5MIHNkaD8ZcIL9s+ejs+g1Mj8r15srXIqW4Q9HXYnwTyWmsTRtKKSKId3IHTmx\n" +
+ "tGycpLLhdZRuUao+lmzvwN6j4C2q3sgiISqnT3Qnti/8ZQxtaJ5yfu8tmGqX9kNlJA5JSzew\n" +
+ "CiEFf2LtG7ZPWoHrleY3zhLrMwbPWdKENohfZuCsZGmhqiqmO0FcOy2NosX3pUjiMrVet/RS\n" +
+ "x24k4Cec2xA6cThnuzBJ5TKxdclLIoNj9tNMsH2sUUEfIY0JcSLntHkdd2S6cb9NyWDCYi4W\n" +
+ "30+ibNY+RYug4Z3AjBMSUqdiKPLO+seP02kHiKm3IzVMQ1zg1abC3dMUgBfxOVOOqHcPaJ0K\n" +
+ "6/hQYhH9CxWggGF5R1yB5Rq6mHw5eD/nnUINjIc8D/dkO0j3hDpOLbtpeAW/O+3RUlAewO+K\n" +
+ "Hqy4B4WkvVTD4estRV8sl0R9hJpSMfXtlGXjkcTujVLcG5XVnzooNYr5QOHoiwS33Lk2aswV\n" +
+ "07ZFgzADntGdBV1oWlX4bEvH4Uhw0UQ3WfSu9Ejv5Lea+Ttp3ygktMGPcrAb1GMUlYBK0twr\n" +
+ "smvHaPAvW0YWN1yFsXEYC8Uhked7n/9IiYBQr1ddUVhjFPYgt9Wb7pbqj0ZXWacLsh8rybGX\n" +
+ "JhsIOKC1Q3ELNoSQU4XR6G3Iq5+sq0YF3R5doJVeqYK4ui4U4uvoqIyOIfAD+Fkd2B5ZedA9\n" +
+ "wR67vjxlsfXISLA5KGFnnFKuAO+k7XcxD5uScCPqz/7WLUl3qSZkL6FdRfJ5hDBh4OSmeqR3\n" +
+ "OkLz4x1PRUjcpcXYhSvnNmsjz88+xZE+uaUASTchKhj3GNvV8tRfXDkgeKOjFnrTCc9ti1vl\n" +
+ "hVfUhFBtVgXcZ6yTWhLTjkZxU6oK3Or1jNfGJ3+8OSGYfRIuSFT0xgi/IND8UYCg2wJVKjhb\n" +
+ "Ysah6CnUeQJuhWlAeHX0avGczd1wuVZVaxbHxtOiwi4IS/qYzTU8R0tFjT6sOOKkC8V1gHca\n" +
+ "AY8DS4uThZj8NZrKCQNRjxjLZvd4O9BqVM4zoVIc0/MumfKAzpj51QXtWsfeL6aUwLcjli4E\n" +
+ "cfk0h3FG0PXw6xmZQMZqRNbVDXydziMXg0tpwHBg9b3zTl5d10DGnMT0mkeVl5j+PhUO0Mmt\n" +
+ "sCXDiaZDVxFXwAIkSz6/5pdn2Iom+8GUe0qCctrEkL6T5hequlQsZAIw4VExd0FdW5zt1lnB\n" +
+ "cBFmUofzV36LG2BHqLXYj9FU+pUiMiOOlP3kPtvFwDmOIMBDHAsUJOIHUX4LHUjR/tAz5+Vn\n" +
+ "cPQVkqqUrps9sQ+syXLHrAPO7qZdRuRyLwjAxARhJozT1rOl39Qv2SnwK/OqP3UzTbA41U5q\n" +
+ "4zcveXZc+C/4zlufru36fLdMtzwnKnumewnUBdGF930V5aD1qsU4UAp/mDnnFZd3yN86ofWM\n" +
+ "fwcX80kZptrl2nxK5Zx3q/u5cPC0uFbbptHHYsPO+AGL5oPo+D6aJXbFh5BFT+od6+f3QFae\n" +
+ "icjnHtPnglfHlDMNCu3pjXDrCX1MpZKaNkfk2mL0rC5kXyOhsbSlZAjb74Xu76VZSIXQ7ad2\n" +
+ "P+c+67bVuG2/eTsiXwGjz/VxTfuQzdORdT6g8IFK9LxYsmeAO6dn9eoKGl4I1V6Dpwa5eLyo\n" +
+ "m6Y7Zi6h3Xe/1y7QQqsdtVRuc6HTSDnS912YqeAMCY1dBuRmRNlnBsVJvpJANAU6l937R52F\n" +
+ "yKZ82C3le/OAPYwJFy7KRpp0OyEwU+DLt7jE47Y5gA+pXJkBNBw2MJoGIKOv8CIXCEg0BPx6\n" +
+ "t7YVvs/H/qKkNLN+2Z1V+u8STlJEq+S2u5jGDBgsJ1JfrXu+difolZkLM32c8TgyplwlPtB0\n" +
+ "uQ6g56Z2Wn0hIcznfpHJLjsAAymRqa6ymEEG+RYGfG//pCyl2IBfx0tNJrdVDLdR0bkSzSEx\n" +
+ "Zmqs3mo05YW08fIaguvZI5TwShuj4VECmS19hvx++dRzg4KPB0nwbUcWXZ4cpXMCGrm37NbR\n" +
+ "wvHfaRByNmVF8e33H9lkMibyT0HMCc29twvMf3EMFSbdKvWlxt3P9hckos4nPKhzhxMKK+x1\n" +
+ "Sk8u9iV2IyNNaTKXhWn/6QdN8+yT8ALuuDyfNmvrnASD6xX1W/qA9i1KFf2ae8S0Y6YeJlgJ\n" +
+ "jq74MN02Z2DD+2EI05a/+fTPDtfnnQ7QZZKcANxXqmhjBBswQpi8fzvG+Zl+CESkJeMnNCaF\n" +
+ "9Pk0DclEeULVWNLZlsPBdfk3zvjCAVPm3L6MoxSKMb40cI5ICmTGpyfouof2YwxW/rvR1P8O\n" +
+ "2Ekkto5tesyxuFLjEkzmMbqDsVe+XBFzTXB+juygxPgkxtEmvw0EAzXp427BmLsPxBdKkA9s\n" +
+ "e5J6xGHkA+3hmOKTWizloRDR+fEQY18QkHp0oj8LYM9mlFhtVu2bbvd9AMJA0F9ypNCDsBOk\n" +
+ "Q9oBsecvfJ4W42uWXsE4jtUCthiJCljRVrNCqT8LS+wohHLKKhr7Ka70umS4PVzYHuvPE97t\n" +
+ "KmfhSJx+oO3yW9feaJQA668/Qc0lyJZ91flDEcKyKoHSWH7gFewPBwuSsU+Tk5wqI15PWf1X\n" +
+ "4lI1mOPaYg31zgckJSvh4YOME3+HTwNAun8gU8h73bMeAO8l6Fu7ijnyin+zBfNCjMgm+tWp\n" +
+ "zvecGCpqbIgbPEMYWqRbo0hvvO6BVQTNWeIMdZY3iZD4PHdHLI76Cuk3jbtvDf44k5m8e6mx\n" +
+ "w+MYOC1R7ep5STedsvmjdW5Fhs0W3oKl9OXk6DkRKFkhhEIZo7LN5KsjIzjkF1Lwj+nzJ3PH\n" +
+ "aPfPq3IAdI25DyKkeZFglrMGDNS8zlNkHzlYohNfBwi15aqwWnT+Us9KogYjpQFqYHt235aI\n" +
+ "JjqPPrQx12lh90DZSuBv00dRsT6nJ6lpAApSj1zfiOUscfc5SgJTV2/WtwmslYy4dQLQMoX3\n" +
+ "JaQwhMlp8ymkKSmTbsLGqJg0PraxzfnpNAsuH/rmr0vmsCVfePbf9ioKlvxJAzqtJ349dSOX\n" +
+ "RgFHcjSuMP/oGV5sL5Q9hKVQ+Lu+iavwR7FP8RVrK+hUzihdeLRueDbCN52UTCkdSrHkynog\n" +
+ "NMKgF/ISuQA3l28NSGdRnpCcKXH7YGbEkpoAd1JoxwtKkoPKsdZaezInM28lFvfY4EIgyeS6\n" +
+ "ec4z6bghHgQIj9CygiWsVeZWm2f8MWf/jHUcp9vZqI0t0hfHtCrsZAWbb68np+3lHhM8CiqA\n" +
+ "KDbW8M3BKeMCWNGMyadYADu+sX3MFjbgHnDncDCfNl1ZhWmtifpIPzjVkmWjwyEgYsSdC7yT\n" +
+ "L09tPrH72poWJRkoWjr0vWqHuqH2dYgUyUw3j63+7Cq6gYcG3ZUvdD8LTVfP6wj53m6GW4/f\n" +
+ "P3la/iS9jSU514dR7/ZsZvxmyxMg7ebjfiyh3oHUJQnJj8xG0gmiys2q7wL+xuKuSZSbOPsy\n" +
+ "z37Rve4xzlSR9ZltIjRx3oL0c+c2VQ2xs1+cIOhiU1udENvjseivKeFOGj+uUrJlCBXIGZep\n" +
+ "gMd6pTHNyLCuwkEnYKc3vxxWe2yeaZxQrBqfI690jq9uGRvmA9JQK9CnSAP9524300JhaGhF\n" +
+ "Mg4J8YmoSv9+gCsbsq90uAiLSrIkeIRpGmg3TAayntJ1lOXnSDZhZAJh3CTk7T8E3zJS+GG6\n" +
+ "mbAvcvo7WRL880W00ZOBtZBEhM7dkIxyqib9zn41SGyAWZAVy6g2G1aRnbz4G+edfwQ1H5jf\n" +
+ "iNGL6KTMioItA8ZpJQQ12aXTqylFi+5wT2N+pdUqBurQWoCnLhY2O1irbIfCUIwnDk5D3a7/\n" +
+ "ySgtiotJLjGkEL+dMcLGqZOo8G1yw0kbjo+iy0mM6MkmM03fTw5KNxjl26UpkjK1Il7vHhtZ\n" +
+ "SN7IqnMQ1gRIYjyIBkS8TRG1z4T/w4Lrh0fqAvy0etZj0Gv38XrlpSI03YrADGD0rI1Z+VZE\n" +
+ "VF0viG+iea7DHg4sP6AtKaHajZcUlinE2/pq9VD03enHxHBqcpo3v34VFlwBVgUfa1Bx5qky\n" +
+ "pOyqJ5XMBjjCcF3UT2GiQ2HigmyFbC8Wx0gAYy7BEmgBVhfqZAUeicrSlY/8hm300j1kkXS1\n" +
+ "QajIqPPWCM9BNqkN1fmsVnL7Npevg8h9zVKoczQH9lZmqnDzW/qckwu4McV8m75LRTNP+ADJ\n" +
+ "KDHREz2Y4VMjGjYoY/xJLwCOZyWd05yqQRiX1ijLjPQtA1BnWVvmtSY7l+V1V+jtjSMwQmNq\n" +
+ "AGirprCNyOBWfUv+lMvxAscCsdMwSWb48bFEQqGW+6onPYHi3QdtJ73U8yGYqCdX2z8Ri5OB\n" +
+ "ceLq8oe11qIIad9IOoklGwE0tvcxl8tZ+uDKV3t/sqCmrFV1/eYdTwaHn0tDVbdjwp78ZvXX\n" +
+ "7r2RgU5ePyP37f05wOgZTfD97KqO2l8oERl6SO9FgsPD7MZqA5MOL9CwLa9kFDe3PRW0OTlm\n" +
+ "5LbCWRNPR+X6qyd7zhWfOsjdyhRa9QYa82q4IOyeUtMHRiy4n9vRSzKVdGlCxbHxkoL8gVzC\n" +
+ "cjhKgnOn1xbMVD9TOPRS4ywu65DiEz3yH5ZUQdeUcxNAIfsyIJF4uLilBctx4QfRg2yk7mYZ\n" +
+ "HN/b5yrCzLIEGT50jbRqQVLdg38ZtnlG/BrvnsQesWqyfw8HQsZgY3Tr50TGxvqBIZpn+ywu\n" +
+ "UADKWcFnnxqggFmlEY/Cnove/yW/6AdZceiq6paNaW2eZY6PKSOgE6LaitOiHw1PCjUCjXVV\n" +
+ "3wts+LSjrFMf4x4QiruEXXu+V5VjX7jatDE+ko9Uz6IU0BkTHi8dublk7fgMq7UopTF/xsnR\n" +
+ "Vwv58q8+YnL+21cF+NQYj2QRPZ+s+xdIcGccrseOILXodpFbVPDGPqkKGIz8qb4STNtM5G4g\n" +
+ "qRTy/lh+oX/8tOll7q2EIYxkUBMUmeFA22S6lmCisiBzwtJT2P6571POxVvG75CvX+6YDUyt\n" +
+ "27K10jYStSrweUrNIO/KjrJ/yb9nSWOaLrni0y/42fa6L1YN9kg3VM4BCMhz9hO9N6gk1jiG\n" +
+ "aSrD6St1f91OLCoYuxjq8aKeo3uRS1mNXdpePIStKTd8ebEE9HMbYCgN9bdnTkmA7KhvQmKU\n" +
+ "5coukIyfFzgST1zngRoNU5HOJTlrb+eMgzdJqciTP64dEPtq5s39kVl18Ks8KsmUiR/eQnqZ\n" +
+ "7wESvxQEPH6JTojnzUrVctzuMWSiAU3o8EK6t7LjyZovKg1Ve1W6FKLerv6PZ+Jbmns1XjqU\n" +
+ "7pJ9pZNoCcjwWPXSx0M97cdEtrcxVMgxB05LEWIyJblPA3flZLpEOVwSAHuJyWXz4PRKJW1k\n" +
+ "56/uSuEAI0DHtfonYz9LQ5zTlmmYAneJTSGU6PZrzzuvGrHegSSZkPRtfVH8C4RmNqUK7iWT\n" +
+ "2MPaHzCAYGXa0AdX98pTh3uPx4LNuL1TrVTrofpypSbatuMVae4588PrAnCcW8yES1wG5Zvz\n" +
+ "tVCzdciw6bZ14dr6EDn9YBDcgqBeFsUpPS8zuQhQeU7/repqjRB8p4KhuZvSb8bMbACYcQhy\n" +
+ "cPvvinsGYHQJ9lrTgEhJetocgrJJxqe1OKnu10uWDg2h/sgaibl0jTmvaQ7Y9FwTZ5NUbNMG\n" +
+ "EeqvVnzsnLsgTAksIRzziIp4ZHFS3INDy+S6VvIDnca/mGTjwlkdgjC75kyihAsmdExopl9R\n" +
+ "W1awVUnXFQXaN7GQYwGApOwZV7VAUxIcfuy+TJjZYg1Fac9mI8RtsipJpfxZ7ZvKKBD7liQl\n" +
+ "kkWUxMdkkYkj0mtIG5Xpiswj/S0gl9wNxzvUNA1tZ/1zXuAmOtL1qGcJF6VBN8+sVS4vxoDb\n" +
+ "xPlWpgVJpZcnuX7qAuehsi70r/51aGCVfcEdVFapntsY8h5X0bbp4F9IcvlUaOqXrMc9IXoq\n" +
+ "nG0jrQ3rLGlYZo2i2gIMBdUCDZkLTlQ/0FGTF2nQi0htDC4cjo++c/y/PVrZ4UPcQdPJaOcE\n" +
+ "z/csRks02TriAc9dspI8dzF7/6qXUgUFPUEcRWv+hS5j2VL3zKlIhRQ14dDS8tHnAUrmoEoK\n" +
+ "YP7GrC/3L7YRaYbom5OrMhz/waZCIz/ZjSwxfd3LvrMMA8fuAKTu1t4qXGdZ7ocYMIQPLxMg\n" +
+ "pUe51B9xwnyiU7Ky0sPBX8s7KDItEO+YXJM32fwB7egddz5qzO09SskraOGloWab8nY0YX0k\n" +
+ "PDtmXUT5J+uzERFrfoZmpnta4qJoE1SKGyS/4L7+30mRrSaD2o+sUyWd+kP6pG23PiGUtqcf\n" +
+ "iQMSoGwehTfSC3cG5XyVbfkXVOukIA+jh5ysABs3KotOPikgVQJCYDS+JnTCkOZKrf0DwRfL\n" +
+ "/roKcv2/ON7W1o25W+QN2yIhNbN88Rjt/5twSB/SDFePFmItzinkibv5y+GZxr9HEaKZ/1jy\n" +
+ "myG1cI8gIUj/nihfEQ/WEjWSyJsO4smvu2Uf7ZN4zTSk/QRukyVrmoOq4dKzStiWssF980Ho\n" +
+ "LCbKzCJFcy13so3MqDtORxtDSp+960XWMOyVHZGUKRgWIKEX2AbJpaEvwCYKdeYAZzIuCwO9\n" +
+ "O1ixtfv1KvzMzCjF4Sk9mucFcVWeEeX7Uw5DDCZ01t8uXxiFkjh+bDal39a7NF+VNDiaAmrZ\n" +
+ "ezysTyjA9h7Su9uizTyeK/eZ+w8hDn1wUe9CgcCGz8PUJxOv14qcUNpZussP0hQVErIWPtH3\n" +
+ "mPRoClx/o+AGTVtLRCR7Fjy6n6Zq8SyGDKf/0xa1QufjHmoqumtnx7tNCsS748+Ys6PFwlVv\n" +
+ "OXcpimPVisL1kfrQHDdBodXnKNZW3rWNtFGYBP1VPVUxqJJ283WRyb/X9bVGx1gPaHzFQj7w\n" +
+ "z7BVhMpPmBt/vAXdIvDGsQGb/16O52bucATPWyOQMLUBVorR07v/5NolSzHYi19ehveceUHb\n" +
+ "omX5uAPXqSR6DvUFbG/n+rOTsYCTlqNIiF4Dm3iAjizMMh15MWvV0+PHyLLl8xcYze9Eb5eO\n" +
+ "5dMRERZCNejwJ6PfFKS46XkfmLCZbWbj30CU6Qb6reC4v6sucId63+TghD3CmbyQ8MZwjB7P\n" +
+ "e4gm1deP0Gw4EqvQvsi7Kq8WQii8OYLO0HSKyF8jzfb8JnIe57A0mge5Ru0KaC8blGuO0e5V\n" +
+ "Mm1SQDXuHJC1dErv4jd3+9Yj4TFgRYumplQR24jYCpC4OyFqcPEBJleAdOkrb775sa48rUqM\n" +
+ "F9dZjP+1MJNhEhgv7g/LLUUuKwwEa6o/Ksbvx6fNikWX+40EyS/wvKzpTZ1vsVxIHMmae2C/\n" +
+ "x3C5YVFN20PUc9VsXjQrFw2T7Q3rtqWgXRgFJrcoY7NDyvJY0UyCBqcWp28N6MKOhXztA6mG\n" +
+ "kS3gZXkpKE+q7yJCByjKLs6D1vTQgj241bswl8KxjlQLw+iC2VYAtZoC4O2BWyD4K9rL7H/I\n" +
+ "i6/ppxf6ofzNv5JHZ/7if19WP/n5XlX9XuMX0ZPU15nRXpapt1hOtT1ER1+bzifyOsywoKYE\n" +
+ "12IgKsb+/LK4k9KlKP+93S+yoFtSWKfunvA0Lyb3Js8h8OY8Kq1Izzw4UjO5npX/uYq18VDr\n" +
+ "LzU6q8d1IX7rYhgrWOmARkzzFbKQ7V9FGyxpbp23Fp4y/GY/F1wnZGRk9CUgdI7ZXIp+pR3R\n" +
+ "eVSECRmLpMEAgKwcoVY5SE7mZYMEh5W3T4GUKgZhIuZPL/8/OoX3HivC2q/A+BMxbJCBL/mx\n" +
+ "bsRTpRvPK8cJgf4QxgE5ylWDpamz52nuNNHxxl+Z7vm0MZ3I7Gj6Fc1pHd+ZhCCjKmIlAseq\n" +
+ "Lofz0yafH0NPd5F8T9onl9Of3ekLWjHSZqNKZVRgLqGPxcM4QMQSQ8vsme/bPABmhrMKZ1YG\n" +
+ "VBpYtJzVTnG55m3r8sFPmPNZ5tdLiLFj5WQvClnVE1Q/eU/1iOjVin9tIfDSf5O+/x6PYs4O\n" +
+ "wHayiyxiAurfPEqlzFcnPznn7R3r+L0mcwGu6YWNOduY3TwoF2NyHnaVzVQrmOQgCE0yD56X\n" +
+ "z2Ur7HwoNYi7Nzfz0CBV2gN3PLGdiYn7J6wyBa5zR5jDhMF0W+oza/+tZFPVjo73PRT4DYc3\n" +
+ "HEHjkHzDJETGdzOHPuQyTIyZGxx2BIyeReQn6oOIREtIub+Ct3KWd7CnW5wMbFaXEYHCagfj\n" +
+ "5/rl0uGBS4KsYrMEXvGCHlJcVDmelx80rBN+Vo6Yrkj8B1LOEgM1lR+9LOBXPrhf9+PRZ4Qp\n" +
+ "9HuoO7Z6x3R4JgERXrEmZUJVj/JVIaF0fjylTj88257mCRWxyO5YpfhBF2s0qLYdMvNKUAzM\n" +
+ "/NjP8oODt3TU0Xt30RTu06amfY6ZnWTX9uzaOffi2BrWrcib97frCqPdOEdPhRcqniIL5U1k\n" +
+ "YnVrSSDUQolLqs32MGdaRCkpGF1lN7YQCBRNXFf0f2KvzC4svuDfsqEnTL7R2Vcu+akAshXQ\n" +
+ "sSjEXmKd49Ky7sZZnEbmfnDaB/+4ZWAYk4gajJib5ewB2pHzp5muKInTyECgYMc2ReVD/tz/\n" +
+ "pd9NG1NzdStchS082PL8DwRJx6HvWjVo9sSd9DGIfVonx6txQ34QCTF9psQ2R0Q1LRtJLyZd\n" +
+ "Dgehsti4GCBZdAQ8dT+2sG4QxKTHwaHCp2mDNWI86eaoZ5q4m0UG+kJrKZM+bbZARbBS5Go6\n" +
+ "8cK2JiiRh465rPEh+CuWJQT12Whk89nbe5nvq+ILez8iYVj3IiBwy2FAVtidPIEgYfVI5TWD\n" +
+ "ElfmXJXR7r2bMCY7RfLR6u8JWLJEoHEGsB2DqUwWixPRaxMbvGMm6t3nwbnhqgeJFNpW+Ntc\n" +
+ "PpZF5XSFj0xTqr6M6alUd26vC7CuXc/MDe29raHZ2k95R8zEnfl/p0HiErPGGA0rA6HmY1L4\n" +
+ "m2yc4wmPwg0cW1m8T9U4bPQaXWQ19wOqrBFety/T+m+3Y/L8aGoHmQlNJpqzbw/DmorcmyjB\n" +
+ "B3EHg5pmn+AwxuQOjTolFP+mmW593LEdkuBpITZHa9mLl5Q2ts3ABIIB4IUmz0F0Z0EOYEjt\n" +
+ "lKryNdgwXjLJF5zLjcs+Rn7FuD9LWs9FRKS8hTYFxGwcnMJLbFgWobGIK8VwXUlhiuj4dlOH\n" +
+ "Llq6eerJUz96gUR5dY0pjci+uVhF9Pr0uJKeGCHJLluqJ8hvE6r0qyXJquWdMgFU480YKlAB\n" +
+ "5XXxVI2geOurRMSSoUXKOk/ZR3i41orN7/gZQPZXvZbNPSVNifbJnqhi0qy9nBsiEtV05tQ4\n" +
+ "kCBnnQmAlNgq//AnuN1H+UNjHxUvtU80yBZMsfbz0BZ6MWF/AlTXEwNnBTXpQI9hYus83AR9\n" +
+ "lht+11eNmwTEVj9VGQVk1S0OTCWe9Gv3mxrPyFGhOJ8vFtBDhpVjSZ5cFCPhGMCZxjrIbzf4\n" +
+ "xjz4fPdSnN3XpBRxuE0FW39coYHX4jNn2FhKtOljHUZjrFL91ZYYo2xdou7VgE7GfVvb7V70\n" +
+ "MiK0OsW8du1c8Iawqmb0H1cWo/GCA8TaFdjfXOWZjEfHpXJvGqW+zcYn2DN0UNYnuP4ITOd4\n" +
+ "A3OQiTaX1XV4M+vKOR1A0OzFty0IxMxcTEwSQM1JQ+zpE11DBMWf4JEo35uAmtvHXPjlyHd2\n" +
+ "YY0ohoV70z8CGMrBN6ws5zIE7n3q7klEWHds5PZMDlzoPZd2rwQIYAM4FwEheYIAAAAAAAAA\n" +
+ "AAAA";
diff --git a/comm/mailnews/db/gloda/test/unit/test_startup_offline.js b/comm/mailnews/db/gloda/test/unit/test_startup_offline.js
new file mode 100644
index 0000000000..d7ba435d76
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_startup_offline.js
@@ -0,0 +1,53 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/*
+ * Test gloda starts up with indexing suppressed when offline at startup.
+ */
+
+var messageInjection;
+
+add_setup(async function () {
+ // We must do this before the first load otherwise gloda is started without
+ // picking up the necessary initialisation.
+ Services.io.manageOfflineStatus = false;
+ Services.io.offline = true;
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Make sure that if we have to reparse a local folder we do not hang or
+ * anything. (We had a regression where we would hang.)
+ */
+add_task(async function test_gloda_offline_startup() {
+ // Set up a folder for indexing and check the message doesn't get indexed.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Now go online...
+ Services.io.offline = false;
+
+ // ...and check we have done the indexing and indexed the message.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+});
diff --git a/comm/mailnews/db/gloda/test/unit/xpcshell.ini b/comm/mailnews/db/gloda/test/unit/xpcshell.ini
new file mode 100644
index 0000000000..4efbf97583
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/xpcshell.ini
@@ -0,0 +1,38 @@
+[DEFAULT]
+head = head_gloda.js
+tail =
+support-files = base_*.js resources/*
+prefs =
+ gloda.loglevel=Debug
+
+[test_corrupt_database.js]
+[test_folder_logic.js]
+[test_fts3_tokenizer.js]
+[test_gloda_content_imap_offline.js]
+[test_gloda_content_local.js]
+[test_index_addressbook.js]
+[test_index_bad_messages.js]
+[test_index_compaction.js]
+[test_index_junk_imap_offline.js]
+[test_index_junk_imap_online.js]
+[test_index_junk_local.js]
+[test_index_messages_imap_offline.js]
+[test_index_messages_imap_online.js]
+[test_index_messages_imap_online_to_offline.js]
+[test_index_messages_local.js]
+[test_index_sweep_folder.js]
+[test_intl.js]
+[test_migration.js]
+[test_mime_attachments_size.js]
+[test_mime_emitter.js]
+[test_msg_search.js]
+[test_noun_mimetype.js]
+[test_nuke_migration.js]
+[test_nuke_migration_from_future.js]
+[test_query_core.js]
+[test_query_messages_imap_offline.js]
+[test_query_messages_imap_online.js]
+[test_query_messages_imap_online_to_offline.js]
+[test_query_messages_local.js]
+[test_smime_mimemsg_representation.js]
+[test_startup_offline.js]