summaryrefslogtreecommitdiffstats
path: root/comm/mailnews/db
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 17:32:43 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 17:32:43 +0000
commit6bf0a5cb5034a7e684dcc3500e841785237ce2dd (patch)
treea68f146d7fa01f0134297619fbe7e33db084e0aa /comm/mailnews/db
parentInitial commit. (diff)
downloadthunderbird-6bf0a5cb5034a7e684dcc3500e841785237ce2dd.tar.xz
thunderbird-6bf0a5cb5034a7e684dcc3500e841785237ce2dd.zip
Adding upstream version 1:115.7.0.upstream/1%115.7.0upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'comm/mailnews/db')
-rw-r--r--comm/mailnews/db/gloda/.project11
-rw-r--r--comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm576
-rw-r--r--comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm501
-rw-r--r--comm/mailnews/db/gloda/components/components.conf25
-rw-r--r--comm/mailnews/db/gloda/components/moz.build13
-rw-r--r--comm/mailnews/db/gloda/content/autocomplete-richlistitem.js644
-rw-r--r--comm/mailnews/db/gloda/content/glodacomplete.js466
-rw-r--r--comm/mailnews/db/gloda/jar.mn8
-rw-r--r--comm/mailnews/db/gloda/modules/Collection.jsm834
-rw-r--r--comm/mailnews/db/gloda/modules/Everybody.jsm23
-rw-r--r--comm/mailnews/db/gloda/modules/Facet.jsm599
-rw-r--r--comm/mailnews/db/gloda/modules/Gloda.jsm2275
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaConstants.jsm250
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaContent.jsm285
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaDataModel.jsm1020
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaDatabind.jsm210
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaDatastore.jsm4402
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm188
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm947
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaIndexer.jsm1491
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm310
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm361
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaPublic.jsm45
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm642
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm175
-rw-r--r--comm/mailnews/db/gloda/modules/GlodaUtils.jsm84
-rw-r--r--comm/mailnews/db/gloda/modules/IndexMsg.jsm3464
-rw-r--r--comm/mailnews/db/gloda/modules/MimeMessage.jsm821
-rw-r--r--comm/mailnews/db/gloda/modules/NounFreetag.jsm91
-rw-r--r--comm/mailnews/db/gloda/modules/NounMimetype.jsm582
-rw-r--r--comm/mailnews/db/gloda/modules/NounTag.jsm97
-rw-r--r--comm/mailnews/db/gloda/modules/SuffixTree.jsm381
-rw-r--r--comm/mailnews/db/gloda/modules/moz.build31
-rw-r--r--comm/mailnews/db/gloda/moz.build13
-rw-r--r--comm/mailnews/db/gloda/test/moz.build12
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_gloda_content.js226
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_index_junk.js217
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_index_messages.js1461
-rw-r--r--comm/mailnews/db/gloda/test/unit/base_query_messages.js729
-rw-r--r--comm/mailnews/db/gloda/test/unit/head_gloda.js19
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm431
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm847
-rw-r--r--comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm293
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_corrupt_database.js86
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_folder_logic.js60
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js299
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js34
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js31
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_addressbook.js139
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js210
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_compaction.js395
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js49
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js36
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_junk_local.js33
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js38
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js36
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js42
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_messages_local.js133
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js265
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_intl.js355
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_migration.js151
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js445
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_mime_emitter.js746
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_msg_search.js155
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js144
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_nuke_migration.js62
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js12
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_core.js658
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js37
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js38
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js40
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_query_messages_local.js33
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js894
-rw-r--r--comm/mailnews/db/gloda/test/unit/test_startup_offline.js53
-rw-r--r--comm/mailnews/db/gloda/test/unit/xpcshell.ini38
-rw-r--r--comm/mailnews/db/mork/components.conf12
-rw-r--r--comm/mailnews/db/mork/mdb.h2550
-rw-r--r--comm/mailnews/db/mork/mork.h255
-rw-r--r--comm/mailnews/db/mork/morkArray.cpp250
-rw-r--r--comm/mailnews/db/mork/morkArray.h97
-rw-r--r--comm/mailnews/db/mork/morkAtom.cpp432
-rw-r--r--comm/mailnews/db/mork/morkAtom.h362
-rw-r--r--comm/mailnews/db/mork/morkAtomMap.cpp378
-rw-r--r--comm/mailnews/db/mork/morkAtomMap.h394
-rw-r--r--comm/mailnews/db/mork/morkAtomSpace.cpp233
-rw-r--r--comm/mailnews/db/mork/morkAtomSpace.h227
-rw-r--r--comm/mailnews/db/mork/morkBead.cpp361
-rw-r--r--comm/mailnews/db/mork/morkBead.h244
-rw-r--r--comm/mailnews/db/mork/morkBlob.cpp96
-rw-r--r--comm/mailnews/db/mork/morkBlob.h140
-rw-r--r--comm/mailnews/db/mork/morkBuilder.cpp892
-rw-r--r--comm/mailnews/db/mork/morkBuilder.h303
-rw-r--r--comm/mailnews/db/mork/morkCell.cpp99
-rw-r--r--comm/mailnews/db/mork/morkCell.h91
-rw-r--r--comm/mailnews/db/mork/morkCellObject.cpp453
-rw-r--r--comm/mailnews/db/mork/morkCellObject.h180
-rw-r--r--comm/mailnews/db/mork/morkCh.cpp344
-rw-r--r--comm/mailnews/db/mork/morkCh.h125
-rw-r--r--comm/mailnews/db/mork/morkConfig.cpp173
-rw-r--r--comm/mailnews/db/mork/morkConfig.h170
-rw-r--r--comm/mailnews/db/mork/morkCursor.cpp173
-rw-r--r--comm/mailnews/db/mork/morkCursor.h134
-rw-r--r--comm/mailnews/db/mork/morkDeque.cpp246
-rw-r--r--comm/mailnews/db/mork/morkDeque.h244
-rw-r--r--comm/mailnews/db/mork/morkEnv.cpp519
-rw-r--r--comm/mailnews/db/mork/morkEnv.h221
-rw-r--r--comm/mailnews/db/mork/morkFactory.cpp521
-rw-r--r--comm/mailnews/db/mork/morkFactory.h214
-rw-r--r--comm/mailnews/db/mork/morkFile.cpp738
-rw-r--r--comm/mailnews/db/mork/morkFile.h360
-rw-r--r--comm/mailnews/db/mork/morkHandle.cpp357
-rw-r--r--comm/mailnews/db/mork/morkHandle.h183
-rw-r--r--comm/mailnews/db/mork/morkIntMap.cpp212
-rw-r--r--comm/mailnews/db/mork/morkIntMap.h144
-rw-r--r--comm/mailnews/db/mork/morkMap.cpp852
-rw-r--r--comm/mailnews/db/mork/morkMap.h379
-rw-r--r--comm/mailnews/db/mork/morkNode.cpp550
-rw-r--r--comm/mailnews/db/mork/morkNode.h290
-rw-r--r--comm/mailnews/db/mork/morkNodeMap.cpp139
-rw-r--r--comm/mailnews/db/mork/morkNodeMap.h101
-rw-r--r--comm/mailnews/db/mork/morkObject.cpp176
-rw-r--r--comm/mailnews/db/mork/morkObject.h146
-rw-r--r--comm/mailnews/db/mork/morkParser.cpp1331
-rw-r--r--comm/mailnews/db/mork/morkParser.h547
-rw-r--r--comm/mailnews/db/mork/morkPool.cpp483
-rw-r--r--comm/mailnews/db/mork/morkPool.h162
-rw-r--r--comm/mailnews/db/mork/morkPortTableCursor.cpp381
-rw-r--r--comm/mailnews/db/mork/morkPortTableCursor.h142
-rw-r--r--comm/mailnews/db/mork/morkProbeMap.cpp1107
-rw-r--r--comm/mailnews/db/mork/morkProbeMap.h423
-rw-r--r--comm/mailnews/db/mork/morkQuickSort.cpp182
-rw-r--r--comm/mailnews/db/mork/morkQuickSort.h24
-rw-r--r--comm/mailnews/db/mork/morkRow.cpp769
-rw-r--r--comm/mailnews/db/mork/morkRow.h208
-rw-r--r--comm/mailnews/db/mork/morkRowCellCursor.cpp220
-rw-r--r--comm/mailnews/db/mork/morkRowCellCursor.h118
-rw-r--r--comm/mailnews/db/mork/morkRowMap.cpp250
-rw-r--r--comm/mailnews/db/mork/morkRowMap.h228
-rw-r--r--comm/mailnews/db/mork/morkRowObject.cpp530
-rw-r--r--comm/mailnews/db/mork/morkRowObject.h204
-rw-r--r--comm/mailnews/db/mork/morkRowSpace.cpp540
-rw-r--r--comm/mailnews/db/mork/morkRowSpace.h243
-rw-r--r--comm/mailnews/db/mork/morkSearchRowCursor.cpp153
-rw-r--r--comm/mailnews/db/mork/morkSearchRowCursor.h100
-rw-r--r--comm/mailnews/db/mork/morkSink.cpp247
-rw-r--r--comm/mailnews/db/mork/morkSink.h155
-rw-r--r--comm/mailnews/db/mork/morkSpace.cpp136
-rw-r--r--comm/mailnews/db/mork/morkSpace.h108
-rw-r--r--comm/mailnews/db/mork/morkStore.cpp1981
-rw-r--r--comm/mailnews/db/mork/morkStore.h770
-rw-r--r--comm/mailnews/db/mork/morkStream.cpp790
-rw-r--r--comm/mailnews/db/mork/morkStream.h258
-rw-r--r--comm/mailnews/db/mork/morkTable.cpp1415
-rw-r--r--comm/mailnews/db/mork/morkTable.h742
-rw-r--r--comm/mailnews/db/mork/morkTableRowCursor.cpp410
-rw-r--r--comm/mailnews/db/mork/morkTableRowCursor.h150
-rw-r--r--comm/mailnews/db/mork/morkThumb.cpp455
-rw-r--r--comm/mailnews/db/mork/morkThumb.h176
-rw-r--r--comm/mailnews/db/mork/morkUniqRowCursor.h89
-rw-r--r--comm/mailnews/db/mork/morkWriter.cpp1936
-rw-r--r--comm/mailnews/db/mork/morkWriter.h340
-rw-r--r--comm/mailnews/db/mork/morkYarn.cpp70
-rw-r--r--comm/mailnews/db/mork/morkYarn.h75
-rw-r--r--comm/mailnews/db/mork/morkZone.cpp487
-rw-r--r--comm/mailnews/db/mork/morkZone.h313
-rw-r--r--comm/mailnews/db/mork/moz.build68
-rw-r--r--comm/mailnews/db/mork/nsIMdbFactoryFactory.h33
-rw-r--r--comm/mailnews/db/mork/nsMorkFactory.cpp14
-rw-r--r--comm/mailnews/db/mork/nsMorkFactory.h27
-rw-r--r--comm/mailnews/db/mork/orkinHeap.cpp72
-rw-r--r--comm/mailnews/db/mork/orkinHeap.h50
-rw-r--r--comm/mailnews/db/moz.build9
-rw-r--r--comm/mailnews/db/msgdb/.eslintrc.js5
-rw-r--r--comm/mailnews/db/msgdb/moz.build11
-rw-r--r--comm/mailnews/db/msgdb/public/moz.build25
-rw-r--r--comm/mailnews/db/msgdb/public/nsDBFolderInfo.h151
-rw-r--r--comm/mailnews/db/msgdb/public/nsIDBChangeAnnouncer.idl42
-rw-r--r--comm/mailnews/db/msgdb/public/nsIDBChangeListener.idl117
-rw-r--r--comm/mailnews/db/msgdb/public/nsIDBFolderInfo.idl94
-rw-r--r--comm/mailnews/db/msgdb/public/nsIMsgDatabase.idl506
-rw-r--r--comm/mailnews/db/msgdb/public/nsIMsgOfflineImapOperation.idl50
-rw-r--r--comm/mailnews/db/msgdb/public/nsINewsDatabase.idl18
-rw-r--r--comm/mailnews/db/msgdb/public/nsImapMailDatabase.h46
-rw-r--r--comm/mailnews/db/msgdb/public/nsMailDatabase.h62
-rw-r--r--comm/mailnews/db/msgdb/public/nsMsgDatabase.h447
-rw-r--r--comm/mailnews/db/msgdb/public/nsMsgHdr.h92
-rw-r--r--comm/mailnews/db/msgdb/public/nsMsgThread.h65
-rw-r--r--comm/mailnews/db/msgdb/public/nsNewsDatabase.h57
-rw-r--r--comm/mailnews/db/msgdb/src/components.conf44
-rw-r--r--comm/mailnews/db/msgdb/src/moz.build22
-rw-r--r--comm/mailnews/db/msgdb/src/nsDBFolderInfo.cpp749
-rw-r--r--comm/mailnews/db/msgdb/src/nsImapMailDatabase.cpp217
-rw-r--r--comm/mailnews/db/msgdb/src/nsMailDatabase.cpp380
-rw-r--r--comm/mailnews/db/msgdb/src/nsMsgDatabase.cpp4730
-rw-r--r--comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.cpp317
-rw-r--r--comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.h133
-rw-r--r--comm/mailnews/db/msgdb/src/nsMsgHdr.cpp936
-rw-r--r--comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.cpp385
-rw-r--r--comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.h52
-rw-r--r--comm/mailnews/db/msgdb/src/nsMsgThread.cpp1050
-rw-r--r--comm/mailnews/db/msgdb/src/nsNewsDatabase.cpp307
-rw-r--r--comm/mailnews/db/msgdb/test/moz.build6
-rw-r--r--comm/mailnews/db/msgdb/test/unit/head_maildb.js21
-rw-r--r--comm/mailnews/db/msgdb/test/unit/test_enumerator_cleanup.js56
-rw-r--r--comm/mailnews/db/msgdb/test/unit/test_filter_enumerator.js100
-rw-r--r--comm/mailnews/db/msgdb/test/unit/test_mailTelemetry.js38
-rw-r--r--comm/mailnews/db/msgdb/test/unit/test_maildb.js67
-rw-r--r--comm/mailnews/db/msgdb/test/unit/test_propertyEnumerator.js66
-rw-r--r--comm/mailnews/db/msgdb/test/unit/test_references_parsing.js124
-rw-r--r--comm/mailnews/db/msgdb/test/unit/xpcshell.ini10
210 files changed, 78926 insertions, 0 deletions
diff --git a/comm/mailnews/db/gloda/.project b/comm/mailnews/db/gloda/.project
new file mode 100644
index 0000000000..08f9557936
--- /dev/null
+++ b/comm/mailnews/db/gloda/.project
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>gloda</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ </buildSpec>
+ <natures>
+ </natures>
+</projectDescription>
diff --git a/comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm b/comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm
new file mode 100644
index 0000000000..98f67eadda
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/GlodaAutoComplete.jsm
@@ -0,0 +1,576 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * glautocomp.js decides which autocomplete item type to
+ * use when one enters text in global search box. There are
+ * following types of autocomplete item: gloda-contact-chunk-richlistitem,
+ * gloda-fulltext-all-richlistitem, gloda-fulltext-single-richlistitem, gloda-multi-richlistitem,
+ * gloda-single-identity-richlistitem, gloda-single-tag-richlistitem.
+ */
+
+var EXPORTED_SYMBOLS = ["GlodaAutoComplete"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+var Gloda = null;
+var MultiSuffixTree = null;
+var TagNoun = null;
+var FreeTagNoun = null;
+
+function ResultRowFullText(aItem, words, typeForStyle) {
+ this.item = aItem;
+ this.words = words;
+ this.typeForStyle = "gloda-fulltext-" + typeForStyle + "-richlistitem";
+}
+ResultRowFullText.prototype = {
+ multi: false,
+ fullText: true,
+};
+
+function ResultRowSingle(aItem, aCriteriaType, aCriteria, aExplicitNounID) {
+ this.nounID = aExplicitNounID || aItem.NOUN_ID;
+ this.nounDef = Gloda._nounIDToDef[this.nounID];
+ this.criteriaType = aCriteriaType;
+ this.criteria = aCriteria;
+ this.item = aItem;
+ this.typeForStyle = "gloda-single-" + this.nounDef.name + "-richlistitem";
+}
+ResultRowSingle.prototype = {
+ multi: false,
+ fullText: false,
+};
+
+function ResultRowMulti(aNounID, aCriteriaType, aCriteria, aQuery) {
+ this.nounID = aNounID;
+ this.nounDef = Gloda._nounIDToDef[aNounID];
+ this.criteriaType = aCriteriaType;
+ this.criteria = aCriteria;
+ this.collection = aQuery.getCollection(this);
+ this.collection.becomeExplicit();
+ this.renderer = null;
+}
+ResultRowMulti.prototype = {
+ multi: true,
+ typeForStyle: "gloda-multi-richlistitem",
+ fullText: false,
+ onItemsAdded(aItems) {
+ if (this.renderer) {
+ for (let [, item] of aItems.entries()) {
+ this.renderer.renderItem(item);
+ }
+ }
+ },
+ onItemsModified(aItems) {},
+ onItemsRemoved(aItems) {},
+ onQueryCompleted() {},
+};
+
+function nsAutoCompleteGlodaResult(aListener, aCompleter, aString) {
+ this.listener = aListener;
+ this.completer = aCompleter;
+ this.searchString = aString;
+ this._results = [];
+ this._pendingCount = 0;
+ this._problem = false;
+ // Track whether we have reported anything to the complete controller so
+ // that we know not to send notifications to it during calls to addRows
+ // prior to that point.
+ this._initiallyReported = false;
+
+ this.wrappedJSObject = this;
+}
+nsAutoCompleteGlodaResult.prototype = {
+ getObjectAt(aIndex) {
+ return this._results[aIndex] || null;
+ },
+ markPending(aCompleter) {
+ this._pendingCount++;
+ },
+ markCompleted(aCompleter) {
+ if (--this._pendingCount == 0 && this.active) {
+ this.listener.onSearchResult(this.completer, this);
+ }
+ },
+ announceYourself() {
+ this._initiallyReported = true;
+ this.listener.onSearchResult(this.completer, this);
+ },
+ addRows(aRows) {
+ if (!aRows.length) {
+ return;
+ }
+ this._results.push.apply(this._results, aRows);
+ if (this._initiallyReported && this.active) {
+ this.listener.onSearchResult(this.completer, this);
+ }
+ },
+ // ==== nsIAutoCompleteResult
+ searchString: null,
+ get searchResult() {
+ if (this._problem) {
+ return Ci.nsIAutoCompleteResult.RESULT_FAILURE;
+ }
+ if (this._results.length) {
+ return !this._pendingCount
+ ? Ci.nsIAutoCompleteResult.RESULT_SUCCESS
+ : Ci.nsIAutoCompleteResult.RESULT_SUCCESS_ONGOING;
+ }
+ return !this._pendingCount
+ ? Ci.nsIAutoCompleteResult.RESULT_NOMATCH
+ : Ci.nsIAutoCompleteResult.RESULT_NOMATCH_ONGOING;
+ },
+ active: false,
+ defaultIndex: -1,
+ errorDescription: null,
+ get matchCount() {
+ return this._results === null ? 0 : this._results.length;
+ },
+ // this is the lower text, (shows the url in firefox)
+ // we try and show the contact's name here.
+ getValueAt(aIndex) {
+ let thing = this._results[aIndex];
+ return thing.name || thing.value || thing.subject || null;
+ },
+ getLabelAt(aIndex) {
+ return this.getValueAt(aIndex);
+ },
+ // rich uses this to be the "title". it is the upper text
+ // we try and show the identity here.
+ getCommentAt(aIndex) {
+ let thing = this._results[aIndex];
+ if (thing.value) {
+ // identity
+ return thing.contact.name;
+ }
+ return thing.name || thing.subject;
+ },
+ // rich uses this to be the "type"
+ getStyleAt(aIndex) {
+ let row = this._results[aIndex];
+ return row.typeForStyle;
+ },
+ // rich uses this to be the icon
+ getImageAt(aIndex) {
+ let thing = this._results[aIndex];
+ if (!thing.value) {
+ return null;
+ }
+
+ return ""; // we don't want to use gravatars as is.
+ /*
+ let md5hash = GlodaUtils.md5HashString(thing.value);
+ let gravURL = "http://www.gravatar.com/avatar/" + md5hash +
+ "?d=identicon&s=32&r=g";
+ return gravURL;
+ */
+ },
+ getFinalCompleteValueAt(aIndex) {
+ return this.getValueAt(aIndex);
+ },
+ removeValueAt() {},
+ _stop() {},
+};
+
+var MAX_POPULAR_CONTACTS = 200;
+
+/**
+ * Complete contacts/identities based on name/email. Instant phase is based on
+ * a suffix-tree built of popular contacts/identities. Delayed phase relies
+ * on a LIKE search of all known contacts.
+ */
+function ContactIdentityCompleter() {
+ // get all the contacts
+ let contactQuery = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+ contactQuery.orderBy("-popularity").limit(MAX_POPULAR_CONTACTS);
+ this.contactCollection = contactQuery.getCollection(this, null);
+ this.contactCollection.becomeExplicit();
+}
+ContactIdentityCompleter.prototype = {
+ _popularitySorter(a, b) {
+ return b.popularity - a.popularity;
+ },
+ complete(aResult, aString) {
+ if (aString.length < 3) {
+ // In CJK, first name or last name is sometime used as 1 character only.
+ // So we allow autocompleted search even if 1 character.
+ //
+ // [U+3041 - U+9FFF ... Full-width Katakana, Hiragana
+ // and CJK Ideograph
+ // [U+AC00 - U+D7FF ... Hangul
+ // [U+F900 - U+FFDC ... CJK compatibility ideograph
+ if (!aString.match(/[\u3041-\u9fff\uac00-\ud7ff\uf900-\uffdc]/)) {
+ return false;
+ }
+ }
+
+ let matches;
+ if (this.suffixTree) {
+ matches = this.suffixTree.findMatches(aString.toLowerCase());
+ } else {
+ matches = [];
+ }
+
+ // let's filter out duplicates due to identity/contact double-hits by
+ // establishing a map based on the contact id for these guys.
+ // let's also favor identities as we do it, because that gets us the
+ // most accurate gravat, potentially
+ let contactToThing = {};
+ for (let iMatch = 0; iMatch < matches.length; iMatch++) {
+ let thing = matches[iMatch];
+ if (
+ thing.NOUN_ID == GlodaConstants.NOUN_CONTACT &&
+ !(thing.id in contactToThing)
+ ) {
+ contactToThing[thing.id] = thing;
+ } else if (thing.NOUN_ID == GlodaConstants.NOUN_IDENTITY) {
+ contactToThing[thing.contactID] = thing;
+ }
+ }
+ // and since we can now map from contacts down to identities, map contacts
+ // to the first identity for them that we find...
+ matches = Object.keys(contactToThing)
+ .map(id => contactToThing[id])
+ .map(val =>
+ val.NOUN_ID == GlodaConstants.NOUN_IDENTITY ? val : val.identities[0]
+ );
+
+ let rows = matches.map(
+ match => new ResultRowSingle(match, "text", aResult.searchString)
+ );
+ aResult.addRows(rows);
+
+ // - match against database contacts / identities
+ let pending = { contactToThing, pendingCount: 2 };
+
+ let contactQuery = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+ contactQuery.nameLike(
+ contactQuery.WILDCARD,
+ aString,
+ contactQuery.WILDCARD
+ );
+ pending.contactColl = contactQuery.getCollection(this, aResult);
+ pending.contactColl.becomeExplicit();
+
+ let identityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ identityQuery
+ .kind("email")
+ .valueLike(identityQuery.WILDCARD, aString, identityQuery.WILDCARD);
+ pending.identityColl = identityQuery.getCollection(this, aResult);
+ pending.identityColl.becomeExplicit();
+
+ aResult._contactCompleterPending = pending;
+
+ return true;
+ },
+ onItemsAdded(aItems, aCollection) {},
+ onItemsModified(aItems, aCollection) {},
+ onItemsRemoved(aItems, aCollection) {},
+ onQueryCompleted(aCollection) {
+ // handle the initial setup case...
+ if (aCollection.data == null) {
+ // cheat and explicitly add our own contact...
+ if (
+ Gloda.myContact &&
+ !(Gloda.myContact.id in this.contactCollection._idMap)
+ ) {
+ this.contactCollection._onItemsAdded([Gloda.myContact]);
+ }
+
+ // the set of identities owned by the contacts is automatically loaded as part
+ // of the contact loading...
+ // (but only if we actually have any contacts)
+ this.identityCollection =
+ this.contactCollection.subCollections[GlodaConstants.NOUN_IDENTITY];
+
+ let contactNames = this.contactCollection.items.map(
+ c => c.name.replace(" ", "").toLowerCase() || "x"
+ );
+ // if we had no contacts, we will have no identity collection!
+ let identityMails;
+ if (this.identityCollection) {
+ identityMails = this.identityCollection.items.map(i =>
+ i.value.toLowerCase()
+ );
+ }
+
+ // The suffix tree takes two parallel lists; the first contains strings
+ // while the second contains objects that correspond to those strings.
+ // In the degenerate case where identityCollection does not exist, it will
+ // be undefined. Calling concat with an argument of undefined simply
+ // duplicates the list we called concat on, and is thus harmless. Our
+ // use of && on identityCollection allows its undefined value to be
+ // passed through to concat. identityMails will likewise be undefined.
+ this.suffixTree = new MultiSuffixTree(
+ contactNames.concat(identityMails),
+ this.contactCollection.items.concat(
+ this.identityCollection && this.identityCollection.items
+ )
+ );
+
+ return;
+ }
+
+ // handle the completion case
+ let result = aCollection.data;
+ let pending = result._contactCompleterPending;
+
+ if (--pending.pendingCount == 0) {
+ let possibleDudes = [];
+
+ let contactToThing = pending.contactToThing;
+
+ let items;
+
+ // check identities first because they are better than contacts in terms
+ // of display
+ items = pending.identityColl.items;
+ for (let iIdentity = 0; iIdentity < items.length; iIdentity++) {
+ let identity = items[iIdentity];
+ if (!(identity.contactID in contactToThing)) {
+ contactToThing[identity.contactID] = identity;
+ possibleDudes.push(identity);
+ // augment the identity with its contact's popularity
+ identity.popularity = identity.contact.popularity;
+ }
+ }
+ items = pending.contactColl.items;
+ for (let iContact = 0; iContact < items.length; iContact++) {
+ let contact = items[iContact];
+ if (!(contact.id in contactToThing)) {
+ contactToThing[contact.id] = contact;
+ possibleDudes.push(contact.identities[0]);
+ }
+ }
+
+ // sort in order of descending popularity
+ possibleDudes.sort(this._popularitySorter);
+ let rows = possibleDudes.map(
+ dude => new ResultRowSingle(dude, "text", result.searchString)
+ );
+ result.addRows(rows);
+ result.markCompleted(this);
+
+ // the collections no longer care about the result, make it clear.
+ delete pending.identityColl.data;
+ delete pending.contactColl.data;
+ // the result object no longer needs us or our data
+ delete result._contactCompleterPending;
+ }
+ },
+};
+
+/**
+ * Complete tags that are used on contacts.
+ */
+function ContactTagCompleter() {
+ FreeTagNoun.populateKnownFreeTags();
+ this._buildSuffixTree();
+ FreeTagNoun.addListener(this);
+}
+ContactTagCompleter.prototype = {
+ _buildSuffixTree() {
+ let tagNames = [],
+ tags = [];
+ for (let [tagName, tag] of Object.entries(FreeTagNoun.knownFreeTags)) {
+ tagNames.push(tagName.toLowerCase());
+ tags.push(tag);
+ }
+ this._suffixTree = new MultiSuffixTree(tagNames, tags);
+ this._suffixTreeDirty = false;
+ },
+ onFreeTagAdded(aTag) {
+ this._suffixTreeDirty = true;
+ },
+ complete(aResult, aString) {
+ // now is not the best time to do this; have onFreeTagAdded use a timer.
+ if (this._suffixTreeDirty) {
+ this._buildSuffixTree();
+ }
+
+ if (aString.length < 2) {
+ // No async mechanism that will add new rows.
+ return false;
+ }
+
+ let tags = this._suffixTree.findMatches(aString.toLowerCase());
+ let rows = [];
+ for (let tag of tags) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+ query.freeTags(tag);
+ let resRow = new ResultRowMulti(
+ GlodaConstants.NOUN_CONTACT,
+ "tag",
+ tag.name,
+ query
+ );
+ rows.push(resRow);
+ }
+ aResult.addRows(rows);
+
+ return false; // no async mechanism that will add new rows
+ },
+};
+
+/**
+ * Complete tags that are used on messages
+ */
+function MessageTagCompleter() {
+ this._buildSuffixTree();
+}
+MessageTagCompleter.prototype = {
+ _buildSuffixTree() {
+ let tagNames = [],
+ tags = [];
+ let tagArray = TagNoun.getAllTags();
+ for (let iTag = 0; iTag < tagArray.length; iTag++) {
+ let tag = tagArray[iTag];
+ tagNames.push(tag.tag.toLowerCase());
+ tags.push(tag);
+ }
+ this._suffixTree = new MultiSuffixTree(tagNames, tags);
+ this._suffixTreeDirty = false;
+ },
+ complete(aResult, aString) {
+ if (aString.length < 2) {
+ return false;
+ }
+
+ let tags = this._suffixTree.findMatches(aString.toLowerCase());
+ let rows = [];
+ for (let tag of tags) {
+ let resRow = new ResultRowSingle(tag, "tag", tag.tag, TagNoun.id);
+ rows.push(resRow);
+ }
+ aResult.addRows(rows);
+
+ return false; // no async mechanism that will add new rows
+ },
+};
+
+/**
+ * Complete with helpful hints about full-text search
+ */
+function FullTextCompleter() {}
+FullTextCompleter.prototype = {
+ complete(aResult, aSearchString) {
+ if (aSearchString.length < 4) {
+ return false;
+ }
+ // We use code very similar to that in GlodaMsgSearcher.jsm, except that we
+ // need to detect when we found phrases, as well as strip commas.
+ aSearchString = aSearchString.trim();
+ let terms = [];
+ let phraseFound = false;
+ while (aSearchString) {
+ let term = "";
+ if (aSearchString.startsWith('"')) {
+ let endIndex = aSearchString.indexOf(aSearchString[0], 1);
+ // eat the quote if it has no friend
+ if (endIndex == -1) {
+ aSearchString = aSearchString.substring(1);
+ continue;
+ }
+ phraseFound = true;
+ term = aSearchString.substring(1, endIndex).trim();
+ if (term) {
+ terms.push(term);
+ }
+ aSearchString = aSearchString.substring(endIndex + 1);
+ continue;
+ }
+
+ let spaceIndex = aSearchString.indexOf(" ");
+ if (spaceIndex == -1) {
+ terms.push(aSearchString.replace(/,/g, ""));
+ break;
+ }
+
+ term = aSearchString.substring(0, spaceIndex).replace(/,/g, "");
+ if (term) {
+ terms.push(term);
+ }
+ aSearchString = aSearchString.substring(spaceIndex + 1);
+ }
+
+ if (terms.length == 1 && !phraseFound) {
+ aResult.addRows([new ResultRowFullText(aSearchString, terms, "single")]);
+ } else {
+ aResult.addRows([new ResultRowFullText(aSearchString, terms, "all")]);
+ }
+
+ return false; // no async mechanism that will add new rows
+ },
+};
+
+function GlodaAutoComplete() {
+ this.wrappedJSObject = this;
+ try {
+ // set up our awesome globals!
+ if (Gloda === null) {
+ let loadNS = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaPublic.jsm"
+ );
+ Gloda = loadNS.Gloda;
+
+ loadNS = ChromeUtils.import("resource:///modules/gloda/GlodaUtils.jsm");
+ loadNS = ChromeUtils.import("resource:///modules/gloda/SuffixTree.jsm");
+ MultiSuffixTree = loadNS.MultiSuffixTree;
+ loadNS = ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+ TagNoun = loadNS.TagNoun;
+ loadNS = ChromeUtils.import("resource:///modules/gloda/NounFreetag.jsm");
+ FreeTagNoun = loadNS.FreeTagNoun;
+ }
+
+ this.completers = [];
+ this.curResult = null;
+
+ this.completers.push(new FullTextCompleter()); // not async.
+ this.completers.push(new ContactIdentityCompleter()); // potentially async.
+ this.completers.push(new ContactTagCompleter()); // not async.
+ this.completers.push(new MessageTagCompleter()); // not async.
+ } catch (e) {
+ console.error(e);
+ }
+}
+
+GlodaAutoComplete.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsIAutoCompleteSearch"]),
+
+ startSearch(aString, aParam, aResult, aListener) {
+ try {
+ let result = new nsAutoCompleteGlodaResult(aListener, this, aString);
+ // save this for hacky access to the search. I somewhat suspect we simply
+ // should not be using the formal autocomplete mechanism at all.
+ // Used in glodacomplete.xml.
+ this.curResult = result;
+
+ // Guard against late async results being sent.
+ this.curResult.active = true;
+
+ if (aParam == "global") {
+ for (let completer of this.completers) {
+ // they will return true if they have something pending.
+ if (completer.complete(result, aString)) {
+ result.markPending(completer);
+ }
+ }
+ // } else {
+ // It'd be nice to do autocomplete in the quicksearch modes based
+ // on the specific values for that mode in the current view.
+ // But we don't do that yet.
+ }
+
+ result.announceYourself();
+ } catch (e) {
+ console.error(e);
+ }
+ },
+
+ stopSearch() {
+ this.curResult.active = false;
+ },
+};
diff --git a/comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm b/comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm
new file mode 100644
index 0000000000..0ee1737f16
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/MimeMessageEmitter.jsm
@@ -0,0 +1,501 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var EXPORTED_SYMBOLS = ["MimeMessageEmitter"];
+
+var kStateUnknown = 0;
+var kStateInHeaders = 1;
+var kStateInBody = 2;
+var kStateInAttachment = 3;
+
+/**
+ * When the saneBodySize flag is active, limit body parts to at most this many
+ * bytes. See |MsgHdrToMimeMessage| for more information on the flag.
+ *
+ * The choice of 20k was made on the very scientific basis of running a query
+ * against my indexed e-mail and finding the point where these things taper
+ * off. I chose 20 because things had tapered off pretty firmly by 16, so
+ * 20 gave it some space and it was also the end of a mini-plateau.
+ */
+var MAX_SANE_BODY_PART_SIZE = 20 * 1024;
+
+/**
+ * Custom nsIMimeEmitter to build a sub-optimal javascript representation of a
+ * MIME message. The intent is that a better mechanism than this evolves to
+ * provide a javascript-accessible representation of the message.
+ *
+ * Processing occurs in two passes. During the first pass, libmime is parsing
+ * the stream it is receiving, and generating header and body events for all
+ * MimeMessage instances it encounters. This provides us with the knowledge
+ * of each nested message in addition to the top level message, their headers
+ * and sort-of their bodies. The sort-of is that we may get more than
+ * would normally be displayed in cases involving multipart/alternatives.
+ * We have augmented libmime to have a notify_nested_options parameter which
+ * is enabled when we are the consumer. This option causes MimeMultipart to
+ * always emit a content-type header (via addHeaderField), defaulting to
+ * text/plain when an explicit value is not present. Additionally,
+ * addHeaderField is called with a custom "x-jsemitter-part-path" header with
+ * the value being the part path (ex: 1.2.2). Having the part path greatly
+ * simplifies our life for building the part hierarchy.
+ * During the second pass, the libmime object model is traversed, generating
+ * attachment notifications for all leaf nodes. From our perspective, this
+ * means file attachments and embedded messages (message/rfc822). We use this
+ * pass to create the attachment objects proper, which we then substitute into
+ * the part tree we have already built.
+ */
+function MimeMessageEmitter() {
+ this._mimeMsg = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+ );
+ this._utils = ChromeUtils.import("resource:///modules/gloda/GlodaUtils.jsm");
+
+ this._url = null;
+ this._partRE = this._utils.GlodaUtils.PART_RE;
+
+ this._outputListener = null;
+
+ this._curPart = null;
+ this._curAttachment = null;
+ this._partMap = {};
+ this._bogusPartTranslation = {};
+
+ this._state = kStateUnknown;
+
+ this._writeBody = false;
+}
+
+var deathToNewlines = /\n/g;
+
+MimeMessageEmitter.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsIMimeEmitter"]),
+
+ initialize(aUrl, aChannel, aFormat) {
+ this._url = aUrl;
+ this._curPart = new this._mimeMsg.MimeMessage();
+ // the partName is intentionally ""! not a place-holder!
+ this._curPart.partName = "";
+ this._curAttachment = "";
+ this._partMap[""] = this._curPart;
+
+ // pull options across...
+ let options = this._mimeMsg.MsgHdrToMimeMessage.OPTION_TUNNEL;
+ this._saneBodySize =
+ options && "saneBodySize" in options ? options.saneBodySize : false;
+
+ this._mimeMsg.MsgHdrToMimeMessage.RESULT_RENDEVOUZ[aUrl.spec] =
+ this._curPart;
+ },
+
+ complete() {
+ this._url = null;
+
+ this._outputListener = null;
+
+ this._curPart = null;
+ this._curAttachment = null;
+ this._partMap = null;
+ this._bogusPartTranslation = null;
+ },
+
+ setPipe(aInputStream, aOutputStream) {
+ // we do not care about these
+ },
+ set outputListener(aListener) {
+ this._outputListener = aListener;
+ },
+ get outputListener() {
+ return this._outputListener;
+ },
+
+ _stripParams(aValue) {
+ let indexSemi = aValue.indexOf(";");
+ if (indexSemi >= 0) {
+ aValue = aValue.substring(0, indexSemi);
+ }
+ return aValue;
+ },
+
+ _beginPayload(aContentType) {
+ let contentTypeNoParams = this._stripParams(aContentType).toLowerCase();
+ if (
+ contentTypeNoParams == "text/plain" ||
+ contentTypeNoParams == "text/html" ||
+ contentTypeNoParams == "text/enriched"
+ ) {
+ this._curPart = new this._mimeMsg.MimeBody(contentTypeNoParams);
+ this._writeBody = true;
+ } else if (contentTypeNoParams == "message/rfc822") {
+ // startHeader will take care of this
+ this._curPart = new this._mimeMsg.MimeMessage();
+ // do not fall through into the content-type setting case; this
+ // content-type needs to get clobbered by the actual content-type of
+ // the enclosed message.
+ this._writeBody = false;
+ return;
+ } else if (contentTypeNoParams.startsWith("multipart/")) {
+ // this is going to fall-down with TNEF encapsulation and such, we really
+ // need to just be consuming the object model.
+ this._curPart = new this._mimeMsg.MimeContainer(contentTypeNoParams);
+ this._writeBody = false;
+ } else {
+ this._curPart = new this._mimeMsg.MimeUnknown(contentTypeNoParams);
+ this._writeBody = false;
+ }
+ // put the full content-type in the headers and normalize out any newlines
+ this._curPart.headers["content-type"] = [
+ aContentType.replace(deathToNewlines, ""),
+ ];
+ },
+
+ // ----- Header Routines
+ /**
+ * StartHeader provides the base case for our processing. It is the first
+ * notification we receive when processing begins on the outer rfc822
+ * message. We do not receive an x-jsemitter-part-path notification for the
+ * message, but the aIsRootMailHeader tells us everything we need to know.
+ * (Or it would if we hadn't already set everything up in initialize.)
+ *
+ * When dealing with nested RFC822 messages, we will receive the
+ * addHeaderFields for the content-type and the x-jsemitter-part-path
+ * prior to the startHeader call. This is because the MIME multipart
+ * container that holds the message is the one generating the notification.
+ * For that reason, we do not process them here, but instead in
+ * addHeaderField and _beginPayload.
+ *
+ * We do need to track our state for addHeaderField's benefit though.
+ */
+ startHeader(aIsRootMailHeader, aIsHeaderOnly, aMsgID, aOutputCharset) {
+ this._state = kStateInHeaders;
+ },
+ /**
+ * Receives a header field name and value for the current MIME part, which
+ * can be a message/rfc822 part or one of its sub-parts.
+ *
+ * The emitter architecture treats message/rfc822 parts specially because it
+ * was architected around presentation. In that case, the organizing concept
+ * is the single top-level message/rfc822 part. (It did not 'look into'
+ * nested messages in most cases.)
+ * As a result the interface is biased towards being 'in the headers' or
+ * 'in the body', corresponding to calls to startHeader and startBody,
+ * respectively.
+ * This information is interesting to us because the message itself is an
+ * odd pseudo-mime-part. Because it has only one child, its headers are,
+ * in a way, its payload, but they also serve as the description of its
+ * MIME child part. This introduces a complication in that we see the
+ * content-type for the message's "body" part before we actually see any
+ * of the headers. To deal with this, we punt on the construction of the
+ * body part to the call to startBody() and predicate our logic on the
+ * _state field.
+ */
+ addHeaderField(aField, aValue) {
+ if (this._state == kStateInBody) {
+ aField = aField.toLowerCase();
+ if (aField == "content-type") {
+ this._beginPayload(aValue, true);
+ } else if (aField == "x-jsemitter-part-path") {
+ // This is either naming the current part, or referring to an already
+ // existing part (in the case of multipart/related on its second pass).
+ // As such, check if the name already exists in our part map.
+ let partName = this._stripParams(aValue);
+ // if it does, then make the already-existing part at that path current
+ if (partName in this._partMap) {
+ this._curPart = this._partMap[partName];
+ this._writeBody = "body" in this._curPart;
+ } else {
+ // otherwise, name the part we are holding onto and place it.
+ this._curPart.partName = partName;
+ this._placePart(this._curPart);
+ }
+ } else if (aField == "x-jsemitter-encrypted" && aValue == "1") {
+ this._curPart.isEncrypted = true;
+ }
+ // No fields other than the ones we just handled are emitted in the
+ // body case. (They were added specifically for the js
+ // emitter.)
+ } else if (this._state == kStateInHeaders) {
+ let lowerField = aField.toLowerCase();
+ if (lowerField in this._curPart.headers) {
+ this._curPart.headers[lowerField].push(aValue);
+ } else {
+ this._curPart.headers[lowerField] = [aValue];
+ }
+ }
+ },
+ addAllHeaders(aAllHeaders, aHeaderSize) {
+ // This is called by the parsing code after the calls to AddHeaderField (or
+ // AddAttachmentField if the part is an attachment), and seems to serve
+ // a specialized, quasi-redundant purpose. (nsMimeBaseEmitter creates a
+ // nsIMimeHeaders instance and hands it to the nsIMsgMailNewsUrl.)
+ // nop
+ },
+ writeHTMLHeaders(aName) {
+ // It doesn't look like this should even be part of the interface; I think
+ // only the nsMimeHtmlDisplayEmitter::EndHeader call calls this signature.
+ // nop
+ },
+ endHeader(aName) {},
+ updateCharacterSet(aCharset) {
+ // we do not need to worry about this. it turns out this notification is
+ // exclusively for the benefit of the UI. libmime, believe it or not,
+ // is actually doing the right thing under the hood and handles all the
+ // encoding issues for us.
+ // so, get ready for the only time you will ever hear this:
+ // three cheers for libmime!
+ },
+
+ /**
+ * Place a part in its proper location; requires the parent to be present.
+ * However, we no longer require in-order addition of children. (This is
+ * currently a hedge against extension code doing wacky things. Our
+ * motivating use-case is multipart/related which actually does generate
+ * everything in order on its first pass, but has a wacky second pass. It
+ * does not actually trigger the out-of-order code because we have
+ * augmented the libmime code to generate its x-jsemitter-part-path info
+ * a second time, in which case we reuse the part we already created.)
+ *
+ * @param aPart Part to place.
+ */
+ _placePart(aPart) {
+ let partName = aPart.partName;
+ this._partMap[partName] = aPart;
+
+ let [storagePartName, , parentPart] = this._findOrCreateParent(partName);
+ let lastDotIndex = storagePartName.lastIndexOf(".");
+ if (parentPart !== undefined) {
+ let indexInParent =
+ parseInt(storagePartName.substring(lastDotIndex + 1)) - 1;
+ // handle out-of-order notification...
+ if (indexInParent < parentPart.parts.length) {
+ parentPart.parts[indexInParent] = aPart;
+ } else {
+ while (indexInParent > parentPart.parts.length) {
+ parentPart.parts.push(null);
+ }
+ parentPart.parts.push(aPart);
+ }
+ }
+ },
+
+ /**
+ * In case the MIME structure is wrong, (i.e. we have no parent to add the
+ * current part to), this function recursively makes sure we create the
+ * missing bits in the hierarchy.
+ * What happens in the case of encrypted emails (mimecryp.cpp):
+ * 1. is the message
+ * 1.1 doesn't exist
+ * 1.1.1 is the multipart/alternative that holds the text/plain and text/html
+ * 1.1.1.1 is text/plain
+ * 1.1.1.2 is text/html
+ * This function fills the missing bits.
+ */
+ _findOrCreateParent(aPartName) {
+ let partName = aPartName + "";
+ let parentName = partName.substring(0, partName.lastIndexOf("."));
+ let parentPart;
+ if (parentName in this._partMap) {
+ parentPart = this._partMap[parentName];
+ let lastDotIndex = partName.lastIndexOf(".");
+ let indexInParent = parseInt(partName.substring(lastDotIndex + 1)) - 1;
+ if (
+ "parts" in parentPart &&
+ indexInParent == parentPart.parts.length - 1
+ ) {
+ return [partName, parentName, parentPart];
+ }
+ return this._findAnotherContainer(aPartName);
+ }
+
+ // Find the grandparent
+ let [, , grandParentPart] = this._findOrCreateParent(parentName);
+ // Create the missing part.
+ parentPart = new this._mimeMsg.MimeContainer("multipart/fake-container");
+ // Add it to the grandparent, remember we added it in the hierarchy.
+ grandParentPart.parts.push(parentPart);
+ this._partMap[parentName] = parentPart;
+ return [partName, parentName, parentPart];
+ },
+
+ /**
+ * In the case of UUEncoded attachments, libmime tells us about the attachment
+ * as a child of a MimeBody. This obviously doesn't make us happy, so in case
+ * libmime wants us to attach an attachment to something that's not a
+ * container, we walk up the mime tree to find a suitable container to hold
+ * the attachment.
+ * The results are cached so that they're consistent across calls — this
+ * ensures the call to _replacePart works fine.
+ */
+ _findAnotherContainer(aPartName) {
+ if (aPartName in this._bogusPartTranslation) {
+ return this._bogusPartTranslation[aPartName];
+ }
+
+ let parentName = aPartName + "";
+ let parentPart;
+ while (!(parentPart && "parts" in parentPart) && parentName.length) {
+ parentName = parentName.substring(0, parentName.lastIndexOf("."));
+ parentPart = this._partMap[parentName];
+ }
+ let childIndex = parentPart.parts.length;
+ let fallbackPartName =
+ (parentName ? parentName + "." : "") + (childIndex + 1);
+ return (this._bogusPartTranslation[aPartName] = [
+ fallbackPartName,
+ parentName,
+ parentPart,
+ ]);
+ },
+
+ /**
+ * In the case of attachments, we need to replace an existing part with a
+ * more representative part...
+ *
+ * @param aPart Part to place.
+ */
+ _replacePart(aPart) {
+ // _partMap always maps the libmime names to parts
+ let partName = aPart.partName;
+ this._partMap[partName] = aPart;
+
+ let [storagePartName, , parentPart] = this._findOrCreateParent(partName);
+
+ let childNamePart = storagePartName.substring(
+ storagePartName.lastIndexOf(".") + 1
+ );
+ let childIndex = parseInt(childNamePart) - 1;
+
+ // The attachment has been encapsulated properly in a MIME part (most of
+ // the cases). This does not hold for UUencoded-parts for instance (see
+ // test_mime_attachments_size.js for instance).
+ if (childIndex < parentPart.parts.length) {
+ let oldPart = parentPart.parts[childIndex];
+ parentPart.parts[childIndex] = aPart;
+ // copy over information from the original part
+ aPart.parts = oldPart.parts;
+ aPart.headers = oldPart.headers;
+ aPart.isEncrypted = oldPart.isEncrypted;
+ } else {
+ parentPart.parts[childIndex] = aPart;
+ }
+ },
+
+ // ----- Attachment Routines
+ // The attachment processing happens after the initial streaming phase (during
+ // which time we receive the messages, both bodies and headers). Our caller
+ // traverses the libmime child object hierarchy, emitting an attachment for
+ // each leaf object or sub-message.
+ startAttachment(aName, aContentType, aUrl, aIsExternalAttachment) {
+ this._state = kStateInAttachment;
+
+ // we need to strip our magic flags from the URL; this regexp matches all
+ // the specific flags that the jsmimeemitter understands (we abuse the URL
+ // parameters to pass information all the way to here)
+ aUrl = aUrl.replace(
+ /((header=filter|emitter=js|examineEncryptedParts=(true|false)))&?/g,
+ ""
+ );
+ // the url should contain a part= piece that tells us the part name, which
+ // we then use to figure out where to place that part if it's a real
+ // attachment.
+ let partMatch, partName;
+ if (aUrl.startsWith("http") || aUrl.startsWith("file")) {
+ // if we have a remote url, unlike non external mail part urls, it may also
+ // contain query strings starting with ?; PART_RE does not handle this.
+ partMatch = aUrl.match(/[?&]part=[^&]+$/);
+ partMatch = partMatch && partMatch[0];
+ partName = partMatch && partMatch.split("part=")[1];
+ } else {
+ partMatch = this._partRE.exec(aUrl);
+ partName = partMatch && partMatch[1];
+ }
+ this._curAttachment = partName;
+
+ if (aContentType == "message/rfc822") {
+ // we want to offer extension authors a way to see attachments as the
+ // message readers sees them, which means attaching an extra url property
+ // to the part that was already created before
+ if (partName) {
+ // we disguise this MimeMessage into something that can be used as a
+ // MimeAttachment so that it is transparent for the user code
+ this._partMap[partName].url = aUrl;
+ this._partMap[partName].isExternal = aIsExternalAttachment;
+ this._partMap[partName].name = aName;
+ }
+ } else if (partName) {
+ let part = new this._mimeMsg.MimeMessageAttachment(
+ partName,
+ aName,
+ aContentType,
+ aUrl,
+ aIsExternalAttachment
+ );
+ // replace the existing part with the attachment...
+ this._replacePart(part);
+ }
+ },
+ addAttachmentField(aField, aValue) {
+ // What gets passed in here is X-Mozilla-PartURL with a value that
+ // is completely identical to aUrl from the call to startAttachment.
+ // (it's the same variable they use in each case). As such, there is
+ // no reason to handle that here.
+ // However, we also pass information about the size of the attachment, and
+ // that we want to handle
+ if (
+ aField == "X-Mozilla-PartSize" &&
+ this._curAttachment in this._partMap
+ ) {
+ this._partMap[this._curAttachment].size = parseInt(aValue);
+ }
+ },
+ endAttachment() {
+ // don't need to do anything here, since we don't care about the headers.
+ },
+ endAllAttachments() {
+ // nop
+ },
+
+ // ----- Body Routines
+ /**
+ * We don't get an x-jsemitter-part-path for the message body, and we ignored
+ * our body part's content-type in addHeaderField, so this serves as our
+ * notice to set up the part (giving it a name).
+ */
+ startBody(aIsBodyOnly, aMsgID, aOutCharset) {
+ this._state = kStateInBody;
+
+ let subPartName =
+ this._curPart.partName == "" ? "1" : this._curPart.partName + ".1";
+ this._beginPayload(this._curPart.get("content-type", "text/plain"));
+ this._curPart.partName = subPartName;
+ this._placePart(this._curPart);
+ },
+
+ /**
+ * Write to the body. When saneBodySize is active, we stop adding if we are
+ * already at the limit for this body part.
+ */
+ writeBody(aBuf, aSize, aOutAmountWritten) {
+ if (
+ this._writeBody &&
+ (!this._saneBodySize || this._curPart.size < MAX_SANE_BODY_PART_SIZE)
+ ) {
+ this._curPart.appendBody(aBuf);
+ }
+ },
+
+ endBody() {},
+
+ // ----- Generic Write (confusing)
+ // (binary data writing...)
+ write(aBuf, aSize, aOutAmountWritten) {
+ // we don't actually ever get called because we don't have the attachment
+ // binary payloads pass through us, but we do the following just in case
+ // we did get called (otherwise the caller gets mad and throws exceptions).
+ aOutAmountWritten.value = aSize;
+ },
+
+ // (string writing)
+ utilityWrite(aBuf) {
+ this.write(aBuf, aBuf.length, {});
+ },
+};
diff --git a/comm/mailnews/db/gloda/components/components.conf b/comm/mailnews/db/gloda/components/components.conf
new file mode 100644
index 0000000000..52d2739bcd
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/components.conf
@@ -0,0 +1,25 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Classes = [
+ {
+ "cid": "{3bbe4d77-3f70-4252-9500-bc00c26f476d}",
+ "contract_ids": ["@mozilla.org/autocomplete/search;1?name=gloda"],
+ "jsm": "resource:///modules/GlodaAutoComplete.jsm",
+ "constructor": "GlodaAutoComplete",
+ },
+ {
+ "cid": "{8cddbbbc-7ced-46b0-a936-8cddd1928c24}",
+ "contract_ids": [
+ "@mozilla.org/gloda/jsmimeemitter;1",
+ ],
+ "jsm": "resource:///modules/MimeMessageEmitter.jsm",
+ "constructor": "MimeMessageEmitter",
+ "categories": {
+ "mime-emitter": "@mozilla.org/messenger/mimeemitter;1?type=application/x-js-mime-message"
+ },
+ },
+]
diff --git a/comm/mailnews/db/gloda/components/moz.build b/comm/mailnews/db/gloda/components/moz.build
new file mode 100644
index 0000000000..c2f151a815
--- /dev/null
+++ b/comm/mailnews/db/gloda/components/moz.build
@@ -0,0 +1,13 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXTRA_JS_MODULES += [
+ "GlodaAutoComplete.jsm",
+ "MimeMessageEmitter.jsm",
+]
+
+XPCOM_MANIFESTS += [
+ "components.conf",
+]
diff --git a/comm/mailnews/db/gloda/content/autocomplete-richlistitem.js b/comm/mailnews/db/gloda/content/autocomplete-richlistitem.js
new file mode 100644
index 0000000000..916c6ef5d5
--- /dev/null
+++ b/comm/mailnews/db/gloda/content/autocomplete-richlistitem.js
@@ -0,0 +1,644 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/* global MozXULElement, MozElements */
+
+// Wrap in a block to prevent leaking to window scope.
+{
+ const gGlodaCompleteStrings = Services.strings.createBundle(
+ "chrome://messenger/locale/glodaComplete.properties"
+ );
+
+ /**
+ * The MozGlodacompleteBaseRichlistitem widget is the
+ * abstract base class for all the gloda autocomplete items.
+ *
+ * @abstract
+ * @augments {MozElements.MozRichlistitem}
+ */
+ class MozGlodacompleteBaseRichlistitem extends MozElements.MozRichlistitem {
+ connectedCallback() {
+ if (this.delayConnectedCallback()) {
+ return;
+ }
+ this._boundaryCutoff = null;
+ }
+
+ get boundaryCutoff() {
+ if (!this._boundaryCutoff) {
+ this._boundaryCutoff = Services.prefs.getIntPref(
+ "toolkit.autocomplete.richBoundaryCutoff"
+ );
+ }
+ return this._boundaryCutoff;
+ }
+
+ _getBoundaryIndices(aText, aSearchTokens) {
+ // Short circuit for empty search ([""] == "")
+ if (aSearchTokens == "") {
+ return [0, aText.length];
+ }
+
+ // Find which regions of text match the search terms.
+ let regions = [];
+ for (let search of aSearchTokens) {
+ let matchIndex;
+ let startIndex = 0;
+ let searchLen = search.length;
+
+ // Find all matches of the search terms, but stop early for perf.
+ let lowerText = aText.toLowerCase().substr(0, this.boundaryCutoff);
+ while ((matchIndex = lowerText.indexOf(search, startIndex)) >= 0) {
+ // Start the next search from where this one finished.
+ startIndex = matchIndex + searchLen;
+ regions.push([matchIndex, startIndex]);
+ }
+ }
+
+ // Sort the regions by start position then end position.
+ regions = regions.sort(function (a, b) {
+ let start = a[0] - b[0];
+ return start == 0 ? a[1] - b[1] : start;
+ });
+
+ // Generate the boundary indices from each region.
+ let start = 0;
+ let end = 0;
+ let boundaries = [];
+ for (let i = 0; i < regions.length; i++) {
+ // We have a new boundary if the start of the next is past the end.
+ let region = regions[i];
+ if (region[0] > end) {
+ // First index is the beginning of match.
+ boundaries.push(start);
+ // Second index is the beginning of non-match.
+ boundaries.push(end);
+
+ // Track the new region now that we've stored the previous one.
+ start = region[0];
+ }
+
+ // Push back the end index for the current or new region.
+ end = Math.max(end, region[1]);
+ }
+
+ // Add the last region.
+ boundaries.push(start);
+ boundaries.push(end);
+
+ // Put on the end boundary if necessary.
+ if (end < aText.length) {
+ boundaries.push(aText.length);
+ }
+
+ // Skip the first item because it's always 0.
+ return boundaries.slice(1);
+ }
+
+ _getSearchTokens(aSearch) {
+ let search = aSearch.toLowerCase();
+ return search.split(/\s+/);
+ }
+
+ _needsAlternateEmphasis(aText) {
+ for (let i = aText.length - 1; i >= 0; i--) {
+ let charCode = aText.charCodeAt(i);
+ // Arabic, Syriac, Indic languages are likely to have ligatures
+ // that are broken when using the main emphasis styling.
+ if (0x0600 <= charCode && charCode <= 0x109f) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ _setUpDescription(aDescriptionElement, aText) {
+ // Get rid of all previous text.
+ while (aDescriptionElement.hasChildNodes()) {
+ aDescriptionElement.lastChild.remove();
+ }
+
+ // Get the indices that separate match and non-match text.
+ let search = this.getAttribute("text");
+ let tokens = this._getSearchTokens(search);
+ let indices = this._getBoundaryIndices(aText, tokens);
+
+ // If we're searching for something that needs alternate emphasis,
+ // we'll need to check the text that we match.
+ let checkAlt = this._needsAlternateEmphasis(search);
+
+ let next;
+ let start = 0;
+ let len = indices.length;
+ // Even indexed boundaries are matches, so skip the 0th if it's empty.
+ for (let i = indices[0] == 0 ? 1 : 0; i < len; i++) {
+ next = indices[i];
+ let text = aText.substr(start, next - start);
+ start = next;
+
+ if (i % 2 == 0) {
+ // Emphasize the text for even indices
+ let span = aDescriptionElement.appendChild(
+ document.createElementNS("http://www.w3.org/1999/xhtml", "span")
+ );
+ span.className =
+ checkAlt && this._needsAlternateEmphasis(text)
+ ? "ac-emphasize-alt"
+ : "ac-emphasize-text";
+ span.textContent = text;
+ } else {
+ // Otherwise, it's plain text
+ aDescriptionElement.appendChild(document.createTextNode(text));
+ }
+ }
+ }
+
+ _setUpOverflow(aParentBox, aEllipsis) {
+ // Hide the ellipsis in case there's just enough to not underflow.
+ aEllipsis.hidden = true;
+
+ // Start with the parent's width and subtract off its children.
+ let tooltip = [];
+ let children = aParentBox.children;
+ let widthDiff = aParentBox.getBoundingClientRect().width;
+
+ for (let i = 0; i < children.length; i++) {
+ // Only consider a child if it actually takes up space.
+ let childWidth = children[i].getBoundingClientRect().width;
+ if (childWidth > 0) {
+ // Subtract a little less to account for subpixel rounding.
+ widthDiff -= childWidth - 0.5;
+
+ // Add to the tooltip if it's not hidden and has text.
+ let childText = children[i].textContent;
+ if (childText) {
+ tooltip.push(childText);
+ }
+ }
+ }
+
+ // If the children take up more space than the parent.. overflow!
+ if (widthDiff < 0) {
+ // Re-show the ellipsis now that we know it's needed.
+ aEllipsis.hidden = false;
+
+ // Separate text components with an en-dash.
+ aParentBox.tooltipText = tooltip.join(" \u2013 ");
+ }
+ }
+
+ _doUnderflow(aName) {
+ // Hide the ellipsis right when we know we're underflowing instead of
+ // waiting for the timeout to trigger the _setUpOverflow calculations.
+ this[aName + "Box"].tooltipText = "";
+ this[aName + "OverflowEllipsis"].hidden = true;
+ }
+ }
+
+ MozXULElement.implementCustomInterface(MozGlodacompleteBaseRichlistitem, [
+ Ci.nsIDOMXULSelectControlItemElement,
+ ]);
+
+ /**
+ * The MozGlodaContactChunkRichlistitem widget displays an autocomplete item with
+ * contact chunk: e.g. image, name and description of the contact.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+ class MozGlodaContactChunkRichlistitem extends MozGlodacompleteBaseRichlistitem {
+ static get inheritedAttributes() {
+ return {
+ "description.ac-comment": "selected",
+ "label.ac-comment": "selected",
+ "description.ac-url-text": "selected",
+ "label.ac-url-text": "selected",
+ };
+ }
+
+ connectedCallback() {
+ super.connectedCallback();
+ if (this.delayConnectedCallback() || this.hasChildNodes()) {
+ return;
+ }
+ this.setAttribute("is", "gloda-contact-chunk-richlistitem");
+ this.appendChild(
+ MozXULElement.parseXULToFragment(`
+ <vbox>
+ <hbox>
+ <hbox class="ac-title"
+ flex="1"
+ onunderflow="_doUnderflow('_name');">
+ <description class="ac-normal-text ac-comment"></description>
+ </hbox>
+ <label class="ac-ellipsis-after ac-comment"
+ hidden="true"></label>
+ </hbox>
+ <hbox>
+ <hbox class="ac-url"
+ flex="1"
+ onunderflow="_doUnderflow('_identity');">
+ <description class="ac-normal-text ac-url-text"></description>
+ </hbox>
+ <label class="ac-ellipsis-after ac-url-text"
+ hidden="true"></label>
+ </hbox>
+ </vbox>
+ `)
+ );
+
+ let ellipsis = "\u2026";
+ try {
+ ellipsis = Services.prefs.getComplexValue(
+ "intl.ellipsis",
+ Ci.nsIPrefLocalizedString
+ ).data;
+ } catch (ex) {
+ // Do nothing.. we already have a default.
+ }
+
+ this._identityOverflowEllipsis = this.querySelector("label.ac-url-text");
+ this._nameOverflowEllipsis = this.querySelector("label.ac-comment");
+
+ this._identityOverflowEllipsis.value = ellipsis;
+ this._nameOverflowEllipsis.value = ellipsis;
+
+ this._identityBox = this.querySelector(".ac-url");
+ this._identity = this.querySelector("description.ac-url-text");
+
+ this._nameBox = this.querySelector(".ac-title");
+ this._name = this.querySelector("description.ac-comment");
+
+ this._adjustAcItem();
+
+ this.initializeAttributeInheritance();
+ }
+
+ get label() {
+ let identity = this.obj;
+ return identity.accessibleLabel;
+ }
+
+ _adjustAcItem() {
+ let contact = this.obj;
+
+ if (contact == null) {
+ return;
+ }
+
+ let identity = contact.identities[0];
+
+ // Emphasize the matching search terms for the description.
+ this._setUpDescription(this._name, contact.name);
+ this._setUpDescription(this._identity, identity.value);
+
+ // Set up overflow on a timeout because the contents of the box
+ // might not have a width yet even though we just changed them.
+ setTimeout(
+ this._setUpOverflow,
+ 0,
+ this._nameBox,
+ this._nameOverflowEllipsis
+ );
+ setTimeout(
+ this._setUpOverflow,
+ 0,
+ this._identityBox,
+ this._identityOverflowEllipsis
+ );
+ }
+ }
+
+ customElements.define(
+ "gloda-contact-chunk-richlistitem",
+ MozGlodaContactChunkRichlistitem,
+ {
+ extends: "richlistitem",
+ }
+ );
+
+ /**
+ * The MozGlodaFulltextAllRichlistitem widget displays an autocomplete full text of
+ * all the items: e.g. full text explanation of the item.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+ class MozGlodaFulltextAllRichlistitem extends MozGlodacompleteBaseRichlistitem {
+ connectedCallback() {
+ super.connectedCallback();
+ if (this.delayConnectedCallback() || this.hasChildNodes()) {
+ return;
+ }
+ this.setAttribute("is", "gloda-fulltext-all-richlistitem");
+ this._explanation = document.createXULElement("description");
+ this._explanation.classList.add("explanation");
+ let label = gGlodaCompleteStrings.GetStringFromName(
+ "glodaComplete.messagesMentioningMany.label"
+ );
+ this._explanation.setAttribute(
+ "value",
+ label.replace("#1", this.row.words.join(", "))
+ );
+ this.appendChild(this._explanation);
+ }
+
+ get label() {
+ return "full text search: " + this.row.item; // what is this for? l10n?
+ }
+ }
+
+ MozXULElement.implementCustomInterface(MozGlodaFulltextAllRichlistitem, [
+ Ci.nsIDOMXULSelectControlItemElement,
+ ]);
+
+ customElements.define(
+ "gloda-fulltext-all-richlistitem",
+ MozGlodaFulltextAllRichlistitem,
+ {
+ extends: "richlistitem",
+ }
+ );
+
+ /**
+ * The MozGlodaFulltextSingleRichlistitem widget displays the autocomplete
+ * full text of a single item: e.g. the full text explanation of the item.
+ *
+ * @augments MozGlodacompleteBaseRichlistitem
+ */
+ class MozGlodaFulltextSingleRichlistitem extends MozGlodacompleteBaseRichlistitem {
+ connectedCallback() {
+ super.connectedCallback();
+ if (this.delayConnectedCallback() || this.hasChildNodes()) {
+ return;
+ }
+ this.setAttribute("is", "gloda-fulltext-single-richlistitem");
+ this._explanation = document.createXULElement("description");
+ this._explanation.classList.add("explanation", "gloda-fulltext-single");
+ this._parameters = document.createXULElement("description");
+
+ this.appendChild(this._explanation);
+ this.appendChild(this._parameters);
+
+ let label = gGlodaCompleteStrings.GetStringFromName(
+ "glodaComplete.messagesMentioning.label"
+ );
+ this._explanation.setAttribute(
+ "value",
+ label.replace("#1", this.row.item)
+ );
+ }
+
+ get label() {
+ return "full text search: " + this.row.item;
+ }
+ }
+
+ MozXULElement.implementCustomInterface(MozGlodaFulltextSingleRichlistitem, [
+ Ci.nsIDOMXULSelectControlItemElement,
+ ]);
+
+ customElements.define(
+ "gloda-fulltext-single-richlistitem",
+ MozGlodaFulltextSingleRichlistitem,
+ {
+ extends: "richlistitem",
+ }
+ );
+
/**
 * The MozGlodaMultiRichlistitem widget displays an autocomplete description
 * of a collection of same-typed items, rendering one child chunk per item.
 *
 * @augments MozGlodacompleteBaseRichlistitem
 */
class MozGlodaMultiRichlistitem extends MozGlodacompleteBaseRichlistitem {
  connectedCallback() {
    super.connectedCallback();
    if (this.delayConnectedCallback() || this.hasChildNodes()) {
      return;
    }
    this.setAttribute("is", "gloda-multi-richlistitem");

    this._explanation = document.createXULElement("description");
    this._identityHolder = document.createXULElement("hbox");
    this._identityHolder.setAttribute("flex", "1");

    this.appendChild(this._explanation);
    this.appendChild(this._identityHolder);
    this._adjustAcItem();
  }

  /** @returns {string} the explanation text currently being shown. */
  get label() {
    return this._explanation.value;
  }

  /**
   * Append a chunk element representing a single result object.
   *
   * @param {object} aObj - The noun instance to render.
   */
  renderItem(aObj) {
    const node = document.createXULElement("richlistitem");
    node.obj = aObj;
    node.setAttribute(
      "type",
      "gloda-" + this.row.nounDef.name + "-chunk-richlistitem"
    );
    this._identityHolder.appendChild(node);
  }

  /**
   * Rebuild the explanation and child chunks from the current row, and
   * register this element as the row's live renderer.
   */
  _adjustAcItem() {
    // Drop any children left over from a previous row.
    while (this._identityHolder.hasChildNodes()) {
      this._identityHolder.lastChild.remove();
    }

    const row = this.row;
    if (row == null) {
      return;
    }

    this._explanation.value = `${row.nounDef.name}s ${row.criteriaType}ed ${row.criteria}`;

    // Render anything already present in the collection.
    for (const item of row.collection.items) {
      this.renderItem(item);
    }
    // Receive future additions as they stream in.
    row.renderer = this;
  }
}

MozXULElement.implementCustomInterface(MozGlodaMultiRichlistitem, [
  Ci.nsIDOMXULSelectControlItemElement,
]);

customElements.define("gloda-multi-richlistitem", MozGlodaMultiRichlistitem, {
  extends: "richlistitem",
});
+
/**
 * The MozGlodaSingleIdentityRichlistitem widget displays an autocomplete item
 * for a single identity: the contact name on one line and the identity value
 * below it, each with ellipsis handling for overflow.
 *
 * @augments MozGlodacompleteBaseRichlistitem
 */
class MozGlodaSingleIdentityRichlistitem extends MozGlodacompleteBaseRichlistitem {
  static get inheritedAttributes() {
    // Forward [selected] to the text parts so they can restyle on selection.
    return {
      "description.ac-comment": "selected",
      "label.ac-comment": "selected",
      "description.ac-url-text": "selected",
      "label.ac-url-text": "selected",
    };
  }

  connectedCallback() {
    super.connectedCallback();
    if (this.delayConnectedCallback() || this.hasChildNodes()) {
      return;
    }

    this.setAttribute("is", "gloda-single-identity-richlistitem");
    // NOTE(review): the onunderflow handlers reference _doUnderflow, which is
    // presumably provided by the base class — confirm before changing.
    this.appendChild(
      MozXULElement.parseXULToFragment(`
      <hbox class="gloda-single-identity">
        <vbox>
          <hbox>
            <hbox class="ac-title"
                  flex="1"
                  onunderflow="_doUnderflow('_name');">
              <description class="ac-normal-text ac-comment"></description>
            </hbox>
            <label class="ac-ellipsis-after ac-comment"
                   hidden="true"></label>
          </hbox>
          <hbox>
            <hbox class="ac-url"
                  flex="1"
                  onunderflow="_doUnderflow('_identity');">
              <description class="ac-normal-text ac-url-text"
                           inherits="selected"></description>
            </hbox>
            <label class="ac-ellipsis-after ac-url-text"
                   hidden="true"></label>
          </hbox>
        </vbox>
      </hbox>
      `)
    );

    // Use the locale's ellipsis character if available; fall back to U+2026.
    let ellipsis = "\u2026";
    try {
      ellipsis = Services.prefs.getComplexValue(
        "intl.ellipsis",
        Ci.nsIPrefLocalizedString
      ).data;
    } catch (ex) {
      // Do nothing.. we already have a default.
    }

    // Cache the sub-elements we update in _adjustAcItem.
    this._identityOverflowEllipsis = this.querySelector("label.ac-url-text");
    this._nameOverflowEllipsis = this.querySelector("label.ac-comment");

    this._identityOverflowEllipsis.value = ellipsis;
    this._nameOverflowEllipsis.value = ellipsis;

    this._identityBox = this.querySelector(".ac-url");
    this._identity = this.querySelector("description.ac-url-text");

    this._nameBox = this.querySelector(".ac-title");
    this._name = this.querySelector("description.ac-comment");

    this._adjustAcItem();

    this.initializeAttributeInheritance();
  }

  /** @returns {string} the identity's accessible label. */
  get label() {
    let identity = this.row.item;
    return identity.accessibleLabel;
  }

  /**
   * Populate the name/identity descriptions from the current row's identity
   * and (re)arm overflow-ellipsis handling on both boxes.
   */
  _adjustAcItem() {
    let identity = this.row.item;

    if (identity == null) {
      return;
    }

    // Emphasize the matching search terms for the description.
    this._setUpDescription(this._name, identity.contact.name);
    this._setUpDescription(this._identity, identity.value);

    // Set up overflow on a timeout because the contents of the box
    // might not have a width yet even though we just changed them.
    setTimeout(
      this._setUpOverflow,
      0,
      this._nameBox,
      this._nameOverflowEllipsis
    );
    setTimeout(
      this._setUpOverflow,
      0,
      this._identityBox,
      this._identityOverflowEllipsis
    );
  }
}

MozXULElement.implementCustomInterface(MozGlodaSingleIdentityRichlistitem, [
  Ci.nsIDOMXULSelectControlItemElement,
]);

customElements.define(
  "gloda-single-identity-richlistitem",
  MozGlodaSingleIdentityRichlistitem,
  {
    extends: "richlistitem",
  }
);
+
/**
 * The MozGlodaSingleTagRichlistitem widget displays an autocomplete item
 * explaining a single tag match ("messages tagged <tag>").
 *
 * @augments MozGlodacompleteBaseRichlistitem
 */
class MozGlodaSingleTagRichlistitem extends MozGlodacompleteBaseRichlistitem {
  connectedCallback() {
    super.connectedCallback();
    if (this.delayConnectedCallback() || this.hasChildNodes()) {
      return;
    }
    this.setAttribute("is", "gloda-single-tag-richlistitem");

    // Single description element with the localized explanation text.
    const explanation = document.createXULElement("description");
    explanation.classList.add("explanation", "gloda-single");
    this._explanation = explanation;
    this.appendChild(explanation);

    const template = gGlodaCompleteStrings.GetStringFromName(
      "glodaComplete.messagesTagged.label"
    );
    explanation.setAttribute(
      "value",
      template.replace("#1", this.row.item.tag)
    );
  }

  /** @returns {string} unlocalized accessible label for this row. */
  get label() {
    return `tag ${this.row.item.tag}`;
  }
}

MozXULElement.implementCustomInterface(MozGlodaSingleTagRichlistitem, [
  Ci.nsIDOMXULSelectControlItemElement,
]);

customElements.define(
  "gloda-single-tag-richlistitem",
  MozGlodaSingleTagRichlistitem,
  { extends: "richlistitem" }
);
+}
diff --git a/comm/mailnews/db/gloda/content/glodacomplete.js b/comm/mailnews/db/gloda/content/glodacomplete.js
new file mode 100644
index 0000000000..64578d4143
--- /dev/null
+++ b/comm/mailnews/db/gloda/content/glodacomplete.js
@@ -0,0 +1,466 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* globals MozElements, MozXULElement */
+
+"use strict";
+
+// Wrap in a block to prevent leaking to window scope.
+{
+ const MozPopupElement = MozElements.MozElementMixin(XULPopupElement);
+
+ /**
+ * The MozGlodacompleteRichResultPopup class creates the panel
+ * to append all the results for the gloda search autocomplete.
+ *
+ * @augments {MozPopupElement}
+ */
class MozGlodacompleteRichResultPopup extends MozPopupElement {
  constructor() {
    super();

    this.addEventListener("popupshowing", event => {
      // If normalMaxRows wasn't already set by the input, then set it here
      // so that we restore the correct number when the popup is hidden.

      // Null-check this.mInput; see bug 1017914
      if (this._normalMaxRows < 0 && this.mInput) {
        this._normalMaxRows = this.mInput.maxRows;
      }

      this.mPopupOpen = true;
    });

    this.addEventListener("popupshown", event => {
      // adjustHeight was deferred because the rows had no layout yet.
      if (this._adjustHeightOnPopupShown) {
        delete this._adjustHeightOnPopupShown;
        this.adjustHeight();
      }
    });

    this.addEventListener("popuphiding", event => {
      let isListActive = true;
      if (this.selectedIndex == -1) {
        isListActive = false;
      }
      this.mInput.controller.stopSearch();
      this.mPopupOpen = false;

      // Reset the maxRows property to the cached "normal" value (if there's
      // any), and reset normalMaxRows so that we can detect whether it was set
      // by the input when the popupshowing handler runs.

      // Null-check this.mInput; see bug 1017914
      if (this.mInput && this._normalMaxRows > 0) {
        this.mInput.maxRows = this._normalMaxRows;
      }
      this._normalMaxRows = -1;
      // If the list was being navigated and then closed, make sure
      // we fire accessible focus event back to textbox

      // Null-check this.mInput; see bug 1017914
      if (isListActive && this.mInput) {
        this.mInput.mIgnoreFocus = true;
        this.mInput._focus();
        this.mInput.mIgnoreFocus = false;
      }
    });

    this.attachShadow({ mode: "open" });

    // Project the light-DOM children (the richlistbox) through a single slot.
    let slot = document.createElement("slot");
    slot.part = "content";
    this.shadowRoot.appendChild(slot);
  }

  connectedCallback() {
    if (this.delayConnectedCallback()) {
      return;
    }
    this.textContent = "";

    // The autocomplete input currently driving this popup (null when idle).
    this.mInput = null;

    this.mPopupOpen = false;

    // Index of the next result row _appendCurrentResult will append.
    this._currentIndex = 0;

    /**
     * This is the default number of rows that we give the autocomplete
     * popup when the textbox doesn't have a "maxrows" attribute
     * for us to use.
     */
    this.defaultMaxRows = 6;

    /**
     * In some cases (e.g. when the input's dropmarker button is clicked),
     * the input wants to display a popup with more rows. In that case, it
     * should increase its maxRows property and store the "normal" maxRows
     * in this field. When the popup is hidden, we restore the input's
     * maxRows to the value stored in this field.
     *
     * This field is set to -1 between uses so that we can tell when it's
     * been set by the input and when we need to set it in the popupshowing
     * handler.
     */
    this._normalMaxRows = -1;

    this._previousSelectedIndex = -1;

    // Timestamp of the last processed mousemove; used for throttling below.
    this.mLastMoveTime = Date.now();

    this.mousedOverIndex = -1;

    this.richlistbox = document.createXULElement("richlistbox");
    this.richlistbox.setAttribute("flex", "1");
    this.richlistbox.classList.add("autocomplete-richlistbox");

    this.appendChild(this.richlistbox);

    if (!this.listEvents) {
      this.listEvents = {
        handleEvent: event => {
          if (!this.parentNode) {
            return;
          }

          switch (event.type) {
            case "mouseup":
              // Don't call onPopupClick for the scrollbar buttons, thumb,
              // slider, etc. If we hit the richlistbox and not a
              // richlistitem, we ignore the event.
              if (
                event.target.closest("richlistbox, richlistitem").localName ==
                "richlistitem"
              ) {
                this.onPopupClick(event);
              }
              break;
            case "mousemove":
              // Throttle mousemove handling to at most once per 30ms.
              if (Date.now() - this.mLastMoveTime <= 30) {
                return;
              }

              let item = event.target.closest("richlistbox, richlistitem");

              // If we hit the richlistbox and not a richlistitem, we ignore
              // the event.
              if (item.localName == "richlistbox") {
                return;
              }

              let index = this.richlistbox.getIndexOfItem(item);

              this.mousedOverIndex = index;

              if (item.selectedByMouseOver) {
                this.richlistbox.selectedIndex = index;
              }

              this.mLastMoveTime = Date.now();
              break;
          }
        },
      };
      this.richlistbox.addEventListener("mouseup", this.listEvents);
      this.richlistbox.addEventListener("mousemove", this.listEvents);
    }
  }

  // nsIAutoCompletePopup
  get input() {
    return this.mInput;
  }

  get overrideValue() {
    return null;
  }

  get popupOpen() {
    return this.mPopupOpen;
  }

  get maxRows() {
    return (this.mInput && this.mInput.maxRows) || this.defaultMaxRows;
  }

  set selectedIndex(val) {
    if (val != this.richlistbox.selectedIndex) {
      this._previousSelectedIndex = this.richlistbox.selectedIndex;
    }
    this.richlistbox.selectedIndex = val;
    // Since ensureElementIsVisible may cause an expensive Layout flush,
    // invoke it only if there may be a scrollbar, so if we could fetch
    // more results than we can show at once.
    // maxResults is the maximum number of fetched results, maxRows is the
    // maximum number of rows we show at once, without a scrollbar.
    if (this.mPopupOpen && this.maxResults > this.maxRows) {
      // when clearing the selection (val == -1, so selectedItem will be
      // null), we want to scroll back to the top. see bug #406194
      this.richlistbox.ensureElementIsVisible(
        this.richlistbox.selectedItem || this.richlistbox.firstElementChild
      );
    }
  }

  get selectedIndex() {
    return this.richlistbox.selectedIndex;
  }

  get maxResults() {
    // This is how many richlistitems will be kept around.
    // Note, this getter may be overridden, or instances
    // can have the nomaxresults attribute set to have no
    // limit.
    if (this.getAttribute("nomaxresults") == "true") {
      return Infinity;
    }

    return 20;
  }

  get matchCount() {
    return Math.min(this.mInput.controller.matchCount, this.maxResults);
  }

  get overflowPadding() {
    return Number(this.getAttribute("overflowpadding"));
  }

  // The controller is the view; the setter exists only to satisfy the
  // nsIAutoCompletePopup contract and intentionally does nothing.
  set view(val) {}

  get view() {
    return this.mInput.controller;
  }

  closePopup() {
    if (this.mPopupOpen) {
      this.hidePopup();
      this.style.removeProperty("--panel-width");
    }
  }

  /**
   * Compute the next selection index when moving by aAmount rows.
   *
   * @param {boolean} aReverse - True to move toward the top of the list.
   * @param {number} aAmount - Number of rows to move (1, or a page).
   * @param {number} aIndex - Current index (-1 means no selection).
   * @param {number} aMaxRow - Highest valid row index.
   * @returns {number} the new index, or -1 when wrapping off either end.
   */
  getNextIndex(aReverse, aAmount, aIndex, aMaxRow) {
    if (aMaxRow < 0) {
      return -1;
    }

    let newIdx = aIndex + (aReverse ? -1 : 1) * aAmount;
    if (
      (aReverse && aIndex == -1) ||
      (newIdx > aMaxRow && aIndex != aMaxRow)
    ) {
      newIdx = aMaxRow;
    } else if ((!aReverse && aIndex == -1) || (newIdx < 0 && aIndex != 0)) {
      newIdx = 0;
    }

    if (
      (newIdx < 0 && aIndex == 0) ||
      (newIdx > aMaxRow && aIndex == aMaxRow)
    ) {
      aIndex = -1;
    } else {
      aIndex = newIdx;
    }

    return aIndex;
  }

  onPopupClick(aEvent) {
    this.input.controller.handleEnter(true, aEvent);
  }

  onSearchBegin() {
    this.mousedOverIndex = -1;

    if (typeof this._onSearchBegin == "function") {
      this._onSearchBegin();
    }
  }

  openAutocompletePopup(aInput, aElement) {
    // until we have "baseBinding", (see bug #373652) this allows
    // us to override openAutocompletePopup(), but still call
    // the method on the base class
    this._openAutocompletePopup(aInput, aElement);
  }

  _openAutocompletePopup(aInput, aElement) {
    if (!this.mPopupOpen) {
      // It's possible that the panel is hidden initially
      // to avoid impacting startup / new window performance
      aInput.popup.hidden = false;

      this.mInput = aInput;
      // clear any previous selection, see bugs 400671 and 488357
      this.selectedIndex = -1;

      // Size the panel to the anchor element, with a 100px floor.
      let width = aElement.getBoundingClientRect().width;
      this.style.setProperty(
        "--panel-width",
        (width > 100 ? width : 100) + "px"
      );
      // invalidate() depends on the width attribute
      this._invalidate();

      this.openPopup(aElement, "after_start", 0, 0, false, false);
    }
  }

  invalidate(reason) {
    // Don't bother doing work if we're not even showing
    if (!this.mPopupOpen) {
      return;
    }

    this._invalidate(reason);
  }

  _invalidate(reason) {
    // Resize asynchronously once the new rows have been laid out.
    setTimeout(() => this.adjustHeight(), 0);

    // remove all child nodes because we never want to reuse them.
    while (this.richlistbox.hasChildNodes()) {
      this.richlistbox.lastChild.remove();
    }

    this._currentIndex = 0;
    this._appendCurrentResult();
  }

  _collapseUnusedItems() {
    let existingItemsCount = this.richlistbox.children.length;
    for (let i = this.matchCount; i < existingItemsCount; ++i) {
      let item = this.richlistbox.children[i];

      item.collapsed = true;
      if (typeof item._onCollapse == "function") {
        item._onCollapse();
      }
    }
  }

  /**
   * Size the richlistbox so that up to maxRows visible rows fit without a
   * scrollbar, collapsing any surplus row elements.
   */
  adjustHeight() {
    // Figure out how many rows to show
    let rows = this.richlistbox.children;
    let numRows = Math.min(this.matchCount, this.maxRows, rows.length);

    // Default the height to 0 if we have no rows to show
    let height = 0;
    if (numRows) {
      let firstRowRect = rows[0].getBoundingClientRect();
      if (this._rlbPadding == undefined) {
        let style = window.getComputedStyle(this.richlistbox);
        let paddingTop = parseInt(style.paddingTop) || 0;
        let paddingBottom = parseInt(style.paddingBottom) || 0;
        this._rlbPadding = paddingTop + paddingBottom;
      }

      // The class `forceHandleUnderflow` is for the item might need to
      // handle OverUnderflow or Overflow when the height of an item will
      // be changed dynamically.
      for (let i = 0; i < numRows; i++) {
        if (rows[i].classList.contains("forceHandleUnderflow")) {
          rows[i].handleOverUnderflow();
        }
      }

      let lastRowRect = rows[numRows - 1].getBoundingClientRect();
      // Calculate the height to have the first row to last row shown
      height = lastRowRect.bottom - firstRowRect.top + this._rlbPadding;
    }

    let currentHeight = this.richlistbox.getBoundingClientRect().height;
    if (height <= currentHeight) {
      this._collapseUnusedItems();
    }
    this.richlistbox.style.removeProperty("height");
    // We need to get the ceiling of the calculated value to ensure that the box fully contains
    // all of its contents and doesn't cause a scrollbar since nsIBoxObject only expects a
    // `long`. e.g. if `height` is 99.5 the richlistbox would render at height 99px with a
    // scrollbar for the extra 0.5px.
    this.richlistbox.height = Math.ceil(height);
  }

  /**
   * Append up to maxRows result rows, then reschedule itself with a
   * zero-delay timeout until all matches (capped at maxResults) are in.
   */
  _appendCurrentResult() {
    let controller = this.mInput.controller;
    let glodaCompleter = Cc[
      "@mozilla.org/autocomplete/search;1?name=gloda"
    ].getService(Ci.nsIAutoCompleteSearch).wrappedJSObject;

    // Process maxRows per chunk to improve performance and user experience
    for (let i = 0; i < this.maxRows; i++) {
      if (this._currentIndex >= this.matchCount) {
        return;
      }

      let item;

      // trim the leading/trailing whitespace
      let trimmedSearchString = controller.searchString.trim();
      let result = glodaCompleter.curResult;

      item = document.createXULElement("richlistitem", {
        is: result.getStyleAt(this._currentIndex),
      });

      // set these attributes before we set the class
      // so that we can use them from the constructor
      let row = result.getObjectAt(this._currentIndex);
      item.setAttribute("text", trimmedSearchString);
      item.setAttribute("type", result.getStyleAt(this._currentIndex));

      item.row = row;

      // set the class at the end so we can use the attributes
      // in the xbl constructor
      item.className = "autocomplete-richlistitem";
      this.richlistbox.appendChild(item);
      this._currentIndex++;
    }

    // yield after each batch of items so that typing the url bar is responsive
    setTimeout(() => this._appendCurrentResult(), 0);
  }

  /**
   * Move the selection up or down (by one row, or a page of 5 when aPage is
   * true), returning focus to the input when the selection wraps off.
   */
  selectBy(aReverse, aPage) {
    try {
      let amount = aPage ? 5 : 1;

      // because we collapsed unused items, we can't use this.richlistbox.getRowCount(), we need to use the matchCount
      this.selectedIndex = this.getNextIndex(
        aReverse,
        amount,
        this.selectedIndex,
        this.matchCount - 1
      );
      if (this.selectedIndex == -1) {
        this.input._focus();
      }
    } catch (ex) {
      // do nothing - occasionally timer-related js errors happen here
      // e.g. "this.selectedIndex has no properties", when you type fast and hit a
      // navigation key before this popup has opened
    }
  }

  disconnectedCallback() {
    if (this.listEvents) {
      this.richlistbox.removeEventListener("mouseup", this.listEvents);
      this.richlistbox.removeEventListener("mousemove", this.listEvents);
      delete this.listEvents;
    }
  }
}
+
+ MozXULElement.implementCustomInterface(MozGlodacompleteRichResultPopup, [
+ Ci.nsIAutoCompletePopup,
+ ]);
+ customElements.define(
+ "glodacomplete-rich-result-popup",
+ MozGlodacompleteRichResultPopup,
+ { extends: "panel" }
+ );
+}
diff --git a/comm/mailnews/db/gloda/jar.mn b/comm/mailnews/db/gloda/jar.mn
new file mode 100644
index 0000000000..6dbf20d9c3
--- /dev/null
+++ b/comm/mailnews/db/gloda/jar.mn
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+gloda.jar:
+% content gloda %content/
+ content/glodacomplete.js (content/glodacomplete.js)
+ content/autocomplete-richlistitem.js (content/autocomplete-richlistitem.js)
diff --git a/comm/mailnews/db/gloda/modules/Collection.jsm b/comm/mailnews/db/gloda/modules/Collection.jsm
new file mode 100644
index 0000000000..e229161fc9
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Collection.jsm
@@ -0,0 +1,834 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
const EXPORTED_SYMBOLS = ["GlodaCollection", "GlodaCollectionManager"];

// Console logger for collection bookkeeping; verbosity is controlled by the
// "gloda.loglevel" pref and defaults to warnings only.
var LOG = console.createInstance({
  prefix: "gloda.collection",
  maxLogLevel: "Warn",
  maxLogLevelPref: "gloda.loglevel",
});
+
+/**
+ * @namespace Central registry and logic for all collections.
+ *
+ * The collection manager is a singleton that has the following tasks:
+ * - Let views of objects (nouns) know when their objects have changed. For
+ * example, an attribute has changed due to user action.
+ * - Let views of objects based on queries know when new objects match their
+ * query, or when their existing objects no longer match due to changes.
+ * - Caching/object-identity maintenance. It is ideal if we only ever have
+ * one instance of an object at a time. (More specifically, only one instance
+ * per database row 'id'.) The collection mechanism lets us find existing
+ * instances to this end. Caching can be directly integrated by being treated
+ * as a special collection.
+ */
+var GlodaCollectionManager = {
+ _collectionsByNoun: {},
+ _cachesByNoun: {},
+
+ /**
+ * Registers the existence of a collection with the collection manager. This
+ * is done using a weak reference so that the collection can go away if it
+ * wants to.
+ */
+ registerCollection(aCollection) {
+ let collections;
+ let nounID = aCollection.query._nounDef.id;
+ if (!(nounID in this._collectionsByNoun)) {
+ collections = this._collectionsByNoun[nounID] = [];
+ } else {
+ // purge dead weak references while we're at it
+ collections = this._collectionsByNoun[nounID].filter(aRef => aRef.get());
+ this._collectionsByNoun[nounID] = collections;
+ }
+ collections.push(Cu.getWeakReference(aCollection));
+ },
+
+ getCollectionsForNounID(aNounID) {
+ if (!(aNounID in this._collectionsByNoun)) {
+ return [];
+ }
+
+ // generator would be nice, but I suspect get() is too expensive to use
+ // twice (guard/predicate and value)
+ let weakCollections = this._collectionsByNoun[aNounID];
+ let collections = [];
+ for (let iColl = 0; iColl < weakCollections.length; iColl++) {
+ let collection = weakCollections[iColl].get();
+ if (collection) {
+ collections.push(collection);
+ }
+ }
+ return collections;
+ },
+
+ defineCache(aNounDef, aCacheSize) {
+ this._cachesByNoun[aNounDef.id] = new GlodaLRUCacheCollection(
+ aNounDef,
+ aCacheSize
+ );
+ },
+
+ /**
+ * Attempt to locate an instance of the object of the given noun type with the
+ * given id. Counts as a cache hit if found. (And if it wasn't in a cache,
+ * but rather a collection, it is added to the cache.)
+ */
  cacheLookupOne(aNounID, aID, aDoCache) {
    let cache = this._cachesByNoun[aNounID];

    // Fast path: the dedicated LRU cache registered for this noun type.
    if (cache) {
      if (aID in cache._idMap) {
        let item = cache._idMap[aID];
        return cache.hit(item);
      }
    }

    // Passing aDoCache === false suppresses promotion of collection hits
    // into the cache; any other value (including undefined) allows it.
    if (aDoCache === false) {
      cache = null;
    }

    // Slow path: scan the live query collections for the id.
    for (let collection of this.getCollectionsForNounID(aNounID)) {
      if (aID in collection._idMap) {
        let item = collection._idMap[aID];
        if (cache) {
          cache.add([item]);
        }
        return item;
      }
    }

    LOG.debug("cacheLookupOne:\nhit null");
    return null;
  },
+
+ /**
+ * Lookup multiple nouns by ID from the cache/existing collections.
+ *
+ * @param aNounID The kind of noun identified by its ID.
+ * @param aIDMap A dictionary/map whose keys must be gloda noun ids for the
+ * given noun type and whose values are ignored.
+ * @param aTargetMap An object to hold the noun id's (key) and noun instances
+ * (value) for the noun instances that were found available in memory
+ * because they were cached or in existing query collections.
+ * @param [aDoCache=true] Should we add any items to the cache that we found
+ * in collections that were in memory but not in the cache? You would
+ * likely want to pass false if you are only updating in-memory
+ * representations rather than performing a new query.
+ *
+ * @returns [The number that were found, the number that were not found,
+ * a dictionary whose keys are the ids of noun instances that
+ * were not found.]
+ */
+ cacheLookupMany(aNounID, aIDMap, aTargetMap, aDoCache) {
+ let foundCount = 0,
+ notFoundCount = 0,
+ notFound = {};
+
+ let cache = this._cachesByNoun[aNounID];
+
+ if (cache) {
+ for (let key in aIDMap) {
+ let cacheValue = cache._idMap[key];
+ if (cacheValue === undefined) {
+ notFoundCount++;
+ notFound[key] = null;
+ } else {
+ foundCount++;
+ aTargetMap[key] = cacheValue;
+ cache.hit(cacheValue);
+ }
+ }
+ }
+
+ if (aDoCache === false) {
+ cache = null;
+ }
+
+ for (let collection of this.getCollectionsForNounID(aNounID)) {
+ for (let key in notFound) {
+ let collValue = collection._idMap[key];
+ if (collValue !== undefined) {
+ aTargetMap[key] = collValue;
+ delete notFound[key];
+ foundCount++;
+ notFoundCount--;
+ if (cache) {
+ cache.add([collValue]);
+ }
+ }
+ }
+ }
+
+ return [foundCount, notFoundCount, notFound];
+ },
+
+ /**
+ * Friendlier version of |cacheLookupMany|; takes a list of ids and returns
+ * an object whose keys and values are the gloda id's and instances of the
+ * instances that were found. We don't tell you who we didn't find. The
+ * assumption is this is being used for in-memory updates where we only need
+ * to tweak what is in memory.
+ */
+ cacheLookupManyList(aNounID, aIds) {
+ let checkMap = {},
+ targetMap = {};
+ for (let id of aIds) {
+ checkMap[id] = null;
+ }
+ // do not promote found items into the cache
+ this.cacheLookupMany(aNounID, checkMap, targetMap, false);
+ return targetMap;
+ },
+
+ /**
+ * Attempt to locate an instance of the object of the given noun type with the
+ * given id. Counts as a cache hit if found. (And if it wasn't in a cache,
+ * but rather a collection, it is added to the cache.)
+ */
+ cacheLookupOneByUniqueValue(aNounID, aUniqueValue, aDoCache) {
+ let cache = this._cachesByNoun[aNounID];
+
+ if (cache) {
+ if (aUniqueValue in cache._uniqueValueMap) {
+ let item = cache._uniqueValueMap[aUniqueValue];
+ return cache.hit(item);
+ }
+ }
+
+ if (aDoCache === false) {
+ cache = null;
+ }
+
+ for (let collection of this.getCollectionsForNounID(aNounID)) {
+ if (aUniqueValue in collection._uniqueValueMap) {
+ let item = collection._uniqueValueMap[aUniqueValue];
+ if (cache) {
+ cache.add([item]);
+ }
+ return item;
+ }
+ }
+
+ return null;
+ },
+
+ /**
+ * Checks whether the provided item with the given id is actually a duplicate
+ * of an instance that already exists in the cache/a collection. If it is,
+ * the pre-existing instance is returned and counts as a cache hit. If it
+ * is not, the passed-in instance is added to the cache and returned.
+ */
+ cacheLoadUnifyOne(aItem) {
+ let items = [aItem];
+ this.cacheLoadUnify(aItem.NOUN_ID, items);
+ return items[0];
+ },
+
+ /**
+ * Given a list of items, check if any of them already have duplicate,
+ * canonical, instances in the cache or collections. Items with pre-existing
+ * instances are replaced by those instances in the provided list, and each
+ * counts as a cache hit. Items without pre-existing instances are added
+ * to the cache and left intact.
+ */
+ cacheLoadUnify(aNounID, aItems, aCacheIfMissing) {
+ let cache = this._cachesByNoun[aNounID];
+ if (aCacheIfMissing === undefined) {
+ aCacheIfMissing = true;
+ }
+
+ // track the items we haven't yet found in a cache/collection (value) and
+ // their index in aItems (key). We're somewhat abusing the dictionary
+ // metaphor with the intent of storing tuples here. We also do it because
+ // it allows random-access deletion theoretically without cost. (Since
+ // we delete during iteration, that may be wrong, but it sounds like the
+ // semantics still work?)
+ let unresolvedIndexToItem = {};
+ let numUnresolved = 0;
+
+ if (cache) {
+ for (let iItem = 0; iItem < aItems.length; iItem++) {
+ let item = aItems[iItem];
+
+ if (item.id in cache._idMap) {
+ let realItem = cache._idMap[item.id];
+ // update the caller's array with the reference to the 'real' item
+ aItems[iItem] = realItem;
+ cache.hit(realItem);
+ } else {
+ unresolvedIndexToItem[iItem] = item;
+ numUnresolved++;
+ }
+ }
+
+ // we're done if everyone was a hit.
+ if (numUnresolved == 0) {
+ return;
+ }
+ } else {
+ for (let iItem = 0; iItem < aItems.length; iItem++) {
+ unresolvedIndexToItem[iItem] = aItems[iItem];
+ }
+ numUnresolved = aItems.length;
+ }
+
+ let needToCache = [];
+ // next, let's fall back to our collections
+ for (let collection of this.getCollectionsForNounID(aNounID)) {
+ for (let [iItem, item] of Object.entries(unresolvedIndexToItem)) {
+ if (item.id in collection._idMap) {
+ let realItem = collection._idMap[item.id];
+ // update the caller's array to now have the 'real' object
+ aItems[iItem] = realItem;
+ // flag that we need to cache this guy (we use an inclusive cache)
+ needToCache.push(realItem);
+ // we no longer need to resolve this item...
+ delete unresolvedIndexToItem[iItem];
+ // stop checking collections if we got everybody
+ if (--numUnresolved == 0) {
+ break;
+ }
+ }
+ }
+ }
+
+ // anything left in unresolvedIndexToItem should be added to the cache
+ // unless !aCacheIfMissing. plus, we already have 'needToCache'
+ if (cache && aCacheIfMissing) {
+ cache.add(
+ needToCache.concat(
+ Object.keys(unresolvedIndexToItem).map(
+ key => unresolvedIndexToItem[key]
+ )
+ )
+ );
+ }
+ },
+
+ cacheCommitDirty() {
+ for (let id in this._cachesByNoun) {
+ let cache = this._cachesByNoun[id];
+ cache.commitDirty();
+ }
+ },
+
+ /**
+ * Notifies the collection manager that an item has been loaded and should
+ * be cached, assuming caching is active.
+ */
+ itemLoaded(aItem) {
+ let cache = this._cachesByNoun[aItem.NOUN_ID];
+ if (cache) {
+ cache.add([aItem]);
+ }
+ },
+
+ /**
+ * Notifies the collection manager that multiple items has been loaded and
+ * should be cached, assuming caching is active.
+ */
+ itemsLoaded(aNounID, aItems) {
+ let cache = this._cachesByNoun[aNounID];
+ if (cache) {
+ cache.add(aItems);
+ }
+ },
+
+ /**
+ * This should be called when items are added to the global database. This
+ * should generally mean during indexing by indexers or an attribute
+ * provider.
+ * We walk all existing collections for the given noun type and add the items
+ * to the collection if the item meets the query that defines the collection.
+ */
+ itemsAdded(aNounID, aItems) {
+ let cache = this._cachesByNoun[aNounID];
+ if (cache) {
+ cache.add(aItems);
+ }
+
+ for (let collection of this.getCollectionsForNounID(aNounID)) {
+ let addItems = aItems.filter(item => collection.query.test(item));
+ if (addItems.length) {
+ collection._onItemsAdded(addItems);
+ }
+ }
+ },
+ /**
+ * This should be called when items in the global database are modified. For
+ * example, as a result of indexing. This should generally only be called
+ * by indexers or by attribute providers.
+ * We walk all existing collections for the given noun type. For items
+ * currently included in each collection but should no longer be (per the
+ * collection's defining query) we generate onItemsRemoved events. For items
+ * not currently included in the collection but should now be, we generate
+ * onItemsAdded events. For items included that still match the query, we
+ * generate onItemsModified events.
+ */
+ itemsModified(aNounID, aItems) {
+ for (let collection of this.getCollectionsForNounID(aNounID)) {
+ let added = [],
+ modified = [],
+ removed = [];
+ for (let item of aItems) {
+ if (item.id in collection._idMap) {
+ // currently in... but should it still be there?
+ if (collection.query.test(item)) {
+ modified.push(item); // yes, keep it
+ } else if (!collection.query.frozen) {
+ // oy, so null queries really don't want any notifications, and they
+ // sorta fit into our existing model, except for the removal bit.
+ // so we need a specialized check for them, and we're using the
+ // frozen attribute to this end.
+ removed.push(item); // no, bin it
+ }
+ } else if (collection.query.test(item)) {
+ // not in, should it be?
+ added.push(item); // yep, add it
+ }
+ }
+ if (added.length) {
+ collection._onItemsAdded(added);
+ }
+ if (modified.length) {
+ collection._onItemsModified(modified);
+ }
+ if (removed.length) {
+ collection._onItemsRemoved(removed);
+ }
+ }
+ },
+ /**
+ * This should be called when items in the global database are permanently-ish
+ * deleted. (This is distinct from concepts like message deletion which may
+ * involved trash folders or other modified forms of existence. Deleted
+ * means the data is gone and if it were to come back, it would come back
+ * via an itemsAdded event.)
+ * We walk all existing collections for the given noun type. For items
+ * currently in the collection, we generate onItemsRemoved events.
+ *
+ * @param aItemIds A list of item ids that are being deleted.
+ */
+ itemsDeleted(aNounID, aItemIds) {
+ // cache
+ let cache = this._cachesByNoun[aNounID];
+ if (cache) {
+ for (let itemId of aItemIds) {
+ if (itemId in cache._idMap) {
+ cache.deleted(cache._idMap[itemId]);
+ }
+ }
+ }
+
+ // collections
+ for (let collection of this.getCollectionsForNounID(aNounID)) {
+ let removeItems = aItemIds
+ .filter(itemId => itemId in collection._idMap)
+ .map(itemId => collection._idMap[itemId]);
+ if (removeItems.length) {
+ collection._onItemsRemoved(removeItems);
+ }
+ }
+ },
+ /**
+ * Like |itemsDeleted| but for the case where the deletion is based on an
+ * attribute that SQLite can more efficiently check than we can and where the
+ * cost of scanning the in-memory items is presumably much cheaper than
+ * trying to figure out what actually got deleted.
+ *
+ * Since we are doing an in-memory walk, this is obviously O(n) where n is the
+ * number of noun instances of a given type in-memory. We are assuming this
+ * is a reasonable number of things and that this type of deletion call is
+ * not going to happen all that frequently. If these assumptions are wrong,
+ * callers are advised to re-think the whole situation.
+ *
+ * @param aNounID Type of noun we are talking about here.
+ * @param aFilter A filter function that returns true when the item should be
+ * thought of as deleted, or false if the item is still good. Screw this
+ * up and you will get some seriously wacky bugs, yo.
+ */
+ itemsDeletedByAttribute(aNounID, aFilter) {
+ // cache
+ let cache = this._cachesByNoun[aNounID];
+ if (cache) {
+ for (let id in cache._idMap) {
+ let item = cache._idMap[id];
+ if (aFilter(item)) {
+ cache.deleted(item);
+ }
+ }
+ }
+
+ // collections
+ for (let collection of this.getCollectionsForNounID(aNounID)) {
+ let removeItems = collection.items.filter(aFilter);
+ if (removeItems.length) {
+ collection._onItemsRemoved(removeItems);
+ }
+ }
+ },
+};
+
/**
 * A live view of the set of first-class noun instances matching a query.
 * While a listener is attached, events are generated when new objects start
 * matching the query (onItemsAdded), when members stop matching it
 * (onItemsRemoved), or when members change in ways that do not affect their
 * membership but that the listener may care about (onItemsModified).
 *
 * @class
 */
function GlodaCollection(
  aNounDef,
  aItems,
  aQuery,
  aListener,
  aMasterCollection
) {
  // An undefined noun definition means we are only being invoked to build a
  // subclass prototype; leave the instance empty.
  if (aNounDef === undefined) {
    return;
  }

  this._nounDef = aNounDef;
  // Nouns with a unique value get a second lookup map keyed on that value.
  if (this._nounDef.usesUniqueValue) {
    this._uniqueValueMap = {};
  }

  this.pendingItems = [];
  this._pendingIdMap = {};
  this.items = [];
  this._idMap = {};

  // Load the initial items with the listener forced to null so the initial
  // load-out generates no events.
  this._listener = null;
  if (aItems?.length) {
    this._onItemsAdded(aItems);
  }

  this.query = aQuery || null;
  if (this.query) {
    this.query.collection = this;
    if (this.query.options.stashColumns) {
      this.stashedColumns = {};
    }
  }
  this._listener = aListener || null;

  this.deferredCount = 0;
  this.resolvedCount = 0;

  if (aMasterCollection) {
    this.masterCollection = aMasterCollection.masterCollection;
  } else {
    this.masterCollection = this;
    /**
     * A dictionary of dictionaries: noun ID -> (ID of a desired noun
     * instance -> the actual instance, or null if it has not yet been
     * loaded).
     */
    this.referencesByNounID = {};
    /**
     * A dictionary of dictionaries: noun ID -> (ID of the _recognized
     * parent noun_ -> the list of children, or null while unpopulated).
     * E.g. for a parent noun A (ID 1) whose instance A(1) has children
     * B(10), B(11) of noun B (ID 2): {2: {1: [10, 11]}}.
     */
    this.inverseReferencesByNounID = {};
    this.subCollections = {};
  }
}
+
GlodaCollection.prototype = {
  /** The listener notified of item/query events, or null for none. */
  get listener() {
    return this._listener;
  },
  set listener(aListener) {
    this._listener = aListener;
  },

  /**
   * If this collection still has a query associated with it, drop the query
   * and replace it with an 'explicit query'.  This means that the Collection
   * Manager will not attempt to match new items indexed to the system against
   * our query criteria.
   * Once you call this method, your collection's listener will no longer
   * receive onItemsAdded notifications that are not the result of your
   * initial database query.  It will, however, receive onItemsModified
   * notifications if items in the collection are re-indexed.
   */
  becomeExplicit() {
    if (!(this.query instanceof this._nounDef.explicitQueryClass)) {
      this.query = new this._nounDef.explicitQueryClass(this);
    }
  },

  /**
   * Clear the contents of this collection.  This only makes sense for
   * explicit collections or wildcard collections.  (Actual query-based
   * collections should represent the state of the query, so unless we're
   * going to delete all the items, clearing the collection would violate
   * that constraint.)
   */
  clear() {
    this._idMap = {};
    if (this._uniqueValueMap) {
      this._uniqueValueMap = {};
    }
    this.items = [];
  },

  /**
   * Append aItems to the collection, index them in the id map (and, where
   * the noun uses one, the unique-value map), and notify the listener.
   */
  _onItemsAdded(aItems) {
    this.items.push.apply(this.items, aItems);
    // Only index the newly added items.  (Iterating this.items here, as this
    // code previously did, re-indexed every existing member on each call,
    // making N additions O(N^2) with no change in the resulting maps.)
    if (this._uniqueValueMap) {
      for (let item of aItems) {
        this._idMap[item.id] = item;
        this._uniqueValueMap[item.uniqueValue] = item;
      }
    } else {
      for (let item of aItems) {
        this._idMap[item.id] = item;
      }
    }
    if (this._listener) {
      try {
        this._listener.onItemsAdded(aItems, this);
      } catch (ex) {
        // Listener exceptions must not break collection bookkeeping.
        LOG.error(
          "caught exception from listener in onItemsAdded: " +
            ex.fileName +
            ":" +
            ex.lineNumber +
            ": " +
            ex
        );
      }
    }
  },

  /** Notify the listener that aItems (already members) were modified. */
  _onItemsModified(aItems) {
    if (this._listener) {
      try {
        this._listener.onItemsModified(aItems, this);
      } catch (ex) {
        LOG.error(
          "caught exception from listener in onItemsModified: " +
            ex.fileName +
            ":" +
            ex.lineNumber +
            ": " +
            ex
        );
      }
    }
  },

  /**
   * Given a list of items that definitely no longer belong in this collection,
   * remove them from the collection and notify the listener.  The 'tricky'
   * part is that we need to remove the deleted items from our list of items.
   */
  _onItemsRemoved(aItems) {
    // we want to avoid the O(n^2) deletion performance case, and deletion
    // should be rare enough that the extra cost of building the deletion map
    // should never be a real problem.
    let deleteMap = {};
    // build the delete map while also nuking from our id map/unique value map
    for (let item of aItems) {
      deleteMap[item.id] = true;
      delete this._idMap[item.id];
      if (this._uniqueValueMap) {
        delete this._uniqueValueMap[item.uniqueValue];
      }
    }
    let items = this.items;
    // in-place filter. probably needless optimization.
    let iWrite = 0;
    for (let iRead = 0; iRead < items.length; iRead++) {
      let item = items[iRead];
      if (!(item.id in deleteMap)) {
        items[iWrite++] = item;
      }
    }
    // Truncate the tail left over by the in-place compaction.
    items.splice(iWrite);

    if (this._listener) {
      try {
        this._listener.onItemsRemoved(aItems, this);
      } catch (ex) {
        LOG.error(
          "caught exception from listener in onItemsRemoved: " +
            ex.fileName +
            ":" +
            ex.lineNumber +
            ": " +
            ex
        );
      }
    }
  },

  /** Mark the query complete and forward onQueryCompleted to the listener. */
  _onQueryCompleted() {
    this.query.completed = true;
    if (this._listener && this._listener.onQueryCompleted) {
      this._listener.onQueryCompleted(this);
    }
  },
};
+
/**
 * Create an LRU cache collection for the given noun with the given size.
 *
 * @class
 */
function GlodaLRUCacheCollection(aNounDef, aCacheSize) {
  GlodaCollection.call(this, aNounDef, null, null, null);

  this._head = null; // oldest entry
  this._tail = null; // newest entry
  this._size = 0;
  // Clamp tiny sizes to keep things sane and the eviction logic simple.
  this._maxCacheSize = Math.max(32, aCacheSize);
}
/**
 * @class A LRU-discard cache.  We use a doubly linked-list for the eviction
 * tracking.  Since we require that there is at most one LRU-discard cache per
 * noun class, we simplify our lives by adding our own attributes to the
 * cached objects.
 * @augments GlodaCollection
 */
GlodaLRUCacheCollection.prototype = new GlodaCollection();
/**
 * Add items to the cache.  Each new item is appended at the tail (newest end)
 * of the LRU list; once all items are in, we evict from the head (oldest end)
 * until we are back within _maxCacheSize, flushing dirty evictees to disk.
 * Items already present are skipped (see DEBUGME below).
 */
GlodaLRUCacheCollection.prototype.add = function (aItems) {
  for (let item of aItems) {
    if (item.id in this._idMap) {
      // DEBUGME so, we're dealing with this, but it shouldn't happen. need
      // trace-debuggage.
      continue;
    }
    this._idMap[item.id] = item;
    if (this._uniqueValueMap) {
      this._uniqueValueMap[item.uniqueValue] = item;
    }

    // Link the item in as the new tail of the doubly-linked LRU list.
    item._lruPrev = this._tail;
    // we do have to make sure that we will set _head the first time we insert
    // something
    if (this._tail !== null) {
      this._tail._lruNext = item;
    } else {
      this._head = item;
    }
    item._lruNext = null;
    this._tail = item;

    this._size++;
  }

  // Evict oldest entries until we are within the size budget.  (Since
  // _maxCacheSize is at least 32 and eviction only runs while over budget,
  // the list can never empty out here.)
  while (this._size > this._maxCacheSize) {
    let item = this._head;

    // we never have to deal with the possibility of needing to make _head/_tail
    // null.
    this._head = item._lruNext;
    this._head._lruPrev = null;
    // (because we are nice, we will delete the properties...)
    delete item._lruNext;
    delete item._lruPrev;

    // nuke from our id map
    delete this._idMap[item.id];
    if (this._uniqueValueMap) {
      delete this._uniqueValueMap[item.uniqueValue];
    }

    // flush dirty items to disk (they may not have this attribute, in which
    // case, this returns false, which is fine.)
    if (item.dirty) {
      this._nounDef.objUpdate.call(this._nounDef.datastore, item);
      delete item.dirty;
    }

    this._size--;
  }
};
+
/**
 * Record a cache hit for aItem: move it to the tail (newest end) of the LRU
 * list so it becomes the last candidate for eviction.
 *
 * @param aItem An item currently linked into this cache's LRU list.
 * @returns aItem, for caller convenience.
 */
GlodaLRUCacheCollection.prototype.hit = function (aItem) {
  // don't do anything in the 0 or 1 items case, or if we're already
  // the last item
  if (this._head === this._tail || this._tail === aItem) {
    return aItem;
  }

  // - unlink the item
  if (aItem._lruPrev !== null) {
    aItem._lruPrev._lruNext = aItem._lruNext;
  } else {
    // aItem was the head; its successor becomes the new head.
    this._head = aItem._lruNext;
  }
  // (_lruNext cannot be null)
  aItem._lruNext._lruPrev = aItem._lruPrev;
  // - link it in to the end
  this._tail._lruNext = aItem;
  aItem._lruPrev = this._tail;
  aItem._lruNext = null;
  // update tail tracking
  this._tail = aItem;

  return aItem;
};
+
/**
 * Remove aItem from the cache entirely: unlink it from the LRU list (fixing
 * up _head/_tail as needed), strip our LRU bookkeeping attributes from it,
 * and drop it from the id/unique-value maps.
 *
 * @param aItem An item currently linked into this cache's LRU list.
 */
GlodaLRUCacheCollection.prototype.deleted = function (aItem) {
  // unlink the item
  if (aItem._lruPrev !== null) {
    aItem._lruPrev._lruNext = aItem._lruNext;
  } else {
    // aItem was the head.
    this._head = aItem._lruNext;
  }
  if (aItem._lruNext !== null) {
    aItem._lruNext._lruPrev = aItem._lruPrev;
  } else {
    // aItem was the tail.
    this._tail = aItem._lruPrev;
  }

  // (because we are nice, we will delete the properties...)
  delete aItem._lruNext;
  delete aItem._lruPrev;

  // nuke from our id map
  delete this._idMap[aItem.id];
  if (this._uniqueValueMap) {
    delete this._uniqueValueMap[aItem.uniqueValue];
  }

  this._size--;
};
+
/**
 * If any of the cached items are dirty, commit them via the noun's objUpdate
 * method and clear their dirty flag.  A no-op for nouns with no update
 * method.
 */
GlodaLRUCacheCollection.prototype.commitDirty = function () {
  const objUpdate = this._nounDef.objUpdate;
  // we can only do this if there is an update method available...
  if (!objUpdate) {
    return;
  }

  for (const item of Object.values(this._idMap)) {
    if (!item.dirty) {
      continue;
    }
    LOG.debug("flushing dirty: " + item);
    objUpdate.call(this._nounDef.datastore, item);
    delete item.dirty;
  }
};
diff --git a/comm/mailnews/db/gloda/modules/Everybody.jsm b/comm/mailnews/db/gloda/modules/Everybody.jsm
new file mode 100644
index 0000000000..4f33134ef9
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Everybody.jsm
@@ -0,0 +1,23 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [];
+
+const { GlodaFundAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaFundAttr.jsm"
+);
+GlodaFundAttr.init();
+const { GlodaExplicitAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaExplicitAttr.jsm"
+);
+GlodaExplicitAttr.init();
+
+ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+ChromeUtils.import("resource:///modules/gloda/NounFreetag.jsm");
+ChromeUtils.import("resource:///modules/gloda/NounMimetype.jsm");
+ChromeUtils.import("resource:///modules/gloda/IndexMsg.jsm");
+const { GlodaABAttrs } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaMsgIndexer.jsm"
+);
+GlodaABAttrs.init();
diff --git a/comm/mailnews/db/gloda/modules/Facet.jsm b/comm/mailnews/db/gloda/modules/Facet.jsm
new file mode 100644
index 0000000000..96425b8838
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Facet.jsm
@@ -0,0 +1,599 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides faceting logic.
+ */
+
+var EXPORTED_SYMBOLS = ["FacetDriver", "FacetUtils"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+const lazy = {};
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "Gloda",
+ "resource:///modules/gloda/GlodaPublic.jsm"
+);
+
/**
 * Picks the faceters appropriate to a noun type and drives the faceting
 * process over a set of items.  This class and the faceters are intended to
 * be reusable, so only one instance per faceting session is needed (each
 * faceting pass is accordingly destructive to previous results).
 *
 * Faceting proceeds one attribute at a time across all provided items,
 * rather than item-by-item across the attributes of each item; both
 * strategies have caching downsides.
 */
function FacetDriver(aNounDef, aWindow) {
  this.nounDef = aNounDef;
  this._window = aWindow;
  this._makeFaceters();
}
FacetDriver.prototype = {
  /**
   * Build |this.faceters|: one faceter per facet-enabled attribute of our
   * noun definition, plus one for each extraFacets entry an attribute
   * declares.
   */
  _makeFaceters() {
    this.faceters = [];

    const addFaceter = (aAttrDef, aFacetDef) => {
      let faceterClass;
      if (aAttrDef.singular) {
        faceterClass = aFacetDef.type == "date" ? DateFaceter : DiscreteFaceter;
      } else {
        faceterClass =
          aFacetDef.type == "nonempty?"
            ? NonEmptySetFaceter
            : DiscreteSetFaceter;
      }
      this.faceters.push(new faceterClass(aAttrDef, aFacetDef));
    };

    for (const key in this.nounDef.attribsByBoundName) {
      const attrDef = this.nounDef.attribsByBoundName[key];
      // Attributes without a facet definition opt out of faceting.
      if (!attrDef.facet) {
        continue;
      }

      addFaceter(attrDef, attrDef.facet);

      if ("extraFacets" in attrDef) {
        for (const facetDef of attrDef.extraFacets) {
          addFaceter(attrDef, facetDef);
        }
      }
    }
  },
  /**
   * Asynchronously facet the provided items, invoking aCallback (on
   * aCallbackThis) once every faceter has run.
   */
  go(aItems, aCallback, aCallbackThis) {
    this.items = aItems;
    this.callback = aCallback;
    this.callbackThis = aCallbackThis;

    this._nextFaceter = 0;
    this._drive();
  },

  _MAX_FACETING_TIMESLICE_MS: 100,
  _FACETING_YIELD_DURATION_MS: 0,
  _driveWrapper(aThis) {
    aThis._drive();
  },
  /**
   * Run faceters until either all are done or our timeslice is exhausted;
   * in the latter case reschedule ourselves via setTimeout so the UI stays
   * responsive.
   */
  _drive() {
    const start = Date.now();

    while (this._nextFaceter < this.faceters.length) {
      const faceter = this.faceters[this._nextFaceter++];
      // for now we facet in one go, but the long-term plan allows for them to
      // be generators.
      faceter.facetItems(this.items);

      if (Date.now() - start > this._MAX_FACETING_TIMESLICE_MS) {
        this._window.setTimeout(
          this._driveWrapper,
          this._FACETING_YIELD_DURATION_MS,
          this
        );
        return;
      }
    }

    // All faceters done; tell our caller.
    this.callback.call(this.callbackThis);
  },
};
+
var FacetUtils = {
  /** Sort comparator: groups holding more items sort first. */
  _groupSizeComparator(a, b) {
    return b[1].length - a[1].length;
  },

  /**
   * Given a list where each entry is a tuple of [group object, list of items
   * belonging to that group], return the aMaxCount largest groups, re-ordered
   * by the noun's attribute comparator.  (We used to also produce an "other"
   * aggregation, but that proved conceptually difficult and was dropped.)
   *
   * @param aAttrDef The attribute for the facet we are working with.
   * @param aGroups The list of groups built for the facet.
   * @param aMaxCount The number of result rows you want back.
   */
  makeTopGroups(aAttrDef, aGroups, aMaxCount) {
    const comparator = aAttrDef.objectNounDef.comparator;

    // Take the aMaxCount biggest groups...
    const topGroups = aGroups
      .concat()
      .sort(this._groupSizeComparator)
      .slice(0, aMaxCount);

    // ...and put them back into attribute order.
    topGroups.sort((a, b) => comparator(a[0], b[0]));

    return topGroups;
  },
};
+
/**
 * Facet discrete things like message authors, boolean values, etc.  Only
 * appropriate for use on singular values.  Use |DiscreteSetFaceter| for
 * non-singular values.
 */
function DiscreteFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
DiscreteFaceter.prototype = {
  type: "discrete",
  /**
   * Facet the given set of items, dispatching on whether the attribute's
   * values are primitives or noun instances.
   */
  facetItems(aItems) {
    return this.attrDef.objectNounDef.isPrimitive
      ? this.facetPrimitiveItems(aItems)
      : this.facetComplexItems(aItems);
  },
  /**
   * Facet an attribute whose value is primitive, meaning that it is a raw
   * numeric value or string rather than a complex object.
   */
  facetPrimitiveItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;

    const valStrToVal = {};
    const groups = (this.groups = {});
    this.groupCount = 0;

    for (const item of aItems) {
      const value = attrKey in item ? item[attrKey] : null;
      if (value === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      // skip items the filter tells us to ignore
      if (filter && !filter(value)) {
        continue;
      }

      // hasOwnProperty guards against values that collide with keys living
      // on Object.prototype.
      if (groups.hasOwnProperty(value)) {
        groups[value].push(item);
      } else {
        groups[value] = [item];
        valStrToVal[value] = value;
        this.groupCount++;
      }
    }

    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [valStrToVal[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
  /**
   * Facet an attribute whose value is a complex object (a noun instance)
   * that can be identified by the facet's groupIdAttr.
   */
  facetComplexItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;
    const idAttr = this.facetDef.groupIdAttr;

    const groups = (this.groups = {});
    const groupMap = (this.groupMap = {});
    this.groupCount = 0;

    for (const item of aItems) {
      const value = attrKey in item ? item[attrKey] : null;
      if (value === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      // skip items the filter tells us to ignore
      if (filter && !filter(value)) {
        continue;
      }

      const valueId = value == null ? null : value[idAttr];
      // hasOwnProperty is required because tag nouns use non-numeric ids
      // that can collide with Object.prototype keys.  (Note: the "tags"
      // attribute is actually handled by the DiscreteSetFaceter.)
      if (groupMap.hasOwnProperty(valueId)) {
        groups[valueId].push(item);
      } else {
        groupMap[valueId] = value;
        groups[valueId] = [item];
        this.groupCount++;
      }
    }

    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [groupMap[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
};
+
/**
 * Facet sets of discrete items, for example the tags applied to messages.
 *
 * The main differences from |DiscreteFaceter|:
 * - The empty set is notable (it groups under null).
 * - Specific set configurations could be interesting, but are not low-hanging
 *   fruit.
 */
function DiscreteSetFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
DiscreteSetFaceter.prototype = {
  type: "discrete",
  /**
   * Facet the given set of items, dispatching on whether the attribute's
   * values are primitives or noun instances.
   */
  facetItems(aItems) {
    return this.attrDef.objectNounDef.isPrimitive
      ? this.facetPrimitiveItems(aItems)
      : this.facetComplexItems(aItems);
  },
  /**
   * Facet an attribute whose values are primitive, meaning raw numeric
   * values or strings rather than complex objects.
   */
  facetPrimitiveItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;

    const groups = (this.groups = {});
    const valStrToVal = {};
    this.groupCount = 0;

    for (const item of aItems) {
      let values = attrKey in item ? item[attrKey] : null;
      if (values === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      // An absent or empty set groups under the null bucket.
      if (values == null || values.length == 0) {
        values = [null];
      }
      for (const value of values) {
        // skip items the filter tells us to ignore
        if (filter && !filter(value)) {
          continue;
        }

        // hasOwnProperty guards against values that collide with keys
        // living on Object.prototype.
        if (groups.hasOwnProperty(value)) {
          groups[value].push(item);
        } else {
          groups[value] = [item];
          valStrToVal[value] = value;
          this.groupCount++;
        }
      }
    }

    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [valStrToVal[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
  /**
   * Facet an attribute whose values are complex objects (noun instances)
   * that can be identified by the facet's groupIdAttr.
   */
  facetComplexItems(aItems) {
    const attrKey = this.attrDef.boundName;
    const filter = this.facetDef.filter;
    const idAttr = this.facetDef.groupIdAttr;

    const groups = (this.groups = {});
    const groupMap = (this.groupMap = {});
    this.groupCount = 0;

    for (const item of aItems) {
      let values = attrKey in item ? item[attrKey] : null;
      if (values === GlodaConstants.IGNORE_FACET) {
        continue;
      }

      // An absent or empty set groups under the null bucket.
      if (values == null || values.length == 0) {
        values = [null];
      }
      for (const value of values) {
        // skip items the filter tells us to ignore
        if (filter && !filter(value)) {
          continue;
        }

        const valueId = value == null ? null : value[idAttr];
        // hasOwnProperty is required because tag nouns use non-numeric ids
        // that can collide with Object.prototype keys.
        if (groupMap.hasOwnProperty(valueId)) {
          groups[valueId].push(item);
        } else {
          groupMap[valueId] = value;
          groups[valueId] = [item];
          this.groupCount++;
        }
      }
    }

    const comparator = this.facetDef.groupComparator;
    this.orderedGroups = Object.keys(groups)
      .map(key => [groupMap[key], groups[key]])
      .sort((a, b) => comparator(a[0], b[0]));
  },
};
+
/**
 * Given a non-singular attribute, facet it as if it were a boolean based on
 * whether there is anything in the list (set).
 */
function NonEmptySetFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
NonEmptySetFaceter.prototype = {
  type: "boolean",
  /**
   * Partition the items into a "has values" group and an "empty" group,
   * emitting only the groups that are non-empty (true before false).
   */
  facetItems(aItems) {
    const attrKey = this.attrDef.boundName;

    const nonEmpty = [];
    const empty = [];

    this.groupCount = 0;

    for (const item of aItems) {
      const vals = attrKey in item ? item[attrKey] : null;
      if (vals == null || vals.length == 0) {
        empty.push(item);
      } else {
        nonEmpty.push(item);
      }
    }

    this.orderedGroups = [];
    if (nonEmpty.length) {
      this.orderedGroups.push([true, nonEmpty]);
    }
    if (empty.length) {
      this.orderedGroups.push([false, empty]);
    }
    this.groupCount = this.orderedGroups.length;
  },
  /**
   * Build a gloda message query for this facet.  The query itself can only
   * express "non-empty", so the second element of the returned tuple tells
   * the caller whether to invert the result set.
   */
  makeQuery(aGroupValues, aInclusive) {
    const query = (this.query = lazy.Gloda.newQuery(
      GlodaConstants.NOUN_MESSAGE
    ));

    query[this.attrDef.boundName].call(query);

    // Our query is always for non-empty lists (at this time), so we want to
    // invert if they're excluding 'true' or including 'false', which means !=.
    const invert = aGroupValues[0] != aInclusive;

    return [query, invert];
  },
};
+
/**
 * Facet dates.  We build a hierarchical nested structure of year, month, and
 * day nesting levels.  This decision was made speculatively in the hopes that
 * it would allow us to do clustered analysis and that there might be a benefit
 * for that.  For example, if you search for "Christmas", we might notice
 * clusters of messages around December of each year.  We could then present
 * these in a list as likely candidates, rather than a graphical timeline.
 * Alternately, it could be used to inform a non-linear visualization.  As it
 * stands (as of this writing), it's just a complicating factor.
 */
function DateFaceter(aAttrDef, aFacetDef) {
  this.attrDef = aAttrDef;
  this.facetDef = aFacetDef;
}
DateFaceter.prototype = {
  type: "date",
  /**
   * Bucket the items by year/month/day into |this.years|, track the overall
   * [oldest, newest] range, and count items whose dates are missing or
   * unreasonable.  Items with usable dates land in |this.validItems|.
   */
  facetItems(aItems) {
    let attrKey = this.attrDef.boundName;

    // years maps a year bucket key to { _dateCount, _subCount, <months> }.
    let years = (this.years = { _subCount: 0 });
    // generally track the time range
    let oldest = null,
      newest = null;

    this.validItems = [];

    // just cheat and put us at the front...
    this.groupCount = aItems.length ? 1000 : 0;
    this.orderedGroups = null;

    /** The number of items with a null/missing attribute. */
    this.missing = 0;

    /**
     * The number of items with a date that is unreasonably far in the past or
     * in the future.  Old-wise, we are concerned about incorrectly formatted
     * messages (spam) that end up placed around the UNIX epoch.  New-wise,
     * we are concerned about messages that can't be explained by users who
     * don't know how to set their clocks (both the current user and people
     * sending them mail), mainly meaning spam.
     * We want to avoid having our clever time-scale logic being made useless by
     * these unreasonable messages.
     */
    this.unreasonable = 0;
    // feb 1, 1970
    let tooOld = new Date(1970, 1, 1);
    // 3 days from now
    let tooNew = new Date(Date.now() + 3 * 24 * 60 * 60 * 1000);

    for (let item of aItems) {
      let val = attrKey in item ? item[attrKey] : null;
      // -- missing
      if (val == null) {
        this.missing++;
        continue;
      }

      // -- unreasonable
      if (val < tooOld || val > tooNew) {
        this.unreasonable++;
        continue;
      }

      this.validItems.push(item);

      // -- time range
      if (oldest == null) {
        oldest = newest = val;
      } else if (val < oldest) {
        oldest = val;
      } else if (val > newest) {
        newest = val;
      }

      // -- bucket
      // - year
      // NOTE(review): getYear() is deprecated and returns the year minus
      // 1900, so these bucket keys are offset (e.g. 109 for 2009).  They
      // appear to be used only as opaque bucket labels within this file;
      // confirm no consumer displays them before switching to getFullYear().
      let year,
        valYear = val.getYear();
      if (valYear in years) {
        year = years[valYear];
        year._dateCount++;
      } else {
        year = years[valYear] = {
          _dateCount: 1,
          _subCount: 0,
        };
        years._subCount++;
      }

      // - month
      let month,
        valMonth = val.getMonth();
      if (valMonth in year) {
        month = year[valMonth];
        month._dateCount++;
      } else {
        month = year[valMonth] = {
          _dateCount: 1,
          _subCount: 0,
        };
        year._subCount++;
      }

      // - day (leaf buckets are plain arrays of items)
      let valDate = val.getDate();
      if (valDate in month) {
        month[valDate].push(item);
      } else {
        month[valDate] = [item];
      }
    }

    this.oldest = oldest;
    this.newest = newest;
  },

  /**
   * Collect the per-day item lists of a month bucket, skipping the "_"-
   * prefixed bookkeeping fields (_dateCount/_subCount).
   */
  _unionMonth(aMonthObj) {
    let dayItemLists = [];
    for (let key in aMonthObj) {
      let dayItemList = aMonthObj[key];
      if (typeof key == "string" && key.startsWith("_")) {
        continue;
      }
      dayItemLists.push(dayItemList);
    }
    return dayItemLists;
  },

  /**
   * Collect the per-month unions of a year bucket, skipping the "_"-prefixed
   * bookkeeping fields.
   */
  _unionYear(aYearObj) {
    let monthItemLists = [];
    for (let key in aYearObj) {
      let monthObj = aYearObj[key];
      if (typeof key == "string" && key.startsWith("_")) {
        continue;
      }
      monthItemLists.push(this._unionMonth(monthObj));
    }
    return monthItemLists;
  },
};
diff --git a/comm/mailnews/db/gloda/modules/Gloda.jsm b/comm/mailnews/db/gloda/modules/Gloda.jsm
new file mode 100644
index 0000000000..77b2288e53
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/Gloda.jsm
@@ -0,0 +1,2275 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["Gloda"];
+
+const { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+const {
+ GlodaAttributeDBDef,
+ GlodaAccount,
+ GlodaConversation,
+ GlodaFolder,
+ GlodaMessage,
+ GlodaContact,
+ GlodaIdentity,
+ GlodaAttachment,
+} = ChromeUtils.import("resource:///modules/gloda/GlodaDataModel.jsm");
+const { GlodaCollection, GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { whittlerRegistry, mimeMsgToContentAndMeta } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaContent.jsm"
+);
+const { GlodaQueryClassFactory } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaQueryClassFactory.jsm"
+);
+const { GlodaUtils } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaUtils.jsm"
+);
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+/**
+ * @see |Gloda.BadItemContentsError|
+ */
+function BadItemContentsError(aMessage) {
+ this.message = aMessage;
+}
+BadItemContentsError.prototype = {
+ toString() {
+ return this.message;
+ },
+};
+
+/**
+ * Provides the user-visible (and extension visible) global database
+ * functionality. There is currently a dependency/ordering
+ * problem in that the concept of 'gloda' also includes some logic that is
+ * contributed by built-in extensions, if you will. Those built-in extensions
+ * (GlodaFundAttr.jsm, GlodaExplicitAttr.jsm) also import this file. To avoid a circular
+ * dependency, those built-in extensions are loaded by Everybody.jsm. The
+ * simplest/best solution is probably to move Everybody.jsm to be Gloda.jsm and
+ * have it re-export only 'Gloda'. Gloda.jsm (this file) can then move to be
+ * gloda_int.js (or whatever our eventual naming scheme is), which built-in
+ * extensions can explicitly rely upon.
+ *
+ * === Concepts
+ *
+ * == Nouns
+ *
+ * Inspired by reasonable uses of triple-stores, I have tried to leverage
+ * existing model and terminology rather than rolling our own for everything.
+ * The idea with triple-stores is that you have a subject, a predicate, and an
+ * object. For example, if we are talking about a message, that is the
+ * subject, the predicate could roughly be sent-by, and the object a person.
+ * We can generalize this idea to say that the subject and objects are nouns.
+ * Since we want to be more flexible than only dealing with messages, we
+ * therefore introduce the concept of nouns as an organizing principle.
+ *
+ * == Attributes
+ *
+ * Our attributes definitions are basically our predicates. When we define
+ * an attribute, it's a label with a bunch of meta-data. Our attribute
+ * instances are basically a 'triple' in a triple-store. The attributes
+ * are stored in database rows that imply a specific noun-type (ex: the
+ * messageAttributes table), with an ID identifying the message which is our
+ * subject, an attribute ID which identifies the attribute definition in use
+ * (and therefore the predicate), plus an object ID (given context aka the
+ * noun type by the attribute's meta-data) which identifies the 'object'.
+ *
+ * == But...
+ *
+ * Things aren't entirely as clear as they could be right now, terminology/
+ * concept/implementation-wise. Some work is probably still in order.
+ *
+ * === Implementation
+ *
+ * == Nouns
+ *
+ * So, we go and define the nouns that are roughly the classes in our data
+ * model. Every 'class' we define in GlodaDataModel.jsm is a noun that gets defined
+ * here in the Gloda core. We provide sufficient meta-data about the noun to
+ * serialize/deserialize its representation from our database representation.
+ * Nouns do not have to be defined in this class, but can also be contributed
+ * by external code.
+ * We have a concept of 'first class' nouns versus non-first class nouns. The
+ * distinction is meant to be whether we can store meta-information about those
+ * nouns using attributes. Right now, only messages are real first-class nouns,
+ * but we want to expand that to include contacts and eventually events and
+ * tasks as lightning-integration occurs. In practice, we are stretching the
+ * definition of first-class nouns slightly to include things we can't store
+ * meta-data about, but want to be able to query about. We do want to resolve
+ * this.
+ *
+ * == Attributes
+ *
+ * Attributes are defined by "attribute providers" who are responsible for
+ * taking an instance of a first-class noun (for which they are registered)
+ * plus perhaps some other meta-data, and returning a list of attributes
+ * extracted from that noun. For now, this means messages. Attribute
+ * providers may create new data records as a side-effect of the indexing
+ * process, although we have not yet fully dealt with the problem of deleting
+ * these records should they become orphaned in the database due to the
+ * purging of a message and its attributes.
+ * All of the 'core' gloda attributes are provided by the GlodaFundAttr.jsm and
+ * GlodaExplicitAttr.jsm providers.
+ *
+ * === (Notable) Future Work
+ *
+ * == Attributes
+ *
+ * Attribute mechanisms currently lack any support for 'overriding' attributes
+ * provided by other attribute providers. For example, the fundattr provider
+ * tells us who a message is 'from' based on the e-mail address present.
+ * However, other plugins may actually know better. For example, the bugzilla
+ * daemon e-mails based on bug activity although the daemon gets the credit
+ * as the official sender. A bugzilla plugin can easily extract the actual
+ * person/e-mail addressed who did something on the bug to cause the
+ * notification to be sent. In practice, we would like that person to be
+ * the 'sender' of the bugmail. But we can't really do that right, yet.
+ *
+ * @namespace
+ */
+var Gloda = {
+  /**
+   * Initialize logging, the datastore (SQLite database), the core nouns and
+   * attributes, and the contact and identities that belong to the presumed
+   * current user (based on accounts).
+   *
+   * Additional nouns and the core attribute providers are initialized by the
+   * Everybody.jsm module which ensures all of those dependencies are loaded
+   * (and initialized).
+   */
+  _init() {
+    // Set up this._log first; the noun/attribute definition steps below log
+    // through it (e.g. defineNoun calls this._log.info).
+    this._initLogging();
+    // Hand the datastore the noun-ID -> noun-definition map so it can relate
+    // database rows back to their definitions.
+    GlodaDatastore._init(this._nounIDToDef);
+    this._initAttributes();
+    this._initMyIdentities();
+  },
+
+ _log: null,
+ /**
+ * Initialize logging; the error console window gets Warning/Error, and stdout
+ * (via dump) gets everything.
+ */
+ _initLogging() {
+ this._log = console.createInstance({
+ prefix: "gloda",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+ this._log.info("Logging Initialized");
+ },
+
+  /**
+   * Callers should access the unique ID for the GlodaDatastore
+   * with this getter. If the GlodaDatastore has not been
+   * initialized, this value is null.
+   *
+   * @returns a UUID as a string, ex: "c4dd0159-9287-480f-a648-a4613e147fdb",
+   *     or null before datastore initialization.
+   */
+  get datastoreID() {
+    // Simple pass-through to the datastore's own identifier.
+    return GlodaDatastore._datastoreID;
+  },
+
+ /**
+ * Lookup a gloda message from an nsIMsgDBHdr, with the result returned as a
+ * collection. Keep in mind that the message may not be indexed, so you
+ * may end up with an empty collection. (Also keep in mind that this query
+ * is asynchronous, so you will want your action-taking logic to be found
+ * in your listener's onQueryCompleted method; the result will not be in
+ * the collection when this method returns.)
+ *
+ * @param aMsgHdr The header of the message you want the gloda message for.
+ * @param aListener The listener that should be registered with the collection
+ * @param aData The (optional) value to set as the data attribute on the
+ * collection.
+ *
+ * @returns The collection that will receive the results.
+ *
+ * @testpoint gloda.ns.getMessageCollectionForHeader()
+ */
+ getMessageCollectionForHeader(aMsgHdr, aListener, aData) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.folder(aMsgHdr.folder).messageKey(aMsgHdr.messageKey);
+ return query.getCollection(aListener, aData);
+ },
+
+ /**
+ * Given a list of message headers, return a collection containing the gloda
+ * messages that correspond to those headers. Keep in mind that gloda may
+ * not have indexed all the messages, so the returned collection may not have
+ * a message for each header you provide. (Also keep in mind that this query
+ * is asynchronous, so you will want your action-taking logic to be found
+ * in your listener's onQueryCompleted method; no results will be present in
+ * the collection when this method returns.)
+ *
+ * @param aHeaders An array of headers
+ * @param aListener The listener that should be registered with the collection
+ * @param aData The (optional) value to set as the data attribute on the
+ * collection.
+ *
+ * @returns The collection that will receive the results.
+ *
+ * @testpoint gloda.ns.getMessageCollectionForHeaders()
+ */
+ getMessageCollectionForHeaders(aHeaders, aListener, aData) {
+ // group the headers by the folder they are found in
+ let headersByFolder = {};
+ for (let header of aHeaders) {
+ let folderURI = header.folder.URI;
+ let headersForFolder = headersByFolder[folderURI];
+ if (headersForFolder === undefined) {
+ headersByFolder[folderURI] = [header];
+ } else {
+ headersForFolder.push(header);
+ }
+ }
+
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ let clause;
+ // build a query, using a separate union clause for each folder.
+ for (let folderURI in headersByFolder) {
+ let headersForFolder = headersByFolder[folderURI];
+ let folder = this.getFolderForFolder(headersForFolder[0].folder);
+ // if this is the first or clause, just use the query itself
+ if (!clause) {
+ clause = query;
+ } else {
+ // Create a new query clause via the 'or' command.
+ clause = query.or();
+ }
+
+ clause.folder(folder);
+ let messageKeys = headersForFolder.map(hdr => hdr.messageKey);
+ clause.messageKey.apply(clause, messageKeys);
+ }
+
+ return query.getCollection(aListener, aData);
+ },
+
+ /**
+ * @testpoint gloda.ns.getMessageContent
+ */
+ getMessageContent(aGlodaMessage, aMimeMsg) {
+ return mimeMsgToContentAndMeta(
+ aMimeMsg,
+ aGlodaMessage.folderMessage.folder
+ )[0];
+ },
+
+  /**
+   * Map a message folder to its gloda folder abstraction via the datastore's
+   * folder mapping (presumably a GlodaFolder instance — see
+   * GlodaDatastore._mapFolder for the authoritative contract).
+   *
+   * @param aMsgFolder The nsIMsgFolder-style folder to look up.
+   * @returns The datastore's gloda folder object for the given folder.
+   */
+  getFolderForFolder(aMsgFolder) {
+    return GlodaDatastore._mapFolder(aMsgFolder);
+  },
+
+  /**
+   * Takes one or more strings containing lists of comma-delimited e-mail
+   * addresses with optional display names, and returns a list of sub-lists of
+   * identities, where each sub-list corresponds to each of the strings passed
+   * as arguments. These identities are loaded from the database if they
+   * already exist, or created if they do not yet exist.
+   * If the identities need to be created, they will also result in the
+   * creation of a gloda contact. If a display name was provided with the
+   * e-mail address, it will become the name of the gloda contact. If a
+   * display name was not provided, the e-mail address will also serve as the
+   * contact name.
+   * This method uses the indexer's callback handle mechanism, and does not
+   * obey traditional return semantics.
+   *
+   * We normalize all e-mail addresses to be lowercase as a normative measure.
+   *
+   * @param aCallbackHandle The GlodaIndexer callback handle (or equivalent)
+   *   that you are operating under.
+   * @param aAddrGroups... One or more strings. Each string can contain zero or
+   *   more e-mail addresses with display name. If more than one address is
+   *   given, they should be comma-delimited. For example
+   *   '"Bob Smith" <bob@example.com>' is an address with display name. Mime
+   *   header decoding is performed, but is ignorant of any folder-level
+   *   character set overrides.
+   * @returns via the callback handle mechanism, a list containing one sub-list
+   *   for each string argument passed. Each sub-list contains zero or more
+   *   GlodaIdentity instances corresponding to the addresses provided.
+   */
+  *getOrCreateMailIdentities(aCallbackHandle, ...aAddrGroups) {
+    // Maps lower-cased address -> [displayName, resultList, resultList, ...];
+    // index 0 is always the display name, every later slot is a result list
+    // that wants the identity for this address.
+    let addresses = {};
+    let resultLists = [];
+
+    // parse the strings
+    for (let aMailAddresses of aAddrGroups) {
+      let parsed = GlodaUtils.parseMailAddresses(aMailAddresses);
+
+      let resultList = [];
+      resultLists.push(resultList);
+
+      for (let iAddress = 0; iAddress < parsed.count; iAddress++) {
+        let address = parsed.addresses[iAddress].toLowerCase();
+        if (address in addresses) {
+          addresses[address].push(resultList);
+        } else {
+          addresses[address] = [parsed.names[iAddress], resultList];
+        }
+      }
+    }
+
+    let addressList = Object.keys(addresses);
+    if (addressList.length == 0) {
+      // Nothing to look up; report the (all-empty) result lists immediately.
+      yield aCallbackHandle.doneWithResult(resultLists);
+      // we should be stopped before we reach this point, but safety first.
+      return;
+    }
+
+    // Query the database for all the addresses at once; the yield suspends
+    // this generator until the async query completes.
+    let query = this.newQuery(GlodaConstants.NOUN_IDENTITY);
+    query.kind("email");
+    query.value.apply(query, addressList);
+    let collection = query.getCollection(aCallbackHandle);
+    yield GlodaConstants.kWorkAsync;
+
+    // put the identities in the appropriate result lists
+    for (let identity of collection.items) {
+      let nameAndResultLists = addresses[identity.value];
+      this._log.debug(
+        " found identity for '" +
+          nameAndResultLists[0] +
+          "' (" +
+          identity.value +
+          ")"
+      );
+      // index 0 is the name, skip it
+      for (let iResList = 1; iResList < nameAndResultLists.length; iResList++) {
+        nameAndResultLists[iResList].push(identity);
+      }
+      // Remove satisfied addresses; whatever remains below must be created.
+      delete addresses[identity.value];
+    }
+
+    // create the identities that did not exist yet
+    for (let address in addresses) {
+      let nameAndResultLists = addresses[address];
+      let name = nameAndResultLists[0];
+
+      this._log.debug(" creating contact for '" + name + "' (" + address + ")");
+
+      // try and find an existing address book contact.
+      let card = MailServices.ab.cardForEmailAddress(address);
+      // XXX when we have the address book GUID stuff, we need to use that to
+      // find existing contacts... (this will introduce a new query phase
+      // where we batch all the GUIDs for an async query)
+      // XXX when the address book supports multiple e-mail addresses, we
+      // should also just create identities for any that don't yet exist
+
+      // if there is no name, just use the e-mail (the ab indexer actually
+      // processes the card's displayName for synchronization, so we don't
+      // need to do that.)
+      if (!name) {
+        name = address;
+      }
+
+      let contact = GlodaDatastore.createContact(null, null, name, 0, 0);
+
+      // we must create the identity. use a blank description because there's
+      // nothing to differentiate it from other identities, as this contact
+      // only has one initially (us).
+      // XXX when we have multiple e-mails and there is a meaning associated
+      // with each e-mail, try and use that to populate the description.
+      // XXX we are creating the identity here before we insert the contact.
+      // conceptually it is good for us to be creating the identity before
+      // exposing it to the address-book indexer, but we could get our id's
+      // in a bad way from not deferring the identity insertion until after
+      // the contact insertion.
+      let identity = GlodaDatastore.createIdentity(
+        contact.id,
+        contact,
+        "email",
+        address,
+        /* description */ "",
+        /* relay? */ false
+      );
+      contact._identities = [identity];
+
+      // give the address book indexer a chance if we have a card.
+      // (it will fix-up the name based on the card as appropriate)
+      if (card) {
+        yield aCallbackHandle.pushAndGo(
+          Gloda.grokNounItem(contact, { card }, true, true, aCallbackHandle)
+        );
+      } else {
+        // grokNounItem will issue the insert for us...
+        GlodaDatastore.insertContact(contact);
+      }
+
+      // As above, index 0 is the display name; push the freshly created
+      // identity onto every result list that asked for this address.
+      for (let iResList = 1; iResList < nameAndResultLists.length; iResList++) {
+        nameAndResultLists[iResList].push(identity);
+      }
+    }
+
+    yield aCallbackHandle.doneWithResult(resultLists);
+  },
+
+  /**
+   * Dictionary of the user's known identities; key is the identity id, value
+   * is the actual identity. This is populated by _initMyIdentities based on
+   * the accounts defined.
+   */
+  myIdentities: {},
+  /**
+   * The contact corresponding to the current user. We are assuming that only
+   * a single user/human being uses the current profile. This is known to be
+   * a flawed assumption, but is the best first approximation available.
+   * The contact is based on the default account's default identity. The user
+   * can change both, if desired, in Account Settings.
+   * Null until _initMyIdentities runs.
+   *
+   * @TODO attempt to deal with multiple people using the same profile
+   */
+  myContact: null,
+  /**
+   * Populate myIdentities with all of our identities. Currently we do this
+   * by assuming that there is one human/user per profile, and that all of the
+   * accounts defined in the profile belong to them. The single contact is
+   * stored on myContact.
+   *
+   * Phases: (1) walk the account identities, resolving each address to an
+   * existing gloda identity or flagging it for creation; (2) attach contacts
+   * to existing identities; (3) pick/create the single "my" contact;
+   * (4) create any missing identities; (5) publish everything onto
+   * myContact/myIdentities and wrap them in collections.
+   *
+   * @TODO deal with account addition/modification/removal
+   * @TODO attempt to deal with multiple people using the same profile
+   */
+  _initMyIdentities() {
+    let myContact = null;
+    let myIdentities = {};
+    // Process each email at most once; stored here.
+    let myEmailAddresses = new Set();
+
+    let fullName, fallbackName;
+    let existingIdentities = [];
+    let identitiesToCreate = [];
+
+    let allIdentities = MailServices.accounts.allIdentities;
+    let defaultMsgIdentity = MailServices.accounts.defaultAccount
+      ? MailServices.accounts.defaultAccount.defaultIdentity
+      : null;
+    let defaultMsgIdentityKey = defaultMsgIdentity
+      ? defaultMsgIdentity.key
+      : null;
+    let defaultIdentity;
+
+    // Nothing to do if there are no accounts/identities.
+    if (allIdentities.length == 0) {
+      return;
+    }
+
+    for (let msgIdentity of allIdentities) {
+      let emailAddress = msgIdentity.email;
+      let replyTo = msgIdentity.replyTo;
+      let msgIdentityDescription = msgIdentity.fullName || msgIdentity.email;
+      let isDefaultMsgIdentity = msgIdentity.key == defaultMsgIdentityKey;
+
+      // The default identity's name/email wins; otherwise keep the first
+      // non-empty value seen.
+      if (!fullName || isDefaultMsgIdentity) {
+        fullName = msgIdentity.fullName;
+      }
+      if (!fallbackName || isDefaultMsgIdentity) {
+        fallbackName = msgIdentity.email;
+      }
+
+      // Find the identities if they exist, flag to create them if they don't.
+      for (let address of [emailAddress, replyTo]) {
+        if (!address) {
+          continue;
+        }
+        let parsed = GlodaUtils.parseMailAddresses(address);
+        if (myEmailAddresses.has(parsed.addresses[0])) {
+          continue;
+        }
+        let identity = GlodaDatastore.getIdentity("email", parsed.addresses[0]);
+        if (identity) {
+          if (identity.description != msgIdentityDescription) {
+            // If the user changed the identity name, update the db.
+            identity._description = msgIdentityDescription;
+            GlodaDatastore.updateIdentity(identity);
+          }
+          existingIdentities.push(identity);
+          if (isDefaultMsgIdentity) {
+            defaultIdentity = identity;
+          }
+        } else {
+          identitiesToCreate.push([
+            parsed.addresses[0],
+            msgIdentityDescription,
+          ]);
+        }
+        myEmailAddresses.add(parsed.addresses[0]);
+      }
+    }
+    // We need to establish the identity.contact portions of the relationship.
+    for (let identity of existingIdentities) {
+      identity._contact = GlodaDatastore.getContactByID(identity.contactID);
+      if (defaultIdentity && defaultIdentity.id == identity.id) {
+        if (identity.contact.name != (fullName || fallbackName)) {
+          // If the user changed the default identity, update the db.
+          identity.contact.name = fullName || fallbackName;
+          GlodaDatastore.updateContact(identity.contact);
+        }
+        defaultIdentity._contact = identity.contact;
+      }
+    }
+
+    if (defaultIdentity) {
+      // The contact is based on the default account's default identity.
+      myContact = defaultIdentity.contact;
+    } else if (existingIdentities.length) {
+      // Just use the first guy's contact.
+      myContact = existingIdentities[0].contact;
+    } else {
+      // Create a new contact.
+      myContact = GlodaDatastore.createContact(
+        null,
+        null,
+        fullName || fallbackName,
+        0,
+        0
+      );
+      GlodaDatastore.insertContact(myContact);
+    }
+
+    for (let emailAndDescription of identitiesToCreate) {
+      // XXX This won't always be of type "email" as we add new account types.
+      let identity = GlodaDatastore.createIdentity(
+        myContact.id,
+        myContact,
+        "email",
+        emailAndDescription[0],
+        emailAndDescription[1],
+        false
+      );
+      existingIdentities.push(identity);
+    }
+
+    for (let identity of existingIdentities) {
+      myIdentities[identity.id] = identity;
+    }
+
+    this.myContact = myContact;
+    this.myIdentities = myIdentities;
+    myContact._identities = Object.keys(myIdentities).map(
+      id => myIdentities[id]
+    );
+
+    // We need contacts to make these objects reachable via the collection
+    // manager.
+    this._myContactCollection = this.explicitCollection(
+      GlodaConstants.NOUN_CONTACT,
+      [this.myContact]
+    );
+    this._myIdentitiesCollection = this.explicitCollection(
+      GlodaConstants.NOUN_IDENTITY,
+      this.myContact._identities
+    );
+  },
+
+  /** Next Noun ID to hand out, these don't need to be persisted (for now). */
+  _nextNounID: 1000,
+
+  /**
+   * Maps noun names to noun IDs.
+   */
+  _nounNameToNounID: {},
+  /**
+   * Maps noun IDs to noun definition dictionaries. (Noun definition
+   * dictionaries provided to us at the time a noun was defined, plus some
+   * additional stuff we put in there.)
+   */
+  _nounIDToDef: {},
+
+  /**
+   * Shared toJSON implementation for table-backed nouns (installed on the
+   * noun def by defineNoun): serialize a managed item as just its id.
+   */
+  _managedToJSON(aItem) {
+    return aItem.id;
+  },
+
+  /**
+   * Define a noun. Takes a dictionary with the following keys/values:
+   *
+   * @param aNounDef.name The name of the noun. This is not a display name
+   *   (anything being displayed needs to be localized, after all), but simply
+   *   the canonical name for debugging purposes and for people to pass to
+   *   lookupNoun. The suggested convention is lower-case-dash-delimited,
+   *   with names being singular (since it's a single noun we are referring
+   *   to.)
+   * @param aNounDef.class The 'class' to which an instance of the noun will
+   *   belong (aka will pass an instanceof test). You may also provide this
+   *   as 'clazz' if the keyword makes your IDE angry.
+   * @param aNounDef.allowsArbitraryAttrs Is this a 'first class noun'/can it
+   *   be a subject, AKA can this noun have attributes stored on it that
+   *   relate it to other things? For example, a message is first-class; we
+   *   store attributes of messages. A date is not first-class now, nor is it
+   *   likely to be; we will not store attributes about a date, although dates
+   *   will be the objects of other subjects. (For example: we might
+   *   associate a date with a calendar event, but the date is an attribute of
+   *   the calendar event and not vice versa.)
+   * @param aNounDef.usesParameter A boolean indicating whether this noun
+   *   requires use of the 'parameter' BLOB storage field on the attribute
+   *   bindings in the database to persist itself. Use of parameters should
+   *   be limited to a reasonable number of values (16-32 is okay, more than
+   *   that is pushing it and 256 should be considered an absolute upper
+   *   bound) because of the database organization. When false, your
+   *   toParamAndValue function is expected to return null for the parameter
+   *   and likewise your fromParamAndValue should expect to (and generally)
+   *   ignore the argument.
+   * @param aNounDef.toParamAndValue A function that takes an instantiated
+   *   noun instance and returns a 2-element list of [parameter, value] where
+   *   parameter may only be non-null if you passed a usesParameter of true.
+   *   Parameter may be of any type (BLOB), and value must be numeric (pass
+   *   0 if you don't need the value).
+   *
+   * @param aNounDef.isPrimitive True when the noun instance is a raw numeric
+   *   value/string/boolean. False when the instance is an object. When
+   *   false, it is assumed the attribute that serves as a unique identifier
+   *   for the value is "id" unless 'idAttr' is provided.
+   * @param [aNounDef.idAttr="id"] For non-primitive nouns, this is the
+   *   attribute on the object that uniquely identifies it.
+   *
+   * @param aNounDef.schema Unsupported mechanism by which you can define a
+   *   table that corresponds to this noun. The table will be created if it
+   *   does not exist.
+   *   - name The table name; don't conflict with other things!
+   *   - columns A list of [column name, sqlite type] tuples. You should
+   *     always include a definition like ["id", "INTEGER PRIMARY KEY"] for
+   *     now (and it should be the first column name too.) If you care about
+   *     how the attributes are poked into your object (for example, you want
+   *     underscores used for some of them because the attributes should be
+   *     immutable), then you can include a third string that is the name of
+   *     the attribute to use.
+   *   - indices A dictionary of lists of column names, where the key name
+   *     becomes the index name. Ex: {foo: ["bar"]} results in an index on
+   *     the column "bar" where the index is named "foo".
+   * @param aNounID Optional explicit noun ID; auto-assigned from
+   *   _nextNounID when omitted.
+   * @returns The (mutated) noun definition, now registered.
+   */
+  defineNoun(aNounDef, aNounID) {
+    this._log.info("Defining noun: " + aNounDef.name);
+    if (aNounID === undefined) {
+      aNounID = this._nextNounID++;
+    }
+    aNounDef.id = aNounID;
+
+    // Let people whose editors get angry about illegal attribute names use
+    // clazz instead of class.
+    if (aNounDef.clazz) {
+      aNounDef.class = aNounDef.clazz;
+    }
+
+    if (!("idAttr" in aNounDef)) {
+      aNounDef.idAttr = "id";
+    }
+    if (!("comparator" in aNounDef)) {
+      // Default comparator loudly refuses to compare so that sorting a noun
+      // that never defined one fails fast instead of silently misordering.
+      aNounDef.comparator = function () {
+        throw new Error(
+          "Noun type '" + aNounDef.name + "' lacks a real comparator."
+        );
+      };
+    }
+
+    // We allow nouns to have data tables associated with them where we do all
+    // the legwork. The schema attribute is the gateway to this magical world
+    // of functionality. Said door is officially unsupported.
+    if (aNounDef.schema) {
+      if (!aNounDef.tableName) {
+        if (aNounDef.schema.name) {
+          aNounDef.tableName = "ext_" + aNounDef.schema.name;
+        } else {
+          aNounDef.tableName = "ext_" + aNounDef.name;
+        }
+      }
+      // this creates the data table and binder and hooks everything up
+      GlodaDatastore.createNounTable(aNounDef);
+
+      if (!aNounDef.toParamAndValue) {
+        aNounDef.toParamAndValue = function (aThing) {
+          if (aThing instanceof aNounDef.class) {
+            return [null, aThing.id];
+          }
+          // assume they're just passing the id directly
+          return [null, aThing];
+        };
+      }
+    }
+
+    // if it has a table, you can query on it. seems straight-forward.
+    if (aNounDef.tableName) {
+      // Generate the four query flavors for this noun.
+      [
+        aNounDef.queryClass,
+        aNounDef.nullQueryClass,
+        aNounDef.explicitQueryClass,
+        aNounDef.wildcardQueryClass,
+      ] = GlodaQueryClassFactory(aNounDef);
+      aNounDef._dbMeta = {};
+      aNounDef.class.prototype.NOUN_ID = aNounDef.id;
+      aNounDef.class.prototype.NOUN_DEF = aNounDef;
+      aNounDef.toJSON = this._managedToJSON;
+
+      aNounDef.specialLoadAttribs = [];
+
+      // - define the 'id' constrainer
+      let idConstrainer = function (...aArgs) {
+        let constraint = [GlodaConstants.kConstraintIdIn, null, ...aArgs];
+        this._constraints.push(constraint);
+        return this;
+      };
+      aNounDef.queryClass.prototype.id = idConstrainer;
+    }
+    if (aNounDef.cache) {
+      // Derive a cache entry count from the per-item cost and total budget.
+      let cacheCost = aNounDef.cacheCost || 1024;
+      let cacheBudget = aNounDef.cacheBudget || 128 * 1024;
+      let cacheSize = Math.floor(cacheBudget / cacheCost);
+      if (cacheSize) {
+        GlodaCollectionManager.defineCache(aNounDef, cacheSize);
+      }
+    }
+    aNounDef.attribsByBoundName = {};
+    aNounDef.domExposeAttribsByBoundName = {};
+
+    aNounDef.objectNounOfAttributes = [];
+
+    // Register the noun in the global lookup tables.
+    this._nounNameToNounID[aNounDef.name] = aNounID;
+    this._nounIDToDef[aNounID] = aNounDef;
+    aNounDef.actions = [];
+
+    this._attrProviderOrderByNoun[aNounDef.id] = [];
+    this._attrOptimizerOrderByNoun[aNounDef.id] = [];
+    this._attrProvidersByNoun[aNounDef.id] = {};
+
+    return aNounDef;
+  },
+
+ /**
+ * Lookup a noun (ID) suitable for passing to defineAttribute's various
+ * noun arguments. Throws an exception if the noun with the given name
+ * cannot be found; the assumption is that you can't live without the noun.
+ */
+ lookupNoun(aNounName) {
+ if (aNounName in this._nounNameToNounID) {
+ return this._nounNameToNounID[aNounName];
+ }
+
+ throw Error(
+ "Unable to locate noun with name '" +
+ aNounName +
+ "', but I " +
+ "do know about: " +
+ Object.keys(this._nounNameToNounID).join(", ")
+ );
+ },
+
+ /**
+ * Lookup a noun def given a name.
+ */
+ lookupNounDef(aNounName) {
+ return this._nounIDToDef[this.lookupNoun(aNounName)];
+ },
+
+  /**
+   * Define an action on a noun. During the prototype stage, this was conceived
+   * of as a way to expose all the constraints possible given a noun. For
+   * example, if you have an identity or a contact, you could use this to
+   * see all the messages sent from/to a given contact. It was likewise
+   * thought potentially usable for future expansion. For example, you could
+   * also decide to send an e-mail to a contact when you have the contact
+   * instance available.
+   * Outside of the 'expmess' checkbox-happy prototype, this functionality is
+   * not used. As such, this functionality should be considered in flux and
+   * subject to changes. Also, very open to specific suggestions motivated
+   * by use cases.
+   * One conceptual issue raised by this mechanism is the interaction of actions
+   * with facts like "this message is read". We currently implement the 'fact'
+   * by defining an attribute with a 'boolean' noun type. To deal with this,
+   * in various places we pass-in the attribute as well as the noun value.
+   * Since the relationships for booleans and integers in these cases is
+   * standard and well-defined, this works out pretty well, but suggests we
+   * need to think things through.
+   *
+   * @param aNounID The ID of the noun you want to define an action on.
+   *   (Assumes the noun was already registered via defineNoun; an unknown
+   *   ID would fail here — TODO confirm callers guarantee this.)
+   * @param aActionMeta The dictionary describing the noun. The dictionary
+   *   should have the following fields:
+   *   - actionType: a string indicating the type of action. Currently, only
+   *     "filter" is a legal value.
+   *   - actionTarget: the noun ID of the noun type on which this action is
+   *     applicable.
+   *
+   *   The following should be present for actionType=="filter";
+   *   - shortName: The name that should be used to display this constraint.
+   *     For example, a checkbox-heavy UI might display a checkbox for each
+   *     constraint using shortName as the label.
+   *   - makeConstraint: A function that takes the attribute that is the source
+   *     of the noun and the noun instance as arguments, and returns APV-style
+   *     constraints. Since the APV-style query mechanism is now deprecated,
+   *     this signature is deprecated. Probably the way to update this would be
+   *     to pass in the query instance that constraints should be contributed
+   *     to.
+   */
+  defineNounAction(aNounID, aActionMeta) {
+    let nounDef = this._nounIDToDef[aNounID];
+    nounDef.actions.push(aActionMeta);
+  },
+
+ /**
+ * Retrieve all of the actions (as defined using defineNounAction) for the
+ * given noun type (via noun ID) with the given action type (ex: filter).
+ */
+ getNounActions(aNounID, aActionType) {
+ let nounDef = this._nounIDToDef[aNounID];
+ if (!nounDef) {
+ return [];
+ }
+ return nounDef.actions.filter(
+ action => !aActionType || action.actionType == aActionType
+ );
+ },
+
+  /** Attribute providers in the sequence to process them, keyed by noun ID. */
+  _attrProviderOrderByNoun: {},
+  /** Attribute providers that provide optimizers, in the sequence to proc. */
+  _attrOptimizerOrderByNoun: {},
+  /** Maps attribute providers to the list of attributes they provide */
+  _attrProviders: {},
+  /**
+   * Maps nouns to their attribute providers to a list of the attributes they
+   * provide for the noun.
+   */
+  _attrProvidersByNoun: {},
+
  /**
   * Define the core nouns (that are not defined elsewhere) and a few noun
   * actions. Core nouns could be defined in other files, assuming dependency
   * issues are resolved via the Everybody.jsm mechanism or something else.
   * Right now, noun_tag defines the tag noun. If we broke more of these out,
   * we would probably want to move the 'class' code from GlodaDataModel.jsm, the
   * SQL table def and helper code from GlodaDatastore.jsm (and this code) to their
   * own noun_*.js files. There are some trade-offs to be made, and I think
   * we can deal with those once we start to integrate lightning/calendar and
   * our noun space gets large and more heterogeneous.
   */
  _initAttributes() {
    // Register the primitive nouns first, then the object nouns, then load
    // any attribute definitions already persisted in the database.
    this.defineNoun(
      {
        name: "bool",
        clazz: Boolean,
        allowsArbitraryAttrs: false,
        isPrimitive: true,
        // favor true before false
        comparator(a, b) {
          // null sorts last; otherwise subtraction orders true (1) first.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return b - a;
        },
        toParamAndValue(aBool) {
          // Booleans are stored as 1/0 integers with no parameter.
          return [null, aBool ? 1 : 0];
        },
      },
      GlodaConstants.NOUN_BOOLEAN
    );
    this.defineNoun(
      {
        name: "number",
        clazz: Number,
        allowsArbitraryAttrs: false,
        continuous: true,
        isPrimitive: true,
        comparator(a, b) {
          // null sorts last; otherwise ascending numeric order.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a - b;
        },
        toParamAndValue(aNum) {
          return [null, aNum];
        },
      },
      GlodaConstants.NOUN_NUMBER
    );
    this.defineNoun(
      {
        name: "string",
        clazz: String,
        allowsArbitraryAttrs: false,
        isPrimitive: true,
        comparator(a, b) {
          // null sorts last; otherwise locale-aware string order.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a.localeCompare(b);
        },
        toParamAndValue(aString) {
          return [null, aString];
        },
      },
      GlodaConstants.NOUN_STRING
    );
    this.defineNoun(
      {
        name: "date",
        clazz: Date,
        allowsArbitraryAttrs: false,
        continuous: true,
        isPrimitive: true,
        comparator(a, b) {
          // null sorts last; otherwise chronological order.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a - b;
        },
        toParamAndValue(aDate) {
          // JS Dates are milliseconds; scale to microseconds for storage.
          return [null, aDate.valueOf() * 1000];
        },
      },
      GlodaConstants.NOUN_DATE
    );
    this.defineNoun(
      {
        name: "fulltext",
        clazz: String,
        allowsArbitraryAttrs: false,
        continuous: false,
        isPrimitive: true,
        comparator(a, b) {
          throw new Error("Fulltext nouns are not comparable!");
        },
        // as noted on NOUN_FULLTEXT, we just pass the string around. it never
        // hits the database, so it's okay.
        toParamAndValue(aString) {
          return [null, aString];
        },
      },
      GlodaConstants.NOUN_FULLTEXT
    );

    this.defineNoun(
      {
        name: "folder",
        clazz: GlodaFolder,
        allowsArbitraryAttrs: false,
        isPrimitive: false,
        queryHelpers: {
          /**
           * Query for accounts based on the account associated with folders. We
           * walk all of the folders associated with an account and put them in
           * the list of folders that match if gloda would index them. This is
           * unsuitable for producing a persistable constraint since it does not
           * adapt for added/deleted folders. However, it is sufficient for
           * faceting. Also, we don't persist constraints yet.
           *
           * @TODO The long-term solution is to move towards using arithmetic
           *     encoding on folder-id's like we use for MIME types and friends.
           */
          Account(aAttrDef, aArguments) {
            let folderValues = [];
            let seenRootFolders = {};
            for (let iArg = 0; iArg < aArguments.length; iArg++) {
              let givenFolder = aArguments[iArg];
              let givenMsgFolder = givenFolder.getXPCOMFolder(
                givenFolder.kActivityFolderOnlyNoData
              );
              let rootFolder = givenMsgFolder.rootFolder;

              // skip processing this folder if we have already processed its
              // root folder.
              if (rootFolder.URI in seenRootFolders) {
                continue;
              }
              seenRootFolders[rootFolder.URI] = true;

              for (let folder of rootFolder.descendants) {
                let folderFlags = folder.flags;

                // Ignore virtual folders, non-mail folders.
                // XXX this is derived from GlodaIndexer's shouldIndexFolder.
                //  This should probably just use centralized code or the like.
                if (
                  !(folderFlags & Ci.nsMsgFolderFlags.Mail) ||
                  folderFlags & Ci.nsMsgFolderFlags.Virtual
                ) {
                  continue;
                }
                // we only index local or IMAP folders
                if (
                  !(folder instanceof Ci.nsIMsgLocalMailFolder) &&
                  !(folder instanceof Ci.nsIMsgImapMailFolder)
                ) {
                  continue;
                }

                let glodaFolder = Gloda.getFolderForFolder(folder);
                folderValues.push(glodaFolder);
              }
            }
            return this._inConstraintHelper(aAttrDef, folderValues);
          },
        },
        comparator(a, b) {
          // null sorts last; otherwise locale order on the folder name.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a.name.localeCompare(b.name);
        },
        toParamAndValue(aFolderOrGlodaFolder) {
          // Accept either a GlodaFolder (use its id directly) or a raw
          // nsIMsgFolder (map it through the datastore first).
          if (aFolderOrGlodaFolder instanceof GlodaFolder) {
            return [null, aFolderOrGlodaFolder.id];
          }
          return [null, GlodaDatastore._mapFolder(aFolderOrGlodaFolder).id];
        },
      },
      GlodaConstants.NOUN_FOLDER
    );
    this.defineNoun(
      {
        name: "account",
        clazz: GlodaAccount,
        allowsArbitraryAttrs: false,
        isPrimitive: false,
        equals(a, b) {
          // Accounts are equal when both are null-ish or their ids match.
          if ((a && !b) || (!a && b)) {
            return false;
          }
          if (!a && !b) {
            return true;
          }
          return a.id == b.id;
        },
        comparator(a, b) {
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a.name.localeCompare(b.name);
        },
      },
      GlodaConstants.NOUN_ACCOUNT
    );
    this.defineNoun(
      {
        name: "conversation",
        clazz: GlodaConversation,
        allowsArbitraryAttrs: false,
        isPrimitive: false,
        cache: true,
        cacheCost: 512,
        tableName: "conversations",
        attrTableName: "messageAttributes",
        attrIDColumnName: "conversationID",
        datastore: GlodaDatastore,
        objFromRow: GlodaDatastore._conversationFromRow,
        comparator(a, b) {
          // null sorts last; otherwise locale order on the subject.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a.subject.localeCompare(b.subject);
        },
        toParamAndValue(aConversation) {
          if (aConversation instanceof GlodaConversation) {
            return [null, aConversation.id];
          }
          // assume they're just passing the id directly
          return [null, aConversation];
        },
      },
      GlodaConstants.NOUN_CONVERSATION
    );
    this.defineNoun(
      {
        name: "message",
        clazz: GlodaMessage,
        allowsArbitraryAttrs: true,
        isPrimitive: false,
        cache: true,
        cacheCost: 2048,
        tableName: "messages",
        // we will always have a fulltext row, even for messages where we don't
        //  have the body available. this is because we want the subject indexed.
        dbQueryJoinMagic:
          " INNER JOIN messagesText ON messages.id = messagesText.rowid",
        attrTableName: "messageAttributes",
        attrIDColumnName: "messageID",
        datastore: GlodaDatastore,
        objFromRow: GlodaDatastore._messageFromRow,
        dbAttribAdjuster: GlodaDatastore.adjustMessageAttributes,
        dbQueryValidityConstraintSuffix:
          " AND +deleted = 0 AND +folderID IS NOT NULL AND +messageKey IS NOT NULL",
        // This is what's used when we have no validity constraints, i.e. we allow
        // for ghost messages, which do not have a row in the messagesText table.
        dbQueryJoinMagicWithNoValidityConstraints:
          " LEFT JOIN messagesText ON messages.id = messagesText.rowid",
        objInsert: GlodaDatastore.insertMessage,
        objUpdate: GlodaDatastore.updateMessage,
        toParamAndValue(aMessage) {
          if (aMessage instanceof GlodaMessage) {
            return [null, aMessage.id];
          }
          // assume they're just passing the id directly
          return [null, aMessage];
        },
      },
      GlodaConstants.NOUN_MESSAGE
    );
    this.defineNoun(
      {
        name: "contact",
        clazz: GlodaContact,
        allowsArbitraryAttrs: true,
        isPrimitive: false,
        cache: true,
        cacheCost: 128,
        tableName: "contacts",
        attrTableName: "contactAttributes",
        attrIDColumnName: "contactID",
        datastore: GlodaDatastore,
        objFromRow: GlodaDatastore._contactFromRow,
        dbAttribAdjuster: GlodaDatastore.adjustAttributes,
        objInsert: GlodaDatastore.insertContact,
        objUpdate: GlodaDatastore.updateContact,
        comparator(a, b) {
          // null sorts last; otherwise locale order on the contact name.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a.name.localeCompare(b.name);
        },
        toParamAndValue(aContact) {
          if (aContact instanceof GlodaContact) {
            return [null, aContact.id];
          }
          // assume they're just passing the id directly
          return [null, aContact];
        },
      },
      GlodaConstants.NOUN_CONTACT
    );
    this.defineNoun(
      {
        name: "identity",
        clazz: GlodaIdentity,
        allowsArbitraryAttrs: false,
        isPrimitive: false,
        cache: true,
        cacheCost: 128,
        usesUniqueValue: true,
        tableName: "identities",
        datastore: GlodaDatastore,
        objFromRow: GlodaDatastore._identityFromRow,
        /**
         * Short string is the contact name, long string includes the identity
         * value too, delimited by a colon. Not tremendously localizable.
         */
        userVisibleString(aIdentity, aLong) {
          if (!aLong) {
            return aIdentity.contact.name;
          }
          if (aIdentity.contact.name == aIdentity.value) {
            return aIdentity.value;
          }
          return aIdentity.contact.name + " (" + aIdentity.value + ")";
        },
        comparator(a, b) {
          // null sorts last; otherwise locale order on the contact name.
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          return a.contact.name.localeCompare(b.contact.name);
        },
        toParamAndValue(aIdentity) {
          if (aIdentity instanceof GlodaIdentity) {
            return [null, aIdentity.id];
          }
          // assume they're just passing the id directly
          return [null, aIdentity];
        },
      },
      GlodaConstants.NOUN_IDENTITY
    );
    this.defineNoun(
      {
        name: "attachment-infos",
        clazz: GlodaAttachment,
        allowsArbitraryAttrs: false,
        isPrimitive: false,
        // Serialize an attachment to a positional JSON array; fromJSON below
        // must unpack fields in the same order.
        toJSON(x) {
          return [
            x._name,
            x._contentType,
            x._size,
            x._part,
            x._externalUrl,
            x._isExternal,
          ];
        },
        fromJSON(x, aGlodaMessage) {
          let [name, contentType, size, _part, _externalUrl, isExternal] = x;
          return new GlodaAttachment(
            aGlodaMessage,
            name,
            contentType,
            size,
            _part,
            _externalUrl,
            isExternal
          );
        },
      },
      GlodaConstants.NOUN_ATTACHMENT
    );

    // parameterized identity is just two identities; we store the first one
    //  (whose value set must be very constrained, like the 'me' identities)
    //  as the parameter, the second (which does not need to be constrained)
    //  as the value.
    this.defineNoun(
      {
        name: "parameterized-identity",
        clazz: null,
        allowsArbitraryAttrs: false,
        comparator(a, b) {
          if (a == null) {
            if (b == null) {
              return 0;
            }
            return 1;
          } else if (b == null) {
            return -1;
          }
          // First sort by the first identity in the tuple
          // Since our general use-case is for the first guy to be "me", we only
          //  compare the identity value, not the name.
          let fic = a[0].value.localeCompare(b[0].value);
          if (fic) {
            return fic;
          }
          // Next compare the second identity in the tuple, but use the contact
          //  this time to be consistent with our identity comparator.
          return a[1].contact.name.localeCompare(b[1].contact.name);
        },
        /**
         * Diff the current tuple list against the old one, returning
         * [added tuples, removed tuples].
         */
        computeDelta(aCurValues, aOldValues) {
          // Index the old tuples: origin identity -> set of target identities.
          let oldMap = {};
          for (let tupe of aOldValues) {
            let [originIdentity, targetIdentity] = tupe;
            let targets = oldMap[originIdentity];
            if (targets === undefined) {
              targets = oldMap[originIdentity] = {};
            }
            targets[targetIdentity] = true;
          }

          let added = [],
            removed = [];
          // Anything current but not in the old map was added; matches are
          // deleted from the map so leftovers are the removals.
          for (let tupe of aCurValues) {
            let [originIdentity, targetIdentity] = tupe;
            let targets = oldMap[originIdentity];
            if (targets === undefined || !(targetIdentity in targets)) {
              added.push(tupe);
            } else {
              delete targets[targetIdentity];
            }
          }

          for (let originIdentity in oldMap) {
            let targets = oldMap[originIdentity];
            for (let targetIdentity in targets) {
              removed.push([originIdentity, targetIdentity]);
            }
          }

          return [added, removed];
        },
        /**
         * Record every identity ID referenced by the JSON tuples so the
         * loader knows which identity objects need to be fetched.
         */
        contributeObjDependencies(
          aJsonValues,
          aReferencesByNounID,
          aInverseReferencesByNounID
        ) {
          // nothing to do with a zero-length list
          if (aJsonValues.length == 0) {
            return false;
          }

          let nounIdentityDef =
            Gloda._nounIDToDef[GlodaConstants.NOUN_IDENTITY];
          let references = aReferencesByNounID[nounIdentityDef.id];
          if (references === undefined) {
            references = aReferencesByNounID[nounIdentityDef.id] = {};
          }

          for (let tupe of aJsonValues) {
            let [originIdentityID, targetIdentityID] = tupe;
            if (!(originIdentityID in references)) {
              references[originIdentityID] = null;
            }
            if (!(targetIdentityID in references)) {
              references[targetIdentityID] = null;
            }
          }

          return true;
        },
        /**
         * Replace the identity IDs in the JSON tuples with the now-loaded
         * identity objects from the reference map.
         */
        resolveObjDependencies(
          aJsonValues,
          aReferencesByNounID,
          aInverseReferencesByNounID
        ) {
          let references = aReferencesByNounID[GlodaConstants.NOUN_IDENTITY];

          let results = [];
          for (let tupe of aJsonValues) {
            let [originIdentityID, targetIdentityID] = tupe;
            results.push([
              references[originIdentityID],
              references[targetIdentityID],
            ]);
          }

          return results;
        },
        toJSON(aIdentityTuple) {
          return [aIdentityTuple[0].id, aIdentityTuple[1].id];
        },
        toParamAndValue(aIdentityTuple) {
          // first identity id is the attribute parameter, second is the value.
          return [aIdentityTuple[0].id, aIdentityTuple[1].id];
        },
      },
      GlodaConstants.NOUN_PARAM_IDENTITY
    );

    GlodaDatastore.getAllAttributes();
  },
+
  /**
   * Create accessor functions to 'bind' an attribute to underlying normalized
   * attribute storage, as well as creating the appropriate query object
   * constraint helper functions. This name is somewhat of a misnomer because
   * special attributes are not 'bound' (because specific/non-generic per-class
   * code provides the properties) but still depend on this method to
   * establish their constraint helper methods.
   *
   * @param aAttrDef The attribute definition being bound.
   * @param aSubjectNounDef The noun definition whose query class receives the
   *     constraint helper methods.
   *
   * @XXX potentially rename to not suggest binding is required.
   */
  _bindAttribute(aAttrDef, aSubjectNounDef) {
    let objectNounDef = aAttrDef.objectNounDef;

    // -- the query constraint helpers
    if (aSubjectNounDef.queryClass !== undefined) {
      let constrainer;
      let canQuery = true;
      if (
        "special" in aAttrDef &&
        aAttrDef.special == GlodaConstants.kSpecialFulltext
      ) {
        // Fulltext attributes push a fulltext constraint tuple.
        constrainer = function (...aArgs) {
          let constraint = [
            GlodaConstants.kConstraintFulltext,
            aAttrDef,
            ...aArgs,
          ];
          this._constraints.push(constraint);
          return this;
        };
      } else if (aAttrDef.canQuery || aAttrDef.attributeName.startsWith("_")) {
        // Regular queryable attributes (underscore-prefixed internal
        // attributes are always treated as queryable).
        constrainer = function (...aArgs) {
          let constraint = [GlodaConstants.kConstraintIn, aAttrDef, ...aArgs];
          this._constraints.push(constraint);
          return this;
        };
      } else {
        // Non-queryable attribute: install a helper that fails loudly so the
        // developer is told to set canQuery rather than silently no-op.
        constrainer = function () {
          throw new Error(
            "Cannot query on attribute " +
              aAttrDef.attributeName +
              " because its canQuery parameter hasn't been set to true." +
              " Reading the comments about Gloda.defineAttribute may be a" +
              " sensible thing to do now."
          );
        };
        canQuery = false;
      }

      aSubjectNounDef.queryClass.prototype[aAttrDef.boundName] = constrainer;

      // Don't bind extra query-able attributes if we're unable to perform a
      // search on the attribute.
      if (!canQuery) {
        return;
      }

      // - ranged value helper: fooRange
      if (objectNounDef.continuous) {
        // takes one or more tuples of [lower bound, upper bound]
        let rangedConstrainer = function (...aArgs) {
          let constraint = [
            GlodaConstants.kConstraintRanges,
            aAttrDef,
            ...aArgs,
          ];
          this._constraints.push(constraint);
          return this;
        };

        aSubjectNounDef.queryClass.prototype[aAttrDef.boundName + "Range"] =
          rangedConstrainer;
      }

      // - string LIKE helper for special on-row attributes: fooLike
      // (it is impossible to store a string as an indexed attribute, which is
      //  why we do this for on-row only.)
      if (
        "special" in aAttrDef &&
        aAttrDef.special == GlodaConstants.kSpecialString
      ) {
        let likeConstrainer = function (...aArgs) {
          let constraint = [
            GlodaConstants.kConstraintStringLike,
            aAttrDef,
            ...aArgs,
          ];
          this._constraints.push(constraint);
          return this;
        };

        aSubjectNounDef.queryClass.prototype[aAttrDef.boundName + "Like"] =
          likeConstrainer;
      }

      // - Custom helpers provided by the noun type...
      if ("queryHelpers" in objectNounDef) {
        for (let name in objectNounDef.queryHelpers) {
          let helper = objectNounDef.queryHelpers[name];
          // we need a new closure...
          let helperFunc = helper;
          aSubjectNounDef.queryClass.prototype[aAttrDef.boundName + name] =
            function (...aArgs) {
              return helperFunc.call(this, aAttrDef, ...aArgs);
            };
        }
      }
    }
  },
+
  /**
   * Names of attribute-specific localized strings and the JS attribute they are
   * exposed as in the attribute's "strings" attribute (if the provider has a
   * string bundle exposed on its "strings" attribute). They are rooted at
   * "gloda.SUBJECT-NOUN-NAME.attr.ATTR-NAME.*".
   *
   * Please consult the localization notes in gloda.properties to understand
   * what these are used for.
   */
  _ATTR_LOCALIZED_STRINGS: {
    /* - Faceting */
    facetNameLabel: "facetNameLabel",
    noneLabel: "noneLabel",
    includeLabel: "includeLabel",
    excludeLabel: "excludeLabel",
    remainderLabel: "remainderLabel",
    mustMatchLabel: "mustMatchLabel",
    cantMatchLabel: "cantMatchLabel",
    mayMatchLabel: "mayMatchLabel",
    mustMatchNoneLabel: "mustMatchNoneLabel",
    mustMatchSomeLabel: "mustMatchSomeLabel",
    mayMatchAnyLabel: "mayMatchAnyLabel",
  },
  /**
   * Define an attribute and all its meta-data. Takes a single dictionary as
   * its argument, with the following required properties:
   *
   * @param aAttrDef.provider The object instance providing a 'process' method.
   * @param aAttrDef.extensionName The name of the extension providing these
   *     attributes.
   * @param aAttrDef.attributeType The type of attribute, one of the values from
   *     the kAttr* enumeration.
   * @param aAttrDef.attributeName The name of the attribute, which also doubles
   *     as the bound property name if you pass 'bind' a value of true. You are
   *     responsible for avoiding collisions, which presumably will mean
   *     checking/updating a wiki page in the future, or just prefixing your
   *     attribute name with your extension name or something like that.
   * @param aAttrDef.bind Should this attribute be 'bound' as a convenience
   *     attribute on the subject's object (true/false)? For example, with an
   *     attributeName of "foo" and passing true for 'bind' with a subject noun
   *     of NOUN_MESSAGE, GlodaMessage instances will expose a "foo" getter that
   *     returns the value of the attribute. If 'singular' is true, this means
   *     an instance of the object class corresponding to the noun type or null
   *     if the attribute does not exist. If 'singular' is false, this means a
   *     list of instances of the object class corresponding to the noun type,
   *     where the list may be empty if no instances of the attribute are
   *     present.
   * @param aAttrDef.bindName Optional override of attributeName for purposes of
   *     the binding property's name.
   * @param aAttrDef.singular Is the attribute going to happen at most once
   *     (true), or potentially multiple times (false). This affects whether
   *     the binding returns a list or just a single item (which is null when
   *     the attribute is not present).
   * @param [aAttrDef.emptySetIsSignificant=false] Should we treat the absence
   *     of values for this attribute as significant (presumably so that empty
   *     sets can be queried/indexed)? NOTE(review): the original sentence was
   *     truncated ("Should we"); confirm semantics against the indexer.
   * @param aAttrDef.subjectNouns A list of object types (NOUNs) that this
   *     attribute can be set on. Each element in the list should be one of the
   *     NOUN_* constants or a dynamically registered noun type.
   * @param aAttrDef.objectNoun The object type (one of the NOUN_* constants or
   *     a dynamically registered noun types) that is the 'object' in the
   *     traditional RDF triple. More pragmatically, in the database row used
   *     to represent an attribute, we store the subject (ex: message ID),
   *     attribute ID, and an integer which is the integer representation of the
   *     'object' whose type you are defining right here.
   */
  defineAttribute(aAttrDef) {
    // ensure required properties exist on aAttrDef
    if (
      !("provider" in aAttrDef) ||
      !("extensionName" in aAttrDef) ||
      !("attributeType" in aAttrDef) ||
      !("attributeName" in aAttrDef) ||
      !("singular" in aAttrDef) ||
      !("subjectNouns" in aAttrDef) ||
      !("objectNoun" in aAttrDef)
    ) {
      // perhaps we should have a list of required attributes, perchance with
      // an explanation of what it holds, and use that to be friendlier?
      throw Error(
        "You omitted a required attribute defining property, please" +
          " consult the documentation as penance."
      );
    }

    // -- Fill in defaults
    if (!("emptySetIsSignificant" in aAttrDef)) {
      aAttrDef.emptySetIsSignificant = false;
    }

    // Faceted attributes are queryable by default.
    if (!("canQuery" in aAttrDef)) {
      aAttrDef.canQuery = !!aAttrDef.facet;
    }

    // return if the attribute has already been defined
    if (aAttrDef.dbDef) {
      return aAttrDef;
    }

    // - first time we've seen a provider init logic
    if (!(aAttrDef.provider.providerName in this._attrProviders)) {
      this._attrProviders[aAttrDef.provider.providerName] = [];
      if (aAttrDef.provider.contentWhittle) {
        whittlerRegistry.registerWhittler(aAttrDef.provider);
      }
    }

    let compoundName = aAttrDef.extensionName + ":" + aAttrDef.attributeName;
    // -- Database Definition
    let attrDBDef;
    if (compoundName in GlodaDatastore._attributeDBDefs) {
      // the existence of the GlodaAttributeDBDef means that either it has
      //  already been fully defined, or has been loaded from the database but
      //  not yet 'bound' to a provider (and had important meta-info that
      //  doesn't go in the db copied over)
      attrDBDef = GlodaDatastore._attributeDBDefs[compoundName];
    } else {
      // we need to create the attribute definition in the database
      let attrID = null;
      attrID = GlodaDatastore._createAttributeDef(
        aAttrDef.attributeType,
        aAttrDef.extensionName,
        aAttrDef.attributeName,
        null
      );

      attrDBDef = new GlodaAttributeDBDef(
        GlodaDatastore,
        attrID,
        compoundName,
        aAttrDef.attributeType,
        aAttrDef.extensionName,
        aAttrDef.attributeName
      );
      GlodaDatastore._attributeDBDefs[compoundName] = attrDBDef;
      GlodaDatastore._attributeIDToDBDefAndParam[attrID] = [attrDBDef, null];
    }

    // Cross-link the in-memory definition and its database definition.
    aAttrDef.dbDef = attrDBDef;
    attrDBDef.attrDef = aAttrDef;

    aAttrDef.id = aAttrDef.dbDef.id;

    if ("bindName" in aAttrDef) {
      aAttrDef.boundName = aAttrDef.bindName;
    } else {
      aAttrDef.boundName = aAttrDef.attributeName;
    }

    aAttrDef.objectNounDef = this._nounIDToDef[aAttrDef.objectNoun];
    aAttrDef.objectNounDef.objectNounOfAttributes.push(aAttrDef);

    // -- Facets
    // Fill missing facet-definition fields from the object noun's defaults.
    function normalizeFacetDef(aFacetDef) {
      if (!("groupIdAttr" in aFacetDef)) {
        aFacetDef.groupIdAttr = aAttrDef.objectNounDef.idAttr;
      }
      if (!("groupComparator" in aFacetDef)) {
        aFacetDef.groupComparator = aAttrDef.objectNounDef.comparator;
      }
      if (!("filter" in aFacetDef)) {
        aFacetDef.filter = null;
      }
    }
    // No facet attribute means no facet desired; set an explicit null so that
    //  code can check without doing an "in" check.
    if (!("facet" in aAttrDef)) {
      aAttrDef.facet = null;
    } else if (aAttrDef.facet === true) {
      // Promote "true" facet values to the defaults. Where attributes have
      //  specified values, make sure we fill in any missing defaults.
      aAttrDef.facet = {
        type: "default",
        groupIdAttr: aAttrDef.objectNounDef.idAttr,
        groupComparator: aAttrDef.objectNounDef.comparator,
        filter: null,
      };
    } else {
      normalizeFacetDef(aAttrDef.facet);
    }
    if ("extraFacets" in aAttrDef) {
      for (let facetDef of aAttrDef.extraFacets) {
        normalizeFacetDef(facetDef);
      }
    }

    // Copy each known localized string from the bundle into aStickIn, keyed
    // by the JS attribute name from _ATTR_LOCALIZED_STRINGS.
    function gatherLocalizedStrings(aBundle, aPropRoot, aStickIn) {
      for (let propName in Gloda._ATTR_LOCALIZED_STRINGS) {
        let attrName = Gloda._ATTR_LOCALIZED_STRINGS[propName];
        try {
          aStickIn[attrName] = aBundle.GetStringFromName(aPropRoot + propName);
        } catch (ex) {
          // do nothing. nsIStringBundle throws exceptions when not found
        }
      }
    }

    // -- L10n.
    // If the provider has a string bundle, populate a "strings" attribute with
    //  our standard attribute strings that can be UI exposed.
    if ("strings" in aAttrDef.provider && aAttrDef.facet) {
      let bundle = aAttrDef.provider.strings;

      // -- attribute strings
      let attrStrings = (aAttrDef.facet.strings = {});
      // we use the first subject the attribute applies to as the basis of
      //  where to get the string from. Mainly because we currently don't have
      //  any attributes with multiple subjects nor a use-case where we expose
      //  multiple noun types via the UI. (Just messages right now.)
      let canonicalSubject = this._nounIDToDef[aAttrDef.subjectNouns[0]];
      let propRoot =
        "gloda." +
        canonicalSubject.name +
        ".attr." +
        aAttrDef.attributeName +
        ".";
      gatherLocalizedStrings(bundle, propRoot, attrStrings);

      // -- alias strings for synthetic facets
      if ("extraFacets" in aAttrDef) {
        for (let facetDef of aAttrDef.extraFacets) {
          facetDef.strings = {};
          let aliasPropRoot =
            "gloda." + canonicalSubject.name + ".attr." + facetDef.alias + ".";
          gatherLocalizedStrings(bundle, aliasPropRoot, facetDef.strings);
        }
      }
    }

    // -- Subject Noun Binding
    for (
      let iSubject = 0;
      iSubject < aAttrDef.subjectNouns.length;
      iSubject++
    ) {
      let subjectType = aAttrDef.subjectNouns[iSubject];
      let subjectNounDef = this._nounIDToDef[subjectType];
      this._bindAttribute(aAttrDef, subjectNounDef);

      // update the provider maps...
      if (
        !this._attrProviderOrderByNoun[subjectType].includes(aAttrDef.provider)
      ) {
        this._attrProviderOrderByNoun[subjectType].push(aAttrDef.provider);
        if (aAttrDef.provider.optimize) {
          this._attrOptimizerOrderByNoun[subjectType].push(aAttrDef.provider);
        }
        this._attrProvidersByNoun[subjectType][aAttrDef.provider.providerName] =
          [];
      }
      this._attrProvidersByNoun[subjectType][
        aAttrDef.provider.providerName
      ].push(aAttrDef);

      subjectNounDef.attribsByBoundName[aAttrDef.boundName] = aAttrDef;
      if (aAttrDef.domExpose) {
        subjectNounDef.domExposeAttribsByBoundName[aAttrDef.boundName] =
          aAttrDef;
      }

      // NOTE: bitwise AND here, versus strict equality for the parent-column
      //  check below; kSpecialColumn appears to be used as a flag bit.
      if (
        "special" in aAttrDef &&
        aAttrDef.special & GlodaConstants.kSpecialColumn
      ) {
        subjectNounDef.specialLoadAttribs.push(aAttrDef);
      }

      // if this is a parent column attribute, make note of it so that if we
      //  need to do an inverse references lookup, we know what column we are
      //  issuing against.
      if (
        "special" in aAttrDef &&
        aAttrDef.special === GlodaConstants.kSpecialColumnParent
      ) {
        subjectNounDef.parentColumnAttr = aAttrDef;
      }

      if (
        aAttrDef.objectNounDef.tableName ||
        aAttrDef.objectNounDef.contributeObjDependencies
      ) {
        subjectNounDef.hasObjDependencies = true;
      }
    }

    this._attrProviders[aAttrDef.provider.providerName].push(aAttrDef);
    return aAttrDef;
  },
+
+ /**
+ * Retrieve the attribute provided by the given extension with the given
+ * attribute name. The original idea was that plugins would effectively
+ * name-space attributes, helping avoid collisions. Since we are leaning
+ * towards using binding heavily, this doesn't really help, as the collisions
+ * will just occur on the attribute name instead. Also, this can turn
+ * extensions into liars as name changes/moves to core/etc. happen.
+ *
+ * @TODO consider removing the extension name argument parameter requirement
+ */
+ getAttrDef(aPluginName, aAttrName) {
+ let compoundName = aPluginName + ":" + aAttrName;
+ return GlodaDatastore._attributeDBDefs[compoundName];
+ },
+
  /**
   * Create a new query instance for the given noun-type. This provides
   * a generic way to provide constraint-based queries of any first-class
   * nouns supported by the system.
   *
   * The idea is that every attribute on an object can be used to express
   * a constraint on the query object. Constraints implicitly 'AND' together,
   * but providing multiple arguments to a constraint function results in an
   * 'OR'ing of those values. Additionally, you can call or() on the returned
   * query to create an alternate query that is effectively a giant OR against
   * all the constraints you create on the main query object (or any other
   * alternate queries returned by or()). (Note: there is no nesting of these
   * alternate queries. query.or().or() is equivalent to query.or())
   * For each attribute, there is a constraint with the same name that takes
   * one or more arguments. The arguments represent a set of OR values that
   * objects matching the query can have. (If you want the constraint
   * effectively ANDed together, just invoke the constraint function
   * multiple times.) For example, newQuery(NOUN_PERSON).age(25) would
   * constrain to all the people aged 25, while age(25, 26) would constrain
   * to all the people age 25 or 26.
   * For each attribute with a 'continuous' noun, there is a constraint with the
   * attribute name with "Range" appended. It takes two arguments which are an
   * inclusive lower bound and an inclusive upper bound for values in the
   * range. If you would like an open-ended range on either side, pass null
   * for that argument. If you would like to specify multiple ranges that
   * should be ORed together, simply pass additional (pairs of) arguments.
   * For example, newQuery(NOUN_PERSON).age(25,100) would constrain to all
   * the people who are >= 25 and <= 100. Likewise age(25, null) would just
   * return all the people who are 25 or older. And age(25,30,35,40) would
   * return people who are either 25-30 or 35-40.
   * There are also full-text constraint columns. In a nutshell, their
   * arguments are the strings that should be passed to the SQLite FTS3
   * MATCH clause.
   *
   * @param aNounID The (integer) noun-id of the noun you want to query on.
   * @param aOptions an optional dictionary of query options, see the GlodaQuery
   *     class documentation.
   */
  newQuery(aNounID, aOptions) {
    let nounDef = this._nounIDToDef[aNounID];
    return new nounDef.queryClass(aOptions);
  },
+
+ /**
+ * Create a collection/query for the given noun-type that only matches the
+ * provided items. This is to be used when you have an explicit set of items
+ * that you would still like to receive updates for.
+ */
+ explicitCollection(aNounID, aItems) {
+ let nounDef = this._nounIDToDef[aNounID];
+ let collection = new GlodaCollection(nounDef, aItems, null, null);
+ let query = new nounDef.explicitQueryClass(collection);
+ collection.query = query;
+ GlodaCollectionManager.registerCollection(collection);
+ return collection;
+ },
+
+ /**
+ * Debugging 'wildcard' collection creation support. A wildcard collection
+ * will 'accept' any new item instances presented to the collection manager
+ * as new. The result is that it allows you to be notified as new items
+ * as they are indexed, existing items as they are loaded from the database,
+ * etc.
+ * Because the items are added to the collection without limit, this will
+ * result in a leak if you don't do something to clean up after the
+ * collection. (Forgetting about the collection will suffice, as it is still
+ * weakly held.)
+ */
+ _wildcardCollection(aNounID, aItems) {
+ let nounDef = this._nounIDToDef[aNounID];
+ let collection = new GlodaCollection(nounDef, aItems, null, null);
+ let query = new nounDef.wildcardQueryClass(collection);
+ collection.query = query;
+ GlodaCollectionManager.registerCollection(collection);
+ return collection;
+ },
+
  /**
   * Attribute providers attempting to index something that experience a fatal
   * problem should throw one of these. For example:
   * "throw new Gloda.BadItemContentsError('Message lacks an author.');".
   *
   * We're not really taking advantage of this yet, but it's a good idea.
   */
  // Re-exported here (shorthand property) so providers can reach it via Gloda.
  BadItemContentsError,
+
+ /* eslint-disable complexity */
+ /**
+ * Populate a gloda representation of an item given the thus-far built
+ * representation, the previous representation, and one or more raw
+ * representations. The attribute providers/optimizers for the given noun
+ * type are invoked, allowing them to contribute/alter things. Following
+ * that, we build and persist our attribute representations.
+ *
+ * The result of the processing ends up with attributes in 3 different forms:
+ * - Database attribute rows (to be added and removed).
+ * - In-memory representation.
+ * - JSON-able representation.
+ *
+ * @param aItem The noun instance you want processed.
+ * @param aRawReps A dictionary that we pass to the attribute providers.
+ * There is a(n implied) contract between the caller of grokNounItem for a
+ * given noun type and the attribute providers for that noun type, and we
+ * have nothing to do with it OTHER THAN inserting a 'trueGlodaRep'
+ * value into it. In the event of reindexing an existing object, the
+ * gloda representation we pass to the indexers is actually a clone that
+ * allows the asynchronous indexers to mutate the object without
+ * causing visible changes in the existing representation of the gloda
+ * object. We patch the changes back onto the original item atomically
+ * once indexing completes. The 'trueGlodaRep' is then useful for
+ * objects that hang off of the gloda instance that need a reference
+ * back to their containing object for API convenience purposes.
+ * @param aIsConceptuallyNew Is the item "new" in the sense that it would
+ * never have been visible from within user code? This translates into
+ * whether this should trigger an itemAdded notification or an
+ * itemModified notification.
+ * @param aIsRecordNew Is the item "new" in the sense that we should INSERT
+ * a record rather than UPDATE-ing a record. For example, when dealing
+ * with messages where we may have a ghost, the ghost message is not a
+ * new record, but is conceptually new.
+ * @param aCallbackHandle The GlodaIndexer-style callback handle that is being
+ * used to drive this processing in an async fashion. (See
+ * GlodaIndexer._callbackHandle).
+ * @param aDoCache Should we allow this item to be contributed to its noun
+ * cache?
+ */
  *grokNounItem(
    aItem,
    aRawReps,
    aIsConceptuallyNew,
    aIsRecordNew,
    aCallbackHandle,
    aDoCache
  ) {
    let itemNounDef = aItem.NOUN_DEF;
    let attribsByBoundName = itemNounDef.attribsByBoundName;

    this._log.info(" ** grokNounItem: " + itemNounDef.name);

    // Database attribute rows to insert/remove; handed to the noun's
    // dbAttribAdjuster at the end of processing.
    let addDBAttribs = [];
    let removeDBAttribs = [];

    // The JSON-able representation; serialized onto aItem._jsonText below.
    let jsonDict = {};

    let aOldItem;
    aRawReps.trueGlodaRep = aItem;
    if (aIsConceptuallyNew) {
      // there is no old item if we are new.
      aOldItem = {};
    } else {
      aOldItem = aItem;
      // we want to create a clone of the existing item so that we can know the
      // deltas that happened for indexing purposes
      aItem = aItem._clone();
    }

    // Have the attribute providers directly set properties on the aItem
    let attrProviders = this._attrProviderOrderByNoun[itemNounDef.id];
    for (let iProvider = 0; iProvider < attrProviders.length; iProvider++) {
      this._log.info(" * provider: " + attrProviders[iProvider].providerName);
      yield aCallbackHandle.pushAndGo(
        attrProviders[iProvider].process(
          aItem,
          aRawReps,
          aIsConceptuallyNew,
          aCallbackHandle
        )
      );
    }

    // Optimizers run after all providers have contributed their attributes.
    let attrOptimizers = this._attrOptimizerOrderByNoun[itemNounDef.id];
    for (let iProvider = 0; iProvider < attrOptimizers.length; iProvider++) {
      this._log.info(
        " * optimizer: " + attrOptimizers[iProvider].providerName
      );
      yield aCallbackHandle.pushAndGo(
        attrOptimizers[iProvider].optimize(
          aItem,
          aRawReps,
          aIsConceptuallyNew,
          aCallbackHandle
        )
      );
    }
    this._log.info(" ** done with providers.");

    // Iterate over the attributes on the item
    for (let key of Object.keys(aItem)) {
      let value = aItem[key];
      // ignore keys that start with underscores, they are private and not
      // persisted by our attribute mechanism. (they are directly handled by
      // the object implementation.)
      if (key.startsWith("_")) {
        continue;
      }
      // find the attribute definition that corresponds to this key
      let attrib = attribsByBoundName[key];
      // if there's no attribute, that's not good, but not horrible.
      if (attrib === undefined) {
        this._log.warn("new proc ignoring attrib: " + key);
        continue;
      }

      let attribDB = attrib.dbDef;
      let objectNounDef = attrib.objectNounDef;

      // - translate for our JSON rep
      if (attrib.singular) {
        if (objectNounDef.toJSON) {
          jsonDict[attrib.id] = objectNounDef.toJSON(value);
        } else {
          jsonDict[attrib.id] = value;
        }
      } else if (objectNounDef.toJSON) {
        let toJSON = objectNounDef.toJSON;
        jsonDict[attrib.id] = [];
        for (let subValue of value) {
          jsonDict[attrib.id].push(toJSON(subValue));
        }
      } else {
        jsonDict[attrib.id] = value;
      }

      let oldValue = aOldItem[key];

      // the 'old' item is still the canonical one; update it
      // do the update now, because we may skip operations on addDBAttribs and
      // removeDBattribs, if the attribute is not to generate entries in
      // messageAttributes
      if (oldValue !== undefined || !aIsConceptuallyNew) {
        aOldItem[key] = value;
      }

      // the new canQuery property has to be set to true to generate entries
      // in the messageAttributes table. Any other truthy value (like a non
      // empty string), will still make the message query-able but without
      // using the database.
      if (attrib.canQuery !== true) {
        continue;
      }

      // - database index attributes

      // perform a delta analysis against the old value, if we have one
      if (oldValue !== undefined) {
        // in the singular case if they don't match, it's one add and one remove
        if (attrib.singular) {
          // test for identicality, failing that, see if they have explicit
          // equals support.
          // NOTE(review): assumes a defined singular value is never null —
          // a null here would throw on the .equals access; confirm callers
          // guarantee this.
          if (
            value !== oldValue &&
            (!value.equals || !value.equals(oldValue))
          ) {
            addDBAttribs.push(attribDB.convertValuesToDBAttributes([value])[0]);
            removeDBAttribs.push(
              attribDB.convertValuesToDBAttributes([oldValue])[0]
            );
          }
        } else if (objectNounDef.computeDelta) {
          // in the plural case, we have to figure the deltas accounting for
          // possible changes in ordering (which is insignificant from an
          // indexing perspective)
          // some nouns may not meet === equivalence needs, so must provide a
          // custom computeDelta method to help us out
          let [valuesAdded, valuesRemoved] = objectNounDef.computeDelta(
            value,
            oldValue
          );
          // convert the values to database-style attribute rows
          addDBAttribs.push.apply(
            addDBAttribs,
            attribDB.convertValuesToDBAttributes(valuesAdded)
          );
          removeDBAttribs.push.apply(
            removeDBAttribs,
            attribDB.convertValuesToDBAttributes(valuesRemoved)
          );
        } else {
          // build a map of the previous values; we will delete the values as
          // we see them so that we will know what old values are no longer
          // present in the current set of values.
          let oldValueMap = {};
          for (let anOldValue of oldValue) {
            // remember, the key is just the toString'ed value, so we need to
            // store and use the actual value as the value!
            oldValueMap[anOldValue] = anOldValue;
          }
          // traverse the current values...
          let valuesAdded = [];
          for (let curValue of value) {
            if (curValue in oldValueMap) {
              delete oldValueMap[curValue];
            } else {
              valuesAdded.push(curValue);
            }
          }
          // anything still on oldValueMap was removed.
          let valuesRemoved = Object.keys(oldValueMap).map(
            key => oldValueMap[key]
          );
          // convert the values to database-style attribute rows
          addDBAttribs.push.apply(
            addDBAttribs,
            attribDB.convertValuesToDBAttributes(valuesAdded)
          );
          removeDBAttribs.push.apply(
            removeDBAttribs,
            attribDB.convertValuesToDBAttributes(valuesRemoved)
          );
        }

        // Add/remove the empty set indicator as appropriate.
        if (attrib.emptySetIsSignificant) {
          // if we are now non-zero but previously were zero, remove.
          if (value.length && !oldValue.length) {
            removeDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
          } else if (!value.length && oldValue.length) {
            // We are now zero length but previously were not, add.
            addDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
          }
        }
      } else {
        // no old value, all values are new
        // add the db reps on the new values
        if (attrib.singular) {
          value = [value];
        }
        addDBAttribs.push.apply(
          addDBAttribs,
          attribDB.convertValuesToDBAttributes(value)
        );
        // Add the empty set indicator for the attribute id if appropriate.
        if (!value.length && attrib.emptySetIsSignificant) {
          addDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
        }
      }
    }

    // Iterate over any remaining values in old items for purge purposes.
    for (let key of Object.keys(aOldItem)) {
      let value = aOldItem[key];
      // ignore keys that start with underscores, they are private and not
      // persisted by our attribute mechanism. (they are directly handled by
      // the object implementation.)
      if (key.startsWith("_")) {
        continue;
      }
      // ignore things we saw in the new guy
      if (key in aItem) {
        continue;
      }

      // find the attribute definition that corresponds to this key
      let attrib = attribsByBoundName[key];
      // if there's no attribute, that's not good, but not horrible.
      if (attrib === undefined) {
        continue;
      }

      // delete these from the old item, as the old item is canonical, and
      // should no longer have these values
      delete aOldItem[key];

      if (attrib.canQuery !== true) {
        this._log.debug(
          "Not inserting attribute " +
            attrib.attributeName +
            " into the db, since we don't plan on querying on it"
        );
        continue;
      }

      if (attrib.singular) {
        value = [value];
      }
      let attribDB = attrib.dbDef;
      removeDBAttribs.push.apply(
        removeDBAttribs,
        attribDB.convertValuesToDBAttributes(value)
      );
      // remove the empty set marker if there should have been one
      if (!value.length && attrib.emptySetIsSignificant) {
        removeDBAttribs.push([GlodaDatastore.kEmptySetAttrId, attribDB.id]);
      }
    }

    // Persist the JSON representation on the item itself.
    aItem._jsonText = JSON.stringify(jsonDict);
    this._log.debug(" json text: " + aItem._jsonText);

    if (aIsRecordNew) {
      this._log.debug(" inserting item");
      itemNounDef.objInsert.call(itemNounDef.datastore, aItem);
    } else {
      this._log.debug(" updating item");
      itemNounDef.objUpdate.call(itemNounDef.datastore, aItem);
    }

    this._log.debug(
      " adjusting attributes, add: " + addDBAttribs + " rem: " + removeDBAttribs
    );
    itemNounDef.dbAttribAdjuster.call(
      itemNounDef.datastore,
      aItem,
      addDBAttribs,
      removeDBAttribs
    );

    // Patch the clone's changes back onto the original (canonical) item now
    // that asynchronous indexing has completed.
    if (!aIsConceptuallyNew && "_declone" in aOldItem) {
      aOldItem._declone(aItem);
    }

    // Cache ramifications...
    if (aDoCache === undefined || aDoCache) {
      if (aIsConceptuallyNew) {
        GlodaCollectionManager.itemsAdded(aItem.NOUN_ID, [aItem]);
      } else {
        GlodaCollectionManager.itemsModified(aOldItem.NOUN_ID, [aOldItem]);
      }
    }

    this._log.debug(" done grokking.");

    yield GlodaConstants.kWorkDone;
  },
+ /* eslint-enable complexity */
+
+ /**
+ * Processes a list of noun instances for their score within a given context.
+ * This is primarily intended for use by search ranking mechanisms, but could
+ * be used elsewhere too. (It does, however, depend on the complicity of the
+ * score method implementations to not get confused.)
+ *
+ * @param aItems The non-empty list of items to score.
+ * @param aContext A noun-specific dictionary that we just pass to the funcs.
+ * @param aExtraScoreFuncs A list of extra scoring functions to apply.
+ * @returns A list of integer scores equal in length to aItems.
+ */
+ scoreNounItems(aItems, aContext, aExtraScoreFuncs) {
+ let scores = [];
+ // bail if there is nothing to score
+ if (!aItems.length) {
+ return scores;
+ }
+
+ let itemNounDef = aItems[0].NOUN_DEF;
+ if (aExtraScoreFuncs == null) {
+ aExtraScoreFuncs = [];
+ }
+
+ for (let item of aItems) {
+ let score = 0;
+ let attrProviders = this._attrProviderOrderByNoun[itemNounDef.id];
+ for (let iProvider = 0; iProvider < attrProviders.length; iProvider++) {
+ let provider = attrProviders[iProvider];
+ if (provider.score) {
+ score += provider.score(item);
+ }
+ }
+ for (let extraScoreFunc of aExtraScoreFuncs) {
+ score += extraScoreFunc(item, aContext);
+ }
+ scores.push(score);
+ }
+
+ return scores;
+ },
+};
+
/* Initialize the Gloda object/namespace before we return it. */
try {
  Gloda._init();
} catch (ex) {
  Gloda._log.debug(
    "Exception during Gloda init (" +
      ex.fileName +
      ":" +
      ex.lineNumber +
      "): " +
      ex
  );
}
/* Keep in mind that we effectively depend on Everybody.jsm as well; for now
   our importer is expected to import that module too if they need us fully
   armed and operational. */
diff --git a/comm/mailnews/db/gloda/modules/GlodaConstants.jsm b/comm/mailnews/db/gloda/modules/GlodaConstants.jsm
new file mode 100644
index 0000000000..1e6d253f09
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaConstants.jsm
@@ -0,0 +1,250 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, you can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * The constants used by Gloda files. Avoid importing anything into this file.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaConstants"];
+
var GlodaConstants = {
  /** The indexer has nothing to do. */
  kIndexerIdle: 0,
  /**
   * The indexer is doing something. (We used to track finer-grained states,
   * but they were rendered irrelevant and wiped from existence.)
   */
  kIndexerIndexing: 1,

  /**
   * One token's worth of synchronous processing was performed. Code doing
   * synchronous work should yield kWorkSync periodically so it does not
   * dominate the event loop; the driver may re-schedule it for later based on
   * how long it actually took. Unless the processing is particularly
   * intensive, applying a decimation factor (ex: 32 or 64) with the general
   * goal of yielding every 3-10 milliseconds is reasonable.
   */
  kWorkSync: 0,
  /**
   * Asynchronous activity was started; relinquish flow control and trust that
   * callbackDriver will be called later.
   */
  kWorkAsync: 1,
  /**
   * The task is entirely done; close us out and find something else to do.
   */
  kWorkDone: 2,
  /**
   * Not done yet, but we believe we have tied up the event loop for a
   * non-trivial amount of time, so please re-schedule us in the future.
   *
   * Currently only used internally by the indexer's batching logic; minor
   * changes may be required if actual indexers want to use it.
   */
  kWorkPause: 3,
  /**
   * Done, with a result to hand back. Only for use via your callback handle's
   * doneWithResult method, ex:
   * "yield aCallbackHandle.doneWithResult(myResult);".
   */
  kWorkDoneWithResult: 4,

  /** An attribute that is a defining characteristic of the subject. */
  kAttrFundamental: 0,
  /**
   * An attribute derived from two or more fundamental attributes that exists
   * solely to improve database query performance.
   */
  kAttrOptimization: 1,
  /**
   * An attribute derived from the content of the subject. For example, a
   * message that references a bugzilla bug could have a "derived" attribute
   * that captures the bugzilla reference.
   */
  kAttrDerived: 2,
  /**
   * An attribute resulting from an explicit and intentional user action upon
   * the subject; ex: a tag placed on a message by the user (or by a filter at
   * the user's request).
   */
  kAttrExplicit: 3,
  /**
   * An attribute that indirectly results from the user's behaviour; ex: a
   * message consulted multiple times is "implied" to be interesting.
   */
  kAttrImplicit: 4,

  /**
   * Not 'special' at all: stored as a (thing id, attribute id, attribute id)
   * tuple in the database rather than on the thing's own row or fulltext row.
   * (Where "thing" could be a message or any other first class noun.)
   */
  kSpecialNotAtAll: 0,
  /**
   * Stored as a numeric column on the noun's row. Such an attribute
   * definition carries this value as 'special' and names the backing column
   * via 'specialColumnName'.
   */
  kSpecialColumn: 16,
  kSpecialColumnChildren: 16 | 1,
  kSpecialColumnParent: 16 | 2,
  /**
   * Stored as a string column on the noun's row. Differs from kSpecialColumn
   * in that it is a string, which once had query ramifications and one day
   * may have them again.
   */
  kSpecialString: 32,
  /**
   * Stored as a fulltext column on the noun's fulltext table. Such an
   * attribute definition carries this value as 'special' and names the
   * backing column via 'specialColumnName'.
   */
  kSpecialFulltext: 64,

  /**
   * The extensionName used for attributes defined by core gloda plugins such
   * as GlodaFundAttr.jsm and GlodaExplicitAttr.jsm.
   */
  BUILT_IN: "built-in",

  /**
   * Special sentinel value that makes facets skip a noun instance when an
   * attribute takes this value.
   */
  IGNORE_FACET: "ignore-facet",

  /*
   * Explicit noun IDs. Most extension-provided nouns get dynamically
   * allocated ids looked up by name, but the ids below can be relied upon to
   * exist and be accessible via these pseudo-constants. It's not really clear
   * that we need them, although they do let code avoid looking up all of its
   * nouns at initialization time.
   */
  /**
   * Boolean values, expressed as 0/1 in the database and non-continuous for
   * constraint purposes. Like numbers, such nouns lack any context of their
   * own and require their attributes to provide it. (Claiming a boolean value
   * is a noun may be a bad idea; a change of nomenclature — or requiring each
   * boolean noun to actually be its own noun — may be in order.)
   */
  NOUN_BOOLEAN: 1,
  /**
   * A number, integer or floating point. Treated as continuous, so queries on
   * such attributes can express ranged constraints. Numbers likewise lack
   * inherent context and depend on their attributes to parameterize them.
   * Same conceptual caveat as NOUN_BOOLEAN.
   */
  NOUN_NUMBER: 2,
  /**
   * A (non-fulltext) string.
   * Same conceptual caveat as NOUN_BOOLEAN.
   */
  NOUN_STRING: 3,
  /** A date, encoded as a PRTime, represented as a js Date object. */
  NOUN_DATE: 10,
  /**
   * Fulltext search support, somewhat magical. Only intended for use with
   * kSpecialFulltext attributes, and exclusively as a constraint mechanism;
   * values are always strings. Users are presumed to know how to generate
   * SQLite FTS3-style MATCH queries, or to be okay with us gluing them
   * together with " OR " in the or-constraint case. Gloda's query mechanism
   * currently cannot compile Gloda-style and-constraints into a single MATCH
   * query, but things still turn out okay, just less efficiently than they
   * could.
   */
  NOUN_FULLTEXT: 20,
  /**
   * A MIME Type. We currently lack any human-intelligible descriptions of
   * mime types.
   */
  NOUN_MIME_TYPE: 40,
  /**
   * A message tag together with when the tag's presence was observed, hoping
   * to approximate when the tag was applied — a somewhat dubious attempt to
   * not waste our opportunity to store a value alongside the tag. (The tag is
   * actually stored as an attribute parameter on the attribute definition,
   * rather than a value in the attribute 'instance' for the message.)
   */
  NOUN_TAG: 50,
  /**
   * Doesn't actually work owing to a lack of an object to represent a folder.
   * We do expose the folderURI and folderID of a message, but need to map
   * that to a good abstraction — probably something thin around a SteelFolder
   * or the like, contributing easy traversal from a folder to the gloda
   * messages in it, plus the folder's indexing preferences.
   *
   * @TODO folder noun and related abstraction
   */
  NOUN_FOLDER: 100,
  /**
   * Every message belongs to a conversation. See GlodaDataModel.jsm for the
   * definition of the GlodaConversation class.
   */
  NOUN_CONVERSATION: 101,
  /**
   * A one-to-one correspondence with underlying (indexed) nsIMsgDBHdr
   * instances. See GlodaDataModel.jsm for the GlodaMessage class.
   */
  NOUN_MESSAGE: 102,
  /**
   * A human being, who may have multiple electronic identities (a la
   * NOUN_IDENTITY). No association with an address book contact is required,
   * although when such a contact exists we want to be associated with it. See
   * GlodaDataModel.jsm for the GlodaContact class.
   */
  NOUN_CONTACT: 103,
  /**
   * A single identity of a contact, who may have one or more: e-mail
   * accounts, instant messaging accounts, social network site accounts, etc.
   * See GlodaDataModel.jsm for the GlodaIdentity class.
   */
  NOUN_IDENTITY: 104,
  /**
   * An attachment to a message. A message may have many different attachments.
   */
  NOUN_ATTACHMENT: 105,
  /**
   * An account related to a message. A message can have only one account.
   */
  NOUN_ACCOUNT: 106,

  /**
   * Parameterized identities, for the from-me/to-me/cc-me optimization cases.
   * Not for reuse without some thought. These nouns store the 'me' identity
   * under discussion in the attribute parameter and the other party's
   * identity in the attribute value — so for both from-me and to-me cases
   * involving 'me' and 'foo@bar', 'me' is always the parameter and 'foo@bar'
   * is always the value. See GlodaFundAttr.jsm for more information, but you
   * probably shouldn't be touching this unless you are fundattr.
   */
  NOUN_PARAM_IDENTITY: 200,

  kConstraintIdIn: 0,
  kConstraintIn: 1,
  kConstraintRanges: 2,
  kConstraintEquals: 3,
  kConstraintStringLike: 4,
  kConstraintFulltext: 5,
};
diff --git a/comm/mailnews/db/gloda/modules/GlodaContent.jsm b/comm/mailnews/db/gloda/modules/GlodaContent.jsm
new file mode 100644
index 0000000000..5f1daf5e9c
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaContent.jsm
@@ -0,0 +1,285 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "GlodaContent",
+ "whittlerRegistry",
+ "mimeMsgToContentAndMeta",
+ "mimeMsgToContentSnippetAndMeta",
+];
+
/**
 * Run every registered content whittler over a message and collect the
 * results.
 *
 * @param aMimeMsg: the MimeMessage instance
 * @param folder: the nsIMsgDBFolder
 * @returns an array containing the GlodaContent instance, and the meta
 *   dictionary that the Gloda content providers may have filled with useful
 *   data.
 */

function mimeMsgToContentAndMeta(aMimeMsg, folder) {
  const meta = { subject: aMimeMsg.get("subject") };
  const content = new GlodaContent();
  const bodyLines = aMimeMsg.coerceBodyToPlaintext(folder).split(/\r?\n/);

  for (const whittler of whittlerRegistry.getWhittlers()) {
    whittler.contentWhittle(meta, bodyLines, content);
  }

  return [content, meta];
}
+
/**
 * Produce a whittled content string suitable for summarizing a message.
 *
 * @param aMimeMsg: the MimeMessage instance
 * @param folder: the nsIMsgDBFolder
 * @param length: optional number of characters to trim the whittled content
 *   to. When the actual content is longer than |length|, the return value is
 *   the first (length-1) characters with an ellipsis appended.
 * @returns an array containing the text of the snippet, and the meta
 *   dictionary that the Gloda content providers may have filled with useful
 *   data.
 */

function mimeMsgToContentSnippetAndMeta(aMimeMsg, folder, length) {
  const [content, meta] = mimeMsgToContentAndMeta(aMimeMsg, folder);

  let text = content.getContentSnippet(length + 1);
  if (length && text.length > length) {
    // Too long: truncate and mark the elision with a Unicode ellipsis.
    text = text.substring(0, length - 1) + "\u2026";
  }
  return [text, meta];
}
+
/**
 * A registry of gloda providers that have contentWhittle() functions.
 * Consumed by mimeMsgToContentAndMeta, but populated by the Gloda object as
 * it processes providers.
 */
function WhittlerRegistry() {
  this._whittlers = [];
}

WhittlerRegistry.prototype = {
  /**
   * Add a provider as a content whittler.
   */
  registerWhittler(provider) {
    this._whittlers.push(provider);
  },
  /**
   * Get the list of content whittlers, sorted from the most specific to the
   * most generic. A fresh array is returned each time so the registry's
   * internal list is never exposed or mutated by callers.
   */
  getWhittlers() {
    return [...this._whittlers].reverse();
  },
};

const whittlerRegistry = new WhittlerRegistry();
+
/**
 * Accumulates the interpreted content of a message as contributed by content
 * providers ("whittlers"). Providers volunteer an interpretation at a
 * priority; the highest-priority volunteer becomes the active producer and
 * contributes hunks of content, quoted text, and meta-data, plus key/value
 * and key/delta-value records. Consumers read the result back out via
 * getContentString / getContentSnippet.
 */
function GlodaContent() {
  // Priority of the interpretation we kept; null means no producer has
  // volunteered content yet (see hasContent / volunteerContent).
  this._contentPriority = null;
  // True while the most recent volunteerContent() caller won the right to
  // produce; producer API calls are silently ignored when false.
  this._producing = false;
  // Initialize all accumulator state here (not just _hunks as before) so the
  // consumer API is safe to call even if no producer ever volunteers.
  this._hunks = [];
  this._keysAndValues = [];
  this._keysAndDeltaValues = [];
  this._curHunk = null;
}

GlodaContent.prototype = {
  kPriorityBase: 0,
  kPriorityPerfect: 100,

  // Hunk type constants.
  kHunkMeta: 1,
  kHunkQuoted: 2,
  kHunkContent: 3,

  /** Discard all accumulated state so a new producer starts from scratch. */
  _resetContent() {
    this._keysAndValues = [];
    this._keysAndDeltaValues = [];
    this._hunks = [];
    this._curHunk = null;
  },

  /* ===== Consumer API ===== */
  /** @returns true if some producer has volunteered content. */
  hasContent() {
    return this._contentPriority != null;
  },

  /**
   * Return content suitable for snippet display. This means that no quoting
   * or meta-data should be returned.
   *
   * @param aMaxLength The maximum snippet length desired.
   */
  getContentSnippet(aMaxLength) {
    let content = this.getContentString();
    if (aMaxLength) {
      content = content.substring(0, aMaxLength);
    }
    return content;
  },

  /**
   * Join all content hunks (skipping quoted and meta hunks) with newlines.
   *
   * @param aIndexingPurposes When true, additionally append the stored
   *     key/value and key/delta-value payloads so they become part of the
   *     text handed to full-text indexing.
   * @returns The accumulated content string.
   */
  getContentString(aIndexingPurposes) {
    let data = "";
    for (let hunk of this._hunks) {
      if (hunk.hunkType == this.kHunkContent) {
        if (data) {
          data += "\n" + hunk.data;
        } else {
          data = hunk.data;
        }
      }
    }

    if (aIndexingPurposes) {
      // append the values for indexing. we assume the keywords are cruft.
      // this may be crazy, but things that aren't a science aren't an exact
      // science.
      for (let kv of this._keysAndValues) {
        data += "\n" + kv[1];
      }
      // BUGFIX: this loop previously iterated _keysAndValues a second time;
      // those entries are [key, value] pairs with no index 2, so every pair
      // appended a literal "undefined". The [key, oldValue, newValue]
      // triples live in _keysAndDeltaValues.
      for (let kon of this._keysAndDeltaValues) {
        data += "\n" + kon[1] + "\n" + kon[2];
      }
    }

    return data;
  },

  /* ===== Producer API ===== */
  /**
   * Called by a producer with the priority they believe their interpretation
   * of the content comes in at.
   *
   * @returns true if we believe the producer's interpretation will be
   *     interesting and they should go ahead and generate events. We return
   *     false if we don't think they are interesting, in which case they
   *     should probably not issue calls to us, although we don't care. (We
   *     will ignore their calls if we return false; this allows the
   *     simplification of code that needs to run anyways.)
   */
  volunteerContent(aPriority) {
    if (this._contentPriority === null || this._contentPriority < aPriority) {
      this._contentPriority = aPriority;
      this._resetContent();
      this._producing = true;
      return true;
    }
    this._producing = false;
    return false;
  },

  /** Record a key/value pair contributed by the active producer. */
  keyValue(aKey, aValue) {
    if (!this._producing) {
      return;
    }

    this._keysAndValues.push([aKey, aValue]);
  },
  /** Record a key with an old-value/new-value delta. */
  keyValueDelta(aKey, aOldValue, aNewValue) {
    if (!this._producing) {
      return;
    }

    this._keysAndDeltaValues.push([aKey, aOldValue, aNewValue]);
  },

  /**
   * Meta lines are lines that have to do with the content but are not the
   * content and can generally be related to an attribute that has been
   * derived and stored on the item.
   * For example, a bugzilla bug may note that an attachment was created; this
   * is not content and wouldn't be desired in a snippet, but is still
   * potentially interesting meta-data.
   *
   * @param aLineOrLines The line or list of lines that are meta-data.
   * @param aAttr The attribute this meta-data is associated with.
   * @param aIndex If the attribute is non-singular, indicate the specific
   *     index of the item in the attribute's bound list that the meta-data
   *     is associated with.
   */
  meta(aLineOrLines, aAttr, aIndex) {
    if (!this._producing) {
      return;
    }

    let data;
    if (typeof aLineOrLines == "string") {
      data = aLineOrLines;
    } else {
      data = aLineOrLines.join("\n");
    }

    this._curHunk = {
      hunkType: this.kHunkMeta,
      attr: aAttr,
      index: aIndex,
      data,
    };
    this._hunks.push(this._curHunk);
  },
  /**
   * Quoted lines reference previous messages or what not.
   * Consecutive quoted calls with identical depth/origin/target are merged
   * into the current quoted hunk.
   *
   * @param aLineOrLines The line or list of lines that are quoted.
   * @param aDepth The depth of the quoting.
   * @param aOrigin The item that originated the original content, if known.
   *     For example, perhaps a GlodaMessage?
   * @param aTarget A reference to the location in the original content, if
   *     known. For example, the index of a line in a message or something?
   */
  quoted(aLineOrLines, aDepth, aOrigin, aTarget) {
    if (!this._producing) {
      return;
    }

    let data;
    if (typeof aLineOrLines == "string") {
      data = aLineOrLines;
    } else {
      data = aLineOrLines.join("\n");
    }

    if (
      !this._curHunk ||
      this._curHunk.hunkType != this.kHunkQuoted ||
      this._curHunk.depth != aDepth ||
      this._curHunk.origin != aOrigin ||
      this._curHunk.target != aTarget
    ) {
      this._curHunk = {
        hunkType: this.kHunkQuoted,
        data,
        depth: aDepth,
        origin: aOrigin,
        target: aTarget,
      };
      this._hunks.push(this._curHunk);
    } else {
      this._curHunk.data += "\n" + data;
    }
  },

  /**
   * Contribute actual content line(s); consecutive content calls are merged
   * into the current content hunk.
   *
   * @param aLineOrLines The line or list of lines of content.
   */
  content(aLineOrLines) {
    if (!this._producing) {
      return;
    }

    let data;
    if (typeof aLineOrLines == "string") {
      data = aLineOrLines;
    } else {
      data = aLineOrLines.join("\n");
    }

    if (!this._curHunk || this._curHunk.hunkType != this.kHunkContent) {
      this._curHunk = { hunkType: this.kHunkContent, data };
      this._hunks.push(this._curHunk);
    } else {
      this._curHunk.data += "\n" + data;
    }
  },
};
diff --git a/comm/mailnews/db/gloda/modules/GlodaDataModel.jsm b/comm/mailnews/db/gloda/modules/GlodaDataModel.jsm
new file mode 100644
index 0000000000..d9361c079c
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaDataModel.jsm
@@ -0,0 +1,1020 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "GlodaAttributeDBDef",
+ "GlodaAccount",
+ "GlodaConversation",
+ "GlodaFolder",
+ "GlodaMessage",
+ "GlodaContact",
+ "GlodaIdentity",
+ "GlodaAttachment",
+];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var LOG = console.createInstance({
+ prefix: "gloda.datamodel",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * @class Represents a gloda attribute definition's DB form. This class
+ * stores the information in the database relating to this attribute
+ * definition. Access its attrDef attribute to get at the really juicy data.
+ * The main interesting thing this class does is serve as the keeper of the
+ * mapping from parameters to attribute ids in the database if this is a
+ * parameterized attribute.
+ */
+function GlodaAttributeDBDef(
+ aDatastore,
+ aID,
+ aCompoundName,
+ aAttrType,
+ aPluginName,
+ aAttrName
+) {
+ // _datastore is now set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._compoundName = aCompoundName;
+ this._attrType = aAttrType;
+ this._pluginName = aPluginName;
+ this._attrName = aAttrName;
+
+ // The in-memory attribute definition; filled in externally (stays null
+ // until then).
+ this.attrDef = null;
+
+ /** Map parameter values to the underlying database id. */
+ this._parameterBindings = {};
+}
+
+GlodaAttributeDBDef.prototype = {
+ // set by GlodaDatastore
+ _datastore: null,
+ get id() {
+ return this._id;
+ },
+ get attributeName() {
+ return this._attrName;
+ },
+
+ get parameterBindings() {
+ return this._parameterBindings;
+ },
+
+ /**
+ * Bind a parameter value to the attribute definition, allowing use of the
+ * attribute-parameter as an attribute.
+ *
+ * @returns The database attribute id for the given parameter value,
+ * creating a new attribute definition row on first use.
+ */
+ bindParameter(aValue) {
+ // people probably shouldn't call us with null, but handle it
+ if (aValue == null) {
+ return this._id;
+ }
+ if (aValue in this._parameterBindings) {
+ return this._parameterBindings[aValue];
+ }
+ // no database entry exists if we are here, so we must create it...
+ let id = this._datastore._createAttributeDef(
+ this._attrType,
+ this._pluginName,
+ this._attrName,
+ aValue
+ );
+ this._parameterBindings[aValue] = id;
+ this._datastore.reportBinding(id, this, aValue);
+ return id;
+ },
+
+ /**
+ * Given a list of values, return a list (regardless of plurality) of
+ * database-ready [attribute id, value] tuples. This is intended to be used
+ * to directly convert the value of a property on an object that corresponds
+ * to a bound attribute.
+ *
+ * @param {Array} aInstanceValues An array of instance values regardless of
+ * whether or not the attribute is singular.
+ */
+ convertValuesToDBAttributes(aInstanceValues) {
+ let nounDef = this.attrDef.objectNounDef;
+ let dbAttributes = [];
+ if (nounDef.usesParameter) {
+ for (let instanceValue of aInstanceValues) {
+ let [param, dbValue] = nounDef.toParamAndValue(instanceValue);
+ dbAttributes.push([this.bindParameter(param), dbValue]);
+ }
+ } else if ("toParamAndValue" in nounDef) {
+ // Not generating any attributes is ok. This basically means the noun is
+ // just an informative property on the Gloda Message and has no real
+ // indexing purposes.
+ for (let instanceValue of aInstanceValues) {
+ dbAttributes.push([
+ this._id,
+ nounDef.toParamAndValue(instanceValue)[1],
+ ]);
+ }
+ }
+ return dbAttributes;
+ },
+
+ toString() {
+ return this._compoundName;
+ },
+};
+
+/**
+ * Mix-in providing attribute enumeration and DOM-attribute contribution for
+ * gloda nouns that expose a NOUN_DEF with attribute maps.
+ */
+var GlodaHasAttributesMixIn = {
+ *enumerateAttributes() {
+ let nounDef = this.NOUN_DEF;
+ for (let key in this) {
+ let value = this[key];
+ let attrDef = nounDef.attribsByBoundName[key];
+ // We expect to not have attributes for underscore-prefixed values (those
+ // are managed by the instance's logic). We also want to not explode
+ // should someone put other values in there; we get both birds with this
+ // one stone.
+ if (attrDef === undefined) {
+ continue;
+ }
+ if (attrDef.singular) {
+ // ignore attributes with null values
+ if (value != null) {
+ yield [attrDef, [value]];
+ }
+ } else if (value.length) {
+ // ignore attributes with no values
+ yield [attrDef, value];
+ }
+ }
+ },
+
+ domContribute(aDomNode) {
+ let nounDef = this.NOUN_DEF;
+ for (let attrName in nounDef.domExposeAttribsByBoundName) {
+ let attr = nounDef.domExposeAttribsByBoundName[attrName];
+ if (this[attrName]) {
+ aDomNode.setAttribute(attr.domExpose, this[attrName]);
+ }
+ }
+ },
+};
+
+/**
+ * Copy the properties of aMixIn onto aConstructor's prototype. Properties
+ * whose name starts with "get_" are installed as getters under the name
+ * minus that prefix; everything else is copied as a plain method/value.
+ */
+function MixIn(aConstructor, aMixIn) {
+ let proto = aConstructor.prototype;
+ for (let [name, func] of Object.entries(aMixIn)) {
+ if (name.startsWith("get_")) {
+ proto.__defineGetter__(name.substring(4), func);
+ } else {
+ proto[name] = func;
+ }
+ }
+}
+
+/**
+ * @class A gloda wrapper around nsIMsgIncomingServer.
+ */
+function GlodaAccount(aIncomingServer) {
+ this._incomingServer = aIncomingServer;
+}
+
+GlodaAccount.prototype = {
+ NOUN_ID: 106,
+ // The account id is the underlying server's key.
+ get id() {
+ return this._incomingServer.key;
+ },
+ get name() {
+ return this._incomingServer.prettyName;
+ },
+ get incomingServer() {
+ return this._incomingServer;
+ },
+ toString() {
+ return "Account: " + this.id;
+ },
+
+ toLocaleString() {
+ return this.name;
+ },
+};
+
+/**
+ * @class A gloda conversation (thread) exists so that messages can belong.
+ */
+function GlodaConversation(
+ aDatastore,
+ aID,
+ aSubject,
+ aOldestMessageDate,
+ aNewestMessageDate
+) {
+ // _datastore is now set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._subject = aSubject;
+ this._oldestMessageDate = aOldestMessageDate;
+ this._newestMessageDate = aNewestMessageDate;
+}
+
+GlodaConversation.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_CONVERSATION,
+ // set by GlodaDatastore
+ _datastore: null,
+ get id() {
+ return this._id;
+ },
+ get subject() {
+ return this._subject;
+ },
+ get oldestMessageDate() {
+ return this._oldestMessageDate;
+ },
+ get newestMessageDate() {
+ return this._newestMessageDate;
+ },
+
+ /**
+ * Retrieve a collection of all messages in this conversation, ordered by
+ * date. Results are delivered asynchronously to aListener.
+ */
+ getMessagesCollection(aListener, aData) {
+ let query = new GlodaMessage.prototype.NOUN_DEF.queryClass();
+ query.conversation(this._id).orderBy("date");
+ return query.getCollection(aListener, aData);
+ },
+
+ toString() {
+ return "Conversation:" + this._id;
+ },
+
+ toLocaleString() {
+ return this._subject;
+ },
+};
+
+/**
+ * @class A gloda folder. Tracks the dirty status, compaction flag and
+ * indexing priority of the nsIMsgFolder identified by its URI.
+ */
+function GlodaFolder(
+ aDatastore,
+ aID,
+ aURI,
+ aDirtyStatus,
+ aPrettyName,
+ aIndexingPriority
+) {
+ // _datastore is now set by GlodaDatastore
+ this._id = aID;
+ this._uri = aURI;
+ this._dirtyStatus = aDirtyStatus;
+ this._prettyName = aPrettyName;
+ this._account = null;
+ this._activeIndexing = false;
+ this._indexingPriority = aIndexingPriority;
+ this._deleted = false;
+ this._compacting = false;
+}
+
+GlodaFolder.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_FOLDER,
+ // set by GlodaDatastore
+ _datastore: null,
+
+ /** The folder is believed to be up-to-date */
+ kFolderClean: 0,
+ /** The folder has some un-indexed or dirty messages */
+ kFolderDirty: 1,
+ /** The folder needs to be entirely re-indexed, regardless of the flags on
+ * the messages in the folder. This state will be downgraded to dirty */
+ kFolderFilthy: 2,
+
+ // Mask selecting the clean/dirty/filthy portion of _dirtyStatus; the
+ // compacted flag lives in the bit above it.
+ _kFolderDirtyStatusMask: 0x7,
+ /**
+ * The (local) folder has been compacted and all of its message keys are
+ * potentially incorrect. This is not a possible state for IMAP folders
+ * because their message keys are based on UIDs rather than offsets into
+ * the mbox file.
+ */
+ _kFolderCompactedFlag: 0x8,
+
+ /** The folder should never be indexed. */
+ kIndexingNeverPriority: -1,
+ /** The lowest priority assigned to a folder. */
+ kIndexingLowestPriority: 0,
+ /** The highest priority assigned to a folder. */
+ kIndexingHighestPriority: 100,
+
+ /** The indexing priority for a folder if no other priority is assigned. */
+ kIndexingDefaultPriority: 20,
+ /** Folders marked check new are slightly more important I guess. */
+ kIndexingCheckNewPriority: 30,
+ /** Favorite folders are more interesting to the user, presumably. */
+ kIndexingFavoritePriority: 40,
+ /** The indexing priority for inboxes. */
+ kIndexingInboxPriority: 50,
+ /** The indexing priority for sent mail folders. */
+ kIndexingSentMailPriority: 60,
+
+ get id() {
+ return this._id;
+ },
+ get uri() {
+ return this._uri;
+ },
+ get dirtyStatus() {
+ return this._dirtyStatus & this._kFolderDirtyStatusMask;
+ },
+ /**
+ * Mark a folder as dirty if it was clean. Do nothing if it was already dirty
+ * or filthy. For use by GlodaMsgIndexer only. And maybe rkent and his
+ * marvelous extensions.
+ */
+ _ensureFolderDirty() {
+ if (this.dirtyStatus == this.kFolderClean) {
+ // Preserve the compacted flag bits while updating the status bits.
+ this._dirtyStatus =
+ (this.kFolderDirty & this._kFolderDirtyStatusMask) |
+ (this._dirtyStatus & ~this._kFolderDirtyStatusMask);
+ this._datastore.updateFolderDirtyStatus(this);
+ }
+ },
+ /**
+ * Definitely for use only by GlodaMsgIndexer to downgrade the dirty status of
+ * a folder.
+ */
+ _downgradeDirtyStatus(aNewStatus) {
+ if (this.dirtyStatus != aNewStatus) {
+ this._dirtyStatus =
+ (aNewStatus & this._kFolderDirtyStatusMask) |
+ (this._dirtyStatus & ~this._kFolderDirtyStatusMask);
+ this._datastore.updateFolderDirtyStatus(this);
+ }
+ },
+ /**
+ * Indicate whether this folder is currently being compacted. The
+ * |GlodaMsgIndexer| keeps this in-memory-only value up-to-date.
+ */
+ get compacting() {
+ return this._compacting;
+ },
+ /**
+ * Set whether this folder is currently being compacted. This is really only
+ * for the |GlodaMsgIndexer| to set.
+ */
+ set compacting(aCompacting) {
+ this._compacting = aCompacting;
+ },
+ /**
+ * Indicate whether this folder was compacted and has not yet been
+ * compaction processed.
+ */
+ get compacted() {
+ return Boolean(this._dirtyStatus & this._kFolderCompactedFlag);
+ },
+ /**
+ * For use only by GlodaMsgIndexer to set/clear the compaction state of this
+ * folder.
+ */
+ _setCompactedState(aCompacted) {
+ if (this.compacted != aCompacted) {
+ if (aCompacted) {
+ this._dirtyStatus |= this._kFolderCompactedFlag;
+ } else {
+ this._dirtyStatus &= ~this._kFolderCompactedFlag;
+ }
+ this._datastore.updateFolderDirtyStatus(this);
+ }
+ },
+
+ get name() {
+ return this._prettyName;
+ },
+ toString() {
+ return "Folder:" + this._id;
+ },
+
+ toLocaleString() {
+ let xpcomFolder = this.getXPCOMFolder(this.kActivityFolderOnlyNoData);
+ if (!xpcomFolder) {
+ return this._prettyName;
+ }
+ return (
+ xpcomFolder.prettyName + " (" + xpcomFolder.rootFolder.prettyName + ")"
+ );
+ },
+
+ get indexingPriority() {
+ return this._indexingPriority;
+ },
+
+ /** We are going to index this folder. */
+ kActivityIndexing: 0,
+ /** Asking for the folder to perform header retrievals. */
+ kActivityHeaderRetrieval: 1,
+ /** We only want the folder for its metadata but are not going to open it. */
+ kActivityFolderOnlyNoData: 2,
+
+ /** Is this folder known to be actively used for indexing? */
+ // (Shadowed by the per-instance value assigned in the constructor.)
+ _activeIndexing: false,
+ /** Get our indexing status. */
+ get indexing() {
+ return this._activeIndexing;
+ },
+ /**
+ * Set our indexing status. Normally, this will be enabled through passing
+ * an activity type of kActivityIndexing (which will set us), but we will
+ * still need to be explicitly disabled by the indexing code.
+ * When disabling indexing, we will call forgetFolderIfUnused to take care of
+ * shutting things down.
+ * We are not responsible for committing changes to the message database!
+ * That is on you!
+ */
+ set indexing(aIndexing) {
+ this._activeIndexing = aIndexing;
+ },
+
+ /**
+ * Retrieve the nsIMsgFolder instance corresponding to this folder, providing
+ * an explanation of why you are requesting it for tracking/cleanup purposes.
+ *
+ * @param aActivity One of the kActivity* constants. If you pass
+ * kActivityIndexing, we will set indexing for you, but you will need to
+ * clear it when you are done.
+ * @returns The nsIMsgFolder if available, null on failure.
+ */
+ getXPCOMFolder(aActivity) {
+ switch (aActivity) {
+ case this.kActivityIndexing:
+ // mark us as indexing, but don't bother with live tracking. we do
+ // that independently and only for header retrieval.
+ this.indexing = true;
+ break;
+ case this.kActivityHeaderRetrieval:
+ case this.kActivityFolderOnlyNoData:
+ // we don't have to do anything here.
+ break;
+ }
+
+ return MailServices.folderLookup.getFolderForURL(this.uri);
+ },
+
+ /**
+ * Retrieve a GlodaAccount instance corresponding to this folder.
+ *
+ * @returns The GlodaAccount instance (lazily created and cached).
+ */
+ getAccount() {
+ if (!this._account) {
+ let msgFolder = this.getXPCOMFolder(this.kActivityFolderOnlyNoData);
+ this._account = new GlodaAccount(msgFolder.server);
+ }
+ return this._account;
+ },
+};
+
+/**
+ * @class A message representation.
+ */
+function GlodaMessage(
+ aDatastore,
+ aID,
+ aFolderID,
+ aMessageKey,
+ aConversationID,
+ aConversation,
+ aDate,
+ aHeaderMessageID,
+ aDeleted,
+ aJsonText,
+ aNotability,
+ aSubject,
+ aIndexedBodyText,
+ aAttachmentNames
+) {
+ // _datastore is now set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._folderID = aFolderID;
+ this._messageKey = aMessageKey;
+ this._conversationID = aConversationID;
+ this._conversation = aConversation;
+ this._date = aDate;
+ this._headerMessageID = aHeaderMessageID;
+ this._jsonText = aJsonText;
+ this._notability = aNotability;
+ this._subject = aSubject;
+ this._indexedBodyText = aIndexedBodyText;
+ this._attachmentNames = aAttachmentNames;
+
+ // only set _deleted if we're deleted, otherwise the undefined does our
+ // speaking for us.
+ if (aDeleted) {
+ this._deleted = aDeleted;
+ }
+}
+
+GlodaMessage.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_MESSAGE,
+ // set by GlodaDatastore
+ _datastore: null,
+ get id() {
+ return this._id;
+ },
+ get folderID() {
+ return this._folderID;
+ },
+ get messageKey() {
+ return this._messageKey;
+ },
+ get conversationID() {
+ return this._conversationID;
+ },
+ // conversation is special
+ get headerMessageID() {
+ return this._headerMessageID;
+ },
+ get notability() {
+ return this._notability;
+ },
+ set notability(aNotability) {
+ this._notability = aNotability;
+ },
+
+ get subject() {
+ return this._subject;
+ },
+ get indexedBodyText() {
+ return this._indexedBodyText;
+ },
+ get attachmentNames() {
+ return this._attachmentNames;
+ },
+
+ get date() {
+ return this._date;
+ },
+ set date(aNewDate) {
+ this._date = aNewDate;
+ },
+
+ get folder() {
+ // XXX due to a deletion bug it is currently possible to get in a state
+ // where we have an illegal folderID value. This will result in an
+ // exception. As a workaround, let's just return null in that case.
+ try {
+ if (this._folderID != null) {
+ return this._datastore._mapFolderID(this._folderID);
+ }
+ } catch (ex) {}
+ return null;
+ },
+ get folderURI() {
+ // XXX just like for folder, handle mapping failures and return null
+ try {
+ if (this._folderID != null) {
+ return this._datastore._mapFolderID(this._folderID).uri;
+ }
+ } catch (ex) {}
+ return null;
+ },
+ get account() {
+ // XXX due to a deletion bug it is currently possible to get in a state
+ // where we have an illegal folderID value. This will result in an
+ // exception. As a workaround, let's just return null in that case.
+ try {
+ if (this._folderID == null) {
+ return null;
+ }
+ let folder = this._datastore._mapFolderID(this._folderID);
+ return folder.getAccount();
+ } catch (ex) {}
+ return null;
+ },
+ get conversation() {
+ return this._conversation;
+ },
+
+ toString() {
+ // uh, this is a tough one...
+ return "Message:" + this._id;
+ },
+
+ /**
+ * Create a copy of this message with no datastore reference; mutations
+ * during (re-)indexing happen on the clone (see _declone).
+ */
+ _clone() {
+ return new GlodaMessage(
+ /* datastore */ null,
+ this._id,
+ this._folderID,
+ this._messageKey,
+ this._conversationID,
+ this._conversation,
+ this._date,
+ this._headerMessageID,
+ "_deleted" in this ? this._deleted : undefined,
+ "_jsonText" in this ? this._jsonText : undefined,
+ this._notability,
+ this._subject,
+ this._indexedBodyText,
+ this._attachmentNames
+ );
+ },
+
+ /**
+ * Provide a means of propagating changed values on our clone back to
+ * ourselves. This is required because of an object identity trick gloda
+ * does; when indexing an already existing object, all mutations happen on
+ * a clone of the existing object so that the original can be updated from
+ * the clone (via this method) once processing completes.
+ */
+ _declone(aOther) {
+ if ("_content" in aOther) {
+ this._content = aOther._content;
+ }
+
+ // The _indexedAuthor/_indexedRecipients fields don't get updated on
+ // fulltext update so we don't need to propagate.
+ this._indexedBodyText = aOther._indexedBodyText;
+ this._attachmentNames = aOther._attachmentNames;
+ },
+
+ /**
+ * Mark this message as a ghost. Ghosts are characterized by having no folder
+ * id and no message key. They also are not deleted or they would be of
+ * absolutely no use to us.
+ *
+ * These changes are suitable for persistence.
+ */
+ _ghost() {
+ this._folderID = null;
+ this._messageKey = null;
+ if ("_deleted" in this) {
+ delete this._deleted;
+ }
+ },
+
+ /**
+ * Are we a ghost (which implies not deleted)? We are not a ghost if we have
+ * a definite folder location (we may not know our message key in the case
+ * of IMAP moves not fully completed) and are not deleted.
+ */
+ get _isGhost() {
+ return this._folderID == null && !this._isDeleted;
+ },
+
+ /**
+ * If we were dead, un-dead us.
+ */
+ _ensureNotDeleted() {
+ if ("_deleted" in this) {
+ delete this._deleted;
+ }
+ },
+
+ /**
+ * Are we deleted? This is private because deleted gloda messages are not
+ * visible to non-core-gloda code.
+ */
+ get _isDeleted() {
+ return "_deleted" in this && this._deleted;
+ },
+
+ /**
+ * Trash this message's in-memory representation because it should no longer
+ * be reachable by any code. The database record is gone, it's not coming
+ * back.
+ */
+ _objectPurgedMakeYourselfUnpleasant() {
+ this._id = null;
+ this._folderID = null;
+ this._messageKey = null;
+ this._conversationID = null;
+ this._conversation = null;
+ this.date = null;
+ this._headerMessageID = null;
+ },
+
+ /**
+ * Return the underlying nsIMsgDBHdr from the folder storage for this, or
+ * null if the message does not exist for one reason or another. We may log
+ * to our logger in the failure cases.
+ *
+ * This method no longer caches the result, so if you need to hold onto it,
+ * hold onto it.
+ *
+ * In the process of retrieving the underlying message header, we may have to
+ * open the message header database associated with the folder. This may
+ * result in blocking while the load happens, so you may want to try and find
+ * an alternate way to initiate the load before calling us.
+ * We provide hinting to the GlodaDatastore via the GlodaFolder so that it
+ * knows when it's a good time for it to go and detach from the database.
+ *
+ * @returns The nsIMsgDBHdr associated with this message if available, null on
+ * failure.
+ */
+ get folderMessage() {
+ if (this._folderID === null || this._messageKey === null) {
+ return null;
+ }
+
+ // XXX like for folder and folderURI, return null if we can't map the folder
+ let glodaFolder;
+ try {
+ glodaFolder = this._datastore._mapFolderID(this._folderID);
+ } catch (ex) {
+ return null;
+ }
+ let folder = glodaFolder.getXPCOMFolder(
+ glodaFolder.kActivityHeaderRetrieval
+ );
+ if (folder) {
+ let folderMessage;
+ try {
+ folderMessage = folder.GetMessageHeader(this._messageKey);
+ } catch (ex) {
+ folderMessage = null;
+ }
+ if (folderMessage !== null) {
+ // verify the message-id header matches what we expect...
+ if (folderMessage.messageId != this._headerMessageID) {
+ LOG.info(
+ "Message with message key " +
+ this._messageKey +
+ " in folder '" +
+ folder.URI +
+ "' does not match expected " +
+ "header! (" +
+ this._headerMessageID +
+ " expected, got " +
+ folderMessage.messageId +
+ ")"
+ );
+ folderMessage = null;
+ }
+ }
+ return folderMessage;
+ }
+
+ // this only gets logged if things have gone very wrong. we used to throw
+ // here, but it's unlikely our caller can do anything more meaningful than
+ // treating this as a disappeared message.
+ LOG.info(
+ "Unable to locate folder message for: " +
+ this._folderID +
+ ":" +
+ this._messageKey
+ );
+ return null;
+ },
+ get folderMessageURI() {
+ let folderMessage = this.folderMessage;
+ if (folderMessage) {
+ return folderMessage.folder.getUriForMsg(folderMessage);
+ }
+ return null;
+ },
+};
+MixIn(GlodaMessage, GlodaHasAttributesMixIn);
+
+/**
+ * @class Contacts correspond to people (one per person), and may own multiple
+ * identities (e-mail address, IM account, etc.)
+ */
+function GlodaContact(
+ aDatastore,
+ aID,
+ aDirectoryUUID,
+ aContactUUID,
+ aName,
+ aPopularity,
+ aFrecency,
+ aJsonText
+) {
+ // _datastore set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._directoryUUID = aDirectoryUUID;
+ this._contactUUID = aContactUUID;
+ this._name = aName;
+ this._popularity = aPopularity;
+ this._frecency = aFrecency;
+ // _jsonText is only present when we actually have serialized attributes.
+ if (aJsonText) {
+ this._jsonText = aJsonText;
+ }
+
+ this._identities = null;
+}
+
+GlodaContact.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_CONTACT,
+ // set by GlodaDatastore
+ _datastore: null,
+
+ get id() {
+ return this._id;
+ },
+ get directoryUUID() {
+ return this._directoryUUID;
+ },
+ get contactUUID() {
+ return this._contactUUID;
+ },
+ get name() {
+ return this._name;
+ },
+ set name(aName) {
+ this._name = aName;
+ },
+
+ get popularity() {
+ return this._popularity;
+ },
+ set popularity(aPopularity) {
+ this._popularity = aPopularity;
+ this.dirty = true;
+ },
+
+ get frecency() {
+ return this._frecency;
+ },
+ set frecency(aFrecency) {
+ this._frecency = aFrecency;
+ this.dirty = true;
+ },
+
+ get identities() {
+ return this._identities;
+ },
+
+ toString() {
+ return "Contact:" + this._id;
+ },
+
+ get accessibleLabel() {
+ return "Contact: " + this._name;
+ },
+
+ _clone() {
+ // NOTE(review): _jsonText is not passed here, so the clone never carries
+ // the serialized attributes -- confirm this is intentional.
+ return new GlodaContact(
+ /* datastore */ null,
+ this._id,
+ this._directoryUUID,
+ this._contactUUID,
+ this._name,
+ this._popularity,
+ this._frecency
+ );
+ },
+};
+MixIn(GlodaContact, GlodaHasAttributesMixIn);
+
+/**
+ * @class A specific means of communication for a contact.
+ */
+function GlodaIdentity(
+ aDatastore,
+ aID,
+ aContactID,
+ aContact,
+ aKind,
+ aValue,
+ aDescription,
+ aIsRelay
+) {
+ // _datastore set on the prototype by GlodaDatastore
+ this._id = aID;
+ this._contactID = aContactID;
+ this._contact = aContact;
+ this._kind = aKind;
+ this._value = aValue;
+ this._description = aDescription;
+ this._isRelay = aIsRelay;
+ // Cached indication of whether there is an address book card for this
+ // identity. We keep this up-to-date via address book listener
+ // notifications in |GlodaABIndexer|.
+ this._hasAddressBookCard = undefined;
+}
+
+GlodaIdentity.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_IDENTITY,
+ // set by GlodaDatastore
+ _datastore: null,
+ get id() {
+ return this._id;
+ },
+ get contactID() {
+ return this._contactID;
+ },
+ get contact() {
+ return this._contact;
+ },
+ get kind() {
+ return this._kind;
+ },
+ get value() {
+ return this._value;
+ },
+ get description() {
+ return this._description;
+ },
+ get isRelay() {
+ return this._isRelay;
+ },
+
+ // A kind-qualified key, e.g. "email@user@example.com".
+ get uniqueValue() {
+ return this._kind + "@" + this._value;
+ },
+
+ toString() {
+ return "Identity:" + this._kind + ":" + this._value;
+ },
+
+ toLocaleString() {
+ if (this.contact.name == this.value) {
+ return this.value;
+ }
+ return this.contact.name + " : " + this.value;
+ },
+
+ get abCard() {
+ // for our purposes, the address book only speaks email
+ // NOTE(review): returns false (not null) for non-email kinds; callers
+ // appear to rely only on falsiness -- confirm before changing.
+ if (this._kind != "email") {
+ return false;
+ }
+ let card = MailServices.ab.cardForEmailAddress(this._value);
+ this._hasAddressBookCard = card != null;
+ return card;
+ },
+
+ /**
+ * Indicates whether we have an address book card for this identity. This
+ * value is cached once looked-up and kept up-to-date by |GlodaABIndexer|
+ * and its notifications.
+ */
+ get inAddressBook() {
+ if (this._hasAddressBookCard !== undefined) {
+ return this._hasAddressBookCard;
+ }
+ return (this.abCard && true) || false;
+ },
+};
+
+/**
+ * An attachment, with as much information as we can gather on it
+ */
+function GlodaAttachment(
+ aGlodaMessage,
+ aName,
+ aContentType,
+ aSize,
+ aPart,
+ aExternalUrl,
+ aIsExternal
+) {
+ // _datastore set on the prototype by GlodaDatastore
+ this._glodaMessage = aGlodaMessage;
+ this._name = aName;
+ this._contentType = aContentType;
+ this._size = aSize;
+ this._part = aPart;
+ this._externalUrl = aExternalUrl;
+ this._isExternal = aIsExternal;
+}
+
+GlodaAttachment.prototype = {
+ NOUN_ID: GlodaConstants.NOUN_ATTACHMENT,
+ // set by GlodaDatastore
+ get name() {
+ return this._name;
+ },
+ get contentType() {
+ return this._contentType;
+ },
+ get size() {
+ return this._size;
+ },
+ /**
+ * The URL for this attachment: the external URL if the attachment is
+ * external, otherwise a necko URL rebuilt from the owning message's URI
+ * plus part/filename query parameters.
+ *
+ * @throws Error if the owning message no longer exists.
+ */
+ get url() {
+ if (this.isExternal) {
+ return this._externalUrl;
+ }
+
+ let uri = this._glodaMessage.folderMessageURI;
+ if (!uri) {
+ throw new Error(
+ "The message doesn't exist anymore, unable to rebuild attachment URL"
+ );
+ }
+ let msgService = MailServices.messageServiceFromURI(uri);
+ let neckoURL = msgService.getUrlForUri(uri);
+ let url = neckoURL.spec;
+ // If the URL already carries a query parameter, append with "&".
+ let hasParamAlready = url.match(/\?[a-z]+=[^\/]+$/);
+ let sep = hasParamAlready ? "&" : "?";
+ return (
+ url +
+ sep +
+ "part=" +
+ this._part +
+ "&filename=" +
+ encodeURIComponent(this._name)
+ );
+ },
+ get isExternal() {
+ return this._isExternal;
+ },
+
+ toString() {
+ return "attachment: " + this._name + ":" + this._contentType;
+ },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaDatabind.jsm b/comm/mailnews/db/gloda/modules/GlodaDatabind.jsm
new file mode 100644
index 0000000000..eda41cb91a
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaDatabind.jsm
@@ -0,0 +1,210 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaDatabind"];
+
+/**
+ * Databinding layer for a gloda noun: prepares the INSERT/UPDATE statements
+ * (plus fulltext variants when the schema defines fulltextColumns) for the
+ * noun's table and determines the next available row id.
+ *
+ * @param aNounDef The noun definition; provides tableName and schema.
+ * @param aDatastore The GlodaDatastore used to create/execute statements.
+ */
+function GlodaDatabind(aNounDef, aDatastore) {
+ this._nounDef = aNounDef;
+ this._tableName = aNounDef.tableName;
+ this._tableDef = aNounDef.schema;
+ this._datastore = aDatastore;
+ this._log = console.createInstance({
+ prefix: `gloda.databind.${this._tableName}`,
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+
+ // process the column definitions and make sure they have an attribute mapping
+ for (let [iColDef, coldef] of this._tableDef.columns.entries()) {
+ // default to the other dude's thing.
+ if (coldef.length < 3) {
+ coldef[2] = coldef[0];
+ }
+ if (coldef[0] == "id") {
+ this._idAttr = coldef[2];
+ }
+ // colDef[3] is the index of us in our SQL bindings, storage-numbering
+ coldef[3] = iColDef;
+ }
+
+ // XXX This is obviously synchronous and not perfectly async. Since we are
+ // doing this, we don't actually need to move to ordinal binding below
+ // since we could just as well compel creation of the name map and thereby
+ // avoid ever acquiring the mutex after bootstrap.
+ // However, this specific check can be cleverly avoided with future work.
+ // Namely, at startup we can scan for extension-defined tables and get their
+ // maximum id so that we don't need to do it here. The table will either
+ // be brand new and thus have a maximum id of 1 or we will already know it
+ // because of that scan.
+ this._nextId = 1;
+ let stmt = this._datastore._createSyncStatement(
+ "SELECT MAX(id) FROM " + this._tableName,
+ true
+ );
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ this._nextId = stmt.getInt64(0) + 1;
+ }
+ stmt.finalize();
+
+ // Build the INSERT/UPDATE SQL for the main table using ordinal (?N)
+ // placeholders derived from the column positions computed above.
+ let insertColumns = [];
+ let insertValues = [];
+ let updateItems = [];
+ for (let [iColDef, coldef] of this._tableDef.columns.entries()) {
+ let column = coldef[0];
+ let placeholder = "?" + (iColDef + 1);
+ insertColumns.push(column);
+ insertValues.push(placeholder);
+ if (column != "id") {
+ updateItems.push(column + " = " + placeholder);
+ }
+ }
+
+ let insertSql =
+ "INSERT INTO " +
+ this._tableName +
+ " (" +
+ insertColumns.join(", ") +
+ ") VALUES (" +
+ insertValues.join(", ") +
+ ")";
+
+ // For the update, we want the 'id' to be a constraint and not a value
+ // that gets set...
+ let updateSql =
+ "UPDATE " +
+ this._tableName +
+ " SET " +
+ updateItems.join(", ") +
+ " WHERE id = ?1";
+ this._insertStmt = aDatastore._createAsyncStatement(insertSql);
+ this._updateStmt = aDatastore._createAsyncStatement(updateSql);
+
+ // The fulltext table (if any) lives at <tableName>Text and is keyed by
+ // docid (= the main table's id), so bindings are shifted by one.
+ if (this._tableDef.fulltextColumns) {
+ for (let [iColDef, coldef] of this._tableDef.fulltextColumns.entries()) {
+ if (coldef.length < 3) {
+ coldef[2] = coldef[0];
+ }
+ // colDef[3] is the index of us in our SQL bindings, storage-numbering
+ coldef[3] = iColDef + 1;
+ }
+
+ let insertColumns = [];
+ let insertValues = [];
+ let updateItems = [];
+ for (var [iColDef, coldef] of this._tableDef.fulltextColumns.entries()) {
+ let column = coldef[0];
+ // +2 instead of +1 because docid is implied
+ let placeholder = "?" + (iColDef + 2);
+ insertColumns.push(column);
+ insertValues.push(placeholder);
+ if (column != "id") {
+ updateItems.push(column + " = " + placeholder);
+ }
+ }
+
+ let insertFulltextSql =
+ "INSERT INTO " +
+ this._tableName +
+ "Text (docid," +
+ insertColumns.join(", ") +
+ ") VALUES (?1," +
+ insertValues.join(", ") +
+ ")";
+
+ // For the update, we want the 'id' to be a constraint and not a value
+ // that gets set...
+ let updateFulltextSql =
+ "UPDATE " +
+ this._tableName +
+ "Text SET " +
+ updateItems.join(", ") +
+ " WHERE docid = ?1";
+
+ this._insertFulltextStmt =
+ aDatastore._createAsyncStatement(insertFulltextSql);
+ this._updateFulltextStmt =
+ aDatastore._createAsyncStatement(updateFulltextSql);
+ }
+}
+
+GlodaDatabind.prototype = {
+ /**
+ * Perform appropriate binding coercion based on the schema provided to us.
+ * Although we end up effectively coercing JS Date objects to numeric values,
+ * we should not be provided with JS Date objects! There is no way for us
+ * to know to turn them back into JS Date objects on the way out.
+ * Additionally, there is the small matter of storage's bias towards
+ * PRTime representations which may not always be desirable.
+ */
+ bindByType(aStmt, aColDef, aValue) {
+ aStmt.bindByIndex(aColDef[3], aValue);
+ },
+
+ /**
+ * Instantiate the noun's class and populate it from a database row, using
+ * the column -> attribute mapping (colDef[2]) set up in the constructor.
+ */
+ objFromRow(aRow) {
+ let getVariant = this._datastore._getVariant;
+ let obj = new this._nounDef.class();
+ for (let [iCol, colDef] of this._tableDef.columns.entries()) {
+ obj[colDef[2]] = getVariant(aRow, iCol);
+ }
+ return obj;
+ },
+
+ /**
+ * Asynchronously insert aThing into the main table (and the fulltext table
+ * if one exists), assigning it the next available id if it has none.
+ */
+ objInsert(aThing) {
+ let bindByType = this.bindByType;
+ if (!aThing[this._idAttr]) {
+ aThing[this._idAttr] = this._nextId++;
+ }
+
+ let stmt = this._insertStmt;
+ for (let colDef of this._tableDef.columns) {
+ bindByType(stmt, colDef, aThing[colDef[2]]);
+ }
+
+ stmt.executeAsync(this._datastore.trackAsync());
+
+ if (this._insertFulltextStmt) {
+ stmt = this._insertFulltextStmt;
+ stmt.bindByIndex(0, aThing[this._idAttr]);
+ for (let colDef of this._tableDef.fulltextColumns) {
+ bindByType(stmt, colDef, aThing[colDef[2]]);
+ }
+ stmt.executeAsync(this._datastore.trackAsync());
+ }
+ },
+
+ /**
+ * Asynchronously update the row(s) for aThing in the main table (and the
+ * fulltext table if one exists), keyed on its id/docid.
+ */
+ objUpdate(aThing) {
+ let bindByType = this.bindByType;
+ let stmt = this._updateStmt;
+ // note, we specially bound the location of 'id' for the insert, but since
+ // we're using named bindings, there is nothing special about setting it
+ // NOTE(review): bindings here are ordinal (bindByIndex), not named; the
+ // comment above appears stale -- confirm against history before relying.
+ for (let colDef of this._tableDef.columns) {
+ bindByType(stmt, colDef, aThing[colDef[2]]);
+ }
+ stmt.executeAsync(this._datastore.trackAsync());
+
+ if (this._updateFulltextStmt) {
+ stmt = this._updateFulltextStmt;
+ // fulltextColumns doesn't include id/docid, need to explicitly set it
+ stmt.bindByIndex(0, aThing[this._idAttr]);
+ for (let colDef of this._tableDef.fulltextColumns) {
+ bindByType(stmt, colDef, aThing[colDef[2]]);
+ }
+ stmt.executeAsync(this._datastore.trackAsync());
+ }
+ },
+
+ adjustAttributes(...aArgs) {
+ // just proxy the call over to the datastore... we have to do this for
+ // 'this' reasons. we don't refactor things to avoid this because it does
+ // make some sense to have all the methods exposed from a single object,
+ // even if the implementation does live elsewhere.
+ return this._datastore.adjustAttributes(...aArgs);
+ },
+
+ // also proxied...
+ queryFromQuery(...aArgs) {
+ return this._datastore.queryFromQuery(...aArgs);
+ },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaDatastore.jsm b/comm/mailnews/db/gloda/modules/GlodaDatastore.jsm
new file mode 100644
index 0000000000..1391ceaaf2
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaDatastore.jsm
@@ -0,0 +1,4402 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This file looks to Myk Melez <myk@mozilla.org>'s Mozilla Labs snowl
+ * project's (https://hg.mozilla.org/labs/snowl/) modules/GlodaDatastore.jsm
+ * for inspiration and idioms (and also a name :).
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaDatastore"];
+
+const {
+ GlodaAttributeDBDef,
+ GlodaConversation,
+ GlodaFolder,
+ GlodaMessage,
+ GlodaContact,
+ GlodaIdentity,
+} = ChromeUtils.import("resource:///modules/gloda/GlodaDataModel.jsm");
+const { GlodaDatabind } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatabind.jsm"
+);
+const { GlodaCollection, GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+// Cache sizing bounds: 8 MiB minimum, 64 MiB maximum (1048576 = 1 MiB).
+// Presumably clamp the SQLite page cache -- confirm against the consumer.
+var MIN_CACHE_SIZE = 8 * 1048576;
+var MAX_CACHE_SIZE = 64 * 1048576;
+// Assumed system memory (256 MiB) when the real amount cannot be
+// determined -- TODO confirm against the sizing code that reads this.
+var MEMSIZE_FALLBACK_BYTES = 256 * 1048576;
+
+// Logger for the post-commit handler ("pch") machinery below.
+var PCH_LOG = console.createInstance({
+  prefix: "gloda.ds.pch",
+  maxLogLevel: "Warn",
+  maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * Commit async handler; hands off the notification to
+ * |GlodaDatastore._asyncCompleted|.
+ */
+function PostCommitHandler(aCallbacks) {
+  // Callbacks to run once the commit statement finishes cleanly.
+  this.callbacks = aCallbacks;
+  // Balanced by the _asyncCompleted() call in handleCompletion below.
+  GlodaDatastore._pendingAsyncStatements++;
+}
+
+PostCommitHandler.prototype = {
+  // A commit returns no rows; nothing to accumulate.
+  handleResult(aResultSet) {},
+
+  handleError(aError) {
+    PCH_LOG.error("database error:" + aError);
+  },
+
+  handleCompletion(aReason) {
+    // just outright bail if we are shutdown
+    // NOTE(review): bailing here skips _asyncCompleted(), so the pending
+    // count incremented by the constructor is never decremented -- confirm
+    // that is intentional during shutdown.
+    if (GlodaDatastore.datastoreIsShutdown) {
+      return;
+    }
+
+    // Only invoke the callbacks when the statement actually finished,
+    // as opposed to being canceled or failing.
+    if (aReason == Ci.mozIStorageStatementCallback.REASON_FINISHED) {
+      for (let callback of this.callbacks) {
+        try {
+          callback();
+        } catch (ex) {
+          // One misbehaving callback must not prevent the rest from
+          // running; log it with source location and keep going.
+          PCH_LOG.error(
+            "PostCommitHandler callback (" +
+              ex.fileName +
+              ":" +
+              ex.lineNumber +
+              ") threw: " +
+              ex
+          );
+        }
+      }
+    }
+    try {
+      GlodaDatastore._asyncCompleted();
+    } catch (e) {
+      PCH_LOG.error("Exception in handleCompletion:", e);
+    }
+  },
+};
+
+// Logger for the queryFromQuery ("qfq") machinery below.
+var QFQ_LOG = console.createInstance({
+  prefix: "gloda.ds.qfq",
+  maxLogLevel: "Warn",
+  maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * Singleton collection listener used by |QueryFromQueryCallback| to assist in
+ * the loading of referenced noun instances. Which is to say, messages have
+ * identities (specific e-mail addresses) associated with them via attributes.
+ * And these identities in turn reference / are referenced by contacts (the
+ * notion of a person).
+ *
+ * This listener is primarily concerned with fixing up the references in each
+ * noun instance to its referenced instances once they have been loaded. It
+ * also deals with caching so that our identity invariant is maintained: user
+ * code should only ever see one distinct instance of a thing at a time.
+ */
+var QueryFromQueryResolver = {
+  onItemsAdded(aIgnoredItems, aCollection, aFake) {
+    // Recover the collection that issued the load; dataStack is used when
+    // several loads were multiplexed onto the same helper collection.
+    let originColl = aCollection.dataStack
+      ? aCollection.dataStack.pop()
+      : aCollection.data;
+    // QFQ_LOG.debug("QFQR: originColl: " + originColl);
+    // Stash the origin so onQueryCompleted (below) can pair up with it.
+    if (aCollection.completionShifter) {
+      aCollection.completionShifter.push(originColl);
+    } else {
+      aCollection.completionShifter = [originColl];
+    }
+
+    // aFake is set when invoked directly by QueryFromQueryCallback rather
+    // than by a real sub-query completing; in that case the deferred/resolved
+    // bookkeeping was already handled by the caller.
+    if (!aFake) {
+      originColl.deferredCount--;
+      originColl.resolvedCount++;
+    }
+
+    // bail if we are still pending on some other load completion
+    if (originColl.deferredCount > 0) {
+      // QFQ_LOG.debug("QFQR: bailing " + originColl._nounDef.name);
+      return;
+    }
+
+    let referencesByNounID = originColl.masterCollection.referencesByNounID;
+    let inverseReferencesByNounID =
+      originColl.masterCollection.inverseReferencesByNounID;
+
+    if (originColl.pendingItems) {
+      for (let item of originColl.pendingItems) {
+        // QFQ_LOG.debug("QFQR: loading deferred " + item.NOUN_ID + ":" + item.id);
+        // All referenced nouns are loaded now; patch the references into
+        // each pending item.
+        GlodaDatastore.loadNounDeferredDeps(
+          item,
+          referencesByNounID,
+          inverseReferencesByNounID
+        );
+      }
+
+      // we need to consider the possibility that we are racing a collection very
+      // much like our own. as such, this means we need to perform cache
+      // unification as our last step.
+      GlodaCollectionManager.cacheLoadUnify(
+        originColl._nounDef.id,
+        originColl.pendingItems,
+        false
+      );
+
+      // just directly tell the collection about the items. we know the query
+      // matches (at least until we introduce predicates that we cannot express
+      // in SQL.)
+      // QFQ_LOG.debug(" QFQR: about to trigger listener: " + originColl._listener +
+      //   "with collection: " + originColl._nounDef.name);
+      originColl._onItemsAdded(originColl.pendingItems);
+      delete originColl.pendingItems;
+      delete originColl._pendingIdMap;
+    }
+  },
+  onItemsModified() {},
+  onItemsRemoved() {},
+  onQueryCompleted(aCollection) {
+    // Pair up with the origin collection stashed by onItemsAdded above.
+    let originColl = aCollection.completionShifter
+      ? aCollection.completionShifter.shift()
+      : aCollection.data;
+    // QFQ_LOG.debug(" QFQR about to trigger completion with collection: " +
+    //   originColl._nounDef.name);
+    if (originColl.deferredCount <= 0) {
+      originColl._onQueryCompleted();
+    }
+  },
+};
+
+/**
+ * Handles the results from a GlodaDatastore.queryFromQuery call in cooperation
+ * with the |QueryFromQueryResolver| collection listener. We do a lot of
+ * legwork related to satisfying references to other noun instances on the
+ * noun instances the user directly queried. Messages reference identities
+ * reference contacts which in turn (implicitly) reference identities again.
+ * We have to spin up those other queries and stitch things together.
+ *
+ * While the code is generally up to the existing set of tasks it is called to
+ * handle, I would not be surprised for it to fall down if things get more
+ * complex. Some of the logic here 'evolved' a bit and could benefit from
+ * additional documentation and a fresh go-through.
+ */
+function QueryFromQueryCallback(aStatement, aNounDef, aCollection) {
+  this.statement = aStatement;
+  this.nounDef = aNounDef;
+  this.collection = aCollection;
+
+  // QFQ_LOG.debug("Creating QFQCallback for noun: " + aNounDef.name);
+
+  // the master collection holds the referencesByNounID
+  this.referencesByNounID = {};
+  this.masterReferencesByNounID =
+    this.collection.masterCollection.referencesByNounID;
+  this.inverseReferencesByNounID = {};
+  this.masterInverseReferencesByNounID =
+    this.collection.masterCollection.inverseReferencesByNounID;
+  // we need to contribute our references as we load things; we need this
+  // because of the potential for circular dependencies and our inability to
+  // put things into the caching layer (or collection's _idMap) until we have
+  // fully resolved things.
+  if (this.nounDef.id in this.masterReferencesByNounID) {
+    this.selfReferences = this.masterReferencesByNounID[this.nounDef.id];
+  } else {
+    this.selfReferences = this.masterReferencesByNounID[this.nounDef.id] = {};
+  }
+  if (this.nounDef.parentColumnAttr) {
+    if (this.nounDef.id in this.masterInverseReferencesByNounID) {
+      this.selfInverseReferences =
+        this.masterInverseReferencesByNounID[this.nounDef.id];
+    } else {
+      this.selfInverseReferences = this.masterInverseReferencesByNounID[
+        this.nounDef.id
+      ] = {};
+    }
+  }
+
+  // Set true by handleResult when a loaded item still references nouns that
+  // must be satisfied by follow-up queries issued in handleCompletion.
+  this.needsLoads = false;
+
+  // Balanced by the _asyncCompleted() call in handleCompletion's finally.
+  GlodaDatastore._pendingAsyncStatements++;
+}
+
+QueryFromQueryCallback.prototype = {
+  // Materialize one noun instance per result row, preferring cached /
+  // already-loaded instances to preserve object identity.
+  handleResult(aResultSet) {
+    try {
+      // just outright bail if we are shutdown
+      if (GlodaDatastore.datastoreIsShutdown) {
+        return;
+      }
+
+      let pendingItems = this.collection.pendingItems;
+      let pendingIdMap = this.collection._pendingIdMap;
+      let row;
+      let nounDef = this.nounDef;
+      let nounID = nounDef.id;
+      while ((row = aResultSet.getNextRow())) {
+        let item = nounDef.objFromRow.call(nounDef.datastore, row);
+        if (this.collection.stashedColumns) {
+          // Capture any extra columns the query asked us to stash, keyed
+          // by item id.
+          let stashed = (this.collection.stashedColumns[item.id] = []);
+          for (let iCol of this.collection.query.options.stashColumns) {
+            stashed.push(GlodaDatastore._getVariant(row, iCol));
+          }
+        }
+        // try and replace the item with one from the cache, if we can
+        // NOTE(review): this lookup happens even for rows skipped by the
+        // pendingIdMap check just below -- confirm the ordering is deliberate.
+        let cachedItem = GlodaCollectionManager.cacheLookupOne(
+          nounID,
+          item.id,
+          false
+        );
+
+        // if we already have a copy in the pending id map, skip it
+        if (item.id in pendingIdMap) {
+          continue;
+        }
+
+        // QFQ_LOG.debug("loading item " + nounDef.id + ":" + item.id + " existing: " +
+        //   this.selfReferences[item.id] + " cached: " + cachedItem);
+        if (cachedItem) {
+          item = cachedItem;
+        } else if (this.selfReferences[item.id] != null) {
+          // We may already have been loaded by this process.
+          item = this.selfReferences[item.id];
+        } else {
+          // Perform loading logic which may produce reference dependencies.
+          this.needsLoads =
+            GlodaDatastore.loadNounItem(
+              item,
+              this.referencesByNounID,
+              this.inverseReferencesByNounID
+            ) || this.needsLoads;
+        }
+
+        // add ourself to the references by our id
+        // QFQ_LOG.debug("saving item " + nounDef.id + ":" + item.id + " to self-refs");
+        this.selfReferences[item.id] = item;
+
+        // if we're tracking it, add ourselves to our parent's list of children
+        // too
+        if (this.selfInverseReferences) {
+          let parentID = item[nounDef.parentColumnAttr.idStorageAttributeName];
+          let childrenList = this.selfInverseReferences[parentID];
+          if (childrenList === undefined) {
+            childrenList = this.selfInverseReferences[parentID] = [];
+          }
+          childrenList.push(item);
+        }
+
+        pendingItems.push(item);
+        pendingIdMap[item.id] = item;
+      }
+    } catch (e) {
+      GlodaDatastore._log.error("Exception in handleResult:", e);
+    }
+  },
+
+  handleError(aError) {
+    GlodaDatastore._log.error(
+      "Async queryFromQuery error: " + aError.result + ": " + aError.message
+    );
+  },
+
+  // Once all rows are in, issue follow-up queries for any referenced nouns
+  // we could not satisfy from the cache, or finish the collection directly.
+  handleCompletion(aReason) {
+    try {
+      try {
+        this.statement.finalize();
+        this.statement = null;
+
+        // just outright bail if we are shutdown
+        if (GlodaDatastore.datastoreIsShutdown) {
+          return;
+        }
+
+        // QFQ_LOG.debug("handleCompletion: " + this.collection._nounDef.name);
+
+        if (this.needsLoads) {
+          for (let nounID in this.referencesByNounID) {
+            let references = this.referencesByNounID[nounID];
+            // Our own noun type was already handled via selfReferences.
+            if (nounID == this.nounDef.id) {
+              continue;
+            }
+            let nounDef = GlodaDatastore._nounIDToDef[nounID];
+            // QFQ_LOG.debug("  have references for noun: " + nounDef.name);
+            // try and load them out of the cache/existing collections.  items in the
+            // cache will be fully formed, which is nice for us.
+            // XXX this mechanism will get dubious when we have multiple paths to a
+            //  single noun-type.  For example, a -> b -> c, a-> c; two paths to c
+            //  and we're looking at issuing two requests to c, the latter of which
+            //  will be a superset of the first one.  This does not currently pose
+            //  a problem because we only have a -> b -> c -> b, and sequential
+            //  processing means no alarms and no surprises.
+            let masterReferences = this.masterReferencesByNounID[nounID];
+            if (masterReferences === undefined) {
+              masterReferences = this.masterReferencesByNounID[nounID] = {};
+            }
+            let outReferences;
+            if (nounDef.parentColumnAttr) {
+              outReferences = {};
+            } else {
+              outReferences = masterReferences;
+            }
+            let [, notFoundCount, notFound] =
+              GlodaCollectionManager.cacheLookupMany(
+                nounDef.id,
+                references,
+                outReferences
+              );
+
+            if (nounDef.parentColumnAttr) {
+              // Merge cache hits into the master map and register each item
+              // under its parent's child list.
+              let inverseReferences;
+              if (nounDef.id in this.masterInverseReferencesByNounID) {
+                inverseReferences =
+                  this.masterInverseReferencesByNounID[nounDef.id];
+              } else {
+                inverseReferences = this.masterInverseReferencesByNounID[
+                  nounDef.id
+                ] = {};
+              }
+
+              for (let key in outReferences) {
+                let item = outReferences[key];
+                masterReferences[item.id] = item;
+                let parentID =
+                  item[nounDef.parentColumnAttr.idStorageAttributeName];
+                let childrenList = inverseReferences[parentID];
+                if (childrenList === undefined) {
+                  childrenList = inverseReferences[parentID] = [];
+                }
+                childrenList.push(item);
+              }
+            }
+
+            // QFQ_LOG.debug("  found: " + foundCount + " not found: " + notFoundCount);
+            if (notFoundCount === 0) {
+              this.collection.resolvedCount++;
+            } else {
+              // Cache misses require a sub-query by id; QueryFromQueryResolver
+              // will decrement deferredCount when it completes.
+              this.collection.deferredCount++;
+              let query = new nounDef.queryClass();
+              query.id.apply(query, Object.keys(notFound));
+
+              // we fully expect/allow for there being no such subcollection yet.
+              let subCollection =
+                nounDef.id in this.collection.masterCollection.subCollections
+                  ? this.collection.masterCollection.subCollections[nounDef.id]
+                  : undefined;
+              this.collection.masterCollection.subCollections[nounDef.id] =
+                GlodaDatastore.queryFromQuery(
+                  query,
+                  QueryFromQueryResolver,
+                  this.collection,
+                  subCollection,
+                  this.collection.masterCollection,
+                  { becomeExplicit: true }
+                );
+            }
+          }
+
+          for (let nounID in this.inverseReferencesByNounID) {
+            let inverseReferences = this.inverseReferencesByNounID[nounID];
+            this.collection.deferredCount++;
+            let nounDef = GlodaDatastore._nounIDToDef[nounID];
+
+            // QFQ_LOG.debug("Want to load inverse via " + nounDef.parentColumnAttr.boundName);
+
+            let query = new nounDef.queryClass();
+            // we want to constrain using the parent column
+            let queryConstrainer = query[nounDef.parentColumnAttr.boundName];
+            queryConstrainer.apply(query, Object.keys(inverseReferences));
+            // we fully expect/allow for there being no such subcollection yet.
+            let subCollection =
+              nounDef.id in this.collection.masterCollection.subCollections
+                ? this.collection.masterCollection.subCollections[nounDef.id]
+                : undefined;
+            this.collection.masterCollection.subCollections[nounDef.id] =
+              GlodaDatastore.queryFromQuery(
+                query,
+                QueryFromQueryResolver,
+                this.collection,
+                subCollection,
+                this.collection.masterCollection,
+                { becomeExplicit: true }
+              );
+          }
+        } else {
+          // Nothing needed loading; this query resolves immediately.
+          this.collection.deferredCount--;
+          this.collection.resolvedCount++;
+        }
+
+        // QFQ_LOG.debug("  defer: " + this.collection.deferredCount +
+        //   " resolved: " + this.collection.resolvedCount);
+
+        // process immediately and kick-up to the master collection...
+        if (this.collection.deferredCount <= 0) {
+          // this guy will resolve everyone using referencesByNounID and issue the
+          //  call to this.collection._onItemsAdded to propagate things to the
+          //  next concerned subCollection or the actual listener if this is the
+          //  master collection.  (Also, call _onQueryCompleted).
+          QueryFromQueryResolver.onItemsAdded(
+            null,
+            { data: this.collection },
+            true
+          );
+          QueryFromQueryResolver.onQueryCompleted({ data: this.collection });
+        }
+      } catch (e) {
+        console.error(e);
+        QFQ_LOG.error("Exception:", e);
+      }
+    } finally {
+      GlodaDatastore._asyncCompleted();
+    }
+  },
+};
+
+/**
+ * Used by |GlodaDatastore.folderCompactionPassBlockFetch| to accumulate the
+ * results and pass them back in to the compaction process in
+ * |GlodaMsgIndexer._worker_folderCompactionPass|.
+ */
+function CompactionBlockFetcherHandler(aCallback) {
+  // Invoked with the accumulated rows when the query completes.
+  this.callback = aCallback;
+  // Accumulated [id, messageKey, headerMessageID] triples, one per row.
+  this.idsAndMessageKeys = [];
+  // Balanced by _asyncCompleted() in handleCompletion.
+  GlodaDatastore._pendingAsyncStatements++;
+}
+CompactionBlockFetcherHandler.prototype = {
+  handleResult(aResultSet) {
+    let row;
+    while ((row = aResultSet.getNextRow())) {
+      this.idsAndMessageKeys.push([
+        row.getInt64(0), // id
+        row.getInt64(1), // messageKey
+        row.getString(2), // headerMessageID
+      ]);
+    }
+  },
+  handleError(aError) {
+    GlodaDatastore._log.error(
+      "CompactionBlockFetcherHandler error: " +
+        aError.result +
+        ": " +
+        aError.message
+    );
+  },
+  handleCompletion(aReason) {
+    // Release our pending-statement slot before handing the results over.
+    GlodaDatastore._asyncCompleted();
+    this.callback(this.idsAndMessageKeys);
+  },
+};
+
+/**
+ * Use this as the callback handler when you have a SQL query that returns a
+ * single row with a single integer column value, like a COUNT() query.
+ */
+function SingletonResultValueHandler(aCallback) {
+  // Invoked with the single integer result when the query completes.
+  this.callback = aCallback;
+  // Stays null if the query yields no rows.
+  this.result = null;
+  // Balanced by _asyncCompleted() in handleCompletion.
+  GlodaDatastore._pendingAsyncStatements++;
+}
+SingletonResultValueHandler.prototype = {
+  handleResult(aResultSet) {
+    // If multiple rows arrive, the last one wins.
+    let row;
+    while ((row = aResultSet.getNextRow())) {
+      this.result = row.getInt64(0);
+    }
+  },
+  handleError(aError) {
+    GlodaDatastore._log.error(
+      "SingletonResultValueHandler error: " +
+        aError.result +
+        ": " +
+        aError.message
+    );
+  },
+  handleCompletion(aReason) {
+    // Release our pending-statement slot before handing the result over.
+    GlodaDatastore._asyncCompleted();
+    this.callback(this.result);
+  },
+};
+
+/**
+ * Wrapper that duplicates actions taken on a real statement to an explain
+ * statement. Currently only fires an explain statement once.
+ */
+function ExplainedStatementWrapper(
+  aRealStatement,
+  aExplainStatement,
+  aSQLString,
+  aExplainHandler
+) {
+  this.real = aRealStatement;
+  this.explain = aExplainStatement;
+  this.sqlString = aSQLString;
+  this.explainHandler = aExplainHandler;
+  // Becomes true once the EXPLAIN statement has been executed and finalized;
+  // from then on only the real statement is driven.
+  this.done = false;
+}
+ExplainedStatementWrapper.prototype = {
+  // Mirror every binding onto the explain statement until it has run.
+  bindByIndex(aColIndex, aValue) {
+    this.real.bindByIndex(aColIndex, aValue);
+    if (!this.done) {
+      this.explain.bindByIndex(aColIndex, aValue);
+    }
+  },
+  executeAsync(aCallback) {
+    if (!this.done) {
+      // Tell the handler which SQL the upcoming EXPLAIN rows belong to,
+      // then fire the explain exactly once and release it.
+      this.explainHandler.sqlEnRoute(this.sqlString);
+      this.explain.executeAsync(this.explainHandler);
+      this.explain.finalize();
+      this.done = true;
+    }
+    return this.real.executeAsync(aCallback);
+  },
+  finalize() {
+    // If the explain never ran, it was never finalized; do it now.
+    if (!this.done) {
+      this.explain.finalize();
+    }
+    this.real.finalize();
+  },
+};
+
+/**
+ * Writes a single JSON document to the provided file path in a streaming
+ * fashion. At startup we open an array to place the queries in and at
+ * shutdown we close it.
+ */
+function ExplainedStatementProcessor(aDumpPath) {
+  // Observe shutdown so we can close the JSON document cleanly.
+  Services.obs.addObserver(this, "quit-application");
+
+  // SQL strings queued by sqlEnRoute, consumed FIFO by handleCompletion.
+  this._sqlStack = [];
+  // EXPLAIN opcode rows accumulated for the statement currently in flight.
+  this._curOps = [];
+  // Count of query objects written, used to emit JSON separators.
+  this._objsWritten = 0;
+
+  let filePath = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+  filePath.initWithPath(aDumpPath);
+
+  this._ostream = Cc[
+    "@mozilla.org/network/file-output-stream;1"
+  ].createInstance(Ci.nsIFileOutputStream);
+  this._ostream.init(filePath, -1, -1, 0);
+
+  // Open the JSON document; shutdown() writes the matching close.
+  let s = '{"queries": [';
+  this._ostream.write(s, s.length);
+}
+ExplainedStatementProcessor.prototype = {
+  sqlEnRoute(aSQLString) {
+    this._sqlStack.push(aSQLString);
+  },
+  handleResult(aResultSet) {
+    let row;
+    // addr  opcode (s)  p1  p2  p3  p4 (s)  p5  comment (s)
+    while ((row = aResultSet.getNextRow())) {
+      this._curOps.push([
+        row.getInt64(0), // addr
+        row.getString(1), // opcode
+        row.getInt64(2), // p1
+        row.getInt64(3), // p2
+        row.getInt64(4), // p3
+        row.getString(5), // p4
+        row.getString(6), // p5
+        row.getString(7), // comment
+      ]);
+    }
+  },
+  handleError(aError) {
+    console.error("Unexpected error in EXPLAIN handler: " + aError);
+  },
+  handleCompletion(aReason) {
+    // Pair the oldest queued SQL string with the ops just collected and
+    // stream it out as one JSON object (comma-separated after the first).
+    let obj = {
+      sql: this._sqlStack.shift(),
+      operations: this._curOps,
+    };
+    let s = (this._objsWritten++ ? ", " : "") + JSON.stringify(obj, null, 2);
+    this._ostream.write(s, s.length);
+
+    this._curOps = [];
+  },
+
+  observe(aSubject, aTopic, aData) {
+    if (aTopic == "quit-application") {
+      this.shutdown();
+    }
+  },
+
+  shutdown() {
+    // Close the JSON array/document opened in the constructor.
+    let s = "]}";
+    this._ostream.write(s, s.length);
+    this._ostream.close();
+
+    Services.obs.removeObserver(this, "quit-application");
+  },
+};
+
+// See the documentation on GlodaDatastore._schemaVersion to understand these:
+var DB_SCHEMA_ACCEPT_LEAVE_LOW = 31, // 31-34: accept and leave intact
+  DB_SCHEMA_ACCEPT_LEAVE_HIGH = 34,
+  DB_SCHEMA_ACCEPT_DOWNGRADE_LOW = 35, // 35-39: accept but downgrade
+  DB_SCHEMA_ACCEPT_DOWNGRADE_HIGH = 39,
+  DB_SCHEMA_DOWNGRADE_DELTA = 5; // amount subtracted when downgrading
+
+/**
+ * Database abstraction layer. Contains explicit SQL schemas for our
+ * fundamental representations (core 'nouns', if you will) as well as
+ * specialized functions for then dealing with each type of object. At the
+ * same time, we are beginning to support extension-provided tables, which
+ * call into question whether we really need our hand-rolled code, or could
+ * simply improve the extension-provided table case to work for most of our
+ * hand-rolled cases.
+ * For now, the argument can probably be made that our explicit schemas and code
+ * is readable/intuitive (not magic) and efficient (although generic stuff
+ * could also be made efficient, if slightly evil through use of eval or some
+ * other code generation mechanism.)
+ *
+ * === Data Model Interaction / Dependencies
+ *
+ * Dependent on and assumes limited knowledge of the GlodaDataModel.jsm
+ * implementations. GlodaDataModel.jsm actually has an implicit dependency on
+ * our implementation, reaching back into the datastore via the _datastore
+ * attribute which we pass into every instance we create.
+ * We pass a reference to ourself as we create the GlodaDataModel.jsm instances (and
+ * they store it as _datastore) because of a half-implemented attempt to make
+ * it possible to live in a world where we have multiple datastores. This
+ * would be desirable in the cases where we are dealing with multiple SQLite
+ * databases. This could be because of per-account global databases or
+ * some other segmentation. This was abandoned when the importance of
+ * per-account databases was diminished following public discussion, at least
+ * for the short-term, but no attempt was made to excise the feature or
+ * preclude it. (Merely a recognition that it's too much to try and implement
+ * correctly right now, especially because our solution might just be another
+ * (aggregating) layer on top of things, rather than complicating the lower
+ * levels.)
+ *
+ * === Object Identity / Caching
+ *
+ * The issue of object identity is handled by integration with the Collection.jsm
+ * provided GlodaCollectionManager. By "Object Identity", I mean that we only
+ * should ever have one object instance alive at a time that corresponds to
+ * an underlying database row in the database. Where possible we avoid
+ * performing database look-ups when we can check if the object is already
+ * present in memory; in practice, this means when we are asking for an object
+ * by ID. When we cannot avoid a database query, we attempt to make sure that
+ * we do not return a duplicate object instance, instead replacing it with the
+ * 'live' copy of the object. (Ideally, we would avoid any redundant
+ * construction costs, but that is not currently the case.)
+ * Although you should consult the GlodaCollectionManager for details, the
+ * general idea is that we have 'collections' which represent views of the
+ * database (based on a query) which use a single mechanism for double duty.
+ * The collections are registered with the collection manager via weak
+ * reference. The first 'duty' is that since the collections may be desired
+ * to be 'live views' of the data, we want them to update as changes occur.
+ * The weak reference allows the collection manager to track the 'live'
+ * collections and update them. The second 'duty' is the caching/object
+ * identity duty. In theory, every live item should be referenced by at least
+ * one collection, making it reachable for object identity/caching purposes.
+ * There is also an explicit (inclusive) caching layer present to both try and
+ * avoid poor performance from some of the costs of this strategy, as well as
+ * to try and keep track of objects that are being worked with that are not
+ * (yet) tracked by a collection. Using a size-bounded cache is clearly not
+ * a guarantee of correctness for this, but is suspected will work quite well.
+ * (Well enough to be dangerous because the inevitable failure case will not be
+ * expected.)
+ *
+ * The current strategy may not be the optimal one, feel free to propose and/or
+ * implement better ones, especially if you have numbers.
+ * The current strategy is not fully implemented in this file, but the common
+ * cases are believed to be covered. (Namely, we fail to purge items from the
+ * cache as they are purged from the database.)
+ *
+ * === Things That May Not Be Obvious (Gotchas)
+ *
+ * Although the schema includes "triggers", they are currently not used
+ * and were added when thinking about implementing the feature. We will
+ * probably implement this feature at some point, which is why they are still
+ * in there.
+ *
+ * We, and the layers above us, are not sufficiently thorough at cleaning out
+ * data from the database, and may potentially orphan it _as new functionality
+ * is added in the future at layers above us_. That is, currently we should
+ * not be leaking database rows, but we may in the future. This is because
+ * we/the layers above us lack a mechanism to track dependencies based on
+ * attributes. Say a plugin exists that extracts recipes from messages and
+ * relates them via an attribute. To do so, it must create new recipe rows
+ * in its own table as new recipes are discovered. No automatic mechanism
+ * will purge recipes as their source messages are purged, nor does any
+ * event-driven mechanism explicitly inform the plugin. (It could infer
+ * such an event from the indexing/attribute-providing process, or poll the
+ * states of attributes to accomplish this, but that is not desirable.) This
+ * needs to be addressed, and may be best addressed at layers above
+ * GlodaDatastore.jsm.
+ *
+ * @namespace
+ */
+var GlodaDatastore = {
+ _log: null,
+
+ /* ******************* SCHEMA ******************* */
+
+ /**
+ * Schema version policy. IMPORTANT! We expect the following potential things
+ * to happen in the life of gloda that can impact our schema and the ability
+ * to move between different versions of Thunderbird:
+ *
+ * - Fundamental changes to the schema so that two versions of Thunderbird
+ * cannot use the same global database. To wit, Thunderbird N+1 needs to
+ * blow away the database of Thunderbird N and reindex from scratch.
+ * Likewise, Thunderbird N will need to blow away Thunderbird N+1's
+ * database because it can't understand it. And we can't simply use a
+ * different file because there would be fatal bookkeeping losses.
+ *
+ * - Bidirectional minor schema changes (rare).
+ * Thunderbird N+1 does something that does not affect Thunderbird N's use
+ * of the database, and a user switching back to Thunderbird N will not be
+ * negatively impacted. It will also be fine when they go back to N+1 and
+ * N+1 will not be missing any vital data. The historic example of this is
+ * when we added a missing index that was important for performance. In
+ * that case, Thunderbird N could have potentially left the schema revision
+ * intact (if there was a safe revision), rather than swapping it on the
+ * downgrade, compelling N+1 to redo the transform on upgrade.
+ *
+ * - Backwards compatible, upgrade-transition minor schema changes.
+ * Thunderbird N+1 does something that does not require nuking the
+ * database / a full re-index, but does require processing on upgrade from
+ * a version of the database previously used by Thunderbird. These changes
+ * do not impact N's ability to use the database. For example, adding a
+ * new indexed attribute that affects a small number of messages could be
+ * handled by issuing a query on upgrade to dirty/index those messages.
+ * However, if the user goes back to N from N+1, when they upgrade to N+1
+ * again, we need to re-index. In this case N would need to have downgrade
+ * the schema revision.
+ *
+ * - Backwards incompatible, minor schema changes.
+ * Thunderbird N+1 does something that does not require nuking the database
+ * but will break Thunderbird N's ability to use the database.
+ *
+ * - Regression fixes. Sometimes we may land something that screws up
+ * databases, or the platform changes in a way that breaks our code and we
+ * had insufficient unit test coverage and so don't detect it until some
+ * databases have gotten messed up.
+ *
+ * Accordingly, every version of Thunderbird has a concept of potential schema
+ * versions with associated semantics to prepare for the minor schema upgrade
+ * cases where inter-op is possible. These ranges and their semantics are:
+ * - accepts and leaves intact. Covers:
+ * - regression fixes that no longer exist with the landing of the upgrade
+ * code as long as users never go back a build in the given channel.
+ * - bidirectional minor schema changes.
+ * - accepts but downgrades version to self. Covers:
+ * - backwards compatible, upgrade-transition minor schema changes.
+ * - nuke range (anything beyond a specific revision needs to be nuked):
+ * - backwards incompatible, minor schema changes
+ * - fundamental changes
+ *
+ *
+ * SO, YOU WANT TO CHANGE THE SCHEMA?
+ *
+ * Use the ranges below for Thunderbird 11 as a guide, bumping things as little
+ * as possible. If we start to use up the "accepts and leaves intact" range
+ * without majorly changing things up, re-do the numbering acceptance range
+ * to give us additional runway.
+ *
+ * Also, if we keep needing non-nuking upgrades, consider adding an additional
+ * table to the database that can tell older versions of Thunderbird what to
+ * do when confronted with a newer database and where it can set flags to tell
+ * the newer Thunderbird what the older Thunderbird got up to. For example,
+ * it would be much easier if we just tell Thunderbird N what to do when it's
+ * confronted with the database.
+ *
+ *
+ * CURRENT STATE OF THE MIGRATION LOGIC:
+ *
+ * Thunderbird 11: uses 30 (regression fix from 26)
+ * - accepts and leaves intact: 31-34
+ * - accepts and downgrades by 5: 35-39
+ * - nukes: 40+
+ */
+ _schemaVersion: 30,
+ // what is the schema in the database right now?
+ _actualSchemaVersion: 0,
+ _schema: {
+ tables: {
+ // ----- Messages
+ folderLocations: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["folderURI", "TEXT NOT NULL"],
+ ["dirtyStatus", "INTEGER NOT NULL"],
+ ["name", "TEXT NOT NULL"],
+ ["indexingPriority", "INTEGER NOT NULL"],
+ ],
+
+ triggers: {
+ delete: "DELETE from messages WHERE folderID = OLD.id",
+ },
+ },
+
+ conversations: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["subject", "TEXT NOT NULL"],
+ ["oldestMessageDate", "INTEGER"],
+ ["newestMessageDate", "INTEGER"],
+ ],
+
+ indices: {
+ subject: ["subject"],
+ oldestMessageDate: ["oldestMessageDate"],
+ newestMessageDate: ["newestMessageDate"],
+ },
+
+ fulltextColumns: [["subject", "TEXT"]],
+
+ triggers: {
+ delete: "DELETE from messages WHERE conversationID = OLD.id",
+ },
+ },
+
+ /**
+       * A message record corresponds to an actual message stored in a folder
+ * somewhere, or is a ghost record indicating a message that we know
+ * should exist, but which we have not seen (and which we may never see).
+ * We represent these ghost messages by storing NULL values in the
+ * folderID and messageKey fields; this may need to change to other
+ * sentinel values if this somehow impacts performance.
+ */
+ messages: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["folderID", "INTEGER"],
+ ["messageKey", "INTEGER"],
+ // conversationID used to have a REFERENCES but I'm losing it for
+ // presumed performance reasons and it doesn't do anything for us.
+ ["conversationID", "INTEGER NOT NULL"],
+ ["date", "INTEGER"],
+ // we used to have the parentID, but because of the very real
+ // possibility of multiple copies of a message with a given
+ // message-id, the parentID concept is unreliable.
+ ["headerMessageID", "TEXT"],
+ ["deleted", "INTEGER NOT NULL default 0"],
+ ["jsonAttributes", "TEXT"],
+ // Notability attempts to capture the static 'interestingness' of a
+ // message as a result of being starred/flagged, labeled, read
+ // multiple times, authored by someone in your address book or that
+ // you converse with a lot, etc.
+ ["notability", "INTEGER NOT NULL default 0"],
+ ],
+
+ indices: {
+ messageLocation: ["folderID", "messageKey"],
+ headerMessageID: ["headerMessageID"],
+ conversationID: ["conversationID"],
+ date: ["date"],
+ deleted: ["deleted"],
+ },
+
+ // note: if reordering the columns, you need to change this file's
+ // row-loading logic, GlodaMsgSearcher.jsm's ranking usages and also the
+ // column saturations in nsGlodaRankerFunction
+ fulltextColumns: [
+ ["body", "TEXT"],
+ ["subject", "TEXT"],
+ ["attachmentNames", "TEXT"],
+ ["author", "TEXT"],
+ ["recipients", "TEXT"],
+ ],
+
+ triggers: {
+ delete: "DELETE FROM messageAttributes WHERE messageID = OLD.id",
+ },
+ },
+
+ // ----- Attributes
+ attributeDefinitions: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["attributeType", "INTEGER NOT NULL"],
+ ["extensionName", "TEXT NOT NULL"],
+ ["name", "TEXT NOT NULL"],
+ ["parameter", "BLOB"],
+ ],
+
+ triggers: {
+ delete: "DELETE FROM messageAttributes WHERE attributeID = OLD.id",
+ },
+ },
+
+ messageAttributes: {
+ columns: [
+ // conversationID and messageID used to have REFERENCES back to their
+ // appropriate types. I removed it when removing attributeID for
+ // better reasons and because the code is not capable of violating
+ // this constraint, so the check is just added cost. (And we have
+ // unit tests that sanity check my assertions.)
+ ["conversationID", "INTEGER NOT NULL"],
+ ["messageID", "INTEGER NOT NULL"],
+ // This used to be REFERENCES attributeDefinitions(id) but then we
+ // introduced sentinel values and it's hard to justify the effort
+ // to compel injection of the record or the overhead to do the
+ // references checking.
+ ["attributeID", "INTEGER NOT NULL"],
+ ["value", "NUMERIC"],
+ ],
+
+ indices: {
+ attribQuery: [
+ "attributeID",
+ "value",
+ /* covering: */ "conversationID",
+ "messageID",
+ ],
+ // This is required for deletion of a message's attributes to be
+ // performant. We could optimize this index away if we changed our
+ // deletion logic to issue specific attribute deletions based on the
+ // information it already has available in the message's JSON blob.
+ // The rub there is that if we screwed up we could end up leaking
+ // attributes and there is a non-trivial performance overhead to
+ // the many requests it would cause (which can also be reduced in
+ // the future by changing our SQL dispatch code.)
+ messageAttribFastDeletion: ["messageID"],
+ },
+ },
+
+ // ----- Contacts / Identities
+
+ /**
+ * Corresponds to a human being and roughly to an address book entry.
+ * Contrast with an identity, which is a specific e-mail address, IRC
+ * nick, etc. Identities belong to contacts, and this relationship is
+ * expressed on the identityAttributes table.
+ */
+ contacts: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["directoryUUID", "TEXT"],
+ ["contactUUID", "TEXT"],
+ ["popularity", "INTEGER"],
+ ["frecency", "INTEGER"],
+ ["name", "TEXT"],
+ ["jsonAttributes", "TEXT"],
+ ],
+ indices: {
+ popularity: ["popularity"],
+ frecency: ["frecency"],
+ },
+ },
+
+ contactAttributes: {
+ columns: [
+ ["contactID", "INTEGER NOT NULL"],
+ ["attributeID", "INTEGER NOT NULL"],
+ ["value", "NUMERIC"],
+ ],
+ indices: {
+ contactAttribQuery: [
+ "attributeID",
+ "value",
+ /* covering: */ "contactID",
+ ],
+ },
+ },
+
+ /**
+ * Identities correspond to specific e-mail addresses, IRC nicks, etc.
+ */
+ identities: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["contactID", "INTEGER NOT NULL"],
+ ["kind", "TEXT NOT NULL"], // ex: email, irc, etc.
+ ["value", "TEXT NOT NULL"], // ex: e-mail address, irc nick/handle...
+ ["description", "NOT NULL"], // what makes this identity different
+ // from the others? (ex: home, work, etc.)
+ ["relay", "INTEGER NOT NULL"], // is the identity just a relay
+ // mechanism? (ex: mailing list, twitter 'bouncer', IRC gateway, etc.)
+ ],
+
+ indices: {
+ contactQuery: ["contactID"],
+ valueQuery: ["kind", "value"],
+ },
+ },
+ },
+ },
+
+ /* ******************* LOGIC ******************* */
+ /**
+ * We only have one connection; this name exists for legacy reasons but helps
+ * track when we are intentionally doing synchronous things during startup.
+ * We do nothing synchronous once our setup has completed.
+ */
+ syncConnection: null,
+ /**
+ * We only have one connection and we only do asynchronous things after setup;
+ * this name still exists mainly for legacy reasons.
+ */
+ asyncConnection: null,
+
+ /**
+ * Our "mailnews.database.global.datastore." preferences branch for debug
+ * notification handling. We register as an observer against this.
+ */
+ _prefBranch: null,
+
+ /**
+ * The unique ID assigned to an index when it has been built. This value
+ * changes once the index has been rebuilt.
+ */
+ _datastoreID: null,
+
+ /**
+ * Initialize logging, create the database if it doesn't exist, "upgrade" it
+ * if it does and it's not up-to-date, fill our authoritative folder uri/id
+ * mapping.
+ */
+ _init(aNounIDToDef) {
+ this._log = console.createInstance({
+ prefix: "gloda.datastore",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+ this._log.debug("Beginning datastore initialization.");
+
+ this._nounIDToDef = aNounIDToDef;
+
+ let branch = Services.prefs.getBranch(
+ "mailnews.database.global.datastore."
+ );
+ this._prefBranch = branch;
+
+ // Not sure the weak reference really makes a difference given that we are a
+ // GC root.
+ branch.addObserver("", this);
+ // claim the pref changed so we can centralize our logic there.
+ this.observe(null, "nsPref:changed", "explainToPath");
+
+ // Get the path to our global database
+ var dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ dbFile.append("global-messages-db.sqlite");
+
+ var dbConnection;
+
+ // Report about the size of the database through telemetry (if there's a
+ // database, naturally).
+ if (dbFile.exists()) {
+ try {
+ let h = Services.telemetry.getHistogramById(
+ "THUNDERBIRD_GLODA_SIZE_MB"
+ );
+ h.add(dbFile.fileSize / 1048576);
+ } catch (e) {
+ this._log.warn("Couldn't report telemetry", e);
+ }
+ }
+
+ // Create the file if it does not exist
+ if (!dbFile.exists()) {
+ this._log.debug("Creating database because it doesn't exist.");
+ dbConnection = this._createDB(dbFile);
+ } else {
+ // It does exist, but we (someday) might need to upgrade the schema
+ // (Exceptions may be thrown if the database is corrupt)
+ try {
+ dbConnection = Services.storage.openUnsharedDatabase(dbFile);
+ let cacheSize = this._determineCachePages(dbConnection);
+ // see _createDB...
+ dbConnection.executeSimpleSQL("PRAGMA cache_size = " + cacheSize);
+ dbConnection.executeSimpleSQL("PRAGMA synchronous = FULL");
+
+ // Register custom tokenizer to index all language text
+ var tokenizer = Cc["@mozilla.org/messenger/fts3tokenizer;1"].getService(
+ Ci.nsIFts3Tokenizer
+ );
+ tokenizer.registerTokenizer(dbConnection);
+
+ // -- database schema changes
+ let dbSchemaVersion = (this._actualSchemaVersion =
+ dbConnection.schemaVersion);
+ // - database from the future!
+ if (dbSchemaVersion > this._schemaVersion) {
+ if (
+ dbSchemaVersion >= DB_SCHEMA_ACCEPT_LEAVE_LOW &&
+ dbSchemaVersion <= DB_SCHEMA_ACCEPT_LEAVE_HIGH
+ ) {
+ this._log.debug(
+ "db from the future in acceptable range; leaving " +
+ "version at: " +
+ dbSchemaVersion
+ );
+ } else if (
+ dbSchemaVersion >= DB_SCHEMA_ACCEPT_DOWNGRADE_LOW &&
+ dbSchemaVersion <= DB_SCHEMA_ACCEPT_DOWNGRADE_HIGH
+ ) {
+ let newVersion = dbSchemaVersion - DB_SCHEMA_DOWNGRADE_DELTA;
+ this._log.debug(
+ "db from the future in downgrade range; setting " +
+ "version to " +
+ newVersion +
+ " down from " +
+ dbSchemaVersion
+ );
+ dbConnection.schemaVersion = this._actualSchemaVersion = newVersion;
+ } else {
+ // too far from the future, nuke it.
+ dbConnection = this._nukeMigration(dbFile, dbConnection);
+ }
+ } else if (dbSchemaVersion < this._schemaVersion) {
+ // - database from the past! migrate it, possibly.
+ this._log.debug(
+ "Need to migrate database. (DB version: " +
+ this._actualSchemaVersion +
+ " desired version: " +
+ this._schemaVersion
+ );
+ dbConnection = this._migrate(
+ dbFile,
+ dbConnection,
+ this._actualSchemaVersion,
+ this._schemaVersion
+ );
+ this._log.debug("Migration call completed.");
+ }
+ // else: this database is juuust right.
+
+ // If we never had a datastore ID, make sure to create one now.
+ if (!this._prefBranch.prefHasUserValue("id")) {
+ this._datastoreID = this._generateDatastoreID();
+ this._prefBranch.setCharPref("id", this._datastoreID);
+ } else {
+ this._datastoreID = this._prefBranch.getCharPref("id");
+ }
+ } catch (ex) {
+ // Handle corrupt databases, other oddities
+ if (ex.result == Cr.NS_ERROR_FILE_CORRUPTED) {
+ this._log.warn("Database was corrupt, removing the old one.");
+ dbFile.remove(false);
+ this._log.warn("Removed old database, creating a new one.");
+ dbConnection = this._createDB(dbFile);
+ } else {
+ this._log.error(
+ "Unexpected error when trying to open the database:",
+ ex
+ );
+ throw ex;
+ }
+ }
+ }
+
+ this.syncConnection = dbConnection;
+ this.asyncConnection = dbConnection;
+
+ this._log.debug("Initializing folder mappings.");
+ this._getAllFolderMappings();
+ // we need to figure out the next id's for all of the tables where we
+ // manage that.
+ this._log.debug("Populating managed id counters.");
+ this._populateAttributeDefManagedId();
+ this._populateConversationManagedId();
+ this._populateMessageManagedId();
+ this._populateContactManagedId();
+ this._populateIdentityManagedId();
+
+ this._log.debug("Completed datastore initialization.");
+ },
+
+ observe(aSubject, aTopic, aData) {
+ if (aTopic != "nsPref:changed") {
+ return;
+ }
+
+ if (aData == "explainToPath") {
+ let explainToPath = null;
+ try {
+ explainToPath = this._prefBranch.getCharPref("explainToPath");
+ if (explainToPath.trim() == "") {
+ explainToPath = null;
+ }
+ } catch (ex) {
+ // don't care if the pref is not there.
+ }
+
+ // It is conceivable that the name is changing and this isn't a boolean
+ // toggle, so always clean out the explain processor.
+ if (this._explainProcessor) {
+ this._explainProcessor.shutdown();
+ this._explainProcessor = null;
+ }
+
+ if (explainToPath) {
+ this._createAsyncStatement = this._createExplainedAsyncStatement;
+ this._explainProcessor = new ExplainedStatementProcessor(explainToPath);
+ } else {
+ this._createAsyncStatement = this._realCreateAsyncStatement;
+ }
+ }
+ },
+
+ datastoreIsShutdown: false,
+
+ /**
+ * Perform datastore shutdown.
+ */
+ shutdown() {
+ // Clear out any pending transaction by committing it.
+ // The indexer has been shutdown by this point; it no longer has any active
+ // indexing logic and it no longer has active event listeners capable of
+ // generating new activity.
+ // Semantic consistency of the database is guaranteed by the indexer's
+ // strategy of only yielding control at coherent times. Although it takes
+ // multiple calls and multiple SQL operations to update the state of our
+ // database representations, the generator does not yield until it has
+ // issued all the database statements required for said update. As such,
+ // this commit will leave us in a good way (and the commit will happen
+ // because closing the connection will drain the async execution queue.)
+ while (this._transactionDepth) {
+ this._log.info("Closing pending transaction out for shutdown.");
+ // just schedule this function to be run again once the transaction has
+ // been closed out.
+ this._commitTransaction();
+ }
+
+ this.datastoreIsShutdown = true;
+
+ this._log.info("Closing db connection");
+
+ // we do not expect exceptions, but it's a good idea to avoid having our
+ // shutdown process explode.
+ try {
+ this._cleanupAsyncStatements();
+ this._cleanupSyncStatements();
+ } catch (ex) {
+ this._log.debug("Unexpected exception during statement cleanup: " + ex);
+ }
+
+ // it's conceivable we might get a spurious exception here, but we really
+ // shouldn't get one. again, we want to ensure shutdown runs to completion
+ // and doesn't break our caller.
+ try {
+ // This currently causes all pending asynchronous operations to be run to
+ // completion. this simplifies things from a correctness perspective,
+ // and, honestly, is a lot easier than us tracking all of the async
+ // event tasks so that we can explicitly cancel them.
+ // This is a reasonable thing to do because we don't actually ever have
+ // a huge number of statements outstanding. The indexing process needs
+ // to issue async requests periodically, so the most we have in-flight
+ // from a write perspective is strictly less than the work required to
+ // update the database state for a single message.
+ // However, the potential for multiple pending expensive queries does
+ // exist, and it may be advisable to attempt to track and cancel those.
+ // For simplicity we don't currently do this, and I expect this should
+ // not pose a major problem, but those are famous last words.
+ // Note: asyncClose does not spin a nested event loop, but the thread
+ // manager shutdown code will spin the async thread's event loop, so it
+ // nets out to be the same.
+ this.asyncConnection.asyncClose();
+ } catch (ex) {
+ this._log.debug(
+ "Potentially expected exception during connection closure: " + ex
+ );
+ }
+
+ this.asyncConnection = null;
+ this.syncConnection = null;
+ },
+
+ /**
+ * Generates and returns a UUID.
+ *
+ * @returns a UUID as a string, ex: "c4dd0159-9287-480f-a648-a4613e147fdb"
+ */
+ _generateDatastoreID() {
+ let uuid = Services.uuid.generateUUID().toString();
+ // We snip off the { and } from each end of the UUID.
+ return uuid.substring(1, uuid.length - 2);
+ },
+
+ _determineCachePages(aDBConn) {
+ try {
+ // For the details of the computations, one should read
+ // nsNavHistory::InitDB. We're slightly diverging from them in the sense
+ // that we won't allow gloda to use insane amounts of memory cache, and
+ // we start with 1% instead of 6% like them.
+ let pageStmt = aDBConn.createStatement("PRAGMA page_size");
+ pageStmt.executeStep();
+ let pageSize = pageStmt.row.page_size;
+ pageStmt.finalize();
+ let cachePermillage = this._prefBranch.getIntPref(
+ "cache_to_memory_permillage"
+ );
+ cachePermillage = Math.min(cachePermillage, 50);
+ cachePermillage = Math.max(cachePermillage, 0);
+ let physMem = Services.sysinfo.getPropertyAsInt64("memsize");
+ if (physMem == 0) {
+ physMem = MEMSIZE_FALLBACK_BYTES;
+ }
+ let cacheSize = Math.round((physMem * cachePermillage) / 1000);
+ cacheSize = Math.max(cacheSize, MIN_CACHE_SIZE);
+ cacheSize = Math.min(cacheSize, MAX_CACHE_SIZE);
+ let cachePages = Math.round(cacheSize / pageSize);
+ return cachePages;
+ } catch (ex) {
+ this._log.warn("Error determining cache size: " + ex);
+ // A little bit lower than on my personal machine, will result in ~40M.
+ return 1000;
+ }
+ },
+
+ /**
+ * Create our database; basically a wrapper around _createSchema.
+ */
+ _createDB(aDBFile) {
+ var dbConnection = Services.storage.openUnsharedDatabase(aDBFile);
+ // We now follow the Firefox strategy for places, which mainly consists in
+ // picking a default 32k page size, and then figuring out the amount of
+ // cache accordingly. The default 32k come from mozilla/toolkit/storage,
+ // but let's get it directly from sqlite in case they change it.
+ let cachePages = this._determineCachePages(dbConnection);
+ // This is a maximum number of pages to be used. If the database does not
+ // get this large, then the memory does not get used.
+ // Do not forget to update the code in _init if you change this value.
+ dbConnection.executeSimpleSQL("PRAGMA cache_size = " + cachePages);
+ // The mozStorage default is NORMAL which shaves off some fsyncs in the
+ // interest of performance. Since everything we do after bootstrap is
+ // async, we do not care about the performance, but we really want the
+ // correctness. Bug reports and support avenues indicate a non-zero number
+ // of corrupt databases. Note that this may not fix everything; OS X
+ // also supports an F_FULLSYNC flag enabled by PRAGMA fullfsync that we are
+ // not enabling that is much more comprehensive. We can think about
+ // turning that on after we've seen how this reduces our corruption count.
+ dbConnection.executeSimpleSQL("PRAGMA synchronous = FULL");
+ // Register custom tokenizer to index all language text
+ var tokenizer = Cc["@mozilla.org/messenger/fts3tokenizer;1"].getService(
+ Ci.nsIFts3Tokenizer
+ );
+ tokenizer.registerTokenizer(dbConnection);
+
+ // We're creating a new database, so let's generate a new ID for this
+ // version of the datastore. This way, indexers can know when the index
+ // has been rebuilt in the event that they need to rebuild dependent data.
+ this._datastoreID = this._generateDatastoreID();
+ this._prefBranch.setCharPref("id", this._datastoreID);
+
+ dbConnection.beginTransaction();
+ try {
+ this._createSchema(dbConnection);
+ dbConnection.commitTransaction();
+ } catch (ex) {
+ dbConnection.rollbackTransaction();
+ throw ex;
+ }
+
+ return dbConnection;
+ },
+
+ _createTableSchema(aDBConnection, aTableName, aTableDef) {
+ // - Create the table
+ this._log.info("Creating table: " + aTableName);
+ let columnDefs = [];
+ for (let [column, type] of aTableDef.columns) {
+ columnDefs.push(column + " " + type);
+ }
+ aDBConnection.createTable(aTableName, columnDefs.join(", "));
+
+ // - Create the fulltext table if applicable
+ if (aTableDef.fulltextColumns) {
+ let columnDefs = [];
+ for (let [column, type] of aTableDef.fulltextColumns) {
+ columnDefs.push(column + " " + type);
+ }
+ let createFulltextSQL =
+ "CREATE VIRTUAL TABLE " +
+ aTableName +
+ "Text" +
+ " USING fts3(tokenize mozporter, " +
+ columnDefs.join(", ") +
+ ")";
+ this._log.info("Creating fulltext table: " + createFulltextSQL);
+ aDBConnection.executeSimpleSQL(createFulltextSQL);
+ }
+
+ // - Create its indices
+ if (aTableDef.indices) {
+ for (let indexName in aTableDef.indices) {
+ let indexColumns = aTableDef.indices[indexName];
+ aDBConnection.executeSimpleSQL(
+ "CREATE INDEX " +
+ indexName +
+ " ON " +
+ aTableName +
+ "(" +
+ indexColumns.join(", ") +
+ ")"
+ );
+ }
+ }
+
+ // - Create the attributes table if applicable
+ if (aTableDef.genericAttributes) {
+ aTableDef.genericAttributes = {
+ columns: [
+ ["nounID", "INTEGER NOT NULL"],
+ ["attributeID", "INTEGER NOT NULL"],
+ ["value", "NUMERIC"],
+ ],
+ indices: {},
+ };
+ aTableDef.genericAttributes.indices[aTableName + "AttribQuery"] = [
+ "attributeID",
+ "value",
+ /* covering: */ "nounID",
+ ];
+ // let's use this very function! (since we created genericAttributes,
+ // explodey recursion is avoided.)
+ this._createTableSchema(
+ aDBConnection,
+ aTableName + "Attributes",
+ aTableDef.genericAttributes
+ );
+ }
+ },
+
+ /**
+ * Create our database schema assuming a newly created database. This
+ * comes down to creating normal tables, their full-text variants (if
+ * applicable), and their indices.
+ */
+ _createSchema(aDBConnection) {
+ // -- For each table...
+ for (let tableName in this._schema.tables) {
+ let tableDef = this._schema.tables[tableName];
+ this._createTableSchema(aDBConnection, tableName, tableDef);
+ }
+
+ aDBConnection.schemaVersion = this._actualSchemaVersion =
+ this._schemaVersion;
+ },
+
+ /**
+ * Create a table for a noun, replete with data binding.
+ */
+ createNounTable(aNounDef) {
+ // give it a _jsonText attribute if appropriate...
+ if (aNounDef.allowsArbitraryAttrs) {
+ aNounDef.schema.columns.push(["jsonAttributes", "STRING", "_jsonText"]);
+ }
+ // check if the table exists
+ if (!this.asyncConnection.tableExists(aNounDef.tableName)) {
+ // it doesn't! create it (and its potentially many variants)
+ try {
+ this._createTableSchema(
+ this.asyncConnection,
+ aNounDef.tableName,
+ aNounDef.schema
+ );
+ } catch (ex) {
+ this._log.error(
+ "Problem creating table " +
+ aNounDef.tableName +
+ " " +
+ "because: " +
+ ex +
+ " at " +
+ ex.fileName +
+ ":" +
+ ex.lineNumber
+ );
+ return;
+ }
+ }
+
+ aNounDef._dataBinder = new GlodaDatabind(aNounDef, this);
+ aNounDef.datastore = aNounDef._dataBinder;
+ aNounDef.objFromRow = aNounDef._dataBinder.objFromRow;
+ aNounDef.objInsert = aNounDef._dataBinder.objInsert;
+ aNounDef.objUpdate = aNounDef._dataBinder.objUpdate;
+ aNounDef.dbAttribAdjuster = aNounDef._dataBinder.adjustAttributes;
+
+ if (aNounDef.schema.genericAttributes) {
+ aNounDef.attrTableName = aNounDef.tableName + "Attributes";
+ aNounDef.attrIDColumnName = "nounID";
+ }
+ },
+
+ _nukeMigration(aDBFile, aDBConnection) {
+ aDBConnection.close();
+ aDBFile.remove(false);
+ this._log.warn(
+ "Global database has been purged due to schema change. " +
+ "old version was " +
+ this._actualSchemaVersion +
+ ", new version is: " +
+ this._schemaVersion
+ );
+ return this._createDB(aDBFile);
+ },
+
+ /**
+ * Migrate the database _to the latest version_ from an older version. We
+ * only keep enough logic around to get us to the recent version. This code
+ * is not a time machine! If we need to blow away the database to get to the
+ * most recent version, then that's the sum total of the migration!
+ */
+ _migrate(aDBFile, aDBConnection, aCurVersion, aNewVersion) {
+ // version 12:
+ // - notability column added
+ // version 13:
+ // - we are adding a new fulltext index column. blow away!
+ // - note that I screwed up and failed to mark the schema change; apparently
+ // no database will claim to be version 13...
+ // version 14ish, still labeled 13?:
+ // - new attributes: forwarded, repliedTo, bcc, recipients
+ // - altered fromMeTo and fromMeCc to fromMe
+ // - altered toMe and ccMe to just be toMe
+ // - exposes bcc to cc-related attributes
+ // - MIME type DB schema overhaul
+ // version 15ish, still labeled 13:
+ // - change tokenizer to mozporter to support CJK
+ // (We are slip-streaming this so that only people who want to test CJK
+ // have to test it. We will properly bump the schema revision when the
+ // gloda correctness patch lands.)
+ // version 16ish, labeled 14 and now 16
+ // - gloda message id's start from 32 now
+ // - all kinds of correctness changes (blow away)
+ // version 17
+ // - more correctness fixes. (blow away)
+ // version 18
+ // - significant empty set support (blow away)
+ // version 19
+ // - there was a typo that was resulting in deleted getting set to the
+ // numeric value of the javascript undefined value. (migrate-able)
+ // version 20
+ // - tokenizer changes to provide for case/accent-folding. (blow away)
+ // version 21
+ // - add the messagesAttribFastDeletion index we thought was already covered
+ // by an index we removed a while ago (migrate-able)
+ // version 26
+ // - bump page size and also cache size (blow away)
+ // version 30
+ // - recover from bug 732372 that affected TB 11 beta / TB 12 alpha / TB 13
+ // trunk. The fix is bug 734507. The revision bump happens
+ // asynchronously. (migrate-able)
+
+ // nuke if prior to 26
+ if (aCurVersion < 26) {
+ return this._nukeMigration(aDBFile, aDBConnection);
+ }
+
+ // They must be desiring our "a.contact is undefined" fix!
+ // This fix runs asynchronously as the first indexing job the indexer ever
+ // performs. It is scheduled by the enabling of the message indexer and
+ // it is the one that updates the schema version when done.
+
+ // return the same DB connection since we didn't create a new one or do
+ // anything.
+ return aDBConnection;
+ },
+
+ /**
+ * Asynchronously update the schema version; only for use by in-tree callers
+ * who asynchronously perform migration work triggered by their initial
+ * indexing sweep and who have properly updated the schema version in all
+ * the appropriate locations in this file.
+ *
+ * This is done without doing anything about the current transaction state,
+ * which is desired.
+ */
+ _updateSchemaVersion(newSchemaVersion) {
+ this._actualSchemaVersion = newSchemaVersion;
+ let stmt = this._createAsyncStatement(
+ // we need to concat; pragmas don't like "?1" binds
+ "PRAGMA user_version = " + newSchemaVersion,
+ true
+ );
+ stmt.executeAsync(this.trackAsync());
+ stmt.finalize();
+ },
+
  // Async statements created without aWillFinalize=true; finalized in bulk by
  // _cleanupAsyncStatements at shutdown.
  _outstandingAsyncStatements: [],

  /**
   * Unless debugging, this is just _realCreateAsyncStatement, but in some
   * debugging modes this is instead the helpful wrapper
   * _createExplainedAsyncStatement. (Assigned by observe() based on the
   * "explainToPath" pref; see _init.)
   */
  _createAsyncStatement: null,
+
+ _realCreateAsyncStatement(aSQLString, aWillFinalize) {
+ let statement = null;
+ try {
+ statement = this.asyncConnection.createAsyncStatement(aSQLString);
+ } catch (ex) {
+ throw new Error(
+ "error creating async statement " +
+ aSQLString +
+ " - " +
+ this.asyncConnection.lastError +
+ ": " +
+ this.asyncConnection.lastErrorString +
+ " - " +
+ ex
+ );
+ }
+
+ if (!aWillFinalize) {
+ this._outstandingAsyncStatements.push(statement);
+ }
+
+ return statement;
+ },
+
+ /**
+ * The ExplainedStatementProcessor instance used by
+ * _createExplainedAsyncStatement. This will be null if
+ * _createExplainedAsyncStatement is not being used as _createAsyncStatement.
+ */
+ _explainProcessor: null,
+
+ /**
+ * Wrapped version of _createAsyncStatement that EXPLAINs the statement. When
+ * used this decorates _createAsyncStatement, in which case we are found at
+ * that name and the original is at _orig_createAsyncStatement. This is
+ * controlled by the explainToPath preference (see |_init|).
+ */
+ _createExplainedAsyncStatement(aSQLString, aWillFinalize) {
+ let realStatement = this._realCreateAsyncStatement(
+ aSQLString,
+ aWillFinalize
+ );
+ // don't wrap transaction control statements.
+ if (
+ aSQLString == "COMMIT" ||
+ aSQLString == "BEGIN TRANSACTION" ||
+ aSQLString == "ROLLBACK"
+ ) {
+ return realStatement;
+ }
+
+ let explainSQL = "EXPLAIN " + aSQLString;
+ let explainStatement = this._realCreateAsyncStatement(explainSQL);
+
+ return new ExplainedStatementWrapper(
+ realStatement,
+ explainStatement,
+ aSQLString,
+ this._explainProcessor
+ );
+ },
+
+ _cleanupAsyncStatements() {
+ this._outstandingAsyncStatements.forEach(stmt => stmt.finalize());
+ },
+
+ _outstandingSyncStatements: [],
+
+ _createSyncStatement(aSQLString, aWillFinalize) {
+ let statement = null;
+ try {
+ statement = this.syncConnection.createStatement(aSQLString);
+ } catch (ex) {
+ throw new Error(
+ "error creating sync statement " +
+ aSQLString +
+ " - " +
+ this.syncConnection.lastError +
+ ": " +
+ this.syncConnection.lastErrorString +
+ " - " +
+ ex
+ );
+ }
+
+ if (!aWillFinalize) {
+ this._outstandingSyncStatements.push(statement);
+ }
+
+ return statement;
+ },
+
+ _cleanupSyncStatements() {
+ this._outstandingSyncStatements.forEach(stmt => stmt.finalize());
+ },
+
+ /**
+ * Perform a synchronous executeStep on the statement, handling any
+ * SQLITE_BUSY fallout that could conceivably happen from a collision on our
+ * read with the async writes.
+ * Basically we keep trying until we succeed or run out of tries.
+ * We believe this to be a reasonable course of action because we don't
+ * expect this to happen much.
+ */
+ _syncStep(aStatement) {
+ let tries = 0;
+ while (tries < 32000) {
+ try {
+ return aStatement.executeStep();
+ } catch (e) {
+ // SQLITE_BUSY becomes NS_ERROR_FAILURE
+ if (e.result == Cr.NS_ERROR_FAILURE) {
+ tries++;
+ // we really need to delay here, somehow. unfortunately, we can't
+ // allow event processing to happen, and most of the things we could
+ // do to delay ourselves result in event processing happening. (Use
+ // of a timer, a synchronous dispatch, etc.)
+ // in theory, nsIThreadEventFilter could allow us to stop other events
+ // that aren't our timer from happening, but it seems slightly
+ // dangerous and 'notxpcom' suggests it ain't happening anyways...
+ // so, let's just be dumb and hope that the underlying file I/O going
+ // on makes us more likely to yield to the other thread so it can
+ // finish what it is doing...
+ } else {
+ throw e;
+ }
+ }
+ }
+ this._log.error("Synchronous step gave up after " + tries + " tries.");
+ return false;
+ },
+
  /**
   * Bind a variant-ish value (null/number/string) to aStatement at aIndex;
   * mozStorage's bindByIndex presumably performs the type dispatch for us.
   * Counterpart of _getVariant below.
   */
  _bindVariant(aStatement, aIndex, aVariant) {
    aStatement.bindByIndex(aIndex, aVariant);
  },
+
+ /**
+ * Helper that uses the appropriate getter given the data type; should be
+ * mooted once we move to 1.9.2 and can use built-in variant support.
+ */
+ _getVariant(aRow, aIndex) {
+ let typeOfIndex = aRow.getTypeOfIndex(aIndex);
+ if (typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ // XPConnect would just end up going through an intermediary double stage
+ // for the int64 case anyways...
+ return null;
+ }
+ if (
+ typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_INTEGER ||
+ typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_DOUBLE
+ ) {
+ return aRow.getDouble(aIndex);
+ }
+ // typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_TEXT
+ return aRow.getString(aIndex);
+ },
+
  /** Simple nested transaction support as a performance optimization. */
  // Nesting depth of _beginTransaction calls; 0 means no open transaction.
  _transactionDepth: 0,
  // Set true when the outermost transaction begins; cleared by any
  // _rollbackTransaction, forcing the outermost close to roll back.
  _transactionGood: false,
+
+ /**
+ * Self-memoizing BEGIN TRANSACTION statement.
+ */
+ get _beginTransactionStatement() {
+ let statement = this._createAsyncStatement("BEGIN TRANSACTION");
+ this.__defineGetter__("_beginTransactionStatement", () => statement);
+ return this._beginTransactionStatement;
+ },
+
+ /**
+ * Self-memoizing COMMIT statement.
+ */
+ get _commitTransactionStatement() {
+ let statement = this._createAsyncStatement("COMMIT");
+ this.__defineGetter__("_commitTransactionStatement", () => statement);
+ return this._commitTransactionStatement;
+ },
+
+ /**
+ * Self-memoizing ROLLBACK statement.
+ */
+ get _rollbackTransactionStatement() {
+ let statement = this._createAsyncStatement("ROLLBACK");
+ this.__defineGetter__("_rollbackTransactionStatement", () => statement);
+ return this._rollbackTransactionStatement;
+ },
+
  // Callbacks to run when the current outermost transaction's commit
  // completes; (re)initialized to [] by _beginTransaction and cleared after
  // commit/rollback. Null until the first transaction is opened.
  _pendingPostCommitCallbacks: null,
  /**
   * Register a callback to be invoked when the current transaction's commit
   * completes.
   *
   * NOTE(review): this assumes a transaction is currently open
   * (_pendingPostCommitCallbacks is null before the first _beginTransaction),
   * so calling it outside a transaction would throw — confirm callers only
   * invoke it inside one.
   */
  runPostCommit(aCallback) {
    this._pendingPostCommitCallbacks.push(aCallback);
  },
+
+ /**
+ * Begin a potentially nested transaction; only the outermost transaction gets
+ * to be an actual transaction, and the failure of any nested transaction
+ * results in a rollback of the entire outer transaction. If you really
+ * need an atomic transaction
+ */
+ _beginTransaction() {
+ if (this._transactionDepth == 0) {
+ this._pendingPostCommitCallbacks = [];
+ this._beginTransactionStatement.executeAsync(this.trackAsync());
+ this._transactionGood = true;
+ }
+ this._transactionDepth++;
+ },
+ /**
+ * Commit a potentially nested transaction; if we are the outer-most
+ * transaction and no sub-transaction issues a rollback
+ * (via _rollbackTransaction) then we commit, otherwise we rollback.
+ */
+ _commitTransaction() {
+ this._transactionDepth--;
+ if (this._transactionDepth == 0) {
+ try {
+ if (this._transactionGood) {
+ this._commitTransactionStatement.executeAsync(
+ new PostCommitHandler(this._pendingPostCommitCallbacks)
+ );
+ } else {
+ this._rollbackTransactionStatement.executeAsync(this.trackAsync());
+ }
+ } catch (ex) {
+ this._log.error("Commit problem:", ex);
+ }
+ this._pendingPostCommitCallbacks = [];
+ }
+ },
+ /**
+ * Abort the commit of the potentially nested transaction. If we are not the
+ * outermost transaction, we set a flag that tells the outermost transaction
+ * that it must roll back.
+ */
+ _rollbackTransaction() {
+ this._transactionDepth--;
+ this._transactionGood = false;
+ if (this._transactionDepth == 0) {
+ try {
+ this._rollbackTransactionStatement.executeAsync(this.trackAsync());
+ } catch (ex) {
+ this._log.error("Rollback problem:", ex);
+ }
+ }
+ },
+
+ _pendingAsyncStatements: 0,
+ /**
+ * The function to call, if any, when we hit 0 pending async statements.
+ */
+ _pendingAsyncCompletedListener: null,
+ _asyncCompleted() {
+ if (--this._pendingAsyncStatements == 0) {
+ if (this._pendingAsyncCompletedListener !== null) {
+ this._pendingAsyncCompletedListener();
+ this._pendingAsyncCompletedListener = null;
+ }
+ }
+ },
+ _asyncTrackerListener: {
+ handleResult() {},
+ handleError(aError) {
+ GlodaDatastore._log.error(
+ "got error in _asyncTrackerListener.handleError(): " +
+ aError.result +
+ ": " +
+ aError.message
+ );
+ },
+ handleCompletion() {
+ try {
+ // the helper method exists because the other classes need to call it too
+ GlodaDatastore._asyncCompleted();
+ } catch (e) {
+ this._log.error("Exception in handleCompletion:", e);
+ }
+ },
+ },
+ /**
+ * Increments _pendingAsyncStatements and returns a listener that will
+ * decrement the value when the statement completes.
+ */
+ trackAsync() {
+ this._pendingAsyncStatements++;
+ return this._asyncTrackerListener;
+ },
+
+ /* ********** Attribute Definitions ********** */
+ /** Maps (attribute def) compound names to the GlodaAttributeDBDef objects. */
+ _attributeDBDefs: {},
+ /** Map attribute ID to the definition and parameter value that produce it. */
+ _attributeIDToDBDefAndParam: {},
+
+ /**
+ * This attribute id indicates that we are encoding that a non-singular
+ * attribute has an empty set. The value payload that goes with this should
+ * the attribute id of the attribute we are talking about.
+ */
+ kEmptySetAttrId: 1,
+
+ /**
+ * We maintain the attributeDefinitions next id counter mainly because we can.
+ * Since we mediate the access, there's no real risk to doing so, and it
+ * allows us to keep the writes on the async connection without having to
+ * wait for a completion notification.
+ *
+ * Start from 32 so we can have a number of sentinel values.
+ */
+ _nextAttributeId: 32,
+
+ _populateAttributeDefManagedId() {
+ let stmt = this._createSyncStatement(
+ "SELECT MAX(id) FROM attributeDefinitions",
+ true
+ );
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ // 0 gets returned even if there are no messages...
+ let highestSeen = stmt.getInt64(0);
+ if (highestSeen != 0) {
+ this._nextAttributeId = highestSeen + 1;
+ }
+ }
+ stmt.finalize();
+ },
+
+ get _insertAttributeDefStatement() {
+ let statement = this._createAsyncStatement(
+ "INSERT INTO attributeDefinitions (id, attributeType, extensionName, \
+ name, parameter) \
+ VALUES (?1, ?2, ?3, ?4, ?5)"
+ );
+ this.__defineGetter__("_insertAttributeDefStatement", () => statement);
+ return this._insertAttributeDefStatement;
+ },
+
+ /**
+ * Create an attribute definition and return the row ID. Special/atypical
+ * in that it doesn't directly return a GlodaAttributeDBDef; we leave that up
+ * to the caller since they know much more than actually needs to go in the
+ * database.
+ *
+ * @returns The attribute id allocated to this attribute.
+ */
+ _createAttributeDef(aAttrType, aExtensionName, aAttrName, aParameter) {
+ let attributeId = this._nextAttributeId++;
+
+ let iads = this._insertAttributeDefStatement;
+ iads.bindByIndex(0, attributeId);
+ iads.bindByIndex(1, aAttrType);
+ iads.bindByIndex(2, aExtensionName);
+ iads.bindByIndex(3, aAttrName);
+ this._bindVariant(iads, 4, aParameter);
+
+ iads.executeAsync(this.trackAsync());
+
+ return attributeId;
+ },
+
+ /**
+ * Sync-ly look-up all the attribute definitions, populating our authoritative
+ * _attributeDBDefss and _attributeIDToDBDefAndParam maps. (In other words,
+ * once this method is called, those maps should always be in sync with the
+ * underlying database.)
+ */
+ getAllAttributes() {
+ let stmt = this._createSyncStatement(
+ "SELECT id, attributeType, extensionName, name, parameter \
+ FROM attributeDefinitions",
+ true
+ );
+
+ // map compound name to the attribute
+ let attribs = {};
+ // map the attribute id to [attribute, parameter] where parameter is null
+ // in cases where parameter is unused.
+ let idToAttribAndParam = {};
+
+ this._log.info("loading all attribute defs");
+
+ while (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ let rowId = stmt.getInt64(0);
+ let rowAttributeType = stmt.getInt64(1);
+ let rowExtensionName = stmt.getString(2);
+ let rowName = stmt.getString(3);
+ let rowParameter = this._getVariant(stmt, 4);
+
+ let compoundName = rowExtensionName + ":" + rowName;
+
+ let attrib;
+ if (compoundName in attribs) {
+ attrib = attribs[compoundName];
+ } else {
+ attrib = new GlodaAttributeDBDef(
+ this,
+ /* aID */ null,
+ compoundName,
+ rowAttributeType,
+ rowExtensionName,
+ rowName
+ );
+ attribs[compoundName] = attrib;
+ }
+ // if the parameter is null, the id goes on the attribute def, otherwise
+ // it is a parameter binding and goes in the binding map.
+ if (rowParameter == null) {
+ this._log.debug(compoundName + " primary: " + rowId);
+ attrib._id = rowId;
+ idToAttribAndParam[rowId] = [attrib, null];
+ } else {
+ this._log.debug(
+ compoundName + " binding: " + rowParameter + " = " + rowId
+ );
+ attrib._parameterBindings[rowParameter] = rowId;
+ idToAttribAndParam[rowId] = [attrib, rowParameter];
+ }
+ }
+ stmt.finalize();
+
+ this._log.info("done loading all attribute defs");
+
+ this._attributeDBDefs = attribs;
+ this._attributeIDToDBDefAndParam = idToAttribAndParam;
+ },
+
  /**
   * Helper method for GlodaAttributeDBDef to tell us when their bindParameter
   * method is called and they have created a new binding (using
   * GlodaDatastore._createAttributeDef).  In theory, that method could take
   * an additional argument and obviate the need for this method.
   *
   * @param aID The attribute id allocated for the new parameter binding.
   * @param aAttrDef The GlodaAttributeDBDef the binding belongs to.
   * @param aParamValue The parameter value the id was allocated for.
   */
  reportBinding(aID, aAttrDef, aParamValue) {
    this._attributeIDToDBDefAndParam[aID] = [aAttrDef, aParamValue];
  },
+
  /* ********** Folders ********** */
  /** Next folder (row) id to issue; populated by _getAllFolderMappings and
   *  kept ahead of every folder id seen in the database. */
  _nextFolderId: 1,
+
+ get _insertFolderLocationStatement() {
+ let statement = this._createAsyncStatement(
+ "INSERT INTO folderLocations (id, folderURI, dirtyStatus, name, \
+ indexingPriority) VALUES \
+ (?1, ?2, ?3, ?4, ?5)"
+ );
+ this.__defineGetter__("_insertFolderLocationStatement", () => statement);
+ return this._insertFolderLocationStatement;
+ },
+
  /**
   * Authoritative map from folder URI to GlodaFolder instance.
   * (Authoritative in the sense that this map exactly represents the state
   * of the underlying database.  If it does not, it's a bug in updating the
   * database.)  Note: values are GlodaFolder objects, not raw folder IDs —
   * see _getAllFolderMappings/_mapFolder.
   */
  _folderByURI: {},
  /** Authoritative map from folder ID to GlodaFolder instance. */
  _folderByID: {},
+
+ /** Initialize our _folderByURI/_folderByID mappings, called by _init(). */
+ _getAllFolderMappings() {
+ let stmt = this._createSyncStatement(
+ "SELECT id, folderURI, dirtyStatus, name, indexingPriority \
+ FROM folderLocations",
+ true
+ );
+
+ while (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ let folderID = stmt.getInt64(0);
+ let folderURI = stmt.getString(1);
+ let dirtyStatus = stmt.getInt32(2);
+ let folderName = stmt.getString(3);
+ let indexingPriority = stmt.getInt32(4);
+
+ let folder = new GlodaFolder(
+ this,
+ folderID,
+ folderURI,
+ dirtyStatus,
+ folderName,
+ indexingPriority
+ );
+
+ this._folderByURI[folderURI] = folder;
+ this._folderByID[folderID] = folder;
+
+ if (folderID >= this._nextFolderId) {
+ this._nextFolderId = folderID + 1;
+ }
+ }
+ stmt.finalize();
+ },
+
+ _folderKnown(aFolder) {
+ let folderURI = aFolder.URI;
+ return folderURI in this._folderByURI;
+ },
+
+ _folderIdKnown(aFolderID) {
+ return aFolderID in this._folderByID;
+ },
+
+ /**
+ * Return the default messaging priority for a folder of this type, based
+ * on the folder's flags. If aAllowSpecialFolderIndexing is true, then
+ * folders suchs as Trash and Junk will be indexed.
+ *
+ * @param {nsIMsgFolder} aFolder
+ * @param {boolean} aAllowSpecialFolderIndexing
+ * @returns {number}
+ */
+ getDefaultIndexingPriority(aFolder, aAllowSpecialFolderIndexing) {
+ let indexingPriority = GlodaFolder.prototype.kIndexingDefaultPriority;
+ // Do not walk into trash/junk folders, unless the user is explicitly
+ // telling us to do so.
+ let specialFolderFlags =
+ Ci.nsMsgFolderFlags.Trash | Ci.nsMsgFolderFlags.Junk;
+ if (aFolder.isSpecialFolder(specialFolderFlags, true)) {
+ indexingPriority = aAllowSpecialFolderIndexing
+ ? GlodaFolder.prototype.kIndexingDefaultPriority
+ : GlodaFolder.prototype.kIndexingNeverPriority;
+ } else if (
+ aFolder.flags &
+ (Ci.nsMsgFolderFlags.Queue | Ci.nsMsgFolderFlags.Newsgroup)
+ // In unit testing at least folders can be
+ // confusingly labeled ImapPublic when they
+ // should not be. Or at least I don't think they
+ // should be. So they're legit for now.
+ // | Ci.nsMsgFolderFlags.ImapPublic
+ // | Ci.nsMsgFolderFlags.ImapOtherUser
+ ) {
+ // Queue folders should always be ignored just because messages should not
+ // spend much time in there.
+ // We hate newsgroups, and public IMAP folders are similar.
+ // Other user IMAP folders should be ignored because it's not this user's
+ // mail.
+ indexingPriority = GlodaFolder.prototype.kIndexingNeverPriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.Inbox) {
+ indexingPriority = GlodaFolder.prototype.kIndexingInboxPriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.SentMail) {
+ indexingPriority = GlodaFolder.prototype.kIndexingSentMailPriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.Favorite) {
+ indexingPriority = GlodaFolder.prototype.kIndexingFavoritePriority;
+ } else if (aFolder.flags & Ci.nsMsgFolderFlags.CheckNew) {
+ indexingPriority = GlodaFolder.prototype.kIndexingCheckNewPriority;
+ }
+
+ return indexingPriority;
+ },
+
+ /**
+ * Map a folder URI to a GlodaFolder instance, creating the mapping if it does
+ * not yet exist.
+ *
+ * @param aFolder The nsIMsgFolder instance you would like the GlodaFolder
+ * instance for.
+ * @returns The existing or newly created GlodaFolder instance.
+ */
+ _mapFolder(aFolder) {
+ let folderURI = aFolder.URI;
+ if (folderURI in this._folderByURI) {
+ return this._folderByURI[folderURI];
+ }
+
+ let folderID = this._nextFolderId++;
+
+ // If there's an indexingPriority stored on the folder, just use that.
+ // Otherwise, fall back to the default for folders of this type.
+ let indexingPriority = NaN;
+ try {
+ let pri = aFolder.getStringProperty("indexingPriority"); // Might throw.
+ indexingPriority = parseInt(pri); // Might return NaN.
+ } catch (ex) {}
+ if (isNaN(indexingPriority)) {
+ indexingPriority = this.getDefaultIndexingPriority(aFolder);
+ }
+
+ // If there are messages in the folder, it is filthy. If there are no
+ // messages, it can be clean.
+ let dirtyStatus = aFolder.getTotalMessages(false)
+ ? GlodaFolder.prototype.kFolderFilthy
+ : GlodaFolder.prototype.kFolderClean;
+ let folder = new GlodaFolder(
+ this,
+ folderID,
+ folderURI,
+ dirtyStatus,
+ aFolder.prettyName,
+ indexingPriority
+ );
+
+ this._insertFolderLocationStatement.bindByIndex(0, folder.id);
+ this._insertFolderLocationStatement.bindByIndex(1, folder.uri);
+ this._insertFolderLocationStatement.bindByIndex(2, folder.dirtyStatus);
+ this._insertFolderLocationStatement.bindByIndex(3, folder.name);
+ this._insertFolderLocationStatement.bindByIndex(4, folder.indexingPriority);
+ this._insertFolderLocationStatement.executeAsync(this.trackAsync());
+
+ this._folderByURI[folderURI] = folder;
+ this._folderByID[folderID] = folder;
+ this._log.debug("!! mapped " + folder.id + " from " + folderURI);
+ return folder;
+ },
+
+ /**
+ * Map an integer gloda folder ID to the corresponding GlodaFolder instance.
+ *
+ * @param aFolderID The known valid gloda folder ID for which you would like
+ * a GlodaFolder instance.
+ * @returns The GlodaFolder instance with the given id. If no such instance
+ * exists, we will throw an exception.
+ */
+ _mapFolderID(aFolderID) {
+ if (aFolderID === null) {
+ return null;
+ }
+ if (aFolderID in this._folderByID) {
+ return this._folderByID[aFolderID];
+ }
+ throw new Error("Got impossible folder ID: " + aFolderID);
+ },
+
  /**
   * Mark the gloda folder as deleted for any outstanding references to it and
   * remove it from our tables so we don't hand out any new references.  The
   * latter is especially important in the case a folder with the same name
   * is created afterwards; we don't want to confuse the new one with the old
   * one!
   *
   * @param aGlodaFolder The GlodaFolder instance being torn down.
   */
  _killGlodaFolderIntoTombstone(aGlodaFolder) {
    aGlodaFolder._deleted = true;
    delete this._folderByURI[aGlodaFolder.uri];
    delete this._folderByID[aGlodaFolder.id];
  },
+
+ get _updateFolderDirtyStatusStatement() {
+ let statement = this._createAsyncStatement(
+ "UPDATE folderLocations SET dirtyStatus = ?1 \
+ WHERE id = ?2"
+ );
+ this.__defineGetter__("_updateFolderDirtyStatusStatement", () => statement);
+ return this._updateFolderDirtyStatusStatement;
+ },
+
+ updateFolderDirtyStatus(aFolder) {
+ let ufds = this._updateFolderDirtyStatusStatement;
+ ufds.bindByIndex(1, aFolder.id);
+ ufds.bindByIndex(0, aFolder.dirtyStatus);
+ ufds.executeAsync(this.trackAsync());
+ },
+
+ get _updateFolderIndexingPriorityStatement() {
+ let statement = this._createAsyncStatement(
+ "UPDATE folderLocations SET indexingPriority = ?1 \
+ WHERE id = ?2"
+ );
+ this.__defineGetter__(
+ "_updateFolderIndexingPriorityStatement",
+ () => statement
+ );
+ return this._updateFolderIndexingPriorityStatement;
+ },
+
+ updateFolderIndexingPriority(aFolder) {
+ let ufip = this._updateFolderIndexingPriorityStatement;
+ ufip.bindByIndex(1, aFolder.id);
+ ufip.bindByIndex(0, aFolder.indexingPriority);
+ ufip.executeAsync(this.trackAsync());
+ },
+
+ get _updateFolderLocationStatement() {
+ let statement = this._createAsyncStatement(
+ "UPDATE folderLocations SET folderURI = ?1 \
+ WHERE id = ?2"
+ );
+ this.__defineGetter__("_updateFolderLocationStatement", () => statement);
+ return this._updateFolderLocationStatement;
+ },
+
+ /**
+ * Non-recursive asynchronous folder renaming based on the URI.
+ *
+ * @TODO provide a mechanism for recursive folder renames or have a higher
+ * layer deal with it and remove this note.
+ */
+ renameFolder(aOldFolder, aNewURI) {
+ if (!(aOldFolder.URI in this._folderByURI)) {
+ return;
+ }
+ let folder = this._mapFolder(aOldFolder); // ensure the folder is mapped
+ let oldURI = folder.uri;
+ this._folderByURI[aNewURI] = folder;
+ folder._uri = aNewURI;
+ this._log.info("renaming folder URI " + oldURI + " to " + aNewURI);
+ this._updateFolderLocationStatement.bindByIndex(1, folder.id);
+ this._updateFolderLocationStatement.bindByIndex(0, aNewURI);
+ this._updateFolderLocationStatement.executeAsync(this.trackAsync());
+
+ delete this._folderByURI[oldURI];
+ },
+
+ get _deleteFolderByIDStatement() {
+ let statement = this._createAsyncStatement(
+ "DELETE FROM folderLocations WHERE id = ?1"
+ );
+ this.__defineGetter__("_deleteFolderByIDStatement", () => statement);
+ return this._deleteFolderByIDStatement;
+ },
+
+ deleteFolderByID(aFolderID) {
+ let dfbis = this._deleteFolderByIDStatement;
+ dfbis.bindByIndex(0, aFolderID);
+ dfbis.executeAsync(this.trackAsync());
+ },
+
  /* ********** Conversation ********** */
  /** The next conversation id to allocate.  Initialized at startup by
   *  _populateConversationManagedId. */
  _nextConversationId: 1,
+
+ _populateConversationManagedId() {
+ let stmt = this._createSyncStatement(
+ "SELECT MAX(id) FROM conversations",
+ true
+ );
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ this._nextConversationId = stmt.getInt64(0) + 1;
+ }
+ stmt.finalize();
+ },
+
+ get _insertConversationStatement() {
+ let statement = this._createAsyncStatement(
+ "INSERT INTO conversations (id, subject, oldestMessageDate, \
+ newestMessageDate) \
+ VALUES (?1, ?2, ?3, ?4)"
+ );
+ this.__defineGetter__("_insertConversationStatement", () => statement);
+ return this._insertConversationStatement;
+ },
+
+ get _insertConversationTextStatement() {
+ let statement = this._createAsyncStatement(
+ "INSERT INTO conversationsText (docid, subject) \
+ VALUES (?1, ?2)"
+ );
+ this.__defineGetter__("_insertConversationTextStatement", () => statement);
+ return this._insertConversationTextStatement;
+ },
+
+ /**
+ * Asynchronously create a conversation.
+ */
+ createConversation(aSubject, aOldestMessageDate, aNewestMessageDate) {
+ // create the data row
+ let conversationID = this._nextConversationId++;
+ let ics = this._insertConversationStatement;
+ ics.bindByIndex(0, conversationID);
+ ics.bindByIndex(1, aSubject);
+ if (aOldestMessageDate == null) {
+ ics.bindByIndex(2, null);
+ } else {
+ ics.bindByIndex(2, aOldestMessageDate);
+ }
+ if (aNewestMessageDate == null) {
+ ics.bindByIndex(3, null);
+ } else {
+ ics.bindByIndex(3, aNewestMessageDate);
+ }
+ ics.executeAsync(this.trackAsync());
+
+ // create the fulltext row, using the same rowid/docid
+ let icts = this._insertConversationTextStatement;
+ icts.bindByIndex(0, conversationID);
+ icts.bindByIndex(1, aSubject);
+ icts.executeAsync(this.trackAsync());
+
+ // create it
+ let conversation = new GlodaConversation(
+ this,
+ conversationID,
+ aSubject,
+ aOldestMessageDate,
+ aNewestMessageDate
+ );
+ // it's new! let the collection manager know about it.
+ GlodaCollectionManager.itemsAdded(conversation.NOUN_ID, [conversation]);
+ // return it
+ return conversation;
+ },
+
+ get _deleteConversationByIDStatement() {
+ let statement = this._createAsyncStatement(
+ "DELETE FROM conversations WHERE id = ?1"
+ );
+ this.__defineGetter__("_deleteConversationByIDStatement", () => statement);
+ return this._deleteConversationByIDStatement;
+ },
+
+ /**
+ * Asynchronously delete a conversation given its ID.
+ */
+ deleteConversationByID(aConversationID) {
+ let dcbids = this._deleteConversationByIDStatement;
+ dcbids.bindByIndex(0, aConversationID);
+ dcbids.executeAsync(this.trackAsync());
+
+ GlodaCollectionManager.itemsDeleted(GlodaConversation.prototype.NOUN_ID, [
+ aConversationID,
+ ]);
+ },
+
+ _conversationFromRow(aStmt) {
+ let oldestMessageDate, newestMessageDate;
+ if (aStmt.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ oldestMessageDate = null;
+ } else {
+ oldestMessageDate = aStmt.getInt64(2);
+ }
+ if (aStmt.getTypeOfIndex(3) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ newestMessageDate = null;
+ } else {
+ newestMessageDate = aStmt.getInt64(3);
+ }
+ return new GlodaConversation(
+ this,
+ aStmt.getInt64(0),
+ aStmt.getString(1),
+ oldestMessageDate,
+ newestMessageDate
+ );
+ },
+
  /* ********** Message ********** */
  /**
   * Next message id, managed because of our use of asynchronous inserts.
   * Initialized by _populateMessageManagedId called by _init.
   *
   * Start from 32 to leave us all kinds of magical sentinel values at the
   * bottom (ids below 32 are reserved).
   */
  _nextMessageId: 32,
+
+ _populateMessageManagedId() {
+ let stmt = this._createSyncStatement("SELECT MAX(id) FROM messages", true);
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ // 0 gets returned even if there are no messages...
+ let highestSeen = stmt.getInt64(0);
+ if (highestSeen != 0) {
+ this._nextMessageId = highestSeen + 1;
+ }
+ }
+ stmt.finalize();
+ },
+
+ get _insertMessageStatement() {
+ let statement = this._createAsyncStatement(
+ "INSERT INTO messages (id, folderID, messageKey, conversationID, date, \
+ headerMessageID, jsonAttributes, notability) \
+ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"
+ );
+ this.__defineGetter__("_insertMessageStatement", () => statement);
+ return this._insertMessageStatement;
+ },
+
+ get _insertMessageTextStatement() {
+ let statement = this._createAsyncStatement(
+ "INSERT INTO messagesText (docid, subject, body, attachmentNames, \
+ author, recipients) \
+ VALUES (?1, ?2, ?3, ?4, ?5, ?6)"
+ );
+ this.__defineGetter__("_insertMessageTextStatement", () => statement);
+ return this._insertMessageTextStatement;
+ },
+
+ /**
+ * Create a GlodaMessage with the given properties. Because this is only half
+ * of the process of creating a message (the attributes still need to be
+ * completed), it's on the caller's head to call GlodaCollectionManager's
+ * itemAdded method once the message is fully created.
+ *
+ * This method uses the async connection, any downstream logic that depends on
+ * this message actually existing in the database must be done using an
+ * async query.
+ */
+ createMessage(
+ aFolder,
+ aMessageKey,
+ aConversationID,
+ aDatePRTime,
+ aHeaderMessageID
+ ) {
+ let folderID;
+ if (aFolder != null) {
+ folderID = this._mapFolder(aFolder).id;
+ } else {
+ folderID = null;
+ }
+
+ let messageID = this._nextMessageId++;
+
+ let message = new GlodaMessage(
+ this,
+ messageID,
+ folderID,
+ aMessageKey,
+ aConversationID,
+ /* conversation */ null,
+ aDatePRTime ? new Date(aDatePRTime / 1000) : null,
+ aHeaderMessageID,
+ /* deleted */ false,
+ /* jsonText */ undefined,
+ /* notability*/ 0
+ );
+
+ // We would love to notify the collection manager about the message at this
+ // point (at least if it's not a ghost), but we can't yet. We need to wait
+ // until the attributes have been indexed, which means it's out of our
+ // hands. (Gloda.processMessage does it.)
+
+ return message;
+ },
+
+ insertMessage(aMessage) {
+ this._log.debug("insertMessage " + aMessage);
+ let ims = this._insertMessageStatement;
+ ims.bindByIndex(0, aMessage.id);
+ if (aMessage.folderID == null) {
+ ims.bindByIndex(1, null);
+ } else {
+ ims.bindByIndex(1, aMessage.folderID);
+ }
+ if (aMessage.messageKey == null) {
+ ims.bindByIndex(2, null);
+ } else {
+ ims.bindByIndex(2, aMessage.messageKey);
+ }
+ ims.bindByIndex(3, aMessage.conversationID);
+ if (aMessage.date == null) {
+ ims.bindByIndex(4, null);
+ } else {
+ ims.bindByIndex(4, aMessage.date * 1000);
+ }
+ ims.bindByIndex(5, aMessage.headerMessageID);
+ if (aMessage._jsonText) {
+ ims.bindByIndex(6, aMessage._jsonText);
+ } else {
+ ims.bindByIndex(6, null);
+ }
+ ims.bindByIndex(7, aMessage.notability);
+
+ try {
+ ims.executeAsync(this.trackAsync());
+ } catch (ex) {
+ throw new Error(
+ "error executing statement... " +
+ this.asyncConnection.lastError +
+ ": " +
+ this.asyncConnection.lastErrorString +
+ " - " +
+ ex
+ );
+ }
+
+ // we create the full-text row for any message that isn't a ghost,
+ // whether we have the body or not
+ if (aMessage.folderID !== null) {
+ this._insertMessageText(aMessage);
+ }
+ },
+
+ /**
+ * Inserts a full-text row. This should only be called if you're sure you want
+ * to insert a row into the table.
+ */
+ _insertMessageText(aMessage) {
+ if (aMessage._content && aMessage._content.hasContent()) {
+ aMessage._indexedBodyText = aMessage._content.getContentString(true);
+ } else if (aMessage._bodyLines) {
+ aMessage._indexedBodyText = aMessage._bodyLines.join("\n");
+ } else {
+ aMessage._indexedBodyText = null;
+ }
+
+ let imts = this._insertMessageTextStatement;
+ imts.bindByIndex(0, aMessage.id);
+ imts.bindByIndex(1, aMessage._subject);
+ if (aMessage._indexedBodyText == null) {
+ imts.bindByIndex(2, null);
+ } else {
+ imts.bindByIndex(2, aMessage._indexedBodyText);
+ }
+ if (aMessage._attachmentNames === null) {
+ imts.bindByIndex(3, null);
+ } else {
+ imts.bindByIndex(3, aMessage._attachmentNames.join("\n"));
+ }
+
+ // if (aMessage._indexAuthor)
+ imts.bindByIndex(4, aMessage._indexAuthor);
+ // if (aMessage._indexRecipients)
+ imts.bindByIndex(5, aMessage._indexRecipients);
+
+ try {
+ imts.executeAsync(this.trackAsync());
+ } catch (ex) {
+ throw new Error(
+ "error executing fulltext statement... " +
+ this.asyncConnection.lastError +
+ ": " +
+ this.asyncConnection.lastErrorString +
+ " - " +
+ ex
+ );
+ }
+ },
+
+ get _updateMessageStatement() {
+ let statement = this._createAsyncStatement(
+ "UPDATE messages SET folderID = ?1, \
+ messageKey = ?2, \
+ conversationID = ?3, \
+ date = ?4, \
+ headerMessageID = ?5, \
+ jsonAttributes = ?6, \
+ notability = ?7, \
+ deleted = ?8 \
+ WHERE id = ?9"
+ );
+ this.__defineGetter__("_updateMessageStatement", () => statement);
+ return this._updateMessageStatement;
+ },
+
+ get _updateMessageTextStatement() {
+ let statement = this._createAsyncStatement(
+ "UPDATE messagesText SET body = ?1, \
+ attachmentNames = ?2 \
+ WHERE docid = ?3"
+ );
+
+ this.__defineGetter__("_updateMessageTextStatement", () => statement);
+ return this._updateMessageTextStatement;
+ },
+
+ /**
+ * Update the database row associated with the message. If the message is
+ * not a ghost and has _isNew defined, messagesText is affected.
+ *
+ * aMessage._isNew is currently equivalent to the fact that there is no
+ * full-text row associated with this message, and we work with this
+ * assumption here. Note that if aMessage._isNew is not defined, then
+ * we don't do anything.
+ */
+ updateMessage(aMessage) {
+ this._log.debug("updateMessage " + aMessage);
+ let ums = this._updateMessageStatement;
+ ums.bindByIndex(8, aMessage.id);
+ if (aMessage.folderID === null) {
+ ums.bindByIndex(0, null);
+ } else {
+ ums.bindByIndex(0, aMessage.folderID);
+ }
+ if (aMessage.messageKey === null) {
+ ums.bindByIndex(1, null);
+ } else {
+ ums.bindByIndex(1, aMessage.messageKey);
+ }
+ ums.bindByIndex(2, aMessage.conversationID);
+ if (aMessage.date === null) {
+ ums.bindByIndex(3, null);
+ } else {
+ ums.bindByIndex(3, aMessage.date * 1000);
+ }
+ ums.bindByIndex(4, aMessage.headerMessageID);
+ if (aMessage._jsonText) {
+ ums.bindByIndex(5, aMessage._jsonText);
+ } else {
+ ums.bindByIndex(5, null);
+ }
+ ums.bindByIndex(6, aMessage.notability);
+ ums.bindByIndex(7, aMessage._isDeleted ? 1 : 0);
+
+ ums.executeAsync(this.trackAsync());
+
+ if (aMessage.folderID !== null) {
+ if ("_isNew" in aMessage && aMessage._isNew === true) {
+ this._insertMessageText(aMessage);
+ } else {
+ this._updateMessageText(aMessage);
+ }
+ }
+ },
+
+ /**
+ * Updates the full-text row associated with this message. This only performs
+ * the UPDATE query if the indexed body text has changed, which means that if
+ * the body hasn't changed but the attachments have, we don't update.
+ */
+ _updateMessageText(aMessage) {
+ let newIndexedBodyText;
+ if (aMessage._content && aMessage._content.hasContent()) {
+ newIndexedBodyText = aMessage._content.getContentString(true);
+ } else if (aMessage._bodyLines) {
+ newIndexedBodyText = aMessage._bodyLines.join("\n");
+ } else {
+ newIndexedBodyText = null;
+ }
+
+ // If the body text matches, don't perform an update
+ if (newIndexedBodyText == aMessage._indexedBodyText) {
+ this._log.debug(
+ "in _updateMessageText, skipping update because body matches"
+ );
+ return;
+ }
+
+ aMessage._indexedBodyText = newIndexedBodyText;
+ let umts = this._updateMessageTextStatement;
+ umts.bindByIndex(2, aMessage.id);
+
+ if (aMessage._indexedBodyText == null) {
+ umts.bindByIndex(0, null);
+ } else {
+ umts.bindByIndex(0, aMessage._indexedBodyText);
+ }
+
+ if (aMessage._attachmentNames == null) {
+ umts.bindByIndex(1, null);
+ } else {
+ umts.bindByIndex(1, aMessage._attachmentNames.join("\n"));
+ }
+
+ try {
+ umts.executeAsync(this.trackAsync());
+ } catch (ex) {
+ throw new Error(
+ "error executing fulltext statement... " +
+ this.asyncConnection.lastError +
+ ": " +
+ this.asyncConnection.lastErrorString +
+ " - " +
+ ex
+ );
+ }
+ },
+
+ get _updateMessageLocationStatement() {
+ let statement = this._createAsyncStatement(
+ "UPDATE messages SET folderID = ?1, messageKey = ?2 WHERE id = ?3"
+ );
+ this.__defineGetter__("_updateMessageLocationStatement", () => statement);
+ return this._updateMessageLocationStatement;
+ },
+
+ /**
+ * Given a list of gloda message ids, and a list of their new message keys in
+ * the given new folder location, asynchronously update the message's
+ * database locations. Also, update the in-memory representations.
+ */
+ updateMessageLocations(
+ aMessageIds,
+ aNewMessageKeys,
+ aDestFolder,
+ aDoNotNotify
+ ) {
+ this._log.debug(
+ "updateMessageLocations:\n" +
+ "ids: " +
+ aMessageIds +
+ "\n" +
+ "keys: " +
+ aNewMessageKeys +
+ "\n" +
+ "dest folder: " +
+ aDestFolder +
+ "\n" +
+ "do not notify?" +
+ aDoNotNotify +
+ "\n"
+ );
+ let statement = this._updateMessageLocationStatement;
+ let destFolderID =
+ typeof aDestFolder == "number"
+ ? aDestFolder
+ : this._mapFolder(aDestFolder).id;
+
+ // map gloda id to the new message key for in-memory rep transform below
+ let cacheLookupMap = {};
+
+ for (let iMsg = 0; iMsg < aMessageIds.length; iMsg++) {
+ let id = aMessageIds[iMsg],
+ msgKey = aNewMessageKeys[iMsg];
+ statement.bindByIndex(0, destFolderID);
+ statement.bindByIndex(1, msgKey);
+ statement.bindByIndex(2, id);
+ statement.executeAsync(this.trackAsync());
+
+ cacheLookupMap[id] = msgKey;
+ }
+
+ // - perform the cache lookup so we can update in-memory representations
+ // found in memory items, and converted to list form for notification
+ let inMemoryItems = {},
+ modifiedItems = [];
+ GlodaCollectionManager.cacheLookupMany(
+ GlodaMessage.prototype.NOUN_ID,
+ cacheLookupMap,
+ inMemoryItems,
+ /* do not cache */ false
+ );
+ for (let glodaId in inMemoryItems) {
+ let glodaMsg = inMemoryItems[glodaId];
+ glodaMsg._folderID = destFolderID;
+ glodaMsg._messageKey = cacheLookupMap[glodaId];
+ modifiedItems.push(glodaMsg);
+ }
+
+ // tell the collection manager about the modified messages so it can update
+ // any existing views...
+ if (!aDoNotNotify && modifiedItems.length) {
+ GlodaCollectionManager.itemsModified(
+ GlodaMessage.prototype.NOUN_ID,
+ modifiedItems
+ );
+ }
+ },
+
+ get _updateMessageKeyStatement() {
+ let statement = this._createAsyncStatement(
+ "UPDATE messages SET messageKey = ?1 WHERE id = ?2"
+ );
+ this.__defineGetter__("_updateMessageKeyStatement", () => statement);
+ return this._updateMessageKeyStatement;
+ },
+
+ /**
+ * Update the message keys for the gloda messages with the given id's. This
+ * is to be used in response to msgKeyChanged notifications and is similar to
+ * `updateMessageLocations` except that we do not update the folder and we
+ * do not perform itemsModified notifications (because message keys are not
+ * intended to be relevant to the gloda message abstraction).
+ */
+ updateMessageKeys(aMessageIds, aNewMessageKeys) {
+ this._log.debug(
+ "updateMessageKeys:\n" +
+ "ids: " +
+ aMessageIds +
+ "\n" +
+ "keys:" +
+ aNewMessageKeys +
+ "\n"
+ );
+ let statement = this._updateMessageKeyStatement;
+
+ // map gloda id to the new message key for in-memory rep transform below
+ let cacheLookupMap = {};
+
+ for (let iMsg = 0; iMsg < aMessageIds.length; iMsg++) {
+ let id = aMessageIds[iMsg],
+ msgKey = aNewMessageKeys[iMsg];
+ statement.bindByIndex(0, msgKey);
+ statement.bindByIndex(1, id);
+ statement.executeAsync(this.trackAsync());
+
+ cacheLookupMap[id] = msgKey;
+ }
+
+ // - perform the cache lookup so we can update in-memory representations
+ let inMemoryItems = {};
+ GlodaCollectionManager.cacheLookupMany(
+ GlodaMessage.prototype.NOUN_ID,
+ cacheLookupMap,
+ inMemoryItems,
+ /* do not cache */ false
+ );
+ for (let glodaId in inMemoryItems) {
+ let glodaMsg = inMemoryItems[glodaId];
+ glodaMsg._messageKey = cacheLookupMap[glodaId];
+ }
+ },
+
+ /**
+ * Asynchronously mutate message folder id/message keys for the given
+ * messages, indicating that we are moving them to the target folder, but
+ * don't yet know their target message keys.
+ *
+ * Updates in-memory representations too.
+ */
+ updateMessageFoldersByKeyPurging(aGlodaIds, aDestFolder) {
+ let destFolderID = this._mapFolder(aDestFolder).id;
+
+ let sqlStr =
+ "UPDATE messages SET folderID = ?1, \
+ messageKey = ?2 \
+ WHERE id IN (" +
+ aGlodaIds.join(", ") +
+ ")";
+ let statement = this._createAsyncStatement(sqlStr, true);
+ statement.bindByIndex(0, destFolderID);
+ statement.bindByIndex(1, null);
+ statement.executeAsync(this.trackAsync());
+ statement.finalize();
+
+ let cached = GlodaCollectionManager.cacheLookupManyList(
+ GlodaMessage.prototype.NOUN_ID,
+ aGlodaIds
+ );
+ for (let id in cached) {
+ let glodaMsg = cached[id];
+ glodaMsg._folderID = destFolderID;
+ glodaMsg._messageKey = null;
+ }
+ },
+
+ _messageFromRow(aRow) {
+ this._log.debug("_messageFromRow " + aRow);
+ let folderId,
+ messageKey,
+ date,
+ jsonText,
+ subject,
+ indexedBodyText,
+ attachmentNames;
+ if (aRow.getTypeOfIndex(1) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ folderId = null;
+ } else {
+ folderId = aRow.getInt64(1);
+ }
+ if (aRow.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ messageKey = null;
+ } else {
+ messageKey = aRow.getInt64(2);
+ }
+ if (aRow.getTypeOfIndex(4) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ date = null;
+ } else {
+ date = new Date(aRow.getInt64(4) / 1000);
+ }
+ if (aRow.getTypeOfIndex(7) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ jsonText = undefined;
+ } else {
+ jsonText = aRow.getString(7);
+ }
+ // only queryFromQuery queries will have these columns
+ if (aRow.numEntries >= 14) {
+ if (aRow.getTypeOfIndex(10) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ subject = undefined;
+ } else {
+ subject = aRow.getString(10);
+ }
+ if (aRow.getTypeOfIndex(9) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ indexedBodyText = undefined;
+ } else {
+ indexedBodyText = aRow.getString(9);
+ }
+ if (aRow.getTypeOfIndex(11) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+ attachmentNames = null;
+ } else {
+ attachmentNames = aRow.getString(11);
+ if (attachmentNames) {
+ attachmentNames = attachmentNames.split("\n");
+ } else {
+ attachmentNames = null;
+ }
+ }
+ // we ignore 12, author
+ // we ignore 13, recipients
+ }
+ return new GlodaMessage(
+ this,
+ aRow.getInt64(0),
+ folderId,
+ messageKey,
+ aRow.getInt64(3),
+ null,
+ date,
+ aRow.getString(5),
+ aRow.getInt64(6),
+ jsonText,
+ aRow.getInt64(8),
+ subject,
+ indexedBodyText,
+ attachmentNames
+ );
+ },
+
+ get _updateMessagesMarkDeletedByFolderID() {
+ // When marking deleted clear the folderID and messageKey so that the
+ // indexing process can reuse it without any location constraints.
+ let statement = this._createAsyncStatement(
+ "UPDATE messages SET folderID = NULL, messageKey = NULL, \
+ deleted = 1 WHERE folderID = ?1"
+ );
+ this.__defineGetter__(
+ "_updateMessagesMarkDeletedByFolderID",
+ () => statement
+ );
+ return this._updateMessagesMarkDeletedByFolderID;
+ },
+
+ /**
+ * Efficiently mark all the messages in a folder as deleted. Unfortunately,
+ * we obviously do not know the id's of the messages affected by this which
+ * complicates in-memory updates. The options are sending out to the SQL
+ * database for a list of the message id's or some form of in-memory
+ * traversal. I/O costs being what they are, users having a propensity to
+ * have folders with tens of thousands of messages, and the unlikeliness
+ * of all of those messages being gloda-memory-resident, we go with the
+ * in-memory traversal.
+ */
+ markMessagesDeletedByFolderID(aFolderID) {
+ let statement = this._updateMessagesMarkDeletedByFolderID;
+ statement.bindByIndex(0, aFolderID);
+ statement.executeAsync(this.trackAsync());
+
+ // Have the collection manager generate itemsRemoved events for any
+ // in-memory messages in that folder.
+ GlodaCollectionManager.itemsDeletedByAttribute(
+ GlodaMessage.prototype.NOUN_ID,
+ aMsg => aMsg._folderID == aFolderID
+ );
+ },
+
+ /**
+ * Mark all the gloda messages as deleted blind-fire. Check if any of the
+ * messages are known to the collection manager and update them to be deleted
+ * along with the requisite collection notifications.
+ */
+ markMessagesDeletedByIDs(aMessageIDs) {
+ // When marking deleted clear the folderID and messageKey so that the
+ // indexing process can reuse it without any location constraints.
+ let sqlString =
+ "UPDATE messages SET folderID = NULL, messageKey = NULL, " +
+ "deleted = 1 WHERE id IN (" +
+ aMessageIDs.join(",") +
+ ")";
+
+ let statement = this._createAsyncStatement(sqlString, true);
+ statement.executeAsync(this.trackAsync());
+ statement.finalize();
+
+ GlodaCollectionManager.itemsDeleted(
+ GlodaMessage.prototype.NOUN_ID,
+ aMessageIDs
+ );
+ },
+
+ get _countDeletedMessagesStatement() {
+ let statement = this._createAsyncStatement(
+ "SELECT COUNT(*) FROM messages WHERE deleted = 1"
+ );
+ this.__defineGetter__("_countDeletedMessagesStatement", () => statement);
+ return this._countDeletedMessagesStatement;
+ },
+
+ /**
+ * Count how many messages are currently marked as deleted in the database.
+ */
+ countDeletedMessages(aCallback) {
+ let cms = this._countDeletedMessagesStatement;
+ cms.executeAsync(new SingletonResultValueHandler(aCallback));
+ },
+
+ get _deleteMessageByIDStatement() {
+ let statement = this._createAsyncStatement(
+ "DELETE FROM messages WHERE id = ?1"
+ );
+ this.__defineGetter__("_deleteMessageByIDStatement", () => statement);
+ return this._deleteMessageByIDStatement;
+ },
+
+ get _deleteMessageTextByIDStatement() {
+ let statement = this._createAsyncStatement(
+ "DELETE FROM messagesText WHERE docid = ?1"
+ );
+ this.__defineGetter__("_deleteMessageTextByIDStatement", () => statement);
+ return this._deleteMessageTextByIDStatement;
+ },
+
+ /**
+ * Delete a message and its fulltext from the database. It is assumed that
+ * the message was already marked as deleted and so is not visible to the
+ * collection manager and so nothing needs to be done about that.
+ */
+ deleteMessageByID(aMessageID) {
+ let dmbids = this._deleteMessageByIDStatement;
+ dmbids.bindByIndex(0, aMessageID);
+ dmbids.executeAsync(this.trackAsync());
+
+ this.deleteMessageTextByID(aMessageID);
+ },
+
+ deleteMessageTextByID(aMessageID) {
+ let dmt = this._deleteMessageTextByIDStatement;
+ dmt.bindByIndex(0, aMessageID);
+ dmt.executeAsync(this.trackAsync());
+ },
+
+ get _folderCompactionStatement() {
+ let statement = this._createAsyncStatement(
+ "SELECT id, messageKey, headerMessageID FROM messages \
+ WHERE folderID = ?1 AND \
+ messageKey >= ?2 AND +deleted = 0 ORDER BY messageKey LIMIT ?3"
+ );
+ this.__defineGetter__("_folderCompactionStatement", () => statement);
+ return this._folderCompactionStatement;
+ },
+
+ folderCompactionPassBlockFetch(
+ aFolderID,
+ aStartingMessageKey,
+ aLimit,
+ aCallback
+ ) {
+ let fcs = this._folderCompactionStatement;
+ fcs.bindByIndex(0, aFolderID);
+ fcs.bindByIndex(1, aStartingMessageKey);
+ fcs.bindByIndex(2, aLimit);
+ fcs.executeAsync(new CompactionBlockFetcherHandler(aCallback));
+ },
+
+ /* ********** Message Attributes ********** */
+ get _insertMessageAttributeStatement() {
+ let statement = this._createAsyncStatement(
+ "INSERT INTO messageAttributes (conversationID, messageID, attributeID, \
+ value) \
+ VALUES (?1, ?2, ?3, ?4)"
+ );
+ this.__defineGetter__("_insertMessageAttributeStatement", () => statement);
+ return this._insertMessageAttributeStatement;
+ },
+
+ get _deleteMessageAttributeStatement() {
+ let statement = this._createAsyncStatement(
+ "DELETE FROM messageAttributes WHERE attributeID = ?1 AND value = ?2 \
+ AND conversationID = ?3 AND messageID = ?4"
+ );
+ this.__defineGetter__("_deleteMessageAttributeStatement", () => statement);
+ return this._deleteMessageAttributeStatement;
+ },
+
+ /**
+ * Insert and remove attributes relating to a GlodaMessage. This is performed
+ * inside a pseudo-transaction (we create one if we aren't in one, using
+ * our _beginTransaction wrapper, but if we are in one, no additional
+ * meaningful semantics are added).
+ * No attempt is made to verify uniqueness of inserted attributes, either
+ * against the current database or within the provided list of attributes.
+ * The caller is responsible for ensuring that unwanted duplicates are
+ * avoided.
+ *
+ * @param aMessage The GlodaMessage the attributes belong to. This is used
+ * to provide the message id and conversation id.
+ * @param aAddDBAttributes A list of attribute tuples to add, where each tuple
+ * contains an attribute ID and a value. Lest you forget, an attribute ID
+ * corresponds to a row in the attribute definition table. The attribute
+ * definition table stores the 'parameter' for the attribute, if any.
+ * (Which is to say, our frequent Attribute-Parameter-Value triple has
+ * the Attribute-Parameter part distilled to a single attribute id.)
+ * @param aRemoveDBAttributes A list of attribute tuples to remove.
+ */
+ adjustMessageAttributes(aMessage, aAddDBAttributes, aRemoveDBAttributes) {
+ let imas = this._insertMessageAttributeStatement;
+ let dmas = this._deleteMessageAttributeStatement;
+ this._beginTransaction();
+ try {
+ for (let iAttrib = 0; iAttrib < aAddDBAttributes.length; iAttrib++) {
+ let attribValueTuple = aAddDBAttributes[iAttrib];
+
+ imas.bindByIndex(0, aMessage.conversationID);
+ imas.bindByIndex(1, aMessage.id);
+ imas.bindByIndex(2, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ imas.bindByIndex(3, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ imas.bindByIndex(3, attribValueTuple[1]);
+ } else {
+ imas.bindByIndex(3, attribValueTuple[1]);
+ }
+ imas.executeAsync(this.trackAsync());
+ }
+
+ for (let iAttrib = 0; iAttrib < aRemoveDBAttributes.length; iAttrib++) {
+ let attribValueTuple = aRemoveDBAttributes[iAttrib];
+
+ dmas.bindByIndex(0, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ dmas.bindByIndex(1, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ dmas.bindByIndex(1, attribValueTuple[1]);
+ } else {
+ dmas.bindByIndex(1, attribValueTuple[1]);
+ }
+ dmas.bindByIndex(2, aMessage.conversationID);
+ dmas.bindByIndex(3, aMessage.id);
+ dmas.executeAsync(this.trackAsync());
+ }
+
+ this._commitTransaction();
+ } catch (ex) {
+ this._log.error("adjustMessageAttributes:", ex);
+ this._rollbackTransaction();
+ throw ex;
+ }
+ },
+
+ get _deleteMessageAttributesByMessageIDStatement() {
+ let statement = this._createAsyncStatement(
+ "DELETE FROM messageAttributes WHERE messageID = ?1"
+ );
+ this.__defineGetter__(
+ "_deleteMessageAttributesByMessageIDStatement",
+ () => statement
+ );
+ return this._deleteMessageAttributesByMessageIDStatement;
+ },
+
+ /**
+ * Clear all the message attributes for a given GlodaMessage. No changes
+ * are made to the in-memory representation of the message; it is up to the
+ * caller to ensure that it handles things correctly.
+ *
+ * @param aMessage The GlodaMessage whose database attributes should be
+ * purged.
+ */
+ clearMessageAttributes(aMessage) {
+ if (aMessage.id != null) {
+ this._deleteMessageAttributesByMessageIDStatement.bindByIndex(
+ 0,
+ aMessage.id
+ );
+ this._deleteMessageAttributesByMessageIDStatement.executeAsync(
+ this.trackAsync()
+ );
+ }
+ },
+
+ _stringSQLQuoter(aString) {
+ return "'" + aString.replace(/\'/g, "''") + "'";
+ },
+ _numberQuoter(aNum) {
+ return aNum;
+ },
+
+ /* ===== Generic Attribute Support ===== */
+ adjustAttributes(aItem, aAddDBAttributes, aRemoveDBAttributes) {
+ let nounDef = aItem.NOUN_DEF;
+ let dbMeta = nounDef._dbMeta;
+ if (dbMeta.insertAttrStatement === undefined) {
+ dbMeta.insertAttrStatement = this._createAsyncStatement(
+ "INSERT INTO " +
+ nounDef.attrTableName +
+ " (" +
+ nounDef.attrIDColumnName +
+ ", attributeID, value) " +
+ " VALUES (?1, ?2, ?3)"
+ );
+ // we always create this at the same time (right here), no need to check
+ dbMeta.deleteAttrStatement = this._createAsyncStatement(
+ "DELETE FROM " +
+ nounDef.attrTableName +
+ " WHERE " +
+ " attributeID = ?1 AND value = ?2 AND " +
+ nounDef.attrIDColumnName +
+ " = ?3"
+ );
+ }
+
+ let ias = dbMeta.insertAttrStatement;
+ let das = dbMeta.deleteAttrStatement;
+ this._beginTransaction();
+ try {
+ for (let iAttr = 0; iAttr < aAddDBAttributes.length; iAttr++) {
+ let attribValueTuple = aAddDBAttributes[iAttr];
+
+ ias.bindByIndex(0, aItem.id);
+ ias.bindByIndex(1, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ ias.bindByIndex(2, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ ias.bindByIndex(2, attribValueTuple[1]);
+ } else {
+ ias.bindByIndex(2, attribValueTuple[1]);
+ }
+ ias.executeAsync(this.trackAsync());
+ }
+
+ for (let iAttr = 0; iAttr < aRemoveDBAttributes.length; iAttr++) {
+ let attribValueTuple = aRemoveDBAttributes[iAttr];
+
+ das.bindByIndex(0, attribValueTuple[0]);
+ // use 0 instead of null, otherwise the db gets upset. (and we don't
+ // really care anyways.)
+ if (attribValueTuple[1] == null) {
+ das.bindByIndex(1, 0);
+ } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) {
+ das.bindByIndex(1, attribValueTuple[1]);
+ } else {
+ das.bindByIndex(1, attribValueTuple[1]);
+ }
+ das.bindByIndex(2, aItem.id);
+ das.executeAsync(this.trackAsync());
+ }
+
+ this._commitTransaction();
+ } catch (ex) {
+ this._log.error("adjustAttributes:", ex);
+ this._rollbackTransaction();
+ throw ex;
+ }
+ },
+
+ clearAttributes(aItem) {
+ let nounDef = aItem.NOUN_DEF;
+ let dbMeta = nounDef._dbMeta;
+ if (dbMeta.clearAttrStatement === undefined) {
+ dbMeta.clearAttrStatement = this._createAsyncStatement(
+ "DELETE FROM " +
+ nounDef.attrTableName +
+ " WHERE " +
+ nounDef.attrIDColumnName +
+ " = ?1"
+ );
+ }
+
+ if (aItem.id != null) {
+ dbMeta.clearAttrstatement.bindByIndex(0, aItem.id);
+ dbMeta.clearAttrStatement.executeAsync(this.trackAsync());
+ }
+ },
+
+ /**
+ * escapeStringForLIKE is only available on statements, and sometimes we want
+ * to use it before we create our statement, so we create a statement just
+ * for this reason.
+ */
+ get _escapeLikeStatement() {
+ let statement = this._createAsyncStatement("SELECT 0");
+ this.__defineGetter__("_escapeLikeStatement", () => statement);
+ return this._escapeLikeStatement;
+ },
+
+ *_convertToDBValuesAndGroupByAttributeID(aAttrDef, aValues) {
+ let objectNounDef = aAttrDef.objectNounDef;
+ if (!objectNounDef.usesParameter) {
+ let dbValues = [];
+ for (let iValue = 0; iValue < aValues.length; iValue++) {
+ let value = aValues[iValue];
+ // If the empty set is significant and it's an empty signifier, emit
+ // the appropriate dbvalue.
+ if (value == null && aAttrDef.emptySetIsSignificant) {
+ yield [this.kEmptySetAttrId, [aAttrDef.id]];
+ // Bail if the only value was us; we don't want to add a
+ // value-posessing wildcard into the mix.
+ if (aValues.length == 1) {
+ return;
+ }
+ continue;
+ }
+ let dbValue = objectNounDef.toParamAndValue(value)[1];
+ if (dbValue != null) {
+ dbValues.push(dbValue);
+ }
+ }
+ yield [aAttrDef.special ? undefined : aAttrDef.id, dbValues];
+ return;
+ }
+
+ let curParam, attrID, dbValues;
+ let attrDBDef = aAttrDef.dbDef;
+ for (let iValue = 0; iValue < aValues.length; iValue++) {
+ let value = aValues[iValue];
+ // If the empty set is significant and it's an empty signifier, emit
+ // the appropriate dbvalue.
+ if (value == null && aAttrDef.emptySetIsSignificant) {
+ yield [this.kEmptySetAttrId, [aAttrDef.id]];
+ // Bail if the only value was us; we don't want to add a
+ // value-posessing wildcard into the mix.
+ if (aValues.length == 1) {
+ return;
+ }
+ continue;
+ }
+ let [dbParam, dbValue] = objectNounDef.toParamAndValue(value);
+ if (curParam === undefined) {
+ curParam = dbParam;
+ attrID = attrDBDef.bindParameter(curParam);
+ if (dbValue != null) {
+ dbValues = [dbValue];
+ } else {
+ dbValues = [];
+ }
+ } else if (curParam == dbParam) {
+ if (dbValue != null) {
+ dbValues.push(dbValue);
+ }
+ } else {
+ yield [attrID, dbValues];
+ curParam = dbParam;
+ attrID = attrDBDef.bindParameter(curParam);
+ if (dbValue != null) {
+ dbValues = [dbValue];
+ } else {
+ dbValues = [];
+ }
+ }
+ }
+ if (dbValues !== undefined) {
+ yield [attrID, dbValues];
+ }
+ },
+
+ *_convertRangesToDBStringsAndGroupByAttributeID(
+ aAttrDef,
+ aValues,
+ aValueColumnName
+ ) {
+ let objectNounDef = aAttrDef.objectNounDef;
+ if (!objectNounDef.usesParameter) {
+ let dbStrings = [];
+ for (let iValue = 0; iValue < aValues.length; iValue++) {
+ let [lowerVal, upperVal] = aValues[iValue];
+ // they both can't be null. that is the law.
+ if (lowerVal == null) {
+ dbStrings.push(
+ aValueColumnName +
+ " <= " +
+ objectNounDef.toParamAndValue(upperVal)[1]
+ );
+ } else if (upperVal == null) {
+ dbStrings.push(
+ aValueColumnName +
+ " >= " +
+ objectNounDef.toParamAndValue(lowerVal)[1]
+ );
+ } else {
+ // No one is null!
+ dbStrings.push(
+ aValueColumnName +
+ " BETWEEN " +
+ objectNounDef.toParamAndValue(lowerVal)[1] +
+ " AND " +
+ objectNounDef.toParamAndValue(upperVal)[1]
+ );
+ }
+ }
+ yield [aAttrDef.special ? undefined : aAttrDef.id, dbStrings];
+ return;
+ }
+
+ let curParam, attrID, dbStrings;
+ let attrDBDef = aAttrDef.dbDef;
+ for (let iValue = 0; iValue < aValues.length; iValue++) {
+ let [lowerVal, upperVal] = aValues[iValue];
+
+ let dbString, dbParam, lowerDBVal, upperDBVal;
+ // they both can't be null. that is the law.
+ if (lowerVal == null) {
+ [dbParam, upperDBVal] = objectNounDef.toParamAndValue(upperVal);
+ dbString = aValueColumnName + " <= " + upperDBVal;
+ } else if (upperVal == null) {
+ [dbParam, lowerDBVal] = objectNounDef.toParamAndValue(lowerVal);
+ dbString = aValueColumnName + " >= " + lowerDBVal;
+ } else {
+ // no one is null!
+ [dbParam, lowerDBVal] = objectNounDef.toParamAndValue(lowerVal);
+ dbString =
+ aValueColumnName +
+ " BETWEEN " +
+ lowerDBVal +
+ " AND " +
+ objectNounDef.toParamAndValue(upperVal)[1];
+ }
+
+ if (curParam === undefined) {
+ curParam = dbParam;
+ attrID = attrDBDef.bindParameter(curParam);
+ dbStrings = [dbString];
+ } else if (curParam === dbParam) {
+ dbStrings.push(dbString);
+ } else {
+ yield [attrID, dbStrings];
+ curParam = dbParam;
+ attrID = attrDBDef.bindParameter(curParam);
+ dbStrings = [dbString];
+ }
+ }
+ if (dbStrings !== undefined) {
+ yield [attrID, dbStrings];
+ }
+ },
+
+ /* eslint-disable complexity */
  /**
   * Perform a database query given a GlodaQueryClass instance that specifies
   * a set of constraints relating to the noun type associated with the query.
   * A GlodaCollection is returned containing the results of the look-up.
   * By default the collection is "live", and will mutate (generating events to
   * its listener) as the state of the database changes.
   * This functionality is made user/extension visible by the Query's
   * getCollection (asynchronous).
   *
   * The generated SQL has the shape
   *   SELECT * FROM noun-table WHERE (union-1-clause) OR (union-2-clause) ...
   * where each union query contributes one clause of the form
   *   id IN (select-1 INTERSECT select-2 ...)
   * with one SELECT (or raw id list) per constraint.
   *
   * @param aQuery The query (plus any unions hanging off it) to compile.
   * @param aListener Listener to receive events on the resulting collection.
   * @param aListenerData Opaque data associated with the listener; stored on
   *     the collection (and pushed on its dataStack).
   * @param [aExistingCollection] Reuse this collection instead of creating
   *     and registering a new one.
   * @param [aMasterCollection] Master collection for a newly created
   *     collection.
   * @param [aArgs] See |GlodaQuery.getCollection| for info.
   * @returns the GlodaCollection the asynchronous results will land in.
   */
  queryFromQuery(
    aQuery,
    aListener,
    aListenerData,
    aExistingCollection,
    aMasterCollection,
    aArgs
  ) {
    // when changing this method, be sure that GlodaQuery's testMatch function
    // likewise has its changes made.
    let nounDef = aQuery._nounDef;

    let whereClauses = [];
    // The query itself plus its unions; each contributes one OR'ed clause.
    let unionQueries = [aQuery].concat(aQuery._unions);
    let boundArgs = [];

    // Use the dbQueryValidityConstraintSuffix to provide constraints that
    // filter items down to those that are valid for the query mechanism to
    // return. For example, in the case of messages, deleted or ghost
    // messages should not be returned by this query layer. We require
    // hand-rolled SQL to do that for now.
    let validityConstraintSuffix;
    if (
      nounDef.dbQueryValidityConstraintSuffix &&
      !aQuery.options.noDbQueryValidityConstraints
    ) {
      validityConstraintSuffix = nounDef.dbQueryValidityConstraintSuffix;
    } else {
      validityConstraintSuffix = "";
    }

    for (let iUnion = 0; iUnion < unionQueries.length; iUnion++) {
      let curQuery = unionQueries[iUnion];
      let selects = [];

      let lastConstraintWasSpecial = false;
      let curConstraintIsSpecial;

      for (
        let iConstraint = 0;
        iConstraint < curQuery._constraints.length;
        iConstraint++
      ) {
        // A constraint is [type, attrDef, ...values].
        let constraint = curQuery._constraints[iConstraint];
        let [constraintType, attrDef] = constraint;
        let constraintValues = constraint.slice(2);

        let tableName, idColumnName, valueColumnName;
        if (constraintType == GlodaConstants.kConstraintIdIn) {
          // we don't need any of the next cases' setup code, and we especially
          // would prefer that attrDef isn't accessed since it's null for us.
        } else if (attrDef.special) {
          // Special attributes are columns directly on the noun's own table.
          tableName = nounDef.tableName;
          idColumnName = "id"; // canonical id for a table is "id".
          valueColumnName = attrDef.specialColumnName;
          curConstraintIsSpecial = true;
        } else {
          // Normal attributes live in the noun's attribute table.
          tableName = nounDef.attrTableName;
          idColumnName = nounDef.attrIDColumnName;
          valueColumnName = "value";
          curConstraintIsSpecial = false;
        }

        let select = null,
          test = null;
        if (constraintType === GlodaConstants.kConstraintIdIn) {
          // this is somewhat of a trick. this does mean that this can be the
          // only constraint. Namely, our idiom is:
          // SELECT * FROM blah WHERE id IN (a INTERSECT b INTERSECT c)
          // but if we only have 'a', then that becomes "...IN (a)", and if
          // 'a' is not a select but a list of id's... tricky, no?
          select = constraintValues.join(",");
        } else if (constraintType === GlodaConstants.kConstraintIn) {
          // @testpoint gloda.datastore.sqlgen.kConstraintIn
          let clauses = [];
          for (let [
            attrID,
            values,
          ] of this._convertToDBValuesAndGroupByAttributeID(
            attrDef,
            constraintValues
          )) {
            let clausePart;
            if (attrID !== undefined) {
              clausePart =
                "(attributeID = " + attrID + (values.length ? " AND " : "");
            } else {
              clausePart = "(";
            }
            if (values.length) {
              // strings need to be escaped, we would use ? binding, except
              // that gets mad if we have too many strings... so we use our
              // own escaping logic. correctly escaping is easy, but it still
              // feels wrong to do it. (just double the quote character...)
              if (
                "special" in attrDef &&
                attrDef.special == GlodaConstants.kSpecialString
              ) {
                clausePart +=
                  valueColumnName +
                  " IN (" +
                  values
                    .map(v => "'" + v.replace(/\'/g, "''") + "'")
                    .join(",") +
                  "))";
              } else {
                clausePart +=
                  valueColumnName + " IN (" + values.join(",") + "))";
              }
            } else {
              clausePart += ")";
            }
            clauses.push(clausePart);
          }
          test = clauses.join(" OR ");
        } else if (constraintType === GlodaConstants.kConstraintRanges) {
          // @testpoint gloda.datastore.sqlgen.kConstraintRanges
          let clauses = [];
          for (let [
            attrID,
            dbStrings,
          ] of this._convertRangesToDBStringsAndGroupByAttributeID(
            attrDef,
            constraintValues,
            valueColumnName
          )) {
            if (attrID !== undefined) {
              clauses.push(
                "(attributeID = " +
                  attrID +
                  " AND (" +
                  dbStrings.join(" OR ") +
                  "))"
              );
            } else {
              clauses.push("(" + dbStrings.join(" OR ") + ")");
            }
          }
          test = clauses.join(" OR ");
        } else if (constraintType === GlodaConstants.kConstraintEquals) {
          // @testpoint gloda.datastore.sqlgen.kConstraintEquals
          // Equality values go through ? binding (unlike kConstraintIn).
          let clauses = [];
          for (let [
            attrID,
            values,
          ] of this._convertToDBValuesAndGroupByAttributeID(
            attrDef,
            constraintValues
          )) {
            if (attrID !== undefined) {
              clauses.push(
                "(attributeID = " +
                  attrID +
                  " AND (" +
                  values.map(_ => valueColumnName + " = ?").join(" OR ") +
                  "))"
              );
            } else {
              clauses.push(
                "(" +
                  values.map(_ => valueColumnName + " = ?").join(" OR ") +
                  ")"
              );
            }
            boundArgs.push.apply(boundArgs, values);
          }
          test = clauses.join(" OR ");
        } else if (constraintType === GlodaConstants.kConstraintStringLike) {
          // @testpoint gloda.datastore.sqlgen.kConstraintStringLike
          // Build one LIKE payload: string parts are escaped, any non-string
          // part becomes a "%" wildcard.
          let likePayload = "";
          for (let valuePart of constraintValues) {
            if (typeof valuePart == "string") {
              likePayload += this._escapeLikeStatement.escapeStringForLIKE(
                valuePart,
                "/"
              );
            } else {
              likePayload += "%";
            }
          }
          test = valueColumnName + " LIKE ? ESCAPE '/'";
          boundArgs.push(likePayload);
        } else if (constraintType === GlodaConstants.kConstraintFulltext) {
          // @testpoint gloda.datastore.sqlgen.kConstraintFulltext
          // Fulltext hits come from the companion "<table>Text" FTS table.
          let matchStr = constraintValues[0];
          select =
            "SELECT docid FROM " +
            nounDef.tableName +
            "Text" +
            " WHERE " +
            attrDef.specialColumnName +
            " MATCH ?";
          boundArgs.push(matchStr);
        }

        // Consecutive special-column constraints can be AND'ed onto the
        // previous SELECT (they hit the same table) instead of adding a new
        // INTERSECT member.
        if (curConstraintIsSpecial && lastConstraintWasSpecial && test) {
          selects[selects.length - 1] += " AND " + test;
        } else if (select) {
          selects.push(select);
        } else if (test) {
          select =
            "SELECT " + idColumnName + " FROM " + tableName + " WHERE " + test;
          selects.push(select);
        } else {
          this._log.warn(
            "Unable to translate constraint of type " +
              constraintType +
              " on attribute bound as " +
              nounDef.name
          );
        }

        lastConstraintWasSpecial = curConstraintIsSpecial;
      }

      if (selects.length) {
        whereClauses.push(
          "id IN (" +
            selects.join(" INTERSECT ") +
            ")" +
            validityConstraintSuffix
        );
      }
    }

    let sqlString = "SELECT * FROM " + nounDef.tableName;
    if (!aQuery.options.noMagic) {
      if (
        aQuery.options.noDbQueryValidityConstraints &&
        nounDef.dbQueryJoinMagicWithNoValidityConstraints
      ) {
        sqlString += nounDef.dbQueryJoinMagicWithNoValidityConstraints;
      } else if (nounDef.dbQueryJoinMagic) {
        sqlString += nounDef.dbQueryJoinMagic;
      }
    }

    if (whereClauses.length) {
      sqlString += " WHERE (" + whereClauses.join(") OR (") + ")";
    }

    // An explicit SQL override replaces everything generated above.
    if (aQuery.options.explicitSQL) {
      sqlString = aQuery.options.explicitSQL;
    }

    if (aQuery.options.outerWrapColumns) {
      sqlString =
        "SELECT *, " +
        aQuery.options.outerWrapColumns.join(", ") +
        " FROM (" +
        sqlString +
        ")";
    }

    if (aQuery._order.length) {
      let orderClauses = [];
      for (let colName of aQuery._order) {
        // A leading "-" requests descending order on that column.
        if (colName.startsWith("-")) {
          orderClauses.push(colName.substring(1) + " DESC");
        } else {
          orderClauses.push(colName + " ASC");
        }
      }
      sqlString += " ORDER BY " + orderClauses.join(", ");
    }

    if (aQuery._limit) {
      if (!("limitClauseAlreadyIncluded" in aQuery.options)) {
        sqlString += " LIMIT ?";
      }
      boundArgs.push(aQuery._limit);
    }

    this._log.debug("QUERY FROM QUERY: " + sqlString + " ARGS: " + boundArgs);

    // if we want to become explicit, replace the query (which has already
    // provided our actual SQL query) with an explicit query. This will be
    // what gets attached to the collection in the event we create a new
    // collection. If we are reusing one, we assume that the explicitness,
    // if desired, already happened.
    // (we do not need to pass an argument to the explicitQueryClass constructor
    // because it will be passed in to the collection's constructor, which will
    // ensure that the collection attribute gets set.)
    if (aArgs && "becomeExplicit" in aArgs && aArgs.becomeExplicit) {
      aQuery = new nounDef.explicitQueryClass();
    } else if (aArgs && "becomeNull" in aArgs && aArgs.becomeNull) {
      aQuery = new nounDef.nullQueryClass();
    }

    return this._queryFromSQLString(
      sqlString,
      boundArgs,
      nounDef,
      aQuery,
      aListener,
      aListenerData,
      aExistingCollection,
      aMasterCollection
    );
  },
+ /* eslint-enable complexity */
+
+ _queryFromSQLString(
+ aSqlString,
+ aBoundArgs,
+ aNounDef,
+ aQuery,
+ aListener,
+ aListenerData,
+ aExistingCollection,
+ aMasterCollection
+ ) {
+ let statement = this._createAsyncStatement(aSqlString, true);
+ for (let [iBinding, bindingValue] of aBoundArgs.entries()) {
+ this._bindVariant(statement, iBinding, bindingValue);
+ }
+
+ let collection;
+ if (aExistingCollection) {
+ collection = aExistingCollection;
+ } else {
+ collection = new GlodaCollection(
+ aNounDef,
+ [],
+ aQuery,
+ aListener,
+ aMasterCollection
+ );
+ GlodaCollectionManager.registerCollection(collection);
+ // we don't want to overwrite the existing listener or its data, but this
+ // does raise the question about what should happen if we get passed in
+ // a different listener and/or data.
+ if (aListenerData !== undefined) {
+ collection.data = aListenerData;
+ }
+ }
+ if (aListenerData) {
+ if (collection.dataStack) {
+ collection.dataStack.push(aListenerData);
+ } else {
+ collection.dataStack = [aListenerData];
+ }
+ }
+
+ statement.executeAsync(
+ new QueryFromQueryCallback(statement, aNounDef, collection)
+ );
+ statement.finalize();
+ return collection;
+ },
+
+ /* eslint-disable complexity */
  /**
   * First-pass load of a noun item's attributes: register special
   * (parent/children) reference dependencies and de-persist the item's JSON
   * attribute bundle. Attributes whose values are other database-backed
   * nouns are not resolved here; their ids are recorded in aItem._deps and
   * in the (inverse) reference maps so the caller can batch-load them and
   * later invoke loadNounDeferredDeps to stitch the real objects in.
   *
   * @param aItem The noun instance being loaded.
   * @param aReferencesByNounID Per-noun-id maps of {referencedId: null}
   *     entries naming objects that still need to be loaded.
   * @param aInverseReferencesByNounID Like the above, but for children-style
   *     inverse references keyed by this item's own id.
   * @returns true if the item ended up with outstanding dependencies
   *     (aItem._deps was populated).
   */
  loadNounItem(aItem, aReferencesByNounID, aInverseReferencesByNounID) {
    let attribIDToDBDefAndParam = this._attributeIDToDBDefAndParam;

    // A pre-existing _deps means the JSON pass below already happened once.
    let hadDeps = aItem._deps != null;
    let deps = aItem._deps || {};
    let hasDeps = false;

    for (let attrib of aItem.NOUN_DEF.specialLoadAttribs) {
      let objectNounDef = attrib.objectNounDef;

      if (
        "special" in attrib &&
        attrib.special === GlodaConstants.kSpecialColumnChildren
      ) {
        let invReferences = aInverseReferencesByNounID[objectNounDef.id];
        if (invReferences === undefined) {
          invReferences = aInverseReferencesByNounID[objectNounDef.id] = {};
        }
        // only contribute if it's not already pending or there
        if (
          !(attrib.id in deps) &&
          aItem[attrib.storageAttributeName] == null
        ) {
          // this._log.debug(" Adding inv ref for: " + aItem.id);
          if (!(aItem.id in invReferences)) {
            invReferences[aItem.id] = null;
          }
          deps[attrib.id] = null;
          hasDeps = true;
        }
      } else if (
        "special" in attrib &&
        attrib.special === GlodaConstants.kSpecialColumnParent
      ) {
        let references = aReferencesByNounID[objectNounDef.id];
        if (references === undefined) {
          references = aReferencesByNounID[objectNounDef.id] = {};
        }
        // nothing to contribute if it's already there
        if (
          !(attrib.id in deps) &&
          aItem[attrib.valueStorageAttributeName] == null
        ) {
          let parentID = aItem[attrib.idStorageAttributeName];
          if (!(parentID in references)) {
            references[parentID] = null;
          }
          // this._log.debug(" Adding parent ref for: " +
          // aItem[attrib.idStorageAttributeName]);
          deps[attrib.id] = null;
          hasDeps = true;
        } else {
          this._log.debug(
            " paranoia value storage: " +
              aItem[attrib.valueStorageAttributeName]
          );
        }
      }
    }

    // bail here if arbitrary values are not allowed, there just is no
    // encoded json, or we already had dependencies for this guy, implying
    // the json pass has already been performed
    if (!aItem.NOUN_DEF.allowsArbitraryAttrs || !aItem._jsonText || hadDeps) {
      if (hasDeps) {
        aItem._deps = deps;
      }
      return hasDeps;
    }

    // this._log.debug(" load json: " + aItem._jsonText);
    let jsonDict = JSON.parse(aItem._jsonText);
    // The JSON text is consumed; drop it so it cannot be re-parsed.
    delete aItem._jsonText;

    // Iterate over the attributes on the item
    for (let attribId in jsonDict) {
      let jsonValue = jsonDict[attribId];
      // It is technically impossible for attribute ids to go away at this
      // point in time. This would require someone to monkey around with
      // our schema. But we will introduce this functionality one day, so
      // prepare for it now.
      if (!(attribId in attribIDToDBDefAndParam)) {
        continue;
      }
      // find the attribute definition that corresponds to this key
      let dbAttrib = attribIDToDBDefAndParam[attribId][0];

      let attrib = dbAttrib.attrDef;
      // The attribute definition will fail to exist if no one defines the
      // attribute anymore. This can happen for many reasons: an extension
      // was uninstalled, an extension was changed and no longer defines the
      // attribute, or patches are being applied/unapplied. Ignore this
      // attribute if missing.
      if (attrib == null) {
        continue;
      }
      let objectNounDef = attrib.objectNounDef;

      // If it has a tableName member but no fromJSON, then it's a persistent
      // object that needs to be loaded, which also means we need to hold it in
      // a collection owned by our collection.
      // (If it has a fromJSON method, then it's a special case like
      // MimeTypeNoun where it is authoritatively backed by a table but caches
      // everything into memory. There is no case where fromJSON would be
      // implemented but we should still be doing database lookups.)
      if (objectNounDef.tableName && !objectNounDef.fromJSON) {
        let references = aReferencesByNounID[objectNounDef.id];
        if (references === undefined) {
          references = aReferencesByNounID[objectNounDef.id] = {};
        }

        if (attrib.singular) {
          if (!(jsonValue in references)) {
            references[jsonValue] = null;
          }
        } else {
          // Non-singular: jsonValue is a collection of referenced ids.
          for (let key in jsonValue) {
            let anID = jsonValue[key];
            if (!(anID in references)) {
              references[anID] = null;
            }
          }
        }

        deps[attribId] = jsonValue;
        hasDeps = true;
      } else if (objectNounDef.contributeObjDependencies) {
        /* if it has custom contribution logic, use it */
        if (
          objectNounDef.contributeObjDependencies(
            jsonValue,
            aReferencesByNounID,
            aInverseReferencesByNounID
          )
        ) {
          deps[attribId] = jsonValue;
          hasDeps = true;
        } else {
          // just propagate the value, it's some form of simple sentinel
          aItem[attrib.boundName] = jsonValue;
        }
      } else if (objectNounDef.fromJSON) {
        // otherwise, the value just needs to be de-persisted, or...
        if (attrib.singular) {
          // For consistency with the non-singular case, we don't assign the
          // attribute if undefined is returned.
          let deserialized = objectNounDef.fromJSON(jsonValue, aItem);
          if (deserialized !== undefined) {
            aItem[attrib.boundName] = deserialized;
          }
        } else {
          // Convert all the entries in the list filtering out any undefined
          // values. (TagNoun will do this if the tag is now dead.)
          let outList = [];
          for (let key in jsonValue) {
            let val = jsonValue[key];
            let deserialized = objectNounDef.fromJSON(val, aItem);
            if (deserialized !== undefined) {
              outList.push(deserialized);
            }
          }
          // Note: It's possible if we filtered things out that this is an empty
          // list. This is acceptable because this is somewhat of an unusual
          // case and I don't think we want to further complicate our
          // semantics.
          aItem[attrib.boundName] = outList;
        }
      } else {
        // it's fine as is
        aItem[attrib.boundName] = jsonValue;
      }
    }

    if (hasDeps) {
      aItem._deps = deps;
    }
    return hasDeps;
  },
+ /* eslint-enable complexity */
+
+  /**
+   * Second pass of item loading: now that the objects named in aItem._deps
+   * have been loaded into aReferencesByNounID / aInverseReferencesByNounID,
+   * patch the actual object references onto aItem and discard the _deps
+   * bookkeeping. No-op when the item recorded no deferred dependencies.
+   *
+   * @param aItem The item whose deferred attribute values get resolved.
+   * @param aReferencesByNounID Per-noun-id dictionary mapping object id to
+   *     the loaded object.
+   * @param aInverseReferencesByNounID Per-noun-id dictionary mapping the
+   *     owning item's id to the value for inverse ("children") references.
+   */
+  loadNounDeferredDeps(aItem, aReferencesByNounID, aInverseReferencesByNounID) {
+    if (aItem._deps === undefined) {
+      return;
+    }
+
+    let attribIDToDBDefAndParam = this._attributeIDToDBDefAndParam;
+
+    for (let [attribId, jsonValue] of Object.entries(aItem._deps)) {
+      let dbAttrib = attribIDToDBDefAndParam[attribId][0];
+      let attrib = dbAttrib.attrDef;
+
+      let objectNounDef = attrib.objectNounDef;
+      let references = aReferencesByNounID[objectNounDef.id];
+      if (attrib.special) {
+        if (attrib.special === GlodaConstants.kSpecialColumnChildren) {
+          // Children are looked up by the *owning* item's id in the inverse
+          // reference map rather than by the stored value.
+          let inverseReferences = aInverseReferencesByNounID[objectNounDef.id];
+          // this._log.info("inverse assignment: " + objectNounDef.id +
+          //                " of " + aItem.id)
+          aItem[attrib.storageAttributeName] = inverseReferences[aItem.id];
+        } else if (attrib.special === GlodaConstants.kSpecialColumnParent) {
+          // this._log.info("parent column load: " + objectNounDef.id +
+          //                " storage value: " + aItem[attrib.idStorageAttributeName]);
+          aItem[attrib.valueStorageAttributeName] =
+            references[aItem[attrib.idStorageAttributeName]];
+        }
+      } else if (objectNounDef.tableName) {
+        // Database-backed noun: swap the stored id(s) for the loaded
+        // object(s).
+        if (attrib.singular) {
+          aItem[attrib.boundName] = references[jsonValue];
+        } else {
+          aItem[attrib.boundName] = Object.keys(jsonValue).map(
+            key => references[jsonValue[key]]
+          );
+        }
+      } else if (objectNounDef.contributeObjDependencies) {
+        // Noun with custom dependency logic resolves its own value.
+        aItem[attrib.boundName] = objectNounDef.resolveObjDependencies(
+          jsonValue,
+          aReferencesByNounID,
+          aInverseReferencesByNounID
+        );
+      }
+      // there is no other case
+    }
+
+    delete aItem._deps;
+  },
+
+ /* ********** Contact ********** */
+ _nextContactId: 1,
+
+ _populateContactManagedId() {
+ let stmt = this._createSyncStatement("SELECT MAX(id) FROM contacts", true);
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ this._nextContactId = stmt.getInt64(0) + 1;
+ }
+ stmt.finalize();
+ },
+
+  /**
+   * Lazily-compiled async INSERT statement for the contacts table. The
+   * first access creates the statement, then replaces this getter with one
+   * that returns the cached statement.
+   */
+  get _insertContactStatement() {
+    let statement = this._createAsyncStatement(
+      "INSERT INTO contacts (id, directoryUUID, contactUUID, name, popularity,\
+                             frecency, jsonAttributes) \
+              VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
+    );
+    this.__defineGetter__("_insertContactStatement", () => statement);
+    return this._insertContactStatement;
+  },
+
+ createContact(aDirectoryUUID, aContactUUID, aName, aPopularity, aFrecency) {
+ let contactID = this._nextContactId++;
+ let contact = new GlodaContact(
+ this,
+ contactID,
+ aDirectoryUUID,
+ aContactUUID,
+ aName,
+ aPopularity,
+ aFrecency
+ );
+ return contact;
+ },
+
+ insertContact(aContact) {
+ let ics = this._insertContactStatement;
+ ics.bindByIndex(0, aContact.id);
+ if (aContact.directoryUUID == null) {
+ ics.bindByIndex(1, null);
+ } else {
+ ics.bindByIndex(1, aContact.directoryUUID);
+ }
+ if (aContact.contactUUID == null) {
+ ics.bindByIndex(2, null);
+ } else {
+ ics.bindByIndex(2, aContact.contactUUID);
+ }
+ ics.bindByIndex(3, aContact.name);
+ ics.bindByIndex(4, aContact.popularity);
+ ics.bindByIndex(5, aContact.frecency);
+ if (aContact._jsonText) {
+ ics.bindByIndex(6, aContact._jsonText);
+ } else {
+ ics.bindByIndex(6, null);
+ }
+
+ ics.executeAsync(this.trackAsync());
+
+ return aContact;
+ },
+
+  /**
+   * Lazily-compiled async UPDATE statement for the contacts table; the row
+   * is selected by id (?7). Memoized via getter replacement on first use.
+   */
+  get _updateContactStatement() {
+    let statement = this._createAsyncStatement(
+      "UPDATE contacts SET directoryUUID = ?1, \
+                           contactUUID = ?2, \
+                           name = ?3, \
+                           popularity = ?4, \
+                           frecency = ?5, \
+                           jsonAttributes = ?6 \
+              WHERE id = ?7"
+    );
+    this.__defineGetter__("_updateContactStatement", () => statement);
+    return this._updateContactStatement;
+  },
+
+ updateContact(aContact) {
+ let ucs = this._updateContactStatement;
+ ucs.bindByIndex(6, aContact.id);
+ ucs.bindByIndex(0, aContact.directoryUUID);
+ ucs.bindByIndex(1, aContact.contactUUID);
+ ucs.bindByIndex(2, aContact.name);
+ ucs.bindByIndex(3, aContact.popularity);
+ ucs.bindByIndex(4, aContact.frecency);
+ if (aContact._jsonText) {
+ ucs.bindByIndex(5, aContact._jsonText);
+ } else {
+ ucs.bindByIndex(5, null);
+ }
+
+ ucs.executeAsync(this.trackAsync());
+ },
+
+  /**
+   * Deserialize a contacts row (as produced by "SELECT * FROM contacts")
+   * into a GlodaContact. NULL-typed columns are mapped to null
+   * (directoryUUID, contactUUID) or undefined (jsonAttributes).
+   *
+   * NOTE(review): the ordinal accesses below (name from column 5,
+   * popularity from 3, frecency from 4) assume the table's physical column
+   * order differs from the INSERT statement's explicit column list —
+   * confirm against the contacts table schema.
+   */
+  _contactFromRow(aRow) {
+    let directoryUUID, contactUUID, jsonText;
+    if (aRow.getTypeOfIndex(1) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+      directoryUUID = null;
+    } else {
+      directoryUUID = aRow.getString(1);
+    }
+    if (aRow.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+      contactUUID = null;
+    } else {
+      contactUUID = aRow.getString(2);
+    }
+    if (aRow.getTypeOfIndex(6) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
+      jsonText = undefined;
+    } else {
+      jsonText = aRow.getString(6);
+    }
+
+    return new GlodaContact(
+      this,
+      aRow.getInt64(0),
+      directoryUUID,
+      contactUUID,
+      aRow.getString(5),
+      aRow.getInt64(3),
+      aRow.getInt64(4),
+      jsonText
+    );
+  },
+
+  /**
+   * Lazily-compiled synchronous SELECT-by-id statement for contacts;
+   * memoized via getter replacement on first use.
+   */
+  get _selectContactByIDStatement() {
+    let statement = this._createSyncStatement(
+      "SELECT * FROM contacts WHERE id = ?1"
+    );
+    this.__defineGetter__("_selectContactByIDStatement", () => statement);
+    return this._selectContactByIDStatement;
+  },
+
+ /**
+ * Synchronous contact lookup currently only for use by gloda's creation
+ * of the concept of "me". It is okay for it to be doing synchronous work
+ * because it is part of the startup process before any user code could
+ * have gotten a reference to Gloda, but no one else should do this.
+ */
+ getContactByID(aContactID) {
+ let contact = GlodaCollectionManager.cacheLookupOne(
+ GlodaContact.prototype.NOUN_ID,
+ aContactID
+ );
+
+ if (contact === null) {
+ let scbi = this._selectContactByIDStatement;
+ scbi.bindByIndex(0, aContactID);
+ if (this._syncStep(scbi)) {
+ contact = this._contactFromRow(scbi);
+ GlodaCollectionManager.itemLoaded(contact);
+ }
+ scbi.reset();
+ }
+
+ return contact;
+ },
+
+ /* ********** Identity ********** */
+ /** next identity id, managed for async use reasons. */
+ _nextIdentityId: 1,
+ _populateIdentityManagedId() {
+ let stmt = this._createSyncStatement(
+ "SELECT MAX(id) FROM identities",
+ true
+ );
+ if (stmt.executeStep()) {
+ // no chance of this SQLITE_BUSY on this call
+ this._nextIdentityId = stmt.getInt64(0) + 1;
+ }
+ stmt.finalize();
+ },
+
+  /**
+   * Lazily-compiled async INSERT statement for the identities table;
+   * memoized via getter replacement on first use.
+   */
+  get _insertIdentityStatement() {
+    let statement = this._createAsyncStatement(
+      "INSERT INTO identities (id, contactID, kind, value, description, relay) \
+              VALUES (?1, ?2, ?3, ?4, ?5, ?6)"
+    );
+    this.__defineGetter__("_insertIdentityStatement", () => statement);
+    return this._insertIdentityStatement;
+  },
+
+ createIdentity(aContactID, aContact, aKind, aValue, aDescription, aIsRelay) {
+ let identityID = this._nextIdentityId++;
+ let iis = this._insertIdentityStatement;
+ iis.bindByIndex(0, identityID);
+ iis.bindByIndex(1, aContactID);
+ iis.bindByIndex(2, aKind);
+ iis.bindByIndex(3, aValue);
+ iis.bindByIndex(4, aDescription);
+ iis.bindByIndex(5, aIsRelay ? 1 : 0);
+ iis.executeAsync(this.trackAsync());
+
+ let identity = new GlodaIdentity(
+ this,
+ identityID,
+ aContactID,
+ aContact,
+ aKind,
+ aValue,
+ aDescription,
+ aIsRelay
+ );
+ GlodaCollectionManager.itemsAdded(identity.NOUN_ID, [identity]);
+ return identity;
+ },
+
+  /**
+   * Lazily-compiled async UPDATE statement for the identities table; the
+   * row is selected by id (?6). Memoized via getter replacement.
+   */
+  get _updateIdentityStatement() {
+    let statement = this._createAsyncStatement(
+      "UPDATE identities SET contactID = ?1, \
+                             kind = ?2, \
+                             value = ?3, \
+                             description = ?4, \
+                             relay = ?5 \
+              WHERE id = ?6"
+    );
+    this.__defineGetter__("_updateIdentityStatement", () => statement);
+    return this._updateIdentityStatement;
+  },
+
+ updateIdentity(aIdentity) {
+ let ucs = this._updateIdentityStatement;
+ ucs.bindByIndex(5, aIdentity.id);
+ ucs.bindByIndex(0, aIdentity.contactID);
+ ucs.bindByIndex(1, aIdentity.kind);
+ ucs.bindByIndex(2, aIdentity.value);
+ ucs.bindByIndex(3, aIdentity.description);
+ ucs.bindByIndex(4, aIdentity.relay ? 1 : 0);
+
+ ucs.executeAsync(this.trackAsync());
+ },
+
+  /**
+   * Deserialize an identities row ("SELECT * FROM identities") into a
+   * GlodaIdentity. The contact argument is passed as null here; the relay
+   * column is coerced from its stored integer to a boolean.
+   */
+  _identityFromRow(aRow) {
+    return new GlodaIdentity(
+      this,
+      aRow.getInt64(0),
+      aRow.getInt64(1),
+      null,
+      aRow.getString(2),
+      aRow.getString(3),
+      aRow.getString(4),
+      !!aRow.getInt32(5)
+    );
+  },
+
+  /**
+   * Lazily-compiled synchronous SELECT statement fetching an identity by
+   * its (kind, value) pair; memoized via getter replacement on first use.
+   */
+  get _selectIdentityByKindValueStatement() {
+    let statement = this._createSyncStatement(
+      "SELECT * FROM identities WHERE kind = ?1 AND value = ?2"
+    );
+    this.__defineGetter__(
+      "_selectIdentityByKindValueStatement",
+      () => statement
+    );
+    return this._selectIdentityByKindValueStatement;
+  },
+
+ /**
+ * Synchronous lookup of an identity by kind and value, only for use by
+ * the legacy gloda core code that creates a concept of "me".
+ * Ex: (email, foo@example.com)
+ */
+ getIdentity(aKind, aValue) {
+ let identity = GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaIdentity.prototype.NOUN_ID,
+ aKind + "@" + aValue
+ );
+
+ let ibkv = this._selectIdentityByKindValueStatement;
+ ibkv.bindByIndex(0, aKind);
+ ibkv.bindByIndex(1, aValue);
+ if (this._syncStep(ibkv)) {
+ identity = this._identityFromRow(ibkv);
+ GlodaCollectionManager.itemLoaded(identity);
+ }
+ ibkv.reset();
+
+ return identity;
+ },
+};
+// Hook the shared datastore onto every data-model prototype so instances
+// can reach the database layer via this._datastore.
+GlodaAttributeDBDef.prototype._datastore = GlodaDatastore;
+GlodaConversation.prototype._datastore = GlodaDatastore;
+GlodaFolder.prototype._datastore = GlodaDatastore;
+GlodaMessage.prototype._datastore = GlodaDatastore;
+GlodaContact.prototype._datastore = GlodaDatastore;
+GlodaIdentity.prototype._datastore = GlodaDatastore;
diff --git a/comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm b/comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm
new file mode 100644
index 0000000000..7a10b4112e
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaExplicitAttr.jsm
@@ -0,0 +1,188 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides the "explicit attribute" provider for messages. It is
+ * concerned with attributes that are the result of user actions. For example,
+ * whether a message is starred (flagged), message tags, whether it is
+ * read/unread, etc.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaExplicitAttr"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { TagNoun } = ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+
+/**
+ * @namespace Explicit attribute provider. Indexes/defines attributes that are
+ * explicitly a result of user action. This dubiously includes marking a
+ * message as read.
+ */
+var GlodaExplicitAttr = {
+  providerName: "gloda.explattr",
+  strings: Services.strings.createBundle(
+    "chrome://messenger/locale/gloda.properties"
+  ),
+  // Console logger; created in init().
+  _log: null,
+
+  /**
+   * Set up logging and register this provider's attributes with Gloda.
+   * Errors are logged and then rethrown so the caller sees the failure.
+   */
+  init() {
+    this._log = console.createInstance({
+      prefix: "gloda.explattr",
+      maxLogLevel: "Warn",
+      maxLogLevelPref: "gloda.loglevel",
+    });
+
+    try {
+      this.defineAttributes();
+    } catch (ex) {
+      this._log.error("Error in init: " + ex);
+      throw ex;
+    }
+  },
+
+  /** Boost for starred messages. */
+  NOTABILITY_STARRED: 16,
+  /** Boost for tagged messages, first tag. */
+  NOTABILITY_TAGGED_FIRST: 8,
+  /** Boost for tagged messages, each additional tag. */
+  NOTABILITY_TAGGED_ADDL: 1,
+
+  /**
+   * Define the explicit (user-action-derived) message attributes:
+   * tag, star, read, repliedTo, forwarded.
+   */
+  defineAttributes() {
+    // Tag
+    this._attrTag = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "tag",
+      bindName: "tags",
+      singular: false,
+      emptySetIsSignificant: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_TAG,
+      parameterNoun: null,
+      // Property change notifications that we care about:
+      propertyChanges: ["keywords"],
+    }); // not-tested
+
+    // Star
+    this._attrStar = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "star",
+      bindName: "starred",
+      singular: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+    // Read/Unread
+    this._attrRead = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "read",
+      // Make the message query-able but without using the database.
+      canQuery: "truthy-but-not-true",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+
+    /**
+     * Has this message been replied to by the user.
+     */
+    this._attrRepliedTo = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "repliedTo",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+
+    /**
+     * Has this user forwarded this message to someone.
+     */
+    this._attrForwarded = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrExplicit,
+      attributeName: "forwarded",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_BOOLEAN,
+      parameterNoun: null,
+    }); // tested-by: test_attributes_explicit
+  },
+
+  /**
+   * Indexing worker (generator): populate the explicit attributes on a
+   * gloda message from its nsIMsgDBHdr and bump its notability for stars
+   * and tags.
+   *
+   * @param aGlodaMessage the gloda message being indexed (mutated).
+   * @param aRawReps raw representations; only .header is used here.
+   * @param aIsNew unused by this provider.
+   * @param aCallbackHandle unused by this provider.
+   */
+  *process(aGlodaMessage, aRawReps, aIsNew, aCallbackHandle) {
+    let aMsgHdr = aRawReps.header;
+
+    aGlodaMessage.starred = aMsgHdr.isFlagged;
+    if (aGlodaMessage.starred) {
+      aGlodaMessage.notability += this.NOTABILITY_STARRED;
+    }
+
+    aGlodaMessage.read = aMsgHdr.isRead;
+
+    let flags = aMsgHdr.flags;
+    aGlodaMessage.repliedTo = Boolean(flags & Ci.nsMsgMessageFlags.Replied);
+    aGlodaMessage.forwarded = Boolean(flags & Ci.nsMsgMessageFlags.Forwarded);
+
+    let tags = (aGlodaMessage.tags = []);
+
+    // -- Tag
+    // build a map of the keywords
+    let keywords = aMsgHdr.getStringProperty("keywords");
+    let keywordList = keywords.split(" ");
+    let keywordMap = {};
+    for (let iKeyword = 0; iKeyword < keywordList.length; iKeyword++) {
+      let keyword = keywordList[iKeyword];
+      keywordMap[keyword] = true;
+    }
+
+    // Only tags whose key appears in the message's keyword set apply.
+    let tagArray = TagNoun.getAllTags();
+    for (let iTag = 0; iTag < tagArray.length; iTag++) {
+      let tag = tagArray[iTag];
+      if (tag.key in keywordMap) {
+        tags.push(tag);
+      }
+    }
+
+    if (tags.length) {
+      aGlodaMessage.notability +=
+        this.NOTABILITY_TAGGED_FIRST +
+        (tags.length - 1) * this.NOTABILITY_TAGGED_ADDL;
+    }
+
+    yield GlodaConstants.kWorkDone;
+  },
+
+  /**
+   * Duplicates the notability logic from process(). Arguably process should
+   * be factored to call us, grokNounItem should be factored to call us, or we
+   * should get sufficiently fancy that our code wildly diverges.
+   */
+  score(aMessage, aContext) {
+    let score = 0;
+    if (aMessage.starred) {
+      score += this.NOTABILITY_STARRED;
+    }
+    if (aMessage.tags.length) {
+      score +=
+        this.NOTABILITY_TAGGED_FIRST +
+        (aMessage.tags.length - 1) * this.NOTABILITY_TAGGED_ADDL;
+    }
+    return score;
+  },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm b/comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm
new file mode 100644
index 0000000000..364ea61bb0
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaFundAttr.jsm
@@ -0,0 +1,947 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaFundAttr"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+const { GlodaUtils } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaUtils.jsm"
+);
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaAttachment } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDataModel.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { MimeTypeNoun } = ChromeUtils.import(
+ "resource:///modules/gloda/NounMimetype.jsm"
+);
+const { GlodaContent } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaContent.jsm"
+);
+
+/**
+ * @namespace The Gloda Fundamental Attribute provider is a special attribute
+ * provider; it provides attributes that the rest of the providers should be
+ * able to assume exist. Also, it may end up accessing things at a lower level
+ * than most extension providers should do. In summary, don't mimic this code
+ * unless you won't complain when your code breaks.
+ */
+var GlodaFundAttr = {
+ providerName: "gloda.fundattr",
+ strings: Services.strings.createBundle(
+ "chrome://messenger/locale/gloda.properties"
+ ),
+ _log: null,
+
+  /**
+   * Set up logging and register the fundamental attributes with Gloda.
+   * Errors are logged and then rethrown so the caller sees the failure.
+   */
+  init() {
+    this._log = console.createInstance({
+      prefix: "gloda.fundattr",
+      maxLogLevel: "Warn",
+      maxLogLevelPref: "gloda.loglevel",
+    });
+
+    try {
+      this.defineAttributes();
+    } catch (ex) {
+      this._log.error("Error in init: " + ex);
+      throw ex;
+    }
+  },
+
+  // Popularity boosts applied to identities based on how the message
+  // involves them relative to 'me'.
+  POPULARITY_FROM_ME_TO: 10,
+  POPULARITY_FROM_ME_CC: 4,
+  POPULARITY_FROM_ME_BCC: 3,
+  POPULARITY_TO_ME: 5,
+  POPULARITY_CC_ME: 1,
+  POPULARITY_BCC_ME: 1,
+
+  /** Boost for messages 'I' sent */
+  NOTABILITY_FROM_ME: 10,
+  /** Boost for messages involving 'me'. */
+  NOTABILITY_INVOLVING_ME: 1,
+  /** Boost for message from someone in 'my' address book. */
+  NOTABILITY_FROM_IN_ADDR_BOOK: 10,
+  /** Boost for the first person involved in my address book. */
+  NOTABILITY_INVOLVING_ADDR_BOOK_FIRST: 8,
+  /** Boost for each additional person involved in my address book. */
+  NOTABILITY_INVOLVING_ADDR_BOOK_ADDL: 2,
+
+  /**
+   * Register the fundamental message/conversation attributes: database
+   * columns (folder, messageKey, date, _deleted, conversation), fulltext
+   * matchers (subject/body/attachments/author/recipients), identity links
+   * (from/to/cc/bcc), attachment data, and the involves/recipients/
+   * fromMe/toMe optimization attributes.
+   */
+  defineAttributes() {
+    /* ***** Conversations ***** */
+    // conversation: subjectMatches
+    this._attrConvSubject = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "subjectMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "subject",
+      subjectNouns: [GlodaConstants.NOUN_CONVERSATION],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    /* ***** Messages ***** */
+    // folder
+    this._attrFolder = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "folder",
+      singular: true,
+      facet: true,
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "folderID",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FOLDER,
+    }); // tested-by: test_attributes_fundamental
+    this._attrAccount = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "account",
+      canQuery: "memory",
+      singular: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_ACCOUNT,
+    });
+    this._attrMessageKey = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "messageKey",
+      singular: true,
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "messageKey",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_NUMBER,
+      canQuery: true,
+    }); // tested-by: test_attributes_fundamental
+
+    // We need to surface the deleted attribute for querying, but there is no
+    // reason for user code, so let's call it "_deleted" rather than deleted.
+    // (In fact, our validity constraints require a special query formulation
+    // that user code should have no clue exists. That's right user code,
+    // that's a dare.)
+    Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "_deleted",
+      singular: true,
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "deleted",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_NUMBER,
+    });
+
+    // -- fulltext search helpers
+    // fulltextMatches. Match over message subject, body, and attachments
+    // @testpoint gloda.noun.message.attr.fulltextMatches
+    this._attrFulltext = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "fulltextMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "messagesText",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // subjectMatches. Fulltext match on subject
+    // @testpoint gloda.noun.message.attr.subjectMatches
+    this._attrSubjectText = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "subjectMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "subject",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // bodyMatches. super-synthetic full-text matching...
+    // @testpoint gloda.noun.message.attr.bodyMatches
+    this._attrBody = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "bodyMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "body",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // attachmentNamesMatch
+    // @testpoint gloda.noun.message.attr.attachmentNamesMatch
+    this._attrAttachmentNames = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "attachmentNamesMatch",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "attachmentNames",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // @testpoint gloda.noun.message.attr.authorMatches
+    this._attrAuthorFulltext = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "authorMatches",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "author",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // @testpoint gloda.noun.message.attr.recipientsMatch
+    this._attrRecipientsFulltext = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrDerived,
+      attributeName: "recipientsMatch",
+      singular: true,
+      special: GlodaConstants.kSpecialFulltext,
+      specialColumnName: "recipients",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_FULLTEXT,
+    });
+
+    // --- synthetic stuff for some reason
+    // conversation
+    // @testpoint gloda.noun.message.attr.conversation
+    this._attrConversation = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "conversation",
+      singular: true,
+      special: GlodaConstants.kSpecialColumnParent,
+      specialColumnName: "conversationID",
+      idStorageAttributeName: "_conversationID",
+      valueStorageAttributeName: "_conversation",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_CONVERSATION,
+      canQuery: true,
+    });
+
+    // --- Fundamental
+    // From
+    this._attrFrom = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "from",
+      singular: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // tested-by: test_attributes_fundamental
+    // To
+    this._attrTo = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "to",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // tested-by: test_attributes_fundamental
+    // Cc
+    this._attrCc = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "cc",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+    /**
+     * Bcc'ed recipients; only makes sense for sent messages.
+     */
+    this._attrBcc = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "bcc",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+
+    // Date. now lives on the row.
+    this._attrDate = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "date",
+      singular: true,
+      facet: {
+        type: "date",
+      },
+      special: GlodaConstants.kSpecialColumn,
+      specialColumnName: "date",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_DATE,
+    }); // tested-by: test_attributes_fundamental
+
+    // Header message ID.
+    this._attrHeaderMessageID = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "headerMessageID",
+      singular: true,
+      special: GlodaConstants.kSpecialString,
+      specialColumnName: "headerMessageID",
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_STRING,
+      canQuery: true,
+    }); // tested-by: test_attributes_fundamental
+
+    // Attachment MIME Types
+    this._attrAttachmentTypes = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "attachmentTypes",
+      singular: false,
+      emptySetIsSignificant: true,
+      facet: {
+        type: "default",
+        // This will group the MIME types by their category.
+        groupIdAttr: "category",
+        queryHelper: "Category",
+      },
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_MIME_TYPE,
+    });
+
+    // Whether the message carries any encrypted part (stored as a number).
+    this._attrIsEncrypted = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "isEncrypted",
+      singular: true,
+      emptySetIsSignificant: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_NUMBER,
+    });
+
+    // Attachment infos
+    this._attrAttachmentInfos = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "attachmentInfos",
+      singular: false,
+      emptySetIsSignificant: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_ATTACHMENT,
+    });
+
+    // --- Optimization
+    /**
+     * Involves means any of from/to/cc/bcc. The queries get ugly enough
+     * without this that it seems to justify the cost, especially given the
+     * frequent use case. (In fact, post-filtering for the specific from/to/cc
+     * is probably justifiable rather than losing this attribute...)
+     */
+    this._attrInvolves = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrOptimization,
+      attributeName: "involves",
+      singular: false,
+      facet: {
+        type: "default",
+        /**
+         * Filter out 'me', as we have other facets that deal with that, and the
+         * 'me' identities are so likely that they distort things.
+         *
+         * @returns true if the identity is not one of my identities, false if it
+         *   is.
+         */
+        filter(aItem) {
+          return !(aItem.id in Gloda.myIdentities);
+        },
+      },
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+
+    /**
+     * Any of to/cc/bcc.
+     */
+    this._attrRecipients = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrOptimization,
+      attributeName: "recipients",
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested
+
+    // From Me (To/Cc/Bcc)
+    this._attrFromMe = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrOptimization,
+      attributeName: "fromMe",
+      singular: false,
+      // The interesting thing to a facet is whether the message is from me.
+      facet: {
+        type: "nonempty?",
+      },
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_PARAM_IDENTITY,
+    }); // not-tested
+    // To/Cc/Bcc Me
+    this._attrToMe = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "toMe",
+      // The interesting thing to a facet is whether the message is to me.
+      facet: {
+        type: "nonempty?",
+      },
+      singular: false,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_PARAM_IDENTITY,
+    }); // not-tested
+
+    // -- Mailing List
+    // Non-singular, but a hard call. Namely, it is obvious that a message can
+    // be addressed to multiple mailing lists. However, I don't see how you
+    // could receive a message with more than one set of List-* headers,
+    // since each list-serve would each send you a copy. Based on our current
+    // decision to treat each physical message as separate, it almost seems
+    // right to limit the list attribute to the copy that originated at the
+    // list. That may sound entirely wrong, but keep in mind that until we
+    // have seen a message from the list with the List headers, we can't
+    // definitely know it's a mailing list (although heuristics could take us
+    // pretty far). As such, the quasi-singular thing is appealing.
+    // Of course, the reality is that we really want to know if a message was
+    // sent to multiple mailing lists and be able to query on that.
+    // Additionally, our implicit-to logic needs to work on messages that
+    // weren't relayed by the list-serve, especially messages sent to the list
+    // by the user.
+    this._attrList = Gloda.defineAttribute({
+      provider: this,
+      extensionName: GlodaConstants.BUILT_IN,
+      attributeType: GlodaConstants.kAttrFundamental,
+      attributeName: "mailing-list",
+      bindName: "mailingLists",
+      singular: false,
+      emptySetIsSignificant: true,
+      facet: true,
+      subjectNouns: [GlodaConstants.NOUN_MESSAGE],
+      objectNoun: GlodaConstants.NOUN_IDENTITY,
+    }); // not-tested, not-implemented
+  },
+
+ RE_LIST_POST: /<mailto:([^>]+)>/,
+
+ /**
+ *
+ * Specializations:
+ * - Mailing Lists. Replies to a message on a mailing list frequently only
+ * have the list-serve as the 'to', so we try to generate a synthetic 'to'
+ * based on the author of the parent message when possible. (The 'possible'
+ * part is that we may not have a copy of the parent message at the time of
+ * processing.)
+ * - Newsgroups. Same deal as mailing lists.
+ */
+ *process(aGlodaMessage, aRawReps, aIsNew, aCallbackHandle) {
+ let aMsgHdr = aRawReps.header;
+ let aMimeMsg = aRawReps.mime;
+
+ // -- From
+ // Let's use replyTo if available.
+ // er, since we are just dealing with mailing lists for now, forget the
+ // reply-to...
+ // TODO: deal with default charset issues
+ let author = null;
+ /*
+ try {
+ author = aMsgHdr.getStringProperty("replyTo");
+ }
+ catch (ex) {
+ }
+ */
+ if (author == null || author == "") {
+ author = aMsgHdr.author;
+ }
+
+ let normalizedListPost = "";
+ if (aMimeMsg && aMimeMsg.has("list-post")) {
+ let match = this.RE_LIST_POST.exec(aMimeMsg.get("list-post"));
+ if (match) {
+ normalizedListPost = "<" + match[1] + ">";
+ }
+ }
+
+ // Do not use the MIME decoded variants of any of the email addresses
+ // because if name is encoded and has a comma in it, it will break the
+ // address parser (which already knows how to do the decoding anyways).
+ let [
+ authorIdentities,
+ toIdentities,
+ ccIdentities,
+ bccIdentities,
+ listIdentities,
+ ] = yield aCallbackHandle.pushAndGo(
+ Gloda.getOrCreateMailIdentities(
+ aCallbackHandle,
+ author,
+ aMsgHdr.recipients,
+ aMsgHdr.ccList,
+ aMsgHdr.bccList,
+ normalizedListPost
+ )
+ );
+
+ if (authorIdentities.length != 1) {
+ throw new Gloda.BadItemContentsError(
+ "Message with subject '" +
+ aMsgHdr.mime2DecodedSubject +
+ "' somehow lacks a valid author. Bailing."
+ );
+ }
+ let authorIdentity = authorIdentities[0];
+ aGlodaMessage.from = authorIdentity;
+
+ // -- To, Cc, Bcc
+ aGlodaMessage.to = toIdentities;
+ aGlodaMessage.cc = ccIdentities;
+ aGlodaMessage.bcc = bccIdentities;
+
+ // -- Mailing List
+ if (listIdentities.length) {
+ aGlodaMessage.mailingLists = listIdentities;
+ }
+
+ let findIsEncrypted = x =>
+ x.isEncrypted || (x.parts ? x.parts.some(findIsEncrypted) : false);
+
+ // -- Encryption
+ aGlodaMessage.isEncrypted = false;
+ if (aMimeMsg) {
+ aGlodaMessage.isEncrypted = findIsEncrypted(aMimeMsg);
+ }
+
+ // -- Attachments
+ if (aMimeMsg) {
+ // nsParseMailbox.cpp puts the attachment flag on msgHdrs as soon as it
+ // finds a multipart/mixed part. This is a good heuristic, but if it turns
+ // out the part has no filename, then we don't treat it as an attachment.
+ // We just streamed the message, and we have all the information to figure
+ // that out, so now is a good place to clear the flag if needed.
+ let attachmentTypes = new Set();
+ for (let attachment of aMimeMsg.allAttachments) {
+ // getMimeType expects the content type to contain at least a "/".
+ if (!attachment.contentType.includes("/")) {
+ continue;
+ }
+ attachmentTypes.add(MimeTypeNoun.getMimeType(attachment.contentType));
+ }
+ if (attachmentTypes.size) {
+ aGlodaMessage.attachmentTypes = Array.from(attachmentTypes);
+ }
+
+ let aMsgHdr = aRawReps.header;
+ let wasStreamed =
+ aMsgHdr &&
+ !aGlodaMessage.isEncrypted &&
+ (aMsgHdr.flags & Ci.nsMsgMessageFlags.Offline ||
+ aMsgHdr.folder instanceof Ci.nsIMsgLocalMailFolder);
+
+ // Clear the flag if it turns out there's no attachment after all and we
+ // streamed completely the message (if we didn't, then we have no
+ // knowledge of attachments, unless bug 673370 is fixed).
+ if (wasStreamed && !aMimeMsg.allAttachments.length) {
+ aMsgHdr.markHasAttachments(false);
+ }
+
+ // This is not the same kind of attachments as above. Now, we want to
+ // provide convenience attributes to Gloda consumers, so that they can run
+ // through the list of attachments of a given message, to possibly build a
+ // visualization on top of it. We still reject bogus mime types, which
+ // means yencode won't be supported. Oh, I feel really bad.
+ let attachmentInfos = [];
+ for (let att of aMimeMsg.allUserAttachments) {
+ attachmentInfos.push(
+ this.glodaAttFromMimeAtt(aRawReps.trueGlodaRep, att)
+ );
+ }
+ aGlodaMessage.attachmentInfos = attachmentInfos;
+ }
+
+ // TODO: deal with mailing lists, including implicit-to. this will require
+ // convincing the indexer to pass us in the previous message if it is
+ // available. (which we'll simply pass to everyone... it can help body
+ // logic for quoting purposes, etc. too.)
+
+ yield GlodaConstants.kWorkDone;
+ },
+
+ glodaAttFromMimeAtt(aGlodaMessage, aAtt) {
+ // So we don't want to store the URL because it can change over time if
+ // the message is moved. What we do is store the full URL if it's a
+ // detached attachment, otherwise just keep the part information, and
+ // rebuild the URL according to where the message is sitting.
+ let part, externalUrl;
+ if (aAtt.isExternal) {
+ externalUrl = aAtt.url;
+ } else {
+ let matches = aAtt.url.match(GlodaUtils.PART_RE);
+ if (matches && matches.length) {
+ part = matches[1];
+ } else {
+ this._log.error("Error processing attachment: " + aAtt.url);
+ }
+ }
+ return new GlodaAttachment(
+ aGlodaMessage,
+ aAtt.name,
+ aAtt.contentType,
+ aAtt.size,
+ part,
+ externalUrl,
+ aAtt.isExternal
+ );
+ },
+
+ *optimize(aGlodaMessage, aRawReps, aIsNew, aCallbackHandle) {
+ let aMsgHdr = aRawReps.header;
+
+ // for simplicity this is used for both involves and recipients
+ let involvesIdentities = {};
+ let involves = aGlodaMessage.involves || [];
+ let recipients = aGlodaMessage.recipients || [];
+
+ // 'me' specialization optimizations
+ let toMe = aGlodaMessage.toMe || [];
+ let fromMe = aGlodaMessage.fromMe || [];
+
+ let myIdentities = Gloda.myIdentities; // needless optimization?
+ let authorIdentity = aGlodaMessage.from;
+ let isFromMe = authorIdentity.id in myIdentities;
+
+ // The fulltext search column for the author. We want to have in here:
+ // - The e-mail address and display name as enclosed on the message.
+ // - The name per the address book card for this e-mail address, if we have
+ // one.
+ aGlodaMessage._indexAuthor = aMsgHdr.mime2DecodedAuthor;
+ // The fulltext search column for the recipients. (same deal)
+ aGlodaMessage._indexRecipients = aMsgHdr.mime2DecodedRecipients;
+
+ if (isFromMe) {
+ aGlodaMessage.notability += this.NOTABILITY_FROM_ME;
+ } else {
+ let authorDisplayName = MailServices.ab.cardForEmailAddress(
+ authorIdentity.value
+ )?.displayName;
+ if (authorDisplayName !== null) {
+ aGlodaMessage.notability += this.NOTABILITY_FROM_IN_ADDR_BOOK;
+ // @testpoint gloda.noun.message.attr.authorMatches
+ aGlodaMessage._indexAuthor += " " + authorDisplayName;
+ }
+ }
+
+ involves.push(authorIdentity);
+ involvesIdentities[authorIdentity.id] = true;
+
+ let involvedAddrBookCount = 0;
+
+ for (let toIdentity of aGlodaMessage.to) {
+ if (!(toIdentity.id in involvesIdentities)) {
+ involves.push(toIdentity);
+ recipients.push(toIdentity);
+ involvesIdentities[toIdentity.id] = true;
+ let toDisplayName = MailServices.ab.cardForEmailAddress(
+ toIdentity.value
+ )?.displayName;
+ if (toDisplayName !== null) {
+ involvedAddrBookCount++;
+ // @testpoint gloda.noun.message.attr.recipientsMatch
+ aGlodaMessage._indexRecipients += " " + toDisplayName;
+ }
+ }
+
+ // optimization attribute to-me ('I' am the parameter)
+ if (toIdentity.id in myIdentities) {
+ toMe.push([toIdentity, authorIdentity]);
+ if (aIsNew) {
+ authorIdentity.contact.popularity += this.POPULARITY_TO_ME;
+ }
+ }
+ // optimization attribute from-me-to ('I' am the parameter)
+ if (isFromMe) {
+ fromMe.push([authorIdentity, toIdentity]);
+ // also, popularity
+ if (aIsNew) {
+ toIdentity.contact.popularity += this.POPULARITY_FROM_ME_TO;
+ }
+ }
+ }
+ for (let ccIdentity of aGlodaMessage.cc) {
+ if (!(ccIdentity.id in involvesIdentities)) {
+ involves.push(ccIdentity);
+ recipients.push(ccIdentity);
+ involvesIdentities[ccIdentity.id] = true;
+ let ccDisplayName = MailServices.ab.cardForEmailAddress(
+ ccIdentity.value
+ )?.displayName;
+ if (ccDisplayName !== null) {
+ involvedAddrBookCount++;
+ // @testpoint gloda.noun.message.attr.recipientsMatch
+ aGlodaMessage._indexRecipients += " " + ccDisplayName;
+ }
+ }
+ // optimization attribute cc-me ('I' am the parameter)
+ if (ccIdentity.id in myIdentities) {
+ toMe.push([ccIdentity, authorIdentity]);
+ if (aIsNew) {
+ authorIdentity.contact.popularity += this.POPULARITY_CC_ME;
+ }
+ }
+ // optimization attribute from-me-to ('I' am the parameter)
+ if (isFromMe) {
+ fromMe.push([authorIdentity, ccIdentity]);
+ // also, popularity
+ if (aIsNew) {
+ ccIdentity.contact.popularity += this.POPULARITY_FROM_ME_CC;
+ }
+ }
+ }
+ // just treat bcc like cc; the intent is the same although the exact
+ // semantics differ.
+ for (let bccIdentity of aGlodaMessage.bcc) {
+ if (!(bccIdentity.id in involvesIdentities)) {
+ involves.push(bccIdentity);
+ recipients.push(bccIdentity);
+ involvesIdentities[bccIdentity.id] = true;
+ let bccDisplayName = MailServices.ab.cardForEmailAddress(
+ bccIdentity.value
+ )?.displayName;
+ if (bccDisplayName !== null) {
+ involvedAddrBookCount++;
+ // @testpoint gloda.noun.message.attr.recipientsMatch
+ aGlodaMessage._indexRecipients += " " + bccDisplayName;
+ }
+ }
+ // optimization attribute cc-me ('I' am the parameter)
+ if (bccIdentity.id in myIdentities) {
+ toMe.push([bccIdentity, authorIdentity]);
+ if (aIsNew) {
+ authorIdentity.contact.popularity += this.POPULARITY_BCC_ME;
+ }
+ }
+ // optimization attribute from-me-to ('I' am the parameter)
+ if (isFromMe) {
+ fromMe.push([authorIdentity, bccIdentity]);
+ // also, popularity
+ if (aIsNew) {
+ bccIdentity.contact.popularity += this.POPULARITY_FROM_ME_BCC;
+ }
+ }
+ }
+
+ if (involvedAddrBookCount) {
+ aGlodaMessage.notability +=
+ this.NOTABILITY_INVOLVING_ADDR_BOOK_FIRST +
+ (involvedAddrBookCount - 1) * this.NOTABILITY_INVOLVING_ADDR_BOOK_ADDL;
+ }
+
+ aGlodaMessage.involves = involves;
+ aGlodaMessage.recipients = recipients;
+ if (toMe.length) {
+ aGlodaMessage.toMe = toMe;
+ aGlodaMessage.notability += this.NOTABILITY_INVOLVING_ME;
+ }
+ if (fromMe.length) {
+ aGlodaMessage.fromMe = fromMe;
+ }
+
+ // Content
+ if (aRawReps.bodyLines) {
+ aGlodaMessage._content = aRawReps.content = new GlodaContent();
+ if (this.contentWhittle({}, aRawReps.bodyLines, aGlodaMessage._content)) {
+ // we were going to do something here?
+ }
+ } else {
+ aRawReps.content = null;
+ }
+
+ yield GlodaConstants.kWorkDone;
+ },
+
+ /**
+ * Duplicates the notability logic from optimize(). Arguably optimize should
+ * be factored to call us, grokNounItem should be factored to call us, or we
+ * should get sufficiently fancy that our code wildly diverges.
+ */
+ score(aMessage, aContext) {
+ let score = 0;
+
+ let authorIdentity = aMessage.from;
+ if (authorIdentity.id in Gloda.myIdentities) {
+ score += this.NOTABILITY_FROM_ME;
+ } else if (authorIdentity.inAddressBook) {
+ score += this.NOTABILITY_FROM_IN_ADDR_BOOK;
+ }
+ if (aMessage.toMe) {
+ score += this.NOTABILITY_INVOLVING_ME;
+ }
+
+ let involvedAddrBookCount = 0;
+ for (let identity of aMessage.to) {
+ if (identity.inAddressBook) {
+ involvedAddrBookCount++;
+ }
+ }
+ for (let identity of aMessage.cc) {
+ if (identity.inAddressBook) {
+ involvedAddrBookCount++;
+ }
+ }
+ if (involvedAddrBookCount) {
+ score +=
+ this.NOTABILITY_INVOLVING_ADDR_BOOK_FIRST +
+ (involvedAddrBookCount - 1) * this.NOTABILITY_INVOLVING_ADDR_BOOK_ADDL;
+ }
+ return score;
+ },
+
+ _countQuoteDepthAndNormalize(aLine) {
+ let count = 0;
+ let lastStartOffset = 0;
+
+ for (let i = 0; i < aLine.length; i++) {
+ let c = aLine[i];
+ if (c == ">") {
+ count++;
+ lastStartOffset = i + 1;
+ } else if (c != " ") {
+ return [
+ count,
+ lastStartOffset ? aLine.substring(lastStartOffset) : aLine,
+ ];
+ }
+ }
+
+ return [count, lastStartOffset ? aLine.substring(lastStartOffset) : aLine];
+ },
+
+ /**
+ * Attempt to understand simple quoting constructs that use ">" with
+ * obvious phrases to enter the quoting block. No support for other types
+ * of quoting at this time. Also no support for piercing the wrapper of
+ * forwarded messages to actually be the content of the forwarded message.
+ */
+ contentWhittle(aMeta, aBodyLines, aContent) {
+ if (!aContent.volunteerContent(aContent.kPriorityBase)) {
+ return false;
+ }
+
+ // duplicate the list; we mutate somewhat...
+ let bodyLines = aBodyLines.concat();
+
+ // lastNonBlankLine originally was just for detecting quoting idioms where
+ // the "wrote" line was separated from the quoted block by a blank line.
+ // Now we also use it for whitespace suppression at the boundaries of
+ // quoted and un-quoted text. (We keep blank lines within the same
+ // 'block' of quoted or non-quoted text.)
+ // Because we now have two goals for it, and we still want to suppress blank
+ // lines when there is a 'wrote' line involved, we introduce...
+ // prevLastNonBlankLine! This arguably suggests refactoring should be the
+ // next step, but things work for now.
+ let rangeStart = 0,
+ lastNonBlankLine = null,
+ prevLastNonBlankLine = null;
+ let inQuoteDepth = 0;
+ for (let [iLine, line] of bodyLines.entries()) {
+ if (!line || line == "\xa0") {
+ /* unicode non breaking space */
+ continue;
+ }
+
+ if (line.startsWith(">")) {
+ if (!inQuoteDepth) {
+ let rangeEnd = iLine - 1;
+ let quoteRangeStart = iLine;
+ // see if the last non-blank-line was a lead-in...
+ if (lastNonBlankLine != null) {
+ // TODO: localize quote range start detection
+ if (aBodyLines[lastNonBlankLine].includes("wrote")) {
+ quoteRangeStart = lastNonBlankLine;
+ rangeEnd = lastNonBlankLine - 1;
+ // we 'used up' lastNonBlankLine, let's promote the prev guy to
+ // be the new lastNonBlankLine for the next logic block
+ lastNonBlankLine = prevLastNonBlankLine;
+ }
+ // eat the trailing whitespace...
+ if (lastNonBlankLine != null) {
+ rangeEnd = Math.min(rangeEnd, lastNonBlankLine);
+ }
+ }
+ if (rangeEnd >= rangeStart) {
+ aContent.content(aBodyLines.slice(rangeStart, rangeEnd + 1));
+ }
+
+ [inQuoteDepth, line] = this._countQuoteDepthAndNormalize(line);
+ bodyLines[iLine] = line;
+ rangeStart = quoteRangeStart;
+ } else {
+ let curQuoteDepth;
+ [curQuoteDepth, line] = this._countQuoteDepthAndNormalize(line);
+ bodyLines[iLine] = line;
+
+ if (curQuoteDepth != inQuoteDepth) {
+ // we could do some "wrote" compensation here, but it's not really
+ // as important. let's wait for a more clever algorithm.
+ aContent.quoted(aBodyLines.slice(rangeStart, iLine), inQuoteDepth);
+ inQuoteDepth = curQuoteDepth;
+ rangeStart = iLine;
+ }
+ }
+ } else if (inQuoteDepth) {
+ aContent.quoted(aBodyLines.slice(rangeStart, iLine), inQuoteDepth);
+ inQuoteDepth = 0;
+ rangeStart = iLine;
+ }
+
+ prevLastNonBlankLine = lastNonBlankLine;
+ lastNonBlankLine = iLine;
+ }
+
+ if (inQuoteDepth) {
+ aContent.quoted(aBodyLines.slice(rangeStart), inQuoteDepth);
+ } else {
+ aContent.content(aBodyLines.slice(rangeStart, lastNonBlankLine + 1));
+ }
+
+ return true;
+ },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaIndexer.jsm b/comm/mailnews/db/gloda/modules/GlodaIndexer.jsm
new file mode 100644
index 0000000000..05919e4d67
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaIndexer.jsm
@@ -0,0 +1,1491 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file currently contains a fairly general implementation of asynchronous
+ * indexing with a very explicit message indexing implementation. As gloda
+ * will eventually want to index more than just messages, the message-specific
+ * things should ideally lose their special hold on this file. This will
+ * benefit readability/size as well.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaIndexer", "IndexingJob"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+const lazy = {};
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "GlodaCollectionManager",
+ "resource:///modules/gloda/Collection.jsm"
+);
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "GlodaDatastore",
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+
+/**
+ * @class Capture the indexing batch concept explicitly.
+ *
+ * @param aJobType The type of thing we are indexing. Current choices are:
+ * "folder" and "message". Previous choices included "account". The indexer
+ * currently knows too much about these; they should be de-coupled.
+ * @param aID Specific to the job type, but for now only used to hold folder
+ * IDs.
+ *
+ * @ivar items The list of items to process during this job/batch. (For
+ * example, if this is a "messages" job, this would be the list of messages
+ * to process, although the specific representation is determined by the
+ * job.) The list will only be mutated through the addition of extra items.
+ * @ivar offset The current offset into the 'items' list (if used), updated as
+ * processing occurs. If 'items' is not used, the processing code can also
+ * update this in a similar fashion. This is used by the status
+ * notification code in conjunction with goal.
+ * @ivar goal The total number of items to index/actions to perform in this job.
+ * This number may increase during the life of the job, but should not
+ * decrease. This is used by the status notification code in conjunction
+ * with the goal.
+ */
function IndexingJob(aJobType, aID, aItems) {
  this.jobType = aJobType;
  this.id = aID;
  // Default to an empty work list when the caller supplied none.
  this.items = aItems ?? [];
  // Progress bookkeeping consumed by the status notification code.
  this.offset = 0;
  this.goal = null;
  // Optional completion callback and its |this| context.
  this.callback = null;
  this.callbackThis = null;
}
IndexingJob.prototype = {
  /**
   * Invoke the callback associated with this job (if any), forwarding every
   * argument. Exceptions thrown by the callback are logged and suppressed so
   * a misbehaving consumer cannot break the indexer.
   */
  safelyInvokeCallback(...aArgs) {
    if (!this.callback) {
      return;
    }
    try {
      this.callback.apply(this.callbackThis, aArgs);
    } catch (ex) {
      GlodaIndexer._log.warn("job callback invocation problem:", ex);
    }
  },

  /** Render a compact debugging summary of the job's type and progress. */
  toString() {
    const itemCount = this.items ? this.items.length : "no";
    return (
      `[job:${this.jobType}` +
      ` id:${this.id}` +
      ` items:${itemCount}` +
      ` offset:${this.offset}` +
      ` goal:${this.goal}]`
    );
  },
};
+
+/**
+ * @namespace Core indexing logic, plus message-specific indexing logic.
+ *
+ * === Indexing Goals
+ * We have the following goals:
+ *
+ * Responsiveness
+ * - When the user wants to quit, we should be able to stop and quit in a timely
+ * fashion.
+ * - We should not interfere with the user's thunderbird usage.
+ *
+ * Correctness
+ * - Quitting should not result in any information loss; we should (eventually)
+ * end up at the same indexed state regardless of whether a user lets
+ * indexing run to completion or restarts thunderbird in the middle of the
+ * process. (It is okay to take slightly longer in the latter case.)
+ *
+ * Worst Case Scenario Avoidance
+ * - We should try to be O(1) memory-wise regardless of what notifications
+ * are thrown at us.
+ *
+ * === Indexing Throttling
+ *
+ * Adaptive Indexing
+ * - The indexer tries to stay out of the way of other running code in
+ * Thunderbird (autosync) and other code on the system. We try and target
+ * some number of milliseconds of activity between intentional inactive
+ * periods. The number of milliseconds of activity varies based on whether we
+ * believe the user to be actively using the computer or idle. We use our
+ * inactive periods as a way to measure system load; if we receive our
+ * notification promptly at the end of our inactive period, we believe the
+ * system is not heavily loaded. If we do not get notified promptly, we
+ * assume there is other stuff going on and back off.
+ *
+ */
var GlodaIndexer = {
  /**
   * A partial attempt to generalize to support multiple databases. Each
   * database would have its own datastore, and each datastore would have its
   * own indexer. But we rather inter-mingle our use of this field with the
   * singleton global GlodaDatastore.
   */
  _log: console.createInstance({
    prefix: "gloda.indexer",
    maxLogLevel: "Warn",
    maxLogLevelPref: "gloda.loglevel",
  }),
  /**
   * Our nsITimer that we use to schedule ourselves on the main thread
   * intermittently. The timer always exists but may not always be active.
   */
  _timer: null,
  /**
   * Our nsITimer that we use to schedule events in the "far" future. For now,
   * this means not compelling an initial indexing sweep until some number of
   * seconds after startup.
   */
  _longTimer: null,

  /**
   * Periodic performance adjustment parameters: The overall goal is to adjust
   * our rate of work so that we don't interfere with the user's activities
   * when they are around (non-idle), and the system in general (when idle).
   * Being nice when idle isn't quite as important, but is a good idea so that
   * when the user un-idles we are able to back off nicely. Also, we give
   * other processes on the system a chance to do something.
   *
   * We do this by organizing our work into discrete "tokens" of activity,
   * then processing the number of tokens that we have determined will
   * not impact the UI. Then we pause to give other activities a chance to get
   * some work done, and we measure whether anything happened during our pause.
   * If something else is going on in our application during that pause, we
   * give it priority (up to a point) by delaying further indexing.
   *
   * Keep in mind that many of our operations are actually asynchronous, so we
   * aren't entirely starving the event queue. However, a lot of the async
   * stuff can end up not having any actual delay between events. For
   * example, we only index offline message bodies, so there's no network
   * latency involved, just disk IO; the only meaningful latency will be the
   * initial disk seek (if there is one... pre-fetching may seriously be our
   * friend).
   *
   * In order to maintain responsiveness, I assert that we want to minimize the
   * length of the time we are dominating the event queue. This suggests
   * that we want break up our blocks of work frequently. But not so
   * frequently that there is a lot of waste. Accordingly our algorithm is
   * basically:
   *
   * - Estimate the time that it takes to process a token, and schedule the
   *   number of tokens that should fit into that time.
   * - Detect user activity, and back off immediately if found.
   * - Try to delay commits and garbage collection until the user is inactive,
   *   as these tend to cause a brief pause in the UI.
   */

  /**
   * The number of milliseconds before we declare the user idle and step up our
   * indexing.
   */
  _INDEX_IDLE_ADJUSTMENT_TIME: 5000,

  /**
   * The time delay in milliseconds before we should schedule our initial sweep.
   */
  _INITIAL_SWEEP_DELAY: 10000,

  /**
   * How many milliseconds in the future should we schedule indexing to start
   * when turning on indexing (and it was not previously active).
   */
  _INDEX_KICKOFF_DELAY: 200,

  /**
   * The time interval, in milliseconds, of pause between indexing batches. The
   * maximum processor consumption is determined by this constant and the
   * active |_cpuTargetIndexTime|.
   *
   * For current constants, that puts us at 50% while the user is active and 83%
   * when idle.
   */
  _INDEX_INTERVAL: 32,

  /**
   * Number of indexing 'tokens' we are allowed to consume before yielding for
   * each incremental pass. Consider a single token equal to indexing a single
   * medium-sized message. This may be altered by user session (in)activity.
   * Because we fetch message bodies, which is potentially asynchronous, this
   * is not a precise knob to twiddle.
   */
  _indexTokens: 2,

  /**
   * Stopwatches used to measure performance during indexing, and during
   * pauses between indexing. These help us adapt our indexing constants so
   * as to not explode your computer. Kind of us, no?
   */
  _perfIndexStopwatch: null,
  _perfPauseStopwatch: null,
  /**
   * Do we have an uncommitted indexer transaction that idle callback should commit?
   */
  _idleToCommit: false,
  /**
   * Target CPU time per batch of tokens, current value (milliseconds).
   * Switches between the ACTIVE and IDLE targets below.
   */
  _cpuTargetIndexTime: 32,
  /**
   * Target CPU time per batch of tokens, during non-idle (milliseconds).
   */
  _CPU_TARGET_INDEX_TIME_ACTIVE: 32,
  /**
   * Target CPU time per batch of tokens, during idle (milliseconds).
   */
  _CPU_TARGET_INDEX_TIME_IDLE: 160,
  /**
   * Average CPU time per processed token (milliseconds).
   */
  _cpuAverageTimePerToken: 16,
  /**
   * Damping factor for _cpuAverageTimePerToken, as an approximate
   * number of tokens to include in the average time.
   */
  _CPU_AVERAGE_TIME_DAMPING: 200,
  /**
   * Maximum tokens per batch. This is normally just a sanity check.
   */
  _CPU_MAX_TOKENS_PER_BATCH: 100,
  /**
   * CPU usage during a pause to declare that system was busy (milliseconds).
   * This is typically set as 1.5 times the minimum resolution of the cpu
   * usage clock, which is 16 milliseconds on Windows systems, and (I think)
   * smaller on other systems, so we take the worst case.
   */
  _CPU_IS_BUSY_TIME: 24,
  /**
   * Time that return from pause may be late before the system is declared
   * busy, in milliseconds. (Same issues as _CPU_IS_BUSY_TIME).
   */
  _PAUSE_LATE_IS_BUSY_TIME: 24,
  /**
   * Number of times that we will repeat a pause while waiting for a
   * free CPU.
   */
  _PAUSE_REPEAT_LIMIT: 10,
  /**
   * Minimum time delay between commits, in milliseconds.
   */
  _MINIMUM_COMMIT_TIME: 5000,
  /**
   * Maximum time delay between commits, in milliseconds.
   */
  _MAXIMUM_COMMIT_TIME: 20000,

  /**
   * Unit testing hook to get us to emit additional logging that verges on
   * inane for general usage but is helpful in unit test output to get a lay
   * of the land and for paranoia reasons.
   */
  _unitTestSuperVerbose: false,
  /**
   * Unit test vector to get notified when a worker has a problem and it has
   * a recover helper associated. This gets called with an argument
   * indicating whether the recovery helper indicates recovery was possible.
   */
  _unitTestHookRecover: null,
  /**
   * Unit test vector to get notified when a worker runs into an exceptional
   * situation (an exception propagates or gets explicitly killed) and needs
   * to be cleaned up. This gets called with an argument indicating if there
   * was a helper that was used or if we just did the default cleanup thing.
   */
  _unitTestHookCleanup: null,

  /**
   * Last commit time. Tracked to try and only commit at reasonable intervals.
   */
  _lastCommitTime: Date.now(),
+
  // Guard so that _init() only ever runs its setup once.
  _inited: false,
  /**
   * Initialize the indexer: create our driving timers and stopwatches, grab
   * the idle service, register for shutdown notification, and read the
   * event-driven indexing preferences. Idempotent; subsequent calls are
   * no-ops.
   */
  _init() {
    if (this._inited) {
      return;
    }

    this._inited = true;

    this._callbackHandle.init();

    // Starting offline implies we should not index until told otherwise.
    if (Services.io.offline) {
      this._suppressIndexing = true;
    }

    // create the timer that drives our intermittent indexing
    this._timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
    // create the timer for larger offsets independent of indexing
    this._longTimer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);

    this._idleService = Cc["@mozilla.org/widget/useridleservice;1"].getService(
      Ci.nsIUserIdleService
    );

    // create our performance stopwatches
    try {
      this._perfIndexStopwatch = Cc["@mozilla.org/stopwatch;1"].createInstance(
        Ci.nsIStopwatch
      );
      this._perfPauseStopwatch = Cc["@mozilla.org/stopwatch;1"].createInstance(
        Ci.nsIStopwatch
      );
    } catch (ex) {
      this._log.error("problem creating stopwatch!: " + ex);
    }

    // register for shutdown notifications
    Services.obs.addObserver(this, "quit-application");

    // figure out if event-driven indexing should be enabled...
    let branch = Services.prefs.getBranch("mailnews.database.global.indexer.");
    let eventDrivenEnabled = branch.getBoolPref("enabled", false);
    let performInitialSweep = branch.getBoolPref("perform_initial_sweep", true);
    // pretend we have already performed an initial sweep...
    if (!performInitialSweep) {
      this._initialSweepPerformed = true;
    }

    this.enabled = eventDrivenEnabled;
  },
+
+ /**
+ * When shutdown, indexing immediately ceases and no further progress should
+ * be made. This flag goes true once, and never returns to false. Being
+ * in this state is a destructive thing from whence we cannot recover.
+ */
+ _indexerIsShutdown: false,
+
+ /**
+ * Shutdown the indexing process and datastore as quickly as possible in
+ * a synchronous fashion.
+ */
+ _shutdown() {
+ // no more timer events, please
+ try {
+ this._timer.cancel();
+ } catch (ex) {}
+ this._timer = null;
+ try {
+ this._longTimer.cancel();
+ } catch (ex) {}
+ this._longTimer = null;
+
+ this._perfIndexStopwatch = null;
+ this._perfPauseStopwatch = null;
+
+ // Remove listeners to avoid reference cycles on the off chance one of them
+ // holds a reference to the indexer object.
+ this._indexListeners = [];
+
+ this._indexerIsShutdown = true;
+
+ if (this.enabled) {
+ this._log.info("Shutting Down");
+ }
+
+ // don't let anything try and convince us to start indexing again
+ this.suppressIndexing = true;
+
+ // If there is an active job and it has a cleanup handler, run it.
+ if (this._curIndexingJob) {
+ let workerDef = this._curIndexingJob._workerDef;
+ try {
+ if (workerDef.cleanup) {
+ workerDef.cleanup.call(workerDef.indexer, this._curIndexingJob);
+ }
+ } catch (ex) {
+ this._log.error("problem during worker cleanup during shutdown.");
+ }
+ }
+ // Definitely clean out the async call stack and any associated data
+ this._callbackHandle.cleanup();
+ this._workBatchData = undefined;
+
+ // disable ourselves and all of the specific indexers
+ this.enabled = false;
+
+ lazy.GlodaDatastore.shutdown();
+ },
+
+ /**
+ * The list of indexers registered with us. If you are a core gloda indexer
+ * (you ship with gloda), then you can import this file directly and should
+ * make sure your indexer is imported in 'Everybody.jsm' in the right order.
+ * If you are not core gloda, then you should import 'GlodaPublic.jsm' and only
+ * then should you import 'GlodaIndexer.jsm' to get at GlodaIndexer.
+ */
+ _indexers: [],
+ /**
+ * Register an indexer with the Gloda indexing mechanism.
+ *
+ * @param aIndexer.name The name of your indexer.
+ * @param aIndexer.enable Your enable function. This will be called during
+ * the call to registerIndexer if Gloda indexing is already enabled. If
+ * indexing is not yet enabled, you will be called
+ * @param aIndexer.disable Your disable function. This will be called when
+ * indexing is disabled or we are shutting down. This will only be called
+ * if enable has already been called.
+ * @param aIndexer.workers A list of tuples of the form [worker type code,
+ * worker generator function, optional scheduling trigger function]. The
+ * type code is the string used to uniquely identify the job type. If you
+ * are not core gloda, your job type must start with your extension's name
+ * and a colon; you can collow that with anything you want. The worker
+ * generator is not easily explained in here. The trigger function is
+ * invoked immediately prior to calling the generator to create it. The
+ * trigger function takes the job as an argument and should perform any
+ * finalization required on the job. Most workers should not need to use
+ * the trigger function.
+ * @param aIndexer.initialSweep We call this to tell each indexer when it is
+ * its turn to run its indexing sweep. The idea of the indexing sweep is
+ * that this is when you traverse things eligible for indexing to make
+ * sure they are indexed. Right now we just call everyone at the same
+ * time and hope that their jobs don't fight too much.
+ */
+ registerIndexer(aIndexer) {
+ this._log.info("Registering indexer: " + aIndexer.name);
+ this._indexers.push(aIndexer);
+
+ try {
+ for (let workerInfo of aIndexer.workers) {
+ let workerCode = workerInfo[0];
+ let workerDef = workerInfo[1];
+ workerDef.name = workerCode;
+ workerDef.indexer = aIndexer;
+ this._indexerWorkerDefs[workerCode] = workerDef;
+ if (!("recover" in workerDef)) {
+ workerDef.recover = null;
+ }
+ if (!("cleanup" in workerDef)) {
+ workerDef.cleanup = null;
+ }
+ if (!("onSchedule" in workerDef)) {
+ workerDef.onSchedule = null;
+ }
+ if (!("jobCanceled" in workerDef)) {
+ workerDef.jobCanceled = null;
+ }
+ }
+ } catch (ex) {
+ this._log.warn("Helper indexer threw exception on worker enum.");
+ }
+
+ if (this._enabled) {
+ try {
+ aIndexer.enable();
+ } catch (ex) {
+ this._log.warn("Helper indexer threw exception on enable: " + ex);
+ }
+ }
+ },
+
+  /**
+   * Are we enabled, read: are we processing change events?
+   */
+  _enabled: false,
+  get enabled() {
+    return this._enabled;
+  },
+  /**
+   * Enabling registers our offline and idle observers, enables every helper
+   *  indexer, resumes any accumulated indexing desire, and (once) schedules
+   *  the initial sweep. Disabling performs the teardown in the opposite
+   *  order: helper indexers first, then observer removal.
+   */
+  set enabled(aEnable) {
+    if (!this._enabled && aEnable) {
+      // register for offline notifications
+      Services.obs.addObserver(this, "network:offline-status-changed");
+
+      // register for idle notification
+      this._idleService.addIdleObserver(this, this._indexIdleThresholdSecs);
+
+      this._enabled = true;
+
+      for (let indexer of this._indexers) {
+        try {
+          indexer.enable();
+        } catch (ex) {
+          this._log.warn("Helper indexer threw exception on enable: " + ex);
+        }
+      }
+
+      // if we have an accumulated desire to index things, kick it off again.
+      if (this._indexingDesired) {
+        this._indexingDesired = false; // it's edge-triggered for now
+        this.indexing = true;
+      }
+
+      // if we have not done an initial sweep, schedule scheduling one.
+      if (!this._initialSweepPerformed) {
+        this._longTimer.initWithCallback(
+          this._scheduleInitialSweep,
+          this._INITIAL_SWEEP_DELAY,
+          Ci.nsITimer.TYPE_ONE_SHOT
+        );
+      }
+    } else if (this._enabled && !aEnable) {
+      for (let indexer of this._indexers) {
+        try {
+          indexer.disable();
+        } catch (ex) {
+          this._log.warn("Helper indexer threw exception on disable: " + ex);
+        }
+      }
+
+      // remove offline observer
+      Services.obs.removeObserver(this, "network:offline-status-changed");
+
+      // remove idle
+      this._idleService.removeIdleObserver(this, this._indexIdleThresholdSecs);
+
+      this._enabled = false;
+    }
+  },
+
+  /** Track whether indexing is desired (we have jobs to prosecute). */
+  _indexingDesired: false,
+  /**
+   * Track whether we have an actively pending callback or timer event. We do
+   * this so we don't experience a transient suppression and accidentally
+   * get multiple event-chains driving indexing at the same time (which the
+   * code will not handle correctly).
+   */
+  _indexingActive: false,
+  /**
+   * Indicates whether indexing is currently ongoing. This may return false
+   * while indexing activities are still active, but they will quiesce shortly.
+   */
+  get indexing() {
+    return this._indexingDesired && !this._suppressIndexing;
+  },
+  /** Indicates whether indexing is desired. */
+  get indexingDesired() {
+    return this._indexingDesired;
+  },
+  /**
+   * Set this to true to indicate there is indexing work to perform. This does
+   * not mean indexing will begin immediately (if it wasn't active), however.
+   * If suppressIndexing has been set, we won't do anything until indexing is
+   * no longer suppressed.
+   */
+  set indexing(aShouldIndex) {
+    if (!this._indexingDesired && aShouldIndex) {
+      this._indexingDesired = true;
+      // Only kick off the callback driver if no event chain is already in
+      //  flight; the one-shot timer keeps us off the caller's stack.
+      if (this.enabled && !this._indexingActive && !this._suppressIndexing) {
+        this._log.info("+++ Indexing Queue Processing Commencing");
+        this._indexingActive = true;
+        this._timer.initWithCallback(
+          this._timerCallbackDriver,
+          this._INDEX_KICKOFF_DELAY,
+          Ci.nsITimer.TYPE_ONE_SHOT
+        );
+      }
+    }
+  },
+
+  _suppressIndexing: false,
+  /**
+   * Set whether or not indexing should be suppressed. This is to allow us to
+   * avoid running down a laptop's battery when it is not on AC. Only code
+   * in charge of regulating that tracking should be setting this variable; if
+   * other factors want to contribute to such a decision, this logic needs to
+   * be changed to track that, since last-write currently wins.
+   */
+  set suppressIndexing(aShouldSuppress) {
+    this._suppressIndexing = aShouldSuppress;
+
+    // re-start processing if we are no longer suppressing, there is work yet
+    // to do, and the indexing process had actually stopped.
+    if (
+      !this._suppressIndexing &&
+      this._indexingDesired &&
+      !this._indexingActive
+    ) {
+      this._log.info("+++ Indexing Queue Processing Resuming");
+      this._indexingActive = true;
+      this._timer.initWithCallback(
+        this._timerCallbackDriver,
+        this._INDEX_KICKOFF_DELAY,
+        Ci.nsITimer.TYPE_ONE_SHOT
+      );
+    }
+  },
+
+  /**
+   * Track whether an initial sweep has been performed. This mainly exists so
+   * that unit testing can stop us from performing an initial sweep.
+   */
+  _initialSweepPerformed: false,
+  /**
+   * Our timer-driven callback to schedule our first initial indexing sweep.
+   * Because it is invoked by an nsITimer it operates without the benefit of
+   * a 'this' context and must use GlodaIndexer instead of this.
+   * Since an initial sweep could have been performed before we get invoked,
+   * we need to check whether an initial sweep is still desired before trying
+   * to schedule one. We don't need to worry about whether one is active
+   * because the indexingSweepNeeded takes care of that.
+   */
+  _scheduleInitialSweep() {
+    if (GlodaIndexer._initialSweepPerformed) {
+      return;
+    }
+    GlodaIndexer._initialSweepPerformed = true;
+    // Every registered indexer gets told at once; see registerIndexer's
+    //  initialSweep contract documentation.
+    for (let indexer of GlodaIndexer._indexers) {
+      indexer.initialSweep();
+    }
+  },
+
+  /**
+   * Our current job number. Meaningless value that increments with every job
+   * we process that resets to 0 when we run out of jobs. Currently used by
+   * the activity manager's gloda listener to tell when we have changed jobs.
+   * We really need a better listener mechanism.
+   */
+  _indexingJobCount: 0,
+
+  /**
+   * A list of IndexingJob instances to process.
+   */
+  _indexQueue: [],
+
+  /**
+   * The current indexing job.
+   */
+  _curIndexingJob: null,
+
+  /**
+   * The number of seconds before we declare the user idle and commit if
+   * needed.
+   */
+  _indexIdleThresholdSecs: 3,
+
+  // Registered progress listeners; see addListener/removeListener and
+  //  _notifyListeners.
+  _indexListeners: [],
+  /**
+   * Add an indexing progress listener. The listener will be notified of at
+   * least all major status changes (idle -> indexing, indexing -> idle), plus
+   * arbitrary progress updates during the indexing process.
+   * If indexing is not active when the listener is added, a synthetic idle
+   * notification will be generated.
+   *
+   * @param aListener A listener function, taking arguments: status (Gloda.
+   *     kIndexer*), the folder name if a folder is involved (string or null),
+   *     current zero-based job number (int),
+   *     current item number being indexed in this job (int), total number
+   *     of items in this job to be indexed (int), and the job type
+   *     (string or null; see _notifyListeners).
+   *
+   * @returns aListener, for caller convenience.
+   *
+   * @TODO should probably allow for a 'this' value to be provided
+   * @TODO generalize to not be folder/message specific. use nouns!
+   */
+  addListener(aListener) {
+    // should we weakify?
+    if (!this._indexListeners.includes(aListener)) {
+      this._indexListeners.push(aListener);
+    }
+    // if we aren't indexing, give them an idle indicator, otherwise they can
+    // just be happy when we hit the next actual status point.
+    // NOTE(review): this synthetic call omits the trailing jobType argument
+    //  that _notifyListeners passes; listeners see undefined rather than null.
+    if (!this.indexing) {
+      aListener(GlodaConstants.kIndexerIdle, null, 0, 0, 1);
+    }
+    return aListener;
+  },
+  /**
+   * Remove the given listener so that it no longer receives indexing progress
+   * updates.
+   */
+  removeListener(aListener) {
+    let index = this._indexListeners.indexOf(aListener);
+    if (index != -1) {
+      this._indexListeners.splice(index, 1);
+    }
+  },
+  /**
+   * Helper method to tell listeners what we're up to. For code simplicity,
+   * the caller is just deciding when to send this update (preferably at
+   * reasonable intervals), and doesn't need to provide any indication of
+   * state... we figure that out ourselves.
+   *
+   * This was not pretty but got ugly once we moved the message indexing out
+   * to its own indexer. Some generalization is required but will likely
+   * require string hooks.
+   */
+  _notifyListeners() {
+    let status, prettyName, jobIndex, jobItemIndex, jobItemGoal, jobType;
+
+    if (this.indexing && this._curIndexingJob) {
+      let job = this._curIndexingJob;
+      status = GlodaConstants.kIndexerIndexing;
+
+      // Only folder-based indexers expose _indexingFolder; others get a null
+      //  pretty name.
+      let indexer = this._indexerWorkerDefs[job.jobType].indexer;
+      if ("_indexingFolder" in indexer) {
+        prettyName =
+          indexer._indexingFolder != null
+            ? indexer._indexingFolder.prettyName
+            : null;
+      } else {
+        prettyName = null;
+      }
+
+      jobIndex = this._indexingJobCount - 1;
+      jobItemIndex = job.offset;
+      jobItemGoal = job.goal;
+      jobType = job.jobType;
+    } else {
+      status = GlodaConstants.kIndexerIdle;
+      prettyName = null;
+      jobIndex = 0;
+      jobItemIndex = 0;
+      jobItemGoal = 1;
+      jobType = null;
+    }
+
+    // Some people ascribe to the belief that the most you can give is 100%.
+    // We know better, but let's humor them.
+    if (jobItemIndex > jobItemGoal) {
+      jobItemGoal = jobItemIndex;
+    }
+
+    // Iterate in reverse so a listener removing itself during the callback
+    //  cannot cause us to skip another listener.
+    for (
+      let iListener = this._indexListeners.length - 1;
+      iListener >= 0;
+      iListener--
+    ) {
+      let listener = this._indexListeners[iListener];
+      try {
+        listener(
+          status,
+          prettyName,
+          jobIndex,
+          jobItemIndex,
+          jobItemGoal,
+          jobType
+        );
+      } catch (ex) {
+        this._log.error(ex);
+      }
+    }
+  },
+
+  /**
+   * A wrapped callback driver intended to be used by timers that provide
+   * arguments we really do not care about.
+   */
+  _timerCallbackDriver() {
+    GlodaIndexer.callbackDriver();
+  },
+
+  /**
+   * A simple callback driver wrapper to provide 'this'.
+   * Arguments are forwarded unchanged to callbackDriver.
+   */
+  _wrapCallbackDriver(...aArgs) {
+    GlodaIndexer.callbackDriver(...aArgs);
+  },
+
+  /**
+   * The current processing 'batch' generator, produced by a call to workBatch()
+   * and used by callbackDriver to drive execution.
+   */
+  _batch: null,
+  // Re-entrancy guard: true while callbackDriver is on the stack.
+  _inCallback: false,
+  // Arguments stashed by a re-entrant invocation, replayed on the deferred
+  //  timer-driven call.
+  _savedCallbackArgs: null,
+  /**
+   * The root work-driver. callbackDriver creates workBatch generator instances
+   * (stored in _batch) which run until they are done (kWorkDone) or they
+   * (really the embedded activeIterator) encounter something asynchronous.
+   * The convention is that all the callback handlers end up calling us,
+   * ensuring that control-flow properly resumes. If the batch completes,
+   * we re-schedule ourselves after a time delay (controlled by _INDEX_INTERVAL)
+   * and return. (We use one-shot timers because repeating-slack does not
+   * know enough to deal with our (current) asynchronous nature.)
+   */
+  callbackDriver(...aArgs) {
+    // just bail if we are shutdown
+    if (this._indexerIsShutdown) {
+      return;
+    }
+
+    // it is conceivable that someone we call will call something that in some
+    // cases might be asynchronous, and in other cases immediately generate
+    // events without returning. In the interest of (stack-depth) sanity,
+    // let's handle this by performing a minimal time-delay callback.
+    // this is also now a good thing sequencing-wise. if we get our callback
+    // with data before the underlying function has yielded, we obviously can't
+    // cram the data in yet. Our options in this case are to either mark the
+    // fact that the callback has already happened and immediately return to
+    // the iterator when it does bubble up the kWorkAsync, or we can do as we
+    // have been doing, but save the arguments and re-dispatch ourselves via a
+    // zero-delay one-shot timer (which is what the code below does).
+    if (this._inCallback) {
+      this._savedCallbackArgs = aArgs;
+      this._timer.initWithCallback(
+        this._timerCallbackDriver,
+        0,
+        Ci.nsITimer.TYPE_ONE_SHOT
+      );
+      return;
+    }
+    this._inCallback = true;
+
+    try {
+      if (this._batch === null) {
+        this._batch = this.workBatch();
+      }
+
+      // kWorkAsync, kWorkDone, kWorkPause are allowed out; kWorkSync is not
+      // On kWorkDone, we want to schedule another timer to fire on us if we are
+      // not done indexing. (On kWorkAsync, we don't care what happens, because
+      // someone else will be receiving the callback, and they will call us when
+      // they are done doing their thing.
+      let args;
+      if (this._savedCallbackArgs != null) {
+        args = this._savedCallbackArgs;
+        this._savedCallbackArgs = null;
+      } else {
+        args = aArgs;
+      }
+
+      let result;
+      if (args.length == 0) {
+        result = this._batch.next().value;
+      } else if (args.length == 1) {
+        result = this._batch.next(args[0]).value;
+      } else {
+        // Arguments works with destructuring assignment.
+        result = this._batch.next(args).value;
+      }
+      switch (result) {
+        // job's done, close the batch and re-schedule ourselves if there's more
+        // to do.
+        case GlodaConstants.kWorkDone:
+          this._batch.return();
+          this._batch = null;
+        // the batch wants to get re-scheduled, do so.
+        // (intentional fall-through to re-scheduling logic)
+        case GlodaConstants.kWorkPause:
+          if (this.indexing) {
+            this._timer.initWithCallback(
+              this._timerCallbackDriver,
+              this._INDEX_INTERVAL,
+              Ci.nsITimer.TYPE_ONE_SHOT
+            );
+          } else {
+            // it's important to indicate no more callbacks are in flight
+            this._indexingActive = false;
+          }
+          break;
+        case GlodaConstants.kWorkAsync:
+          // there is nothing to do. some other code is now responsible for
+          // calling us.
+          break;
+      }
+    } finally {
+      this._inCallback = false;
+    }
+  },
+
+  /**
+   * The handle given to every worker generator. It maintains the stack of
+   *  nested generators being driven by workBatch/callbackDriver and exposes
+   *  the callback plumbing workers hand to asynchronous APIs.
+   */
+  _callbackHandle: {
+    init() {
+      this.wrappedCallback = GlodaIndexer._wrapCallbackDriver;
+      this.callbackThis = GlodaIndexer;
+      this.callback = GlodaIndexer.callbackDriver;
+    },
+    /**
+     * The stack of generators we are processing. The (numerically) last one is
+     * also the |activeIterator|.
+     */
+    activeStack: [],
+    /**
+     * The generator at the top of the |activeStack| and that we will call next
+     * or send on next if nothing changes.
+     */
+    activeIterator: null,
+    /**
+     * Meta-information about the generators at each level of the stack.
+     */
+    contextStack: [],
+    /**
+     * Push a new generator onto the stack. It becomes the active generator.
+     */
+    push(aIterator, aContext) {
+      this.activeStack.push(aIterator);
+      this.contextStack.push(aContext);
+      this.activeIterator = aIterator;
+    },
+    /**
+     * For use by generators that want to call another asynchronous process
+     * implemented as a generator. They should do
+     * "yield aCallbackHandle.pushAndGo(someGenerator(arg1, arg2));".
+     *
+     * @public
+     */
+    pushAndGo(aIterator, aContext) {
+      this.push(aIterator, aContext);
+      // kWorkSync makes workBatch immediately drive the newly-pushed
+      //  generator on the next iteration.
+      return GlodaConstants.kWorkSync;
+    },
+    /**
+     * Pop the active generator off the stack, closing it via return() so its
+     * finally blocks run.
+     */
+    pop() {
+      this.activeIterator.return();
+      this.activeStack.pop();
+      this.contextStack.pop();
+      if (this.activeStack.length) {
+        this.activeIterator = this.activeStack[this.activeStack.length - 1];
+      } else {
+        this.activeIterator = null;
+      }
+    },
+    /**
+     * Someone propagated an exception and we need to clean-up all the active
+     * logic as best we can. Which is not really all that well.
+     *
+     * @param [aOptionalStopAtDepth=0] The length the stack should be when this
+     *     method completes. Pass 0 or omit for us to clear everything out.
+     *     Pass 1 to leave just the top-level generator intact.
+     */
+    cleanup(aOptionalStopAtDepth) {
+      if (aOptionalStopAtDepth === undefined) {
+        aOptionalStopAtDepth = 0;
+      }
+      while (this.activeStack.length > aOptionalStopAtDepth) {
+        this.pop();
+      }
+    },
+    /**
+     * For use when a generator finishes up by calling |doneWithResult| on us;
+     * the async driver calls this to pop that generator off the stack
+     * and get the result it passed in to its call to |doneWithResult|.
+     *
+     * @protected
+     */
+    popWithResult() {
+      this.pop();
+      let result = this._result;
+      this._result = null;
+      return result;
+    },
+    // Holds the value passed to doneWithResult until popWithResult retrieves
+    //  it.
+    _result: null,
+    /**
+     * For use by generators that want to return a result to the calling
+     * asynchronous generator. Specifically, they should do
+     * "yield aCallbackHandle.doneWithResult(RESULT);".
+     *
+     * @public
+     */
+    doneWithResult(aResult) {
+      this._result = aResult;
+      return GlodaConstants.kWorkDoneWithResult;
+    },
+
+    /* be able to serve as a collection listener, resuming the active iterator's
+       last yield kWorkAsync */
+    onItemsAdded() {},
+    onItemsModified() {},
+    onItemsRemoved() {},
+    onQueryCompleted(aCollection) {
+      GlodaIndexer.callbackDriver();
+    },
+  },
+  // Value handed to the active iterator's next(); set by kWorkAsync resumes
+  //  and popWithResult, cleared on every other transition.
+  _workBatchData: undefined,
+  /* eslint-disable complexity */
+  /**
+   * The workBatch generator handles a single 'batch' of processing, managing
+   * the database transaction and keeping track of "tokens". It drives the
+   * activeIterator generator which is doing the work.
+   * workBatch will only produce kWorkAsync, kWorkPause, and kWorkDone
+   * notifications. If activeIterator returns kWorkSync and there are still
+   * tokens available, workBatch will keep driving the activeIterator until it
+   * encounters a kWorkAsync (which workBatch will yield to callbackDriver), or
+   * it runs out of tokens and yields a kWorkPause or kWorkDone.
+   */
+  *workBatch() {
+    // Do we still have an open transaction? If not, start a new one.
+    if (!this._idleToCommit) {
+      lazy.GlodaDatastore._beginTransaction();
+    } else {
+      // We'll manage commit ourself while this routine is active.
+      this._idleToCommit = false;
+    }
+
+    this._perfIndexStopwatch.start();
+    let batchCount;
+    let haveMoreWork = true;
+    let transactionToCommit = true;
+    let inIdle;
+
+    // Only notify listeners on every 32nd pause-eligible iteration to limit
+    //  listener overhead.
+    let notifyDecimator = 0;
+
+    while (haveMoreWork) {
+      // Both explicit work activity points (sync + async) and transfer of
+      // control return (via kWorkDone*) results in a token being eaten. The
+      // idea now is to make tokens less precious so that the adaptive logic
+      // can adjust them with less impact. (Before this change, doing 1
+      // token's work per cycle ended up being an entire non-idle time-slice's
+      // work.)
+      // During this loop we track the clock real-time used even though we
+      // frequently yield to asynchronous operations. These asynchronous
+      // operations are either database queries or message streaming requests.
+      // Both may involve disk I/O but no network I/O (since we only stream
+      // messages that are already available offline), but in an ideal
+      // situation will come from cache and so the work this function kicks off
+      // will dominate.
+      // We do not use the CPU time to this end because...
+      // 1) Our timer granularity on linux is worse for CPU than for wall time.
+      // 2) That can fail to account for our I/O cost.
+      // 3) If something with a high priority / low latency need (like playing
+      //    a video) is fighting us, although using CPU time will accurately
+      //    express how much time we are actually spending to index, our goal
+      //    is to control the duration of our time slices, not be "right" about
+      //    the actual CPU cost. In that case, if we attempted to take on more
+      //    work, we would likely interfere with the higher priority process or
+      //    make ourselves less responsive by drawing out the period of time we
+      //    are dominating the main thread.
+      this._perfIndexStopwatch.start();
+      // For telemetry purposes, we want to know how many messages we've been
+      // processing during that batch, and how long it took, pauses included.
+      let t0 = Date.now();
+      this._indexedMessageCount = 0;
+      batchCount = 0;
+      while (batchCount < this._indexTokens) {
+        if (
+          this._callbackHandle.activeIterator === null &&
+          !this._hireJobWorker()
+        ) {
+          haveMoreWork = false;
+          break;
+        }
+        batchCount++;
+
+        // XXX for performance, we may want to move the try outside the for loop
+        // with a quasi-redundant outer loop that shunts control back inside
+        // if we left the loop due to an exception (without consuming all the
+        // tokens.)
+        try {
+          switch (
+            this._callbackHandle.activeIterator.next(this._workBatchData).value
+          ) {
+            case GlodaConstants.kWorkSync:
+              this._workBatchData = undefined;
+              break;
+            case GlodaConstants.kWorkAsync:
+              // Suspend until callbackDriver resumes us with the async result.
+              this._workBatchData = yield GlodaConstants.kWorkAsync;
+              break;
+            case GlodaConstants.kWorkDone:
+              this._callbackHandle.pop();
+              this._workBatchData = undefined;
+              break;
+            case GlodaConstants.kWorkDoneWithResult:
+              this._workBatchData = this._callbackHandle.popWithResult();
+              break;
+            default:
+              break;
+          }
+        } catch (ex) {
+          this._log.debug("Exception in batch processing:", ex);
+          let workerDef = this._curIndexingJob._workerDef;
+          if (workerDef.recover) {
+            let recoverToDepth;
+            try {
+              recoverToDepth = workerDef.recover.call(
+                workerDef.indexer,
+                this._curIndexingJob,
+                this._callbackHandle.contextStack,
+                ex
+              );
+            } catch (ex2) {
+              this._log.error(
+                "Worker '" +
+                  workerDef.name +
+                  "' recovery function itself failed:",
+                ex2
+              );
+            }
+            if (this._unitTestHookRecover) {
+              this._unitTestHookRecover(
+                recoverToDepth,
+                ex,
+                this._curIndexingJob,
+                this._callbackHandle
+              );
+            }
+
+            if (recoverToDepth) {
+              this._callbackHandle.cleanup(recoverToDepth);
+              continue;
+            }
+          }
+          // (we either did not have a recover handler or it couldn't recover)
+          // call the cleanup helper if there is one
+          if (workerDef.cleanup) {
+            try {
+              workerDef.cleanup.call(workerDef.indexer, this._curIndexingJob);
+            } catch (ex2) {
+              this._log.error(
+                "Worker '" +
+                  workerDef.name +
+                  "' cleanup function itself failed:",
+                ex2
+              );
+            }
+            if (this._unitTestHookCleanup) {
+              this._unitTestHookCleanup(
+                true,
+                ex,
+                this._curIndexingJob,
+                this._callbackHandle
+              );
+            }
+          } else if (this._unitTestHookCleanup) {
+            this._unitTestHookCleanup(
+              false,
+              ex,
+              this._curIndexingJob,
+              this._callbackHandle
+            );
+          }
+
+          // Clean out everything on the async stack, warn about the job, kill.
+          // We do not log this warning lightly; it will break unit tests and
+          // be visible to users. Anything expected should likely have a
+          // recovery function or the cleanup logic should be extended to
+          // indicate that the failure is acceptable.
+          this._callbackHandle.cleanup();
+          this._log.warn(
+            "Problem during " + this._curIndexingJob + ", bailing:",
+            ex
+          );
+          this._curIndexingJob = null;
+          // the data must now be invalid
+          this._workBatchData = undefined;
+        }
+      }
+      this._perfIndexStopwatch.stop();
+
+      // idleTime can throw if there is no idle-provider available, such as an
+      // X session without the relevant extensions available. In this case
+      // we assume that the user is never idle.
+      try {
+        // We want to stop ASAP when leaving idle, so we can't rely on the
+        // standard polled callback. We do the polling ourselves.
+        if (this._idleService.idleTime < this._INDEX_IDLE_ADJUSTMENT_TIME) {
+          inIdle = false;
+          this._cpuTargetIndexTime = this._CPU_TARGET_INDEX_TIME_ACTIVE;
+        } else {
+          inIdle = true;
+          this._cpuTargetIndexTime = this._CPU_TARGET_INDEX_TIME_IDLE;
+        }
+      } catch (ex) {
+        inIdle = false;
+      }
+
+      // take a breather by having the caller re-schedule us sometime in the
+      // future, but only if we're going to perform another loop iteration.
+      if (haveMoreWork) {
+        notifyDecimator = (notifyDecimator + 1) % 32;
+        if (!notifyDecimator) {
+          this._notifyListeners();
+        }
+
+        for (
+          let pauseCount = 0;
+          pauseCount < this._PAUSE_REPEAT_LIMIT;
+          pauseCount++
+        ) {
+          this._perfPauseStopwatch.start();
+
+          yield GlodaConstants.kWorkPause;
+
+          this._perfPauseStopwatch.stop();
+          // We repeat the pause if the pause was longer than
+          // we expected, or if it used a significant amount
+          // of cpu, either of which indicate significant other
+          // activity.
+          if (
+            this._perfPauseStopwatch.cpuTimeSeconds * 1000 <
+              this._CPU_IS_BUSY_TIME &&
+            this._perfPauseStopwatch.realTimeSeconds * 1000 -
+              this._INDEX_INTERVAL <
+              this._PAUSE_LATE_IS_BUSY_TIME
+          ) {
+            break;
+          }
+        }
+      }
+
+      // All pauses have been taken, how effective were we? Report!
+      // XXX: there's possibly a lot of fluctuation since we go through here
+      // every 5 messages or even less
+      if (this._indexedMessageCount > 0) {
+        let delta = (Date.now() - t0) / 1000; // in seconds
+        let v = Math.round(this._indexedMessageCount / delta);
+        try {
+          let h = Services.telemetry.getHistogramById(
+            "THUNDERBIRD_INDEXING_RATE_MSG_PER_S"
+          );
+          h.add(v);
+        } catch (e) {
+          this._log.warn("Couldn't report telemetry", e, v);
+        }
+      }
+
+      if (batchCount > 0) {
+        let totalTime = this._perfIndexStopwatch.realTimeSeconds * 1000;
+        let timePerToken = totalTime / batchCount;
+        // Damp the average time since it is a rough estimate only.
+        this._cpuAverageTimePerToken =
+          (totalTime +
+            this._CPU_AVERAGE_TIME_DAMPING * this._cpuAverageTimePerToken) /
+          (batchCount + this._CPU_AVERAGE_TIME_DAMPING);
+        // We use the larger of the recent or the average time per token, so
+        // that we can respond quickly to slow down indexing if there
+        // is a sudden increase in time per token.
+        let bestTimePerToken = Math.max(
+          timePerToken,
+          this._cpuAverageTimePerToken
+        );
+        // Always index at least one token!
+        this._indexTokens = Math.max(
+          1,
+          this._cpuTargetIndexTime / bestTimePerToken
+        );
+        // But no more than a maximum limit, just for sanity's sake.
+        this._indexTokens = Math.min(
+          this._CPU_MAX_TOKENS_PER_BATCH,
+          this._indexTokens
+        );
+        this._indexTokens = Math.ceil(this._indexTokens);
+      }
+
+      // Should we try to commit now?
+      let elapsed = Date.now() - this._lastCommitTime;
+      // Commit tends to cause a brief UI pause, so we try to delay it (but not
+      // forever) if the user is active. If we're done and idling, we'll also
+      // commit, otherwise we'll let the idle callback do it.
+      let doCommit =
+        transactionToCommit &&
+        (elapsed > this._MAXIMUM_COMMIT_TIME ||
+          (inIdle && (elapsed > this._MINIMUM_COMMIT_TIME || !haveMoreWork)));
+      if (doCommit) {
+        lazy.GlodaCollectionManager.cacheCommitDirty();
+        // Set up an async notification to happen after the commit completes so
+        // that we can avoid the indexer doing something with the database that
+        // causes the main thread to block against the completion of the commit
+        // (which can be a while) on 1.9.1.
+        lazy.GlodaDatastore.runPostCommit(this._callbackHandle.wrappedCallback);
+        // kick off the commit
+        lazy.GlodaDatastore._commitTransaction();
+        yield GlodaConstants.kWorkAsync;
+        this._lastCommitTime = Date.now();
+        // Restart the transaction if we still have work.
+        if (haveMoreWork) {
+          lazy.GlodaDatastore._beginTransaction();
+        } else {
+          transactionToCommit = false;
+        }
+      }
+    }
+
+    this._notifyListeners();
+
+    // If we still have a transaction to commit, tell idle to do the commit
+    // when it gets around to it.
+    if (transactionToCommit) {
+      this._idleToCommit = true;
+    }
+
+    yield GlodaConstants.kWorkDone;
+  },
+  /* eslint-enable complexity */
+
+  /**
+   * Maps indexing job type names to a worker definition.
+   * The worker definition is an object with the following attributes where
+   * only worker is required:
+   * - worker: A generator function invoked as
+   *   worker.call(indexer, job, callbackHandle); it performs the actual
+   *   indexing work for the job.
+   * - onSchedule: A function to be invoked when the worker is scheduled. The
+   *   job is passed as an argument.
+   * - recover: Invoked (with the indexer, job, contextStack, and exception)
+   *   when the worker throws; may return a stack depth to recover to (see
+   *   workBatch).
+   * - cleanup: Invoked with the job when the worker fails without recovering
+   *   or the job is killed (see workBatch / killActiveJob).
+   * - jobCanceled: Invoked with the job when it is purged from the queue
+   *   before running (see purgeJobsUsingFilter).
+   */
+  _indexerWorkerDefs: {},
+  /**
+   * Perform the initialization step and return a generator if there is any
+   * steady-state processing to be had.
+   */
+  _hireJobWorker() {
+    // In no circumstances should there be data bouncing around from previous
+    // calls if we are here. |killActiveJob| depends on this.
+    this._workBatchData = undefined;
+
+    if (this._indexQueue.length == 0) {
+      this._log.info("--- Done indexing, disabling timer renewal.");
+
+      this._curIndexingJob = null;
+      this._indexingDesired = false;
+      this._indexingJobCount = 0;
+      return false;
+    }
+
+    let job = (this._curIndexingJob = this._indexQueue.shift());
+    this._indexingJobCount++;
+
+    let generator = null;
+
+    if (job.jobType in this._indexerWorkerDefs) {
+      let workerDef = this._indexerWorkerDefs[job.jobType];
+      job._workerDef = workerDef;
+
+      // Prior to creating the worker, call the scheduling trigger function
+      // if there is one. This is so that jobs can be finalized. The
+      // initial use case is event-driven message indexing that accumulates
+      // a list of messages to index but wants it locked down once we start
+      // processing the list.
+      if (workerDef.onSchedule) {
+        workerDef.onSchedule.call(workerDef.indexer, job);
+      }
+
+      generator = workerDef.worker.call(
+        workerDef.indexer,
+        job,
+        this._callbackHandle
+      );
+    } else {
+      // Nothing we can do about this. Be loud about it and try to schedule
+      // something else.
+      this._log.error("Unknown job type: " + job.jobType);
+      // Recurse; the queue shrank by one, so this terminates.
+      return this._hireJobWorker();
+    }
+
+    if (this._unitTestSuperVerbose) {
+      this._log.debug("Hired job of type: " + job.jobType);
+    }
+
+    this._notifyListeners();
+
+    if (generator) {
+      this._callbackHandle.push(generator);
+      return true;
+    }
+    return false;
+  },
+
+ /**
+ * Schedule a job for indexing.
+ */
+ indexJob(aJob) {
+ this._log.info("Queue-ing job for indexing: " + aJob.jobType);
+
+ this._indexQueue.push(aJob);
+ this.indexing = true;
+ },
+
+  /**
+   * Kill the active job. This means a few things:
+   * - Kill all the generators in the callbackHandle stack.
+   * - If we are currently waiting on an async return, we need to make sure it
+   *   does not screw us up.
+   * - Make sure the job's cleanup function gets called if appropriate.
+   *
+   * The async return case is actually not too troublesome. Since there is an
+   * active indexing job and we are not (by fiat) in that call stack, we know
+   * that the callback driver is guaranteed to get triggered again somehow.
+   * The only issue is to make sure that _workBatchData does not end up with
+   * the data. We compel |_hireJobWorker| to erase it to this end.
+   *
+   * @note You MUST NOT call this function from inside a job or an async function
+   *     on the callbackHandle's stack of generators. If you are in that
+   *     situation, you should just throw an exception. At the very least,
+   *     use a timeout to trigger us.
+   */
+  killActiveJob() {
+    // There is nothing to do if we have no job
+    if (!this._curIndexingJob) {
+      return;
+    }
+
+    // -- Blow away the stack with cleanup.
+    // _hireJobWorker stored _workerDef on the job before starting it.
+    let workerDef = this._curIndexingJob._workerDef;
+    if (this._unitTestSuperVerbose) {
+      this._log.debug("Killing job of type: " + this._curIndexingJob.jobType);
+    }
+    if (this._unitTestHookCleanup) {
+      this._unitTestHookCleanup(
+        !!workerDef.cleanup,
+        "no exception, this was killActiveJob",
+        this._curIndexingJob,
+        this._callbackHandle
+      );
+    }
+    this._callbackHandle.cleanup();
+    if (workerDef.cleanup) {
+      workerDef.cleanup.call(workerDef.indexer, this._curIndexingJob);
+    }
+
+    // Eliminate the job.
+    this._curIndexingJob = null;
+  },
+
+ /**
+ * Purge all jobs that the filter function returns true for. This does not
+ * kill the active job, use |killActiveJob| to do that.
+ *
+ * Make sure to call this function before killActiveJob
+ *
+ * @param aFilterElimFunc A filter function that takes an |IndexingJob| and
+ * returns true if the job should be purged, false if it should not be.
+ * The filter sees the jobs in the order they are scheduled.
+ */
+ purgeJobsUsingFilter(aFilterElimFunc) {
+ for (let iJob = 0; iJob < this._indexQueue.length; iJob++) {
+ let job = this._indexQueue[iJob];
+
+ // If the filter says to, splice the job out of existence (and make sure
+ // to fixup iJob to compensate.)
+ if (aFilterElimFunc(job)) {
+ if (this._unitTestSuperVerbose) {
+ this._log.debug("Purging job of type: " + job.jobType);
+ }
+ this._indexQueue.splice(iJob--, 1);
+ let workerDef = this._indexerWorkerDefs[job.jobType];
+ if (workerDef.jobCanceled) {
+ workerDef.jobCanceled.call(workerDef.indexer, job);
+ }
+ }
+ }
+ },
+
+  /* *********** Event Processing *********** */
+  /**
+   * nsIObserver entry point. Handles the "idle" notification (deferred
+   * transaction commit), "network:offline-status-changed" (suppress/resume
+   * indexing), and "quit-application" (shutdown fallback).
+   */
+  observe(aSubject, aTopic, aData) {
+    // idle
+    if (aTopic == "idle") {
+      // Do we need to commit an indexer transaction?
+      if (this._idleToCommit) {
+        this._idleToCommit = false;
+        lazy.GlodaCollectionManager.cacheCommitDirty();
+        lazy.GlodaDatastore._commitTransaction();
+        this._lastCommitTime = Date.now();
+        this._notifyListeners();
+      }
+    } else if (aTopic == "network:offline-status-changed") {
+      // offline status
+      if (aData == "offline") {
+        this.suppressIndexing = true;
+      } else {
+        // online
+        this.suppressIndexing = false;
+      }
+    } else if (aTopic == "quit-application") {
+      // shutdown fallback
+      this._shutdown();
+    }
+  },
+};
+// we used to initialize here; now we have GlodaPublic.jsm do it for us after the
+// indexers register themselves so we know about all our built-in indexers
+// at init-time.
diff --git a/comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm b/comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm
new file mode 100644
index 0000000000..54ceacb59a
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaMsgIndexer.jsm
@@ -0,0 +1,310 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaABIndexer", "GlodaABAttrs"];
+
+const { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { GlodaIndexer, IndexingJob } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+const { FreeTagNoun } = ChromeUtils.import(
+ "resource:///modules/gloda/NounFreetag.jsm"
+);
+
+var GlodaABIndexer = {
+ _log: null,
+ _notifications: [
+ "addrbook-contact-created",
+ "addrbook-contact-updated",
+ "addrbook-contact-deleted",
+ ],
+
+ name: "index_ab",
+ enable() {
+ if (this._log == null) {
+ this._log = console.createInstance({
+ prefix: "gloda.index_ab",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+ }
+
+ for (let topic of this._notifications) {
+ Services.obs.addObserver(this, topic);
+ }
+ },
+
+ disable() {
+ for (let topic of this._notifications) {
+ Services.obs.removeObserver(this, topic);
+ }
+ },
+
+ // it's a getter so we can reference 'this'
+ get workers() {
+ return [
+ [
+ "ab-card",
+ {
+ worker: this._worker_index_card,
+ },
+ ],
+ ];
+ },
+
+ *_worker_index_card(aJob, aCallbackHandle) {
+ let card = aJob.id;
+
+ if (card.primaryEmail) {
+ // load the identity
+ let query = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+ query.kind("email");
+ // we currently normalize all e-mail addresses to be lowercase
+ query.value(card.primaryEmail.toLowerCase());
+ let identityCollection = query.getCollection(aCallbackHandle);
+ yield GlodaConstants.kWorkAsync;
+
+ if (identityCollection.items.length) {
+ let identity = identityCollection.items[0];
+ // force the identity to know it has an associated ab card.
+ identity._hasAddressBookCard = true;
+
+ this._log.debug("Found identity, processing card.");
+ yield aCallbackHandle.pushAndGo(
+ Gloda.grokNounItem(
+ identity.contact,
+ { card },
+ false,
+ false,
+ aCallbackHandle
+ )
+ );
+ this._log.debug("Done processing card.");
+ }
+ }
+
+ yield GlodaConstants.kWorkDone;
+ },
+
+ initialSweep() {},
+
+ observe(subject, topic, data) {
+ subject.QueryInterface(Ci.nsIAbCard);
+
+ switch (topic) {
+ case "addrbook-contact-created": {
+ // When an address book card is added, update the cached GlodaIdentity
+ // object's cached idea of whether the identity has an ab card.
+ this._log.debug("Received Card Add Notification");
+
+ let identity = GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaConstants.NOUN_IDENTITY,
+ "email@" + subject.primaryEmail.toLowerCase()
+ );
+ if (identity) {
+ identity._hasAddressBookCard = true;
+ }
+ break;
+ }
+ case "addrbook-contact-updated": {
+ this._log.debug("Received Card Change Notification");
+
+ let job = new IndexingJob("ab-card", subject);
+ GlodaIndexer.indexJob(job);
+ break;
+ }
+ case "addrbook-contact-deleted": {
+        // When an address book card is removed, update the cached GlodaIdentity
+        // object's cached idea of whether the identity has an ab card.
+ this._log.debug("Received Card Removal Notification");
+
+ let identity = GlodaCollectionManager.cacheLookupOneByUniqueValue(
+ GlodaConstants.NOUN_IDENTITY,
+ "email@" + subject.primaryEmail.toLowerCase()
+ );
+ if (identity) {
+ identity._hasAddressBookCard = false;
+ }
+ break;
+ }
+ }
+ },
+};
+GlodaIndexer.registerIndexer(GlodaABIndexer);
+
+var GlodaABAttrs = {
+ providerName: "gloda.ab_attr",
+ _log: null,
+
+ init() {
+ this._log = console.createInstance({
+ prefix: "gloda.abattrs",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+ });
+
+ try {
+ this.defineAttributes();
+ } catch (ex) {
+ this._log.error("Error in init: " + ex);
+ throw ex;
+ }
+ },
+
+ defineAttributes() {
+ /* ***** Contacts ***** */
+ this._attrIdentityContact = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "identities",
+ singular: false,
+ special: GlodaConstants.kSpecialColumnChildren,
+ // specialColumnName: "contactID",
+ storageAttributeName: "_identities",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_IDENTITY,
+ }); // tested-by: test_attributes_fundamental
+ this._attrContactName = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "name",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "name",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+ this._attrContactPopularity = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "popularity",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "popularity",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ }); // not-tested
+ this._attrContactFrecency = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "frecency",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "frecency",
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ }); // not-tested
+
+ /* ***** Identities ***** */
+ this._attrIdentityContact = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrDerived,
+ attributeName: "contact",
+ singular: true,
+ special: GlodaConstants.kSpecialColumnParent,
+ specialColumnName: "contactID", // the column in the db
+ idStorageAttributeName: "_contactID",
+ valueStorageAttributeName: "_contact",
+ subjectNouns: [GlodaConstants.NOUN_IDENTITY],
+ objectNoun: GlodaConstants.NOUN_CONTACT,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+ this._attrIdentityKind = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "kind",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "kind",
+ subjectNouns: [GlodaConstants.NOUN_IDENTITY],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+ this._attrIdentityValue = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "value",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "value",
+ subjectNouns: [GlodaConstants.NOUN_IDENTITY],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ }); // tested-by: test_attributes_fundamental
+
+ /* ***** Contact Meta ***** */
+ // Freeform tags; not explicit like thunderbird's fundamental tags.
+ // we differentiate for now because of fundamental implementation
+ // differences.
+ this._attrFreeTag = Gloda.defineAttribute({
+ provider: this,
+ extensionName: GlodaConstants.BUILT_IN,
+ attributeType: GlodaConstants.kAttrExplicit,
+ attributeName: "freetag",
+ bind: true,
+ bindName: "freeTags",
+ singular: false,
+ subjectNouns: [GlodaConstants.NOUN_CONTACT],
+ objectNoun: Gloda.lookupNoun("freetag"),
+ parameterNoun: null,
+ canQuery: true,
+ }); // not-tested
+ // we need to find any existing bound freetag attributes, and use them to
+ // populate to FreeTagNoun's understanding
+ if ("parameterBindings" in this._attrFreeTag) {
+ for (let freeTagName in this._attrFreeTag.parameterBindings) {
+ this._log.debug("Telling FreeTagNoun about: " + freeTagName);
+ FreeTagNoun.getFreeTag(freeTagName);
+ }
+ }
+ },
+
+ *process(aContact, aRawReps, aIsNew, aCallbackHandle) {
+ let card = aRawReps.card;
+ if (aContact.NOUN_ID != GlodaConstants.NOUN_CONTACT) {
+ this._log.warn("Somehow got a non-contact: " + aContact);
+ return; // this will produce an exception; we like.
+ }
+
+ // update the name
+ if (card.displayName && card.displayName != aContact.name) {
+ aContact.name = card.displayName;
+ }
+
+ aContact.freeTags = [];
+
+ let tags = null;
+ try {
+ tags = card.getProperty("Categories", null);
+ } catch (ex) {
+ this._log.error("Problem accessing property: " + ex);
+ }
+ if (tags) {
+ for (let tagName of tags.split(",")) {
+ tagName = tagName.trim();
+ if (tagName) {
+ aContact.freeTags.push(FreeTagNoun.getFreeTag(tagName));
+ }
+ }
+ }
+
+ yield GlodaConstants.kWorkDone;
+ },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm b/comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm
new file mode 100644
index 0000000000..f81def2560
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaMsgSearcher.jsm
@@ -0,0 +1,361 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaMsgSearcher"];
+
+const { Gloda } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaPublic.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+/**
+ * How much time boost should a 'score point' amount to? The authoritative,
+ * incontrovertible answer, across all time and space, is a week.
+ * Note that gloda stores timestamps as PRTimes for no exceedingly good
+ * reason.
+ */
+var FUZZSCORE_TIMESTAMP_FACTOR = 1000 * 1000 * 60 * 60 * 24 * 7;
+
+var RANK_USAGE = "glodaRank(matchinfo(messagesText), 1.0, 2.0, 2.0, 1.5, 1.5)";
+
+var DASCORE =
+ "(((" +
+ RANK_USAGE +
+ " + messages.notability) * " +
+ FUZZSCORE_TIMESTAMP_FACTOR +
+ ") + messages.date)";
+
+/**
+ * A new optimization decision we are making is that we do not want to carry
+ * around any data in our ephemeral tables that is not used for whittling the
+ * result set. The idea is that the btree page cache or OS cache is going to
+ * save us from the disk seeks and carrying around the extra data is just going
+ * to be CPU/memory churn that slows us down.
+ *
+ * Additionally, we try and avoid row lookups that would have their results
+ * discarded by the LIMIT. Because of limitations in FTS3 (which might
+ * be addressed in FTS4 by a feature request), we can't avoid the 'messages'
+ * lookup since that has the message's date and static notability but we can
+ * defer the 'messagesText' lookup.
+ *
+ * This is the access pattern we are after here:
+ * 1) Order the matches with minimized lookup and result storage costs.
+ * - The innermost MATCH does the doclist magic and provides us with
+ * matchinfo() support which does not require content row retrieval
+ * from messagesText. Unfortunately, this is not enough to whittle anything
+ * because we still need static interestingness, so...
+ * - Based on the match we retrieve the date and notability for that row from
+ * 'messages' using this in conjunction with matchinfo() to provide a score
+ * that we can then use to LIMIT our results.
+ * 2) We reissue the MATCH query so that we will be able to use offsets(), but
+ * we intersect the results of this MATCH against our LIMITed results from
+ * step 1.
+ * - We use 'docid IN (phase 1 query)' to accomplish this because it results in
+ * efficient lookup. If we just use a join, we get O(mn) performance because
+ * a cartesian join ends up being performed where either we end up performing
+ * the fulltext query M times and table scan intersect with the results from
+ * phase 1 or we do the fulltext once but traverse the entire result set from
+ * phase 1 N times.
+ * - We believe that the re-execution of the MATCH query should have no disk
+ * costs because it should still be cached by SQLite or the OS. In the case
+ * where memory is so constrained this is not true our behavior is still
+ * probably preferable than the old way because that would have caused lots
+ * of swapping.
+ * - This part of the query otherwise resembles the basic gloda query but with
+ * the inclusion of the offsets() invocation. The messages table lookup
+ * should not involve any disk traffic because the pages should still be
+ * cached (SQLite or OS) from phase 1. The messagesText lookup is new, and
+ * this is the major disk-seek reduction optimization we are making. (Since
+ * we avoid this lookup for all of the documents that were excluded by the
+ * LIMIT.) Since offsets() also needs to retrieve the row from messagesText
+ * there is a nice synergy there.
+ */
+var NUEVO_FULLTEXT_SQL =
+ "SELECT messages.*, messagesText.*, offsets(messagesText) AS osets " +
+ "FROM messagesText, messages " +
+ "WHERE" +
+ " messagesText MATCH ?1 " +
+ " AND messagesText.docid IN (" +
+ "SELECT docid " +
+ "FROM messagesText JOIN messages ON messagesText.docid = messages.id " +
+ "WHERE messagesText MATCH ?1 " +
+ "ORDER BY " +
+ DASCORE +
+ " DESC " +
+ "LIMIT ?2" +
+ " )" +
+ " AND messages.id = messagesText.docid " +
+ " AND +messages.deleted = 0" +
+ " AND +messages.folderID IS NOT NULL" +
+ " AND +messages.messageKey IS NOT NULL";
+
+function identityFunc(x) {
+ return x;
+}
+
+function oneLessMaxZero(x) {
+ if (x <= 1) {
+ return 0;
+ }
+ return x - 1;
+}
+
+function reduceSum(accum, curValue) {
+ return accum + curValue;
+}
+
+/*
+ * Columns are: body, subject, attachment names, author, recipients
+ */
+
+/**
+ * Scores if all search terms match in a column. We bias against author
+ * slightly and recipient a bit more in this case because a search that
+ * entirely matches just on a person should give a mention of that person
+ * in the subject or attachment a fighting chance.
+ * Keep in mind that because of our indexing in the face of address book
+ * contacts (namely, we index the name used in the e-mail as well as the
+ * display name on the address book card associated with the e-mail address)
+ * a contact is going to bias towards matching multiple times.
+ */
+var COLUMN_ALL_MATCH_SCORES = [4, 20, 20, 16, 12];
+/**
+ * Score for each distinct term that matches in the column. This is capped
+ * by COLUMN_ALL_MATCH_SCORES.
+ */
+var COLUMN_PARTIAL_PER_MATCH_SCORES = [1, 4, 4, 4, 3];
+/**
+ * If a term matches multiple times, what is the marginal score for each
+ * additional match. We count the total number of matches beyond the
+ * first match for each term. In other words, if we have 3 terms which
+ * matched 5, 3, and 0 times, then the total from our perspective is
+ * (5 - 1) + (3 - 1) + 0 = 4 + 2 + 0 = 6. We take the minimum of that value
+ * and the value in COLUMN_MULTIPLE_MATCH_LIMIT and multiply by the value in
+ * COLUMN_MULTIPLE_MATCH_SCORES.
+ */
+var COLUMN_MULTIPLE_MATCH_SCORES = [1, 0, 0, 0, 0];
+var COLUMN_MULTIPLE_MATCH_LIMIT = [10, 0, 0, 0, 0];
+
+/**
+ * Score the message on its offsets (from stashedColumns).
+ */
+function scoreOffsets(aMessage, aContext) {
+ let score = 0;
+
+ let termTemplate = aContext.terms.map(_ => 0);
+ // for each column, a list of the incidence of each term
+ let columnTermIncidence = [
+ termTemplate.concat(),
+ termTemplate.concat(),
+ termTemplate.concat(),
+ termTemplate.concat(),
+ termTemplate.concat(),
+ ];
+
+  // Wrap parseInt in an arrow function: passing parseInt directly to map
+  // would make map's index argument be misinterpreted as the radix.
+ let offsetNums = aContext.stashedColumns[aMessage.id][0]
+ .split(" ")
+ .map(x => parseInt(x));
+ for (let i = 0; i < offsetNums.length; i += 4) {
+ let columnIndex = offsetNums[i];
+ let termIndex = offsetNums[i + 1];
+ columnTermIncidence[columnIndex][termIndex]++;
+ }
+
+ for (let iColumn = 0; iColumn < COLUMN_ALL_MATCH_SCORES.length; iColumn++) {
+ let termIncidence = columnTermIncidence[iColumn];
+ if (termIncidence.every(identityFunc)) {
+ // Bestow all match credit.
+ score += COLUMN_ALL_MATCH_SCORES[iColumn];
+ } else if (termIncidence.some(identityFunc)) {
+ // Bestow partial match credit.
+ score += Math.min(
+ COLUMN_ALL_MATCH_SCORES[iColumn],
+ COLUMN_PARTIAL_PER_MATCH_SCORES[iColumn] *
+ termIncidence.filter(identityFunc).length
+ );
+ }
+ // Bestow multiple match credit.
+ score +=
+ Math.min(
+ termIncidence.map(oneLessMaxZero).reduce(reduceSum, 0),
+ COLUMN_MULTIPLE_MATCH_LIMIT[iColumn]
+ ) * COLUMN_MULTIPLE_MATCH_SCORES[iColumn];
+ }
+
+ return score;
+}
+
+/**
+ * The searcher basically looks like a query, but is specialized for fulltext
+ * search against messages. Most of the explicit specialization involves
+ * crafting a SQL query that attempts to order the matches by likelihood that
+ * the user was looking for it. This is based on full-text matches combined
+ * with an explicit (generic) interest score value placed on the message at
+ * indexing time. This is followed by using the more generic gloda scoring
+ * mechanism to explicitly score the messages given the search context in
+ * addition to the more generic score adjusting rules.
+ */
+function GlodaMsgSearcher(aListener, aSearchString, aAndTerms) {
+ this.listener = aListener;
+
+ this.searchString = aSearchString;
+ this.fulltextTerms = this.parseSearchString(aSearchString);
+ this.andTerms = aAndTerms != null ? aAndTerms : true;
+
+ this.query = null;
+ this.collection = null;
+
+ this.scores = null;
+}
+GlodaMsgSearcher.prototype = {
+ /**
+ * Number of messages to retrieve initially.
+ */
+ get retrievalLimit() {
+ return Services.prefs.getIntPref(
+ "mailnews.database.global.search.msg.limit"
+ );
+ },
+
+ /**
+ * Parse the string into terms/phrases by finding matching double-quotes.
+ */
+ parseSearchString(aSearchString) {
+ aSearchString = aSearchString.trim();
+ let terms = [];
+
+ /*
+ * Add the term as long as the trim on the way in didn't obliterate it.
+ *
+ * In the future this might have other helper logic; it did once before.
+ */
+ function addTerm(aTerm) {
+ if (aTerm) {
+ terms.push(aTerm);
+ }
+ }
+
+ while (aSearchString) {
+ if (aSearchString.startsWith('"')) {
+ let endIndex = aSearchString.indexOf(aSearchString[0], 1);
+ // eat the quote if it has no friend
+ if (endIndex == -1) {
+ aSearchString = aSearchString.substring(1);
+ continue;
+ }
+
+ addTerm(aSearchString.substring(1, endIndex).trim());
+ aSearchString = aSearchString.substring(endIndex + 1);
+ continue;
+ }
+
+ let spaceIndex = aSearchString.indexOf(" ");
+ if (spaceIndex == -1) {
+ addTerm(aSearchString);
+ break;
+ }
+
+ addTerm(aSearchString.substring(0, spaceIndex));
+ aSearchString = aSearchString.substring(spaceIndex + 1);
+ }
+
+ return terms;
+ },
+
+ buildFulltextQuery() {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+ noMagic: true,
+ explicitSQL: NUEVO_FULLTEXT_SQL,
+ limitClauseAlreadyIncluded: true,
+ // osets is 0-based column number 14 (volatile to column changes)
+ // save the offset column for extra analysis
+ stashColumns: [14],
+ });
+
+ let fulltextQueryString = "";
+
+ for (let [iTerm, term] of this.fulltextTerms.entries()) {
+ if (iTerm) {
+ fulltextQueryString += this.andTerms ? " " : " OR ";
+ }
+
+ // Put our term in quotes. This is needed for the tokenizer to be able
+ // to do useful things. The exception is people clever enough to use
+ // NEAR.
+ if (/^NEAR(\/\d+)?$/.test(term)) {
+ fulltextQueryString += term;
+ } else if (term.length == 1 && term.charCodeAt(0) >= 0x2000) {
+ // This is a single-character CJK search query, so add a wildcard.
+ // Our tokenizer treats anything at/above 0x2000 as CJK for now.
+ fulltextQueryString += term + "*";
+ } else if (
+ (term.length == 2 &&
+ term.charCodeAt(0) >= 0x2000 &&
+ term.charCodeAt(1) >= 0x2000) ||
+ term.length >= 3
+ ) {
+ fulltextQueryString += '"' + term + '"';
+ }
+ }
+
+ query.fulltextMatches(fulltextQueryString);
+ query.limit(this.retrievalLimit);
+
+ return query;
+ },
+
+ getCollection(aListenerOverride, aData) {
+ if (aListenerOverride) {
+ this.listener = aListenerOverride;
+ }
+
+ this.query = this.buildFulltextQuery();
+ this.collection = this.query.getCollection(this, aData);
+ this.completed = false;
+
+ return this.collection;
+ },
+
+ sortBy: "-dascore",
+
+ onItemsAdded(aItems, aCollection) {
+ let newScores = Gloda.scoreNounItems(
+ aItems,
+ {
+ terms: this.fulltextTerms,
+ stashedColumns: aCollection.stashedColumns,
+ },
+ [scoreOffsets]
+ );
+ if (this.scores) {
+ this.scores = this.scores.concat(newScores);
+ } else {
+ this.scores = newScores;
+ }
+
+ if (this.listener) {
+ this.listener.onItemsAdded(aItems, aCollection);
+ }
+ },
+ onItemsModified(aItems, aCollection) {
+ if (this.listener) {
+ this.listener.onItemsModified(aItems, aCollection);
+ }
+ },
+ onItemsRemoved(aItems, aCollection) {
+ if (this.listener) {
+ this.listener.onItemsRemoved(aItems, aCollection);
+ }
+ },
+ onQueryCompleted(aCollection) {
+ this.completed = true;
+ if (this.listener) {
+ this.listener.onQueryCompleted(aCollection);
+ }
+ },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaPublic.jsm b/comm/mailnews/db/gloda/modules/GlodaPublic.jsm
new file mode 100644
index 0000000000..555a6d8921
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaPublic.jsm
@@ -0,0 +1,45 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["Gloda"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+/* nothing to import, just run some code */ ChromeUtils.import(
+ "resource:///modules/gloda/Everybody.jsm"
+);
+const { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+// initialize the indexer! (who was actually imported as a nested dep by the
+// things Everybody.jsm imported.) We waited until now so it could know about
+// its indexers.
+GlodaIndexer._init();
+const { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+/**
+ * Expose some junk
+ */
+function proxy(aSourceObj, aSourceAttr, aDestObj, aDestAttr) {
+ aDestObj[aDestAttr] = function (...aArgs) {
+ return aSourceObj[aSourceAttr](...aArgs);
+ };
+}
+
+proxy(GlodaIndexer, "addListener", Gloda, "addIndexerListener");
+proxy(GlodaIndexer, "removeListener", Gloda, "removeIndexerListener");
+proxy(GlodaMsgIndexer, "isMessageIndexed", Gloda, "isMessageIndexed");
+proxy(
+ GlodaMsgIndexer,
+ "setFolderIndexingPriority",
+ Gloda,
+ "setFolderIndexingPriority"
+);
+proxy(
+ GlodaMsgIndexer,
+ "resetFolderIndexingPriority",
+ Gloda,
+ "resetFolderIndexingPriority"
+);
diff --git a/comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm b/comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm
new file mode 100644
index 0000000000..2e53cf5925
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaQueryClassFactory.jsm
@@ -0,0 +1,642 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaQueryClassFactory"];
+
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+/**
+ * @class Query class core; each noun gets its own sub-class where attributes
+ * have helper methods bound.
+ *
+ * @param aOptions A dictionary of options. Current legal options are:
+ * - noMagic: Indicates that the noun's dbQueryJoinMagic should be ignored.
+ * Currently, this means that messages will not have their
+ * full-text indexed values re-attached. This is planned to be
+ * offset by having queries/cache lookups that do not request
+ * noMagic to ensure that their data does get loaded.
+ * - explicitSQL: A hand-rolled alternate representation for the core
+ * SELECT portion of the SQL query. The queryFromQuery logic still
+ * generates its normal query, we just ignore its result in favor of
+ * your provided value. This means that the positional parameter
+ * list is still built and you should/must rely on those bound
+ * parameters (using '?'). The replacement occurs prior to the
+ * outerWrapColumns, ORDER BY, and LIMIT contributions to the query.
+ * - outerWrapColumns: If provided, wraps the query in a "SELECT *,blah
+ * FROM (actual query)" where blah is your list of outerWrapColumns
+ * made comma-delimited. The idea is that this allows you to
+ * reference the result of expressions inside the query using their
+ * names rather than having to duplicate the logic. In practice,
+ * this makes things more readable but is unlikely to improve
+ * performance. (Namely, my use of 'offsets' for full-text stuff
+ * ends up in the EXPLAIN plan twice despite this.)
+ * - noDbQueryValidityConstraints: Indicates that any validity constraints
+ * should be ignored. This should be used when you need to get every
+ * match regardless of whether it's valid.
+ *
+ * @property _owner The query instance that holds the list of unions...
+ * @property _constraints A list of (lists of OR constraints) that are ANDed
+ * together. For example [[FROM bob, FROM jim], [DATE last week]] would
+ * be requesting us to find all the messages from either bob or jim, and
+ * sent in the last week.
+ * @property _unions A list of other queries whose results are unioned with our
+ * own. There is no concept of nesting or sub-queries apart from this
+ * mechanism.
+ */
+function GlodaQueryClass(aOptions) {
+ this.options = aOptions != null ? aOptions : {};
+
+ // if we are an 'or' clause, who is our parent whom other 'or' clauses should
+ // spawn from...
+ this._owner = null;
+ // our personal chain of and-ing.
+ this._constraints = [];
+ // the other instances we union with
+ this._unions = [];
+
+ this._order = [];
+ this._limit = 0;
+}
+
+GlodaQueryClass.prototype = {
+ WILDCARD: {},
+
+ get constraintCount() {
+ return this._constraints.length;
+ },
+
+ or() {
+ let owner = this._owner || this;
+ let orQuery = new this._queryClass();
+ orQuery._owner = owner;
+ owner._unions.push(orQuery);
+ return orQuery;
+ },
+
+ orderBy(...aArgs) {
+ this._order.push(...aArgs);
+ return this;
+ },
+
+ limit(aLimit) {
+ this._limit = aLimit;
+ return this;
+ },
+
+ /**
+ * Return a collection asynchronously populated by this collection. You must
+ * provide a listener to receive notifications from the collection as it
+ * receives updates. The listener object should implement onItemsAdded,
+ * onItemsModified, and onItemsRemoved methods, all of which take a single
+ * argument which is the list of items which have been added, modified, or
+ * removed respectively.
+ *
+ * @param aListener The collection listener.
+ * @param [aData] The data attribute to set on the collection.
+ * @param [aArgs.becomeExplicit] Make the collection explicit so that the
+ * collection will only ever contain results found from the database
+ * query and the query will not be updated as new items are indexed that
+ * also match the query.
+ * @param [aArgs.becomeNull] Change the collection's query to a null query so
+ * that it will never receive any additional added/modified/removed events
+ * apart from the underlying database query. This is really only intended
+ * for gloda internal use but may be acceptable for non-gloda use. Please
+ * ask on mozilla.dev.apps.thunderbird first to make sure there isn't a
+ * better solution for your use-case. (Note: removals will still happen
+ * when things get fully deleted.)
+ */
+ getCollection(aListener, aData, aArgs) {
+ this.completed = false;
+ return this._nounDef.datastore.queryFromQuery(
+ this,
+ aListener,
+ aData,
+ /* aExistingCollection */ null,
+ /* aMasterCollection */ null,
+ aArgs
+ );
+ },
+
+ /* eslint-disable complexity */
+ /**
+ * Test whether the given first-class noun instance satisfies this query.
+ *
+ * @testpoint gloda.query.test
+ */
+ test(aObj) {
+ // when changing this method, be sure that GlodaDatastore's queryFromQuery
+ // method likewise has any required changes made.
+ let unionQueries = [this].concat(this._unions);
+
+ for (let iUnion = 0; iUnion < unionQueries.length; iUnion++) {
+ let curQuery = unionQueries[iUnion];
+
+ // assume success until a specific (or) constraint proves us wrong
+ let querySatisfied = true;
+ for (
+ let iConstraint = 0;
+ iConstraint < curQuery._constraints.length;
+ iConstraint++
+ ) {
+ let constraint = curQuery._constraints[iConstraint];
+ let [constraintType, attrDef] = constraint;
+ let boundName = attrDef ? attrDef.boundName : "id";
+ if (
+ boundName in aObj &&
+ aObj[boundName] === GlodaConstants.IGNORE_FACET
+ ) {
+ querySatisfied = false;
+ break;
+ }
+
+ let constraintValues = constraint.slice(2);
+
+ if (constraintType === GlodaConstants.kConstraintIdIn) {
+ if (!constraintValues.includes(aObj.id)) {
+ querySatisfied = false;
+ break;
+ }
+ } else if (
+ constraintType === GlodaConstants.kConstraintIn ||
+ constraintType === GlodaConstants.kConstraintEquals
+ ) {
+ // @testpoint gloda.query.test.kConstraintIn
+ let objectNounDef = attrDef.objectNounDef;
+
+ // if they provide an equals comparator, use that.
+ // (note: the next case has better optimization possibilities than
+ // this mechanism, but of course has higher initialization costs or
+ // code complexity costs...)
+ if (objectNounDef.equals) {
+ let testValues;
+ if (!(boundName in aObj)) {
+ testValues = [];
+ } else if (attrDef.singular) {
+ testValues = [aObj[boundName]];
+ } else {
+ testValues = aObj[boundName];
+ }
+
+ // If there are no constraints, then we are just testing for there
+ // being a value. Succeed (continue) in that case.
+ if (
+ constraintValues.length == 0 &&
+ testValues.length &&
+ testValues[0] != null
+ ) {
+ continue;
+ }
+
+ // If there are no test values and the empty set is significant,
+ // then check if any of the constraint values are null (our
+ // empty indicator.)
+ if (testValues.length == 0 && attrDef.emptySetIsSignificant) {
+ let foundEmptySetSignifier = false;
+ for (let constraintValue of constraintValues) {
+ if (constraintValue == null) {
+ foundEmptySetSignifier = true;
+ break;
+ }
+ }
+ if (foundEmptySetSignifier) {
+ continue;
+ }
+ }
+
+ let foundMatch = false;
+ for (let testValue of testValues) {
+ for (let value of constraintValues) {
+ if (objectNounDef.equals(testValue, value)) {
+ foundMatch = true;
+ break;
+ }
+ }
+ if (foundMatch) {
+ break;
+ }
+ }
+ if (!foundMatch) {
+ querySatisfied = false;
+ break;
+ }
+ } else {
+ // otherwise, we need to convert everyone to their param/value form
+ // in order to test for equality
+ // let's just do the simple, obvious thing for now. which is
+ // what we did in the prior case but exploding values using
+ // toParamAndValue, and then comparing.
+ let testValues;
+ if (!(boundName in aObj)) {
+ testValues = [];
+ } else if (attrDef.singular) {
+ testValues = [aObj[boundName]];
+ } else {
+ testValues = aObj[boundName];
+ }
+
+ // If there are no constraints, then we are just testing for there
+ // being a value. Succeed (continue) in that case.
+ if (
+ constraintValues.length == 0 &&
+ testValues.length &&
+ testValues[0] != null
+ ) {
+ continue;
+ }
+ // If there are no test values and the empty set is significant,
+ // then check if any of the constraint values are null (our
+ // empty indicator.)
+ if (testValues.length == 0 && attrDef.emptySetIsSignificant) {
+ let foundEmptySetSignifier = false;
+ for (let constraintValue of constraintValues) {
+ if (constraintValue == null) {
+ foundEmptySetSignifier = true;
+ break;
+ }
+ }
+ if (foundEmptySetSignifier) {
+ continue;
+ }
+ }
+
+ let foundMatch = false;
+ for (let testValue of testValues) {
+ let [aParam, aValue] = objectNounDef.toParamAndValue(testValue);
+ for (let value of constraintValues) {
+ // skip empty set check sentinel values
+ if (value == null && attrDef.emptySetIsSignificant) {
+ continue;
+ }
+ let [bParam, bValue] = objectNounDef.toParamAndValue(value);
+ if (aParam == bParam && aValue == bValue) {
+ foundMatch = true;
+ break;
+ }
+ }
+ if (foundMatch) {
+ break;
+ }
+ }
+ if (!foundMatch) {
+ querySatisfied = false;
+ break;
+ }
+ }
+ } else if (constraintType === GlodaConstants.kConstraintRanges) {
+ // @testpoint gloda.query.test.kConstraintRanges
+ let objectNounDef = attrDef.objectNounDef;
+
+ let testValues;
+ if (!(boundName in aObj)) {
+ testValues = [];
+ } else if (attrDef.singular) {
+ testValues = [aObj[boundName]];
+ } else {
+ testValues = aObj[boundName];
+ }
+
+ let foundMatch = false;
+ for (let testValue of testValues) {
+ let [tParam, tValue] = objectNounDef.toParamAndValue(testValue);
+ for (let rangeTuple of constraintValues) {
+ let [lowerRValue, upperRValue] = rangeTuple;
+ if (lowerRValue == null) {
+ let [upperParam, upperValue] =
+ objectNounDef.toParamAndValue(upperRValue);
+ if (tParam == upperParam && tValue <= upperValue) {
+ foundMatch = true;
+ break;
+ }
+ } else if (upperRValue == null) {
+ let [lowerParam, lowerValue] =
+ objectNounDef.toParamAndValue(lowerRValue);
+ if (tParam == lowerParam && tValue >= lowerValue) {
+ foundMatch = true;
+ break;
+ }
+ } else {
+ // no one is null
+ let [upperParam, upperValue] =
+ objectNounDef.toParamAndValue(upperRValue);
+ let [lowerParam, lowerValue] =
+ objectNounDef.toParamAndValue(lowerRValue);
+ if (
+ tParam == lowerParam &&
+ tValue >= lowerValue &&
+ tParam == upperParam &&
+ tValue <= upperValue
+ ) {
+ foundMatch = true;
+ break;
+ }
+ }
+ }
+ if (foundMatch) {
+ break;
+ }
+ }
+ if (!foundMatch) {
+ querySatisfied = false;
+ break;
+ }
+ } else if (constraintType === GlodaConstants.kConstraintStringLike) {
+ // @testpoint gloda.query.test.kConstraintStringLike
+ let curIndex = 0;
+ let value = boundName in aObj ? aObj[boundName] : "";
+ // the attribute must be singular, we don't support arrays of strings.
+ for (let valuePart of constraintValues) {
+ if (typeof valuePart == "string") {
+ let index = value.indexOf(valuePart);
+ // if curIndex is null, we just need any match
+ // if it's not null, it must match the offset of our found match
+ if (curIndex === null) {
+ if (index == -1) {
+ querySatisfied = false;
+ } else {
+ curIndex = index + valuePart.length;
+ }
+ } else if (index != curIndex) {
+ querySatisfied = false;
+ } else {
+ curIndex = index + valuePart.length;
+ }
+ if (!querySatisfied) {
+ break;
+ }
+ } else {
+ // wild!
+ curIndex = null;
+ }
+ }
+ // curIndex must be null or equal to the length of the string
+ if (querySatisfied && curIndex !== null && curIndex != value.length) {
+ querySatisfied = false;
+ }
+ } else if (constraintType === GlodaConstants.kConstraintFulltext) {
+ // @testpoint gloda.query.test.kConstraintFulltext
+ // this is beyond our powers. Even if we have the fulltext content in
+ // memory, which we may not, the tokenization and such to perform
+ // the testing gets very complicated in the face of i18n, etc.
+ // so, let's fail if the item is not already in the collection, and
+ // let the testing continue if it is. (some other constraint may no
+ // longer apply...)
+ if (!(aObj.id in this.collection._idMap)) {
+ querySatisfied = false;
+ }
+ }
+
+ if (!querySatisfied) {
+ break;
+ }
+ }
+
+ if (querySatisfied) {
+ return true;
+ }
+ }
+ return false;
+ },
+ /* eslint-enable complexity */
+
+ /**
+ * Helper code for noun definitions of queryHelpers that want to build a
+ * traditional in/equals constraint. The goal is to let them build a range
+ * without having to know how we structure |_constraints|.
+ *
+ * @protected
+ */
+ _inConstraintHelper(aAttrDef, aValues) {
+ let constraint = [GlodaConstants.kConstraintIn, aAttrDef].concat(aValues);
+ this._constraints.push(constraint);
+ return this;
+ },
+
+ /**
+ * Helper code for noun definitions of queryHelpers that want to build a
+ * range. The goal is to let them build a range without having to know how
+ * we structure |_constraints| or requiring them to mark themselves as
+ * continuous to get a "Range".
+ *
+ * @protected
+ */
+ _rangedConstraintHelper(aAttrDef, aRanges) {
+ let constraint = [GlodaConstants.kConstraintRanges, aAttrDef].concat(
+ aRanges
+ );
+ this._constraints.push(constraint);
+ return this;
+ },
+};
+
+/**
+ * @class A query that never matches anything.
+ *
+ * Collections corresponding to this query are intentionally frozen in time and
+ * do not want to be notified of any updates. We need the collection to be
+ * registered with the collection manager so that the noun instances in the
+ * collection are always 'reachable' via the collection for as long as we might
+ * be handing out references to the instances. (The other way to avoid updates
+ * would be to not register the collection, but then items might not be
+ * reachable.)
+ * This is intended to be used in implementation details behind the gloda
+ * abstraction barrier. For example, the message indexer likes to be able
+ * to represent 'ghost' and deleted messages, but these should never be exposed
+ * to the user. For code simplicity, it wants to be able to use the query
+ * mechanism. But it doesn't want updates that are effectively
+ * nonsensical. For example, a ghost message that is reused by message
+ * indexing may already be present in a collection; when the collection manager
+ * receives an itemsAdded event, a GlodaExplicitQueryClass would result in
+ * an item added notification in that case, which would wildly not be desired.
+ */
+function GlodaNullQueryClass() {}
+
+GlodaNullQueryClass.prototype = {
+ /**
+ * No options; they are currently only needed for SQL query generation, which
+ * does not happen for null queries.
+ */
+ options: {},
+
+ /**
+ * Provide a duck-typing way of indicating to GlodaCollectionManager that our
+ * associated collection just doesn't want anything to change. Our test
+ * function is able to convey most of it, but special-casing has to happen
+ * somewhere, so it happens here.
+ */
+ frozen: true,
+
+ /**
+ * Since our query never matches anything, it doesn't make sense to let
+ * someone attempt to construct a boolean OR involving us.
+ *
+ * @returns null
+ */
+ or() {
+ return null;
+ },
+
+ /**
+ * Return nothing (null) because it does not make sense to create a collection
+ * based on a null query. This method is normally used (on a normal query)
+ * to return a collection populated by the constraints of the query. We
+ * match nothing, so we should return nothing. More importantly, you are
+ * currently doing something wrong if you try and do this, so null is
+ * appropriate. It may turn out that it makes sense for us to return an
+ * empty collection in the future for sentinel value purposes, but we'll
+ * cross that bridge when we come to it.
+ *
+ * @returns null
+ */
+ getCollection() {
+ return null;
+ },
+
+ /**
+ * Never matches anything.
+ *
+ * @param aObj The object someone wants us to test for relevance to our
+ * associated collection. But we don't care! Not a fig!
+ * @returns false
+ */
+ test(aObj) {
+ return false;
+ },
+};
+
+/**
+ * @class A query that only 'tests' for already belonging to the collection.
+ *
+ * This type of collection is useful for when you (or rather your listener)
+ * are interested in hearing about modifications to your collection or removals
+ * from your collection because of deletion, but do not want to be notified
+ * about newly indexed items matching your normal query constraints.
+ *
+ * @param aCollection The collection this query belongs to. This needs to be
+ * passed-in here or the collection should set the attribute directly when
+ * the query is passed in to a collection's constructor.
+ */
+function GlodaExplicitQueryClass(aCollection) {
+ this.collection = aCollection;
+}
+
+GlodaExplicitQueryClass.prototype = {
+ /**
+ * No options; they are currently only needed for SQL query generation, which
+ * does not happen for explicit queries.
+ */
+ options: {},
+
+ /**
+ * Since our query is intended to only match the contents of our collection,
+ * it doesn't make sense to let someone attempt to construct a boolean OR
+ * involving us.
+ *
+ * @returns null
+ */
+ or() {
+ return null;
+ },
+
+ /**
+ * Return nothing (null) because it does not make sense to create a collection
+ * based on an explicit query. This method is normally used (on a normal
+ * query) to return a collection populated by the constraints of the query.
+ * In the case of an explicit query, we expect it will be associated with
+ * either a hand-created collection or the results of a normal query that is
+ * immediately converted into an explicit query. In all likelihood, calling
+ * this method on an instance of this type is an error, so it is helpful to
+ * return null because people will error hard.
+ *
+ * @returns null
+ */
+ getCollection() {
+ return null;
+ },
+
+ /**
+ * Matches only items that are already in the collection associated with this
+ * query (by id).
+ *
+ * @param aObj The object/item to test for already being in the associated
+ * collection.
+ * @returns true when the object is in the associated collection, otherwise
+ * false.
+ */
+ test(aObj) {
+ return aObj.id in this.collection._idMap;
+ },
+};
+
+/**
+ * @class A query that 'tests' true for everything. Intended for debugging purposes
+ * only.
+ */
+function GlodaWildcardQueryClass() {}
+
+GlodaWildcardQueryClass.prototype = {
+ /**
+ * No options; they are currently only needed for SQL query generation.
+ */
+ options: {},
+
+ // don't let people try and mess with us
+ or() {
+ return null;
+ },
+ // don't let people try and query on us (until we have a real use case for
+ // that...)
+ getCollection() {
+ return null;
+ },
+ /**
+ * Everybody wins!
+ */
+ test(aObj) {
+ return true;
+ },
+};
+
+/**
+ * Factory method to effectively create per-noun subclasses of GlodaQueryClass,
+ * GlodaNullQueryClass, GlodaExplicitQueryClass, and GlodaWildcardQueryClass.
+ * For GlodaQueryClass this allows us to add per-noun helpers. For the others,
+ * this is merely a means of allowing us to attach the (per-noun) nounDef to
+ * the 'class'.
+ */
+function GlodaQueryClassFactory(aNounDef) {
+  // Per-noun subclass of GlodaQueryClass; aOptions is forwarded verbatim.
+  let newQueryClass = function (aOptions) {
+    GlodaQueryClass.call(this, aOptions);
+  };
+  // NOTE(review): the prototype chains below are built with `new` rather
+  // than Object.create, so each base constructor runs once per noun (with no
+  // arguments) just to create the prototype object — presumably harmless for
+  // these constructors, but confirm before modernizing.
+  newQueryClass.prototype = new GlodaQueryClass();
+  // Back-pointer so instances can find their own class (e.g. for cloning).
+  newQueryClass.prototype._queryClass = newQueryClass;
+  newQueryClass.prototype._nounDef = aNounDef;
+
+  // Per-noun null query: matches nothing; the collection is attached here
+  // since GlodaNullQueryClass's own constructor takes no arguments.
+  let newNullClass = function (aCollection) {
+    GlodaNullQueryClass.call(this);
+    this.collection = aCollection;
+  };
+  newNullClass.prototype = new GlodaNullQueryClass();
+  newNullClass.prototype._queryClass = newNullClass;
+  newNullClass.prototype._nounDef = aNounDef;
+
+  // Per-noun explicit query: matches only items already in its collection.
+  let newExplicitClass = function (aCollection) {
+    GlodaExplicitQueryClass.call(this);
+    this.collection = aCollection;
+  };
+  newExplicitClass.prototype = new GlodaExplicitQueryClass();
+  newExplicitClass.prototype._queryClass = newExplicitClass;
+  newExplicitClass.prototype._nounDef = aNounDef;
+
+  // Per-noun wildcard query: matches everything (debugging aid).
+  let newWildcardClass = function (aCollection) {
+    GlodaWildcardQueryClass.call(this);
+    this.collection = aCollection;
+  };
+  newWildcardClass.prototype = new GlodaWildcardQueryClass();
+  newWildcardClass.prototype._queryClass = newWildcardClass;
+  newWildcardClass.prototype._nounDef = aNounDef;
+
+  // [query, null, explicit, wildcard] — callers destructure in this order.
+  return [newQueryClass, newNullClass, newExplicitClass, newWildcardClass];
+}
diff --git a/comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm b/comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm
new file mode 100644
index 0000000000..2e0fb7b5be
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaSyntheticView.jsm
@@ -0,0 +1,175 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file is charged with providing you a way to have a pretty gloda-backed
+ * nsIMsgDBView.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaSyntheticView"];
+
+/**
+ * Create a synthetic view suitable for passing to |FolderDisplayWidget.show|.
+ * You must pass a query, collection, or conversation in.
+ *
+ * @param {GlodaQuery} [aArgs.query] A gloda query to run.
+ * @param {GlodaCollection} [aArgs.collection] An already-populated collection
+ * to display. Do not call getCollection on a query and hand us that. We
+ * will not register ourselves as a listener and things will not work.
+ * @param {GlodaConversation} [aArgs.conversation] A conversation whose messages
+ * you want to display.
+ */
+function GlodaSyntheticView(aArgs) {
+ if ("query" in aArgs) {
+ this.query = aArgs.query;
+ this.collection = this.query.getCollection(this);
+ this.completed = false;
+ this.viewType = "global";
+ } else if ("collection" in aArgs) {
+ this.query = null;
+ this.collection = aArgs.collection;
+ this.completed = true;
+ this.viewType = "global";
+ } else if ("conversation" in aArgs) {
+ this.collection = aArgs.conversation.getMessagesCollection(this);
+ this.query = this.collection.query;
+ this.completed = false;
+ this.viewType = "conversation";
+ this.selectedMessage = aArgs.message.folderMessage;
+ } else {
+ throw new Error("You need to pass a query or collection");
+ }
+
+ this.customColumns = [];
+}
+GlodaSyntheticView.prototype = {
+  // Default sort: newest first by date.
+  defaultSort: [
+    [Ci.nsMsgViewSortType.byDate, Ci.nsMsgViewSortOrder.descending],
+  ],
+
+  /**
+   * Request the search be performed and notification provided to
+   * aSearchListener. If results are already available, they should
+   * be provided to aSearchListener without re-performing the search.
+   */
+  search(aSearchListener, aCompletionCallback) {
+    this.searchListener = aSearchListener;
+    this.completionCallback = aCompletionCallback;
+
+    this.searchListener.onNewSearch();
+    if (this.completed) {
+      this.reportResults(this.collection.items);
+      // we're not really aborting, but it closes things out nicely
+      this.abortSearch();
+    }
+  },
+
+  // Tell the listener and completion callback (if any) that we are done,
+  // then drop our references to them so they cannot be invoked twice.
+  abortSearch() {
+    if (this.searchListener) {
+      this.searchListener.onSearchDone(Cr.NS_OK);
+    }
+    if (this.completionCallback) {
+      this.completionCallback();
+    }
+    this.searchListener = null;
+    this.completionCallback = null;
+  },
+
+  // Report each item's underlying folder message (when one still exists) to
+  // the search listener as a hit. Items without a folderMessage are skipped.
+  reportResults(aItems) {
+    for (let item of aItems) {
+      let hdr = item.folderMessage;
+      if (hdr) {
+        this.searchListener.onSearchHit(hdr, hdr.folder);
+      }
+    }
+  },
+
+  /**
+   * Helper function used by |DBViewWrapper.getMsgHdrForMessageID| since there
+   * are no actual backing folders for it to check.
+   *
+   * @param aMessageId The header message-id to look for.
+   * @returns The matching header with a live folder message, or null.
+   */
+  getMsgHdrForMessageID(aMessageId) {
+    for (let item of this.collection.items) {
+      if (item.headerMessageID == aMessageId) {
+        let hdr = item.folderMessage;
+        if (hdr) {
+          return hdr;
+        }
+      }
+    }
+    return null;
+  },
+
+  /**
+   * The default set of columns to show.
+   */
+  DEFAULT_COLUMN_STATES: {
+    threadCol: {
+      visible: true,
+    },
+    flaggedCol: {
+      visible: true,
+    },
+    subjectCol: {
+      visible: true,
+    },
+    // Correspondent and sender columns are mutually exclusive, keyed off the
+    // same pref.
+    correspondentCol: {
+      visible: Services.prefs.getBoolPref("mail.threadpane.use_correspondents"),
+    },
+    senderCol: {
+      visible: !Services.prefs.getBoolPref(
+        "mail.threadpane.use_correspondents"
+      ),
+    },
+    dateCol: {
+      visible: true,
+    },
+    locationCol: {
+      visible: true,
+    },
+  },
+
+  // --- settings persistence
+  // Settings are stored as JSON strings in per-viewType preference branches;
+  // a missing/corrupt pref falls back to the default setting.
+  getPersistedSetting(aSetting) {
+    try {
+      return JSON.parse(
+        Services.prefs.getCharPref(
+          "mailnews.database.global.views." + this.viewType + "." + aSetting
+        )
+      );
+    } catch (e) {
+      return this.getDefaultSetting(aSetting);
+    }
+  },
+  setPersistedSetting(aSetting, aValue) {
+    Services.prefs.setCharPref(
+      "mailnews.database.global.views." + this.viewType + "." + aSetting,
+      JSON.stringify(aValue)
+    );
+  },
+  getDefaultSetting(aSetting) {
+    if (aSetting == "columns") {
+      return this.DEFAULT_COLUMN_STATES;
+    }
+    return undefined;
+  },
+
+  // --- collection listener
+  // New items only matter while someone is still listening for results.
+  onItemsAdded(aItems, aCollection) {
+    if (this.searchListener) {
+      this.reportResults(aItems);
+    }
+  },
+  onItemsModified(aItems, aCollection) {},
+  onItemsRemoved(aItems, aCollection) {},
+  // Query finished: mark ourselves complete and close out the listener and
+  // completion callback if they are still attached.
+  onQueryCompleted(aCollection) {
+    this.completed = true;
+    if (this.searchListener) {
+      this.searchListener.onSearchDone(Cr.NS_OK);
+    }
+    if (this.completionCallback) {
+      this.completionCallback();
+    }
+  },
+};
diff --git a/comm/mailnews/db/gloda/modules/GlodaUtils.jsm b/comm/mailnews/db/gloda/modules/GlodaUtils.jsm
new file mode 100644
index 0000000000..a2b7fe4174
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/GlodaUtils.jsm
@@ -0,0 +1,84 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["GlodaUtils"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+/**
+ * @namespace A holding place for logic that is not gloda-specific and should
+ * reside elsewhere.
+ */
+var GlodaUtils = {
+  /**
+   * This Regexp is super-complicated and used at least in two different parts of
+   * the code, so let's expose it from one single location.
+   */
+  PART_RE: new RegExp(
+    "^[^?]+\\?(?:/;section=\\d+\\?)?(?:[^&]+&)*part=([^&]+)(?:&[^&]+)*$"
+  ),
+
+  // Decode a MIME-encoded header string via nsIMimeConverter.
+  // NOTE(review): see nsIMimeConverter.decodeMimeHeader for the meaning of
+  // the null/false/true arguments — confirm before changing them.
+  deMime(aString) {
+    return MailServices.mimeConverter.decodeMimeHeader(
+      aString,
+      null,
+      false,
+      true
+    );
+  },
+
+  // Cached reference to nsIMsgHeaderParser.
+  _headerParser: MailServices.headerParser,
+
+  /**
+   * Parses an RFC 2822 list of e-mail addresses and returns an object with
+   * 4 attributes, as described below. We will use the example of the user
+   * passing an argument of '"Bob Smith" <bob@example.com>'.
+   *
+   * This method (by way of nsIMsgHeaderParser) takes care of decoding mime
+   * headers, but is not aware of folder-level character set overrides.
+   *
+   * count: the number of addresses parsed. (ex: 1)
+   * addresses: a list of e-mail addresses (ex: ["bob@example.com"])
+   * names: a list of names (ex: ["Bob Smith"])
+   * fullAddresses: aka the list of name and e-mail together (ex: ['"Bob Smith"
+   *  <bob@example.com>']).
+   *
+   * This method is a convenience wrapper around nsIMsgHeaderParser.
+   */
+  parseMailAddresses(aMailAddresses) {
+    let addresses = this._headerParser.parseEncodedHeader(aMailAddresses);
+    return {
+      // An empty display name is normalized to null.
+      names: addresses.map(a => a.name || null),
+      addresses: addresses.map(a => a.email),
+      fullAddresses: addresses.map(a => a.toString()),
+      count: addresses.length,
+    };
+  },
+
+  /**
+   * MD5 hash a string and return the hex-string result. Impl from nsICryptoHash
+   * docs.
+   */
+  md5HashString(aString) {
+    // UTF-8 encode first so non-ASCII input hashes deterministically.
+    let data = [...new TextEncoder().encode(aString)];
+
+    let hasher = Cc["@mozilla.org/security/hash;1"].createInstance(
+      Ci.nsICryptoHash
+    );
+    hasher.init(Ci.nsICryptoHash.MD5);
+    hasher.update(data, data.length);
+    // finish(false) returns the raw digest as a binary string.
+    let hash = hasher.finish(false);
+
+    // return the two-digit hexadecimal code for a byte
+    function toHexString(charCode) {
+      return ("0" + charCode.toString(16)).slice(-2);
+    }
+
+    // convert the binary hash data to a hex string.
+    // (Object.keys on a string yields its indices, so this maps each byte.)
+    let hex = Object.keys(hash).map(i => toHexString(hash.charCodeAt(i)));
+    return hex.join("");
+  },
+};
diff --git a/comm/mailnews/db/gloda/modules/IndexMsg.jsm b/comm/mailnews/db/gloda/modules/IndexMsg.jsm
new file mode 100644
index 0000000000..9a4add589e
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/IndexMsg.jsm
@@ -0,0 +1,3464 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/*
+ * This file currently contains a fairly general implementation of asynchronous
+ * indexing with a very explicit message indexing implementation. As gloda
+ * will eventually want to index more than just messages, the message-specific
+ * things should ideally lose their special hold on this file. This will
+ * benefit readability/size as well.
+ */
+
+const EXPORTED_SYMBOLS = ["GlodaMsgIndexer"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+const { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+const { GlodaContact, GlodaFolder } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDataModel.jsm"
+);
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+const { GlodaIndexer, IndexingJob } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+const { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+
+const lazy = {};
+ChromeUtils.defineModuleGetter(
+ lazy,
+ "MailUtils",
+ "resource:///modules/MailUtils.jsm"
+);
+
+// Cr does not have mailnews error codes!
+var NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE = 0x80550005;
+
+// Message header property holding the message's gloda id. (Writes to it are
+// deferred until DB commit; see PendingCommitTracker below.)
+var GLODA_MESSAGE_ID_PROPERTY = "gloda-id";
+/**
+ * Message header property to track dirty status; one of
+ * |GlodaIndexer.kMessageClean|, |GlodaIndexer.kMessageDirty|,
+ * |GlodaIndexer.kMessageFilthy|.
+ */
+var GLODA_DIRTY_PROPERTY = "gloda-dirty";
+
+/**
+ * The sentinel GLODA_MESSAGE_ID_PROPERTY value indicating that a message fails
+ * to index and we should not bother trying again, at least not until a new
+ * release is made.
+ *
+ * This should ideally just flip between 1 and 2, with GLODA_OLD_BAD_MESSAGE_ID
+ * flipping in the other direction. If we start having more trailing badness,
+ * _indexerGetEnumerator and GLODA_OLD_BAD_MESSAGE_ID will need to be altered.
+ *
+ * When flipping this, be sure to update glodaTestHelper.js's copy.
+ */
+var GLODA_BAD_MESSAGE_ID = 2;
+/**
+ * The gloda id we used to use to mark messages as bad, but now should be
+ * treated as eligible for indexing. This is only ever used for consideration
+ * when creating msg header enumerators with `_indexerGetEnumerator` which
+ * means we only will re-index such messages in an indexing sweep. Accordingly
+ * event-driven indexing will still treat such messages as unindexed (and
+ * unindexable) until an indexing sweep picks them up.
+ */
+var GLODA_OLD_BAD_MESSAGE_ID = 1;
+// Smallest gloda id treated as a really-indexed message; lower values are
+// reserved (the bad-message sentinels above use 1 and 2).
+var GLODA_FIRST_VALID_MESSAGE_ID = 32;
+
+// Header property name carrying the junk classification score.
+var JUNK_SCORE_PROPERTY = "junkscore";
+// String form of the score value that marks a message as spam.
+var JUNK_SPAM_SCORE_STR = Ci.nsIJunkMailPlugin.IS_SPAM_SCORE.toString();
+
+/**
+ * The processing flags that tell us that a message header has not yet been
+ * reported to us via msgsClassified. If it has one of these flags, it is
+ * still being processed.
+ */
+var NOT_YET_REPORTED_PROCESSING_FLAGS =
+  Ci.nsMsgProcessingFlags.NotReportedClassified |
+  Ci.nsMsgProcessingFlags.ClassifyJunk;
+
+// for list comprehension fun
+function* range(begin, end) {
+ for (let i = begin; i < end; ++i) {
+ yield i;
+ }
+}
+
+/**
+ * We do not set properties on the messages until we perform a DB commit; this
+ * helper class tracks messages that we have indexed but are not yet marked
+ * as such on their header.
+ */
+var PendingCommitTracker = {
+ /**
+ * Maps message URIs to their gloda ids.
+ *
+ * I am not entirely sure why I chose the URI for the key rather than
+ * gloda folder ID + message key. Most likely it was to simplify debugging
+ * since the gloda folder ID is opaque while the URI is very informative. It
+ * is also possible I was afraid of IMAP folder renaming triggering a UID
+ * renumbering?
+ */
+ _indexedMessagesPendingCommitByKey: {},
+ /**
+ * Map from the pending commit gloda id to a tuple of [the corresponding
+ * message header, dirtyState].
+ */
+ _indexedMessagesPendingCommitByGlodaId: {},
+ /**
+ * Do we have a post-commit handler registered with this transaction yet?
+ */
+ _pendingCommit: false,
+
+ /**
+ * The function gets called when the commit actually happens to flush our
+ * message id's.
+ *
+ * It is very possible that by the time this call happens we have left the
+ * folder and nulled out msgDatabase on the folder. Since nulling it out
+ * is what causes the commit, if we set the headers here without somehow
+ * forcing a commit, we will lose. Badly.
+ * Accordingly, we make a list of all the folders that the headers belong to
+ * as we iterate, make sure to re-attach their msgDatabase before forgetting
+ * the headers, then make sure to zero the msgDatabase again, triggering a
+ * commit. If there were a way to directly get the nsIMsgDatabase from the
+ * header we could do that and call commit directly. We don't track
+ * databases along with the headers since the headers can change because of
+ * moves and that would increase the number of moving parts.
+ */
+ _commitCallback() {
+ let foldersByURI = {};
+ let lastFolder = null;
+
+ for (let glodaId in PendingCommitTracker._indexedMessagesPendingCommitByGlodaId) {
+ let [msgHdr, dirtyState] =
+ PendingCommitTracker._indexedMessagesPendingCommitByGlodaId[glodaId];
+ // Mark this message as indexed.
+ // It's conceivable the database could have gotten blown away, in which
+ // case the message headers are going to throw exceptions when we try
+ // and touch them. So we wrap this in a try block that complains about
+ // this unforeseen circumstance. (noteFolderDatabaseGettingBlownAway
+ // should have been called and avoided this situation in all known
+ // situations.)
+ try {
+ let curGlodaId = msgHdr.getUint32Property(GLODA_MESSAGE_ID_PROPERTY);
+ if (curGlodaId != glodaId) {
+ msgHdr.setUint32Property(GLODA_MESSAGE_ID_PROPERTY, glodaId);
+ }
+ let headerDirty = msgHdr.getUint32Property(GLODA_DIRTY_PROPERTY);
+ if (headerDirty != dirtyState) {
+ msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, dirtyState);
+ }
+
+ // Make sure this folder is in our foldersByURI map.
+ if (lastFolder == msgHdr.folder) {
+ continue;
+ }
+ lastFolder = msgHdr.folder;
+ let folderURI = lastFolder.URI;
+ if (!(folderURI in foldersByURI)) {
+ foldersByURI[folderURI] = lastFolder;
+ }
+ } catch (ex) {
+ GlodaMsgIndexer._log.error(
+ "Exception while attempting to mark message with gloda state after" +
+ "db commit",
+ ex
+ );
+ }
+ }
+
+ // it is vitally important to do this before we forget about the headers!
+ for (let uri in foldersByURI) {
+ let folder = foldersByURI[uri];
+ // This will not cause a parse. The database is in-memory since we have
+ // a header that belongs to it. This just causes the folder to
+ // re-acquire a reference from the database manager.
+ folder.msgDatabase;
+ // And this will cause a commit. (And must be done since we don't want
+ // to cause a leak.)
+ folder.msgDatabase = null;
+ }
+
+ PendingCommitTracker._indexedMessagesPendingCommitByGlodaId = {};
+ PendingCommitTracker._indexedMessagesPendingCommitByKey = {};
+
+ PendingCommitTracker._pendingCommit = false;
+ },
+
+ /**
+ * Track a message header that should be marked with the given gloda id when
+ * the database commits.
+ */
+ track(aMsgHdr, aGlodaId) {
+ let pendingKey = aMsgHdr.folder.URI + "#" + aMsgHdr.messageKey;
+ this._indexedMessagesPendingCommitByKey[pendingKey] = aGlodaId;
+ this._indexedMessagesPendingCommitByGlodaId[aGlodaId] = [
+ aMsgHdr,
+ GlodaMsgIndexer.kMessageClean,
+ ];
+
+ if (!this._pendingCommit) {
+ GlodaDatastore.runPostCommit(this._commitCallback);
+ this._pendingCommit = true;
+ }
+ },
+
+ /**
+ * Get the current state of a message header given that we cannot rely on just
+ * looking at the header's properties because we defer setting those
+ * until the SQLite commit happens.
+ *
+ * @returns Tuple of [gloda id, dirty status].
+ */
+ getGlodaState(aMsgHdr) {
+ // If it's in the pending commit table, then the message is basically
+ // clean. Return that info.
+ let pendingKey = aMsgHdr.folder.URI + "#" + aMsgHdr.messageKey;
+ if (pendingKey in this._indexedMessagesPendingCommitByKey) {
+ let glodaId =
+ PendingCommitTracker._indexedMessagesPendingCommitByKey[pendingKey];
+ return [glodaId, this._indexedMessagesPendingCommitByGlodaId[glodaId][1]];
+ }
+
+ // Otherwise the header's concept of state is correct.
+ let glodaId = aMsgHdr.getUint32Property(GLODA_MESSAGE_ID_PROPERTY);
+ let glodaDirty = aMsgHdr.getUint32Property(GLODA_DIRTY_PROPERTY);
+ return [glodaId, glodaDirty];
+ },
+
+ /**
+ * Update our structure to reflect moved headers. Moves are currently
+ * treated as weakly interesting and do not require a reindexing
+ * although collections will get notified. So our job is to to fix-up
+ * the pending commit information if the message has a pending commit.
+ */
+ noteMove(aOldHdr, aNewHdr) {
+ let oldKey = aOldHdr.folder.URI + "#" + aOldHdr.messageKey;
+ if (!(oldKey in this._indexedMessagesPendingCommitByKey)) {
+ return;
+ }
+
+ let glodaId = this._indexedMessagesPendingCommitByKey[oldKey];
+ delete this._indexedMessagesPendingCommitByKey[oldKey];
+
+ let newKey = aNewHdr.folder.URI + "#" + aNewHdr.messageKey;
+ this._indexedMessagesPendingCommitByKey[newKey] = glodaId;
+
+ // only clobber the header, not the dirty state
+ this._indexedMessagesPendingCommitByGlodaId[glodaId][0] = aNewHdr;
+ },
+
+ /**
+ * A blind move is one where we have the source header but not the destination
+ * header. This happens for IMAP messages that do not involve offline fake
+ * headers.
+ * XXX Since IMAP moves will propagate the gloda-id/gloda-dirty bits for us,
+ * we could detect the other side of the move when it shows up as a
+ * msgsClassified event and restore the mapping information. Since the
+ * offline fake header case should now cover the bulk of IMAP move
+ * operations, we probably do not need to pursue this.
+ *
+ * We just re-dispatch to noteDirtyHeader because we can't do anything more
+ * clever.
+ */
+ noteBlindMove(aOldHdr) {
+ this.noteDirtyHeader(aOldHdr);
+ },
+
+ /**
+ * If a message is dirty we should stop tracking it for post-commit
+ * purposes. This is not because we don't want to write to its header
+ * when we commit as much as that we want to avoid |getHeaderGlodaState|
+ * reporting that the message is clean. We could complicate our state
+ * by storing that information, but this is easier and ends up the same
+ * in the end.
+ */
+ noteDirtyHeader(aMsgHdr) {
+ let pendingKey = aMsgHdr.folder.URI + "#" + aMsgHdr.messageKey;
+ if (!(pendingKey in this._indexedMessagesPendingCommitByKey)) {
+ return;
+ }
+
+ // (It is important that we get the gloda id from our own structure!)
+ let glodaId = this._indexedMessagesPendingCommitByKey[pendingKey];
+ this._indexedMessagesPendingCommitByGlodaId[glodaId][1] =
+ GlodaMsgIndexer.kMessageDirty;
+ },
+
+ /**
+ * Sometimes a folder database gets blown away. This happens for one of two
+ * expected reasons right now:
+ * - Folder compaction.
+ * - Explicit reindexing of a folder via the folder properties "rebuild index"
+ * button.
+ *
+ * When this happens, we are basically out of luck and need to discard
+ * everything about the folder. The good news is that the folder compaction
+ * pass is clever enough to re-establish the linkages that are being lost
+ * when we drop these things on the floor. Reindexing of a folder is not
+ * clever enough to deal with this but is an exceptional case of last resort
+ * (the user should not normally be performing a reindex as part of daily
+ * operation), so we accept that messages may be redundantly indexed.
+ */
+ noteFolderDatabaseGettingBlownAway(aMsgFolder) {
+ let uri = aMsgFolder.URI + "#";
+ for (let key of Object.keys(this._indexedMessagesPendingCommitByKey)) {
+ // this is not as efficient as it could be, but compaction is relatively
+ // rare and the number of pending headers is generally going to be
+ // small.
+ if (key.indexOf(uri) == 0) {
+ delete this._indexedMessagesPendingCommitByKey[key];
+ }
+ }
+ },
+};
+
+/**
+ * This callback handles processing the asynchronous query results of
+ * |GlodaMsgIndexer.getMessagesByMessageID|.
+ */
+function MessagesByMessageIdCallback(
+ aMsgIDToIndex,
+ aResults,
+ aCallback,
+ aCallbackThis
+) {
+ this.msgIDToIndex = aMsgIDToIndex;
+ this.results = aResults;
+ this.callback = aCallback;
+ this.callbackThis = aCallbackThis;
+}
+
MessagesByMessageIdCallback.prototype = {
  _log: console.createInstance({
    prefix: "gloda.index_msg.mbm",
    maxLogLevel: "Warn",
    maxLogLevelPref: "gloda.loglevel",
  }),

  /**
   * Bucket each arriving gloda message under the result slot reserved for
   * its message-id header.
   */
  onItemsAdded(aItems, aCollection) {
    // Outright bail if the datastore has already shut down.
    if (GlodaDatastore.datastoreIsShutdown) {
      return;
    }

    this._log.debug("getting results...");
    for (const message of aItems) {
      const slot = this.msgIDToIndex[message.headerMessageID];
      this.results[slot].push(message);
    }
  },
  onItemsModified() {},
  onItemsRemoved() {},

  /**
   * Query finished: hand the accumulated result buckets to the caller.
   */
  onQueryCompleted(aCollection) {
    // Outright bail if the datastore has already shut down.
    if (GlodaDatastore.datastoreIsShutdown) {
      return;
    }

    this._log.debug(`query completed, notifying... ${this.results}`);

    this.callback.call(this.callbackThis, this.results);
  },
};
+
+/**
+ * The message indexer!
+ *
+ * === Message Indexing Strategy
+ * To these ends, we implement things like so:
+ *
+ * Message State Tracking
+ * - We store a property on all indexed headers indicating their gloda message
+ * id. This allows us to tell whether a message is indexed from the header,
+ * without having to consult the SQL database.
+ * - When we receive an event that indicates that a message's meta-data has
+ * changed and gloda needs to re-index the message, we set a property on the
+ * header that indicates the message is dirty. This property can indicate
+ * that the message needs to be re-indexed but the gloda-id is valid (dirty)
+ * or that the message's gloda-id is invalid (filthy) because the gloda
+ * database has been blown away.
+ * - We track whether a folder is up-to-date on our GlodaFolder representation
+ * using a concept of dirtiness, just like messages. Like messages, a folder
+ * can be dirty or filthy. A dirty folder has at least one dirty message in
+ * it which means we should scan the folder. A filthy folder means that
+ * every message in the folder should be considered filthy. Folders start
+ * out filthy when Gloda is first told about them indicating we cannot
+ * trust any of the gloda-id's in the folders. Filthy folders are downgraded
+ * to dirty folders after we mark all of the headers with gloda-id's filthy.
+ *
+ * Indexing Message Control
+ * - We index the headers of all IMAP messages. We index the bodies of all IMAP
+ * messages that are offline. We index all local messages. We plan to avoid
+ * indexing news messages.
+ * - We would like a way to express desires about indexing that either don't
+ * confound offline storage with indexing, or actually allow some choice.
+ *
+ * Indexing Messages
+ * - We have two major modes of indexing: sweep and event-driven. When we
+ * start up we kick off an indexing sweep. We use event-driven indexing
+ * as we receive events for eligible messages, but if we get too many
+ * events we start dropping them on the floor and just flag that an indexing
+ * sweep is required.
+ * - The sweep initiates folder indexing jobs based on the priorities assigned
+ * to folders. Folder indexing uses a filtered message enumerator to find
+ * messages that need to be indexed, minimizing wasteful exposure of message
+ * headers to XPConnect that we would not end up indexing.
+ * - For local folders, we use GetDatabaseWithReparse to ensure that the .msf
+ * file exists. For IMAP folders, we simply use GetDatabase because we know
+ * the auto-sync logic will make sure that the folder is up-to-date and we
+ * want to avoid creating problems through use of updateFolder.
+ *
+ * Junk Mail
+ * - We do not index junk. We do not index messages until the junk/non-junk
+ * determination has been made. If a message gets marked as junk, we act like
+ * it was deleted.
+ * - We know when a message is actively queued for junk processing thanks to
+ * folder processing flags. nsMsgDBFolder::CallFilterPlugins does this
+ * prior to initiating spam processing. Unfortunately, this method does not
+ * get called until after we receive the notification about the existence of
+ * the header. How long after can vary on different factors. The longest
+ * delay is in the IMAP case where there is a filter that requires the
+ * message body to be present; the method does not get called until all the
+ * bodies are downloaded.
+ *
+ */
+var GlodaMsgIndexer = {
  /**
   * A partial attempt to generalize to support multiple databases. Each
   * database would have its own datastore, and each datastore would have its
   * own indexer. But we rather inter-mingle our use of this field with the
   * singleton global GlodaDatastore.
   */
  _datastore: GlodaDatastore,
  // Logger for this indexer; verbosity is governed by the "gloda.loglevel"
  // preference via maxLogLevelPref.
  _log: console.createInstance({
    prefix: "gloda.index_msg",
    maxLogLevel: "Warn",
    maxLogLevelPref: "gloda.loglevel",
  }),

  // Cached reference to the junk (spam) service singleton.
  _junkService: MailServices.junk,

  // Identifying name for this indexer.
  name: "index_msg",
  /**
   * Are we enabled, read: are we processing change events?
   */
  _enabled: false,
  // Read-only view of |_enabled|; flipped via enable()/disable() below.
  get enabled() {
    return this._enabled;
  },
+
  /**
   * Turn on event-driven indexing: point our listeners back at this indexer,
   * register the folder listener and the folder-notification (MFN) listener,
   * mark ourselves enabled, and consider kicking off schema migration work.
   */
  enable() {
    // initialize our listeners' this pointers
    this._databaseAnnouncerListener.indexer = this;
    this._msgFolderListener.indexer = this;

    // register for:
    // - folder loaded events, so we know when getDatabaseWithReparse has
    //   finished updating the index/what not (if it wasn't immediately
    //   available)
    // - property changes (so we know when a message's read/starred state have
    //   changed.)
    this._folderListener._init(this);
    MailServices.mailSession.AddFolderListener(
      this._folderListener,
      Ci.nsIFolderListener.intPropertyChanged |
        Ci.nsIFolderListener.propertyFlagChanged |
        Ci.nsIFolderListener.event
    );

    MailServices.mfn.addListener(
      this._msgFolderListener,
      // note: intentionally no msgAdded or msgUnincorporatedMoved.
      Ci.nsIMsgFolderNotificationService.msgsClassified |
        Ci.nsIMsgFolderNotificationService.msgsJunkStatusChanged |
        Ci.nsIMsgFolderNotificationService.msgsDeleted |
        Ci.nsIMsgFolderNotificationService.msgsMoveCopyCompleted |
        Ci.nsIMsgFolderNotificationService.msgKeyChanged |
        Ci.nsIMsgFolderNotificationService.folderAdded |
        Ci.nsIMsgFolderNotificationService.folderDeleted |
        Ci.nsIMsgFolderNotificationService.folderMoveCopyCompleted |
        Ci.nsIMsgFolderNotificationService.folderRenamed |
        Ci.nsIMsgFolderNotificationService.folderCompactStart |
        Ci.nsIMsgFolderNotificationService.folderCompactFinish |
        Ci.nsIMsgFolderNotificationService.folderReindexTriggered
    );

    this._enabled = true;

    this._considerSchemaMigration();

    this._log.info("Event-Driven Indexing is now " + this._enabled);
  },
  /**
   * Turn off event-driven indexing: detach every listener enable() attached,
   * leave any folder we are currently "in", and mark ourselves disabled.
   */
  disable() {
    // remove FolderLoaded notification listener
    MailServices.mailSession.RemoveFolderListener(this._folderListener);

    MailServices.mfn.removeListener(this._msgFolderListener);

    this._indexerLeaveFolder(); // nop if we aren't "in" a folder

    this._enabled = false;

    this._log.info("Event-Driven Indexing is now " + this._enabled);
  },
+
  /**
   * Indicates that we have pending deletions to process, meaning that there
   * are gloda message rows flagged for deletion. If this value is a boolean,
   * it means the value is known reliably. If this value is null, it means
   * that we don't know, likely because we have started up and have not checked
   * the database.
   */
  pendingDeletions: null,

  /**
   * The message (or folder state) is believed up-to-date.
   */
  kMessageClean: 0,
  /**
   * The message (or folder) is known to not be up-to-date. In the case of
   * folders, this means that some of the messages in the folder may be dirty.
   * However, because of the way our indexing works, it is possible there may
   * actually be no dirty messages in a folder. (We attempt to process
   * messages in an event-driven fashion for a finite number of messages, but
   * because we can quit without completing processing of the queue, we need to
   * mark the folder dirty, just-in-case.) (We could do some extra leg-work
   * and do a better job of marking the folder clean again.)
   */
  kMessageDirty: 1,
  /**
   * We have not indexed the folder at all, but messages in the folder think
   * they are indexed. We downgrade the folder to just kMessageDirty after
   * marking all the messages in the folder as dirty. We do this so that if we
   * have to stop indexing the folder we can still build on our progress next
   * time we enter the folder.
   * We mark all folders filthy when (re-)creating the database because there
   * may be previous state left over from an earlier database.
   */
  kMessageFilthy: 2,

  /**
   * A message addition job yet to be (completely) processed. Since message
   * addition events come to us one-by-one, in order to aggregate them into a
   * job, we need something like this. It's up to the indexing loop to
   * decide when to null this out; it can either do it when it first starts
   * processing it, or when it has processed the last thing. It's really a
   * question of whether we want retrograde motion in the folder progress bar
   * or the message progress bar.
   */
  _pendingAddJob: null,

  /**
   * The number of messages that we should queue for processing before letting
   * them fall on the floor and relying on our folder-walking logic to ensure
   * that the messages are indexed.
   * The reason we allow for queueing messages in an event-driven fashion is
   * that once we have reached a steady-state, it is preferable to be able to
   * deal with new messages and modified meta-data in a prompt fashion rather
   * than having to (potentially) walk every folder in the system just to find
   * the message that the user changed the tag on.
   */
  _indexMaxEventQueueMessages: 20,

  /**
   * Unit testing hook to get us to emit additional logging that verges on
   * inane for general usage but is helpful in unit test output to get a lay
   * of the land and for paranoia reasons.
   */
  _unitTestSuperVerbose: false,

  /** The GlodaFolder corresponding to the folder we are indexing. */
  _indexingGlodaFolder: null,
  /** The nsIMsgFolder we are currently indexing. */
  _indexingFolder: null,
  /** The nsIMsgDatabase we are currently indexing. */
  _indexingDatabase: null,
  /**
   * The iterator we are using to iterate over the headers in
   * this._indexingDatabase.
   * NOTE(review): the surrounding code assigns |this._indexingEnumerator|
   * rather than this field (see _indexerGetEnumerator/_indexerLeaveFolder);
   * this property looks vestigial — confirm before relying on it.
   */
  _indexingIterator: null,

  /** folder whose entry we are pending on */
  _pendingFolderEntry: null,
+
  /**
   * Async common logic that we want to deal with the given folder ID. Besides
   * cutting down on duplicate code, this ensures that we are listening on
   * the folder in case it tries to go away when we are using it.
   *
   * @param aFolderID Gloda folder id to enter (mapped back to its GlodaFolder
   *     and nsIMsgFolder via GlodaDatastore._mapFolderID).
   * @returns GlodaConstants.kWorkSync when the folder was successfully
   *     entered; GlodaConstants.kWorkAsync when we need to pend on
   *     notification of updating of the folder (due to re-parsing or what
   *     have you). (The original comment said true/false, but the code
   *     returns the kWork* constants.) In the event of an actual problem,
   *     an exception will escape after our folder state has been cleaned up.
   */
  _indexerEnterFolder(aFolderID) {
    // leave the folder if we haven't explicitly left it.
    if (this._indexingFolder !== null) {
      this._indexerLeaveFolder();
    }

    this._indexingGlodaFolder = GlodaDatastore._mapFolderID(aFolderID);
    this._indexingFolder = this._indexingGlodaFolder.getXPCOMFolder(
      this._indexingGlodaFolder.kActivityIndexing
    );

    if (this._indexingFolder) {
      this._log.debug("Entering folder: " + this._indexingFolder.URI);
    }

    try {
      // The msf may need to be created or otherwise updated for local folders.
      // This may require yielding until such time as the msf has been created.
      try {
        if (this._indexingFolder instanceof Ci.nsIMsgLocalMailFolder) {
          this._indexingDatabase = this._indexingFolder.getDatabaseWithReparse(
            null,
            null
          );
        }
        // we need do nothing special for IMAP, news, or other
      } catch (e) {
        // getDatabaseWithReparse can return either NS_ERROR_NOT_INITIALIZED or
        // NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE if the net result is that it
        // is going to send us a notification when the reparse has completed.
        // (note that although internally NS_MSG_ERROR_FOLDER_SUMMARY_MISSING
        // might get flung around, it won't make it out to us, and will instead
        // be permuted into an NS_ERROR_NOT_INITIALIZED.)
        if (
          e.result == Cr.NS_ERROR_NOT_INITIALIZED ||
          e.result == NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE
        ) {
          // this means that we need to pend on the update; the listener for
          // FolderLoaded events will call _indexerCompletePendingFolderEntry.
          this._log.debug("Pending on folder load...");
          this._pendingFolderEntry = this._indexingFolder;
          return GlodaConstants.kWorkAsync;
        }
        throw e;
      }
      // we get an nsIMsgDatabase out of this (unsurprisingly) which
      // explicitly inherits from nsIDBChangeAnnouncer, which has the
      // addListener call we want.
      if (this._indexingDatabase == null) {
        this._indexingDatabase = this._indexingFolder.msgDatabase;
      }
      this._indexingDatabase.addListener(this._databaseAnnouncerListener);
    } catch (ex) {
      this._log.error(
        "Problem entering folder: " +
          (this._indexingFolder ? this._indexingFolder.prettyName : "unknown") +
          ", skipping. Error was: " +
          ex.fileName +
          ":" +
          ex.lineNumber +
          ": " +
          ex
      );
      // Reset every bit of per-folder state so a later enter starts clean.
      this._indexingGlodaFolder.indexing = false;
      this._indexingFolder = null;
      this._indexingGlodaFolder = null;
      this._indexingDatabase = null;
      this._indexingEnumerator = null;

      // re-throw, we just wanted to make sure this junk is cleaned up and
      // get localized error logging...
      throw ex;
    }

    return GlodaConstants.kWorkSync;
  },
+
  /**
   * If the folder was still parsing/updating when we tried to enter, then this
   * handler will get called by the listener who got the FolderLoaded message.
   * All we need to do is get the database reference, register a listener on
   * the db, and retrieve an iterator if desired. Finishes the kWorkAsync
   * hand-off begun by _indexerEnterFolder.
   */
  _indexerCompletePendingFolderEntry() {
    this._indexingDatabase = this._indexingFolder.msgDatabase;
    this._indexingDatabase.addListener(this._databaseAnnouncerListener);
    this._log.debug("...Folder Loaded!");

    // the load is no longer pending; we certainly don't want more notifications
    this._pendingFolderEntry = null;
    // indexerEnterFolder returned kWorkAsync, which means we need to notify
    // the callback driver to get things going again.
    GlodaIndexer.callbackDriver();
  },
+
  /**
   * Enumerate all messages in the folder.
   * (Enumeration-mode constant for _indexerGetEnumerator.)
   */
  kEnumAllMsgs: 0,
  /**
   * Enumerate messages that look like they need to be indexed.
   * (Enumeration-mode constant for _indexerGetEnumerator.)
   */
  kEnumMsgsToIndex: 1,
  /**
   * Enumerate messages that are already indexed.
   * (Enumeration-mode constant for _indexerGetEnumerator.)
   */
  kEnumIndexedMsgs: 2,
+
+ /**
+ * Synchronous helper to get an enumerator for the current folder (as found
+ * in |_indexingFolder|.
+ *
+ * @param aEnumKind One of |kEnumAllMsgs|, |kEnumMsgsToIndex|, or
+ * |kEnumIndexedMsgs|.
+ * @param [aAllowPreBadIds=false] Only valid for |kEnumIndexedMsgs|, tells us
+ * that we should treat message with any gloda-id as dirty, not just
+ * messages that have non-bad message id's.
+ */
+ _indexerGetEnumerator(aEnumKind, aAllowPreBadIds) {
+ if (aEnumKind == this.kEnumMsgsToIndex) {
+ // We need to create search terms for messages to index. Messages should
+ // be indexed if they're indexable (local or offline and not expunged)
+ // and either: haven't been indexed, are dirty, or are marked with with
+ // a former GLODA_BAD_MESSAGE_ID that is no longer our bad marker. (Our
+ // bad marker can change on minor schema revs so that we can try and
+ // reindex those messages exactly once and without needing to go through
+ // a pass to mark them as needing one more try.)
+ // The basic search expression is:
+ // ((GLODA_MESSAGE_ID_PROPERTY Is 0) ||
+ // (GLODA_MESSAGE_ID_PROPERTY Is GLODA_OLD_BAD_MESSAGE_ID) ||
+ // (GLODA_DIRTY_PROPERTY Isnt 0)) &&
+ // (JUNK_SCORE_PROPERTY Isnt 100)
+ // If the folder !isLocal we add the terms:
+ // - if the folder is offline -- && (Status Is nsMsgMessageFlags.Offline)
+ // - && (Status Isnt nsMsgMessageFlags.Expunged)
+
+ let searchSession = Cc[
+ "@mozilla.org/messenger/searchSession;1"
+ ].createInstance(Ci.nsIMsgSearchSession);
+ let searchTerms = [];
+ let isLocal = this._indexingFolder instanceof Ci.nsIMsgLocalMailFolder;
+
+ searchSession.addScopeTerm(
+ Ci.nsMsgSearchScope.offlineMail,
+ this._indexingFolder
+ );
+ let nsMsgSearchAttrib = Ci.nsMsgSearchAttrib;
+ let nsMsgSearchOp = Ci.nsMsgSearchOp;
+
+ // first term: (GLODA_MESSAGE_ID_PROPERTY Is 0
+ let searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = false; // actually don't care here
+ searchTerm.beginsGrouping = true;
+ searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
+ searchTerm.op = nsMsgSearchOp.Is;
+ let value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.status = 0;
+ searchTerm.value = value;
+ searchTerm.hdrProperty = GLODA_MESSAGE_ID_PROPERTY;
+ searchTerms.push(searchTerm);
+
+ // second term: || GLODA_MESSAGE_ID_PROPERTY Is GLODA_OLD_BAD_MESSAGE_ID
+ searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = false; // OR
+ searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
+ searchTerm.op = nsMsgSearchOp.Is;
+ value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.status = GLODA_OLD_BAD_MESSAGE_ID;
+ searchTerm.value = value;
+ searchTerm.hdrProperty = GLODA_MESSAGE_ID_PROPERTY;
+ searchTerms.push(searchTerm);
+
+ // third term: || GLODA_DIRTY_PROPERTY Isnt 0 )
+ searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = false;
+ searchTerm.endsGrouping = true;
+ searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
+ searchTerm.op = nsMsgSearchOp.Isnt;
+ value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.status = 0;
+ searchTerm.value = value;
+ searchTerm.hdrProperty = GLODA_DIRTY_PROPERTY;
+ searchTerms.push(searchTerm);
+
+ // JUNK_SCORE_PROPERTY Isnt 100
+ // For symmetry with our event-driven stuff, we just directly deal with
+ // the header property.
+ searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = true;
+ searchTerm.attrib = nsMsgSearchAttrib.HdrProperty;
+ searchTerm.op = nsMsgSearchOp.Isnt;
+ value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.str = JUNK_SPAM_SCORE_STR;
+ searchTerm.value = value;
+ searchTerm.hdrProperty = JUNK_SCORE_PROPERTY;
+ searchTerms.push(searchTerm);
+
+ if (!isLocal) {
+ // If the folder is offline, then the message should be too
+ if (this._indexingFolder.getFlag(Ci.nsMsgFolderFlags.Offline)) {
+ // third term: && Status Is nsMsgMessageFlags.Offline
+ searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = true;
+ searchTerm.attrib = nsMsgSearchAttrib.MsgStatus;
+ searchTerm.op = nsMsgSearchOp.Is;
+ value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.status = Ci.nsMsgMessageFlags.Offline;
+ searchTerm.value = value;
+ searchTerms.push(searchTerm);
+ }
+
+ // fourth term: && Status Isnt nsMsgMessageFlags.Expunged
+ searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = true;
+ searchTerm.attrib = nsMsgSearchAttrib.MsgStatus;
+ searchTerm.op = nsMsgSearchOp.Isnt;
+ value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.status = Ci.nsMsgMessageFlags.Expunged;
+ searchTerm.value = value;
+ searchTerms.push(searchTerm);
+ }
+
+ this._indexingEnumerator = this._indexingDatabase.getFilterEnumerator(
+ searchTerms,
+ true
+ );
+ } else if (aEnumKind == this.kEnumIndexedMsgs) {
+ // Enumerate only messages that are already indexed. This comes out to:
+ // ((GLODA_MESSAGE_ID_PROPERTY > GLODA_FIRST_VALID_MESSAGE_ID-1) &&
+ // (GLODA_DIRTY_PROPERTY Isnt kMessageFilthy))
+ // In English, a message is indexed if (by clause):
+ // 1) The message has a gloda-id and that gloda-id is in the valid range
+ // (and not in the bad message marker range).
+ // 2) The message has not been marked filthy (which invalidates the
+ // gloda-id.) We also assume that the folder would not have been
+ // entered at all if it was marked filthy.
+ let searchSession = Cc[
+ "@mozilla.org/messenger/searchSession;1"
+ ].createInstance(Ci.nsIMsgSearchSession);
+ let searchTerms = [];
+
+ searchSession.addScopeTerm(
+ Ci.nsMsgSearchScope.offlineMail,
+ this._indexingFolder
+ );
+ let nsMsgSearchAttrib = Ci.nsMsgSearchAttrib;
+ let nsMsgSearchOp = Ci.nsMsgSearchOp;
+
+ // first term: (GLODA_MESSAGE_ID_PROPERTY > GLODA_FIRST_VALID_MESSAGE_ID-1
+ let searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = false; // actually don't care here
+ searchTerm.beginsGrouping = true;
+ searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
+ // use != 0 if we're allow pre-bad ids.
+ searchTerm.op = aAllowPreBadIds
+ ? nsMsgSearchOp.Isnt
+ : nsMsgSearchOp.IsGreaterThan;
+ let value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.status = aAllowPreBadIds ? 0 : GLODA_FIRST_VALID_MESSAGE_ID - 1;
+ searchTerm.value = value;
+ searchTerm.hdrProperty = GLODA_MESSAGE_ID_PROPERTY;
+ searchTerms.push(searchTerm);
+
+ // second term: && GLODA_DIRTY_PROPERTY Isnt kMessageFilthy)
+ searchTerm = searchSession.createTerm();
+ searchTerm.booleanAnd = true;
+ searchTerm.endsGrouping = true;
+ searchTerm.attrib = nsMsgSearchAttrib.Uint32HdrProperty;
+ searchTerm.op = nsMsgSearchOp.Isnt;
+ value = searchTerm.value;
+ value.attrib = searchTerm.attrib;
+ value.status = this.kMessageFilthy;
+ searchTerm.value = value;
+ searchTerm.hdrProperty = GLODA_DIRTY_PROPERTY;
+ searchTerms.push(searchTerm);
+
+ // The use-case of already indexed messages does not want them reversed;
+ // we care about seeing the message keys in order.
+ this._indexingEnumerator = this._indexingDatabase.getFilterEnumerator(
+ searchTerms,
+ false
+ );
+ } else if (aEnumKind == this.kEnumAllMsgs) {
+ this._indexingEnumerator =
+ this._indexingDatabase.reverseEnumerateMessages();
+ } else {
+ throw new Error("Unknown enumerator type requested:" + aEnumKind);
+ }
+ },
+
+ _indexerLeaveFolder() {
+ if (this._indexingFolder !== null) {
+ if (this._indexingDatabase) {
+ this._indexingDatabase.commit(Ci.nsMsgDBCommitType.kLargeCommit);
+ // remove our listener!
+ this._indexingDatabase.removeListener(this._databaseAnnouncerListener);
+ }
+ // let the gloda folder know we are done indexing
+ this._indexingGlodaFolder.indexing = false;
+ // null everyone out
+ this._indexingFolder = null;
+ this._indexingGlodaFolder = null;
+ this._indexingDatabase = null;
+ this._indexingEnumerator = null;
+ }
+ },
+
+ /**
+ * Event fed to us by our nsIFolderListener when a folder is loaded. We use
+ * this event to know when a folder we were trying to open to index is
+ * actually ready to be indexed. (The summary may have not existed, may have
+ * been out of date, or otherwise.)
+ *
+ * @param aFolder An nsIMsgFolder, already QI'd.
+ */
+ _onFolderLoaded(aFolder) {
+ if (
+ this._pendingFolderEntry !== null &&
+ aFolder.URI == this._pendingFolderEntry.URI
+ ) {
+ this._indexerCompletePendingFolderEntry();
+ }
+ },
+
  // it's a getter so we can reference 'this'. we could memoize.
  /**
   * Worker definitions registered with GlodaIndexer. Each entry is a
   * [jobType, handlers] pair; |handlers| always provides |worker| (the
   * generator that performs the work) and optionally |onSchedule|,
   * |jobCanceled|, |recover|, and |cleanup| hooks.
   */
  get workers() {
    return [
      [
        "folderSweep",
        {
          worker: this._worker_indexingSweep,
          jobCanceled: this._cleanup_indexingSweep,
          cleanup: this._cleanup_indexingSweep,
        },
      ],
      [
        "folder",
        {
          worker: this._worker_folderIndex,
          recover: this._recover_indexMessage,
          cleanup: this._cleanup_indexing,
        },
      ],
      [
        "folderCompact",
        {
          worker: this._worker_folderCompactionPass,
          // compaction enters the folder so needs to know how to leave
          cleanup: this._cleanup_indexing,
        },
      ],
      [
        "message",
        {
          worker: this._worker_messageIndex,
          onSchedule: this._schedule_messageIndex,
          jobCanceled: this._canceled_messageIndex,
          recover: this._recover_indexMessage,
          cleanup: this._cleanup_indexing,
        },
      ],
      [
        "delete",
        {
          worker: this._worker_processDeletes,
        },
      ],

      [
        "fixMissingContacts",
        {
          worker: this._worker_fixMissingContacts,
        },
      ],
    ];
  },
+
+ _schemaMigrationInitiated: false,
+ _considerSchemaMigration() {
+ if (
+ !this._schemaMigrationInitiated &&
+ GlodaDatastore._actualSchemaVersion === 26
+ ) {
+ let job = new IndexingJob("fixMissingContacts", null);
+ GlodaIndexer.indexJob(job);
+ this._schemaMigrationInitiated = true;
+ }
+ },
+
  /**
   * Request the start-up indexing sweep. The actual work is scheduled by the
   * |indexingSweepNeeded| setter below, which queues a "folderSweep" job.
   */
  initialSweep() {
    this.indexingSweepNeeded = true;
  },
+
+ _indexingSweepActive: false,
+ /**
+ * Indicate that an indexing sweep is desired. We kick-off an indexing
+ * sweep at start-up and whenever we receive an event-based notification
+ * that we either can't process as an event or that we normally handle
+ * during the sweep pass anyways.
+ */
+ set indexingSweepNeeded(aNeeded) {
+ if (!this._indexingSweepActive && aNeeded) {
+ let job = new IndexingJob("folderSweep", null);
+ job.mappedFolders = false;
+ GlodaIndexer.indexJob(job);
+ this._indexingSweepActive = true;
+ }
+ },
+
  /**
   * Performs the folder sweep, locating folders that should be indexed, and
   * creating a folder indexing job for them, and rescheduling itself for
   * execution after that job is completed. Once it indexes all the folders,
   * if we believe we have deletions to process (or just don't know), it kicks
   * off a deletion processing job.
   *
   * Folder traversal logic is based off the spotlight/vista indexer code; we
   * retrieve the list of servers and folders each time we want to find a new
   * folder to index. This avoids needing to maintain a perfect model of the
   * folder hierarchy at all times. (We may eventually want to do that, but
   * this is sufficient and safe for now.) Although our use of dirty flags on
   * the folders allows us to avoid tracking the 'last folder' we processed,
   * we do so to avoid getting 'trapped' in a folder with a high rate of
   * changes.
   *
   * @param aJob The "folderSweep" IndexingJob; we stash the prioritized
   *     GlodaFolder work list on it as |foldersToProcess| and flag the
   *     mapping as done via |mappedFolders|.
   */
  *_worker_indexingSweep(aJob) {
    if (!aJob.mappedFolders) {
      // Walk the folders and make sure all the folders we would want to index
      // are mapped. Build up a list of GlodaFolders as we go, so that we can
      // sort them by their indexing priority.
      let foldersToProcess = (aJob.foldersToProcess = []);

      for (let folder of MailServices.accounts.allFolders) {
        if (this.shouldIndexFolder(folder)) {
          foldersToProcess.push(Gloda.getFolderForFolder(folder));
        }
      }

      // sort the folders by priority (descending)
      foldersToProcess.sort(function (a, b) {
        return b.indexingPriority - a.indexingPriority;
      });

      aJob.mappedFolders = true;
    }

    // -- process the folders (in sorted order)
    while (aJob.foldersToProcess.length) {
      let glodaFolder = aJob.foldersToProcess.shift();
      // ignore folders that:
      // - have been deleted out of existence!
      // - are not dirty/have not been compacted
      // - are actively being compacted
      if (
        glodaFolder._deleted ||
        (!glodaFolder.dirtyStatus && !glodaFolder.compacted) ||
        glodaFolder.compacting
      ) {
        continue;
      }

      // If the folder is marked as compacted, give it a compaction job.
      if (glodaFolder.compacted) {
        GlodaIndexer.indexJob(new IndexingJob("folderCompact", glodaFolder.id));
      }

      // add a job for the folder indexing if it was dirty
      if (glodaFolder.dirtyStatus) {
        GlodaIndexer.indexJob(new IndexingJob("folder", glodaFolder.id));
      }

      // re-schedule this job (although this worker will die)
      GlodaIndexer.indexJob(aJob);
      yield GlodaConstants.kWorkDone;
    }

    // consider deletion
    if (this.pendingDeletions || this.pendingDeletions === null) {
      GlodaIndexer.indexJob(new IndexingJob("delete", null));
    }

    // we don't have any more work to do...
    this._indexingSweepActive = false;
    yield GlodaConstants.kWorkDone;
  },
+
  /**
   * The only state we need to cleanup is that there is no longer an active
   * indexing sweep. (Registered as both the |jobCanceled| and |cleanup|
   * hook for the "folderSweep" worker.)
   */
  _cleanup_indexingSweep(aJob) {
    this._indexingSweepActive = false;
  },
+
  /**
   * The number of headers to look at before yielding with kWorkSync. This
   * is for time-slicing purposes so we still yield to the UI periodically.
   */
  HEADER_CHECK_SYNC_BLOCK_SIZE: 25,

  /**
   * Batch size used by the folder compaction pass when re-filling its list
   * of [gloda id, message key, message-id header] tuples on demand (see
   * _worker_folderCompactionPass).
   */
  FOLDER_COMPACTION_PASS_BATCH_SIZE: 512,
+ /**
+ * Special indexing pass for (local) folders than have been compacted. The
+ * compaction can cause message keys to change because message keys in local
+ * folders are simply offsets into the mbox file. Accordingly, we need to
+ * update the gloda records/objects to point them at the new message key.
+ *
+ * Our general algorithm is to perform two traversals in parallel. The first
+ * is a straightforward enumeration of the message headers in the folder that
+ * apparently have been already indexed. These provide us with the message
+ * key and the "gloda-id" property.
+ * The second is a list of tuples containing a gloda message id, its current
+ * message key per the gloda database, and the message-id header. We re-fill
+ * the list with batches on-demand. This allows us to both avoid dispatching
+ * needless UPDATEs as well as deal with messages that were tracked by the
+ * PendingCommitTracker but were discarded by the compaction notification.
+ *
+ * We end up processing two streams of gloda-id's and some extra info. In
+ * the normal case we expect these two streams to line up exactly and all
+ * we need to do is update the message key if it has changed.
+ *
+ * There are a few exceptional cases where things do not line up:
+ * 1) The gloda database knows about a message that the enumerator does not
+ * know about...
+ * a) This message exists in the folder (identified using its message-id
+ * header). This means the message got indexed but PendingCommitTracker
+ * had to forget about the info when the compaction happened. We
+ * re-establish the link and track the message in PendingCommitTracker
+ * again.
+ * b) The message does not exist in the folder. This means the message got
+ * indexed, PendingCommitTracker had to forget about the info, and
+ * then the message either got moved or deleted before now. We mark
+ * the message as deleted; this allows the gloda message to be reused
+ * if the move target has not yet been indexed or purged if it already
+ * has been and the gloda message is a duplicate. And obviously, if the
+ * event that happened was actually a delete, then the delete is the
+ * right thing to do.
+ * 2) The enumerator knows about a message that the gloda database does not
+ * know about. This is unexpected and should not happen. We log a
+ * warning. We are able to differentiate this case from case #1a by
+ * retrieving the message header associated with the next gloda message
+ * (using the message-id header per 1a again). If the gloda message's
+ * message key is after the enumerator's message key then we know this is
+ * case #2. (It implies an insertion in the enumerator stream which is how
+ * we define the unexpected case.)
+ *
+ * Besides updating the database rows, we also need to make sure that
+ * in-memory representations are updated. Immediately after dispatching
+ * UPDATE changes to the database we use the same set of data to walk the
+ * live collections and update any affected messages. We are then able to
+ * discard the information. Although this means that we will have to
+ * potentially walk the live collections multiple times, unless something
+ * has gone horribly wrong, the number of collections should be reasonable
+ * and the lookups are cheap. We bias batch sizes accordingly.
+ *
+ * Because we operate based on chunks we need to make sure that when we
+ * actually deal with multiple chunks that we don't step on our own feet with
+ * our database updates. Since compaction of message key K results in a new
+ * message key K' such that K' <= K, we can reliably issue database
+ * updates for all values <= K. Which means our feet are safe no matter
+ * when we issue the update command. For maximum cache benefit, we issue
+ * our updates prior to our new query since they should still be maximally
+ * hot at that point.
+ */
*_worker_folderCompactionPass(aJob, aCallbackHandle) {
  yield this._indexerEnterFolder(aJob.id);

  // It's conceivable that with a folder sweep we might end up trying to
  // compact a folder twice. Bail early in this case.
  if (!this._indexingGlodaFolder.compacted) {
    yield GlodaConstants.kWorkDone;
  }

  // this is a forward enumeration (sometimes we reverse enumerate; not here)
  this._indexerGetEnumerator(this.kEnumIndexedMsgs);

  const HEADER_CHECK_SYNC_BLOCK_SIZE = this.HEADER_CHECK_SYNC_BLOCK_SIZE;
  const FOLDER_COMPACTION_PASS_BATCH_SIZE =
    this.FOLDER_COMPACTION_PASS_BATCH_SIZE;

  // Tuples of [gloda id, message key, message-id header] from
  // folderCompactionPassBlockFetch
  let glodaIdsMsgKeysHeaderIds = [];
  // Unpack each tuple from glodaIdsMsgKeysHeaderIds into these guys.
  // (Initialize oldMessageKey because we use it to kickstart our query.)
  let oldGlodaId,
    oldMessageKey = -1,
    oldHeaderMessageId;
  // parallel lists of gloda ids and message keys to pass to
  // GlodaDatastore.updateMessageLocations
  let updateGlodaIds = [];
  let updateMessageKeys = [];
  // list of gloda id's to mark deleted
  let deleteGlodaIds = [];

  // for GC reasons we need to track the number of headers seen
  let numHeadersSeen = 0;

  // We are consuming two lists; our loop structure has to reflect that.
  let headerIter = this._indexingEnumerator[Symbol.iterator]();
  let mayHaveMoreGlodaMessages = true;
  // When true, re-check the current enumerator header against the next gloda
  // tuple instead of advancing the enumerator (set by case 1a below).
  let keepIterHeader = false;
  // When true, hold the current gloda tuple for the next enumerator header
  // instead of popping a fresh one (set by case 2 below).
  let keepGlodaTuple = false;
  let msgHdr = null;
  while (headerIter || mayHaveMoreGlodaMessages) {
    let glodaId;
    if (headerIter) {
      if (!keepIterHeader) {
        let result = headerIter.next();
        if (result.done) {
          headerIter = null;
          msgHdr = null;
          // do the loop check again
          continue;
        }
        msgHdr = result.value;
      } else {
        keepIterHeader = false;
      }
    }

    if (msgHdr) {
      numHeadersSeen++;
      if (numHeadersSeen % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
        yield GlodaConstants.kWorkSync;
      }

      // There is no need to check with PendingCommitTracker. If a message
      // somehow got indexed between the time the compaction killed
      // everything and the time we run, that is a bug.
      glodaId = msgHdr.getUint32Property(GLODA_MESSAGE_ID_PROPERTY);
      // (there is also no need to check for gloda dirty since the enumerator
      // filtered that for us.)
    }

    // get more [gloda id, message key, message-id header] tuples if out
    if (!glodaIdsMsgKeysHeaderIds.length && mayHaveMoreGlodaMessages) {
      // Since we operate on blocks, getting a new block implies we should
      // flush the last block if applicable.
      if (updateGlodaIds.length) {
        GlodaDatastore.updateMessageLocations(
          updateGlodaIds,
          updateMessageKeys,
          aJob.id,
          true
        );
        updateGlodaIds = [];
        updateMessageKeys = [];
      }

      if (deleteGlodaIds.length) {
        GlodaDatastore.markMessagesDeletedByIDs(deleteGlodaIds);
        deleteGlodaIds = [];
      }

      // Fetch the next block of DB tuples starting just past the last
      // message key we consumed; resumes via the wrapped async callback.
      GlodaDatastore.folderCompactionPassBlockFetch(
        aJob.id,
        oldMessageKey + 1,
        FOLDER_COMPACTION_PASS_BATCH_SIZE,
        aCallbackHandle.wrappedCallback
      );
      glodaIdsMsgKeysHeaderIds = yield GlodaConstants.kWorkAsync;
      // Reverse so we can use pop instead of shift and I don't need to be
      // paranoid about performance.
      glodaIdsMsgKeysHeaderIds.reverse();

      if (!glodaIdsMsgKeysHeaderIds.length) {
        mayHaveMoreGlodaMessages = false;

        // We shouldn't be in the loop anymore if headerIter is dead now.
        if (!headerIter) {
          break;
        }
      }
    }

    if (!keepGlodaTuple) {
      if (mayHaveMoreGlodaMessages) {
        [oldGlodaId, oldMessageKey, oldHeaderMessageId] =
          glodaIdsMsgKeysHeaderIds.pop();
      } else {
        oldGlodaId = oldMessageKey = oldHeaderMessageId = null;
      }
    } else {
      keepGlodaTuple = false;
    }

    // -- normal expected case
    if (glodaId == oldGlodaId) {
      // only need to do something if the key is not right
      if (msgHdr.messageKey != oldMessageKey) {
        updateGlodaIds.push(glodaId);
        updateMessageKeys.push(msgHdr.messageKey);
      }
    } else {
      // -- exceptional cases
      // This should always return a value unless something is very wrong.
      // We do not want to catch the exception if one happens.
      let idBasedHeader = oldHeaderMessageId
        ? this._indexingDatabase.getMsgHdrForMessageID(oldHeaderMessageId)
        : false;
      // - Case 1b.
      // The message-id lookup found nothing, so the message is gone;
      // we want to mark the message as deleted.
      if (idBasedHeader == null) {
        deleteGlodaIds.push(oldGlodaId);
      } else if (
        idBasedHeader &&
        ((msgHdr && idBasedHeader.messageKey < msgHdr.messageKey) || !msgHdr)
      ) {
        // - Case 1a
        // The expected case is that the message referenced by the gloda
        // database precedes the header the enumerator told us about. This
        // is expected because if PendingCommitTracker did not mark the
        // message as indexed/clean then the enumerator would not tell us
        // about it.
        // Also, if we ran out of headers from the enumerator, this is a dead
        // giveaway that this is the expected case.
        // tell the pending commit tracker about the gloda database one
        PendingCommitTracker.track(idBasedHeader, oldGlodaId);
        // and we might need to update the message key too
        if (idBasedHeader.messageKey != oldMessageKey) {
          updateGlodaIds.push(oldGlodaId);
          updateMessageKeys.push(idBasedHeader.messageKey);
        }
        // Take another pass through the loop so that we check the
        // enumerator header against the next message in the gloda
        // database.
        keepIterHeader = true;
      } else if (msgHdr) {
        // - Case 2
        // Whereas if the message referenced by gloda has a message key
        // greater than the one returned by the enumerator, then we have a
        // header claiming to be indexed by gloda that gloda does not
        // actually know about. This is exceptional and gets a warning.
        this._log.warn(
          "Observed header that claims to be gloda indexed " +
            "but that gloda has never heard of during " +
            "compaction." +
            " In folder: " +
            msgHdr.folder.URI +
            " sketchy key: " +
            msgHdr.messageKey +
            " subject: " +
            msgHdr.mime2DecodedSubject
        );
        // Keep this tuple around for the next enumerator provided header
        keepGlodaTuple = true;
      }
    }
  }
  // If we don't flush the update, no one will!
  if (updateGlodaIds.length) {
    GlodaDatastore.updateMessageLocations(
      updateGlodaIds,
      updateMessageKeys,
      aJob.id,
      true
    );
  }
  if (deleteGlodaIds.length) {
    GlodaDatastore.markMessagesDeletedByIDs(deleteGlodaIds);
  }

  this._indexingGlodaFolder._setCompactedState(false);

  this._indexerLeaveFolder();
  yield GlodaConstants.kWorkDone;
},
+
/**
 * Index the contents of a folder.
 *
 * Runs in phases: a filthy folder first gets every gloda-id-bearing header
 * marked filthy and the folder downgraded to dirty; then pass 1 counts the
 * messages needing indexing (so aJob.goal can be reported) and pass 2
 * actually indexes them via _indexMessage. Yields kWorkSync periodically to
 * keep the UI responsive and finishes with kWorkDone.
 *
 * @param aJob The IndexingJob; aJob.id names the folder, aJob.force (if
 *     truthy) requests indexing of all messages rather than just unindexed
 *     ones. aJob.goal/offset are updated for progress reporting.
 * @param aCallbackHandle The callback-driver handle used to push nested
 *     _indexMessage generators.
 */
*_worker_folderIndex(aJob, aCallbackHandle) {
  yield this._indexerEnterFolder(aJob.id);

  if (!this.shouldIndexFolder(this._indexingFolder)) {
    aJob.safelyInvokeCallback(true);
    yield GlodaConstants.kWorkDone;
  }

  // Make sure listeners get notified about this job.
  GlodaIndexer._notifyListeners();

  // there is of course a cost to all this header investigation even if we
  // don't do something. so we will yield with kWorkSync for every block.
  const HEADER_CHECK_SYNC_BLOCK_SIZE = this.HEADER_CHECK_SYNC_BLOCK_SIZE;

  // we can safely presume if we are here that this folder has been selected
  // for offline processing...

  // -- Filthy Folder
  // A filthy folder may have misleading properties on the message that claim
  // the message is indexed. They are misleading because the database, for
  // whatever reason, does not have the messages (accurately) indexed.
  // We need to walk all the messages and mark them filthy if they have a
  // dirty property. Once we have done this, we can downgrade the folder's
  // dirty status to plain dirty. We do this rather than trying to process
  // everyone in one go in a filthy context because if we have to terminate
  // indexing before we quit, we don't want to have to re-index messages next
  // time. (This could even lead to never completing indexing in a
  // pathological situation.)
  let glodaFolder = GlodaDatastore._mapFolder(this._indexingFolder);
  if (glodaFolder.dirtyStatus == glodaFolder.kFolderFilthy) {
    this._indexerGetEnumerator(this.kEnumIndexedMsgs, true);
    let count = 0;
    for (let msgHdr of this._indexingEnumerator) {
      // we still need to avoid locking up the UI, pause periodically...
      if (++count % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
        yield GlodaConstants.kWorkSync;
      }

      let glodaMessageId = msgHdr.getUint32Property(
        GLODA_MESSAGE_ID_PROPERTY
      );
      // if it has a gloda message id, we need to mark it filthy
      if (glodaMessageId != 0) {
        msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, this.kMessageFilthy);
      }
      // if it doesn't have a gloda message id, we will definitely index it,
      // so no action is required.
    }
    // Commit the filthy status changes to the message database.
    this._indexingDatabase.commit(Ci.nsMsgDBCommitType.kLargeCommit);

    // this will automatically persist to the database
    glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderDirty);
  }

  // Figure out whether we're supposed to index _everything_ or just what
  // has not yet been indexed.
  let force = "force" in aJob && aJob.force;
  let enumeratorType = force ? this.kEnumAllMsgs : this.kEnumMsgsToIndex;

  // Pass 1: count the number of messages to index.
  // We do this in order to be able to report to the user what we're doing.
  // TODO: give up after reaching a certain number of messages in folders
  // with ridiculous numbers of messages and make the interface just say
  // something like "over N messages to go."

  this._indexerGetEnumerator(enumeratorType);

  let numMessagesToIndex = 0;
  // eslint-disable-next-line no-unused-vars
  for (let ignore of this._indexingEnumerator) {
    // We're only counting, so do bigger chunks on this pass.
    ++numMessagesToIndex;
    if (numMessagesToIndex % (HEADER_CHECK_SYNC_BLOCK_SIZE * 8) == 0) {
      yield GlodaConstants.kWorkSync;
    }
  }

  aJob.goal = numMessagesToIndex;

  if (numMessagesToIndex > 0) {
    // We used up the iterator, get a new one.
    this._indexerGetEnumerator(enumeratorType);

    // Pass 2: index the messages.
    let count = 0;
    for (let msgHdr of this._indexingEnumerator) {
      // per above, we want to periodically release control while doing all
      // this header traversal/investigation.
      if (++count % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
        yield GlodaConstants.kWorkSync;
      }

      // To keep our counts more accurate, increment the offset before
      // potentially skipping any messages.
      ++aJob.offset;

      // Skip messages that have not yet been reported to us as existing via
      // msgsClassified.
      if (
        this._indexingFolder.getProcessingFlags(msgHdr.messageKey) &
        NOT_YET_REPORTED_PROCESSING_FLAGS
      ) {
        continue;
      }

      // Because the gloda id could be in-flight, we need to double-check the
      // enumerator here since it can't know about our in-memory stuff.
      let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(msgHdr);
      // if the message seems valid and we are not forcing indexing, skip it.
      // (that means good gloda id and not dirty)
      if (
        !force &&
        glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
        glodaDirty == this.kMessageClean
      ) {
        continue;
      }

      this._log.debug(">>> calling _indexMessage");
      yield aCallbackHandle.pushAndGo(
        this._indexMessage(msgHdr, aCallbackHandle),
        { what: "indexMessage", msgHdr }
      );
      GlodaIndexer._indexedMessageCount++;
      this._log.debug("<<< back from _indexMessage");
    }
  }

  // This will trigger an (async) db update which cannot hit the disk prior to
  // the actual database records that constitute the clean state.
  // XXX There is the slight possibility that, in the event of a crash, this
  // will hit the disk but the gloda-id properties on the headers will not
  // get set. This should ideally be resolved by detecting a non-clean
  // shutdown and marking all folders as dirty.
  glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderClean);

  // by definition, it's not likely we'll visit this folder again anytime soon
  this._indexerLeaveFolder();

  aJob.safelyInvokeCallback(true);

  yield GlodaConstants.kWorkDone;
},
+
+ /**
+ * Invoked when a "message" job is scheduled so that we can clear
+ * _pendingAddJob if that is the job. We do this so that work items are not
+ * added to _pendingAddJob while it is being processed.
+ */
+ _schedule_messageIndex(aJob, aCallbackHandle) {
+ // we do not want new work items to be added as we are processing, so
+ // clear _pendingAddJob. A new job will be created as needed.
+ if (aJob === this._pendingAddJob) {
+ this._pendingAddJob = null;
+ }
+ // update our goal from the items length
+ aJob.goal = aJob.items.length;
+ },
+ /**
+ * If the job gets canceled, we need to make sure that we clear out pending
+ * add job or our state will get wonky.
+ */
+ _canceled_messageIndex(aJob) {
+ if (aJob === this._pendingAddJob) {
+ this._pendingAddJob = null;
+ }
+ },
+
/**
 * Index a specific list of messages that we know to index from
 * event-notification hints.
 *
 * aJob.items holds [gloda folder ID, message key or message-id string]
 * tuples; deleted folders and folders that are compacting/compacted are
 * skipped. Each resolvable header is indexed via _indexMessage.
 */
*_worker_messageIndex(aJob, aCallbackHandle) {
  // if we are already in the correct folder, our "get in the folder" clause
  // will not execute, so we need to make sure this value is accurate in
  // that case. (and we want to avoid multiple checks...)
  for (; aJob.offset < aJob.items.length; aJob.offset++) {
    let item = aJob.items[aJob.offset];
    // item is either [folder ID, message key] or
    // [folder ID, message ID]

    let glodaFolderId = item[0];
    // If the folder has been deleted since we queued, skip this message
    if (!GlodaDatastore._folderIdKnown(glodaFolderId)) {
      continue;
    }
    let glodaFolder = GlodaDatastore._mapFolderID(glodaFolderId);

    // Stay out of folders that:
    // - are compacting / compacted and not yet processed
    // - got deleted (this would be redundant if we had a stance on id nukage)
    // (these things could have changed since we queued the event)
    if (
      glodaFolder.compacting ||
      glodaFolder.compacted ||
      glodaFolder._deleted
    ) {
      continue;
    }

    // get in the folder
    if (this._indexingGlodaFolder != glodaFolder) {
      yield this._indexerEnterFolder(glodaFolderId);

      // Now that we have the real nsIMsgFolder, sanity-check that we should
      // be indexing it. (There are some checks that require the
      // nsIMsgFolder.)
      if (!this.shouldIndexFolder(this._indexingFolder)) {
        continue;
      }
    }

    let msgHdr;
    // GetMessageHeader can be affected by the use cache, so we need to check
    // ContainsKey first to see if the header is really actually there.
    if (typeof item[1] == "number") {
      msgHdr =
        this._indexingDatabase.containsKey(item[1]) &&
        this._indexingFolder.GetMessageHeader(item[1]);
    } else {
      // Same deal as in move processing.
      // TODO fixme to not assume singular message-id's.
      msgHdr = this._indexingDatabase.getMsgHdrForMessageID(item[1]);
    }

    if (msgHdr) {
      yield aCallbackHandle.pushAndGo(
        this._indexMessage(msgHdr, aCallbackHandle),
        { what: "indexMessage", msgHdr }
      );
    } else {
      // Header could not be resolved; just yield control and move on.
      yield GlodaConstants.kWorkSync;
    }
  }

  // There is no real reason to stay 'in' the folder. If we are going to get
  // more events from the folder, its database would have to be open for us
  // to get the events, so it's not like we're creating an efficiency
  // problem where we unload a folder just to load it again in 2 seconds.
  // (Well, at least assuming the views are good about holding onto the
  // database references even though they go out of their way to avoid
  // holding onto message header references.)
  this._indexerLeaveFolder();

  yield GlodaConstants.kWorkDone;
},
+
+ /**
+ * Recover from a "folder" or "message" job failing inside a call to
+ * |_indexMessage|, marking the message bad. If we were not in an
+ * |_indexMessage| call, then fail to recover.
+ *
+ * @param aJob The job that was being worked. We ignore this for now.
+ * @param aContextStack The callbackHandle mechanism's context stack. When we
+ * invoke pushAndGo for _indexMessage we put something in so we can
+ * detect when it is on the async stack.
+ * @param aException The exception that is necessitating we attempt to
+ * recover.
+ *
+ * @returns 1 if we were able to recover (because we want the call stack
+ * popped down to our worker), false if we can't.
+ */
+ _recover_indexMessage(aJob, aContextStack, aException) {
+ // See if indexMessage is on the stack...
+ if (
+ aContextStack.length >= 2 &&
+ aContextStack[1] &&
+ "what" in aContextStack[1] &&
+ aContextStack[1].what == "indexMessage"
+ ) {
+ // it is, so this is probably recoverable.
+
+ this._log.debug(
+ "Exception while indexing message, marking it bad (gloda id of 1)."
+ );
+
+ // -- Mark the message as bad
+ let msgHdr = aContextStack[1].msgHdr;
+ // (In the worst case, the header is no longer valid, which will result in
+ // exceptions. We need to be prepared for that.)
+ try {
+ msgHdr.setUint32Property(
+ GLODA_MESSAGE_ID_PROPERTY,
+ GLODA_BAD_MESSAGE_ID
+ );
+ // clear the dirty bit if it has one
+ if (msgHdr.getUint32Property(GLODA_DIRTY_PROPERTY)) {
+ msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, 0);
+ }
+ } catch (ex) {
+ // If we are indexing a folder and the message header is no longer
+ // valid, then it's quite likely the whole folder is no longer valid.
+ // But since in the event-driven message indexing case we could have
+ // other valid things to look at, let's try and recover. The folder
+ // indexing case will come back to us shortly and we will indicate
+ // recovery is not possible at that point.
+ // So do nothing here since by popping the indexing of the specific
+ // message out of existence we are recovering.
+ }
+ return 1;
+ }
+ return false;
+ },
+
+ /**
+ * Cleanup after an aborted "folder" or "message" job.
+ */
+ _cleanup_indexing(aJob) {
+ this._indexerLeaveFolder();
+ aJob.safelyInvokeCallback(false);
+ },
+
/**
 * Maximum number of deleted messages to fetch and process per query in
 * _worker_processDeletes. Arbitrary; there are no real known performance
 * constraints at this point.
 */
DELETED_MESSAGE_BLOCK_SIZE: 32,
+
/**
 * Process pending deletes: repeatedly query for blocks of gloda messages
 * marked deleted (DELETED_MESSAGE_BLOCK_SIZE at a time) and run
 * _deleteMessage on each until none remain, then clear pendingDeletions.
 */
*_worker_processDeletes(aJob, aCallbackHandle) {
  // Count the number of messages we will eventually process. People freak
  // out when the number is constantly increasing because they think gloda
  // has gone rogue. (Note: new deletions can still accumulate during
  // our execution, so we may 'expand' our count a little still.)
  this._datastore.countDeletedMessages(aCallbackHandle.wrappedCallback);
  aJob.goal = yield GlodaConstants.kWorkAsync;
  this._log.debug(
    "There are currently " +
      aJob.goal +
      " messages awaiting" +
      " deletion processing."
  );

  // get a block of messages to delete.
  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
    noDbQueryValidityConstraints: true,
  });
  query._deleted(1);
  query.limit(this.DELETED_MESSAGE_BLOCK_SIZE);
  let deletedCollection = query.getCollection(aCallbackHandle);
  yield GlodaConstants.kWorkAsync;

  while (deletedCollection.items.length) {
    for (let message of deletedCollection.items) {
      // If it turns out our count is wrong (because some new deletions
      // happened since we entered this worker), let's issue a new count
      // and use that to accurately update our goal.
      if (aJob.offset >= aJob.goal) {
        this._datastore.countDeletedMessages(aCallbackHandle.wrappedCallback);
        aJob.goal += yield GlodaConstants.kWorkAsync;
      }

      yield aCallbackHandle.pushAndGo(
        this._deleteMessage(message, aCallbackHandle)
      );
      aJob.offset++;
      yield GlodaConstants.kWorkSync;
    }

    // Fetch the next block; the loop exits when a block comes back empty.
    deletedCollection = query.getCollection(aCallbackHandle);
    yield GlodaConstants.kWorkAsync;
  }
  this.pendingDeletions = false;

  yield GlodaConstants.kWorkDone;
},
+
/**
 * Migration worker: repair email identities whose contactID references a
 * contact row that no longer exists. For each such identity we insert a
 * replacement contact (named from the matching address book card when one
 * exists, otherwise the email address), patch any cached in-memory identity
 * object, then mark all folders dirty and request an indexing sweep.
 * Finishes by recording the current schema version to mark the upgrade done.
 */
*_worker_fixMissingContacts(aJob, aCallbackHandle) {
  let identityContactInfos = [];

  // -- asynchronously get a list of all identities without contacts
  // The upper bound on the number of messed up contacts is the number of
  // contacts in the user's address book. This should be small enough
  // (and the data size small enough) that this won't explode thunderbird.
  let queryStmt = GlodaDatastore._createAsyncStatement(
    "SELECT identities.id, identities.contactID, identities.value " +
      "FROM identities " +
      "LEFT JOIN contacts ON identities.contactID = contacts.id " +
      "WHERE identities.kind = 'email' AND contacts.id IS NULL",
    true
  );
  queryStmt.executeAsync({
    handleResult(aResultSet) {
      let row;
      while ((row = aResultSet.getNextRow())) {
        identityContactInfos.push({
          identityId: row.getInt64(0),
          contactId: row.getInt64(1),
          email: row.getString(2),
        });
      }
    },
    handleError(aError) {},
    handleCompletion(aReason) {
      GlodaDatastore._asyncCompleted();
      aCallbackHandle.wrappedCallback();
    },
  });
  queryStmt.finalize();
  // Manually account for the outstanding async statement; this increment is
  // balanced by the _asyncCompleted() call in handleCompletion above.
  GlodaDatastore._pendingAsyncStatements++;
  yield GlodaConstants.kWorkAsync;

  // -- perform fixes only if there were missing contacts
  if (identityContactInfos.length) {
    const yieldEvery = 64;
    // - create the missing contacts
    for (let i = 0; i < identityContactInfos.length; i++) {
      if (i % yieldEvery === 0) {
        yield GlodaConstants.kWorkSync;
      }

      let info = identityContactInfos[i],
        card = MailServices.ab.cardForEmailAddress(info.email),
        contact = new GlodaContact(
          GlodaDatastore,
          info.contactId,
          null,
          null,
          card ? card.displayName || info.email : info.email,
          0,
          0
        );
      GlodaDatastore.insertContact(contact);

      // update the in-memory rep of the identity to know about the contact
      // if there is one.
      let identity = GlodaCollectionManager.cacheLookupOne(
        GlodaConstants.NOUN_IDENTITY,
        info.identityId,
        false
      );
      if (identity) {
        // Unfortunately, although this fixes the (reachable) Identity and
        // exposes the Contact, it does not make the Contact reachable from
        // the collection manager. This will make explicit queries that look
        // up the contact potentially see the case where
        // contact.identities[0].contact !== contact. Alternately, that
        // may not happen and instead the "contact" object we created above
        // may become unlinked. (I'd have to trace some logic I don't feel
        // like tracing.) Either way, The potential fallout is minimal
        // since the object identity invariant will just lapse and popularity
        // on the contact may become stale, and neither of those meaningfully
        // affect the operation of anything in Thunderbird.
        // If we really cared, we could find all the dominant collections
        // that reference the identity and update their corresponding
        // contact collection to make it reachable. That use-case does not
        // exist outside of here, which is why we're punting.
        identity._contact = contact;
        contact._identities = [identity];
      }

      // NOTE: If the addressbook indexer did anything useful other than
      // adapting to name changes, we could schedule indexing of the cards at
      // this time. However, as of this writing, it doesn't, and this task
      // is a one-off relevant only to the time of this writing.
    }

    // - mark all folders as dirty, initiate indexing sweep
    this.dirtyAllKnownFolders();
    this.indexingSweepNeeded = true;
  }

  // -- mark the schema upgrade, be done
  GlodaDatastore._updateSchemaVersion(GlodaDatastore._schemaVersion);
  yield GlodaConstants.kWorkDone;
},
+
+ /**
+ * Determine whether a folder is suitable for indexing.
+ *
+ * @param aMsgFolder An nsIMsgFolder you want to see if we should index.
+ *
+ * @returns true if we want to index messages in this type of folder, false if
+ * we do not.
+ */
+ shouldIndexFolder(aMsgFolder) {
+ let folderFlags = aMsgFolder.flags;
+ // Completely ignore non-mail and virtual folders. They should never even
+ // get to be GlodaFolder instances.
+ if (
+ !(folderFlags & Ci.nsMsgFolderFlags.Mail) ||
+ folderFlags & Ci.nsMsgFolderFlags.Virtual
+ ) {
+ return false;
+ }
+
+ // Some folders do not really exist; we can detect this by getStringProperty
+ // exploding when we call it. This is primarily a concern because
+ // _mapFolder calls said exploding method, but we also don't want to
+ // even think about indexing folders that don't exist. (Such folders are
+ // likely the result of a messed up profile.)
+ try {
+ // flags is used because it should always be in the cache avoiding a miss
+ // which would compel an msf open.
+ aMsgFolder.getStringProperty("flags");
+ } catch (ex) {
+ return false;
+ }
+
+ // Now see what our gloda folder information has to say about the folder.
+ let glodaFolder = GlodaDatastore._mapFolder(aMsgFolder);
+ return glodaFolder.indexingPriority != glodaFolder.kIndexingNeverPriority;
+ },
+
+ /**
+ * Sets the indexing priority for this folder and persists it both to Gloda,
+ * and, for backup purposes, to the nsIMsgFolder via string property as well.
+ *
+ * Setting this priority may cause the indexer to either reindex this folder,
+ * or remove this folder from the existing index.
+ *
+ * @param {nsIMsgFolder} aFolder
+ * @param {number} aPriority (one of the priority constants from GlodaFolder)
+ */
+ setFolderIndexingPriority(aFolder, aPriority) {
+ let glodaFolder = GlodaDatastore._mapFolder(aFolder);
+
+ // if there's been no change, we're done
+ if (aPriority == glodaFolder.indexingPriority) {
+ return;
+ }
+
+ // save off the old priority, and set the new one
+ let previousPrio = glodaFolder.indexingPriority;
+ glodaFolder._indexingPriority = aPriority;
+
+ // persist the new priority
+ GlodaDatastore.updateFolderIndexingPriority(glodaFolder);
+ aFolder.setStringProperty("indexingPriority", Number(aPriority).toString());
+
+ // if we've been told never to index this folder...
+ if (aPriority == glodaFolder.kIndexingNeverPriority) {
+ // stop doing so
+ if (this._indexingFolder == aFolder) {
+ GlodaIndexer.killActiveJob();
+ }
+
+ // mark all existing messages as deleted
+ GlodaDatastore.markMessagesDeletedByFolderID(glodaFolder.id);
+
+ // re-index
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ } else if (previousPrio == glodaFolder.kIndexingNeverPriority) {
+ // there's no existing index, but the user now wants one
+ glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;
+ GlodaDatastore.updateFolderDirtyStatus(glodaFolder);
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ }
+ },
+
+ /**
+ * Resets the indexing priority on the given folder to whatever the default
+ * is for folders of that type.
+ *
+ * @note Calls setFolderIndexingPriority under the hood, so has identical
+ * potential reindexing side-effects
+ *
+ * @param {nsIMsgFolder} aFolder
+ * @param {boolean} aAllowSpecialFolderIndexing
+ */
+ resetFolderIndexingPriority(aFolder, aAllowSpecialFolderIndexing) {
+ this.setFolderIndexingPriority(
+ aFolder,
+ GlodaDatastore.getDefaultIndexingPriority(
+ aFolder,
+ aAllowSpecialFolderIndexing
+ )
+ );
+ },
+
+ /**
+ * Queue all of the folders of all of the accounts of the current profile
+ * for indexing. We traverse all folders and queue them immediately to try
+ * and have an accurate estimate of the number of folders that need to be
+ * indexed. (We previously queued accounts rather than immediately
+ * walking their list of folders.)
+ */
+ indexEverything() {
+ this._log.info("Queueing all accounts for indexing.");
+
+ GlodaDatastore._beginTransaction();
+ for (let account of MailServices.accounts.accounts) {
+ this.indexAccount(account);
+ }
+ GlodaDatastore._commitTransaction();
+ },
+
+ /**
+ * Queue all of the folders belonging to an account for indexing.
+ */
+ indexAccount(aAccount) {
+ let rootFolder = aAccount.incomingServer.rootFolder;
+ if (rootFolder instanceof Ci.nsIMsgFolder) {
+ this._log.info("Queueing account folders for indexing: " + aAccount.key);
+
+ for (let folder of rootFolder.descendants) {
+ if (this.shouldIndexFolder(folder)) {
+ GlodaIndexer.indexJob(
+ new IndexingJob("folder", GlodaDatastore._mapFolder(folder).id)
+ );
+ }
+ }
+ } else {
+ this._log.info("Skipping Account, root folder not nsIMsgFolder");
+ }
+ },
+
+ /**
+ * Queue a single folder for indexing given an nsIMsgFolder.
+ *
+ * @param [aOptions.callback] A callback to invoke when the folder finishes
+ * indexing. First argument is true if the task ran to completion
+ * successfully, false if we had to abort for some reason.
+ * @param [aOptions.force=false] Should we force the indexing of all messages
+ * in the folder (true) or just index what hasn't been indexed (false).
+ * @returns true if we are going to index the folder, false if not.
+ */
+ indexFolder(aMsgFolder, aOptions) {
+ if (!this.shouldIndexFolder(aMsgFolder)) {
+ return false;
+ }
+ let glodaFolder = GlodaDatastore._mapFolder(aMsgFolder);
+ // stay out of compacting/compacted folders
+ if (glodaFolder.compacting || glodaFolder.compacted) {
+ return false;
+ }
+
+ this._log.info("Queue-ing folder for indexing: " + aMsgFolder.prettyName);
+ let job = new IndexingJob("folder", glodaFolder.id);
+ if (aOptions) {
+ if ("callback" in aOptions) {
+ job.callback = aOptions.callback;
+ }
+ if ("force" in aOptions) {
+ job.force = true;
+ }
+ }
+ GlodaIndexer.indexJob(job);
+ return true;
+ },
+
+ /**
+ * Queue a list of messages for indexing.
+ *
+ * @param aFoldersAndMessages List of [nsIMsgFolder, message key] tuples.
+ */
+ indexMessages(aFoldersAndMessages) {
+ let job = new IndexingJob("message", null);
+ job.items = aFoldersAndMessages.map(fm => [
+ GlodaDatastore._mapFolder(fm[0]).id,
+ fm[1],
+ ]);
+ GlodaIndexer.indexJob(job);
+ },
+
+ /**
+ * Mark all known folders as dirty so that the next indexing sweep goes
+ * into all folders and checks their contents to see if they need to be
+ * indexed.
+ *
+ * This is being added for the migration case where we want to try and reindex
+ * all of the messages that had been marked with GLODA_BAD_MESSAGE_ID but
+ * which is now GLODA_OLD_BAD_MESSAGE_ID and so we should attempt to reindex
+ * them.
+ */
+ dirtyAllKnownFolders() {
+ // Just iterate over the datastore's folder map and tell each folder to
+ // be dirty if its priority is not disabled.
+ for (let folderID in GlodaDatastore._folderByID) {
+ let glodaFolder = GlodaDatastore._folderByID[folderID];
+ if (glodaFolder.indexingPriority !== glodaFolder.kIndexingNeverPriority) {
+ glodaFolder._ensureFolderDirty();
+ }
+ }
+ },
+
+ /**
+ * Given a message header, return whether this message is likely to have
+ * been indexed or not.
+ *
+ * This means the message must:
+ * - Be in a folder eligible for gloda indexing. (Not News, etc.)
+ * - Be in a non-filthy folder.
+ * - Be gloda-indexed and non-filthy.
+ *
+ * @param aMsgHdr A message header.
+ * @returns true if the message is likely to have been indexed.
+ */
+ isMessageIndexed(aMsgHdr) {
+ // If it's in a folder that we flat out do not index, say no.
+ if (!this.shouldIndexFolder(aMsgHdr.folder)) {
+ return false;
+ }
+ let glodaFolder = GlodaDatastore._mapFolder(aMsgHdr.folder);
+ let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(aMsgHdr);
+ return (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ glodaDirty != GlodaMsgIndexer.kMessageFilthy &&
+ glodaFolder &&
+ glodaFolder.dirtyStatus != glodaFolder.kFolderFilthy
+ );
+ },
+
+ /* *********** Event Processing *********** */
+
/**
 * Tracks messages we have received msgKeyChanged notifications for in order
 * to provide batching and to suppress needless reindexing when we receive
 * the expected follow-up msgsClassified notification.
 *
 * The entries in this dictionary should be extremely short-lived as we
 * receive the msgKeyChanged notification as the offline fake header is
 * converted into a real header (which is accompanied by a msgAdded
 * notification we don't pay attention to). Once the headers finish
 * updating, the message classifier will get its at-bat and should likely
 * find that the messages have already been classified and so fast-path
 * them.
 *
 * The keys in this dictionary are chosen to be consistent with those of
 * PendingCommitTracker: the folder.URI + "#" + the (new) message key.
 * The values in the dictionary are either an object with "id" (the gloda
 * id), "key" (the new message key), and "dirty" (is it dirty and so
 * should still be queued for indexing) attributes, or null indicating that
 * no change in message key occurred and so no database changes are required.
 */
_keyChangedBatchInfo: {},
+
+ /**
+ * Common logic for things that want to feed event-driven indexing. This gets
+ * called by both |_msgFolderListener.msgsClassified| when we are first
+ * seeing a message as well as by |_folderListener| when things happen to
+ * existing messages. Although we could slightly specialize for the
+ * new-to-us case, it works out to be cleaner to just treat them the same
+ * and take a very small performance hit.
+ *
+ * @param aMsgHdrs array of messages to treat as potentially changed.
+ * @param aDirtyingEvent Is this event inherently dirtying? Receiving a
+ * msgsClassified notification is not inherently dirtying because it is
+ * just telling us that a message exists. We use this knowledge to
+ * ignore the msgsClassified notifications for messages we have received
+ * msgKeyChanged notifications for and fast-pathed. Since it is possible
+ * for user action to do something that dirties the message between the
+ * time we get the msgKeyChanged notification and when we receive the
+ * msgsClassified notification, we want to make sure we don't get
+ * confused. (Although since we remove the message from our ignore-set
+ * after the first notification, we would likely just mistakenly treat
+ * the msgsClassified notification as something dirtying, so it would
+ * still work out...)
+ */
+ _reindexChangedMessages(aMsgHdrs, aDirtyingEvent) {
+ // Batch accumulators; allocated lazily so the common "nothing to do"
+ // case performs no work after the loop.
+ let glodaIdsNeedingDeletion = null;
+ let messageKeyChangedIds = null,
+ messageKeyChangedNewKeys = null;
+ for (let msgHdr of aMsgHdrs) {
+ // -- Index this folder?
+ let msgFolder = msgHdr.folder;
+ if (!this.shouldIndexFolder(msgFolder)) {
+ continue;
+ }
+ // -- Ignore messages in filthy folders!
+ // A filthy folder can only be processed by an indexing sweep, and at
+ // that point the message will get indexed.
+ let glodaFolder = GlodaDatastore._mapFolder(msgHdr.folder);
+ if (glodaFolder.dirtyStatus == glodaFolder.kFolderFilthy) {
+ continue;
+ }
+
+ // -- msgKeyChanged event follow-up
+ // Only consult the ignore-set for non-dirtying events; a dirtying
+ // event must always be processed.
+ if (!aDirtyingEvent) {
+ let keyChangedKey = msgHdr.folder.URI + "#" + msgHdr.messageKey;
+ if (keyChangedKey in this._keyChangedBatchInfo) {
+ var keyChangedInfo = this._keyChangedBatchInfo[keyChangedKey];
+ delete this._keyChangedBatchInfo[keyChangedKey];
+
+ // Null means to ignore this message because the key did not change
+ // (and the message was not dirty so it is safe to ignore.)
+ if (keyChangedInfo == null) {
+ continue;
+ }
+ // (the key may be null if we only generated the entry because the
+ // message was dirty)
+ if (keyChangedInfo.key !== null) {
+ if (messageKeyChangedIds == null) {
+ messageKeyChangedIds = [];
+ messageKeyChangedNewKeys = [];
+ }
+ messageKeyChangedIds.push(keyChangedInfo.id);
+ messageKeyChangedNewKeys.push(keyChangedInfo.key);
+ }
+ // ignore the message because it was not dirty
+ if (!keyChangedInfo.isDirty) {
+ continue;
+ }
+ }
+ }
+
+ // -- Index this message?
+ // We index local messages, IMAP messages that are offline, and IMAP
+ // messages that aren't offline but whose folders aren't offline either
+ let isFolderLocal = msgFolder instanceof Ci.nsIMsgLocalMailFolder;
+ if (!isFolderLocal) {
+ // Skip a non-offline message in an offline-enabled folder;
+ // presumably it gets picked up once its offline copy lands --
+ // TODO confirm against the Offline flag-change notification path.
+ if (
+ !(msgHdr.flags & Ci.nsMsgMessageFlags.Offline) &&
+ msgFolder.getFlag(Ci.nsMsgFolderFlags.Offline)
+ ) {
+ continue;
+ }
+ }
+ // Ignore messages whose processing flags indicate it has not yet been
+ // classified. In the IMAP case if the Offline flag is going to get set
+ // we are going to see it before the msgsClassified event so this is
+ // very important.
+ if (
+ msgFolder.getProcessingFlags(msgHdr.messageKey) &
+ NOT_YET_REPORTED_PROCESSING_FLAGS
+ ) {
+ continue;
+ }
+
+ let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(msgHdr);
+
+ let isSpam =
+ msgHdr.getStringProperty(JUNK_SCORE_PROPERTY) == JUNK_SPAM_SCORE_STR;
+
+ // -- Is the message currently gloda indexed?
+ if (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ glodaDirty != this.kMessageFilthy
+ ) {
+ // - Is the message spam?
+ if (isSpam) {
+ // Treat this as a deletion...
+ if (!glodaIdsNeedingDeletion) {
+ glodaIdsNeedingDeletion = [];
+ }
+ glodaIdsNeedingDeletion.push(glodaId);
+ // and skip to the next message
+ continue;
+ }
+
+ // - Mark the message dirty if it is clean.
+ // (This is the only case in which we need to mark dirty so that the
+ // indexing sweep takes care of things if we don't process this in
+ // an event-driven fashion. If the message has no gloda-id or does
+ // and it's already dirty or filthy, it is already marked for
+ // indexing.)
+ if (glodaDirty == this.kMessageClean) {
+ msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, this.kMessageDirty);
+ }
+ // if the message is pending clean, this change invalidates that.
+ PendingCommitTracker.noteDirtyHeader(msgHdr);
+ } else if (isSpam) {
+ // If it's not indexed but is spam, ignore it.
+ continue;
+ }
+ // (we want to index the message if we are here)
+
+ // mark the folder dirty too, so we know to look inside
+ glodaFolder._ensureFolderDirty();
+
+ // Lazily create (and queue) the shared event-driven add job.
+ if (this._pendingAddJob == null) {
+ this._pendingAddJob = new IndexingJob("message", null);
+ GlodaIndexer.indexJob(this._pendingAddJob);
+ }
+ // only queue the message if we haven't overflowed our event-driven budget
+ if (this._pendingAddJob.items.length < this._indexMaxEventQueueMessages) {
+ this._pendingAddJob.items.push([
+ GlodaDatastore._mapFolder(msgFolder).id,
+ msgHdr.messageKey,
+ ]);
+ } else {
+ // Over budget: fall back to a full sweep instead of growing the job.
+ this.indexingSweepNeeded = true;
+ }
+ }
+
+ // Process any message key changes (from earlier msgKeyChanged events)
+ if (messageKeyChangedIds != null) {
+ GlodaDatastore.updateMessageKeys(
+ messageKeyChangedIds,
+ messageKeyChangedNewKeys
+ );
+ }
+
+ // If we accumulated any deletions in there, batch them off now.
+ if (glodaIdsNeedingDeletion) {
+ GlodaDatastore.markMessagesDeletedByIDs(glodaIdsNeedingDeletion);
+ this.pendingDeletions = true;
+ }
+ },
+
+ /* ***** Folder Changes ***** */
+ /**
+ * All additions and removals are queued for processing. Indexing messages
+ * is potentially phenomenally expensive, and deletion can still be
+ * relatively expensive due to our need to delete the message, its
+ * attributes, and all attributes that reference it. Additionally,
+ * attribute deletion costs are higher than attribute look-up because
+ * there is the actual row plus its 3 indices, and our covering indices are
+ * no help there.
+ *
+ */
+ _msgFolderListener: {
+ indexer: null,
+
+ /**
+ * We no longer use the msgAdded notification, instead opting to wait until
+ * junk/trait classification has run (or decided not to run) and all
+ * filters have run. The msgsClassified notification provides that for us.
+ */
+ msgAdded(aMsgHdr) {
+ // we are never called! we do not enable this bit!
+ },
+
+ /**
+ * Process (apparently newly added) messages that have been looked at by
+ * the message classifier. This ensures that if the message was going
+ * to get marked as spam, this will have already happened.
+ *
+ * Besides truly new (to us) messages, We will also receive this event for
+ * messages that are the result of IMAP message move/copy operations,
+ * including both moves that generated offline fake headers and those that
+ * did not. In the offline fake header case, however, we are able to
+ * ignore their msgsClassified events because we will have received a
+ * msgKeyChanged notification sometime in the recent past.
+ */
+ msgsClassified(aMsgHdrs, aJunkClassified, aTraitClassified) {
+ this.indexer._log.debug("msgsClassified notification");
+ try {
+ GlodaMsgIndexer._reindexChangedMessages(aMsgHdrs, false);
+ } catch (ex) {
+ this.indexer._log.error("Explosion in msgsClassified handling:", ex);
+ }
+ },
+
+ /**
+ * Any messages which have had their junk state changed are marked for
+ * reindexing.
+ */
+ msgsJunkStatusChanged(messages) {
+ this.indexer._log.debug("JunkStatusChanged notification");
+ GlodaMsgIndexer._reindexChangedMessages(messages, true);
+ },
+
+ /**
+ * Handle real, actual deletion (move to trash and IMAP deletion model
+ * don't count); we only see the deletion here when it becomes forever,
+ * or rather _just before_ it becomes forever. Because the header is
+ * going away, we need to either process things immediately or extract the
+ * information required to purge it later without the header.
+ * To this end, we mark all messages that were indexed in the gloda message
+ * database as deleted. We set our pending deletions flag to let our
+ * indexing logic know that after its next wave of folder traversal, it
+ * should perform a deletion pass. If it turns out the messages are coming
+ * back, the fact that deletion is thus deferred can be handy, as we can
+ * reuse the existing gloda message.
+ */
+ msgsDeleted(aMsgHdrs) {
+ this.indexer._log.debug("msgsDeleted notification");
+ let glodaMessageIds = [];
+
+ for (let msgHdr of aMsgHdrs) {
+ let [glodaId, glodaDirty] = PendingCommitTracker.getGlodaState(msgHdr);
+ if (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ glodaDirty != GlodaMsgIndexer.kMessageFilthy
+ ) {
+ glodaMessageIds.push(glodaId);
+ }
+ }
+
+ if (glodaMessageIds.length) {
+ GlodaMsgIndexer._datastore.markMessagesDeletedByIDs(glodaMessageIds);
+ GlodaMsgIndexer.pendingDeletions = true;
+ }
+ },
+
+ /**
+ * Process a move or copy.
+ *
+ * Moves to a local folder or an IMAP folder where we are generating offline
+ * fake headers are dealt with efficiently because we get both the source
+ * and destination headers. The main ingredient to having offline fake
+ * headers is that allowUndo was true when the operation was performance.
+ * The only non-obvious thing is that we need to make sure that we deal
+ * with the impact of filthy folders and messages on gloda-id's (they
+ * invalidate the gloda-id).
+ *
+ * Moves to an IMAP folder that do not generate offline fake headers do not
+ * provide us with the target header, but the IMAP SetPendingAttributes
+ * logic will still attempt to propagate the properties on the message
+ * header so when we eventually see it in the msgsClassified notification,
+ * it should have the properties of the source message copied over.
+ * We make sure that gloda-id's do not get propagated when messages are
+ * moved from IMAP folders that are marked filthy or are marked as not
+ * supposed to be indexed by clearing the pending attributes for the header
+ * being tracked by the destination IMAP folder.
+ * We could fast-path the IMAP move case in msgsClassified by noticing that
+ * a message is showing up with a gloda-id header already and just
+ * performing an async location update.
+ *
+ * Moves that occur involving 'compacted' folders are fine and do not
+ * require special handling here. The one tricky super-edge-case that
+ * can happen (and gets handled by the compaction pass) is the move of a
+ * message that got gloda indexed that did not already have a gloda-id and
+ * PendingCommitTracker did not get to flush the gloda-id before the
+ * compaction happened. In that case our move logic cannot know to do
+ * anything and the gloda database still thinks the message lives in our
+ * folder. The compaction pass will deal with this by marking the message
+ * as deleted. The rationale being that marking it deleted allows the
+ * message to be re-used if it gets indexed in the target location, or if
+ * the target location has already been indexed, we no longer need the
+ * duplicate and it should be deleted. (Also, it is unable to distinguish
+ * between a case where the message got deleted versus moved.)
+ *
+ * Because copied messages are, by their nature, duplicate messages, we
+ * do not particularly care about them. As such, we defer their processing
+ * to the automatic sync logic that will happen much later on. This is
+ * potentially desirable in case the user deletes some of the original
+ * messages, allowing us to reuse the gloda message representations when
+ * we finally get around to indexing the messages. We do need to mark the
+ * folder as dirty, though, to clue in the sync logic.
+ */
+ msgsMoveCopyCompleted(aMove, aSrcMsgHdrs, aDestFolder, aDestMsgHdrs) {
+ this.indexer._log.debug("MoveCopy notification. Move: " + aMove);
+ try {
+ // ---- Move
+ if (aMove) {
+ // -- Effectively a deletion?
+ // If the destination folder is not indexed, it's like these messages
+ // are being deleted.
+ if (!GlodaMsgIndexer.shouldIndexFolder(aDestFolder)) {
+ this.msgsDeleted(aSrcMsgHdrs);
+ return;
+ }
+
+ // -- Avoid propagation of filthy gloda-id's.
+ // If the source folder is filthy or should not be indexed (and so
+ // any gloda-id's found in there are gibberish), our only job is to
+ // strip the gloda-id's off of all the destination headers because
+ // none of the gloda-id's are valid (and so we certainly don't want
+ // to try and use them as a basis for updating message keys.)
+ let srcMsgFolder = aSrcMsgHdrs[0].folder;
+ if (
+ !this.indexer.shouldIndexFolder(srcMsgFolder) ||
+ GlodaDatastore._mapFolder(srcMsgFolder).dirtyStatus ==
+ GlodaFolder.prototype.kFolderFilthy
+ ) {
+ // Local case, just modify the destination headers directly.
+ if (aDestMsgHdrs.length > 0) {
+ for (let destMsgHdr of aDestMsgHdrs) {
+ // zero it out if it exists
+ // (no need to deal with pending commit issues here; a filthy
+ // folder by definition has nothing indexed in it.)
+ let glodaId = destMsgHdr.getUint32Property(
+ GLODA_MESSAGE_ID_PROPERTY
+ );
+ if (glodaId) {
+ destMsgHdr.setUint32Property(GLODA_MESSAGE_ID_PROPERTY, 0);
+ }
+ }
+
+ // Since we are moving messages from a folder where they were
+ // effectively not indexed, it is up to us to make sure the
+ // messages now get indexed.
+ this.indexer._reindexChangedMessages(aDestMsgHdrs);
+ return;
+ }
+
+ // IMAP move case, we need to operate on the pending headers using
+ // the source header to get the pending header and as the
+ // indication of what has been already set on the pending header.
+ let destDb;
+ // so, this can fail, and there's not much we can do about it.
+ try {
+ destDb = aDestFolder.msgDatabase;
+ } catch (ex) {
+ this.indexer._log.warn(
+ "Destination database for " +
+ aDestFolder.prettyName +
+ " not ready on IMAP move." +
+ " Gloda corruption possible."
+ );
+ return;
+ }
+ for (let srcMsgHdr of aSrcMsgHdrs) {
+ // zero it out if it exists
+ // (no need to deal with pending commit issues here; a filthy
+ // folder by definition has nothing indexed in it.)
+ let glodaId = srcMsgHdr.getUint32Property(
+ GLODA_MESSAGE_ID_PROPERTY
+ );
+ if (glodaId) {
+ destDb.setUint32AttributeOnPendingHdr(
+ srcMsgHdr,
+ GLODA_MESSAGE_ID_PROPERTY,
+ 0
+ );
+ }
+ }
+
+ // Nothing remains to be done. The msgClassified event will take
+ // care of making sure the message gets indexed.
+ return;
+ }
+
+ // --- Have destination headers (local case):
+ if (aDestMsgHdrs.length > 0) {
+ // -- Update message keys for valid gloda-id's.
+ // (Which means ignore filthy gloda-id's.)
+ let glodaIds = [];
+ let newMessageKeys = [];
+ // Track whether we see any messages that are not gloda indexed so
+ // we know if we have to mark the destination folder dirty.
+ let sawNonGlodaMessage = false;
+ for (let iMsg = 0; iMsg < aSrcMsgHdrs.length; iMsg++) {
+ let srcMsgHdr = aSrcMsgHdrs[iMsg];
+ let destMsgHdr = aDestMsgHdrs[iMsg];
+
+ let [glodaId, dirtyStatus] =
+ PendingCommitTracker.getGlodaState(srcMsgHdr);
+ if (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ dirtyStatus != GlodaMsgIndexer.kMessageFilthy
+ ) {
+ // we may need to update the pending commit map (it checks)
+ PendingCommitTracker.noteMove(srcMsgHdr, destMsgHdr);
+ // but we always need to update our database
+ glodaIds.push(glodaId);
+ newMessageKeys.push(destMsgHdr.messageKey);
+ } else {
+ sawNonGlodaMessage = true;
+ }
+ }
+
+ // this method takes care to update the in-memory representations
+ // too; we don't need to do anything
+ if (glodaIds.length) {
+ GlodaDatastore.updateMessageLocations(
+ glodaIds,
+ newMessageKeys,
+ aDestFolder
+ );
+ }
+
+ // Mark the destination folder dirty if we saw any messages that
+ // were not already gloda indexed.
+ if (sawNonGlodaMessage) {
+ let destGlodaFolder = GlodaDatastore._mapFolder(aDestFolder);
+ destGlodaFolder._ensureFolderDirty();
+ this.indexer.indexingSweepNeeded = true;
+ }
+ } else {
+ // --- No dest headers (IMAP case):
+ // Update any valid gloda indexed messages into their new folder to
+ // make the indexer's life easier when it sees the messages in their
+ // new folder.
+ let glodaIds = [];
+
+ let srcFolderIsLocal =
+ srcMsgFolder instanceof Ci.nsIMsgLocalMailFolder;
+ for (let msgHdr of aSrcMsgHdrs) {
+ let [glodaId, dirtyStatus] =
+ PendingCommitTracker.getGlodaState(msgHdr);
+ if (
+ glodaId >= GLODA_FIRST_VALID_MESSAGE_ID &&
+ dirtyStatus != GlodaMsgIndexer.kMessageFilthy
+ ) {
+ // we may need to update the pending commit map (it checks)
+ PendingCommitTracker.noteBlindMove(msgHdr);
+ // but we always need to update our database
+ glodaIds.push(glodaId);
+
+ // XXX UNDO WORKAROUND
+ // This constitutes a move from a local folder to an IMAP
+ // folder. Undo does not currently do the right thing for us,
+ // but we have a chance of not orphaning the message if we
+ // mark the source header as dirty so that when the message
+ // gets re-added we see it. (This does require that we enter
+ // the folder; we set the folder dirty after the loop to
+ // increase the probability of this but it's not foolproof
+ // depending on when the next indexing sweep happens and when
+ // the user performs an undo.)
+ msgHdr.setUint32Property(
+ GLODA_DIRTY_PROPERTY,
+ GlodaMsgIndexer.kMessageDirty
+ );
+ }
+ }
+ // XXX ALSO UNDO WORKAROUND
+ if (srcFolderIsLocal) {
+ let srcGlodaFolder = GlodaDatastore._mapFolder(srcMsgFolder);
+ srcGlodaFolder._ensureFolderDirty();
+ }
+
+ // quickly move them to the right folder, zeroing their message keys
+ GlodaDatastore.updateMessageFoldersByKeyPurging(
+ glodaIds,
+ aDestFolder
+ );
+ // we _do not_ need to mark the folder as dirty, because the
+ // message added events will cause that to happen.
+ }
+ } else {
+ // ---- Copy case
+ // -- Do not propagate gloda-id's for copies
+ // (Only applies if we have the destination header, which means local)
+ for (let destMsgHdr of aDestMsgHdrs) {
+ let glodaId = destMsgHdr.getUint32Property(
+ GLODA_MESSAGE_ID_PROPERTY
+ );
+ if (glodaId) {
+ destMsgHdr.setUint32Property(GLODA_MESSAGE_ID_PROPERTY, 0);
+ }
+ }
+
+ // mark the folder as dirty; we'll get to it later.
+ let destGlodaFolder = GlodaDatastore._mapFolder(aDestFolder);
+ destGlodaFolder._ensureFolderDirty();
+ this.indexer.indexingSweepNeeded = true;
+ }
+ } catch (ex) {
+ this.indexer._log.error(
+ "Problem encountered during message move/copy:",
+ ex.stack
+ );
+ }
+ },
+
+ /**
+ * Queue up message key changes that are a result of offline fake headers
+ * being made real for the actual update during the msgsClassified
+ * notification that is expected after this. We defer the
+ * actual work (if there is any to be done; the fake header might have
+ * guessed the right UID correctly) so that we can batch our work.
+ *
+ * The expectation is that there will be no meaningful time window between
+ * this notification and the msgsClassified notification since the message
+ * classifier should not actually need to classify the messages (they
+ * should already have been classified) and so can fast-path them.
+ */
+ msgKeyChanged(aOldMsgKey, aNewMsgHdr) {
+ try {
+ let val = null,
+ newKey = aNewMsgHdr.messageKey;
+ let [glodaId, glodaDirty] =
+ PendingCommitTracker.getGlodaState(aNewMsgHdr);
+ // If we haven't indexed this message yet, take no action, and leave it
+ // up to msgsClassified to take proper action.
+ if (glodaId < GLODA_FIRST_VALID_MESSAGE_ID) {
+ return;
+ }
+ // take no action on filthy messages,
+ // generate an entry if dirty or the keys don't match.
+ if (
+ glodaDirty !== GlodaMsgIndexer.kMessageFilthy &&
+ (glodaDirty === GlodaMsgIndexer.kMessageDirty ||
+ aOldMsgKey !== newKey)
+ ) {
+ val = {
+ id: glodaId,
+ key: aOldMsgKey !== newKey ? newKey : null,
+ isDirty: glodaDirty === GlodaMsgIndexer.kMessageDirty,
+ };
+ }
+
+ let key = aNewMsgHdr.folder.URI + "#" + aNewMsgHdr.messageKey;
+ this.indexer._keyChangedBatchInfo[key] = val;
+ } catch (ex) {
+ // this is more for the unit test to fail rather than user error reporting
+ this.indexer._log.error(
+ "Problem encountered during msgKeyChanged" +
+ " notification handling: " +
+ ex +
+ "\n\n" +
+ ex.stack +
+ " \n\n"
+ );
+ }
+ },
+
+ /**
+ * Detect newly added folders before they get messages so we map them before
+ * they get any messages added to them. If we only hear about them after
+ * they get their 1st message, then we will mark them filthy, but if we mark
+ * them before that, they get marked clean.
+ */
+ folderAdded(aMsgFolder) {
+ // This is invoked for its side-effect of invoking _mapFolder and doing so
+ // only after filtering out folders we don't care about.
+ GlodaMsgIndexer.shouldIndexFolder(aMsgFolder);
+ },
+
+ /**
+ * Handles folder no-longer-exists-ence. We mark all messages as deleted
+ * and remove the folder from our URI table. Currently, if a folder that
+ * contains other folders is deleted, we may either receive one
+ * notification for the folder that is deleted, or a notification for the
+ * folder and one for each of its descendents. This depends upon the
+ * underlying account implementation, so we explicitly handle each case.
+ * Namely, we treat it as if we're only planning on getting one, but we
+ * handle if the children are already gone for some reason.
+ */
+ folderDeleted(aFolder) {
+ this.indexer._log.debug("folderDeleted notification");
+ try {
+ let delFunc = function (aFolder, indexer) {
+ if (indexer._datastore._folderKnown(aFolder)) {
+ indexer._log.info(
+ "Processing deletion of folder " + aFolder.prettyName + "."
+ );
+ let glodaFolder = GlodaDatastore._mapFolder(aFolder);
+ indexer._datastore.markMessagesDeletedByFolderID(glodaFolder.id);
+ indexer._datastore.deleteFolderByID(glodaFolder.id);
+ GlodaDatastore._killGlodaFolderIntoTombstone(glodaFolder);
+ } else {
+ indexer._log.info(
+ "Ignoring deletion of folder " +
+ aFolder.prettyName +
+ " because it is unknown to gloda."
+ );
+ }
+ };
+
+ let descendentFolders = aFolder.descendants;
+ // (the order of operations does not matter; child, non-child, whatever.)
+ // delete the parent
+ delFunc(aFolder, this.indexer);
+ // delete all its descendents
+ for (let folder of descendentFolders) {
+ delFunc(folder, this.indexer);
+ }
+
+ this.indexer.pendingDeletions = true;
+ } catch (ex) {
+ this.indexer._log.error(
+ "Problem encountered during folder deletion" +
+ ": " +
+ ex +
+ "\n\n" +
+ ex.stack +
+ "\n\n"
+ );
+ }
+ },
+
+ /**
+ * Handle a folder being copied or moved.
+ * Moves are handled by a helper function shared with _folderRenameHelper
+ * (which takes care of any nesting involved).
+ * Copies are actually ignored, because our periodic indexing traversal
+ * should discover these automatically. We could hint ourselves into
+ * action, but arguably a set of completely duplicate messages is not
+ * a high priority for indexing.
+ */
+ folderMoveCopyCompleted(aMove, aSrcFolder, aDestFolder) {
+ this.indexer._log.debug(
+ "folderMoveCopy notification (Move: " + aMove + ")"
+ );
+ if (aMove) {
+ let srcURI = aSrcFolder.URI;
+ let targetURI =
+ aDestFolder.URI + srcURI.substring(srcURI.lastIndexOf("/"));
+ this._folderRenameHelper(aSrcFolder, targetURI);
+ } else {
+ this.indexer.indexingSweepNeeded = true;
+ }
+ },
+
+ /**
+ * We just need to update the URI <-> ID maps and the row in the database,
+ * all of which is actually done by the datastore for us.
+ * This method needs to deal with the complexity where local folders will
+ * generate a rename notification for each sub-folder, but IMAP folders
+ * will generate only a single notification. Our logic primarily handles
+ * this by not exploding if the original folder no longer exists.
+ */
+ _folderRenameHelper(aOrigFolder, aNewURI) {
+ let newFolder = lazy.MailUtils.getOrCreateFolder(aNewURI);
+ let specialFolderFlags =
+ Ci.nsMsgFolderFlags.Trash | Ci.nsMsgFolderFlags.Junk;
+ if (newFolder.isSpecialFolder(specialFolderFlags, true)) {
+ let descendentFolders = newFolder.descendants;
+
+ // First thing to do: make sure we don't index the resulting folder and
+ // its descendents.
+ GlodaMsgIndexer.resetFolderIndexingPriority(newFolder);
+ for (let folder of descendentFolders) {
+ GlodaMsgIndexer.resetFolderIndexingPriority(folder);
+ }
+
+ // Remove from the index messages from the original folder
+ this.folderDeleted(aOrigFolder);
+ } else {
+ let descendentFolders = aOrigFolder.descendants;
+
+ let origURI = aOrigFolder.URI;
+ // this rename is straightforward.
+ GlodaDatastore.renameFolder(aOrigFolder, aNewURI);
+
+ for (let folder of descendentFolders) {
+ let oldSubURI = folder.URI;
+ // mangle a new URI from the old URI. we could also try and do a
+ // parallel traversal of the new folder hierarchy, but that seems like
+ // more work.
+ let newSubURI = aNewURI + oldSubURI.substring(origURI.length);
+ this.indexer._datastore.renameFolder(oldSubURI, newSubURI);
+ }
+
+ this.indexer._log.debug(
+ "folder renamed: " + origURI + " to " + aNewURI
+ );
+ }
+ },
+
+ /**
+ * Handle folder renames, dispatching to our rename helper (which also
+ * takes care of any nested folder issues.)
+ */
+ folderRenamed(aOrigFolder, aNewFolder) {
+ this._folderRenameHelper(aOrigFolder, aNewFolder.URI);
+ },
+
+ /**
+ * Helper used by folderCompactStart/folderReindexTriggered.
+ */
+ _reindexFolderHelper(folder, isCompacting) {
+ // ignore folders we ignore...
+ if (!GlodaMsgIndexer.shouldIndexFolder(folder)) {
+ return;
+ }
+
+ let glodaFolder = GlodaDatastore._mapFolder(folder);
+ if (isCompacting) {
+ glodaFolder.compacting = true;
+ }
+
+ // Purge any explicit indexing of said folder.
+ GlodaIndexer.purgeJobsUsingFilter(function (aJob) {
+ return aJob.jobType == "folder" && aJob.id == folder.id;
+ });
+
+ // Abort the active job if it's in the folder (this covers both
+ // event-driven indexing that happens to be in the folder as well
+ // explicit folder indexing of the folder).
+ if (GlodaMsgIndexer._indexingFolder == folder) {
+ GlodaIndexer.killActiveJob();
+ }
+
+ // Tell the PendingCommitTracker to throw away anything it is tracking
+ // about the folder. We will pick up the pieces in the compaction
+ // pass.
+ PendingCommitTracker.noteFolderDatabaseGettingBlownAway(folder);
+
+ // (We do not need to mark the folder dirty because if we were indexing
+ // it, it already must have been marked dirty.)
+ },
+
+ /**
+ * folderCompactStart: Mark the folder as compacting in our in-memory
+ * representation. This should keep any new indexing out of the folder
+ * until it is done compacting. Also, kill any active or existing jobs
+ * to index the folder.
+ */
+ folderCompactStart(folder) {
+ this._reindexFolderHelper(folder, true);
+ },
+
+ /**
+ * folderReindexTriggered: We do the same thing as folderCompactStart
+ * but don't mark the folder as compacting.
+ */
+ folderReindexTriggered(folder) {
+ this._reindexFolderHelper(folder, false);
+ },
+
+ /**
+ * folderCompactFinish: Mark the folder as done compacting in our
+ * in-memory representation. Assuming the folder was known to us and
+ * not marked filthy, queue a compaction job.
+ */
+ folderCompactFinish(folder) {
+ // ignore folders we ignore...
+ if (!GlodaMsgIndexer.shouldIndexFolder(folder)) {
+ return;
+ }
+
+ let glodaFolder = GlodaDatastore._mapFolder(folder);
+ glodaFolder.compacting = false;
+ glodaFolder._setCompactedState(true);
+
+ // Queue compaction unless the folder was filthy (in which case there
+ // are no valid gloda-id's to update.)
+ if (glodaFolder.dirtyStatus != glodaFolder.kFolderFilthy) {
+ GlodaIndexer.indexJob(new IndexingJob("folderCompact", glodaFolder.id));
+ }
+
+ // Queue indexing of the folder if it is dirty. We are doing this
+ // mainly in case we were indexing it before the compaction started.
+ // It should be reasonably harmless if we weren't.
+ // (It would probably be better to just make sure that there is an
+ // indexing sweep queued or active, and if it's already active that
+ // this folder is in the queue to be processed.)
+ if (glodaFolder.dirtyStatus == glodaFolder.kFolderDirty) {
+ GlodaIndexer.indexJob(new IndexingJob("folder", glodaFolder.id));
+ }
+ },
+ },
+
+ /**
+ * A nsIFolderListener (listening on nsIMsgMailSession so we get all of
+ * these events) PRIMARILY to get folder loaded notifications. Because of
+ * deficiencies in the nsIMsgFolderListener's events at this time, we also
+ * get our folder-added and newsgroup notifications from here for now. (This
+ * will be rectified.)
+ */
+ _folderListener: {
+ indexer: null,
+
+ _init(aIndexer) {
+ this.indexer = aIndexer;
+ },
+
+ onFolderAdded(parentFolder, child) {},
+ onMessageAdded(parentFolder, msg) {},
+ onFolderRemoved(parentFolder, child) {},
+ onMessageRemoved(parentFolder, msg) {},
+ onFolderPropertyChanged(aItem, aProperty, aOldValue, aNewValue) {},
+ /**
+ * Detect changes to folder flags and reset our indexing priority. This
+ * is important because (all?) folders start out without any flags and
+ * then get their flags added to them.
+ */
+ onFolderIntPropertyChanged(aFolderItem, aProperty, aOldValue, aNewValue) {
+ if (aProperty !== "FolderFlag") {
+ return;
+ }
+ if (!GlodaMsgIndexer.shouldIndexFolder(aFolderItem)) {
+ return;
+ }
+ // Only reset priority if folder Special Use changes.
+ if (
+ (aOldValue & Ci.nsMsgFolderFlags.SpecialUse) ==
+ (aNewValue & Ci.nsMsgFolderFlags.SpecialUse)
+ ) {
+ return;
+ }
+ GlodaMsgIndexer.resetFolderIndexingPriority(aFolderItem);
+ },
+ onFolderBoolPropertyChanged(aItem, aProperty, aOldValue, aNewValue) {},
+ onFolderUnicharPropertyChanged(aItem, aProperty, aOldValue, aNewValue) {},
+ /**
+ * Notice when user activity adds/removes tags or changes a message's
+ * status.
+ */
+ onFolderPropertyFlagChanged(aMsgHdr, aProperty, aOldValue, aNewValue) {
+ if (
+ aProperty == "Keywords" ||
+ // We could care less about the new flag changing.
+ (aProperty == "Status" &&
+ (aOldValue ^ aNewValue) != Ci.nsMsgMessageFlags.New &&
+ // We do care about IMAP deletion, but msgsDeleted tells us that, so
+ // ignore IMAPDeleted too...
+ (aOldValue ^ aNewValue) != Ci.nsMsgMessageFlags.IMAPDeleted) ||
+ aProperty == "Flagged"
+ ) {
+ GlodaMsgIndexer._reindexChangedMessages([aMsgHdr], true);
+ }
+ },
+
+ /**
+ * Get folder loaded notifications for folders that had to do some
+ * (asynchronous) processing before they could be opened.
+ */
+ onFolderEvent(aFolder, aEvent) {
+ if (aEvent == "FolderLoaded") {
+ this.indexer._onFolderLoaded(aFolder);
+ }
+ },
+ },
+
+ /* ***** Rebuilding / Reindexing ***** */
+ /**
+ * Allow us to invalidate an outstanding folder traversal because the
+ * underlying database is going away. We use other means for detecting
+ * modifications of the message (labeling, marked (un)read, starred, etc.)
+ *
+ * This is an nsIDBChangeListener listening to an nsIDBChangeAnnouncer. To
+ * add ourselves, we get us a nice nsMsgDatabase, query it to the announcer,
+ * then call addListener.
+ */
+ _databaseAnnouncerListener: {
+ indexer: null,
+ /**
+ * XXX We really should define the operations under which we expect this to
+ * occur. While we know this must be happening as the result of a
+ * ForceClosed call, we don't have a comprehensive list of when this is
+ * expected to occur. Some reasons:
+ * - Compaction (although we should already have killed the job thanks to
+ * our compaction notification)
+ * - UID validity rolls.
+ * - Folder Rename
+ * - Folder Delete
+ * The fact that we already have the database open when getting this means
+ * that it had to be valid before we opened it, which hopefully rules out
+ * modification of the mbox file by an external process (since that is
+ * forbidden when we are running) and many other exotic things.
+ *
+ * So this really ends up just being a correctness / safety protection
+ * mechanism. At least now that we have better compaction support.
+ */
+ onAnnouncerGoingAway(aDBChangeAnnouncer) {
+ // The fact that we are getting called means we have an active folder and
+ // that we therefore are the active job. As such, we must kill the
+ // active job.
+ // XXX In the future, when we support interleaved event-driven indexing
+ // that bumps long-running indexing tasks, the semantics of this will
+ // have to change a bit since we will want to maintain being active in a
+ // folder even when bumped. However, we will probably have a more
+ // complex notion of indexing contexts on a per-job basis.
+ GlodaIndexer.killActiveJob();
+ },
+
+ // The remaining nsIDBChangeListener notifications are of no interest to
+ // us; message changes are observed via the folder/msg-folder listeners.
+ onHdrFlagsChanged(aHdrChanged, aOldFlags, aNewFlags, aInstigator) {},
+ onHdrDeleted(aHdrChanged, aParentKey, aFlags, aInstigator) {},
+ onHdrAdded(aHdrChanged, aParentKey, aFlags, aInstigator) {},
+ onParentChanged(aKeyChanged, aOldParent, aNewParent, aInstigator) {},
+ onReadChanged(aInstigator) {},
+ onJunkScoreChanged(aInstigator) {},
+ onHdrPropertyChanged(aHdrToChange, aPreChange, aStatus, aInstigator) {},
+ onEvent(aDB, aEvent) {},
+ },
+
+  /**
+   * Given a list of Message-ID's, return a matching list of lists of messages
+   * matching those Message-ID's. So if you pass an array with three
+   * Message-ID's ["a", "b", "c"], you would get back an array containing
+   * 3 lists, where the first list contains all the messages with a message-id
+   * of "a", and so forth. The reason a list is returned rather than null/a
+   * message is that we accept the reality that we have multiple copies of
+   * messages with the same ID.
+   * This call is asynchronous because it depends on previously created messages
+   * to be reflected in our results, which requires us to execute on the async
+   * thread where all our writes happen. This also turns out to be a
+   * reasonable thing because we could imagine pathological cases where there
+   * could be a lot of message-id's and/or a lot of messages with those
+   * message-id's.
+   *
+   * The returned collection will include both 'ghost' messages (messages
+   * that exist for conversation-threading purposes only) as well as deleted
+   * messages in addition to the normal 'live' messages that non-privileged
+   * queries might return.
+   *
+   * @param aMessageIDs Array of message-id header values to look up.
+   * @param aCallback Callback receiving the array-of-arrays of results.
+   * @param aCallbackThis 'this' to use when invoking aCallback.
+   * @returns the collection produced by the frozen query; it is created with
+   *   { becomeNull: true } — see the collection implementation for the exact
+   *   lifetime semantics of that option.
+   */
+  getMessagesByMessageID(aMessageIDs, aCallback, aCallbackThis) {
+    // Map each message-id to its position so the listener can slot results
+    // back into the correct output list.
+    let msgIDToIndex = {};
+    let results = [];
+    for (let iID = 0; iID < aMessageIDs.length; ++iID) {
+      let msgID = aMessageIDs[iID];
+      results.push([]);
+      msgIDToIndex[msgID] = iID;
+    }
+
+    // (Note: although we are performing a lookup with no validity constraints
+    // and using the same object-relational-mapper-ish layer used by things
+    // that do have constraints, we are not at risk of exposing deleted
+    // messages to other code and getting it confused. The only way code
+    // can find a message is if it shows up in their queries or gets announced
+    // via GlodaCollectionManager.itemsAdded, neither of which will happen.)
+    let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+      noDbQueryValidityConstraints: true,
+    });
+    query.headerMessageID.apply(query, aMessageIDs);
+    query.frozen = true;
+
+    let listener = new MessagesByMessageIdCallback(
+      msgIDToIndex,
+      results,
+      aCallback,
+      aCallbackThis
+    );
+    return query.getCollection(listener, null, { becomeNull: true });
+  },
+
+  /**
+   * A reference to MsgHdrToMimeMessage that unit testing can clobber when it
+   * wants to cause us to hang or inject a fault. If you are not
+   * glodaTestHelper.js then _do not touch this_.
+   * (_indexMessage calls through this indirection instead of calling
+   * MsgHdrToMimeMessage directly.)
+   */
+  _MsgHdrToMimeMessageFunc: MsgHdrToMimeMessage,
+  /**
+   * Primary message indexing logic. This method is mainly concerned with
+   * getting all the information about the message required for threading /
+   * conversation building and subsequent processing. It is responsible for
+   * determining whether to reuse existing gloda messages or whether a new one
+   * should be created. Most attribute stuff happens in fund_attr.js or
+   * expl_attr.js.
+   *
+   * Prior to calling this method, the caller must have invoked
+   * |_indexerEnterFolder|, leaving us with the following true invariants
+   * below.
+   *
+   * @param aMsgHdr The header of the message to index.
+   * @param aCallbackHandle Generator work-queue handle used to suspend on
+   *   async work (kWorkAsync) and to push sub-tasks (pushAndGo).
+   *
+   * @pre aMsgHdr.folder == this._indexingFolder
+   * @pre aMsgHdr.folder.msgDatabase == this._indexingDatabase
+   */
+  *_indexMessage(aMsgHdr, aCallbackHandle) {
+    this._log.debug(
+      "*** Indexing message: " + aMsgHdr.messageKey + " : " + aMsgHdr.subject
+    );
+
+    // If the message is offline, then get the message body as well
+    let aMimeMsg;
+    if (
+      aMsgHdr.flags & Ci.nsMsgMessageFlags.Offline ||
+      aMsgHdr.folder instanceof Ci.nsIMsgLocalMailFolder
+    ) {
+      this._MsgHdrToMimeMessageFunc(
+        aMsgHdr,
+        aCallbackHandle.callbackThis,
+        aCallbackHandle.callback,
+        false,
+        {
+          saneBodySize: true,
+        }
+      );
+      // Suspend until the streaming callback fires; the resumed value is the
+      // (msgHdr, mimeMsg) argument list, so [1] is the MimeMessage or null.
+      aMimeMsg = (yield GlodaConstants.kWorkAsync)[1];
+    } else {
+      this._log.debug(" * Message is not offline -- only headers indexed");
+    }
+
+    this._log.debug(" * Got message, subject " + aMsgHdr.subject);
+
+    if (this._unitTestSuperVerbose) {
+      if (aMimeMsg) {
+        this._log.debug(" * Got Mime " + aMimeMsg.prettyString());
+      } else {
+        this._log.debug(" * NO MIME MESSAGE!!!\n");
+      }
+    }
+
+    // -- Find/create the conversation the message belongs to.
+    // Our invariant is that all messages that exist in the database belong to
+    // a conversation.
+
+    // - See if any of the ancestors exist and have a conversationID...
+    // (references are ordered from old [0] to new [n-1])
+    let references = Array.from(range(0, aMsgHdr.numReferences)).map(i =>
+      aMsgHdr.getStringReference(i)
+    );
+    // also see if we already know about the message...
+    references.push(aMsgHdr.messageId);
+
+    this.getMessagesByMessageID(
+      references,
+      aCallbackHandle.callback,
+      aCallbackHandle.callbackThis
+    );
+    // (ancestorLists has a direct correspondence to the message ids)
+    let ancestorLists = yield GlodaConstants.kWorkAsync;
+
+    this._log.debug("ancestors raw: " + ancestorLists);
+    this._log.debug(
+      "ref len: " + references.length + " anc len: " + ancestorLists.length
+    );
+    this._log.debug("references: " + references);
+    this._log.debug("ancestors: " + ancestorLists);
+
+    // pull our current message lookup results off
+    references.pop();
+    let candidateCurMsgs = ancestorLists.pop();
+
+    let conversationID = null;
+    let conversation = null;
+    // -- figure out the conversation ID
+    // if we have a clone/already exist, just use his conversation ID
+    if (candidateCurMsgs.length > 0) {
+      conversationID = candidateCurMsgs[0].conversationID;
+      conversation = candidateCurMsgs[0].conversation;
+    } else {
+      // otherwise check out our ancestors
+      // (walk from closest to furthest ancestor)
+      for (
+        let iAncestor = ancestorLists.length - 1;
+        iAncestor >= 0;
+        --iAncestor
+      ) {
+        let ancestorList = ancestorLists[iAncestor];
+
+        if (ancestorList.length > 0) {
+          // we only care about the first instance of the message because we are
+          // able to guarantee the invariant that all messages with the same
+          // message id belong to the same conversation.
+          let ancestor = ancestorList[0];
+          if (conversationID === null) {
+            conversationID = ancestor.conversationID;
+            conversation = ancestor.conversation;
+          } else if (conversationID != ancestor.conversationID) {
+            // XXX this inconsistency is known and understood and tracked by
+            // bug 478162 https://bugzilla.mozilla.org/show_bug.cgi?id=478162
+            // this._log.error("Inconsistency in conversations invariant on " +
+            // ancestor.headerMessageID + ". It has conv id " +
+            // ancestor.conversationID + " but expected " +
+            // conversationID + ". ID: " + ancestor.id);
+          }
+        }
+      }
+    }
+
+    // nobody had one? create a new conversation
+    if (conversationID === null) {
+      // (the create method could issue the id, making the call return
+      // without waiting for the database...)
+      conversation = this._datastore.createConversation(
+        aMsgHdr.mime2DecodedSubject,
+        null,
+        null
+      );
+      conversationID = conversation.id;
+    }
+
+    // Walk from furthest to closest ancestor, creating the ancestors that don't
+    // exist. (This is possible if previous messages that were consumed in this
+    // thread only had an in-reply-to or for some reason did not otherwise
+    // provide the full references chain.)
+    for (let iAncestor = 0; iAncestor < ancestorLists.length; ++iAncestor) {
+      let ancestorList = ancestorLists[iAncestor];
+
+      if (ancestorList.length == 0) {
+        this._log.debug(
+          "creating message with: null, " +
+            conversationID +
+            ", " +
+            references[iAncestor] +
+            ", null."
+        );
+        let ancestor = this._datastore.createMessage(
+          null,
+          null, // ghost
+          conversationID,
+          null,
+          references[iAncestor],
+          null, // no subject
+          null, // no body
+          null
+        ); // no attachments
+        this._datastore.insertMessage(ancestor);
+        ancestorLists[iAncestor].push(ancestor);
+      }
+    }
+    // now all our ancestors exist, though they may be ghost-like...
+
+    // find if there's a ghost version of our message or we already have indexed
+    // this message.
+    let curMsg = null;
+    this._log.debug(candidateCurMsgs.length + " candidate messages");
+    for (let iCurCand = 0; iCurCand < candidateCurMsgs.length; iCurCand++) {
+      let candMsg = candidateCurMsgs[iCurCand];
+
+      this._log.debug(
+        "candidate folderID: " +
+          candMsg.folderID +
+          " messageKey: " +
+          candMsg.messageKey
+      );
+
+      if (candMsg.folderURI == this._indexingFolder.URI) {
+        // if we are in the same folder and we have the same message key, we
+        // are definitely the same, stop looking.
+        if (candMsg.messageKey == aMsgHdr.messageKey) {
+          curMsg = candMsg;
+          break;
+        }
+        // if (we are in the same folder and) the candidate message has a null
+        // message key, we treat it as our best option unless we find an exact
+        // key match. (this would happen because the 'move' notification case
+        // has to deal with not knowing the target message key. this case
+        // will hopefully be somewhat improved in the future to not go through
+        // this path which mandates re-indexing of the message in its entirety)
+        if (candMsg.messageKey === null) {
+          curMsg = candMsg;
+        } else if (
+          curMsg === null &&
+          !this._indexingDatabase.containsKey(candMsg.messageKey)
+        ) {
+          // (We are in the same folder and) the candidate message's underlying
+          // message no longer exists/matches. Assume we are the same but
+          // were betrayed by a re-indexing or something, but we have to make
+          // sure a perfect match doesn't turn up.
+          curMsg = candMsg;
+        }
+      } else if (curMsg === null && candMsg.folderID === null) {
+        // a ghost/deleted message is fine
+        curMsg = candMsg;
+      }
+    }
+
+    let attachmentNames = aMimeMsg?.allAttachments.map(att => att.name) || null;
+
+    let isConceptuallyNew, isRecordNew, insertFulltext;
+    if (curMsg === null) {
+      curMsg = this._datastore.createMessage(
+        aMsgHdr.folder,
+        aMsgHdr.messageKey,
+        conversationID,
+        aMsgHdr.date,
+        aMsgHdr.messageId
+      );
+      curMsg._conversation = conversation;
+      isConceptuallyNew = isRecordNew = insertFulltext = true;
+    } else {
+      isRecordNew = false;
+      // the message is conceptually new if it was a ghost or dead.
+      isConceptuallyNew = curMsg._isGhost || curMsg._isDeleted;
+      // insert fulltext if it was a ghost
+      insertFulltext = curMsg._isGhost;
+      curMsg._folderID = this._datastore._mapFolder(aMsgHdr.folder).id;
+      curMsg._messageKey = aMsgHdr.messageKey;
+      // aMsgHdr.date appears to be in microseconds (PRTime); dividing by
+      // 1000 yields the milliseconds Date expects — TODO confirm.
+      curMsg.date = new Date(aMsgHdr.date / 1000);
+      // the message may have been deleted; tell it to make sure it's not.
+      curMsg._ensureNotDeleted();
+      // note: we are assuming that our matching logic is flawless in that
+      // if this message was not a ghost, we are assuming the 'body'
+      // associated with the id is still exactly the same. It is conceivable
+      // that there are cases where this is not true.
+    }
+
+    if (aMimeMsg) {
+      let bodyPlain = aMimeMsg.coerceBodyToPlaintext(aMsgHdr.folder);
+      if (bodyPlain) {
+        curMsg._bodyLines = bodyPlain.split(/\r?\n/);
+        // curMsg._content gets set by GlodaFundAttr.jsm
+      }
+    }
+
+    // Mark the message as new (for the purposes of fulltext insertion)
+    if (insertFulltext) {
+      curMsg._isNew = true;
+    }
+
+    curMsg._subject = aMsgHdr.mime2DecodedSubject;
+    curMsg._attachmentNames = attachmentNames;
+
+    // curMsg._indexAuthor gets set by GlodaFundAttr.jsm
+    // curMsg._indexRecipients gets set by GlodaFundAttr.jsm
+
+    // zero the notability so everything in grokNounItem can just increment
+    curMsg.notability = 0;
+
+    // Let the attribute providers chew on the message as a sub-task; we
+    // resume when grokNounItem completes.
+    yield aCallbackHandle.pushAndGo(
+      Gloda.grokNounItem(
+        curMsg,
+        { header: aMsgHdr, mime: aMimeMsg, bodyLines: curMsg._bodyLines },
+        isConceptuallyNew,
+        isRecordNew,
+        aCallbackHandle
+      )
+    );
+
+    // Strip the transient, indexing-only expando state off the object.
+    delete curMsg._bodyLines;
+    delete curMsg._content;
+    delete curMsg._isNew;
+    delete curMsg._indexAuthor;
+    delete curMsg._indexRecipients;
+
+    // we want to update the header for messages only after the transaction
+    // irrevocably hits the disk. otherwise we could get confused if the
+    // transaction rolls back or what not.
+    PendingCommitTracker.track(aMsgHdr, curMsg.id);
+
+    yield GlodaConstants.kWorkDone;
+  },
+
+  /**
+   * Wipe a message out of existence from our index. This is slightly more
+   * tricky than one would first expect because there are potentially
+   * attributes not immediately associated with this message that reference
+   * the message. Not only that, but deletion of messages may leave a
+   * conversation possessing only ghost messages, which we don't want, so we
+   * need to nuke the moot conversation and its moot ghost messages.
+   * For now, we are actually punting on that trickiness, and the exact
+   * nuances aren't defined yet because we have not decided whether to store
+   * such attributes redundantly. For example, if we have subject-pred-object,
+   * we could actually store this as attributes (subject, id, object) and
+   * (object, id, subject). In such a case, we could query on (subject, *)
+   * and use the results to delete the (object, id, subject) case. If we
+   * don't redundantly store attributes, we can deal with the problem by
+   * collecting up all the attributes that accept a message as their object
+   * type and issuing a delete against that. For example, delete (*, [1,2,3],
+   * message id).
+   * (We are punting because we haven't implemented support for generating
+   * attributes like that yet.)
+   *
+   * @param aMessage The gloda message to delete/ghost.
+   * @param aCallbackHandle Generator work-queue handle used to suspend on
+   *   the async conversation lookup.
+   *
+   * @TODO: implement deletion of attributes that reference (deleted) messages
+   */
+  *_deleteMessage(aMessage, aCallbackHandle) {
+    this._log.debug("*** Deleting message: " + aMessage);
+
+    // -- delete our attributes
+    // delete the message's attributes (if we implement the cascade delete, that
+    // could do the honors for us... right now we define the trigger in our
+    // schema but the back-end ignores it)
+    GlodaDatastore.clearMessageAttributes(aMessage);
+
+    // -- delete our message or ghost us, and maybe nuke the whole conversation
+    // Look at the other messages in the conversation.
+    // (Note: although we are performing a lookup with no validity constraints
+    // and using the same object-relational-mapper-ish layer used by things
+    // that do have constraints, we are not at risk of exposing deleted
+    // messages to other code and getting it confused. The only way code
+    // can find a message is if it shows up in their queries or gets announced
+    // via GlodaCollectionManager.itemsAdded, neither of which will happen.)
+    let convPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
+      noDbQueryValidityConstraints: true,
+    });
+    convPrivQuery.conversation(aMessage.conversation);
+    let conversationCollection = convPrivQuery.getCollection(aCallbackHandle);
+    yield GlodaConstants.kWorkAsync;
+
+    let conversationMsgs = conversationCollection.items;
+
+    // Count the number of ghosts messages we see to determine if we are
+    // the last message alive.
+    let ghostCount = 0;
+    let twinMessageExists = false;
+    for (let convMsg of conversationMsgs) {
+      // ignore our own message
+      if (convMsg.id == aMessage.id) {
+        continue;
+      }
+
+      if (convMsg._isGhost) {
+        ghostCount++;
+      } else if (
+        // This message is our (living) twin if it is not a ghost, not deleted,
+        // and has the same message-id header.
+        !convMsg._isDeleted &&
+        convMsg.headerMessageID == aMessage.headerMessageID
+      ) {
+        twinMessageExists = true;
+      }
+    }
+
+    // -- If everyone else is a ghost, blow away the conversation.
+    // If there are messages still alive, or deleted but not yet processed by
+    // _deleteMessage, then do not do this. (We will eventually
+    // hit this case if they are all deleted.)
+    if (conversationMsgs.length - 1 == ghostCount) {
+      // - Obliterate each message
+      for (let msg of conversationMsgs) {
+        GlodaDatastore.deleteMessageByID(msg.id);
+      }
+      // - Obliterate the conversation
+      GlodaDatastore.deleteConversationByID(aMessage.conversationID);
+      // *no one* should hold a reference or use aMessage after this point,
+      // trash it so such ne'er do'wells are made plain.
+      aMessage._objectPurgedMakeYourselfUnpleasant();
+    } else if (twinMessageExists) {
+      // -- Ghost or purge us as appropriate
+      // Purge us if we have a (living) twin; no ghost required.
+      GlodaDatastore.deleteMessageByID(aMessage.id);
+      // *no one* should hold a reference or use aMessage after this point,
+      // trash it so such ne'er do'wells are made plain.
+      aMessage._objectPurgedMakeYourselfUnpleasant();
+    } else {
+      // No twin, a ghost is required, we become the ghost.
+      aMessage._ghost();
+      GlodaDatastore.updateMessage(aMessage);
+      // ghosts don't have fulltext. purge it.
+      GlodaDatastore.deleteMessageTextByID(aMessage.id);
+    }
+
+    yield GlodaConstants.kWorkDone;
+  },
+};
+GlodaIndexer.registerIndexer(GlodaMsgIndexer);
diff --git a/comm/mailnews/db/gloda/modules/MimeMessage.jsm b/comm/mailnews/db/gloda/modules/MimeMessage.jsm
new file mode 100644
index 0000000000..8859f10877
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/MimeMessage.jsm
@@ -0,0 +1,821 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Names this JSM module exposes to ChromeUtils.import consumers.
+const EXPORTED_SYMBOLS = [
+  "MsgHdrToMimeMessage",
+  "MimeMessage",
+  "MimeContainer",
+  "MimeBody",
+  "MimeUnknown",
+  "MimeMessageAttachment",
+];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+/**
+ * The URL listener is surplus because the CallbackStreamListener ends up
+ * getting the same set of events, effectively.
+ * (Deliberately-empty nsIUrlListener passed to streamMessage below.)
+ */
+var dumbUrlListener = {
+  OnStartRunningUrl(aUrl) {},
+  OnStopRunningUrl(aUrl, aExitCode) {},
+};
+
+/**
+ * Maintain a list of all active stream listeners so that we can cancel them all
+ * during shutdown. If we don't cancel them, we risk calls into javascript
+ * from C++ after the various XPConnect contexts have already begun their
+ * teardown process.
+ *
+ * Maps the streamed message's URI (a string) to its CallbackStreamListener.
+ */
+var activeStreamListeners = {};
+
+/**
+ * Observer that cancels all still-active stream listeners at
+ * "quit-application" so C++ does not call back into JS after XPConnect
+ * teardown begins (see activeStreamListeners above for the rationale).
+ */
+var shutdownCleanupObserver = {
+  // Guard so the "quit-application" observer is only registered once.
+  _initialized: false,
+  /** Lazily register this object as a quit-application observer. */
+  ensureInitialized() {
+    if (this._initialized) {
+      return;
+    }
+
+    Services.obs.addObserver(this, "quit-application");
+
+    this._initialized = true;
+  },
+
+  /** nsIObserver entry point. */
+  observe(aSubject, aTopic, aData) {
+    if (aTopic == "quit-application") {
+      Services.obs.removeObserver(this, "quit-application");
+
+      // Cancel every in-flight streaming request so no further listener
+      // callbacks arrive during shutdown.
+      for (let uri in activeStreamListeners) {
+        let streamListener = activeStreamListeners[uri];
+        if (streamListener._request) {
+          streamListener._request.cancel(Cr.NS_BINDING_ABORTED);
+        }
+      }
+    }
+  },
+};
+
+/**
+ * Stream listener that drives MsgHdrToMimeMessage: it is handed to the
+ * message service's streamMessage call and fires the caller-supplied
+ * callback(s) with (msgHdr, mimeMsg) once streaming stops.
+ *
+ * @param aMsgHdr The message header being streamed.
+ * @param aCallbackThis 'this' for the callback — or, in the legacy
+ *   two-argument form (aCallback undefined), the callback function itself.
+ * @param aCallback The callback to invoke on completion.
+ */
+function CallbackStreamListener(aMsgHdr, aCallbackThis, aCallback) {
+  this._msgHdr = aMsgHdr;
+  // Messages opened from file or attachments do not have a folder property, but
+  // have their url stored as a string property.
+  let hdrURI = aMsgHdr.folder
+    ? aMsgHdr.folder.getUriForMsg(aMsgHdr)
+    : aMsgHdr.getStringProperty("dummyMsgUrl");
+
+  this._request = null;
+  this._stream = null;
+  // Support the legacy calling convention where the callback was passed as
+  // the second argument (with no 'this').
+  if (aCallback === undefined) {
+    this._callbacksThis = [null];
+    this._callbacks = [aCallbackThis];
+  } else {
+    this._callbacksThis = [aCallbackThis];
+    this._callbacks = [aCallback];
+  }
+  // Record ourselves so parallel requests for the same message can piggyback
+  // on this listener and so shutdown can cancel us.
+  activeStreamListeners[hdrURI] = this;
+}
+
+/**
+ * Delivers the parsed MimeMessage to all registered callbacks when the
+ * streaming request stops.
+ *
+ * @implements {nsIRequestObserver}
+ * @implements {nsIStreamListener}
+ */
+CallbackStreamListener.prototype = {
+  // nsIStreamListener extends nsIRequestObserver, so this QI also covers the
+  // onStartRequest/onStopRequest entry points.
+  QueryInterface: ChromeUtils.generateQI(["nsIStreamListener"]),
+
+  // nsIRequestObserver part
+  onStartRequest(aRequest) {
+    // Remember the request so shutdownCleanupObserver can cancel it.
+    this._request = aRequest;
+  },
+  onStopRequest(aRequest, aStatusCode) {
+    // Messages opened from file or attachments do not have a folder property,
+    // but have their url stored as a string property.
+    let msgURI = this._msgHdr.folder
+      ? this._msgHdr.folder.getUriForMsg(this._msgHdr)
+      : this._msgHdr.getStringProperty("dummyMsgUrl");
+    delete activeStreamListeners[msgURI];
+
+    // The MIME emitter deposited the parsed result (if any) keyed by the
+    // request's URL spec; retrieve and consume it.
+    aRequest.QueryInterface(Ci.nsIChannel);
+    let message = MsgHdrToMimeMessage.RESULT_RENDEVOUZ[aRequest.URI.spec];
+    if (message === undefined) {
+      message = null;
+    }
+
+    delete MsgHdrToMimeMessage.RESULT_RENDEVOUZ[aRequest.URI.spec];
+
+    // Fan out to every registered callback; parallel requests for the same
+    // message piggyback on a single listener (see MsgHdrToMimeMessage).
+    for (let i = 0; i < this._callbacksThis.length; i++) {
+      try {
+        this._callbacks[i].call(this._callbacksThis[i], this._msgHdr, message);
+      } catch (e) {
+        // Most of the time, exceptions will silently disappear into the endless
+        // deeps of XPConnect, and never reach the surface ever again. At least
+        // warn the user if they have dump enabled.
+        dump(
+          "The MsgHdrToMimeMessage callback threw an exception: " + e + "\n"
+        );
+        // That one will probably never make it to the original caller.
+        throw e;
+      }
+    }
+
+    // Break references to help the GC and prevent accidental reuse.
+    this._msgHdr = null;
+    this._request = null;
+    this._stream = null;
+    this._callbacksThis = null;
+    this._callbacks = null;
+  },
+
+  // nsIStreamListener part
+
+  /**
+   * Our onDataAvailable should actually never be called. The stream converter
+   * is actually eating everything except the start and stop notification.
+   */
+  onDataAvailable(aRequest, aInputStream, aOffset, aCount) {
+    throw new Error(
+      `The stream converter should have grabbed the data for ${aRequest?.URI.spec}`
+    );
+  },
+};
+
+/**
+ * Recursively empty the children of any encrypted container part so that
+ * consumers do not see decrypted content they did not explicitly ask for.
+ * Mutates aPart's subtree in place.
+ *
+ * @param aPart The MIME part tree to scrub.
+ * @returns the same aPart, with encrypted subtrees emptied.
+ */
+function stripEncryptedParts(aPart) {
+  if (aPart.parts && aPart.isEncrypted) {
+    aPart.parts = []; // Show an empty container.
+  } else if (aPart.parts) {
+    aPart.parts = aPart.parts.map(stripEncryptedParts);
+  }
+  return aPart;
+}
+
+/**
+ * Starts retrieval of a MimeMessage instance for the given message header.
+ * Your callback will be called with the message header you provide and the
+ * resulting MimeMessage instance (or null if streaming/parsing failed).
+ *
+ * @param aMsgHdr The message header to retrieve the body for and build a MIME
+ *   representation of the message.
+ * @param aCallbackThis The (optional) 'this' to use for your callback function.
+ * @param aCallback The callback function to invoke on completion of message
+ *   parsing or failure. The first argument passed will be the nsIMsgDBHdr
+ *   you passed to this function. The second argument will be the MimeMessage
+ *   instance resulting from the processing on success, and null on failure.
+ * @param [aAllowDownload=false] Should we allow the message to be downloaded
+ *   for this streaming request? The default is false, which means that we
+ *   require that the message be available offline. If false is passed and
+ *   the message is not available offline, we will propagate an exception
+ *   thrown by the underlying code.
+ * @param [aOptions] Optional options.
+ * @param [aOptions.saneBodySize] Limit body sizes to a 'reasonable' size in
+ *   order to combat corrupt offline/message stores creating pathological
+ *   situations where we have erroneously multi-megabyte messages. This
+ *   also likely reduces the impact of legitimately ridiculously large
+ *   messages.
+ * @param [aOptions.examineEncryptedParts] By default, we won't reveal the
+ *   contents of multipart/encrypted parts to the consumers, unless explicitly
+ *   requested. In the case of MIME/PGP messages, for instance, the message
+ *   will appear as an empty multipart/encrypted container, unless this option
+ *   is used.
+ */
+function MsgHdrToMimeMessage(
+  aMsgHdr,
+  aCallbackThis,
+  aCallback,
+  aAllowDownload,
+  aOptions
+) {
+  // Make sure shutdown can cancel whatever request we start below.
+  shutdownCleanupObserver.ensureInitialized();
+
+  let requireOffline = !aAllowDownload;
+  // Messages opened from file or attachments do not have a folder property, but
+  // have their url stored as a string property.
+  let msgURI = aMsgHdr.folder
+    ? aMsgHdr.folder.getUriForMsg(aMsgHdr)
+    : aMsgHdr.getStringProperty("dummyMsgUrl");
+
+  let msgService = MailServices.messageServiceFromURI(msgURI);
+
+  // Expose the options to the MimeMessageEmitter; cleared again before we
+  // return (see the OPTION_TUNNEL documentation below).
+  MsgHdrToMimeMessage.OPTION_TUNNEL = aOptions;
+  // By default, Enigmail only decrypts a message streamed via libmime if it's
+  // the one currently on display in the message reader. With this option, we're
+  // letting Enigmail know that it should decrypt the message since the client
+  // explicitly asked for it.
+  let encryptedStr =
+    aOptions && aOptions.examineEncryptedParts
+      ? "&examineEncryptedParts=true"
+      : "";
+
+  // S/MIME, our other encryption backend, is not that smart, and always
+  // decrypts data. In order to protect sensitive data (e.g. not index it in
+  // Gloda), unless the client asked for encrypted data, we pass to the client
+  // callback a stripped-down version of the MIME structure where encrypted
+  // parts have been removed.
+  let wrapCallback = function (aCallback, aCallbackThis) {
+    if (aOptions && aOptions.examineEncryptedParts) {
+      return aCallback;
+    }
+    return (aMsgHdr, aMimeMsg) =>
+      aCallback.call(aCallbackThis, aMsgHdr, stripEncryptedParts(aMimeMsg));
+  };
+
+  // Apparently there used to be an old syntax where the callback was the second
+  // argument...
+  let callback = aCallback ? aCallback : aCallbackThis;
+  let callbackThis = aCallback ? aCallbackThis : null;
+
+  // if we're already streaming this msg, just add the callback
+  // to the listener.
+  let listenerForURI = activeStreamListeners[msgURI];
+  if (listenerForURI != undefined) {
+    listenerForURI._callbacks.push(wrapCallback(callback, callbackThis));
+    listenerForURI._callbacksThis.push(callbackThis);
+    return;
+  }
+  let streamListener = new CallbackStreamListener(
+    aMsgHdr,
+    callbackThis,
+    wrapCallback(callback, callbackThis)
+  );
+
+  try {
+    msgService.streamMessage(
+      msgURI,
+      streamListener, // consumer
+      null, // nsIMsgWindow
+      dumbUrlListener, // nsIUrlListener
+      true, // have them create the converter
+      // additional uri payload, note that "header=" is prepended automatically
+      "filter&emitter=js" + encryptedStr,
+      requireOffline
+    );
+  } catch (ex) {
+    // If streamMessage throws an exception, we should make sure to clear the
+    // activeStreamListener, or any subsequent attempt at streaming this URI
+    // will silently fail
+    if (activeStreamListeners[msgURI]) {
+      delete activeStreamListeners[msgURI];
+    }
+    MsgHdrToMimeMessage.OPTION_TUNNEL = null;
+    throw ex;
+  }
+
+  MsgHdrToMimeMessage.OPTION_TUNNEL = null;
+}
+
+/**
+ * Let the jsmimeemitter provide us with results. The poor emitter (if I am
+ * understanding things correctly) is evaluated outside of the C.u.import
+ * world, so if we were to import him, we would not see him, but rather a new
+ * copy of him. This goes for his globals, etc. (and is why we live in this
+ * file right here). Also, it appears that the XPCOM JS wrappers aren't
+ * magically unified so that we can try and pass data as expando properties
+ * on things like the nsIUri instances either. So we have the jsmimeemitter
+ * import us and poke things into RESULT_RENDEVOUZ. We put it here on this
+ * function to try and be stealthy and avoid polluting the namespaces (or
+ * encouraging bad behaviour) of our importers.
+ *
+ * Keys are the streamed message's URL spec; values are the parsed
+ * MimeMessage (consumed in CallbackStreamListener.onStopRequest).
+ *
+ * If you can come up with a prettier way to shuttle this data, please do.
+ */
+MsgHdrToMimeMessage.RESULT_RENDEVOUZ = {};
+/**
+ * Cram rich options here for the MimeMessageEmitter to grab from. We
+ * leverage the known control-flow to avoid needing a whole dictionary here.
+ * We set this immediately before constructing the emitter and clear it
+ * afterwards. Control flow is never yielded during the process and reentrancy
+ * cannot happen via any other means.
+ */
+MsgHdrToMimeMessage.OPTION_TUNNEL = null;
+
+/**
+ * Mixin providing case-insensitive header lookup over a `headers` object
+ * that maps lower-cased header names to arrays of observed values.
+ */
+var HeaderHandlerBase = {
+  /**
+   * Look-up a header that should be present at most once.
+   *
+   * @param aHeaderName The header name to retrieve, case does not matter.
+   * @param aDefaultValue The value to return if the header was not found, null
+   *   if left unspecified.
+   * @returns the value of the header if present, and the default value if not
+   *   (defaults to null). If the header was present multiple times, the first
+   *   instance of the header is returned. Use getAll if you want all of the
+   *   values for the multiply-defined header.
+   */
+  get(aHeaderName, aDefaultValue) {
+    if (aDefaultValue === undefined) {
+      aDefaultValue = null;
+    }
+    let lowerHeader = aHeaderName.toLowerCase();
+    if (lowerHeader in this.headers) {
+      // we require that the list cannot be empty if present
+      return this.headers[lowerHeader][0];
+    }
+    return aDefaultValue;
+  },
+  /**
+   * Look-up a header that can be present multiple times. Use get for headers
+   * that you only expect to be present at most once.
+   *
+   * @param aHeaderName The header name to retrieve, case does not matter.
+   * @returns An array containing the values observed, which may mean a zero
+   *   length array.
+   */
+  getAll(aHeaderName) {
+    let lowerHeader = aHeaderName.toLowerCase();
+    if (lowerHeader in this.headers) {
+      return this.headers[lowerHeader];
+    }
+    return [];
+  },
+  /**
+   * @param aHeaderName Header name to test for its presence.
+   * @returns true if the message has (at least one value for) the given header
+   *   name.
+   */
+  has(aHeaderName) {
+    let lowerHeader = aHeaderName.toLowerCase();
+    return lowerHeader in this.headers;
+  },
+  /**
+   * Render the headers as a multi-line debugging string (one "name: values"
+   * line per header).
+   *
+   * @param aIndent Optional prefix for each line; defaults to "".
+   */
+  _prettyHeaderString(aIndent) {
+    if (aIndent === undefined) {
+      aIndent = "";
+    }
+    let s = "";
+    for (let header in this.headers) {
+      let values = this.headers[header];
+      s += "\n " + aIndent + header + ": " + values;
+    }
+    return s;
+  },
+};
+
+/**
+ * Root (or attached sub-message) node of the parsed MIME tree.
+ *
+ * @ivar partName The MIME part, ex "1.2.2.1". The partName of a (top-level)
+ *   message is "1", its first child is "1.1", its second child is "1.2",
+ *   its first child's first child is "1.1.1", etc.
+ * @ivar headers Maps lower-cased header field names to a list of the values
+ *   seen for the given header. Use get or getAll as convenience helpers.
+ * @ivar parts The list of the MIME part children of this message. Children
+ *   will be either MimeMessage instances, MimeMessageAttachment instances,
+ *   MimeContainer instances, or MimeUnknown instances. The latter two are
+ *   the result of limitations in the Javascript representation generation
+ *   at this time, combined with the need to most accurately represent the
+ *   MIME structure.
+ * @ivar isEncrypted Consulted by stripEncryptedParts(); presumably set by
+ *   the emitter for encrypted containers — confirm against the emitter.
+ */
+function MimeMessage() {
+  this.partName = null;
+  this.headers = {};
+  this.parts = [];
+  this.isEncrypted = false;
+}
+
+MimeMessage.prototype = {
+ __proto__: HeaderHandlerBase,
+ contentType: "message/rfc822",
+
+ /**
+ * @returns a list of all attachments contained in this message and all its
+ * sub-messages. Only MimeMessageAttachment instances will be present in
+ * the list (no sub-messages).
+ */
+ get allAttachments() {
+ let results = []; // messages are not attachments, don't include self
+ for (let iChild = 0; iChild < this.parts.length; iChild++) {
+ let child = this.parts[iChild];
+ results = results.concat(child.allAttachments);
+ }
+ return results;
+ },
+
+ /**
+ * @returns a list of all attachments contained in this message and all its
+ * sub-messages, including the sub-messages.
+ */
+ get allInlineAttachments() {
+ // Do not include the top message, but only sub-messages.
+ let results = this.partName ? [this] : [];
+ for (let iChild = 0; iChild < this.parts.length; iChild++) {
+ let child = this.parts[iChild];
+ results = results.concat(child.allInlineAttachments);
+ }
+ return results;
+ },
+
+ /**
+ * @returns a list of all attachments contained in this message, with
+ * included/forwarded messages treated as real attachments. Attachments
+ * contained in inner messages won't be shown.
+ */
+ get allUserAttachments() {
+ if (this.url) {
+ // The jsmimeemitter camouflaged us as a MimeAttachment
+ return [this];
+ }
+ return this.parts
+ .map(child => child.allUserAttachments)
+ .reduce((a, b) => a.concat(b), []);
+ },
+
+ /**
+ * @returns the total size of this message, that is, the size of all subparts
+ */
+ get size() {
+ return this.parts
+ .map(child => child.size)
+ .reduce((a, b) => a + Math.max(b, 0), 0);
+ },
+
+ /**
+ * In the case of attached messages, libmime considers them as attachments,
+ * and if the body is, say, quoted-printable encoded, then libmime will start
+ * counting bytes and notify the js mime emitter about it. The JS mime emitter
+ * being a nice guy, it will try to set a size on us. While this is the
+ * expected behavior for MimeMsgAttachments, we must make sure we can handle
+ * that (failing to write a setter results in exceptions being thrown).
+ */
+ set size(whatever) {
+ // nop
+ },
+
+ /**
+ * @param aMsgFolder A message folder, any message folder. Because this is
+ * a hack.
+ * @returns The concatenation of all of the body parts where parts
+ * available as text/plain are pulled as-is, and parts only available
+ * as text/html are converted to plaintext form first. In other words,
+ * if we see a multipart/alternative with a text/plain, we take the
+ * text/plain. If we see a text/html without an alternative, we convert
+ * that to text.
+ */
+ coerceBodyToPlaintext(aMsgFolder) {
+ let bodies = [];
+ for (let part of this.parts) {
+ // an undefined value for something not having the method is fine
+ let body =
+ part.coerceBodyToPlaintext && part.coerceBodyToPlaintext(aMsgFolder);
+ if (body) {
+ bodies.push(body);
+ }
+ }
+ if (bodies) {
+ return bodies.join("");
+ }
+ return "";
+ },
+
+ /**
+ * Convert the message and its hierarchy into a "pretty string". The message
+ * and each MIME part get their own line. The string never ends with a
+ * newline. For a non-multi-part message, only a single line will be
+ * returned.
+ * Messages have their subject displayed, attachments have their filename and
+ * content-type (ex: image/jpeg) displayed. "Filler" classes simply have
+ * their class displayed.
+ */
+ prettyString(aVerbose, aIndent, aDumpBody) {
+ if (aIndent === undefined) {
+ aIndent = "";
+ }
+ let nextIndent = aIndent + " ";
+
+ let s =
+ "Message " +
+ (this.isEncrypted ? "[encrypted] " : "") +
+ "(" +
+ this.size +
+ " bytes): " +
+ "subject" in
+ this.headers
+ ? this.headers.subject
+ : "";
+ if (aVerbose) {
+ s += this._prettyHeaderString(nextIndent);
+ }
+
+ for (let iPart = 0; iPart < this.parts.length; iPart++) {
+ let part = this.parts[iPart];
+ s +=
+ "\n" +
+ nextIndent +
+ (iPart + 1) +
+ " " +
+ part.prettyString(aVerbose, nextIndent, aDumpBody);
+ }
+
+ return s;
+ },
+};
+
+/**
+ * @ivar contentType The content-type of this container.
+ * @ivar parts The parts held by this container. These can be instances of any
+ * of the classes found in this file.
+ */
function MimeContainer(aContentType) {
  this.partName = null;
  this.contentType = aContentType;
  this.headers = {};
  this.parts = [];
  this.isEncrypted = false;
}

MimeContainer.prototype = {
  __proto__: HeaderHandlerBase,
  // Aggregate the proper attachments of every child part.
  get allAttachments() {
    return this.parts
      .map(part => part.allAttachments)
      .reduce((acc, list) => acc.concat(list), []);
  },
  // Aggregate the inline attachments (which include sub-messages) of every
  // child part.
  get allInlineAttachments() {
    return this.parts
      .map(part => part.allInlineAttachments)
      .reduce((acc, list) => acc.concat(list), []);
  },
  // Aggregate the user-visible attachments of every child part.
  get allUserAttachments() {
    return this.parts
      .map(part => part.allUserAttachments)
      .reduce((acc, list) => acc.concat(list), []);
  },
  // Total size of all children; negative child sizes count as zero.
  get size() {
    return this.parts
      .map(part => part.size)
      .reduce((total, partSize) => total + Math.max(partSize, 0), 0);
  },
  // Swallow size assignments from the emitter; our size is derived.
  set size(whatever) {
    // nop
  },
  /**
   * Coerce this container to plaintext. For multipart/alternative we prefer
   * a text/plain alternative outright; failing that we convert a text/html
   * (or, if none, text/enriched) alternative to text. Any other container
   * defers to the concatenate-all-children logic shared with MimeMessage.
   */
  coerceBodyToPlaintext(aMsgFolder) {
    if (this.contentType == "multipart/alternative") {
      let htmlish;
      for (let subPart of this.parts) {
        if (subPart.contentType == "text/plain") {
          // A plaintext alternative wins immediately.
          return subPart.body;
        }
        if (subPart.contentType == "text/html") {
          htmlish = subPart;
        } else if (!htmlish && subPart.contentType == "text/enriched") {
          // text/enriched gets rendered as HTML, so it can stand in when no
          // real HTML part has been seen yet.
          htmlish = subPart;
        }
      }
      if (htmlish) {
        return aMsgFolder.convertMsgSnippetToPlainText(htmlish.body);
      }
    }
    // Not an alternative container: recurse/aggregate like a message does.
    return MimeMessage.prototype.coerceBodyToPlaintext.call(this, aMsgFolder);
  },
  prettyString(aVerbose, aIndent, aDumpBody) {
    let nextIndent = aIndent + " ";

    let s =
      "Container " +
      (this.isEncrypted ? "[encrypted] " : "") +
      "(" +
      this.size +
      " bytes): " +
      this.contentType;
    if (aVerbose) {
      s += this._prettyHeaderString(nextIndent);
    }

    this.parts.forEach((part, iPart) => {
      s +=
        "\n" +
        nextIndent +
        (iPart + 1) +
        " " +
        part.prettyString(aVerbose, nextIndent, aDumpBody);
    });

    return s;
  },
  toString() {
    return "Container: " + this.contentType;
  },
};
+
+/**
+ * @class Represents a body portion that we understand and do not believe to be
+ * a proper attachment. This means text/plain or text/html and it has no
+ * filename. (A filename suggests an attachment.)
+ *
 * @ivar contentType The content type of this body material; text/plain or
+ * text/html.
+ * @ivar body The actual body content.
+ */
function MimeBody(aContentType) {
  this.partName = null;
  this.contentType = aContentType;
  this.headers = {};
  this.body = "";
  this.isEncrypted = false;
}

MimeBody.prototype = {
  __proto__: HeaderHandlerBase,
  // A body is a leaf part: it never contributes attachments of any kind.
  get allAttachments() {
    return [];
  },
  get allInlineAttachments() {
    return [];
  },
  get allUserAttachments() {
    return [];
  },
  // Our size is simply the length of the accumulated body text.
  get size() {
    return this.body.length;
  },
  // Ignore size assignments from the emitter; our size is derived.
  set size(whatever) {
    // nop
  },
  /** Accumulate another chunk of body text. */
  appendBody(aBuf) {
    this.body += aBuf;
  },
  /**
   * Produce a plaintext rendering of this body: text/plain is returned
   * verbatim; text/html and text/enriched (which libmime transforms into
   * HTML) go through the folder's snippet converter; anything else yields
   * the empty string.
   */
  coerceBodyToPlaintext(aMsgFolder) {
    switch (this.contentType) {
      case "text/plain":
        return this.body;
      case "text/html":
      case "text/enriched":
        return aMsgFolder.convertMsgSnippetToPlainText(this.body);
      default:
        return "";
    }
  },
  prettyString(aVerbose, aIndent, aDumpBody) {
    let bodyDump = aDumpBody ? ": '" + this.body + "'" : "";
    let s =
      "Body: " +
      (this.isEncrypted ? "[encrypted] " : "") +
      this.contentType +
      " (" +
      this.body.length +
      " bytes" +
      bodyDump +
      ")";
    if (aVerbose) {
      s += this._prettyHeaderString(aIndent + " ");
    }
    return s;
  },
  toString() {
    return "Body: " + this.contentType + " (" + this.body.length + " bytes)";
  },
};
+
+/**
+ * @class A MIME Leaf node that doesn't have a filename so we assume it's not
+ * intended to be an attachment proper. This is probably meant for inline
+ * display or is the result of someone amusing themselves by composing messages
+ * by hand or a bad client. This class should probably be renamed or we should
+ * introduce a better named class that we try and use in preference to this
+ * class.
+ *
+ * @ivar contentType The content type of this part.
+ */
function MimeUnknown(aContentType) {
  this.partName = null;
  this.contentType = aContentType;
  this.headers = {};
  // Looks like libmime does not always interpret us as an attachment, which
  // means we need a default size: an undefined size would poison the
  // recursive size computations.
  this._size = 0;
  this.isEncrypted = false;
  // We want to make sure MimeUnknown has a parts property: S/MIME encrypted
  // messages have a topmost MimeUnknown part (with the encrypted bit set),
  // and all other encrypted parts need to be children of that topmost part.
  this.parts = [];
}

MimeUnknown.prototype = {
  __proto__: HeaderHandlerBase,
  // Aggregate proper attachments from our children.
  get allAttachments() {
    return this.parts
      .map(part => part.allAttachments)
      .reduce((acc, list) => acc.concat(list), []);
  },
  // Aggregate inline attachments (including sub-messages) from our children.
  get allInlineAttachments() {
    return this.parts
      .map(part => part.allInlineAttachments)
      .reduce((acc, list) => acc.concat(list), []);
  },
  // Aggregate user-visible attachments from our children.
  get allUserAttachments() {
    return this.parts
      .map(part => part.allUserAttachments)
      .reduce((acc, list) => acc.concat(list), []);
  },
  // Our own byte count plus the children's (negatives clamped to zero).
  get size() {
    let childrenSize = this.parts
      .map(part => part.size)
      .reduce((total, partSize) => total + Math.max(partSize, 0), 0);
    return this._size + childrenSize;
  },
  set size(aSize) {
    this._size = aSize;
  },
  prettyString(aVerbose, aIndent, aDumpBody) {
    let nextIndent = aIndent + " ";

    let s =
      "Unknown: " +
      (this.isEncrypted ? "[encrypted] " : "") +
      this.contentType +
      " (" +
      this.size +
      " bytes)";
    if (aVerbose) {
      s += this._prettyHeaderString(aIndent + " ");
    }

    this.parts.forEach((part, iPart) => {
      s +=
        "\n" +
        nextIndent +
        (iPart + 1) +
        " " +
        (part ? part.prettyString(aVerbose, nextIndent, aDumpBody) : "NULL");
    });
    return s;
  },
  toString() {
    return "Unknown: " + this.contentType;
  },
};
+
+/**
+ * @class An attachment proper. We think it's an attachment because it has a
+ * filename that libmime was able to figure out.
+ *
+ * @ivar partName @see{MimeMessage.partName}
+ * @ivar name The filename of this attachment.
+ * @ivar contentType The MIME content type of this part.
+ * @ivar url The URL to stream if you want the contents of this part.
+ * @ivar isExternal Is the attachment stored someplace else than in the message?
+ * @ivar size The size of the attachment if available, -1 otherwise (size is set
+ * after initialization by jsmimeemitter.js)
+ */
function MimeMessageAttachment(
  aPartName,
  aName,
  aContentType,
  aUrl,
  aIsExternal
) {
  this.partName = aPartName;
  this.name = aName;
  this.contentType = aContentType;
  this.url = aUrl;
  this.isExternal = aIsExternal;
  this.headers = {};
  this.isEncrypted = false;
  // parts, headers, and isEncrypted are copied over afterwards from the part
  // instance that preceded us.
}

MimeMessageAttachment.prototype = {
  __proto__: HeaderHandlerBase,
  // An attachment is a leaf; every "all*Attachments" view is just ourselves.
  get allAttachments() {
    return [this];
  },
  get allInlineAttachments() {
    return [this];
  },
  get allUserAttachments() {
    return [this];
  },
  prettyString(aVerbose, aIndent, aDumpBody) {
    let encMarker = this.isEncrypted ? "[encrypted] " : "";
    let s =
      "Attachment " +
      encMarker +
      "(" +
      this.size +
      " bytes): " +
      this.name +
      ", " +
      this.contentType;
    if (aVerbose) {
      s += this._prettyHeaderString(aIndent + " ");
    }
    return s;
  },
  toString() {
    return this.prettyString(false, "");
  },
};
diff --git a/comm/mailnews/db/gloda/modules/NounFreetag.jsm b/comm/mailnews/db/gloda/modules/NounFreetag.jsm
new file mode 100644
index 0000000000..cb169645f1
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/NounFreetag.jsm
@@ -0,0 +1,91 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["FreeTag", "FreeTagNoun"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+
/**
 * A tag that exists purely by virtue of being named. Instances are interned
 * by |FreeTagNoun.getFreeTag| so each distinct name maps to one object.
 */
function FreeTag(aName) {
  this.name = aName;
}

FreeTag.prototype = {
  /** Stringify to the raw tag name. */
  toString() {
    return this.name;
  },
};
+
/**
 * @namespace Tag noun provider. Since the tag unique value is stored as a
 *  parameter, we are an odd case and semantically confused.
 */
var FreeTagNoun = {
  _log: console.createInstance({
    prefix: "gloda.noun.freetag",
    maxLogLevel: "Warn",
    maxLogLevelPref: "gloda.loglevel",
  }),

  name: "freetag",
  clazz: FreeTag,
  allowsArbitraryAttrs: false,
  usesParameter: true,

  // Observers notified (via onFreeTagAdded) whenever a new tag is interned.
  _listeners: [],
  addListener(aListener) {
    this._listeners.push(aListener);
  },
  removeListener(aListener) {
    let where = this._listeners.indexOf(aListener);
    if (where != -1) {
      this._listeners.splice(where, 1);
    }
  },

  /**
   * Intern every tag name already recorded as an attribute parameter in the
   * database so that knownFreeTags reflects all previously seen tags.
   */
  populateKnownFreeTags() {
    for (let attr of this.objectNounOfAttributes) {
      let attrDB = attr.dbDef;
      for (let param in attrDB.parameterBindings) {
        this.getFreeTag(param);
      }
    }
  },

  // Interning table: tag name -> FreeTag instance.
  knownFreeTags: {},
  /**
   * Return the canonical FreeTag for the given name, creating it (and
   * notifying listeners) the first time the name is seen.
   */
  getFreeTag(aTagName) {
    let tag = this.knownFreeTags[aTagName];
    if (!tag) {
      tag = new FreeTag(aTagName);
      this.knownFreeTags[aTagName] = tag;
      for (let listener of this._listeners) {
        listener.onFreeTagAdded(tag);
      }
    }
    return tag;
  },

  /** Order by tag name; null sorts after any non-null tag. */
  comparator(a, b) {
    if (a == null && b == null) {
      return 0;
    }
    if (a == null) {
      return 1;
    }
    if (b == null) {
      return -1;
    }
    return a.name.localeCompare(b.name);
  },

  toParamAndValue(aTag) {
    return [aTag.name, null];
  },

  toJSON(aTag) {
    return aTag.name;
  },
  fromJSON(aTagName) {
    return this.getFreeTag(aTagName);
  },
};

Gloda.defineNoun(FreeTagNoun);
diff --git a/comm/mailnews/db/gloda/modules/NounMimetype.jsm b/comm/mailnews/db/gloda/modules/NounMimetype.jsm
new file mode 100644
index 0000000000..fef1a33bc7
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/NounMimetype.jsm
@@ -0,0 +1,582 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["MimeType", "MimeTypeNoun"];
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
// Module-scoped logger for this noun provider; verbosity is governed by the
// "gloda.loglevel" preference.
var LOG = console.createInstance({
  prefix: "gloda.noun.mimetype",
  maxLogLevel: "Warn",
  maxLogLevelPref: "gloda.loglevel",
});

// Maps top-level category name -> localized label; populated by
// MimeTypeNoun._loadCategoryMapping().
var CategoryStringMap = {};
+
+/**
+ * Input data structure to allow us to build a fast mapping from mime type to
+ * category name. The keys in MimeCategoryMapping are the top-level
+ * categories. Each value can either be a list of MIME types or a nested
+ * object which recursively defines sub-categories. We currently do not use
+ * the sub-categories. They are just there to try and organize the MIME types
+ * a little and open the door to future enhancements.
+ *
+ * Do _not_ add additional top-level categories unless you have added
+ * corresponding entries to gloda.properties under the
+ * "gloda.mimetype.category" branch and are making sure localizers are aware
+ * of the change and have time to localize it.
+ *
+ * Entries with wildcards in them are part of a fallback strategy by the
+ * |mimeTypeNoun| and do not actually use regular expressions or anything like
+ * that. Everything is a straight string lookup. Given "foo/bar" we look for
+ * "foo/bar", then "foo/*", and finally "*".
+ */
var MimeCategoryMapping = {
  // Compressed / packaged file formats.
  archives: [
    "application/java-archive",
    "application/x-java-archive",
    "application/x-jar",
    "application/x-java-jnlp-file",

    "application/mac-binhex40",
    "application/vnd.ms-cab-compressed",

    "application/x-arc",
    "application/x-arj",
    "application/x-compress",
    "application/x-compressed-tar",
    "application/x-cpio",
    "application/x-cpio-compressed",
    "application/x-deb",

    "application/x-bittorrent",

    "application/x-rar",
    "application/x-rar-compressed",
    "application/x-7z-compressed",
    "application/zip",
    "application/x-zip-compressed",
    "application/x-zip",

    "application/x-bzip",
    "application/x-bzip-compressed-tar",
    "application/x-bzip2",
    "application/x-gzip",
    "application/x-tar",
    "application/x-tar-gz",
    "application/x-tarz",
  ],
  // Office-style documents. The nested keys (database, graphics, ...) are
  // sub-categories; only the top-level "documents" key is used for lookups
  // today (see the doc comment above).
  documents: {
    database: [
      "application/vnd.ms-access",
      "application/x-msaccess",
      "application/msaccess",
      "application/vnd.msaccess",
      "application/x-msaccess",
      "application/mdb",
      "application/x-mdb",

      "application/vnd.oasis.opendocument.database",
    ],
    graphics: [
      "application/postscript",
      "application/x-bzpostscript",
      "application/x-dvi",
      "application/x-gzdvi",

      "application/illustrator",

      "application/vnd.corel-draw",
      "application/cdr",
      "application/coreldraw",
      "application/x-cdr",
      "application/x-coreldraw",
      "image/cdr",
      "image/x-cdr",
      "zz-application/zz-winassoc-cdr",

      "application/vnd.oasis.opendocument.graphics",
      "application/vnd.oasis.opendocument.graphics-template",
      "application/vnd.oasis.opendocument.image",

      "application/x-dia-diagram",
    ],
    presentation: [
      "application/vnd.ms-powerpoint.presentation.macroenabled.12",
      "application/vnd.ms-powerpoint.template.macroenabled.12",
      "application/vnd.ms-powerpoint",
      "application/powerpoint",
      "application/mspowerpoint",
      "application/x-mspowerpoint",
      "application/vnd.openxmlformats-officedocument.presentationml.presentation",
      "application/vnd.openxmlformats-officedocument.presentationml.template",

      "application/vnd.oasis.opendocument.presentation",
      "application/vnd.oasis.opendocument.presentation-template",
    ],
    spreadsheet: [
      "application/vnd.lotus-1-2-3",
      "application/x-lotus123",
      "application/x-123",
      "application/lotus123",
      "application/wk1",

      "application/x-quattropro",

      "application/vnd.ms-excel.sheet.binary.macroenabled.12",
      "application/vnd.ms-excel.sheet.macroenabled.12",
      "application/vnd.ms-excel.template.macroenabled.12",
      "application/vnd.ms-excel",
      "application/msexcel",
      "application/x-msexcel",
      "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
      "application/vnd.openxmlformats-officedocument.spreadsheetml.template",

      "application/vnd.oasis.opendocument.formula",
      "application/vnd.oasis.opendocument.formula-template",
      "application/vnd.oasis.opendocument.chart",
      "application/vnd.oasis.opendocument.chart-template",
      "application/vnd.oasis.opendocument.spreadsheet",
      "application/vnd.oasis.opendocument.spreadsheet-template",

      "application/x-gnumeric",
    ],
    wordProcessor: [
      "application/msword",
      "application/vnd.ms-word",
      "application/x-msword",
      "application/msword-template",
      "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
      "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
      "application/vnd.ms-word.document.macroenabled.12",
      "application/vnd.ms-word.template.macroenabled.12",
      "application/x-mswrite",
      "application/x-pocket-word",

      "application/rtf",
      "text/rtf",

      "application/vnd.oasis.opendocument.text",
      "application/vnd.oasis.opendocument.text-master",
      "application/vnd.oasis.opendocument.text-template",
      "application/vnd.oasis.opendocument.text-web",

      "application/vnd.wordperfect",

      "application/x-abiword",
      "application/x-amipro",
    ],
    suite: ["application/vnd.ms-works"],
  },
  // Wildcard: every image/* type is an image.
  images: ["image/*"],
  media: {
    audio: ["audio/*"],
    video: ["video/*"],
    container: [
      "application/ogg",

      "application/smil",
      "application/vnd.ms-asf",
      "application/vnd.rn-realmedia",
      "application/x-matroska",
      "application/x-quicktime-media-link",
      "application/x-quicktimeplayer",
    ],
  },
  // Fallback bucket: "*" matches anything not claimed by a more specific
  // entry (see _getCategoryForMimeType's lookup order).
  other: ["*"],
  pdf: [
    "application/pdf",
    "application/x-pdf",
    "image/pdf",
    "file/pdf",
    "application/x-bzpdf",
    "application/x-gzpdf",
  ],
};
+
+/**
+ * Mime type abstraction that exists primarily so we can map mime types to
+ * integer id's.
+ *
+ * Instances of this class should only be retrieved via |MimeTypeNoun|; no one
+ * should ever create an instance directly.
+ */
function MimeType(aID, aType, aSubType, aFullType, aCategory) {
  this._id = aID;
  this._type = aType;
  this._subType = aSubType;
  this._fullType = aFullType;
  this._category = aCategory;
}

MimeType.prototype = {
  /**
   * The integer id associated with this mime type. It is stable for the
   * lifetime of the database, so anything persisted in the Gloda database can
   * rely on it. Anything not persisted should use the string form available
   * via |fullType| instead.
   */
  get id() {
    return this._id;
  },
  /**
   * The major type; "text/plain" gets you "text".
   */
  get type() {
    return this._type;
  },
  /**
   * The minor type; "text/plain" gets you "plain".
   */
  get subType() {
    return this._subType;
  },
  /**
   * The full "type/subtype" string; "text/plain" gets you "text/plain".
   */
  get fullType() {
    return this._fullType;
  },
  /**
   * Latch the full type exactly once; assignments after the first are
   * ignored. Setting it also derives |type|, |subType|, and |category|.
   */
  set fullType(aFullType) {
    if (this._fullType) {
      return;
    }
    this._fullType = aFullType;
    let [major, minor] = aFullType.split("/");
    this._type = major;
    this._subType = minor;
    this._category = MimeTypeNoun._getCategoryForMimeType(aFullType, major);
  },
  toString() {
    return this.fullType;
  },

  /**
   * @returns the category this mime type is believed to belong to. Never show
   *     this raw name to the user; use |categoryLabel| for the localized
   *     form. The mapping comes from MimeCategoryMapping in this file.
   */
  get category() {
    return this._category;
  },
  /**
   * @returns the localized label for |category|, as defined in
   *     gloda.properties under "gloda.mimetype.category.CATEGORY.label".
   */
  get categoryLabel() {
    return CategoryStringMap[this._category];
  },
};
+
+/**
+ * Mime type noun provider.
+ *
+ * The set of MIME Types is sufficiently limited that we can keep them all in
+ * memory. In theory it is also sufficiently limited that we could use the
+ * parameter mechanism in the database. However, it is more efficient, for
+ * both space and performance reasons, to store the specific mime type as a
+ * value. For future-proofing reasons, we opt to use a database table to
+ * persist the mapping rather than a hard-coded list. A preferences file or
+ * other text file would arguably suffice, but for consistency reasons, the
+ * database is not a bad thing.
+ */
var MimeTypeNoun = {
  name: "mime-type",
  clazz: MimeType, // gloda supports clazz as well as class
  allowsArbitraryAttrs: false,

  // NOTE(review): Services is assumed to be available as a chrome global;
  // this module does not import it explicitly.
  _strings: Services.strings.createBundle(
    "chrome://messenger/locale/gloda.properties"
  ),

  // note! update test_noun_mimetype if you change our internals!
  _mimeTypes: {},
  _mimeTypesByID: {},
  // Each category owns a contiguous block of this many ids; the block
  // boundaries are what _createCategoryDummies spans.
  TYPE_BLOCK_SIZE: 16384,
  _mimeTypeHighID: {},
  _mimeTypeRangeDummyObjects: {},
  _highID: 0,

  // we now use the exciting 'schema' mechanism of defineNoun to get our table
  // created for us, plus some helper methods that we simply don't use.
  schema: {
    name: "mimeTypes",
    columns: [
      ["id", "INTEGER PRIMARY KEY", "_id"],
      ["mimeType", "TEXT", "fullType"],
    ],
  },

  // Load the category mapping first so that _getCategoryForMimeType works
  // when the database rows stream in.
  _init() {
    LOG.debug("loading MIME types");
    this._loadCategoryMapping();
    this._loadMimeTypes();
  },

  /**
   * A map from MIME type to category name.
   */
  _mimeTypeToCategory: {},
  /**
   * Walk MimeCategoryMapping, populating _mimeTypeToCategory and the
   * CategoryStringMap of localized top-level category labels.
   */
  _loadCategoryMapping() {
    let mimeTypeToCategory = this._mimeTypeToCategory;

    function procMapObj(aSubTree, aCategories) {
      for (let key in aSubTree) {
        let value = aSubTree[key];
        // Add this category to our nested categories list. Use concat since
        // the list will be long-lived and each list needs to be distinct.
        let categories = aCategories.concat();
        categories.push(key);

        // Only top-level categories (depth 1) have localized labels.
        if (categories.length == 1) {
          CategoryStringMap[key] = MimeTypeNoun._strings.GetStringFromName(
            "gloda.mimetype.category." + key + ".label"
          );
        }

        // Is it an array? If so, just process this depth
        if (Array.isArray(value)) {
          for (let mimeTypeStr of value) {
            mimeTypeToCategory[mimeTypeStr] = categories;
          }
        } else {
          // it's yet another sub-tree branch
          procMapObj(value, categories);
        }
      }
    }
    procMapObj(MimeCategoryMapping, []);
  },

  /**
   * Lookup the category associated with a MIME type given its full type and
   * type. (So, "foo/bar" and "foo" for "foo/bar".)
   * Falls back from "foo/bar" to "foo/*" and finally "*"; all lookups are
   * plain string keys, not patterns.
   */
  _getCategoryForMimeType(aFullType, aType) {
    if (aFullType in this._mimeTypeToCategory) {
      return this._mimeTypeToCategory[aFullType][0];
    }
    let wildType = aType + "/*";
    if (wildType in this._mimeTypeToCategory) {
      return this._mimeTypeToCategory[wildType][0];
    }
    return this._mimeTypeToCategory["*"][0];
  },

  /**
   * In order to allow the gloda query mechanism to avoid hitting the database,
   * we need to either define the noun type as cacheable and have a super-large
   * cache or simply have a collection with every MIME type in it that stays
   * alive forever.
   * This is that collection. It is initialized by |_loadMimeTypes|. As new
   * MIME types are created, we add them to the collection.
   */
  _universalCollection: null,

  /**
   * Kick off a query of all the mime types in our database, leaving
   * |_processMimeTypes| to actually do the legwork.
   */
  _loadMimeTypes() {
    // get all the existing mime types!
    // (this.id is our noun id; presumably assigned by Gloda.defineNoun -
    // verify against Gloda.jsm.)
    let query = Gloda.newQuery(this.id);
    let nullFunc = function () {};
    this._universalCollection = query.getCollection(
      {
        onItemsAdded: nullFunc,
        onItemsModified: nullFunc,
        onItemsRemoved: nullFunc,
        onQueryCompleted(aCollection) {
          MimeTypeNoun._processMimeTypes(aCollection.items);
        },
      },
      null
    );
  },

  /**
   * For the benefit of our Category queryHelper, we need dummy ranged objects
   * that cover the numerical address space allocated to the category. We
   * can't use a real object for the upper-bound because the upper-bound is
   * constantly growing and there is the chance the query might get persisted,
   * which means these values need to be long-lived. Unfortunately, our
   * solution to this problem (dummy objects) complicates the second case,
   * should it ever occur. (Because the dummy objects cannot be persisted
   * on their own... but there are other issues that will come up that we will
   * just have to deal with then.)
   */
  _createCategoryDummies(aId, aCategory) {
    // Round down to the start of the id block containing aId; the block top
    // is the last id inside that same block.
    let blockBottom = aId - (aId % this.TYPE_BLOCK_SIZE);
    let blockTop = blockBottom + this.TYPE_BLOCK_SIZE - 1;
    this._mimeTypeRangeDummyObjects[aCategory] = [
      new MimeType(
        blockBottom,
        "!category-dummy!",
        aCategory,
        "!category-dummy!/" + aCategory,
        aCategory
      ),
      new MimeType(
        blockTop,
        "!category-dummy!",
        aCategory,
        "!category-dummy!/" + aCategory,
        aCategory
      ),
    ];
  },

  // Ingest MimeType rows loaded from the database, rebuilding the in-memory
  // lookup tables and per-category high-water marks.
  _processMimeTypes(aMimeTypes) {
    for (let mimeType of aMimeTypes) {
      if (mimeType.id > this._highID) {
        this._highID = mimeType.id;
      }
      // Using the object itself as the key relies on MimeType.toString()
      // returning fullType, so this is equivalent to keying by fullType
      // (which is what _addNewMimeType and getMimeType use).
      this._mimeTypes[mimeType] = mimeType;
      this._mimeTypesByID[mimeType.id] = mimeType;

      let blockHighID =
        mimeType.category in this._mimeTypeHighID
          ? this._mimeTypeHighID[mimeType.category]
          : undefined;
      // create the dummy range objects
      if (blockHighID === undefined) {
        this._createCategoryDummies(mimeType.id, mimeType.category);
      }
      if (blockHighID === undefined || mimeType.id > blockHighID) {
        this._mimeTypeHighID[mimeType.category] = mimeType.id;
      }
    }
  },

  // Allocate an id for a never-before-seen MIME type (opening a fresh id
  // block if its category has none yet), persist it, and index it.
  _addNewMimeType(aMimeTypeName) {
    let [typeName, subTypeName] = aMimeTypeName.split("/");
    let category = this._getCategoryForMimeType(aMimeTypeName, typeName);

    if (!(category in this._mimeTypeHighID)) {
      // Start a new block past the block containing the global high id.
      let nextID =
        this._highID -
        (this._highID % this.TYPE_BLOCK_SIZE) +
        this.TYPE_BLOCK_SIZE;
      this._mimeTypeHighID[category] = nextID;
      this._createCategoryDummies(nextID, category);
    }

    let nextID = ++this._mimeTypeHighID[category];

    let mimeType = new MimeType(
      nextID,
      typeName,
      subTypeName,
      aMimeTypeName,
      category
    );
    if (mimeType.id > this._highID) {
      this._highID = mimeType.id;
    }

    this._mimeTypes[aMimeTypeName] = mimeType;
    this._mimeTypesByID[nextID] = mimeType;

    // As great as the gloda extension mechanisms are, we don't think it makes
    // a lot of sense to use them in this case. So we directly trigger object
    // insertion without any of the grokNounItem stuff.
    this.objInsert.call(this.datastore, mimeType);
    // Since we bypass grokNounItem and its fun, we need to explicitly add the
    // new MIME-type to _universalCollection ourselves. Don't try this at
    // home, kids.
    this._universalCollection._onItemsAdded([mimeType]);

    return mimeType;
  },

  /**
   * Map a mime type to a |MimeType| instance, creating it if necessary.
   *
   * @param aMimeTypeName The mime type. It may optionally include parameters
   *     (which will be ignored). A mime type is of the form "type/subtype".
   *     A type with parameters would look like 'type/subtype; param="value"'.
   */
  getMimeType(aMimeTypeName) {
    // first, lose any parameters
    let semiIndex = aMimeTypeName.indexOf(";");
    if (semiIndex >= 0) {
      aMimeTypeName = aMimeTypeName.substring(0, semiIndex);
    }
    // Normalize: MIME types are case-insensitive.
    aMimeTypeName = aMimeTypeName.trim().toLowerCase();

    if (aMimeTypeName in this._mimeTypes) {
      return this._mimeTypes[aMimeTypeName];
    }
    return this._addNewMimeType(aMimeTypeName);
  },

  /**
   * Query helpers contribute additional functions to the query object for the
   * attributes that use the noun type. For example, we define Category, so
   * for the "attachmentTypes" attribute, "attachmentTypesCategory" would be
   * exposed.
   */
  queryHelpers: {
    /**
     * Query for MIME type categories based on one or more MIME type objects
     * passed in. We want the range to span the entire block allocated to the
     * category.
     *
     * @param aAttrDef The attribute that is using us.
     * @param aArguments The MimeType instances whose categories define the
     *     id ranges to constrain on.
     */
    Category(aAttrDef, aArguments) {
      let rangePairs = [];
      // If there are no arguments then we want to fall back to the 'in'
      // constraint which matches on any attachment.
      if (!aArguments || aArguments.length == 0) {
        return this._inConstraintHelper(aAttrDef, []);
      }

      for (let iArg = 0; iArg < aArguments.length; iArg++) {
        let arg = aArguments[iArg];
        rangePairs.push(MimeTypeNoun._mimeTypeRangeDummyObjects[arg.category]);
      }
      return this._rangedConstraintHelper(aAttrDef, rangePairs);
    },
  },

  // Order by full type string; null sorts after any non-null value.
  comparator(a, b) {
    if (a == null) {
      if (b == null) {
        return 0;
      }
      return 1;
    } else if (b == null) {
      return -1;
    }
    return a.fullType.localeCompare(b.fullType);
  },

  // We are stored as an id-valued attribute (no parameter).
  toParamAndValue(aMimeType) {
    return [null, aMimeType.id];
  },
  toJSON(aMimeType) {
    return aMimeType.id;
  },
  fromJSON(aMimeTypeID) {
    return this._mimeTypesByID[aMimeTypeID];
  },
};
Gloda.defineNoun(MimeTypeNoun, GlodaConstants.NOUN_MIME_TYPE);
try {
  MimeTypeNoun._init();
} catch (ex) {
  LOG.error(
    "problem init-ing: " + ex.fileName + ":" + ex.lineNumber + ": " + ex
  );
}
diff --git a/comm/mailnews/db/gloda/modules/NounTag.jsm b/comm/mailnews/db/gloda/modules/NounTag.jsm
new file mode 100644
index 0000000000..1e5db85a42
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/NounTag.jsm
@@ -0,0 +1,97 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["TagNoun"];
+
+const { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+const { Gloda } = ChromeUtils.import("resource:///modules/gloda/Gloda.jsm");
+const { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+
+/**
+ * @namespace Tag noun provider.
+ */
var TagNoun = {
  name: "tag",
  clazz: Ci.nsIMsgTag,
  usesParameter: true,
  allowsArbitraryAttrs: false,
  idAttr: "key",
  _msgTagService: null,
  _tagMap: null,
  _tagList: null,

  _init() {
    // This reference can be substituted for testing purposes.
    this._msgTagService = MailServices.tags;
    this._updateTagMap();
  },

  /** Return the cached tag list, building it on first use. */
  getAllTags() {
    if (this._tagList == null) {
      this._updateTagMap();
    }
    return this._tagList;
  },

  /** Snapshot the tag service: refresh both the list and the key->tag map. */
  _updateTagMap() {
    this._tagMap = {};
    this._tagList = this._msgTagService.getAllTags();
    for (let tag of this._tagList) {
      this._tagMap[tag.key] = tag;
    }
  },

  /** Order by display name; null sorts after any non-null tag. */
  comparator(a, b) {
    if (a == null && b == null) {
      return 0;
    }
    if (a == null) {
      return 1;
    }
    if (b == null) {
      return -1;
    }
    return a.tag.localeCompare(b.tag);
  },
  userVisibleString(aTag) {
    return aTag.tag;
  },

  // we cannot be an attribute value

  toParamAndValue(aTag) {
    return [aTag.key, null];
  },
  toJSON(aTag) {
    return aTag.key;
  },
  fromJSON(aTagKey, aIgnored) {
    let tag;
    if (this._tagMap.hasOwnProperty(aTagKey)) {
      tag = this._tagMap[aTagKey];
    }
    // If a tag was removed, we cannot deal with that aggressively; we accept
    // this, though listening to the tag service for rebuild notifications
    // would be nicer. A valid-but-unknown key may mean the tag appeared
    // after our last snapshot, so refresh once and retry.
    if (tag === undefined && this._msgTagService.isValidKey(aTagKey)) {
      this._updateTagMap();
      tag = this._tagMap[aTagKey];
    }
    // we intentionally return undefined for tags that don't exist
    return tag;
  },
  /**
   * Convenience helper to turn a tag key into a tag object.
   */
  getTag(aTagKey) {
    return this.fromJSON(aTagKey);
  },
};

TagNoun._init();
Gloda.defineNoun(TagNoun, GlodaConstants.NOUN_TAG);
diff --git a/comm/mailnews/db/gloda/modules/SuffixTree.jsm b/comm/mailnews/db/gloda/modules/SuffixTree.jsm
new file mode 100644
index 0000000000..239993e180
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/SuffixTree.jsm
@@ -0,0 +1,381 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["SuffixTree", "MultiSuffixTree"];
+
/**
 * Build a suffix tree over the concatenation of several strings, remembering
 * which [start, end) slice of the combined text each source string (and its
 * associated item) occupies so that matches can be mapped back to items.
 *
 * @param {string[]} aStrings - The strings to index.
 * @param {Array} aItems - Parallel array of items, one per string.
 * @throws {Error} If the two arrays differ in length.
 */
function MultiSuffixTree(aStrings, aItems) {
  if (aStrings.length != aItems.length) {
    throw new Error("Array lengths need to be the same.");
  }

  let concatenated = "";
  const offsets = [];
  let priorEnd = 0;
  for (const [i, str] of aStrings.entries()) {
    concatenated += str;
    // Triples of [start offset, end offset (exclusive), item].
    offsets.push(priorEnd, concatenated.length, aItems[i]);
    priorEnd = concatenated.length;
  }

  this._construct(concatenated);
  this._offsetsToItems = offsets;
  this._numItems = aItems.length;
}
+
/**
 * A suffix-tree state (node). The edge leading into this state is the
 * substring [start, end) of the tree's text; `suffix` holds the suffix link.
 *
 * @class
 */
function State(aStartIndex, aEndIndex, aSuffix) {
  Object.assign(this, {
    start: aStartIndex,
    end: aEndIndex,
    suffix: aSuffix,
  });
}
+
/**
 * Since objects are basically hash-tables anyways, we simply create an
 * attribute whose name is the first letter of the edge string. (So, the
 * edge string can conceptually be a multi-letter string, but since we would
 * split it were there any ambiguity, it's okay to just use the single letter.)
 * This avoids having to update the attribute name or worry about tripping our
 * implementation up.
 */
State.prototype = {
  // `end` is exclusive, so an empty edge span means the state is explicit.
  get isExplicit() {
    return this.start >= this.end;
  },
  // A non-empty edge span means the state is implicit.
  get isImplicit() {
    return !this.isExplicit;
  },

  // Length of the edge substring leading into this state.
  get length() {
    return this.end - this.start;
  },

  toString() {
    const suffixDesc = this.suffix ? " non-null suffix]" : " null suffix]";
    return `[Start: ${this.start} End: ${this.end}${suffixDesc}`;
  },
};
+
/**
 * Suffix tree implemented using Ukkonen's algorithm. Construction happens
 * eagerly in the constructor; see _construct on the prototype for the
 * algorithm itself.
 *
 * @class
 * @param {string} aStr - The text to index.
 */
function SuffixTree(aStr) {
  this._construct(aStr);
}
+
/**
 * States are the nodes of the suffix tree. This prototype (shared with
 * MultiSuffixTree; see the assignment at the bottom of the file) provides
 * substring search, Ukkonen construction, and debugging helpers.
 */
+SuffixTree.prototype = {
  /**
   * Find all items matching the provided substring.
   *
   * Traverses edges from the root while consuming aSubstring; if an edge is
   * missing or its text diverges from the pattern, the (empty) results are
   * returned immediately. Otherwise every item reachable from the final
   * state contains the substring and is gathered via _resultGather.
   *
   * @param {string} aSubstring - The pattern to search for.
   * @returns {Array} The matched items; empty when nothing matches.
   */
  findMatches(aSubstring) {
    let results = [];
    let state = this._root;
    let index = 0;
    let end = aSubstring.length;
    while (index < end) {
      state = state[aSubstring[index]];
      // bail if there was no edge
      if (state === undefined) {
        return results;
      }
      // bail if the portion of the edge we traversed is not equal to that
      // portion of our pattern
      let actualTraverseLength = Math.min(state.length, end - index);
      if (
        this._str.substring(state.start, state.start + actualTraverseLength) !=
        aSubstring.substring(index, index + actualTraverseLength)
      ) {
        return results;
      }
      index += state.length;
    }

    // state should now be the node which itself and all its children match...
    // The delta is to adjust us to the offset of the last letter of our match;
    // the edge we traversed to get here may have found us traversing more
    // than we wanted.
    // index - end captures the over-shoot of the edge traversal,
    // index - end + 1 captures the fact that we want to find the last letter
    // that matched, not just the first letter beyond it
    // However, if this state is a leaf node (end == 'infinity'), then 'end'
    // isn't describing an edge at all and we want to avoid accounting for it.
    let delta;
    /*
    if (state.end != this._infinity)
      //delta = index - end + 1;
      delta = end - (index - state.length);
    else */
    delta = index - state.length - end + 1;

    this._resultGather(state, results, {}, end, delta, true);
    return results;
  },
+
  /**
   * Recursively collect the items whose source strings contain the matched
   * pattern, starting from aState and descending through all of its children.
   *
   * @param aState - Current suffix-tree state being considered.
   * @param aResults - Accumulator array that receives matched items.
   * @param aPresence - Object keyed by source-string start offset, used to
   *   avoid pushing the same item more than once.
   * @param aPatLength - Length of the matched pattern.
   * @param aDelta - Offset adjustment locating the last matched character
   *   relative to aState.start.
   * @param alreadyAdjusted - Not read by the current body (only referenced in
   *   the commented-out delta logic); retained for the recursive call
   *   signature. TODO confirm it can be dropped.
   */
  _resultGather(
    aState,
    aResults,
    aPresence,
    aPatLength,
    aDelta,
    alreadyAdjusted
  ) {
    // find the item that this state originated from based on the state's
    // start character. offsetToItem holds [string start index, string end
    // index (exclusive), item reference]. So we want to binary search to
    // find the string whose start/end index contains the state's start index.
    let low = 0;
    let high = this._numItems - 1;
    let mid, stringStart, stringEnd;

    let patternLast = aState.start - aDelta;
    while (low <= high) {
      mid = low + Math.floor((high - low) / 2); // excessive, especially with js nums
      stringStart = this._offsetsToItems[mid * 3];
      let startDelta = stringStart - patternLast;
      stringEnd = this._offsetsToItems[mid * 3 + 1];
      let endDelta = stringEnd - patternLast;
      if (startDelta > 0) {
        high = mid - 1;
      } else if (endDelta <= 0) {
        low = mid + 1;
      } else {
        break;
      }
    }

    // - The match occurred completely inside a source string. Success.
    // - The match spans more than one source strings, and is therefore not
    //   a match.

    // at this point, we have located the origin string that corresponds to the
    // start index of this state.
    // - The match terminated with the end of the preceding string, and does
    //   not match us at all. We, and potentially our children, are merely
    //   serving as a unique terminal.
    // - The

    let patternFirst = patternLast - (aPatLength - 1);

    if (patternFirst >= stringStart) {
      if (!(stringStart in aPresence)) {
        aPresence[stringStart] = true;
        aResults.push(this._offsetsToItems[mid * 3 + 2]);
      }
    }

    // bail if we had it coming OR
    // if the result terminates at/part-way through this state, meaning any
    // of its children are not going to be actual results, just hangers
    // on.
    /*
    if (bail || (end <= aState.end)) {
dump(" bailing! (bail was: " + bail + ")\n");
      return;
    }
*/
    // process our children...
    for (let key in aState) {
      // edges have attributes of length 1...
      if (key.length == 1) {
        let statePrime = aState[key];
        this._resultGather(
          statePrime,
          aResults,
          aPresence,
          aPatLength,
          aDelta + aState.length, // (alreadyAdjusted ? 0 : aState.length),
          false
        );
      }
    }
  },
+
  /**
   * Given a reference 'pair' of a state and a string (may be 'empty'=explicit,
   * which means no work to do and we return immediately) follow that state
   * (and then the successive states)'s transitions until we run out of
   * transitions. This happens either when we find an explicit state, or
   * find ourselves partially along an edge (conceptually speaking). In
   * the partial case, we return the state prior to the edge traversal.
   * (The information about the 'edge' is contained on its target State;
   * we can do this because a state is only referenced by one other state.)
   *
   * @param aState - Starting state; null means the conceptual 'bottom' state,
   *   which is treated as transitioning to the root on every character.
   * @param aStart - Start offset (inclusive) into this._str.
   * @param aEnd - End offset (exclusive) into this._str.
   * @returns The canonical reference pair as [state, start].
   */
  _canonize(aState, aStart, aEnd) {
    if (aEnd <= aStart) {
      return [aState, aStart];
    }

    let statePrime;
    // we treat an aState of null as 'bottom', which has transitions for every
    // letter in the alphabet to 'root'. rather than create all those
    // transitions, we special-case here.
    if (aState === null) {
      statePrime = this._root;
    } else {
      statePrime = aState[this._str[aStart]];
    }
    while (statePrime.length <= aEnd - aStart) {
      // (no 1 adjustment required)
      aStart += statePrime.length;
      aState = statePrime;
      if (aStart < aEnd) {
        statePrime = aState[this._str[aStart]];
      }
    }
    return [aState, aStart];
  },
+
  /**
   * Given a reference 'pair' whose state may or may not be explicit (and for
   * which we will perform the required splitting to make it explicit), test
   * whether it already possesses a transition corresponding to the provided
   * character.
   *
   * @param aState - State of the reference pair (null is the 'bottom' state).
   * @param aStart - Start offset (inclusive) of the pair's string in this._str.
   * @param aEnd - End offset (exclusive) of the pair's string in this._str.
   * @param aChar - The character whose transition we are testing for.
   * @returns A list of: whether we had to make it explicit, the (potentially)
   *     new explicit state.
   */
  _testAndSplit(aState, aStart, aEnd, aChar) {
    if (aStart < aEnd) {
      // it's not explicit
      let statePrime = aState[this._str[aStart]];
      let length = aEnd - aStart;
      if (aChar == this._str[statePrime.start + length]) {
        return [true, aState];
      }

      // do splitting... aState -> rState -> statePrime
      let rState = new State(statePrime.start, statePrime.start + length);
      aState[this._str[statePrime.start]] = rState;
      statePrime.start += length;
      rState[this._str[statePrime.start]] = statePrime;
      return [false, rState];
    }

    // it's already explicit
    if (aState === null) {
      // bottom case... shouldn't happen, but hey.
      return [true, aState];
    }
    return [aChar in aState, aState];
  },
+
  /**
   * Ukkonen 'update' step: extend the tree with the character at aIndex,
   * walking the boundary path from the active point — splitting edges and
   * creating new leaf states as needed — and wiring suffix links between the
   * states visited along the way.
   *
   * @param aState - Active state.
   * @param aStart - Start offset of the active edge portion in this._str.
   * @param aIndex - Index (into this._str) of the character being added.
   * @returns The new active point as a [state, start] pair.
   */
  _update(aState, aStart, aIndex) {
    let oldR = this._root;
    let textAtIndex = this._str[aIndex]; // T sub i (0-based corrected...)
    // because of the way we store the 'end' value as a one-past form, we do
    // not need to subtract 1 off of aIndex.
    let [endPoint, rState] = this._testAndSplit(
      aState,
      aStart,
      aIndex, // no -1
      textAtIndex
    );
    while (!endPoint) {
      // New leaf; its end is 'infinity' (it grows with the text).
      let rPrime = new State(aIndex, this._infinity);
      rState[textAtIndex] = rPrime;
      if (oldR !== this._root) {
        oldR.suffix = rState;
      }
      oldR = rState;
      [aState, aStart] = this._canonize(aState.suffix, aStart, aIndex); // no -1
      [endPoint, rState] = this._testAndSplit(
        aState,
        aStart,
        aIndex, // no -1
        textAtIndex
      );
    }
    if (oldR !== this._root) {
      oldR.suffix = aState;
    }

    return [aState, aStart];
  },
+
  /**
   * Build the suffix tree over aStr: feed Ukkonen's algorithm one character
   * at a time via _update, re-canonizing the active point after each step.
   *
   * @param {string} aStr - The text to index; stored as this._str.
   */
  _construct(aStr) {
    this._str = aStr;
    // just needs to be longer than the string.
    this._infinity = aStr.length + 1;

    // this._bottom = new State(0, -1, null);
    this._root = new State(-1, 0, null); // null === bottom
    let state = this._root;
    let start = 0;

    for (let i = 0; i < aStr.length; i++) {
      [state, start] = this._update(state, start, i); // treat as flowing -1...
      [state, start] = this._canonize(state, start, i + 1); // 1-length string
    }
  },
+
+ dump(aState, aIndent, aKey) {
+ if (aState === undefined) {
+ aState = this._root;
+ }
+ if (aIndent === undefined) {
+ aIndent = "";
+ aKey = ".";
+ }
+
+ if (aState.isImplicit) {
+ let snip;
+ if (aState.length > 10) {
+ snip =
+ this._str.slice(
+ aState.start,
+ Math.min(aState.start + 10, this._str.length)
+ ) + "...";
+ } else {
+ snip = this._str.slice(
+ aState.start,
+ Math.min(aState.end, this._str.length)
+ );
+ }
+ dump(
+ aIndent +
+ aKey +
+ ":" +
+ snip +
+ "(" +
+ aState.start +
+ ":" +
+ aState.end +
+ ")\n"
+ );
+ } else {
+ dump(
+ aIndent +
+ aKey +
+ ": (explicit:" +
+ aState.start +
+ ":" +
+ aState.end +
+ ")\n"
+ );
+ }
+ let nextIndent = aIndent + " ";
+ let keys = Object.keys(aState).filter(c => c.length == 1);
+ for (let key of keys) {
+ this.dump(aState[key], nextIndent, key);
+ }
+ },
+};
+MultiSuffixTree.prototype = SuffixTree.prototype;
diff --git a/comm/mailnews/db/gloda/modules/moz.build b/comm/mailnews/db/gloda/modules/moz.build
new file mode 100644
index 0000000000..54978c24ea
--- /dev/null
+++ b/comm/mailnews/db/gloda/modules/moz.build
@@ -0,0 +1,31 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
# Gloda implementation modules, packaged so they can be imported from
# resource:///modules/gloda/<name>.jsm.
EXTRA_JS_MODULES.gloda += [
    "Collection.jsm",
    "Everybody.jsm",
    "Facet.jsm",
    "Gloda.jsm",
    "GlodaConstants.jsm",
    "GlodaContent.jsm",
    "GlodaDatabind.jsm",
    "GlodaDataModel.jsm",
    "GlodaDatastore.jsm",
    "GlodaExplicitAttr.jsm",
    "GlodaFundAttr.jsm",
    "GlodaIndexer.jsm",
    "GlodaMsgIndexer.jsm",
    "GlodaMsgSearcher.jsm",
    "GlodaPublic.jsm",
    "GlodaQueryClassFactory.jsm",
    "GlodaSyntheticView.jsm",
    "GlodaUtils.jsm",
    "IndexMsg.jsm",
    "MimeMessage.jsm",
    "NounFreetag.jsm",
    "NounMimetype.jsm",
    "NounTag.jsm",
    "SuffixTree.jsm",
]
diff --git a/comm/mailnews/db/gloda/moz.build b/comm/mailnews/db/gloda/moz.build
new file mode 100644
index 0000000000..4c7d35cca3
--- /dev/null
+++ b/comm/mailnews/db/gloda/moz.build
@@ -0,0 +1,13 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
# Implementation modules and XPCOM components.
DIRS += [
    "modules",
    "components",
]

# xpcshell tests (see test/moz.build).
TEST_DIRS += ["test"]

# Chrome packaging manifest for this directory.
JAR_MANIFESTS += ["jar.mn"]
diff --git a/comm/mailnews/db/gloda/test/moz.build b/comm/mailnews/db/gloda/test/moz.build
new file mode 100644
index 0000000000..c16fdd2b6c
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/moz.build
@@ -0,0 +1,12 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
XPCSHELL_TESTS_MANIFESTS += ["unit/xpcshell.ini"]

# Shared test helpers, importable by tests from
# resource://testing-common/gloda/<name>.jsm.
TESTING_JS_MODULES.gloda += [
    "unit/resources/GlodaQueryHelper.jsm",
    "unit/resources/GlodaTestHelper.jsm",
    "unit/resources/GlodaTestHelperFunctions.jsm",
]
diff --git a/comm/mailnews/db/gloda/test/unit/base_gloda_content.js b/comm/mailnews/db/gloda/test/unit/base_gloda_content.js
new file mode 100644
index 0000000000..d106015b48
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_gloda_content.js
@@ -0,0 +1,226 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent. This may also be implicitly tested by indexing
+ * and fulltext query tests (on messages), but the buck stops here for the
+ * content stuff.
+ *
+ * Currently, we just test quoting removal and that the content turns out right.
+ * We do not actually verify that the quoted blocks are correct (aka we might
+ * screw up eating the greater-than signs). (We have no known consumers who
+ * care about the quoted blocks.)
+ */
+
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+// We need to be able to get at GlodaFundAttr to check the number of whittler
+// invocations.
+var { GlodaFundAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaFundAttr.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
// Set outside this file; test_sanity_test_environment asserts both are
// non-null before the real tests run.
var msgGen;
var messageInjection;
+
/* ===== Data ===== */
// Each entry describes one synthetic test message. `bode` is a list of
// [isExpectedContent, lineText] tuples: all lines form the message body
// (joined with CRLF), and the lines flagged true are the content expected to
// survive quote/whitespace whittling (joined with LF) — see
// setup_create_message.
var messageInfos = [
  {
    name: "no quoting",
    bode: [
      [true, "I like hats"],
      [true, "yes I do!"],
      [true, "I like hats!"],
      [true, "How bout you?"],
    ],
  },
  {
    name: "no quoting, whitespace removal",
    bode: [
      [true, "robots are nice..."],
      [true, ""],
      [true, "except for the bloodlust"],
    ],
  },
  {
    name: "bottom posting",
    bode: [
      [false, "John wrote:"],
      [false, "> I like hats"],
      [false, ">"], // This quoted blank line is significant! no lose!
      [false, "> yes I do!"],
      [false, ""],
      [true, "I do enjoy them as well."],
      [true, ""],
      [true, "Bob"],
    ],
  },
  {
    name: "top posting",
    bode: [
      [true, "Hats are where it's at."],
      [false, ""],
      [false, "John wrote:"],
      [false, "> I like hats"],
      [false, "> yes I do!"],
    ],
  },
  {
    name: "top posting with trailing whitespace, no intro",
    bode: [
      [true, "Hats are where it's at."],
      [false, ""],
      [false, "> I like hats"],
      [false, "> yes I do!"],
      [false, ""],
      [false, ""],
    ],
  },
  {
    name: "interspersed quoting",
    bode: [
      [false, "John wrote:"],
      [false, "> I like hats"],
      [true, "I concur with this point."],
      [false, "> yes I do!"],
      [false, ""],
      [true, "this point also resonates with me."],
      [false, ""],
      [false, "> I like hats!"],
      [false, "> How bout you?"],
      [false, ""],
      [true, "Verily!"],
    ],
  },
  {
    name: "german style",
    bode: [
      [false, "Mark Banner <bugzilla@standard8.plus.invalid> wrote:"],
      [false, "\xa0"],
      [
        false,
        "> We haven't nailed anything down in detail yet, depending on how we are ",
      ],
      [
        true,
        "That sounds great and would definitely be appreciated by localizers.",
      ],
      [false, ""],
    ],
  },
  {
    name: "tortuous interference",
    bode: [
      [false, "> wrote"],
      [true, "running all the time"],
      [false, "> wrote"],
      [true, "cheese"],
      [false, ""],
    ],
  },
];
+
/**
 * Materialize one messageInfos entry: derive the full CRLF-joined body and
 * the expected post-whittling content (LF-joined lines flagged true) from
 * `bode`, then build the synthetic message via the shared generator.
 */
function setup_create_message(info) {
  const allLines = [];
  const contentLines = [];
  for (const [isContent, text] of info.bode) {
    allLines.push(text);
    if (isContent) {
      contentLines.push(text);
    }
  }
  info.body = { body: allLines.join("\r\n") };
  info.expected = contentLines.join("\n");

  info._synMsg = msgGen.makeMessage(info);
}
+
/**
 * Pretend to be a verification function so we get easy access to the gloda
 * translations of the messages: stash each gloda message on the matching
 * messageInfos entry (matched by synthetic-message identity) for later tests.
 */
function glodaInfoStasher(aSynthMessage, aGlodaMessage) {
  // Indexing order is not guaranteed, so match by identity instead of index.
  for (const info of messageInfos) {
    if (info._synMsg == aSynthMessage) {
      info._glodaMsg = aGlodaMessage;
    }
  }
}
+
/**
 * Create the synthetic messages described by messageInfos, inject them into a
 * fresh folder, and wait for gloda to index them; glodaInfoStasher records
 * each resulting gloda message on its messageInfos entry.
 */
async function setup_inject_messages() {
  // Build each synthetic message from its description.
  for (const info of messageInfos) {
    setup_create_message(info);
  }
  const synMsgs = messageInfos.map(info => info._synMsg);
  const msgSet = new SyntheticMessageSet(synMsgs);
  const folder = await messageInjection.makeEmptyFolder();
  await messageInjection.addSetsToFolders([folder], [msgSet]);
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([msgSet], { verifier: glodaInfoStasher })
  );
}
+
/**
 * Build a parameterless test function (consumed by base_gloda_content_tests)
 * that streams the indexed message through MsgHdrToMimeMessage and then
 * verifies its extracted content.
 */
function test_stream_message(info) {
  // Currying the function for simpler usage with `base_gloda_content_tests`.
  return () => {
    const msgHdr = info._glodaMsg.folderMessage;
    MsgHdrToMimeMessage(msgHdr, null, (aMsgHdr, aMimeMsg) => {
      verify_message_content(
        info,
        info._synMsg,
        info._glodaMsg,
        aMsgHdr,
        aMimeMsg
      );
    });
  };
}
+
// Instrument GlodaFundAttr so we can check the count.
// The wrapper increments whittleCount and delegates to the original so that
// verify_message_content can assert the whittler ran exactly once per message.
var originalWhittler = GlodaFundAttr.contentWhittle;
var whittleCount = 0;
GlodaFundAttr.contentWhittle = function (...aArgs) {
  whittleCount++;
  return originalWhittler.apply(this, aArgs);
};
+
/**
 * Verify a streamed message: the MIME message must exist, content extraction
 * must invoke the (instrumented) whittler exactly once, and the extracted
 * content string must equal the expectation computed in setup_create_message.
 *
 * @throws {Error} If streaming produced no MIME message or the whittle count
 *   is not exactly 1.
 */
function verify_message_content(aInfo, aSynMsg, aGlodaMsg, aMsgHdr, aMimeMsg) {
  if (aMimeMsg == null) {
    throw new Error(
      "Message streaming should work; check test_mime_emitter.js first"
    );
  }

  // Reset the instrumented counter before extraction runs.
  whittleCount = 0;
  let content = Gloda.getMessageContent(aGlodaMsg, aMimeMsg);
  if (whittleCount != 1) {
    throw new Error("Whittle count is " + whittleCount + " but should be 1!");
  }

  Assert.equal(content.getContentString(), aInfo.expected, "Message streamed");
}
+
/**
 * Sanity-check that msgGen and messageInjection were initialized (outside
 * this file) before the real tests run.
 */
function test_sanity_test_environment() {
  Assert.ok(msgGen, "Sanity that msgGen is set.");
  Assert.ok(messageInjection, "Sanity that messageInjection is set.");
}
+
// The full suite: sanity check, inject + index all messages, then one
// content-streaming test per entry in messageInfos.
var base_gloda_content_tests = [
  test_sanity_test_environment,
  setup_inject_messages,
  ...messageInfos.map(info => test_stream_message(info)),
];
diff --git a/comm/mailnews/db/gloda/test/unit/base_index_junk.js b/comm/mailnews/db/gloda/test/unit/base_index_junk.js
new file mode 100644
index 0000000000..8529f24a56
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_index_junk.js
@@ -0,0 +1,217 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test indexing in the face of junk classification and junk folders. It is
+ * gloda policy not to index junk mail.
+ *
+ * A similar test that moving things to the trash folder is deletion happens in
+ * base_index_messages.js.
+ */
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
// Set outside this file; test_sanity_test_environment asserts it is non-null.
var messageInjection;

// Bodies built from distinctive repeated terms so the bayesian classifier,
// once trained in setup_spam_filter, scores them reliably.
const SPAM_BODY = { body: "superspam superspam superspam eevil eevil eevil" };
const HAM_BODY = { body: "ham ham ham nice nice nice happy happy happy" };
+
/**
 * Make SPAM_BODY be known as spammy and HAM_BODY be known as hammy by
 * injecting one instance of each and training the junk service on them.
 */
async function setup_spam_filter() {
  let [, spamSet, hamSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1, body: SPAM_BODY },
    { count: 1, body: HAM_BODY },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([spamSet, hamSet], []));

  // Ham.
  dump(`Marking message: ${hamSet.getMsgHdr(0)} as ham.`);
  await promiseSetMessageClassification(
    hamSet.getMsgURI(0),
    MailServices.junk.GOOD
  );

  // Spam.
  dump(`Marking message: ${spamSet.getMsgHdr(0)} as spam.`);
  await promiseSetMessageClassification(
    spamSet.getMsgURI(0),
    MailServices.junk.JUNK
  );
}

/**
 * Ask the junk service to classify a message and resolve once the listener's
 * onMessageClassified fires. (Extracted to avoid duplicating the
 * promise/listener plumbing per classification.)
 *
 * @param {string} aMsgURI - URI of the message to classify.
 * @param aClassification - MailServices.junk.GOOD or MailServices.junk.JUNK.
 * @returns {Promise<void>} Resolves when classification has been applied.
 */
function promiseSetMessageClassification(aMsgURI, aClassification) {
  return new Promise(resolve => {
    MailServices.junk.setMessageClassification(
      aMsgURI,
      null, // No old classification.
      aClassification,
      null,
      { onMessageClassified: resolve }
    );
  });
}
+
/**
 * Because gloda defers indexing until after junk, we should never index a
 * message that gets marked as junk. So if we inject a message that will
 * definitely be marked as junk (thanks to use of terms that guarantee it),
 * the indexer should never index it.
 *
 * Requires setup_spam_filter to have already trained the classifier on
 * SPAM_BODY.
 *
 * ONLY THIS TEST ACTUALLY RELIES ON THE BAYESIAN CLASSIFIER.
 */
async function test_never_indexes_a_message_marked_as_junk() {
  // Event-driven does not index junk.

  // Make a message that will be marked as junk from the get-go.
  await messageInjection.makeFoldersWithSets(1, [
    { count: 1, body: SPAM_BODY },
  ]);
  // Since the message is junk, gloda should not index it!
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));

  // Folder sweep does not index junk.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));
}
+
/**
 * Reset the training data so the bayesian classifier stops doing things.
 * Run after the classifier-dependent test above; the remaining tests mark
 * junk explicitly and must not be affected by stale training.
 */
function reset_spam_filter() {
  MailServices.junk.resetTrainingData();
}
+
/**
 * Marking a message as junk is equivalent to deleting the message, un-mark it
 * and it should go back to being a happy message (with the same gloda-id!).
 *
 * The final id comparison verifies the existing gloda message is reused on
 * un-junking rather than a new one being created.
 *
 * THIS TEST DOES NOT RELY ON THE BAYESIAN CLASSIFIER.
 */

async function test_mark_as_junk_is_deletion_mark_as_not_junk_is_exposure() {
  // Mark as junk is deletion.
  // Create a message; it should get indexed.
  let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
  let glodaId = msgSet.glodaMessages[0].id;
  // Mark it as junk.
  msgSet.setJunk(true);
  // It will appear deleted after the event.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));
  // Mark as non-junk gets indexed.
  msgSet.setJunk(false);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
  // We should have reused the existing gloda message so it should keep the id.
  Assert.equal(glodaId, msgSet.glodaMessages[0].id);
}
+
/**
 * Moving a message to the junk folder is equivalent to deletion. Gloda does
 * not index junk folders at all, which is why this is an important and
 * independent determination from marking a message directly as junk.
 *
 * Covers both the event-driven path and the subsequent indexing sweep, and
 * checks that the conversation and message rows are ultimately gone.
 *
 * The move to the junk folder is performed without using any explicit junk
 * support code. This ends up being effectively the same underlying logic test
 * as base_index_messages' test of moving a message to the trash folder.
 */
async function test_message_moving_to_junk_folder_is_deletion() {
  // Create and index two messages in a conversation.
  let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 2, msgsPerThread: 2 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));

  let convId = msgSet.glodaMessages[0].conversation.id;
  let firstGlodaId = msgSet.glodaMessages[0].id;
  let secondGlodaId = msgSet.glodaMessages[1].id;

  // Move them to the junk folder.
  await messageInjection.moveMessages(
    msgSet,
    await messageInjection.getJunkFolder()
  );

  // They will appear deleted after the events.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));

  // We do not index the junk folder so this should actually make them appear
  // deleted to an unprivileged query.
  let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  msgQuery.id(firstGlodaId, secondGlodaId);
  await queryExpect(msgQuery, []);

  // Force a sweep.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  // There should be no apparent change as the result of this pass.
  // (Well, the conversation will die, but we can't see that.)
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));

  // The conversation should be gone.
  let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
  convQuery.id(convId);
  await queryExpect(convQuery, []);

  // The messages should be entirely gone.
  let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
    noDbQueryValidityConstraints: true,
  });
  msgPrivQuery.id(firstGlodaId, secondGlodaId);
  await queryExpect(msgPrivQuery, []);
}
+
/**
 * Sanity-check that messageInjection (and its message generator) were
 * initialized outside this file before the real tests run.
 */
function test_sanity_test_environment() {
  Assert.ok(messageInjection, "Sanity that messageInjection is set.");
  Assert.ok(messageInjection.messageGenerator, "Sanity that msgGen is set.");
}
+
/* exported tests */
// Suite order matters: setup_spam_filter trains the bayesian classifier
// (needed only by test_never_indexes_a_message_marked_as_junk), and
// reset_spam_filter clears that training before the classifier-independent
// tests run.
var base_index_junk_tests = [
  test_sanity_test_environment,
  setup_spam_filter,
  test_never_indexes_a_message_marked_as_junk,
  reset_spam_filter,
  test_mark_as_junk_is_deletion_mark_as_not_junk_is_exposure,
  test_message_moving_to_junk_folder_is_deletion,
];
diff --git a/comm/mailnews/db/gloda/test/unit/base_index_messages.js b/comm/mailnews/db/gloda/test/unit/base_index_messages.js
new file mode 100644
index 0000000000..bea2337d7f
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_index_messages.js
@@ -0,0 +1,1461 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests our indexing prowess. This includes both our ability to
+ * properly be triggered by events taking place in thunderbird as well as our
+ * ability to correctly extract/index the right data.
+ * In general, if these tests pass, things are probably working quite well.
+ *
+ * This test has local, IMAP online, IMAP offline, and IMAP online-become-offline
+ * variants. See the test_index_messages_*.js files.
+ *
+ * Things we don't test that you think we might test:
+ * - Full-text search. Happens in query testing.
+ */
+
+var { MailUtils } = ChromeUtils.import("resource:///modules/MailUtils.jsm");
+var { NetUtil } = ChromeUtils.import("resource://gre/modules/NetUtil.jsm");
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { queryExpect, sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var {
+ assertExpectedMessagesIndexed,
+ waitForGlodaIndexer,
+ nukeGlodaCachesAndCollections,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var {
+ configureGlodaIndexing,
+ waitForGlodaDBFlush,
+ waitForIndexingHang,
+ resumeFromSimulatedHang,
+ permuteMessages,
+ makeABCardForAddressPair,
+} = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { PromiseTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/PromiseTestUtils.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { SyntheticMessageSet, SyntheticPartMultiMixed, SyntheticPartLeaf } =
+ ChromeUtils.import("resource://testing-common/mailnews/MessageGenerator.jsm");
+var { TagNoun } = ChromeUtils.import("resource:///modules/gloda/NounTag.jsm");
+
// Whether we can expect fulltext results
var expectFulltextResults = true;

/**
 * Should we force our folders offline after we have indexed them once. We do
 * this in the online_to_offline test variant.
 */
var goOffline = false;

// Shared infrastructure used by every test in this file; presumably assigned
// by the per-variant setup (see the test_index_messages_*.js variants
// mentioned in the file header) before any test runs — confirm.
var messageInjection;
var msgGen;
var scenarios;
+
+/* ===== Indexing Basics ===== */
+
/**
 * Index a message, wait for a commit, make sure the header gets the property
 * set correctly. Then modify the message, verify the dirty property shows
 * up, flush again, and make sure the dirty property goes clean again.
 */
async function test_pending_commit_tracker_flushes_correctly() {
  let [, messages] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([messages], { augment: true }));

  // Until the pending commit is flushed, no gloda-id property should exist.
  let header = messages.getMsgHdr(0);
  // Read it as a string so "no value" is distinguishable from any value.
  Assert.equal(header.getStringProperty("gloda-id"), "");

  await waitForGlodaDBFlush();

  // The flush should have stamped the header with the message's gloda id.
  let glodaMsg = messages.glodaMessages[0];
  Assert.equal(header.getUint32Property("gloda-id"), glodaMsg.id);

  // No dirty marker should have been written at this point.
  Assert.equal(header.getStringProperty("gloda-dirty"), "");

  // Touch the message so it gets re-indexed.
  messages.setRead(true);
  await waitForGlodaIndexer(messages);
  Assert.ok(...assertExpectedMessagesIndexed([messages]));

  // The header should now carry the dirty marker.
  Assert.equal(
    header.getUint32Property("gloda-dirty"),
    GlodaMsgIndexer.kMessageDirty
  );

  // Flush again.
  await waitForGlodaDBFlush();

  // The dirty marker should be cleared and the gloda id unchanged.
  Assert.equal(
    header.getUint32Property("gloda-dirty"),
    GlodaMsgIndexer.kMessageClean
  );
  Assert.equal(header.getUint32Property("gloda-id"), glodaMsg.id);
}
+
/**
 * Make sure that PendingCommitTracker causes a msgdb commit to occur so that
 * if the nsIMsgFolder's msgDatabase attribute has already been nulled
 * (which is normally how we force a msgdb commit), that the changes to the
 * header actually hit the disk.
 */
async function test_pending_commit_causes_msgdb_commit() {
  // Create and index a fresh message.
  let [[folderHandle], messages] = await messageInjection.makeFoldersWithSets(
    1,
    [{ count: 1 }]
  );
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([messages], { augment: true }));

  // Null out the msgDatabase; the sqlite commit will not yet have occurred.
  messageInjection.getRealInjectionFolder(folderHandle).msgDatabase = null;
  // Flush gloda, which causes the gloda-id to be written onto the header.
  await waitForGlodaDBFlush();

  // A GC here would destroy the header and database objects, losing the
  // change if it were not being protected.
  Cu.forceGC();

  // The re-fetched header must still carry the gloda id.
  let header = messages.getMsgHdr(0);
  Assert.equal(
    header.getUint32Property("gloda-id"),
    messages.glodaMessages[0].id
  );
}
+
+/**
+ * Give the indexing sweep a workout.
+ *
+ * This includes:
+ * - Basic indexing sweep across never-before-indexed folders.
+ * - Indexing sweep across folders with just some changes.
+ * - Filthy pass.
+ */
+async function test_indexing_sweep() {
+ // -- Never-before-indexed folders.
+ // Turn off event-driven indexing.
+ configureGlodaIndexing({ event: false });
+
+ let [[folderA], setA1, setA2] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 3 }, { count: 2 }]
+ );
+ let [, setB1, setB2] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 3 },
+ { count: 2 },
+ ]);
+ let [[folderC], setC1, setC2] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 3 }, { count: 2 }]
+ );
+
+ // Make sure that event-driven job gets nuked out of existence
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // Turn on event-driven indexing again; this will trigger a sweep.
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([setA1, setA2, setB1, setB2, setC1, setC2])
+ );
+
+ // -- Folders with some changes, pending commits.
+ // Indexing off.
+ configureGlodaIndexing({ event: false });
+
+ setA1.setRead(true);
+ setB2.setRead(true);
+
+ // Indexing on, killing all outstanding jobs, trigger sweep.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA1, setB2]));
+
+ // -- Folders with some changes, no pending commits.
+ // Force a commit to clear out our pending commits.
+ await waitForGlodaDBFlush();
+ // Indexing off.
+ configureGlodaIndexing({ event: false });
+
+ setA2.setRead(true);
+ setB1.setRead(true);
+
+ // Indexing on, killing all outstanding jobs, trigger sweep.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA2, setB1]));
+
+ // -- Filthy foldering indexing.
+ // Just mark the folder filthy and make sure that we reindex everyone.
+ // IMPORTANT! The trick of marking the folder filthy only works because
+ // we flushed/committed the database above; the PendingCommitTracker
+ // is not aware of bogus filthy-marking of folders.
+ // We leave the verification of the implementation details to
+ // test_index_sweep_folder.js.
+ let glodaFolderC = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folderC)
+ );
+ // Marked gloda folder dirty.
+ glodaFolderC._dirtyStatus = glodaFolderC.kFolderFilthy;
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setC1, setC2]));
+
+ // -- Forced folder indexing.
+ var callbackInvoked = false;
+ GlodaMsgIndexer.indexFolder(
+ messageInjection.getRealInjectionFolder(folderA),
+ {
+ force: true,
+ callback() {
+ callbackInvoked = true;
+ },
+ }
+ );
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([setA1, setA2]));
+ Assert.ok(callbackInvoked);
+}
+
/**
 * We used to screw up and downgrade filthy folders to dirty if we saw an event
 * happen in the folder before we got to the folder; this tests that we no
 * longer do that.
 */
async function test_event_driven_indexing_does_not_mess_with_filthy_folders() {
  // Add a folder with a message.
  let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));

  // Fake marking the folder filthy.
  // (Reaches into the private _dirtyStatus field rather than running a real
  // filthy-marking path.)
  let glodaFolder = Gloda.getFolderForFolder(
    messageInjection.getRealInjectionFolder(folder)
  );
  glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;

  // Generate an event in the folder.
  msgSet.setRead(true);
  // Make sure the indexer did not do anything and the folder is still filthy.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));
  Assert.equal(glodaFolder._dirtyStatus, glodaFolder.kFolderFilthy);
  // Also, the message should not have actually gotten marked dirty.
  Assert.equal(msgSet.getMsgHdr(0).getUint32Property("gloda-dirty"), 0);

  // Let's make the message un-read again for consistency with the gloda state.
  msgSet.setRead(false);
  // Make the folder dirty and let an indexing sweep take care of this so we
  // don't get extra events in subsequent tests.
  glodaFolder._dirtyStatus = glodaFolder.kFolderDirty;
  GlodaMsgIndexer.indexingSweepNeeded = true;
  // The message won't get indexed though.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));
}
+
/**
 * Setting a folder's indexing priority to kIndexingNeverPriority should mark
 * its already-indexed messages as deleted (verified both via the indexer
 * events and directly in the folderLocations table) and keep new messages in
 * that folder from ever being indexed.
 */
async function test_indexing_never_priority() {
  // Add a folder with a bunch of messages.
  let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);

  // Index it, and augment the msgSet with the glodaMessages array
  // for later use by sqlExpectCount.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));

  // Explicitly tell gloda to never index this folder.
  let XPCOMFolder = messageInjection.getRealInjectionFolder(folder);
  let glodaFolder = Gloda.getFolderForFolder(XPCOMFolder);
  GlodaMsgIndexer.setFolderIndexingPriority(
    XPCOMFolder,
    glodaFolder.kIndexingNeverPriority
  );

  // Verify that the setter and getter do the right thing.
  Assert.equal(
    glodaFolder.indexingPriority,
    glodaFolder.kIndexingNeverPriority
  );

  // Check that existing message is marked as deleted.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));

  // Make sure the deletion hit the database.
  await sqlExpectCount(
    1,
    "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
    glodaFolder.id,
    glodaFolder.kIndexingNeverPriority
  );

  // Add another message.
  await messageInjection.makeNewSetsInFolders([folder], [{ count: 1 }]);

  // Make sure that indexing returns nothing.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));
}
+
/**
 * Flip a folder's indexing priority to "never" while the indexer is hung
 * partway through streaming one of its messages, then resume and make sure
 * the indexer recovers (a cleanup pass is observed).
 */
async function test_setting_indexing_priority_never_while_indexing() {
  // Only runs against local message injection — presumably because the
  // simulated-hang machinery requires it.
  if (!messageInjection.messageInjectionIsLocal()) {
    return;
  }

  // Configure the gloda indexer to hang while streaming the message.
  configureGlodaIndexing({ hangWhile: "streaming" });

  // Create a folder with a message inside.
  let [[folder]] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);

  await waitForIndexingHang();

  // Explicitly tell gloda to never index this folder.
  let XPCOMFolder = messageInjection.getRealInjectionFolder(folder);
  let glodaFolder = Gloda.getFolderForFolder(XPCOMFolder);
  GlodaMsgIndexer.setFolderIndexingPriority(
    XPCOMFolder,
    glodaFolder.kIndexingNeverPriority
  );

  // Reset indexing to not hang.
  configureGlodaIndexing({});

  // Sorta get the event chain going again.
  await resumeFromSimulatedHang(true);

  // Because the folder was dirty it should actually end up getting indexed,
  // so in the end the message will get indexed. Also, make sure a cleanup
  // was observed.
  // NOTE(review): the assertion below expects no messages reported as
  // indexed, which reads at odds with the comment above — presumably the
  // message is not part of any tracked set; confirm intended semantics.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { cleanedUp: 1 }));
}
+
+/* ===== Threading / Conversation Grouping ===== */
+
// Synthetic messages stashed by allMessageInSameConversation; some IMAP
// tests need these later.
var gSynMessages = [];

/**
 * Verification callback asserting that every indexed message landed in the
 * same conversation. The first invocation (aConvID undefined) establishes
 * the conversation id; subsequent invocations must match it.
 */
function allMessageInSameConversation(aSynthMessage, aGlodaMessage, aConvID) {
  const actualConvID = aGlodaMessage.conversationID;
  if (aConvID === undefined) {
    // First message of the set: remember its conversation id.
    return actualConvID;
  }
  Assert.equal(aConvID, actualConvID);
  // Cheat and stash the synthetic message (we need them for one of the IMAP
  // tests).
  gSynMessages.push(aSynthMessage);
  return aConvID;
}
+
/**
 * Test our conversation/threading logic in the straight-forward direct
 * reply case, the missing intermediary case, and the siblings with missing
 * parent case. We also test all permutations of receipt of those messages.
 * (Also tests that we index new messages.)
 */
async function test_threading_direct_reply() {
  const prepared = await permuteMessages(
    scenarios.directReply,
    messageInjection
  );
  // Each prepared callable injects one permutation; every permutation must
  // index into a single shared conversation.
  for (const inject of prepared) {
    const injected = await inject();
    await waitForGlodaIndexer();
    Assert.ok(
      ...assertExpectedMessagesIndexed([injected], allMessageInSameConversation)
    );
  }
}
+
/**
 * Threading check for the missing-intermediary scenario, over all receipt
 * permutations: every message must join the same conversation.
 */
async function test_threading_missing_intermediary() {
  const prepared = await permuteMessages(
    scenarios.missingIntermediary,
    messageInjection
  );
  for (const inject of prepared) {
    const injected = await inject();
    await waitForGlodaIndexer();
    Assert.ok(
      ...assertExpectedMessagesIndexed([injected], allMessageInSameConversation)
    );
  }
}
/**
 * Threading check for the siblings-with-missing-parent scenario, over all
 * receipt permutations: every message must join the same conversation.
 */
async function test_threading_siblings_missing_parent() {
  const prepared = await permuteMessages(
    scenarios.siblingsMissingParent,
    messageInjection
  );
  for (const inject of prepared) {
    const injected = await inject();
    await waitForGlodaIndexer();
    Assert.ok(
      ...assertExpectedMessagesIndexed([injected], allMessageInSameConversation)
    );
  }
}
+
/**
 * Test the bit that says "if we're fulltext-indexing the message and we
 * discover it didn't have any attachments, clear the attachment bit from the
 * message header".
 */
async function test_attachment_flag() {
  // Create a synthetic message with an attachment that won't normally be listed
  // in the attachment pane (Content-Disposition: inline, no filename, and
  // displayable inline).
  let smsg = msgGen.makeMessage({
    name: "test message with part 1.2 attachment",
    attachments: [
      {
        body: "attachment",
        filename: "",
        format: "",
      },
    ],
  });
  // Inject it into a fresh folder; note the folder handle is also stashed in
  // fundamentalFolderHandle as a side effect of this assignment.
  let msgSet = new SyntheticMessageSet([smsg]);
  let folder = (fundamentalFolderHandle =
    await messageInjection.makeEmptyFolder());
  await messageInjection.addSetsToFolders([folder], [msgSet]);

  // If we need to go offline, let the indexing pass run, then force us offline.
  if (goOffline) {
    await waitForGlodaIndexer();
    Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
    await messageInjection.makeFolderAndContentsOffline(folder);
    // Now the next indexer wait will wait for the next indexing pass.
  }

  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([msgSet], {
      verifier: verify_attachment_flag,
    })
  );
}
+
/**
 * Verifier for test_attachment_flag: a message whose only "attachment" is
 * inline/displayable must end up with no attachment data and a cleared
 * Attachment flag on its folder message header.
 */
function verify_attachment_flag(smsg, gmsg) {
  // Attachment data is only available when fulltext indexing ran.
  if (!expectFulltextResults) {
    return;
  }
  Assert.equal(gmsg.attachmentNames.length, 0);
  Assert.equal(gmsg.attachmentInfos.length, 0);
  Assert.equal(
    false,
    gmsg.folderMessage.flags & Ci.nsMsgMessageFlags.Attachment
  );
}
+/* ===== Fundamental Attributes (per GlodaFundAttr.jsm) ===== */
+
/**
 * Save the synthetic message created in test_attributes_fundamental for the
 * benefit of test_attributes_fundamental_from_disk.
 */
var fundamentalSyntheticMessage;
// Handle of the folder the fundamental message currently lives in; updated
// when the message is moved in test_moved_message_attributes.
var fundamentalFolderHandle;
/**
 * We're saving this one so that we can move the message later and verify that
 * the attributes are consistent.
 */
var fundamentalMsgSet;
// Attachment URLs captured by verify_attributes_fundamental; compared against
// the post-move URLs in test_moved_message_attributes.
var fundamentalGlodaMsgAttachmentUrls;
/**
 * Save the resulting gloda message id corresponding to the
 * fundamentalSyntheticMessage so we can use it to query the message from disk.
 */
var fundamentalGlodaMessageId;
+
/**
 * Test that we extract the 'fundamental attributes' of a message properly
 * 'Fundamental' in this case is talking about the attributes defined/extracted
 * by gloda's GlodaFundAttr.jsm and perhaps the core message indexing logic itself
 * (which show up as kSpecial* attributes in GlodaFundAttr.jsm anyways.)
 */
async function test_attributes_fundamental() {
  // Create a synthetic message with attachment.
  let smsg = msgGen.makeMessage({
    name: "test message",
    bodyPart: new SyntheticPartMultiMixed([
      new SyntheticPartLeaf({ body: "I like cheese!" }),
      msgGen.makeMessage({ body: { body: "I like wine!" } }), // That's one attachment.
    ]),
    attachments: [
      { filename: "bob.txt", body: "I like bread!" }, // And that's another one.
    ],
  });
  // Save it off for test_attributes_fundamental_from_disk.
  fundamentalSyntheticMessage = smsg;
  let msgSet = new SyntheticMessageSet([smsg]);
  fundamentalMsgSet = msgSet;
  // Note: the folder handle is also stashed in fundamentalFolderHandle as a
  // side effect of this assignment (verify_attributes_fundamental reads it).
  let folder = (fundamentalFolderHandle =
    await messageInjection.makeEmptyFolder());
  await messageInjection.addSetsToFolders([folder], [msgSet]);

  // If we need to go offline, let the indexing pass run, then force us offline.
  if (goOffline) {
    await waitForGlodaIndexer();
    Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
    await messageInjection.makeFolderAndContentsOffline(folder);
    // Now the next indexer wait will wait for the next indexing pass.
  }

  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([msgSet], {
      verifier: verify_attributes_fundamental,
    })
  );
}
+
/**
 * Verifier callback for test_attributes_fundamental: checks every
 * "fundamental" attribute gloda extracted against the synthetic source
 * message, and records state for later tests (the gloda message id and the
 * attachment URLs).
 *
 * @param smsg The synthetic message that was injected.
 * @param gmsg The gloda message produced by indexing it.
 */
function verify_attributes_fundamental(smsg, gmsg) {
  // Save off the message id for test_attributes_fundamental_from_disk.
  fundamentalGlodaMessageId = gmsg.id;
  if (gmsg.attachmentInfos) {
    fundamentalGlodaMsgAttachmentUrls = gmsg.attachmentInfos.map(
      att => att.url
    );
  } else {
    fundamentalGlodaMsgAttachmentUrls = [];
  }

  Assert.equal(
    gmsg.folderURI,
    messageInjection.getRealInjectionFolder(fundamentalFolderHandle).URI
  );

  // -- Subject
  Assert.equal(smsg.subject, gmsg.conversation.subject);
  Assert.equal(smsg.subject, gmsg.subject);

  // -- Contact/identity information.
  // - From
  // Check the e-mail address.
  Assert.equal(gmsg.from.kind, "email");
  Assert.equal(smsg.fromAddress, gmsg.from.value);
  // Check the name.
  Assert.equal(smsg.fromName, gmsg.from.contact.name);

  // - To
  Assert.equal(smsg.toAddress, gmsg.to[0].value);
  Assert.equal(smsg.toName, gmsg.to[0].contact.name);

  // Date
  Assert.equal(smsg.date.valueOf(), gmsg.date.valueOf());

  // -- Message ID
  Assert.equal(smsg.messageId, gmsg.headerMessageID);

  // -- Attachments. We won't have these if we don't have fulltext results.
  if (expectFulltextResults) {
    Assert.equal(gmsg.attachmentTypes.length, 1);
    Assert.equal(gmsg.attachmentTypes[0], "text/plain");
    Assert.equal(gmsg.attachmentNames.length, 1);
    Assert.equal(gmsg.attachmentNames[0], "bob.txt");

    let expectedInfos = [
      // The name for that one is generated randomly.
      { contentType: "message/rfc822" },
      { name: "bob.txt", contentType: "text/plain" },
    ];
    let expectedSize = 14;
    Assert.equal(gmsg.attachmentInfos.length, 2);
    for (let [i, attInfos] of gmsg.attachmentInfos.entries()) {
      for (let k in expectedInfos[i]) {
        Assert.equal(attInfos[k], expectedInfos[i][k]);
      }
      // Because it's unreliable and depends on the platform.
      Assert.ok(Math.abs(attInfos.size - expectedSize) <= 2);
      // Check that the attachment URLs are correct.
      // Opening a channel on the URL is the validity check here.
      let channel = NetUtil.newChannel({
        uri: attInfos.url,
        loadingPrincipal: Services.scriptSecurityManager.getSystemPrincipal(),
        securityFlags:
          Ci.nsILoadInfo.SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL,
        contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER,
      });

      try {
        // Will throw if the URL is invalid.
        channel.asyncOpen(new PromiseTestUtils.PromiseStreamListener());
      } catch (e) {
        do_throw(new Error("Invalid attachment URL"));
      }
    }
  } else {
    // Make sure we don't actually get attachments!
    Assert.equal(gmsg.attachmentTypes, null);
    Assert.equal(gmsg.attachmentNames, null);
  }
}
+
/**
 * We now move the message into another folder, wait for it to be indexed,
 * and make sure the magic url getter for GlodaAttachment returns a proper
 * URL (the attachment URLs must change with the move, and the new URLs must
 * still be openable).
 */
async function test_moved_message_attributes() {
  // Attachment info only exists when fulltext indexing ran.
  if (!expectFulltextResults) {
    return;
  }

  // Don't ask me why, let destFolder = MessageInjection.make_empty_folder would result in a
  // random error when running test_index_messages_imap_offline.js ...
  let [[destFolder], ignoreSet] = await messageInjection.makeFoldersWithSets(
    1,
    [{ count: 2 }]
  );
  // The fundamental message now lives here; keep the handle in sync.
  fundamentalFolderHandle = destFolder;
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([ignoreSet]));

  // This is a fast move (third parameter set to true).
  await messageInjection.moveMessages(fundamentalMsgSet, destFolder, true);

  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([fundamentalMsgSet], {
      verifier(newSynMsg, newGlodaMsg) {
        // Verify we still have the same number of attachments.
        Assert.equal(
          fundamentalGlodaMsgAttachmentUrls.length,
          newGlodaMsg.attachmentInfos.length
        );
        for (let [i, attInfos] of newGlodaMsg.attachmentInfos.entries()) {
          // Verify the url has changed.
          Assert.notEqual(fundamentalGlodaMsgAttachmentUrls[i], attInfos.url);
          // And verify that the new url is still valid.
          let channel = NetUtil.newChannel({
            uri: attInfos.url,
            loadingPrincipal:
              Services.scriptSecurityManager.getSystemPrincipal(),
            securityFlags:
              Ci.nsILoadInfo.SEC_ALLOW_CROSS_ORIGIN_SEC_CONTEXT_IS_NULL,
            contentPolicyType: Ci.nsIContentPolicy.TYPE_OTHER,
          });
          try {
            // Will throw if the URL is invalid.
            channel.asyncOpen(new PromiseTestUtils.PromiseStreamListener());
          } catch (e) {
            // BUG FIX: the Error was previously constructed but never
            // thrown, so an invalid post-move URL silently passed. Fail the
            // test explicitly, matching verify_attributes_fundamental.
            do_throw(new Error("Invalid attachment URL"));
          }
        }
      },
      fullyIndexed: 0,
    })
  );
}
+
/**
 * We want to make sure that all of the fundamental properties also are there
 * when we load them from disk. Nuke our cache, query the message back up.
 * We previously used getMessagesByMessageID to get the message back, but he
 * does not perform a full load-out like a query does, so we need to use our
 * query mechanism for this.
 */
async function test_attributes_fundamental_from_disk() {
  // Drop every cached gloda object so the query below must hit the database.
  nukeGlodaCachesAndCollections();

  const query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE).id(
    fundamentalGlodaMessageId
  );
  await queryExpect(
    query,
    [fundamentalSyntheticMessage],
    verify_attributes_fundamental_from_disk,
    smsg => smsg.messageId
  );
}
+
/**
 * Adapter around verify_attributes_fundamental for queryExpect: verifies the
 * gloda message loaded back from disk and returns its header message-id so
 * queryExpect can match it against the synthetic message.
 *
 * @param aGlodaMessage The gloda message loaded from disk.
 * @returns The message's headerMessageID.
 */
function verify_attributes_fundamental_from_disk(aGlodaMessage) {
  verify_attributes_fundamental(fundamentalSyntheticMessage, aGlodaMessage);
  return aGlodaMessage.headerMessageID;
}
+
+/* ===== Explicit Attributes (per GlodaExplicitAttr.jsm) ===== */
+
/**
 * Test the attributes defined by GlodaExplicitAttr.jsm.
 * Toggles starred, read, and tag state on a message and checks that the
 * cached gloda message reflects each change after re-indexing.
 */
async function test_attributes_explicit() {
  let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
  let gmsg = msgSet.glodaMessages[0];

  // -- Star
  msgSet.setStarred(true);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.starred, true);

  msgSet.setStarred(false);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.starred, false);

  // -- Read / Unread
  msgSet.setRead(true);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.read, true);

  msgSet.setRead(false);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.read, false);

  // -- Tags
  // Note that the tag service does not guarantee stable nsIMsgTag references,
  // nor does noun_tag go too far out of its way to provide stability.
  // However, it is stable as long as we don't spook it by bringing new tags
  // into the equation.
  let tagOne = TagNoun.getTag("$label1");
  let tagTwo = TagNoun.getTag("$label2");

  msgSet.addTag(tagOne.key);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.notEqual(gmsg.tags.indexOf(tagOne), -1);

  msgSet.addTag(tagTwo.key);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.notEqual(gmsg.tags.indexOf(tagOne), -1);
  Assert.notEqual(gmsg.tags.indexOf(tagTwo), -1);

  msgSet.removeTag(tagOne.key);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.tags.indexOf(tagOne), -1);
  Assert.notEqual(gmsg.tags.indexOf(tagTwo), -1);

  msgSet.removeTag(tagTwo.key);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.tags.indexOf(tagOne), -1);
  Assert.equal(gmsg.tags.indexOf(tagTwo), -1);

  // -- Replied To
  // (Not yet covered.)

  // -- Forwarded
  // (Not yet covered.)
}
+
/**
 * Test non-query-able attributes.
 *
 * Toggles starred/read state, then verifies via direct SQL that no rows were
 * written to messageAttributes for the "read" attribute — i.e. these
 * attributes are exposed on the gloda message without being stored as
 * queryable attribute rows.
 */
async function test_attributes_cant_query() {
  let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
  let gmsg = msgSet.glodaMessages[0];

  // -- Star
  msgSet.setStarred(true);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.starred, true);

  msgSet.setStarred(false);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.starred, false);

  // -- Read / Unread
  msgSet.setRead(true);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.read, true);

  msgSet.setRead(false);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
  Assert.equal(gmsg.read, false);

  let readDbAttr = Gloda.getAttrDef(GlodaConstants.BUILT_IN, "read");
  let readId = readDbAttr.id;

  // No messageAttributes rows may exist for "read" — presumably it lives on
  // the message row itself rather than the attribute table, which is the
  // point of this test.
  await sqlExpectCount(
    0,
    "SELECT COUNT(*) FROM messageAttributes WHERE attributeID = ?1",
    readId
  );

  // -- Replied To
  // (Not yet covered.)

  // -- Forwarded
  // (Not yet covered.)
}
+
/**
 * Have the participants be in our addressbook prior to indexing so that we can
 * verify that the hand-off to the addressbook indexer does not cause breakage.
 */
async function test_people_in_addressbook() {
  const senderPair = msgGen.makeNameAndAddress();
  const recipPair = msgGen.makeNameAndAddress();

  // Put both participants into the address book before indexing.
  makeABCardForAddressPair(senderPair);
  makeABCardForAddressPair(recipPair);

  let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1, to: [recipPair], from: senderPair },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));

  const gmsg = msgSet.glodaMessages[0];
  for (const identity of [gmsg.from, gmsg.to[0]]) {
    // Each identity should resolve to a contact that is flagged as being in
    // the address book.
    Assert.notEqual(identity.contact, null);
    Assert.ok(identity.inAddressBook);
  }
}
+
+/* ===== Fulltexts Indexing ===== */
+
/**
 * Make sure that we are using the saneBodySize flag. This is basically the
 * test_sane_bodies test from test_mime_emitter but we pull the indexedBodyText
 * off the message to check and also make sure that the text contents slice
 * off the end rather than the beginning.
 */
async function test_streamed_bodies_are_size_capped() {
  if (!expectFulltextResults) {
    return;
  }

  // Double a 58-character line ten times (~60 KiB), bracketed by sentinels so
  // we can tell which end was truncated.
  let hugeString =
    "qqqqxxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx qqqqxxx \r\n";
  for (let doubling = 0; doubling < 10; doubling++) {
    hugeString += hugeString;
  }
  const bodyString = "aabb" + hugeString + "xxyy";

  const synMsg = msgGen.makeMessage({
    body: { body: bodyString, contentType: "text/plain" },
  });
  const msgSet = new SyntheticMessageSet([synMsg]);
  const folder = await messageInjection.makeEmptyFolder();
  await messageInjection.addSetsToFolders([folder], [msgSet]);

  if (goOffline) {
    // Let the first indexing pass finish, then push the folder offline so the
    // next pass streams the offline copy.
    await waitForGlodaIndexer();
    Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
    await messageInjection.makeFolderAndContentsOffline(folder);
  }

  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
  const gmsg = msgSet.glodaMessages[0];
  // The head sentinel must survive; the tail sentinel must be sliced off.
  Assert.ok(gmsg.indexedBodyText.startsWith("aabb"));
  Assert.ok(!gmsg.indexedBodyText.includes("xxyy"));

  // Cap plus one line of slop plus a little breathing room.
  if (gmsg.indexedBodyText.length > 20 * 1024 + 58 + 10) {
    do_throw(
      "Indexed body text is too big! (" + gmsg.indexedBodyText.length + ")"
    );
  }
}
+
/* ===== Message Deletion ===== */
/**
 * Test actually deleting a message on a per-message basis (not just nuking the
 * folder like emptying the trash does.)
 *
 * Logic situations:
 * - Non-last message in a conversation, twin.
 * - Non-last message in a conversation, not a twin.
 * - Last message in a conversation
 */
async function test_message_deletion() {
  // Non-last message in conv, twin.
  // Create and index two messages in a conversation.
  let [, convSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 2, msgsPerThread: 2 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([convSet], { augment: true }));

  // Twin the first message in a different folder owing to our reliance on
  // message-id's in the SyntheticMessageSet logic. (This is also why we broke
  // up the indexing waits too.)
  let twinFolder = await messageInjection.makeEmptyFolder();
  let twinSet = new SyntheticMessageSet([convSet.synMessages[0]]);
  await messageInjection.addSetsToFolders([twinFolder], [twinSet]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([twinSet], { augment: true }));

  // Split the conv set into two helper sets.
  let firstSet = convSet.slice(0, 1); // The twinned first message in the thread.
  let secondSet = convSet.slice(1, 2); // The un-twinned second thread message.

  // Make sure we can find the message (paranoia).
  let firstQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  firstQuery.id(firstSet.glodaMessages[0].id);
  let firstColl = await queryExpect(firstQuery, firstSet);

  // Delete it (not trash! delete!).
  // (Note: deleteMessages is invoked statically on MessageInjection, unlike
  // the instance calls used elsewhere in this test — presumably a static
  // helper on the class.)
  await MessageInjection.deleteMessages(firstSet);
  // Which should result in an apparent deletion.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [firstSet] }));
  // And our collection from that query should now be empty.
  Assert.equal(firstColl.items.length, 0);

  // Make sure it no longer shows up in a standard query.
  firstColl = await queryExpect(firstQuery, []);

  // Make sure it shows up in a privileged query.
  // (noDbQueryValidityConstraints appears to bypass the validity filtering
  // that hides deleted messages from normal queries — confirm against the
  // datastore implementation.)
  let privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
    noDbQueryValidityConstraints: true,
  });
  let firstGlodaId = firstSet.glodaMessages[0].id;
  privQuery.id(firstGlodaId);
  await queryExpect(privQuery, firstSet);

  // Force a deletion pass.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));

  // Make sure it no longer shows up in a privileged query; since it has a twin
  // we don't need to leave it as a ghost.
  await queryExpect(privQuery, []);

  // Make sure that the messagesText entry got blown away.
  await sqlExpectCount(
    0,
    "SELECT COUNT(*) FROM messagesText WHERE docid = ?1",
    firstGlodaId
  );

  // Make sure the conversation still exists.
  let conv = twinSet.glodaMessages[0].conversation;
  let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
  convQuery.id(conv.id);
  let convColl = await queryExpect(convQuery, [conv]);

  // -- Non-last message, no longer a twin => ghost.

  // Make sure nuking the twin didn't somehow kill them both.
  let twinQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  // Let's search on the message-id now that there is no ambiguity.
  twinQuery.headerMessageID(twinSet.synMessages[0].messageId);
  let twinColl = await queryExpect(twinQuery, twinSet);

  // Delete the twin.
  await MessageInjection.deleteMessages(twinSet);
  // Which should result in an apparent deletion.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [twinSet] }));
  // It should disappear from the collection.
  Assert.equal(twinColl.items.length, 0);

  // No longer show up in the standard query.
  twinColl = await queryExpect(twinQuery, []);

  // Still show up in a privileged query.
  privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
    noDbQueryValidityConstraints: true,
  });
  privQuery.headerMessageID(twinSet.synMessages[0].messageId);
  await queryExpect(privQuery, twinSet);

  // Force a deletion pass.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));

  // The message should be marked as a ghost now that the deletion pass has
  // run. Ghosts have no fulltext rows, so check for that.
  await sqlExpectCount(
    0,
    "SELECT COUNT(*) FROM messagesText WHERE docid = ?1",
    twinSet.glodaMessages[0].id
  );

  // It still should show up in the privileged query; it's a ghost!
  let privColl = await queryExpect(privQuery, twinSet);
  // Make sure it looks like a ghost (no folder, no message key).
  let twinGhost = privColl.items[0];
  Assert.equal(twinGhost._folderID, null);
  Assert.equal(twinGhost._messageKey, null);

  // Make sure the conversation still exists.
  await queryExpect(convQuery, [conv]);

  // -- Non-last message, not a twin.
  // This should blow away the message, the ghosts, and the conversation.

  // Second message should still be around.
  let secondQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  secondQuery.headerMessageID(secondSet.synMessages[0].messageId);
  let secondColl = await queryExpect(secondQuery, secondSet);

  // Delete it and make sure it gets marked deleted appropriately.
  await MessageInjection.deleteMessages(secondSet);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [secondSet] }));
  Assert.equal(secondColl.items.length, 0);

  // Still show up in a privileged query.
  privQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
    noDbQueryValidityConstraints: true,
  });
  privQuery.headerMessageID(secondSet.synMessages[0].messageId);
  await queryExpect(privQuery, secondSet);

  // Force a deletion pass.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));

  // It should no longer show up in a privileged query; we killed the ghosts.
  await queryExpect(privQuery, []);

  // - The conversation should have disappeared too.
  // (we have no listener to watch for it to have disappeared from convQuery but
  // this is basically how glodaTestHelper does its thing anyways.)
  Assert.equal(convColl.items.length, 0);

  // Make sure the query fails to find it too.
  await queryExpect(convQuery, []);

  // -- Identity culling verification.
  // The identities associated with that message should no longer exist, nor
  // should their contacts.
  // NOTE(review): no assertions actually back this last claim — TODO add them.
}
+
/**
 * Moving messages to the trash must look like a deletion to gloda: the trash
 * folder is not indexed, so the messages vanish from normal queries, and a
 * subsequent deletion sweep removes them (and their conversation) entirely.
 */
async function test_moving_to_trash_marks_deletion() {
  // Build a two-message conversation and let gloda index it.
  let [, trashedSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 2, msgsPerThread: 2 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([trashedSet], { augment: true }));

  let conversationId = trashedSet.glodaMessages[0].conversation.id;
  let glodaIds = [
    trashedSet.glodaMessages[0].id,
    trashedSet.glodaMessages[1].id,
  ];

  // Throw both messages in the trash.
  await messageInjection.trashMessages(trashedSet);

  // Since the trash folder is not indexed, an unprivileged query must already
  // treat the messages as deleted.
  let unprivilegedQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  unprivilegedQuery.id(...glodaIds);
  await queryExpect(unprivilegedQuery, []);

  // Once the pending events are processed they show up as deleted.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [trashedSet] }));

  // Request a deletion sweep. Nothing visible should change — the
  // conversation dies, but we cannot observe that directly.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));

  // The conversation must now be gone.
  let conversationQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
  conversationQuery.id(conversationId);
  await queryExpect(conversationQuery, []);

  // Even a privileged query must no longer find the messages.
  let privilegedQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
    noDbQueryValidityConstraints: true,
  });
  privilegedQuery.id(...glodaIds);
  await queryExpect(privilegedQuery, []);
}
+
/**
 * Deletion that occurs because a folder got deleted.
 * There is no hand-holding involving the headers that were in the folder.
 */
async function test_folder_nuking_message_deletion() {
  // Create and index two messages in a conversation.
  let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 2, msgsPerThread: 2 },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));

  let convId = msgSet.glodaMessages[0].conversation.id;
  let firstGlodaId = msgSet.glodaMessages[0].id;
  let secondGlodaId = msgSet.glodaMessages[1].id;

  // Delete the folder.
  // NOTE(review): this call is not awaited, unlike the other injection
  // helpers used here — confirm deleteFolder is synchronous, or that the
  // waitForGlodaIndexer below makes the ordering safe.
  messageInjection.deleteFolder(folder);
  // That does generate the deletion events if the messages were in-memory,
  // which these are.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([], { deleted: [msgSet] }));

  // This should have caused us to mark all the messages as deleted; the
  // messages should no longer show up in an unprivileged query.
  let msgQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  msgQuery.id(firstGlodaId, secondGlodaId);
  await queryExpect(msgQuery, []);

  // Force a sweep.
  GlodaMsgIndexer.indexingSweepNeeded = true;
  // There should be no apparent change as the result of this pass.
  // Well, the conversation will die, but we can't see that.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));

  // The conversation should be gone.
  let convQuery = Gloda.newQuery(GlodaConstants.NOUN_CONVERSATION);
  convQuery.id(convId);
  await queryExpect(convQuery, []);

  // The messages should be entirely gone.
  let msgPrivQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE, {
    noDbQueryValidityConstraints: true,
  });
  msgPrivQuery.id(firstGlodaId, secondGlodaId);
  await queryExpect(msgPrivQuery, []);
}
+
/* ===== Folder Move/Rename/Copy (Single and Nested) ===== */

/**
 * Move a folder (with another folder nested inside it) into the trash and
 * verify gloda marks both folders as never-to-be-indexed, marks their
 * messages deleted, and ignores new messages added to them afterwards.
 */
async function test_folder_deletion_nested() {
  // Add a folder with a bunch of messages.
  let [[folder1], msgSet1] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);

  let [[folder2], msgSet2] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);

  // Index these folders, and augment the msgSet with the glodaMessages array
  // for later use by sqlExpectCount.
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([msgSet1, msgSet2], { augment: true })
  );
  // The move has to be performed after the indexing, because otherwise, on
  // IMAP, the moved message headers are different entities and it's not
  // msgSet2 that ends up indexed, but the fresh headers.
  await MessageInjection.moveFolder(folder2, folder1);

  // Add a trash folder, and move folder1 into it.
  let trash = await messageInjection.makeEmptyFolder(null, [
    Ci.nsMsgFolderFlags.Trash,
  ]);
  await MessageInjection.moveFolder(folder1, trash);

  let folders = MessageInjection.get_nsIMsgFolder(trash).descendants;
  Assert.equal(folders.length, 2);
  let [newFolder1, newFolder2] = folders;

  let glodaFolder1 = Gloda.getFolderForFolder(newFolder1);
  let glodaFolder2 = Gloda.getFolderForFolder(newFolder2);

  // Verify that Gloda properly marked this folder as not to be indexed anymore.
  Assert.equal(
    glodaFolder1.indexingPriority,
    glodaFolder1.kIndexingNeverPriority
  );

  // Check that existing message is marked as deleted.
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([], { deleted: [msgSet1, msgSet2] })
  );

  // Make sure the deletion hit the database.
  await sqlExpectCount(
    1,
    "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
    glodaFolder1.id,
    glodaFolder1.kIndexingNeverPriority
  );
  await sqlExpectCount(
    1,
    "SELECT COUNT(*) from folderLocations WHERE id = ? AND indexingPriority = ?",
    glodaFolder2.id,
    glodaFolder2.kIndexingNeverPriority
  );

  if (messageInjection.messageInjectionIsLocal()) {
    // Add another message.
    await messageInjection.makeNewSetsInFolders([newFolder1], [{ count: 1 }]);
    await messageInjection.makeNewSetsInFolders([newFolder2], [{ count: 1 }]);

    // Make sure that indexing returns nothing.
    GlodaMsgIndexer.indexingSweepNeeded = true;
    await waitForGlodaIndexer();
    Assert.ok(...assertExpectedMessagesIndexed([]));
  }
}
+
/* ===== IMAP Nuances ===== */

/**
 * Verify that an IMAP message which arrives already marked as read still
 * gets indexed. (No-op in the local-folder variants of this suite.)
 */
async function test_imap_add_unread_to_folder() {
  if (messageInjection.messageInjectionIsLocal()) {
    return;
  }

  let [, readSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1, read: true },
  ]);
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([readSet]));
}
+
/* ===== Message Moving ===== */

/**
 * Moving a message between folders should result in us knowing that the message
 * is in the target location.
 */
async function test_message_moving() {
  // - Inject and insert.
  // Source folder with the message we care about.
  let [[srcFolder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);
  // Dest folder with some messages in it to test some wacky local folder moving
  // logic. (Local moves try and update the correspondence immediately.)
  let [[destFolder], ignoreSet] = await messageInjection.makeFoldersWithSets(
    1,
    [{ count: 2 }]
  );

  // We want the gloda message mapping.
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([msgSet, ignoreSet], { augment: true })
  );
  let gmsg = msgSet.glodaMessages[0];
  // Save off the message key so we can make sure it changes.
  let oldMessageKey = msgSet.getMsgHdr(0).messageKey;

  // - Fastpath (offline) move it to a new folder.
  // Initial move.
  await messageInjection.moveMessages(msgSet, destFolder, true);

  // - Make sure gloda sees it in the new folder.
  // Since we are doing offline IMAP moves, the fast-path should be taken and
  // so we should receive an itemsModified notification without a call to
  // Gloda.grokNounItem.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet], { fullyIndexed: 0 }));

  Assert.equal(
    gmsg.folderURI,
    messageInjection.getRealInjectionFolder(destFolder).URI
  );

  // - Make sure the message key is correct!
  Assert.equal(gmsg.messageKey, msgSet.getMsgHdr(0).messageKey);
  // Sanity check that the messageKey actually changed for the message.
  Assert.notEqual(gmsg.messageKey, oldMessageKey);

  // - Make sure the indexer's _keyChangedBatchInfo dict is empty.
  for (let evilKey in GlodaMsgIndexer._keyChangedBatchInfo) {
    let evilValue = GlodaMsgIndexer._keyChangedBatchInfo[evilKey];
    // Fix: the original passed `evilValue + "."` as a *second argument* to
    // the Error constructor (after a trailing comma), so the value was
    // silently dropped from the message; it was also missing the space
    // between "but" and "has". Build one complete message string instead.
    throw new Error(
      "GlodaMsgIndexer._keyChangedBatchInfo should be empty but " +
        "has key:\n" +
        evilKey +
        "\nAnd value:\n" +
        evilValue +
        "."
    );
  }

  // - Slowpath (IMAP online) move it back to its origin folder.
  // Move it back.
  await messageInjection.moveMessages(msgSet, srcFolder, false);
  // In the IMAP case we will end up reindexing the message because we will
  // not be able to fast-path, but the local case will still be fast-pathed.
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([msgSet], {
      fullyIndexed: messageInjection.messageInjectionIsLocal() ? 0 : 1,
    })
  );
  Assert.equal(
    gmsg.folderURI,
    messageInjection.getRealInjectionFolder(srcFolder).URI
  );
  Assert.equal(gmsg.messageKey, msgSet.getMsgHdr(0).messageKey);
}
+
+/**
+ * Moving a gloda-indexed message out of a filthy folder should result in the
+ * destination message not having a gloda-id.
+ */
+
+/* ===== Message Copying ===== */
+
/* ===== Sweep Complications ==== */

/**
 * Make sure that a message indexed by event-driven indexing does not
 * get reindexed by sweep indexing that follows.
 */
async function test_sweep_indexing_does_not_reindex_event_indexed() {
  let [[folder], indexedSet] = await messageInjection.makeFoldersWithSets(1, [
    { count: 1 },
  ]);

  // Let the event-driven pass finish first.
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([indexedSet]));

  // Now sweep the folder; the sweep should find nothing left to index.
  GlodaMsgIndexer.indexFolder(messageInjection.getRealInjectionFolder(folder));
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([]));
}
+
/**
 * Verify that moving apparently gloda-indexed messages from a filthy folder or
 * one that simply should not be gloda indexed does not result in the target
 * messages having the gloda-id property on them. To avoid messing with too
 * many invariants we do the 'folder should not be gloda indexed' case.
 * Uh, and of course, the message should still get indexed once we clear the
 * filthy gloda-id off of it given that it is moving from a folder that is not
 * indexed to one that is indexed.
 */
async function test_filthy_moves_slash_move_from_unindexed_to_indexed() {
  // - Inject.
  // The source folder needs a flag so we don't index it.
  let srcFolder = await messageInjection.makeEmptyFolder(null, [
    Ci.nsMsgFolderFlags.Junk,
  ]);
  // The destination folder has to be something we want to index though.
  let destFolder = await messageInjection.makeEmptyFolder();
  let [msgSet] = await messageInjection.makeNewSetsInFolders(
    [srcFolder],
    [{ count: 1 }]
  );

  // - Mark with a bogus gloda-id.
  msgSet.getMsgHdr(0).setUint32Property("gloda-id", 9999);

  // - Disable event driven indexing so we don't get interference from indexing.
  configureGlodaIndexing({ event: false });

  // - Move.
  await messageInjection.moveMessages(msgSet, destFolder);

  // - Verify the target has no gloda-id!
  // (The assert presumes getUint32Property returns 0 for an absent
  // property — confirm against nsIMsgDBHdr.)
  dump(`checking ${msgSet.getMsgHdr(0)}`);
  Assert.equal(msgSet.getMsgHdr(0).getUint32Property("gloda-id"), 0);

  // - Re-enable indexing and let the indexer run.
  // We don't want to affect other tests.
  configureGlodaIndexing({});
  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
}
+
/**
 * Verify the shared globals were initialized by the concrete (per-backend)
 * test file before any of the real tests run.
 */
function test_sanity_test_environment() {
  Assert.ok(msgGen, "Sanity that msgGen is set.");
  Assert.ok(scenarios, "Sanity that scenarios is set");
  Assert.ok(messageInjection, "Sanity that messageInjection is set.");
}
+
// The shared test list the per-backend driver files (local/IMAP variants)
// run. NOTE(review): ordering appears significant — the environment sanity
// check runs first and several tests build on state left by earlier ones —
// confirm before reordering entries.
var base_index_messages_tests = [
  test_sanity_test_environment,
  test_pending_commit_tracker_flushes_correctly,
  test_pending_commit_causes_msgdb_commit,
  test_indexing_sweep,
  test_event_driven_indexing_does_not_mess_with_filthy_folders,

  test_threading_direct_reply,
  test_threading_missing_intermediary,
  test_threading_siblings_missing_parent,
  test_attachment_flag,
  test_attributes_fundamental,
  test_moved_message_attributes,
  test_attributes_fundamental_from_disk,
  test_attributes_explicit,
  test_attributes_cant_query,

  test_people_in_addressbook,

  test_streamed_bodies_are_size_capped,

  test_imap_add_unread_to_folder,
  test_message_moving,

  test_message_deletion,
  test_moving_to_trash_marks_deletion,
  test_folder_nuking_message_deletion,

  test_sweep_indexing_does_not_reindex_event_indexed,

  test_filthy_moves_slash_move_from_unindexed_to_indexed,

  test_indexing_never_priority,
  test_setting_indexing_priority_never_while_indexing,

  test_folder_deletion_nested,
];
diff --git a/comm/mailnews/db/gloda/test/unit/base_query_messages.js b/comm/mailnews/db/gloda/test/unit/base_query_messages.js
new file mode 100644
index 0000000000..02b8cceb1a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/base_query_messages.js
@@ -0,0 +1,729 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests our querying support. We build up a deterministic little
+ * 'world' of messages spread across multiple conversations, multiple folders
+ * and multiple authors. To verify expected negative results, in addition to
+ * the 'peoples' in our world clique, we also have 'outlier' contacts that do
+ * not communicate with the others (but are also spread across folders).
+ *
+ * This is broadly intended to test all of our query features and mechanisms
+ * (apart from our specialized search implementation, which is tested by
+ * test_search_messages.js), but is probably not the place to test specific
+ * edge-cases if they do not easily fit into the 'world' data set.
+ *
+ * I feel like having the 'world' mishmash as a data source may muddle things
+ * more than it should, but it is hard to deny the benefit of not having to
+ * define a bunch of message corpuses entirely specialized for each test.
+ */
+
+var { assertExpectedMessagesIndexed, waitForGlodaIndexer } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
/**
 * Whether we expect fulltext results. IMAP folders that are offline shouldn't
 * have their bodies indexed.
 */
var expectFulltextResults = true;

/**
 * Should we force our folders offline after we have indexed them once. We do
 * this in the online_to_offline test variant.
 * (setup_populate consults this flag after injecting each folder.)
 */
var goOffline = false;
+
/* ===== Populate ===== */
/**
 * Deterministic bookkeeping for the generated 'world' of messages.
 * generateFolderMessages/categorizeMessage fill this in; the query tests read
 * it back to build queries and expected result sets.
 */
var world = {
  phase: 0,

  // A list of tuples of [name, email] of length NUM_AUTHORS.
  peoples: null,
  NUM_AUTHORS: 5,
  // Maps each author (as defined by their email address) to the list of
  // (synthetic) messages they have 'authored'.
  authorGroups: {},

  NUM_CONVERSATIONS: 3,
  // The last message (so far) in each conversation.
  lastMessagesInConvos: [],
  // Maps the message-id of the root message in a conversation to the list of
  // synthetic messages in the conversation.
  conversationGroups: {},
  // A list of lists of synthetic messages, organized by the conversation they
  // belong to.
  conversationLists: [],
  // A list of gloda conversation id's, each corresponding to the entries in
  // conversationLists.
  glodaConversationIds: [],

  NUM_FOLDERS: 2,
  MESSAGES_PER_FOLDER: 11,
  // A list of lists of synthetic messages, one list per folder.
  folderClumps: [],
  // One folder object per population phase, each corresponding to the list in
  // folderClumps. (glodaInfoStasher pushes aGlodaMessage.folder here, so
  // these are presumably gloda folder objects rather than raw nsIMsgFolders
  // — confirm against GlodaDataModel.)
  glodaFolders: [],

  outlierAuthor: null,
  outlierFriend: null,

  // Messages authored by contacts in the "peoples" group.
  peoplesMessages: [],
  // Messages authored by outlierAuthor and outlierFriend.
  outlierMessages: [],
};
+
/**
 * Map a number to a unique search term, for the benefit of the search logic.
 * Every term shares the "uniq" prefix to avoid accidental collisions with
 * terms outside our control; the number itself is rendered as base-26
 * letters, least-significant digit first. Each digit-letter is doubled so
 * the porter stemmer cannot do anything odd to the term.
 */
function uniqueTermGenerator(aNum) {
  const pieces = ["uniq"];
  let remaining = aNum;
  do {
    const letter = String.fromCharCode(97 + (remaining % 26));
    pieces.push(letter, letter);
    remaining = Math.floor(remaining / 26);
  } while (remaining);
  return pieces.join("");
}
+
// Offsets composed with uniqueTermGenerator so terms for different facets
// (conversation/author crossed with subject/body/attachment) do not collide.
// NOTE(review): CONV and BODY are both 0 — presumably fine because BODY is
// always combined with a CONV or AUTHOR offset, but worth confirming.
var UNIQUE_OFFSET_CONV = 0;
var UNIQUE_OFFSET_AUTHOR = 26;
var UNIQUE_OFFSET_BODY = 0;
var UNIQUE_OFFSET_SUBJECT = 26 * 26;
var UNIQUE_OFFSET_ATTACHMENT = 26 * 26 * 26;
+
/**
 * File a synthetic message into the 'world' bookkeeping structure by author
 * and by conversation. The test code later generates queries and expected
 * results from these groupings. (Folder lumping happens elsewhere, in one
 * big glob.)
 *
 * @param aSynthMessage The synthetic message.
 */
function categorizeMessage(aSynthMessage) {
  // Group by author, keyed by the from address.
  const author = aSynthMessage.fromAddress;
  if (!(author in world.authorGroups)) {
    world.authorGroups[author] = [];
  }
  world.authorGroups[author].push(aSynthMessage);

  // Group by conversation: walk up to the thread root and key off of its
  // message-id.
  let root = aSynthMessage;
  while (root.parent) {
    root = root.parent;
  }
  if (!(root.messageId in world.conversationGroups)) {
    world.conversationGroups[root.messageId] = [];
  }
  world.conversationGroups[root.messageId].push(aSynthMessage);
  world.conversationLists[aSynthMessage.iConvo].push(aSynthMessage);
}
+
/**
 * Generate messages in a single folder, categorizing them as we go.
 *
 * Key message characteristics:
 * - Whenever a 'peoples' sends a message, they send it to all 'peoples',
 *   including themselves.
 *
 * @returns {SyntheticMessageSet} the MESSAGES_PER_FOLDER 'peoples' messages
 *   plus one outlier message. The periodically created "missing parent"
 *   messages are deliberately never added, so they end up as gloda ghosts.
 */
function generateFolderMessages() {
  let messages = [],
    smsg;

  let iAuthor = 0;
  for (let iMessage = 0; iMessage < world.MESSAGES_PER_FOLDER; iMessage++) {
    let iConvo = iMessage % world.NUM_CONVERSATIONS;

    // We need missing messages to create ghosts, so periodically add an extra
    // unknown into the equation. we do this prior to the below step because
    // then we don't hose up all the fancy body creation the next step does.
    // (This reply-to-previous-smsg message is never pushed onto `messages`,
    // which is what keeps it "missing".)
    if (iMessage % 3 == 1) {
      smsg = msgGen.makeMessage({ inReplyTo: smsg });
    }

    let convUniqueSubject = uniqueTermGenerator(
      UNIQUE_OFFSET_SUBJECT + UNIQUE_OFFSET_CONV + iConvo
    );
    let convUniqueBody = uniqueTermGenerator(
      UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_CONV + iConvo
    );
    let authorUniqueBody = uniqueTermGenerator(
      UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_AUTHOR + iAuthor
    );
    let convUniqueAttachment = uniqueTermGenerator(
      UNIQUE_OFFSET_ATTACHMENT + UNIQUE_OFFSET_CONV + iConvo
    );
    smsg = msgGen.makeMessage({
      inReplyTo: world.lastMessagesInConvos[iConvo],
      // Note that the reply-logic will ignore our subject, luckily that does
      // not matter! (since it will just copy the subject)
      subject: convUniqueSubject,
      body: {
        body: convUniqueBody + " " + authorUniqueBody,
      },
      attachments: [
        {
          filename: convUniqueAttachment + ".conv",
          body: "content does not matter. only life matters.",
          contentType: "application/x-test",
        },
      ],
    });

    // MakeMessage is not exceedingly clever right now, we need to overwrite
    // From and To.
    smsg.from = world.peoples[iAuthor];
    iAuthor = (iAuthor + iConvo + 1) % world.NUM_AUTHORS;
    // So, everyone is talking to everyone for this stuff.
    smsg.to = world.peoples;
    world.lastMessagesInConvos[iConvo] = smsg;
    // Simplify categorizeMessage and glodaInfoStasher's life.
    smsg.iConvo = iConvo;

    categorizeMessage(smsg);
    messages.push(smsg);
    world.peoplesMessages.push(smsg);
  }

  // One extra message between the two outliers; deliberately not categorized
  // into the author/conversation groupings.
  smsg = msgGen.makeMessage();
  smsg.from = world.outlierAuthor;
  smsg.to = [world.outlierFriend];
  // Do not lump it.
  messages.push(smsg);
  world.outlierMessages.push(smsg);

  world.folderClumps.push(messages);

  return new SyntheticMessageSet(messages);
}
+
/**
 * Masquerades as a verification function so we get handed each (synthetic,
 * gloda) message pair, letting us stash the gloda conversation ids and the
 * gloda folder for later use by the query tests.
 */
function glodaInfoStasher(aSynthMessage, aGlodaMessage) {
  const { iConvo } = aSynthMessage;
  if (iConvo !== undefined) {
    world.glodaConversationIds[iConvo] = aGlodaMessage.conversation.id;
  }
  // Remember exactly one gloda folder per population phase.
  if (world.glodaFolders.length <= world.phase) {
    world.glodaFolders.push(aGlodaMessage.folder);
  }
}
+
// We override these for the IMAP tests.
// No-op defaults; the IMAP test variants replace them to adjust the
// environment around setup_populate.
var pre_setup_populate_hook = function default_pre_setup_populate_hook() {};
var post_setup_populate_hook = function default_post_setup_populate_hook() {};
+
// First, we must populate our message store with delicious messages.
async function setup_populate() {
  // An explicit, unconstrained collection — presumably held so the gloda
  // items stay alive in the collection manager for the duration of the
  // tests; confirm against Collection.jsm.
  world.glodaHolderCollection = Gloda.explicitCollection(
    GlodaConstants.NOUN_MESSAGE,
    []
  );

  world.peoples = msgGen.makeNamesAndAddresses(world.NUM_AUTHORS);
  world.outlierAuthor = msgGen.makeNameAndAddress();
  world.outlierFriend = msgGen.makeNameAndAddress();
  // Set up the per-conversation values with blanks initially.
  for (let iConvo = 0; iConvo < world.NUM_CONVERSATIONS; iConvo++) {
    world.lastMessagesInConvos.push(null);
    world.conversationLists.push([]);
    world.glodaConversationIds.push(null);
  }

  let setOne = generateFolderMessages();
  let folderOne = await messageInjection.makeEmptyFolder();
  await messageInjection.addSetsToFolders([folderOne], [setOne]);
  // If this is the online_to_offline variant (indicated by goOffline) we want
  // to make the messages available offline. This should trigger an event
  // driven re-indexing of the messages which should make the body available
  // for fulltext queries.
  if (goOffline) {
    await waitForGlodaIndexer();
    Assert.ok(...assertExpectedMessagesIndexed([setOne]));
    await messageInjection.makeFolderAndContentsOffline(folderOne);
  }
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([setOne], { verifier: glodaInfoStasher })
  );

  // Second folder, second phase (glodaInfoStasher keys off world.phase).
  world.phase++;
  let setTwo = generateFolderMessages();
  let folderTwo = await messageInjection.makeEmptyFolder();
  await messageInjection.addSetsToFolders([folderTwo], [setTwo]);
  if (goOffline) {
    await waitForGlodaIndexer();
    Assert.ok(...assertExpectedMessagesIndexed([setTwo]));
    await messageInjection.makeFolderAndContentsOffline(folderTwo);
  }
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([setTwo], { verifier: glodaInfoStasher })
  );
}
+
+/* ===== Non-text queries ===== */
+
+/* === messages === */
+
/**
 * Takes a list of mutually exclusive queries and a list of the resulting
 * collections and ensures that the collections from one query do not pass the
 * query.test() method of one of the other queries. To restate, the queries
 * must not have any overlapping results, or we will get angry without
 * justification.
 */
function verify_nonMatches(aQueries, aCollections) {
  aCollections.forEach((ignoredCollection, i) => {
    const testQuery = aQueries[i];
    // Pit each query against the items from the *next* query's collection.
    const nonmatches = aCollections[(i + 1) % aCollections.length].items;
    for (const item of nonmatches) {
      if (testQuery.test(item)) {
        dump("item: " + JSON.stringify(item) + "\n");
        dump("constraints: " + JSON.stringify(testQuery._constraints) + "\n");
        do_throw(
          "Something should not match query.test(), but it does: " + item
        );
      }
    }
  });
}
+
// Index of the next conversation to query; test_query_messages_by_conversation
// runs once per conversation and advances this.
var ts_convNum = 0;
/* preserved state for the non-match testing performed by
 * test_query_messages_by_conversation_nonmatches.
 */
var ts_convQueries = [];
var ts_convCollections = [];
/**
 * Query conversations by gloda conversation-id, saving the queries and
 * resulting collections in ts_convQueries and ts_convCollections for the
 * use of test_query_messages_by_conversation_nonmatches who verifies the
 * query.test() logic doesn't match on things it should not match on.
 *
 * @tests gloda.noun.message.attr.conversation
 * @tests gloda.datastore.sqlgen.kConstraintIn
 */
async function test_query_messages_by_conversation() {
  const convNum = ts_convNum++;
  const query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  query.conversation(world.glodaConversationIds[convNum]);

  ts_convQueries.push(query);
  const collection = await queryExpect(
    query,
    world.conversationLists[convNum]
  );
  ts_convCollections.push(collection);
}
+
/**
 * Feed each conversation query the items returned by a *different*
 * conversation's query and require query.test() to reject them all.
 *
 * @tests gloda.query.test.kConstraintIn
 */
function test_query_messages_by_conversation_nonmatches() {
  verify_nonMatches(ts_convQueries, ts_convCollections);
}
+
var ts_folderNum = 0;
// Parallel state to ts_convQueries/ts_convCollections, but for the per-folder
// queries, consumed by test_query_messages_by_folder_nonmatches.
var ts_folderQueries = [];
var ts_folderCollections = [];
/**
 * Query messages by gloda folder, one folder per invocation, recording the
 * query and collection for the later non-match verification.
 *
 * @tests gloda.noun.message.attr.folder
 * @tests gloda.datastore.sqlgen.kConstraintIn
 */
async function test_query_messages_by_folder() {
  const folderNum = ts_folderNum++;
  const query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  query.folder(world.glodaFolders[folderNum]);

  ts_folderQueries.push(query);
  const collection = await queryExpect(query, world.folderClumps[folderNum]);
  ts_folderCollections.push(collection);
}
+
/**
 * Feed each folder query the items returned by the other folder's query and
 * require query.test() to reject them all.
 *
 * @tests gloda.query.test.kConstraintIn
 */
function test_query_messages_by_folder_nonmatches() {
  verify_nonMatches(ts_folderQueries, ts_folderCollections);
}
+
/**
 * Resolve a single message header back to its gloda message via
 * Gloda.getMessageCollectionForHeader.
 *
 * @tests Gloda.ns.getMessageCollectionForHeader()
 */
async function test_get_message_for_header() {
  // Any previously collected message will do.
  const glodaMessage = ts_convCollections[1].items[0];
  // Locate its synthetic counterpart by message-id (ordering must not be
  // assumed).
  const synthMessage = world.conversationLists[1].find(
    candidate => candidate.messageId == glodaMessage.headerMessageID
  );
  await queryExpect(
    {
      queryFunc: Gloda.getMessageCollectionForHeader,
      queryThis: Gloda,
      args: [glodaMessage.folderMessage],
      nounId: GlodaConstants.NOUN_MESSAGE,
    },
    [synthMessage]
  );
}
+
/**
 * Resolve a whole batch of message headers back to gloda messages via
 * Gloda.getMessageCollectionForHeaders.
 *
 * @tests Gloda.ns.getMessageCollectionForHeaders()
 */
async function test_get_messages_for_headers() {
  const sourceCollection = ts_convCollections[0];
  const headers = sourceCollection.items.map(message => message.folderMessage);
  await queryExpect(
    {
      queryFunc: Gloda.getMessageCollectionForHeaders,
      queryThis: Gloda,
      args: [headers],
      nounId: GlodaConstants.NOUN_MESSAGE,
    },
    world.conversationLists[0]
  );
}
+
// At this point we go run the identity and contact tests for side-effects.

var ts_messageIdentityQueries = [];
var ts_messageIdentityCollections = [];
/**
 * Query messages involving the first 'peoples' identity; every peoples
 * message should come back (everyone mails everyone).
 * Relies on peoplesIdentityCollection, which is populated elsewhere in this
 * file by the identity tests (hence the side-effects note above).
 *
 * @tests gloda.noun.message.attr.involves
 * @tests gloda.datastore.sqlgen.kConstraintIn
 */
async function test_query_messages_by_identity_peoples() {
  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
  query.involves(peoplesIdentityCollection.items[0]);

  ts_messageIdentityQueries.push(query);
  ts_messageIdentityCollections.push(
    await queryExpect(query, world.peoplesMessages)
  );
}
+
+/**
+ * @tests gloda.noun.message.attr.involves
+ */
+async function test_query_messages_by_identity_outlier() {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.involves(outlierIdentityCollection.items[0]);
+ // This also tests our ability to have two intersecting constraints! hooray!.
+ query.involves(outlierIdentityCollection.items[1]);
+
+ ts_messageIdentityQueries.push(query);
+ ts_messageIdentityCollections.push(
+ await queryExpect(query, world.outlierMessages)
+ );
+}
+
+/**
+ * @tests gloda.query.test.kConstraintIn
+ */
+function test_query_messages_by_identity_nonmatches() {
+ verify_nonMatches(ts_messageIdentityQueries, ts_messageIdentityCollections);
+}
+
+/* exported test_query_messages_by_contact */
+function test_query_messages_by_contact() {
+ // IOU
+}
+
+// Saved date-range query, reused by the nonmatches test below.
+var ts_messagesDateQuery;
+/**
+ * Query messages whose date falls in an inclusive range spanning the 2nd and
+ * 3rd peoples message.
+ *
+ * @tests gloda.noun.message.attr.date
+ * @tests gloda.datastore.sqlgen.kConstraintRanges
+ */
+async function test_query_messages_by_date() {
+  ts_messagesDateQuery = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  // We are clearly relying on knowing the generation sequence here,
+  // fuggedaboutit.
+  ts_messagesDateQuery.dateRange([
+    world.peoplesMessages[1].date,
+    world.peoplesMessages[2].date,
+  ]);
+  await queryExpect(ts_messagesDateQuery, world.peoplesMessages.slice(1, 3));
+}
+
+/**
+ * The saved range query must reject messages just outside the range on
+ * either side.
+ *
+ * @tests gloda.query.test.kConstraintRanges
+ */
+function test_query_messages_by_date_nonmatches() {
+  if (
+    ts_messagesDateQuery.test(world.peoplesMessages[0]) ||
+    ts_messagesDateQuery.test(world.peoplesMessages[3])
+  ) {
+    do_throw("The date testing mechanism is busted.");
+  }
+}
+
+/* === contacts === */
+/* exported test_query_contacts_by_popularity */
+// Placeholder; popularity-ordered contact querying is not yet covered.
+function test_query_contacts_by_popularity() {
+  // IOU
+}
+
+/* === identities === */
+
+/* ===== Text-based queries ===== */
+
+/* === conversations === */
+
+/* exported test_query_conversations_by_subject_text */
+// Placeholder; conversation subject fulltext is not yet covered.
+function test_query_conversations_by_subject_text() {}
+
+/* === messages === */
+
+/**
+ * Test subject searching using the conversation unique subject term.
+ *
+ * @tests gloda.noun.message.attr.subjectMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_subject_text() {
+  // We only need to use one conversation.
+  let convNum = 0;
+
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  // The unique term was baked into every subject of this conversation at
+  // generation time (offset scheme: subject + conversation + index).
+  let convSubjectTerm = uniqueTermGenerator(
+    UNIQUE_OFFSET_SUBJECT + UNIQUE_OFFSET_CONV + convNum
+  );
+  query.subjectMatches(convSubjectTerm);
+  await queryExpect(query, world.conversationLists[convNum]);
+}
+
+/**
+ * Test body searching using the conversation unique body term.
+ * When fulltext indexing of bodies is not expected (expectFulltextResults is
+ * false), the query must come back empty.
+ *
+ * @tests gloda.noun.message.attr.bodyMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_body_text() {
+  // We only need to use one conversation.
+  let convNum = 0;
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  let convBodyTerm = uniqueTermGenerator(
+    UNIQUE_OFFSET_BODY + UNIQUE_OFFSET_CONV + convNum
+  );
+  query.bodyMatches(convBodyTerm);
+  await queryExpect(
+    query,
+    expectFulltextResults ? world.conversationLists[convNum] : []
+  );
+}
+
+/**
+ * Test attachment name searching using the conversation unique attachment term.
+ * Subject to the same expectFulltextResults gating as body search.
+ *
+ * @tests gloda.noun.message.attr.attachmentNamesMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_attachment_names() {
+  let convNum = 0;
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  let convUniqueAttachment = uniqueTermGenerator(
+    UNIQUE_OFFSET_ATTACHMENT + UNIQUE_OFFSET_CONV + convNum
+  );
+  query.attachmentNamesMatch(convUniqueAttachment);
+  await queryExpect(
+    query,
+    expectFulltextResults ? world.conversationLists[convNum] : []
+  );
+}
+
+/**
+ * Test author name fulltext searching using an arbitrary author.
+ * world.peoples entries are [name, email] pairs; authorGroups is keyed by
+ * email.
+ *
+ * @tests gloda.noun.message.attr.authorMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_authorMatches_name() {
+  let [authorName, authorMail] = world.peoples[0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.authorMatches(authorName);
+  await queryExpect(query, world.authorGroups[authorMail]);
+}
+
+/**
+ * Test author mail address fulltext searching using an arbitrary author.
+ *
+ * @tests gloda.noun.message.attr.authorMatches
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_authorMatches_email() {
+  let [, authorMail] = world.peoples[0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.authorMatches(authorMail);
+  await queryExpect(query, world.authorGroups[authorMail]);
+}
+
+/**
+ * Test recipient name fulltext searching using an arbitrary recipient. Since
+ * all 'peoples' messages are sent to all of them, any choice from peoples
+ * gets us all 'peoplesMessages'.
+ *
+ * @tests gloda.noun.message.attr.recipientsMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_recipients_name() {
+  let name = world.peoples[0][0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.recipientsMatch(name);
+  await queryExpect(query, world.peoplesMessages);
+}
+
+/**
+ * Test recipient mail fulltext searching using an arbitrary recipient. Since
+ * all 'peoples' messages are sent to all of them, any choice from peoples
+ * gets us all 'peoplesMessages'.
+ *
+ * @tests gloda.noun.message.attr.recipientsMatch
+ * @tests gloda.datastore.sqlgen.kConstraintFulltext
+ */
+async function test_query_messages_by_recipients_email() {
+  let [, mail] = world.peoples[0];
+  let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+  query.recipientsMatch(mail);
+  await queryExpect(query, world.peoplesMessages);
+}
+
+/* === contacts === */
+
+// Saved LIKE query, reused by the nonmatch test below.
+var contactLikeQuery;
+/**
+ * Query contacts whose name contains an inner substring of the first peoples
+ * contact's name, via the LIKE (wildcard) constraint machinery.
+ *
+ * @tests gloda.noun.contact.attr.name
+ * @tests gloda.datastore.sqlgen.kConstraintStringLike
+ */
+async function test_query_contacts_by_name() {
+  // Let's use like... we need to test that.
+  contactLikeQuery = Gloda.newQuery(GlodaConstants.NOUN_CONTACT);
+  let personName = world.peoples[0][0];
+  // Chop off the first and last letter... this isn't the most edge-case
+  // handling way to roll, but LOOK OVER THERE? IS THAT ELVIS?
+  let personNameSubstring = personName.substring(1, personName.length - 1);
+  // WILDCARD markers on both sides make this a %substring% match.
+  contactLikeQuery.nameLike(
+    contactLikeQuery.WILDCARD,
+    personNameSubstring,
+    contactLikeQuery.WILDCARD
+  );
+
+  await queryExpect(contactLikeQuery, [personName]);
+}
+
+/**
+ * The saved LIKE query must not match an unrelated (outlier) contact.
+ *
+ * @tests gloda.query.test.kConstraintStringLike
+ */
+function test_query_contacts_by_name_nonmatch() {
+  let otherContact = outlierIdentityCollection.items[0].contact;
+  if (contactLikeQuery.test(otherContact)) {
+    do_throw("The string LIKE mechanism as applied to contacts does not work.");
+  }
+}
+
+/* === identities === */
+
+// Saved for the nonmatches cross-check and for the involves() message tests.
+var peoplesIdentityQuery;
+var peoplesIdentityCollection;
+/**
+ * Query email identities for every peoples address via a multi-valued
+ * value() constraint (one positional argument per address).
+ */
+async function test_query_identities_for_peoples() {
+  peoplesIdentityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+  peoplesIdentityQuery.kind("email");
+  let peopleAddrs = world.peoples.map(nameAndAddr => nameAndAddr[1]);
+  peoplesIdentityQuery.value.apply(peoplesIdentityQuery, peopleAddrs);
+  peoplesIdentityCollection = await queryExpect(
+    peoplesIdentityQuery,
+    peopleAddrs
+  );
+}
+
+// Saved for the nonmatches cross-check and for the involves() message tests.
+var outlierIdentityQuery;
+var outlierIdentityCollection;
+/**
+ * Query email identities for the two outlier addresses.
+ */
+async function test_query_identities_for_outliers() {
+  outlierIdentityQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
+  outlierIdentityQuery.kind("email");
+  let outlierAddrs = [world.outlierAuthor[1], world.outlierFriend[1]];
+  outlierIdentityQuery.value.apply(outlierIdentityQuery, outlierAddrs);
+  outlierIdentityCollection = await queryExpect(
+    outlierIdentityQuery,
+    outlierAddrs
+  );
+}
+
+/**
+ * Cross-check: peoples/outlier identity queries must reject each other's
+ * collections.
+ */
+function test_query_identities_by_kind_and_value_nonmatches() {
+  verify_nonMatches(
+    [peoplesIdentityQuery, outlierIdentityQuery],
+    [peoplesIdentityCollection, outlierIdentityCollection]
+  );
+}
+
+/**
+ * Guard that the harness globals were wired up before any real test runs.
+ */
+function test_sanity_test_environment() {
+  Assert.ok(msgGen, "Sanity that msgGen is set.");
+  Assert.ok(messageInjection, "Sanity that messageInjection is set.");
+}
+
+/**
+ * The ordered test list shared by the concrete query-test files.
+ * NOTE(review): the by_conversation and by_folder entries appear twice;
+ * presumably the second run exercises the already-cached/repeat-query path —
+ * TODO confirm before "deduplicating".
+ */
+var base_query_messages_tests = [
+  test_sanity_test_environment,
+  function pre_setup_populate() {
+    pre_setup_populate_hook();
+  },
+  setup_populate,
+  function post_setup_populate() {
+    post_setup_populate_hook();
+  },
+  test_query_messages_by_conversation,
+  test_query_messages_by_conversation,
+  test_query_messages_by_conversation_nonmatches,
+  test_query_messages_by_folder,
+  test_query_messages_by_folder,
+  test_query_messages_by_folder_nonmatches,
+  test_get_message_for_header,
+  test_get_messages_for_headers,
+  // Need to do the identity and contact lookups so we can have their results
+  // for the other message-related queries.
+  test_query_identities_for_peoples,
+  test_query_identities_for_outliers,
+  test_query_identities_by_kind_and_value_nonmatches,
+  // Back to messages!
+  test_query_messages_by_identity_peoples,
+  test_query_messages_by_identity_outlier,
+  test_query_messages_by_identity_nonmatches,
+  test_query_messages_by_date,
+  test_query_messages_by_date_nonmatches,
+  // Fulltext
+  test_query_messages_by_subject_text,
+  test_query_messages_by_body_text,
+  test_query_messages_by_attachment_names,
+  test_query_messages_by_authorMatches_name,
+  test_query_messages_by_authorMatches_email,
+  test_query_messages_by_recipients_name,
+  test_query_messages_by_recipients_email,
+  // Like
+  test_query_contacts_by_name,
+  test_query_contacts_by_name_nonmatch,
+];
diff --git a/comm/mailnews/db/gloda/test/unit/head_gloda.js b/comm/mailnews/db/gloda/test/unit/head_gloda.js
new file mode 100644
index 0000000000..fb8edbd24e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/head_gloda.js
@@ -0,0 +1,19 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { mailTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MailTestUtils.jsm"
+);
+
+// Ensure the profile directory is set up
+do_get_profile();
+
+var gDEPTH = "../../../../../";
+
+registerCleanupFunction(function () {
+ load(gDEPTH + "mailnews/resources/mailShutdown.js");
+});
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
new file mode 100644
index 0000000000..e8234f1a97
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaQueryHelper.jsm
@@ -0,0 +1,431 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = ["queryExpect", "sqlExpectCount", "sqlRun"];
+
+/*
+ * This file provides gloda query helpers for the test infrastructure.
+ */
+
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.queryHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * Per-noun default extractor pairs, keyed by noun id. Index 0 extracts a
+ * comparison string from the gloda result object; index 1 extracts the
+ * matching string from the caller's expected value. queryExpect() falls back
+ * to these when no explicit extractors are supplied.
+ */
+var _defaultExpectationExtractors = {};
+_defaultExpectationExtractors[GlodaConstants.NOUN_MESSAGE] = [
+  function expectExtract_message_gloda(aGlodaMessage) {
+    return aGlodaMessage.headerMessageID;
+  },
+  function expectExtract_message_synth(aSynthMessage) {
+    return aSynthMessage.messageId;
+  },
+];
+_defaultExpectationExtractors[GlodaConstants.NOUN_CONTACT] = [
+  function expectExtract_contact_gloda(aGlodaContact) {
+    return aGlodaContact.name;
+  },
+  function expectExtract_contact_name(aName) {
+    return aName;
+  },
+];
+_defaultExpectationExtractors[GlodaConstants.NOUN_IDENTITY] = [
+  function expectExtract_identity_gloda(aGlodaIdentity) {
+    return aGlodaIdentity.value;
+  },
+  function expectExtract_identity_address(aAddress) {
+    return aAddress;
+  },
+];
+
+// Last-resort extractor for nouns without a registered default.
+function expectExtract_default_toString(aThing) {
+  return aThing.toString();
+}
+
+/**
+ * Gloda collection listener that checks incoming items off against an
+ * expected set and settles an internal promise: resolved from
+ * onQueryCompleted when the expected set has been fully consumed, rejected
+ * on the first unexpected item, extractor failure, order violation, or
+ * leftover expectation. The promise is exposed via the |promise| getter and
+ * awaited by queryExpect.
+ *
+ * @see queryExpect for info on what we do.
+ */
+class QueryExpectationListener {
+  constructor(
+    aExpectedSet,
+    aGlodaExtractor,
+    aOrderVerifier,
+    aCallerStackFrame
+  ) {
+    // Object used as a set: extracted-string -> expected item.
+    this.expectedSet = aExpectedSet;
+    this.glodaExtractor = aGlodaExtractor;
+    this.orderVerifier = aOrderVerifier;
+    this.completed = false;
+    this.callerStackFrame = aCallerStackFrame;
+    // Track our current 'index' in the results for the (optional) order verifier,
+    // but also so we can provide slightly more useful debug output.
+    this.nextIndex = 0;
+
+    this._promise = new Promise((resolve, reject) => {
+      this._resolve = resolve;
+      this._reject = reject;
+    });
+  }
+  onItemsAdded(aItems, aCollection) {
+    log.debug("QueryExpectationListener onItemsAdded received.");
+    for (let item of aItems) {
+      let glodaStringRep;
+      try {
+        glodaStringRep = this.glodaExtractor(item);
+      } catch (ex) {
+        this._reject(
+          new Error(
+            "Gloda extractor threw during query expectation.\n" +
+              "Item:\n" +
+              item +
+              "\nException:\n" +
+              ex
+          )
+        );
+        return; // We don't have to continue for more checks.
+      }
+
+      // Make sure we were expecting this guy.
+      // (delete marks the expectation as satisfied; leftovers are reported
+      // in onQueryCompleted.)
+      if (glodaStringRep in this.expectedSet) {
+        delete this.expectedSet[glodaStringRep];
+      } else {
+        this._reject(
+          new Error(
+            "Query returned unexpected result!\n" +
+              "Item:\n" +
+              item +
+              "\nExpected set:\n" +
+              this.expectedSet +
+              "\nCaller:\n" +
+              this.callerStackFrame
+          )
+        );
+        return; // We don't have to continue for more checks.
+      }
+
+      if (this.orderVerifier) {
+        try {
+          this.orderVerifier(this.nextIndex, item, aCollection);
+        } catch (ex) {
+          // If the order was wrong, we could probably go for an output of what
+          // we actually got...
+          dump("Order Problem detected. Dump of data:\n");
+          for (let [iThing, thing] of aItems.entries()) {
+            dump(
+              iThing +
+                ": " +
+                thing +
+                (aCollection.stashedColumns
+                  ? ". " + aCollection.stashedColumns[thing.id].join(", ")
+                  : "") +
+                "\n"
+            );
+          }
+          this._reject(ex);
+          return; // We don't have to continue for more checks.
+        }
+      }
+      this.nextIndex++;
+
+      // Make sure the query's test method agrees with the database about this.
+      // NOTE(review): unlike the branches above, this reject has no early
+      // return; the loop keeps going. Harmless since a promise rejection is
+      // sticky, but inconsistent — confirm before tidying.
+      if (!aCollection.query.test(item)) {
+        this._reject(
+          new Error(
+            "Query test returned false when it should have been true on.\n" +
+              "Extracted:\n" +
+              glodaStringRep +
+              "\nItem:\n" +
+              item
+          )
+        );
+      }
+    }
+  }
+  onItemsModified(aItems, aCollection) {
+    log.debug(
+      "QueryExpectationListener onItemsModified received. Nothing done."
+    );
+  }
+  onItemsRemoved(aItems, aCollection) {
+    log.debug(
+      "QueryExpectationListener onItemsRemoved received. Nothing done."
+    );
+  }
+  onQueryCompleted(aCollection) {
+    log.debug("QueryExpectationListener onQueryCompleted received.");
+    // We may continue to match newly added items if we leave our query as it
+    // is, so let's become explicit to avoid related troubles.
+    aCollection.becomeExplicit();
+
+    // `expectedSet` should now be empty.
+    // (Only the first leftover is reported; the early return below is
+    // intentional.)
+    for (let key in this.expectedSet) {
+      let value = this.expectedSet[key];
+      this._reject(
+        new Error(
+          "Query should have returned:\n" +
+            key +
+            " (" +
+            value +
+            ").\n" +
+            "But " +
+            this.nextIndex +
+            " was seen."
+        )
+      );
+      return; // We don't have to continue for more checks.
+    }
+
+    // If no error is thrown then we're fine here.
+    this._resolve();
+  }
+
+  get promise() {
+    return this._promise;
+  }
+}
+
+/**
+ * Execute the given query, verifying that the result set contains exactly the
+ * contents of the expected set; no more, no less. Since we expect that the
+ * query will result in gloda objects, but your expectations will not be posed
+ * in terms of gloda objects (though they could be), we rely on extractor
+ * functions to take the gloda result objects and the expected result objects
+ * into the same string.
+ * If you don't provide extractor functions, we will use our defaults (based on
+ * the query noun type) if available, or assume that calling toString is
+ * sufficient.
+ *
+ * @param aQuery Either a query to execute, or a dict with the following keys:
+ * - queryFunc: The function to call that returns a function.
+ * - queryThis: The 'this' to use for the invocation of queryFunc.
+ * - args: A list (possibly empty) or arguments to precede the traditional
+ * arguments to query.getCollection.
+ * - nounId: The (numeric) noun id of the noun type expected to be returned.
+ * @param aExpectedSet The list of expected results from the query where each
+ * item is suitable for extraction using aExpectedExtractor. We have a soft
+ * spot for SyntheticMessageSets and automatically unbox them.
+ * @param aGlodaExtractor The extractor function to take an instance of the
+ * gloda representation and return a string for comparison/equivalence
+ * against that returned by the expected extractor (against the input
+ * instance in aExpectedSet.) The value returned must be unique for all
+ * of the expected gloda representations of the expected set. If omitted,
+ * the default extractor for the gloda noun type is used. If no default
+ * extractor exists, toString is called on the item.
+ * @param aExpectedExtractor The extractor function to take an instance from the
+ * values in the aExpectedSet and return a string for comparison/equivalence
+ * against that returned by the gloda extractor. The value returned must
+ * be unique for all of the values in the expected set. If omitted, the
+ * default extractor for the presumed input type based on the gloda noun
+ * type used for the query is used, failing over to toString.
+ * @param aOrderVerifier Optional function to verify the order the results are
+ * received in. Function signature should be of the form (aZeroBasedIndex,
+ * aItem, aCollectionResultIsFor).
+ */
+async function queryExpect(
+ aQuery,
+ aExpectedSet,
+ aGlodaExtractor,
+ aExpectedExtractor,
+ aOrderVerifier
+) {
+ if (aQuery.test) {
+ aQuery = {
+ queryFunc: aQuery.getCollection,
+ queryThis: aQuery,
+ args: [],
+ nounId: aQuery._nounDef.id,
+ };
+ }
+
+ if ("synMessages" in aExpectedSet) {
+ aExpectedSet = aExpectedSet.synMessages;
+ }
+
+ // - set extractor functions to defaults if omitted
+ if (aGlodaExtractor == null) {
+ if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
+ aGlodaExtractor = _defaultExpectationExtractors[aQuery.nounId][0];
+ } else {
+ aGlodaExtractor = expectExtract_default_toString;
+ }
+ }
+ if (aExpectedExtractor == null) {
+ if (_defaultExpectationExtractors[aQuery.nounId] !== undefined) {
+ aExpectedExtractor = _defaultExpectationExtractors[aQuery.nounId][1];
+ } else {
+ aExpectedExtractor = expectExtract_default_toString;
+ }
+ }
+
+ // - build the expected set
+ let expectedSet = {};
+ for (let item of aExpectedSet) {
+ try {
+ expectedSet[aExpectedExtractor(item)] = item;
+ } catch (ex) {
+ throw new Error(
+ "Expected extractor threw during query expectation for item:\n" +
+ item +
+ "\nException:\n" +
+ ex
+ );
+ }
+ }
+
+ // - create the listener...
+ let listener = new QueryExpectationListener(
+ expectedSet,
+ aGlodaExtractor,
+ aOrderVerifier,
+ Components.stack.caller
+ );
+ aQuery.args.push(listener);
+ let queryValue = aQuery.queryFunc.apply(aQuery.queryThis, aQuery.args);
+ // Wait for the QueryListener to finish.
+ await listener.promise;
+ return queryValue;
+}
+
+/**
+ * Asynchronously run a SQL statement against the gloda database. This can grow
+ * binding logic and data returning as needed.
+ *
+ * We run the statement asynchronously to get a consistent view of the database.
+ *
+ * @param sql The SQL string to execute (no parameter binding supported yet).
+ * @returns {Promise} Resolves with an array of mozIStorageRow objects, or
+ *     null when the statement produced no result rows; rejects on SQL error.
+ */
+async function sqlRun(sql) {
+  let conn = GlodaDatastore.asyncConnection;
+  let stmt = conn.createAsyncStatement(sql);
+  let rows = null;
+
+  let promiseResolve;
+  let promiseReject;
+  let promise = new Promise((resolve, reject) => {
+    promiseResolve = resolve;
+    promiseReject = reject;
+  });
+  // Running SQL.
+  stmt.executeAsync({
+    handleResult(aResultSet) {
+      // Lazily create the array so callers can distinguish "no result rows"
+      // (null) from "zero rows in a result set" ([]).
+      if (!rows) {
+        rows = [];
+      }
+      let row;
+      while ((row = aResultSet.getNextRow())) {
+        rows.push(row);
+      }
+    },
+    handleError(aError) {
+      promiseReject(
+        new Error("SQL error!\nResult:\n" + aError + "\nSQL:\n" + sql)
+      );
+    },
+    handleCompletion() {
+      promiseResolve(rows);
+    },
+  });
+  // Safe to finalize immediately; execution already holds what it needs.
+  stmt.finalize();
+  return promise;
+}
+
+/**
+ * Run an (async) SQL statement against the gloda database. The statement
+ * should be a SELECT COUNT; we check the count against aExpectedCount.
+ * Any additional arguments are positionally bound to the statement.
+ *
+ * We run the statement asynchronously to get a consistent view of the database.
+ *
+ * @param aExpectedCount The count the first column of the first row must equal.
+ * @param aSQLString The SELECT COUNT statement, with ? placeholders.
+ * @param params Values bound positionally via GlodaDatastore._bindVariant.
+ * @returns {Promise} Rejects (with caller stack info) on SQL error, missing
+ *     row, or count mismatch.
+ */
+async function sqlExpectCount(aExpectedCount, aSQLString, ...params) {
+  let conn = GlodaDatastore.asyncConnection;
+  let stmt = conn.createStatement(aSQLString);
+
+  for (let iArg = 0; iArg < params.length; iArg++) {
+    GlodaDatastore._bindVariant(stmt, iArg, params[iArg]);
+  }
+
+  let desc = [aSQLString, ...params];
+  // Running SQL count.
+  let listener = new SqlExpectationListener(
+    aExpectedCount,
+    desc,
+    Components.stack.caller
+  );
+  stmt.executeAsync(listener);
+  // We don't need the statement anymore.
+  stmt.finalize();
+
+  await listener.promise;
+}
+
+class SqlExpectationListener {
+ constructor(aExpectedCount, aDesc, aCallerStackFrame) {
+ this.actualCount = null;
+ this.expectedCount = aExpectedCount;
+ this.sqlDesc = aDesc;
+ this.callerStackFrame = aCallerStackFrame;
+
+ this._promise = new Promise((resolve, reject) => {
+ this._resolve = resolve;
+ this._reject = reject;
+ });
+ }
+ handleResult(aResultSet) {
+ let row = aResultSet.getNextRow();
+ if (!row) {
+ this._reject(
+ new Error(
+ "No result row returned from caller:\n" +
+ this.callerStackFrame +
+ "\nSQL:\n" +
+ this.sqlDesc
+ )
+ );
+ return; // We don't have to continue for more checks.
+ }
+ this.actualCount = row.getInt64(0);
+ }
+
+ handleError(aError) {
+ this._reject(
+ new Error(
+ "SQL error from caller:\n" +
+ this.callerStackFrame +
+ "\nResult:\n" +
+ aError +
+ "\nSQL:\n" +
+ this.sqlDesc
+ )
+ );
+ }
+
+ handleCompletion(aReason) {
+ if (this.actualCount != this.expectedCount) {
+ this._reject(
+ new Error(
+ "Actual count of " +
+ this.actualCount +
+ "does not match expected count of:\n" +
+ this.expectedCount +
+ "\nFrom caller:" +
+ this.callerStackFrame +
+ "\nSQL:\n" +
+ this.sqlDesc
+ )
+ );
+ return; // We don't have to continue for more checks.
+ }
+ this._resolve();
+ }
+
+ get promise() {
+ return this._promise;
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
new file mode 100644
index 0000000000..a4c092400b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelper.jsm
@@ -0,0 +1,847 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file provides gloda testing infrastructure.
+ *
+ * A few words about how tests should expect to interact with indexing:
+ *
+ * By default, we enable only event-driven indexing with an infinite work queue
+ * length. This means that all messages will be queued for indexing as they
+ * are added or modified. You should await to |waitForGlodaIndexer| to wait
+ * until the indexer completes. If you want to assert that certain messages
+ * will have been indexed during that pass, you can pass them as arguments to
+ * |assertExpectedMessagesIndexed|.
+ * There is no need to tell us to expect the messages to be indexed prior to the
+ * waiting as long as nothing spins the event loop after you perform the action
+ * that triggers indexing. None of our existing xpcshell tests do this, but it
+ * is part of the mozmill idiom for its waiting mechanism, so be sure to not
+ * perform a mozmill wait without first telling us to expect the messages.
+ */
+
+const EXPORTED_SYMBOLS = [
+ "assertExpectedMessagesIndexed",
+ "glodaTestHelperInitialize",
+ "nukeGlodaCachesAndCollections",
+ "prepareIndexerForTesting",
+ "waitForGlodaIndexer",
+];
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.testHelper",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+var indexMessageState;
+
+/**
+ * Create a 'me' identity of "me@localhost" for the benefit of Gloda. At the
+ * time of this writing, Gloda only initializes Gloda.myIdentities and
+ * Gloda.myContact at startup with no event-driven updates. As such, this
+ * function needs to be called prior to gloda startup.
+ */
+function createMeIdentity() {
+ let identity = MailServices.accounts.createIdentity;
+ identity.email = "me@localhost";
+ identity.fullName = "Me";
+}
+// And run it now.
+createMeIdentity();
+
+// Set the gloda prefs.
+// "yes" to indexing.
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// "no" to a sweep we don't control.
+Services.prefs.setBoolPref(
+  "mailnews.database.global.indexer.perform_initial_sweep",
+  false
+);
+
+// Environment variables that, when set, are mirrored into the listed pref so
+// developers can enable debugging behavior from the command line.
+var ENVIRON_MAPPINGS = [
+  {
+    envVar: "GLODA_DATASTORE_EXPLAIN_TO_PATH",
+    prefName: "mailnews.database.global.datastore.explainToPath",
+  },
+];
+
+// Propagate environment variables to prefs as appropriate:
+for (let { envVar, prefName } of ENVIRON_MAPPINGS) {
+  if (Services.env.exists(envVar)) {
+    Services.prefs.setCharPref(prefName, Services.env.get(envVar));
+  }
+}
+
+/**
+ * Side note:
+ * Keep them in the global scope so that a Cu.forceGC() call won't purge them.
+ */
+var collectionListener;
+
+/**
+ * Registers MessageInjection listeners and Gloda listeners for our tests.
+ *
+ * @param {MessageInjection} messageInjection Instance of MessageInjection
+ * to register Events to.
+ */
+function glodaTestHelperInitialize(messageInjection) {
+ // Initialize the message state if we are dealing with messages. At some
+ // point we probably want to just completely generalize the indexing state.
+ // That point is likely when our testing infrastructure needs the support
+ // provided by `indexMessageState` for things other than messages.
+ indexMessageState = new IndexMessageState();
+
+ collectionListener = new GlodaCollectionListener();
+ new TestAttributeProvider();
+ new MsgsClassifiedListener();
+
+ // Add a hook that makes folders not filthy when we first see them.
+ messageInjection.registerMessageInjectionListener({
+ /**
+ * By default all folders start out filthy. This is great in the real world
+ * but I went and wrote all the unit tests without entirely thinking about
+ * how this affected said unit tests. So we add a listener so that we can
+ * force the folders to be clean.
+ * This is okay and safe because messageInjection always creates the folders
+ * without any messages in them.
+ */
+ onRealFolderCreated(aRealFolder) {
+ log.debug(
+ `onRealFolderCreated through MessageInjection received. ` +
+ `Make folder: ${aRealFolder.name} clean for Gloda.`
+ );
+ let glodaFolder = Gloda.getFolderForFolder(aRealFolder);
+ glodaFolder._downgradeDirtyStatus(glodaFolder.kFolderClean);
+ },
+
+ /**
+ * Make waitForGlodaIndexer know that it should wait for a msgsClassified
+ * event whenever messages have been injected, at least if event-driven
+ * indexing is enabled.
+ */
+ onInjectingMessages() {
+ log.debug(
+ "onInjectingMessages through MessageInjection received. Pushing to intrestestingEvents."
+ );
+ indexMessageState.interestingEvents.push("msgsClassified");
+ },
+
+ /**
+ * This basically translates to "we are triggering an IMAP move" and has
+ * the ramification that we should expect a msgsClassified event because
+ * the destination will see the header get added at some point.
+ */
+ onMovingMessagesWithoutDestHeaders() {
+ log.debug(
+ "onMovingMessagesWithoutDestHeaders through MessageInjection received. Pushing to intrestestingEvents."
+ );
+ indexMessageState.interestingEvents.push("msgsClassified");
+ },
+ });
+ log.debug("glodaTestHelperInitialize finished.");
+}
+
+/**
+ * Holds the per-wait expectations and counters for message indexing.
+ * Instantiated once by glodaTestHelperInitialize; the various expected*()
+ * accessors read from a GlodaIndexerData that tests populate via
+ * applyGlodaIndexerData / assertExpectedMessagesIndexed and that resetData()
+ * replaces wholesale between waits.
+ */
+class IndexMessageState {
+  data = new GlodaIndexerData();
+
+  constructor() {
+    prepareIndexerForTesting();
+    // Continue the preparing by assigning the hook recover and hook cleanup.
+    // Note: the hooks are installed unbound; see the scoping warnings below.
+    GlodaIndexer._unitTestHookRecover = this._testHookRecover;
+    GlodaIndexer._unitTestHookCleanup = this._testHookCleanup;
+  }
+
+  resetData() {
+    this.data = new GlodaIndexerData();
+  }
+
+  // The synthetic message sets passed in to |assertExpectedMessagesIndexed|.
+  synMessageSets = [];
+  // The user-specified accumulate-style verification function.
+  verifier() {
+    return this.data.data.verifier;
+  }
+  // Should we augment the synthetic sets with gloda message info?
+  augmentSynSets() {
+    return this.data.data.augment;
+  }
+  deletionSynSets() {
+    return this.data.data.deleted;
+  }
+
+  // Expected value of |_workerRecoveredCount| at assertion time.
+  expectedWorkerRecoveredCount() {
+    return this.data.data.recovered;
+  }
+  // Expected value of |_workerFailedToRecoverCount| at assertion time.
+  expectedFailedToRecoverCount() {
+    return this.data.data.failedToRecover;
+  }
+  // Expected value of |_workerCleanedUpCount| at assertion time.
+  expectedCleanedUpCount() {
+    return this.data.data.cleanedUp;
+  }
+  // Expected value of |_workerHadNoCleanUpCount| at assertion time.
+  expectedHadNoCleanUpCount() {
+    return this.data.data.hadNoCleanUp;
+  }
+  /**
+   * The number of messages that were fully (re)indexed using
+   * Gloda.grokNounItem.
+   */
+  _numFullIndexed = 0;
+  // Expected value of |_numFullIndexed| at assertion time.
+  expectedNumFullIndexed() {
+    return this.data.data.fullyIndexed;
+  }
+
+  // The number of times a worker had a recover helper and it recovered.
+  _workerRecoveredCount = 0;
+  // The number of times a worker had a recover helper and it did not recover.
+  _workerFailedToRecoverCount = 0;
+  // The number of times a worker had a cleanup helper and it cleaned up.
+  _workerCleanedUpCount = 0;
+  // The number of times a worker had no cleanup helper but there was a cleanup.
+  _workerHadNoCleanUpCount = 0;
+
+  /**
+   * Beware this scoping for this class is lost where _testHookRecover is used.
+   * (The indexer invokes the hook unbound, so the body deliberately goes
+   * through the module-level |indexMessageState| rather than |this|.)
+   *
+   * @param aRecoverResult
+   * @param aOriginEx
+   * @param aActiveJob
+   * @param aCallbackHandle
+   */
+  _testHookRecover(aRecoverResult, aOriginEx, aActiveJob, aCallbackHandle) {
+    log.debug(
+      "indexer recovery hook fired" +
+        "\nrecover result:\n" +
+        aRecoverResult +
+        "\noriginating exception:\n" +
+        aOriginEx +
+        "\nactive job:\n" +
+        aActiveJob +
+        "\ncallbackHandle:\n" +
+        indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
+    );
+    if (aRecoverResult) {
+      indexMessageState._workerRecoveredCount++;
+    } else {
+      indexMessageState._workerFailedToRecoverCount++;
+    }
+  }
+
+  /**
+   * Beware this scoping for this class is lost where _testHookCleanup is used.
+   * (Same unbound-invocation caveat as _testHookRecover.)
+   *
+   * @param aHadCleanupFunc
+   * @param aOriginEx
+   * @param aActiveJob
+   * @param aCallbackHandle
+   */
+  _testHookCleanup(aHadCleanupFunc, aOriginEx, aActiveJob, aCallbackHandle) {
+    log.debug(
+      "indexer cleanup hook fired" +
+        "\nhad cleanup?\n" +
+        aHadCleanupFunc +
+        "\noriginating exception:\n" +
+        aOriginEx +
+        "\nactive job:\n" +
+        aActiveJob +
+        "\ncallbackHandle\n" +
+        indexMessageState._jsonifyCallbackHandleState(aCallbackHandle)
+    );
+    if (aHadCleanupFunc) {
+      indexMessageState._workerCleanedUpCount++;
+    } else {
+      indexMessageState._workerHadNoCleanUpCount++;
+    }
+  }
+  // Compact, loggable snapshot of a callback handle's generator stacks.
+  _jsonifyCallbackHandleState(aCallbackHandle) {
+    return {
+      _stringRep: aCallbackHandle.activeStack.length + " active generators",
+      activeStackLength: aCallbackHandle.activeStack.length,
+      contextStack: aCallbackHandle.contextStack,
+    };
+  }
+
+  /**
+   * The gloda messages indexed since the last call to |waitForGlodaIndexer|.
+   */
+  _glodaMessagesByMessageId = [];
+  _glodaDeletionsByMessageId = [];
+
+  _numItemsAdded = 0;
+
+  applyGlodaIndexerData(data) {
+    this.data.applyData(data);
+  }
+
+  /**
+   * A list of events that we need to see before we allow ourselves to perform
+   * the indexer check. For example, if "msgsClassified" is in here, it means
+   * that whether the indexer is active or not is irrelevant until we have
+   * seen that msgsClassified event.
+   */
+  interestingEvents = [];
+}
+
+function prepareIndexerForTesting() {
+ if (!GlodaIndexer.enabled) {
+ throw new Error(
+ "The gloda indexer is somehow not enabled. This is problematic."
+ );
+ }
+ // Make the indexer be more verbose about indexing for us.
+ GlodaIndexer._unitTestSuperVerbose = true;
+ GlodaMsgIndexer._unitTestSuperVerbose = true;
+ // Lobotomize the adaptive indexer.
+ // The indexer doesn't need to worry about load; zero his rescheduling time.
+ GlodaIndexer._INDEX_INTERVAL = 0;
+ // The indexer already registered for the idle service; we must remove this
+ // or "idle" notifications will still get sent via the observer mechanism.
+ let realIdleService = GlodaIndexer._idleService;
+ realIdleService.removeIdleObserver(
+ GlodaIndexer,
+ GlodaIndexer._indexIdleThresholdSecs
+ );
+ // Pretend we are always idle.
+ GlodaIndexer._idleService = {
+ idleTime: 1000,
+ addIdleObserver() {
+ // There is no actual need to register with the idle observer, and if
+ // we do, the stupid "idle" notification will trigger commits.
+ },
+ removeIdleObserver() {},
+ };
+ // We want the event-driven indexer to always handle indexing and never spill
+ // to an indexing sweep unless a test intentionally does so.
+ GlodaIndexer._indexMaxEventQueueMessages = 10000;
+ // Lobotomize the adaptive indexer's constants.
+ GlodaIndexer._cpuTargetIndexTime = 10000000;
+ GlodaIndexer._CPU_TARGET_INDEX_TIME_ACTIVE = 10000000;
+ GlodaIndexer._CPU_TARGET_INDEX_TIME_IDLE = 10000000;
+ GlodaIndexer._CPU_IS_BUSY_TIME = 10000000;
+ GlodaIndexer._PAUSE_LATE_IS_BUSY_TIME = 10000000;
+
+ delete GlodaIndexer._indexTokens;
+ GlodaIndexer.__defineGetter__("_indexTokens", function () {
+ return GlodaIndexer._CPU_MAX_TOKENS_PER_BATCH;
+ });
+ GlodaIndexer.__defineSetter__("_indexTokens", function () {});
+
+  // This includes making commits only happen when the unit tests explicitly
+ // tell them to.
+ GlodaIndexer._MINIMUM_COMMIT_TIME = 10000000;
+ GlodaIndexer._MAXIMUM_COMMIT_TIME = 10000000;
+}
+
+class GlodaIndexerData {
+ data = {
+ verifier: null,
+ augment: false,
+ deleted: [],
+ fullyIndexed: null,
+
+ // Things should not be recovering or failing and cleaning up unless the test
+ // is expecting it.
+ recovered: 0,
+ failedToRecover: 0,
+ cleanedUp: 0,
+ hadNoCleanUp: 0,
+ };
+
+ /**
+ * Applies data shallow.
+ * Only the first level of keys are applied and replaced complete
+ * if given via param data. No deep merge.
+ *
+ * @param {*} data
+ */
+ applyData(data) {
+ this.data = {
+ ...this.data,
+ ...data,
+ };
+ }
+}
+
+/**
+ * Note that if the indexer is not currently active we assume it has already
+ * completed; we do not entertain the possibility that it has not yet started.
+ * Since the indexer is 'active' as soon as it sees an event, this does mean
+ * that you need to wait to make sure the indexing event has happened before
+ * calling us. This is reasonable.
+ */
+async function waitForGlodaIndexer() {
+ let eventsPending = TestUtils.waitForCondition(() => {
+ if (indexMessageState.interestingEvents.length > 1) {
+ // Events still pending. See msgClassified event and
+ // messageInjection.registerMessageInjectionListener.
+ return false;
+ }
+ // Events finished.
+ return true;
+ });
+ let indexerRunning = TestUtils.waitForCondition(() => {
+ if (GlodaIndexer.indexing) {
+ // Still indexing.
+ return false;
+ }
+ // Indexing finished.
+ return true;
+ });
+
+ log.debug(
+ "waitForGlodaIndexer waiting for intrestingEvents and GlodaIndexer.indexing."
+ );
+
+ // If we are waiting on certain events to occur first, block on those.
+ await Promise.all([eventsPending, indexerRunning]);
+}
+
+/**
+ * Each time a msgClassified Event is fired and it is present
+ * in IndexMessageState.interestingEvents it will be removed.
+ */
+class MsgsClassifiedListener {
+ /**
+ * Events pending for the tests.
+ * (we want this to happen after gloda registers its own listener, and it
+ * does.)
+ */
+ constructor() {
+ MailServices.mfn.addListener(
+ this,
+ Ci.nsIMsgFolderNotificationService.msgsClassified
+ );
+ }
+ /**
+ * If this was an expected interesting event, remove it from the list.
+ * If an event happens that we did not expect, it does not matter. We know
+ * this because we add events we care about to interestingEvents before they
+ * can possibly be fired.
+ */
+ msgsClassified(aMsgHdrs, aJunkClassified, aTraitClassified) {
+ log.debug("MsgsClassifiedListener msgsClassified received.");
+ let idx = indexMessageState.interestingEvents.indexOf("msgsClassified");
+ if (idx != -1) {
+ log.debug("Remove intrestingEvent through msgsClassified.");
+ // Remove the interesting Event as we received it here.
+ indexMessageState.interestingEvents.splice(idx, 1);
+ }
+ }
+}
+
+/**
+ * This AttributeProvider helps us testing Gloda.
+ * With the `process` method the Collections will be noticed
+ * through listeners.
+ * (onItemsAdded, onItemsModified, onItemsRemoved, onQueryComplete)
+ */
+class TestAttributeProvider {
+ providerName = "glodaTestHelper:fakeProvider";
+ constructor() {
+ // Register us with gloda as an attribute provider so that we can
+ // distinguish between fully reindexed messages and fastpath indexed
+ // messages.
+ Gloda._attrProviderOrderByNoun[GlodaConstants.NOUN_MESSAGE].push({
+ providerName: this.providerName,
+ process: this.process,
+ });
+ }
+ /**
+ * Fake attribute provider processing function so we can distinguish
+ * between fully reindexed messages and fast-path modified messages.
+ * Process has to be invoked for the GlodaCollectionListener
+ */
+ *process(aItem, aRawReps, aIsConceptuallyNew, aCallbackHandle) {
+ indexMessageState._numFullIndexed++;
+
+ yield GlodaConstants.kWorkDone;
+ }
+}
+
+/**
+ * This class tracks a GlodaCollection (created by Gloda._wildcardCollection).
+ * The listeners for this collection which will notify our IndexMessageState
+ * are defined here.
+ */
+class GlodaCollectionListener {
+ // Our catch-all message collection that nets us all messages passing by.
+ catchAllCollection = null;
+ constructor() {
+ this.catchAllCollection = Gloda._wildcardCollection(
+ GlodaConstants.NOUN_MESSAGE
+ );
+ this.catchAllCollection.listener = this;
+ }
+ /*
+ * Our catch-all collection listener. Any time a new message gets indexed,
+ * we should receive an onItemsAdded call. Any time an existing message
+ * gets reindexed, we should receive an onItemsModified call. Any time an
+ * existing message actually gets purged from the system, we should receive
+ * an onItemsRemoved call.
+ */
+ onItemsAdded(aItems) {
+ log.debug("GlodaCollectionListener onItemsAdded received.");
+ for (let item of aItems) {
+ if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
+ throw new Error(
+ "Gloda message" +
+ item.folderMessage +
+ "already indexed once since the last waitForGlodaIndexer call!"
+ );
+ }
+ log.debug(
+ "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
+ );
+ indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
+ }
+
+    // Simulate some other activity clearing out the current folder's
+ // cached database, which used to kill the indexer's enumerator.
+ if (++indexMessageState._numItemsAdded == 3) {
+ log.debug("GlodaCollectionListener simulate other activity.");
+ GlodaMsgIndexer._indexingFolder.msgDatabase = null;
+ }
+ }
+
+ onItemsModified(aItems) {
+ log.debug("GlodaCollectionListener onItemsModified received.");
+ for (let item of aItems) {
+ if (item.headerMessageID in indexMessageState._glodaMessagesByMessageId) {
+ throw new Error(
+ "Gloda message" +
+ item +
+ "already indexed once since the last waitForGlodaIndexer call!"
+ );
+ }
+ log.debug(
+ "GlodaCollectionListener save item to indexMessageState._glodaMessagesByMessageId."
+ );
+ indexMessageState._glodaMessagesByMessageId[item.headerMessageID] = item;
+ }
+ }
+
+ onItemsRemoved(aItems) {
+ log.debug("GlodaCollectionListener onItemsRemoved received.");
+ for (let item of aItems) {
+ if (
+ item.headerMessageID in indexMessageState._glodaDeletionsByMessageId
+ ) {
+ throw new Error(
+ "Gloda message " +
+ item +
+ "already deleted once since the last waitForGlodaIndexer call!"
+ );
+ }
+ log.debug(
+ "GlodaCollectionListener save item to indexMessageState._glodaDeletionsByMessageId."
+ );
+ indexMessageState._glodaDeletionsByMessageId[item.headerMessageID] = item;
+ }
+ }
+ onQueryComplete(aCollection) {
+ log.debug(
+ "GlodaCollectionListener onQueryComplete received. Nothing done."
+ );
+ }
+}
+
+/**
+ * Assert that the set of messages indexed is exactly the set passed in.
+ * If a verification function is provided, use it on a per-message basis
+ * to make sure the resulting gloda message looks like it should given the
+ * synthetic message.
+ *
+ * Throws Errors if something is not as expected and always returns [true, string]
+ * for `Assert.ok` in your tests. This ensures proper testing output.
+ *
+ * @param {SyntheticMessage[]} aSynMessageSets A list of SyntheticMessageSets
+ * containing exactly the messages we should expect to see.
+ * @param [aConfig.verifier] The function to call to verify that the indexing
+ * had the desired result. Takes arguments aSynthMessage (the synthetic
+ * message just indexed), aGlodaMessage (the gloda message representation of
+ * the indexed message), and aPreviousResult (the value last returned by the
+ * verifier function for this given set of messages, or undefined if it is
+ * the first message.)
+ * @param [aConfig.augment=false] Should we augment the synthetic message sets
+ * with references to their corresponding gloda messages? The messages
+ * will show up in a 'glodaMessages' list on the syn set.
+ * @param {SyntheticMessageSet[]} [aConfig.deleted] A list of SyntheticMessageSets
+ * containing messages that should be recognized as deleted by the gloda
+ * indexer in this pass.
+ * @param [aConfig.fullyIndexed] A count of the number of messages we expect
+ * to observe being fully indexed. This is relevant because in the case
+ * of message moves, gloda may generate an onItemsModified notification but
+ * not reindex the message. This attribute allows the tests to distinguish
+ * between the two cases.
+ * @returns {[true, string]}
+ */
+function assertExpectedMessagesIndexed(aSynMessageSets, aConfig) {
+ indexMessageState.synMessageSets = aSynMessageSets;
+
+ indexMessageState.applyGlodaIndexerData(aConfig);
+
+ // Check that we have a gloda message for every syn message and verify.
+ for (let msgSet of indexMessageState.synMessageSets) {
+ if (indexMessageState.augmentSynSets()) {
+ msgSet.glodaMessages = [];
+ }
+ for (let [iSynMsg, synMsg] of msgSet.synMessages.entries()) {
+ if (!(synMsg.messageId in indexMessageState._glodaMessagesByMessageId)) {
+ let msgHdr = msgSet.getMsgHdr(iSynMsg);
+ throw new Error(
+ "Header " +
+ msgHdr.messageId +
+ " in folder: " +
+ (msgHdr ? msgHdr.folder.name : "no header?") +
+ " should have been indexed."
+ );
+ }
+
+ let glodaMsg =
+ indexMessageState._glodaMessagesByMessageId[synMsg.messageId];
+ if (indexMessageState.augmentSynSets()) {
+ msgSet.glodaMessages.push(glodaMsg);
+ }
+
+ indexMessageState._glodaMessagesByMessageId[synMsg.messageId] = null;
+
+ let verifier = indexMessageState.verifier();
+ let previousValue = undefined;
+ if (verifier) {
+ try {
+          // Check whether a previous value has been present.
+ previousValue = verifier(synMsg, glodaMsg, previousValue);
+ } catch (ex) {
+ throw new Error(
+ "Verification failure: " +
+ synMsg +
+ " is not close enough to " +
+ glodaMsg +
+ "; basing this on exception: " +
+ ex
+ );
+ }
+ }
+ }
+ }
+
+ // Check that we don't have any extra gloda messages. (lacking syn msgs)
+ for (let messageId in indexMessageState._glodaMessagesByMessageId) {
+ let glodaMsg = indexMessageState._glodaMessagesByMessageId[messageId];
+ if (glodaMsg != null) {
+ throw new Error(
+ "Gloda message:\n" +
+ glodaMsg +
+ "\nShould not have been indexed.\n" +
+ "Source header:\n" +
+ glodaMsg.folderMessage
+ );
+ }
+ }
+
+ if (indexMessageState.deletionSynSets()) {
+ for (let msgSet of indexMessageState.deletionSynSets()) {
+ for (let synMsg of msgSet.synMessages) {
+ if (
+ !(synMsg.messageId in indexMessageState._glodaDeletionsByMessageId)
+ ) {
+ throw new Error(
+ "Synthetic message " + synMsg + " did not get deleted!"
+ );
+ }
+
+ indexMessageState._glodaDeletionsByMessageId[synMsg.messageId] = null;
+ }
+ }
+ }
+
+ // Check that we don't have unexpected deletions.
+ for (let messageId in indexMessageState._glodaDeletionsByMessageId) {
+ let glodaMsg = indexMessageState._glodaDeletionsByMessageId[messageId];
+ if (glodaMsg != null) {
+ throw new Error(
+ "Gloda message with message id " +
+ messageId +
+ " was " +
+ "unexpectedly deleted!"
+ );
+ }
+ }
+
+ if (
+ indexMessageState.expectedWorkerRecoveredCount() != null &&
+ indexMessageState.expectedWorkerRecoveredCount() !=
+ indexMessageState._workerRecoveredCount
+ ) {
+ throw new Error(
+ "Expected worker-recovered count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedWorkerRecoveredCount() +
+ "\nActual:\n" +
+ indexMessageState._workerRecoveredCount
+ );
+ }
+ if (
+ indexMessageState.expectedFailedToRecoverCount() != null &&
+ indexMessageState.expectedFailedToRecoverCount() !=
+ indexMessageState._workerFailedToRecoverCount
+ ) {
+ throw new Error(
+ "Expected worker-failed-to-recover count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedFailedToRecoverCount() +
+ "\nActual:\n" +
+ indexMessageState._workerFailedToRecoverCount
+ );
+ }
+ if (
+ indexMessageState.expectedCleanedUpCount() != null &&
+ indexMessageState.expectedCleanedUpCount() !=
+ indexMessageState._workerCleanedUpCount
+ ) {
+ throw new Error(
+ "Expected worker-cleaned-up count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedCleanedUpCount() +
+ "\nActual:\n" +
+ indexMessageState._workerCleanedUpCount
+ );
+ }
+ if (
+ indexMessageState.expectedHadNoCleanUpCount() != null &&
+ indexMessageState.expectedHadNoCleanUpCount() !=
+ indexMessageState._workerHadNoCleanUpCount
+ ) {
+ throw new Error(
+ "Expected worker-had-no-cleanup count did not match actual!\n" +
+ "Expected:\n" +
+ indexMessageState.expectedHadNoCleanUpCount() +
+ "\nActual\n" +
+ indexMessageState._workerHadNoCleanUpCount
+ );
+ }
+
+ if (
+ indexMessageState.expectedNumFullIndexed() != null &&
+ indexMessageState.expectedNumFullIndexed() !=
+ indexMessageState._numFullIndexed
+ ) {
+ throw new Error(
+ "Expected number of fully indexed messages did not match.\n" +
+ "Expected:\n" +
+ indexMessageState.expectedNumFullIndexed() +
+ "\nActual:\n" +
+ indexMessageState._numFullIndexed
+ );
+ }
+
+ // Cleanup of internal tracking values in the IndexMessageState
+ // for new tests.
+ resetIndexMessageState();
+
+  // If no error has been thrown until here, we're fine!
+ // Return values for Assert.ok.
+ // Using like Assert.ok(...assertExpectedMessagesIndexed()).
+ return [true, "Expected messages were indexed."];
+}
+
+/**
+ * Resets the IndexMessageState
+ *
+ * @TODO more docs
+ */
+function resetIndexMessageState() {
+ indexMessageState.synMessageSets = [];
+ indexMessageState._glodaMessagesByMessageId = [];
+ indexMessageState._glodaDeletionsByMessageId = [];
+
+ indexMessageState._workerRecoveredCount = 0;
+ indexMessageState._workerFailedToRecoverCount = 0;
+ indexMessageState._workerCleanedUpCount = 0;
+ indexMessageState._workerHadNoCleanUpCount = 0;
+
+ indexMessageState._numFullIndexed = 0;
+ indexMessageState.resetData();
+}
+
+/**
+ * Wipe out almost everything from the clutches of the GlodaCollectionManager.
+ * By default, it is caching things and knows about all the non-GC'ed
+ * collections. Tests may want to ensure that their data is loaded from disk
+ * rather than relying on the cache, and so, we exist.
+ * The exception to everything is that Gloda's concept of myContact and
+ * myIdentities needs to have its collections still be reachable or invariants
+ * are in danger of being "de-invarianted".
+ * The other exception to everything are any catch-all-collections used by our
+ * testing/indexing process. We don't scan for them, we just hard-code their
+ * addition if they exist.
+ */
+function nukeGlodaCachesAndCollections() {
+ // Explode if the GlodaCollectionManager somehow doesn't work like we think it
+ // should. (I am reluctant to put this logic in there, especially because
+ // knowledge of the Gloda contact/identity collections simply can't be known
+ // by the colleciton manager.)
+ if (
+ GlodaCollectionManager._collectionsByNoun === undefined ||
+ GlodaCollectionManager._cachesByNoun === undefined
+ ) {
+ // We don't check the Gloda contact/identities things because they might not
+ // get initialized if there are no identities, which is the case for our
+ // unit tests right now...
+ throw new Error(
+ "Try and remember to update the testing infrastructure when you " +
+ "change things!"
+ );
+ }
+
+ // We can just blow away the known collections.
+ GlodaCollectionManager._collectionsByNoun = {};
+ // But then we have to put the myContact / myIdentities junk back.
+ if (Gloda._myContactCollection) {
+ GlodaCollectionManager.registerCollection(Gloda._myContactCollection);
+ GlodaCollectionManager.registerCollection(Gloda._myIdentitiesCollection);
+ }
+ // Don't forget our testing catch-all collection.
+ if (collectionListener.catchAllCollection) {
+ // Empty it out in case it has anything in it.
+ collectionListener.catchAllCollection.clear();
+ // And now we can register it.
+ GlodaCollectionManager.registerCollection(
+ collectionListener.catchAllCollection
+ );
+ }
+
+ // Caches aren't intended to be cleared, but we also don't want to lose our
+ // caches, so we need to create new ones from the ashes of the old ones.
+ let oldCaches = GlodaCollectionManager._cachesByNoun;
+ GlodaCollectionManager._cachesByNoun = {};
+ for (let nounId in oldCaches) {
+ let cache = oldCaches[nounId];
+ GlodaCollectionManager.defineCache(cache._nounDef, cache._maxCacheSize);
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
new file mode 100644
index 0000000000..f7a5199ba3
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/resources/GlodaTestHelperFunctions.jsm
@@ -0,0 +1,293 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const EXPORTED_SYMBOLS = [
+ "configureGlodaIndexing",
+ "waitForGlodaDBFlush",
+ "waitForIndexingHang",
+ "resumeFromSimulatedHang",
+ "permuteMessages",
+ "makeABCardForAddressPair",
+];
+
+/*
+ * This file provides gloda testing infrastructure functions which are not coupled
+ * with the IndexMessageState from GlodaTestHelper.jsm
+ */
+
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var { SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+
+var log = console.createInstance({
+ prefix: "gloda.helperFunctions",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.loglevel",
+});
+
+/**
+ * Resume execution when the db has run all the async statements whose execution
+ * was queued prior to this call. We trigger a commit to accomplish this,
+ * although this could also be accomplished without a commit. (Though we would
+ * have to reach into GlodaDatastore.jsm and get at the raw connection or extend
+ * datastore to provide a way to accomplish this.)
+ */
+async function waitForGlodaDBFlush() {
+ // We already have a mechanism to do this by forcing a commit. Arguably,
+ // it would be better to use a mechanism that does not induce an fsync.
+ var savedDepth = GlodaDatastore._transactionDepth;
+ if (!savedDepth) {
+ GlodaDatastore._beginTransaction();
+ }
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ GlodaDatastore.runPostCommit(promiseResolve);
+ // We don't actually need to run things to zero. We can just wait for the
+ // outer transaction to close itself.
+ GlodaDatastore._commitTransaction();
+ if (savedDepth) {
+ GlodaDatastore._beginTransaction();
+ }
+ await promise;
+}
+
+/**
+ * An injected fault exception.
+ */
+function InjectedFault(aWhy) {
+ this.message = aWhy;
+}
+InjectedFault.prototype = {
+ toString() {
+ return "[InjectedFault: " + this.message + "]";
+ },
+};
+
+function _inject_failure_on_MsgHdrToMimeMessage() {
+ throw new InjectedFault("MsgHdrToMimeMessage");
+}
+
+let hangResolve;
+let hangPromise = new Promise(resolve => {
+ hangResolve = resolve;
+});
+
+function _simulate_hang_on_MsgHdrToMimeMessage(...aArgs) {
+ hangResolve([MsgHdrToMimeMessage, null, aArgs]);
+}
+
+/**
+ * If you have configured gloda to hang while indexing, this is the thing
+ * you wait on to make sure the indexer actually gets to the point where it
+ * hangs.
+ */
+async function waitForIndexingHang() {
+ await hangPromise;
+}
+
+/**
+ * Configure gloda indexing. For most settings, the settings get clobbered by
+ * the next time this method is called. Omitted settings reset to the defaults.
+ * However, anything labeled as a 'sticky' setting stays that way until
+ * explicitly changed.
+ *
+ * @param {boolean} [aArgs.event=true] Should event-driven indexing be enabled
+ * (true) or disabled (false)? Right now, this actually suppresses
+ * indexing... the semantics will be ironed out as-needed.
+ * @param [aArgs.hangWhile] Must be either omitted (for don't force a hang) or
+ * "streaming" indicating that we should do a no-op instead of performing
+ * the message streaming. This will manifest as a hang until
+ * |resumeFromSimulatedHang| is invoked or the test explicitly causes the
+ * indexer to abort (in which case you do not need to call the resume
+ * function.) You must omit injectFaultIn if you use hangWhile.
+ * @param [aArgs.injectFaultIn=null] Must be omitted (for don't inject a
+ * failure) or "streaming" indicating that we should inject a failure when
+ * the message indexer attempts to stream a message. The fault will be an
+ * appropriate exception. You must omit hangWhile if you use injectFaultIn.
+ */
+function configureGlodaIndexing(aArgs) {
+ let shouldSuppress = "event" in aArgs ? !aArgs.event : false;
+ if (shouldSuppress != GlodaIndexer.suppressIndexing) {
+ log.debug(`Setting suppress indexing to ${shouldSuppress}.`);
+ GlodaIndexer.suppressIndexing = shouldSuppress;
+ }
+
+ if ("hangWhile" in aArgs) {
+ log.debug(`Enabling hang injection in ${aArgs.hangWhile}.`);
+ switch (aArgs.hangWhile) {
+ case "streaming":
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
+ _simulate_hang_on_MsgHdrToMimeMessage;
+ break;
+ default:
+ throw new Error(
+ aArgs.hangWhile + " is not a legal choice for hangWhile"
+ );
+ }
+ } else if ("injectFaultIn" in aArgs) {
+    log.debug(`Enabling fault injection in ${aArgs.injectFaultIn}.`);
+ switch (aArgs.injectFaultIn) {
+ case "streaming":
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc =
+ _inject_failure_on_MsgHdrToMimeMessage;
+ break;
+ default:
+ throw new Error(
+ aArgs.injectFaultIn + " is not a legal choice for injectFaultIn"
+ );
+ }
+ } else {
+ if (GlodaMsgIndexer._MsgHdrToMimeMessageFunc != MsgHdrToMimeMessage) {
+ log.debug("Clearing hang/fault injection.");
+ }
+ GlodaMsgIndexer._MsgHdrToMimeMessageFunc = MsgHdrToMimeMessage;
+ }
+}
+
+/**
+ * Call this to resume from the hang induced by configuring the indexer with
+ * a "hangWhile" argument to |configureGlodaIndexing|.
+ *
+ * @param [aJustResumeExecution=false] Should we just poke the callback driver
+ * for the indexer rather than continuing the call. You would likely want
+ * to do this if you committed a lot of violence while in the simulated
+ * hang and proper resumption would throw exceptions all over the place.
+ * (For example; if you hang before streaming and destroy the message
+ * header while suspended, resuming the attempt to stream will throw.)
+ */
+async function resumeFromSimulatedHang(aJustResumeExecution) {
+ if (aJustResumeExecution) {
+ log.debug("Resuming from simulated hang with direct wrapper callback.");
+ GlodaIndexer._wrapCallbackDriver();
+ } else {
+ let [func, dis, args] = await hangPromise;
+ log.debug(`Resuming from simulated hang with call to: ${func.name}.`);
+ func.apply(dis, args);
+ }
+ // Reset the promise for the hang.
+ hangPromise = new Promise(resolve => {
+ hangResolve = resolve;
+ });
+}
+
+/**
+ * Prepares permutations for messages with aScenarioMaker. Be sure to wait for the indexer
+ * for every permutation and verify the result.
+ *
+ * This process is executed once for each possible permutation of observation
+ * of the synthetic messages. (Well, we cap it; brute-force test your logic
+ * on your own time; you should really only be feeding us minimal scenarios.)
+ *
+ * @param aScenarioMaker A function that, when called, will generate a series
+ * of SyntheticMessage instances. Each call to this method should generate
+ * a new set of conceptually equivalent, but not identical, messages. This
+ * allows us to process without having to reset our state back to nothing each
+ * time. (This is more to try and make sure we run the system with a 'dirty'
+ * state than a bid for efficiency.)
+ * @param {MessageInjection} messageInjection An instance to use for permuting
+ * the messages and creating folders.
+ *
+ * @returns {[async () => SyntheticMessageSet]} Await it sequentially with a for...of loop.
+ * Wait for each element for the Indexer and assert afterwards.
+ */
+async function permuteMessages(aScenarioMaker, messageInjection) {
+ let folder = await messageInjection.makeEmptyFolder();
+
+ // To calculate the permutations, we need to actually see what gets produced.
+ let scenarioMessages = aScenarioMaker();
+ let numPermutations = Math.min(factorial(scenarioMessages.length), 32);
+
+ let permutations = [];
+ for (let iPermutation = 0; iPermutation < numPermutations; iPermutation++) {
+ permutations.push(async () => {
+ log.debug(`Run permutation: ${iPermutation + 1} / ${numPermutations}`);
+ // If this is not the first time through, we need to create a new set.
+ if (iPermutation) {
+ scenarioMessages = aScenarioMaker();
+ }
+ scenarioMessages = permute(scenarioMessages, iPermutation);
+ let scenarioSet = new SyntheticMessageSet(scenarioMessages);
+ await messageInjection.addSetsToFolders([folder], [scenarioSet]);
+ return scenarioSet;
+ });
+ }
+ return permutations;
+}
+
+/**
+ * A simple factorial function used to calculate the number of permutations
+ * possible for a given set of messages.
+ */
+function factorial(i, rv) {
+ if (i <= 1) {
+ return rv || 1;
+ }
+ return factorial(i - 1, (rv || 1) * i); // tail-call capable
+}
+
+/**
+ * Permute an array given a 'permutation id' that is an integer that fully
+ * characterizes the permutation through the decisions that need to be made
+ * at each step.
+ *
+ * @param aArray Source array that is destructively processed.
+ * @param aPermutationId The permutation id. A permutation id of 0 results in
+ * the original array's sequence being maintained.
+ */
+function permute(aArray, aPermutationId) {
+ let out = [];
+ for (let i = aArray.length; i > 0; i--) {
+ let offset = aPermutationId % i;
+ out.push(aArray[offset]);
+ aArray.splice(offset, 1);
+ aPermutationId = Math.floor(aPermutationId / i);
+ }
+ return out;
+}
+
+/**
+ * Add a name-and-address pair as generated by `makeNameAndAddress` to the
+ * personal address book.
+ */
+function makeABCardForAddressPair(nameAndAddress) {
+ // XXX bug 314448 demands that we trigger creation of the ABs... If we don't
+ // do this, then the call to addCard will fail if someone else hasn't tickled
+ // this.
+ MailServices.ab.directories;
+
+ // kPABData is copied from abSetup.js
+ let kPABData = {
+ URI: "jsaddrbook://abook.sqlite",
+ };
+ let addressBook = MailServices.ab.getDirectory(kPABData.URI);
+
+ let card = Cc["@mozilla.org/addressbook/cardproperty;1"].createInstance(
+ Ci.nsIAbCard
+ );
+ card.displayName = nameAndAddress[0];
+ card.primaryEmail = nameAndAddress[1];
+
+ // Just save the new node straight away.
+ addressBook.addCard(card);
+
+ log.debug(`Adding address book card for: ${nameAndAddress}`);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js b/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js
new file mode 100644
index 0000000000..ff186e871a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_corrupt_database.js
@@ -0,0 +1,86 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This test does not use glodaTestHelper because:
+ * 1) We need to do things as part of the test without gloda having remotely
+ * thought about opening the database.
+ * 2) We expect and desire that the logger produce a warning and glodaTestHelper
+ * takes the view that warnings = death.
+ *
+ * We do use the rest of the test infrastructure though.
+ */
+
// -- Do configure the gloda prefs though...
// Yes to indexing.
Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
// No to a sweep we don't control: this test drives database creation itself,
// so disable the automatic initial indexing sweep.
Services.prefs.setBoolPref(
  "mailnews.database.global.indexer.perform_initial_sweep",
  false
);

// We'll start with this datastore ID, and make sure it gets overwritten
// when the index is rebuilt. (The task below asserts that both the pref and
// the in-memory Gloda.datastoreID no longer hold this value.)
var kDatastoreIDPref = "mailnews.database.global.datastore.id";
var kOriginalDatastoreID = "47e4bad6-fedc-4931-bf3f-d2f4146ac63e";
Services.prefs.setCharPref(kDatastoreIDPref, kOriginalDatastoreID);
+
/**
 * Create an illegal=corrupt database and make sure that we log a message and
 * still end up happy (gloda rebuilds the database and mints a fresh
 * datastore ID).
 */
add_task(function test_corrupt_databases_get_reported_and_blown_away() {
  // - Get the file path.
  let dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
  dbFile.append("global-messages-db.sqlite");

  // - Protect dangerous people from themselves.
  // (There should not be a database at this point; if there is one, we are
  //  not in the sandbox profile we expect. I wouldn't bother except we're
  //  going out of our way to write gibberish whereas gloda accidentally
  //  opening a valid database is bad but not horrible.)
  if (dbFile.exists()) {
    do_throw("There should not be a database at this point.");
  }

  // - Create the file.
  dump("Creating gibberish file\n");
  let ostream = Cc["@mozilla.org/network/file-output-stream;1"].createInstance(
    Ci.nsIFileOutputStream
  );
  // NOTE(review): the -1/-1 arguments presumably select the default io flags
  // and permissions — confirm against nsIFileOutputStream.init.
  ostream.init(dbFile, -1, -1, 0);
  let fileContents = "I'm in ur database not being a database.\n";
  ostream.write(fileContents, fileContents.length);
  ostream.close();

  // - Init gloda, get warnings.
  // Importing GlodaPublic is what spins gloda up (see the dump markers); it
  // should notice the gibberish "database", warn, and rebuild from scratch.
  dump("Init gloda\n");
  var { Gloda } = ChromeUtils.import(
    "resource:///modules/gloda/GlodaPublic.jsm"
  );
  dump("Gloda inited, checking\n");

  // - Make sure the datastore has an actual database.
  let { GlodaDatastore } = ChromeUtils.import(
    "resource:///modules/gloda/GlodaDatastore.jsm"
  );

  // Make sure that the datastoreID was overwritten
  Assert.notEqual(Gloda.datastoreID, kOriginalDatastoreID);
  // And for good measure, make sure that the pref was also overwritten
  let currentDatastoreID = Services.prefs.getCharPref(kDatastoreIDPref);
  Assert.notEqual(currentDatastoreID, kOriginalDatastoreID);
  // We'll also ensure that the Gloda.datastoreID matches the one stashed
  // in prefs...
  Assert.equal(currentDatastoreID, Gloda.datastoreID);
  // And finally, we'll make sure that the datastoreID is a string with length
  // greater than 0.
  Assert.equal(typeof Gloda.datastoreID, "string");
  Assert.ok(Gloda.datastoreID.length > 0);

  if (!GlodaDatastore.asyncConnection) {
    do_throw("No database connection suggests no database!");
  }
});
diff --git a/comm/mailnews/db/gloda/test/unit/test_folder_logic.js b/comm/mailnews/db/gloda/test/unit/test_folder_logic.js
new file mode 100644
index 0000000000..6625258daa
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_folder_logic.js
@@ -0,0 +1,60 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the gloda folder logic.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
// Shared across setup and the tasks below.
var msgGen;
var messageInjection;

add_setup(function () {
  msgGen = new MessageGenerator();
  // Tests in this file assume that returned folders are nsIMsgFolders and not
  // handles which currently only local injection supports.
  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
  glodaTestHelperInitialize(messageInjection);
});

/**
 * Newly created folders should not be filthy (at least as long as they have
 * nothing in them.)
 */
add_task(async function test_newly_created_folders_start_clean() {
  let msgFolder = await messageInjection.makeEmptyFolder();
  let glodaFolder = Gloda.getFolderForFolder(msgFolder);
  Assert.equal(glodaFolder.dirtyStatus, glodaFolder.kFolderClean);
});

/**
 * Deleted folders should not leave behind any mapping, and that mapping
 * definitely should not interfere with a newly created folder of the same
 * name.
 */
add_task(async function test_deleted_folder_tombstones_get_forgotten() {
  let oldFolder = await messageInjection.makeEmptyFolder("volver");
  let oldGlodaFolder = Gloda.getFolderForFolder(oldFolder);
  // NOTE(review): deleteFolder is not awaited here; presumably it completes
  // synchronously for local folders — confirm against MessageInjection.
  messageInjection.deleteFolder(oldFolder);

  // The tombstone needs to know it is deleted.
  Assert.ok(oldGlodaFolder._deleted);

  let newFolder = await messageInjection.makeEmptyFolder("volver");
  let newGlodaFolder = Gloda.getFolderForFolder(newFolder);

  // This folder better not be the same and better not think it is deleted.
  Assert.notEqual(oldGlodaFolder, newGlodaFolder);
  Assert.ok(!newGlodaFolder._deleted);
});
diff --git a/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js b/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js
new file mode 100644
index 0000000000..d938208c9b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_fts3_tokenizer.js
@@ -0,0 +1,299 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This test file recycles part of test_intl.js. What we do is insert into the
+ * fulltext index two messages:
+ * - one has tokens 'aa' and 'bbb',
+ * - one is from a previous test and has CJK characters in it.
+ *
+ * We want to test that the behavior of the tokenizer is as expected (namely,
+ * that it drops two-letter tokens unless they're CJK bigrams), and that
+ * GlodaMsgSearcher.jsm properly drops two-letter tokens (unless CJK) from the search
+ * terms to avoid issuing a query that will definitely return no results.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { queryExpect, sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { GlodaFolder } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDataModel.jsm"
+);
+var { GlodaMsgSearcher } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaMsgSearcher.jsm"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* ===== Tests ===== */
+
/**
 * Phrases (and their per-charset wire encodings) used to drive the indexing
 * and fulltext-search tasks below.
 *
 * To make the encoding pairs:
 * - For the subject bit:
 *     import email
 *     h = email.Header.Header(charset=CHARSET)
 *     h.append(STRING)
 *     h.encode()
 * - For the body bit:
 *     s.encode(CHARSET)
 */
var intlPhrases = [
  // -- CJK case
  {
    name: "CJK: Vending Machine",
    actual: "\u81ea\u52d5\u552e\u8ca8\u6a5f",
    encodings: {
      // [RFC 2047 encoded-word subject, raw 8-bit body bytes]
      "utf-8": [
        "=?utf-8?b?6Ieq5YuV5ZSu6LKo5qmf?=",
        "\xe8\x87\xaa\xe5\x8b\x95\xe5\x94\xae\xe8\xb2\xa8\xe6\xa9\x9f",
      ],
    },
    searchPhrases: [
      // Match bi-gram driven matches starting from the front.
      { body: '"\u81ea\u52d5"', match: true },
    ],
  },
  // -- Regular case. Make sure two-letter tokens do not match, since the
  //    tokenizer is supposed to drop them. Also make sure that a three-letter
  //    token matches.
  {
    name: "Boring ASCII",
    actual: "aa bbb",
    encodings: {
      "utf-8": ["=?utf-8?q?aa_bbb?=", "aa bbb"],
    },
    searchPhrases: [
      { body: "aa", match: false },
      { body: "bbb", match: true },
    ],
  },
];
+
// Shared across setup and the helper functions below.
var msgGen;
var messageInjection;

add_setup(function () {
  msgGen = new MessageGenerator();
  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
  glodaTestHelperInitialize(messageInjection);
});

// Index the CJK phrase first, then the ASCII phrase; the token-count and
// search tasks below rely on both being present in the fulltext index.
add_task(async function test_index_cjk() {
  await indexPhrase(intlPhrases[0]);
});

add_task(async function test_index_regular() {
  await indexPhrase(intlPhrases[1]);
});
+
/**
 * Inspect the fulltext index directly via SQL:
 * - Check that the 'aa' token was never emitted (we don't emit two-letter
 *   tokens unless they're CJK).
 * - Check that the '\u81ea\u52d5' token was emitted, because it's CJK.
 * - Check that the 'bbb' token was duly emitted (three letters is more than
 *   two letters so it's tokenized).
 */
add_task(async function test_token_count() {
  // Force a db flush so I can investigate the database if I want.
  await waitForGlodaDBFlush();
  await sqlExpectCount(
    0,
    "SELECT COUNT(*) FROM messagesText where messagesText MATCH 'aa'"
  );
  await sqlExpectCount(
    1,
    "SELECT COUNT(*) FROM messagesText where messagesText MATCH 'bbb'"
  );
  await sqlExpectCount(
    1,
    "SELECT COUNT(*) FROM messagesText where messagesText MATCH '\u81ea\u52d5'"
  );
});

// Exercise gloda query matching for each phrase's searchPhrases entries.
add_task(async function test_fulltextsearch_cjk() {
  await test_fulltextsearch(intlPhrases[0]);
});

add_task(async function test_fulltextsearch_regular() {
  await test_fulltextsearch(intlPhrases[1]);
});

/**
 * We make sure that the Gloda module that builds the query drops two-letter
 * tokens, otherwise this would result in an empty search (no matches for
 * two-letter tokens).
 */
add_task(async function test_query_builder() {
  // aa should be dropped, and we have one message containing the bbb token.
  await msgSearchExpectCount(1, "aa bbb");
  // The CJK part should not be dropped, and match message 1; the bbb token
  // should not be dropped, and match message 2; 0 results returned because no
  // message has the two tokens in it.
  await msgSearchExpectCount(0, "\u81ea\u52d5 bbb");
});
+
/**
 * Create one synthetic message per encoding listed in aPhrase.encodings —
 * subject, body, and attachment name all carry the encoded phrase — inject
 * them, wait for gloda to index them, and verify the indexed values via
 * |verify_index|. Each created message is also stashed in aPhrase.resultList
 * so |test_fulltextsearch| can use it as an expected query result.
 *
 * @param aPhrase One entry of |intlPhrases|.
 */
async function indexPhrase(aPhrase) {
  // Build a synthetic message for each of the delightful encoding types.
  aPhrase.resultList = [];
  const messages = [];
  for (const [charset, [quoted, bodyEncoded]] of Object.entries(
    aPhrase.encodings
  )) {
    const smsg = msgGen.makeMessage({
      subject: quoted,
      body: { charset, encoding: "8bit", body: bodyEncoded },
      attachments: [{ filename: quoted, body: "gabba gabba hey" }],
      // Save off the actual value for checking.
      callerData: [charset, aPhrase.actual],
    });
    messages.push(smsg);
    aPhrase.resultList.push(smsg);
  }

  const synSet = new SyntheticMessageSet(messages);
  await messageInjection.addSetsToFolders(
    [messageInjection.getInboxFolder()],
    [synSet]
  );

  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([synSet], { verifier: verify_index })
  );
}
+
/**
 * Per-message verification callback for |indexPhrase|. The expected value for
 * each message travels on the synthetic message's callerData attribute; the
 * gloda message's subject, indexed body text, and first attachment name must
 * all equal it.
 */
function verify_index(smsg, gmsg) {
  const [charset, actual] = smsg.callerData;
  const subject = gmsg.subject;
  const indexedBodyText = gmsg.indexedBodyText.trim();
  const attachmentName = gmsg.attachmentNames[0];
  dump(`Using character set:\n${charset}\nActual:\n${actual}\n`);
  dump(`Subject:\n${subject}\nSubject length:\n${subject.length}\n`);
  Assert.equal(actual, subject);
  dump(`Body: ${indexedBodyText} (len: ${indexedBodyText.length})\n`);
  Assert.equal(actual, indexedBodyText);
  dump(`Attachment name:${attachmentName} (len: ${attachmentName.length})\n`);
  Assert.equal(actual, attachmentName);
}
+
/**
 * For each phrase, make sure that all of the searchPhrases either match or
 * fail to match as appropriate.
 *
 * @param aPhrase One entry of |intlPhrases|; its resultList must already have
 *     been populated by a prior |indexPhrase| call.
 */
async function test_fulltextsearch(aPhrase) {
  // NOTE(review): GlodaConstants is not imported by this file; presumably a
  // shared test head file provides it — verify, or import
  // resource:///modules/gloda/GlodaConstants.jsm explicitly.
  for (let searchPhrase of aPhrase.searchPhrases) {
    let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
    query.bodyMatches(searchPhrase.body);
    await queryExpect(query, searchPhrase.match ? aPhrase.resultList : []);
  }
}
+
/**
 * Pass a query string to the GlodaMsgSearcher, run the corresponding SQL query,
 * and check the resulted count is what we want.
 *
 * Use like so:
 *   await msgSearchExpectCount(1, "I like cheese");
 *
 * @param aCount Expected number of result rows.
 * @param aFulltextStr The user-style fulltext search string to hand to the
 *     searcher.
 * @throws If the row count differs from aCount or the query fails.
 */
async function msgSearchExpectCount(aCount, aFulltextStr) {
  // Let the GlodaMsgSearcher build its query
  let searcher = new GlodaMsgSearcher(null, aFulltextStr);
  let conn = GlodaDatastore.asyncConnection;
  let query = searcher.buildFulltextQuery();

  // Brace yourself, brutal monkey-patching NOW
  // Temporarily replace _queryFromSQLString so that getCollection() hands us
  // the generated SQL and bind parameters instead of executing the query.
  let sql, args;
  let oldFunc = GlodaDatastore._queryFromSQLString;
  GlodaDatastore._queryFromSQLString = function (aSql, aArgs) {
    sql = aSql;
    args = aArgs;
  };
  query.getCollection();
  GlodaDatastore._queryFromSQLString = oldFunc;

  // Bind the parameters
  let stmt = conn.createStatement(sql);
  for (let [iBinding, bindingValue] of args.entries()) {
    GlodaDatastore._bindVariant(stmt, iBinding, bindingValue);
  }

  let promiseResolve;
  let promise = new Promise(resolve => {
    promiseResolve = resolve;
  });

  // Count rows as they stream in; the count is compared on completion.
  let i = 0;
  stmt.executeAsync({
    handleResult(aResultSet) {
      for (
        let row = aResultSet.getNextRow();
        row;
        row = aResultSet.getNextRow()
      ) {
        i++;
      }
    },

    handleError(aError) {
      do_throw(new Error("Error: " + aError.message));
    },

    handleCompletion(aReason) {
      if (aReason != Ci.mozIStorageStatementCallback.REASON_FINISHED) {
        do_throw(new Error("Query canceled or aborted!"));
      }

      if (i != aCount) {
        throw new Error(
          "Didn't get the expected number of rows: got " +
            i +
            " expected " +
            aCount +
            " SQL: " +
            sql
        );
      }
      promiseResolve();
    },
  });
  // NOTE(review): finalize() runs before handleCompletion has fired;
  // presumably mozStorage permits finalizing a statement with async
  // execution still pending — confirm against mozIStorageStatement docs.
  stmt.finalize();
  await promise;
}
diff --git a/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js
new file mode 100644
index 0000000000..3c59de4233
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_gloda_content_imap_offline.js
@@ -0,0 +1,34 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent for IMAP messages that are offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_gloda_content.js */
+load("base_gloda_content.js");
+
add_setup(async function () {
  msgGen = new MessageGenerator();
  // IMAP injection with offline storage, matching this file's goal of testing
  // content exposure for IMAP messages that are offline.
  messageInjection = new MessageInjection(
    { mode: "imap", offline: true },
    msgGen
  );
  glodaTestHelperInitialize(messageInjection);
});

// Register every shared content test defined by base_gloda_content.js.
base_gloda_content_tests.forEach(e => {
  add_task(e);
});
diff --git a/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js b/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js
new file mode 100644
index 0000000000..f02a6750b4
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_gloda_content_local.js
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests the operation of the GlodaContent (in GlodaContent.jsm) and its exposure
+ * via Gloda.getMessageContent for local messages.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_gloda_content.js */
+load("base_gloda_content.js");
+
add_setup(async function () {
  msgGen = new MessageGenerator();
  // Local folder injection variant of the shared content tests.
  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
  glodaTestHelperInitialize(messageInjection);
});

// Register every shared content test defined by base_gloda_content.js.
base_gloda_content_tests.forEach(e => {
  add_task(e);
});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js b/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js
new file mode 100644
index 0000000000..9d0b0d4103
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_addressbook.js
@@ -0,0 +1,139 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Check that events update identity._hasAddressBookCard correctly.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ nukeGlodaCachesAndCollections,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaCollectionManager } = ChromeUtils.import(
+ "resource:///modules/gloda/Collection.jsm"
+);
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
// The single identity (and eventual address book card) under test.
var EMAIL_ADDRESS = "all.over@the.world.invalid";
var DISPLAY_NAME = "every day";

var messageInjection;

add_setup(function () {
  let msgGen = new MessageGenerator();
  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
  glodaTestHelperInitialize(messageInjection);
});

/**
 * Create an e-mail so the identity can exist.
 */
add_setup(async function () {
  let [msgSet] = await messageInjection.makeNewSetsInFolders(
    [messageInjection.getInboxFolder()],
    [{ count: 1, from: [DISPLAY_NAME, EMAIL_ADDRESS] }]
  );

  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([msgSet]));

  // Okay, but it knows it has no card because indexing thinks stuff.
  // So let's flush all caches and create a query that just knows about the
  // identity.
  nukeGlodaCachesAndCollections();

  // NOTE(review): GlodaConstants is not imported by this file; presumably a
  // shared test head file provides it — verify.
  let identQuery = Gloda.newQuery(GlodaConstants.NOUN_IDENTITY);
  identQuery.kind("email");
  identQuery.value(EMAIL_ADDRESS);
  await queryExpect(identQuery, [EMAIL_ADDRESS]);

  // Now the identity exists. Make sure it is in cache.
  let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
  Assert.notEqual(identity, null);

  // And make sure it has no idea what the current state of the card is.
  if (identity._hasAddressBookCard !== undefined) {
    do_throw(
      "We should have no idea about the state of the ab card, but " +
        "it's: " +
        identity._hasAddressBookCard
    );
  }
});
+
/**
 * Add a card for that e-mail, make sure we update the cached identity ab
 * card state.
 */
add_task(function test_add_card_cache_indication() {
  add_card(EMAIL_ADDRESS, DISPLAY_NAME);

  let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
  Assert.equal(identity._hasAddressBookCard, true);
});

/**
 * Remove the card we added in setup, make sure we update the cached identity
 * ab card state.
 */
add_task(function test_remove_card_cache_indication() {
  delete_card(EMAIL_ADDRESS);

  let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
  Assert.equal(identity._hasAddressBookCard, false);
});

/**
 * Add again a card for that e-mail, make sure we update the cached identity ab
 * card state.
 *
 * NOTE(review): this task intentionally repeats the first one but reuses the
 * same function name `test_add_card_cache_indication`; the two functions are
 * distinct at runtime, but test logs will show the name twice — consider a
 * distinct name (e.g. test_readd_card_cache_indication).
 */
add_task(function test_add_card_cache_indication() {
  add_card(EMAIL_ADDRESS, DISPLAY_NAME);

  let identity = get_cached_gloda_identity_for_email(EMAIL_ADDRESS);
  Assert.equal(identity._hasAddressBookCard, true);
});
+
/**
 * Feed an address/display-name pair to the address collector service, which
 * creates (or updates) a card for it.
 */
function add_card(aEmailAddress, aDisplayName) {
  const collector = Cc[
    "@mozilla.org/addressbook/services/addressCollector;1"
  ].getService(Ci.nsIAbAddressCollector);
  collector.collectSingleAddress(aEmailAddress, aDisplayName, true, true);
}
+
/**
 * Find the first address book directory containing a card for the given
 * address.
 *
 * @returns [directory, card] on a hit, or [null, null] when no directory
 *     holds a card for the address.
 */
function get_card_for_email(aEmailAddress) {
  for (const directory of MailServices.ab.directories) {
    const matchingCard = directory.cardForEmailAddress(aEmailAddress);
    if (matchingCard) {
      return [directory, matchingCard];
    }
  }
  return [null, null];
}
+
/**
 * Delete the (first) address book card associated with the given address.
 *
 * @param aEmailAddress The e-mail address whose card should be removed.
 * @throws Error with a useful message if no card exists for the address.
 */
function delete_card(aEmailAddress) {
  let [book, card] = get_card_for_email(aEmailAddress);

  // get_card_for_email returns [null, null] on a miss; without this guard we
  // would die with an opaque TypeError on `book.URI` below.
  if (!book || !card) {
    throw new Error("No address book card found for: " + aEmailAddress);
  }

  MailServices.ab.getDirectory(book.URI).deleteCards([card]);
}
+
/**
 * Look up an identity in the gloda collection cache by e-mail address;
 * returns null/undefined-ish on a cache miss (no database query is issued).
 */
function get_cached_gloda_identity_for_email(aEmailAddress) {
  // Identity cache keys take the form "email@<lowercased address>".
  // NOTE(review): GlodaConstants is not imported by this file; presumably a
  // shared test head file provides it — verify.
  const cacheKey = "email@" + aEmailAddress.toLowerCase();
  return GlodaCollectionManager.cacheLookupOneByUniqueValue(
    GlodaConstants.NOUN_IDENTITY,
    cacheKey
  );
}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js b/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js
new file mode 100644
index 0000000000..5920ac981e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_bad_messages.js
@@ -0,0 +1,210 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that we fail on bad messages by marking the messages as bad rather than
+ * exploding or something bad like that.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { configureGlodaIndexing } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
// The "gloda-id" header property value this file expects on a message that
// gloda tried and failed to index (asserted in illegal_message and
// test_streaming_failure below).
const GLODA_BAD_MESSAGE_ID = 2;

// Header-clobbering templates that should each make a message unindexable.
var illegalMessageTemplates = [
  // -- Authors
  {
    name: "no author",
    clobberHeaders: {
      From: "",
    },
  },
  {
    name: "too many authors (> 1)",
    clobberHeaders: {
      From: "Tweedle Dee <dee@example.com>, Tweedle Dum <dum@example.com>",
    },
  },
];

var messageInjection;

add_setup(function () {
  let msgGen = new MessageGenerator();
  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
  glodaTestHelperInitialize(messageInjection);
});

// One task per template so a failure pinpoints the broken case.
add_task(async function test_illegal_message_no_author() {
  await illegal_message(illegalMessageTemplates[0]);
});
add_task(async function test_illegal_message_too_many_authors() {
  await illegal_message(illegalMessageTemplates[1]);
});
+
/**
 * A byzantine failure to stream should not sink us. Fake a failure.
 */
add_task(async function test_streaming_failure() {
  // Inject a fault into the streaming phase of indexing.
  configureGlodaIndexing({ injectFaultIn: "streaming" });

  // Inject the messages.
  let [msgSet] = await messageInjection.makeNewSetsInFolders(
    [messageInjection.getInboxFolder()],
    [{ count: 1 }]
  );

  // Indexing should complete without actually indexing the message.
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([], {
      recovered: 1,
      failedToRecover: 0,
      cleanedUp: 0,
      hadNoCleanUp: 0,
    })
  );

  // Make sure the header has the expected gloda bad message state.
  let msgHdr = msgSet.getMsgHdr(0);
  Assert.equal(msgHdr.getUint32Property("gloda-id"), GLODA_BAD_MESSAGE_ID);

  // Make sure gloda does not think the message is indexed
  Assert.equal(Gloda.isMessageIndexed(msgHdr), false);

  // Clear the injected fault so later tasks index normally.
  configureGlodaIndexing({});
});
+
/**
 * If we have one bad message followed by a good message, the good message
 * should still get indexed. Additionally, if we do a sweep on the folder,
 * we should not attempt to index the message again.
 */
add_task(async function test_recovery_and_no_second_attempts() {
  // The empty From: makes the first set illegal (see illegalMessageTemplates);
  // the second set is a normal, indexable message.
  let [, goodSet] = await messageInjection.makeNewSetsInFolders(
    [messageInjection.getInboxFolder()],
    [{ count: 1, clobberHeaders: { From: "" } }, { count: 1 }]
  );

  await waitForGlodaIndexer();
  Assert.ok(...assertExpectedMessagesIndexed([goodSet], { recovered: 1 }));

  // Index the folder; no messages should get indexed and there should be no
  // failure things.
  GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([], {
      recovered: 0,
      failedToRecover: 0,
      cleanedUp: 0,
      hadNoCleanUp: 0,
    })
  );
});
+
/**
 * Make sure that we attempt to reindex a dirty bad message and that when we
 * fail that we clear the dirty bit.
 */
add_task(async function test_reindex_on_dirty_clear_dirty_on_fail() {
  // Inject a new illegal message
  let [msgSet] = await messageInjection.makeNewSetsInFolders(
    [messageInjection.getInboxFolder()],
    [
      {
        count: 1,
        clobberHeaders: illegalMessageTemplates[0].clobberHeaders,
      },
    ]
  );

  // Indexing should complete without actually indexing the message.
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([], {
      recovered: 1,
      failedToRecover: 0,
      cleanedUp: 0,
      hadNoCleanUp: 0,
    })
  );

  // Mark the message dirty (gloda-dirty == 1), force the folder to be
  // indexed.
  let msgHdr = msgSet.getMsgHdr(0);
  msgHdr.setUint32Property("gloda-dirty", 1);
  GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([], {
      recovered: 1,
      failedToRecover: 0,
      cleanedUp: 0,
      hadNoCleanUp: 0,
    })
  );
  // Now the message should be clean.
  Assert.equal(msgHdr.getUint32Property("gloda-dirty"), 0);

  // Check again with filthy (gloda-dirty == 2).
  msgHdr.setUint32Property("gloda-dirty", 2);
  GlodaMsgIndexer.indexFolder(messageInjection.getInboxFolder());
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([], {
      recovered: 1,
      failedToRecover: 0,
      cleanedUp: 0,
      hadNoCleanUp: 0,
    })
  );
  // Now the message should be clean.
  Assert.equal(msgHdr.getUint32Property("gloda-dirty"), 0);
});
+
/**
 * Using exciting templates from |illegalMessageTemplates|, verify that gloda
 * fails to index them and marks the messages bad.
 *
 * @param aInfo One entry of |illegalMessageTemplates| whose clobberHeaders
 *     render the message unindexable.
 */
async function illegal_message(aInfo) {
  // Inject a single message built from the template's broken headers.
  const [msgSet] = await messageInjection.makeNewSetsInFolders(
    [messageInjection.getInboxFolder()],
    [{ count: 1, clobberHeaders: aInfo.clobberHeaders }]
  );

  // The indexer must finish its pass without actually indexing anything,
  // recovering from exactly one bad message along the way.
  await waitForGlodaIndexer();
  Assert.ok(
    ...assertExpectedMessagesIndexed([], {
      recovered: 1,
      failedToRecover: 0,
      cleanedUp: 0,
      hadNoCleanUp: 0,
    })
  );

  // The header should now carry the "bad message" gloda-id marker...
  const badHdr = msgSet.getMsgHdr(0);
  Assert.equal(badHdr.getUint32Property("gloda-id"), GLODA_BAD_MESSAGE_ID);

  // ...and gloda must not consider the message indexed.
  Assert.equal(Gloda.isMessageIndexed(badHdr), false);
}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_compaction.js b/comm/mailnews/db/gloda/test/unit/test_index_compaction.js
new file mode 100644
index 0000000000..7b6923ab61
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_compaction.js
@@ -0,0 +1,395 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that gloda does the right things in terms of compaction. Major cases:
+ *
+ * - Compaction occurs while we are in the process of indexing a folder. We
+ * want to make sure we stop indexing cleanly
+ *
+ * - A folder that we have already indexed gets compacted. We want to make sure
+ * that we update the message keys for all involved. This means verifying
+ * that both the on-disk representations and in-memory representations are
+ * correct.
+ *
+ * - Make sure that an indexing sweep performs a compaction pass if we kill the
+ * compaction job automatically scheduled by the conclusion of the
+ * compaction. (Simulating the user quitting before all compactions have
+ * been processed.)
+ *
+ * - Moves/deletes that happen after a compaction but before we process the
+ * compaction generate a special type of edge case that we need to check.
+ *
+ * There is also a less interesting case:
+ *
+ * - Make sure that the indexer does not try and start indexing a folder that is
+ * in the process of being compacted.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var {
+ configureGlodaIndexing,
+ resumeFromSimulatedHang,
+ waitForGlodaDBFlush,
+ waitForIndexingHang,
+} = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { PromiseTestUtils } = ChromeUtils.import(
+ "resource://testing-common/mailnews/PromiseTestUtils.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ /*
+ * All the rest of the gloda tests (should) work with maildir, but this test
+ * only works/makes sense with mbox, so force it to always use mbox. This
+ * allows developers to manually change the default to maildir and have the
+ * gloda tests run with that.
+ */
+ Services.prefs.setCharPref(
+ "mail.serverDefaultStoreContractID",
+ "@mozilla.org/msgstore/berkeleystore;1"
+ );
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function compaction_indexing_pass_none_pending_commit() {
+ await compaction_indexing_pass({
+ name: "none pending commit",
+ forceCommit: true,
+ });
+});
+add_task(async function compaction_indexing_pass_all_pending_commit() {
+ await compaction_indexing_pass({
+ name: "all pending commit",
+ forceCommit: false,
+ });
+});
+
+/**
+ * Make sure that an indexing sweep performs a compaction pass if we kill the
+ * compaction job automatically scheduled by the conclusion of the compaction.
+ * (Simulating the user quitting before all compactions have been processed.)
+ */
+add_task(async function test_sweep_performs_compaction() {
+ let [[folder], moveSet, staySet] = await messageInjection.makeFoldersWithSets(
+ 1,
+ [{ count: 1 }, { count: 1 }]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([moveSet, staySet], { augment: true })
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([moveSet]));
+
+ // Disable event-driven indexing so there is no way the compaction job can
+ // get worked.
+ configureGlodaIndexing({ event: false });
+
+ // Compact.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Erase the compaction job.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // Make sure the folder is marked compacted.
+ let glodaFolder = Gloda.getFolderForFolder(msgFolder);
+ Assert.ok(glodaFolder.compacted);
+
+ // Re-enable indexing and fire up an indexing pass.
+ configureGlodaIndexing({ event: true });
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Make sure the compaction happened.
+ verify_message_keys(staySet);
+});
+
+/**
+ * Make sure that if we compact a folder then move messages out of it and/or
+ * delete messages from it before its compaction pass happens that the
+ * compaction pass properly marks the messages deleted.
+ */
+add_task(
+ async function test_moves_and_deletions_on_compacted_folder_edge_case() {
+ let [[folder], compactMoveSet, moveSet, delSet, staySet] =
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ { count: 1 },
+ { count: 1 },
+ { count: 1 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed(
+ [compactMoveSet, moveSet, delSet, staySet],
+ {
+ augment: true,
+ }
+ )
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(compactMoveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactMoveSet]));
+
+ // Disable indexing because we don't want to process the compaction.
+ configureGlodaIndexing({ event: false });
+
+ // Compact the folder.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Erase the compaction job.
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // - Delete
+ // Because of the compaction, the PendingCommitTracker forgot that the message
+ // we are deleting got indexed; we will receive no event.
+ await MessageInjection.deleteMessages(delSet);
+
+ // - Move
+ // Same deal on the move, except that it will try and trigger event-based
+ // indexing in the target folder...
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ // Kill the event-based indexing job of the target; we want the indexing sweep
+ // to see it as a move.
+ dump("killing all indexing jobs\n");
+ GlodaIndexer.purgeJobsUsingFilter(() => true);
+
+ // - Indexing pass
+ // Re-enable indexing so we can do a sweep.
+ configureGlodaIndexing({ event: true });
+
+ // This will trigger compaction (per the previous unit test) which should mark
+ // moveSet and delSet as deleted. Then it should happen in to the next
+ // folder and add moveSet again...
+ dump("triggering indexing sweep\n");
+ GlodaMsgIndexer.indexingSweepNeeded = true;
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([moveSet], {
+ deleted: [moveSet, delSet],
+ })
+ );
+
+ // Sanity check the compaction for giggles.
+ verify_message_keys(staySet);
+ }
+);
+
+/**
+ * Induce a compaction while we are in the middle of indexing. Make sure we
+ * clean up and that the folder ends
+ *
+ * Note that in order for compaction to happen there has to be something for
+ * compaction to do, so our prep involves moving a message to another folder.
+ * (Deletion actually produces more legwork for gloda whereas a local move is
+ * almost entirely free.)
+ */
+add_task(async function test_compaction_interrupting_indexing() {
+ // Create a folder with a message inside.
+ let [[folder], compactionFodderSet] =
+ await messageInjection.makeFoldersWithSets(1, [{ count: 1 }]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactionFodderSet]));
+
+ // Move that message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(compactionFodderSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([compactionFodderSet]));
+
+ // Configure the gloda indexer to hang while streaming the message.
+ configureGlodaIndexing({ hangWhile: "streaming" });
+
+ // Create a folder with a message inside.
+ let [msgSet] = await messageInjection.makeNewSetsInFolders(
+ [folder],
+ [{ count: 1 }]
+ );
+
+ await waitForIndexingHang();
+
+ // Compact! This should kill the job and because of the compaction; no other
+ // reason should be able to do this.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+
+ // Reset indexing to not hang.
+ configureGlodaIndexing({});
+
+ // Sorta get the event chain going again.
+ await resumeFromSimulatedHang(true);
+
+ // Because the folder was dirty it should actually end up getting indexed,
+ // so in the end the message will get indexed.
+ // Also, make sure a cleanup was observed.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { cleanedUp: 1 }));
+});
+
+/**
+ *
+ */
+add_task(async function test_do_not_enter_compacting_folders() {
+ // Turn off indexing.
+ configureGlodaIndexing({ event: false });
+
+ // Create a folder with a message inside.
+ let [[folder]] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ // Lie and claim we are compacting that folder.
+ let glodaFolder = Gloda.getFolderForFolder(
+ messageInjection.getRealInjectionFolder(folder)
+ );
+ glodaFolder.compacting = true;
+
+ // Now try and force ourselves to index that folder and its message.
+ // Turn back on indexing.
+ configureGlodaIndexing({ event: true });
+
+ // Verify that the indexer completes without having indexed anything.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+});
+
+/**
+ * Verify that the message keys match between the message headers and the
+ * (augmented on) gloda messages that correspond to the headers.
+ */
+function verify_message_keys(aSynSet) {
+ let iMsg = 0;
+ for (let msgHdr of aSynSet.msgHdrs()) {
+ let glodaMsg = aSynSet.glodaMessages[iMsg++];
+ if (msgHdr.messageKey != glodaMsg.messageKey) {
+ throw new Error(
+ "Message header " +
+ msgHdr +
+ " should have message key " +
+ msgHdr.messageKey +
+ " but has key " +
+ glodaMsg.messageKey +
+ " per gloda msg " +
+ glodaMsg
+ );
+ }
+ }
+ dump("verified message keys after compaction\n");
+}
+
+/**
+ * Compact a folder that we were not indexing. Make sure gloda's representations
+ * get updated to the new message keys.
+ *
+ * This is parameterized because the logic has special cases to deal with
+ * messages that were pending commit that got blown away.
+ */
+async function compaction_indexing_pass(aParam) {
+ // Create 5 messages. We will move just the third message so the first two
+ // message keep their keys and the last two change. (We want 2 for both
+ // cases to avoid edge cases.)
+ let [[folder], sameSet, moveSet, shiftSet] =
+ await messageInjection.makeFoldersWithSets(1, [
+ { count: 2 },
+ { count: 1 },
+ { count: 2 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([sameSet, moveSet, shiftSet], {
+ augment: true,
+ })
+ );
+
+ // Move the message to another folder.
+ let otherFolder = await messageInjection.makeEmptyFolder();
+ await messageInjection.moveMessages(moveSet, otherFolder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([moveSet]));
+
+ if (aParam.forceCommit) {
+ await waitForGlodaDBFlush();
+ }
+
+ // Compact the folder.
+ let msgFolder = messageInjection.getRealInjectionFolder(folder);
+ dump(
+ "Triggering compaction " +
+ "Folder: " +
+ msgFolder.name +
+ " Gloda folder: " +
+ Gloda.getFolderForFolder(msgFolder) +
+ "\n"
+ );
+
+ let urlListener = new PromiseTestUtils.PromiseUrlListener();
+ msgFolder.compact(urlListener, null);
+ await urlListener.promise;
+ // Wait for the compaction job to complete.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ verify_message_keys(sameSet);
+ verify_message_keys(shiftSet);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js
new file mode 100644
index 0000000000..0004373f7a
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_offline.js
@@ -0,0 +1,49 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for offline IMAP junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(function () {
+ // Set these preferences to stop the cache value "cachePDir" being fetched. This
+ // avoids errors on the javascript console, for which the test would otherwise fail.
+ // See bug 903402 for follow-up information.
+ Services.prefs.setComplexValue(
+ "browser.cache.disk.parent_directory",
+ Ci.nsIFile,
+ do_get_profile()
+ );
+ Services.prefs.setComplexValue(
+ "browser.cache.offline.parent_directory",
+ Ci.nsIFile,
+ do_get_profile()
+ );
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js
new file mode 100644
index 0000000000..c144155799
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_imap_online.js
@@ -0,0 +1,36 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for online IMAP junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js b/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js
new file mode 100644
index 0000000000..788b630d5b
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_junk_local.js
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test indexing support for local junk.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_index_junk.js */
+load("base_index_junk.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_junk_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js
new file mode 100644
index 0000000000..a340122ef0
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_offline.js
@@ -0,0 +1,38 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that are offline from the start.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+var msgGen;
+var scenarios;
+var messageInjection;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js
new file mode 100644
index 0000000000..4977dd5521
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online.js
@@ -0,0 +1,36 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that aren't offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+expectFulltextResults = false;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js
new file mode 100644
index 0000000000..85031ec0ac
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_imap_online_to_offline.js
@@ -0,0 +1,42 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Tests how well gloda indexes IMAP messages that are not offline at first, but
+ * are made offline later.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+// We want to go offline once the messages have already been indexed online.
+goOffline = true;
+
+var msgGen;
+var scenarios;
+var messageInjection;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js b/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js
new file mode 100644
index 0000000000..5441a3062c
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_messages_local.js
@@ -0,0 +1,133 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test indexing support for local messages.
+ */
+
+var {
+ glodaTestHelperInitialize,
+ assertExpectedMessagesIndexed,
+ waitForGlodaIndexer,
+ messageInjection,
+ nukeGlodaCachesAndCollections,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator, MessageScenarioFactory } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/* import-globals-from base_index_messages.js */
+load("base_index_messages.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ scenarios = new MessageScenarioFactory(msgGen);
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Make sure that if we have to reparse a local folder we do not hang or
+ * anything. (We had a regression where we would hang.)
+ */
+add_task(async function test_reparse_of_local_folder_works() {
+ // Index a folder.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+
+ // Force a db flush so we do not have any outstanding references to the
+ // folder or its headers.
+ await waitForGlodaDBFlush();
+
+ // Mark the summary invalid.
+ folder.msgDatabase.summaryValid = false;
+ // Clear the database so next time we have to reparse.
+ folder.msgDatabase.forceClosed();
+
+ // Force gloda to re-parse the folder again.
+ GlodaMsgIndexer.indexFolder(folder);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+});
+
+/**
+ * Ensure that fromJSON for a non-singular attribute properly filters out
+ * "undefined" return values, specifically as it relates to tags. When the
+ * user removes them Gloda doesn't actually re-index the messages so the
+ * values will still be there when we next load the message.
+ *
+ * We directly monkey with the state of NounTag for no really good reason, but
+ * maybe it cuts down on disk I/O because we don't have to touch prefs.
+ */
+add_task(async function test_fromjson_of_removed_tag() {
+ // -- Inject
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet], { augment: true }));
+ let gmsg = msgSet.glodaMessages[0];
+
+ // -- Tag
+ let tag = TagNoun.getTag("$label4");
+ msgSet.addTag(tag.key);
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+ Assert.equal(gmsg.tags.length, 1);
+ Assert.equal(gmsg.tags[0].key, tag.key);
+
+ // -- Forget about the tag, TagNoun!
+ delete TagNoun._tagMap[tag.key];
+ // This also means we have to replace the tag service with a liar.
+ let realTagService = TagNoun._msgTagService;
+ TagNoun._msgTagService = {
+ isValidKey() {
+ return false;
+ }, // Lies!
+ };
+
+ // -- Forget about the message, gloda!
+ let glodaId = gmsg.id;
+ nukeGlodaCachesAndCollections();
+
+ // -- Re-load the message.
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.id(glodaId);
+ let coll = await queryExpect(query, msgSet);
+
+ // -- Put the tag back in TagNoun before we check and possibly explode.
+ TagNoun._tagMap[tag.key] = tag;
+ TagNoun._msgTagService = realTagService;
+
+ // -- Verify the message apparently has no tags (despite no reindex).
+ gmsg = coll.items[0];
+ Assert.equal(gmsg.tags.length, 0);
+});
+
+/**
+ * Test that we are using hasOwnProperty or a properly guarding dict for
+ * NounTag so that if someone created a tag called "watch" and then deleted
+ * it, we don't end up exposing the watch function as the tag.
+ *
+ * Strictly speaking, this does not really belong here, but it's a matched set
+ * with the previous test.
+ */
+add_task(
+ function test_nountag_does_not_think_it_has_watch_tag_when_it_does_not() {
+ Assert.equal(TagNoun.fromJSON("watch"), undefined);
+ }
+);
+
+base_index_messages_tests.forEach(e => {
+ add_task(e);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js b/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js
new file mode 100644
index 0000000000..c3f79f0c21
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_index_sweep_folder.js
@@ -0,0 +1,265 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file tests the folder indexing logic of Gloda._worker_folderIndex in
+ * the greater context of the sweep indexing mechanism in a whitebox fashion.
+ *
+ * Automated indexing is suppressed for the duration of this file.
+ *
+ * In order to test the phases of the logic we inject failures into
+ * GlodaIndexer._indexerGetEnumerator with a wrapper to control how far
+ * indexing gets. We also clobber or wrap other functions as needed.
+ */
+
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { configureGlodaIndexing } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { sqlExpectCount } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+
+var { TestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TestUtils.sys.mjs"
+);
+
+/**
+ * We want to stop the GlodaMsgIndexer._indexerGetEnumerator after a
+ * set amount of folder indexing.
+ */
+const ENUMERATOR_SIGNAL_WORD = "STOP Me!";
+/**
+ * How many more enumerations before we should throw; 0 means don't throw.
+ */
+var stop_enumeration_after = 0;
+/**
+ * We hide the error in the promise chain. But we do have to know if it happens
+ * at another cycle.
+ */
+var error_is_thrown = false;
+/**
+ * Inject GlodaMsgIndexer._indexerGetEnumerator with our test indexerGetEnumerator.
+ */
+GlodaMsgIndexer._original_indexerGetEnumerator =
+ GlodaMsgIndexer._indexerGetEnumerator;
+/**
+ * Wrapper for GlodaMsgIndexer._indexerGetEnumerator to cause explosions.
+ */
+GlodaMsgIndexer._indexerGetEnumerator = function (...aArgs) {
+ if (stop_enumeration_after && !--stop_enumeration_after) {
+ error_is_thrown = true;
+ throw new Error(ENUMERATOR_SIGNAL_WORD);
+ }
+
+ return GlodaMsgIndexer._original_indexerGetEnumerator(...aArgs);
+};
+
+var messageInjection;
+
+add_setup(function () {
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ // We do not want the event-driven indexer crimping our style.
+ configureGlodaIndexing({ event: false });
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * The value itself does not matter; it just needs to be present and be in a
+ * certain range for our logic testing.
+ */
+var arbitraryGlodaId = 4096;
+
+/**
+ * When we enter a filthy folder we should be marking all the messages as filthy
+ * that have gloda-id's and committing.
+ */
+add_task(async function test_propagate_filthy_from_folder_to_messages() {
+ // Mark the folder as filthy.
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 3 },
+ ]);
+ let glodaFolder = Gloda.getFolderForFolder(folder);
+ glodaFolder._dirtyStatus = glodaFolder.kFolderFilthy;
+
+ // Mark each header with a gloda-id so they can get marked filthy.
+ for (let msgHdr of msgSet.msgHdrs()) {
+ msgHdr.setUint32Property("gloda-id", arbitraryGlodaId);
+ }
+
+ // Force the database to see it as filthy so we can verify it changes.
+ glodaFolder._datastore.updateFolderDirtyStatus(glodaFolder);
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM folderLocations WHERE id = ? " +
+ "AND dirtyStatus = ?",
+ glodaFolder.id,
+ glodaFolder.kFolderFilthy
+ );
+
+ // Index the folder, aborting at the second get enumerator request.
+ stop_enumeration_after = 2;
+
+ await spin_folder_indexer(folder);
+
+ // The folder should only be dirty.
+ Assert.equal(glodaFolder.dirtyStatus, glodaFolder.kFolderDirty);
+ // Make sure the database sees it as dirty.
+ await sqlExpectCount(
+ 1,
+ "SELECT COUNT(*) FROM folderLocations WHERE id = ? " +
+ "AND dirtyStatus = ?",
+ glodaFolder.id,
+ glodaFolder.kFolderDirty
+ );
+
+ // The messages should be filthy per the headers.
+ // We force a commit of the database.
+ for (let msgHdr of msgSet.msgHdrs()) {
+ Assert.equal(
+ msgHdr.getUint32Property("gloda-dirty"),
+ GlodaMsgIndexer.kMessageFilthy
+ );
+ }
+});
+
+/**
+ * Make sure our counting pass and our indexing passes gets it right. We test
+ * with 0,1,2 messages matching.
+ */
+add_task(async function test_count_pass() {
+ let [[folder], msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 2 },
+ ]);
+
+ let hdrs = msgSet.msgHdrList;
+
+ // - (clean) messages with gloda-id's do not get indexed
+ // Nothing is indexed at this point, so all 2.
+ error_is_thrown = false;
+ stop_enumeration_after = 2;
+ await spin_folder_indexer(folder, 2);
+
+ // Pretend the first is indexed, leaving a count of 1.
+ hdrs[0].setUint32Property("gloda-id", arbitraryGlodaId);
+ error_is_thrown = false;
+ stop_enumeration_after = 2;
+ await spin_folder_indexer(folder, 1);
+
+ // Pretend both are indexed, count of 0.
+ hdrs[1].setUint32Property("gloda-id", arbitraryGlodaId);
+ // No explosion should happen since we should never get to the second
+ // enumerator.
+ error_is_thrown = false;
+ await spin_folder_indexer(folder, 0);
+
+ // - Dirty messages get indexed.
+ hdrs[0].setUint32Property("gloda-dirty", GlodaMsgIndexer.kMessageDirty);
+ stop_enumeration_after = 2;
+ error_is_thrown = false;
+ await spin_folder_indexer(folder, 1);
+
+ hdrs[1].setUint32Property("gloda-dirty", GlodaMsgIndexer.kMessageDirty);
+ stop_enumeration_after = 2;
+ error_is_thrown = false;
+ await spin_folder_indexer(folder, 2);
+});
+
+/**
+ * Create a folder indexing job for the given injection folder handle and
+ * run it until completion.
+ *
+ * The folder indexer will continue running on its own if we dont throw an Error in the
+ * GlodaMsgIndexer._indexerGetEnumerator
+ */
+async function spin_folder_indexer(aFolderHandle, aExpectedJobGoal) {
+ let msgFolder = messageInjection.getRealInjectionFolder(aFolderHandle);
+
+ // Cheat and use indexFolder to build the job for us.
+ GlodaMsgIndexer.indexFolder(msgFolder);
+ // Steal that job.
+ let job = GlodaIndexer._indexQueue.pop();
+ GlodaIndexer._indexingJobGoal--;
+
+ // Create the callbackHandle.
+ let callbackHandle = new CallbackHandle();
+ // Create the worker.
+ let worker = GlodaMsgIndexer._worker_folderIndex(job, callbackHandle);
+ try {
+ callbackHandle.pushAndGo(worker, null);
+ await Promise.race([
+ callbackHandle.promise,
+ TestUtils.waitForCondition(() => {
+ return error_is_thrown;
+ }),
+ ]);
+ } catch (ex) {
+ do_throw(ex);
+ }
+
+ if (aExpectedJobGoal !== undefined) {
+ Assert.equal(job.goal, aExpectedJobGoal);
+ }
+}
+
+/**
+ * Implements GlodaIndexer._callbackHandle's interface adapted to our async
+ * test driver. This allows us to run indexing workers directly in tests
+ * or support code.
+ *
+ * We do not do anything with the context stack or recovery. Use the actual
+ * indexer callback handler for that!
+ *
+ * Actually, we do very little at all right now. This will fill out as needs
+ * arise.
+ */
+class CallbackHandle {
+ constructor() {
+ this._promise = new Promise(resolve => {
+ this._resolve = resolve;
+ });
+ }
+
+ pushAndGo(aIterator, aContext) {
+ this.glodaWorkerAdapter(aIterator, this._resolve).catch(reason => {
+ if (!reason.message.match(ENUMERATOR_SIGNAL_WORD)) {
+ throw reason;
+ }
+ });
+ }
+
+ async glodaWorkerAdapter(aIter, resolve) {
+ while (!error_is_thrown) {
+ switch (aIter.next().value) {
+ case GlodaConstants.kWorkSync:
+ break;
+ case GlodaConstants.kWorkDone:
+ case GlodaConstants.kWorkDoneWithResult:
+ resolve();
+ return;
+ default:
+ break;
+ }
+ }
+ }
+ get promise() {
+ return this._promise;
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_intl.js b/comm/mailnews/db/gloda/test/unit/test_intl.js
new file mode 100644
index 0000000000..e6e9868189
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_intl.js
@@ -0,0 +1,355 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Sanity check our encoding transforms and make sure the mozporter tokenizer
+ * is resulting in the expected fulltext search results. Specifically:
+ * - Check that subject, body, and attachment names are properly indexed;
+ * previously we screwed up at least one of these in terms of handling
+ * encodings properly.
+ * - Check that we can fulltext search on those things afterwards.
+ */
+
+var {
+  assertExpectedMessagesIndexed,
+  glodaTestHelperInitialize,
+  waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+  "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+  "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+  "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+  "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+  "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+  "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/**
+ * To make the encoding pairs (using Python's email package):
+ * - For the subject bit:
+ *     import email.header
+ *     h = email.header.Header(charset=CHARSET)
+ *     h.append(STRING)
+ *     h.encode()
+ * - For the body bit:
+ *     s.encode(CHARSET)
+ */
+var intlPhrases = [
+ // -- CJK case
+ {
+ name: "CJK: Vending Machine",
+ actual: "\u81ea\u52d5\u552e\u8ca8\u6a5f",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?6Ieq5YuV5ZSu6LKo5qmf?=",
+ "\xe8\x87\xaa\xe5\x8b\x95\xe5\x94\xae\xe8\xb2\xa8\xe6\xa9\x9f",
+ ],
+ "euc-jp": [
+ "=?shift-jis?b?jqmTrppTid2LQA==?=",
+ "\xbc\xab\xc6\xb0\xd3\xb4\xb2\xdf\xb5\xa1",
+ ],
+ "shift-jis": [
+ "=?shift-jis?b?jqmTrppTid2LQA==?=",
+ "\x8e\xa9\x93\xae\x9aS\x89\xdd\x8b@",
+ ],
+ },
+ searchPhrases: [
+ // Match bi-gram driven matches starting from the front.
+ { body: '"\u81ea\u52d5"', match: true },
+ { body: '"\u81ea\u52d5\u552e"', match: true },
+ { body: '"\u81ea\u52d5\u552e\u8ca8"', match: true },
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5f"', match: true },
+ // Now match from the back (bi-gram based).
+ { body: '"\u52d5\u552e\u8ca8\u6a5f"', match: true },
+ { body: '"\u552e\u8ca8\u6a5f"', match: true },
+ { body: '"\u8ca8\u6a5f"', match: true },
+ // Now everybody in the middle!
+ { body: '"\u52d5\u552e\u8ca8"', match: true },
+ { body: '"\u552e\u8ca8"', match: true },
+ { body: '"\u52d5\u552e"', match: true },
+ // -- Now match nobody!
+ // Nothing in common with the right answer.
+ { body: '"\u81eb\u52dc"', match: false },
+ // Too long, no match!
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5f\u6a5f"', match: false },
+ // Minor change at the end.
+ { body: '"\u81ea\u52d5\u552e\u8ca8\u6a5e"', match: false },
+ ],
+ },
+ // Use two words where the last character is a multi-byte sequence and one of
+ // them is the last word in the string. This helps test an off-by-one error
+ // in both the asymmetric case (query's last character is last character in
+ // the tokenized string but it is not the last character in the body string)
+ // and symmetric case (last character in the query and the body).
+ {
+ name: "Czech diacritics",
+ actual: "Slov\u00e1cko Moravsk\u00e9 rodin\u011b",
+ encodings: {
+ "utf-8": [
+ "=?utf-8?b?U2xvdsOhY2tvIE1vcmF2c2vDqSByb2RpbsSb?=",
+ "Slov\xc3\xa1cko Moravsk\xc3\xa9 rodin\xc4\x9b",
+ ],
+ },
+ searchPhrases: [
+ // -- Desired
+ // Match on exact for either word should work
+ { body: "Slov\u00e1cko", match: true },
+ { body: "Moravsk\u00e9", match: true },
+ { body: "rodin\u011b", match: true },
+ // The ASCII uppercase letters get case-folded
+ { body: "slov\u00e1cko", match: true },
+ { body: "moravsk\u00e9", match: true },
+ { body: "rODIN\u011b", match: true },
+ ],
+ },
+ // Ignore accent search!
+ {
+ name: "having accent: Paris",
+ actual: "Par\u00eds",
+ encodings: {
+ "utf-8": ["=?UTF-8?B?UGFyw61z?=", "Par\xc3\xads"],
+ },
+ searchPhrases: [{ body: "paris", match: true }],
+ },
+ // Case insensitive case for non-ASCII characters.
+ {
+ name: "Russian: new",
+ actual: "\u041d\u043e\u0432\u043e\u0435",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?0J3QvtCy0L7QtQ==?=",
+ "\xd0\x9d\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xb5",
+ ],
+ },
+ searchPhrases: [{ body: "\u043d\u043e\u0432\u043e\u0435", match: true }],
+ },
+ // Case-folding happens after decomposition.
+ {
+ name: "Awesome where A has a bar over it",
+ actual: "\u0100wesome",
+ encodings: {
+ "utf-8": ["=?utf-8?q?=C4=80wesome?=", "\xc4\x80wesome"],
+ },
+ searchPhrases: [
+ { body: "\u0100wesome", match: true }, // Upper A-bar
+ { body: "\u0101wesome", match: true }, // Lower a-bar
+ { body: "Awesome", match: true }, // Upper A
+ { body: "awesome", match: true }, // Lower a
+ ],
+ },
+ // Deep decomposition happens and after that, case folding.
+ {
+ name: "Upper case upsilon with diaeresis and hook goes to small upsilon",
+ actual: "\u03d4esterday",
+ encodings: {
+ "utf-8": ["=?utf-8?q?=CF=94esterday?=", "\xcf\x94esterday"],
+ },
+ searchPhrases: [
+ { body: "\u03d4esterday", match: true }, // Y_: 03d4 => 03d2 (decomposed)
+ { body: "\u03d3esterday", match: true }, // Y_' 03d3 => 03d2 (decomposed)
+ { body: "\u03d2esterday", match: true }, // Y_ 03d2 => 03a5 (decomposed)
+ { body: "\u03a5esterday", match: true }, // Y 03a5 => 03c5 (lowercase)
+ { body: "\u03c5esterday", match: true }, // y 03c5 (final state)
+ ],
+ },
+ // Full-width alphabet.
+ // Even if search phrases are ASCII, it has to hit.
+ {
+ name: "Full-width Thunderbird",
+ actual:
+ "\uff34\uff48\uff55\uff4e\uff44\uff45\uff52\uff42\uff49\uff52\uff44",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?77y0772I772V772O772E772F772S772C772J772S772E?=",
+ "\xef\xbc\xb4\xef\xbd\x88\xef\xbd\x95\xef\xbd\x8e\xef\xbd\x84\xef\xbd\x85\xef\xbd\x92\xef\xbd\x82\xef\xbd\x89\xef\xbd\x92\xef\xbd\x84",
+ ],
+ },
+ searchPhrases: [
+ // Full-width lower.
+ {
+ body: "\uff34\uff28\uff35\uff2e\uff24\uff25\uff32\uff22\uff29\uff32\uff24",
+ match: true,
+ },
+ // Half-width.
+ { body: "Thunderbird", match: true },
+ ],
+ },
+ // Half-width Katakana with voiced sound mark.
+ // Even if search phrases are full-width, it has to hit.
+ {
+ name: "Half-width Katakana: Thunderbird (SANDAABAADO)",
+ actual: "\uff7b\uff9d\uff80\uff9e\uff70\uff8a\uff9e\uff70\uff84\uff9e",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?7727776d776A776e772w776K776e772w776E776e?=",
+ "\xef\xbd\xbb\xef\xbe\x9d\xef\xbe\x80\xef\xbe\x9e\xef\xbd\xb0\xef\xbe\x8a\xef\xbe\x9e\xef\xbd\xb0\xef\xbe\x84\xef\xbe\x9e",
+ ],
+ },
+ searchPhrases: [
+ { body: "\u30b5\u30f3\u30c0\u30fc\u30d0\u30fc\u30c9", match: true },
+ ],
+ },
+ // Thai: Would you like to see the movie?
+ {
+ name: "Thai: query movie word into Thai language content",
+ actual:
+ "\u0e04\u0e38\u0e13\u0e2d\u0e22\u0e32\u0e01\u0e44\u0e1b\u0e14\u0e39\u0e2b\u0e19\u0e31\u0e07",
+ encodings: {
+ "utf-8": [
+ "=?UTF-8?B?4LiE4Li44LiT4Lit4Lii4Liy4LiB4LmE4Lib4LiU4Li54Lir4LiZ4Lix4LiH?=",
+ "\xe0\xb8\x84\xe0\xb8\xb8\xe0\xb8\x93\xe0\xb8\xad\xe0\xb8\xa2\xe0\xb8\xb2\xe0\xb8\x81\xe0\xb9\x84\xe0\xb8\x9b\xe0\xb8\x94\xe0\xb8\xb9\xe0\xb8\xab\xe0\xb8\x99\xe0\xb8\xb1\xe0\xb8\x87",
+ ],
+ },
+ searchPhrases: [{ body: "\u0e2b\u0e19\u0e31\u0e07", match: true }],
+ },
+];
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ // Use mbox injection because the fake server chokes sometimes right now.
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+add_task(async function test_index_all_phrases() {
+ for (let phrase of intlPhrases) {
+ await indexPhrase(phrase);
+ }
+});
+
+add_task(async function flush_db() {
+ // Force a db flush so I can investigate the database if I want.
+ await waitForGlodaDBFlush();
+});
+
+add_task(async function test_fulltextsearch_all_phrases() {
+ for (let phrase of intlPhrases) {
+ await fulltextsearchPhrase(phrase);
+ }
+});
+
+/**
+ * Names with encoded commas in them can screw up our mail address parsing if
+ * we perform the mime decoding prior to handing the mail address off for
+ * parsing.
+ */
+add_task(async function test_encoding_complications_with_mail_addresses() {
+ let basePair = msgGen.makeNameAndAddress();
+ // The =2C encodes a comma!
+ let encodedCommaPair = ["=?iso-8859-1?Q?=DFnake=2C_=DFammy?=", basePair[1]];
+ // "Snake, Sammy", but with a much cooler looking S-like character!
+ let decodedName = "\u00dfnake, \u00dfammy";
+ // Use the thing with the comma in it for all cases; previously there was an
+ // asymmetry between to and cc...
+ let smsg = msgGen.makeMessage({
+ from: encodedCommaPair,
+ to: [encodedCommaPair],
+ cc: [encodedCommaPair],
+ });
+ function verify_sammy_snake(unused, gmsg) {
+ Assert.equal(gmsg.from.contact.name, decodedName);
+ Assert.equal(gmsg.to.length, 1);
+ Assert.equal(gmsg.to[0].id, gmsg.from.id);
+ Assert.equal(gmsg.cc.length, 1);
+ Assert.equal(gmsg.cc[0].id, gmsg.from.id);
+ }
+
+ let synSet = new SyntheticMessageSet([smsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([synSet], { verifier: verify_sammy_snake })
+ );
+});
+
+/**
+ * For each phrase in the intlPhrases array (we are parameterized over it using
+ * parameterizeTest in the 'tests' declaration), create a message where the
+ * subject, body, and attachment name are populated using the encodings in
+ * the phrase's "encodings" attribute, one encoding per message. Make sure
+ * that the strings as exposed by the gloda representation are equal to the
+ * expected/actual value.
+ * Stash each created synthetic message in a resultList list on the phrase so
+ * that we can use them as expected query results in
+ * |fulltextsearchPhrase|.
+ */
+async function indexPhrase(aPhrase) {
+ // Create a synthetic message for each of the delightful encoding types.
+ let messages = [];
+ aPhrase.resultList = [];
+ for (let charset in aPhrase.encodings) {
+ let [quoted, bodyEncoded] = aPhrase.encodings[charset];
+
+ let smsg = msgGen.makeMessage({
+ subject: quoted,
+ body: { charset, encoding: "8bit", body: bodyEncoded },
+ attachments: [{ filename: quoted, body: "gabba gabba hey" }],
+ // Save off the actual value for checking.
+ callerData: [charset, aPhrase.actual],
+ });
+
+ messages.push(smsg);
+ aPhrase.resultList.push(smsg);
+ }
+ let synSet = new SyntheticMessageSet(messages);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ await waitForGlodaIndexer();
+ Assert.ok(
+ ...assertExpectedMessagesIndexed([synSet], { verifier: verify_index })
+ );
+}
+
+/**
+ * Does the per-message verification for indexPhrase. Knows what is right for
+ * each message because of the callerData attribute on the synthetic message.
+ */
+function verify_index(smsg, gmsg) {
+ let [charset, actual] = smsg.callerData;
+ let subject = gmsg.subject;
+ let indexedBodyText = gmsg.indexedBodyText.trim();
+ let attachmentName = gmsg.attachmentNames[0];
+ dump("using character set: " + charset + " actual: " + actual + "\n");
+ dump("subject: " + subject + " (len: " + subject.length + ")\n");
+ Assert.equal(actual, subject);
+ dump("Body: " + indexedBodyText + " (len: " + indexedBodyText.length + ")\n");
+ Assert.equal(actual, indexedBodyText);
+ dump(
+ "Attachment name: " +
+ attachmentName +
+ " (len: " +
+ attachmentName.length +
+ ")\n"
+ );
+ Assert.equal(actual, attachmentName);
+}
+
+/**
+ * For each phrase, make sure that all of the searchPhrases either match or fail
+ * to match as appropriate.
+ */
+async function fulltextsearchPhrase(aPhrase) {
+ for (let searchPhrase of aPhrase.searchPhrases) {
+ let query = Gloda.newQuery(GlodaConstants.NOUN_MESSAGE);
+ query.bodyMatches(searchPhrase.body);
+ await queryExpect(query, searchPhrase.match ? aPhrase.resultList : []);
+ }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_migration.js b/comm/mailnews/db/gloda/test/unit/test_migration.js
new file mode 100644
index 0000000000..f7e1bc334d
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_migration.js
@@ -0,0 +1,151 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test migration logic by artificially inducing or simulating the problem, then
+ * trigger the migration logic, then verify things ended up correct, including
+ * the schema version so a second pass of the logic doesn't happen. (As
+ * opposed to checking in an example of a broken database and running against
+ * that.)
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ nukeGlodaCachesAndCollections,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { waitForGlodaDBFlush, makeABCardForAddressPair } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { sqlRun } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { GlodaMsgIndexer } = ChromeUtils.import(
+ "resource:///modules/gloda/IndexMsg.jsm"
+);
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+const GLODA_OLD_BAD_MESSAGE_ID = 1;
+
+var msgGen;
+var messageInjection;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Fix the fallout from bug 732372 (with this patch for bug 734507) which left
+ * identities whose e-mails were in the address book without contacts and then
+ * broke messages involving them.
+ */
+add_task(async function test_fix_missing_contacts_and_fallout() {
+  // -- Setup
+
+  // - Create 4 e-mail addresses, 2 of which are in the address book. (We want
+  //   to make sure we have to iterate, hence >1).
+  let abPeeps = msgGen.makeNamesAndAddresses(2);
+  let nonAbPeeps = msgGen.makeNamesAndAddresses(2);
+  makeABCardForAddressPair(abPeeps[0]);
+  makeABCardForAddressPair(abPeeps[1]);
+
+  // - Create messages of the genres [from, to]: [inAB, inAB], [inAB, !inAB],
+  //   [!inAB, inAB], [!inAB, !inAB]. The permutations are black box overkill.
+  //   Smear the messages over multiple folders for realism.
+  let [, yesyesMsgSet, yesnoMsgSet, noyesMsgSet, nonoMsgSet] =
+    await messageInjection.makeFoldersWithSets(3, [
+      { count: 2, from: abPeeps[0], to: [abPeeps[1]] },
+      { count: 2, from: abPeeps[1], to: nonAbPeeps },
+      { count: 2, from: nonAbPeeps[0], to: abPeeps },
+      { count: 2, from: nonAbPeeps[1], to: [nonAbPeeps[0]] },
+    ]);
+
+  // Union the yeses together; we don't care about their composition.
+  let yesMsgSet = yesyesMsgSet.union(yesnoMsgSet).union(noyesMsgSet),
+    noMsgSet = nonoMsgSet;
+
+  // - Let gloda index the messages so the identities get created.
+  await waitForGlodaIndexer();
+  Assert.ok(
+    ...assertExpectedMessagesIndexed([yesMsgSet, noMsgSet], { augment: true })
+  );
+  // The messages are now indexed and the contacts created.
+
+  // - Compel an indexing sweep so the folder's dirty statuses get cleared.
+  GlodaMsgIndexer.initialSweep();
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([])); // (no new messages to index)
+
+  // - Force a DB commit so the pending commit tracker gets emptied out.
+  //   (Otherwise we need to worry about its state overriding our clobbering.)
+  await waitForGlodaDBFlush();
+
+  // - Delete the contact records for the people in the address book directly
+  //   in SQL, simulating the damage done by the original bug.
+  await sqlRun(
+    "DELETE FROM contacts WHERE id IN (" +
+      yesMsgSet.glodaMessages[0].from.contact.id +
+      ", " +
+      yesMsgSet.glodaMessages[0].to[0].contact.id +
+      ")"
+  );
+
+  // - Nuke the gloda caches so we totally forget those contact records.
+  nukeGlodaCachesAndCollections();
+
+  // - Manually mark the messages involving the inAB people with the _old_ bad
+  //   id marker so that our scan will see them.
+  for (let msgHdr of yesMsgSet.msgHdrs()) {
+    msgHdr.setUint32Property("gloda-id", GLODA_OLD_BAD_MESSAGE_ID);
+  }
+
+  // - Mark the db schema version to the version with the bug (26).
+  //   Sanity check that gloda actually populates the value with the current
+  //   version correctly before we clobber it.
+  Assert.equal(
+    GlodaDatastore._actualSchemaVersion,
+    GlodaDatastore._schemaVersion
+  );
+  GlodaDatastore._actualSchemaVersion = 26;
+  await sqlRun("PRAGMA user_version = 26");
+  // Make sure that took, since we check it below as a success indicator.
+  let verRows = await sqlRun("PRAGMA user_version");
+  Assert.equal(verRows[0].getInt64(0), 26);
+
+  // -- Test
+  // - Trigger the migration logic and request an indexing sweep. The
+  //   disable/enable cycle presumably forces the indexer to re-run its
+  //   startup (and thus migration) path -- confirm against GlodaMsgIndexer
+  //   if this ever breaks.
+  GlodaMsgIndexer.disable();
+  GlodaMsgIndexer.enable();
+  GlodaMsgIndexer.initialSweep();
+
+  // - Wait for the indexer to complete, expecting that the messages that we
+  //   marked bad will get indexed but not the good messages.
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([yesMsgSet], { augment: true }));
+
+  // - Verify that the identities have contacts again.
+  // Must have the contact object.
+  Assert.notEqual(yesMsgSet.glodaMessages[0].from.contact, undefined);
+  // The contact's name should come from the address book card.
+  Assert.equal(yesMsgSet.glodaMessages[0].from.contact.name, abPeeps[0][0]);
+
+  // - Verify that the schema version changed from gloda's perspective and from
+  //   the db's perspective.
+  verRows = await sqlRun("PRAGMA user_version");
+  Assert.equal(verRows[0].getInt64(0), GlodaDatastore._schemaVersion);
+  Assert.equal(
+    GlodaDatastore._actualSchemaVersion,
+    GlodaDatastore._schemaVersion
+  );
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js b/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js
new file mode 100644
index 0000000000..2e18fbe11f
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_mime_attachments_size.js
@@ -0,0 +1,445 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * General testing of the byte-counting libmime facility, to make sure that what
+ * is streamed to us is actually labeled with the right size.
+ */
+
+/*
+ * Do not include glodaTestHelper because we do not want gloda loaded and it
+ * adds a lot of runtime overhead which makes certain debugging strategies like
+ * using chronicle-recorder impractical.
+ */
+
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var {
+ MessageGenerator,
+ SyntheticPartLeaf,
+ SyntheticPartMultiMixed,
+ SyntheticPartMultiRelated,
+ SyntheticMessageSet,
+} = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen = new MessageGenerator();
+var messageInjection;
+
+add_setup(function () {
+ // Sanity check: figure out how many bytes the original text occupies in UTF-8 encoding
+ Assert.equal(
+ new TextEncoder().encode(originalText).length,
+ originalTextByteCount
+ );
+
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+});
+
+var htmlText = "<html><head></head><body>I am HTML! Woo! </body></html>";
+
+var partHtml = new SyntheticPartLeaf(htmlText, {
+ contentType: "text/html",
+});
+
+// This text is 168 characters long, and occupies 173 bytes when encoded in
+// UTF-8. (We make sure it occupies 173 bytes in run_test below). Note that
+// you cannot use this text directly because it isn't pure ASCII. You must use
+// one of the encoded forms below.
+var originalText =
+ "Longtemps, je me suis couché de bonne heure. Parfois, à " +
+ "peine ma bougie éteinte, mes yeux se fermaient si vite que je n'avais pas le " +
+ "temps de me dire : « Je m'endors. »";
+var originalTextByteCount = 173;
+
+var b64Text =
+ "TG9uZ3RlbXBzLCBqZSBtZSBzdWlzIGNvdWNow6kgZGUgYm9ubmUgaGV1cmUuIFBhcmZvaXMs\n" +
+ "IMOgIHBlaW5lIG1hIGJvdWdpZSDDqXRlaW50ZSwgbWVzIHlldXggc2UgZmVybWFpZW50IHNp\n" +
+ "IHZpdGUgcXVlIGplIG4nYXZhaXMgcGFzIGxlIHRlbXBzIGRlIG1lIGRpcmUgOiDCqyBKZSBt\n" +
+ "J2VuZG9ycy4gwrsK";
+
+var qpText =
+ "Longtemps,=20je=20me=20suis=20couch=C3=A9=20de=20bonne=20heure.=20Parfois,=\n" +
+ "=20=C3=A0=20peine=20ma=20bougie=20=C3=A9teinte,=20mes=20yeux=20se=20fermaie=\n" +
+ "nt=20si=20vite=20que=20je=20n'avais=20pas=20le=20temps=20de=20me=20dire=20:=\n" +
+ "=20=C2=AB=20Je=20m'endors.=20=C2=BB";
+
+var uuText =
+ "begin 666 -\n" +
+ 'M3&]N9W1E;7!S+"!J92!M92!S=6ES(&-O=6-HPZD@9&4@8F]N;F4@:&5U<F4N\n' +
+ "M(%!A<F9O:7,L(,.@('!E:6YE(&UA(&)O=6=I92##J71E:6YT92P@;65S('EE\n" +
+ "M=7@@<V4@9F5R;6%I96YT('-I('9I=&4@<75E(&IE(&XG879A:7,@<&%S(&QE\n" +
+ "G('1E;7!S(&1E(&UE(&1I<F4@.B#\"JR!*92!M)V5N9&]R<RX@PKL*\n" +
+ "\n" +
+ "end";
+
+var yencText =
+ "Hello there --\n" +
+ "=ybegin line=128 size=174 name=jane.doe\n" +
+ "\x76\x99\x98\x91\x9e\x8f\x97\x9a\x9d\x56\x4a\x94\x8f\x4a\x97\x8f" +
+ "\x4a\x9d\x9f\x93\x9d\x4a\x8d\x99\x9f\x8d\x92\xed\xd3\x4a\x8e\x8f" +
+ "\x4a\x8c\x99\x98\x98\x8f\x4a\x92\x8f\x9f\x9c\x8f\x58\x4a\x7a\x8b" +
+ "\x9c\x90\x99\x93\x9d\x56\x4a\xed\xca\x4a\x9a\x8f\x93\x98\x8f\x4a" +
+ "\x97\x8b\x4a\x8c\x99\x9f\x91\x93\x8f\x4a\xed\xd3\x9e\x8f\x93\x98" +
+ "\x9e\x8f\x56\x4a\x97\x8f\x9d\x4a\xa3\x8f\x9f\xa2\x4a\x9d\x8f\x4a" +
+ "\x90\x8f\x9c\x97\x8b\x93\x8f\x98\x9e\x4a\x9d\x93\x4a\xa0\x93\x9e" +
+ "\x8f\x4a\x9b\x9f\x8f\x4a\x94\x8f\x4a\x98\x51\x8b\xa0\x8b\x93\x9d" +
+ "\x0d\x0a\x4a\x9a\x8b\x9d\x4a\x96\x8f\x4a\x9e\x8f\x97\x9a\x9d\x4a" +
+ "\x8e\x8f\x4a\x97\x8f\x4a\x8e\x93\x9c\x8f\x4a\x64\x4a\xec\xd5\x4a" +
+ "\x74\x8f\x4a\x97\x51\x8f\x98\x8e\x99\x9c\x9d\x58\x4a\xec\xe5\x34" +
+ "\x0d\x0a" +
+ "=yend size=174 crc32=7efccd8e\n";
+
+// That completely exotic encoding is only detected if there is no content type
+// on the message, which is usually the case in newsgroups. I hate you yencode!
+// var partYencText = new SyntheticPartLeaf("I am text! Woo!\n\n" + yencText, {
+// contentType: "",
+// charset: "",
+// format: "",
+// });
+
+var partUUText = new SyntheticPartLeaf(
+ "I am text! With uuencode... noes...\n\n" + uuText,
+ {
+ contentType: "",
+ charset: "",
+ format: "",
+ }
+);
+
+var tachText = {
+ filename: "bob.txt",
+ body: qpText,
+ charset: "utf-8",
+ encoding: "quoted-printable",
+};
+
+var tachInlineText = {
+ filename: "foo.txt",
+ body: qpText,
+ format: null,
+ charset: "utf-8",
+ encoding: "quoted-printable",
+ disposition: "inline",
+};
+
+// Images have a different behavior than other attachments: they are displayed
+// inline most of the time, so there are two different code paths that need to
+// enable streaming and byte counting to the JS mime emitter.
+
+var tachImage = {
+ filename: "bob.png",
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: b64Text,
+};
+
+var tachPdf = {
+ filename: "bob.pdf",
+ contentType: "application/pdf",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: b64Text,
+};
+
+var tachUU = {
+ filename: "john.doe",
+ contentType: "application/x-uuencode",
+ encoding: "uuencode",
+ charset: null,
+ format: null,
+ body: uuText,
+};
+
+var tachApplication = {
+ filename: "funky.funk",
+ contentType: "application/x-funky",
+ encoding: "base64",
+ body: b64Text,
+};
+
+var relImage = {
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ contentId: "part1.foo@bar.invalid",
+ body: b64Text,
+};
+
+var tachVCard = {
+ filename: "bob.vcf",
+ contentType: "text/vcard",
+ encoding: "7bit",
+ body: "begin:vcard\nfn:Bob\nend:vcard\n",
+};
+var partTachVCard = new SyntheticPartLeaf(tachVCard.body, tachVCard);
+
+new SyntheticPartLeaf(relImage.body, relImage);
+
+var messageInfos = [
+ {
+ name: "uuencode inline",
+ bodyPart: partUUText,
+ subject: "duh",
+ epsilon: 1,
+ checkTotalSize: false,
+ },
+ // Encoding type specific to newsgroups, not interested, gloda doesn't even
+ // treat this as an attachment (probably because gloda requires an attachment
+ // to have a content-type, which these yencoded parts don't have), but size IS
+ // counted properly nonetheless.
+ /* {
+ name: 'text/plain with yenc inline',
+ bodyPart: partYencText,
+ subject: "yEnc-Prefix: \"jane.doe\" 174 yEnc bytes - yEnc test (1)",
+ },*/
+ // Inline image, not interested either, gloda doesn't keep that as an
+ // attachment (probably a deliberate choice), size is NOT counted properly.
+ // (don't want to investigate, I doubt it's a useful information anyway.)
+ /* {
+ name: 'multipart/related',
+ bodyPart: new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ },*/
+ // This doesn't really make sense because it returns the length of the
+ // encoded blob without the envelope. Disabling as part of bug 711980.
+ /* {
+ name: '.eml attachment',
+ bodyPart: new SyntheticPartMultiMixed([
+ partHtml,
+ msgGen.makeMessage({ body: { body: qpText,
+ charset: "UTF-8",
+ encoding: "quoted-printable" } }),
+ ]),
+ epsilon: 1,
+ },*/
+ // All of the other common cases work fine.
+ {
+ name: 'all sorts of "real" attachments',
+ bodyPart: partHtml,
+ attachments: [
+ tachImage,
+ tachPdf,
+ tachUU,
+ tachApplication,
+ tachText,
+ tachInlineText,
+ ],
+ epsilon: 2,
+ },
+];
+
+add_task(async function test_message_attachments() {
+ for (let messageInfo of messageInfos) {
+ await message_attachments(messageInfo);
+ }
+});
+
+// A message whose content type libmime will not recognize; poking
+// _contentType directly reaches into MessageGenerator internals.
+var bogusMessage = msgGen.makeMessage({ body: { body: originalText } });
+bogusMessage._contentType = "woooooo"; // Breaking abstraction boundaries. Bad.
+
+var bogusMessageInfos = [
+ // In this case, the wooooo part is not an attachment, so its bytes won't be
+ // counted (size will end up being 0 bytes). We don't check the size, but
+ // check_bogus_parts makes sure we're able to come up with a resulting size
+ // for the MimeMessage.
+ //
+ // In that very case, since message M is an attachment, libmime will count M's
+ // bytes, and we could have MimeMessages prefer the size libmime tells them
+ // (when they have it), rather than recursively computing their sizes. I'm not
+ // sure changing jsmimeemitter.js is worth the trouble just for buggy
+ // messages...
+ {
+ name: ".eml attachment with inner MimeUnknown",
+ bodyPart: new SyntheticPartMultiMixed([
+ partHtml,
+ msgGen.makeMessage({
+ // <--- M
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiRelated([
+ partHtml,
+ new SyntheticPartLeaf(htmlText, { contentType: "woooooo" }),
+ ]),
+ ]),
+ }),
+ ]),
+ epsilon: 6,
+ checkSize: false,
+ },
+];
+
+add_task(async function test_bogus_messages(info) {
+ for (let bogusMessageInfo of bogusMessageInfos) {
+ await bogus_messages(bogusMessageInfo);
+ }
+});
+
+add_task(async function test_have_attachments() {
+ // The goal here is to explicitly check that these messages have attachments.
+ let number = 1;
+ let synMsg = msgGen.makeMessage({
+ name: "multipart/related",
+ bodyPart: new SyntheticPartMultiMixed([partHtml, partTachVCard]),
+ number,
+ });
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ Assert.equal(aMimeMsg.allUserAttachments.length, number);
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+});
+
+async function message_attachments(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ check_attachments(
+ aMimeMsg,
+ info.epsilon,
+ "checkTotalSize" in info ? info.checkTotalSize : undefined
+ );
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+}
+
+/**
+ * Verify that every user attachment reported by libmime has (approximately)
+ * the expected byte size, and -- unless checkTotalSize is explicitly false --
+ * that the whole-message size is the HTML body plus the attachment sizes.
+ *
+ * @param {MimeMessage} aMimeMsg - Result streamed by MsgHdrToMimeMessage.
+ * @param {number} epsilon - Allowed absolute per-attachment size discrepancy
+ *   (see the long comment below for why an exact match is impossible).
+ * @param {boolean} [checkTotalSize] - Skip the total-size check only when
+ *   explicitly false; undefined means "do check".
+ */
+function check_attachments(aMimeMsg, epsilon, checkTotalSize) {
+  if (aMimeMsg == null) {
+    do_throw("We really should have gotten a result!");
+  }
+
+  /* It is hard to get a byte count that's perfectly accurate. When composing
+   * the message, the MIME structure goes like this (for an encoded attachment):
+   *
+   * XXXXXXXXXX
+   * XXXXXXXXXX <-- encoded block
+   * XXXXXXXXXX
+   *            <-- newline
+   * --chopchop <-- MIME separator
+   *
+   * libmime counts bytes all the way up to the separator, which means it counts
+   * the bytes for the extra line. Since newlines in emails are \n, most of the
+   * time we get att.size = 174 instead of 173.
+   *
+   * The good news is, it's just a fixed extra cost. There are no issues with
+   * the inner contents of the attachment, you can add as many newlines as you
+   * want in it, Unix or Windows, the count won't get past the bounds.
+   */
+
+  Assert.ok(aMimeMsg.allUserAttachments.length > 0);
+
+  // Running total starts with the HTML body part's length.
+  let totalSize = htmlText.length;
+
+  for (let att of aMimeMsg.allUserAttachments) {
+    dump("*** Attachment now is " + att.name + " " + att.size + "\n");
+    Assert.ok(Math.abs(att.size - originalTextByteCount) <= epsilon);
+    totalSize += att.size;
+  }
+
+  // Undefined means true.
+  if (checkTotalSize !== false) {
+    dump(
+      "*** Total size comparison: " + totalSize + " vs " + aMimeMsg.size + "\n"
+    );
+    Assert.ok(Math.abs(aMimeMsg.size - totalSize) <= epsilon);
+  }
+}
+
+/**
+ * Validate the size bookkeeping for a deliberately malformed message: the
+ * reported size must at least be a number, and (when checkSize is set) must
+ * equal the HTML part plus the full attached message -- headers, separator
+ * newline, and body.
+ *
+ * @param {MimeMessage} aMimeMsg - Result streamed by MsgHdrToMimeMessage.
+ * @param {object} options
+ * @param {number} options.epsilon - Allowed absolute size discrepancy.
+ * @param {boolean} options.checkSize - Whether to verify the exact total.
+ */
+function check_bogus_parts(aMimeMsg, { epsilon, checkSize }) {
+  if (aMimeMsg == null) {
+    do_throw("We really should have gotten a result!");
+  }
+
+  // First make sure the size is computed properly (i.e. it is numeric at all).
+  let x = parseInt(aMimeMsg.size);
+  Assert.ok(!isNaN(x));
+
+  // The windows-registry-key contract ID only exists on Windows, where the
+  // generated message uses CRLF line endings; everywhere else it is LF.
+  let sep = "@mozilla.org/windows-registry-key;1" in Cc ? "\r\n" : "\n";
+
+  if (checkSize) {
+    let partSize = 0;
+    // The attachment, although a MimeUnknown part, is actually plain/text that
+    // contains the whole attached message, including headers. Count them.
+    for (let k in bogusMessage.headers) {
+      let v = bogusMessage.headers[k];
+      partSize += (k + ": " + v + sep).length;
+    }
+    // That's the newline between the headers and the message body.
+    partSize += sep.length;
+    // That's the message body.
+    partSize += originalTextByteCount;
+    // That's the total length that's to be returned by the MimeMessage abstraction.
+    let totalSize = htmlText.length + partSize;
+    dump(totalSize + " vs " + aMimeMsg.size + "\n");
+    Assert.ok(Math.abs(aMimeMsg.size - totalSize) <= epsilon);
+  }
+}
+
+async function bogus_messages(info) {
+ let synMsg = msgGen.makeMessage(info);
+ let synSet = new SyntheticMessageSet([synMsg]);
+ await messageInjection.addSetsToFolders(
+ [messageInjection.getInboxFolder()],
+ [synSet]
+ );
+
+ let msgHdr = synSet.getMsgHdr(0);
+
+ let promiseResolve;
+ let promise = new Promise(resolve => {
+ promiseResolve = resolve;
+ });
+ MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+ try {
+ check_bogus_parts(aMimeMsg, info);
+ promiseResolve();
+ } catch (e) {
+ do_throw(e);
+ }
+ });
+
+ await promise;
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js b/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js
new file mode 100644
index 0000000000..3380a0937e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_mime_emitter.js
@@ -0,0 +1,746 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * General testing of the JS Mime Emitter to make sure it doesn't choke on any
+ * scenarios.
+ *
+ * We do not test, but should consider testing:
+ * - MimeEncryptedPKCS7, whatever that translates to.
+ * - apple double
+ * - sun attachment
+ */
+
+/*
+ * Do not include GlodaTestHelper because we do not want gloda loaded and it
+ * adds a lot of runtime overhead which makes certain debugging strategies like
+ * using chronicle-recorder impractical.
+ */
+
+var { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+var {
+ MessageGenerator,
+ SyntheticPartLeaf,
+ SyntheticPartMultiAlternative,
+ SyntheticDegeneratePartEmpty,
+ SyntheticPartMultiSignedSMIME,
+ SyntheticPartMultiMixed,
+ SyntheticPartMultiSignedPGP,
+ SyntheticPartMultiRelated,
+ SyntheticPartMultiDigest,
+ SyntheticPartMultiParallel,
+ SyntheticMessageSet,
+} = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+// While we're at it, we'll also test the correctness of the GlodaAttachment
+// representation, esp. its "I just need the part information to rebuild the
+// URLs" claim.
+var { GlodaFundAttr } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaFundAttr.jsm"
+);
+
+const DEATH_TO_NEWLINE_TYPE_THINGS = /[\r\n]+/g;
+var msgGen = new MessageGenerator();
+var messageInjection;
+
+var partText = new SyntheticPartLeaf("I am text! Woo!");
+var partHtml = new SyntheticPartLeaf(
+ "<html><head></head><body>I am HTML! Woo! </body></html>",
+ {
+ contentType: "text/html",
+ }
+);
+var partEnriched = new SyntheticPartLeaf(
+ "<bold><italic>I am not a popular format! sad woo :(</italic></bold>",
+ {
+ contentType: "text/enriched",
+ }
+);
+var partAlternative = new SyntheticPartMultiAlternative([partText, partHtml]);
+var partMailingListFooter = new SyntheticPartLeaf("I am an annoying footer!");
+
+// We need to make sure a part that has content-disposition: attachment, even
+// though it doesn't have any filename, still is treated as an attachment.
+var tachNoFilename = {
+ body: "I like Bordeaux wine",
+ contentType: "text/plain",
+ disposition: "attachment",
+};
+
+// This is an external attachment, i.e. a mime part that basically says "go find
+// the attachment on disk, assuming it still exists, here's the path to the file
+// on disk". It turns out feed enclosures are presented in the exact same way,
+// so this covers this case as well.
+var tachExternal = {
+ body:
+ "You deleted an attachment from this message. The original MIME headers for the attachment were:\n" +
+ "Content-Type: image/png;\n" +
+ ' name="conversations-bug1.png"\n' +
+ "Content-Transfer-Encoding: base64\n" +
+ "Content-Disposition: attachment;\n" +
+ ' filename="conversations-bug1.png"',
+ contentType: "image/png",
+ filename: "conversations-bug1.png",
+ charset: null,
+ format: null,
+ encoding: "base64",
+ extraHeaders: {
+ "X-Mozilla-External-Attachment-URL": "file:///tmp/conversations-bug1.png",
+ "X-Mozilla-Altered": 'AttachmentDetached; date="Wed Aug 03 11:11:33 2011"',
+ },
+};
+var tachText = { filename: "bob.txt", body: "I like cheese!" };
+var partTachText = new SyntheticPartLeaf(tachText.body, tachText);
+var tachInlineText = {
+ filename: "foo.txt",
+ body: "Rock the mic",
+ format: null,
+ charset: null,
+ disposition: "inline",
+};
+new SyntheticPartLeaf(tachInlineText.body, tachInlineText);
+
+var tachImage = {
+ filename: "bob.png",
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ body: "YWJj\n",
+};
+var partTachImage = new SyntheticPartLeaf(tachImage.body, tachImage);
+
+var relImage = {
+ contentType: "image/png",
+ encoding: "base64",
+ charset: null,
+ format: null,
+ contentId: "part1.foo@bar.invalid",
+ body: "YWJj\n",
+};
+var partRelImage = new SyntheticPartLeaf(relImage.body, relImage);
+
+var tachVCard = {
+ filename: "bob.vcf",
+ contentType: "text/vcard",
+ encoding: "7bit",
+ body: "begin:vcard\nfn:Bob\nend:vcard\n",
+};
+var partTachVCard = new SyntheticPartLeaf(tachVCard.body, tachVCard);
+
+var tachApplication = {
+ filename: "funky.funk",
+ contentType: "application/x-funky",
+ body: "funk!",
+};
+var partTachApplication = new SyntheticPartLeaf(
+ tachApplication.body,
+ tachApplication
+);
+
+var partTachMessages = [msgGen.makeMessage(), msgGen.makeMessage()];
+
+var partEmpty = new SyntheticDegeneratePartEmpty();
+
+var messageInfos = [
+ // -- Simple
+ {
+ name: "text/plain",
+ bodyPart: partText,
+ },
+ {
+ name: "text/html",
+ bodyPart: partHtml,
+ },
+ // -- Simply ugly
+ {
+ name: "text/enriched",
+ bodyPart: partEnriched,
+ },
+ // -- Simple w/attachment
+ {
+ name: "text/plain w/text attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachText],
+ },
+ {
+ name: "text/plain w/image attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachImage],
+ },
+ {
+ name: "text/plain w/vcard attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachVCard],
+ },
+ {
+ name: "text/plain w/app attachment (=> multipart/mixed)",
+ bodyPart: partText,
+ attachments: [tachApplication],
+ },
+ {
+ name: "text/html w/text attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachText],
+ },
+ {
+ name: "text/html w/image attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachImage],
+ },
+ {
+ name: "text/html w/vcard attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachVCard],
+ },
+ {
+ name: "text/html w/app attachment (=> multipart/mixed)",
+ bodyPart: partHtml,
+ attachments: [tachApplication],
+ },
+ // -- Alternatives
+ {
+ name: "multipart/alternative: text/plain, text/html",
+ bodyPart: partAlternative,
+ },
+ {
+ name: "multipart/alternative plain/html w/text attachment",
+ bodyPart: partAlternative,
+ attachments: [tachText],
+ },
+ {
+ name: "multipart/alternative plain/html w/image attachment",
+ bodyPart: partAlternative,
+ attachments: [tachImage],
+ },
+ {
+ name: "multipart/alternative plain/html w/vcard attachment",
+ bodyPart: partAlternative,
+ attachments: [tachVCard],
+ },
+ {
+ name: "multipart/alternative plain/html w/app attachment",
+ bodyPart: partAlternative,
+ attachments: [tachApplication],
+ },
+ // -- S/MIME.
+ {
+ name: "S/MIME alternative",
+ bodyPart: new SyntheticPartMultiSignedSMIME(partAlternative),
+ },
+ {
+ name: "S/MIME alternative with text attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachText])
+ ),
+ },
+ {
+ name: "S/MIME alternative with image attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachImage])
+ ),
+ },
+ {
+ name: "S/MIME alternative with image attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachVCard])
+ ),
+ },
+ {
+ name: "S/MIME alternative with app attachment inside",
+ // We have to do the attachment packing ourselves on this one.
+ bodyPart: new SyntheticPartMultiSignedSMIME(
+ new SyntheticPartMultiMixed([partAlternative, partTachApplication])
+ ),
+ },
+ {
+ name: "S/MIME alternative wrapped in mailing list",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiSignedSMIME(partAlternative),
+ partMailingListFooter,
+ ]),
+ },
+ // -- PGP signature
+ // We mainly care that all the content-type parameters show up.
+ {
+ name: "PGP signed alternative",
+ bodyPart: new SyntheticPartMultiSignedPGP(partAlternative),
+ },
+ // -- Attached RFC822
+ {
+ // Not your average attachment, pack ourselves for now.
+ name: "attached rfc822",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachMessages[0],
+ ]),
+ },
+ // -- Multipart/related
+ {
+ name: "multipart/related",
+ bodyPart: new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ },
+ {
+ name: "multipart/related inside multipart/alternative",
+ bodyPart: new SyntheticPartMultiAlternative([
+ partText,
+ new SyntheticPartMultiRelated([partHtml, partRelImage]),
+ ]),
+ },
+ // -- Multipart/digest
+ {
+ name: "multipart/digest",
+ bodyPart: new SyntheticPartMultiDigest(partTachMessages.concat()),
+ },
+ // -- Multipart/parallel (allegedly the same as mixed)
+ {
+ name: "multipart/parallel",
+ bodyPart: new SyntheticPartMultiParallel([partText, partTachImage]),
+ },
+ // --- Previous bugs
+ // -- Bug 495057, text/enriched was being dumb
+ {
+ name: "text/enriched inside related",
+ bodyPart: new SyntheticPartMultiRelated([partEnriched]),
+ },
+ // -- Empty sections
+ // This was a crasher because the empty part made us try and close the
+ // child preceding the empty part a second time. The nested multipart led
+ // to the crash providing evidence of the double-close bug but there was
+ // nothing inherently nested-multipart-requiring to trigger the double-close
+ // bug.
+ {
+ name: "nested multipart with empty multipart section",
+ bodyPart: new SyntheticPartMultiMixed([
+ new SyntheticPartMultiRelated([partAlternative, partTachText]),
+ partEmpty,
+ ]),
+ },
+ {
+ name: "empty multipart section produces no child",
+ bodyPart: new SyntheticPartMultiMixed([partText, partEmpty, partTachText]),
+ },
+];
+
+// One-time setup: create the local-folder message injection helper and make
+// sure the gloda datastore is shut down cleanly when the test file finishes.
+add_setup(async function () {
+  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+  registerCleanupFunction(function () {
+    GlodaDatastore.shutdown();
+  });
+});
+
+// Stream every scenario in messageInfos through libmime and verify that the
+// resulting MimeMessage tree is equivalent to the synthetic part structure.
+add_task(async function test_stream_message() {
+  for (let messageInfo of messageInfos) {
+    await stream_message(messageInfo);
+  }
+});
+
+/**
+ * Stream a message whose plaintext body is ~60 KiB with the
+ * { saneBodySize: true } option and verify the emitted body part was capped:
+ * the check allows at most 20 KiB plus one 59-byte line of slop.
+ */
+add_task(async function test_sane_bodies() {
+  // 60 bytes long... (becomes 59 on the other side when \r is dropped)
+  let hugeString =
+    "don't know what you want but I can't stream it anymore...\r\n";
+  const powahsOfTwo = 10;
+  // Doubling 10 times turns the 60-byte line into 60 * 2^10 bytes.
+  for (let i = 0; i < powahsOfTwo; i++) {
+    hugeString = hugeString + hugeString;
+  }
+  // This will come out to be 60k, of course.
+  Assert.equal(hugeString.length, 60 * Math.pow(2, powahsOfTwo));
+
+  let synMsg = msgGen.makeMessage({
+    body: { body: hugeString, contentType: "text/plain" },
+  });
+  let synSet = new SyntheticMessageSet([synMsg]);
+  await messageInjection.addSetsToFolders(
+    [messageInjection.getInboxFolder()],
+    [synSet]
+  );
+
+  let msgHdr = synSet.getMsgHdr(0);
+
+  let promiseResolve;
+  let promise = new Promise(resolve => {
+    promiseResolve = resolve;
+  });
+
+  MsgHdrToMimeMessage(
+    msgHdr,
+    null,
+    function (aMsgHdr, aMimeMsg) {
+      let bodyPart = aMimeMsg.parts[0];
+      // (the \r gets gone, so it's only 59 per line)
+      if (bodyPart.body.length > 20 * 1024 + 59) {
+        do_throw(
+          "Mime body length is " +
+            bodyPart.body.length +
+            " bytes long but should not be!"
+        );
+      }
+      promiseResolve();
+    },
+    false,
+    { saneBodySize: true }
+  );
+
+  await promise;
+});
+
+// Additional testing for the correctness of allAttachments and
+// allUserAttachments representation
+
+var partTachNestedMessages = [
+ // Looks like the synthetic part generator appends the charset=ISO-8859-1 part
+ // all by itself. That allows us to create a non-UTF-8 subject, and ensure the
+ // resulting attachment name is indeed São Paulo.eml.
+ msgGen.makeMessage({
+ subject: "S" + String.fromCharCode(0xe3) + "o Paulo",
+ bodyPart: new SyntheticPartLeaf(
+ "<html><head></head><body>I am HTML! Woo! </body></html>",
+ {
+ contentType: "text/html",
+ }
+ ),
+ }),
+ msgGen.makeMessage({
+ attachments: [tachImage],
+ }),
+ msgGen.makeMessage({
+ attachments: [tachImage, tachApplication],
+ }),
+];
+
+var attMessagesParams = [
+ {
+ attachments: [tachNoFilename],
+ },
+ {
+ attachments: [tachExternal],
+ },
+ {
+ name: "attached rfc822",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachNestedMessages[0],
+ ]),
+ },
+ {
+ name: "attached rfc822 w. image inside",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachNestedMessages[1],
+ ]),
+ },
+ {
+ name: "attached x/funky + attached rfc822 w. (image + x/funky) inside",
+ bodyPart: new SyntheticPartMultiMixed([
+ partAlternative,
+ partTachApplication,
+ partTachNestedMessages[2],
+ ]),
+ },
+];
+
+var expectedAttachmentsInfo = [
+ {
+ allAttachmentsContentTypes: ["text/plain"],
+ allUserAttachmentsContentTypes: ["text/plain"],
+ },
+ {
+ allAttachmentsContentTypes: ["image/png"],
+ allUserAttachmentsContentTypes: ["image/png"],
+ },
+ {
+ allAttachmentsContentTypes: [],
+ allUserAttachmentsContentTypes: ["message/rfc822"],
+ firstAttachmentName: "S\u00e3o Paulo.eml",
+ },
+ {
+ allAttachmentsContentTypes: ["image/png"],
+ allUserAttachmentsContentTypes: ["message/rfc822"],
+ },
+ {
+ allAttachmentsContentTypes: [
+ "application/x-funky",
+ "image/png",
+ "application/x-funky",
+ ],
+ allUserAttachmentsContentTypes: ["application/x-funky", "message/rfc822"],
+ },
+];
+
+/**
+ * For each synthetic message in attMessagesParams, stream it through libmime
+ * and compare allAttachments / allUserAttachments (content types, and for the
+ * attached-rfc822 case the decoded attachment name) against the parallel
+ * entry in expectedAttachmentsInfo. Also verify GlodaFundAttr can rebuild
+ * each attachment URL from just the part information.
+ */
+add_task(async function test_attachments_correctness() {
+  for (let [i, params] of attMessagesParams.entries()) {
+    let synMsg = msgGen.makeMessage(params);
+    let synSet = new SyntheticMessageSet([synMsg]);
+    await messageInjection.addSetsToFolders(
+      [messageInjection.getInboxFolder()],
+      [synSet]
+    );
+
+    let msgHdr = synSet.getMsgHdr(0);
+
+    let promiseResolve;
+    let promise = new Promise(resolve => {
+      promiseResolve = resolve;
+    });
+
+    MsgHdrToMimeMessage(
+      msgHdr,
+      null,
+      function (aMsgHdr, aMimeMsg) {
+        try {
+          let expected = expectedAttachmentsInfo[i];
+          // Compare the attachment name code point by code point so a
+          // charset-decoding failure shows the exact mismatching position.
+          if ("firstAttachmentName" in expected) {
+            let att = aMimeMsg.allUserAttachments[0];
+            Assert.equal(att.name.length, expected.firstAttachmentName.length);
+            for (let j = 0; j < att.name.length; ++j) {
+              Assert.equal(
+                att.name.charCodeAt(j),
+                expected.firstAttachmentName.charCodeAt(j)
+              );
+            }
+          }
+
+          Assert.equal(
+            aMimeMsg.allAttachments.length,
+            expected.allAttachmentsContentTypes.length
+          );
+          for (let [j, att] of aMimeMsg.allAttachments.entries()) {
+            Assert.equal(
+              att.contentType,
+              expected.allAttachmentsContentTypes[j]
+            );
+          }
+
+          Assert.equal(
+            aMimeMsg.allUserAttachments.length,
+            expected.allUserAttachmentsContentTypes.length
+          );
+          for (let [j, att] of aMimeMsg.allUserAttachments.entries()) {
+            Assert.equal(
+              att.contentType,
+              expected.allUserAttachmentsContentTypes[j]
+            );
+          }
+
+          // Test the GlodaAttachment "I just need the part information to
+          // rebuild the URLs" claim.
+          for (let att of aMimeMsg.allUserAttachments) {
+            let uri = aMsgHdr.folder.getUriForMsg(aMsgHdr);
+            let glodaAttachment = GlodaFundAttr.glodaAttFromMimeAtt(
+              { folderMessageURI: uri },
+              att
+            );
+            // The GlodaAttachment appends the filename, which is not always
+            // present
+            Assert.ok(glodaAttachment.url.startsWith(att.url));
+          }
+        } catch (e) {
+          dump(aMimeMsg.prettyString() + "\n");
+          do_throw(e);
+        }
+
+        promiseResolve();
+      },
+      false
+    );
+
+    await promise;
+  }
+});
+
+var bogusMessage = msgGen.makeMessage({ body: { body: "whatever" } });
+bogusMessage._contentType = "woooooo"; // Breaking abstraction boundaries. Bad.
+
+var weirdMessageInfos = [
+ // This message has an unnamed part as an attachment (with
+ // Content-Disposition: inline and which is displayable inline). Previously,
+ // libmime would emit notifications for this to be treated as an attachment,
+ // name Part 1.2. Now it's not the case anymore, so we should ensure this
+ // message has no attachments.
+ {
+ name: "test message with part 1.2 attachment",
+ attachments: [
+ {
+ body: "attachment",
+ filename: "",
+ format: "",
+ },
+ ],
+ },
+];
+
+/**
+ * Stream the weird "part 1.2" message (unnamed inline-displayable part) and
+ * check that neither allUserAttachments nor allAttachments reports it as an
+ * attachment, per the current (changed) libmime behavior described above.
+ */
+add_task(async function test_part12_not_an_attachment() {
+  let synMsg = msgGen.makeMessage(weirdMessageInfos[0]);
+  let synSet = new SyntheticMessageSet([synMsg]);
+  await messageInjection.addSetsToFolders(
+    [messageInjection.getInboxFolder()],
+    [synSet]
+  );
+
+  let msgHdr = synSet.getMsgHdr(0);
+
+  let promiseResolve;
+  let promise = new Promise(resolve => {
+    promiseResolve = resolve;
+  });
+
+  MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+    try {
+      Assert.ok(aMimeMsg.allUserAttachments.length == 0);
+      Assert.ok(aMimeMsg.allAttachments.length == 0);
+    } catch (e) {
+      do_throw(e);
+    }
+    promiseResolve();
+  });
+
+  await promise;
+});
+
+/**
+ * Inject a synthetic message built from `info`, stream it through
+ * MsgHdrToMimeMessage, and verify the MIME representation against the
+ * synthetic source via verify_stream_message.
+ *
+ * @param info Message-generator options (bodyPart, attachments, ...).
+ */
+async function stream_message(info) {
+  let synMsg = msgGen.makeMessage(info);
+  let synSet = new SyntheticMessageSet([synMsg]);
+  await messageInjection.addSetsToFolders(
+    [messageInjection.getInboxFolder()],
+    [synSet]
+  );
+
+  let msgHdr = synSet.getMsgHdr(0);
+
+  let promiseResolve;
+  let promise = new Promise(resolve => {
+    promiseResolve = resolve;
+  });
+  MsgHdrToMimeMessage(msgHdr, null, function (aMsgHdr, aMimeMsg) {
+    verify_stream_message(info, synMsg, aMsgHdr, aMimeMsg);
+    promiseResolve();
+  });
+
+  await promise;
+}
+/**
+ * Verify the streamed results are what we wanted. Compares the synthetic
+ * body-part tree against the MIME part tree; on mismatch, dumps both
+ * representations before rethrowing so failures are diagnosable.
+ *
+ * @param aInfo    The messageInfos entry being checked (unused here, kept for
+ *                 signature symmetry with the caller).
+ * @param aSynMsg  The synthetic message that was injected.
+ * @param aMsgHdr  The nsIMsgDBHdr of the injected message.
+ * @param aMimeMsg The MimeMessage produced by streaming; null means failure.
+ */
+function verify_stream_message(aInfo, aSynMsg, aMsgHdr, aMimeMsg) {
+  if (aMimeMsg == null) {
+    do_throw("We really should have gotten a result!");
+  }
+  try {
+    // aMimeMsg is normalized; it only ever actually gets one child.
+    verify_body_part_equivalence(aSynMsg.bodyPart, aMimeMsg.parts[0]);
+  } catch (ex) {
+    dump("Something was wrong with the MIME rep!\n!!!!!!!!\n");
+    dump("Synthetic looks like:\n  " + aSynMsg.prettyString() + "\n\n");
+    dump(
+      "MIME looks like:  \n" + aMimeMsg.prettyString(true, "  ", true) + "\n\n"
+    );
+    do_throw(ex);
+  }
+
+  dump("Everything is just fine.\n");
+  dump("Synthetic looks like:\n  " + aSynMsg.prettyString() + "\n\n");
+  dump(
+    "MIME looks like:\n  " + aMimeMsg.prettyString(true, "  ", false) + "\n\n"
+  );
+}
+
+/**
+ * Applies any transformations to the synthetic body part that we would expect
+ * to happen to a message during its libmime journey. It may be better to
+ * just put the expected translations in the synthetic body part instead of
+ * trying to make this method do anything complex.
+ *
+ * @param aSynBodyPart A SyntheticPartLeaf whose body should be transformed.
+ * @returns {string} The trimmed body, with the text/enriched -> HTML
+ *     substitutions applied when applicable.
+ */
+function synTransformBody(aSynBodyPart) {
+  let text = aSynBodyPart.body.trim();
+  // This transforms things into HTML apparently.
+  if (aSynBodyPart._contentType == "text/enriched") {
+    // Our job here is just to transform just enough for our example above.
+    // We also could have provided a manual translation on the body part.
+    text = text.replace(/bold/g, "B").replace(/italic/g, "I");
+  }
+  return text;
+}
+
+/**
+ * Recursively compare a synthetic body part against the corresponding MIME
+ * part: content-type (and its header form for non-rfc822 parts), the body
+ * text for inline text parts, and all children. Degenerate empty synthetic
+ * parts and S/MIME signature parts are expected to be absent from the MIME
+ * side and are skipped with an offset compensator.
+ *
+ * @param aSynBodyPart The synthetic part (root of a subtree).
+ * @param aMimePart    The MIME part that should be equivalent to it.
+ */
+function verify_body_part_equivalence(aSynBodyPart, aMimePart) {
+  // The content-type devoid of parameters should match.
+  Assert.equal(aSynBodyPart._contentType, aMimePart.contentType);
+
+  // The header representation of the content-type should also match unless
+  // this is an rfc822 part, in which case it should only match for the
+  // actual contents.
+  if (aMimePart.contentType != "message/rfc822") {
+    Assert.equal(
+      aSynBodyPart.contentTypeHeaderValue.replace(
+        DEATH_TO_NEWLINE_TYPE_THINGS,
+        ""
+      ),
+      aMimePart.get("content-type").replace(DEATH_TO_NEWLINE_TYPE_THINGS, "")
+    );
+  }
+
+  // XXX body part checking will get brittle if we ever actually encode things!
+  if (
+    aSynBodyPart.body &&
+    !aSynBodyPart._filename &&
+    aSynBodyPart._contentType.startsWith("text/")
+  ) {
+    Assert.equal(
+      synTransformBody(aSynBodyPart),
+      aMimePart.body
+        .trim()
+        .replace(/\r/g, "")
+        // Remove stuff added by libmime for HTML parts.
+        .replace(
+          /[\n]*<meta http-equiv="content-type" content="text\/html; .*">[\n]*/g,
+          ""
+        )
+        .replace(/[\n]+<\/body>/, "</body>")
+    );
+  }
+  if (aSynBodyPart.parts) {
+    let iPart;
+    let realPartOffsetCompensator = 0;
+    for (iPart = 0; iPart < aSynBodyPart.parts.length; iPart++) {
+      let subSyn = aSynBodyPart.parts[iPart];
+      // If this is a degenerate empty, it should not produce output, so
+      // compensate for the offset drift and get on with our lives.
+      if (subSyn instanceof SyntheticDegeneratePartEmpty) {
+        realPartOffsetCompensator--;
+        continue;
+      }
+      let subMime = aMimePart.parts[iPart + realPartOffsetCompensator];
+      // Our special case is the signature, which libmime does not expose to us.
+      // Ignore! (Also, have our too-many-part checker below not trip on this.)
+      if (subSyn._contentType != "application/x-pkcs7-signature") {
+        if (subMime == null) {
+          do_throw(
+            "No MIME part matching " + subSyn.contentTypeHeaderValue + "\n"
+          );
+        }
+        verify_body_part_equivalence(subSyn, subMime);
+      }
+    }
+    // Only check if there are still more mime parts; don't check for a count
+    // mismatch (the PKCS case from above needs to be handled).
+    if (iPart < aMimePart.parts.length) {
+      do_throw("MIME part has more sub-parts than syn part?");
+    }
+  }
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_msg_search.js b/comm/mailnews/db/gloda/test/unit/test_msg_search.js
new file mode 100644
index 0000000000..2c8ea1c528
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_msg_search.js
@@ -0,0 +1,155 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test GlodaMsgSearcher.jsm, our heuristic-based fulltext search mechanism. Things we
+ * generally want to verify:
+ * - fulltext weighting by where the match happened works.
+ * - static interestingness impacts things appropriately.
+ *
+ * Our general strategy is to create two messages each with a unique string
+ * placed in controlled places and whatever intentional message manipulation
+ * is required to set things up. Then we query using a GlodaMsgSearcher with
+ * the limit set to 1. Only the message we expect should come back.
+ * Keep in mind in all tests that our underlying ranking mechanism is based on
+ * time so the date of each message is relevant but should not be significant
+ * because our score boost factor should always be well in excess of the one
+ * hour increment between messages.
+ *
+ * Previously, we relied on the general equivalence of the logic in
+ * test_query_core to our message search logic.
+ */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { GlodaMsgSearcher } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaMsgSearcher.jsm"
+);
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var uniqueCounter = 0;
+var messageInjection;
+
+// One-time setup: wire up local-folder message injection and initialize the
+// gloda test helper around it.
+add_setup(async function () {
+  let msgGen = new MessageGenerator();
+  messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+  glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Verify that the ranking function is using the weights as expected. We do
+ * not need to test all the permutations: a subject match should outrank a
+ * single body match for the same unique term.
+ */
+add_task(async function test_fulltext_weighting_by_column() {
+  let ustr = unique_string();
+  let [, subjSet, bodySet] = await messageInjection.makeFoldersWithSets(1, [
+    { count: 1, subject: ustr },
+    { count: 1, body: { body: ustr } },
+  ]);
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([subjSet, bodySet]));
+  // With the limit of 1, only the subject-match message should come back.
+  await asyncMsgSearcherExpect(ustr, subjSet);
+});
+
+/**
+ * A term mentioned 3 times in the body is worth more than twice in the subject.
+ * (This is because the subject saturates at one occurrence worth 2.0 and the
+ * body does not saturate until 10, each worth 1.0.)
+ */
+add_task(async function test_fulltext_weighting_saturation() {
+  let ustr = unique_string();
+  let double_ustr = ustr + " " + ustr;
+  let thrice_ustr = ustr + " " + ustr + " " + ustr;
+  let [, subjSet, bodySet] = await messageInjection.makeFoldersWithSets(1, [
+    { count: 1, subject: double_ustr },
+    { count: 1, body: { body: thrice_ustr } },
+  ]);
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([subjSet, bodySet]));
+  // The thrice-in-body message should outrank the twice-in-subject one.
+  await asyncMsgSearcherExpect(ustr, bodySet);
+});
+
+/**
+ * Use a starred message with the same fulltext match characteristics as another
+ * message to verify the preference goes the right way. Have the starred
+ * message be the older message for safety (the time-based ranking would
+ * otherwise favor the newer one anyway).
+ */
+add_task(async function test_static_interestingness_boost_works() {
+  let ustr = unique_string();
+  let [, starred, notStarred] = await messageInjection.makeFoldersWithSets(1, [
+    { count: 1, subject: ustr },
+    { count: 1, subject: ustr },
+  ]);
+  // Index in their native state.
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([starred, notStarred]));
+  // Star and index.
+  starred.setStarred(true);
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([starred]));
+  // Stars upon thars wins.
+  await asyncMsgSearcherExpect(ustr, starred);
+});
+
+/**
+ * Make sure that the query does not retrieve more than actually matches:
+ * with a limit of 2 but only one matching message, exactly one result must
+ * come back.
+ */
+add_task(async function test_joins_do_not_return_everybody() {
+  let ustr = unique_string();
+  let [, subjSet] = await messageInjection.makeFoldersWithSets(1, [
+    { count: 1, subject: ustr },
+  ]);
+  await waitForGlodaIndexer();
+  Assert.ok(...assertExpectedMessagesIndexed([subjSet]));
+  await asyncMsgSearcherExpect(ustr, subjSet, 2);
+});
+
+/**
+ * Generate strings like "aaaaa", "aabaa", "aacaa", etc. The idea with the
+ * "aa" suffix is to avoid the porter stemmer from doing something weird that
+ * collapses things.
+ *
+ * @returns {string} A five-letter string unique per call within this run
+ *     (driven by the module-level uniqueCounter, base-26 over three letters).
+ */
+function unique_string() {
+  let uval = uniqueCounter++;
+  let s =
+    String.fromCharCode(97 + Math.floor(uval / (26 * 26))) +
+    String.fromCharCode(97 + (Math.floor(uval / 26) % 26)) +
+    String.fromCharCode(97 + (uval % 26)) +
+    "aa";
+  return s;
+}
+
+/**
+ * Wrap the construction of a GlodaMsgSearcher with a limit of 1 and feed it to
+ * queryExpect. The limit is applied by setting the
+ * "mailnews.database.global.search.msg.limit" pref before building the query.
+ *
+ * @param aFulltextStr The fulltext query string which GlodaMsgSearcher will
+ *     parse.
+ * @param aExpectedSet The expected result set. Make sure that the size of the
+ *     set is consistent with aLimit.
+ * @param [aLimit=1] Maximum number of results the searcher may return.
+ *
+ * Use like so:
+ *  await asyncMsgSearcherExpect("foo bar", someSynMsgSet);
+ */
+async function asyncMsgSearcherExpect(aFulltextStr, aExpectedSet, aLimit) {
+  let limit = aLimit ? aLimit : 1;
+  Services.prefs.setIntPref("mailnews.database.global.search.msg.limit", limit);
+  let searcher = new GlodaMsgSearcher(null, aFulltextStr);
+  await queryExpect(searcher.buildFulltextQuery(), aExpectedSet);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js b/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js
new file mode 100644
index 0000000000..128720ee76
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_noun_mimetype.js
@@ -0,0 +1,144 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test noun_mimetype. Exists because I just changed its implementation and I'm
+ * afraid I may have damaged it and it's hard to tell, so ironically a unit test
+ * is the easiest solution. (Don't you hate it when the right thing to do is
+ * also the easy thing to do?)
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { waitForGlodaDBFlush } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelperFunctions.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { MimeTypeNoun } = ChromeUtils.import(
+ "resource:///modules/gloda/NounMimetype.jsm"
+);
+
+var passResults = [];
+var curPassResults;
+
+// One-time setup: message injection is only needed so the gloda test helper
+// can initialize; this file never actually injects messages.
+add_setup(async function () {
+  let msgGen = new MessageGenerator();
+  let messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+  glodaTestHelperInitialize(messageInjection);
+});
+
+// First pass: populate the MimeTypeNoun cache from scratch.
+add_task(async function test_new_pass_first_time() {
+  await new_pass();
+});
+
+add_task(function test_basics_first_time() {
+  test_basics();
+});
+
+/**
+ * Do two passes of test_basics making sure that persisted values really
+ * persist. new_pass() nukes the in-memory cache so the second pass must
+ * read the ids back from the database.
+ */
+add_task(async function test_new_pass_second_time() {
+  await new_pass();
+});
+
+add_task(function test_basics_second_time() {
+  test_basics();
+});
+
+// The ids resolved in every pass must match those of the first pass; this is
+// the actual persistence check.
+add_task(function verify_passes_are_the_same() {
+  var firstPassResults = passResults[0];
+  for (let iType = 0; iType < curPassResults.length; iType++) {
+    for (let iPass = 1; iPass < passResults.length; iPass++) {
+      Assert.equal(firstPassResults[iType].id, passResults[iPass][iType].id);
+    }
+  }
+});
+
+// Content-type parameters (e.g. charset) must be ignored when resolving a
+// mime type to its noun instance.
+add_task(function test_parameters() {
+  let plain = MimeTypeNoun.getMimeType("text/plain");
+  Assert.equal(plain, MimeTypeNoun.getMimeType('text/plain; charset="UTF-8"'));
+});
+
+/**
+ * Setup a new 'pass' by nuking the MimeTypeNoun's state if it has any. The
+ * goal here is to verify that the database persistence is actually working,
+ * and we can only do that if we convince it to nuke its authoritative 'cache'
+ * and grab a new copy.
+ */
+async function new_pass() {
+  // We have to nuke if it has already happened (i.e. any earlier pass pushed
+  // results); on the very first pass there is nothing to reset.
+  if (passResults.length) {
+    MimeTypeNoun._mimeTypes = {};
+    MimeTypeNoun._mimeTypesByID = {};
+    MimeTypeNoun._mimeTypeHighID = {};
+    MimeTypeNoun._highID = 0;
+    MimeTypeNoun._init();
+  }
+  curPassResults = [];
+  passResults.push(curPassResults);
+
+  // The mime type does some async stuff... make sure we don't advance until
+  // it is done with said stuff.
+  await waitForGlodaDBFlush();
+}
+
+/**
+ * Resolve a fixed set of mime types, record them in curPassResults (in a
+ * stable order so verify_passes_are_the_same can compare ids across passes),
+ * sanity-check the type/subType parsing, and verify the id block allocation
+ * groups image/* and text/* types into distinct blocks.
+ */
+function test_basics() {
+  let python;
+  // If this is not the first pass, check for python before other things to
+  // make sure we're not just relying on consistent logic rather than actual
+  // persistence.
+  // NOTE(review): new_pass() pushes curPassResults before this runs, so
+  // passResults.length is truthy even on the first pass; the `!passResults
+  // .length` branch below looks unreachable — confirm the intended condition.
+  if (passResults.length) {
+    python = MimeTypeNoun.getMimeType("text/x-python");
+  }
+
+  let jpeg = MimeTypeNoun.getMimeType("image/jpeg");
+  curPassResults.push(jpeg);
+
+  let png = MimeTypeNoun.getMimeType("image/png");
+  curPassResults.push(png);
+
+  let html = MimeTypeNoun.getMimeType("text/html");
+  curPassResults.push(html);
+
+  let plain = MimeTypeNoun.getMimeType("text/plain");
+  curPassResults.push(plain);
+
+  // If this is for the first time, check for python now (see above).
+  if (!passResults.length) {
+    python = MimeTypeNoun.getMimeType("text/x-python");
+  }
+  // But always add it to the results now, as we need consistent ordering
+  // since we use a list.
+  curPassResults.push(python);
+
+  // Sanity-checking the parsing.
+  Assert.equal(jpeg.type, "image");
+  Assert.equal(jpeg.subType, "jpeg");
+
+  // - Make sure the numeric trickiness for the block stuff is actually doing
+  //   the right thing!
+  const BLOCK_SIZE = MimeTypeNoun.TYPE_BLOCK_SIZE;
+  // Same blocks.
+  Assert.equal(
+    Math.floor(jpeg.id / BLOCK_SIZE),
+    Math.floor(png.id / BLOCK_SIZE)
+  );
+  Assert.equal(
+    Math.floor(html.id / BLOCK_SIZE),
+    Math.floor(plain.id / BLOCK_SIZE)
+  );
+  // Different blocks.
+  Assert.notEqual(
+    Math.floor(jpeg.id / BLOCK_SIZE),
+    Math.floor(html.id / BLOCK_SIZE)
+  );
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js b/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js
new file mode 100644
index 0000000000..e47eac75bc
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_nuke_migration.js
@@ -0,0 +1,62 @@
+/**
+ * Atypical gloda unit test that tests nuke migration. Gloda is not designed
+ * to be shutdown and started up again in the same process lifetime. It tries
+ * to be clever with caching accessors that clobber themselves out of existence
+ * which are hard to make come back to life, and probably other things.
+ *
+ * So what we do is create a global-messages-db.sqlite with an unacceptably
+ * old schema version before tickling gloda to startup. If gloda comes up
+ * with a database connection and it has the right schema version, we declare
+ * that gloda has successfully loaded. Our only historical screw-up here was
+ * very blatant (and was actually a result of trying to avoid complexity in
+ * the nuke path! oh the irony!) so we don't need to get all hardcore.
+ */
+
+/**
+ * The DB version to use. We set this as a non-const variable so that
+ * test_nuke_migration_from_future.js can change it.
+ */
+var BAD_DB_VERSION_TO_USE = 2;
+
+/**
+ * Synchronously create and close the out-of-date database. Because we are
+ * only using synchronous APIs, we know everything is in fact dead. GC being
+ * what it is, the various C++ objects will probably stay alive through the
+ * next test, but will be inert because we have closed the database.
+ */
+function make_out_of_date_database() {
+ // Get the path to our global database
+ var dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ dbFile.append("global-messages-db.sqlite");
+
+ // Create the database
+ var dbConnection = Services.storage.openUnsharedDatabase(dbFile);
+ dbConnection.schemaVersion = BAD_DB_VERSION_TO_USE;
+
+ // Close the database (will throw if there's a problem closing)
+ dbConnection.close();
+}
+
+// some copied and pasted preference setup from glodaTestHelper that is
+// appropriate here.
+// yes to indexing
+Services.prefs.setBoolPref("mailnews.database.global.indexer.enabled", true);
+// no to a sweep we don't control
+Services.prefs.setBoolPref(
+ "mailnews.database.global.indexer.perform_initial_sweep",
+ false
+);
+
+function run_test() {
+ // - make the old database
+ make_out_of_date_database();
+
+ // - tickle gloda
+ // GlodaPublic.jsm loads Gloda.jsm which self-initializes and initializes the datastore
+ ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+ let { GlodaDatastore } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaDatastore.jsm"
+ );
+
+ Assert.notEqual(GlodaDatastore.asyncConnection, null);
+}
diff --git a/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js b/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js
new file mode 100644
index 0000000000..f60c1dd29e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_nuke_migration_from_future.js
@@ -0,0 +1,12 @@
+/**
+ * There are actually two ways the nuke migration can be invoked. From
+ * a database too far from the future, and too far from the past. This
+ * one is the future one. We must keep ourselves safe from time-traveling
+ * grandchildren!
+ */
+
+/* import-globals-from test_nuke_migration.js */
+load("test_nuke_migration.js");
+
+// pick something so far forward it will never get used!
+BAD_DB_VERSION_TO_USE = 100000000;
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_core.js b/comm/mailnews/db/gloda/test/unit/test_query_core.js
new file mode 100644
index 0000000000..0849a62d50
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_core.js
@@ -0,0 +1,658 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test the mechanics our query functionality. Tests in this file are intended
+ * to cover extreme boundary cases and things that are just unlikely to happen
+ * in reasonable message use-cases. (Which is to say, it could be hard to
+ * formulate a set of synthetic messages that result in the situation we want
+ * to test for.)
+ */
+
+var { prepareIndexerForTesting } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { queryExpect } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaQueryHelper.jsm"
+);
+var { Gloda } = ChromeUtils.import("resource:///modules/gloda/GlodaPublic.jsm");
+var { GlodaConstants } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaConstants.jsm"
+);
+var { GlodaIndexer, IndexingJob } = ChromeUtils.import(
+ "resource:///modules/gloda/GlodaIndexer.jsm"
+);
+
+/* ===== Test Noun ===== */
+/*
+ * Introduce a simple noun type for our testing so that we can avoid having to
+ * deal with the semantics of messages/friends and all their complexity.
+ */
+
+var WidgetProvider = {
+ providerName: "widget",
+ *process() {
+ yield GlodaConstants.kWorkDone;
+ },
+};
+
+add_setup(function () {
+ // Don't initialize the index message state
+ prepareIndexerForTesting();
+ GlodaIndexer.registerIndexer(GenericIndexer);
+ Gloda.addIndexerListener(genericIndexerCallback);
+});
+
+var WidgetNoun;
+add_task(function setup_test_noun_and_attributes() {
+ // --- noun
+ WidgetNoun = Gloda.defineNoun({
+ name: "widget",
+ clazz: Widget,
+ allowsArbitraryAttrs: true,
+ // It is vitally important to our correctness that we allow caching
+ // otherwise our in-memory representations will not be canonical and the db
+ // will load some. Or we could add things to collections as we index them.
+ cache: true,
+ cacheCost: 32,
+ schema: {
+ columns: [
+ ["id", "INTEGER PRIMARY KEY"],
+ ["intCol", "NUMBER", "inum"],
+ // datePRTime is special and creates a Date object.
+ ["dateCol", "NUMBER", "datePRTime"],
+ ["strCol", "STRING", "str"],
+ ["notabilityCol", "NUMBER", "notability"],
+ ["textOne", "STRING", "text1"],
+ ["textTwo", "STRING", "text2"],
+ ],
+ indices: {
+ intCol: ["intCol"],
+ strCol: ["strCol"],
+ },
+ fulltextColumns: [
+ ["fulltextOne", "TEXT", "text1"],
+ ["fulltextTwo", "TEXT", "text2"],
+ ],
+ genericAttributes: true,
+ },
+ });
+
+ const EXT_NAME = "test";
+
+ // --- special (on-row) attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "inum",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "intCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "date",
+ singular: true,
+ special: GlodaConstants.kSpecialColumn,
+ specialColumnName: "dateCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_DATE,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "str",
+ singular: true,
+ special: GlodaConstants.kSpecialString,
+ specialColumnName: "strCol",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_STRING,
+ canQuery: true,
+ });
+
+ // --- fulltext attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "text1",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: "fulltextOne",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "text2",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: "fulltextTwo",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "fulltextAll",
+ singular: true,
+ special: GlodaConstants.kSpecialFulltext,
+ specialColumnName: WidgetNoun.tableName + "Text",
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_FULLTEXT,
+ canQuery: true,
+ });
+
+ // --- external (attribute-storage) attributes
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "singleIntAttr",
+ singular: true,
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+
+ Gloda.defineAttribute({
+ provider: WidgetProvider,
+ extensionName: EXT_NAME,
+ attributeType: GlodaConstants.kAttrFundamental,
+ attributeName: "multiIntAttr",
+ singular: false,
+ emptySetIsSignificant: true,
+ subjectNouns: [WidgetNoun.id],
+ objectNoun: GlodaConstants.NOUN_NUMBER,
+ canQuery: true,
+ });
+});
+
+/* ===== Tests ===== */
+
+const ALPHABET = "abcdefghijklmnopqrstuvwxyz";
+add_task(async function test_lots_of_string_constraints() {
+ let stringConstraints = [];
+ for (let i = 0; i < 2049; i++) {
+ stringConstraints.push(
+ ALPHABET[Math.floor(i / (ALPHABET.length * 2)) % ALPHABET.length] +
+ ALPHABET[Math.floor(i / ALPHABET.length) % ALPHABET.length] +
+ ALPHABET[i % ALPHABET.length] +
+ // Throw in something that will explode if not quoted
+ // and use an uneven number of things so if we fail
+ // to quote it won't get quietly eaten.
+ "'\""
+ );
+ }
+
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.str.apply(query, stringConstraints);
+
+ await queryExpect(query, []);
+});
+
+/* === Query === */
+
+/**
+ * Use a counter so that each test can have its own unique value for intCol so
+ * that it can use that as a constraint. Otherwise we would need to purge
+ * between every test. That's not an unreasonable alternative, but this works.
+ * Every test should increment this before using it.
+ */
+var testUnique = 100;
+
+/**
+ * Widgets with multiIntAttr populated with one or more values.
+ */
+var nonSingularWidgets;
+/**
+ * Widgets with multiIntAttr unpopulated.
+ */
+var singularWidgets;
+
+add_task(async function setup_non_singular_values() {
+ testUnique++;
+ let origin = new Date("2007/01/01");
+ nonSingularWidgets = [
+ new Widget(testUnique, origin, "ns1", 0, "", ""),
+ new Widget(testUnique, origin, "ns2", 0, "", ""),
+ ];
+ singularWidgets = [
+ new Widget(testUnique, origin, "s1", 0, "", ""),
+ new Widget(testUnique, origin, "s2", 0, "", ""),
+ ];
+ nonSingularWidgets[0].multiIntAttr = [1, 2];
+ nonSingularWidgets[1].multiIntAttr = [3];
+ singularWidgets[0].multiIntAttr = [];
+ // And don't bother setting it on singularWidgets[1].
+
+ GenericIndexer.indexObjects(nonSingularWidgets.concat(singularWidgets));
+ await promiseGenericIndexerCallback;
+
+ // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+});
+
+add_task(async function test_query_has_value_for_non_singular() {
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr();
+ await queryExpect(query, nonSingularWidgets);
+});
+
+/**
+ * We should find the one singular object where we set the multiIntAttr to an
+ * empty set. We don't find the one without the attribute since that's
+ * actually something different.
+ * We also want to test that re-indexing properly adds/removes the attribute
+ * so change the object and make sure everything happens correctly.
+ *
+ * @tests gloda.datastore.sqlgen.kConstraintIn.emptySet
+ * @tests gloda.query.test.kConstraintIn.emptySet
+ */
+add_task(async function test_empty_set_logic() {
+ // - Initial query based on the setup previously.
+ dump("Initial index case\n");
+ let query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr(null);
+ await queryExpect(query, [singularWidgets[0]]);
+
+ // - Make one of the non-singulars move to empty and move the guy who matched
+ // to no longer match.
+ dump("Incremental index case\n");
+ nonSingularWidgets[0].multiIntAttr = [];
+ singularWidgets[0].multiIntAttr = [4, 5];
+
+ GenericIndexer.indexObjects([nonSingularWidgets[0], singularWidgets[0]]);
+ await promiseGenericIndexerCallback;
+
+  // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+
+ query = Gloda.newQuery(WidgetNoun.id);
+ query.inum(testUnique);
+ query.multiIntAttr(null);
+ await queryExpect(query, [nonSingularWidgets[0]]);
+
+ // Make sure that the query doesn't explode when it has to handle a case
+ // that's not supposed to match.
+ Assert.ok(!query.test(singularWidgets[0]));
+});
+
+/* === Search === */
+/*
+ * The conceit of our search is that more recent messages are better than older
+ * messages. But at the same time, we care about some messages more than
+ * others (in general), and we care about messages that match search terms
+ * more strongly too. So we introduce a general 'score' heuristic which we
+ * then apply to message timestamps to make them appear more recent. We
+ * then order by this 'date score' hybrid, which we dub "dascore". Such a
+ * flattening heuristic is over-simple, but believed to be sufficient to
+ * generally get us the messages we want. Post-processing based can then
+ * be more multi-dimensional and what not, but that is beyond the scope of
+ * this unit test.
+ */
+
+/**
+ * How much time boost should a 'score point' amount to? The authoritative,
+ * incontrovertible answer, across all time and space, is a week.
+ * Gloda and storage like to store things as PRTime and so we do it too,
+ * even though milliseconds are the actual granularity of JS Date instances.
+ */
+const SCORE_TIMESTAMP_FACTOR = 1000 * 1000 * 60 * 60 * 24 * 7;
+
+/**
+ * How many score points for each fulltext match?
+ */
+const SCORE_FOR_FULLTEXT_MATCH = 1;
+
+/**
+ * Roughly how many characters are in each offset match.
+ */
+const OFFSET_CHARS_PER_FULLTEXT_MATCH = 8;
+
+var fooWidgets = null;
+var barBazWidgets = null;
+
+add_task(async function setup_search_ranking_idiom() {
+ // --- Build some widgets for testing.
+ // Use inum to represent the expected result sequence
+ // Setup a base date.
+ let origin = new Date("2008/01/01");
+ let daymore = new Date("2008/01/02");
+ let monthmore = new Date("2008/02/01");
+ fooWidgets = [
+ // -- Setup the term "foo" to do frequency tests.
+ new Widget(5, origin, "", 0, "", "foo"),
+ new Widget(4, origin, "", 0, "", "foo foo"),
+ new Widget(3, origin, "", 0, "foo", "foo foo"),
+ new Widget(2, origin, "", 0, "foo foo", "foo foo"),
+ new Widget(1, origin, "", 0, "foo foo", "foo foo foo"),
+ new Widget(0, origin, "", 0, "foo foo foo", "foo foo foo"),
+ ];
+ barBazWidgets = [
+ // -- Setup score and matches to boost older messages over newer messages.
+ new Widget(7, origin, "", 0, "", "bar"), // score boost: 1 + date: 0
+ new Widget(6, daymore, "", 0, "", "bar"), // 1 + 0+
+ new Widget(5, origin, "", 1, "", "bar"), // 2 + 0
+ new Widget(4, daymore, "", 0, "bar", "bar"), // 2 + 0+
+ new Widget(3, origin, "", 1, "bar", "baz"), // 3 + 0
+ new Widget(2, monthmore, "", 0, "", "bar"), // 1 + 4
+ new Widget(1, origin, "", 0, "bar baz", "bar baz bar bar"), // 6 + 0
+ new Widget(0, origin, "", 1, "bar baz", "bar baz bar bar"), // 7 + 0
+ ];
+
+ GenericIndexer.indexObjects(fooWidgets.concat(barBazWidgets));
+ await promiseGenericIndexerCallback;
+
+ // Reset promise.
+ promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+ });
+});
+
+// Add one because the last snippet shouldn't have a trailing space.
+const OFFSET_SCORE_SQL_SNIPPET =
+ "(((length(osets) + 1) / " +
+ OFFSET_CHARS_PER_FULLTEXT_MATCH +
+ ") * " +
+ SCORE_FOR_FULLTEXT_MATCH +
+ ")";
+
+const SCORE_SQL_SNIPPET = "(" + OFFSET_SCORE_SQL_SNIPPET + " + notabilityCol)";
+
+const DASCORE_SQL_SNIPPET =
+ "((" + SCORE_SQL_SNIPPET + " * " + SCORE_TIMESTAMP_FACTOR + ") + dateCol)";
+
+const WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL =
+ "SELECT ext_widget.*, offsets(ext_widgetText) AS osets " +
+ "FROM ext_widget, ext_widgetText WHERE ext_widgetText MATCH ?" +
+ " AND ext_widget.id == ext_widgetText.docid";
+
+/**
+ * Used by queryExpect to verify
+ */
+function verify_widget_order_and_stashing(
+ aZeroBasedIndex,
+ aWidget,
+ aCollection
+) {
+ Assert.equal(aZeroBasedIndex, aWidget.inum);
+ if (
+ !aCollection.stashedColumns[aWidget.id] ||
+ !aCollection.stashedColumns[aWidget.id].length
+ ) {
+ do_throw("no stashed information for widget: " + aWidget);
+ }
+}
+
+/**
+ * Test the fundamentals of the search ranking idiom we use elsewhere. This
+ * is primarily a simplified
+ */
+add_task(async function test_search_ranking_idiom_offsets() {
+ let query = Gloda.newQuery(WidgetNoun.id, {
+ explicitSQL: WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL,
+ // osets becomes 0-based column number 7.
+ // dascore becomes 0-based column number 8.
+ outerWrapColumns: [DASCORE_SQL_SNIPPET + " AS dascore"],
+ // Save our extra columns for analysis and debugging.
+ stashColumns: [7, 8],
+ });
+ query.fulltextAll("foo");
+ query.orderBy("-dascore");
+ await queryExpect(
+ query,
+ fooWidgets,
+ null,
+ null,
+ verify_widget_order_and_stashing
+ );
+});
+
+add_task(async function test_search_ranking_idiom_score() {
+ let query = Gloda.newQuery(WidgetNoun.id, {
+ explicitSQL: WIDGET_FULLTEXT_QUERY_EXPLICIT_SQL,
+ // osets becomes 0-based column number 7
+ // dascore becomes 0-based column number 8
+ outerWrapColumns: [
+ DASCORE_SQL_SNIPPET + " AS dascore",
+ SCORE_SQL_SNIPPET + " AS dabore",
+ "dateCol",
+ ],
+ // Save our extra columns for analysis and debugging.
+ stashColumns: [7, 8, 9, 10],
+ });
+ query.fulltextAll("bar OR baz");
+ query.orderBy("-dascore");
+ await queryExpect(
+ query,
+ barBazWidgets,
+ null,
+ null,
+ verify_widget_order_and_stashing
+ );
+});
+
+/**
+ * Generic indexing mechanism; does nothing special, just uses
+ * Gloda.grokNounItem. Call GenericIndexer.indexObjects() to queue
+ * your objects for initial indexing.
+ */
+var GenericIndexer = {
+ _log: console.createInstance({
+ prefix: "gloda.test",
+ maxLogLevel: "Warn",
+ maxLogLevelPref: "gloda.test.loglevel",
+ }),
+ /* public interface */
+ name: "generic_indexer",
+ enable() {
+ this.enabled = true;
+ },
+ disable() {
+ this.enabled = false;
+ },
+ get workers() {
+ return [
+ [
+ "generic",
+ {
+ worker: this._worker_index_generic,
+ },
+ ],
+ ];
+ },
+ initialSweep() {},
+ /* mock interface */
+ enabled: false,
+ initialSweepCalled: false,
+ indexObjects(aObjects) {
+ indexingInProgress = true;
+ this._log.debug(
+ "enqueuing " +
+ aObjects.length +
+ " generic objects with id: " +
+ aObjects[0].NOUN_ID
+ );
+ GlodaIndexer.indexJob(new IndexingJob("generic", null, aObjects.concat()));
+ },
+ /* implementation */
+ *_worker_index_generic(aJob, aCallbackHandle) {
+ this._log.debug(
+ "Beginning indexing " + aJob.items.length + " generic items"
+ );
+ for (let item of aJob.items) {
+ this._log.debug("Indexing: " + item);
+ yield aCallbackHandle.pushAndGo(
+ Gloda.grokNounItem(
+ item,
+ {},
+ item.id === undefined,
+ item.id === undefined,
+ aCallbackHandle,
+ item.NOUN_DEF.cache
+ )
+ );
+ item._stash();
+ }
+
+ yield GlodaConstants.kWorkDone;
+ this._log.debug("Done indexing");
+ },
+};
+
+var indexingInProgress = false;
+var promiseGenericIndexerCallbackResolve;
+var promiseGenericIndexerCallback = new Promise(resolve => {
+ promiseGenericIndexerCallbackResolve = resolve;
+});
+function genericIndexerCallback(aStatus) {
+ // If indexingInProgress is false, we've received the synthetic
+ // notification, so ignore it.
+ if (indexingInProgress && aStatus == GlodaConstants.kIndexerIdle) {
+ indexingInProgress = false;
+ promiseGenericIndexerCallbackResolve();
+ }
+}
+
+/**
+ * Simple test object.
+ *
+ * Has some tricks for gloda indexing to deal with gloda's general belief that
+ * things are immutable. When we get indexed we stash all of our attributes
+ * at that time in _indexStash. Then when we get cloned we propagate our
+ * current attributes over to the cloned object and restore _indexStash. This
+ * sets things up the way gloda expects them as long as we never de-persist
+ * from the db.
+ */
+function Widget(inum, date, str, notability, text1, text2) {
+ this._id = undefined;
+ this._inum = inum;
+ this._date = date;
+ this._str = str;
+ this._notability = notability;
+ this._text1 = text1;
+ this._text2 = text2;
+
+ this._indexStash = null;
+ this._restoreStash = null;
+}
+Widget.prototype = {
+ _clone() {
+ let clonus = new Widget(
+ this._inum,
+ this._date,
+ this._str,
+ this._notability,
+ this._text1,
+ this._text2
+ );
+ clonus._id = this._id;
+ clonus._iAmAClone = true;
+
+ for (let key of Object.keys(this)) {
+ let value = this[key];
+ if (key.startsWith("_")) {
+ continue;
+ }
+ clonus[key] = value;
+ if (key in this._indexStash) {
+ this[key] = this._indexStash[key];
+ }
+ }
+
+ return clonus;
+ },
+ _stash() {
+ this._indexStash = {};
+ for (let key of Object.keys(this)) {
+ let value = this[key];
+ if (key[0].startsWith("_")) {
+ continue;
+ }
+ this._indexStash[key] = value;
+ }
+ },
+
+ get id() {
+ return this._id;
+ },
+ set id(aVal) {
+ this._id = aVal;
+ },
+
+ // Gloda's attribute idiom demands that row attributes be prefixed with a '_'
+ // (Because Gloda.grokNounItem detects attributes by just walking.). This
+ // could be resolved by having the special attributes moot these dudes, but
+ // that's not how things are right now.
+ get inum() {
+ return this._inum;
+ },
+ set inum(aVal) {
+ this._inum = aVal;
+ },
+ get date() {
+ return this._date;
+ },
+ set date(aVal) {
+ this._date = aVal;
+ },
+
+ get datePRTime() {
+ return this._date.valueOf() * 1000;
+ },
+ // We need a special setter to convert back from PRTime to an actual
+ // date object.
+ set datePRTime(aVal) {
+ this._date = new Date(aVal / 1000);
+ },
+
+ get str() {
+ return this._str;
+ },
+ set str(aVal) {
+ this._str = aVal;
+ },
+ get notability() {
+ return this._notability;
+ },
+ set notability(aVal) {
+ this._notability = aVal;
+ },
+ get text1() {
+ return this._text1;
+ },
+ set text1(aVal) {
+ this._text1 = aVal;
+ },
+ get text2() {
+ return this._text2;
+ },
+ set text2(aVal) {
+ this._text2 = aVal;
+ },
+
+ toString() {
+ return "" + this.id;
+ },
+};
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js
new file mode 100644
index 0000000000..93b4a9ec34
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_offline.js
@@ -0,0 +1,37 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that were offline before they were
+ * indexed.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: true },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js
new file mode 100644
index 0000000000..368252a5e6
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online.js
@@ -0,0 +1,38 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that aren't offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+expectFulltextResults = false;
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js
new file mode 100644
index 0000000000..0788c15ff7
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_imap_online_to_offline.js
@@ -0,0 +1,40 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for IMAP messages that were indexed, then made available
+ * offline.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+// We want to go offline once the messages have already been indexed online.
+goOffline = true;
+
+add_setup(function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection(
+ { mode: "imap", offline: false },
+ msgGen
+ );
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js b/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js
new file mode 100644
index 0000000000..c88fe1aa4e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_query_messages_local.js
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test query support for local messages.
+ */
+
+var { glodaTestHelperInitialize } = ChromeUtils.import(
+ "resource://testing-common/gloda/GlodaTestHelper.jsm"
+);
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
+/* import-globals-from base_query_messages.js */
+load("base_query_messages.js");
+
+add_setup(async function () {
+ msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+base_query_messages_tests.forEach(test => {
+ add_task(test);
+});
diff --git a/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js b/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js
new file mode 100644
index 0000000000..efe489974e
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_smime_mimemsg_representation.js
@@ -0,0 +1,894 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Test that S/MIME messages are properly displayed and that the MimeMessage
+ * representation is correct.
+ */
+
+var { FileUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/FileUtils.sys.mjs"
+);
+var { MessageGenerator, SyntheticMessageSet } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+var { MsgHdrToMimeMessage } = ChromeUtils.import(
+ "resource:///modules/gloda/MimeMessage.jsm"
+);
+
+var msgGen;
+var messageInjection;
+
/**
 * Prepare NSS for the S/MIME tests.
 *
 * Copies pre-built NSS certificate/key database files into the test profile
 * directory, then touches PSM so that NSS gets initialized against them.
 */
function initNSS() {
  // Copy the NSS database files over.
  const profileDir = FileUtils.getDir("ProfD", []);
  const sourceDir = do_get_file("../../../../data/db-tinderbox-invalid");
  for (const dbName of ["cert9.db", "key4.db"]) {
    const dbFile = sourceDir.clone();
    dbFile.append(dbName);
    dbFile.copyTo(profileDir, dbName);
  }

  // Getting the PSM service forces NSS initialization.
  Cc["@mozilla.org/psm;1"].getService(Ci.nsISupports);
}
+
// Initialize NSS first (the encrypted test message needs the copied key
// databases), then set up local-folder message injection.
add_setup(async () => {
  initNSS();
  msgGen = new MessageGenerator();
  const injectionConfig = { mode: "local" };
  messageInjection = new MessageInjection(injectionConfig, msgGen);
});
+
/**
 * Verify the MimeMessage representation of an S/MIME encrypted message.
 *
 * By default MsgHdrToMimeMessage must treat the encrypted payload as an
 * opaque MimeUnknown container (no visible sub-parts, no attachments); when
 * called with `examineEncryptedParts: true` it must expose the inner MIME
 * tree, including the image/jpeg attachment.
 */
add_task(async function test_smime_mimemsg() {
  let msg = msgGen.makeEncryptedSMimeMessage({
    from: ["Tinderbox", "tinderbox@foo.invalid"],
    to: [["Tinderbox", "tinderbox@foo.invalid"]],
    subject: "Albertine disparue (La Fugitive)",
    body: { body: encrypted_blurb },
  });
  let synSet = new SyntheticMessageSet([msg]);
  await messageInjection.addSetsToFolders(
    [messageInjection.getInboxFolder()],
    [synSet]
  );

  let msgHdr = synSet.getMsgHdr(0);

  // Promisify the callback-style MsgHdrToMimeMessage so each check below can
  // simply await the parsed MimeMessage instead of hand-wiring and resetting
  // a deferred promise for every call.
  function getMimeMessage(options) {
    return new Promise(resolve => {
      MsgHdrToMimeMessage(
        msgHdr,
        null,
        (aMsgHdr, aMimeMsg) => resolve(aMimeMsg),
        true,
        options
      );
    });
  }

  // Make sure by default, MimeMessages do not include encrypted parts.
  let mimeMsg = await getMimeMessage({});
  // First make sure the MIME structure is as we expect it to be.
  Assert.equal(mimeMsg.parts.length, 1);
  // Then, make sure the MimeUnknown part there has the encrypted flag.
  Assert.ok(mimeMsg.parts[0].isEncrypted);
  // And that we can't "see through" the MimeUnknown container.
  Assert.equal(mimeMsg.parts[0].parts.length, 0);
  // Make sure we can't see the attachment.
  Assert.equal(mimeMsg.allUserAttachments.length, 0);

  // Now what about we specifically ask to "see" the encrypted parts?
  mimeMsg = await getMimeMessage({ examineEncryptedParts: true });
  // First make sure the MIME structure is as we expect it to be.
  Assert.equal(mimeMsg.parts.length, 1);
  // Then, make sure the MimeUnknown part there has the encrypted flag.
  Assert.ok(mimeMsg.parts[0].isEncrypted);
  // And that we can "see through" the MimeUnknown container.
  Assert.equal(mimeMsg.parts[0].parts.length, 1);
  Assert.equal(mimeMsg.parts[0].parts[0].parts.length, 1);
  Assert.equal(mimeMsg.parts[0].parts[0].parts[0].parts.length, 2);
  // Make sure we can see the attachment.
  Assert.equal(mimeMsg.allUserAttachments.length, 1);
  Assert.equal(mimeMsg.allUserAttachments[0].contentType, "image/jpeg");
});
+
+var encrypted_blurb =
+ "MIAGCSqGSIb3DQEHA6CAMIACAQAxgf8wgfwCAQAwZTBgMQswCQYDVQQGEwJTVzETMBEGA1UE\n" +
+ "CBMKVGVzdCBTdGF0ZTERMA8GA1UEBxMIVGVzdCBMb2MxETAPBgNVBAoTCFRlc3QgT3JnMRYw\n" +
+ "FAYDVQQDEw1TTUlNRSBUZXN0IENBAgEFMA0GCSqGSIb3DQEBAQUABIGAJ6gUwBMmtiIIF4ii\n" +
+ "SzkMP5vh6kCztLuF7yy/To27ZUlNOjBZZRuiwcQHiZx0aZXVhtAZcLgQKRcDwwGGd0xGvBIW\n" +
+ "dHO/gJlVX0frePMALZx/NIUtbN1cjtwDAezcTmTshiosYmlzzpPnTkgPDNDezxbN4bdBfWRu\n" +
+ "vA7aVTWGn/YwgAYJKoZIhvcNAQcBMBQGCCqGSIb3DQMHBAgV77BzGUrfiqCABIIgAGLhaWnP\n" +
+ "VOgC/TGjXhAk+kjv2g4Oi8qJIJ9CWXGnBjqMAAkTgUBspqc6rxY23gIrnYbLxX3Ik+YM9je0\n" +
+ "XP/ECiY44C8lGTKIOYAE5S58w9HCrtHn3tWid8h9Yc4TJrlJ8DRv0AnpOIsob1oqkDGuIjSt\n" +
+ "sKkr2tR8t632ARoEqyWdoHIVdKVkCE7gIICHn03e/0e5Aye4dLWttTNcCwqClXR9W6QsNPuA\n" +
+ "ZWvxBCBzN8SmqkdJilFFbFusup2ON69oFTFpX8CzaUYoXI6LgxuX435fWsXJUfDI077NWQrB\n" +
+ "LbnqM6UAoYkLPYRL+hTtYE4Z8o8sU/3n5yaq6WtCRUWz+ukQWKfDq2MDWqTVI12CCy505npv\n" +
+ "2bvNUxZHInfmSzbdmTty2aaSWnuGzWI8jnA/LdPS+0ly8fkZV9tU5n46uAYOFzcVGfA94iIr\n" +
+ "8+ftcVSSLCu5qpjOdYi1iVg/sR2sjhq3gcS+CxOGjdR1s+UWmWdBnulQ0yks7/PTjlztGVvV\n" +
+ "PYkmJQ/1io3whu0UPGdUOINTFKyfca8OHnPtkAqsTBqqxnEaXVsaD4QI859u7ZiKfUL08vC2\n" +
+ "cmwHTN7iVGyMe9IfaKxXPDi3WWbOi5Aafc5KDeX3sgzC01LoIaWqTrm756GEj7dJ9vsKzlxO\n" +
+ "Xfz95oVq1/pDwUcPtTtDLWPtQHRmBl711qzVvUozT9p3GCmvzDHETlMQa45/m5jp4jEHlA1j\n" +
+ "GFX/Y0G8Y5Zziv9JD2sYc+78H5f7IMrHibKRlnsIuCvcxazUB0CfiUO5Q4Xe82bSS09C1IvJ\n" +
+ "/I79HN0KNGN4es+x/0eyIlYD3dcm3uqDpsv0ghMEPBKogqDLMzZUwW3bQxn8bMqB/zL+6hLm\n" +
+ "1197EESFEYrs6yzVnuap+vnfaqk+vprwe2Kasl1vIl1h3K+PZvsjdQHqX1WfZRWQ41eKHX/M\n" +
+ "cR5Kn8fhi/4ddt8IK2i+OeCbkRsRnBIhGpcP2pkVaH0EtZ45nbxbs1qlFbWC4nWAJ3UlmnSe\n" +
+ "eO5QOErFgwJX9W1hUWiAgyDqMWcdWLYPQJ4Gw9yqwrEP6baILArF1oZyc9XgSBzZn/7kTw6h\n" +
+ "TeCSKu0QCK1jQXUKbftl76ftFh6L/mEPWG8CZP02GnDQx5eEoUhEIS4tf3Ltc/8ey6k62R8C\n" +
+ "gMLsUdOusI61w18bNW0ffVc+N+C8j8uWbc8w4dL4DHnfz/oFUjuk0AlpZE8ii7GNqszBgirq\n" +
+ "wQ3WdXwpD4Q/j/hru040ONElMJr7HO6ipL1oP7nbIR7JHoJmht4G39pXJ86XfJmtzMuu0MxC\n" +
+ "UTcLt1Sz87HzrMO9eWdApGo6qvwSwapAQC48nXY/WDRHgxjji6EQLwO0wF4Rlwlo4SsW3nwm\n" +
+ "NtOBsjKsEQ6/WILvRAziAPlp7+v13QfLrrzmnWFwKE6h9KQ/wpLL9/TAoy76FHoRvZgT3x20\n" +
+ "Vo9Fe7nZbc6qEc9/DbwShxWMbsU8vlzrxm4pgOC7I4jftUgolQ+NE78sQHH4XefHDKXWxRvx\n" +
+ "H8HVU/TPsj+2cEHM2WlVOXlYdtlobx20DSiOvhWdkW45Zw+9SaVkGw/IhCVkLi0UKuQV1gou\n" +
+ "lA4FeTVs0WY7jUdZB6c3DYgu4o5gxVvpRKOmwNp7rVIjsGuAjC91FN3DGQYlsyItLlZd8Yli\n" +
+ "FqGL6B2HTehmOGwtc6pfzbUJj9X9biZlQBigS3waDC0ei7HUq5M0ztyZv71dg+ZA39F0ZlVD\n" +
+ "CszjUNp847Lvt91JVmQdH0HTPu7Qfb/l3qX6LARTCgFfLGzjdcthzxyWEU/oCurUj9E1MwxX\n" +
+ "pfr8AX9/ajgCCS9bBvV0luYe/+0xqrzbnZw3m3ljfpxx5k78HFVuYhXt4iEsgtbXhJuLr/EJ\n" +
+ "B+Cu2YaQhXrvtyfi4EkOLoOcIzu5hs8V4hPebDbhDQKDcF3EhzYZ0k2YlfXnUx2Uk1Xw/x7n\n" +
+ "bLKVIpw0xSnVWdj3XeHLwEwh+T6/uthhi99iiXNQikxwbrEU4Y5IVAjh/JfKywIgPcXnaDqR\n" +
+ "1anwP8a+QQcD3U9neOvIZVx4fA/Ide5svJEkJ6gccel7kMAGD3/R14VfasqjBc0XhoEZT4PN\n" +
+ "xuW8fZIKPkxU4KEgM2VlzB9ZgTTcfUbUMmaCWioQEwfF7J2PhmIl6pBUiBFUdPv9+TnE4riG\n" +
+ "Cm5myUQEap9SFIjWRbLidy4+ZOK1rA34zNT4CnknLWFruygn8EzpgQVlru5no+qppchbOjyH\n" +
+ "O+Yz9VGs+SjoQlMl1HjtM2GQeNizP7AsLLd/R+jQ0Al4+KmM0Z8obTtYKUjG5rlwtNzcxyjv\n" +
+ "tvEhXeWjD4xGkWN8Xhf7VQX2dM7APMIiyyMNaNDVZvWxU9DpJjt4F+mhQFk4Yk5ao+Bs23MV\n" +
+ "XI4b0GanjnGzu5bHMUngkHLISMNGcDicT5JzfVYMbiM2pDakaaZWQ/ztQW5gWzjYFpj/Yffg\n" +
+ "ThvYXUi71gTZqHZiybqu6UI4iBOXc3mXbKzN3XwBSfCODFHJj5A9Lzh4pVBrndC7APfgn+Cm\n" +
+ "6ga7DmPZI1igomTOiIcF5+i7AOW/8hnv9hlsxN3D7mrIiJuRAkCD56kGBkCEMnZ1EA5nk49+\n" +
+ "k1s+XKPKPskxz8XrD2vhPL9ToSXQl/i+b+bh7jBIi+2KJh5XoiM9CCqP3B7bjxwx9qvtq7nD\n" +
+ "/+Zn4B2qCxxGI5d92mV4d1KGanbzHSZh1PJyQHrRcMMdoHMEVl1AW+YPffkwQrnRef1AZm9D\n" +
+ "ZB8B5LJvvjyNXsVGicPYM+RZwthk9Eko0W17u8fC3I/TST8c+kNqJihNhJW3y70plmSe/na4\n" +
+ "G4XeSHdbHsOWHq8CkRW83jk+2G0BE+1Y7YQt9jLOgVlIm6qYr1ov629575zV3ebyxXtkQY0g\n" +
+ "mjoal1nGJCrCp7GAl/c5KMK66T03RXEY+sBZZ2sbv6FiB6+xHreUI7k+JCUJ/uoW6c/8ithM\n" +
+ "L0gMRpxZrhksRcaBDXa8Mp4lyrqf3QWiowznSIyKPm7i0FjGGul/SESz7cKe/8RjJbKnx4TP\n" +
+ "dZ5G/+dhOZwXoisiGSj4CdXq6KKY62C1Pfvnf9elYJMo7GT8+6REYXrCQEoTIAw9zkQGD/FJ\n" +
+ "L6PvXunheXSHY454jau9JqqQdYaroYVrIHD9AINJPKluaToyT62oOL2CcG3dB0Yw1SZfUASa\n" +
+ "P36CevQjjs9GhLeFrqXXYx9ItqbYZKMiHDarjf3KgOzRhFS97n4OaZgn7Yc/tOvtXTMlYSAy\n" +
+ "M4pw2vISXcuaSl6mQzbllYuWk2sqt+rpt+/l0Hd/TfLVzp4mMq84cKerXSL271oc/2Sary/l\n" +
+ "wRHj50Wz0gIxjyfg1FgegnDmaeDCuMwSTFjrlUaV7FSKPZqaVr4LBQbyL5fsd2VrO4mQfmdO\n" +
+ "rwd7+CojtVraeyrNcwC6inBoPOa07A1aYB+bGKhwn/7n6YJEdX8AtTtir1u4r9rIPeUyv+nA\n" +
+ "QpPkPie5R481ZEgApFhyvFy6+etmHBPEpr5PguDzX1Una8sOBfBxDMVCLdn6lHA/ebDCDrLn\n" +
+ "JobzOLmW8G8cXwTmgxr1r5KbvoUaWfSZtJYL6M3b4Ix73GfAhbH30eAbgRya+IHrTx2Nhy0q\n" +
+ "pU1mgbM1aV4OhZ3wZXga8tpWnohVcTIXUfQhBYwJXCxVj6lR6mVd+4WKZT5Tz1twrYxI1ZGD\n" +
+ "HRIatLWeshiULj2KNVtTkc0w4HqIw6gVEwYSojzduuhrtXZMsBVImyV9151ZFL/oDgMQEOEm\n" +
+ "qIill8STDIz2bFF+FzkLLW+l5TeJ9rS4mrO1ffKdVWWL/PFlBvP39PHTkSv7+MYKhobbzccA\n" +
+ "ydjzdauQVn28lXVIMpF9UWmMeyWZlogGNECxb7NAPwvzONGvak//dBKEsqnGquNfNHfFJoMZ\n" +
+ "S5Ts8Br8rc0LW0zyLpLls3p+AnyJQQArteqraSodGk6X18BIbJc2avhbzGJnegacFhTr+e6a\n" +
+ "7niVgn1/P9PNo/SfMYZLWTIUKLkHq9GDhuniHqGM7tcdujI+Orit/uLVYaHDEMVKUDvJuJGj\n" +
+ "z+EybiUvIvpWjY7nWRjmtwTzR8JFUnltTGoLbcnA0Fmtu3rQCOuECYbUvH2bbtJBjatmA38+\n" +
+ "AotExnchuqDI13HVm9OY2CjyD4cJonvmjpz60xwFnr3HGp8pZNNFmvY2udGKUYhNF1X8mb9c\n" +
+ "vgs8SiT3Lf1HNXfayy+F+kLkXqBNZLnGfRHWKOAWSEj8dXiJ0ScLmAvoJTbC18s3yYoK3o2X\n" +
+ "z1sY+RERhyJ3UmFHuQ5q75w2mKz4l0kzHA6bfwHvLbTps7sNkkhT403KU8RbxNmsQDgFMCfw\n" +
+ "BaJnTNyQFJTVgljTEnFsaUAhEOgyoCAFvwe7eKTGO2NqqX9hrWcEoXSa6FgnLQvT49SZHrYC\n" +
+ "poVRVZdJ6sqnjSy7OxT+WbuQufc44TEYeGuHjH444yS7ZCMVyjNaQDRvWPYuXmFp8Anw5lO+\n" +
+ "xLb+LMEgeFKcVMjtnYLZTTgY6UtqMr18BzwHKft6+ATzyUc1zsHv9Ap7mmdRakLFa+8QbXvc\n" +
+ "+AfVbOsmcY8Bmin0nKIL9nfOUPahEMQBN1NN3dOWM/5qa3REk1Cx3rIaB/jsU9f9zUztg9MV\n" +
+ "kvplfOVYoxUsBoAhCjjzPmCgVbp6Gnr/Ebd2vFvDsokp0yHw7Cgb0mBznsntRnkb2cEB0tvw\n" +
+ "fBhK7YeETx8W3A+PKSkn1AwvoG6WeiM6CFh2yp/VYjabwfrTrfVbXpk4epzCLU8WTyPvuxv3\n" +
+ "DDH4s/Zl0CMIqL2oydtEN11ARdwXi3AImYsix/cWLzV5r1UN6NN0B0y9zmT5BrCElrJKJxYb\n" +
+ "NmafkxyrCFGnjWFIRzw4s/GGm/dBx0DGBizioedTttqjnF0rfF2pM/MVf/udCdd6uQyYlGZz\n" +
+ "AxW6ZKX0TPj7bvPyRgzqXBXTfd23kYVH/lvHEsKxnMb2F9A9LYun63jPFSiHXCahU4WcuzZK\n" +
+ "aH6h+cnY3xJn8+P2e4m4pTDMHdsgBQs4upMTxrxhH01MnUgbKz6IA2KV9y8H24PzzqJawh02\n" +
+ "xhdMHVuV396LvvjICg4OWzvFdEFdWDEZ4ph4nYTHN62TsQUwa8t3MBbKeW4mlIQXqGNAhfN6\n" +
+ "UR8nqf4H56oAMTvsvNS8EoCgcu/L9C5TrDnldYf3Zhyx51A0ufvpSNR6onWOKzVF/qwtyn/C\n" +
+ "y5l9X4c/0uCbff2nkYUqVAkfgD/hdEXiO0kdku6ptnWbNUPU76pQDQ5vD6sfe/8ZsRF68Eay\n" +
+ "XhvbZYmXCVn7azZeEps3EiOKCL4cazE508fLyjC/fNc1WMdyIve1lhXGI8uJ7/lB6tJ6CucL\n" +
+ "WT4OX6kHZh4I7mXy2+lezAELmrP3eU7YduHemlXqqlOrnw8pwGEVCsxGmCv6DdJNehk3wCJv\n" +
+ "GcdygTynL5d5fGe1mP2zxZjW9kscNX1nwf1+sz6chZ3jXpiBTRXICh66vk3UbyS3eZk8NKYL\n" +
+ "dY+/cN1O4jtipgHGq8EPUefBVRH+DmjTqFA05qHAaV/fZ53xLWm8YVTI/DS9fbbPZprOBeib\n" +
+ "GoMdA+a0Sqh6RdIWlaFXYYJUspp+rI1FlOBZvgy8Z5K5oGajE6RM06EeB7DPtI1/K+jRXa5O\n" +
+ "YXacRu/lgDlZvevVsSj27Oy6A+rbfo5oafhMMCLArtGlY4ENMk+u/ztvoxPlos9vCUV6NSFj\n" +
+ "znenH7iv5TUvv5gm4n1NCSZ9Db+zW5DQS8Gm5iGUsRj6VX5hZ1pMl2df43B6I5BwCKnq2eYn\n" +
+ "mpDzvUXUku9C/RkTxf/xfaIG30+whnY9Id4MWzWNNIJicvEdJkDgE5iRfwsVntbQYGwctmxs\n" +
+ "209aIk/KjeGWPOyg6TFYF5ZJMe/0XVSr2Bci3cj7GWeFc2FrFB/5nfExErrT4+e+9GMCyXcz\n" +
+ "bIbj45WCoA3Lgo2vh7bZV7xy6iXv358kl7bahH2/IvjUPGn3EKQY8ApoTNrRXvKAt7P4Q7zM\n" +
+ "HrRSQ+iDYZ3BCmoWfXMzRmRJbAzvC1akeduykIwQkL8QP7z7n33ntPlP2n1rDLI+LoDSOC3o\n" +
+ "bJzafHOOAH2J/MWOI61Tj7+FWyGIPihUf4rZqFXnoZkBpy/fRb/+qmSmIZ3YPiDdwICnCerU\n" +
+ "0BLeaWRD4aie51FyZ5fR+tXmTu7JDC+GRKp4EARokJgL4CTnuSGY9TaYKsoKrwST/9kKQrlM\n" +
+ "ISOGV8yTnLTzhs01EijkNEJZkJwg7QYxsJ8x9zLDL44fCL+KALLpkHEmUQdkLwy5DQV97qL+\n" +
+ "/6bSyxgLBiEHRJQns3HHGlUvNt2naUPCukRO7ieIlrPPSaL199yPcgjmFIBiXptTm9fZJRzE\n" +
+ "rkwkIeGlXzxhSpLHApOnDZaeNlVE+5NyNHOxbbBxfrc5Xmg68ZESXwxKeZAF4GM11OBLzj/f\n" +
+ "r6iGBayidg/uYZ5D0CCSyTDT1Y5RKFFe1DieQey1bj9oIuE+jo9coYLc7XUK8cnlOqLRl9Kt\n" +
+ "kcB4t5JAqob/ZttXhHnZ8J3QUpprXYYQ9c4NrYf4KEy1+femS8uGnuBZgUM1Tun5EjSeKxMB\n" +
+ "cY8gGkXcsuLzRpAtwifgHM2R6dgOq7g2nwB4wQYiILSqAsSH0QKNb+tS3NKyfNsg1tJK1PSI\n" +
+ "vOjRQCkzaII1IureIWrUikWCbQWqTDW/PazEr3HG9+BMs1JMUbEviA6ljNZz478Xbc+mA9yI\n" +
+ "RsqILUos/MCjKEhYn/qq+BsKtKmSC0nsZ3KXQcLbq7O/RZU85Dr+N+wyhieT8vu+4hb0mqrn\n" +
+ "FZwyMQt2WpnqaNk5tw92/Gw/Ad5q6ACt3PZiG4GrG3NNaKxadwkN9POzyN4zn+7gq3cyF/uN\n" +
+ "imAv6aVHaiD002PMWHIMKUOFwmS9AV3iskmW+swH9UyLPnWDejvUs8jW6mmeD3TOR8sRQv8q\n" +
+ "KwcvrscKtEXmBvFDYh3UcIcu/j5wb7WLwhNi3XOpGHEgg2MjDf5ti0kkrR68VEc+XBvnAYV7\n" +
+ "5EIrxI1qfkNcgXKRdOg6msLv6a9QSgJunwjACXM7Zv96MHMEETgkNr7DO+woHjWcPl4AYV4k\n" +
+ "HgPGUISEGUQr6/c1penqLiExW+iVj8Y5uLj3c/PNQLMhnttckHWVCz6wlqxmvoUQHgEl3Qd5\n" +
+ "pODBWHyC2FZku+Xuyu2o+GHxj10hYfsEl/qoDqqvW4TGlTz16MQrSV3SMs/i6SHmq5eiuhMf\n" +
+ "Hj6nkt3hljgHA1YawbFL58hj4x2DAyeYFfLY1YEBMH3K6JLxUdD0c02lecUDOqUxBrp+/qp2\n" +
+ "4KIqFLZ3+z7Wzx8WI0DzKYyZK79+VV7+Imv+DpOTaLFLu7nymvPeOgbzTsrJbJQo560EXpLl\n" +
+ "wID5Z36x9P/A54q0i/mhTzK/RtYYhqgaV4+GmP7XxA58zulNAJIVcsmgXKiD1GpmOR8c8EDm\n" +
+ "kMGEcrACXBOkpEJHp07J5vD8gfWublIG3MzeoTjeBhUJM7G9H5r6tNHdB4Ak+TMVfjcN0vbZ\n" +
+ "UtVCiQJqR8USTwNCumY3EtcMiXGVM3CRTTLai+IZVmLqED7SL3kpOdFcthMk5K0L0j1Ootan\n" +
+ "wFE2QhcmMVP6x8kH9cJVhbhLHWYbO/vg1AcLE7YOPRD3DVId+3dTZo0JVDC6RQKpOuUBolbH\n" +
+ "P8GpxBg4IcKqyMAA/1+FzaLicvXPzk7rKFkXjL5cgervdWF8Xx6gaihVXRfR7AiWOy38I0GH\n" +
+ "RJI8WC8NruvGHN71Oi0VKiyGD8o4tlGZyQoeRU02Z7cM1X493wCEVUuBEXYI5ax7wIcl25AD\n" +
+ "+WAv2iBZ3gHNNyCSJZM/Tqk2/2B35pfotVMgs67fnUy9tpm3n9nOdm/FgReSu3CBM3JZmYtf\n" +
+ "tOfqq3Xpu/3WnhWjkqDVmgaQ42PWtxYU32ah3M+EHHhkYSIG/csaSkVlyGYul3BsfeZ4jCvK\n" +
+ "MvVFFD2Kzkyt8zKKQlA7Zzyf900aFNhU5SkX70s94Bk3WXHXD5DRQRYHWmruCFVkFJXyaiZj\n" +
+ "qWBVKP3Gv6OXSc9IRimu6p0l0TaDxxjNoPskg6dXHTV5uTcgOKfRohgudjQC20VmamOp8IGd\n" +
+ "1muj9L82CT7elonqA0E6HFZfJqJIfxq/wSFVG7wiB9Gwjoj1xgB7bSzbglpOV/ReBPcv1ivl\n" +
+ "KsJmK9nlmfS4Y9MPWuctSROg9QVEOWq/XowOm6+Y4bpKpDhmmpsUpMsDtOJnrvSWJwcwWRRB\n" +
+ "+2Z3H6kIEUXDq1cjLsrBIWRTwb//h0Sbb2Kb1cUHnQQAjlhkSlOpaEMTzQb7GMojunx8Yeb9\n" +
+ "ff/1l4/1tqVSxX61AJuJyywGyk9AIsDIm1WW6P+P5AVRsy5xu61qrL60GHlMxtfm7ZSLAeR7\n" +
+ "GvBOgDitOE+llhzZSjwdaESxSAvnhFfM5TOCSj5YNBfLaI8bVxn4Br342GV7nufFqOLkp4rr\n" +
+ "3pcNbQvsb+k7kkdyNMNtOQfG/Ojf8YTGoanvDYrtB/0Euu0TXR86ljXPIJOT/4nhue4149SO\n" +
+ "9lboxBH6iaP8AGxn+2/pzCbcOXjDzcD/i1DoQXVcwfniiMf6S+CHb38Os3KTO49YsMYjrDPP\n" +
+ "9L2IurXfUHONlljI1T6GFV1RfRCBfO5XklduPaR4+4B0JLhU6+UKl9vdTphhwrYTuJ8I3wkD\n" +
+ "6DO4hvktTjl/IPLyYPU1w48W3cZ++P/wJNtIYl5I/ZSNfAzefc8SQh7kcnVnDoocElfWHfg6\n" +
+ "oZL0MSe088uFDAxaJTLxDaDIbzjBkwaiRYSBQ+SQVBmUlP1EjLbrwdayi2IidFj2Mr6nv4KZ\n" +
+ "4HUlmmVMSvg4K2Iv5NGgAmAjYngYSveCdDkYXQgOXldxnzVTzRRP+nEAtFepLx6TZjSjawqL\n" +
+ "nZ+N0/BCJ5UkldplLALg+5kdHCLwcdkz+H4YsB2sLE8zULM9JJW88DGBKXKue4J8GkhJlY9i\n" +
+ "1y1pdTW6mvC0J0oMAe2ULkrakIdyGgNghwjnDMaf85niB1A4+qjN0K3uGGjRyWddJH/Pnv+Z\n" +
+ "7A9dmkRNnYMFEkyFYTkbfmE2fHr4MY+YwlwjE7f69LmKEcai/is9L/Lqv5Onb8W6N06l54s1\n" +
+ "iYKzFFqo/gc0UJsiBhPmSKMNvoeoUpi0yUgXDPtw5+9HD/hqFSXqWGh2uR1vOUi85k0f1eOe\n" +
+ "zzkIBzcL3on0y03D74cB1QtjBAS2lwTXzjyEbitB4AxHyp5L13tPJs4l2uo8JXpL8u0HmJVR\n" +
+ "w6AOL/rV6elTYkuxnq5aOq8WQcm+1cYY4fPdT7ZRwVy0ZfHpN6VsqmMNIoAUyRgy86sYU2E+\n" +
+ "UMTeKZzD1+T2LbbV38AQh2kaLlSNuNkoFIjFZZvth/vubqIjHlmsw2MeZqXZIs3dBeA/1GL8\n" +
+ "s0k5ix2Obdy1t+w1e0d+y/ei1IzsxHRdBvrn1YDqdFw4xdUreJ3FSTrsTePlSWVjJXKGm13h\n" +
+ "hFjuCqELnR05+au1dFSbiAlbMPM6W/cebi+/0GmvIvfRaqrbvRJoUWxfgaFcanrlin7a11Pb\n" +
+ "6pFV47mIKHxWQiYq0z3kq+QQ1YqXvxMdM7eIg0PEOygB4Wp2FwIG0ZcEFfdq5CPveormJ/EZ\n" +
+ "NOFrIHZXkFl8fT4x8LFLWNmlQwoVqeQGOs51CYQF7YPXjFx64mV0RXz/umA5/Un6fHjKS5Yq\n" +
+ "7ZIhx+JPX4+s3RrxbUjbq4hCCa2MSBBQONhdmXtKKIf+TNvnimm9je5bt3Nu79A2OYbAzvb3\n" +
+ "cOEcQqieXzqj358oIxwd7BL2xLEMbe2Z+1bDXK+YwyJpXNF0Ech6Vbh4PSLHpW1jCoIn5HCP\n" +
+ "4K28TdrXOwwKkac1WjaQCw0RztZEatpJW1PyhQ0n8xcegTqT+6nyifeTbEKuUYXhCaJa0spg\n" +
+ "Xx1yv6G+ieBg5owSZ3DQSQ4GmaZ4GBgFePkqroihA4C1bs2FbrRWRFVRWAAEZYdcgHOyBWNG\n" +
+ "KLGntWv2VWf7yid8+oSQLExsYHBGdYMTJCbU53fuAnJYE4DJJ15Vztj0TO74KqKrkTtxfog7\n" +
+ "5CdFia/OvcCruLblCFLcrRyhsW3YKUxHmgpAPoSN4/46Bz+ob+CCkd6RJzwjnhfIgbXqKRLE\n" +
+ "8KfsCqksHp1p3hEgvm3iDuqHfBP/7O/T5V753HBhuAzFZlaOzQsjBfzK+BMXP3zp+DEpGwUL\n" +
+ "Pd/DG0fa6odMTqPs/TUblpHeANF88+XRkgB/hucv+K7h13bfRRYPMM4zephlWBzBDIaoazv9\n" +
+ "SvRyy21B4vRTXrwbTkSZXTtEFCb2027l+ycCayD9XXCLQUhjSrsI8SB+9qC/i827HcLF7X20\n" +
+ "L/8Na6qnRTinmwkBUDk+o6APUlR6sDpX+uf1bOyiV6oF0wy59+kXi9oCjupzPBOatSM8ka47\n" +
+ "6tcHJ6na0wJ+Z7EjcaOqy26OYcPT2m3wvquK00JLHCaTDisK3cQ9178FxZmpD8i09AsLVWuz\n" +
+ "r/dmucYAxjKMQzV6+q94S42EThtTbw3LJURF/8QNLk8AZKwVuaw7zz5+8F/bc2qtrUr762t1\n" +
+ "KN+9Ul8Kc2N5IxAS+klFXPfA1isfvbm88737wa3Tk1N54QIvDXVLBJg4OzvjkQAPai9lPqUK\n" +
+ "Tj3LrtYGPDTaRyRXpsH0ehIZ66TRobSaBBrL4VeopHzoWOutlTLlSSjZ1Grn6SFGdH/i998Q\n" +
+ "64ucbkyejUbFT6SgOzDN3rnl9ppqnDPOCk60WAeosAJdf4tndoYGGQQnQpsBh8uLCkyyu4z2\n" +
+ "di/om5c1yNSJsv6j2jQQiPsMX+ef+27mdAj9pUXQSRnl3oZRvQMQ7VmKsa8NBByU05MwSvOn\n" +
+ "vuEKgPq5CL+2Spnjcll+wWQsDF6OZMb2cM7PmLTGTI9LKnPnDPEhz4borQfch3jHR/EVtsmg\n" +
+ "BX6xmoD7gQdXPWBFTvwT7ljRJ4v5O0v/4p56rTneZZwBBIIgAOfncYVNGur0g1ZaFAujgzEG\n" +
+ "/PLpgIqn2rjHU+zmUuf28MvHdWxVNgSar7qMRp67M6UM6RExfuv1vzWw+ogYWeiQOYMYcBqP\n" +
+ "4p1Dm0ZxwWaqgllea7MCmniOrEGNizUMlvYIJoYcKJFVHz4Jbxy9pzGVL58Kbmwa1ZDwSXqC\n" +
+ "YHcVLer9yxoZpuDnIhRXHUnDx6Iw6QDiKpMQqJcFKf0YJTUrhN2M17kUaOD7TY0zHhDznFHY\n" +
+ "Oe0hlEu1y/FEwNxueg8tpjGVivXTX5E/81RMpUHKenlM8WbA7GQepFiIrcZTsnZ9jBCXLPGu\n" +
+ "CI00YwbNnzV/EsYsHAcvwIQBlBDVjSjkxoBaBmDsVpLawCh/SGEAl99Fe1/08OKHceGPDxko\n" +
+ "wZ3Sge2vC2ydyu4+LVnypr29R3sv53cnApKlt0uplnF4rbpBSbTCgH6IR0Aq/aYQUW032HtX\n" +
+ "wuPhxgIp7Yf5mi5rd3MwyLhsTQ7dFhXZT1kecAXAMo2x2BAo98yJfmvXM90hIwlXHwp11ped\n" +
+ "MTzc47I5XC+dR3YTHbxUKC7RCo2OjiLsT6UocM0vqyxkkJrUWHuC9vGHNEA3wmJuj/Tncr+r\n" +
+ "/bLYzx9TWcN0st02kCC4wUjQJuNlCZLjmnCrr6Y8Yrm592pv3ztcVD+cbgjwptpxN4OXTreI\n" +
+ "7Py1P0BRRC7N3I+W8OVsszHpjGsEqxFDdyRL7VtUWMR85c1cJKvmYWeSSVX0YlNsbMtVledB\n" +
+ "ViJg/2Qa6vU5lB3WXIyXOuJVEo9B6ua60Fg+HlKDHEl/5bOqOzW5pgTz2BclmAb+NvhEdl6a\n" +
+ "SzNSHFrCqmmG6Nb9DCT9wcvvs74PN2QFHm1vxPymLoEQYZ1o0oI9puayLFpMykIK4N8Kinp5\n" +
+ "iUWxh3t+V3L/yz6jHXiL2pR3UYBrfzRb+bOumTD5ENLil/3P8BngPSCvYAfRMOrBj6EAIoZi\n" +
+ "HTaCqKN2K7LefPum/AQXfE5oHHJXWkS5Zx+DiKVmwJcQuzqO5j+sJxuUlZXQnSR28g++33WK\n" +
+ "zZsrMU1MolLmEFArfC2Z1o2dxtk2FIQVq/mNhq79sfU+xmCEaGyUV84NCFpXMTe2z0m8gQA9\n" +
+ "/v+Arqi7hCbtq2AyUFNwlUlBjdAxtoPNUj5E9iPfpQVZLUTGM8H4C5kJkOXYtb+XKeoKRLx5\n" +
+ "VCESit4KBnFfx4Egptm58q2CDUOb441YhMQKUR2TCCgLPJFZBKexz0jJpWHoCBBNj5lbAeQk\n" +
+ "3Hrpj2ErGttnVxL/pFEOY0u22FWHeXdELaBs0bvbQ/8WHGUg9THFzhZtvo+utuFGpmU+gK+a\n" +
+ "XCvYMtSxSBFoSSwA4v/YTc12QBO/Dm5xINzupyx9cfkbUgrRRbP/ORXB+KIkL3uQEa6UwRzo\n" +
+ "NdZlGOySsXHLmMkICx1TxWHiTjVbrk0tAvSjIiCgdW3kFVAqGNovgl259anhCkXxbnLUMMsc\n" +
+ "sAVW0cdy3DPLjbab5tCSjbpLE8g7KxGTX6jgwjZVEDEkvhk2JwqaxQdhp6JsZIOMSxOmhhSa\n" +
+ "+zZ2V3amEkQs6Ks+3MOPRF233G33dfkmkaq8oPNOXzROimZod7RaYTJYlfl3kBHx2Gd33ID3\n" +
+ "OR2Z5ZywURCEUZ1tmidgJaChiT42hfkTNI+Y11S0DKHoQZfDQQ4gOpoGo8qn8GntVyVx25nA\n" +
+ "VpxqsbddA6diporOmNx76M7+tuSKN8KpqHpv1K1+Bv180rqa/oZ+PXxO1nu3Iv+drzvMuSXs\n" +
+ "ityJ/DRhzg3Hdz8ZJOUuKb06AfhMDcFGOpCAz5KVN6wr9/bD53Ig0KU/gUKDd2vBsPemKSL5\n" +
+ "FKKKuHf5LYVMELEfgEwhcnen5tT+wvh+UOVit6YLHSQ3uoNW9REzBwEsBcSM2xHRlg+oPw7V\n" +
+ "K6CoW1SZdRt3P6ixVDbU5IAz9oH3owqC27FK1poBSXTEg6+AodSdKD2TOqyAaP3a5+/QoGya\n" +
+ "uQntOxj2mU9rtGP2p7wQuL48ya6waALfx+8N/P18hlILF8x7K+JPBZ+0BWhMNEF9BgPOau//\n" +
+ "THHwFMvjc0yVlRtChlhzEjhAhvcK9WpM7c0R6N5vBm7M9477PbGkNZzMFqduJxTw+hxja2oZ\n" +
+ "gjcm9JXGFbYb1ATE/8WDh5dy4H49azbAb70mf9XxzvllCUCdor8TXkjqTp8qyof7P81BUknL\n" +
+ "g8vYzpY3D8eoKFwyS/f0QQic0t0/wbRVZ/tiW9qzzKaAppKINddPfVXlGUKbSKsXy5rjcg+f\n" +
+ "rD4WKauGPgTs+kOpCOAOxAd46wEP0CoLnjALeVsP6q+yNic2Mxa2FUN2fQ7Am8IWV73cnkP0\n" +
+ "RK/tcmGOmFkg73KJSl/FC3yNxG8HLQmcY/IeW+Z0PVLTj5tzWer2cey9/JTHzzOLvqEjDpZH\n" +
+ "bbsS7lOi+oxEEHHRlOM7PECSsMc9C/AQohyDyHNYPEqo1XjRmTUSU6ozbgcLDucrpAIjvVYm\n" +
+ "8Cz1icS3xZCO97XtqSGd6LsMYWlCHvQ6RJAcuBxL8sasJHkz5QZ4TG1xArSRDdz+bO/4Df+Q\n" +
+ "R5HTXGqY6cFs9CLG6O/vpzGKCaeaIjKVIZTTl93Ql988Y3Rk/NQFpWRoIWtrMC0Lpu04Vmop\n" +
+ "qYLPJCFEdCctbhiD/SXjUR6unYXHPAPGWwpRmUF8gQChRng+R5bzpmMXGAUOP8W7lvthh5+g\n" +
+ "66o+0kvtxImNox3up83hSnsU9xv5n37j9T3pttub3ozQIJTudiHS6uNLbKwDCbCvrdvY9vMu\n" +
+ "8D2LSmNC1b7QHkU7R7Bq6R8DWdvm+T+LKqgqodpoInMsN/p70ShybyVQAOg7RNUzw7k8RJKV\n" +
+ "TdxHFAxEVpS/PiBs3JFwL8QpOMVhmgK3O6Ictn/TW249fQ5qEEA7LLMY6H/TZmYWg/EWfTzL\n" +
+ "wd4bGfdMoY+IRjMsxfX4Z1vLVAo83VbtgvbKFpLb1EO7Kc7zuCS0w1BeQ5++eAnZCy3GaTUk\n" +
+ "vFAkjZkU64NaObuZ1/4hyMMGnzNZYnNraZ0+wNOFdLquhi6F5wjsbep9kf/VZfNJscWNIhsd\n" +
+ "+okxW4QlBC9smcIQJfpYx+ycVttGXQ7acP6U5NmVKf/TCu30Ltev6/SXtLlWVzFMFO6ZgKrG\n" +
+ "4xlUqiSn2L0P8AmjvWEPAyL7f3E8iarGS8mKnAq+h/LeyQPD5M8sDhrBDsBweQJghnRavj5/\n" +
+ "kg9MalKxnbYxB79uzRi3Cqz1nNJxP/sAyUi4/c7+PU4T0xQkoU3BioXURhCXZMcOOBSwSEGy\n" +
+ "LCpJbPMRSnX6gveGth2ba7os14cRSG44LPe9BDjrJwSvVV4Pv12OeNPqwH/tvyaVi5V2UvGn\n" +
+ "J9t8EK+rYLlZJs65g7oxaTIcBpkRIzElLMGNmXsEHkGc5PQeJC48C+yho5cKq84lDq0XHlMv\n" +
+ "atYV/u5N/w7Ta+nOQGn41GTOyZmAqddNwpabhszmzx32klOHwNWdM/xoqXze0SHBEMSYaXfW\n" +
+ "cOecJNbWpmIoFs+gxt6AKnOYWC/UdaBN+NPUmyQh56LNBPXHInMGc+TJpJR2BhLryKYbMRiG\n" +
+ "3KcysiWiSOujHeMhohFMUm/DUfy1LgMT8T+bQGrCIvhAjpQn5uqtB2xBMtnD4Rc6KxTyY/HT\n" +
+ "VhVtQqITCY4wy3yv15lIGxe0LLGGnVtYJqo5EEe6hQg9eXOhH6dhCDKMQ8InV+H55fAB7dnq\n" +
+ "7gZhYwjUh3+cbQHnamh/qovVNY/4sTHOP0i+13ekbw/Q7zTq27bWPGyWrfa1vsMFqBZD4vVQ\n" +
+ "1/dkZvzpdWc0uJqqSw1p0vVaHddjAwaoBqqYLwIbhrhDPYqpkQuBnNLxSoYf296ut3Z6tcxX\n" +
+ "PSOt9Z5XGK0f3XdQQSOyP2ujB9KI8sNgPCC3BpXcqb0shalUXwltnRpAsLzRnxjOujR48rxA\n" +
+ "li/1wGpRxFPNsA0dG9/kGGN/FKdYW9J38fC8YVM1gpFDrvENuiGqKxdTnAQqwNTQ4YMZKgIU\n" +
+ "spsCCOA26YRsJwRYRn3Ajw9wpTR22OG9SwmZlhgsvFxVRiDRa0KlysJVpF4n5C3F7oQtroiD\n" +
+ "86oThYaQN3ylOr8qpf4ks/rl5QHoY7j72FAaqn/9hef1C3kAh6vF85ZliGXKY4tV3gBLMgZX\n" +
+ "L08CCTUsBQG+1qeRY3UKaigBTfsbYfxU/CLayCoEV95Y4j6yFV1GDG/OuYN6hSIjw9hl3p5t\n" +
+ "4iSmAuH4jkdQFWAile59e5ewt9KuJwxjyCFpn2gREx6LBImTDAQ9YW1AManPRtvriv0mnmG/\n" +
+ "x3Pm826Jteq8pd0Vi6pLLATWjzAz+GyrtmMjk1InY0sUXdMzMfWczWBedZKCLzd2WB1tCoUt\n" +
+ "g2ZnQO3nBV/+t3yTH15cNtkG74Kk/3itRBxz1kPvLjMMwQrlErfIF5zOQ/SFXaJoiC18jIFp\n" +
+ "1aDng/elbbjpz8Y6ZQdYlwZAJt14Pgmd9oCiT8nw7cNzJkzhPw1g3MSjHiqndHNeP3J16Rp2\n" +
+ "wGnvYwGTWA2sbPgtPSv61mstrs0ZW8+JbqknLn6lRxfnODqiwH8jR723GrJGHWRwwFOLN0SY\n" +
+ "eKO7T6OPsxdiWSnDb587DzdcPV8UjwU92sdtxJPJTE5AP3ER/GFlrRtJWoJNEc4FPQPEbxSI\n" +
+ "kf8ziZWlEcwztvZyeKv/iOqmGBULuXXjFVRYn+PLXJ7rXIMo/FC4rp8wOpVy1Kr82UoJdriE\n" +
+ "+KRpOMZAqyBoQhnzqT3KSI2fzfKlKLg4XFajzjKgvA25Lt4t0FiTX0oPjT5xXy3nLMPqJkSa\n" +
+ "1xk8jA/WhFzm1H7KPjttN3Cl7Q4II+NnbxXrZ3jxZ0pAQkbR1goH3QrBDkr888Gxp4RpyUqd\n" +
+ "sgplw5FdAIGLuPZD20JkSAtJI9MuYJtndWYm1xO6aIrpCsG05E2NVSr7ziyaEEuiL1Xc8TlT\n" +
+ "//v4JMO9As9x/Pcik1mD8f7a8qLibt4+yboD1/Vra4SgfWyWaniG326q5Upk8Bl1hksCKKTO\n" +
+ "7vSEp32TaP90SOuH054HQc4Ki0ffye0aBJMifV77RVz6GErggO6iyIsFjSVpCi+bwQZ6wrkk\n" +
+ "lV3znF1li5e8dGkfMv8G/F7rCpecpvYQPD4+8PPmIELFAoRXw/PKsFXf2z9Jj3KxCirGmnWa\n" +
+ "6pV7BuKiXH2ir11ZD4zrZ8Qi2SlAJ4VfY3BIgt2nkZ8FRkmT0wroc+Basp8PDcuKzgT2HBgX\n" +
+ "r9ZhanQBsf1OZxaU33jeGUd03f4Kgf22xawruBhcdwlfRybZSUQHGpiTbhflPn6n1L697/xv\n" +
+ "kr4StZ2YIb2UHppAWbDBxZOvBct4tBi7L3A5hr+/TQr2em7kYbyrDn1x8wgNxvk7mJ2s58Dk\n" +
+ "b8Sw+XG0UnmuLhrPBF6Q7juOHN2BTaSn2X8IPtOmf5Md3KCBwb8xoIz1VUMGlgyQpvu6dL6p\n" +
+ "DDFkeCWmZloPz5tlZfwDtvgzrPxykz5sl9nwu3T5nQeufx8z76FmN1ACbxbKP4cUD29WPVRX\n" +
+ "fXQOdkzT2ogLgDkVXvOMZgeiLJ8Ws2nWPXKct4EsrykjhPvkdFLv5D65hvAnWYXBldq4DUfz\n" +
+ "tYYzorGqiyQT0p27FA6z/ohsOzkrYT5DHmOcgMJCItgnifuFh2LnXPpmW+PGPtHY4Ij7hAaC\n" +
+ "XCE++XLdlHsrEpx0Fv2f3zjmdLYRRLFkYq/g5jMWw0xAhTx9MyLBNSOTELeEZ1gOMyEUBMkg\n" +
+ "64uTVRkSZCNjOMj8QuzozG0QT8zKXUPZufka7ltYMt/LrJvUx1PqeX/Hf5hd7ZTj/2xdOZlA\n" +
+ "DcaB5H8jclPjsFn2HoLeVHnaKt1ImdQMmJpktGzC20rT1ZVqg/jIhm1hEC6rhIgXI6UaXxl7\n" +
+ "9sun22kYio6itWgJFtlQvdEgiTHlYF5Agq6Yeiv9/gw2HTnd0BFL1RHrYeUHHBxvM4Nfalu0\n" +
+ "kVRhhnJBpa5kvP74Ck1DpSaSQ6ftLOmbJ0LBZQbWxPuH1bOcztDPxW5s7F5dPqfKRfzD57s+\n" +
+ "CktZTUI5jCkxGUdsLboqCaX/9ne6mr/KQqWNbkJ6Vpl/uBMa3Iuk4UdbVdLPa9QB37vxLChI\n" +
+ "E0iRpbPCa9GBdvyf1iTlvSEAJ+xkKaxKf3DFt4ro2+CcUllEG51wegf09GacjX0vtmrJVsZm\n" +
+ "rMnt96KXdL+DtJicFFovTuu4ssf6lV2cIrKLbHBrcNuHjCAuuhsF/r3p5kewh2ZZFfkqfQ73\n" +
+ "T6XlHrAB7+jVKRPCavljLiiU/mWIZ5caadS1wDlf0Yoor76bIpr5Ifn6QiV7O1zOuboZwL4k\n" +
+ "QhLgRCCD2wn4BkeGccn4quAZSFvEpL4G9vjl5efeEI71WegKBIwEqL4w8eJCitufg1I47Bz7\n" +
+ "k8/tPa2R3qZmoS5pTW4ObX84i+nbTpuVanJ6BmaLqS/Imti8pOnu/+Nk57DYAqz/+PboZNqo\n" +
+ "wQ+d9/s7/ORYYxD085yJvTZdTsldaslunLviDXPE6WUVtt3XzNxCR7cUxNcIh8kOwbxPkwhF\n" +
+ "nLdqQVDHs8KWUb/mJPUkipwWxnwlb/nSjs+6T2P6ansxq4FNFQJeXVCLF3Mnc4ZDeC3GB/KK\n" +
+ "21Z3JYUAynAWuK5y4N3Ed3GUHhJjUReBvW6T+3MsgHapQbzvHvKTmueIuxa9nHXsUaxojV7V\n" +
+ "PNxp9TRvUX5KLJ+OPZsVhut32zpe0/HdSHeUVawdIun1chs73Gb67bZA0vnhirbASCStNnyB\n" +
+ "gTaw4o53N99N/11/i3zurK5bxqnAhEfe+H8cY5qwVOf3zksctxdjBO6OyfG7EyEbFLgxt3MK\n" +
+ "rRzwleobPeYBAp0Lotu+iBngfg9EcoC3kh7XTx2Kqc8OGISjRF7Vsf42AVWxNZc6Y1Z2kfcm\n" +
+ "zJil/iTM8sNSfbhOQ4HDA5Sn+WJXFRkz1fx+7O8bpikDBZAanEUDxO7gsn/VFezgIqJZsJGN\n" +
+ "4U2Y+C3TkRT7jxvYISFJtTr7KzQEJurFvjHUBjf+KcDc4J4CAQdDilAro4auJm9ji1k7+6dn\n" +
+ "rd5iX4Uu1GIs92wWbZ+jI7CwWDCG8GFwaPXa3+rfMgzWQLK5Z5papSZ3HTU2zEFNj4w51M5n\n" +
+ "4N9hmyZolUROZ/Md7gB5lI73EcAxVmbmpSCQ+tTarj3jIfzXU8gx3xrTx/IjhqYFX1jvzf1n\n" +
+ "Q6BNzyctkUAVpilUv8FFdCVl5qVhNHcOzzXGemxUNT/m5e/1P0dAk/dt3bgw1HfGvzvhoXG0\n" +
+ "19OMLCpf64P8uQbq53Dg6dlWXIQt8Bpg61x9z53kdD02AsK8LPy6H9O9HdQIgJX29o3BLwT5\n" +
+ "wMinuRUzgKPscuLOlHS9wCXbTJKa7mAK5gt4wf5Cpks6Ps7TYY2bq5AF0cUlHNnhiJ5XnbiA\n" +
+ "wB87rVdZLJaLHJRkw2P/Fd9xuEAHfmFqHkOHIF4g9dlPOV1nAzetM/B88QTWUta7W6uH9SrP\n" +
+ "wHkvN3D+Dri1KpAyGNauMJTXCl4iyF+9+oCD2IrXYo/imlGiNHvgoiBQeSnG//F5ZV4typ4u\n" +
+ "akQZu4NvOjI7fmkr4JW2w+hAo1zhNGCsEyl7jjU9x//xtfpKT2dZfg7JY6C2LlqyMbDXJFO0\n" +
+ "ru54525F7mHpJD1MG1a58G+bBhVGA1NxB1OSzmC9fdIpkFPsE01/bv0lcM22Shd6Y3jWW+U+\n" +
+ "4KupG6U7+RWwnNfQE8EwYAt3FLhHUz5SfdctalR8W2xG1HaUB911r7dX1/v9Hj617wYsgLwD\n" +
+ "rfQRJiQuMpleYjlsRGW9gonyH0k4WYHb4WbAB74QSkV4NYiqoYh5CRPzfpG3gCosNDw3pbil\n" +
+ "ZmA6MGB7x4EtviOMbyNbHy4SgLXpRxhOrBSFokvLseV9RsNW2xlXbS07zl1IFIo2GqZFvG7I\n" +
+ "RuvREl8D+83OMskSwKltdTIubJlLrFNPKbAXnXk4IIGRykhlkv+68zfP1hVqR2B7CTElHTvs\n" +
+ "VaLMtXKDPRvRae02HpiDCbzVKBMVlyttetXQSXg6d2YY9mT6O3ZlYri5aM47j1vwEnmgurSt\n" +
+ "hwoJF0mVCmbvNWR2JXLZ8IG8LP+xdkop7bBufL3Urt34iRucih0krQMp0txmIp3N9V8Bou5l\n" +
+ "Ce8Hc0J4uvcf5y3UHa28PydhK6XAJP8j4Lfkmkz0XrcXed4Z8psdsN+A78rJUHOsemcz1xmt\n" +
+ "r+qHdvDCW3SJ6vAS1NeaaKE7KepaWGFpIyA7uAegKvVKzSMigJZqF0DVhN6kVo675hBifJsz\n" +
+ "yZ+6douRnIqITYIrT0pF96O0D1totzUJ+zLTH+sOsrVusBDDNrad8ZX/YirSiS5vMDeyPKB5\n" +
+ "DJ6e0LgGhOyVigqNM/EBngFfk4OsKCHNi56KfQ3Egn0LAT7krK72KW2ml287CRJbnSYjLyIl\n" +
+ "PH6Alfa7wje4s48AVM2D2w7sAQl7PNr9fuOFcRnDIfjsWQUMAo/m8jsqKZYeBXy8RNXbeMdh\n" +
+ "KqieZIbWJhLJ85EGwcadWXNF60IeCa/ZXov0emYNMnN7uF1ZR5nIVUyDMV9MzG2RxcpTb1lO\n" +
+ "qaauedNmP0gI7l1OSCNz/Dt1KgzP38dg5YOi71RGrxYyz7Kva5NHiFhI3mWHJEdmRpnx142m\n" +
+ "Zy3MtpIPYoMWOxpyi9oEOPps1VvXxChVO1bePOh3CPdqzONsAXz4+P38R6MMEtiYQ3qOxv/F\n" +
+ "j+bE+UNAIyG2PAfKtaXOJ8rW8qLIMUP5aPL4/gkGDSRuvSBWpo4oWTfLwtI+FLkJSursuOha\n" +
+ "+96QakdwiSJ6p+yWaB/ex3AhULVsYWaBdV71daW9GHsa4tsPReoRcfHYHvXQy6LC6fppPiGV\n" +
+ "9iwhXbbfvuaQhn2Nb7B2j1ovG8wqtfyk+j+39asVFyNTaQiB0kA/KNu/NAi+ZNTtBaskvIjp\n" +
+ "4fFYn3pBV70OIiueCJbQTMzzCCqkPzQXtfcnvBLrDwwl4f8M59elOgPHCOBKOkEsgIf3SbNS\n" +
+ "2DreFkeMpcIed6vDDXIK1PIqmremOmSnJvoa4okRyu2SdXekQWknq2rpm20mySpeJd23/QXG\n" +
+ "gsNPPW8lVUYKDOY/YcjoxFzjRemhDZiivlN+4KBLkATO3x3sU/ZD1EOXSSCk2t8J6nzSCLPk\n" +
+ "JdLhaz/V7Lqt7ML5hmlYO30oF1wUS5U9Sx0vrWO62lvzj6FYiw75er57GfnF9n6RUl1VEwOk\n" +
+ "8NgYy+/XQkXLqExe50ueeKTICtEP0YwNekgKlrgKKwEtM2VGiyzSzZ0PL63yeNixOcVuh5zy\n" +
+ "WmsY1VgPzdZ2FwAzxtvBcYPEpkL0R5U1fmAhLjwAzd4jDG11Uo+bhpwTA+mf9KTlw6hwV83V\n" +
+ "ivNDJ+SXvLG+l2Bbu1dK+CLDB011U4lDV10EfvP+Op5keWlTY8nCozy3SLcm3LYkcnSB8aKg\n" +
+ "bRgOM2ZWO8gxxmYfub5OsOeTWoA8X1OEwOUgIA99KOu1p8PPr/tJuyuQd81KLUsdFUSlqmXs\n" +
+ "vHEpF03T825RTrmyFkusRXUCSgX3dvoxQ+Cgwkac2+Amrs3tz9FhVF3dZbgGuTXEIqb9pheB\n" +
+ "rlLGpmzJc4UXdsFZh2qSpFq5Of8aLgrXDYD2Z4S0oL2qvsjXF9rTTfqnWSTpbdKqj3AavDEO\n" +
+ "yFbhBRSNZhrI5/Fi96CWkx7JDk4boeX1REmRqQr11O+emU2eJq5em156zMBKaY5qeOX6kcyl\n" +
+ "BQrpnwq12Di+tVZ6IHamNEpobZYh7Om7l96FGsPgrCt8k7AAtsDRMbslBotcv+uzIGuiRC6N\n" +
+ "rmn6fLSbkp7M6dqDCXnQYzH1rIIIAhHH8t9kjUmv+QjdbTZV7UM/3mV/U+35i+dUi7uEoq4l\n" +
+ "pIORSDnGYj/mCvqa13pe+HKB3+dvr2G1n0Ouh8zg9weANoXwvRQ1/WQcrZLTS28woIssJgWM\n" +
+ "tRDeNQzpguB7B3GgoLf6N/4/3Nj+S9cPtnrgV3u1i7Cb3tMPOzpPmUjFtahJ09pt/CTnSKvC\n" +
+ "MZt9mu6B71hRdMb7mpwswVv50HwWtBVDIQ/nMwa1UX0iLmxZ0kRRYEzvsqyrcIPrxYzIwdgu\n" +
+ "eaoQXggEqcuEwT2k+rN5l8oBYoW6y4IHaFvuo63keiQRzGyDdtjxwvu7HaUyU0tJbcTBygcs\n" +
+ "TtBOMlVcxHGRrc2R5VD7lZTCWx86ROwI1j8WtpX94HuY+siOqLFonUiEurKss/4ehfbVSfcf\n" +
+ "TyHS4h+6lobZaFfoIkN5rW5iju5rzOWQxbtKfz0Fbl5bbs1qe4tJSHOG+Wsp16gP3W1qIB5L\n" +
+ "EgLc579ve9CFE57TBIR8zGsCRhDarWLRNk786yHSB11td0esF/9bMAA3RMMBe9UVSKIP/mdi\n" +
+ "L6C3XWzOzDWihUAs9VuDOWogl2+PZ0yMp21AjeRvo0afZYLYQwNng9fksmMC3qlD9Z0ssXX1\n" +
+ "109RZsDAo4jMOv4MZV95JTJqq5Ti2TvOf7FtyWubLkHxFTjoaoc5Vi3saXpM051if4pI1JDb\n" +
+ "WFmkxnOcxosVzcGvF36FGWuV3tx3BIPTY9p5Y3h43f8RLE2J0AmZVT9EBbWDEFYcHogjNGfT\n" +
+ "S7psFrm7FOBXwTmNQ70aUL/7sheXyftywrKl89In4E+Qfp7ARoq6hbrTUM4XP2q3Onyl6UVb\n" +
+ "qEZwvX8fzK3XACUcfyHXfunvn8NRGJk0EUgxf+GIXd4h76j8s0vyOhgTV9/m9uOs/SzPaoFg\n" +
+ "QAeiOgVWfaJTO3Ra7Lu931otDh1+e+Km/+kbx1cD55hNuHcS9wlU+ohwKD7d8jMXoLgQvXjz\n" +
+ "uKD/Zn3FMxyNgwhLJPYfD/tDqR6dRVWGQXbiibW7iieYD0IrHfsbUN2KQ/SraDEypcVSxXTh\n" +
+ "5OCJioK8C2p8fUuggfJTRhzUtVhnBwRA/eV44/b99Ifo8o52+n5g7eFo7KAnRAEAjRkHK+Gk\n" +
+ "Tm10mVPIl1Vsrz/j/NT+c6aRjq8RBIIgAEYUdOjDp3jyydz9k44SgJsSdnHnYlSeYwtsxaHI\n" +
+ "zf5Dm+cI324tx6Gdq0t7cxtrlEijUkegAVBSy+PF2kb4aeVc+GjosWjJ9r7yLzV2Lsz6j/Nl\n" +
+ "3vBXXdy9Ho4ZqOrlb7usA8ecbGxZ4wPdNysVRjiNJxDBvs2SV1BJ9HtQ3gUHek1KwrYRq4uF\n" +
+ "oxsmV2J5NV2e6mYCukTCvZyHpUA9ZIuhn/U4llnoOAaJXdgNou6FEUblyBe1QQ6FWmP0xcVq\n" +
+ "MnHKY0FpBYkGX8X6suyD6NdcCPU64wyqmmBX+hfmFEoEUjmSlpQ//au9voQPSUGBk1BHeyi1\n" +
+ "+oD7uy5xesBlcnenbmzAJVk1CWpdbvvII92ZAGLeQlK1JC/xiqvdkfQZ0ifkH56M838ZnPZJ\n" +
+ "rQCW9TB9Gv53QZ09x/P2b3VB+58X+UMxeYl2gU5dZIC1ZABOGihh0zLukCayCj3pgE5u2udu\n" +
+ "ZZjmNYvIx1khp02kWZxBl/R7mLn6sPJp8AJlHZvhg0eJFdnclPzveocADzfHPeTpFn+APBnz\n" +
+ "WPCGh5E04F5mujrGuKeRJaiRwYC8PEEIzrMSTCKthMOHts/xY3Ic26ULTMxPjI11fIZ11R8x\n" +
+ "54UYrrCZo8g7pWrxyBipkjbg85diLCQy5+rAxinVubo+gdSxnbpiVOMwRkY9lNt0/7Vy7c1Y\n" +
+ "yrTOG/2sU3DbopXfwf9JNkwP14Ba5pJXHy1yy3EsM9yR+KPc52dpz33m29BxGmOTTKIVVjov\n" +
+ "w1LFJkFkLuSX46/1bx4CTd/T8+EcGw+LRQYi8qw3xrXclhEJ171ZTp0XpE2ownDUcTwRiUjj\n" +
+ "x/E+pwFrwMnKTWt83ol+xWsU83N7w+DhoQ7jmi4CWK35J5JAh/mk5ofGNozJAmOdgk7xixbV\n" +
+ "uEkvOAGj9kwfFYtR7+N8ab69lAEFNbt/mBbd8ZfPudqi46DdMM94n26nFupuwRaNYEmZbYJf\n" +
+ "xZ1G0fCBgj9dLjw+ZlnC+nVnhkY7ZG9/WtHW1bIorRvJGnbk03MBzydKC150edeW8leqBp+v\n" +
+ "bp07c372rZHCC3G2J33xn36qlOJ3/zWf1wPbuJIrrF8tY9mcttpzzF2Qopb8oCVEECjaRlQU\n" +
+ "EkgAEb1xxIW0syvLQRuKSOKz0DU4kKUxoHbyN4PQ9lw3zYKQFTBh1CeuZqlYyuPuG0Pfj+Jh\n" +
+ "Lj6KVvRjdEgXG366qjEpa74cJ6rvqiTpbkBysvPzNfwA9SPNXUM3S+pAmOmaPwvcPsWVpevn\n" +
+ "/CYtksAPtKW7wO1wj/GmQ2kzDPZ5hKqVUkpvDPwZyLkWwLseiOX95g1/9Zz0UR0tRBnPzW0L\n" +
+ "pLttlajn3kv+zs99yQqjgrIVbuDj2kT222Q5FaiKYCaPGt6XlaHF7krMl6ojU4sLlYY5frME\n" +
+ "jKQBA79r75vHvDyRmgeBr3VstOI+su+pZCNEPlmYOzzWPOFJpZQEoz6wKhRlPMbxCufV79n3\n" +
+ "FmZFk+XIYaIXypz7TMaCiy2M1uzKD3ChuJc2SiKCLF4oGKMPG4iIcYWkKPuqxaBB4MHWSJw7\n" +
+ "FxU0SGBgQTQpuPZNNSygrHOcVThaIhsTIvPEbnYHG5FQvOHa5zuEEaep5Oj4PAy86vIj1SOp\n" +
+ "Y09gNrOt9PJgOvWIVCPYaAWUBONqx8B4SyqsccXpPVzrnTdN3qXlAhnBx6k9qwRCsttgASrx\n" +
+ "M+JSSiSakCBKpsHwuzceG8cG1NbWMYNQZbk8L3ojNVEqOG7awufWqOf1TNQv0A8a7cE6MLwy\n" +
+ "pCKrb+A/oawJq/pU4AUdEDvGKsnq771Ektk8uLXM9nxhM03vsT1Tv0CvIgtWC0DTnbT9DcpQ\n" +
+ "txMcv//WtOhZ2O2OXBhruf20KbdmbehBvYpFmLsfjVH5500MR38FaOfo7MTveaHfXIPK51TM\n" +
+ "OY1JLzLjZNtwrXDSLE2paTokynENryw60MRUnPRbqcImP3Ro5kFM/wQ4QmaHK/P8c9b+S5UY\n" +
+ "nrdDsFOCr65X+8/1DeX+jFHO8TGkZ5/+C3boM9sHEk59GTG8Ly8myYWDSEeNV1QgxAuFsRgi\n" +
+ "iz7aJ5QD2IfRIMGao++g8N7rYZg2GfprfWgBdOXV7A1CTCYmszdQDLxkLmd2uRUgrFOyJBeh\n" +
+ "1d4oAVem9rljEmhKhA9VfYR5GBxjKIauP8wUsE460dxh/Y5dx/UgTcoMM2EOozkZT07KemPY\n" +
+ "NIsLuVcIrpjx+4tIh9Jqxzj96IEf4R36sf0/mAi4Vr5i7ih7hNf9WqBQXJgmxn1jP9zyY/P5\n" +
+ "5Tj4eyAD+N7apNndwWvvWakk+RSSY1wZOs1/8qlNThl+Dx7xbQjXYGJ0l4Y1BrwmBsca/gMk\n" +
+ "hZ3KarNsb2ywJzL5ddbUr3CbSZuVVtzHQTeOEOTAkKgSYemFEwVADepFfP+N+CkQD9l/jPtb\n" +
+ "JtP3NxEne8OWrGZXDfH962jXVoVGo/n7LArJCk2eCBRLu2GeeI5U7t60+D3kvilKm3KzCNCG\n" +
+ "HoSmE3iOzRmCmzBam640WW4L2IEVrhdlYyIcLpHDev+FEIRF58KrTKR3+zsHncst8yHo2SJ+\n" +
+ "nPJor3ow4UxlPi9W5sciz0vqaBUw6GDI+4UssTWZXew5P9KnQXu27QCKFX174ol/Xj7MPK5G\n" +
+ "20QXGuHcUE6WeVbu/R046begqyWmfAIBSfsYJzh9lp/RKTwdbVd+eVI6Q96MXarLb16JdZom\n" +
+ "RCjaZKu96g6xl711JMHqP8ckYgghrqLvg67Fx7b0RmGCmX9d1UjxWWBkKRa1fcCyCNlzGdRD\n" +
+ "QjZ1/+SbAMDDnzuBjPE1r8RSfW1maD11JU506s6/N1U90oXe3jmgPovjyvPUo7Kfu7mdxc7c\n" +
+ "DAPdg2wTq79gcQ2dOKMHYUCBa9zqBAoZlXeDY8MCWBFB2oH1s4ZYVd1ZXOdo880T/QFNKmAt\n" +
+ "fkxFaXKwuVBbz832ntMngm3219LTs7dV5zQj7Ualn7XtHDUoptl14m/7K1Kvp47tW1TNdtNA\n" +
+ "5gcQsxY7enKaL1M/ymQPCB2Vtq88gAS1g81gudXWwSsgR/Ibd4chHXjX0AtLgSzQTO8njFcE\n" +
+ "tPqXcf6jxPH0hy55M1j+uTbCg0+eFHOsWvtak6DHmCXKJEVMtjDK8tu8Aqs3dszlC1vcc9wY\n" +
+ "7Tm6AJPb2NXD+Ly+/b3H2RsyUYU2GmgyKU9DTzm0Mso075CAg5Kijo0KmTbJtOdkqFdpd9rf\n" +
+ "PjA+c/Lt/VFyvyRkUppHyOmecwIqW+N/1gS3ZPWZv3CpO4kBL+I9VszGpe8OxiQBv8PVmWy2\n" +
+ "j5alpanRWLIxJUqxDrR++cwvY+zk06i/cj9PaA7ZS+IjXTgYCEJjJe5gLNikVYtXeh06biiZ\n" +
+ "NKV++wfi7BwbTk/zapIFl/aYPjricw+OioitAvtMlNy9TLfazmpMjepWGu6eyEmsJUu+PyIJ\n" +
+ "qLr4zp6nPL7NDkpC+d+NW+26UKJDFuFu/9G0zzzAFIqHCAtY4iVgHTQBeZjUXhyrkQMIlbhz\n" +
+ "/AmEAYHjNJMOcj/1fNp17bCJKsq2F9PVspsDAFWs5aM3xoiMdPZbacv/J1LnZzxqLTw4ayOq\n" +
+ "xISNpve4zdHKBaMXL711fnZ9Re5dvRxBtsikW76m/GYsZqtCXc4dAgwz5bJZTwCErKv2HEwA\n" +
+ "1czSfxIKtLZfgcn2OpUTu/pCXXwjUgrW76mMxx+Ew5lRzyQuZmh8FEWLVEfl6ZUfWF/uTp9Q\n" +
+ "2pdnrqZSfGa44NCqafNdCoBRA8+gBeAVRb4fDc3gdQMaJvgM/Q05BYk+x/7QSq33aPEteKH8\n" +
+ "WE10HHKeCF/Q72BfWs4fKAe9uLOdCSpxzzbW4ng+XcguAIsKW7BMGtH8mfr7Hx0VONL8o7Jr\n" +
+ "cE035LUC7EedjiZSXBXmIqoZ/WBd4IM3I9w0MCQqORg6eqsjcsYeioF0KEwyu8U64W88KqnV\n" +
+ "9UkUbf1pdO1vV31/r2FAfJL+zj+t2syzcDzaB931FtdFGb4mF9WBQMDAXZ/tRgyh7J/L/n/g\n" +
+ "zy6SMmsqB7XJelHjNXmi0o/Mz4GzneVzYomV5iwRctD6RP8dxhUDFgNdFZKepu5vke5Ly+ot\n" +
+ "mild9RHtQFL8tfoUVfAGdHTFByujfryb3agmM9Z1JccGa0qSXWUVOGLNWHDeBoKglSavkYGo\n" +
+ "60Ik/PVq5rn3Z9BraKsNEM1pa6IE6jyMJLmK3kxzs7F4AFpMV7fpzUMPhEPrQk+PTflWt+L8\n" +
+ "tttXzOOdeFeD0Vm8dqvmye8hhL5KwORhZ5Zj9vdKG9XZwxb3YL+o41AK1K2j5u2M/PzdpgQf\n" +
+ "oX0te214mUo+6MrYi5/2WG1Quzg4ZK3iyGDNKL8ezu380DyoYCXgzBkd3j4FxppxXVi/ARD7\n" +
+ "1mYABAPecrRIxDq9i8wf42ih+nm9M9n4CQ6LCTSLeH/uZPZeIGagFuIhHIeQe8DsQ2G9eH5p\n" +
+ "a7czcNHkWn755Uor3JcR3o+jQ9APDd43W57norYHXnRJomN+45PWIEhGyOMNozh3fZ3ySQ/C\n" +
+ "dDNiReFjxm1gWwSF1C58vZJl65nnup/r7ni6Ht2gia9yxA38EeVg5yELuthsxwTbbjk47VYv\n" +
+ "qRv3B5d2HcjfS0zg4mHjwSZLu+ia5GquBAOPKgotUkti0ianeA6SZ88owXsmqPV/fUwrwFkE\n" +
+ "qC+2TueF4hVHXXrArg+Y8HfFmNdHLFALhnCzpYKIvwAh+3ruQ6degZgl+LYW5gLPoPaZoy10\n" +
+ "c1oVkdvaIrk3WSp1wmg0X7MB5lKQwg/9pH9aZ5Fg66L/NmYHck5SKlnx/Snmc8bSMcBl1m/G\n" +
+ "QLzxOQaFVE7sTRw3hRiOos2mA/13y1pF0/yxT0sICyfy/8JtLXTFYSzIvr1YqdJWPXHRkC3d\n" +
+ "vaS3FJ7sS2zBxy/rqE5Ee7S+nkQH40BFgVBZ1Y1HC9h3PX1YyFgTT7DG884Na8mPbmkBtrw+\n" +
+ "TulraUhj9pk4Hl6sGlDyMYMJBXoNollqyVCzYAlrOWtVKroCYYo3OXV7Doa0cJBmfi6ZCDbB\n" +
+ "3g9DFd48Zn2jn93r7m3TZWlBLj+pGVieBddOhA/sV1RJcWWpojhlks2zjpPsnbg6PLo+w8Jl\n" +
+ "eJXii0rAir5oHxlGZjED7Q0vXfaaAv+eZsNHjzZrYty5sBI/5csCns/RaERoKRWnPjqAihFf\n" +
+ "G4R4n86fpR3qQXS2LCzUipcQ28qydzRGAkzJndFCLtyGhoaVkMaETK8kX/k2iuf471Xxyj0F\n" +
+ "CW8jabKPKeUCAtmGQhbwDFSrJvTmfa1QaUMRiIsNkU7wUnbdh1nEasT55GWVLZdbqWRmJzTj\n" +
+ "YB2jXv4/QN4Iie8EMRCaZyL3NLtFya/Vh2sUbqGsFw16YSZZK2E3fWfM94qcOPjHiJyPxoeT\n" +
+ "CaUdSGuSm+hTFLDoh+LZfwAJEeGMUDAZ/QgiwkunszCNI7FVEhyjcPEOjZ97ijODUhfO1r2T\n" +
+ "neGCYIoNVMzR4n0nGL8fALyxirV9F+dBZlIwCWPDZWwes+8A9buAJhoAZ7umJFYhCQiqXC+q\n" +
+ "ZY0glVYTnaS4fNQ8hTdi7SIN86WUAZiNm5LIlIUHk4ysOonExQ4VB6lCIHJrj+ucK4v4ZthK\n" +
+ "fP9w8kiqpm52fln2dqygroU73DxXsOcMCi90JqUaqAgKiXD82pYVJdGkuOFcsRVKyI3BaN4c\n" +
+ "eaJg61/PBbmGdgUUeubiuG82aGo6nxYGUa7GAD/VEKyyoyl3ba2PvhSXe+Fw5LdsKUs7GM+9\n" +
+ "YJ4gwNW9upKSDoA2ZXubC6XJ/fvAPlkDQj03sBM82GUB6Y2Y30ptrODa2RlQr4/aH1ny0Tt0\n" +
+ "fWv4rEivi8ZsKHwvAyThGW4TQi6Qa6j0d/eabNJgKHnRAPXPZPuYTeudcGg5tbaKn6OeZBoW\n" +
+ "7XzN36cOel46M+3ecBYcYkbGhtjqaMCWa/8iTA5KzXRgK0n29qZZC1sbFECkji4ObMQyjMTO\n" +
+ "sZZyFrJfLo9XUfhj2EoJZxTnF7rIznjSXhZsxN2KcOlqF4hvULDuql/2AiW7nRKa3LnlhYc9\n" +
+ "z5IJ7vyDU/0bMcu6T+bo3qdDNvMbBZ3EZQQzzQqqu0Egx/MEyyXo3UQqlhq2ueZ8VkOIpWjc\n" +
+ "x62QOVhkzn/rb0ASB98UDCMBoAHnu4Vt8HhNVj9gtYzIwsLhCWWZtXe1qslbKHG9RoANsfdk\n" +
+ "utZ/GMSuUXjoV9NyQFAomsKHf30bTAU4nYkxj3RJviAO9aQojVMRqUW/tjZ8k3hb0u1G6WWD\n" +
+ "Wa6IZAhj/WiQTnFkiiY/hSWiAKm0Er+prhJmlLuaGwMjtQIMOyNpjK80TxK1BAgJdGQnreVf\n" +
+ "8HnXz9dHAAiFGaloyC0XtjaadnXdKCW0Kq2DvyPEapfWgKoJnDc94dX80plKfwps/Dg09uoI\n" +
+ "h9ctSWicYy5q2gg/rmy6fBjw3rrv6P63Iu/Yr7d23G1lC8MukkUa8fTB89wZ3n0Eh5+SplvG\n" +
+ "YyVY7kPXxaLS9WPa7GdpEFFsdLoJ5aX0ipGibkTATFWdZEgSCGwCYUpMYJW5K+FvMtzztlCn\n" +
+ "e9ZafDobzb1UFcXTbMcQye2BlFFQl3+qq3AJQi7+OzP8tmprRws5z1WfnUr/8vvps5f1PxpW\n" +
+ "kWcQv1GNlq9ICkiX10mEOq1tVW+nVowf4tw+2y1f9VyuKnKYXWhwlfvuYWfqZG++nXZANhUO\n" +
+ "5CbHpk2IYVk9W1sMNOWq/yIoJPRcVCoNwjz2M0d/ugpAcr7RrPtdG2JWeMPaW2PVkt5UCUy7\n" +
+ "kLjx+EY+EJrSjJfTKNAwNP3zMCPohFuTL7P8zW/qWdoNdr1J6Jn5epvFXxg/4AFPM+LnchuU\n" +
+ "tDxIVNH9If++y1P7wIywzSZVjlCEk+ayGtADUqCvnzwAzlYFCoBEhwTgd+KlEZoyIQYeS8nY\n" +
+ "v6ZkgL0prWsIP5Ctg5cm8yZdXzfyWZ2VQf6qQD0amZ8HjMI1TjdWTtw+sJrTSn6Y/aA9vJeo\n" +
+ "ekNLOBvlYs9UkkxoB2/P+KlWpNswm8ykO4F+kETaPRIj+0Jquc/DFO8loLZJpomo9iyu2+BP\n" +
+ "Yi4cZjRnUNyo+aDK2DXM7wGm5cR+SYHkxPRRYmhrxDdJ8GA8Y3pw+KU+j5DeKgBInnfuZiHV\n" +
+ "sfxmjdhn3OFDW5NZ5Z9PE1M+qDxNkve22sJmiMGPmbBGP8L6icoMyikAJPrvNUll+7qpgt39\n" +
+ "eZE+P3vufj8yXa7STW781cPiWGb9b09nri2b81gwan++4n1UTWRUloMWj4m9TFFY/xw7/1lq\n" +
+ "hP09aPwIZa+sy5m4WP5wD4Hp3l8VcobFeWii2PoU7HCDRyM5J1BFAtPL9l4Mpx4CWTM5OYzB\n" +
+ "H0ihCM8sNWbQFCttc9w4Bh+vzWWpFJPDqikr50aWTEgXB7LwvTx6LiKZBV9cF/MDx/8kn9qN\n" +
+ "geqJobwdsQB8zjVzmLsSEatDHkwpn9owhFV8l+BxavMvBIMGx1lc5zOQ2mXV/n+FVDnEo9fk\n" +
+ "rurpjd663byXVgGtot3dWyr3tUjvARqNjyK7uRUT8O5mK3yDbmtE6+Gtwvwemm7nD0btP1c8\n" +
+ "pY7OaE6MXWioOnzhLH/5spqrbGGV/aP6MeQ+HNKtR4Jx5ujIp/0dVKIvRJ9g+jXSfC8o4j7N\n" +
+ "zw0/+uDrV3Lx0/6IuovFeTYLyKLyuyclVv5hnNZ7msql4Ld/+2tekKGR9Li2tLiVFJ3yfOvy\n" +
+ "YpxDMi//6FgB5tXubPRDLKCP93qurNaCpEICApW5m8NpWvbYcoSSdZKM3YNNaevmP57XVO4D\n" +
+ "IXn2H/7yvA1NdmtFkOFKkkXvhO0JnrNlEa6NMx58WUm+7owtUEv7XT22S1aOpZq4sPky2yxD\n" +
+ "gwXqB5ygbnKbNYQb+mkkLIjiTecgFM72gKmtLDz6huaNvnpxK4uPo0QEqdRUhwD55fcoYiya\n" +
+ "0RsQhNjUAQXC5056WzDuz5xRahSQ2PbrT04pI4hzrlvOdJssi8TtKiL5UFjD67pwIcbmNnps\n" +
+ "1RXo4g2O1nef5/WHe048ZaPdV/pvBdTiEp3bjKFTlD35dUwFcOmq5+W964BmljjQYu/6rGdG\n" +
+ "3Sby2g/B+RCtEz7NB4GA3/5ah7SoJ0cimcA2HRF71Pa5T0cIkyEORSCA9pXrXi3pDz0RrqRQ\n" +
+ "4MsFEiTnJvl7K8MVRfGhVpZSxyvfC1WY5dZ760HKv+fBJAKPZywaIT7wg3Ka58t38u5ZiKFc\n" +
+ "mGzN6M4mvgKTG8EMKgjCcFc9v1IdkWC9vijufVcxfW3rFkPNnakWL0td9qHKq3/mlpxVpBY5\n" +
+ "aDGpdCyzIAmshRa7zXt3LzVWSLmnCzW3aNWd/eLmjLfA05e09lE5ZRF4lOAU5bIC0EB3+iLS\n" +
+ "OSfPE4APylT+7cMlkp/CdBbAfio4xrJbkvSgqwESXWisFgZ9Zih1b/APM68woGmpf5aCY2Wy\n" +
+ "0MqzuOpHerXyh/O8nai1zTyDz0Nqe5Z60ITQR98tV2DHsQDazPSU4Jp1zAA4QuW67i2xps1p\n" +
+ "g0IlOREYGCry/mCh2SPX79USHOq3trmd7OVCaaWHSzzlCuVjm3FCplHq+11/sAw9c7Y9lriS\n" +
+ "zp5li+GbZ1ZVWt38XrVoUkGrexy5Im09C0zNNYMNMMehHkLGXhDBAmBwDWgw8xP+SP8vBa6j\n" +
+ "jCI9AB3AOr7kDL688ts8B+8oYeY9/2UiH7HA7Lb9Lpz06ifrz0/Ojt535D/WPqvJj3r3NgXT\n" +
+ "f0mrcEuuFUfcjnRyKtPQevdgzX5ZRHsvyijAFAt9yUt9AlcTiZOtJerz0RIsTgq8T2tsp0Mx\n" +
+ "vtHsZWqgzKfnW2SGiFhi2aIPZgAZA5FKq5zwT6sWJsaN3iyzqU+4reKwYrx6ZKNu4fT93y23\n" +
+ "Via+Z9s4dy3JFG8hrIY06WG+9XOyFqIoccDiwFfqHGf45mAjuuy9x8SQ5eMWe57tVSFUWxwo\n" +
+ "7zDT80Lh6wWc3cPomT5OWz379x2WXmO4MxXdrx9AKBT2tUXF8aCDbtx3IhG4QRtjQ0STbkjV\n" +
+ "ftV1iQfzx9invUZlUWJOBYYO+ZvnJ8bsS9+ZlNShwxZD3Eq5RfGAyEIF4W+PS2xpZuQxGySZ\n" +
+ "C9iaxZjBGjWJ1N8XwD0c+Vsuyavzgfv3ns7dKSiarIr4znXJaBhS4kJaq7buQ7zVf1iHySHj\n" +
+ "MxkhErY0oZ8DJTxKJDuOYPfW6GtinXZpGXE3KMy1FDXUSH3RC1DdnNtQBpsbVxEjzaYD8Gzj\n" +
+ "d/rWJzE1qtTK/OwlHwZyN/5XDN4Rul76dZbqC5En2jcVo3wlh0wiOQMk5yjycX0exzEJMlU7\n" +
+ "JTlcwkR6zY/Pjgd6l6dvATedIQbS5gxeu7f0ePCbN9coIAEJF+/LtRSeONypYb0MlKxEfena\n" +
+ "LR4XQ4kH3q0ed8jl9E9pXmGJKzEL9RuXiRZw455wx3J/f8ywNWrQ4JWdXKVklTLR1QrBRPSo\n" +
+ "K0qKkC4thqs3dyxgDdywKKq/Yz5pa1KPbp6RL6Pof348nmDbbj8QG59agAaMoRrZnqJmB4DK\n" +
+ "IS2iu+ES6KSmauUTlI+ZRV3HBj5rwu3QDrhQb8w6uC3TY33RcYlFP3MVaHQnlG76tMxkHQ59\n" +
+ "E8WL1dtVTzhOhETiZJZeAgzCqKc9L6aEtHvWZdqnUoWDV0O4UUDMjpNu2o8xYH9S7cFDbrWV\n" +
+ "coBYOkk8H0B4V1toNM8IMSSGs38G0hO0aK9LHyrGfEDO6HCF4qt8K1jcvbZmbUGUvB42a1Hu\n" +
+ "A02aNM7hRsnEOpRCp0l30VSlhdB3tgb6mI1LvNXe3pwSd61Hr+DIx8xDZ0cGA+b2DP7hnYp/\n" +
+ "Z57jk2qNwTYl3Yb/K+QTiv7AN08YDg5pcmkwfR/wuOrwqQp/remhQXUivUu13pMik8YYlwMc\n" +
+ "x0r4r8EmloRkiU8OuIv05EueMspLJItIEnXxchN6BuXdmB1G9C8NN9jl4T2xsmaE0f1vMPRI\n" +
+ "5OOHmPdwoRvGC6qWkY2rpY34haRyTAWrDhELca3kIgIVgsvIikbTkQvhY2+2mTrtlVSDcYhk\n" +
+ "ngvRPIT9Q0je0IK3+3XLog+uLQykqtOYKfsA3hfAAKgnghIQjwx9TO5ys1yR7AIGeYj0fOjI\n" +
+ "+hhwEgeUe3fttRe10FGXei10Z62TXiF1skEL7odnWMpkQ4vN2n4H7LdG+dFkVU1cgJXI16cP\n" +
+ "BKrrknaRXmURgVrJk0sItjxKGzU0OG/U9amKT39LXTc6x8hhAOwNeJLUksGCQpdjsV5XBnw9\n" +
+ "5+1ekWc+MPQK+SSgxWGaNfDPw1IxEWehrIAKjRqFhlsGLY/wbgM0Y9g4XugGMey/Ibbzdvuz\n" +
+ "M6HdffYtEHdzoAAh5CEo1g7jQvzyPHVdEwhxDxV/MmcWB+B3D2AHvgE3EDealVbtp0sOBcKj\n" +
+ "NGXbOPnaI2YzkKKC6Z2DeQBBOLzz3saaoSAi6yy4b+xxK3bcEi64nGZGJKuNi3MpCBL4v71W\n" +
+ "7eUFRwKYyaHhLY3FKwTOs2paQysjDc2NIlOBSrJLfo1wgmU9sQJx7BQdkUkPZn+p7GdfFUoN\n" +
+ "k4akjIewzRwKzPTmIPVhzb4HrmbFp3EtxPlKF7Afzt+6DC1FhzSHVqMyTOL37WBCa+Qthw6l\n" +
+ "lLRtXnwAErLnnaom2qgWl24HFkvaEKu/X98eQqcf/mGcjKgHII6zfdkdHvS+lMlbdo7ATzC4\n" +
+ "Dhx50456H4Q0/4CZ5VE91q1sGglKl7o3KdFiWu+WmgKfJo6/Q9BsHNluLxRPJMA2qEv7/e7o\n" +
+ "JeJ6HBYHHdwB+1DU9VnNXdk8d1SlAGyBjVzE3s28bHRe4pLmwCh3CIbwiNn0NCXavMTMnA7H\n" +
+ "RbrW6eHdJE0AwWs1EX+SPi4PzhFkT5k8iQxQqHbRiKAoqnD5rbhqxifapAf2SA0LNrlbvGY8\n" +
+ "22kE11mwbu3QXvbhD7Ji/1U6E+z/DpYFz9xeXGdcZAFEbo3XfuHh7LQ3FKENTKFQQhVnuX9+\n" +
+ "a301TXP6se2nBIIgALj+F1K0JvkeZE0ZxpXrM+5U3lhSBmPWT8xNBJ7c+EiJtGEhOyQVUZMR\n" +
+ "mOgMJ8sWfEPHQFpgFiRPtw3/Od4vK5IFpQUPqQWCU5wZrp9qrxlwcQAPu+VG2QFbudaIKXJk\n" +
+ "udzf8ltnEc2bjGFh2opSvUsQgh0kOSTnLLVAov9fIf3qKUVeKFcG2xpFIl1BlelOTmKAU7rH\n" +
+ "diRY9ujoLTvkIg/9o+rk83GmPHR3xz3i6RSrOGeiuLZ55PffPNc7aju38GYw0PV02E7Vex6X\n" +
+ "dtmimBHav6a2WvhFZhFzG4O0jr26UKDXYDVHKb1at4ymDgiQ34KAZxT0ZxJmeNAq/KZeXXfZ\n" +
+ "0D7hZ/xhS1+3CohTQM/fG8P6lWa/ohDJirS3tjFqbm4VSjqJaOZqMmTM6SgIeTvypH52i/ZM\n" +
+ "caYsH+/BcGn2W0nv1ZHcjmuMHkrQ5UfFH4AqR99LYahcAMFYE87unzVln/ljrM2hUCkzQjBd\n" +
+ "qeR5Kgfsstnc0O0dcGdmPTRHgJDoZdQzRFN2M6CbwpHl9OO4EselflWw6Z5QwCzBkC/3Hbmp\n" +
+ "wBLZBE45JFiiIqkrxT0t5BAxEYGGyv/JSTygvY6TrsvCoH4AFVIQTi3gsy2/TdcEFU8zwrQ2\n" +
+ "5Pui05SlMlfcccuoRTMMH3qqhuzbuQMz4JgLe7UdIvcQkPGIUdUmqliXOSd1VfSjhVIrqxJe\n" +
+ "4PxKcWNUdGbstdujHh+/KvH4AauRpn9pHw/P/verYdaFFtHpSpADHahd23SGdeWVuhvBGCV1\n" +
+ "/AUb6AoGFXU+m5TV8J+DLH//yvYfzu2ajmTWHpo85/CSnxhdhwF5MWQ2mdIq/x8TC8MwRTDv\n" +
+ "iXs6QCKTGlmSieaQnV1DS6y3np1rJvZodA2/zR6CMNvXoU/R+9aYVVA8jBI4eVeMghn6vp4e\n" +
+ "E+QAlJNU9ji1xLKMzPbWJ5tXryiB+AOF/hH1U31xfFEL/XzDTE+v4rCBpi7xgYLl6CYDIziN\n" +
+ "7AJuq9RdHhLimkxqT7dYH+rPE6BUgoS3wUi1KKy3IfRESuJ3UBPitkCaUvWeE3uZrK40vj81\n" +
+ "VDC3GnXWNXxSRAkx67hi5CBTuWlhFhIrVs9VzTiODlmlf1ln/AcCfwV/xg0QQ0NcuVj6s3a4\n" +
+ "qUm//jigFtxx+AFymf+1ABprCVxD05+eKvH010FgqX5+QPUre2ikKmh9/Cmi5/P6swC1AQgE\n" +
+ "ykXGHatZBjewwuegFIa4fMlqOwQn3VG/JzpsXiQxL5cDpWlT4e58RE8GI9bKJeL5c0ceIxDN\n" +
+ "qQnMgf5HCIUeEhPqskz8Q7nr5T5BRcxQ1oVaVkhbvCAYYJyGE2PgxZbwGcO1qgHVFahWMJnK\n" +
+ "EE2vIcig1OZ4zRdld+3zOdk6q4HExzr/YxllZjFasjr99sDXRnmVTbFQ4qdCwAKtEXfx6dx7\n" +
+ "MnhQ/B/UF3hwl8ODl8uqAu7IhWEYr5LlsOD2rd+T9WiBJW5dyLoBLbhvuVyJzw3dnajipT51\n" +
+ "bDNxbsFft2X1bje54joCjpGpcuIEGntZpU65X4OQiv/cdXI4nV9LaDFvyCsqJ2xQohXSIt2Z\n" +
+ "/pDxDT/ohuCFJDVGItjOcequa/CFwpC+/kH70Pg/84dAFPMug/WgAoIe+cgJ1q5NZSPIBu69\n" +
+ "1bRxZvvaG9cMa/Bs3KLzjWCDzH3zRDUWx+vD0M8gEPjxzF2hFVnwslVPIawHR45fRV3NdDAS\n" +
+ "DMHwVtj4xbFG94OHnBGtEnAH3LTa7dM5CcHZEamHWqnVbASuQkZuiU1xrZEHqtNlNZ+rkO6a\n" +
+ "0m6izOJtlf2Mqw02tIsd0gMD03UOtHC1uie+ZcIiO1bFw6kEoSh9BB3jxt2G2QHf7nJA8o5x\n" +
+ "tJO5Q43AwUIh1evygnVDYSCNtlQ8R2wdCQ6QfUVMhfMxqGajA+SXsCHXPI4YrXGQTawussIN\n" +
+ "E0g63Q/oBxmq+XwarM0+cILrEoq6VfMzz6t5i1DQv/jVmGBlhuKw7V7XbxZV7QKjXhsAhDXq\n" +
+ "sFYxwI/4/AEiPMv2s/p2BNa7WbkgqHrQC3QHrVzwXQglO0x3+iqqSoR2qL0H6TF4QazQiXig\n" +
+ "i3dBIBS8JhdkJFEXY1ylbfSF3xl4DsDHoxHl8KZGYVcH6sThi5aumQLzYxDcstjU26agaSwp\n" +
+ "Uy0HcsZfCK2HVJfBgGJakiEqmjayZKryijz41vqgqqPj1A818TbjUE+SlewGHnnzJY6xDStb\n" +
+ "x2i/Mmu5bvymyiFWaQKKPM5/fOwkUbSO4I+P7JwqFYOIgtuEdKbf2SM9nzatn4FRSCzK9O/E\n" +
+ "pQb5hQCwSONawekPvYfVWHj3WKnUjzuWaUGvCj+h7x1NOgvUvf3P/VrFyUSXQS0zcCiixAJc\n" +
+ "s2S4tbfafNuYSUsSG7DWcastrLHWq8mUkKW/4J/ENONFjzmuXt/iXJt8vSrhWzIx2dMwUYcj\n" +
+ "/9BhwSjTVn4NmMKagxHiOXxwyFer6GbLylVP9+fXXyCt/fODm1lRBPpAdL0ycfrs90GZ1C6q\n" +
+ "gGUvbGHhXlzUmTE8pI5Ao7+m1rm2t2NWWH7IgSK1XggHr9TqGToebgHHbT+peP+7rj50EU0N\n" +
+ "lvGzbpVdoDx8Aj0k4OKDcggHR8vaw8bkuSTNn0yrGN2OlhNrZjzvy1QtH0b1kcVvrVnzJkTs\n" +
+ "gERrq37zfYrZ3nOYegLR1dvuvnl4LScLBVmLzis12XUFoQZ72NMsS4cEVhREkaKkbYrb5kWk\n" +
+ "/nh0ATDW9lC3/yvo/tS8MWsE/MHt5Bhnfb0zH8mYeBIaotjE64S1xwXLr6C+BqO73PlfCeul\n" +
+ "7c7BKZlO8yiQxTPQ1RbWaXqiNT1o/ztvVSYtwFZGWfIdwG7pyG+ewF5aQj2iyxQBiszR0JOL\n" +
+ "KODKBVjKiFqyBjRZ6o9R9orB553QhKbuVC4+vBaGh+P3UwQxlvs3rYE8zInMafcEoSTCoRh3\n" +
+ "x2pFg+mieOPeCXQ2wTSSEd3aF0w7dCNMUv5JKKNPnGgn67sg+2e3s0HoHg6xHvNZ+7FfhDJi\n" +
+ "KydDxAPW8I4f0A6hiQayPN4BavHVIfg5JsAwMkTNbdUBvrTxVLtN1089bPPT3MSEEKf1hNjX\n" +
+ "gb8h7Rgd6zGOv4ovWQHyTncB4d0L4ycP0cBgqi7wh3qhc+FeC9+PCa7DtN6rmxC3knSVOXnY\n" +
+ "8rnDnyA3WN1WgwY/eg0geejJgZglwJU4kb6YpkC3jrZfxgnRETwxmW0ezsHV3jxfTSdntvjl\n" +
+ "EMnrkZTvRX1WWWbjNfGCX6H1qwO0IAWK8PJ6rt1ESOaFGOAQW0d2V2kZpVn/RyuzWtj9VDhf\n" +
+ "ZnpJfh6t46AtbX1eVQx+iE5LhEzxE9keI2vVHTm3m3TVincByj+M7iXz31WNqwPHe11wUgY7\n" +
+ "10q/l6ZcfuJJpv1k+GAbEqkOyMcc5O8TEuGdaVlntU2GFUw56oBYaXuaF5EZ8iu+YnBOXorP\n" +
+ "Byxc+xGM18X5E00NisCWi+Tp6NbK//ig/FHIQDne8qxgBsF2RBiDfBm7TH2i/g49K+FwTEtQ\n" +
+ "dx3Liv5WY/KqTfoK0utGmTt8/HOQmchPrRRv4UaREKFoV6Vq2lBnNsI/SjbJ5E1h4bLNIF5t\n" +
+ "PnxOH9SGzvm3t5VRkyVtWLHn/U92j4mGelwNs0+Su2R3qet6Tjn0NpZI6rkOMN6t/e2+Q/5s\n" +
+ "Ll54iPUt0U7JUiS8ltRQW9pOFLhWnJNImAkHF9CT6ka/QMFk3Q0Gt7RiJDXzHcY3AHmdJ9KU\n" +
+ "b6m8nth3jpLjfbtf0nWNV6MqrsRyPNXpx/Eh6Uu7S+FUAIS+uk9ks6vl6yxStTqFBofoZQqK\n" +
+ "qfTB5MJi+G9XA31vuuYg6V5kyjxuJ2LIYgDuO7tX6Six10eJvjMHqFTdXUekU8JYeucN6o4k\n" +
+ "D0MF0VzTHW3BRCQNJn5w9xAx8KfxB98OArnJjx8KvJ1SQFm4JqpB80bIfC1TIBaArBlN1g7k\n" +
+ "FPsb+JM7YMXrH6Y47u+1ThnmXxZwzsiPwRfD8NcNDGZGcwJvKQdGyd5IMS1db4r8PSMDjB83\n" +
+ "4v+9VOesOI68XrxFvYF49xozS7Uda0lGr3Pz7LFkZTeX+32BfYyMojy7+DrOyUFmUnaaxWpT\n" +
+ "wMp3V6Cj8pm8yGa8OW/ZidqBpMs9cOMy0+ObPvQz5x9p2Fb2yZ833xakHB2pLyNUqrsVzlvW\n" +
+ "CZo2AMGHFZ4Oz58YYWEao0QXWMtRkAEVawYcmkfVocqvuVvVWzh1Z19VujPjsD6pwRbnAGnH\n" +
+ "Gkha1w7GIRsIHvBC+zKJVnPO5VF8O9Vj7cgTgHK529o+w6OgjKrjubcPqopQgSwWAzVS42Xb\n" +
+ "FaFTvYzcdnB5te41pwy7sn3wDQq7fGXFvLfmFJQ3bWlXbc6IXwH6P0DAK8GKU/bp6dv7O+XB\n" +
+ "OBofFA6NRLCbUcBU61GsuNpVIltfLjI90CaGMGwRxGLgpfbTUxNzMBR4qn7E6wb66DR6iQ4Q\n" +
+ "4FyO5TaDHwkZmEgdr2yDWQJx7otQdEc1Gtho3rscsgP3n8wEfGzWCnWLvI4amlpF+lKL8x/I\n" +
+ "lgGUIQgbA/uHzuelF7zxhpXBVYtgiRGLCXkE25foYsTMHXvv51wyrJ+6agLd7ARNL6DVGP3l\n" +
+ "I15G8+ZTwq7ypHdab1IhTLyASjnBZZmPUjGVjC/lCDgc1smm3fFv9ORGpwpdrte9eL3X2Vkf\n" +
+ "D6K4yHuyoVdZN1Br5i1yV1jo884IT+mXgL2CvwONs/flu6cSI91qXgTtXB7m7PzQXARwS+XG\n" +
+ "UTcMaLlxq3Wy04/cg0hM4CiMSQbTcV1vnP1OetmvKXr/qaBhe5guawCfKlJu1vCPUng7Ff2h\n" +
+ "bKi97D/D1x1/ScA8+W5RdxuRLWAE8JFDMA7jHxOYrX21MTTra55pGa/V6i3fJ5NLNAR1aa+z\n" +
+ "RHDNss+/vTdiDYV+ZHkOST+rZE6SAC8KfMZfxrpIyBhaPMB2mZ+iOGi/H4vJS+q/X0COPyU5\n" +
+ "2oEMLyLUnlq+yu9kNskTJNRcn9UCCXEMzUut9/I9dN8JOXxPjF5uHZww0M7qC8DJaLa3tP2P\n" +
+ "QwRADNK1UlDFKmtkg0ZdjeYGpY5Um6sOT4zz7v6TCFLmTCXNiPlrPccvaySLANU3jbQnlJ1E\n" +
+ "ed63D1G+B7TO9IA8cJQZ3Px/H05Wv2ucAc3/rcpumpXRN1RfPKn+XNoglKcd/tM/oJwdCoNI\n" +
+ "iozf5SJmpBYBbOix/AJdb9BkD8shT2IQCevY/wjYJJfmLYA/kVyVDrzJwTYX+9EvgaiF5oNI\n" +
+ "1fBZ55iHr2tG6AdoumK8NpsxxFrOhB+uhl/BfO/YseuGi04rrlfZTn9+cA8vRB0VvFEu5It4\n" +
+ "YVVg4nVsGyoJTCalj/YJb0ZHQzb2Z9qA6l9wRx072k9kUT+iODt/TFmn+D1vVi/YV2ivX79K\n" +
+ "yTyyDprqv+iixouNXwbkqGNWSK6m8DSXfVeyK1vnDTMMUaHwDL3KhDGKIYqU2f1BIiLHNSQw\n" +
+ "XlTIxKYgMShdxCZKxXqPLebDBdkMUGKjIUV+oCryrqlaCMG8nRACZos9rmtTokXdkJA+PXve\n" +
+ "UlqiwJZyMSBjk9qdR+t/Sh0IxUXF0eAtrngJffVEgfQCXdtfUS8YqZeONpDQIYtfCfzjRuoD\n" +
+ "oN1t+FpGXp7M3t0E+CT9ImT1KQsnLAxsqeoJu2NGZVSFXRuba8l2c2tlWfq8o3dNiznWoMxC\n" +
+ "i0B+JLLBIhmzhz2pQOFWHg1FgrKhcqqlm4nnA9scFwP04Ly2uZmpvIBXyf126NMkXky24+mG\n" +
+ "D+BglZabg8au7Ndx0ROpQj6BDc8B7/MZWxDXrNtMYiYgqe1pzAZpK8CketC15t/x7l82BYUX\n" +
+ "hwpAn+Nd760mJjqhC+gzQahH09GqmjDLOe+v13KYUGmCnSEg4+FLXfiN1z9mY9St3DELfjC2\n" +
+ "m+cW3XupwZ8OQ8zErkjzW4zjsvQ4Xhz/6pmpEc3t7OJ1BMc6NhSHIYp98S9615OrfxEPPP6E\n" +
+ "QhR8d8nw0Yzi59bFFsEYRvI0ODqRfQeaM5jgqBooCNrV+KI3qvOmh2CgWg1ma+Verp8VvZNq\n" +
+ "GBnmjw3qQJC2PGGc5ioIVZNbbeZRPXzhrlbk88WaYIgUJ72gsk0Kba3diSqJJ1BuUVBJhakX\n" +
+ "Tx4qxv/seRggUgO3ell5E0e3a5xIEr/DycYI44i6LcYEn1eTCGtfuKHhcKv66nF+8iabaowN\n" +
+ "JIc8fhXO+vXK/tEBHC457Mskn5vSiAeZpWqQHQ8h2xpPTbmPnpYvgSxmmQZBpwv4R8s8PL3i\n" +
+ "XE8gtTyC4/fp8HN6WqG8Zq7wXnrdxyzA8Dw555oRnuJ+WvUXgk19rFm9VdAcXG6vwBhLYMcJ\n" +
+ "ZygnaYviqDmUldjCSZRhEWQNEeg0Xu4a4+lln7W9YeZkvQ3zj692pM5/bxfhRc2KvpfM/zNo\n" +
+ "qCP9ebJbn1vc9nFDDSfK6XAf5XH/7JoEZsXiLC7NN7R9x6RK3Rotupg1qMGtQn/FhJU7vscT\n" +
+ "JMBDfL4acoCpiII/hX54kN1nfQlPxEiVhco7FH3ZcuZ9FFpjy+uIyrsdH4QlyLXWsPq0Dajn\n" +
+ "CAi8om64U7GLayL+Lli72nHt8KWPxrCpDgVkYd1sNp4/QgBNfvsD8dOjAbCe4JzWz5Pr6k2Z\n" +
+ "OGnbXbQbptA/2r8ey/8AMHgUCU7VBZagsrYquYYskylXgtIl4QAoSieXjbsoTKRSjEs4KzUn\n" +
+ "v1C/dA0arWK4e2makIWFVrJH9OmLq7fF4nXsvKwjaz65k2rcHUCg7mQGHC07/9NyQnqE0UUx\n" +
+ "knlHKYvnRu7b5SjLBh6JqN0sbaDdh8vvmZS+zR1TlQ+Uq/ajfpWr1QPfqrgXooTI0KzVJHpw\n" +
+ "ske2e8072lsEW3sIP6WTdv4Q6vJJev7vAKmBOUMLWxtXK56/lH9H+mYlxNpi13NLpN0cNhk6\n" +
+ "1C8buigM8CNd1ePQyxbzAEbVqjP0bMDMI3PxuQBCF6MbDr2/wG6bed/qyYbRYOo4feW0Nsao\n" +
+ "itmFy7s9ZBPXynpAvDqKxSrhW3BNBQIA82KGohQKpRXhi4dr87LJTtu39bin6WLBXredeCH0\n" +
+ "5Jr5jJEdABo7Inkf+wR16svhzJzzpLAuEl8MOUDdZ6PDJS5B2Vnw1zuFbMedhbHz3EWAOkGL\n" +
+ "d35zyMy8TEudBq5lxplIZ0SjPEaJz6wuc2E1Mil2VFIYP5TAPWjFpgr4DB5LIi88aYz74/xd\n" +
+ "lX2VLkWJKuWcXnTaeff46PoXSTpQ+5AGir0fHD2FEhzT5AUx3FF0BKdvXqxeT1QBiYKDMR3p\n" +
+ "MPC0X+3efqz6wAeriiLpIOPauTBtHaaSkSjqOJtoVGkW48Anv7pyMsfwH1U8ayUDmE/6Rz7p\n" +
+ "jKiowjP2aXnGiqIjV18O6zLpW7QHFpvyylda6DrArFbBMIItElZvBmDLafqt/iOT4XKOA92U\n" +
+ "sG3KonD+ZSteuS38MPt4jxYNnnxyBdh5UIpvZ5UhVeHPTt7sjAROdyJnSvuhBUEv/OgDunQl\n" +
+ "+2gsdjn/sTvSCvg2uiXkBIxEm2rXNByEXAzt8eqlNqiNCNN3Z+3Itb8VNQFIWV22BGZWl/+3\n" +
+ "wN2uj/QfcDel5oi22wbhjNkxVfR0BmTefHIuK6yfxE0Gc/om86JLnjT1VaaXYjX8RCd/XRfo\n" +
+ "mkExlaP/JWqK4gpNWStrGHnhN1eqRQiCibAWk2ykzwe0q/QFWcYz9TNGEqbc7tZTg17vSVkX\n" +
+ "+O3FWnofEa4qV8rHBrAGL1mUjYZd8A9LUSQN4K7F+McPSE1vgzRM2146WExBEyx0n7YEtAPJ\n" +
+ "qSrpjQnz5H5TVUYgA7rs0CjQ7nHnGSzyxK+t3GUj4EMzljQO5zwsfBwQTPORvn9Skw46sUx+\n" +
+ "fLbGKt5Fo0RQiUoW6jHMmQ4d/76sBm2PiHfGLAHz5ldeeFvM9MFl1+aMjmZDjhxQkW2uNvd6\n" +
+ "iAPJJvmVf7szHloFgt8Rj2MlBMCnSUnv5cH2RTxRVrBKOuJ9sXHJWjyIABm1n4zYoI8veTzi\n" +
+ "vSjZz0zWaJpYhDC8XB3qaR3Oj29zZiUmCuVND84EVogig7sfjiRDVAmAfvEW21wTAdMwa7MH\n" +
+ "GbmgQs2dzFxfnLpsF8602HEb+41X4W4emzymxQ69YjCjpho27bNo7GM+Im/ye7afFb9dbkKB\n" +
+ "V7f1tn4fv1FkS4fyBqVx+v35rYqjOQFoA9jnFjkx/qwqG9z3MW8D5/zvlaQ7iw8sy9Vki0J3\n" +
+ "E8ge+GvtMaklCAmLsU1OSi5VM46R7h8KlJ9FEnd/ti3QA7DHxrko0gsZXna+fBVGs/wx9dLp\n" +
+ "ZKIrJy35Hi1Jz6ScpFeX3yGT3qo5WKfmLzTxDpVbZ7O06+uidndAsEO6LIEC4s+iTrylvC+4\n" +
+ "RhFt4ECZ0uqP+aOmM/l69K1RLGEAtwvZeo2/3XDyTkEmpa3g9PZtuSrN5QIQk9YKK/JdrskG\n" +
+ "oj7VqUmy3UbWam1xXaPzOF0nU8loT7ibsscCdAp9ePrn8wAJONMOPfIrcOxe+itqALWDl9OS\n" +
+ "tmR+nbLdV/pxDarCeEJphYgNxgLdKwOpN3BlB1EemKkOSwedqBGupAsszVw7uuc15hfOY5z9\n" +
+ "TV6OdnbG8Ne4JML7Iy48hcFG5i1yNk7up0xzvIrqPPOmZiBR5+0d7b5oByfHZBUy6+19ok6K\n" +
+ "0q4bddPWpNIyEErsddXcEoL1Iic2zkAPYB/IbqSyKv5aub1M/kqwvluw4FzZ0dDpJHewrO9/\n" +
+ "8uWwRhlmgHSCTqkJpUx8U2GrmX+Rn992cBFkuKoV+KceuBxwLsg+uG5c1Ml+kam5V3PrjHez\n" +
+ "TY/DoV/VnM8froXvBEaTw3NtdaYMz81+O5wzuYN1D2YnkIbZqESXEstnNna0vezcsOiEmg8Y\n" +
+ "Z/47oz3vU7+g1YXqOtWn6lnxzDTWe5W3FaCtAE4NmMgfXjn0wnIHFEEADRmwGO9+ftxvx8Uy\n" +
+ "wAMQMy753rCu5IebsKpy+Fe5UQAUSy9Xa3OptkgG14EayOCvq6rAyGa5AQdeMKX6PMT+g7co\n" +
+ "hYSVwIXUXzv32q2nV+FpfXPC2DkfgeIlWCWFaaBsSkG6G66JA/IYojkfDJDXYyQV4bjSp6A5\n" +
+ "hI5EzIcajmF45shoBa4wBJ5NrwJx1Mfu7uqfjZCUhP52gD9vIcC3975ReTQIgVfngDwNkcok\n" +
+ "xP7WWPUt/Q+2ZlYEANNgm/XMSgEN63FPvAs62ljNcLp0YCuXpsztDLXsrDKoXkM6LjGhSkXd\n" +
+ "shDR1TQ2GvWb76YWicgbNq5j5FdmWK1GbxxpdzRtkVBaqyHOFB7gAlGYFtF8CXlrhXKxySRZ\n" +
+ "jHWCPbDD0MHSZudWn3tvOoVTxDKG5PA3AqYKl8PoY0vJgNlGR6UrJQ0kDqWyeCODuXkfOVdF\n" +
+ "9ID79DTrNVDMtoQq270z6JSjRXA4VUAaZVzFdqFhsproIY7McL8J4luVfwc2lBhcNwyt0g0r\n" +
+ "M+3zfWELP0e6OxuSt5bsvuB9VtXngtmu4mEXse+oeyiHmuF9kTlsUASB2kne4AsnhqyIklVI\n" +
+ "eaofT3+JgoX4Kpy6vesU7jmUmdDQ5C5d3ccQTLJlNHiFmwQkitO7cpYN+lsLYO57kiWWMjWd\n" +
+ "tL5pg/tOycjsMcqZ+DOPQhcWa7c7WaIJilsqqQA9jKEeurQAr2sxN83BZ0ej1HDEbwA5cpu2\n" +
+ "M1gvCUIgTifKL+EdKzKTleSnMhWSgPgQHGBrkbBXoR5S7XrqCkpCXwpKhwXRCBplzSy8AHuo\n" +
+ "36LK63ofUKWSnrtqQHlLeLGs9k9lSxq5sWELRLhi0vuVe70YSY756VRvsA3V+Rh3h9zOU3mJ\n" +
+ "ob+0WCmzMzrCEkb50qF1mO6nE07gcy2nZ2fZXPlNJLBfPF48kGzNmUUuRG3dRwvGvNt6foMK\n" +
+ "+140jw8Q+/YnwOXahNM23BpkUhvrRaYhLjIOC9ak+uMdIu5ZyjT7CgerH2SDoSOD8CuGYOuI\n" +
+ "Z6vjGBKxEX92UDClwgiIK/2YfgAIpGEAQOWCSRWitU6Jhex/SFi7aVYJ92Biw4wBtcHaHuJo\n" +
+ "4TSSRe67z+LA5X823HG6ibh1nR+u1BIFCgoPKRLpt6w6LUArJZqbYSNyCc/rCynkf5Wz8ZRk\n" +
+ "kC3rDVWmeWAtvLU9k6i1KOUk4vFtaePeGxTNolybo98cOYlj+JFs5mWR+ro6/n4Ryr/IgK0n\n" +
+ "lMvvbIiQM4ckslusg7JOimp+Qvo3hKdbbLLu9ezLZbX6xgT1H5e6Zif3lg8zpbESDCoZ+ZkA\n" +
+ "2X0Q84ofDRRH7beKV+IkG1uIvai+DVlFB1aWvCbV1N2YFX2kdVVYvKuiSxlt66kehSlKsyQg\n" +
+ "U3VnWezaEUhriQrN6u3uqjgAHj0GPhfbtXLNkF7cqb77SreTM1Mkxl/Tx0NHAmPnOvs6DJYO\n" +
+ "goG5W1ywekIpkmDBzXMeFTnjCaXDyBQgpsUklUASySUeJxV39Y2iehjJRiShgyFO1MGF48u2\n" +
+ "ZYZAUN8c1J87DLgy6+pZg5m9eZ/Y5Q1uIP0vnKYA13PmCEvlOdcqb8bgimSixNpWIm58GAc8\n" +
+ "EOtQFCkrwhGq66lDiVBEEhJi7nllTV1WBiZpU//mCqPwV12MYjX45UJlAogpQH6D9rJWEfaC\n" +
+ "xjYyxSpF63jOxkkpcrD89UehYm3bq4eDOGUBW4bFj86iEX0b5Ic3dxMVtK6F/fWGWb823+fF\n" +
+ "mcVKVXV4d8kFAOboGPlC6nJTX9hP6n3CcdHBpU7D4+yamKSPMN0oorOveTkNwofDWwT/xXKC\n" +
+ "Qszrxv56awpebYOByT7CrVnyT1WdsOafrt2r9g7DPUqJwBMPjuuipBNAb5syK90bNWxRwsRz\n" +
+ "+gKSzzg0clu3UfSWof0Kdffclc5FPKPICAcfoVFonUwS2FzmiKpfOI88xVJMv6MjxtxERgiM\n" +
+ "DuBRK/ebHX775Fq/acD6EWAbqN6fysPaBLAoQ0D7RRweEFY8ULWnnVT43OJPO4cP/oYYBIIg\n" +
+ "ANKPCZsvO0TH1Rt1Q7BPtwuOKTt+RBdeXSSF6K3FaLTJH1zCtsVyeRQIjCLZKcssRJy1FGcQ\n" +
+ "OdAnbNIZ68EkC79ESzQ5w/nmXZ85BQBcy5Kez5M0w0f3T2QxBsS7+meWyArZpL1WVJOq+Sca\n" +
+ "6vT+M2Vz09xhBd03Trzyiob/YhmS9UCqlbcGNN6Q61yBT4y0FegjC3Sn5ky7hIP/528rWr4n\n" +
+ "QjjWo2GtcLLoTXRjleIL7VsZPRJ/c5oyWlkwBMX91T3Ta7uhKh85YqChm+6wq++Ov0V9tbxQ\n" +
+ "3JcVjH0lQy0U5dvLWiefkM+AsAJMkKyas+PVuRgIuBFvasILF5dnachcwF7Uunun09hq62nK\n" +
+ "zh3Coy0jSEfcHU92BHoSLisAt/A/ufIMvyqjdLMHnLX6vsWEUj+0XhlqSgAnFED2ngk4sM5q\n" +
+ "/TEH7Z7E/COP9nwJc8HHpIAz50YUgoar77TKZXFYhbc3Zw4Onvl2dYqWOkoTV6qjQ8qOR2Km\n" +
+ "34kCm4PqhHwgJvkMLp7LLX7W+YIg9cqd/rygIxEf6NoIWkp+9DJFfuCMF2qeT8jRnaSHs0To\n" +
+ "OdIrFlUi+V7SGos6AP6R44gkeuXNyon2LD3DnABmmqyjKM9JtqWgxtn/vLNBgOwBcqIp0l3q\n" +
+ "3Znk4QKEIIMCNszdCSZUJwBW5CZDQ4F3ai6X2lxN9g4kmX3n5yKtLkjrWZMfCIwjU51cQYBo\n" +
+ "15Ue0N1+E8WDXHYZ9ahvXWo+dBMmX3bf3SWl+U9xwiljAzZ9DOi/ABrdZsMkVu2N/nm2h7iE\n" +
+ "tf0YqOhwwdenGCNhRJg6MMzoXQLvB68hHO1gZj+Wo6SYLr5DKqdHT/2aXLPiLQtFtWwcwASB\n" +
+ "HGOVqMzMTLa/uqwy9Qr7kF6hddargZwGBozYNCAT1TS/eqGNBm18y7a691qoDg2vzSuHiEAN\n" +
+ "piz/kWW1skxWHFlmdtVT8zHkxg88/h0DAeMiupXvfhr3DygVOih7onC+0L1wgvAOHc/Mcuoq\n" +
+ "qr7rTdm2tdl9sYz9gGKcprVQJ94HW9Io+PJZi/cG1x483V+1K2mWcSDPUImRXNGx/VoSfaJR\n" +
+ "DlMVvNuhBYuVBvjrkx6XDndJPZSjzEDQliwV4rLb8VJJ70faOkI5WKcqesjYzebxqT5J2f48\n" +
+ "KLuZ9rjiW67e3fg8UW5RQyIjJwGwIglPKy+CEuBQaJCZxpzMZ3yzbttiDCkV0NlzuT9exdTr\n" +
+ "ExDsZg6kiZPivYQdQDOJBhhY3LpwZHr7FCjN/QhjC01W5fmbQNTanieL3IU0GjopnjvI748q\n" +
+ "cV2GFVpHWZECq2xGnPBTMfTRkmSQ8WlGcVJ/nMku+Ww0iN4UsWk/m/7ij3JK+KcgVtMHPJxG\n" +
+ "7VbxfnAzdHTSD1K3t1wI3NA5A0Imwd3erO1LPVuMw82PsjM0hUdR4Dtvd/GsCFLlCuUwePBU\n" +
+ "k2ZucLdRPWwdJT/Fy4rX+qbxhfRmvtFw12MzDcLS7sKYvGXIMd129Os6xv8h4Wk6Gag0TrnS\n" +
+ "74sdi8PWo4oK+pDTu8Cb3wZRrTEq9af9XQkFXIWCw8YHgCivEL71XUBAQGPhMF+sdifsgo6I\n" +
+ "ziwheYUtH9pXq2Jo6i4YvfedhJlPssmGento9OHGWTywi3ZHWbAc1h9A4kTivCh2zPO9ee5D\n" +
+ "VTvZuhIbhdA86G0lI+sQRCnLyLjZe7oeK0yOJDzMLUztNHFQhQ+kqQkZvt+bRqvcOYeWn9BI\n" +
+ "d3X+HdyutBWhhMSoknuEByDiK9zo30Wf1yOYt2xqb5p4fvSbFgkP3beD7jcznuEw6TNVApcc\n" +
+ "JpzyqITwXnZo+mJ38CCrPMYEr6RxNTA+XH4uav4BqrSoD5k1IrKqcXUOVoyUk1GOb/fcqUdl\n" +
+ "XJzCb3B0tCKM2qdBrtn7qUc/a7RhzNSwlousUe1OvMeZR3POIQecOjHVH7nD/ihlfhnTc5zl\n" +
+ "s03/ydPiCl5MIHNkaD8ZcIL9s+ejs+g1Mj8r15srXIqW4Q9HXYnwTyWmsTRtKKSKId3IHTmx\n" +
+ "tGycpLLhdZRuUao+lmzvwN6j4C2q3sgiISqnT3Qnti/8ZQxtaJ5yfu8tmGqX9kNlJA5JSzew\n" +
+ "CiEFf2LtG7ZPWoHrleY3zhLrMwbPWdKENohfZuCsZGmhqiqmO0FcOy2NosX3pUjiMrVet/RS\n" +
+ "x24k4Cec2xA6cThnuzBJ5TKxdclLIoNj9tNMsH2sUUEfIY0JcSLntHkdd2S6cb9NyWDCYi4W\n" +
+ "30+ibNY+RYug4Z3AjBMSUqdiKPLO+seP02kHiKm3IzVMQ1zg1abC3dMUgBfxOVOOqHcPaJ0K\n" +
+ "6/hQYhH9CxWggGF5R1yB5Rq6mHw5eD/nnUINjIc8D/dkO0j3hDpOLbtpeAW/O+3RUlAewO+K\n" +
+ "Hqy4B4WkvVTD4estRV8sl0R9hJpSMfXtlGXjkcTujVLcG5XVnzooNYr5QOHoiwS33Lk2aswV\n" +
+ "07ZFgzADntGdBV1oWlX4bEvH4Uhw0UQ3WfSu9Ejv5Lea+Ttp3ygktMGPcrAb1GMUlYBK0twr\n" +
+ "smvHaPAvW0YWN1yFsXEYC8Uhked7n/9IiYBQr1ddUVhjFPYgt9Wb7pbqj0ZXWacLsh8rybGX\n" +
+ "JhsIOKC1Q3ELNoSQU4XR6G3Iq5+sq0YF3R5doJVeqYK4ui4U4uvoqIyOIfAD+Fkd2B5ZedA9\n" +
+ "wR67vjxlsfXISLA5KGFnnFKuAO+k7XcxD5uScCPqz/7WLUl3qSZkL6FdRfJ5hDBh4OSmeqR3\n" +
+ "OkLz4x1PRUjcpcXYhSvnNmsjz88+xZE+uaUASTchKhj3GNvV8tRfXDkgeKOjFnrTCc9ti1vl\n" +
+ "hVfUhFBtVgXcZ6yTWhLTjkZxU6oK3Or1jNfGJ3+8OSGYfRIuSFT0xgi/IND8UYCg2wJVKjhb\n" +
+ "Ysah6CnUeQJuhWlAeHX0avGczd1wuVZVaxbHxtOiwi4IS/qYzTU8R0tFjT6sOOKkC8V1gHca\n" +
+ "AY8DS4uThZj8NZrKCQNRjxjLZvd4O9BqVM4zoVIc0/MumfKAzpj51QXtWsfeL6aUwLcjli4E\n" +
+ "cfk0h3FG0PXw6xmZQMZqRNbVDXydziMXg0tpwHBg9b3zTl5d10DGnMT0mkeVl5j+PhUO0Mmt\n" +
+ "sCXDiaZDVxFXwAIkSz6/5pdn2Iom+8GUe0qCctrEkL6T5hequlQsZAIw4VExd0FdW5zt1lnB\n" +
+ "cBFmUofzV36LG2BHqLXYj9FU+pUiMiOOlP3kPtvFwDmOIMBDHAsUJOIHUX4LHUjR/tAz5+Vn\n" +
+ "cPQVkqqUrps9sQ+syXLHrAPO7qZdRuRyLwjAxARhJozT1rOl39Qv2SnwK/OqP3UzTbA41U5q\n" +
+ "4zcveXZc+C/4zlufru36fLdMtzwnKnumewnUBdGF930V5aD1qsU4UAp/mDnnFZd3yN86ofWM\n" +
+ "fwcX80kZptrl2nxK5Zx3q/u5cPC0uFbbptHHYsPO+AGL5oPo+D6aJXbFh5BFT+od6+f3QFae\n" +
+ "icjnHtPnglfHlDMNCu3pjXDrCX1MpZKaNkfk2mL0rC5kXyOhsbSlZAjb74Xu76VZSIXQ7ad2\n" +
+ "P+c+67bVuG2/eTsiXwGjz/VxTfuQzdORdT6g8IFK9LxYsmeAO6dn9eoKGl4I1V6Dpwa5eLyo\n" +
+ "m6Y7Zi6h3Xe/1y7QQqsdtVRuc6HTSDnS912YqeAMCY1dBuRmRNlnBsVJvpJANAU6l937R52F\n" +
+ "yKZ82C3le/OAPYwJFy7KRpp0OyEwU+DLt7jE47Y5gA+pXJkBNBw2MJoGIKOv8CIXCEg0BPx6\n" +
+ "t7YVvs/H/qKkNLN+2Z1V+u8STlJEq+S2u5jGDBgsJ1JfrXu+difolZkLM32c8TgyplwlPtB0\n" +
+ "uQ6g56Z2Wn0hIcznfpHJLjsAAymRqa6ymEEG+RYGfG//pCyl2IBfx0tNJrdVDLdR0bkSzSEx\n" +
+ "Zmqs3mo05YW08fIaguvZI5TwShuj4VECmS19hvx++dRzg4KPB0nwbUcWXZ4cpXMCGrm37NbR\n" +
+ "wvHfaRByNmVF8e33H9lkMibyT0HMCc29twvMf3EMFSbdKvWlxt3P9hckos4nPKhzhxMKK+x1\n" +
+ "Sk8u9iV2IyNNaTKXhWn/6QdN8+yT8ALuuDyfNmvrnASD6xX1W/qA9i1KFf2ae8S0Y6YeJlgJ\n" +
+ "jq74MN02Z2DD+2EI05a/+fTPDtfnnQ7QZZKcANxXqmhjBBswQpi8fzvG+Zl+CESkJeMnNCaF\n" +
+ "9Pk0DclEeULVWNLZlsPBdfk3zvjCAVPm3L6MoxSKMb40cI5ICmTGpyfouof2YwxW/rvR1P8O\n" +
+ "2Ekkto5tesyxuFLjEkzmMbqDsVe+XBFzTXB+juygxPgkxtEmvw0EAzXp427BmLsPxBdKkA9s\n" +
+ "e5J6xGHkA+3hmOKTWizloRDR+fEQY18QkHp0oj8LYM9mlFhtVu2bbvd9AMJA0F9ypNCDsBOk\n" +
+ "Q9oBsecvfJ4W42uWXsE4jtUCthiJCljRVrNCqT8LS+wohHLKKhr7Ka70umS4PVzYHuvPE97t\n" +
+ "KmfhSJx+oO3yW9feaJQA668/Qc0lyJZ91flDEcKyKoHSWH7gFewPBwuSsU+Tk5wqI15PWf1X\n" +
+ "4lI1mOPaYg31zgckJSvh4YOME3+HTwNAun8gU8h73bMeAO8l6Fu7ijnyin+zBfNCjMgm+tWp\n" +
+ "zvecGCpqbIgbPEMYWqRbo0hvvO6BVQTNWeIMdZY3iZD4PHdHLI76Cuk3jbtvDf44k5m8e6mx\n" +
+ "w+MYOC1R7ep5STedsvmjdW5Fhs0W3oKl9OXk6DkRKFkhhEIZo7LN5KsjIzjkF1Lwj+nzJ3PH\n" +
+ "aPfPq3IAdI25DyKkeZFglrMGDNS8zlNkHzlYohNfBwi15aqwWnT+Us9KogYjpQFqYHt235aI\n" +
+ "JjqPPrQx12lh90DZSuBv00dRsT6nJ6lpAApSj1zfiOUscfc5SgJTV2/WtwmslYy4dQLQMoX3\n" +
+ "JaQwhMlp8ymkKSmTbsLGqJg0PraxzfnpNAsuH/rmr0vmsCVfePbf9ioKlvxJAzqtJ349dSOX\n" +
+ "RgFHcjSuMP/oGV5sL5Q9hKVQ+Lu+iavwR7FP8RVrK+hUzihdeLRueDbCN52UTCkdSrHkynog\n" +
+ "NMKgF/ISuQA3l28NSGdRnpCcKXH7YGbEkpoAd1JoxwtKkoPKsdZaezInM28lFvfY4EIgyeS6\n" +
+ "ec4z6bghHgQIj9CygiWsVeZWm2f8MWf/jHUcp9vZqI0t0hfHtCrsZAWbb68np+3lHhM8CiqA\n" +
+ "KDbW8M3BKeMCWNGMyadYADu+sX3MFjbgHnDncDCfNl1ZhWmtifpIPzjVkmWjwyEgYsSdC7yT\n" +
+ "L09tPrH72poWJRkoWjr0vWqHuqH2dYgUyUw3j63+7Cq6gYcG3ZUvdD8LTVfP6wj53m6GW4/f\n" +
+ "P3la/iS9jSU514dR7/ZsZvxmyxMg7ebjfiyh3oHUJQnJj8xG0gmiys2q7wL+xuKuSZSbOPsy\n" +
+ "z37Rve4xzlSR9ZltIjRx3oL0c+c2VQ2xs1+cIOhiU1udENvjseivKeFOGj+uUrJlCBXIGZep\n" +
+ "gMd6pTHNyLCuwkEnYKc3vxxWe2yeaZxQrBqfI690jq9uGRvmA9JQK9CnSAP9524300JhaGhF\n" +
+ "Mg4J8YmoSv9+gCsbsq90uAiLSrIkeIRpGmg3TAayntJ1lOXnSDZhZAJh3CTk7T8E3zJS+GG6\n" +
+ "mbAvcvo7WRL880W00ZOBtZBEhM7dkIxyqib9zn41SGyAWZAVy6g2G1aRnbz4G+edfwQ1H5jf\n" +
+ "iNGL6KTMioItA8ZpJQQ12aXTqylFi+5wT2N+pdUqBurQWoCnLhY2O1irbIfCUIwnDk5D3a7/\n" +
+ "ySgtiotJLjGkEL+dMcLGqZOo8G1yw0kbjo+iy0mM6MkmM03fTw5KNxjl26UpkjK1Il7vHhtZ\n" +
+ "SN7IqnMQ1gRIYjyIBkS8TRG1z4T/w4Lrh0fqAvy0etZj0Gv38XrlpSI03YrADGD0rI1Z+VZE\n" +
+ "VF0viG+iea7DHg4sP6AtKaHajZcUlinE2/pq9VD03enHxHBqcpo3v34VFlwBVgUfa1Bx5qky\n" +
+ "pOyqJ5XMBjjCcF3UT2GiQ2HigmyFbC8Wx0gAYy7BEmgBVhfqZAUeicrSlY/8hm300j1kkXS1\n" +
+ "QajIqPPWCM9BNqkN1fmsVnL7Npevg8h9zVKoczQH9lZmqnDzW/qckwu4McV8m75LRTNP+ADJ\n" +
+ "KDHREz2Y4VMjGjYoY/xJLwCOZyWd05yqQRiX1ijLjPQtA1BnWVvmtSY7l+V1V+jtjSMwQmNq\n" +
+ "AGirprCNyOBWfUv+lMvxAscCsdMwSWb48bFEQqGW+6onPYHi3QdtJ73U8yGYqCdX2z8Ri5OB\n" +
+ "ceLq8oe11qIIad9IOoklGwE0tvcxl8tZ+uDKV3t/sqCmrFV1/eYdTwaHn0tDVbdjwp78ZvXX\n" +
+ "7r2RgU5ePyP37f05wOgZTfD97KqO2l8oERl6SO9FgsPD7MZqA5MOL9CwLa9kFDe3PRW0OTlm\n" +
+ "5LbCWRNPR+X6qyd7zhWfOsjdyhRa9QYa82q4IOyeUtMHRiy4n9vRSzKVdGlCxbHxkoL8gVzC\n" +
+ "cjhKgnOn1xbMVD9TOPRS4ywu65DiEz3yH5ZUQdeUcxNAIfsyIJF4uLilBctx4QfRg2yk7mYZ\n" +
+ "HN/b5yrCzLIEGT50jbRqQVLdg38ZtnlG/BrvnsQesWqyfw8HQsZgY3Tr50TGxvqBIZpn+ywu\n" +
+ "UADKWcFnnxqggFmlEY/Cnove/yW/6AdZceiq6paNaW2eZY6PKSOgE6LaitOiHw1PCjUCjXVV\n" +
+ "3wts+LSjrFMf4x4QiruEXXu+V5VjX7jatDE+ko9Uz6IU0BkTHi8dublk7fgMq7UopTF/xsnR\n" +
+ "Vwv58q8+YnL+21cF+NQYj2QRPZ+s+xdIcGccrseOILXodpFbVPDGPqkKGIz8qb4STNtM5G4g\n" +
+ "qRTy/lh+oX/8tOll7q2EIYxkUBMUmeFA22S6lmCisiBzwtJT2P6571POxVvG75CvX+6YDUyt\n" +
+ "27K10jYStSrweUrNIO/KjrJ/yb9nSWOaLrni0y/42fa6L1YN9kg3VM4BCMhz9hO9N6gk1jiG\n" +
+ "aSrD6St1f91OLCoYuxjq8aKeo3uRS1mNXdpePIStKTd8ebEE9HMbYCgN9bdnTkmA7KhvQmKU\n" +
+ "5coukIyfFzgST1zngRoNU5HOJTlrb+eMgzdJqciTP64dEPtq5s39kVl18Ks8KsmUiR/eQnqZ\n" +
+ "7wESvxQEPH6JTojnzUrVctzuMWSiAU3o8EK6t7LjyZovKg1Ve1W6FKLerv6PZ+Jbmns1XjqU\n" +
+ "7pJ9pZNoCcjwWPXSx0M97cdEtrcxVMgxB05LEWIyJblPA3flZLpEOVwSAHuJyWXz4PRKJW1k\n" +
+ "56/uSuEAI0DHtfonYz9LQ5zTlmmYAneJTSGU6PZrzzuvGrHegSSZkPRtfVH8C4RmNqUK7iWT\n" +
+ "2MPaHzCAYGXa0AdX98pTh3uPx4LNuL1TrVTrofpypSbatuMVae4588PrAnCcW8yES1wG5Zvz\n" +
+ "tVCzdciw6bZ14dr6EDn9YBDcgqBeFsUpPS8zuQhQeU7/repqjRB8p4KhuZvSb8bMbACYcQhy\n" +
+ "cPvvinsGYHQJ9lrTgEhJetocgrJJxqe1OKnu10uWDg2h/sgaibl0jTmvaQ7Y9FwTZ5NUbNMG\n" +
+ "EeqvVnzsnLsgTAksIRzziIp4ZHFS3INDy+S6VvIDnca/mGTjwlkdgjC75kyihAsmdExopl9R\n" +
+ "W1awVUnXFQXaN7GQYwGApOwZV7VAUxIcfuy+TJjZYg1Fac9mI8RtsipJpfxZ7ZvKKBD7liQl\n" +
+ "kkWUxMdkkYkj0mtIG5Xpiswj/S0gl9wNxzvUNA1tZ/1zXuAmOtL1qGcJF6VBN8+sVS4vxoDb\n" +
+ "xPlWpgVJpZcnuX7qAuehsi70r/51aGCVfcEdVFapntsY8h5X0bbp4F9IcvlUaOqXrMc9IXoq\n" +
+ "nG0jrQ3rLGlYZo2i2gIMBdUCDZkLTlQ/0FGTF2nQi0htDC4cjo++c/y/PVrZ4UPcQdPJaOcE\n" +
+ "z/csRks02TriAc9dspI8dzF7/6qXUgUFPUEcRWv+hS5j2VL3zKlIhRQ14dDS8tHnAUrmoEoK\n" +
+ "YP7GrC/3L7YRaYbom5OrMhz/waZCIz/ZjSwxfd3LvrMMA8fuAKTu1t4qXGdZ7ocYMIQPLxMg\n" +
+ "pUe51B9xwnyiU7Ky0sPBX8s7KDItEO+YXJM32fwB7egddz5qzO09SskraOGloWab8nY0YX0k\n" +
+ "PDtmXUT5J+uzERFrfoZmpnta4qJoE1SKGyS/4L7+30mRrSaD2o+sUyWd+kP6pG23PiGUtqcf\n" +
+ "iQMSoGwehTfSC3cG5XyVbfkXVOukIA+jh5ysABs3KotOPikgVQJCYDS+JnTCkOZKrf0DwRfL\n" +
+ "/roKcv2/ON7W1o25W+QN2yIhNbN88Rjt/5twSB/SDFePFmItzinkibv5y+GZxr9HEaKZ/1jy\n" +
+ "myG1cI8gIUj/nihfEQ/WEjWSyJsO4smvu2Uf7ZN4zTSk/QRukyVrmoOq4dKzStiWssF980Ho\n" +
+ "LCbKzCJFcy13so3MqDtORxtDSp+960XWMOyVHZGUKRgWIKEX2AbJpaEvwCYKdeYAZzIuCwO9\n" +
+ "O1ixtfv1KvzMzCjF4Sk9mucFcVWeEeX7Uw5DDCZ01t8uXxiFkjh+bDal39a7NF+VNDiaAmrZ\n" +
+ "ezysTyjA9h7Su9uizTyeK/eZ+w8hDn1wUe9CgcCGz8PUJxOv14qcUNpZussP0hQVErIWPtH3\n" +
+ "mPRoClx/o+AGTVtLRCR7Fjy6n6Zq8SyGDKf/0xa1QufjHmoqumtnx7tNCsS748+Ys6PFwlVv\n" +
+ "OXcpimPVisL1kfrQHDdBodXnKNZW3rWNtFGYBP1VPVUxqJJ283WRyb/X9bVGx1gPaHzFQj7w\n" +
+ "z7BVhMpPmBt/vAXdIvDGsQGb/16O52bucATPWyOQMLUBVorR07v/5NolSzHYi19ehveceUHb\n" +
+ "omX5uAPXqSR6DvUFbG/n+rOTsYCTlqNIiF4Dm3iAjizMMh15MWvV0+PHyLLl8xcYze9Eb5eO\n" +
+ "5dMRERZCNejwJ6PfFKS46XkfmLCZbWbj30CU6Qb6reC4v6sucId63+TghD3CmbyQ8MZwjB7P\n" +
+ "e4gm1deP0Gw4EqvQvsi7Kq8WQii8OYLO0HSKyF8jzfb8JnIe57A0mge5Ru0KaC8blGuO0e5V\n" +
+ "Mm1SQDXuHJC1dErv4jd3+9Yj4TFgRYumplQR24jYCpC4OyFqcPEBJleAdOkrb775sa48rUqM\n" +
+ "F9dZjP+1MJNhEhgv7g/LLUUuKwwEa6o/Ksbvx6fNikWX+40EyS/wvKzpTZ1vsVxIHMmae2C/\n" +
+ "x3C5YVFN20PUc9VsXjQrFw2T7Q3rtqWgXRgFJrcoY7NDyvJY0UyCBqcWp28N6MKOhXztA6mG\n" +
+ "kS3gZXkpKE+q7yJCByjKLs6D1vTQgj241bswl8KxjlQLw+iC2VYAtZoC4O2BWyD4K9rL7H/I\n" +
+ "i6/ppxf6ofzNv5JHZ/7if19WP/n5XlX9XuMX0ZPU15nRXpapt1hOtT1ER1+bzifyOsywoKYE\n" +
+ "12IgKsb+/LK4k9KlKP+93S+yoFtSWKfunvA0Lyb3Js8h8OY8Kq1Izzw4UjO5npX/uYq18VDr\n" +
+ "LzU6q8d1IX7rYhgrWOmARkzzFbKQ7V9FGyxpbp23Fp4y/GY/F1wnZGRk9CUgdI7ZXIp+pR3R\n" +
+ "eVSECRmLpMEAgKwcoVY5SE7mZYMEh5W3T4GUKgZhIuZPL/8/OoX3HivC2q/A+BMxbJCBL/mx\n" +
+ "bsRTpRvPK8cJgf4QxgE5ylWDpamz52nuNNHxxl+Z7vm0MZ3I7Gj6Fc1pHd+ZhCCjKmIlAseq\n" +
+ "Lofz0yafH0NPd5F8T9onl9Of3ekLWjHSZqNKZVRgLqGPxcM4QMQSQ8vsme/bPABmhrMKZ1YG\n" +
+ "VBpYtJzVTnG55m3r8sFPmPNZ5tdLiLFj5WQvClnVE1Q/eU/1iOjVin9tIfDSf5O+/x6PYs4O\n" +
+ "wHayiyxiAurfPEqlzFcnPznn7R3r+L0mcwGu6YWNOduY3TwoF2NyHnaVzVQrmOQgCE0yD56X\n" +
+ "z2Ur7HwoNYi7Nzfz0CBV2gN3PLGdiYn7J6wyBa5zR5jDhMF0W+oza/+tZFPVjo73PRT4DYc3\n" +
+ "HEHjkHzDJETGdzOHPuQyTIyZGxx2BIyeReQn6oOIREtIub+Ct3KWd7CnW5wMbFaXEYHCagfj\n" +
+ "5/rl0uGBS4KsYrMEXvGCHlJcVDmelx80rBN+Vo6Yrkj8B1LOEgM1lR+9LOBXPrhf9+PRZ4Qp\n" +
+ "9HuoO7Z6x3R4JgERXrEmZUJVj/JVIaF0fjylTj88257mCRWxyO5YpfhBF2s0qLYdMvNKUAzM\n" +
+ "/NjP8oODt3TU0Xt30RTu06amfY6ZnWTX9uzaOffi2BrWrcib97frCqPdOEdPhRcqniIL5U1k\n" +
+ "YnVrSSDUQolLqs32MGdaRCkpGF1lN7YQCBRNXFf0f2KvzC4svuDfsqEnTL7R2Vcu+akAshXQ\n" +
+ "sSjEXmKd49Ky7sZZnEbmfnDaB/+4ZWAYk4gajJib5ewB2pHzp5muKInTyECgYMc2ReVD/tz/\n" +
+ "pd9NG1NzdStchS082PL8DwRJx6HvWjVo9sSd9DGIfVonx6txQ34QCTF9psQ2R0Q1LRtJLyZd\n" +
+ "Dgehsti4GCBZdAQ8dT+2sG4QxKTHwaHCp2mDNWI86eaoZ5q4m0UG+kJrKZM+bbZARbBS5Go6\n" +
+ "8cK2JiiRh465rPEh+CuWJQT12Whk89nbe5nvq+ILez8iYVj3IiBwy2FAVtidPIEgYfVI5TWD\n" +
+ "ElfmXJXR7r2bMCY7RfLR6u8JWLJEoHEGsB2DqUwWixPRaxMbvGMm6t3nwbnhqgeJFNpW+Ntc\n" +
+ "PpZF5XSFj0xTqr6M6alUd26vC7CuXc/MDe29raHZ2k95R8zEnfl/p0HiErPGGA0rA6HmY1L4\n" +
+ "m2yc4wmPwg0cW1m8T9U4bPQaXWQ19wOqrBFety/T+m+3Y/L8aGoHmQlNJpqzbw/DmorcmyjB\n" +
+ "B3EHg5pmn+AwxuQOjTolFP+mmW593LEdkuBpITZHa9mLl5Q2ts3ABIIB4IUmz0F0Z0EOYEjt\n" +
+ "lKryNdgwXjLJF5zLjcs+Rn7FuD9LWs9FRKS8hTYFxGwcnMJLbFgWobGIK8VwXUlhiuj4dlOH\n" +
+ "Llq6eerJUz96gUR5dY0pjci+uVhF9Pr0uJKeGCHJLluqJ8hvE6r0qyXJquWdMgFU480YKlAB\n" +
+ "5XXxVI2geOurRMSSoUXKOk/ZR3i41orN7/gZQPZXvZbNPSVNifbJnqhi0qy9nBsiEtV05tQ4\n" +
+ "kCBnnQmAlNgq//AnuN1H+UNjHxUvtU80yBZMsfbz0BZ6MWF/AlTXEwNnBTXpQI9hYus83AR9\n" +
+ "lht+11eNmwTEVj9VGQVk1S0OTCWe9Gv3mxrPyFGhOJ8vFtBDhpVjSZ5cFCPhGMCZxjrIbzf4\n" +
+ "xjz4fPdSnN3XpBRxuE0FW39coYHX4jNn2FhKtOljHUZjrFL91ZYYo2xdou7VgE7GfVvb7V70\n" +
+ "MiK0OsW8du1c8Iawqmb0H1cWo/GCA8TaFdjfXOWZjEfHpXJvGqW+zcYn2DN0UNYnuP4ITOd4\n" +
+ "A3OQiTaX1XV4M+vKOR1A0OzFty0IxMxcTEwSQM1JQ+zpE11DBMWf4JEo35uAmtvHXPjlyHd2\n" +
+ "YY0ohoV70z8CGMrBN6ws5zIE7n3q7klEWHds5PZMDlzoPZd2rwQIYAM4FwEheYIAAAAAAAAA\n" +
+ "AAAA";
diff --git a/comm/mailnews/db/gloda/test/unit/test_startup_offline.js b/comm/mailnews/db/gloda/test/unit/test_startup_offline.js
new file mode 100644
index 0000000000..d7ba435d76
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/test_startup_offline.js
@@ -0,0 +1,53 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var {
+ assertExpectedMessagesIndexed,
+ glodaTestHelperInitialize,
+ waitForGlodaIndexer,
+} = ChromeUtils.import("resource://testing-common/gloda/GlodaTestHelper.jsm");
+
+var { MessageGenerator } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageGenerator.jsm"
+);
+var { MessageInjection } = ChromeUtils.import(
+ "resource://testing-common/mailnews/MessageInjection.jsm"
+);
+
+/*
+ * Test gloda starts up with indexing suppressed when offline at startup.
+ */
+
+var messageInjection;
+
+add_setup(async function () {
+ // We must do this before the first load otherwise gloda is started without
+ // picking up the necessary initialisation.
+ Services.io.manageOfflineStatus = false;
+ Services.io.offline = true;
+ let msgGen = new MessageGenerator();
+ messageInjection = new MessageInjection({ mode: "local" }, msgGen);
+ glodaTestHelperInitialize(messageInjection);
+});
+
+/**
+ * Make sure that if we have to reparse a local folder we do not hang or
+ * anything. (We had a regression where we would hang.)
+ */
+add_task(async function test_gloda_offline_startup() {
+ // Set up a folder for indexing and check the message doesn't get indexed.
+ let [, msgSet] = await messageInjection.makeFoldersWithSets(1, [
+ { count: 1 },
+ ]);
+
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([]));
+
+ // Now go online...
+ Services.io.offline = false;
+
+ // ...and check we have done the indexing and indexed the message.
+ await waitForGlodaIndexer();
+ Assert.ok(...assertExpectedMessagesIndexed([msgSet]));
+});
diff --git a/comm/mailnews/db/gloda/test/unit/xpcshell.ini b/comm/mailnews/db/gloda/test/unit/xpcshell.ini
new file mode 100644
index 0000000000..4efbf97583
--- /dev/null
+++ b/comm/mailnews/db/gloda/test/unit/xpcshell.ini
@@ -0,0 +1,38 @@
+[DEFAULT]
+head = head_gloda.js
+tail =
+support-files = base_*.js resources/*
+prefs =
+ gloda.loglevel=Debug
+
+[test_corrupt_database.js]
+[test_folder_logic.js]
+[test_fts3_tokenizer.js]
+[test_gloda_content_imap_offline.js]
+[test_gloda_content_local.js]
+[test_index_addressbook.js]
+[test_index_bad_messages.js]
+[test_index_compaction.js]
+[test_index_junk_imap_offline.js]
+[test_index_junk_imap_online.js]
+[test_index_junk_local.js]
+[test_index_messages_imap_offline.js]
+[test_index_messages_imap_online.js]
+[test_index_messages_imap_online_to_offline.js]
+[test_index_messages_local.js]
+[test_index_sweep_folder.js]
+[test_intl.js]
+[test_migration.js]
+[test_mime_attachments_size.js]
+[test_mime_emitter.js]
+[test_msg_search.js]
+[test_noun_mimetype.js]
+[test_nuke_migration.js]
+[test_nuke_migration_from_future.js]
+[test_query_core.js]
+[test_query_messages_imap_offline.js]
+[test_query_messages_imap_online.js]
+[test_query_messages_imap_online_to_offline.js]
+[test_query_messages_local.js]
+[test_smime_mimemsg_representation.js]
+[test_startup_offline.js]
diff --git a/comm/mailnews/db/mork/components.conf b/comm/mailnews/db/mork/components.conf
new file mode 100644
index 0000000000..edf2f0d382
--- /dev/null
+++ b/comm/mailnews/db/mork/components.conf
@@ -0,0 +1,12 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, you can obtain one at http://mozilla.org/MPL/2.0/.
+
+Classes = [
+ {
+ "cid": "{36d90300-27f5-11d3-8d74-00805f8a6617}",
+ "contract_ids": ["@mozilla.org/db/mork;1"],
+ "type": "nsMorkFactoryService",
+ "headers": ["/comm/mailnews/db/mork/nsMorkFactory.h"],
+ },
+]
diff --git a/comm/mailnews/db/mork/mdb.h b/comm/mailnews/db/mork/mdb.h
new file mode 100644
index 0000000000..2f0f0d80e1
--- /dev/null
+++ b/comm/mailnews/db/mork/mdb.h
@@ -0,0 +1,2550 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Blake Ross (blake@blakeross.com)
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MDB_
+#define _MDB_ 1
+
+#include "mozilla/Path.h"
+#include "nscore.h"
+#include "nsISupports.h"
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// { %%%%% begin scalar typedefs %%%%%
+typedef unsigned char mdb_u1; // make sure this is one byte
+typedef unsigned short mdb_u2; // make sure this is two bytes
+typedef short mdb_i2; // make sure this is two bytes
+typedef uint32_t mdb_u4; // make sure this is four bytes
+typedef int32_t mdb_i4; // make sure this is four bytes
+typedef PRWord mdb_ip; // make sure sizeof(mdb_ip) == sizeof(void*)
+
+typedef mdb_u1 mdb_bool; // unsigned byte with zero=false, nonzero=true
+
+/* canonical boolean constants provided only for code clarity: */
+#define mdbBool_kTrue ((mdb_bool)1) /* actually any nonzero means true */
+#define mdbBool_kFalse ((mdb_bool)0) /* only zero means false */
+
+typedef mdb_u4 mdb_id; // unsigned object identity in a scope
+typedef mdb_id mdb_rid; // unsigned row identity inside scope
+typedef mdb_id mdb_tid; // unsigned table identity inside scope
+typedef mdb_u4 mdb_token; // unsigned token for atomized string
+typedef mdb_token mdb_scope; // token used to id scope for rows
+typedef mdb_token mdb_kind; // token used to id kind for tables
+typedef mdb_token mdb_column; // token used to id columns for rows
+typedef mdb_token mdb_cscode; // token used to id charset names
+typedef mdb_u4 mdb_seed; // unsigned collection change counter
+typedef mdb_u4 mdb_count; // unsigned collection member count
+typedef mdb_u4 mdb_size; // unsigned physical media size
+typedef mdb_u4 mdb_fill; // unsigned logical content size
+typedef mdb_u4 mdb_more; // more available bytes for larger buffer
+
+typedef mdb_u2 mork_uses; // 2-byte strong uses count
+typedef mdb_u2 mork_refs; // 2-byte actual reference count
+
+#define mdbId_kNone ((mdb_id)-1) /* never a valid Mork object ID */
+
+typedef mdb_u4 mdb_percent; // 0..100, with values >100 same as 100
+
+typedef mdb_u1 mdb_priority; // 0..9, for a total of ten different values
+
+// sequence position is signed; negative is useful to mean "before first":
+typedef mdb_i4 mdb_pos; // signed zero-based ordinal collection position
+
+#define mdbPos_kBeforeFirst ((mdb_pos)-1) /* any negative is before zero */
+
+// order is also signed, so we can use three states for comparison order:
+typedef mdb_i4 mdb_order; // neg:lessthan, zero:equalto, pos:greaterthan
+
+typedef mdb_order (*mdbAny_Order)(const void* inA, const void* inB,
+ const void* inClosure);
+
+// } %%%%% end scalar typedefs %%%%%
+
+// { %%%%% begin C structs %%%%%
+
+#ifndef mdbScopeStringSet_typedef
+typedef struct mdbScopeStringSet mdbScopeStringSet;
+# define mdbScopeStringSet_typedef 1
+#endif
+
+/*| mdbScopeStringSet: a set of null-terminated C strings that enumerate some
+**| names of row scopes, so that row scopes intended for use by an application
+**| can be declared by an app when trying to open or create a database file.
+**| (We use strings and not tokens because we cannot know the tokens for any
+**| particular db without having first opened the db.) The goal is to inform
+**| a db runtime that scopes not appearing in this list can be given relatively
+**| short shrift in runtime representation, with the expectation that other
+**| scopes will not actually be used. However, a db should still be prepared
+**| to handle accessing row scopes not in this list, rather than raising errors.
+**| But it could be quite expensive to access a row scope not on the list.
+**| Note a zero count for the string set means no such string set is being
+**| specified, and that a db should handle all row scopes efficiently.
+**| (It does NOT mean an app plans to use no content whatsoever.)
+|*/
+#ifndef mdbScopeStringSet_struct
+# define mdbScopeStringSet_struct 1
+struct mdbScopeStringSet { // vector of scopes for use in db opening policy
+ // when mScopeStringSet_Count is zero, this means no scope constraints
+ mdb_count mScopeStringSet_Count; // number of strings in vector below
+ const char** mScopeStringSet_Strings; // null-ended ascii scope strings
+};
+#endif /*mdbScopeStringSet_struct*/
+
+#ifndef mdbOpenPolicy_typedef
+typedef struct mdbOpenPolicy mdbOpenPolicy;
+# define mdbOpenPolicy_typedef 1
+#endif
+
+#ifndef mdbOpenPolicy_struct
+# define mdbOpenPolicy_struct 1
+struct mdbOpenPolicy { // policies affecting db usage for ports and stores
+ mdbScopeStringSet mOpenPolicy_ScopePlan; // predeclare scope usage plan
+ mdb_bool mOpenPolicy_MaxLazy; // nonzero: do least work
+ mdb_bool mOpenPolicy_MinMemory; // nonzero: use least memory
+};
+#endif /*mdbOpenPolicy_struct*/
+
+#ifndef mdbTokenSet_typedef
+typedef struct mdbTokenSet mdbTokenSet;
+# define mdbTokenSet_typedef 1
+#endif
+
+#ifndef mdbTokenSet_struct
+# define mdbTokenSet_struct 1
+struct mdbTokenSet { // array for a set of tokens, and actual slots used
+ mdb_count mTokenSet_Count; // number of token slots in the array
+ mdb_fill mTokenSet_Fill; // the subset of count slots actually used
+ mdb_more mTokenSet_More; // more tokens available for bigger array
+ mdb_token* mTokenSet_Tokens; // array of count mdb_token instances
+};
+#endif /*mdbTokenSet_struct*/
+
+#ifndef mdbUsagePolicy_typedef
+typedef struct mdbUsagePolicy mdbUsagePolicy;
+# define mdbUsagePolicy_typedef 1
+#endif
+
+/*| mdbUsagePolicy: another version of mdbOpenPolicy which uses tokens instead
+**| of scope strings, because usage policies can be constructed for use with a
+**| db that is already open, while an open policy must be constructed before a
+**| db has yet been opened.
+|*/
+#ifndef mdbUsagePolicy_struct
+# define mdbUsagePolicy_struct 1
+struct mdbUsagePolicy { // policies affecting db usage for ports and stores
+ mdbTokenSet mUsagePolicy_ScopePlan; // current scope usage plan
+ mdb_bool mUsagePolicy_MaxLazy; // nonzero: do least work
+ mdb_bool mUsagePolicy_MinMemory; // nonzero: use least memory
+};
+#endif /*mdbUsagePolicy_struct*/
+
+#ifndef mdbOid_typedef
+typedef struct mdbOid mdbOid;
+# define mdbOid_typedef 1
+#endif
+
+#ifndef mdbOid_struct
+# define mdbOid_struct 1
+struct mdbOid { // identity of some row or table inside a database
+ mdb_scope mOid_Scope; // scope token for an id's namespace
+ mdb_id mOid_Id; // identity of object inside scope namespace
+};
+#endif /*mdbOid_struct*/
+
+#ifndef mdbRange_typedef
+typedef struct mdbRange mdbRange;
+# define mdbRange_typedef 1
+#endif
+
+#ifndef mdbRange_struct
+# define mdbRange_struct 1
+struct mdbRange { // range of row positions in a table
+ mdb_pos mRange_FirstPos; // position of first row
+ mdb_pos mRange_LastPos; // position of last row
+};
+#endif /*mdbRange_struct*/
+
+#ifndef mdbColumnSet_typedef
+typedef struct mdbColumnSet mdbColumnSet;
+# define mdbColumnSet_typedef 1
+#endif
+
+#ifndef mdbColumnSet_struct
+# define mdbColumnSet_struct 1
+struct mdbColumnSet { // array of column tokens (just the same as mdbTokenSet)
+ mdb_count mColumnSet_Count; // number of columns
+ mdb_column* mColumnSet_Columns; // count mdb_column instances
+};
+#endif /*mdbColumnSet_struct*/
+
+#ifndef mdbYarn_typedef
+typedef struct mdbYarn mdbYarn;
+# define mdbYarn_typedef 1
+#endif
+
+#ifdef MDB_BEGIN_C_LINKAGE_define
+# define MDB_BEGIN_C_LINKAGE_define 1
+# define MDB_BEGIN_C_LINKAGE extern "C" {
+# define MDB_END_C_LINKAGE }
+#endif /*MDB_BEGIN_C_LINKAGE_define*/
+
+/*| mdbYarn_mGrow: an abstract API for growing the size of a mdbYarn
+**| instance. With respect to a specific API that requires a caller
+**| to supply a string (mdbYarn) that a callee fills with content
+**| that might exceed the specified size, mdbYarn_mGrow is a caller-
+**| supplied means of letting a callee attempt to increase the string
+**| size to become large enough to receive all content available.
+**|
+**|| Grow(): a method for requesting that a yarn instance be made
+**| larger in size. Note that such requests need not be honored, and
+**| need not be honored in full if only partial size growth is desired.
+**| (Note that no nsIMdbEnv instance is passed as argument, although one
+**| might be needed in some circumstances. So if an nsIMdbEnv is needed,
+**| a reference to one might be held inside a mdbYarn member slot.)
+**|
+**|| self: a yarn instance to be grown. Presumably this yarn is
+**| the instance which holds the mYarn_Grow method pointer. Yarn
+**| instances should only be passed to grow methods which they were
+**| specifically designed to fit, as indicated by the mYarn_Grow slot.
+**|
+**|| inNewSize: the new desired value for slot mYarn_Size in self.
+**| If mYarn_Size is already this big, then nothing should be done.
+**| If inNewSize is larger than seems feasible or desirable to honor,
+**| then any size restriction policy can be used to grow to some size
+**| greater than mYarn_Size. (Grow() might even grow to a size
+**| greater than inNewSize in order to make the increase in size seem
+**| worthwhile, rather than growing in many smaller steps over time.)
+|*/
+typedef void (*mdbYarn_mGrow)(mdbYarn* self, mdb_size inNewSize);
+// mdbYarn_mGrow methods must be declared with C linkage in C++
+
+/*| mdbYarn: a variable length "string" of arbitrary binary bytes,
+**| whose length is mYarn_Fill, inside a buffer mYarn_Buf that has
+**| at most mYarn_Size byte of physical space.
+**|
+**|| mYarn_Buf: a pointer to space containing content. This slot
+**| might never be nil when mYarn_Size is nonzero, but checks for nil
+**| are recommended anyway.
+**| (Implementations of mdbYarn_mGrow methods should take care to
+**| ensure the existence of a replacement before dropping old Bufs.)
+**| Content in Buf can be anything in any format, but the mYarn_Form
+**| implies the actual format by some caller-to-callee convention.
+**| mYarn_Form==0 implies US-ASCII iso-8859-1 Latin1 string content.
+**|
+**|| mYarn_Size: the physical size of Buf in bytes. Note that if one
+**| intends to terminate a string with a null byte, that it must not
+**| be written at or after mYarn_Buf[mYarn_Size] because this is after
+**| the last byte in the physical buffer space. Size can be zero,
+**| which means the string has no content whatsoever; note that when
+**| Size is zero, this is a suitable reason for Buf==nil as well.
+**|
+**|| mYarn_Fill: the logical content in Buf in bytes, where Fill must
+**| never exceed mYarn_Size. Note that yarn strings might not have a
+**| terminating null byte (since they might not even be C strings), but
+**| when they do, such terminating nulls are considered part of content
+**| and therefore Fill will count such null bytes. So an "empty" C
+**| string will have Fill==1, because content includes one null byte.
+**| Fill does not mean "length" when applied to C strings for this
+**| reason. However, clients using yarns to hold C strings can infer
+**| that length is equal to Fill-1 (but should take care to handle the
+**| case where Fill==0). To be paranoid, one can always copy to a
+**| destination with size exceeding Fill, and place a redundant null
+**| byte in the Fill position when this simplifies matters.
+**|
+**|| mYarn_Form: a designation of content format within mYarn_Buf.
+**| The semantics of this slot are the least well defined, since the
+**| actual meaning is context dependent, to the extent that callers
+**| and callees must agree on format encoding conventions when such
+**| are not standardized in many computing contexts. However, in the
+**| context of a specific mdb database, mYarn_Form is a token for an
+**| atomized string in that database that typically names a preferred
+**| mime type charset designation. If and when mdbYarn is used for
+**| other purposes away from the mdb interface, folks can use another
+**| convention system for encoding content formats. However, in all
+**| contexts is it useful to maintain the convention that Form==0
+**| implies Buf contains US-ASCII iso-8859-1 Latin1 string content.
+**|
+**|| mYarn_Grow: either a mdbYarn_mGrow method, or else nil. When
+**| a mdbYarn_mGrow method is provided, this method can be used to
+**| request a yarn buf size increase. A caller who constructs the
+**| original mdbYarn instance decides whether a grow method is necessary
+**| or desirable, and uses only grow methods suitable for the buffering
+**| nature of a specific mdbYarn instance. (For example, Buf might be a
+**| statically allocated string space which switches to something heap-based
+**| when grown, and subsequent calls to grow the yarn must distinguish the
+**| original static string from heap allocated space, etc.) Note that the
+**| method stored in mYarn_Grow can change, and this might be a common way
+**| to track memory management changes in policy for mYarn_Buf.
+|*/
+#ifndef mdbYarn_struct
+# define mdbYarn_struct 1
+struct mdbYarn { // buffer with caller space allocation semantics
+ void* mYarn_Buf; // space for holding any binary content
+ mdb_fill mYarn_Fill; // logical content in Buf in bytes
+ mdb_size mYarn_Size; // physical size of Buf in bytes
+ mdb_more mYarn_More; // more available bytes if Buf is bigger
+ mdb_cscode mYarn_Form; // charset format encoding
+ mdbYarn_mGrow mYarn_Grow; // optional method to grow mYarn_Buf
+
+ // Subclasses might add further slots after mYarn_Grow in order to
+ // maintain bookkeeping needs, such as state info about mYarn_Buf.
+};
+#endif /*mdbYarn_struct*/
+
+// } %%%%% end C structs %%%%%
+
+// { %%%%% begin class forward defines %%%%%
+class nsIMdbEnv;
+class nsIMdbObject;
+class nsIMdbErrorHook;
+class nsIMdbThumb;
+class nsIMdbFactory;
+class nsIMdbFile;
+class nsIMdbPort;
+class nsIMdbStore;
+class nsIMdbCursor;
+class nsIMdbPortTableCursor;
+class nsIMdbCollection;
+class nsIMdbTable;
+class nsIMdbTableRowCursor;
+class nsIMdbRow;
+class nsIMdbRowCellCursor;
+class nsIMdbBlob;
+class nsIMdbCell;
+class nsIMdbSorting;
+// } %%%%% end class forward defines %%%%%
+
+// { %%%%% begin C++ abstract class interfaces %%%%%
+
+/*| nsIMdbObject: base class for all message db class interfaces
+**|
+**|| factory: all nsIMdbObjects from the same code suite have the same factory
+**|
+**|| refcounting: both strong and weak references, to ensure strong refs are
+**| acyclic, while weak refs can cause cycles. CloseMdbObject() is
+**| called when (strong) use counts hit zero, but clients can call this close
+**| method early for some reason, if absolutely necessary even though it will
+**| thwart the other uses of the same object. Note that implementations must
+**| cope with close methods being called arbitrary numbers of times. The COM
+**| calls to AddRef() and release ref map directly to strong use ref calls,
+**| but the total ref count for COM objects is the sum of weak & strong refs.
+|*/
+
+#define NS_IMDBOBJECT_IID_STR "5533ea4b-14c3-4bef-ac60-22f9e9a49084"
+
+#define NS_IMDBOBJECT_IID \
+ { \
+ 0x5533ea4b, 0x14c3, 0x4bef, { \
+ 0xac, 0x60, 0x22, 0xf9, 0xe9, 0xa4, 0x90, 0x84 \
+ } \
+ }
+
+class nsIMdbObject : public nsISupports { // msg db base class
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBOBJECT_IID)
+ // { ===== begin nsIMdbObject methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD IsFrozenMdbObject(nsIMdbEnv* ev, mdb_bool* outIsReadonly) = 0;
+ // same as nsIMdbPort::GetIsPortReadonly() when this object is inside a port.
+ // } ----- end attribute methods -----
+
+ // { ----- begin factory methods -----
+ NS_IMETHOD GetMdbFactory(nsIMdbEnv* ev, nsIMdbFactory** acqFactory) = 0;
+ // } ----- end factory methods -----
+
+ // { ----- begin ref counting for well-behaved cyclic graphs -----
+ NS_IMETHOD GetWeakRefCount(nsIMdbEnv* ev, // weak refs
+ mdb_count* outCount) = 0;
+ NS_IMETHOD GetStrongRefCount(nsIMdbEnv* ev, // strong refs
+ mdb_count* outCount) = 0;
+
+ NS_IMETHOD AddWeakRef(nsIMdbEnv* ev) = 0;
+ NS_IMETHOD_(mork_uses) AddStrongRef(nsIMdbEnv* ev) = 0;
+
+ NS_IMETHOD CutWeakRef(nsIMdbEnv* ev) = 0;
+ NS_IMETHOD CutStrongRef(nsIMdbEnv* ev) = 0;
+
+ NS_IMETHOD CloseMdbObject(nsIMdbEnv* ev) = 0; // called at strong refs zero
+ NS_IMETHOD IsOpenMdbObject(nsIMdbEnv* ev, mdb_bool* outOpen) = 0;
+ // } ----- end ref counting -----
+
+ // } ===== end nsIMdbObject methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbObject, NS_IMDBOBJECT_IID)
+
+/*| nsIMdbErrorHook: a base class for clients of this API to subclass, in order
+**| to provide a callback installable in nsIMdbEnv for error notifications. If
+**| apps that subclass nsIMdbErrorHook wish to maintain a reference to the env
+**| that contains the hook, then this should be a weak ref to avoid cycles.
+**|
+**|| OnError: when nsIMdbEnv has an error condition that causes the total count
+**| of errors to increase, then nsIMdbEnv should call OnError() to report the
+**| error in some fashion when an instance of nsIMdbErrorHook is installed. The
+**| variety of string flavors is currently due to the uncertainty here in the
+**| nsIMdbBlob and nsIMdbCell interfaces. (Note that overloading by using the
+**| same method name is not necessary here, and potentially less clear.)
+|*/
+class nsIMdbErrorHook
+ : public nsISupports { // env callback handler to report errors
+ public:
+ // { ===== begin error methods =====
+ NS_IMETHOD OnErrorString(nsIMdbEnv* ev, const char* inAscii) = 0;
+ NS_IMETHOD OnErrorYarn(nsIMdbEnv* ev, const mdbYarn* inYarn) = 0;
+ // } ===== end error methods =====
+
+ // { ===== begin warning methods =====
+ NS_IMETHOD OnWarningString(nsIMdbEnv* ev, const char* inAscii) = 0;
+ NS_IMETHOD OnWarningYarn(nsIMdbEnv* ev, const mdbYarn* inYarn) = 0;
+ // } ===== end warning methods =====
+
+ // { ===== begin abort hint methods =====
+ NS_IMETHOD OnAbortHintString(nsIMdbEnv* ev, const char* inAscii) = 0;
+ NS_IMETHOD OnAbortHintYarn(nsIMdbEnv* ev, const mdbYarn* inYarn) = 0;
+ // } ===== end abort hint methods =====
+};
+
+/*| nsIMdbHeap: abstract memory allocation interface.
+**|
+**|| Alloc: return a block at least inSize bytes in size with alignment
+**| suitable for any native type (such as long integers). When no such
+**| block can be allocated, failure is indicated by a null address in
+**| addition to reporting an error in the environment.
+**|
+**|| Free: deallocate a block allocated or resized earlier by the same
+**| heap instance. If the inBlock parameter is nil, the heap should do
+**| nothing (and crashing is strongly discouraged).
+|*/
+class nsIMdbHeap { // caller-supplied memory management interface
+ public:
+ // { ===== begin nsIMdbHeap methods =====
+ NS_IMETHOD Alloc(nsIMdbEnv* ev, // allocate a piece of memory
+ mdb_size inSize, // requested byte size of new memory block
+ void** outBlock) =
+ 0; // memory block of inSize bytes, or nil
+
+ NS_IMETHOD Free(nsIMdbEnv* ev, // free block from Alloc or Resize()
+ void* ioBlock) = 0; // block to be destroyed/deallocated
+
+ virtual size_t GetUsedSize() = 0;
+
+ virtual ~nsIMdbHeap(){};
+ // } ===== end nsIMdbHeap methods =====
+};
+
+/*| nsIMdbCPlusHeap: Alloc() with global ::new(), Free() with global ::delete().
+**| Resize() is done by ::new() followed by ::delete().
+|*/
+class nsIMdbCPlusHeap { // caller-supplied memory management interface
+ public:
+ // { ===== begin nsIMdbHeap methods =====
+ NS_IMETHOD Alloc(nsIMdbEnv* ev, // allocate a piece of memory
+ mdb_size inSize, // requested size of new memory block
+ void** outBlock); // memory block of inSize bytes, or nil
+
+ NS_IMETHOD Free(nsIMdbEnv* ev, // free block allocated earlier by Alloc()
+ void* inBlock);
+
+ NS_IMETHOD HeapAddStrongRef(nsIMdbEnv* ev);
+ NS_IMETHOD HeapCutStrongRef(nsIMdbEnv* ev);
+ // } ===== end nsIMdbHeap methods =====
+};
+
+/*| nsIMdbThumb:
+|*/
+
+#define NS_IMDBTHUMB_IID_STR "6d3ad7c1-a809-4e74-8577-49fa9a4562fa"
+
+#define NS_IMDBTHUMB_IID \
+ { \
+ 0x6d3ad7c1, 0xa809, 0x4e74, { \
+ 0x85, 0x77, 0x49, 0xfa, 0x9a, 0x45, 0x62, 0xfa \
+ } \
+ }
+
+class nsIMdbThumb
+ : public nsISupports { // closure for repeating incremental method
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBTHUMB_IID)
+
+ // { ===== begin nsIMdbThumb methods =====
+ NS_IMETHOD GetProgress(
+ nsIMdbEnv* ev,
+ mdb_count* outTotal, // total somethings to do in operation
+ mdb_count* outCurrent, // subportion of total completed so far
+ mdb_bool* outDone, // is operation finished?
+ mdb_bool* outBroken // is operation irreparably dead and broken?
+ ) = 0;
+
+ NS_IMETHOD DoMore(
+ nsIMdbEnv* ev,
+ mdb_count* outTotal, // total somethings to do in operation
+ mdb_count* outCurrent, // subportion of total completed so far
+ mdb_bool* outDone, // is operation finished?
+ mdb_bool* outBroken // is operation irreparably dead and broken?
+ ) = 0;
+
+ NS_IMETHOD CancelAndBreakThumb( // cancel pending operation
+ nsIMdbEnv* ev) = 0;
+ // } ===== end nsIMdbThumb methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbThumb, NS_IMDBTHUMB_IID)
+
+/*| nsIMdbEnv: a context parameter used when calling most abstract db methods.
+**| The main purpose of such an object is to permit a database implementation
+**| to avoid the use of globals to share information between various parts of
+**| the implementation behind the abstract db interface. An environment acts
+**| like a session object for a given calling thread, and callers should use
+**| at least one different nsIMdbEnv instance for each thread calling the API.
+**| While the database implementation might not be threaded, it is highly
+**| desirable that the db be thread-safe if calling threads use distinct
+**| instances of nsIMdbEnv. Callers can stop at one nsIMdbEnv per thread, or
+they
+**| might decide to make one nsIMdbEnv instance for every nsIMdbPort opened, so
+that
+**| error information is segregated by database instance. Callers create
+**| instances of nsIMdbEnv by calling the MakeEnv() method in nsIMdbFactory.
+**|
+**|| tracing: an environment might support some kind of tracing, and this
+**| boolean attribute permits such activity to be enabled or disabled.
+**|
+**|| errors: when a call to the abstract db interface returns, a caller might
+**| check the number of outstanding errors to see whether the operation did
+**| actually succeed. Each nsIMdbEnv should have all its errors cleared by a
+**| call to ClearErrors() before making each call to the abstract db API,
+**| because outstanding errors might disable further database actions. (This
+**| is not done inside the db interface, because the db cannot in general know
+**| when a call originates from inside or outside -- only the app knows this.)
+**|
+**|| error hook: callers can install an instance of nsIMdbErrorHook to receive
+**| error notifications whenever the error count increases. The hook can
+**| be uninstalled by passing a null pointer.
+**|
+|*/
+
+#define NS_IMDBENV_IID_STR "a765e46b-efb6-41e6-b75b-c5d6bd710594"
+
+#define NS_IMDBENV_IID \
+ { \
+ 0xa765e46b, 0xefb6, 0x41e6, { \
+ 0xb7, 0x5b, 0xc5, 0xd6, 0xbd, 0x71, 0x05, 0x94 \
+ } \
+ }
+
+class nsIMdbEnv : public nsISupports { // db specific context parameter
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBENV_IID)
+ // { ===== begin nsIMdbEnv methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD GetErrorCount(mdb_count* outCount, mdb_bool* outShouldAbort) = 0;
+ NS_IMETHOD GetWarningCount(mdb_count* outCount, mdb_bool* outShouldAbort) = 0;
+
+ NS_IMETHOD GetEnvBeVerbose(mdb_bool* outBeVerbose) = 0;
+ NS_IMETHOD SetEnvBeVerbose(mdb_bool inBeVerbose) = 0;
+
+ NS_IMETHOD GetDoTrace(mdb_bool* outDoTrace) = 0;
+ NS_IMETHOD SetDoTrace(mdb_bool inDoTrace) = 0;
+
+ NS_IMETHOD GetAutoClear(mdb_bool* outAutoClear) = 0;
+ NS_IMETHOD SetAutoClear(mdb_bool inAutoClear) = 0;
+
+ NS_IMETHOD GetErrorHook(nsIMdbErrorHook** acqErrorHook) = 0;
+ NS_IMETHOD SetErrorHook(nsIMdbErrorHook* ioErrorHook) =
+ 0; // becomes referenced
+
+ NS_IMETHOD GetHeap(nsIMdbHeap** acqHeap) = 0;
+ NS_IMETHOD SetHeap(nsIMdbHeap* ioHeap) = 0; // becomes referenced
+ // } ----- end attribute methods -----
+
+  NS_IMETHOD ClearErrors() = 0;  // clear errors before re-entering db API
+ NS_IMETHOD ClearWarnings() = 0; // clear warnings
+ NS_IMETHOD ClearErrorsAndWarnings() = 0; // clear both errors & warnings
+ // } ===== end nsIMdbEnv methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbEnv, NS_IMDBENV_IID)
+
+/*| nsIMdbFactory: the main entry points to the abstract db interface. A DLL
+**| that supports this mdb interface need only have a single exported method
+**| that will return an instance of nsIMdbFactory, so that further methods in
+**| the suite can be accessed from objects returned by nsIMdbFactory methods.
+**|
+**|| mdbYarn: note all nsIMdbFactory subclasses must guarantee null
+**| termination of all strings written into mdbYarn instances, as long as
+**| mYarn_Size and mYarn_Buf are nonzero. Even truncated string values must
+**| be null terminated. This is more strict behavior than mdbYarn requires,
+**| but it is part of the nsIMdbFactory interface.
+**|
+**|| envs: an environment instance is required as per-thread context for
+**| most of the db method calls, so nsIMdbFactory creates such instances.
+**|
+**|| rows: callers must be able to create row instances that are independent
+**| of storage space that is part of the db content graph. Many interfaces
+**| for data exchange have strictly copy semantics, so that a row instance
+**| has no specific identity inside the db content model, and the text in
+**| cells are an independent copy of unexposed content inside the db model.
+**| Callers are expected to maintain one or more row instances as a buffer
+**| for staging cell content copied into or out of a table inside the db.
+**| Callers are urged to use an instance of nsIMdbRow created by the
+nsIMdbFactory
+**| code suite, because reading and writing might be much more efficient than
+**| when using a hand-rolled nsIMdbRow subclass with no relation to the suite.
+**|
+**|| ports: a port is a readonly interface to a specific database file. Most
+**| of the methods to access a db file are suitable for a readonly interface,
+**| so a port is the basic minimum for accessing content. This makes it
+**| possible to read other external formats for import purposes, without
+**| needing the code or competence necessary to write every such format. So
+**| we can write generic import code just once, as long as every format can
+**| show a face based on nsIMdbPort. (However, same suite import can be faster.)
+**| Given a file name and the first 512 bytes of a file, a factory can say if
+**| a port can be opened by this factory. Presumably an app maintains chains
+**| of factories for different suites, and asks each in turn about opening
+**| a prospective file for reading (as a port) or writing (as a store). I'm
+**| not ready to tackle issues of format fidelity and factory chain ordering.
+**|
+**|| stores: a store is a mutable interface to a specific database file, and
+**| includes the port interface plus any methods particular to writing, which
+**| are few in number. Presumably the set of files that can be opened as
+**| stores is a subset of the set of files that can be opened as ports. A
+**| new store can be created with CreateNewFileStore() by supplying a new
+**| file name which does not yet exist (callers are always responsible for
+**| destroying any existing files before calling this method).
+|*/
+
+#define NS_IMDBFACTORY_IID_STR "2b80395c-b91e-4990-b1a7-023e99ab14e9"
+
+// NOTE(review): the struct form below previously duplicated NS_IMDBFILE_IID's
+// numeric value (f04aa4ab-...) instead of encoding NS_IMDBFACTORY_IID_STR
+// above; two interfaces must never share an IID, so the struct now matches
+// the string form of this interface's GUID.
+#define NS_IMDBFACTORY_IID \
+ { \
+ 0x2b80395c, 0xb91e, 0x4990, { \
+ 0xb1, 0xa7, 0x02, 0x3e, 0x99, 0xab, 0x14, 0xe9 \
+ } \
+ }
+
+class nsIMdbFactory : public nsISupports { // suite entry points
+ using PathChar = mozilla::filesystem::Path::value_type;
+
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBFACTORY_IID)
+ // { ===== begin nsIMdbFactory methods =====
+
+ // { ----- begin file methods -----
+ NS_IMETHOD OpenOldFile(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+ const PathChar* inFilePath, mdb_bool inFrozen,
+ nsIMdbFile** acqFile) = 0;
+ // Choose some subclass of nsIMdbFile to instantiate, in order to read
+ // (and write if not frozen) the file known by inFilePath. The file
+ // returned should be open and ready for use, and presumably positioned
+ // at the first byte position of the file. The exact manner in which
+ // files must be opened is considered a subclass specific detail, and
+ // other portions of Mork source code don't want to know how it's done.
+
+ NS_IMETHOD CreateNewFile(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+ const PathChar* inFilePath,
+ nsIMdbFile** acqFile) = 0;
+ // Choose some subclass of nsIMdbFile to instantiate, in order to read
+ // and write the newly created file known by inFilePath. The file
+ // returned should be created and ready for use, and presumably positioned
+ // at the first byte position of the file. The exact manner in which
+ // files must be opened is considered a subclass specific detail, and
+ // other portions of Mork source code don't want to know how it's done.
+ // } ----- end file methods -----
+
+ // { ----- begin env methods -----
+ NS_IMETHOD MakeEnv(nsIMdbHeap* ioHeap,
+ nsIMdbEnv** acqEnv) = 0; // acquire new env
+ // ioHeap can be nil, causing a MakeHeap() style heap instance to be used
+ // } ----- end env methods -----
+
+ // { ----- begin heap methods -----
+ NS_IMETHOD MakeHeap(nsIMdbEnv* ev,
+ nsIMdbHeap** acqHeap) = 0; // acquire new heap
+ // } ----- end heap methods -----
+
+ // { ----- begin row methods -----
+ NS_IMETHOD MakeRow(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+ nsIMdbRow** acqRow) = 0; // new row
+ // ioHeap can be nil, causing the heap associated with ev to be used
+ // } ----- end row methods -----
+
+ // { ----- begin port methods -----
+ NS_IMETHOD CanOpenFilePort(
+ nsIMdbEnv* ev, // context
+ // const char* inFilePath, // the file to investigate
+ // const mdbYarn* inFirst512Bytes,
+ nsIMdbFile* ioFile, // db abstract file interface
+ mdb_bool* outCanOpen, // whether OpenFilePort() might succeed
+ mdbYarn* outFormatVersion) = 0; // informal file format description
+
+ NS_IMETHOD OpenFilePort(
+ nsIMdbEnv* ev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // the file to open for readonly import
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbThumb** acqThumb) = 0; // acquire thumb for incremental port open
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then call nsIMdbFactory::ThumbToOpenPort() to get the port instance.
+
+ NS_IMETHOD
+ ThumbToOpenPort( // redeeming a completed thumb from OpenFilePort()
+ nsIMdbEnv* ev, // context
+ nsIMdbThumb* ioThumb, // thumb from OpenFilePort() with done status
+ nsIMdbPort** acqPort) = 0; // acquire new port object
+ // } ----- end port methods -----
+
+ // { ----- begin store methods -----
+ NS_IMETHOD CanOpenFileStore(
+ nsIMdbEnv* ev, // context
+ // const char* inFilePath, // the file to investigate
+ // const mdbYarn* inFirst512Bytes,
+ nsIMdbFile* ioFile, // db abstract file interface
+ mdb_bool* outCanOpenAsStore, // whether OpenFileStore() might succeed
+ mdb_bool* outCanOpenAsPort, // whether OpenFilePort() might succeed
+ mdbYarn* outFormatVersion) = 0; // informal file format description
+
+ NS_IMETHOD OpenFileStore( // open an existing database
+ nsIMdbEnv* ev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // the file to open for general db usage
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbThumb** acqThumb) = 0; // acquire thumb for incremental store open
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then call nsIMdbFactory::ThumbToOpenStore() to get the store instance.
+
+ NS_IMETHOD
+ ThumbToOpenStore( // redeem completed thumb from OpenFileStore()
+ nsIMdbEnv* ev, // context
+ nsIMdbThumb* ioThumb, // thumb from OpenFileStore() with done status
+ nsIMdbStore** acqStore) = 0; // acquire new db store object
+
+ NS_IMETHOD CreateNewFileStore( // create a new db with minimal content
+ nsIMdbEnv* ev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // name of file which should not yet exist
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbStore** acqStore) = 0; // acquire new db store object
+ // } ----- end store methods -----
+
+ // } ===== end nsIMdbFactory methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbFactory, NS_IMDBFACTORY_IID)
+
+// The single exported C entry point described in the comment block above:
+// a Mork/MDB DLL exports only this, and everything else in the suite is
+// reached through the returned factory instance.
+extern "C" nsIMdbFactory* MakeMdbFactory();
+
+/*| nsIMdbFile: abstract file interface resembling the original morkFile
+**| abstract interface (which was in turn modeled on the file interface
+**| from public domain IronDoc). The design of this file interface is
+**| complicated by the fact that some DB's will not find this interface
+**| adequate for all runtime requirements (even though this file API is
+**| enough to implement text-based DB's like Mork). For this reason,
+**| more methods have been added to let a DB library force the file to
+**| become closed so the DB can reopen the file in some other manner.
+**| Folks are encouraged to suggest ways to tune this interface to suit
+**| DB's that cannot manage to pull their maneuvers even given this API.
+**|
+**|| Tell: get the current i/o position in file
+**|
+**|| Seek: change the current i/o position in file
+**|
+**|| Eof: return file's total length in bytes
+**|
+**|| Read: input inSize bytes into outBuf, returning actual transfer size
+**|
+**|| Get: read starting at specific file offset (e.g. Seek(); Read();)
+**|
+**|| Write: output inSize bytes from inBuf, returning actual transfer size
+**|
+**|| Put: write starting at specific file offset (e.g. Seek(); Write();)
+**|
+**|| Flush: if written bytes are buffered, push them to final destination
+**|
+**|| Path: get file path in some string representation. This is intended
+**| either to support the display of file name in a user presentation, or
+**| to support the closing and reopening of the file when the DB needs more
+**| exotic file access than is presented by the nsIMdbFile interface.
+**|
+**|| Steal: tell this file to close any associated i/o stream in the file
+**| system, because the file ioThief intends to reopen the file in order
+**| to provide the MDB implementation with more exotic file access than is
+**| offered by the nsIMdbFile alone. Presumably the thief knows enough
+**| from Path() in order to know which file to reopen. If Steal() is
+**| successful, this file should probably delegate all future calls to
+**| the nsIMdbFile interface down to the thief files, so that even after
+**| the file has been stolen, it can still be read, written, or forcibly
+**| closed (by a call to CloseMdbObject()).
+**|
+**|| Thief: acquire and return thief passed to an earlier call to Steal().
+|*/
+
+#define NS_IMDBFILE_IID_STR "f04aa4ab-1fe7-4115-a4a5-6819dff1103d"
+
+// NOTE(review): the second GUID group previously read 0x1fe, dropping a
+// digit relative to NS_IMDBFILE_IID_STR above ("1fe7"); corrected so the
+// struct form and the string form describe the same GUID.
+#define NS_IMDBFILE_IID \
+ { \
+ 0xf04aa4ab, 0x1fe7, 0x4115, { \
+ 0xa4, 0xa5, 0x68, 0x19, 0xdf, 0xf1, 0x10, 0x3d \
+ } \
+ }
+
+class nsIMdbFile : public nsISupports { // minimal file interface
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBFILE_IID)
+ // { ===== begin nsIMdbFile methods =====
+
+ // { ----- begin pos methods -----
+ NS_IMETHOD Tell(nsIMdbEnv* ev, mdb_pos* outPos) const = 0;
+ NS_IMETHOD Seek(nsIMdbEnv* ev, mdb_pos inPos, mdb_pos* outPos) = 0;
+ NS_IMETHOD Eof(nsIMdbEnv* ev, mdb_pos* outPos) = 0;
+ // } ----- end pos methods -----
+
+ // { ----- begin read methods -----
+ NS_IMETHOD Read(nsIMdbEnv* ev, void* outBuf, mdb_size inSize,
+ mdb_size* outActualSize) = 0;
+ NS_IMETHOD Get(nsIMdbEnv* ev, void* outBuf, mdb_size inSize, mdb_pos inPos,
+ mdb_size* outActualSize) = 0;
+ // } ----- end read methods -----
+
+ // { ----- begin write methods -----
+ NS_IMETHOD Write(nsIMdbEnv* ev, const void* inBuf, mdb_size inSize,
+ mdb_size* outActualSize) = 0;
+ NS_IMETHOD Put(nsIMdbEnv* ev, const void* inBuf, mdb_size inSize,
+ mdb_pos inPos, mdb_size* outActualSize) = 0;
+ NS_IMETHOD Flush(nsIMdbEnv* ev) = 0;
+ // } ----- end write methods -----
+
+ // { ----- begin path methods -----
+ NS_IMETHOD Path(nsIMdbEnv* ev, mdbYarn* outFilePath) = 0;
+ // } ----- end path methods -----
+
+ // { ----- begin replacement methods -----
+ NS_IMETHOD Steal(nsIMdbEnv* ev, nsIMdbFile* ioThief) = 0;
+ NS_IMETHOD Thief(nsIMdbEnv* ev, nsIMdbFile** acqThief) = 0;
+ // } ----- end replacement methods -----
+
+ // { ----- begin versioning methods -----
+ NS_IMETHOD BecomeTrunk(nsIMdbEnv* ev) = 0;
+ // If this file is a file version branch created by calling AcquireBud(),
+ // BecomeTrunk() causes this file's content to replace the original
+ // file's content, typically by assuming the original file's identity.
+ // This default implementation of BecomeTrunk() does nothing, and this
+ // is appropriate behavior for files which are not branches, and is
+ // also the right behavior for files returned from AcquireBud() which are
+ // in fact the original file that has been truncated down to zero length.
+
+ NS_IMETHOD AcquireBud(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+ nsIMdbFile** acqBud) =
+ 0; // acquired file for new version of content
+ // AcquireBud() starts a new "branch" version of the file, empty of content,
+ // so that a new version of the file can be written. This new file
+ // can later be told to BecomeTrunk() the original file, so the branch
+ // created by budding the file will replace the original file. Some
+ // file subclasses might initially take the unsafe but expedient
+ // approach of simply truncating this file down to zero length, and
+ // then returning the same morkFile pointer as this, with an extra
+ // reference count increment. Note that the caller of AcquireBud() is
+ // expected to eventually call CutStrongRef() on the returned file
+ // in order to release the strong reference. High quality versions
+ // of morkFile subclasses will create entirely new files which later
+ // are renamed to become the old file, so that better transactional
+ // behavior is exhibited by the file, so crashes protect old files.
+ // Note that AcquireBud() is an illegal operation on readonly files.
+ // } ----- end versioning methods -----
+
+ // } ===== end nsIMdbFile methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbFile, NS_IMDBFILE_IID)
+
+/*| nsIMdbPort: a readonly interface to a specific database file. The mutable
+**| nsIMdbStore interface is a subclass that includes writing behavior, but
+**| most of the needed db methods appear in the readonly nsIMdbPort interface.
+**|
+**|| mdbYarn: note all nsIMdbPort and nsIMdbStore subclasses must guarantee null
+**| termination of all strings written into mdbYarn instances, as long as
+**| mYarn_Size and mYarn_Buf are nonzero. Even truncated string values must
+**| be null terminated. This is more strict behavior than mdbYarn requires,
+**| but it is part of the nsIMdbPort and nsIMdbStore interface.
+**|
+**|| attributes: methods are provided to distinguish a readonly port from a
+**| mutable store, and whether a mutable store actually has any dirty content.
+**|
+**|| filepath: the file path used to open the port from the nsIMdbFactory can be
+**| queried and discovered by GetPortFilePath(), which includes format info.
+**|
+**|| export: a port can write itself in other formats, with perhaps a typical
+**| emphasis on text interchange formats used by other systems. A port can be
+**| queried to determine its preferred export interchange format, and a port
+**| can be queried to see whether a specific export format is supported. And
+**| actually exporting a port requires a new destination file name and format.
+**|
+**|| tokens: a port supports queries about atomized strings to map tokens to
+**| strings or strings to token integers. (All atomized strings must be in
+**| US-ASCII iso-8859-1 Latin1 charset encoding.) When a port is actually a
+**| mutable store and a string has not yet been atomized, then StringToToken()
+**| will actually do so and modify the store. The QueryToken() method will not
+**| atomize a string if it has not already been atomized yet, even in stores.
+**|
+**|| tables: other than string tokens, all port content is presented through
+**| tables, which are ordered collections of rows. Tables are identified by
+**| row scope and table kind, which might or might not be unique in a port,
+**| depending on app convention. When tables are effectively unique, then
+**| queries for specific scope and kind pairs will find those tables. To see
+**| all tables that match specific row scope and table kind patterns, even in
+**| the presence of duplicates, every port supports a GetPortTableCursor()
+**| method that returns an iterator over all matching tables. Table kind is
+**| considered scoped inside row scope, so passing a zero for table kind will
+**| find all table kinds for some nonzero row scope. Passing a zero for row
+**| scope will iterate over all tables in the port, in some undefined order.
+**| (A new table can be added to a port using nsIMdbStore::NewTable(), even when
+**| the requested scope and kind combination is already used by other tables.)
+**|
+**|| memory: callers can request that a database use less memory footprint in
+**| several flavors, from an inconsequential idle flavor to a rather drastic
+**| panic flavor. Callers might perform an idle purge very frequently if desired
+**| with very little cost, since only normally scheduled memory management will
+**| be conducted, such as freeing resources for objects scheduled to be dropped.
+**| Callers should perform session memory purges infrequently because they might
+**| involve costly scanning of data structures to remove cached content, and
+**| session purges are recommended only when a caller experiences memory crunch.
+**| Callers should only rarely perform a panic purge, in response to dire memory
+**| straits, since this is likely to make db operations much more expensive
+**| than they would be otherwise. A panic purge asks a database to free as much
+**| memory as possible while staying effective and operational, because a caller
+**| thinks application failure might otherwise occur. (Apps might better close
+**| an open db, so panic purges only make sense when a db is urgently needed.)
+|*/
+class nsIMdbPort : public nsISupports { // readonly view of one db file
+ public:
+ // { ===== begin nsIMdbPort methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD GetIsPortReadonly(nsIMdbEnv* ev, mdb_bool* outBool) = 0;
+ NS_IMETHOD GetIsStore(nsIMdbEnv* ev, mdb_bool* outBool) = 0;
+ NS_IMETHOD GetIsStoreAndDirty(nsIMdbEnv* ev, mdb_bool* outBool) = 0;
+
+ NS_IMETHOD GetUsagePolicy(nsIMdbEnv* ev, mdbUsagePolicy* ioUsagePolicy) = 0;
+
+ NS_IMETHOD SetUsagePolicy(nsIMdbEnv* ev,
+ const mdbUsagePolicy* inUsagePolicy) = 0;
+ // } ----- end attribute methods -----
+
+ // { ----- begin memory policy methods -----
+ NS_IMETHOD IdleMemoryPurge( // do memory management already scheduled
+ nsIMdbEnv* ev, // context
+ mdb_size* outEstimatedBytesFreed) =
+ 0; // approximate bytes actually freed
+
+ NS_IMETHOD SessionMemoryPurge( // request specific footprint decrease
+ nsIMdbEnv* ev, // context
+ mdb_size inDesiredBytesFreed, // approximate number of bytes wanted
+ mdb_size* outEstimatedBytesFreed) =
+ 0; // approximate bytes actually freed
+
+ NS_IMETHOD PanicMemoryPurge( // desperately free all possible memory
+ nsIMdbEnv* ev, // context
+ mdb_size* outEstimatedBytesFreed) =
+ 0; // approximate bytes actually freed
+ // } ----- end memory policy methods -----
+
+ // { ----- begin filepath methods -----
+ NS_IMETHOD GetPortFilePath(
+ nsIMdbEnv* ev, // context
+ mdbYarn* outFilePath, // name of file holding port content
+ mdbYarn* outFormatVersion) = 0; // file format description
+
+ NS_IMETHOD GetPortFile(nsIMdbEnv* ev, // context
+ nsIMdbFile** acqFile) =
+ 0; // acquire file used by port or store
+ // } ----- end filepath methods -----
+
+ // { ----- begin export methods -----
+ NS_IMETHOD BestExportFormat( // determine preferred export format
+ nsIMdbEnv* ev, // context
+ mdbYarn* outFormatVersion) = 0; // file format description
+
+ // some tentative suggested import/export formats
+ // "ns:msg:db:port:format:ldif:ns4.0:passthrough" // necessary
+ // "ns:msg:db:port:format:ldif:ns4.5:utf8" // necessary
+ // "ns:msg:db:port:format:ldif:ns4.5:tabbed"
+ // "ns:msg:db:port:format:ldif:ns4.5:binary" // necessary
+ // "ns:msg:db:port:format:html:ns3.0:addressbook" // necessary
+ // "ns:msg:db:port:format:html:display:verbose"
+ // "ns:msg:db:port:format:html:display:concise"
+ // "ns:msg:db:port:format:mork:zany:verbose" // necessary
+ // "ns:msg:db:port:format:mork:zany:atomized" // necessary
+ // "ns:msg:db:port:format:rdf:xml"
+ // "ns:msg:db:port:format:xml:mork"
+ // "ns:msg:db:port:format:xml:display:verbose"
+ // "ns:msg:db:port:format:xml:display:concise"
+ // "ns:msg:db:port:format:xml:print:verbose" // recommended
+ // "ns:msg:db:port:format:xml:print:concise"
+
+ NS_IMETHOD
+ CanExportToFormat( // can export content in given specific format?
+ nsIMdbEnv* ev, // context
+ const char* inFormatVersion, // file format description
+ mdb_bool* outCanExport) = 0; // whether ExportSource() might succeed
+
+ NS_IMETHOD ExportToFormat( // export content in given specific format
+ nsIMdbEnv* ev, // context
+ // const char* inFilePath, // the file to receive exported content
+ nsIMdbFile* ioFile, // destination abstract file interface
+ const char* inFormatVersion, // file format description
+ nsIMdbThumb** acqThumb) = 0; // acquire thumb for incremental export
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the export will be finished.
+
+ // } ----- end export methods -----
+
+ // { ----- begin token methods -----
+ NS_IMETHOD TokenToString( // return a string name for an integer token
+ nsIMdbEnv* ev, // context
+ mdb_token inToken, // token for inTokenName inside this port
+ mdbYarn* outTokenName) = 0; // string name previously atomized for inToken
+
+ NS_IMETHOD StringToToken( // return an integer token for scope name
+ nsIMdbEnv* ev, // context
+ const char* inTokenName, // Latin1 string to tokenize if possible
+ mdb_token* outToken) = 0; // token for inTokenName inside this port
+
+ // String token zero is never used and never supported. If the port
+ // is a mutable store, then StringToToken() will create a new
+ // association of inTokenName with a new integer token if possible.
+ // But a readonly port will return zero for an unknown scope name.
+
+ NS_IMETHOD QueryToken( // like StringToToken(), but without adding
+ nsIMdbEnv* ev, // context
+ const char* inTokenName, // Latin1 string to tokenize if possible
+ mdb_token* outToken) = 0; // token for inTokenName inside this port
+
+ // QueryToken() will return a string token if one already exists,
+ // but unlike StringToToken(), will not assign a new token if not
+ // already in use.
+
+ // } ----- end token methods -----
+
+ // { ----- begin row methods -----
+ NS_IMETHOD HasRow( // contains a row with the specified oid?
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ mdb_bool* outHasRow) = 0; // whether GetRow() might succeed
+
+ NS_IMETHOD GetRowRefCount( // get number of tables that contain a row
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ mdb_count* outRefCount) = 0; // number of tables containing this row
+
+ NS_IMETHOD GetRow( // access one row with specific oid
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ nsIMdbRow** acqRow) = 0; // acquire specific row (or null)
+
+ // NS_IMETHOD
+ // GetPortRowCursor( // get cursor for all rows in specific scope
+ // nsIMdbEnv* ev, // context
+ // mdb_scope inRowScope, // row scope for row ids
+ // nsIMdbPortRowCursor** acqCursor) = 0; // all such rows in the port
+
+ NS_IMETHOD FindRow(
+ nsIMdbEnv* ev, // search for row with matching cell
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_column inColumn, // the column to search (and maintain an index)
+ const mdbYarn* inTargetCellValue, // cell value for which to search
+ mdbOid* outRowOid, // out row oid on match (or {0,-1} for no match)
+ nsIMdbRow** acqRow) = 0; // acquire matching row (or nil for no match)
+ // can be null if you only want the oid
+ // FindRow() searches for one row that has a cell in column inColumn with
+ // a contained value with the same form (i.e. charset) and is byte-wise
+ // identical to the blob described by yarn inTargetCellValue. Both content
+ // and form of the yarn must be an exact match to find a matching row.
+ //
+ // (In other words, both a yarn's blob bytes and form are significant. The
+ // form is not expected to vary in columns used for identity anyway. This
+ // is intended to make the cost of FindRow() cheaper for MDB implementors,
+ // since any cell value atomization performed internally must necessarily
+ // make yarn form significant in order to avoid data loss in atomization.)
+ //
+ // FindRow() can lazily create an index on attribute inColumn for all rows
+ // with that attribute in row space scope inRowScope, so that subsequent
+ // calls to FindRow() will perform faster. Such an index might or might
+ // not be persistent (but this seems desirable if it is cheap to do so).
+ // Note that lazy index creation in readonly DBs is not very feasible.
+ //
+ // This FindRow() interface assumes that attribute inColumn is effectively
+ // an alternative means of unique identification for a row in a rowspace,
+ // so correct behavior is only guaranteed when no duplicates for this col
+ // appear in the given set of rows. (If more than one row has the same cell
+ // value in this column, no more than one will be found; and cutting one of
+ // two duplicate rows can cause the index to assume no other such row lives
+ // in the row space, so future calls return nil for negative search results
+ // even though some duplicate row might still live within the rowspace.)
+ //
+ // In other words, the FindRow() implementation is allowed to assume simple
+ // hash tables mapping unique column keys to associated row values will be
+ // sufficient, where any duplication is not recorded because only one copy
+ // of a given key need be remembered. Implementors are not required to sort
+ // all rows by the specified column.
+ // } ----- end row methods -----
+
+ // { ----- begin table methods -----
+ NS_IMETHOD HasTable( // supports a table with the specified oid?
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical table oid
+ mdb_bool* outHasTable) = 0; // whether GetTable() might succeed
+
+ NS_IMETHOD GetTable( // access one table with specific oid
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical table oid
+ nsIMdbTable** acqTable) = 0; // acquire specific table (or null)
+
+ NS_IMETHOD HasTableKind( // supports a table of the specified type?
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // rid scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_count* outTableCount, // current number of such tables
+ mdb_bool* outSupportsTable) = 0; // whether GetTableKind() might succeed
+
+ // row scopes to be supported include the following suggestions:
+ // "ns:msg:db:row:scope:address:cards:all"
+ // "ns:msg:db:row:scope:mail:messages:all"
+ // "ns:msg:db:row:scope:news:articles:all"
+
+ // table kinds to be supported include the following suggestions:
+ // "ns:msg:db:table:kind:address:cards:main"
+ // "ns:msg:db:table:kind:address:lists:all"
+ // "ns:msg:db:table:kind:address:list"
+ // "ns:msg:db:table:kind:news:threads:all"
+ // "ns:msg:db:table:kind:news:thread"
+ // "ns:msg:db:table:kind:mail:threads:all"
+ // "ns:msg:db:table:kind:mail:thread"
+
+ NS_IMETHOD GetTableKind( // access one (random) table of specific type
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_count* outTableCount, // current number of such tables
+ mdb_bool* outMustBeUnique, // whether port can hold only one of these
+ nsIMdbTable** acqTable) = 0; // acquire scoped collection of rows
+
+ NS_IMETHOD
+ GetPortTableCursor( // get cursor for all tables of specific type
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ nsIMdbPortTableCursor** acqCursor) = 0; // all such tables in the port
+ // } ----- end table methods -----
+
+ // { ----- begin commit methods -----
+
+ NS_IMETHOD ShouldCompress( // store wastes at least inPercentWaste?
+ nsIMdbEnv* ev, // context
+ mdb_percent inPercentWaste, // 0..100 percent file size waste threshold
+ mdb_percent* outActualWaste, // 0..100 percent of file actually wasted
+ mdb_bool* outShould) = 0; // true when about inPercentWaste% is wasted
+ // ShouldCompress() returns true if the store can determine that the file
+ // will shrink by an estimated percentage of inPercentWaste% (or more) if
+ // CompressCommit() is called, because that percentage of the file seems
+ // to be recoverable free space. The granularity is only in terms of
+ // percentage points, and any value over 100 is considered equal to 100.
+ //
+ // If a store only has an approximate idea how much space might be saved
+ // during a compress, then a best guess should be made. For example, the
+ // Mork implementation might keep track of how much file space began with
+ // text content before the first updating transaction, and then consider
+ // all content following the start of the first transaction as potentially
+ // wasted space if it is all updates and not just new content. (This is
+ // a safe assumption in the sense that behavior will stabilize on a low
+ // estimate of wastage after a commit removes all transaction updates.)
+ //
+ // Some db formats might attempt to keep a very accurate reckoning of free
+ // space size, so a very accurate determination can be made. But other db
+ // formats might have difficulty determining size of free space, and might
+ // require some lengthy calculation to answer. This is the reason for
+ // passing in the percentage threshold of interest, so that such lengthy
+ // computations can terminate early as soon as at least inPercentWaste is
+ // found, so that the entire file need not be groveled when unnecessary.
+ // However, we hope implementations will always favor fast but imprecise
+ // heuristic answers instead of extremely slow but very precise answers.
+ //
+ // If the outActualWaste parameter is non-nil, it will be used to return
+ // the actual estimated space wasted as a percentage of file size. (This
+ // parameter is provided so callers need not call repeatedly with altered
+ // inPercentWaste values to isolate the actual wastage figure.) Note the
+ // actual wastage figure returned can exactly equal inPercentWaste even
+ // when this grossly underestimates the real figure involved, if the db
+ // finds it very expensive to determine the extent of wastage after it is
+ // known to at least exceed inPercentWaste. Note we expect that whenever
+ // outShould returns true, that outActualWaste returns >= inPercentWaste.
+ //
+ // The effect of different inPercentWaste values is not very uniform over
+ // the permitted range. For example, 50 represents 50% wastage, or a file
+ // that is about double what it should be ideally. But 99 represents 99%
+ // wastage, or a file that is about ninety-nine times as big as it should
+ // be ideally. In the smaller direction, 25 represents 25% wastage, or
+ // a file that is only 33% larger than it should be ideally.
+ //
+ // Callers can determine what policy they want to use for considering when
+ // a file holds too much wasted space, and express this as a percentage
+ // of total file size to pass as in the inPercentWaste parameter. A zero
+ // likely returns always trivially true, and 100 always trivially false.
+ // The great majority of callers are expected to use values from 25 to 75,
+ // since most plausible thresholds for compressing might fall between the
+ // extremes of 133% of ideal size and 400% of ideal size. (Presumably the
+ // larger a file gets, the more important the percentage waste involved, so
+ // a sliding scale for compress thresholds might use smaller numbers for
+ // much bigger file sizes.)
+
+ // } ----- end commit methods -----
+
+ // } ===== end nsIMdbPort methods =====
+};
+
+/*| nsIMdbStore: a mutable interface to a specific database file.
+**|
+**|| tables: one can force a new table to exist in a store with NewTable()
+**| and nonzero values for both row scope and table kind. (If one wishes only
+**| one table of a certain kind, then one might look for it first using the
+**| GetTableKind() method). One can pass inMustBeUnique to force future
+**| users of this store to be unable to create other tables with the same pair
+**| of scope and kind attributes. When inMustBeUnique is true, and the table
+**| with the given scope and kind pair already exists, then the existing one
+**| is returned instead of making a new table. Similarly, if one passes false
+**| for inMustBeUnique, but the table kind has already been marked unique by a
+**| previous user of the store, then the existing unique table is returned.
+**|
+**|| import: all or some of another port's content can be imported by calling
+**| AddPortContent() with a row scope identifying the extent of content to
+**| be imported. A zero row scope will import everything. A nonzero row
+**| scope will only import tables with a matching row scope. Note that one
+**| must somehow find a way to negotiate possible conflicts between existing
+**| row content and imported row content, and this involves a specific kind of
+**| definition for row identity involving either row IDs or unique attributes,
+**| or some combination of these two. At the moment I am just going to wave
+**| my hands, and say the default behavior is to assign all new row identities
+**| to all imported content, which will result in no merging of content; this
+**| must change later because it is unacceptable in some contexts.
+**|
+**|| commits: to manage modifications in a mutable store, very few methods are
+**| really needed to indicate global policy choices that are independent of
+**| the actual modifications that happen in objects at the level of tables,
+**| rows, and cells, etc. The most important policy to specify is which sets
+**| of changes are considered associated in a manner such that they should be
+**| applied together atomically to a given store. We call each such group of
+**| changes a transaction. We handle three different grades of transaction,
+**| but they differ only in semantic significance to the application, and are
+**| not intended to nest. (If small transactions were nested inside large
+**| transactions, that would imply that a single large transaction must be
+**| atomic over all the contained small transactions; but actually we intend
+**| small transactions never be undone once committed due to, say, aborting a
+**| transaction of greater significance.) The small, large, and session level
+**| commits have equal granularity, and differ only in risk of loss from the
+**| perspective of an application. Small commits characterize changes that
+**| can be lost with relatively small risk, so small transactions can delay
+**| until later if they are expensive or impractical to commit. Large commits
+**| involve changes that would probably inconvenience users if lost, so the
+**| need to pay costs of writing is rather greater than with small commits.
+**| Session commits are last ditch attempts to save outstanding changes before
+**| stopping the use of a particular database, so there will be no later point
+**| in time to save changes that have been delayed due to possible high cost.
+**| If large commits are never delayed, then a session commit has about the
+**| same performance effect as another large commit; but if small and large
+**| commits are always delayed, then a session commit is likely to be rather
+**| expensive as a runtime cost compared to any earlier database usage.
+**|
+**|| aborts: the only way to abort changes to a store is by closing the store.
+**| So there is no specific method for causing any abort. Stores must discard
+**| all changes made that are uncommitted when a store is closed. This design
+**| choice makes the implementations of tables, rows, and cells much less
+**| complex because they need not maintain a record of undoable changes. When
+**| a store is closed, presumably this precipitates the closure of all tables,
+**| rows, and cells in the store as well. So an application can revert the
+**| state of a store in the user interface by quietly closing and reopening a
+**| store, because this will discard uncommitted changes and show old content.
+**| This implies an app that closes a store will need to send a "scramble"
+**| event notification to any views that depend on old discarded content.
+|*/
+
+// Interface ID for nsIMdbStore, both as a human-readable string and as an
+// nsIID brace-initializer usable with NS_DECLARE_STATIC_IID_ACCESSOR below.
+#define NS_IMDBSTORE_IID_STR "74d6218d-44b0-43b5-9ebe-69a17dfb562c"
+#define NS_IMDBSTORE_IID \
+  { \
+    0x74d6218d, 0x44b0, 0x43b5, { \
+      0x9e, 0xbe, 0x69, 0xa1, 0x7d, 0xfb, 0x56, 0x2c \
+    } \
+  }
+
+// nsIMdbStore -- a mutable COM-style interface to one open database file.
+// Extends the read-only nsIMdbPort with factory methods for tables and rows,
+// incremental import, atomizing hints, and the three commit grades (large /
+// session / compress) described in the comment block above.  All methods are
+// pure virtual; concrete Mork classes supply the implementations.  Methods
+// that return a thumb perform their work incrementally via repeated calls to
+// nsIMdbThumb::DoMore() rather than blocking the caller.
+class nsIMdbStore : public nsIMdbPort {
+ public:
+  NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBSTORE_IID)
+
+  // { ===== begin nsIMdbStore methods =====
+
+  // { ----- begin table methods -----
+  NS_IMETHOD NewTable(  // make one new table of specific type
+      nsIMdbEnv* ev,    // context
+      mdb_scope inRowScope,     // row scope for row ids
+      mdb_kind inTableKind,     // the type of table to access
+      mdb_bool inMustBeUnique,  // whether store can hold only one of these
+      const mdbOid* inOptionalMetaRowOid,  // can be nil to avoid specifying
+      nsIMdbTable** acqTable) = 0;  // acquire scoped collection of rows
+
+  NS_IMETHOD NewTableWithOid(  // make one new table of specific type
+      nsIMdbEnv* ev,           // context
+      const mdbOid* inOid,     // caller assigned oid
+      mdb_kind inTableKind,     // the type of table to access
+      mdb_bool inMustBeUnique,  // whether store can hold only one of these
+      const mdbOid* inOptionalMetaRowOid,  // can be nil to avoid specifying
+      nsIMdbTable** acqTable) = 0;  // acquire scoped collection of rows
+  // } ----- end table methods -----
+
+  // { ----- begin row scope methods -----
+  // Query/set whether row IDs in a given scope are assigned by the caller
+  // or by the store; both query methods report the current policy through
+  // the same pair of out-parameters.
+  NS_IMETHOD RowScopeHasAssignedIds(
+      nsIMdbEnv* ev,
+      mdb_scope inRowScope,         // row scope for row ids
+      mdb_bool* outCallerAssigned,  // nonzero if caller assigned specified
+      mdb_bool* outStoreAssigned) =
+      0;  // nonzero if store db assigned specified
+
+  NS_IMETHOD SetCallerAssignedIds(
+      nsIMdbEnv* ev,
+      mdb_scope inRowScope,         // row scope for row ids
+      mdb_bool* outCallerAssigned,  // nonzero if caller assigned specified
+      mdb_bool* outStoreAssigned) =
+      0;  // nonzero if store db assigned specified
+
+  NS_IMETHOD SetStoreAssignedIds(
+      nsIMdbEnv* ev,
+      mdb_scope inRowScope,         // row scope for row ids
+      mdb_bool* outCallerAssigned,  // nonzero if caller assigned specified
+      mdb_bool* outStoreAssigned) =
+      0;  // nonzero if store db assigned specified
+  // } ----- end row scope methods -----
+
+  // { ----- begin row methods -----
+  NS_IMETHOD NewRowWithOid(nsIMdbEnv* ev,  // new row w/ caller assigned oid
+                           const mdbOid* inOid,      // caller assigned oid
+                           nsIMdbRow** acqRow) = 0;  // create new row
+
+  NS_IMETHOD NewRow(nsIMdbEnv* ev,         // new row with db assigned oid
+                    mdb_scope inRowScope,  // row scope for row ids
+                    nsIMdbRow** acqRow) = 0;  // create new row
+  // Note this row must be added to some table or cell child before the
+  // store is closed in order to make this row persist across sessions.
+
+  // } ----- end row methods -----
+
+  // { ----- begin import/export methods -----
+  NS_IMETHOD ImportContent(  // import content from port
+      nsIMdbEnv* ev,         // context
+      mdb_scope inRowScope,  // scope for rows (or zero for all?)
+      nsIMdbPort* ioPort,    // the port with content to add to store
+      nsIMdbThumb** acqThumb) = 0;  // acquire thumb for incremental import
+  // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+  // then the import will be finished.
+
+  NS_IMETHOD ImportFile(   // import content from port
+      nsIMdbEnv* ev,       // context
+      nsIMdbFile* ioFile,  // the file with content to add to store
+      nsIMdbThumb** acqThumb) = 0;  // acquire thumb for incremental import
+  // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+  // then the import will be finished.
+  // } ----- end import/export methods -----
+
+  // { ----- begin hinting methods -----
+  NS_IMETHOD
+  ShareAtomColumnsHint(  // advise re shared column content atomizing
+      nsIMdbEnv* ev,     // context
+      mdb_scope inScopeHint,  // zero, or suggested shared namespace
+      const mdbColumnSet* inColumnSet) = 0;  // cols desired tokenized together
+
+  NS_IMETHOD
+  AvoidAtomColumnsHint(  // advise column with poor atomizing prospects
+      nsIMdbEnv* ev,     // context
+      const mdbColumnSet* inColumnSet) =
+      0;  // cols with poor atomizing prospects
+  // } ----- end hinting methods -----
+
+  // { ----- begin commit methods -----
+  NS_IMETHOD LargeCommit(  // save important changes if at all possible
+      nsIMdbEnv* ev,       // context
+      nsIMdbThumb** acqThumb) = 0;  // acquire thumb for incremental commit
+  // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+  // then the commit will be finished. Note the store is effectively write
+  // locked until commit is finished or canceled through the thumb instance.
+  // Until the commit is done, the store will report it has readonly status.
+
+  NS_IMETHOD SessionCommit(  // save all changes if large commits delayed
+      nsIMdbEnv* ev,         // context
+      nsIMdbThumb** acqThumb) = 0;  // acquire thumb for incremental commit
+  // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+  // then the commit will be finished. Note the store is effectively write
+  // locked until commit is finished or canceled through the thumb instance.
+  // Until the commit is done, the store will report it has readonly status.
+
+  NS_IMETHOD
+  CompressCommit(     // commit and make db physically smaller if possible
+      nsIMdbEnv* ev,  // context
+      nsIMdbThumb** acqThumb) = 0;  // acquire thumb for incremental commit
+  // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+  // then the commit will be finished. Note the store is effectively write
+  // locked until commit is finished or canceled through the thumb instance.
+  // Until the commit is done, the store will report it has readonly status.
+
+  // } ----- end commit methods -----
+
+  // } ===== end nsIMdbStore methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbStore, NS_IMDBSTORE_IID)
+
+/*| nsIMdbCursor: base cursor class for iterating row cells and table rows
+**|
+**|| count: the number of elements in the collection (table or row)
+**|
+**|| seed: the change count in the underlying collection, which is synced
+**| with the collection when the iteration position is set, and henceforth
+**| acts to show whether the iter has lost collection synchronization, in
+**| case it matters to clients whether any change happens during iteration.
+**|
+**|| pos: the position of the current element in the collection. Negative
+**| means a position logically before the first element. A positive value
+**| equal to count (or larger) implies a position after the last element.
+**| To iterate over all elements, set the position to negative, so subsequent
+**| calls to any 'next' method will access the first collection element.
+**|
+**|| doFailOnSeedOutOfSync: whether a cursor should return an error if the
+**| cursor's snapshot of a table's seed becomes stale with respect the table's
+**| current seed value (which implies the iteration is less than total) in
+**| between to cursor calls that actually access collection content. By
+**| default, a cursor should assume this attribute is false until specified,
+**| so that iterations quietly try to re-sync when they lose coherence.
+|*/
+
+// Interface ID for nsIMdbCursor, both as a human-readable string and as an
+// nsIID brace-initializer usable with NS_DECLARE_STATIC_IID_ACCESSOR below.
+#define NS_IMDBCURSOR_IID_STR "a0c37337-6ebc-474c-90db-e65ea0b850aa"
+
+#define NS_IMDBCURSOR_IID \
+  { \
+    0xa0c37337, 0x6ebc, 0x474c, { \
+      0x90, 0xdb, 0xe6, 0x5e, 0xa0, 0xb8, 0x50, 0xaa \
+    } \
+  }
+
+// nsIMdbCursor -- shared base class for row-cell and table-row iterators.
+// The count/seed/pos/doFailOnSeedOutOfSync semantics are described in the
+// comment block above; a negative position means "before the first element",
+// so a subsequent 'next' call yields the first member.
+class nsIMdbCursor : public nsISupports {  // collection iterator
+ public:
+  NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBCURSOR_IID)
+  // { ===== begin nsIMdbCursor methods =====
+
+  // { ----- begin attribute methods -----
+  NS_IMETHOD GetCount(nsIMdbEnv* ev, mdb_count* outCount) = 0;  // readonly
+  NS_IMETHOD GetSeed(nsIMdbEnv* ev, mdb_seed* outSeed) = 0;     // readonly
+
+  NS_IMETHOD SetPos(nsIMdbEnv* ev, mdb_pos inPos) = 0;  // mutable
+  NS_IMETHOD GetPos(nsIMdbEnv* ev, mdb_pos* outPos) = 0;
+
+  // When the fail flag is set, cursor calls error out if the underlying
+  // collection's seed has changed since the cursor last synced; by default
+  // (false) cursors quietly try to re-sync instead.
+  NS_IMETHOD SetDoFailOnSeedOutOfSync(nsIMdbEnv* ev, mdb_bool inFail) = 0;
+  NS_IMETHOD GetDoFailOnSeedOutOfSync(nsIMdbEnv* ev, mdb_bool* outFail) = 0;
+  // } ----- end attribute methods -----
+
+  // } ===== end nsIMdbCursor methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbCursor, NS_IMDBCURSOR_IID)
+
+// Interface ID for nsIMdbPortTableCursor.
+// Fixed: removed a stray '=' between the macro name and its value; with the
+// '=', the object-like macro expanded to '= "f181..."', which is unusable as
+// a string constant and inconsistent with NS_IMDBSTORE_IID_STR and
+// NS_IMDBCURSOR_IID_STR above.
+#define NS_IMDBPORTTABLECURSOR_IID_STR "f181a41e-933d-49b3-af93-20d3634b8b78"
+
+#define NS_IMDBPORTTABLECURSOR_IID \
+  { \
+    0xf181a41e, 0x933d, 0x49b3, { \
+      0xaf, 0x93, 0x20, 0xd3, 0x63, 0x4b, 0x8b, 0x78 \
+    } \
+  }
+
+/*| nsIMdbPortTableCursor: cursor class for iterating port tables
+**|
+**|| port: the cursor is associated with a specific port, which can be
+**| set to a different port (which resets the position to -1 so the
+**| next table acquired is the first in the port).
+**|
+|*/
+// nsIMdbPortTableCursor -- iterates over the tables inside one port,
+// optionally filtered by row scope and table kind (zero means "all" for
+// either filter).  Changing the port, scope, or kind resets the iteration
+// position to -1, so the next NextTable() call yields the first match.
+// NOTE(review): this cursor derives from nsISupports rather than
+// nsIMdbCursor, so it lacks the base count/seed/pos attributes -- confirm
+// this is intentional before relying on those here.
+class nsIMdbPortTableCursor : public nsISupports {  // table collection iterator
+ public:
+  NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBPORTTABLECURSOR_IID)
+  // { ===== begin nsIMdbPortTableCursor methods =====
+
+  // { ----- begin attribute methods -----
+  NS_IMETHOD SetPort(nsIMdbEnv* ev, nsIMdbPort* ioPort) = 0;  // sets pos to -1
+  NS_IMETHOD GetPort(nsIMdbEnv* ev, nsIMdbPort** acqPort) = 0;
+
+  NS_IMETHOD SetRowScope(nsIMdbEnv* ev,  // sets pos to -1
+                         mdb_scope inRowScope) = 0;
+  NS_IMETHOD GetRowScope(nsIMdbEnv* ev, mdb_scope* outRowScope) = 0;
+  // setting row scope to zero iterates over all row scopes in port
+
+  NS_IMETHOD SetTableKind(nsIMdbEnv* ev,  // sets pos to -1
+                          mdb_kind inTableKind) = 0;
+  NS_IMETHOD GetTableKind(nsIMdbEnv* ev, mdb_kind* outTableKind) = 0;
+  // setting table kind to zero iterates over all table kinds in row scope
+  // } ----- end attribute methods -----
+
+  // { ----- begin table iteration methods -----
+  NS_IMETHOD NextTable(  // get table at next position in the db
+      nsIMdbEnv* ev,     // context
+      nsIMdbTable** acqTable) = 0;  // the next table in the iteration
+  // } ----- end table iteration methods -----
+
+  // } ===== end nsIMdbPortTableCursor methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbPortTableCursor, NS_IMDBPORTTABLECURSOR_IID)
+
+/*| nsIMdbCollection: an object that collects a set of other objects as members.
+**| The main purpose of this base class is to unify the perceived semantics
+**| of tables and rows where their collection behavior is similar. This helps
+**| isolate the mechanics of collection behavior from the other semantics that
+**| are more characteristic of rows and tables.
+**|
+**|| count: the number of objects in a collection is the member count. (Some
+**| collection interfaces call this attribute the 'size', but that can be a
+**| little ambiguous, and counting actual members is harder to confuse.)
+**|
+**|| seed: the seed of a collection is a counter for changes in membership in
+**| a specific collection. This seed should change when members are added to
+**| or removed from a collection, but not when a member changes internal state.
+**| The seed should also change whenever the internal collection of members has
+**| a complex state change that reorders member positions (say by sorting) that
+**| would affect the nature of an iteration over that collection of members.
+**| The purpose of a seed is to inform any outstanding collection cursors that
+**| they might be stale, without incurring the cost of broadcasting an event
+**| notification to such cursors, which would need more data structure support.
+**| Presumably a cursor in a particular mdb code suite has much more direct
+**| access to a collection seed member slot than this abstract COM interface,
+**| so this information is intended more for clients outside mdb that want to
+**| make inferences similar to those made by the collection cursors. The seed
+**| value as an integer magnitude is not very important, and callers should not
+**| assume meaningful information can be derived from an integer value beyond
+**| whether it is equal or different from a previous inspection. A seed uses
+**| integers of many bits in order to make the odds of wrapping and becoming
+**| equal to an earlier seed value have probability that is vanishingly small.
+**|
+**|| port: every collection is associated with a specific database instance.
+**|
+**|| cursor: a subclass of nsIMdbCursor suitable for this specific collection
+**| subclass. The ability to GetCursor() from the base nsIMdbCollection class
+**| is not really as useful as getting a more specifically typed cursor more
+**| directly from the base class without any casting involved. So including
+**| this method here is more for conceptual illustration.
+**|
+**|| oid: every collection has an identity that persists from session to
+**| session. Implementations are probably able to distinguish row IDs from
+**| table IDs, but we don't specify anything official in this regard. A
+**| collection has the same identity for the lifetime of the collection,
+**| unless identity is swapped with another collection by means of a call to
+**| BecomeContent(), which is considered a way to swap a new representation
+**| for an old well-known object. (Even so, only content appears to change,
+**| while the identity seems to stay the same.)
+**|
+**|| become: developers can effectively cause two objects to swap identities,
+**| in order to effect a complete swap between what persistent content is
+**| represented by two oids. The caller should consider this a content swap,
+**| and not an identity swap, because identities will seem to stay the same while
+**| only content changes. However, implementations will likely do this
+**| internally by swapping identities. Callers must swap content only
+**| between objects of similar type, such as a row with another row, and a
+**| table with another table, because implementations need not support
+**| cross-object swapping because it might break object name spaces.
+**|
+**|| dropping: when a caller expects a row or table will no longer be used, the
+**| caller can tell the collection to 'drop activity', which means the runtime
+**| object can have its internal representation purged to save memory or any
+**| other resource that is being consumed by the collection's representation.
+**| This has no effect on the collection's persistent content or semantics,
+**| and is only considered a runtime effect. After a collection drops
+**| activity, the object should still be as usable as before (because it has
+**| NOT been closed), but further usage can be expensive to re-instate because
+**| it might involve reallocating space and/or re-reading disk space. But
+**| since this future usage is not expected, the caller does not expect to
+**| pay the extra expense. An implementation can choose to implement
+**| 'dropping activity' in different ways, or even not at all if this
+**| operation is not really feasible. Callers cannot ask objects whether they
+**| are 'dropped' or not, so this should be transparent. (Note that
+**| implementors might fear callers do not really know whether future
+**| usage will occur, and therefore might delay the act of dropping until
+**| the near future, until seeing whether the object is used again
+**| immediately elsewhere. Such use soon after the drop request might cause
+**| the drop to be cancelled.)
+|*/
+// nsIMdbCollection -- common member-set behavior shared by tables and rows;
+// see the comment block above for the count/seed/oid/become/drop semantics.
+// NOTE(review): unlike the sibling interfaces, no static IID macro/accessor
+// is declared for this base class -- presumably only the concrete leaf
+// interfaces are used as QueryInterface targets; confirm before relying on
+// an IID for this type.
+class nsIMdbCollection : public nsISupports {  // sequence of objects
+ public:
+  // { ===== begin nsIMdbCollection methods =====
+
+  // { ----- begin attribute methods -----
+  NS_IMETHOD GetSeed(nsIMdbEnv* ev,
+                     mdb_seed* outSeed) = 0;  // member change count
+  NS_IMETHOD GetCount(nsIMdbEnv* ev,
+                      mdb_count* outCount) = 0;  // member count
+
+  NS_IMETHOD GetPort(nsIMdbEnv* ev,
+                     nsIMdbPort** acqPort) = 0;  // collection container
+  // } ----- end attribute methods -----
+
+  // { ----- begin cursor methods -----
+  NS_IMETHOD GetCursor(  // make a cursor starting iter at inMemberPos
+      nsIMdbEnv* ev,     // context
+      mdb_pos inMemberPos,  // zero-based ordinal pos of member in collection
+      nsIMdbCursor** acqCursor) = 0;  // acquire new cursor instance
+  // } ----- end cursor methods -----
+
+  // { ----- begin ID methods -----
+  NS_IMETHOD GetOid(nsIMdbEnv* ev,
+                    mdbOid* outOid) = 0;  // read object identity
+  NS_IMETHOD BecomeContent(nsIMdbEnv* ev,
+                           const mdbOid* inOid) = 0;  // exchange content
+  // } ----- end ID methods -----
+
+  // { ----- begin activity dropping methods -----
+  NS_IMETHOD DropActivity(  // tell collection usage no longer expected
+      nsIMdbEnv* ev) = 0;
+  // } ----- end activity dropping methods -----
+
+  // } ===== end nsIMdbCollection methods =====
+};
+
+/*| nsIMdbTable: an ordered collection of rows
+**|
+**|| row scope: an integer token for an atomized string in this database
+**| that names a space for row IDs. This attribute of a table is intended
+**| as guidance metainformation that helps with searching a database for
+**| tables that operate on collections of rows of the specific type. By
+**| convention, a table with a specific row scope is expected to focus on
+**| containing rows that belong to that scope, however exceptions are easily
+**| allowed because all rows in a table are known by both row ID and scope.
+**| (A table with zero row scope is never allowed because this would make it
+**| ambiguous to use a zero row scope when iterating over tables in a port to
+**| indicate that all row scopes should be seen by a cursor.)
+**|
+**|| table kind: an integer token for an atomized string in this database
+**| that names a kind of table as a subset of the associated row scope. This
+**| attribute is intended as guidance metainformation to clarify the role of
+**| this table with respect to other tables in the same row scope, and this
+**| also helps search for such tables in a database. By convention, a table
+**| with a specific table kind has a consistent role for containing rows with
+**| respect to other collections of such rows in the same row scope. Also by
+**| convention, at least one table in a row scope has a table kind purporting
+**| to contain ALL the rows that belong in that row scope, so that at least
+**| one table exists that allows all rows in a scope to be iterated over.
+**| (A table with zero table kind is never allowed because this would make it
+**| ambiguous to use a zero table kind when iterating over tables in a port to
+**| indicate that all table kinds in a row scope should be seen by a cursor.)
+**|
+**|| port: every table is considered part of some port that contains the
+**| table, so that closing the containing port will cause the table to be
+**| indirectly closed as well. We make it easy to get the containing port for
+**| a table, because the port supports important semantic interfaces that will
+**| affect how content in table is presented; the most important port context
+**| that affects a table is specified by the set of token to string mappings
+**| that affect all tokens used throughout the database, and which drive the
+**| meanings of row scope, table kind, cell columns, etc.
+**|
+**|| cursor: a cursor that iterates over the rows in this table, where rows
+**| have zero-based index positions from zero to count-1. Making a cursor
+**| with negative position will next iterate over the first row in the table.
+**|
+**|| position: given any position from zero to count-1, a table will return
+**| the row ID and row scope for the row at that position. (One can use the
+**| GetRowAllCells() method to read that row, or else use a row cursor to both
+**| get the row at some position and read its content at the same time.) The
+**| position depends on whether a table is sorted, and upon the actual sort.
+**| Note that moving a row's position is only possible in unsorted tables.
+**|
+**|| row set: every table contains a collection of rows, where a member row is
+**| referenced by the table using the row ID and row scope for the row. No
+**| single table owns a given row instance, because rows are effectively ref-
+**| counted and destroyed only when the last table removes a reference to that
+**| particular row. (But a row can be emptied of all content no matter how
+**| many refs exist, and this might be the next best thing to destruction.)
+**| Once a row exists in at least one table (after NewRow() is called), then it
+**| can be added to any other table by calling AddRow(), or removed from any
+**| table by calling CutRow(), or queried as a member by calling HasRow(). A
+**| row can only be added to a table once, and further additions do nothing and
+**| complain not at all. Cutting a row from a table only does something when
+**| the row was actually a member, and otherwise does nothing silently.
+**|
+**|| row ref count: one can query the number of tables (and/or cells)
+**| containing a row as a member or a child.
+**|
+**|| row content: one can access or modify the cell content in a table's row
+**| by moving content to or from an instance of nsIMdbRow. Note that nsIMdbRow
+**| never represents the actual row inside a table, and this is the reason
+**| why nsIMdbRow instances do not have row IDs or row scopes. So an instance
+**| of nsIMdbRow always and only contains a snapshot of some or all content in
+**| past, present, or future persistent row inside a table. This means that
+**| reading and writing rows in tables has strictly copy semantics, and we
+**| currently do not plan any exceptions for specific performance reasons.
+**|
+**|| sorting: note all rows are assumed sorted by row ID as a secondary
+**| sort following the primary column sort, when table rows are sorted.
+**|
+**|| indexes:
+|*/
+
+// Interface ID for nsIMdbTable.
+// Fixed: removed a stray '=' between the macro name and its value; with the
+// '=', the object-like macro expanded to '= "fe11..."', which is unusable as
+// a string constant and inconsistent with NS_IMDBSTORE_IID_STR and
+// NS_IMDBCURSOR_IID_STR above.
+#define NS_IMDBTABLE_IID_STR "fe11bc98-d02b-4128-9fac-87042fdf9639"
+
+#define NS_IMDBTABLE_IID \
+  { \
+    0xfe11bc98, 0xd02b, 0x4128, { \
+      0x9f, 0xac, 0x87, 0x04, 0x2f, 0xdf, 0x96, 0x39 \
+    } \
+  }
+
+class nsIMdbTable : public nsIMdbCollection { // a collection of rows
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBTABLE_IID)
+ // { ===== begin nsIMdbTable methods =====
+
+ // { ----- begin meta attribute methods -----
+ NS_IMETHOD SetTablePriority(nsIMdbEnv* ev, mdb_priority inPrio) = 0;
+ NS_IMETHOD GetTablePriority(nsIMdbEnv* ev, mdb_priority* outPrio) = 0;
+
+ NS_IMETHOD GetTableBeVerbose(nsIMdbEnv* ev, mdb_bool* outBeVerbose) = 0;
+ NS_IMETHOD SetTableBeVerbose(nsIMdbEnv* ev, mdb_bool inBeVerbose) = 0;
+
+ NS_IMETHOD GetTableIsUnique(nsIMdbEnv* ev, mdb_bool* outIsUnique) = 0;
+
+ NS_IMETHOD GetTableKind(nsIMdbEnv* ev, mdb_kind* outTableKind) = 0;
+ NS_IMETHOD GetRowScope(nsIMdbEnv* ev, mdb_scope* outRowScope) = 0;
+
+ NS_IMETHOD GetMetaRow(
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOptionalMetaRowOid, // can be nil to avoid specifying
+ mdbOid* outOid, // output meta row oid, can be nil to suppress output
+ nsIMdbRow** acqRow) = 0; // acquire table's unique singleton meta row
+ // The purpose of a meta row is to support the persistent recording of
+ // meta info about a table as cells put into the distinguished meta row.
+ // Each table has exactly one meta row, which is not considered a member
+ // of the collection of rows inside the table. The only way to tell
+ // whether a row is a meta row is by the fact that it is returned by this
+ // GetMetaRow() method from some table. Otherwise nothing distinguishes
+ // a meta row from any other row. A meta row can be used anyplace that
+ // any other row can be used, and can even be put into other tables (or
+ // the same table) as a table member, if this is useful for some reason.
+ // The first attempt to access a table's meta row using GetMetaRow() will
+ // cause the meta row to be created if it did not already exist. When the
+ // meta row is created, it will have the row oid that was previously
+ // requested for this table's meta row; or if no oid was ever explicitly
+ // specified for this meta row, then a unique oid will be generated in
+ // the row scope named "m" (so obviously MDB clients should not
+ // manually allocate any row IDs from that special meta scope namespace).
+ // The meta row oid can be specified either when the table is created, or
+ // else the first time that GetMetaRow() is called, by passing a non-nil
+ // pointer to an oid for parameter inOptionalMetaRowOid. The meta row's
+ // actual oid is returned in outOid (if this is a non-nil pointer), and
+ // it will be different from inOptionalMetaRowOid when the meta row was
+ // already given a different oid earlier.
+ // } ----- end meta attribute methods -----
+
+ // { ----- begin cursor methods -----
+ NS_IMETHOD
+ GetTableRowCursor( // make a cursor, starting iteration at inRowPos
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ nsIMdbTableRowCursor** acqCursor) = 0; // acquire new cursor instance
+ // } ----- end row position methods -----
+
+ // { ----- begin row position methods -----
+ NS_IMETHOD PosToOid( // get row member for a table position
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ mdbOid* outOid) = 0; // row oid at the specified position
+
+ NS_IMETHOD OidToPos( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // row to find in table
+ mdb_pos* outPos) = 0; // zero-based ordinal position of row in table
+
+ NS_IMETHOD PosToRow( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ nsIMdbRow** acqRow) = 0; // acquire row at table position inRowPos
+
+ NS_IMETHOD RowToPos( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow, // row to find in table
+ mdb_pos* outPos) = 0; // zero-based ordinal position of row in table
+ // } ----- end row position methods -----
+
+ // { ----- begin oid set methods -----
+ NS_IMETHOD AddOid( // make sure the row with inOid is a table member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid) = 0; // row to ensure membership in table
+
+ NS_IMETHOD HasOid( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // row to find in table
+ mdb_bool* outHasOid) = 0; // whether inOid is a member row
+
+ NS_IMETHOD CutOid( // make sure the row with inOid is not a member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid) = 0; // row to remove from table
+ // } ----- end oid set methods -----
+
+ // { ----- begin row set methods -----
+ NS_IMETHOD NewRow( // create a new row instance in table
+ nsIMdbEnv* ev, // context
+ mdbOid*
+ ioOid, // please use minus one (unbound) rowId for db-assigned IDs
+ nsIMdbRow** acqRow) = 0; // create new row
+
+ NS_IMETHOD AddRow( // make sure the row with inOid is a table member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow) = 0; // row to ensure membership in table
+
+ NS_IMETHOD HasRow( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow, // row to find in table
+ mdb_bool* outHasRow) = 0; // whether row is a table member
+
+ NS_IMETHOD CutRow( // make sure the row with inOid is not a member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow) = 0; // row to remove from table
+
+ NS_IMETHOD CutAllRows( // remove all rows from the table
+ nsIMdbEnv* ev) = 0; // context
+ // } ----- end row set methods -----
+
+ // { ----- begin hinting methods -----
+ NS_IMETHOD SearchColumnsHint( // advise re future expected search cols
+ nsIMdbEnv* ev, // context
+ const mdbColumnSet* inColumnSet) = 0; // columns likely to be searched
+
+ NS_IMETHOD SortColumnsHint( // advise re future expected sort columns
+ nsIMdbEnv* ev, // context
+ const mdbColumnSet* inColumnSet) = 0; // columns for likely sort requests
+
+ NS_IMETHOD StartBatchChangeHint( // advise before many adds and cuts
+ nsIMdbEnv* ev, // context
+ const void* inLabel) = 0; // intend unique address to match end call
+ // If batch starts nest by virtue of nesting calls in the stack, then
+ // the address of a local variable makes a good batch start label that
+ // can be used at batch end time, and such addresses remain unique.
+
+ NS_IMETHOD EndBatchChangeHint( // advise after many adds and cuts
+ nsIMdbEnv* ev, // context
+ const void* inLabel) = 0; // label matching start label
+ // Suppose a table is maintaining one or many sort orders for a table,
+ // so that every row added to the table must be inserted in each sort,
+ // and every row cut must be removed from each sort. If a db client
+ // intends to make many such changes before needing any information
+ // about the order or positions of rows inside a table, then a client
+ // might tell the table to start batch changes in order to disable
+ // sorting of rows for the interim. Presumably a table will then do
+ // a full sort of all rows at need when the batch changes end, or when
+ // a surprise request occurs for row position during batch changes.
+ // } ----- end hinting methods -----
+
+ // { ----- begin searching methods -----
+ NS_IMETHOD FindRowMatches( // search variable number of sorted cols
+ nsIMdbEnv* ev, // context
+ const mdbYarn*
+ inPrefix, // content to find as prefix in row's column cell
+ nsIMdbTableRowCursor** acqCursor) = 0; // set of matching rows
+
+ NS_IMETHOD GetSearchColumns( // query columns used by FindRowMatches()
+ nsIMdbEnv* ev, // context
+ mdb_count* outCount, // actual number of search columns used
+ mdbColumnSet* outColSet) = 0; // caller supplied space to put columns
+ // GetSearchColumns() returns the columns actually searched when the
+ // FindRowMatches() method is called. No more than mColumnSet_Count
+ // slots of mColumnSet_Columns will be written, since mColumnSet_Count
+ // indicates how many slots are present in the column array. The
+ // actual number of search column used by the table is returned in
+ // the outCount parameter; if this number exceeds mColumnSet_Count,
+ // then a caller needs a bigger array to read the entire column set.
+ // The minimum of mColumnSet_Count and outCount is the number slots
+ // in mColumnSet_Columns that were actually written by this method.
+ //
+ // Callers are expected to change this set of columns by calls to
+ // nsIMdbTable::SearchColumnsHint() or SetSearchSorting(), or both.
+ // } ----- end searching methods -----
+
+ // { ----- begin sorting methods -----
+ // sorting: note all rows are assumed sorted by row ID as a secondary
+ // sort following the primary column sort, when table rows are sorted.
+
+ NS_IMETHOD
+ CanSortColumn( // query whether a column can be used for sorting
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to query sorting potential
+ mdb_bool* outCanSort) = 0; // whether the column can be sorted
+
+ NS_IMETHOD GetSorting( // view same table in particular sorting
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // requested new column for sorting table
+ nsIMdbSorting** acqSorting) = 0; // acquire sorting for column
+
+ NS_IMETHOD SetSearchSorting( // use this sorting in FindRowMatches()
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // often same as nsIMdbSorting::GetSortColumn()
+ nsIMdbSorting* ioSorting) = 0; // requested sorting for some column
+ // SetSearchSorting() attempts to inform the table that ioSorting
+ // should be used during calls to FindRowMatches() for searching
+ // the column which is actually sorted by ioSorting. This method
+ // is most useful in conjunction with nsIMdbSorting::SetCompare(),
+ // because otherwise a caller would not be able to override the
+ // comparison ordering method used during searches. Note that some
+ // database implementations might be unable to use an arbitrarily
+ // specified sort order, either due to schema or runtime interface
+ // constraints, in which case ioSorting might not actually be used.
+ // Presumably ioSorting is an instance that was returned from some
+ // earlier call to nsIMdbTable::GetSorting(). A caller can also
+ // use nsIMdbTable::SearchColumnsHint() to specify desired change
+ // in which columns are sorted and searched by FindRowMatches().
+ //
+ // A caller can pass a nil pointer for ioSorting to request that
+ // column inColumn no longer be used at all by FindRowMatches().
+ // But when ioSorting is non-nil, then inColumn should match the
+ // column actually sorted by ioSorting; when these do not agree,
+ // implementations are instructed to give precedence to the column
+ // specified by ioSorting (so this means callers might just pass
+ // zero for inColumn when ioSorting is also provided, since then
+ // inColumn is both redundant and ignored).
+ // } ----- end sorting methods -----
+
+ // { ----- begin moving methods -----
+ // moving a row does nothing unless a table is currently unsorted
+
+ NS_IMETHOD MoveOid( // change position of row in unsorted table
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // row oid to find in table
+ mdb_pos inHintFromPos, // suggested hint regarding start position
+ mdb_pos inToPos, // desired new position for row inOid
+ mdb_pos* outActualPos) = 0; // actual new position of row in table
+
+ NS_IMETHOD MoveRow( // change position of row in unsorted table
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow, // row to find in table
+ mdb_pos inHintFromPos, // suggested hint regarding start position
+ mdb_pos inToPos, // desired new position for row ioRow
+ mdb_pos* outActualPos) = 0; // actual new position of row in table
+ // } ----- end moving methods -----
+
+ // { ----- begin index methods -----
+ NS_IMETHOD AddIndex( // create a sorting index for column if possible
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column to sort by index
+ nsIMdbThumb** acqThumb) =
+ 0; // acquire thumb for incremental index building
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the index addition will be finished.
+
+ NS_IMETHOD CutIndex( // stop supporting a specific column index
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column with index to be removed
+ nsIMdbThumb** acqThumb) =
+ 0; // acquire thumb for incremental index destroy
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the index removal will be finished.
+
+ NS_IMETHOD HasIndex( // query for current presence of a column index
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column to investigate
+ mdb_bool* outHasIndex) = 0; // whether an index exists for this column
+
+ NS_IMETHOD EnableIndexOnSort( // create an index for col on first sort
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn) = 0; // the column to index if ever sorted
+
+ NS_IMETHOD QueryIndexOnSort( // check whether index on sort is enabled
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column to investigate
+ mdb_bool* outIndexOnSort) =
+ 0; // whether column has index-on-sort enabled
+
+ NS_IMETHOD DisableIndexOnSort( // prevent future index creation on sort
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn) = 0; // the column to stop index-on-sort for
+ // } ----- end index methods -----
+
+ // } ===== end nsIMdbTable methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbTable, NS_IMDBTABLE_IID)
+
+/*| nsIMdbSorting: a view of a table in some particular sort order. This
+**| row order closely resembles a readonly array of rows with the same row
+**| membership as the underlying table, but in a different order than the
+**| table's explicit row order. But the sorting's row membership changes
+**| whenever the table's membership changes (without any notification, so
+**| keep this in mind when modifying the table).
+**|
+**|| table: every sorting is associated with a particular table. You
+**| cannot change which table is used by a sorting (just ask some new
+**| table for a suitable sorting instance instead).
+**|
+**|| compare: the ordering method used by a sorting, wrapped up in an
+**| abstract plug-in interface. When this was never installed by an
+**| explicit call to SetNewCompare(), a compare object is still returned,
+**| and it might match the compare instance returned by the factory method
+**| nsIMdbFactory::MakeCompare(), which represents a default sort order
+**| (which we fervently hope is consistently ASCII byte ordering).
+**|
+**|| cursor: in case callers are more comfortable with a cursor style
+**| of accessing row members, each sorting will happily return a cursor
+**| instance with behavior very similar to a cursor returned from a call
+**| to nsIMdbTable::GetTableRowCursor(), but with different row order.
+**| A cursor should show exactly the same information as the pos methods.
+**|
+**|| pos: the PosToOid() and PosToRow() methods are just like the table
+**| methods of the same name, except they show rows in the sort order of
+**| the sorting, rather than that of the table. These methods are like
+**| readonly array position accessors, or like a C++ operator[].
+|*/
+class nsIMdbSorting : public nsIMdbObject { // sorting of some table
+ public:
+ // { ===== begin nsIMdbSorting methods =====
+
+ // { ----- begin attribute methods -----
+ // sorting: note all rows are assumed sorted by row ID as a secondary
+ // sort following the primary column sort, when table rows are sorted.
+
+ NS_IMETHOD GetTable(nsIMdbEnv* ev, nsIMdbTable** acqTable) = 0;
+ NS_IMETHOD GetSortColumn( // query which col is currently sorted
+ nsIMdbEnv* ev, // context
+ mdb_column* outColumn) = 0; // col the table uses for sorting (or zero)
+
+ // } ----- end attribute methods -----
+
+ // { ----- begin cursor methods -----
+ NS_IMETHOD GetSortingRowCursor( // make a cursor, starting at inRowPos
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ nsIMdbTableRowCursor** acqCursor) = 0; // acquire new cursor instance
+ // A cursor interface returning the same info as PosToOid() or PosToRow().
+ // } ----- end cursor methods -----
+
+ // { ----- begin row position methods -----
+ NS_IMETHOD PosToOid( // get row member for a table position
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ mdbOid* outOid) = 0; // row oid at the specified position
+
+ NS_IMETHOD PosToRow( // get row instance for a table position
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ nsIMdbRow** acqRow) = 0; // acquire row at table position inRowPos
+ // } ----- end row position methods -----
+
+ // } ===== end nsIMdbSorting methods =====
+};
+
+/*| nsIMdbTableRowCursor: cursor class for iterating table rows
+**|
+**|| table: the cursor is associated with a specific table, which can be
+**| set to a different table (which resets the position to -1 so the
+**| next row acquired is the first in the table).
+**|
+**|| NextRowId: the rows in the table can be iterated by identity alone,
+**| without actually reading the cells of any row with this method.
+**|
+**|| NextRowCells: read the next row in the table, but only read cells
+**| from the table which are already present in the row (so no new cells
+**| are added to the row, even if they are present in the table). All the
+**| cells will have content specified, even if it is the empty string. No
+**| columns will be removed, even if missing from the row (because missing
+**| and empty are semantically equivalent).
+**|
+**|| NextRowAllCells: read the next row in the table, and access all the
+**| cells for this row in the table, adding any missing columns to the row
+**| as needed until all cells are represented. All the
+**| cells will have content specified, even if it is the empty string. No
+**| columns will be removed, even if missing from the row (because missing
+**| and empty are semantically equivalent).
+**|
+|*/
+
+#define NS_IMDBTABLEROWCURSOR_IID_STR "4f325dad-0385-4b62-a992-c914ab93587e"
+
+#define NS_IMDBTABLEROWCURSOR_IID \
+ { \
+ 0x4f325dad, 0x0385, 0x4b62, { \
+ 0xa9, 0x92, 0xc9, 0x14, 0xab, 0x93, 0x58, 0x7e \
+ } \
+ }
+
+class nsIMdbTableRowCursor : public nsISupports { // table row iterator
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBTABLEROWCURSOR_IID)
+
+ // { ===== begin nsIMdbTableRowCursor methods =====
+
+ // { ----- begin attribute methods -----
+ // NS_IMETHOD SetTable(nsIMdbEnv* ev, nsIMdbTable* ioTable) = 0; // sets pos
+ // to -1 Method SetTable() cut and made obsolete in keeping with new sorting
+ // methods.
+
+ NS_IMETHOD GetTable(nsIMdbEnv* ev, nsIMdbTable** acqTable) = 0;
+ // } ----- end attribute methods -----
+
+ // { ----- begin duplicate row removal methods -----
+ NS_IMETHOD CanHaveDupRowMembers(nsIMdbEnv* ev, // cursor might hold dups?
+ mdb_bool* outCanHaveDups) = 0;
+
+ NS_IMETHOD MakeUniqueCursor( // clone cursor, removing duplicate rows
+ nsIMdbEnv* ev, // context
+ nsIMdbTableRowCursor** acqCursor) = 0; // acquire clone with no dups
+ // Note that MakeUniqueCursor() is never necessary for a cursor which was
+ // created by table method nsIMdbTable::GetTableRowCursor(), because a table
+ // never contains the same row as a member more than once. However, a cursor
+ // created by table method nsIMdbTable::FindRowMatches() might contain the
+ // same row more than once, because the same row can generate a hit by more
+ // than one column with a matching string prefix. Note this method can
+ // return the very same cursor instance with just an incremented refcount,
+ // when the original cursor could not contain any duplicate rows (calling
+ // CanHaveDupRowMembers() shows this case on a false return). Otherwise
+ // this method returns a different cursor instance. Callers should not use
+ // this MakeUniqueCursor() method lightly, because it tends to defeat the
+ // purpose of lazy programming techniques, since it can force creation of
+ // an explicit row collection in a new cursor's representation, in order to
+ // inspect the row membership and remove any duplicates; this can have big
+ // impact if a collection holds tens of thousands of rows or more, when
+ // the original cursor with dups simply referenced rows indirectly by row
+ // position ranges, without using an explicit row set representation.
+ // Callers are encouraged to use nsIMdbCursor::GetCount() to determine
+ // whether the row collection is very large (tens of thousands), and to
+ // delay calling MakeUniqueCursor() when possible, until a user interface
+ // element actually demands the creation of an explicit set representation.
+ // } ----- end duplicate row removal methods -----
+
+ // { ----- begin oid iteration methods -----
+ NS_IMETHOD NextRowOid( // get row id of next row in the table
+ nsIMdbEnv* ev, // context
+ mdbOid* outOid, // out row oid
+ mdb_pos* outRowPos) = 0; // zero-based position of the row in table
+ // } ----- end oid iteration methods -----
+
+ // { ----- begin row iteration methods -----
+ NS_IMETHOD NextRow( // get row cells from table for cells already in row
+ nsIMdbEnv* ev, // context
+ nsIMdbRow** acqRow, // acquire next row in table
+ mdb_pos* outRowPos) = 0; // zero-based position of the row in table
+
+ NS_IMETHOD PrevRowOid( // get row id of previous row in the table
+ nsIMdbEnv* ev, // context
+ mdbOid* outOid, // out row oid
+ mdb_pos* outRowPos) = 0; // zero-based position of the row in table
+ // } ----- end row iteration methods -----
+
+ // { ----- begin row iteration methods -----
+ NS_IMETHOD PrevRow( // get row cells from table for cells already in row
+ nsIMdbEnv* ev, // context
+ nsIMdbRow** acqRow, // acquire previous row in table
+ mdb_pos* outRowPos) = 0; // zero-based position of the row in table
+
+ // } ----- end row iteration methods -----
+
+ // { ----- begin copy iteration methods -----
+ // NS_IMETHOD NextRowCopy( // put row cells into sink only when already in
+ // sink
+ // nsIMdbEnv* ev, // context
+ // nsIMdbRow* ioSinkRow, // sink for row cells read from next row
+ // mdbOid* outOid, // out row oid
+ // mdb_pos* outRowPos) = 0; // zero-based position of the row in table
+ //
+ // NS_IMETHOD NextRowCopyAll( // put all row cells into sink, adding to sink
+ // nsIMdbEnv* ev, // context
+ // nsIMdbRow* ioSinkRow, // sink for row cells read from next row
+ // mdbOid* outOid, // out row oid
+ // mdb_pos* outRowPos) = 0; // zero-based position of the row in table
+ // } ----- end copy iteration methods -----
+
+ // } ===== end nsIMdbTableRowCursor methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbTableRowCursor, NS_IMDBTABLEROWCURSOR_IID)
+
+/*| nsIMdbRow: a collection of cells
+**|
+|*/
+
+#define NS_IMDBROW_IID_STR "271e8d6e-183a-40e3-9f18-36913b4c7853"
+
+#define NS_IMDBROW_IID \
+ { \
+ 0x271e8d6e, 0x183a, 0x40e3, { \
+ 0x9f, 0x18, 0x36, 0x91, 0x3b, 0x4c, 0x78, 0x53 \
+ } \
+ }
+
+class nsIMdbRow : public nsIMdbCollection { // cell tuple
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBROW_IID)
+ // { ===== begin nsIMdbRow methods =====
+
+ // { ----- begin cursor methods -----
+ NS_IMETHOD GetRowCellCursor( // make a cursor starting iteration at inCellPos
+ nsIMdbEnv* ev, // context
+ mdb_pos inCellPos, // zero-based ordinal position of cell in row
+ nsIMdbRowCellCursor** acqCursor) = 0; // acquire new cursor instance
+ // } ----- end cursor methods -----
+
+ // { ----- begin column methods -----
+ NS_IMETHOD AddColumn( // make sure a particular column is inside row
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to add
+ const mdbYarn* inYarn) = 0; // cell value to install
+
+ NS_IMETHOD CutColumn( // make sure a column is absent from the row
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn) = 0; // column to ensure absent from row
+
+ NS_IMETHOD CutAllColumns( // remove all columns from the row
+ nsIMdbEnv* ev) = 0; // context
+ // } ----- end column methods -----
+
+ // { ----- begin cell methods -----
+ NS_IMETHOD NewCell( // get cell for specified column, or add new one
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to add
+ nsIMdbCell** acqCell) = 0; // cell column and value
+
+ NS_IMETHOD AddCell( // copy a cell from another row to this row
+ nsIMdbEnv* ev, // context
+ const nsIMdbCell* inCell) = 0; // cell (column and value) to copy
+
+ NS_IMETHOD GetCell( // find a cell in this row
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to find
+ nsIMdbCell** acqCell) = 0; // cell for specified column, or null
+
+ NS_IMETHOD EmptyAllCells( // make all cells in row empty of content
+ nsIMdbEnv* ev) = 0; // context
+ // } ----- end cell methods -----
+
+ // { ----- begin row methods -----
+ NS_IMETHOD AddRow( // add all cells in another row to this one
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioSourceRow) = 0; // row to union with
+
+ NS_IMETHOD SetRow( // make exact duplicate of another row
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioSourceRow) = 0; // row to duplicate
+ // } ----- end row methods -----
+
+ // { ----- begin blob methods -----
+ NS_IMETHOD SetCellYarn(nsIMdbEnv* ev, // synonym for AddColumn()
+ mdb_column inColumn, // column to write
+ const mdbYarn* inYarn) = 0; // reads from yarn slots
+ // make this text object contain content from the yarn's buffer
+
+ NS_IMETHOD GetCellYarn(nsIMdbEnv* ev,
+ mdb_column inColumn, // column to read
+ mdbYarn* outYarn) = 0; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+
+ NS_IMETHOD AliasCellYarn(nsIMdbEnv* ev,
+ mdb_column inColumn, // column to alias
+ mdbYarn* outYarn) = 0; // writes ALL yarn slots
+
+ NS_IMETHOD NextCellYarn(nsIMdbEnv* ev, // iterative version of GetCellYarn()
+ mdb_column* ioColumn, // next column to read
+ mdbYarn* outYarn) = 0; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+ //
+ // The ioColumn argument is an inout parameter which initially contains the
+ // last column accessed and returns the next column corresponding to the
+ // content read into the yarn. Callers should start with a zero column
+ // value to say 'no previous column', which causes the first column to be
+ // read. Then the value returned in ioColumn is perfect for the next call
+ // to NextCellYarn(), since it will then be the previous column accessed.
+ // Callers need only examine the column token returned to see which cell
+ // in the row is being read into the yarn. When no more columns remain,
+ // and the iteration has ended, ioColumn will return a zero token again.
+ // So iterating over cells starts and ends with a zero column token.
+
+ NS_IMETHOD SeekCellYarn( // resembles nsIMdbRowCellCursor::SeekCell()
+ nsIMdbEnv* ev, // context
+ mdb_pos inPos, // position of cell in row sequence
+ mdb_column* outColumn, // column for this particular cell
+ mdbYarn* outYarn) = 0; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+ // Callers can pass nil for outYarn to indicate no interest in content, so
+ // only the outColumn value is returned. NOTE to subclasses: you must be
+ // able to ignore outYarn when the pointer is nil; please do not crash.
+
+ // } ----- end blob methods -----
+
+ // } ===== end nsIMdbRow methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbRow, NS_IMDBROW_IID)
+
+/*| nsIMdbRowCellCursor: cursor class for iterating row cells
+**|
+**|| row: the cursor is associated with a specific row, which can be
+**| set to a different row (which resets the position to -1 so the
+**| next cell acquired is the first in the row).
+**|
+**|| NextCell: get the next cell in the row and return its position and
+**| a new instance of a nsIMdbCell to represent this next cell.
+|*/
+
+#define NS_IMDBROWCELLCURSOR_IID_STR "b33371a7-5d63-4d10-85a8-e44dffe75c28" // NOTE(review): first dword of NS_IMDBROWCELLCURSOR_IID below is 0x271e8d6e (copied from NS_IMDBROW_IID?), which does not match this string (0xb33371a7) -- confirm which IID is intended
+
+#define NS_IMDBROWCELLCURSOR_IID \
+ { \
+ 0x271e8d6e, 0x5d63, 0x4d10, { \
+ 0x85, 0xa8, 0xe4, 0x4d, 0xff, 0xe7, 0x5c, 0x28 \
+ } \
+ }
+
+class nsIMdbRowCellCursor : public nsISupports { // cell collection iterator
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBROWCELLCURSOR_IID)
+ // { ===== begin nsIMdbRowCellCursor methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD SetRow(nsIMdbEnv* ev, nsIMdbRow* ioRow) = 0; // sets pos to -1
+ NS_IMETHOD GetRow(nsIMdbEnv* ev, nsIMdbRow** acqRow) = 0;
+ // } ----- end attribute methods -----
+
+ // { ----- begin cell seeking methods -----
+ NS_IMETHOD SeekCell(nsIMdbEnv* ev, // context
+ mdb_pos inPos, // position of cell in row sequence
+ mdb_column* outColumn, // column for this particular cell
+ nsIMdbCell** acqCell) = 0; // the cell at inPos
+ // } ----- end cell seeking methods -----
+
+ // { ----- begin cell iteration methods -----
+ NS_IMETHOD NextCell( // get next cell in the row
+ nsIMdbEnv* ev, // context
+ nsIMdbCell** acqCell, // changes to the next cell in the iteration
+ mdb_column* outColumn, // column for this particular cell
+ mdb_pos* outPos) = 0; // position of cell in row sequence
+
+ NS_IMETHOD PickNextCell( // get next cell in row within filter set
+ nsIMdbEnv* ev, // context
+ nsIMdbCell* ioCell, // changes to the next cell in the iteration
+ const mdbColumnSet* inFilterSet, // col set of actual caller interest
+ mdb_column* outColumn, // column for this particular cell
+ mdb_pos* outPos) = 0; // position of cell in row sequence
+
+ // Note that inFilterSet should not have too many (many more than 10?)
+ // cols, since this might imply a potential excessive consumption of time
+ // over many cursor calls when looking for column and filter intersection.
+ // } ----- end cell iteration methods -----
+
+ // } ===== end nsIMdbRowCellCursor methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbRowCellCursor, NS_IMDBROWCELLCURSOR_IID)
+
+/*| nsIMdbBlob: a base class for objects composed mainly of byte sequence state.
+**| (This provides a base class for nsIMdbCell, so that cells themselves can
+**| be used to set state in another cell, without extracting a buffer.)
+|*/
+class nsIMdbBlob : public nsISupports { // a string with associated charset
+ public:
+ // { ===== begin nsIMdbBlob methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD SetBlob(nsIMdbEnv* ev,
+ nsIMdbBlob* ioBlob) = 0; // reads ioBlob slots
+ // when ioBlob is in the same suite, this might be fastest cell-to-cell
+
+ NS_IMETHOD ClearBlob( // make empty (so content has zero length)
+ nsIMdbEnv* ev) = 0;
+ // clearing a yarn is like SetYarn() with empty yarn instance content
+
+ NS_IMETHOD GetBlobFill(nsIMdbEnv* ev,
+ mdb_fill* outFill) = 0; // size of blob
+ // Same value that would be put into mYarn_Fill, if one called GetYarn()
+ // with a yarn instance that had mYarn_Buf==nil and mYarn_Size==0.
+
+ NS_IMETHOD SetYarn(nsIMdbEnv* ev,
+ const mdbYarn* inYarn) = 0; // reads from yarn slots
+ // make this text object contain content from the yarn's buffer
+
+ NS_IMETHOD GetYarn(nsIMdbEnv* ev,
+ mdbYarn* outYarn) = 0; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+
+ NS_IMETHOD AliasYarn(nsIMdbEnv* ev,
+ mdbYarn* outYarn) = 0; // writes ALL yarn slots
+ // AliasYarn() reveals sensitive internal text buffer state to the caller
+ // by setting mYarn_Buf to point into the guts of this text implementation.
+ //
+ // The caller must take great care to avoid writing on this space, and to
+ // avoid calling any method that would cause the state of this text object
+ // to change (say by directly or indirectly setting the text to hold more
+ // content that might grow the size of the buffer and free the old buffer).
+ // In particular, callers should scrupulously avoid making calls into the
+ // mdb interface to write any content while using the buffer pointer found
+ // in the returned yarn instance. Best safe usage involves copying content
+ // into some other kind of external content representation beyond mdb.
+ //
+ // (The original design of this method a week earlier included the concept
+ // of very fast and efficient cooperative locking via a pointer to some lock
+ // member slot. But let's ignore that complexity in the current design.)
+ //
+ // AliasYarn() is specifically intended as the first step in transferring
+ // content from nsIMdbBlob to a nsString representation, without forcing extra
+ // allocations and/or memory copies. (A standard nsIMdbBlob_AsString() utility
+ // will use AliasYarn() as the first step in setting a nsString instance.)
+ //
+ // This is an alternative to the GetYarn() method, which has copy semantics
+ // only; AliasYarn() relaxes a robust safety principle only for performance
+ // reasons, to accommodate the need for callers to transform text content to
+ // some other canonical representation that would necessitate an additional
+ // copy and transformation when such is incompatible with the mdbYarn format.
+ //
+ // The implementation of AliasYarn() should have extremely little overhead
+ // besides the virtual dispatch to the method implementation, and the code
+ // necessary to populate all the mdbYarn member slots with internal buffer
+ // address and metainformation that describes the buffer content. Note that
+ // mYarn_Grow must always be set to nil to indicate no resizing is allowed.
+
+ // } ----- end attribute methods -----
+
+ // } ===== end nsIMdbBlob methods =====
+};
+
+/*| nsIMdbCell: the text in a single column of a row. The base nsIMdbBlob
+**| class provides all the interface related to accessing cell text.
+**|
+**|| column: each cell in a row appears in a specific column, where this
+**| column is identified by an integer mdb_scope value (generated by
+**| the StringToScopeToken() method in the containing nsIMdbPort instance).
+**| Because a row cannot have more than one cell with the same column,
+**| something must give if one calls SetColumn() with an existing column
+**| in the same row. When this happens, the other cell is replaced with
+**| this cell (and the old cell is closed if it has outstanding refs).
+**|
+**|| row: every cell instance is a part of some row, and every cell knows
+**| which row is the parent row. (Note this should be represented by a
+**| weak backpointer, so that outstanding cell references cannot keep a
+**| row open that should be closed. Otherwise we'd have ref graph cycles.)
+**|
+**|| text: a cell can either be text, or it can have a child row or table,
+**| but not both at once. If text is read from a cell with a child, the text
+**| content should be empty (for AliasYarn()) or a description of the type
+**| of child (perhaps "mdb:cell:child:row" or "mdb:cell:child:table").
+**|
+**|| child: a cell might reference another row or a table, rather than text.
+**| The interface for putting and getting children rows and tables was first
+**| defined in the nsIMdbTable interface, but then this was moved to this cell
+**| interface as more natural.
+|*/
+
+#define NS_IMDBCELL_IID \
+ { \
+ 0xa3b62f71, 0xa181, 0x4a91, { \
+ 0xb6, 0x6b, 0x27, 0x10, 0x9b, 0x88, 0x98, 0x35 \
+ } \
+ }
+
+#define NS_IMDBCELL_IID_STR "a3b62f71-a181-4a91-b66b-27109b889835"
+
+class nsIMdbCell
+ : public nsIMdbBlob { // text attribute in row with column scope
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBCELL_IID)
+ // { ===== begin nsIMdbCell methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD SetColumn(nsIMdbEnv* ev, mdb_column inColumn) = 0;
+ NS_IMETHOD GetColumn(nsIMdbEnv* ev, mdb_column* outColumn) = 0;
+
+ NS_IMETHOD GetCellInfo( // all cell metainfo except actual content
+ nsIMdbEnv* ev,
+ mdb_column* outColumn, // the column in the containing row
+ mdb_fill* outBlobFill, // the size of text content in bytes
+ mdbOid* outChildOid, // oid of possible row or table child
+ mdb_bool* outIsRowChild) = 0; // nonzero if child, and a row child
+
+ // Checking all cell metainfo is a good way to avoid forcing a large cell
+ // in to memory when you don't actually want to use the content.
+
+ NS_IMETHOD GetRow(nsIMdbEnv* ev, // parent row for this cell
+ nsIMdbRow** acqRow) = 0;
+ NS_IMETHOD GetPort(nsIMdbEnv* ev, // port containing cell
+ nsIMdbPort** acqPort) = 0;
+ // } ----- end attribute methods -----
+
+ // { ----- begin children methods -----
+ NS_IMETHOD HasAnyChild( // does cell have a child instead of text?
+ nsIMdbEnv* ev,
+ mdbOid* outOid, // out id of row or table (or unbound if no child)
+ mdb_bool* outIsRow) =
+ 0; // nonzero if child is a row (rather than a table)
+
+ NS_IMETHOD GetAnyChild( // access child row or child table
+ nsIMdbEnv* ev, // context
+ nsIMdbRow** acqRow, // child row (or null)
+ nsIMdbTable** acqTable) = 0; // child table (or null)
+
+ NS_IMETHOD SetChildRow( // set the row child of this cell
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow) = 0; // ioRow must be bound inside this same db port
+
+ NS_IMETHOD GetChildRow( // access row of specific attribute
+ nsIMdbEnv* ev, // context
+ nsIMdbRow** acqRow) = 0; // acquire child row (or nil if no child)
+
+ NS_IMETHOD SetChildTable( // set the table child of this cell
+ nsIMdbEnv* ev, // context
+ nsIMdbTable* inTable) =
+ 0; // table must be bound inside this same db port
+
+ NS_IMETHOD GetChildTable( // access table of specific attribute
+ nsIMdbEnv* ev, // context
+ nsIMdbTable** acqTable) = 0; // acquire child table (or nil if no child)
+ // } ----- end children methods -----
+
+ // } ===== end nsIMdbCell methods =====
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbCell, NS_IMDBCELL_IID)
+
+// } %%%%% end C++ abstract class interfaces %%%%%
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MDB_ */
diff --git a/comm/mailnews/db/mork/mork.h b/comm/mailnews/db/mork/mork.h
new file mode 100644
index 0000000000..ec48e67046
--- /dev/null
+++ b/comm/mailnews/db/mork/mork.h
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MORK_
+#define _MORK_ 1
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#include "nscore.h"
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// { %%%%% begin disable unused param warnings %%%%%
+#define MORK_USED_1(x) (void)(&x)
+#define MORK_USED_2(x, y) \
+ (void)(&x); \
+ (void)(&y);
+#define MORK_USED_3(x, y, z) \
+ (void)(&x); \
+ (void)(&y); \
+ (void)(&z);
+#define MORK_USED_4(w, x, y, z) \
+ (void)(&w); \
+ (void)(&x); \
+ (void)(&y); \
+ (void)(&z);
+
+// } %%%%% end disable unused param warnings %%%%%
+
+// { %%%%% begin macro for finding class member offset %%%%%
+
+/*| OffsetOf: the unsigned integer offset of a class or struct
+**| field from the beginning of that class or struct. This is
+**| the same as the similarly named public domain IronDoc macro,
+**| and is also the same as another macro appearing in stdlib.h.
+**| We want these offsets so we can correctly convert pointers
+**| to member slots back into pointers to enclosing objects, and
+**| have this exactly match what the compiler thinks is true.
+**|
+**|| Basically we are asking the compiler to determine the offset at
+**| compile time, and we use the definition of address arithmetic
+**| to do this. By casting integer zero to a pointer of type obj*,
+**| we can reference the address of a slot in such an object that
+**| is hypothetically physically placed at address zero, but without
+**| actually dereferencing a memory location. The absolute address
+**| of slot is the same as offset of that slot, when the object is
+**| placed at address zero.
+|*/
+#define mork_OffsetOf(obj, slot) ((unsigned int)&((obj*)0)->slot)
+
+// } %%%%% end macro for finding class member offset %%%%%
+
+// { %%%%% begin specific-size integer scalar typedefs %%%%%
+typedef unsigned char mork_u1; // make sure this is one byte
+typedef unsigned short mork_u2; // make sure this is two bytes
+typedef short mork_i2; // make sure this is two bytes
+typedef uint32_t mork_u4; // make sure this is four bytes
+typedef int32_t mork_i4; // make sure this is four bytes
+typedef PRWord mork_ip; // make sure sizeof(mork_ip) == sizeof(void*)
+
+typedef mork_u1 mork_ch; // small byte-sized character (never wide)
+typedef mork_u1 mork_flags; // one byte's worth of predicate bit flags
+
+typedef mork_u2 mork_base; // 2-byte magic class signature slot in object
+typedef mork_u2 mork_derived; // 2-byte magic class signature slot in object
+
+typedef mork_u4 mork_token; // unsigned token for atomized string
+typedef mork_token mork_scope; // token used to id scope for rows
+typedef mork_token mork_kind; // token used to id kind for tables
+typedef mork_token mork_cscode; // token used to id charset names
+typedef mork_token mork_aid; // token used to id atomize cell values
+
+typedef mork_token mork_column; // token used to id columns for rows
+typedef mork_column mork_delta; // mork_column plus mork_change
+
+typedef mork_token mork_color; // bead ID
+#define morkColor_kNone ((mork_color)0)
+
+typedef mork_u4 mork_magic; // unsigned magic signature
+
+typedef mork_u4 mork_seed; // unsigned collection change counter
+typedef mork_u4 mork_count; // unsigned collection member count
+typedef mork_count mork_num; // synonym for count
+typedef mork_u4 mork_size; // unsigned physical media size
+typedef mork_u4 mork_fill; // unsigned logical content size
+typedef mork_u4 mork_more; // more available bytes for larger buffer
+
+typedef mdb_u4 mork_percent; // 0..100, with values >100 same as 100
+
+typedef mork_i4 mork_pos; // negative means "before first" (at zero pos)
+typedef mork_i4 mork_line; // negative means "before first line in file"
+
+typedef mork_u1 mork_usage; // 1-byte magic usage signature slot in object
+typedef mork_u1 mork_access; // 1-byte magic access signature slot in object
+
+typedef mork_u1 mork_change; // add, cut, put, set, nil
+typedef mork_u1 mork_priority; // 0..9, for a total of ten different values
+
+typedef mork_u1 mork_able; // on, off, asleep (clone IronDoc's fe_able)
+typedef mork_u1 mork_load; // dirty or clean (clone IronDoc's fe_load)
+// } %%%%% end specific-size integer scalar typedefs %%%%%
+
+// 'test' is a public domain Mithril for key equality tests in probe maps
+typedef mork_i2 mork_test; /* neg=>kVoid, zero=>kHit, pos=>kMiss */
+
+#define morkTest_kVoid ((mork_test)-1) /* -1: nil key slot, no key order */
+#define morkTest_kHit ((mork_test)0) /* 0: keys are equal, a map hit */
+#define morkTest_kMiss ((mork_test)1) /* 1: keys not equal, a map miss */
+
+// { %%%%% begin constants for Mork scalar types %%%%%
+#define morkPriority_kHi ((mork_priority)0) /* best priority */
+#define morkPriority_kMin ((mork_priority)0) /* best priority is smallest */
+
+#define morkPriority_kLo ((mork_priority)9) /* worst priority */
+#define morkPriority_kMax ((mork_priority)9) /* worst priority is biggest */
+
+#define morkPriority_kCount 10 /* number of distinct priority values */
+
+#define morkAble_kEnabled ((mork_able)0x55) /* same as IronDoc constant */
+#define morkAble_kDisabled ((mork_able)0xAA) /* same as IronDoc constant */
+#define morkAble_kAsleep ((mork_able)0x5A) /* same as IronDoc constant */
+
+#define morkChange_kAdd 'a' /* add member */
+#define morkChange_kCut 'c' /* cut member */
+#define morkChange_kPut 'p' /* put member */
+#define morkChange_kSet 's' /* set all members */
+#define morkChange_kNil 0 /* no change in this member */
+#define morkChange_kDup 'd' /* duplicate changes have no effect */
+// kDup is intended to replace another change constant in an object as a
+// conclusion about change feasibility while staging intended alterations.
+
+#define morkLoad_kDirty ((mork_load)0xDD) /* same as IronDoc constant */
+#define morkLoad_kClean ((mork_load)0x22) /* same as IronDoc constant */
+
+#define morkAccess_kOpen 'o'
+#define morkAccess_kClosing 'c'
+#define morkAccess_kShut 's'
+#define morkAccess_kDead 'd'
+// } %%%%% end constants for Mork scalar types %%%%%
+
+// { %%%%% begin non-specific-size integer scalar typedefs %%%%%
+typedef int mork_char; // nominal type for ints used to hold input byte
+#define morkChar_IsWhite(c) \
+ ((c) == 0xA || (c) == 0x9 || (c) == 0xD || (c) == ' ')
+// } %%%%% end non-specific-size integer scalar typedefs %%%%%
+
+// { %%%%% begin mdb-driven scalar typedefs %%%%%
+// easier to define bool exactly the same as mdb:
+typedef mdb_bool mork_bool; // unsigned byte with zero=false, nonzero=true
+
+/* canonical boolean constants provided only for code clarity: */
+#define morkBool_kTrue ((mork_bool)1) /* actually any nonzero means true */
+#define morkBool_kFalse ((mork_bool)0) /* only zero means false */
+
+// mdb clients can assign these, so we cannot pick maximum size:
+typedef mdb_id mork_id; // unsigned object identity in a scope
+typedef mork_id mork_rid; // unsigned row identity inside scope
+typedef mork_id mork_tid; // unsigned table identity inside scope
+typedef mork_id mork_gid; // unsigned group identity without any scope
+
+// we only care about neg, zero, pos -- so we don't care about size:
+typedef mdb_order mork_order; // neg:lessthan, zero:equalto, pos:greaterthan
+// } %%%%% end mdb-driven scalar typedefs %%%%%
+
+#define morkId_kMinusOne ((mdb_id)-1)
+
+// { %%%%% begin class forward defines %%%%%
+// try to put these in alphabetical order for easier examination:
+class morkMid;
+class morkAtom;
+class morkAtomSpace;
+class morkBookAtom;
+class morkBuf;
+class morkBuilder;
+class morkCell;
+class morkCellObject;
+class morkCursor;
+class morkEnv;
+class morkFactory;
+class morkFile;
+class morkHandle;
+class morkHandleFace; // just an opaque cookie type
+class morkHandleFrame;
+class morkHashArrays;
+class morkMap;
+class morkNode;
+class morkObject;
+class morkOidAtom;
+class morkParser;
+class morkPool;
+class morkPlace;
+class morkPort;
+class morkPortTableCursor;
+class morkProbeMap;
+class morkRow;
+class morkRowCellCursor;
+class morkRowObject;
+class morkRowSpace;
+class morkSorting;
+class morkSortingRowCursor;
+class morkSpace;
+class morkSpan;
+class morkStore;
+class morkStream;
+class morkTable;
+class morkTableChange;
+class morkTableRowCursor;
+class morkThumb;
+class morkWriter;
+class morkZone;
+// } %%%%% end class forward defines %%%%%
+
+// include this config file last for platform & environment specific stuff:
+#ifndef _MORKCONFIG_
+# include "morkConfig.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORK_ */
diff --git a/comm/mailnews/db/mork/morkArray.cpp b/comm/mailnews/db/mork/morkArray.cpp
new file mode 100644
index 0000000000..fff5f8a626
--- /dev/null
+++ b/comm/mailnews/db/mork/morkArray.cpp
@@ -0,0 +1,250 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nscore.h"
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKARRAY_
+# include "morkArray.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkArray::CloseMorkNode(
+    morkEnv* ev)  // CloseArray() only if open
+{
+  // Standard morkNode teardown protocol: guard against re-entry via the
+  // open check, flag the node as closing, release owned storage through
+  // CloseArray(), then mark shut so the destructor's asserts pass.
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseArray(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkArray::~morkArray()  // assert CloseArray() executed earlier
+{
+  // The destructor frees nothing itself: CloseArray() must already have
+  // released the slot vector (hence mArray_Slots == 0) and marked us shut.
+  MORK_ASSERT(this->IsShutNode());
+  MORK_ASSERT(mArray_Slots == 0);
+}
+
+/*public non-poly*/
+// Construct an array with initial capacity inSize (minimum 3), allocating
+// the slot vector from ioSlotHeap, which is required (NilPointerError when
+// absent) and retained strongly in mArray_Heap for later reallocations.
+// mNode_Derived is tagged morkDerived_kArray only after full success, so
+// IsArray() stays false for a partially constructed instance.
+morkArray::morkArray(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                     mork_size inSize, nsIMdbHeap* ioSlotHeap)
+    : morkNode(ev, inUsage, ioHeap),
+      mArray_Slots(0),
+      mArray_Heap(0),
+      mArray_Fill(0),
+      mArray_Size(0),
+      mArray_Seed(
+          (mork_u4)NS_PTR_TO_INT32(this))  // "random" integer assignment
+{
+  if (ev->Good()) {
+    if (ioSlotHeap) {
+      nsIMdbHeap_SlotStrongHeap(ioSlotHeap, ev, &mArray_Heap);
+      if (ev->Good()) {
+        if (inSize < 3) inSize = 3;  // enforce minimum capacity
+        mdb_size byteSize = inSize * sizeof(void*);
+        void** block = 0;
+        ioSlotHeap->Alloc(ev->AsMdbEnv(), byteSize, (void**)&block);
+        if (block && ev->Good()) {
+          mArray_Slots = block;
+          mArray_Size = inSize;
+          // zero-fill so unused slots read as null pointers
+          MORK_MEMSET(mArray_Slots, 0, byteSize);
+          if (ev->Good()) mNode_Derived = morkDerived_kArray;
+        }
+      }
+    } else
+      ev->NilPointerError();
+  }
+}
+
+/*public non-poly*/ void morkArray::CloseArray(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  // Release the slot vector back to the heap it came from, reset all
+  // counters, bump the seed so outstanding iterators notice the change,
+  // and drop the strong reference on the slot heap.
+  if (this->IsNode()) {
+    if (mArray_Heap && mArray_Slots)
+      mArray_Heap->Free(ev->AsMdbEnv(), mArray_Slots);
+
+    mArray_Slots = 0;
+    mArray_Size = 0;
+    mArray_Fill = 0;
+    ++mArray_Seed;
+    nsIMdbHeap_SlotStrongHeap((nsIMdbHeap*)0, ev, &mArray_Heap);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Centralized error reporters: each records a descriptive error on the
+// environment rather than throwing (Mork uses ev-state error handling).
+/*static*/ void morkArray::NonArrayTypeError(morkEnv* ev) {
+  ev->NewError("non morkArray");
+}
+
+/*static*/ void morkArray::IndexBeyondEndError(morkEnv* ev) {
+  ev->NewError("array index beyond end");
+}
+
+/*static*/ void morkArray::NilSlotsAddressError(morkEnv* ev) {
+  ev->NewError("nil mArray_Slots");
+}
+
+/*static*/ void morkArray::FillBeyondSizeError(morkEnv* ev) {
+  ev->NewError("mArray_Fill > mArray_Size");
+}
+
+mork_bool morkArray::Grow(morkEnv* ev, mork_size inNewSize)
+// Grow() returns true if capacity becomes >= inNewSize and ev->Good()
+{
+  // NOTE(review): the growth policy below OVERWRITES inNewSize (either
+  // size+3 or size*2), so the final "mArray_Size >= inNewSize" check
+  // compares against the computed growth, not the caller's request. A
+  // request larger than double the current capacity can therefore return
+  // true without the requested capacity being reached. This is harmless
+  // for the fill+1 growth used by AppendSlot()/AddSlot(), but callers
+  // requesting large jumps should verify Capacity() — TODO confirm intent.
+  if (ev->Good() && inNewSize > mArray_Size)  // make array larger?
+  {
+    if (mArray_Fill <= mArray_Size)  // fill and size fit the invariant?
+    {
+      if (mArray_Size <= 3)
+        inNewSize = mArray_Size + 3;
+      else
+        inNewSize = mArray_Size *
+                    2;  // + 3; // try doubling size here - used to grow by 3
+
+      mdb_size newByteSize = inNewSize * sizeof(void*);
+      void** newBlock = 0;
+      mArray_Heap->Alloc(ev->AsMdbEnv(), newByteSize, (void**)&newBlock);
+      if (newBlock && ev->Good())  // okay new block?
+      {
+        // copy the used prefix, then null-fill the new tail
+        void** oldSlots = mArray_Slots;
+        void** oldEnd = oldSlots + mArray_Fill;
+
+        void** newSlots = newBlock;
+        void** newEnd = newBlock + inNewSize;
+
+        while (oldSlots < oldEnd) *newSlots++ = *oldSlots++;
+
+        while (newSlots < newEnd) *newSlots++ = (void*)0;
+
+        // install the new block before freeing the old one
+        oldSlots = mArray_Slots;
+        mArray_Size = inNewSize;
+        mArray_Slots = newBlock;
+        mArray_Heap->Free(ev->AsMdbEnv(), oldSlots);
+      }
+    } else
+      this->FillBeyondSizeError(ev);
+  }
+  ++mArray_Seed;  // always modify seed, since caller intends to add slots
+  return (ev->Good() && mArray_Size >= inNewSize);
+}
+
+// Bounds-checked read: returns the slot at inPos, or null after recording
+// an error when inPos is outside [0, mArray_Fill) or the vector is nil.
+// (Contrast with the unchecked inline At() in morkArray.h.)
+void* morkArray::SafeAt(morkEnv* ev, mork_pos inPos) {
+  if (mArray_Slots) {
+    if (inPos >= 0 && inPos < (mork_pos)mArray_Fill)
+      return mArray_Slots[inPos];
+    else
+      this->IndexBeyondEndError(ev);
+  } else
+    this->NilSlotsAddressError(ev);
+
+  return (void*)0;
+}
+
+// Bounds-checked write: overwrites the slot at inPos (must already be in
+// the used range [0, mArray_Fill)) and bumps the seed; records an error
+// and writes nothing when out of range or the vector is nil.
+void morkArray::SafeAtPut(morkEnv* ev, mork_pos inPos, void* ioSlot) {
+  if (mArray_Slots) {
+    if (inPos >= 0 && inPos < (mork_pos)mArray_Fill) {
+      mArray_Slots[inPos] = ioSlot;
+      ++mArray_Seed;
+    } else
+      this->IndexBeyondEndError(ev);
+  } else
+    this->NilSlotsAddressError(ev);
+}
+
+// Append ioSlot after the last used slot, growing capacity if needed.
+// Returns the position of the new slot, or -1 when growth fails (the
+// failure reason is recorded on ev by Grow()).
+mork_pos morkArray::AppendSlot(morkEnv* ev, void* ioSlot) {
+  mork_pos outPos = -1;
+  if (mArray_Slots) {
+    mork_fill fill = mArray_Fill;
+    if (this->Grow(ev, fill + 1)) {
+      outPos = (mork_pos)fill;
+      mArray_Slots[fill] = ioSlot;
+      mArray_Fill = fill + 1;
+      // note Grow() increments mArray_Seed
+    }
+  } else
+    this->NilSlotsAddressError(ev);
+
+  return outPos;
+}
+
+// Insert ioSlot at inPos, shifting slots [inPos, fill) up by one.
+// NOTE(review): unlike SafeAtPut(), inPos is NOT range-checked here; the
+// pointer arithmetic assumes 0 <= inPos <= mArray_Fill — TODO confirm all
+// callers guarantee this.
+void morkArray::AddSlot(morkEnv* ev, mork_pos inPos, void* ioSlot) {
+  if (mArray_Slots) {
+    mork_fill fill = mArray_Fill;
+    if (this->Grow(ev, fill + 1)) {
+      void** slot = mArray_Slots;  // the slot vector
+      void** end = slot + fill;    // one past the last used array slot
+      slot += inPos;               // the slot to be added
+
+      // shift the tail upward, back to front, to open a gap at inPos
+      while (--end >= slot)  // another slot to move upward?
+        end[1] = *end;
+
+      *slot = ioSlot;
+      mArray_Fill = fill + 1;
+      // note Grow() increments mArray_Seed
+    }
+  } else
+    this->NilSlotsAddressError(ev);
+}
+
+// Remove the slot at inPos, shifting later slots down by one and nulling
+// the vacated last slot. Out-of-range positions are silently ignored
+// (no error is recorded; ev is unused).
+void morkArray::CutSlot(morkEnv* ev, mork_pos inPos) {
+  MORK_USED_1(ev);
+  mork_fill fill = mArray_Fill;
+  if (inPos >= 0 &&
+      inPos < (mork_pos)fill)  // cutting slot in used array portion?
+  {
+    void** slot = mArray_Slots;  // the slot vector
+    void** end = slot + fill;    // one past the last used array slot
+    slot += inPos;               // the slot to be cut
+
+    while (++slot < end)  // another slot to move downward?
+      slot[-1] = *slot;
+
+    slot[-1] = 0;  // clear the last used slot which is now unused
+
+    // note inPos<fill implies fill>0, so fill-1 must be nonnegative:
+    mArray_Fill = fill - 1;
+    ++mArray_Seed;
+  }
+}
+
+// Logically empty the array: null out the used prefix and reset fill to
+// zero. Capacity (mArray_Size) and the slot vector itself are retained
+// for reuse; the seed is bumped even on the error paths.
+void morkArray::CutAllSlots(morkEnv* ev) {
+  if (mArray_Slots) {
+    if (mArray_Fill <= mArray_Size) {
+      mdb_size oldByteSize = mArray_Fill * sizeof(void*);
+      MORK_MEMSET(mArray_Slots, 0, oldByteSize);
+    } else
+      this->FillBeyondSizeError(ev);
+  } else
+    this->NilSlotsAddressError(ev);
+
+  ++mArray_Seed;
+  mArray_Fill = 0;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkArray.h b/comm/mailnews/db/mork/morkArray.h
new file mode 100644
index 0000000000..daf9c96f35
--- /dev/null
+++ b/comm/mailnews/db/mork/morkArray.h
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKARRAY_
+#define _MORKARRAY_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kArray /*i*/ 0x4179 /* ascii 'Ay' */
+
+class morkArray : public morkNode {  // growable vector of untyped pointers
+  // (header comment fixed: previously said "row iterator", a copy-paste
+  // from morkTableRowCursor; this class is a dynamic void* array.)
+
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ public:  // state is public because the entire Mork system is private
+  void** mArray_Slots;      // array of pointers
+  nsIMdbHeap* mArray_Heap;  // required heap for allocating mArray_Slots
+  mork_fill mArray_Fill;    // logical count of used slots in mArray_Slots
+  mork_size mArray_Size;    // physical count of mArray_Slots ( >= Fill)
+  mork_seed mArray_Seed;    // change counter for syncing with iterators
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev) override;  // CloseArray()
+  virtual ~morkArray();  // assert that close executed earlier
+
+ public:  // morkArray construction & destruction
+  morkArray(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+            mork_size inSize, nsIMdbHeap* ioSlotHeap);
+  void CloseArray(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkArray(const morkArray& other);
+  morkArray& operator=(const morkArray& other);
+
+ public:  // dynamic type identification
+  mork_bool IsArray() const {
+    return IsNode() && mNode_Derived == morkDerived_kArray;
+  }
+  // } ===== end morkNode methods =====
+
+ public:  // typing & errors
+  static void NonArrayTypeError(morkEnv* ev);
+  static void IndexBeyondEndError(morkEnv* ev);
+  static void NilSlotsAddressError(morkEnv* ev);
+  static void FillBeyondSizeError(morkEnv* ev);
+
+ public:  // array access: checked (Safe*) and unchecked variants
+  mork_fill Length() const { return mArray_Fill; }
+  mork_size Capacity() const { return mArray_Size; }
+
+  mork_bool Grow(morkEnv* ev, mork_size inNewSize);
+  // Grow() returns true if capacity becomes >= inNewSize and ev->Good()
+
+  // At()/AtPut() perform NO bounds checking; callers must guarantee
+  // 0 <= inPos < mArray_Fill, else use the Safe* forms below.
+  void* At(mork_pos inPos) const { return mArray_Slots[inPos]; }
+  void AtPut(mork_pos inPos, void* ioSlot) { mArray_Slots[inPos] = ioSlot; }
+
+  void* SafeAt(morkEnv* ev, mork_pos inPos);
+  void SafeAtPut(morkEnv* ev, mork_pos inPos, void* ioSlot);
+
+  mork_pos AppendSlot(morkEnv* ev, void* ioSlot);
+  void AddSlot(morkEnv* ev, mork_pos inPos, void* ioSlot);
+  void CutSlot(morkEnv* ev, mork_pos inPos);
+  void CutAllSlots(morkEnv* ev);
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakArray(morkArray* me, morkEnv* ev, morkArray** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongArray(morkArray* me, morkEnv* ev, morkArray** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKARRAY_ */
diff --git a/comm/mailnews/db/mork/morkAtom.cpp b/comm/mailnews/db/mork/morkAtom.cpp
new file mode 100644
index 0000000000..ad3b1d53bf
--- /dev/null
+++ b/comm/mailnews/db/mork/morkAtom.cpp
@@ -0,0 +1,432 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+#ifndef _MORKATOM_
+# include "morkAtom.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/* static */
+mork_bool morkAtom::GetYarn(const morkAtom* atom, mdbYarn* outYarn) {
+  // Copy the atom's content bytes into the caller-owned yarn buffer.
+  // Dispatches on the four content-bearing atom kinds (oid atoms and a
+  // nil atom yield empty content). If the content exceeds the yarn's
+  // capacity and the yarn has a grow method, growth is requested first;
+  // any bytes still not fitting are reported via mYarn_More. Returns
+  // true when the atom actually had content bytes to offer.
+  const void* source = 0;
+  mdb_fill fill = 0;
+  mdb_cscode form = 0;
+  outYarn->mYarn_More = 0;
+
+  if (atom) {
+    if (atom->IsWeeBook()) {
+      morkWeeBookAtom* weeBook = (morkWeeBookAtom*)atom;
+      source = weeBook->mWeeBookAtom_Body;
+      fill = weeBook->mAtom_Size;
+    } else if (atom->IsBigBook()) {
+      morkBigBookAtom* bigBook = (morkBigBookAtom*)atom;
+      source = bigBook->mBigBookAtom_Body;
+      fill = bigBook->mBigBookAtom_Size;
+      form = bigBook->mBigBookAtom_Form;
+    } else if (atom->IsWeeAnon()) {
+      morkWeeAnonAtom* weeAnon = (morkWeeAnonAtom*)atom;
+      source = weeAnon->mWeeAnonAtom_Body;
+      fill = weeAnon->mAtom_Size;
+    } else if (atom->IsBigAnon()) {
+      morkBigAnonAtom* bigAnon = (morkBigAnonAtom*)atom;
+      source = bigAnon->mBigAnonAtom_Body;
+      fill = bigAnon->mBigAnonAtom_Size;
+      form = bigAnon->mBigAnonAtom_Form;
+    }
+  }
+
+  if (source && fill)  // have an atom with nonempty content?
+  {
+    // if we have too many bytes, and yarn seems growable:
+    if (fill > outYarn->mYarn_Size && outYarn->mYarn_Grow)  // try grow?
+      (*outYarn->mYarn_Grow)(outYarn, (mdb_size)fill);  // request bigger
+
+    mdb_size size = outYarn->mYarn_Size;  // max dest size
+    if (fill > size)  // too much atom content?
+    {
+      outYarn->mYarn_More = fill - size;  // extra atom bytes omitted
+      fill = size;  // copy no more bytes than size of yarn buffer
+    }
+    void* dest = outYarn->mYarn_Buf;  // where bytes are going
+    if (!dest)  // nil destination address buffer?
+      fill = 0;  // we can't write any content at all
+
+    if (fill)  // anything to copy?
+      MORK_MEMCPY(dest, source, fill);  // copy fill bytes to yarn
+
+    outYarn->mYarn_Fill = fill;  // tell yarn size of copied content
+  } else  // no content to put into the yarn
+  {
+    outYarn->mYarn_Fill = 0;  // tell yarn that atom has no bytes
+  }
+  outYarn->mYarn_Form = form;  // always update the form slot
+
+  return (source != 0);
+}
+
+/* static */
+mork_bool morkAtom::AliasYarn(const morkAtom* atom, mdbYarn* outYarn) {
+  // Point the yarn directly at the atom's internal storage — no copy is
+  // made, so the yarn is only valid while the atom lives. Unknown kinds
+  // and nil atoms produce an empty (nil-buffer) yarn. Returns true when
+  // the atom was one of the four content-bearing kinds.
+  outYarn->mYarn_More = 0;
+  outYarn->mYarn_Form = 0;
+
+  if (atom) {
+    if (atom->IsWeeBook()) {
+      morkWeeBookAtom* weeBook = (morkWeeBookAtom*)atom;
+      outYarn->mYarn_Buf = weeBook->mWeeBookAtom_Body;
+      outYarn->mYarn_Fill = weeBook->mAtom_Size;
+      outYarn->mYarn_Size = weeBook->mAtom_Size;
+    } else if (atom->IsBigBook()) {
+      morkBigBookAtom* bigBook = (morkBigBookAtom*)atom;
+      outYarn->mYarn_Buf = bigBook->mBigBookAtom_Body;
+      outYarn->mYarn_Fill = bigBook->mBigBookAtom_Size;
+      outYarn->mYarn_Size = bigBook->mBigBookAtom_Size;
+      outYarn->mYarn_Form = bigBook->mBigBookAtom_Form;
+    } else if (atom->IsWeeAnon()) {
+      morkWeeAnonAtom* weeAnon = (morkWeeAnonAtom*)atom;
+      outYarn->mYarn_Buf = weeAnon->mWeeAnonAtom_Body;
+      outYarn->mYarn_Fill = weeAnon->mAtom_Size;
+      outYarn->mYarn_Size = weeAnon->mAtom_Size;
+    } else if (atom->IsBigAnon()) {
+      morkBigAnonAtom* bigAnon = (morkBigAnonAtom*)atom;
+      outYarn->mYarn_Buf = bigAnon->mBigAnonAtom_Body;
+      outYarn->mYarn_Fill = bigAnon->mBigAnonAtom_Size;
+      outYarn->mYarn_Size = bigAnon->mBigAnonAtom_Size;
+      outYarn->mYarn_Form = bigAnon->mBigAnonAtom_Form;
+    } else
+      atom = 0;  // show desire to put empty content in yarn
+  }
+
+  if (!atom)  // empty content for yarn?
+  {
+    outYarn->mYarn_Buf = 0;
+    outYarn->mYarn_Fill = 0;
+    outYarn->mYarn_Size = 0;
+    // outYarn->mYarn_Grow = 0; // please don't modify the Grow slot
+  }
+  return (atom != 0);
+}
+
+// Return the atom ID when this is any book atom kind, else zero.
+mork_aid morkAtom::GetBookAtomAid() const  // zero or book atom's ID
+{
+  return (this->IsBook()) ? ((morkBookAtom*)this)->mBookAtom_Id : 0;
+}
+
+// Return the scope of the atom space owning this book atom, or zero when
+// this is not a book atom; a non-atom-space owner is reported as an error.
+mork_scope morkAtom::GetBookAtomSpaceScope(
+    morkEnv* ev) const  // zero or book's space's scope
+{
+  mork_scope outScope = 0;
+  if (this->IsBook()) {
+    const morkBookAtom* bookAtom = (const morkBookAtom*)this;
+    morkAtomSpace* space = bookAtom->mBookAtom_Space;
+    if (space->IsAtomSpace())
+      outScope = space->SpaceScope();
+    else
+      space->NonAtomSpaceTypeError(ev);
+  }
+
+  return outScope;
+}
+
+// Pin the cell-use count at the "forever" sentinel (0xFF), which exceeds
+// kMaxCellUses so CutCellUse() will never decrement it again.
+void morkAtom::MakeCellUseForever(morkEnv* ev) {
+  MORK_USED_1(ev);
+  mAtom_CellUses = morkAtom_kForeverCellUses;
+}
+
+// Increment the saturating cell-use count; once it reaches kMaxCellUses
+// (0x7F) it freezes there. Returns the (possibly unchanged) new count.
+mork_u1 morkAtom::AddCellUse(morkEnv* ev) {
+  MORK_USED_1(ev);
+  if (mAtom_CellUses < morkAtom_kMaxCellUses)  // not already maxed out?
+    ++mAtom_CellUses;
+
+  return mAtom_CellUses;
+}
+
+// Decrement the saturating cell-use count. Counts frozen at or above
+// kMaxCellUses (including the "forever" sentinel) are never decremented;
+// an underflow attempt only produces a warning. Returns the new count.
+mork_u1 morkAtom::CutCellUse(morkEnv* ev) {
+  if (mAtom_CellUses)  // any outstanding uses to cut?
+  {
+    if (mAtom_CellUses < morkAtom_kMaxCellUses)  // not frozen at max?
+      --mAtom_CellUses;
+  } else
+    this->CellUsesUnderflowWarning(ev);
+
+  return mAtom_CellUses;
+}
+
+// Centralized warning/error reporters recorded on the environment.
+/*static*/ void morkAtom::CellUsesUnderflowWarning(morkEnv* ev) {
+  ev->NewWarning("mAtom_CellUses underflow");
+}
+
+/*static*/ void morkAtom::BadAtomKindError(morkEnv* ev) {
+  ev->NewError("bad mAtom_Kind");
+}
+
+/*static*/ void morkAtom::ZeroAidError(morkEnv* ev) {
+  ev->NewError("zero atom ID");
+}
+
+/*static*/ void morkAtom::AtomSizeOverflowError(morkEnv* ev) {
+  ev->NewError("atom mAtom_Size overflow");
+}
+
+// Initialize this atom as a row-oid reference: no body bytes (size 0),
+// just the copied object identity.
+void morkOidAtom::InitRowOidAtom(morkEnv* ev, const mdbOid& inOid) {
+  MORK_USED_1(ev);
+  mAtom_CellUses = 0;
+  mAtom_Kind = morkAtom_kKindRowOid;
+  mAtom_Change = morkChange_kNil;
+  mAtom_Size = 0;
+  mOidAtom_Oid = inOid;  // bitwise copy
+}
+
+// Initialize this atom as a table-oid reference; identical to
+// InitRowOidAtom() except for the kind tag.
+void morkOidAtom::InitTableOidAtom(morkEnv* ev, const mdbOid& inOid) {
+  MORK_USED_1(ev);
+  mAtom_CellUses = 0;
+  mAtom_Kind = morkAtom_kKindTableOid;
+  mAtom_Change = morkChange_kNil;
+  mAtom_Size = 0;
+  mOidAtom_Oid = inOid;  // bitwise copy
+}
+
+// Initialize a small (<= 255 byte) anonymous atom by copying inBuf's
+// bytes into the inline body and NUL-terminating. mAtom_Kind is cleared
+// first so a size-overflow failure leaves the atom marked invalid.
+void morkWeeAnonAtom::InitWeeAnonAtom(morkEnv* ev, const morkBuf& inBuf) {
+  mAtom_Kind = 0;
+  mAtom_Change = morkChange_kNil;
+  if (inBuf.mBuf_Fill <= morkAtom_kMaxByteSize) {
+    mAtom_CellUses = 0;
+    mAtom_Kind = morkAtom_kKindWeeAnon;
+    mork_size size = inBuf.mBuf_Fill;
+    mAtom_Size = (mork_u1)size;
+    if (size && inBuf.mBuf_Body)
+      MORK_MEMCPY(mWeeAnonAtom_Body, inBuf.mBuf_Body, size);
+
+    mWeeAnonAtom_Body[size] = 0;
+  } else
+    this->AtomSizeOverflowError(ev);
+}
+
+// Initialize a large anonymous atom (with a charset form) by copying
+// inBuf's bytes into the body and NUL-terminating. Unlike the Wee
+// variant there is no size-limit check; the write of size+1 bytes
+// assumes the allocator sized mBigAnonAtom_Body for this buffer —
+// NOTE(review): TODO confirm callers always allocate with room for the
+// terminator.
+void morkBigAnonAtom::InitBigAnonAtom(morkEnv* ev, const morkBuf& inBuf,
+                                      mork_cscode inForm) {
+  MORK_USED_1(ev);
+  mAtom_CellUses = 0;
+  mAtom_Kind = morkAtom_kKindBigAnon;
+  mAtom_Change = morkChange_kNil;
+  mAtom_Size = 0;
+  mBigAnonAtom_Form = inForm;
+  mork_size size = inBuf.mBuf_Fill;
+  mBigAnonAtom_Size = size;
+  if (size && inBuf.mBuf_Body)
+    MORK_MEMCPY(mBigAnonAtom_Body, inBuf.mBuf_Body, size);
+
+  mBigAnonAtom_Body[size] = 0;
+}
+
+// Report that an atom expected to be a book atom has another kind.
+/*static*/ void morkBookAtom::NonBookAtomTypeError(morkEnv* ev) {
+  ev->NewError("non morkBookAtom");
+}
+
+mork_u4 morkBookAtom::HashFormAndBody(morkEnv* ev) const {
+  // This hash is obviously a variation of the dragon book string hash.
+  // (I won't bother to explain or rationalize this usage for you.)
+  //
+  // NOTE(review): despite the name, only the BODY bytes are folded into
+  // the hash — the form (charset) code is never mixed in. Atoms equal in
+  // body but differing in form therefore hash identically; that is safe
+  // for hash-map use (EqualFormAndBody() still distinguishes them) but
+  // worth knowing. Non-book kinds report an error and hash to zero.
+
+  mork_u4 outHash = 0;     // hash value returned
+  unsigned char c;         // next character
+  const mork_u1* body;     // body of bytes to hash
+  mork_size size = 0;      // the number of bytes to hash
+
+  if (this->IsWeeBook()) {
+    size = mAtom_Size;
+    body = ((const morkWeeBookAtom*)this)->mWeeBookAtom_Body;
+  } else if (this->IsBigBook()) {
+    size = ((const morkBigBookAtom*)this)->mBigBookAtom_Size;
+    body = ((const morkBigBookAtom*)this)->mBigBookAtom_Body;
+  } else if (this->IsFarBook()) {
+    size = ((const morkFarBookAtom*)this)->mFarBookAtom_Size;
+    body = ((const morkFarBookAtom*)this)->mFarBookAtom_Body;
+  } else {
+    this->NonBookAtomTypeError(ev);
+    return 0;
+  }
+
+  const mork_u1* end = body + size;
+  while (body < end) {
+    c = *body++;
+    outHash <<= 4;
+    outHash += c;
+    mork_u4 top = outHash & 0xF0000000L;  // top four bits
+    if (top)  // any of high four bits equal to one?
+    {
+      outHash ^= (top >> 24);  // fold down high bits
+      outHash ^= top;          // zero top four bits
+    }
+  }
+
+  return outHash;
+}
+
+// Compare this book atom with inAtom: equal iff body sizes match, the
+// bytes match, and — only when the atoms are nonempty — the form codes
+// match too (form is irrelevant for empty content). Either operand being
+// a non-book kind is reported as an error and compares unequal.
+mork_bool morkBookAtom::EqualFormAndBody(morkEnv* ev,
+                                         const morkBookAtom* inAtom) const {
+  mork_bool outEqual = morkBool_kFalse;
+
+  const mork_u1* body = 0;  // body of inAtom bytes to compare
+  mork_size size;           // the number of inAtom bytes to compare
+  mork_cscode form;         // nominal charset for ioAtom
+
+  if (inAtom->IsWeeBook()) {
+    size = inAtom->mAtom_Size;
+    body = ((const morkWeeBookAtom*)inAtom)->mWeeBookAtom_Body;
+    form = 0;
+  } else if (inAtom->IsBigBook()) {
+    size = ((const morkBigBookAtom*)inAtom)->mBigBookAtom_Size;
+    body = ((const morkBigBookAtom*)inAtom)->mBigBookAtom_Body;
+    form = ((const morkBigBookAtom*)inAtom)->mBigBookAtom_Form;
+  } else if (inAtom->IsFarBook()) {
+    size = ((const morkFarBookAtom*)inAtom)->mFarBookAtom_Size;
+    body = ((const morkFarBookAtom*)inAtom)->mFarBookAtom_Body;
+    form = ((const morkFarBookAtom*)inAtom)->mFarBookAtom_Form;
+  } else {
+    inAtom->NonBookAtomTypeError(ev);
+    return morkBool_kFalse;
+  }
+
+  const mork_u1* thisBody = 0;  // body of bytes in this to compare
+  mork_size thisSize;           // the number of bytes in this to compare
+  mork_cscode thisForm;         // nominal charset for this atom
+
+  if (this->IsWeeBook()) {
+    thisSize = mAtom_Size;
+    thisBody = ((const morkWeeBookAtom*)this)->mWeeBookAtom_Body;
+    thisForm = 0;
+  } else if (this->IsBigBook()) {
+    thisSize = ((const morkBigBookAtom*)this)->mBigBookAtom_Size;
+    thisBody = ((const morkBigBookAtom*)this)->mBigBookAtom_Body;
+    thisForm = ((const morkBigBookAtom*)this)->mBigBookAtom_Form;
+  } else if (this->IsFarBook()) {
+    thisSize = ((const morkFarBookAtom*)this)->mFarBookAtom_Size;
+    thisBody = ((const morkFarBookAtom*)this)->mFarBookAtom_Body;
+    thisForm = ((const morkFarBookAtom*)this)->mFarBookAtom_Form;
+  } else {
+    this->NonBookAtomTypeError(ev);
+    return morkBool_kFalse;
+  }
+
+  // if atoms are empty, form is irrelevant
+  if (body && thisBody && size == thisSize && (!size || form == thisForm))
+    outEqual = (MORK_MEMCMP(body, thisBody, size) == 0);
+
+  return outEqual;
+}
+
+// Detach this atom from its owning atom space: clear the back pointer
+// first, then remove the atom from both the body-keyed and aid-keyed
+// maps. A nil space pointer is reported as an error.
+void morkBookAtom::CutBookAtomFromSpace(morkEnv* ev) {
+  morkAtomSpace* space = mBookAtom_Space;
+  if (space) {
+    mBookAtom_Space = 0;
+    space->mAtomSpace_AtomBodies.CutAtom(ev, this);
+    space->mAtomSpace_AtomAids.CutAtom(ev, this);
+  } else
+    ev->NilPointerError();
+}
+
+// Construct an empty wee book atom carrying only an atom ID: zero-length
+// body (just the NUL terminator) and no owning space yet.
+morkWeeBookAtom::morkWeeBookAtom(mork_aid inAid) {
+  mAtom_Kind = morkAtom_kKindWeeBook;
+  mAtom_CellUses = 0;
+  mAtom_Change = morkChange_kNil;
+  mAtom_Size = 0;
+
+  mBookAtom_Space = 0;
+  mBookAtom_Id = inAid;
+
+  mWeeBookAtom_Body[0] = 0;
+}
+
+// Initialize a small (<= 255 byte) book atom: requires a nonzero owning
+// space and atom ID, copies inBuf's bytes into the inline body and
+// NUL-terminates. mAtom_Kind is cleared up front so any failure path
+// (overflow, zero aid, nil space) leaves the atom marked invalid.
+void morkWeeBookAtom::InitWeeBookAtom(morkEnv* ev, const morkBuf& inBuf,
+                                      morkAtomSpace* ioSpace, mork_aid inAid) {
+  mAtom_Kind = 0;
+  mAtom_Change = morkChange_kNil;
+  if (ioSpace) {
+    if (inAid) {
+      if (inBuf.mBuf_Fill <= morkAtom_kMaxByteSize) {
+        mAtom_CellUses = 0;
+        mAtom_Kind = morkAtom_kKindWeeBook;
+        mBookAtom_Space = ioSpace;
+        mBookAtom_Id = inAid;
+        mork_size size = inBuf.mBuf_Fill;
+        mAtom_Size = (mork_u1)size;
+        if (size && inBuf.mBuf_Body)
+          MORK_MEMCPY(mWeeBookAtom_Body, inBuf.mBuf_Body, size);
+
+        mWeeBookAtom_Body[size] = 0;
+      } else
+        this->AtomSizeOverflowError(ev);
+    } else
+      this->ZeroAidError(ev);
+  } else
+    ev->NilPointerError();
+}
+
+// Initialize a large book atom (with a charset form): requires a nonzero
+// owning space and atom ID, copies inBuf's bytes into the body and
+// NUL-terminates. NOTE(review): unlike the Wee variant there is no
+// size-limit check; the size+1 byte write assumes mBigBookAtom_Body was
+// allocated large enough for this buffer — TODO confirm at call sites.
+void morkBigBookAtom::InitBigBookAtom(morkEnv* ev, const morkBuf& inBuf,
+                                      mork_cscode inForm,
+                                      morkAtomSpace* ioSpace, mork_aid inAid) {
+  mAtom_Kind = 0;
+  mAtom_Change = morkChange_kNil;
+  if (ioSpace) {
+    if (inAid) {
+      mAtom_CellUses = 0;
+      mAtom_Kind = morkAtom_kKindBigBook;
+      mAtom_Size = 0;
+      mBookAtom_Space = ioSpace;
+      mBookAtom_Id = inAid;
+      mBigBookAtom_Form = inForm;
+      mork_size size = inBuf.mBuf_Fill;
+      mBigBookAtom_Size = size;
+      if (size && inBuf.mBuf_Body)
+        MORK_MEMCPY(mBigBookAtom_Body, inBuf.mBuf_Body, size);
+
+      mBigBookAtom_Body[size] = 0;
+    } else
+      this->ZeroAidError(ev);
+  } else
+    ev->NilPointerError();
+}
+
+// Initialize a "far" book atom. Unlike the Wee/Big variants this does
+// NOT copy the content: mFarBookAtom_Body is pointed directly at
+// inBuf.mBuf_Body, so the caller must keep that buffer alive (and
+// unchanged) for the lifetime of this atom.
+void morkFarBookAtom::InitFarBookAtom(morkEnv* ev, const morkBuf& inBuf,
+                                      mork_cscode inForm,
+                                      morkAtomSpace* ioSpace, mork_aid inAid) {
+  mAtom_Kind = 0;
+  mAtom_Change = morkChange_kNil;
+  if (ioSpace) {
+    if (inAid) {
+      mAtom_CellUses = 0;
+      mAtom_Kind = morkAtom_kKindFarBook;
+      mAtom_Size = 0;
+      mBookAtom_Space = ioSpace;
+      mBookAtom_Id = inAid;
+      mFarBookAtom_Form = inForm;
+      mFarBookAtom_Size = inBuf.mBuf_Fill;
+      mFarBookAtom_Body = (mork_u1*)inBuf.mBuf_Body;  // alias, no copy
+    } else
+      this->ZeroAidError(ev);
+  } else
+    ev->NilPointerError();
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkAtom.h b/comm/mailnews/db/mork/morkAtom.h
new file mode 100644
index 0000000000..4313a2e8fa
--- /dev/null
+++ b/comm/mailnews/db/mork/morkAtom.h
@@ -0,0 +1,362 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKATOM_
+#define _MORKATOM_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkAtom_kMaxByteSize 255 /* max for 8-bit integer */
+#define morkAtom_kForeverCellUses 0x0FF /* max for 8-bit integer */
+#define morkAtom_kMaxCellUses 0x07F /* max for 7-bit integer */
+
+#define morkAtom_kKindWeeAnon 'a' /* means morkWeeAnonAtom subclass */
+#define morkAtom_kKindBigAnon 'A' /* means morkBigAnonAtom subclass */
+#define morkAtom_kKindWeeBook 'b' /* means morkWeeBookAtom subclass */
+#define morkAtom_kKindBigBook 'B' /* means morkBigBookAtom subclass */
+#define morkAtom_kKindFarBook 'f' /* means morkFarBookAtom subclass */
+#define morkAtom_kKindRowOid 'r' /* means morkOidAtom subclass */
+#define morkAtom_kKindTableOid 't' /* means morkOidAtom subclass */
+
+/*| Atom: .
+|*/
+class morkAtom { //
+
+ public:
+ mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ mork_change mAtom_Change; // how has this atom been changed?
+ mork_u1 mAtom_Size; // only for atoms smaller than 256 bytes
+
+ public:
+ morkAtom(mork_aid inAid, mork_u1 inKind);
+
+ mork_bool IsWeeAnon() const { return mAtom_Kind == morkAtom_kKindWeeAnon; }
+ mork_bool IsBigAnon() const { return mAtom_Kind == morkAtom_kKindBigAnon; }
+ mork_bool IsWeeBook() const { return mAtom_Kind == morkAtom_kKindWeeBook; }
+ mork_bool IsBigBook() const { return mAtom_Kind == morkAtom_kKindBigBook; }
+ mork_bool IsFarBook() const { return mAtom_Kind == morkAtom_kKindFarBook; }
+ mork_bool IsRowOid() const { return mAtom_Kind == morkAtom_kKindRowOid; }
+ mork_bool IsTableOid() const { return mAtom_Kind == morkAtom_kKindTableOid; }
+
+ mork_bool IsBook() const { return this->IsWeeBook() || this->IsBigBook(); }
+
+ public: // clean vs dirty
+ void SetAtomClean() { mAtom_Change = morkChange_kNil; }
+ void SetAtomDirty() { mAtom_Change = morkChange_kAdd; }
+
+ mork_bool IsAtomClean() const { return mAtom_Change == morkChange_kNil; }
+ mork_bool IsAtomDirty() const { return mAtom_Change == morkChange_kAdd; }
+
+ public: // atom space scope if IsBook() is true, or else zero:
+ mork_scope GetBookAtomSpaceScope(morkEnv* ev) const;
+ // zero or book's space's scope
+
+ mork_aid GetBookAtomAid() const;
+ // zero or book atom's ID
+
+ public: // empty construction does nothing
+ morkAtom() {}
+
+ public: // one-byte refcounting, freezing at maximum
+ void MakeCellUseForever(morkEnv* ev);
+ mork_u1 AddCellUse(morkEnv* ev);
+ mork_u1 CutCellUse(morkEnv* ev);
+
+ mork_bool IsCellUseForever() const {
+ return mAtom_CellUses == morkAtom_kForeverCellUses;
+ }
+
+ private: // warnings
+ static void CellUsesUnderflowWarning(morkEnv* ev);
+
+ public: // errors
+ static void BadAtomKindError(morkEnv* ev);
+ static void ZeroAidError(morkEnv* ev);
+ static void AtomSizeOverflowError(morkEnv* ev);
+
+ public: // yarns
+ static mork_bool AliasYarn(const morkAtom* atom, mdbYarn* outYarn);
+ static mork_bool GetYarn(const morkAtom* atom, mdbYarn* outYarn);
+
+ private: // copying is not allowed
+ morkAtom(const morkAtom& other);
+ morkAtom& operator=(const morkAtom& other);
+};
+
+/*| OidAtom: an atom that references a row or table by identity.
+|*/
+class morkOidAtom : public morkAtom { //
+
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // NOT USED IN "BIG" format atoms
+
+ public:
+ mdbOid mOidAtom_Oid; // identity of referenced object
+
+ public: // empty construction does nothing
+ morkOidAtom() {}
+ void InitRowOidAtom(morkEnv* ev, const mdbOid& inOid);
+ void InitTableOidAtom(morkEnv* ev, const mdbOid& inOid);
+
+ private: // copying is not allowed
+ morkOidAtom(const morkOidAtom& other);
+ morkOidAtom& operator=(const morkOidAtom& other);
+};
+
+/*| WeeAnonAtom: an atom whose content immediately follows morkAtom slots
+**| in an inline fashion, so that morkWeeAnonAtom contains both leading
+**| atom slots and then the content bytes without further overhead. Note
+**| that charset encoding is not indicated, so zero is implied for Latin1.
+**| (Non-Latin1 content must be stored in a morkBigAnonAtom with a charset.)
+**|
+**|| An anon (anonymous) atom has no identity, with no associated bookkeeping
+**| for lookup needed for sharing like a book atom.
+**|
+**|| A wee anon atom is immediate but not shared with any other users of this
+**| atom, so no bookkeeping for sharing is needed. This means the atom has
+**| no ID, because the atom has no identity other than this immediate content,
+**| and no hash table is needed to look up this particular atom. This also
+**| applies to the larger format morkBigAnonAtom, which has more slots.
+|*/
+class morkWeeAnonAtom : public morkAtom { //
+
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // only for atoms smaller than 256 bytes
+
+ public:
+ mork_u1 mWeeAnonAtom_Body[1]; // 1st byte of immediate content vector
+
+ public: // empty construction does nothing
+ morkWeeAnonAtom() {}
+ void InitWeeAnonAtom(morkEnv* ev, const morkBuf& inBuf);
+
+ // allow extra trailing byte for a null byte:
+ static mork_size SizeForFill(mork_fill inFill) {
+ return sizeof(morkWeeAnonAtom) + inFill;
+ }
+
+ private: // copying is not allowed
+ morkWeeAnonAtom(const morkWeeAnonAtom& other);
+ morkWeeAnonAtom& operator=(const morkWeeAnonAtom& other);
+};
+
+/*| BigAnonAtom: another immediate atom that cannot be encoded as the smaller
+**| morkWeeAnonAtom format because either the size is too great, and/or the
+**| charset is not the default zero for Latin1 and must be explicitly noted.
+**|
+**|| An anon (anonymous) atom has no identity, with no associated bookkeeping
+**| for lookup needed for sharing like a book atom.
+|*/
+class morkBigAnonAtom : public morkAtom { //
+
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // NOT USED IN "BIG" format atoms
+
+ public:
+ mork_cscode mBigAnonAtom_Form; // charset format encoding
+ mork_size mBigAnonAtom_Size; // size of content vector
+ mork_u1 mBigAnonAtom_Body[1]; // 1st byte of immed content vector
+
+ public: // empty construction does nothing
+ morkBigAnonAtom() {}
+ void InitBigAnonAtom(morkEnv* ev, const morkBuf& inBuf, mork_cscode inForm);
+
+ // allow extra trailing byte for a null byte:
+ static mork_size SizeForFill(mork_fill inFill) {
+ return sizeof(morkBigAnonAtom) + inFill;
+ }
+
+ private: // copying is not allowed
+ morkBigAnonAtom(const morkBigAnonAtom& other);
+ morkBigAnonAtom& operator=(const morkBigAnonAtom& other);
+};
+
+#define morkBookAtom_kMaxBodySize 1024 /* if larger, cannot be shared */
+
+/*| BookAtom: the common subportion of wee book atoms and big book atoms that
+**| includes the atom ID and the pointer to the space referencing this atom
+**| through a hash table.
+|*/
+class morkBookAtom : public morkAtom { //
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // only for atoms smaller than 256 bytes
+
+ public:
+ morkAtomSpace*
+ mBookAtom_Space; // mBookAtom_Space->SpaceScope() is atom scope
+ mork_aid mBookAtom_Id; // identity token for this shared atom
+
+ public: // empty construction does nothing
+ morkBookAtom() {}
+
+ static void NonBookAtomTypeError(morkEnv* ev);
+
+ public: // Hash() and Equal() for atom ID maps are same for all subclasses:
+ mork_u4 HashAid() const { return mBookAtom_Id; }
+ mork_bool EqualAid(const morkBookAtom* inAtom) const {
+ return (mBookAtom_Id == inAtom->mBookAtom_Id);
+ }
+
+ public: // Hash() and Equal() for atom body maps know about subclasses:
+ // YOU CANNOT SUBCLASS morkBookAtom WITHOUT FIXING Hash and Equal METHODS:
+
+ mork_u4 HashFormAndBody(morkEnv* ev) const;
+ mork_bool EqualFormAndBody(morkEnv* ev, const morkBookAtom* inAtom) const;
+
+ public: // separation from containing space
+ void CutBookAtomFromSpace(morkEnv* ev);
+
+ private: // copying is not allowed
+ morkBookAtom(const morkBookAtom& other);
+ morkBookAtom& operator=(const morkBookAtom& other);
+};
+
+/*| FarBookAtom: this alternative format for book atoms was introduced
+**| in May 2000 in order to support finding atoms in hash tables without
+**| first copying the strings from original parsing buffers into a new
+**| atom format. This was consuming too much time. However, we can
+**| use morkFarBookAtom to stage a hash table query, as long as we then
+**| fix HashFormAndBody() and EqualFormAndBody() to use morkFarBookAtom
+**| correctly.
+**|
+**|| Note we do NOT intend that instances of morkFarBookAtom will ever
+**| be installed in hash tables, because this is not space efficient.
+**| We only expect to create temp instances for table lookups.
+|*/
+class morkFarBookAtom : public morkBookAtom { //
+
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // NOT USED IN "BIG" format atoms
+
+ // morkAtomSpace* mBookAtom_Space; // mBookAtom_Space->SpaceScope() is scope
+ // mork_aid mBookAtom_Id; // identity token for this shared atom
+
+ public:
+ mork_cscode mFarBookAtom_Form; // charset format encoding
+ mork_size mFarBookAtom_Size; // size of content vector
+ mork_u1* mFarBookAtom_Body; // bytes are elsewhere, out of line
+
+ public: // empty construction does nothing
+ morkFarBookAtom() {}
+ void InitFarBookAtom(morkEnv* ev, const morkBuf& inBuf, mork_cscode inForm,
+ morkAtomSpace* ioSpace, mork_aid inAid);
+
+ private: // copying is not allowed
+ morkFarBookAtom(const morkFarBookAtom& other);
+ morkFarBookAtom& operator=(const morkFarBookAtom& other);
+};
+
+/*| WeeBookAtom: .
+|*/
+class morkWeeBookAtom : public morkBookAtom { //
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // only for atoms smaller than 256 bytes
+
+ // morkAtomSpace* mBookAtom_Space; // mBookAtom_Space->SpaceScope() is scope
+ // mork_aid mBookAtom_Id; // identity token for this shared atom
+
+ public:
+ mork_u1 mWeeBookAtom_Body[1]; // 1st byte of immed content vector
+
+ public: // empty construction does nothing
+ morkWeeBookAtom() {}
+ explicit morkWeeBookAtom(mork_aid inAid);
+
+ void InitWeeBookAtom(morkEnv* ev, const morkBuf& inBuf,
+ morkAtomSpace* ioSpace, mork_aid inAid);
+
+ // allow extra trailing byte for a null byte:
+ static mork_size SizeForFill(mork_fill inFill) {
+ return sizeof(morkWeeBookAtom) + inFill;
+ }
+
+ private: // copying is not allowed
+ morkWeeBookAtom(const morkWeeBookAtom& other);
+ morkWeeBookAtom& operator=(const morkWeeBookAtom& other);
+};
+
+/*| BigBookAtom: .
+|*/
+class morkBigBookAtom : public morkBookAtom { //
+
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // NOT USED IN "BIG" format atoms
+
+ // morkAtomSpace* mBookAtom_Space; // mBookAtom_Space->SpaceScope() is scope
+ // mork_aid mBookAtom_Id; // identity token for this shared atom
+
+ public:
+ mork_cscode mBigBookAtom_Form; // charset format encoding
+ mork_size mBigBookAtom_Size; // size of content vector
+ mork_u1 mBigBookAtom_Body[1]; // 1st byte of immed content vector
+
+ public: // empty construction does nothing
+ morkBigBookAtom() {}
+ void InitBigBookAtom(morkEnv* ev, const morkBuf& inBuf, mork_cscode inForm,
+ morkAtomSpace* ioSpace, mork_aid inAid);
+
+ // allow extra trailing byte for a null byte:
+ static mork_size SizeForFill(mork_fill inFill) {
+ return sizeof(morkBigBookAtom) + inFill;
+ }
+
+ private: // copying is not allowed
+ morkBigBookAtom(const morkBigBookAtom& other);
+ morkBigBookAtom& operator=(const morkBigBookAtom& other);
+};
+
+/*| MaxBookAtom: .
+|*/
+class morkMaxBookAtom : public morkBigBookAtom { //
+
+ // mork_u1 mAtom_Kind; // identifies a specific atom subclass
+ // mork_u1 mAtom_CellUses; // number of persistent uses in a cell
+ // mork_change mAtom_Change; // how has this atom been changed?
+ // mork_u1 mAtom_Size; // NOT USED IN "BIG" format atoms
+
+ // morkAtomSpace* mBookAtom_Space; // mBookAtom_Space->SpaceScope() is scope
+ // mork_aid mBookAtom_Id; // identity token for this shared atom
+
+ // mork_cscode mBigBookAtom_Form; // charset format encoding
+ // mork_size mBigBookAtom_Size; // size of content vector
+ // mork_u1 mBigBookAtom_Body[ 1 ]; // 1st byte of immed content vector
+
+ public:
+ mork_u1 mMaxBookAtom_Body[morkBookAtom_kMaxBodySize + 3]; // max bytes
+
+ public: // empty construction does nothing
+ morkMaxBookAtom() {}
+ void InitMaxBookAtom(morkEnv* ev, const morkBuf& inBuf, mork_cscode inForm,
+ morkAtomSpace* ioSpace, mork_aid inAid) {
+ this->InitBigBookAtom(ev, inBuf, inForm, ioSpace, inAid);
+ }
+
+ private: // copying is not allowed
+ morkMaxBookAtom(const morkMaxBookAtom& other);
+ morkMaxBookAtom& operator=(const morkMaxBookAtom& other);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKATOM_ */
diff --git a/comm/mailnews/db/mork/morkAtomMap.cpp b/comm/mailnews/db/mork/morkAtomMap.cpp
new file mode 100644
index 0000000000..3ae3422b5a
--- /dev/null
+++ b/comm/mailnews/db/mork/morkAtomMap.cpp
@@ -0,0 +1,378 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKATOMMAP_
+# include "morkAtomMap.h"
+#endif
+
+#ifndef _MORKATOM_
+# include "morkAtom.h"
+#endif
+
+#ifndef _MORKINTMAP_
+# include "morkIntMap.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkAtomAidMap::CloseMorkNode(
+ morkEnv* ev) // CloseAtomAidMap() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseAtomAidMap(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkAtomAidMap::~morkAtomAidMap() // assert CloseAtomAidMap() executed earlier
+{
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkAtomAidMap::morkAtomAidMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+#ifdef MORK_ENABLE_PROBE_MAPS
+ : morkProbeMap(ev, inUsage, ioHeap,
+ /*inKeySize*/ sizeof(morkBookAtom*), /*inValSize*/ 0,
+ ioSlotHeap, morkAtomAidMap_kStartSlotCount,
+ /*inZeroIsClearKey*/ morkBool_kTrue)
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ : morkMap(ev, inUsage, ioHeap,
+ /*inKeySize*/ sizeof(morkBookAtom*), /*inValSize*/ 0,
+ morkAtomAidMap_kStartSlotCount, ioSlotHeap,
+ /*inHoldChanges*/ morkBool_kFalse)
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+{
+ if (ev->Good()) mNode_Derived = morkDerived_kAtomAidMap;
+}
+
+/*public non-poly*/ void morkAtomAidMap::CloseAtomAidMap(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+#ifdef MORK_ENABLE_PROBE_MAPS
+ this->CloseProbeMap(ev);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->CloseMap(ev);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+
+/*virtual*/ mork_test // hit(a,b) implies hash(a) == hash(b)
+morkAtomAidMap::MapTest(morkEnv* ev, const void* inMapKey,
+ const void* inAppKey) const {
+ MORK_USED_1(ev);
+ const morkBookAtom* key = *(const morkBookAtom**)inMapKey;
+ if (key) {
+ mork_bool hit = key->EqualAid(*(const morkBookAtom**)inAppKey);
+ return (hit) ? morkTest_kHit : morkTest_kMiss;
+ } else
+ return morkTest_kVoid;
+}
+
+/*virtual*/ mork_u4 // hit(a,b) implies hash(a) == hash(b)
+morkAtomAidMap::MapHash(morkEnv* ev, const void* inAppKey) const {
+ const morkBookAtom* key = *(const morkBookAtom**)inAppKey;
+ if (key)
+ return key->HashAid();
+ else {
+ ev->NilPointerWarning();
+ return 0;
+ }
+}
+
+/*virtual*/ mork_u4 morkAtomAidMap::ProbeMapHashMapKey(
+ morkEnv* ev, const void* inMapKey) const {
+ const morkBookAtom* key = *(const morkBookAtom**)inMapKey;
+ if (key)
+ return key->HashAid();
+ else {
+ ev->NilPointerWarning();
+ return 0;
+ }
+}
+#else /*MORK_ENABLE_PROBE_MAPS*/
+// { ===== begin morkMap poly interface =====
+/*virtual*/ mork_bool //
+morkAtomAidMap::Equal(morkEnv* ev, const void* inKeyA,
+ const void* inKeyB) const {
+ MORK_USED_1(ev);
+ return (*(const morkBookAtom**)inKeyA)
+ ->EqualAid(*(const morkBookAtom**)inKeyB);
+}
+
+/*virtual*/ mork_u4 //
+morkAtomAidMap::Hash(morkEnv* ev, const void* inKey) const {
+ MORK_USED_1(ev);
+ return (*(const morkBookAtom**)inKey)->HashAid();
+}
+// } ===== end morkMap poly interface =====
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+mork_bool morkAtomAidMap::AddAtom(morkEnv* ev, morkBookAtom* ioAtom) {
+ if (ev->Good()) {
+#ifdef MORK_ENABLE_PROBE_MAPS
+ this->MapAtPut(ev, &ioAtom, /*val*/ (void*)0,
+ /*key*/ (void*)0, /*val*/ (void*)0);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->Put(ev, &ioAtom, /*val*/ (void*)0,
+ /*key*/ (void*)0, /*val*/ (void*)0, (mork_change**)0);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+ }
+ return ev->Good();
+}
+
+morkBookAtom* morkAtomAidMap::CutAtom(morkEnv* ev, const morkBookAtom* inAtom) {
+ morkBookAtom* oldKey = 0;
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+ MORK_USED_1(inAtom);
+ morkProbeMap::ProbeMapCutError(ev);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->Cut(ev, &inAtom, &oldKey, /*val*/ (void*)0, (mork_change**)0);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ return oldKey;
+}
+
+morkBookAtom* morkAtomAidMap::GetAtom(morkEnv* ev, const morkBookAtom* inAtom) {
+ morkBookAtom* key = 0; // old val in the map
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+ this->MapAt(ev, &inAtom, &key, /*val*/ (void*)0);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->Get(ev, &inAtom, &key, /*val*/ (void*)0, (mork_change**)0);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ return key;
+}
+
+morkBookAtom* morkAtomAidMap::GetAid(morkEnv* ev, mork_aid inAid) {
+ morkWeeBookAtom weeAtom(inAid);
+ morkBookAtom* key = &weeAtom; // we need a pointer
+ morkBookAtom* oldKey = 0; // old key in the map
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+ this->MapAt(ev, &key, &oldKey, /*val*/ (void*)0);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->Get(ev, &key, &oldKey, /*val*/ (void*)0, (mork_change**)0);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ return oldKey;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkAtomBodyMap::CloseMorkNode(
+ morkEnv* ev) // CloseAtomBodyMap() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseAtomBodyMap(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkAtomBodyMap::~morkAtomBodyMap() // assert CloseAtomBodyMap() executed
+ // earlier
+{
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkAtomBodyMap::morkAtomBodyMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+#ifdef MORK_ENABLE_PROBE_MAPS
+ : morkProbeMap(ev, inUsage, ioHeap,
+ /*inKeySize*/ sizeof(morkBookAtom*), /*inValSize*/ 0,
+ ioSlotHeap, morkAtomBodyMap_kStartSlotCount,
+ /*inZeroIsClearKey*/ morkBool_kTrue)
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ : morkMap(ev, inUsage, ioHeap,
+ /*inKeySize*/ sizeof(morkBookAtom*), /*inValSize*/ 0,
+ morkAtomBodyMap_kStartSlotCount, ioSlotHeap,
+ /*inHoldChanges*/ morkBool_kFalse)
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+{
+ if (ev->Good()) mNode_Derived = morkDerived_kAtomBodyMap;
+}
+
+/*public non-poly*/ void morkAtomBodyMap::CloseAtomBodyMap(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+#ifdef MORK_ENABLE_PROBE_MAPS
+ this->CloseProbeMap(ev);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->CloseMap(ev);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+#ifdef MORK_ENABLE_PROBE_MAPS
+
+/*virtual*/ mork_test // hit(a,b) implies hash(a) == hash(b)
+morkAtomBodyMap::MapTest(morkEnv* ev, const void* inMapKey,
+ const void* inAppKey) const {
+ const morkBookAtom* key = *(const morkBookAtom**)inMapKey;
+ if (key) {
+ return (key->EqualFormAndBody(ev, *(const morkBookAtom**)inAppKey))
+ ? morkTest_kHit
+ : morkTest_kMiss;
+ } else
+ return morkTest_kVoid;
+}
+
+/*virtual*/ mork_u4 // hit(a,b) implies hash(a) == hash(b)
+morkAtomBodyMap::MapHash(morkEnv* ev, const void* inAppKey) const {
+ const morkBookAtom* key = *(const morkBookAtom**)inAppKey;
+ if (key)
+ return key->HashFormAndBody(ev);
+ else
+ return 0;
+}
+
+/*virtual*/ mork_u4 morkAtomBodyMap::ProbeMapHashMapKey(
+ morkEnv* ev, const void* inMapKey) const {
+ const morkBookAtom* key = *(const morkBookAtom**)inMapKey;
+ if (key)
+ return key->HashFormAndBody(ev);
+ else
+ return 0;
+}
+#else /*MORK_ENABLE_PROBE_MAPS*/
+// { ===== begin morkMap poly interface =====
+/*virtual*/ mork_bool //
+morkAtomBodyMap::Equal(morkEnv* ev, const void* inKeyA,
+ const void* inKeyB) const {
+ return (*(const morkBookAtom**)inKeyA)
+ ->EqualFormAndBody(ev, *(const morkBookAtom**)inKeyB);
+}
+
+/*virtual*/ mork_u4 //
+morkAtomBodyMap::Hash(morkEnv* ev, const void* inKey) const {
+ return (*(const morkBookAtom**)inKey)->HashFormAndBody(ev);
+}
+// } ===== end morkMap poly interface =====
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+mork_bool morkAtomBodyMap::AddAtom(morkEnv* ev, morkBookAtom* ioAtom) {
+ if (ev->Good()) {
+#ifdef MORK_ENABLE_PROBE_MAPS
+ this->MapAtPut(ev, &ioAtom, /*val*/ (void*)0,
+ /*key*/ (void*)0, /*val*/ (void*)0);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->Put(ev, &ioAtom, /*val*/ (void*)0,
+ /*key*/ (void*)0, /*val*/ (void*)0, (mork_change**)0);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+ }
+ return ev->Good();
+}
+
+morkBookAtom* morkAtomBodyMap::CutAtom(morkEnv* ev,
+ const morkBookAtom* inAtom) {
+ morkBookAtom* oldKey = 0;
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+ MORK_USED_1(inAtom);
+ morkProbeMap::ProbeMapCutError(ev);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->Cut(ev, &inAtom, &oldKey, /*val*/ (void*)0, (mork_change**)0);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ return oldKey;
+}
+
+morkBookAtom* morkAtomBodyMap::GetAtom(morkEnv* ev,
+ const morkBookAtom* inAtom) {
+ morkBookAtom* key = 0; // old val in the map
+#ifdef MORK_ENABLE_PROBE_MAPS
+ this->MapAt(ev, &inAtom, &key, /*val*/ (void*)0);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ this->Get(ev, &inAtom, &key, /*val*/ (void*)0, (mork_change**)0);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ return key;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+morkAtomRowMap::~morkAtomRowMap() {}
+
+// I changed to sizeof(mork_ip) from sizeof(mork_aid) to fix a crash on
+// 64 bit machines. I am not sure it was the right way to fix the problem,
+// but it does stop the crash. Perhaps we should be using the
+// morkPointerMap instead?
+morkAtomRowMap::morkAtomRowMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap,
+ mork_column inIndexColumn)
+ : morkIntMap(ev, inUsage, sizeof(mork_ip), ioHeap, ioSlotHeap,
+ /*inHoldChanges*/ morkBool_kFalse),
+ mAtomRowMap_IndexColumn(inIndexColumn) {
+ if (ev->Good()) mNode_Derived = morkDerived_kAtomRowMap;
+}
+
+void morkAtomRowMap::AddRow(morkEnv* ev, morkRow* ioRow)
+// add ioRow only if it contains a cell in mAtomRowMap_IndexColumn.
+{
+ mork_aid aid = ioRow->GetCellAtomAid(ev, mAtomRowMap_IndexColumn);
+ if (aid) this->AddAid(ev, aid, ioRow);
+}
+
+void morkAtomRowMap::CutRow(morkEnv* ev, morkRow* ioRow)
+// cut ioRow only if it contains a cell in mAtomRowMap_IndexColumn.
+{
+ mork_aid aid = ioRow->GetCellAtomAid(ev, mAtomRowMap_IndexColumn);
+ if (aid) this->CutAid(ev, aid);
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkAtomMap.h b/comm/mailnews/db/mork/morkAtomMap.h
new file mode 100644
index 0000000000..895fbedc74
--- /dev/null
+++ b/comm/mailnews/db/mork/morkAtomMap.h
@@ -0,0 +1,394 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKATOMMAP_
+#define _MORKATOMMAP_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKPROBEMAP_
+# include "morkProbeMap.h"
+#endif
+
+#ifndef _MORKINTMAP_
+# include "morkIntMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kAtomAidMap /*i*/ 0x6141 /* ascii 'aA' */
+
+#define morkAtomAidMap_kStartSlotCount 23
+
+/*| morkAtomAidMap: keys of morkBookAtom organized by atom ID
+|*/
+#ifdef MORK_ENABLE_PROBE_MAPS
+class morkAtomAidMap : public morkProbeMap {  // for mapping tokens to maps
+#else /*MORK_ENABLE_PROBE_MAPS*/
+class morkAtomAidMap : public morkMap {  // for mapping tokens to maps
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseAtomAidMap() only if open
+  virtual ~morkAtomAidMap();  // assert that CloseAtomAidMap() executed earlier
+
+ public:  // morkMap construction & destruction
+  morkAtomAidMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                 nsIMdbHeap* ioSlotHeap);
+  void CloseAtomAidMap(morkEnv* ev);  // called by CloseMorkNode();
+
+ public:  // dynamic type identification
+  mork_bool IsAtomAidMap() const {
+    return IsNode() && mNode_Derived == morkDerived_kAtomAidMap;
+  }
+  // } ===== end morkNode methods =====
+
+ public:
+#ifdef MORK_ENABLE_PROBE_MAPS
+  // { ===== begin morkProbeMap methods =====
+  virtual mork_test  // hit(a,b) implies hash(a) == hash(b)
+  MapTest(morkEnv* ev, const void* inMapKey,
+          const void* inAppKey) const override;
+
+  virtual mork_u4  // hit(a,b) implies hash(a) == hash(b)
+  MapHash(morkEnv* ev, const void* inAppKey) const override;
+
+  virtual mork_u4 ProbeMapHashMapKey(morkEnv* ev,
+                                     const void* inMapKey) const override;
+
+  // The methods below retain morkProbeMap's default behavior on purpose:
+  // virtual mork_bool ProbeMapIsKeyNil(morkEnv* ev, void* ioMapKey);
+
+  // virtual void ProbeMapClearKey(morkEnv* ev,  // put 'nil' into all keys
+  // inside map
+  //   void* ioMapKey, mork_count inKeyCount);  // array of keys inside map
+
+  // virtual void ProbeMapPushIn(morkEnv* ev,  // move (key,val) into the map
+  //   const void* inAppKey, const void* inAppVal,  // (key,val) outside map
+  //   void* outMapKey, void* outMapVal);  // (key,val) inside map
+
+  // virtual void ProbeMapPullOut(morkEnv* ev,  // move (key,val) out from the
+  // map
+  //   const void* inMapKey, const void* inMapVal,  // (key,val) inside map
+  //   void* outAppKey, void* outAppVal) const;  // (key,val) outside map
+  // } ===== end morkProbeMap methods =====
+#else /*MORK_ENABLE_PROBE_MAPS*/
+  // { ===== begin morkMap poly interface =====
+  virtual mork_bool  // note: equal(a,b) implies hash(a) == hash(b)
+  Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const override;
+  // implemented using morkBookAtom::EqualAid()
+
+  virtual mork_u4  // note: equal(a,b) implies hash(a) == hash(b)
+  Hash(morkEnv* ev, const void* inKey) const override;
+  // implemented using morkBookAtom::HashAid()
+// } ===== end morkMap poly interface =====
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ public:  // other map methods
+  mork_bool AddAtom(morkEnv* ev, morkBookAtom* ioAtom);
+  // AddAtom() returns ev->Good()
+
+  morkBookAtom* CutAtom(morkEnv* ev, const morkBookAtom* inAtom);
+  // CutAtom() returns the atom removed equal to inAtom, if there was one
+
+  morkBookAtom* GetAtom(morkEnv* ev, const morkBookAtom* inAtom);
+  // GetAtom() returns the atom equal to inAtom, or else nil
+
+  morkBookAtom* GetAid(morkEnv* ev, mork_aid inAid);
+  // GetAid() returns the atom equal to inAid, or else nil
+
+  // note the atoms are owned elsewhere, usually by morkAtomSpace
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakAtomAidMap(morkAtomAidMap* me, morkEnv* ev,
+                                 morkAtomAidMap** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongAtomAidMap(morkAtomAidMap* me, morkEnv* ev,
+                                   morkAtomAidMap** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// Typesafe iteration over a morkAtomAidMap; the base iterator type is
+// selected at compile time to match the map's base class.
+#ifdef MORK_ENABLE_PROBE_MAPS
+class morkAtomAidMapIter : public morkProbeMapIter {  // typesafe wrapper class
+#else /*MORK_ENABLE_PROBE_MAPS*/
+class morkAtomAidMapIter : public morkMapIter {  // typesafe wrapper class
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ public:
+#ifdef MORK_ENABLE_PROBE_MAPS
+  morkAtomAidMapIter(morkEnv* ev, morkAtomAidMap* ioMap)
+      : morkProbeMapIter(ev, ioMap) {}
+
+  morkAtomAidMapIter() : morkProbeMapIter() {}
+#else /*MORK_ENABLE_PROBE_MAPS*/
+  morkAtomAidMapIter(morkEnv* ev, morkAtomAidMap* ioMap)
+      : morkMapIter(ev, ioMap) {}
+
+  morkAtomAidMapIter() : morkMapIter() {}
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+  void InitAtomAidMapIter(morkEnv* ev, morkAtomAidMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  // Each method writes the current atom key through outAtomPtr; callers
+  // in this module stop iterating when a nil mork_change* is returned.
+  mork_change* FirstAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->First(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+
+  mork_change* NextAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->Next(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+
+  mork_change* HereAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->Here(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+
+  mork_change* CutHereAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->CutHere(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kAtomBodyMap /*i*/ 0x6142 /* ascii 'aB' */
+
+#define morkAtomBodyMap_kStartSlotCount 23
+
+/*| morkAtomBodyMap: keys of morkBookAtom organized by body bytes
+**| (companion to morkAtomAidMap, which indexes the same atoms by ID)
+|*/
+#ifdef MORK_ENABLE_PROBE_MAPS
+class morkAtomBodyMap : public morkProbeMap {  // for mapping tokens to maps
+#else /*MORK_ENABLE_PROBE_MAPS*/
+class morkAtomBodyMap : public morkMap {  // for mapping tokens to maps
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseAtomBodyMap() only if open
+  virtual ~morkAtomBodyMap();  // assert CloseAtomBodyMap() executed earlier
+
+ public:  // morkMap construction & destruction
+  morkAtomBodyMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                  nsIMdbHeap* ioSlotHeap);
+  void CloseAtomBodyMap(morkEnv* ev);  // called by CloseMorkNode();
+
+ public:  // dynamic type identification
+  mork_bool IsAtomBodyMap() const {
+    return IsNode() && mNode_Derived == morkDerived_kAtomBodyMap;
+  }
+  // } ===== end morkNode methods =====
+
+ public:
+#ifdef MORK_ENABLE_PROBE_MAPS
+  // { ===== begin morkProbeMap methods =====
+  virtual mork_test  // hit(a,b) implies hash(a) == hash(b)
+  MapTest(morkEnv* ev, const void* inMapKey,
+          const void* inAppKey) const override;
+
+  virtual mork_u4  // hit(a,b) implies hash(a) == hash(b)
+  MapHash(morkEnv* ev, const void* inAppKey) const override;
+
+  virtual mork_u4 ProbeMapHashMapKey(morkEnv* ev,
+                                     const void* inMapKey) const override;
+
+  // The methods below retain morkProbeMap's default behavior on purpose:
+  // virtual mork_bool ProbeMapIsKeyNil(morkEnv* ev, void* ioMapKey);
+
+  // virtual void ProbeMapClearKey(morkEnv* ev,  // put 'nil' into all keys
+  // inside map
+  //   void* ioMapKey, mork_count inKeyCount);  // array of keys inside map
+
+  // virtual void ProbeMapPushIn(morkEnv* ev,  // move (key,val) into the map
+  //   const void* inAppKey, const void* inAppVal,  // (key,val) outside map
+  //   void* outMapKey, void* outMapVal);  // (key,val) inside map
+
+  // virtual void ProbeMapPullOut(morkEnv* ev,  // move (key,val) out from the
+  // map
+  //   const void* inMapKey, const void* inMapVal,  // (key,val) inside map
+  //   void* outAppKey, void* outAppVal) const;  // (key,val) outside map
+  // } ===== end morkProbeMap methods =====
+#else /*MORK_ENABLE_PROBE_MAPS*/
+  // { ===== begin morkMap poly interface =====
+  virtual mork_bool  // note: equal(a,b) implies hash(a) == hash(b)
+  Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const override;
+  // implemented using morkBookAtom::EqualFormAndBody()
+
+  virtual mork_u4  // note: equal(a,b) implies hash(a) == hash(b)
+  Hash(morkEnv* ev, const void* inKey) const override;
+  // implemented using morkBookAtom::HashFormAndBody()
+// } ===== end morkMap poly interface =====
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ public:  // other map methods
+  mork_bool AddAtom(morkEnv* ev, morkBookAtom* ioAtom);
+  // AddAtom() returns ev->Good()
+
+  morkBookAtom* CutAtom(morkEnv* ev, const morkBookAtom* inAtom);
+  // CutAtom() returns the atom removed equal to inAtom, if there was one
+
+  morkBookAtom* GetAtom(morkEnv* ev, const morkBookAtom* inAtom);
+  // GetAtom() returns the atom equal to inAtom, or else nil
+
+  // note the atoms are owned elsewhere, usually by morkAtomSpace
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakAtomBodyMap(morkAtomBodyMap* me, morkEnv* ev,
+                                  morkAtomBodyMap** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongAtomBodyMap(morkAtomBodyMap* me, morkEnv* ev,
+                                    morkAtomBodyMap** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// Typesafe iteration over a morkAtomBodyMap; the base iterator type is
+// selected at compile time to match the map's base class.
+#ifdef MORK_ENABLE_PROBE_MAPS
+class morkAtomBodyMapIter : public morkProbeMapIter {  // typesafe wrapper class
+#else /*MORK_ENABLE_PROBE_MAPS*/
+class morkAtomBodyMapIter : public morkMapIter {  // typesafe wrapper class
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ public:
+#ifdef MORK_ENABLE_PROBE_MAPS
+  morkAtomBodyMapIter(morkEnv* ev, morkAtomBodyMap* ioMap)
+      : morkProbeMapIter(ev, ioMap) {}
+
+  morkAtomBodyMapIter() : morkProbeMapIter() {}
+#else /*MORK_ENABLE_PROBE_MAPS*/
+  morkAtomBodyMapIter(morkEnv* ev, morkAtomBodyMap* ioMap)
+      : morkMapIter(ev, ioMap) {}
+
+  morkAtomBodyMapIter() : morkMapIter() {}
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+  void InitAtomBodyMapIter(morkEnv* ev, morkAtomBodyMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  // Each method writes the current atom key through outAtomPtr; callers
+  // in this module stop iterating when a nil mork_change* is returned.
+  mork_change* FirstAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->First(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+
+  mork_change* NextAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->Next(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+
+  mork_change* HereAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->Here(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+
+  mork_change* CutHereAtom(morkEnv* ev, morkBookAtom** outAtomPtr) {
+    return this->CutHere(ev, outAtomPtr, /*val*/ (void*)0);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kAtomRowMap /*i*/ 0x6152 /* ascii 'aR' */
+
+/*| morkAtomRowMap: maps morkAtom* -> morkRow*
+**| (keys are atom IDs stored as integers; values are row pointers --
+**| see the sizeof(mork_ip) note on the constructor in morkAtomMap.cpp)
+|*/
+class morkAtomRowMap : public morkIntMap {  // for mapping atoms to rows
+
+ public:
+  mork_column mAtomRowMap_IndexColumn;  // row column being indexed
+
+ public:
+  virtual ~morkAtomRowMap();
+  morkAtomRowMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                 nsIMdbHeap* ioSlotHeap, mork_column inIndexColumn);
+
+ public:  // adding and cutting from morkRow instance candidate
+  void AddRow(morkEnv* ev, morkRow* ioRow);
+  // add ioRow only if it contains a cell in mAtomRowMap_IndexColumn.
+
+  void CutRow(morkEnv* ev, morkRow* ioRow);
+  // cut ioRow only if it contains a cell in mAtomRowMap_IndexColumn.
+
+ public:  // other map methods
+  mork_bool AddAid(morkEnv* ev, mork_aid inAid, morkRow* ioRow) {
+    return this->AddInt(ev, inAid, ioRow);
+  }
+  // the AddAid() boolean return equals ev->Good().
+
+  mork_bool CutAid(morkEnv* ev, mork_aid inAid) {
+    return this->CutInt(ev, inAid);
+  }
+  // The CutAid() boolean return indicates whether removal happened.
+
+  morkRow* GetAid(morkEnv* ev, mork_aid inAid) {
+    return (morkRow*)this->GetInt(ev, inAid);
+  }
+  // Note the returned space does NOT have an increase in refcount for this.
+
+ public:  // dynamic type identification
+  mork_bool IsAtomRowMap() const {
+    return IsNode() && mNode_Derived == morkDerived_kAtomRowMap;
+  }
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakAtomRowMap(morkAtomRowMap* me, morkEnv* ev,
+                                 morkAtomRowMap** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongAtomRowMap(morkAtomRowMap* me, morkEnv* ev,
+                                   morkAtomRowMap** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// Typesafe iteration over (atom, row) pairs of a morkAtomRowMap; callers
+// in this module stop iterating when a nil mork_change* is returned.
+class morkAtomRowMapIter : public morkMapIter {  // typesafe wrapper class
+
+ public:
+  morkAtomRowMapIter(morkEnv* ev, morkAtomRowMap* ioMap)
+      : morkMapIter(ev, ioMap) {}
+
+  morkAtomRowMapIter() : morkMapIter() {}
+  void InitAtomRowMapIter(morkEnv* ev, morkAtomRowMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  mork_change* FirstAtomAndRow(morkEnv* ev, morkAtom** outAtom,
+                               morkRow** outRow) {
+    return this->First(ev, outAtom, outRow);
+  }
+
+  mork_change* NextAtomAndRow(morkEnv* ev, morkAtom** outAtom,
+                              morkRow** outRow) {
+    return this->Next(ev, outAtom, outRow);
+  }
+
+  mork_change* HereAtomAndRow(morkEnv* ev, morkAtom** outAtom,
+                              morkRow** outRow) {
+    return this->Here(ev, outAtom, outRow);
+  }
+
+  mork_change* CutHereAtomAndRow(morkEnv* ev, morkAtom** outAtom,
+                                 morkRow** outRow) {
+    return this->CutHere(ev, outAtom, outRow);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKATOMMAP_ */
diff --git a/comm/mailnews/db/mork/morkAtomSpace.cpp b/comm/mailnews/db/mork/morkAtomSpace.cpp
new file mode 100644
index 0000000000..5ecdfe2b4c
--- /dev/null
+++ b/comm/mailnews/db/mork/morkAtomSpace.cpp
@@ -0,0 +1,233 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKSPACE_
+# include "morkSpace.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKSPACE_
+# include "morkSpace.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKATOM_
+# include "morkAtom.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkAtomSpace::CloseMorkNode(
+    morkEnv* ev)  // CloseAtomSpace() only if open
+{
+  // standard morkNode close protocol: mark closing, close, mark shut
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseAtomSpace(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkAtomSpace::~morkAtomSpace()  // assert CloseAtomSpace() executed earlier
+{
+  // CloseAtomSpace() zeroes both high-water IDs and shuts both member
+  // maps, so these asserts verify it ran before destruction
+  MORK_ASSERT(mAtomSpace_HighUnderId == 0);
+  MORK_ASSERT(mAtomSpace_HighOverId == 0);
+  MORK_ASSERT(this->IsShutNode());
+  MORK_ASSERT(mAtomSpace_AtomAids.IsShutNode());
+  MORK_ASSERT(mAtomSpace_AtomBodies.IsShutNode());
+}
+
+/*public non-poly*/
+// Construct an atom space for scope inScope inside ioStore; both member
+// maps allocate their slots from ioSlotHeap, and the ID high-water marks
+// start at the minimum auto-assign values of the 'under'/'over' ranges.
+morkAtomSpace::morkAtomSpace(morkEnv* ev, const morkUsage& inUsage,
+                             mork_scope inScope, morkStore* ioStore,
+                             nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+    : morkSpace(ev, inUsage, inScope, ioStore, ioHeap, ioSlotHeap),
+      mAtomSpace_HighUnderId(morkAtomSpace_kMinUnderId),
+      mAtomSpace_HighOverId(morkAtomSpace_kMinOverId),
+      mAtomSpace_AtomAids(ev, morkUsage::kMember, (nsIMdbHeap*)0, ioSlotHeap),
+      mAtomSpace_AtomBodies(ev, morkUsage::kMember, (nsIMdbHeap*)0,
+                            ioSlotHeap) {
+  // the morkSpace base constructor handles any dirty propagation
+  if (ev->Good()) mNode_Derived = morkDerived_kAtomSpace;
+}
+
+/*public non-poly*/ void morkAtomSpace::CloseAtomSpace(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    // NOTE(review): the body map is closed before CutAllAtoms() zaps the
+    // atoms, while the aid map is closed after -- presumably because the
+    // cut iterates the aid map; confirm before reordering.
+    mAtomSpace_AtomBodies.CloseMorkNode(ev);
+    morkStore* store = mSpace_Store;
+    if (store) this->CutAllAtoms(ev, &store->mStore_Pool);
+
+    mAtomSpace_AtomAids.CloseMorkNode(ev);
+    this->CloseSpace(ev);
+    mAtomSpace_HighUnderId = 0;
+    mAtomSpace_HighOverId = 0;
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+/*static*/ void morkAtomSpace::NonAtomSpaceTypeError(morkEnv* ev) {
+  // reported when a node expected to be a morkAtomSpace fails the check
+  ev->NewError("non morkAtomSpace");
+}
+
+// CutAllAtoms() hands every atom in this space back to ioPool and returns
+// the number of filled slots in the aid map. The caller must ensure
+// mSpace_Store is non-nil (CloseAtomSpace() checks the store before
+// calling), since each ZapAtom() dereferences the store's zone.
+mork_num morkAtomSpace::CutAllAtoms(morkEnv* ev, morkPool* ioPool) {
+#ifdef MORK_ENABLE_ZONE_ARENAS
+  // zone-arena build: atoms are reclaimed wholesale with their arena
+  MORK_USED_2(ev, ioPool);
+  return 0;
+#else /*MORK_ENABLE_ZONE_ARENAS*/
+  if (this->IsAtomSpaceClean()) this->MaybeDirtyStoreAndSpace();
+
+  mork_num outSlots = mAtomSpace_AtomAids.MapFill();
+  morkBookAtom* a = 0;  // old key atom in the map
+
+  morkStore* store = mSpace_Store;
+  mork_change* c = 0;
+  morkAtomAidMapIter i(ev, &mAtomSpace_AtomAids);
+  for (c = i.FirstAtom(ev, &a); c; c = i.NextAtom(ev, &a)) {
+    if (a) ioPool->ZapAtom(ev, a, &store->mStore_Zone);
+
+# ifdef MORK_ENABLE_PROBE_MAPS
+    // do not cut anything from the map
+# else /*MORK_ENABLE_PROBE_MAPS*/
+    i.CutHereAtom(ev, /*key*/ (morkBookAtom**)0);
+# endif /*MORK_ENABLE_PROBE_MAPS*/
+  }
+
+  return outSlots;
+#endif /*MORK_ENABLE_ZONE_ARENAS*/
+}
+
+morkBookAtom* morkAtomSpace::MakeBookAtomCopyWithAid(
+    morkEnv* ev, const morkFarBookAtom& inAtom, mork_aid inAid)
+// Make copy of inAtom and put it in both maps, using specified ID.
+{
+  morkBookAtom* outAtom = 0;
+  morkStore* store = mSpace_Store;
+  if (ev->Good() && store) {
+    morkPool* pool = this->GetSpaceStorePool();
+    outAtom = pool->NewFarBookAtomCopy(ev, inAtom, &store->mStore_Zone);
+    if (outAtom) {
+      if (store->mStore_CanDirty) {
+        outAtom->SetAtomDirty();
+        if (this->IsAtomSpaceClean()) this->MaybeDirtyStoreAndSpace();
+      }
+
+      outAtom->mBookAtom_Id = inAid;
+      outAtom->mBookAtom_Space = this;
+      mAtomSpace_AtomAids.AddAtom(ev, outAtom);
+      mAtomSpace_AtomBodies.AddAtom(ev, outAtom);
+      // column scope is forever (see morkAtomSpace_kColumnScope)
+      if (this->SpaceScope() == morkAtomSpace_kColumnScope)
+        outAtom->MakeCellUseForever(ev);
+
+      // keep the auto-assign high-water mark above every explicit ID
+      if (mAtomSpace_HighUnderId <= inAid) mAtomSpace_HighUnderId = inAid + 1;
+    }
+  }
+  return outAtom;
+}
+
+morkBookAtom* morkAtomSpace::MakeBookAtomCopy(morkEnv* ev,
+                                              const morkFarBookAtom& inAtom)
+// make copy of inAtom and put it in both maps, using a new ID as needed.
+// Returns nil if the store forbids auto-assigned atom identity, if no
+// free ID could be generated, or on allocation failure.
+{
+  morkBookAtom* outAtom = 0;
+  morkStore* store = mSpace_Store;
+  if (ev->Good() && store) {
+    if (store->mStore_CanAutoAssignAtomIdentity) {
+      morkPool* pool = this->GetSpaceStorePool();
+      // consistently use the local alias 'store' (the original mixed it
+      // with direct mSpace_Store accesses naming the same object)
+      morkBookAtom* atom =
+          pool->NewFarBookAtomCopy(ev, inAtom, &store->mStore_Zone);
+      if (atom) {
+        mork_aid id = this->MakeNewAtomId(ev, atom);
+        if (id) {
+          if (store->mStore_CanDirty) {
+            atom->SetAtomDirty();
+            if (this->IsAtomSpaceClean()) this->MaybeDirtyStoreAndSpace();
+          }
+
+          outAtom = atom;
+          atom->mBookAtom_Space = this;
+          mAtomSpace_AtomAids.AddAtom(ev, atom);
+          mAtomSpace_AtomBodies.AddAtom(ev, atom);
+          // column scope is forever (see morkAtomSpace_kColumnScope)
+          if (this->SpaceScope() == morkAtomSpace_kColumnScope)
+            outAtom->MakeCellUseForever(ev);
+        } else
+          // no usable ID: return the copy to the pool
+          pool->ZapAtom(ev, atom, &store->mStore_Zone);
+      }
+    } else
+      store->CannotAutoAssignAtomIdentityError(ev);
+  }
+  return outAtom;
+}
+
+// Generate an atom ID not already present in mAtomSpace_AtomAids, probing
+// at most eight candidates starting at mAtomSpace_HighUnderId. Returns
+// zero when every candidate was taken. Note ioAtom->mBookAtom_Id is left
+// set to the last candidate tried, and the high-water mark always
+// advances past the last candidate.
+mork_aid morkAtomSpace::MakeNewAtomId(morkEnv* ev, morkBookAtom* ioAtom) {
+  mork_aid outAid = 0;
+  mork_tid id = mAtomSpace_HighUnderId;
+  mork_num count = 8;  // try up to eight times
+
+  while (!outAid && count)  // still trying to find an unused atom ID?
+  {
+    --count;
+    ioAtom->mBookAtom_Id = id;
+    if (!mAtomSpace_AtomAids.GetAtom(ev, ioAtom))
+      outAid = id;
+    else {
+      MORK_ASSERT(morkBool_kFalse);  // alert developer about ID problems
+      ++id;
+    }
+  }
+
+  mAtomSpace_HighUnderId = id + 1;
+  return outAid;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Empty destructor: this subclass adds no members beyond morkNodeMap.
+morkAtomSpaceMap::~morkAtomSpaceMap() {}
+
+// Construct a scope -> atom-space map over the given heaps.
+morkAtomSpaceMap::morkAtomSpaceMap(morkEnv* ev, const morkUsage& inUsage,
+                                   nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+    : morkNodeMap(ev, inUsage, ioHeap, ioSlotHeap) {
+  // tag the node type only if base construction succeeded
+  if (ev->Good()) mNode_Derived = morkDerived_kAtomSpaceMap;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkAtomSpace.h b/comm/mailnews/db/mork/morkAtomSpace.h
new file mode 100644
index 0000000000..d057ad6cfd
--- /dev/null
+++ b/comm/mailnews/db/mork/morkAtomSpace.h
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKATOMSPACE_
+#define _MORKATOMSPACE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKSPACE_
+# include "morkSpace.h"
+#endif
+
+#ifndef _MORKATOMMAP_
+# include "morkAtomMap.h"
+#endif
+
+#ifndef _MORKNODEMAP_
+# include "morkNodeMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*| kMinUnderId: the smallest ID we auto-assign to the 'under' namespace
+**| reserved for tokens expected to occur very frequently, such as the names
+**| of columns. We reserve single byte ids in the ASCII range to correspond
+**| one-to-one to those tokens consisting single ASCII characters (so that
+**| this assignment is always known and constant). So we start at 0x80, and
+**| then reserve the upper half of two hex digit ids and all the three hex
+**| digit IDs for the 'under' namespace for common tokens.
+|*/
+#define morkAtomSpace_kMinUnderId 0x80 /* low 7 bits mean byte tokens */
+
+#define morkAtomSpace_kMaxSevenBitAid 0x7F /* low seven bit integer ID */
+
+/*| kMinOverId: the smallest ID we auto-assign to the 'over' namespace that
+**| might include very large numbers of tokens that are used infrequently,
+**| so that we care less whether the shortest hex representation is used.
+**| So we start all IDs for 'over' category tokens at a value range that
+**| needs at least four hex digits, so we can reserve three hex digits and
+**| shorter for more commonly occurring tokens in the 'under' category.
+|*/
+#define morkAtomSpace_kMinOverId 0x1000 /* using at least four hex bytes */
+
+#define morkDerived_kAtomSpace /*i*/ 0x6153 /* ascii 'aS' */
+
+#define morkAtomSpace_kColumnScope \
+ ((mork_scope)'c') /* column scope is forever */
+
+/*| morkAtomSpace: one scope's collection of book atoms, indexed twice --
+**| by atom ID (mAtomSpace_AtomAids) and by body (mAtomSpace_AtomBodies) --
+**| with high-water marks for auto-assigning IDs in the 'under' and 'over'
+**| ranges described above.
+|*/
+class morkAtomSpace : public morkSpace {  //
+
+  // public: // slots inherited from morkSpace (meant to inform only)
+  // nsIMdbHeap*    mNode_Heap;
+
+  // mork_base      mNode_Base;     // must equal morkBase_kNode
+  // mork_derived   mNode_Derived;  // depends on specific node subclass
+
+  // mork_access    mNode_Access;   // kOpen, kClosing, kShut, or kDead
+  // mork_usage     mNode_Usage;    // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able      mNode_Mutable;  // can this node be modified?
+  // mork_load      mNode_Load;     // is this node clean or dirty?
+
+  // mork_uses      mNode_Uses;     // refcount for strong refs
+  // mork_refs      mNode_Refs;     // refcount for strong refs + weak refs
+
+  // morkStore*     mSpace_Store;   // weak ref to containing store
+
+  // mork_bool      mSpace_DoAutoIDs;        // whether db should assign IDs
+  // mork_bool      mSpace_HaveDoneAutoIDs;  // whether auto assigned IDs
+  // mork_u1        mSpace_Pad[ 2 ];         // pad to u4 alignment
+
+ public:  // state is public because the entire Mork system is private
+  mork_aid mAtomSpace_HighUnderId;  // high ID in 'under' range
+  mork_aid mAtomSpace_HighOverId;   // high ID in 'over' range
+
+  morkAtomAidMap mAtomSpace_AtomAids;     // all atoms in space by ID
+  morkAtomBodyMap mAtomSpace_AtomBodies;  // all atoms in space by body
+
+ public:  // more specific dirty methods for atom space:
+  void SetAtomSpaceDirty() { this->SetNodeDirty(); }
+  void SetAtomSpaceClean() { this->SetNodeClean(); }
+
+  mork_bool IsAtomSpaceClean() const { return this->IsNodeClean(); }
+  mork_bool IsAtomSpaceDirty() const { return this->IsNodeDirty(); }
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseAtomSpace() only if open
+  virtual ~morkAtomSpace();  // assert that CloseAtomSpace() executed earlier
+
+ public:  // morkMap construction & destruction
+  morkAtomSpace(morkEnv* ev, const morkUsage& inUsage, mork_scope inScope,
+                morkStore* ioStore, nsIMdbHeap* ioNodeHeap,
+                nsIMdbHeap* ioSlotHeap);
+  void CloseAtomSpace(morkEnv* ev);  // called by CloseMorkNode();
+
+ public:  // dynamic type identification
+  mork_bool IsAtomSpace() const {
+    return IsNode() && mNode_Derived == morkDerived_kAtomSpace;
+  }
+  // } ===== end morkNode methods =====
+
+ public:  // typing
+  void NonAtomSpaceTypeError(morkEnv* ev);
+
+ public:  // setup
+  mork_bool MarkAllAtomSpaceContentDirty(morkEnv* ev);
+  // MarkAllAtomSpaceContentDirty() visits every space object and marks
+  // them dirty, including every table, row, cell, and atom. The return
+  // equals ev->Good(), to show whether any error happened. This method is
+  // intended for use in the beginning of a "compress commit" which writes
+  // all store content, whether dirty or not. We dirty everything first so
+  // that later iterations over content can mark things clean as they are
+  // written, and organize the process of serialization so that objects are
+  // written only at need (because of being dirty).
+
+ public:  // other space methods
+  // void ReserveColumnAidCount(mork_count inCount)
+  // {
+  //   mAtomSpace_HighUnderId = morkAtomSpace_kMinUnderId + inCount;
+  //   mAtomSpace_HighOverId = morkAtomSpace_kMinOverId + inCount;
+  // }
+
+  mork_num CutAllAtoms(morkEnv* ev, morkPool* ioPool);
+  // CutAllAtoms() puts all the atoms back in the pool.
+
+  morkBookAtom* MakeBookAtomCopyWithAid(morkEnv* ev,
+                                        const morkFarBookAtom& inAtom,
+                                        mork_aid inAid);
+  // Make copy of inAtom and put it in both maps, using specified ID.
+
+  morkBookAtom* MakeBookAtomCopy(morkEnv* ev, const morkFarBookAtom& inAtom);
+  // Make copy of inAtom and put it in both maps, using a new ID as needed.
+
+  mork_aid MakeNewAtomId(morkEnv* ev, morkBookAtom* ioAtom);
+  // generate an unused atom id.
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakAtomSpace(morkAtomSpace* me, morkEnv* ev,
+                                morkAtomSpace** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongAtomSpace(morkAtomSpace* me, morkEnv* ev,
+                                  morkAtomSpace** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kAtomSpaceMap /*i*/ 0x615A /* ascii 'aZ' */
+
+/*| morkAtomSpaceMap: maps mork_scope -> morkAtomSpace
+|*/
+class morkAtomSpaceMap : public morkNodeMap {  // maps scopes to atom spaces
+
+ public:
+  virtual ~morkAtomSpaceMap();
+  morkAtomSpaceMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                   nsIMdbHeap* ioSlotHeap);
+
+ public:  // other map methods
+  mork_bool AddAtomSpace(morkEnv* ev, morkAtomSpace* ioAtomSpace) {
+    return this->AddNode(ev, ioAtomSpace->SpaceScope(), ioAtomSpace);
+  }
+  // the AddAtomSpace() boolean return equals ev->Good().
+
+  mork_bool CutAtomSpace(morkEnv* ev, mork_scope inScope) {
+    return this->CutNode(ev, inScope);
+  }
+  // The CutAtomSpace() boolean return indicates whether removal happened.
+
+  morkAtomSpace* GetAtomSpace(morkEnv* ev, mork_scope inScope) {
+    return (morkAtomSpace*)this->GetNode(ev, inScope);
+  }
+  // Note the returned space does NOT have an increase in refcount for this.
+
+  mork_num CutAllAtomSpaces(morkEnv* ev) { return this->CutAllNodes(ev); }
+  // CutAllAtomSpaces() releases all the referenced atom space values.
+};
+
+// Typesafe iteration over (scope, atom space) pairs of a morkAtomSpaceMap;
+// callers conventionally stop when a nil mork_change* is returned.
+class morkAtomSpaceMapIter : public morkMapIter {  // typesafe wrapper class
+
+ public:
+  morkAtomSpaceMapIter(morkEnv* ev, morkAtomSpaceMap* ioMap)
+      : morkMapIter(ev, ioMap) {}
+
+  morkAtomSpaceMapIter() : morkMapIter() {}
+  void InitAtomSpaceMapIter(morkEnv* ev, morkAtomSpaceMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  mork_change* FirstAtomSpace(morkEnv* ev, mork_scope* outScope,
+                              morkAtomSpace** outAtomSpace) {
+    return this->First(ev, outScope, outAtomSpace);
+  }
+
+  mork_change* NextAtomSpace(morkEnv* ev, mork_scope* outScope,
+                             morkAtomSpace** outAtomSpace) {
+    return this->Next(ev, outScope, outAtomSpace);
+  }
+
+  mork_change* HereAtomSpace(morkEnv* ev, mork_scope* outScope,
+                             morkAtomSpace** outAtomSpace) {
+    return this->Here(ev, outScope, outAtomSpace);
+  }
+
+  mork_change* CutHereAtomSpace(morkEnv* ev, mork_scope* outScope,
+                                morkAtomSpace** outAtomSpace) {
+    return this->CutHere(ev, outScope, outAtomSpace);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKATOMSPACE_ */
diff --git a/comm/mailnews/db/mork/morkBead.cpp b/comm/mailnews/db/mork/morkBead.cpp
new file mode 100644
index 0000000000..839322621f
--- /dev/null
+++ b/comm/mailnews/db/mork/morkBead.cpp
@@ -0,0 +1,361 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKBEAD_
+# include "morkBead.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Idempotent close: runs CloseBead() only while the node is still open,
+// bracketed by MarkClosing()/MarkShut() like all morkNode subclasses.
+/*public virtual*/ void morkBead::CloseMorkNode(
+    morkEnv* ev)  // CloseBead() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseBead(ev);
+    this->MarkShut();
+  }
+}
+
+// CloseBead() zeroes mBead_Color, so a nonzero color here means the bead was
+// never closed — tolerated only for stack-usage (temporary key) instances.
+/*public virtual*/
+morkBead::~morkBead()  // assert CloseBead() executed earlier
+{
+  MORK_ASSERT(mBead_Color == 0 || mNode_Usage == morkUsage_kStack);
+}
+
+// Stack-based instance, used as a temporary lookup key in the bead maps.
+/*public non-poly*/
+morkBead::morkBead(mork_color inBeadColor)
+    : morkNode(morkUsage_kStack), mBead_Color(inBeadColor) {}
+
+// Protected ctor for subclasses (morkObject per the header's comment).
+/*public non-poly*/
+morkBead::morkBead(const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                   mork_color inBeadColor)
+    : morkNode(inUsage, ioHeap), mBead_Color(inBeadColor) {}
+
+// Full env-aware constructor: tag the node as a bead only when base-class
+// construction succeeded. The original nested a second, identical
+// ev->Good() test inside the first; the duplicate has been collapsed.
+/*public non-poly*/
+morkBead::morkBead(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                   mork_color inBeadColor)
+    : morkNode(ev, inUsage, ioHeap), mBead_Color(inBeadColor) {
+  if (ev->Good()) mNode_Derived = morkDerived_kBead;
+}
+
+// Tear down bead state. Zeroing mBead_Color is what lets the destructor's
+// assertion detect a properly-closed bead.
+/*public non-poly*/ void morkBead::CloseBead(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    if (!this->IsShutNode()) {
+      mBead_Color = 0;
+      this->MarkShut();
+    }
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Idempotent close for the map node, mirroring morkBead::CloseMorkNode.
+/*public virtual*/ void morkBeadMap::CloseMorkNode(
+    morkEnv* ev)  // CloseBeadMap() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseBeadMap(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkBeadMap::~morkBeadMap()  // assert CloseBeadMap() executed earlier
+{
+  MORK_ASSERT(this->IsShutNode());
+}
+
+// Keys are morkBead* (hashed/compared via mBead_Color); there is no value
+// payload (inValSize 0). Initial slot count is 11; changes are not held.
+/*public non-poly*/
+morkBeadMap::morkBeadMap(morkEnv* ev, const morkUsage& inUsage,
+                         nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+    : morkMap(ev, inUsage, ioHeap, sizeof(morkBead*), /*inValSize*/ 0,
+              /*slotCount*/ 11, ioSlotHeap, /*holdChanges*/ morkBool_kFalse) {
+  if (ev->Good()) mNode_Derived = morkDerived_kBeadMap;
+}
+
+// Release every held bead ref, then close the underlying map storage.
+/*public non-poly*/ void morkBeadMap::CloseBeadMap(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    this->CutAllBeads(ev);
+    this->CloseMap(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Insert ioBead keyed by its color, maintaining the invariant that the map
+// holds exactly one strong ref per distinct bead it contains.
+mork_bool morkBeadMap::AddBead(morkEnv* ev, morkBead* ioBead)
+// the AddBead() boolean return equals ev->Good().
+{
+  if (ioBead && ev->Good()) {
+    morkBead* oldBead = 0;  // old key in the map
+
+    mork_bool put =
+        this->Put(ev, &ioBead, /*val*/ (void*)0,
+                  /*key*/ &oldBead, /*val*/ (void*)0, (mork_change**)0);
+
+    if (put)  // replaced an existing key?
+    {
+      if (oldBead != ioBead)      // new bead was not already in table?
+        ioBead->AddStrongRef(ev);  // now there's another ref
+
+      if (oldBead && oldBead != ioBead)  // need to release old node?
+        oldBead->CutStrongRef(ev);
+    } else
+      ioBead->AddStrongRef(ev);  // another ref if not already in table
+  } else if (!ioBead)
+    ev->NilPointerError();
+
+  return ev->Good();
+}
+
+// Remove the bead with color inColor, dropping the map's strong ref to it.
+// A stack-constructed bead serves as the throwaway lookup key.
+mork_bool morkBeadMap::CutBead(morkEnv* ev, mork_color inColor) {
+  morkBead* oldBead = 0;  // old key in the map
+  morkBead bead(inColor);
+  morkBead* key = &bead;
+
+  mork_bool outCutNode =
+      this->Cut(ev, &key,
+                /*key*/ &oldBead, /*val*/ (void*)0, (mork_change**)0);
+
+  if (oldBead) oldBead->CutStrongRef(ev);
+
+  // Close the stack key so the morkBead dtor's color assertion is satisfied.
+  bead.CloseBead(ev);
+  return outCutNode;
+}
+
+// Look up the bead with color inColor via a stack-constructed key bead;
+// returns null when absent.
+morkBead* morkBeadMap::GetBead(morkEnv* ev, mork_color inColor)
+// Note the returned bead does NOT have an increase in refcount for this.
+{
+  morkBead* oldBead = 0;  // old key in the map
+  morkBead bead(inColor);
+  morkBead* key = &bead;
+
+  this->Get(ev, &key, /*key*/ &oldBead, /*val*/ (void*)0, (mork_change**)0);
+
+  bead.CloseBead(ev);
+  return oldBead;
+}
+
+// Drop every entry and release each held strong ref; returns the slot count
+// the map had before clearing.
+mork_num morkBeadMap::CutAllBeads(morkEnv* ev)
+// CutAllBeads() releases all the referenced beads.
+{
+  mork_num outSlots = mMap_Slots;
+
+  morkBeadMapIter i(ev, this);
+  morkBead* b = i.FirstBead(ev);
+
+  // NOTE(review): CutHereBead() precedes NextBead(); this relies on
+  // morkMapIter keeping a valid cursor after CutHere — confirm in morkMap.
+  while (b) {
+    b->CutStrongRef(ev);
+    i.CutHereBead(ev);
+    b = i.NextBead(ev);
+  }
+
+  return outSlots;
+}
+
+// { ===== begin morkMap poly interface =====
+/*virtual*/ mork_bool morkBeadMap::Equal(morkEnv* ev, const void* inKeyA,
+ const void* inKeyB) const {
+ MORK_USED_1(ev);
+ return (*(const morkBead**)inKeyA)->BeadEqual(*(const morkBead**)inKeyB);
+}
+
+/*virtual*/ mork_u4 morkBeadMap::Hash(morkEnv* ev, const void* inKey) const {
+ MORK_USED_1(ev);
+ return (*(const morkBead**)inKey)->BeadHash();
+}
+// } ===== end morkMap poly interface =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Typesafe cursor helpers: each returns the bead key at the relevant
+// position, or null when iteration is exhausted. No refcounts are taken.
+morkBead* morkBeadMapIter::FirstBead(morkEnv* ev) {
+  morkBead* bead = 0;
+  this->First(ev, &bead, /*val*/ (void*)0);
+  return bead;
+}
+
+morkBead* morkBeadMapIter::NextBead(morkEnv* ev) {
+  morkBead* bead = 0;
+  this->Next(ev, &bead, /*val*/ (void*)0);
+  return bead;
+}
+
+morkBead* morkBeadMapIter::HereBead(morkEnv* ev) {
+  morkBead* bead = 0;
+  this->Here(ev, &bead, /*val*/ (void*)0);
+  return bead;
+}
+
+// Removes the current entry; the caller is responsible for releasing any
+// strong ref the map held (see morkBeadMap::CutAllBeads).
+void morkBeadMapIter::CutHereBead(morkEnv* ev) {
+  this->CutHere(ev, /*key*/ (void*)0, /*val*/ (void*)0);
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Idempotent close for the probe-map node, same shape as morkBeadMap's.
+/*public virtual*/ void morkBeadProbeMap::CloseMorkNode(
+    morkEnv* ev)  // CloseBeadProbeMap() if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseBeadProbeMap(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkBeadProbeMap::~morkBeadProbeMap()  // assert CloseBeadProbeMap() earlier
+{
+  MORK_ASSERT(this->IsShutNode());
+}
+
+// Keys are morkBead*, no value payload; a zero key slot means "empty"
+// (inZeroIsClearKey true), starting with 11 slots.
+/*public non-poly*/
+morkBeadProbeMap::morkBeadProbeMap(morkEnv* ev, const morkUsage& inUsage,
+                                   nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+    : morkProbeMap(ev, inUsage, ioHeap,
+                   /*inKeySize*/ sizeof(morkBead*), /*inValSize*/ 0, ioSlotHeap,
+                   /*startSlotCount*/ 11,
+                   /*inZeroIsClearKey*/ morkBool_kTrue) {
+  if (ev->Good()) mNode_Derived = morkDerived_kBeadProbeMap;
+}
+
+// Release every held bead ref, then close the probe-map storage.
+/*public non-poly*/ void morkBeadProbeMap::CloseBeadProbeMap(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    this->CutAllBeads(ev);
+    this->CloseProbeMap(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Probe-map comparison: a null map key means an empty slot (kVoid), which
+// ends a probe sequence; otherwise compare bead colors.
+/*virtual*/ mork_test  // hit(a,b) implies hash(a) == hash(b)
+morkBeadProbeMap::MapTest(morkEnv* ev, const void* inMapKey,
+                          const void* inAppKey) const {
+  MORK_USED_1(ev);
+  const morkBead* key = *(const morkBead**)inMapKey;
+  if (key) {
+    mork_bool hit = key->BeadEqual(*(const morkBead**)inAppKey);
+    return (hit) ? morkTest_kHit : morkTest_kMiss;
+  } else
+    return morkTest_kVoid;
+}
+
+/*virtual*/ mork_u4  // hit(a,b) implies hash(a) == hash(b)
+morkBeadProbeMap::MapHash(morkEnv* ev, const void* inAppKey) const {
+  const morkBead* key = *(const morkBead**)inAppKey;
+  if (key)
+    return key->BeadHash();
+  else {
+    ev->NilPointerWarning();
+    return 0;
+  }
+}
+
+// Same hash as MapHash, but for keys already stored inside the map.
+/*virtual*/ mork_u4 morkBeadProbeMap::ProbeMapHashMapKey(
+    morkEnv* ev, const void* inMapKey) const {
+  const morkBead* key = *(const morkBead**)inMapKey;
+  if (key)
+    return key->BeadHash();
+  else {
+    ev->NilPointerWarning();
+    return 0;
+  }
+}
+
+// Insert ioBead keyed by its color; the probe map holds one strong ref per
+// distinct bead, mirroring morkBeadMap::AddBead.
+mork_bool morkBeadProbeMap::AddBead(morkEnv* ev, morkBead* ioBead) {
+  if (ioBead && ev->Good()) {
+    morkBead* bead = 0;  // old key in the map
+
+    mork_bool put = this->MapAtPut(ev, &ioBead, /*val*/ (void*)0,
+                                   /*key*/ &bead, /*val*/ (void*)0);
+
+    if (put)  // replaced an existing key?
+    {
+      if (bead != ioBead)          // new bead was not already in table?
+        ioBead->AddStrongRef(ev);  // now there's another ref
+
+      if (bead && bead != ioBead)  // need to release old node?
+        bead->CutStrongRef(ev);
+    } else
+      ioBead->AddStrongRef(ev);  // now there's another ref
+  } else if (!ioBead)
+    ev->NilPointerError();
+
+  return ev->Good();
+}
+
+// Lookup by color via a stack key bead; no refcount taken on the result.
+morkBead* morkBeadProbeMap::GetBead(morkEnv* ev, mork_color inColor) {
+  morkBead* oldBead = 0;  // old key in the map
+  morkBead bead(inColor);
+  morkBead* key = &bead;
+
+  this->MapAt(ev, &key, &oldBead, /*val*/ (void*)0);
+
+  bead.CloseBead(ev);
+  return oldBead;
+}
+
+// Release all held refs, then clear the probe map wholesale with MapCutAll
+// (probe maps have no per-entry cut, unlike morkBeadMap::CutAllBeads).
+mork_num morkBeadProbeMap::CutAllBeads(morkEnv* ev)
+// CutAllBeads() releases all the referenced bead values.
+{
+  mork_num outSlots = sMap_Slots;
+
+  morkBeadProbeMapIter i(ev, this);
+  morkBead* b = i.FirstBead(ev);
+
+  while (b) {
+    b->CutStrongRef(ev);
+    b = i.NextBead(ev);
+  }
+  this->MapCutAll(ev);
+
+  return outSlots;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkBead.h b/comm/mailnews/db/mork/morkBead.h
new file mode 100644
index 0000000000..deccec0ba6
--- /dev/null
+++ b/comm/mailnews/db/mork/morkBead.h
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKBEAD_
+#define _MORKBEAD_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKPROBEMAP_
+# include "morkProbeMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kBead /*i*/ 0x426F /* ascii 'Bo' */
+
+/*| morkBead: subclass of morkNode that adds knowledge of db suite factory
+**| and containing port to those objects that are exposed as instances of
+**| nsIMdbBead in the public interface.
+|*/
+// A morkBead is a morkNode identified by a mork_color (mBead_Color); bead
+// maps hash and compare entries purely by that color.
+class morkBead : public morkNode {
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap*    mNode_Heap;
+
+  // mork_base      mNode_Base;     // must equal morkBase_kNode
+  // mork_derived   mNode_Derived;  // depends on specific node subclass
+
+  // mork_access    mNode_Access;   // kOpen, kClosing, kShut, or kDead
+  // mork_usage     mNode_Usage;    // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able      mNode_Mutable;  // can this node be modified?
+  // mork_load      mNode_Load;     // is this node clean or dirty?
+
+  // mork_uses      mNode_Uses;     // refcount for strong refs
+  // mork_refs      mNode_Refs;     // refcount for strong refs + weak refs
+
+ public:  // state is public because the entire Mork system is private
+  mork_color mBead_Color;  // ID for this bead
+
+ public:  // Hash() and Equal() for bead maps are same for all subclasses:
+  mork_u4 BeadHash() const { return (mork_u4)mBead_Color; }
+  mork_bool BeadEqual(const morkBead* inBead) const {
+    return (mBead_Color == inBead->mBead_Color);
+  }
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev) override;  // CloseBead() only if open
+  virtual ~morkBead();  // assert that CloseBead() executed earlier
+
+ public:  // special case for stack construction for map usage:
+  explicit morkBead(mork_color inBeadColor);  // stack-based bead instance
+
+ protected:  // special case for morkObject:
+  morkBead(const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+           mork_color inBeadColor);
+
+ public:  // morkEnv construction & destruction
+  morkBead(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+           mork_color inBeadColor);
+  void CloseBead(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkBead(const morkBead& other);
+  morkBead& operator=(const morkBead& other);
+
+ public:  // dynamic type identification
+  mork_bool IsBead() const {
+    return IsNode() && mNode_Derived == morkDerived_kBead;
+  }
+  // } ===== end morkNode methods =====
+
+  // void NewNilHandleError(morkEnv* ev); // mBead_Handle is nil
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakBead(morkBead* me, morkEnv* ev, morkBead** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongBead(morkBead* me, morkEnv* ev, morkBead** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kBeadMap /*i*/ 0x744D /* ascii 'bM' */
+
+/*| morkBeadMap: maps bead -> bead (key only using mBead_Color)
+|*/
+// Chained-bucket map of beads keyed by mBead_Color; holds a strong ref on
+// each contained bead (see morkBeadMap.cpp AddBead/CutAllBeads).
+class morkBeadMap : public morkMap {
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseBeadMap() only if open
+  virtual ~morkBeadMap();     // assert that CloseBeadMap() executed earlier
+
+ public:  // morkMap construction & destruction
+  morkBeadMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+              nsIMdbHeap* ioSlotHeap);
+  void CloseBeadMap(morkEnv* ev);  // called by CloseMorkNode();
+
+ public:  // dynamic type identification
+  mork_bool IsBeadMap() const {
+    return IsNode() && mNode_Derived == morkDerived_kBeadMap;
+  }
+  // } ===== end morkNode methods =====
+
+  // { ===== begin morkMap poly interface =====
+ public:
+  virtual mork_bool  // *((mork_u4*) inKeyA) == *((mork_u4*) inKeyB)
+  Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const override;
+
+  virtual mork_u4  // some integer function of *((mork_u4*) inKey)
+  Hash(morkEnv* ev, const void* inKey) const override;
+  // } ===== end morkMap poly interface =====
+
+ public:  // other map methods
+  mork_bool AddBead(morkEnv* ev, morkBead* ioBead);
+  // the AddBead() boolean return equals ev->Good().
+
+  mork_bool CutBead(morkEnv* ev, mork_color inColor);
+  // The CutBead() boolean return indicates whether removal happened.
+
+  morkBead* GetBead(morkEnv* ev, mork_color inColor);
+  // Note the returned bead does NOT have an increase in refcount for this.
+
+  mork_num CutAllBeads(morkEnv* ev);
+  // CutAllBeads() releases all the referenced beads.
+};
+
+// Typesafe cursor over a morkBeadMap; yields bead keys without refcounting.
+class morkBeadMapIter : public morkMapIter {  // typesafe wrapper class
+
+ public:
+  morkBeadMapIter(morkEnv* ev, morkBeadMap* ioMap) : morkMapIter(ev, ioMap) {}
+
+  morkBeadMapIter() : morkMapIter() {}
+  void InitBeadMapIter(morkEnv* ev, morkBeadMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  morkBead* FirstBead(morkEnv* ev);
+  morkBead* NextBead(morkEnv* ev);
+  morkBead* HereBead(morkEnv* ev);
+  void CutHereBead(morkEnv* ev);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kBeadProbeMap /*i*/ 0x6D74 /* ascii 'mb' */
+
+/*| morkBeadProbeMap: maps bead -> bead (key only using mBead_Color)
+|*/
+// Open-addressing (probe) variant of the bead map; same color-keyed
+// semantics as morkBeadMap but without per-entry removal.
+class morkBeadProbeMap : public morkProbeMap {
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseBeadProbeMap() only if open
+  virtual ~morkBeadProbeMap();  // assert that CloseBeadProbeMap() executed
+                                // earlier
+
+ public:  // morkMap construction & destruction
+  morkBeadProbeMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                   nsIMdbHeap* ioSlotHeap);
+  void CloseBeadProbeMap(morkEnv* ev);  // called by CloseMorkNode();
+
+ public:  // dynamic type identification
+  mork_bool IsBeadProbeMap() const {
+    return IsNode() && mNode_Derived == morkDerived_kBeadProbeMap;
+  }
+  // } ===== end morkNode methods =====
+
+  // { ===== begin morkProbeMap methods =====
+ public:
+  virtual mork_test  // hit(a,b) implies hash(a) == hash(b)
+  MapTest(morkEnv* ev, const void* inMapKey,
+          const void* inAppKey) const override;
+
+  virtual mork_u4  // hit(a,b) implies hash(a) == hash(b)
+  MapHash(morkEnv* ev, const void* inAppKey) const override;
+
+  virtual mork_u4 ProbeMapHashMapKey(morkEnv* ev,
+                                     const void* inMapKey) const override;
+
+  // virtual mork_bool ProbeMapIsKeyNil(morkEnv* ev, void* ioMapKey);
+
+  // virtual void ProbeMapClearKey(morkEnv* ev, // put 'nil' into all keys
+  // inside map
+  //   void* ioMapKey, mork_count inKeyCount); // array of keys inside map
+
+  // virtual void ProbeMapPushIn(morkEnv* ev, // move (key,val) into the map
+  //   const void* inAppKey, const void* inAppVal, // (key,val) outside map
+  //   void* outMapKey, void* outMapVal); // (key,val) inside map
+
+  // virtual void ProbeMapPullOut(morkEnv* ev, // move (key,val) out from the
+  // map
+  //   const void* inMapKey, const void* inMapVal, // (key,val) inside map
+  //   void* outAppKey, void* outAppVal) const; // (key,val) outside map
+  // } ===== end morkProbeMap methods =====
+
+ public:  // other map methods
+  mork_bool AddBead(morkEnv* ev, morkBead* ioBead);
+  // the AddBead() boolean return equals ev->Good().
+
+  morkBead* GetBead(morkEnv* ev, mork_color inColor);
+  // Note the returned bead does NOT have an increase in refcount for this.
+
+  mork_num CutAllBeads(morkEnv* ev);
+  // CutAllBeads() releases all the referenced bead values.
+};
+
+// Typesafe cursor over a morkBeadProbeMap; delegates to the generic
+// IterFirstKey/IterNextKey/IterHereKey probe-map iteration.
+class morkBeadProbeMapIter
+    : public morkProbeMapIter {  // typesafe wrapper class
+
+ public:
+  morkBeadProbeMapIter(morkEnv* ev, morkBeadProbeMap* ioMap)
+      : morkProbeMapIter(ev, ioMap) {}
+
+  morkBeadProbeMapIter() : morkProbeMapIter() {}
+  void InitBeadProbeMapIter(morkEnv* ev, morkBeadProbeMap* ioMap) {
+    this->InitProbeMapIter(ev, ioMap);
+  }
+
+  morkBead* FirstBead(morkEnv* ev) { return (morkBead*)this->IterFirstKey(ev); }
+
+  morkBead* NextBead(morkEnv* ev) { return (morkBead*)this->IterNextKey(ev); }
+
+  morkBead* HereBead(morkEnv* ev) { return (morkBead*)this->IterHereKey(ev); }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKBEAD_ */
diff --git a/comm/mailnews/db/mork/morkBlob.cpp b/comm/mailnews/db/mork/morkBlob.cpp
new file mode 100644
index 0000000000..d0fdf104ff
--- /dev/null
+++ b/comm/mailnews/db/mork/morkBlob.cpp
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Shared error reporters: each records a descriptive error on the env.
+/*static*/ void morkBuf::NilBufBodyError(morkEnv* ev) {
+  ev->NewError("nil mBuf_Body");
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*static*/ void morkBlob::BlobFillOverSizeError(morkEnv* ev) {
+  ev->NewError("mBuf_Fill > mBlob_Size");
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Ensure the blob's capacity is at least inNewSize bytes, reallocating from
+// ioHeap and copying existing content. Returns ev->Good(); mBuf_Fill is
+// clamped to mBlob_Size if it was inconsistent.
+mork_bool morkBlob::GrowBlob(morkEnv* ev, nsIMdbHeap* ioHeap,
+                             mork_size inNewSize) {
+  if (ioHeap) {
+    if (!mBuf_Body)  // no body? implies zero sized?
+      mBlob_Size = 0;
+
+    if (mBuf_Fill > mBlob_Size)  // fill more than size?
+    {
+      ev->NewWarning("mBuf_Fill > mBlob_Size");
+      mBuf_Fill = mBlob_Size;
+    }
+
+    if (inNewSize > mBlob_Size)  // need to allocate larger blob?
+    {
+      mork_u1* body = 0;
+      ioHeap->Alloc(ev->AsMdbEnv(), inNewSize, (void**)&body);
+      // NOTE(review): if Alloc succeeds but ev is already bad, the fresh
+      // `body` is never installed nor freed — possible leak; confirm Alloc's
+      // contract on a bad env.
+      if (body && ev->Good()) {
+        void* oldBody = mBuf_Body;
+        if (mBlob_Size)  // any old content to transfer?
+          MORK_MEMCPY(body, oldBody, mBlob_Size);
+
+        mBlob_Size = inNewSize;  // install new size
+        mBuf_Body = body;        // install new body
+
+        if (oldBody)  // need to free old buffer body?
+          ioHeap->Free(ev->AsMdbEnv(), oldBody);
+      }
+    }
+  } else
+    ev->NilPointerError();
+
+  // Post-condition check: capacity must now cover the requested size.
+  if (ev->Good() && mBlob_Size < inNewSize)
+    ev->NewError("mBlob_Size < inNewSize");
+
+  return ev->Good();
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Construct an empty coil bound to ioHeap; all buffer slots start zeroed.
+// A nil heap is reported but the object is still left in a safe empty state.
+morkCoil::morkCoil(morkEnv* ev, nsIMdbHeap* ioHeap) {
+  mBuf_Body = 0;
+  mBuf_Fill = 0;
+  mBlob_Size = 0;
+  mText_Form = 0;
+  mCoil_Heap = ioHeap;
+  if (!ioHeap) ev->NilPointerError();
+}
+
+// Release the body back to the owning heap and detach both pointers first,
+// so a re-entrant or repeated close cannot double-free.
+void morkCoil::CloseCoil(morkEnv* ev) {
+  void* body = mBuf_Body;
+  nsIMdbHeap* heap = mCoil_Heap;
+
+  mBuf_Body = 0;
+  mCoil_Heap = 0;
+
+  if (body && heap) {
+    heap->Free(ev->AsMdbEnv(), body);
+  }
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkBlob.h b/comm/mailnews/db/mork/morkBlob.h
new file mode 100644
index 0000000000..8ce923d232
--- /dev/null
+++ b/comm/mailnews/db/mork/morkBlob.h
@@ -0,0 +1,140 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKBLOB_
+#define _MORKBLOB_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*| Buf: the minimum needed to describe location and content length.
+**| This is typically only enough to read from this buffer, since
+**| one cannot write effectively without knowing the size of a buf.
+|*/
+// Minimal buffer descriptor: a body pointer plus a logical fill length.
+// NOTE(review): the default ctor leaves both members uninitialized —
+// callers presumably assign or zero them before use; confirm at call sites.
+class morkBuf {  // subset of nsIMdbYarn slots
+ public:
+  void* mBuf_Body;      // space for holding any binary content
+  mork_fill mBuf_Fill;  // logical content in Buf in bytes
+
+ public:
+  morkBuf() {}
+  morkBuf(const void* ioBuf, mork_fill inFill)
+      : mBuf_Body((void*)ioBuf), mBuf_Fill(inFill) {}
+
+  void ClearBufFill() { mBuf_Fill = 0; }
+
+  static void NilBufBodyError(morkEnv* ev);
+
+ private:  // copying is not allowed
+  morkBuf(const morkBuf& other);
+  morkBuf& operator=(const morkBuf& other);
+};
+
+/*| Blob: a buffer with an associated size, to increase known buf info
+**| to include max capacity in addition to buf location and content.
+**| This form factor allows us to allocate a vector of such blobs,
+**| which can share the same managing heap stored elsewhere, and that
+**| is why we don't include a pointer to a heap in this blob class.
+|*/
+// A morkBuf plus a physical capacity (mBlob_Size >= mBuf_Fill); growable
+// via GrowBlob() with an externally supplied heap.
+class morkBlob : public morkBuf {  // greater subset of nsIMdbYarn slots
+
+  // void*     mBuf_Body; // space for holding any binary content
+  // mdb_fill  mBuf_Fill; // logical content in Buf in bytes
+ public:
+  mork_size mBlob_Size;  // physical size of Buf in bytes
+
+ public:
+  morkBlob() {}
+  morkBlob(const void* ioBuf, mork_fill inFill, mork_size inSize)
+      : morkBuf(ioBuf, inFill), mBlob_Size(inSize) {}
+
+  static void BlobFillOverSizeError(morkEnv* ev);
+
+ public:
+  mork_bool GrowBlob(morkEnv* ev, nsIMdbHeap* ioHeap, mork_size inNewSize);
+
+ private:  // copying is not allowed
+  morkBlob(const morkBlob& other);
+  morkBlob& operator=(const morkBlob& other);
+};
+
+/*| Text: a blob with an associated charset annotation, where the
+**| charset actually includes the general notion of typing, and not
+**| just a specification of character set alone; we want to permit
+**| arbitrary charset annotations for ad hoc binary types as well.
+**| (We avoid including a nsIMdbHeap pointer in morkText for the same
+**| reason morkBlob does: we want minimal size vectors of morkText.)
+|*/
+class morkText : public morkBlob {  // greater subset of nsIMdbYarn slots
+
+  // void*     mBuf_Body;  // space for holding any binary content
+  // mdb_fill  mBuf_Fill;  // logical content in Buf in bytes
+  // mdb_size  mBlob_Size; // physical size of Buf in bytes
+
+ public:
+  mork_cscode mText_Form;  // charset format encoding
+
+  morkText() {}
+
+ private:  // copying is not allowed
+  morkText(const morkText& other);
+  morkText& operator=(const morkText& other);
+};
+
+/*| Coil: a text with an associated nsIMdbHeap instance that provides
+**| all memory management for the space pointed to by mBuf_Body. (This
+**| was the hardest type to give a name in this small class hierarchy,
+**| because it's hard to characterize self-management of one's space.)
+**| A coil is a self-contained blob that knows how to grow itself as
+**| necessary to hold more content when necessary. Coil descends from
+**| morkText to include the mText_Form slot, even though this won't be
+**| needed always, because we are not as concerned about the overall
+**| size of this particular Coil object (if we were concerned about
+**| the size of an array of Coil instances, we would not bother with
+**| a separate heap pointer for each of them).
+**|
+**|| A coil makes a good medium in which to stream content as a sink,
+**| so we will have a subclass of morkSink called morkCoil that
+**| will stream bytes into this self-contained coil object. The name
+**| of this morkCoil class derives more from this intended usage than
+**| from anything else. The Mork code to parse db content will use
+**| coils with associated sinks to accumulate parsed strings.
+**|
+**|| Heap: this is the heap used for memory allocation. This instance
+**| is NOT refcounted, since this coil always assumes the heap is held
+**| through a reference elsewhere (for example, through the same object
+**| that contains or holds the coil itself. This lack of refcounting
+**| is consistent with the fact that morkCoil itself is not refcounted,
+**| and is not intended for use as a standalone object.
+|*/
+// Self-managing text blob: carries its own (non-refcounted) heap pointer so
+// it can grow itself; see the long design comment above.
+class morkCoil : public morkText {  // self-managing text blob object
+
+  // void*       mBuf_Body;  // space for holding any binary content
+  // mdb_fill    mBuf_Fill;  // logical content in Buf in bytes
+  // mdb_size    mBlob_Size; // physical size of Buf in bytes
+  // mdb_cscode  mText_Form; // charset format encoding
+ public:
+  nsIMdbHeap* mCoil_Heap;  // storage manager for mBuf_Body pointer
+
+ public:
+  morkCoil(morkEnv* ev, nsIMdbHeap* ioHeap);
+
+  void CloseCoil(morkEnv* ev);
+
+  // Grow using the coil's own heap (delegates to morkBlob::GrowBlob).
+  mork_bool GrowCoil(morkEnv* ev, mork_size inNewSize) {
+    return this->GrowBlob(ev, mCoil_Heap, inNewSize);
+  }
+
+ private:  // copying is not allowed
+  morkCoil(const morkCoil& other);
+  morkCoil& operator=(const morkCoil& other);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKBLOB_ */
diff --git a/comm/mailnews/db/mork/morkBuilder.cpp b/comm/mailnews/db/mork/morkBuilder.cpp
new file mode 100644
index 0000000000..4e96209929
--- /dev/null
+++ b/comm/mailnews/db/mork/morkBuilder.cpp
@@ -0,0 +1,892 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKPARSER_
+# include "morkParser.h"
+#endif
+
+#ifndef _MORKBUILDER_
+# include "morkBuilder.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+#ifndef _MORKATOM_
+# include "morkAtom.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Idempotent close for the builder node, same pattern as the other
+// morkNode subclasses in this change.
+/*public virtual*/ void morkBuilder::CloseMorkNode(
+    morkEnv* ev)  // CloseBuilder() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseBuilder(ev);
+    this->MarkShut();
+  }
+}
+
+// All referenced slots must have been released by CloseBuilder() already.
+/*public virtual*/
+morkBuilder::~morkBuilder()  // assert CloseBuilder() executed earlier
+{
+  MORK_ASSERT(mBuilder_Store == 0);
+  MORK_ASSERT(mBuilder_Row == 0);
+  MORK_ASSERT(mBuilder_Table == 0);
+  MORK_ASSERT(mBuilder_Cell == 0);
+  MORK_ASSERT(mBuilder_RowSpace == 0);
+  MORK_ASSERT(mBuilder_AtomSpace == 0);
+}
+
+// Construct a builder bound (via a weak ref) to ioStore. All parse-state
+// slots start cleared; row/atom scopes default to 'r'/'v' and the table
+// priority to morkPriority_kLo until the parsed content overrides them.
+/*public non-poly*/
+morkBuilder::morkBuilder(morkEnv* ev, const morkUsage& inUsage,
+                         nsIMdbHeap* ioHeap, morkStream* ioStream,
+                         mdb_count inBytesPerParseSegment,
+                         nsIMdbHeap* ioSlotHeap, morkStore* ioStore)
+
+    : morkParser(ev, inUsage, ioHeap, ioStream, inBytesPerParseSegment,
+                 ioSlotHeap)
+
+      ,
+      mBuilder_Store(0)
+
+      ,
+      mBuilder_Table(0),
+      mBuilder_Row(0),
+      mBuilder_Cell(0)
+
+      ,
+      mBuilder_RowSpace(0),
+      mBuilder_AtomSpace(0)
+
+      ,
+      mBuilder_OidAtomSpace(0),
+      mBuilder_ScopeAtomSpace(0)
+
+      ,
+      mBuilder_PortForm(0),
+      mBuilder_PortRowScope((mork_scope)'r'),
+      mBuilder_PortAtomScope((mork_scope)'v')
+
+      ,
+      mBuilder_TableForm(0),
+      mBuilder_TableRowScope((mork_scope)'r'),
+      mBuilder_TableAtomScope((mork_scope)'v'),
+      mBuilder_TableKind(0)
+
+      ,
+      mBuilder_TablePriority(morkPriority_kLo),
+      mBuilder_TableIsUnique(morkBool_kFalse),
+      mBuilder_TableIsVerbose(morkBool_kFalse),
+      mBuilder_TablePadByte(0)
+
+      ,
+      mBuilder_RowForm(0),
+      mBuilder_RowRowScope((mork_scope)'r'),
+      mBuilder_RowAtomScope((mork_scope)'v')
+
+      ,
+      mBuilder_CellForm(0),
+      mBuilder_CellAtomScope((mork_scope)'v')
+
+      ,
+      mBuilder_DictForm(0),
+      mBuilder_DictAtomScope((mork_scope)'v')
+
+      ,
+      mBuilder_MetaTokenSlot(0)
+
+      ,
+      mBuilder_DoCutRow(morkBool_kFalse),
+      mBuilder_DoCutCell(morkBool_kFalse),
+      mBuilder_CellsVecFill(0) {
+  if (ev->Good()) {
+    if (ioStore) {
+      // Weak ref only: the store owns the builder, not vice versa.
+      morkStore::SlotWeakStore(ioStore, ev, &mBuilder_Store);
+      if (ev->Good()) mNode_Derived = morkDerived_kBuilder;
+    } else
+      ev->NilPointerError();
+  }
+}
+
+// Release every slot the builder references (strong refs via the Slot*
+// helpers, bare cursor pointers simply nulled), then close the parser base.
+/*public non-poly*/ void morkBuilder::CloseBuilder(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    // These three are non-owning cursor pointers; just clear them.
+    mBuilder_Row = 0;
+    mBuilder_Cell = 0;
+    mBuilder_MetaTokenSlot = 0;
+
+    morkTable::SlotStrongTable((morkTable*)0, ev, &mBuilder_Table);
+    morkStore::SlotWeakStore((morkStore*)0, ev, &mBuilder_Store);
+
+    morkRowSpace::SlotStrongRowSpace((morkRowSpace*)0, ev, &mBuilder_RowSpace);
+
+    morkAtomSpace::SlotStrongAtomSpace((morkAtomSpace*)0, ev,
+                                       &mBuilder_AtomSpace);
+
+    morkAtomSpace::SlotStrongAtomSpace((morkAtomSpace*)0, ev,
+                                       &mBuilder_OidAtomSpace);
+
+    morkAtomSpace::SlotStrongAtomSpace((morkAtomSpace*)0, ev,
+                                       &mBuilder_ScopeAtomSpace);
+    this->CloseParser(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Shared error/warning reporters for builder preconditions.
+/*static*/ void morkBuilder::NonBuilderTypeError(morkEnv* ev) {
+  ev->NewError("non morkBuilder");
+}
+
+/*static*/ void morkBuilder::NilBuilderCellError(morkEnv* ev) {
+  ev->NewError("nil mBuilder_Cell");
+}
+
+/*static*/ void morkBuilder::NilBuilderRowError(morkEnv* ev) {
+  ev->NewError("nil mBuilder_Row");
+}
+
+/*static*/ void morkBuilder::NilBuilderTableError(morkEnv* ev) {
+  ev->NewError("nil mBuilder_Table");
+}
+
+/*static*/ void morkBuilder::NonColumnSpaceScopeError(morkEnv* ev) {
+  ev->NewError("column space != 'c'");
+}
+
+// Parser-glitch hook: currently records only a generic warning; the glitch
+// details and kind string are intentionally unused.
+void morkBuilder::LogGlitch(morkEnv* ev, const morkGlitch& inGlitch,
+                            const char* inKind) {
+  MORK_USED_2(inGlitch, inKind);
+  ev->NewWarning("parsing glitch");
+}
+
+/*virtual*/ void morkBuilder::MidToYarn(
+    morkEnv* ev,
+    const morkMid& inMid,  // typically an alias to concat with strings
+    mdbYarn* outYarn)
+// The parser might ask that some aliases be turned into yarns, so they
+// can be concatenated into longer blobs under some circumstances. This
+// is an alternative to using a long and complex callback for many parts
+// for a single cell value.
+{
+  // NOTE(review): mBuilder_Store is dereferenced without a nil check here,
+  // unlike OnNewGroup below — presumably the parser only calls this after
+  // successful construction; confirm.
+  mBuilder_Store->MidToYarn(ev, inMid, outYarn);
+}
+
+// Reset per-port state to defaults at the start of a new port (file).
+/*virtual*/ void morkBuilder::OnNewPort(morkEnv* ev, const morkPlace& inPlace)
+// mp:Start ::= OnNewPort mp:PortItem* OnPortEnd
+// mp:PortItem ::= mp:Content | mp:Group | OnPortGlitch
+// mp:Content ::= mp:PortRow | mp:Dict | mp:Table | mp:Row
+{
+  MORK_USED_2(ev, inPlace);
+  // mParser_InPort = morkBool_kTrue;
+  mBuilder_PortForm = 0;
+  mBuilder_PortRowScope = (mork_scope)'r';
+  mBuilder_PortAtomScope = (mork_scope)'v';
+}
+
+/*virtual*/ void morkBuilder::OnPortGlitch(morkEnv* ev,
+ const morkGlitch& inGlitch) {
+ this->LogGlitch(ev, inGlitch, "port");
+}
+
+/*virtual*/ void morkBuilder::OnPortEnd(morkEnv* ev, const morkSpan& inSpan)
+// mp:Start ::= OnNewPort mp:PortItem* OnPortEnd
+{
+ MORK_USED_2(ev, inSpan);
+ // ev->StubMethodOnlyError();
+ // nothing to do?
+ // mParser_InPort = morkBool_kFalse;
+}
+
+/*virtual*/ void morkBuilder::OnNewGroup(morkEnv* ev, const morkPlace& inPlace,
+                                         mork_gid inGid) {
+  MORK_USED_1(inPlace);
+  mParser_InGroup = morkBool_kTrue;
+  mork_pos startPos = inPlace.mPlace_Pos;
+
+  morkStore* store = mBuilder_Store;
+  if (store) {
+    // Keep the store's next commit-group id strictly beyond any gid seen.
+    if (inGid >= store->mStore_CommitGroupIdentity)
+      store->mStore_CommitGroupIdentity = inGid + 1;
+
+    // Remember the file positions of the first two commit groups seen.
+    if (!store->mStore_FirstCommitGroupPos)
+      store->mStore_FirstCommitGroupPos = startPos;
+    else if (!store->mStore_SecondCommitGroupPos)
+      store->mStore_SecondCommitGroupPos = startPos;
+  }
+}
+
+/*virtual*/ void morkBuilder::OnGroupGlitch(morkEnv* ev,
+                                            const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "group");
+}
+
+// Successful commit-group end: intentionally a no-op here.
+/*virtual*/ void morkBuilder::OnGroupCommitEnd(morkEnv* ev,
+                                               const morkSpan& inSpan) {
+  MORK_USED_2(ev, inSpan);
+  // mParser_InGroup = morkBool_kFalse;
+  // ev->StubMethodOnlyError();
+}
+
+// Aborted group end: unimplemented — records a stub-method error.
+/*virtual*/ void morkBuilder::OnGroupAbortEnd(morkEnv* ev,
+                                              const morkSpan& inSpan) {
+  MORK_USED_1(inSpan);
+  // mParser_InGroup = morkBool_kFalse;
+  ev->StubMethodOnlyError();
+}
+
+// Port-row callbacks are unimplemented stubs (except glitch logging).
+/*virtual*/ void morkBuilder::OnNewPortRow(morkEnv* ev,
+                                           const morkPlace& inPlace,
+                                           const morkMid& inMid,
+                                           mork_change inChange) {
+  MORK_USED_3(inMid, inPlace, inChange);
+  // mParser_InPortRow = morkBool_kTrue;
+  ev->StubMethodOnlyError();
+}
+
+/*virtual*/ void morkBuilder::OnPortRowGlitch(morkEnv* ev,
+                                              const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "port row");
+}
+
+/*virtual*/ void morkBuilder::OnPortRowEnd(morkEnv* ev,
+                                           const morkSpan& inSpan) {
+  MORK_USED_1(inSpan);
+  // mParser_InPortRow = morkBool_kFalse;
+  ev->StubMethodOnlyError();
+}
+
+/*virtual*/ void morkBuilder::OnNewTable(morkEnv* ev, const morkPlace& inPlace,
+                                         const morkMid& inMid,
+                                         mork_bool inCutAllRows)
+// mp:Table ::= OnNewTable mp:TableItem* OnTableEnd
+// mp:TableItem ::= mp:Row | mp:MetaTable | OnTableGlitch
+// mp:MetaTable ::= OnNewMeta mp:MetaItem* mp:Row OnMetaEnd
+// mp:Meta ::= OnNewMeta mp:MetaItem* OnMetaEnd
+// mp:MetaItem ::= mp:Cell | OnMetaGlitch
+{
+  MORK_USED_1(inPlace);
+  // mParser_InTable = morkBool_kTrue;
+  // Table-level defaults start from the port-level defaults; the table's
+  // kind is unknown until a meta cell supplies it (checked in OnTableEnd).
+  mBuilder_TableForm = mBuilder_PortForm;
+  mBuilder_TableRowScope = mBuilder_PortRowScope;
+  mBuilder_TableAtomScope = mBuilder_PortAtomScope;
+  mBuilder_TableKind = morkStore_kNoneToken;
+
+  mBuilder_TablePriority = morkPriority_kLo;
+  mBuilder_TableIsUnique = morkBool_kFalse;
+  mBuilder_TableIsVerbose = morkBool_kFalse;
+
+  // Resolve (find or create) the table and hold a strong ref while building.
+  morkTable* table = mBuilder_Store->MidToTable(ev, inMid);
+  morkTable::SlotStrongTable(table, ev, &mBuilder_Table);
+  if (table) {
+    // An existing table dictates the row scope for its rows.
+    if (table->mTable_RowSpace)
+      mBuilder_TableRowScope = table->mTable_RowSpace->SpaceScope();
+
+    // Minus/cut syntax on the table empties it before new rows arrive.
+    if (inCutAllRows) table->CutAllRows(ev);
+  }
+}
+
+/*virtual*/ void morkBuilder::OnTableGlitch(morkEnv* ev,
+                                            const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "table");
+}
+
+/*virtual*/ void morkBuilder::OnTableEnd(morkEnv* ev, const morkSpan& inSpan)
+// mp:Table ::= OnNewTable mp:TableItem* OnTableEnd
+{
+  MORK_USED_1(inSpan);
+  // mParser_InTable = morkBool_kFalse;
+  if (mBuilder_Table) {
+    // Flush accumulated meta attributes onto the table, then release
+    // the strong ref taken in OnNewTable.
+    mBuilder_Table->mTable_Priority = mBuilder_TablePriority;
+
+    if (mBuilder_TableIsUnique) mBuilder_Table->SetTableUnique();
+
+    if (mBuilder_TableIsVerbose) mBuilder_Table->SetTableVerbose();
+
+    morkTable::SlotStrongTable((morkTable*)0, ev, &mBuilder_Table);
+  } else
+    this->NilBuilderTableError(ev);
+
+  mBuilder_Row = 0;
+  mBuilder_Cell = 0;
+
+  // Reset table meta state for the next table.
+  mBuilder_TablePriority = morkPriority_kLo;
+  mBuilder_TableIsUnique = morkBool_kFalse;
+  mBuilder_TableIsVerbose = morkBool_kFalse;
+
+  // Every table is expected to have declared a kind via a meta cell.
+  if (mBuilder_TableKind == morkStore_kNoneToken)
+    ev->NewError("missing table kind");
+
+  // Fall back to port-level atom scope at every nesting level.
+  mBuilder_CellAtomScope = mBuilder_RowAtomScope = mBuilder_TableAtomScope =
+      mBuilder_PortAtomScope;
+
+  mBuilder_DoCutCell = morkBool_kFalse;
+  mBuilder_DoCutRow = morkBool_kFalse;
+}
+
+/*virtual*/ void morkBuilder::OnNewMeta(morkEnv* ev, const morkPlace& inPlace)
+// mp:Meta ::= OnNewMeta mp:MetaItem* OnMetaEnd
+// mp:MetaItem ::= mp:Cell | OnMetaGlitch
+// mp:Cell ::= OnMinusCell? OnNewCell mp:CellItem? OnCellEnd
+// mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+// mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+{
+  // No state change needed here; meta-ness is tracked by the base parser
+  // (mParser_InMeta) and consulted in OnNewCell / OnValue / OnValueMid.
+  MORK_USED_2(ev, inPlace);
+  // mParser_InMeta = morkBool_kTrue;
+}
+
+/*virtual*/ void morkBuilder::OnMetaGlitch(morkEnv* ev,
+                                           const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "meta");
+}
+
+/*virtual*/ void morkBuilder::OnMetaEnd(morkEnv* ev, const morkSpan& inSpan)
+// mp:Meta ::= OnNewMeta mp:MetaItem* OnMetaEnd
+{
+  MORK_USED_2(ev, inSpan);
+  // mParser_InMeta = morkBool_kFalse;
+}
+
+// A minus before a row means the upcoming row should be cut from its table.
+/*virtual*/ void morkBuilder::OnMinusRow(morkEnv* ev) {
+  MORK_USED_1(ev);
+  mBuilder_DoCutRow = morkBool_kTrue;
+}
+
+/*virtual*/ void morkBuilder::OnNewRow(morkEnv* ev, const morkPlace& inPlace,
+                                       const morkMid& inMid,
+                                       mork_bool inCutAllCols)
+// mp:Table ::= OnNewTable mp:TableItem* OnTableEnd
+// mp:TableItem ::= mp:Row | mp:MetaTable | OnTableGlitch
+// mp:MetaTable ::= OnNewMeta mp:MetaItem* mp:Row OnMetaEnd
+// mp:Row ::= OnMinusRow? OnNewRow mp:RowItem* OnRowEnd
+// mp:RowItem ::= mp:Cell | mp:Meta | OnRowGlitch
+// mp:Cell ::= OnMinusCell? OnNewCell mp:CellItem? OnCellEnd
+// mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+// mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+{
+  MORK_USED_1(inPlace);
+  // mParser_InRow = morkBool_kTrue;
+
+  // A new row inherits the current table-level defaults.
+  mBuilder_CellForm = mBuilder_RowForm = mBuilder_TableForm;
+  mBuilder_CellAtomScope = mBuilder_RowAtomScope = mBuilder_TableAtomScope;
+  mBuilder_RowRowScope = mBuilder_TableRowScope;
+  morkStore* store = mBuilder_Store;
+
+  // If the mid names no scope (neither by buf nor by oid scope), default
+  // it to the current row scope before resolving the row in the store.
+  if (!inMid.mMid_Buf && !inMid.mMid_Oid.mOid_Scope) {
+    morkMid mid(inMid);
+    mid.mMid_Oid.mOid_Scope = mBuilder_RowRowScope;
+    mBuilder_Row = store->MidToRow(ev, mid);
+  } else {
+    mBuilder_Row = store->MidToRow(ev, inMid);
+  }
+  morkRow* row = mBuilder_Row;
+  if (row && inCutAllCols) {
+    // Minus/cut syntax on the row empties its columns first.
+    row->CutAllColumns(ev);
+  }
+
+  morkTable* table = mBuilder_Table;
+  if (table) {
+    if (row) {
+      if (mParser_InMeta) {
+        // Inside a meta table this row becomes the table's single meta row.
+        morkRow* metaRow = table->mTable_MetaRow;
+        if (!metaRow) {
+          table->mTable_MetaRow = row;
+          table->mTable_MetaRowOid = row->mRow_Oid;
+          row->AddRowGcUse(ev);
+        } else if (metaRow != row)  // not identical?
+          ev->NewError("duplicate table meta row");
+      } else {
+        // Ordinary member row: minus syntax cuts it, otherwise add it.
+        if (mBuilder_DoCutRow)
+          table->CutRow(ev, row);
+        else
+          table->AddRow(ev, row);
+      }
+    }
+  }
+  // else // it is now okay to have rows outside a table:
+  // this->NilBuilderTableError(ev);
+
+  mBuilder_DoCutRow = morkBool_kFalse;
+}
+
+// Reposition the current row inside the current table (ignored for meta rows).
+/*virtual*/ void morkBuilder::OnRowPos(morkEnv* ev, mork_pos inRowPos) {
+  if (mBuilder_Row && mBuilder_Table && !mParser_InMeta) {
+    mork_pos hintFromPos = 0;  // best hint when we don't know position
+    mBuilder_Table->MoveRow(ev, mBuilder_Row, hintFromPos, inRowPos);
+  }
+}
+
+/*virtual*/ void morkBuilder::OnRowGlitch(morkEnv* ev,
+                                          const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "row");
+}
+
+// Move the cells accumulated in mBuilder_CellsVec into the current row,
+// then clear the vector's atom pointers (SetAtom with nil releases each
+// cell's atom use — see morkCell::SetAtom) and reset the fill count.
+void morkBuilder::FlushBuilderCells(morkEnv* ev) {
+  if (mBuilder_Row) {
+    morkPool* pool = mBuilder_Store->StorePool();
+    morkCell* cells = mBuilder_CellsVec;
+    mork_fill fill = mBuilder_CellsVecFill;
+    mBuilder_Row->TakeCells(ev, cells, fill, mBuilder_Store);
+
+    morkCell* end = cells + fill;
+    --cells;  // prepare for preincrement
+    while (++cells < end) {
+      if (cells->mCell_Atom) cells->SetAtom(ev, (morkAtom*)0, pool);
+    }
+    mBuilder_CellsVecFill = 0;
+  } else
+    this->NilBuilderRowError(ev);
+}
+
+/*virtual*/ void morkBuilder::OnRowEnd(morkEnv* ev, const morkSpan& inSpan)
+// mp:Row ::= OnMinusRow? OnNewRow mp:RowItem* OnRowEnd
+{
+  MORK_USED_1(inSpan);
+  // mParser_InRow = morkBool_kFalse;
+  // Flush any cells still buffered for this row, then reset row state.
+  if (mBuilder_Row) {
+    this->FlushBuilderCells(ev);
+  } else
+    this->NilBuilderRowError(ev);
+
+  mBuilder_Row = 0;
+  mBuilder_Cell = 0;
+
+  mBuilder_DoCutCell = morkBool_kFalse;
+  mBuilder_DoCutRow = morkBool_kFalse;
+}
+
+/*virtual*/ void morkBuilder::OnNewDict(morkEnv* ev, const morkPlace& inPlace)
+// mp:Dict ::= OnNewDict mp:DictItem* OnDictEnd
+// mp:DictItem ::= OnAlias | OnAliasGlitch | mp:Meta | OnDictGlitch
+{
+  MORK_USED_2(ev, inPlace);
+  // mParser_InDict = morkBool_kTrue;
+
+  // Dict-level defaults start from the port-level defaults.
+  mBuilder_CellForm = mBuilder_DictForm = mBuilder_PortForm;
+  mBuilder_CellAtomScope = mBuilder_DictAtomScope = mBuilder_PortAtomScope;
+}
+
+/*virtual*/ void morkBuilder::OnDictGlitch(morkEnv* ev,
+                                           const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "dict");
+}
+
+/*virtual*/ void morkBuilder::OnDictEnd(morkEnv* ev, const morkSpan& inSpan)
+// mp:Dict ::= OnNewDict mp:DictItem* OnDictEnd
+{
+  MORK_USED_2(ev, inSpan);
+  // mParser_InDict = morkBool_kFalse;
+
+  mBuilder_DictForm = 0;
+  mBuilder_DictAtomScope = 0;
+}
+
+// Register a dict alias (id -> value) in the store, tagged with the dict's
+// current atom scope and charset form. Aliases outside a dict are errors.
+/*virtual*/ void morkBuilder::OnAlias(morkEnv* ev, const morkSpan& inSpan,
+                                      const morkMid& inMid) {
+  MORK_USED_1(inSpan);
+  if (mParser_InDict) {
+    morkMid mid = inMid;  // local copy for modification
+    mid.mMid_Oid.mOid_Scope = mBuilder_DictAtomScope;
+    mBuilder_Store->AddAlias(ev, mid, mBuilder_DictForm);
+  } else
+    ev->NewError("alias not in dict");
+}
+
+/*virtual*/ void morkBuilder::OnAliasGlitch(morkEnv* ev,
+                                            const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "alias");
+}
+
+// Append one cell (column taken from inMid's oid id, with the given change)
+// to the builder's cell vector, flushing the vector to the current row first
+// when it is full. Returns the new cell, or nil on error.
+// NOTE(review): OnNewCell contains an inlined copy of this logic (its call
+// to AddBuilderCell is commented out there) — keep the two in sync.
+morkCell* morkBuilder::AddBuilderCell(morkEnv* ev, const morkMid& inMid,
+                                      mork_change inChange) {
+  morkCell* outCell = 0;
+  mork_column column = inMid.mMid_Oid.mOid_Id;
+
+  if (ev->Good()) {
+    if (mBuilder_CellsVecFill >= morkBuilder_kCellsVecSize)
+      this->FlushBuilderCells(ev);
+    if (ev->Good()) {
+      if (mBuilder_CellsVecFill < morkBuilder_kCellsVecSize) {
+        mork_fill indx = mBuilder_CellsVecFill++;
+        outCell = mBuilder_CellsVec + indx;
+        outCell->SetColumnAndChange(column, inChange);
+        outCell->mCell_Atom = 0;
+      } else
+        ev->NewError("out of builder cells");
+    }
+  }
+  return outCell;
+}
+
+// A minus before a cell means the upcoming column should be cut from the row.
+/*virtual*/ void morkBuilder::OnMinusCell(morkEnv* ev) {
+  MORK_USED_1(ev);
+  mBuilder_DoCutCell = morkBool_kTrue;
+}
+
+/*virtual*/ void morkBuilder::OnNewCell(morkEnv* ev, const morkPlace& inPlace,
+                                        const morkMid* inMid,
+                                        const morkBuf* inBuf)
+// Exactly one of inMid and inBuf is nil, and the other is non-nil.
+// When hex ID syntax is used for a column, then inMid is not nil, and
+// when a naked string names a column, then inBuf is not nil.
+
+// mp:Cell ::= OnMinusCell? OnNewCell mp:CellItem? OnCellEnd
+// mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+// mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+{
+  MORK_USED_1(inPlace);
+  // mParser_InCell = morkBool_kTrue;
+
+  // Consume any pending minus: this cell is either a cut or an add.
+  mork_change cellChange =
+      (mBuilder_DoCutCell) ? morkChange_kCut : morkChange_kAdd;
+
+  mBuilder_DoCutCell = morkBool_kFalse;
+
+  mBuilder_CellAtomScope = mBuilder_RowAtomScope;
+
+  mBuilder_Cell = 0;  // nil until determined for a row
+  morkStore* store = mBuilder_Store;
+  mork_scope scope = morkStore_kColumnSpaceScope;
+  morkMid tempMid;             // space for local and modifiable cell mid
+  morkMid* cellMid = &tempMid;  // default to local if inMid==0
+
+  // Normalize the column reference into cellMid with a resolved scope.
+  if (inMid)  // mid parameter is actually provided?
+  {
+    *cellMid = *inMid;  // bitwise copy for modifiable local mid
+
+    if (!cellMid->mMid_Oid.mOid_Scope) {
+      if (cellMid->mMid_Buf) {
+        // A named scope on a column mid is tokenized but also warned about,
+        // since column mids are expected to live in the column space.
+        scope = store->BufToToken(ev, cellMid->mMid_Buf);
+        cellMid->mMid_Buf = 0;  // don't do scope lookup again
+        ev->NewWarning("column mids need column scope");
+      }
+      cellMid->mMid_Oid.mOid_Scope = scope;
+    }
+  } else if (inBuf)  // buf points to naked column string name?
+  {
+    // Tokenize the naked column name into an id in the column space.
+    cellMid->ClearMid();
+    cellMid->mMid_Oid.mOid_Id = store->BufToToken(ev, inBuf);
+    cellMid->mMid_Oid.mOid_Scope = scope;  // kColumnSpaceScope
+  } else
+    ev->NilPointerError();  // either inMid or inBuf must be non-nil
+
+  mork_column column = cellMid->mMid_Oid.mOid_Id;
+
+  if (mBuilder_Row && ev->Good())  // this cell must be inside a row
+  {
+    // mBuilder_Cell = this->AddBuilderCell(ev, *cellMid, cellChange);
+
+    // Inlined equivalent of AddBuilderCell: flush when the vector is full,
+    // then claim the next slot for this cell.
+    if (mBuilder_CellsVecFill >= morkBuilder_kCellsVecSize)
+      this->FlushBuilderCells(ev);
+    if (ev->Good()) {
+      if (mBuilder_CellsVecFill < morkBuilder_kCellsVecSize) {
+        mork_fill ix = mBuilder_CellsVecFill++;
+        morkCell* cell = mBuilder_CellsVec + ix;
+        cell->SetColumnAndChange(column, cellChange);
+
+        cell->mCell_Atom = 0;
+        mBuilder_Cell = cell;
+      } else
+        ev->NewError("out of builder cells");
+    }
+  }
+
+  else if (mParser_InMeta && ev->Good())  // cell is in metainfo structure?
+  {
+    // Meta cells do not create real cells; instead they select which
+    // builder meta slot the upcoming value should be written into.
+    if (scope == morkStore_kColumnSpaceScope) {
+      if (mParser_InTable)  // metainfo for table?
+      {
+        if (column == morkStore_kKindColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_TableKind;
+        else if (column == morkStore_kStatusColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_TableStatus;
+        else if (column == morkStore_kRowScopeColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_TableRowScope;
+        else if (column == morkStore_kAtomScopeColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_TableAtomScope;
+        else if (column == morkStore_kFormColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_TableForm;
+      } else if (mParser_InDict)  // metainfo for dict?
+      {
+        if (column == morkStore_kAtomScopeColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_DictAtomScope;
+        else if (column == morkStore_kFormColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_DictForm;
+      } else if (mParser_InRow)  // metainfo for row?
+      {
+        if (column == morkStore_kAtomScopeColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_RowAtomScope;
+        else if (column == morkStore_kRowScopeColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_RowRowScope;
+        else if (column == morkStore_kFormColumn)
+          mBuilder_MetaTokenSlot = &mBuilder_RowForm;
+      }
+    } else
+      ev->NewWarning("expected column scope");
+  }
+}
+
+/*virtual*/ void morkBuilder::OnCellGlitch(morkEnv* ev,
+                                           const morkGlitch& inGlitch) {
+  this->LogGlitch(ev, inGlitch, "cell");
+}
+
+// Record the charset form for the current cell's upcoming value.
+// It is an error to see a form when no cell is being built.
+/*virtual*/ void morkBuilder::OnCellForm(morkEnv* ev,
+                                         mork_cscode inCharsetFormat) {
+  morkCell* cell = mBuilder_Cell;
+  if (cell) {
+    mBuilder_CellForm = inCharsetFormat;
+  } else
+    this->NilBuilderCellError(ev);
+}
+
+/*virtual*/ void morkBuilder::OnCellEnd(morkEnv* ev, const morkSpan& inSpan)
+// mp:Cell ::= OnMinusCell? OnNewCell mp:CellItem? OnCellEnd
+{
+  MORK_USED_2(ev, inSpan);
+  // mParser_InCell = morkBool_kFalse;
+
+  // Reset per-cell meta state; atom scope reverts to the row default.
+  mBuilder_MetaTokenSlot = 0;
+  mBuilder_CellAtomScope = mBuilder_RowAtomScope;
+}
+
+/*virtual*/ void morkBuilder::OnValue(morkEnv* ev, const morkSpan& inSpan,
+                                      const morkBuf& inBuf)
+// mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+// mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+{
+  MORK_USED_1(inSpan);
+  morkStore* store = mBuilder_Store;
+  morkCell* cell = mBuilder_Cell;
+  if (cell) {
+    // Ordinary cell value: wrap the raw bytes in a yarn (tagged with the
+    // current cell form) and intern it as an atom on the cell.
+    mdbYarn yarn;
+    yarn.mYarn_Buf = inBuf.mBuf_Body;
+    yarn.mYarn_Fill = yarn.mYarn_Size = inBuf.mBuf_Fill;
+    yarn.mYarn_More = 0;
+    yarn.mYarn_Form = mBuilder_CellForm;
+    yarn.mYarn_Grow = 0;
+    morkAtom* atom = store->YarnToAtom(ev, &yarn, true /* create */);
+    cell->SetAtom(ev, atom, store->StorePool());
+  } else if (mParser_InMeta) {
+    // Meta value: interpret the bytes for whichever meta slot OnNewCell
+    // selected (if any).
+    mork_token* metaSlot = mBuilder_MetaTokenSlot;
+    if (metaSlot) {
+      if (metaSlot == &mBuilder_TableStatus)  // table status?
+      {
+        // Status is a tiny flag string: digits set table priority,
+        // 'u'/'U' marks unique, 'v'/'V' marks verbose; anything else
+        // is ignored. Later flags override earlier ones where relevant.
+        if (mParser_InTable && mBuilder_Table) {
+          const char* body = (const char*)inBuf.mBuf_Body;
+          mork_fill bufFill = inBuf.mBuf_Fill;
+          if (body && bufFill) {
+            const char* bodyEnd = body + bufFill;
+            while (body < bodyEnd) {
+              int c = *body++;
+              switch (c) {
+                case '0':
+                case '1':
+                case '2':
+                case '3':
+                case '4':
+                case '5':
+                case '6':
+                case '7':
+                case '8':
+                case '9':
+                  mBuilder_TablePriority = (mork_priority)(c - '0');
+                  break;
+
+                case 'u':
+                case 'U':
+                  mBuilder_TableIsUnique = morkBool_kTrue;
+                  break;
+
+                case 'v':
+                case 'V':
+                  mBuilder_TableIsVerbose = morkBool_kTrue;
+                  break;
+              }
+            }
+          }
+        }
+      } else {
+        // Any other meta slot takes a tokenized value; a table-kind token
+        // is additionally pushed straight onto the current table.
+        mork_token token = store->BufToToken(ev, &inBuf);
+        if (token) {
+          *metaSlot = token;
+          if (metaSlot == &mBuilder_TableKind)  // table kind?
+          {
+            if (mParser_InTable && mBuilder_Table)
+              mBuilder_Table->mTable_Kind = token;
+          }
+        }
+      }
+    }
+  } else
+    this->NilBuilderCellError(ev);
+}
+
+/*virtual*/ void morkBuilder::OnValueMid(morkEnv* ev, const morkSpan& inSpan,
+                                         const morkMid& inMid)
+// mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+// mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+{
+  MORK_USED_1(inSpan);
+  morkStore* store = mBuilder_Store;
+  morkCell* cell = mBuilder_Cell;
+
+  // Build a local mid whose oid is resolved to a definite scope: either
+  // via the store (named scope) or the current cell atom scope default.
+  morkMid valMid;                  // local mid for modifications
+  mdbOid* valOid = &valMid.mMid_Oid;  // ref to oid inside mid
+  *valOid = inMid.mMid_Oid;        // bitwise copy inMid's oid
+
+  if (inMid.mMid_Buf) {
+    if (!valOid->mOid_Scope) store->MidToOid(ev, inMid, valOid);
+  } else if (!valOid->mOid_Scope)
+    valOid->mOid_Scope = mBuilder_CellAtomScope;
+
+  if (cell) {
+    // Ordinary cell: the mid must name an atom already defined in a dict.
+    morkBookAtom* atom = store->MidToAtom(ev, valMid);
+    if (atom)
+      cell->SetAtom(ev, atom, store->StorePool());
+    else
+      ev->NewError("undefined cell value alias");
+  } else if (mParser_InMeta) {
+    // Meta value given as a mid: store its id into the selected meta slot.
+    mork_token* metaSlot = mBuilder_MetaTokenSlot;
+    if (metaSlot) {
+      mork_scope valScope = valOid->mOid_Scope;
+      if (!valScope || valScope == morkStore_kColumnSpaceScope) {
+        if (ev->Good() && valMid.HasSomeId()) {
+          *metaSlot = valOid->mOid_Id;
+          if (metaSlot == &mBuilder_TableKind)  // table kind?
+          {
+            if (mParser_InTable && mBuilder_Table) {
+              mBuilder_Table->mTable_Kind = valOid->mOid_Id;
+            } else
+              ev->NewWarning("mBuilder_TableKind not in table");
+          } else if (metaSlot == &mBuilder_TableStatus)  // table status?
+          {
+            if (mParser_InTable && mBuilder_Table) {
+              // $$ what here??
+            } else
+              ev->NewWarning("mBuilder_TableStatus not in table");
+          }
+        }
+      } else
+        this->NonColumnSpaceScopeError(ev);
+    }
+  } else
+    this->NilBuilderCellError(ev);
+}
+
+// Cell value that is a reference to another row: store a row-oid atom in
+// the cell and bump the referenced row's gc use count so it stays alive.
+/*virtual*/ void morkBuilder::OnRowMid(morkEnv* ev, const morkSpan& inSpan,
+                                       const morkMid& inMid)
+// mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+// mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+{
+  MORK_USED_1(inSpan);
+  morkStore* store = mBuilder_Store;
+  morkCell* cell = mBuilder_Cell;
+  if (cell) {
+    // Resolve the referenced row's oid, defaulting scope to the row scope.
+    mdbOid rowOid = inMid.mMid_Oid;
+    if (inMid.mMid_Buf) {
+      if (!rowOid.mOid_Scope) store->MidToOid(ev, inMid, &rowOid);
+    } else if (!rowOid.mOid_Scope)
+      rowOid.mOid_Scope = mBuilder_RowRowScope;
+
+    if (ev->Good()) {
+      morkPool* pool = store->StorePool();
+      morkAtom* atom = pool->NewRowOidAtom(ev, rowOid, &store->mStore_Zone);
+      if (atom) {
+        cell->SetAtom(ev, atom, pool);
+        morkRow* row = store->OidToRow(ev, &rowOid);
+        if (row)  // found or created such a row?
+          row->AddRowGcUse(ev);
+      }
+    }
+  } else
+    this->NilBuilderCellError(ev);
+}
+
+// Cell value that is a reference to another table: store a table-oid atom
+// in the cell and bump the referenced table's gc use count.
+/*virtual*/ void morkBuilder::OnTableMid(morkEnv* ev, const morkSpan& inSpan,
+                                         const morkMid& inMid)
+// mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+// mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+{
+  MORK_USED_1(inSpan);
+  morkStore* store = mBuilder_Store;
+  morkCell* cell = mBuilder_Cell;
+  if (cell) {
+    mdbOid tableOid = inMid.mMid_Oid;
+    if (inMid.mMid_Buf) {
+      if (!tableOid.mOid_Scope) store->MidToOid(ev, inMid, &tableOid);
+    } else if (!tableOid.mOid_Scope)
+      tableOid.mOid_Scope = mBuilder_RowRowScope;
+
+    if (ev->Good()) {
+      morkPool* pool = store->StorePool();
+      morkAtom* atom = pool->NewTableOidAtom(ev, tableOid, &store->mStore_Zone);
+      if (atom) {
+        cell->SetAtom(ev, atom, pool);
+        morkTable* table = store->OidToTable(ev, &tableOid,
+                                             /*optionalMetaRowOid*/ (mdbOid*)0);
+        if (table)  // found or created such a table?
+          table->AddTableGcUse(ev);
+      }
+    }
+  } else
+    this->NilBuilderCellError(ev);
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkBuilder.h b/comm/mailnews/db/mork/morkBuilder.h
new file mode 100644
index 0000000000..2c8b2e573f
--- /dev/null
+++ b/comm/mailnews/db/mork/morkBuilder.h
@@ -0,0 +1,303 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKBUILDER_
+#define _MORKBUILDER_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKPARSER_
+# include "morkParser.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*| kCellsVecSize: length of cell vector buffer inside morkBuilder
+|*/
+#define morkBuilder_kCellsVecSize 64
+
+#define morkBuilder_kDefaultBytesPerParseSegment 512 /* plausible to big */
+
+#define morkDerived_kBuilder /*i*/ 0x4275 /* ascii 'Bu' */
+
+// morkBuilder is the morkParser subclass that materializes parsed Mork
+// content into live store objects: it receives On*() callbacks from the
+// parser and builds tables, rows, cells, and dict aliases in mBuilder_Store,
+// batching cells in a fixed-size vector that is flushed into each row.
+class morkBuilder /*d*/ : public morkParser {
+  // public: // slots inherited from morkParser (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+  // nsIMdbHeap* mParser_Heap; // refcounted heap used for allocation
+  // morkStream* mParser_Stream; // refcounted input stream
+
+  // mork_u4 mParser_Tag; // must equal morkParser_kTag
+  // mork_count mParser_MoreGranularity; // constructor
+  // inBytesPerParseSegment
+
+  // mork_u4 mParser_State; // state where parser should resume
+
+  // after finding ends of group transactions, we can re-seek the start:
+  // mork_pos mParser_GroupContentStartPos; // start of this group
+
+  // mdbOid mParser_TableOid; // table oid if inside a table
+  // mdbOid mParser_RowOid; // row oid if inside a row
+  // mork_gid mParser_GroupId; // group ID if inside a group
+
+  // mork_bool mParser_InPort; // called OnNewPort but not OnPortEnd?
+  // mork_bool mParser_InDict; // called OnNewDict but not OnDictEnd?
+  // mork_bool mParser_InCell; // called OnNewCell but not OnCellEnd?
+  // mork_bool mParser_InMeta; // called OnNewMeta but not OnMetaEnd?
+
+  // morkMid mParser_Mid; // current alias being parsed
+  // note that mParser_Mid.mMid_Buf points at mParser_ScopeCoil below:
+
+  // blob coils allocated in mParser_Heap
+  // morkCoil mParser_ScopeCoil; // place to accumulate ID scope blobs
+  // morkCoil mParser_ValueCoil; // place to accumulate value blobs
+  // morkCoil mParser_ColumnCoil; // place to accumulate column blobs
+  // morkCoil mParser_StringCoil; // place to accumulate string blobs
+
+  // morkSpool mParser_ScopeSpool; // writes to mParser_ScopeCoil
+  // morkSpool mParser_ValueSpool; // writes to mParser_ValueCoil
+  // morkSpool mParser_ColumnSpool; // writes to mParser_ColumnCoil
+  // morkSpool mParser_StringSpool; // writes to mParser_StringCoil
+
+  // yarns allocated in mParser_Heap
+  // morkYarn mParser_MidYarn; // place to receive from MidToYarn()
+
+  // span showing current ongoing file position status:
+  // morkSpan mParser_PortSpan; // span of current db port file
+
+  // various spans denoting nested subspaces inside the file's port span:
+  // morkSpan mParser_GroupSpan; // span of current transaction group
+  // morkSpan mParser_DictSpan;
+  // morkSpan mParser_AliasSpan;
+  // morkSpan mParser_MetaDictSpan;
+  // morkSpan mParser_TableSpan;
+  // morkSpan mParser_MetaTableSpan;
+  // morkSpan mParser_RowSpan;
+  // morkSpan mParser_MetaRowSpan;
+  // morkSpan mParser_CellSpan;
+  // morkSpan mParser_ColumnSpan;
+  // morkSpan mParser_SlotSpan;
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected:  // protected morkBuilder members
+  // weak refs that do not prevent closure of referenced nodes:
+  morkStore* mBuilder_Store;  // weak ref to builder's store
+
+  // strong refs that do indeed prevent closure of referenced nodes:
+  morkTable* mBuilder_Table;  // current table being built (or nil)
+  morkRow* mBuilder_Row;      // current row being built (or nil)
+  morkCell* mBuilder_Cell;    // current cell within CellsVec (or nil)
+
+  morkRowSpace* mBuilder_RowSpace;    // space for mBuilder_CellRowScope
+  morkAtomSpace* mBuilder_AtomSpace;  // space for mBuilder_CellAtomScope
+
+  morkAtomSpace* mBuilder_OidAtomSpace;    // ground atom space for oids
+  morkAtomSpace* mBuilder_ScopeAtomSpace;  // ground atom space for scopes
+
+  // scoped object ids for current objects under construction:
+  mdbOid mBuilder_TableOid;  // full oid for current table
+  mdbOid mBuilder_RowOid;    // full oid for current row
+
+  // tokens that become set as the result of meta cells in port rows:
+  mork_cscode mBuilder_PortForm;    // default port charset format
+  mork_scope mBuilder_PortRowScope;   // port row scope
+  mork_scope mBuilder_PortAtomScope;  // port atom scope
+
+  // tokens that become set as the result of meta cells in meta tables:
+  mork_cscode mBuilder_TableForm;    // default table charset format
+  mork_scope mBuilder_TableRowScope;   // table row scope
+  mork_scope mBuilder_TableAtomScope;  // table atom scope
+  mork_kind mBuilder_TableKind;        // table kind
+
+  mork_token mBuilder_TableStatus;  // dummy: priority/unique/verbose
+
+  mork_priority mBuilder_TablePriority;  // table priority
+  mork_bool mBuilder_TableIsUnique;      // table uniqueness
+  mork_bool mBuilder_TableIsVerbose;     // table verboseness
+  mork_u1 mBuilder_TablePadByte;         // for u4 alignment
+
+  // tokens that become set as the result of meta cells in meta rows:
+  mork_cscode mBuilder_RowForm;    // default row charset format
+  mork_scope mBuilder_RowRowScope;   // row scope per row metainfo
+  mork_scope mBuilder_RowAtomScope;  // row atom scope
+
+  // meta tokens currently in force, driven by meta info slots above:
+  mork_cscode mBuilder_CellForm;    // cell charset format
+  mork_scope mBuilder_CellAtomScope;  // cell atom scope
+
+  mork_cscode mBuilder_DictForm;    // dict charset format
+  mork_scope mBuilder_DictAtomScope;  // dict atom scope
+
+  mork_token* mBuilder_MetaTokenSlot;  // pointer to some slot above
+
+  // If any of these 'cut' bools are true, it means a minus was seen in the
+  // Mork source text to indicate removal of content from some container.
+  // (Note there is no corresponding 'add' bool, since add is the default.)
+  // CutRow implies the current row should be cut from the table.
+  // CutCell implies the current column should be cut from the row.
+  mork_bool mBuilder_DoCutRow;   // row with kCut change
+  mork_bool mBuilder_DoCutCell;  // cell with kCut change
+  mork_u1 mBuilder_row_pad;      // pad to u4 alignment
+  mork_u1 mBuilder_cell_pad;     // pad to u4 alignment
+
+  morkCell mBuilder_CellsVec[morkBuilder_kCellsVecSize + 1];
+  mork_fill mBuilder_CellsVecFill;  // count used in CellsVec
+  // Note when mBuilder_CellsVecFill equals morkBuilder_kCellsVecSize, and
+  // another cell is added, this means all the cells in the vector above
+  // must be flushed to the current row being built to create more room.
+
+ protected:  // protected inlines
+  mork_bool CellVectorIsFull() const {
+    return (mBuilder_CellsVecFill == morkBuilder_kCellsVecSize);
+  }
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseBuilder() only if open
+  virtual ~morkBuilder();  // assert that CloseBuilder() executed earlier
+
+ public:  // morkBuilder construction & destruction
+  morkBuilder(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+              morkStream* ioStream,  // the readonly stream for input bytes
+              mdb_count inBytesPerParseSegment,  // target for ParseMore()
+              nsIMdbHeap* ioSlotHeap, morkStore* ioStore);
+
+  void CloseBuilder(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkBuilder(const morkBuilder& other);
+  morkBuilder& operator=(const morkBuilder& other);
+
+ public:  // dynamic type identification
+  mork_bool IsBuilder() const {
+    return IsNode() && mNode_Derived == morkDerived_kBuilder;
+  }
+  // } ===== end morkNode methods =====
+
+ public:  // errors
+  static void NonBuilderTypeError(morkEnv* ev);
+  static void NilBuilderCellError(morkEnv* ev);
+  static void NilBuilderRowError(morkEnv* ev);
+  static void NilBuilderTableError(morkEnv* ev);
+  static void NonColumnSpaceScopeError(morkEnv* ev);
+
+  void LogGlitch(morkEnv* ev, const morkGlitch& inGlitch, const char* inKind);
+
+ public:  // other builder methods
+  morkCell* AddBuilderCell(morkEnv* ev, const morkMid& inMid,
+                           mork_change inChange);
+
+  void FlushBuilderCells(morkEnv* ev);
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // in virtual morkParser methods, data flow subclass to parser
+  virtual void MidToYarn(
+      morkEnv* ev,
+      const morkMid& inMid,  // typically an alias to concat with strings
+      mdbYarn* outYarn) override;
+  // The parser might ask that some aliases be turned into yarns, so they
+  // can be concatenated into longer blobs under some circumstances. This
+  // is an alternative to using a long and complex callback for many parts
+  // for a single cell value.
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // out virtual morkParser methods, data flow parser to subclass
+  virtual void OnNewPort(morkEnv* ev, const morkPlace& inPlace) override;
+  virtual void OnPortGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+  virtual void OnPortEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnNewGroup(morkEnv* ev, const morkPlace& inPlace,
+                          mork_gid inGid) override;
+  virtual void OnGroupGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+  virtual void OnGroupCommitEnd(morkEnv* ev, const morkSpan& inSpan) override;
+  virtual void OnGroupAbortEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnNewPortRow(morkEnv* ev, const morkPlace& inPlace,
+                            const morkMid& inMid,
+                            mork_change inChange) override;
+  virtual void OnPortRowGlitch(morkEnv* ev,
+                               const morkGlitch& inGlitch) override;
+  virtual void OnPortRowEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnNewTable(morkEnv* ev, const morkPlace& inPlace,
+                          const morkMid& inMid,
+                          mork_bool inCutAllRows) override;
+  virtual void OnTableGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+  virtual void OnTableEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnNewMeta(morkEnv* ev, const morkPlace& inPlace) override;
+  virtual void OnMetaGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+  virtual void OnMetaEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnMinusRow(morkEnv* ev) override;
+  virtual void OnNewRow(morkEnv* ev, const morkPlace& inPlace,
+                        const morkMid& inMid, mork_bool inCutAllCols) override;
+  virtual void OnRowPos(morkEnv* ev, mork_pos inRowPos) override;
+  virtual void OnRowGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+  virtual void OnRowEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnNewDict(morkEnv* ev, const morkPlace& inPlace) override;
+  virtual void OnDictGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+  virtual void OnDictEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnAlias(morkEnv* ev, const morkSpan& inSpan,
+                       const morkMid& inMid) override;
+
+  virtual void OnAliasGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+
+  virtual void OnMinusCell(morkEnv* ev) override;
+  virtual void OnNewCell(morkEnv* ev, const morkPlace& inPlace,
+                         const morkMid* inMid, const morkBuf* inBuf) override;
+  // Exactly one of inMid and inBuf is nil, and the other is non-nil.
+  // When hex ID syntax is used for a column, then inMid is not nil, and
+  // when a naked string names a column, then inBuf is not nil.
+
+  virtual void OnCellGlitch(morkEnv* ev, const morkGlitch& inGlitch) override;
+  virtual void OnCellForm(morkEnv* ev, mork_cscode inCharsetFormat) override;
+  virtual void OnCellEnd(morkEnv* ev, const morkSpan& inSpan) override;
+
+  virtual void OnValue(morkEnv* ev, const morkSpan& inSpan,
+                       const morkBuf& inBuf) override;
+
+  virtual void OnValueMid(morkEnv* ev, const morkSpan& inSpan,
+                          const morkMid& inMid) override;
+
+  virtual void OnRowMid(morkEnv* ev, const morkSpan& inSpan,
+                        const morkMid& inMid) override;
+
+  virtual void OnTableMid(morkEnv* ev, const morkSpan& inSpan,
+                          const morkMid& inMid) override;
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // public non-poly morkBuilder methods
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakBuilder(morkBuilder* me, morkEnv* ev,
+                              morkBuilder** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongBuilder(morkBuilder* me, morkEnv* ev,
+                                morkBuilder** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKBUILDER_ */
diff --git a/comm/mailnews/db/mork/morkCell.cpp b/comm/mailnews/db/mork/morkCell.cpp
new file mode 100644
index 0000000000..5e0ca9128c
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCell.cpp
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+void morkCell::SetYarn(morkEnv* ev, const mdbYarn* inYarn, morkStore* ioStore) {
+ morkAtom* atom = ioStore->YarnToAtom(ev, inYarn, true /* create */);
+ if (atom) this->SetAtom(ev, atom, ioStore->StorePool()); // refcounts atom
+}
+
+void morkCell::GetYarn(morkEnv* ev, mdbYarn* outYarn) const {
+ MORK_USED_1(ev);
+ morkAtom::GetYarn(mCell_Atom, outYarn);
+}
+
+void morkCell::AliasYarn(morkEnv* ev, mdbYarn* outYarn) const {
+ MORK_USED_1(ev);
+ morkAtom::AliasYarn(mCell_Atom, outYarn);
+}
+
+void morkCell::SetCellClean() {
+ mork_column col = this->GetColumn();
+ this->SetColumnAndChange(col, morkChange_kNil);
+}
+
+void morkCell::SetCellDirty() {
+ mork_column col = this->GetColumn();
+ this->SetColumnAndChange(col, morkChange_kAdd);
+}
+
+void morkCell::SetAtom(morkEnv* ev, morkAtom* ioAtom, morkPool* ioPool)
+// SetAtom() "acquires" the new ioAtom if non-nil, by calling AddCellUse()
+// to increase the refcount, and puts ioAtom into mCell_Atom. If the old
+// atom in mCell_Atom is non-nil, then it is "released" first by a call to
+// CutCellUse(), and if the use count then becomes zero, then the old atom
+// is deallocated by returning it to the pool ioPool. (And this is
+// why ioPool is a parameter to this method.) Note that ioAtom can be nil
+// to cause the cell to refer to nothing, and the old atom in mCell_Atom
+// can also be nil, and all the atom refcounting is handled correctly.
+//
+// Note that if ioAtom was just created, it typically has a zero use count
+// before calling SetAtom(). But use count is one higher after SetAtom().
+{
+ morkAtom* oldAtom = mCell_Atom;
+ if (oldAtom != ioAtom) // ioAtom is not already installed in this cell?
+ {
+ if (oldAtom) {
+ mCell_Atom = 0;
+ if (oldAtom->CutCellUse(ev) == 0) {
+ // this was zapping atoms still in use - comment out until davidmc
+ // can figure out a better fix.
+ // if ( ioPool )
+ // {
+ // if ( oldAtom->IsBook() )
+ // ((morkBookAtom*) oldAtom)->CutBookAtomFromSpace(ev);
+
+ // ioPool->ZapAtom(ev, oldAtom);
+ // }
+ // else
+ // ev->NilPointerError();
+ }
+ }
+ if (ioAtom) ioAtom->AddCellUse(ev);
+
+ mCell_Atom = ioAtom;
+ }
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkCell.h b/comm/mailnews/db/mork/morkCell.h
new file mode 100644
index 0000000000..5b5194ccc4
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCell.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKCELL_
+#define _MORKCELL_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDelta_kShift 8 /* 8 bit shift */
+#define morkDelta_kChangeMask 0x0FF /* low 8 bit mask */
+#define morkDelta_kColumnMask (~(mork_column)morkDelta_kChangeMask)
+#define morkDelta_Init(self, cl, ch) \
+ ((self) = ((cl) << morkDelta_kShift) | (ch))
+#define morkDelta_Change(self) ((mork_change)((self)&morkDelta_kChangeMask))
+#define morkDelta_Column(self) ((self) >> morkDelta_kShift)
+
+class morkCell { // minimal cell format
+
+ public:
+ mork_delta mCell_Delta; // encoding of both column and change
+ morkAtom* mCell_Atom; // content in this cell
+
+ public:
+ morkCell() : mCell_Delta(0), mCell_Atom(0) {}
+
+ morkCell(const morkCell& c)
+ : mCell_Delta(c.mCell_Delta), mCell_Atom(c.mCell_Atom) {}
+
+ // note if ioAtom is non-nil, caller needs to call ioAtom->AddCellUse():
+ morkCell(mork_column inCol, mork_change inChange, morkAtom* ioAtom) {
+ morkDelta_Init(mCell_Delta, inCol, inChange);
+ mCell_Atom = ioAtom;
+ }
+
+ // note if ioAtom is non-nil, caller needs to call ioAtom->AddCellUse():
+ void Init(mork_column inCol, mork_change inChange, morkAtom* ioAtom) {
+ morkDelta_Init(mCell_Delta, inCol, inChange);
+ mCell_Atom = ioAtom;
+ }
+
+ mork_column GetColumn() const { return morkDelta_Column(mCell_Delta); }
+ mork_change GetChange() const { return morkDelta_Change(mCell_Delta); }
+
+ mork_bool IsCellClean() const { return GetChange() == morkChange_kNil; }
+ mork_bool IsCellDirty() const { return GetChange() != morkChange_kNil; }
+
+ void SetCellClean(); // set change to kNil
+ void SetCellDirty(); // set change to kAdd
+
+ void SetCellColumnDirty(mork_column inCol) {
+ this->SetColumnAndChange(inCol, morkChange_kAdd);
+ }
+
+ void SetCellColumnClean(mork_column inCol) {
+ this->SetColumnAndChange(inCol, morkChange_kNil);
+ }
+
+ void SetColumnAndChange(mork_column inCol, mork_change inChange) {
+ morkDelta_Init(mCell_Delta, inCol, inChange);
+ }
+
+ morkAtom* GetAtom() { return mCell_Atom; }
+
+ void SetAtom(morkEnv* ev, morkAtom* ioAtom, morkPool* ioPool);
+ // SetAtom() "acquires" the new ioAtom if non-nil, by calling AddCellUse()
+ // to increase the refcount, and puts ioAtom into mCell_Atom. If the old
+ // atom in mCell_Atom is non-nil, then it is "released" first by a call to
+ // CutCellUse(), and if the use count then becomes zero, then the old atom
+ // is deallocated by returning it to the pool ioPool. (And this is
+ // why ioPool is a parameter to this method.) Note that ioAtom can be nil
+ // to cause the cell to refer to nothing, and the old atom in mCell_Atom
+ // can also be nil, and all the atom refcounting is handled correctly.
+ //
+ // Note that if ioAtom was just created, it typically has a zero use count
+ // before calling SetAtom(). But use count is one higher after SetAtom().
+
+ void SetYarn(morkEnv* ev, const mdbYarn* inYarn, morkStore* ioStore);
+
+ void AliasYarn(morkEnv* ev, mdbYarn* outYarn) const;
+ void GetYarn(morkEnv* ev, mdbYarn* outYarn) const;
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKCELL_ */
diff --git a/comm/mailnews/db/mork/morkCellObject.cpp b/comm/mailnews/db/mork/morkCellObject.cpp
new file mode 100644
index 0000000000..7ad8402348
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCellObject.cpp
@@ -0,0 +1,453 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKCELLOBJECT_
+# include "morkCellObject.h"
+#endif
+
+#ifndef _MORKROWOBJECT_
+# include "morkRowObject.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkCellObject::CloseMorkNode(
+ morkEnv* ev) // CloseCellObject() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseCellObject(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkCellObject::~morkCellObject() // assert CloseCellObject() executed earlier
+{
+ CloseMorkNode(mMorkEnv);
+ MORK_ASSERT(mCellObject_Row == 0);
+}
+
+/*public non-poly*/
+morkCellObject::morkCellObject(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, morkRow* ioRow,
+ morkCell* ioCell, mork_column inCol,
+ mork_pos inPos)
+ : morkObject(ev, inUsage, ioHeap, morkColor_kNone, (morkHandle*)0),
+ mCellObject_RowObject(0),
+ mCellObject_Row(0),
+ mCellObject_Cell(0),
+ mCellObject_Col(inCol),
+ mCellObject_RowSeed(0),
+ mCellObject_Pos((mork_u2)inPos) {
+ if (ev->Good()) {
+ if (ioRow && ioCell) {
+ if (ioRow->IsRow()) {
+ morkStore* store = ioRow->GetRowSpaceStore(ev);
+ if (store) {
+ morkRowObject* rowObj = ioRow->AcquireRowObject(ev, store);
+ if (rowObj) {
+ mCellObject_Row = ioRow;
+ mCellObject_Cell = ioCell;
+ mCellObject_RowSeed = ioRow->mRow_Seed;
+
+ // morkRowObject::SlotStrongRowObject(rowObj, ev,
+ // &mCellObject_RowObject);
+
+ mCellObject_RowObject = rowObj; // assume control of strong ref
+ }
+ if (ev->Good()) mNode_Derived = morkDerived_kCellObject;
+ }
+ } else
+ ioRow->NonRowTypeError(ev);
+ } else
+ ev->NilPointerError();
+ }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkCellObject, morkObject, nsIMdbCell)
+
+/*public non-poly*/ void morkCellObject::CloseCellObject(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ NS_RELEASE(mCellObject_RowObject);
+ mCellObject_Row = 0;
+ mCellObject_Cell = 0;
+ mCellObject_RowSeed = 0;
+ this->CloseObject(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+mork_bool morkCellObject::ResyncWithRow(morkEnv* ev) {
+ morkRow* row = mCellObject_Row;
+ mork_pos pos = 0;
+ morkCell* cell = row->GetCell(ev, mCellObject_Col, &pos);
+ if (cell) {
+ mCellObject_Pos = (mork_u2)pos;
+ mCellObject_Cell = cell;
+ mCellObject_RowSeed = row->mRow_Seed;
+ } else {
+ mCellObject_Cell = 0;
+ this->MissingRowColumnError(ev);
+ }
+ return ev->Good();
+}
+
+morkAtom* morkCellObject::GetCellAtom(morkEnv* ev) const {
+ morkCell* cell = mCellObject_Cell;
+ if (cell)
+ return cell->GetAtom();
+ else
+ this->NilCellError(ev);
+
+ return (morkAtom*)0;
+}
+
+/*static*/ void morkCellObject::WrongRowObjectRowError(morkEnv* ev) {
+ ev->NewError("mCellObject_Row != mCellObject_RowObject->mRowObject_Row");
+}
+
+/*static*/ void morkCellObject::NilRowError(morkEnv* ev) {
+ ev->NewError("nil mCellObject_Row");
+}
+
+/*static*/ void morkCellObject::NilRowObjectError(morkEnv* ev) {
+ ev->NewError("nil mCellObject_RowObject");
+}
+
+/*static*/ void morkCellObject::NilCellError(morkEnv* ev) {
+ ev->NewError("nil mCellObject_Cell");
+}
+
+/*static*/ void morkCellObject::NonCellObjectTypeError(morkEnv* ev) {
+ ev->NewError("non morkCellObject");
+}
+
+/*static*/ void morkCellObject::MissingRowColumnError(morkEnv* ev) {
+ ev->NewError("mCellObject_Col not in mCellObject_Row");
+}
+
+nsIMdbCell* morkCellObject::AcquireCellHandle(morkEnv* ev) {
+ nsIMdbCell* outCell = this;
+ NS_ADDREF(outCell);
+ return outCell;
+}
+
+morkEnv* morkCellObject::CanUseCell(nsIMdbEnv* mev, mork_bool inMutable,
+ nsresult* outErr, morkCell** outCell) {
+ morkEnv* outEnv = 0;
+ morkCell* cell = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (IsCellObject()) {
+ if (IsMutable() || !inMutable) {
+ morkRowObject* rowObj = mCellObject_RowObject;
+ if (rowObj) {
+ morkRow* row = mCellObject_Row;
+ if (row) {
+ if (rowObj->mRowObject_Row == row) {
+ mork_u2 oldSeed = mCellObject_RowSeed;
+ if (row->mRow_Seed == oldSeed || ResyncWithRow(ev)) {
+ cell = mCellObject_Cell;
+ if (cell) {
+ outEnv = ev;
+ } else
+ NilCellError(ev);
+ }
+ } else
+ WrongRowObjectRowError(ev);
+ } else
+ NilRowError(ev);
+ } else
+ NilRowObjectError(ev);
+ } else
+ NonMutableNodeError(ev);
+ } else
+ NonCellObjectTypeError(ev);
+ }
+ *outErr = ev->AsErr();
+ MORK_ASSERT(outEnv);
+ *outCell = cell;
+
+ return outEnv;
+}
+
+// { ----- begin attribute methods -----
+NS_IMETHODIMP morkCellObject::SetBlob(nsIMdbEnv* /* mev */,
+ nsIMdbBlob* /* ioBlob */) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+} // reads inBlob slots
+
+// when inBlob is in the same suite, this might be fastest cell-to-cell
+
+NS_IMETHODIMP
+morkCellObject::ClearBlob( // make empty (so content has zero length)
+ nsIMdbEnv* /* mev */) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ // remember row->MaybeDirtySpaceStoreAndRow();
+}
+// clearing a yarn is like SetYarn() with empty yarn instance content
+
+NS_IMETHODIMP morkCellObject::GetBlobFill(nsIMdbEnv* mev, mdb_fill* outFill)
+// Same value that would be put into mYarn_Fill, if one called GetYarn()
+// with a yarn instance that had mYarn_Buf==nil and mYarn_Size==0.
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+} // size of blob
+
+NS_IMETHODIMP morkCellObject::SetYarn(nsIMdbEnv* mev, const mdbYarn* inYarn) {
+ nsresult outErr = NS_OK;
+ morkCell* cell = 0;
+ morkEnv* ev =
+ this->CanUseCell(mev, /*inMutable*/ morkBool_kTrue, &outErr, &cell);
+ if (ev) {
+ morkRow* row = mCellObject_Row;
+ if (row) {
+ morkStore* store = row->GetRowSpaceStore(ev);
+ if (store) {
+ cell->SetYarn(ev, inYarn, store);
+ if (row->IsRowClean() && store->mStore_CanDirty)
+ row->MaybeDirtySpaceStoreAndRow();
+ }
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+
+ return outErr;
+} // reads from yarn slots
+// make this text object contain content from the yarn's buffer
+
+NS_IMETHODIMP morkCellObject::GetYarn(nsIMdbEnv* mev, mdbYarn* outYarn) {
+ nsresult outErr = NS_OK;
+ morkCell* cell = 0;
+ morkEnv* ev =
+ this->CanUseCell(mev, /*inMutable*/ morkBool_kTrue, &outErr, &cell);
+ if (ev) {
+ morkAtom* atom = cell->GetAtom();
+ morkAtom::GetYarn(atom, outYarn);
+ outErr = ev->AsErr();
+ }
+
+ return outErr;
+} // writes some yarn slots
+// copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+
+NS_IMETHODIMP morkCellObject::AliasYarn(nsIMdbEnv* mev, mdbYarn* outYarn) {
+ nsresult outErr = NS_OK;
+ morkCell* cell = 0;
+ morkEnv* ev =
+ this->CanUseCell(mev, /*inMutable*/ morkBool_kTrue, &outErr, &cell);
+ if (ev) {
+ morkAtom* atom = cell->GetAtom();
+ morkAtom::AliasYarn(atom, outYarn);
+ outErr = ev->AsErr();
+ }
+
+ return outErr;
+} // writes ALL yarn slots
+
+// } ----- end attribute methods -----
+
+// } ===== end nsIMdbBlob methods =====
+
+// { ===== begin nsIMdbCell methods =====
+
+// { ----- begin attribute methods -----
+NS_IMETHODIMP morkCellObject::SetColumn(nsIMdbEnv* mev, mdb_column inColumn) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ // remember row->MaybeDirtySpaceStoreAndRow();
+}
+
+NS_IMETHODIMP morkCellObject::GetColumn(nsIMdbEnv* mev, mdb_column* outColumn) {
+ nsresult outErr = NS_OK;
+ mdb_column col = 0;
+ morkCell* cell = 0;
+ morkEnv* ev =
+ this->CanUseCell(mev, /*inMutable*/ morkBool_kTrue, &outErr, &cell);
+ if (ev) {
+ col = mCellObject_Col;
+ outErr = ev->AsErr();
+ }
+ if (outColumn) *outColumn = col;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkCellObject::GetCellInfo( // all cell metainfo except actual content
+ nsIMdbEnv* mev,
+ mdb_column* outColumn, // the column in the containing row
+ mdb_fill* outBlobFill, // the size of text content in bytes
+ mdbOid* outChildOid, // oid of possible row or table child
+ mdb_bool* outIsRowChild) // nonzero if child, and a row child
+// Checking all cell metainfo is a good way to avoid forcing a large cell
+// in to memory when you don't actually want to use the content.
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP morkCellObject::GetRow(
+ nsIMdbEnv* mev, // parent row for this cell
+ nsIMdbRow** acqRow) {
+ nsresult outErr = NS_OK;
+ nsIMdbRow* outRow = 0;
+ morkCell* cell = 0;
+ morkEnv* ev =
+ this->CanUseCell(mev, /*inMutable*/ morkBool_kTrue, &outErr, &cell);
+ if (ev) {
+ outRow = mCellObject_RowObject->AcquireRowHandle(ev);
+
+ outErr = ev->AsErr();
+ }
+ if (acqRow) *acqRow = outRow;
+ return outErr;
+}
+
+NS_IMETHODIMP morkCellObject::GetPort(nsIMdbEnv* mev, // port containing cell
+ nsIMdbPort** acqPort) {
+ nsresult outErr = NS_OK;
+ nsIMdbPort* outPort = 0;
+ morkCell* cell = 0;
+ morkEnv* ev =
+ this->CanUseCell(mev, /*inMutable*/ morkBool_kTrue, &outErr, &cell);
+ if (ev) {
+ if (mCellObject_Row) {
+ morkStore* store = mCellObject_Row->GetRowSpaceStore(ev);
+ if (store) outPort = store->AcquireStoreHandle(ev);
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqPort) *acqPort = outPort;
+ return outErr;
+}
+// } ----- end attribute methods -----
+
+// { ----- begin children methods -----
+NS_IMETHODIMP
+morkCellObject::HasAnyChild( // does cell have a child instead of text?
+ nsIMdbEnv* mev,
+ mdbOid* outOid, // out id of row or table (or unbound if no child)
+ mdb_bool* outIsRow) // nonzero if child is a row (rather than a table)
+{
+ nsresult outErr = NS_OK;
+ mdb_bool isRow = morkBool_kFalse;
+ outOid->mOid_Scope = 0;
+ outOid->mOid_Id = morkId_kMinusOne;
+ morkCell* cell = 0;
+ morkEnv* ev =
+ this->CanUseCell(mev, /*inMutable*/ morkBool_kTrue, &outErr, &cell);
+ if (ev) {
+ morkAtom* atom = GetCellAtom(ev);
+ if (atom) {
+ isRow = atom->IsRowOid();
+ if (isRow || atom->IsTableOid())
+ *outOid = ((morkOidAtom*)atom)->mOidAtom_Oid;
+ }
+
+ outErr = ev->AsErr();
+ }
+ if (outIsRow) *outIsRow = isRow;
+
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkCellObject::GetAnyChild( // access table of specific attribute
+ nsIMdbEnv* mev, // context
+ nsIMdbRow** acqRow, // child row (or null)
+ nsIMdbTable** acqTable) // child table (or null)
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkCellObject::SetChildRow( // access table of specific attribute
+ nsIMdbEnv* mev, // context
+ nsIMdbRow* ioRow) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+} // inRow must be bound inside this same db port
+
+NS_IMETHODIMP morkCellObject::GetChildRow( // access row of specific attribute
+ nsIMdbEnv* mev, // context
+ nsIMdbRow** acqRow) // acquire child row (or nil if no child)
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkCellObject::SetChildTable( // access table of specific attribute
+ nsIMdbEnv* mev, // context
+ nsIMdbTable* inTable) // table must be bound inside this same db port
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ // remember row->MaybeDirtySpaceStoreAndRow();
+}
+
+NS_IMETHODIMP
+morkCellObject::GetChildTable( // access table of specific attribute
+ nsIMdbEnv* mev, // context
+    nsIMdbTable** acqTable)  // acquire child table (or nil if no child)
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+// } ----- end children methods -----
+
+// } ===== end nsIMdbCell methods =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkCellObject.h b/comm/mailnews/db/mork/morkCellObject.h
new file mode 100644
index 0000000000..1ef8718ab2
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCellObject.h
@@ -0,0 +1,180 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKCELLOBJECT_
+#define _MORKCELLOBJECT_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kCellObject /*i*/ 0x634F /* ascii 'cO' */
+
+class morkCellObject : public morkObject,
+ public nsIMdbCell { // blob attribute in column scope
+
+ // public: // slots inherited from morkObject (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ // mork_color mBead_Color; // ID for this bead
+ // morkHandle* mObject_Handle; // weak ref to handle for this object
+
+ public: // state is public because the entire Mork system is private
+ NS_DECL_ISUPPORTS_INHERITED
+
+ morkRowObject* mCellObject_RowObject; // strong ref to row's object
+ morkRow* mCellObject_Row; // cell's row if still in row object
+ morkCell* mCellObject_Cell; // cell in row if rowseed matches
+ mork_column mCellObject_Col; // col of cell last living in pos
+ mork_u2 mCellObject_RowSeed; // copy of row's seed
+ mork_u2 mCellObject_Pos; // position of cell in row
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseCellObject() only if open
+
+ public: // morkCellObject construction & destruction
+ morkCellObject(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ morkRow* ioRow, morkCell* ioCell, mork_column inCol,
+ mork_pos inPos);
+ void CloseCellObject(morkEnv* ev); // called by CloseMorkNode();
+
+ NS_IMETHOD SetBlob(nsIMdbEnv* ev,
+ nsIMdbBlob* ioBlob) override; // reads inBlob slots
+ // when inBlob is in the same suite, this might be fastest cell-to-cell
+
+ NS_IMETHOD ClearBlob( // make empty (so content has zero length)
+ nsIMdbEnv* ev) override;
+ // clearing a yarn is like SetYarn() with empty yarn instance content
+
+ NS_IMETHOD GetBlobFill(nsIMdbEnv* ev,
+ mdb_fill* outFill) override; // size of blob
+ // Same value that would be put into mYarn_Fill, if one called GetYarn()
+ // with a yarn instance that had mYarn_Buf==nil and mYarn_Size==0.
+
+ NS_IMETHOD SetYarn(nsIMdbEnv* ev,
+ const mdbYarn* inYarn) override; // reads from yarn slots
+ // make this text object contain content from the yarn's buffer
+
+ NS_IMETHOD GetYarn(nsIMdbEnv* ev,
+ mdbYarn* outYarn) override; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+
+ NS_IMETHOD AliasYarn(nsIMdbEnv* ev,
+ mdbYarn* outYarn) override; // writes ALL yarn slots
+ NS_IMETHOD SetColumn(nsIMdbEnv* ev, mdb_column inColumn) override;
+ NS_IMETHOD GetColumn(nsIMdbEnv* ev, mdb_column* outColumn) override;
+
+ NS_IMETHOD GetCellInfo( // all cell metainfo except actual content
+ nsIMdbEnv* ev,
+ mdb_column* outColumn, // the column in the containing row
+ mdb_fill* outBlobFill, // the size of text content in bytes
+ mdbOid* outChildOid, // oid of possible row or table child
+ mdb_bool* outIsRowChild) override; // nonzero if child, and a row child
+
+ // Checking all cell metainfo is a good way to avoid forcing a large cell
+ // in to memory when you don't actually want to use the content.
+
+ NS_IMETHOD GetRow(nsIMdbEnv* ev, // parent row for this cell
+ nsIMdbRow** acqRow) override;
+ NS_IMETHOD GetPort(nsIMdbEnv* ev, // port containing cell
+ nsIMdbPort** acqPort) override;
+ // } ----- end attribute methods -----
+
+ // { ----- begin children methods -----
+ NS_IMETHOD HasAnyChild( // does cell have a child instead of text?
+ nsIMdbEnv* ev,
+ mdbOid* outOid, // out id of row or table (or unbound if no child)
+ mdb_bool* outIsRow)
+ override; // nonzero if child is a row (rather than a table)
+
+ NS_IMETHOD GetAnyChild( // access table of specific attribute
+ nsIMdbEnv* ev, // context
+ nsIMdbRow** acqRow, // child row (or null)
+ nsIMdbTable** acqTable) override; // child table (or null)
+
+ NS_IMETHOD SetChildRow( // access table of specific attribute
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow)
+ override; // inRow must be bound inside this same db port
+
+ NS_IMETHOD GetChildRow( // access row of specific attribute
+ nsIMdbEnv* ev, // context
+ nsIMdbRow** acqRow) override; // acquire child row (or nil if no child)
+
+ NS_IMETHOD SetChildTable( // access table of specific attribute
+ nsIMdbEnv* ev, // context
+ nsIMdbTable* inTable)
+ override; // table must be bound inside this same db port
+
+ NS_IMETHOD GetChildTable( // access table of specific attribute
+ nsIMdbEnv* ev, // context
+ nsIMdbTable** acqTable)
+ override; // acquire child table (or nil if no child)
+
+ // } ----- end children methods -----
+
+ // } ===== end nsIMdbCell methods =====
+ private: // copying is not allowed
+ virtual ~morkCellObject(); // assert that CloseCellObject() executed earlier
+ morkCellObject(const morkCellObject& other);
+ morkCellObject& operator=(const morkCellObject& other);
+
+ public: // dynamic type identification
+ mork_bool IsCellObject() const {
+ return IsNode() && mNode_Derived == morkDerived_kCellObject;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // other cell node methods
+ morkEnv* CanUseCell(nsIMdbEnv* mev, mork_bool inMutable, nsresult* outErr,
+ morkCell** outCell);
+
+ mork_bool ResyncWithRow(morkEnv* ev); // return ev->Good()
+ morkAtom* GetCellAtom(morkEnv* ev) const;
+
+ static void MissingRowColumnError(morkEnv* ev);
+ static void NilRowError(morkEnv* ev);
+ static void NilCellError(morkEnv* ev);
+ static void NilRowObjectError(morkEnv* ev);
+ static void WrongRowObjectRowError(morkEnv* ev);
+ static void NonCellObjectTypeError(morkEnv* ev);
+
+ nsIMdbCell* AcquireCellHandle(morkEnv* ev);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakCellObject(morkCellObject* me, morkEnv* ev,
+ morkCellObject** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongCellObject(morkCellObject* me, morkEnv* ev,
+ morkCellObject** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKCELLOBJECT_ */
diff --git a/comm/mailnews/db/mork/morkCh.cpp b/comm/mailnews/db/mork/morkCh.cpp
new file mode 100644
index 0000000000..334d9c689c
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCh.cpp
@@ -0,0 +1,344 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCH_
+# include "morkCh.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/* this byte char predicate source file derives from public domain Mithril */
+/* (that means much of this has a copyright dedicated to the public domain) */
+
+/*============================================================================*/
+/* morkCh_Type */
+
+const mork_flags morkCh_Type[] = /* derives from public domain Mithril table */
+ {
+ 0, /* 0x0 */
+ 0, /* 0x1 */
+ 0, /* 0x2 */
+ 0, /* 0x3 */
+ 0, /* 0x4 */
+ 0, /* 0x5 */
+ 0, /* 0x6 */
+ 0, /* 0x7 */
+ morkCh_kW, /* 0x8 backspace */
+ morkCh_kW, /* 0x9 tab */
+ morkCh_kW, /* 0xA linefeed */
+ 0, /* 0xB */
+ morkCh_kW, /* 0xC page */
+ morkCh_kW, /* 0xD return */
+ 0, /* 0xE */
+ 0, /* 0xF */
+ 0, /* 0x10 */
+ 0, /* 0x11 */
+ 0, /* 0x12 */
+ 0, /* 0x13 */
+ 0, /* 0x14 */
+ 0, /* 0x15 */
+ 0, /* 0x16 */
+ 0, /* 0x17 */
+ 0, /* 0x18 */
+ 0, /* 0x19 */
+ 0, /* 0x1A */
+ 0, /* 0x1B */
+ 0, /* 0x1C */
+ 0, /* 0x1D */
+ 0, /* 0x1E */
+ 0, /* 0x1F */
+
+ morkCh_kV | morkCh_kW, /* 0x20 space */
+ morkCh_kV | morkCh_kM, /* 0x21 ! */
+ morkCh_kV, /* 0x22 " */
+ morkCh_kV, /* 0x23 # */
+ 0, /* 0x24 $ cannot be kV because needs escape */
+ morkCh_kV, /* 0x25 % */
+ morkCh_kV, /* 0x26 & */
+ morkCh_kV, /* 0x27 ' */
+ morkCh_kV, /* 0x28 ( */
+ 0, /* 0x29 ) cannot be kV because needs escape */
+ morkCh_kV, /* 0x2A * */
+ morkCh_kV | morkCh_kM, /* 0x2B + */
+ morkCh_kV, /* 0x2C , */
+ morkCh_kV | morkCh_kM, /* 0x2D - */
+ morkCh_kV, /* 0x2E . */
+ morkCh_kV, /* 0x2F / */
+
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x30 0 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x31 1 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x32 2 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x33 3 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x34 4 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x35 5 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x36 6 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x37 7 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x38 8 */
+ morkCh_kV | morkCh_kD | morkCh_kX, /* 0x39 9 */
+ morkCh_kV | morkCh_kN | morkCh_kM, /* 0x3A : */
+ morkCh_kV, /* 0x3B ; */
+ morkCh_kV, /* 0x3C < */
+ morkCh_kV, /* 0x3D = */
+ morkCh_kV, /* 0x3E > */
+ morkCh_kV | morkCh_kM, /* 0x3F ? */
+
+ morkCh_kV, /* 0x40 @ */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU | morkCh_kX, /* 0x41 A */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU | morkCh_kX, /* 0x42 B */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU | morkCh_kX, /* 0x43 C */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU | morkCh_kX, /* 0x44 D */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU | morkCh_kX, /* 0x45 E */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU | morkCh_kX, /* 0x46 F */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x47 G */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x48 H */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x49 I */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x4A J */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x4B K */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x4C L */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x4D M */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x4E N */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x4F O */
+
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x50 P */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x51 Q */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x52 R */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x53 S */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x54 T */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x55 U */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x56 V */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x57 W */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x58 X */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x59 Y */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kU, /* 0x5A Z */
+ morkCh_kV, /* 0x5B [ */
+ 0, /* 0x5C \ cannot be kV because needs escape */
+ morkCh_kV, /* 0x5D ] */
+ morkCh_kV, /* 0x5E ^ */
+ morkCh_kV | morkCh_kN | morkCh_kM, /* 0x5F _ */
+
+ morkCh_kV, /* 0x60 ` */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL | morkCh_kX, /* 0x61 a */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL | morkCh_kX, /* 0x62 b */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL | morkCh_kX, /* 0x63 c */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL | morkCh_kX, /* 0x64 d */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL | morkCh_kX, /* 0x65 e */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL | morkCh_kX, /* 0x66 f */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x67 g */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x68 h */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x69 i */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x6A j */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x6B k */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x6C l */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x6D m */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x6E n */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x6F o */
+
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x70 p */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x71 q */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x72 r */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x73 s */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x74 t */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x75 u */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x76 v */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x77 w */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x78 x */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x79 y */
+ morkCh_kV | morkCh_kN | morkCh_kM | morkCh_kL, /* 0x7A z */
+ morkCh_kV, /* 0x7B { */
+ morkCh_kV, /* 0x7C | */
+ morkCh_kV, /* 0x7D } */
+ morkCh_kV, /* 0x7E ~ */
+ morkCh_kW, /* 0x7F rubout */
+
+ /* $"80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+
+ /* $"90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+
+ /* $"A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+
+ /* $"B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+
+ /* $"C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+
+ /* $"D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+
+ /* $"E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+
+ /* $"F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF" */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkCh.h b/comm/mailnews/db/mork/morkCh.h
new file mode 100644
index 0000000000..a3fc155a4d
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCh.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MORKCH_
+# define _MORKCH_ 1
+
+# ifndef _MORK_
+# include "mork.h"
+# endif
+
+/* this byte char predicate header file derives from public domain Mithril */
+/* (that means much of this has a copyright dedicated to the public domain) */
+
+/* Use all 8 pred bits; lose some pred bits only if we need to reuse them. */
+
+/* ch pred bits: W:white D:digit V:value U:upper L:lower N:name M:more */
+# define morkCh_kW (1 << 0)
+# define morkCh_kD (1 << 1)
+# define morkCh_kV (1 << 2)
+# define morkCh_kU (1 << 3)
+# define morkCh_kL (1 << 4)
+# define morkCh_kX (1 << 5)
+# define morkCh_kN (1 << 6)
+# define morkCh_kM (1 << 7)
+
+extern const mork_flags morkCh_Type[]; /* 256 byte predicate bits ch map */
+
+/* is a numeric decimal digit: (note memory access might be slower) */
+/* define morkCh_IsDigit(c) ( morkCh_Type[ (mork_ch)(c) ] & morkCh_kD ) */
+# define morkCh_IsDigit(c) (((mork_ch)c) >= '0' && ((mork_ch)c) <= '9')
+
+/* is a numeric octal digit: */
+# define morkCh_IsOctal(c) (((mork_ch)c) >= '0' && ((mork_ch)c) <= '7')
+
+/* is a numeric hexadecimal digit: */
+# define morkCh_IsHex(c) (morkCh_Type[(mork_ch)(c)] & morkCh_kX)
+
+/* is value (can be printed in Mork value without needing hex or escape): */
+# define morkCh_IsValue(c) (morkCh_Type[(mork_ch)(c)] & morkCh_kV)
+
+/* is white space : */
+# define morkCh_IsWhite(c) (morkCh_Type[(mork_ch)(c)] & morkCh_kW)
+
+/* is name (can start a Mork name): */
+# define morkCh_IsName(c) (morkCh_Type[(mork_ch)(c)] & morkCh_kN)
+
+/* is name (can continue a Mork name): */
+# define morkCh_IsMore(c) (morkCh_Type[(mork_ch)(c)] & morkCh_kM)
+
+/* is alphabetic upper or lower case */
+# define morkCh_IsAlpha(c) \
+ (morkCh_Type[(mork_ch)(c)] & (morkCh_kL | morkCh_kU))
+
+/* is alphanumeric, including lower case, upper case, and digits */
+# define morkCh_IsAlphaNum(c) \
+ (morkCh_Type[(mork_ch)(c)] & (morkCh_kL | morkCh_kU | morkCh_kD))
+
+/* ````` repeated testing of predicate bits in single flag byte ````` */
+
+# define morkCh_GetFlags(c) (morkCh_Type[(mork_ch)(c)])
+
+# define morkFlags_IsDigit(f) ((f)&morkCh_kD)
+# define morkFlags_IsHex(f) ((f)&morkCh_kX)
+# define morkFlags_IsValue(f) ((f)&morkCh_kV)
+# define morkFlags_IsWhite(f) ((f)&morkCh_kW)
+# define morkFlags_IsName(f) ((f)&morkCh_kN)
+# define morkFlags_IsMore(f) ((f)&morkCh_kM)
+# define morkFlags_IsAlpha(f) ((f) & (morkCh_kL | morkCh_kU))
+# define morkFlags_IsAlphaNum(f) ((f) & (morkCh_kL | morkCh_kU | morkCh_kD))
+
+# define morkFlags_IsUpper(f) ((f)&morkCh_kU)
+# define morkFlags_IsLower(f) ((f)&morkCh_kL)
+
+/* ````` character case (e.g. for case insensitive operations) ````` */
+
+# define morkCh_IsAscii(c) (((mork_u1)c) <= 0x7F)
+# define morkCh_IsSevenBitChar(c) (((mork_u1)c) <= 0x7F)
+
+/* ````` character case (e.g. for case insensitive operations) ````` */
+
+# define morkCh_ToLower(c) ((c) - 'A' + 'a')
+# define morkCh_ToUpper(c) ((c) - 'a' + 'A')
+
+/* extern int morkCh_IsUpper (int c); */
+# define morkCh_IsUpper(c) (morkCh_Type[(mork_ch)(c)] & morkCh_kU)
+
+/* extern int morkCh_IsLower (int c); */
+# define morkCh_IsLower(c) (morkCh_Type[(mork_ch)(c)] & morkCh_kL)
+
+#endif
+/* _MORKCH_ */
diff --git a/comm/mailnews/db/mork/morkConfig.cpp b/comm/mailnews/db/mork/morkConfig.cpp
new file mode 100644
index 0000000000..a02cecead9
--- /dev/null
+++ b/comm/mailnews/db/mork/morkConfig.cpp
@@ -0,0 +1,173 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCONFIG_
+# include "morkConfig.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+void mork_assertion_signal(const char* inMessage) { NS_ERROR(inMessage); }
+
+#ifdef MORK_PROVIDE_STDLIB
+
+MORK_LIB_IMPL(mork_i4)
+mork_memcmp(const void* inOne, const void* inTwo, mork_size inSize) {
+ const mork_u1* t = (const mork_u1*)inTwo;
+ const mork_u1* s = (const mork_u1*)inOne;
+ const mork_u1* end = s + inSize;
+ mork_i4 delta;
+
+ while (s < end) {
+ delta = ((mork_i4)*s) - ((mork_i4)*t);
+ if (delta)
+ return delta;
+ else {
+ ++t;
+ ++s;
+ }
+ }
+ return 0;
+}
+
+MORK_LIB_IMPL(void)
+mork_memcpy(void* outDst, const void* inSrc, mork_size inSize) {
+ mork_u1* d = (mork_u1*)outDst;
+ mork_u1* end = d + inSize;
+ const mork_u1* s = ((const mork_u1*)inSrc);
+
+ while (inSize >= 8) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+
+ inSize -= 8;
+ }
+
+ while (d < end) *d++ = *s++;
+}
+
+MORK_LIB_IMPL(void)
+mork_memmove(void* outDst, const void* inSrc, mork_size inSize) {
+ mork_u1* d = (mork_u1*)outDst;
+ const mork_u1* s = (const mork_u1*)inSrc;
+ if (d != s && inSize) // copy is necessary?
+ {
+ const mork_u1* srcEnd = s + inSize; // one past last source byte
+
+ if (d > s && d < srcEnd) // overlap? need to copy backwards?
+ {
+ s = srcEnd; // start one past last source byte
+ d += inSize; // start one past last dest byte
+ mork_u1* dstBegin = d; // last byte to write is first in dest range
+ while (d - dstBegin >= 8) {
+ *--d = *--s;
+ *--d = *--s;
+ *--d = *--s;
+ *--d = *--s;
+
+ *--d = *--s;
+ *--d = *--s;
+ *--d = *--s;
+ *--d = *--s;
+ }
+ while (d > dstBegin) *--d = *--s;
+ } else // can copy forwards without any overlap
+ {
+ mork_u1* dstEnd = d + inSize;
+ while (dstEnd - d >= 8) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+ while (d < dstEnd) *d++ = *s++;
+ }
+ }
+}
+
+MORK_LIB_IMPL(void)
+mork_memset(void* outDst, int inByte, mork_size inSize) {
+ mork_u1* d = (mork_u1*)outDst;
+ mork_u1* end = d + inSize;
+ while (d < end) *d++ = (mork_u1)inByte;
+}
+
+MORK_LIB_IMPL(void)
+mork_strcpy(void* outDst, const void* inSrc) {
+ // back up one first to support preincrement
+ mork_u1* d = ((mork_u1*)outDst) - 1;
+ const mork_u1* s = ((const mork_u1*)inSrc) - 1;
+ while ((*++d = *++s) != 0)
+ ; /* empty */
+}
+
+MORK_LIB_IMPL(mork_i4)
+mork_strcmp(const void* inOne, const void* inTwo) {
+ const mork_u1* t = (const mork_u1*)inTwo;
+ const mork_u1* s = ((const mork_u1*)inOne);
+ mork_i4 a;
+ mork_i4 b;
+ mork_i4 delta;
+
+ do {
+ a = (mork_i4)*s++;
+ b = (mork_i4)*t++;
+ delta = a - b;
+ } while (!delta && a && b);
+
+ return delta;
+}
+
+MORK_LIB_IMPL(mork_i4)
+mork_strncmp(const void* inOne, const void* inTwo, mork_size inSize) {
+ const mork_u1* t = (const mork_u1*)inTwo;
+ const mork_u1* s = (const mork_u1*)inOne;
+ const mork_u1* end = s + inSize;
+ mork_i4 delta;
+ mork_i4 a;
+ mork_i4 b;
+
+ while (s < end) {
+ a = (mork_i4)*s++;
+ b = (mork_i4)*t++;
+ delta = a - b;
+ if (delta || !a || !b) return delta;
+ }
+ return 0;
+}
+
+MORK_LIB_IMPL(mork_size)
+mork_strlen(const void* inString) {
+ // back up one first to support preincrement
+ const mork_u1* s = ((const mork_u1*)inString) - 1;
+ while (*++s) /* preincrement is cheapest */
+ ; /* empty */
+
+ return s - ((const mork_u1*)inString); // distance from original address
+}
+
+#endif /*MORK_PROVIDE_STDLIB*/
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkConfig.h b/comm/mailnews/db/mork/morkConfig.h
new file mode 100644
index 0000000000..812641ee09
--- /dev/null
+++ b/comm/mailnews/db/mork/morkConfig.h
@@ -0,0 +1,170 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKCONFIG_
+#define _MORKCONFIG_ 1
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// { %%%%% begin debug mode options in Mork %%%%%
+#define MORK_DEBUG 1
+// } %%%%% end debug mode options in Mork %%%%%
+
+#ifdef MORK_DEBUG
+# define MORK_MAX_CODE_COMPILE 1
+#endif
+
+// { %%%%% begin platform defs peculiar to Mork %%%%%
+
+#ifdef XP_MACOSX
+# define MORK_MAC 1
+#endif
+
+#ifdef XP_WIN
+# define MORK_WIN 1
+#endif
+
+#ifdef XP_UNIX
+# define MORK_UNIX 1
+#endif
+
+// } %%%%% end platform defs peculiar to Mork %%%%%
+
+#if defined(MORK_WIN) || defined(MORK_UNIX) || defined(MORK_MAC)
+# include <stdio.h>
+# include <ctype.h>
+# include <errno.h>
+# include <string.h>
+# ifdef HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# ifdef HAVE_UNISTD_H
+# include <unistd.h> /* for SEEK_SET, SEEK_END */
+# endif
+
+# include "nsDebug.h"
+
+# define MORK_ISPRINT(c) isprint(c)
+
+# define MORK_FILETELL(file) ftell(file)
+# define MORK_FILESEEK(file, where, how) fseek(file, where, how)
+# define MORK_FILEREAD(outbuf, insize, file) fread(outbuf, 1, insize, file)
+# if defined(MORK_WIN)
+void mork_fileflush(FILE* file);
+# define MORK_FILEFLUSH(file) mork_fileflush(file)
+# else
+# define MORK_FILEFLUSH(file) fflush(file)
+# endif /*MORK_WIN*/
+
+# if defined(MORK_WIN)
+# define MORK_FILEOPEN(file, how) \
+ _wfopen(char16ptr_t(file), NS_ConvertASCIItoUTF16(how).get())
+# else
+# define MORK_FILEOPEN(file, how) fopen(file, how)
+# endif /*MORK_WIN*/
+# define MORK_FILECLOSE(file) fclose(file)
+#endif /*defined(MORK_WIN) || defined(MORK_UNIX) || defined(MORK_MAC)*/
+
+/* ===== separating switchable features ===== */
+
+#define MORK_ENABLE_ZONE_ARENAS 1 /* using morkZone for pooling */
+
+// #define MORK_ENABLE_PROBE_MAPS 1 /* use smaller hash tables */
+
+#define MORK_BEAD_OVER_NODE_MAPS 1 /* use bead not node maps */
+
+/* ===== pooling ===== */
+
+#if defined(HAVE_64BIT_BUILD)
+# define MORK_CONFIG_ALIGN_8 1 /* must have 8 byte alignment */
+#else
+# define MORK_CONFIG_PTR_SIZE_4 1 /* sizeof(void*) == 4 */
+#endif
+
+// #define MORK_DEBUG_HEAP_STATS 1 /* analyze per-block heap usage */
+
+/* ===== ===== ===== ===== line characters ===== ===== ===== ===== */
+#define mork_kCR 0x0D
+#define mork_kLF 0x0A
+#define mork_kVTAB '\013'
+#define mork_kFF '\014'
+#define mork_kTAB '\011'
+#define mork_kCRLF "\015\012" /* A CR LF equivalent string */
+
+#if defined(MORK_MAC)
+# define mork_kNewline "\015"
+# define mork_kNewlineSize 1
+#else
+# if defined(MORK_WIN)
+# define mork_kNewline "\015\012"
+# define mork_kNewlineSize 2
+# else
+# if defined(MORK_UNIX)
+# define mork_kNewline "\012"
+# define mork_kNewlineSize 1
+# endif /* MORK_UNIX */
+# endif /* MORK_WIN */
+#endif /* MORK_MAC */
+
+// { %%%%% begin assertion macro %%%%%
+extern void mork_assertion_signal(const char* inMessage);
+#define MORK_ASSERTION_SIGNAL(Y) mork_assertion_signal(Y)
+#define MORK_ASSERT(X) \
+ if (!(X)) MORK_ASSERTION_SIGNAL(#X)
+// } %%%%% end assertion macro %%%%%
+
+#define MORK_LIB(return) return /*API return declaration*/
+#define MORK_LIB_IMPL(return) return /*implementation return declaration*/
+
+// { %%%%% begin standard c utility methods %%%%%
+
+#if defined(MORK_WIN) || defined(MORK_UNIX) || defined(MORK_MAC)
+# define MORK_USE_C_STDLIB 1
+#endif /*MORK_WIN*/
+
+#ifdef MORK_USE_C_STDLIB
+# define MORK_MEMCMP(src1, src2, size) memcmp(src1, src2, size)
+# define MORK_MEMCPY(dest, src, size) memcpy(dest, src, size)
+# define MORK_MEMMOVE(dest, src, size) memmove(dest, src, size)
+# define MORK_MEMSET(dest, byte, size) memset(dest, byte, size)
+# if defined(MORK_WIN)
+# define MORK_STRCPY(dest, src) wcscpy(char16ptr_t(dest), char16ptr_t(src))
+# else
+# define MORK_STRCPY(dest, src) strcpy(dest, src)
+# endif /*MORK_WIN*/
+# define MORK_STRCMP(one, two) strcmp(one, two)
+# define MORK_STRNCMP(one, two, length) strncmp(one, two, length)
+# if defined(MORK_WIN)
+# define MORK_STRLEN(string) wcslen(char16ptr_t(string))
+# else
+# define MORK_STRLEN(string) strlen(string)
+# endif /*MORK_WIN*/
+#endif /*MORK_USE_C_STDLIB*/
+
+#ifdef MORK_PROVIDE_STDLIB
+MORK_LIB(mork_i4) mork_memcmp(const void* a, const void* b, mork_size inSize);
+MORK_LIB(void) mork_memcpy(void* dst, const void* src, mork_size inSize);
+MORK_LIB(void) mork_memmove(void* dst, const void* src, mork_size inSize);
+MORK_LIB(void) mork_memset(void* dst, int inByte, mork_size inSize);
+MORK_LIB(void) mork_strcpy(void* dst, const void* src);
+MORK_LIB(mork_i4) mork_strcmp(const void* a, const void* b);
+MORK_LIB(mork_i4) mork_strncmp(const void* a, const void* b, mork_size inSize);
+MORK_LIB(mork_size) mork_strlen(const void* inString);
+
+# define MORK_MEMCMP(src1, src2, size) mork_memcmp(src1, src2, size)
+# define MORK_MEMCPY(dest, src, size) mork_memcpy(dest, src, size)
+# define MORK_MEMMOVE(dest, src, size) mork_memmove(dest, src, size)
+# define MORK_MEMSET(dest, byte, size) mork_memset(dest, byte, size)
+# define MORK_STRCPY(dest, src) mork_strcpy(dest, src)
+# define MORK_STRCMP(one, two) mork_strcmp(one, two)
+# define MORK_STRNCMP(one, two, length) mork_strncmp(one, two, length)
+# define MORK_STRLEN(string) mork_strlen(string)
+#endif /*MORK_PROVIDE_STDLIB*/
+
+// } %%%%% end standard c utility methods %%%%%
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKCONFIG_ */
diff --git a/comm/mailnews/db/mork/morkCursor.cpp b/comm/mailnews/db/mork/morkCursor.cpp
new file mode 100644
index 0000000000..407aa9b3fb
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCursor.cpp
@@ -0,0 +1,173 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkCursor::CloseMorkNode(
+ morkEnv* ev) // CloseCursor() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseCursor(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkCursor::~morkCursor() // assert CloseCursor() executed earlier
+{}
+
+/*public non-poly*/
+morkCursor::morkCursor(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap)
+ : morkObject(ev, inUsage, ioHeap, morkColor_kNone, (morkHandle*)0),
+ mCursor_Seed(0),
+ mCursor_Pos(-1),
+ mCursor_DoFailOnSeedOutOfSync(morkBool_kFalse) {
+ if (ev->Good()) mNode_Derived = morkDerived_kCursor;
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkCursor, morkObject, nsIMdbCursor)
+
+/*public non-poly*/ void morkCursor::CloseCursor(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ mCursor_Seed = 0;
+ mCursor_Pos = -1;
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// { ----- begin ref counting for well-behaved cyclic graphs -----
+NS_IMETHODIMP
+morkCursor::GetWeakRefCount(nsIMdbEnv* mev, // weak refs
+ mdb_count* outCount) {
+ *outCount = WeakRefsOnly();
+ return NS_OK;
+}
+NS_IMETHODIMP
+morkCursor::GetStrongRefCount(nsIMdbEnv* mev, // strong refs
+ mdb_count* outCount) {
+ *outCount = StrongRefsOnly();
+ return NS_OK;
+}
+// ### TODO - clean up this cast, if required
+NS_IMETHODIMP
+morkCursor::AddWeakRef(nsIMdbEnv* mev) {
+ // XXX Casting mork_refs to nsresult
+ return static_cast<nsresult>(morkNode::AddWeakRef((morkEnv*)mev));
+}
+
+#ifndef _MSC_VER
+NS_IMETHODIMP_(mork_uses)
+morkCursor::AddStrongRef(morkEnv* mev) { return morkNode::AddStrongRef(mev); }
+#endif
+
+NS_IMETHODIMP_(mork_uses)
+morkCursor::AddStrongRef(nsIMdbEnv* mev) {
+ return morkNode::AddStrongRef((morkEnv*)mev);
+}
+
+NS_IMETHODIMP
+morkCursor::CutWeakRef(nsIMdbEnv* mev) {
+ // XXX Casting mork_refs to nsresult
+ return static_cast<nsresult>(morkNode::CutWeakRef((morkEnv*)mev));
+}
+
+#ifndef _MSC_VER
+NS_IMETHODIMP_(mork_uses)
+morkCursor::CutStrongRef(morkEnv* mev) { return morkNode::CutStrongRef(mev); }
+#endif
+
+NS_IMETHODIMP
+morkCursor::CutStrongRef(nsIMdbEnv* mev) {
+ // XXX Casting mork_uses to nsresult
+ return static_cast<nsresult>(morkNode::CutStrongRef((morkEnv*)mev));
+}
+
+NS_IMETHODIMP
+morkCursor::CloseMdbObject(nsIMdbEnv* mev) {
+ return morkNode::CloseMdbObject((morkEnv*)mev);
+}
+
+NS_IMETHODIMP
+morkCursor::IsOpenMdbObject(nsIMdbEnv* mev, mdb_bool* outOpen) {
+ *outOpen = IsOpenNode();
+ return NS_OK;
+}
+NS_IMETHODIMP
+morkCursor::IsFrozenMdbObject(nsIMdbEnv* mev, mdb_bool* outIsReadonly) {
+ *outIsReadonly = IsFrozen();
+ return NS_OK;
+}
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+NS_IMETHODIMP
+morkCursor::GetCount(nsIMdbEnv* mev, mdb_count* outCount) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkCursor::GetSeed(nsIMdbEnv* mev, mdb_seed* outSeed) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkCursor::SetPos(nsIMdbEnv* mev, mdb_pos inPos) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkCursor::GetPos(nsIMdbEnv* mev, mdb_pos* outPos) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkCursor::SetDoFailOnSeedOutOfSync(nsIMdbEnv* mev, mdb_bool inFail) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkCursor::GetDoFailOnSeedOutOfSync(nsIMdbEnv* mev, mdb_bool* outFail) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkCursor.h b/comm/mailnews/db/mork/morkCursor.h
new file mode 100644
index 0000000000..11c8ec8839
--- /dev/null
+++ b/comm/mailnews/db/mork/morkCursor.h
@@ -0,0 +1,134 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKCURSOR_
+#define _MORKCURSOR_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kCursor /*i*/ 0x4375 /* ascii 'Cu' */
+
+// morkCursor: abstract base for Mork collection iterators.  It couples the
+// morkObject node machinery (refcounting, open/shut lifecycle) with the
+// public nsIMdbCursor interface; the nsIMdbCursor attribute methods are
+// stubbed in morkCursor.cpp.
+class morkCursor : public morkObject,
+                   public nsIMdbCursor {  // collection iterator
+
+  // public: // slots inherited from morkObject (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+  // mork_color mBead_Color; // ID for this bead
+  // morkHandle* mObject_Handle; // weak ref to handle for this object
+
+ public:  // state is public because the entire Mork system is private
+  NS_DECL_ISUPPORTS_INHERITED
+
+  // { ----- begin attribute methods -----
+  NS_IMETHOD IsFrozenMdbObject(nsIMdbEnv* ev, mdb_bool* outIsReadonly) override;
+  // same as nsIMdbPort::GetIsPortReadonly() when this object is inside a port.
+  // } ----- end attribute methods -----
+
+  // { ----- begin ref counting for well-behaved cyclic graphs -----
+  NS_IMETHOD GetWeakRefCount(nsIMdbEnv* ev,  // weak refs
+                             mdb_count* outCount) override;
+  NS_IMETHOD GetStrongRefCount(nsIMdbEnv* ev,  // strong refs
+                               mdb_count* outCount) override;
+
+  NS_IMETHOD AddWeakRef(nsIMdbEnv* ev) override;
+#ifndef _MSC_VER
+  // The first declaration of AddStrongRef is to suppress
+  // -Werror,-Woverloaded-virtual.
+  NS_IMETHOD_(mork_uses) AddStrongRef(morkEnv* ev) override;
+#endif
+  NS_IMETHOD_(mork_uses) AddStrongRef(nsIMdbEnv* ev) override;
+
+  NS_IMETHOD CutWeakRef(nsIMdbEnv* ev) override;
+#ifndef _MSC_VER
+  // The first declaration of CutStrongRef is to suppress
+  // -Werror,-Woverloaded-virtual.
+  NS_IMETHOD_(mork_uses) CutStrongRef(morkEnv* ev) override;
+#endif
+  NS_IMETHOD CutStrongRef(nsIMdbEnv* ev) override;
+
+  NS_IMETHOD CloseMdbObject(
+      nsIMdbEnv* ev) override;  // called at strong refs zero
+  NS_IMETHOD IsOpenMdbObject(nsIMdbEnv* ev, mdb_bool* outOpen) override;
+  // } ----- end ref counting -----
+
+  // } ===== end nsIMdbObject methods =====
+
+  // { ===== begin nsIMdbCursor methods =====
+
+  // { ----- begin attribute methods -----
+  NS_IMETHOD GetCount(nsIMdbEnv* ev, mdb_count* outCount) override;  // readonly
+  NS_IMETHOD GetSeed(nsIMdbEnv* ev, mdb_seed* outSeed) override;     // readonly
+
+  NS_IMETHOD SetPos(nsIMdbEnv* ev, mdb_pos inPos) override;  // mutable
+  NS_IMETHOD GetPos(nsIMdbEnv* ev, mdb_pos* outPos) override;
+
+  NS_IMETHOD SetDoFailOnSeedOutOfSync(nsIMdbEnv* ev, mdb_bool inFail) override;
+  NS_IMETHOD GetDoFailOnSeedOutOfSync(nsIMdbEnv* ev,
+                                      mdb_bool* outFail) override;
+  // } ----- end attribute methods -----
+
+  // } ===== end nsIMdbCursor methods =====
+
+  // } ----- end attribute methods -----
+
+  // Iteration state: mCursor_Seed snapshots the collection's change seed so a
+  // cursor can detect mutation; mCursor_Pos is the current (one-based)
+  // position; mCursor_DoFailOnSeedOutOfSync selects strict-vs-lenient
+  // behavior when the seed no longer matches.
+  mork_seed mCursor_Seed;
+  mork_pos mCursor_Pos;
+  mork_bool mCursor_DoFailOnSeedOutOfSync;
+  mork_u1 mCursor_Pad[3];  // explicitly pad to u4 alignment
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseCursor() only if open
+
+ public:  // morkCursor construction & destruction
+  morkCursor(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap);
+  void CloseCursor(morkEnv* ev);  // called by CloseMorkNode();
+
+ protected:
+  virtual ~morkCursor();  // assert that CloseCursor() executed earlier
+
+ private:  // copying is not allowed
+  morkCursor(const morkCursor& other);
+  morkCursor& operator=(const morkCursor& other);
+
+ public:  // dynamic type identification
+  mork_bool IsCursor() const {
+    return IsNode() && mNode_Derived == morkDerived_kCursor;
+  }
+  // } ===== end morkNode methods =====
+
+ public:  // other cursor methods
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakCursor(morkCursor* me, morkEnv* ev, morkCursor** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongCursor(morkCursor* me, morkEnv* ev,
+                               morkCursor** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKCURSOR_ */
diff --git a/comm/mailnews/db/mork/morkDeque.cpp b/comm/mailnews/db/mork/morkDeque.cpp
new file mode 100644
index 0000000000..7490aef84b
--- /dev/null
+++ b/comm/mailnews/db/mork/morkDeque.cpp
@@ -0,0 +1,246 @@
+/*************************************************************************
+This software is part of a public domain IronDoc source code distribution,
+and is provided on an "AS IS" basis, with all risks borne by the consumers
+or users of the IronDoc software. There are no warranties, guarantees, or
+promises about quality of any kind; and no remedies for failure exist.
+
+Permission is hereby granted to use this IronDoc software for any purpose
+at all, without need for written agreements, without royalty or license
+fees, and without fees or obligations of any other kind. Anyone can use,
+copy, change and distribute this software for any purpose, and nothing is
+required, implicitly or otherwise, in exchange for this usage.
+
+You cannot apply your own copyright to this software, but otherwise you
+are encouraged to enjoy the use of this software in any way you see fit.
+However, it would be rude to remove names of developers from the code.
+(IronDoc is also known by the short name "Fe" and a longer name "Ferrum",
+which are used interchangeably with the name IronDoc in the sources.)
+*************************************************************************/
+/*
+ * File: morkDeque.cpp
+ * Contains: Ferrum deque (double ended queue (linked list))
+ *
+ * Copied directly from public domain IronDoc, with minor naming tweaks:
+ * Designed and written by David McCusker, but all this code is public domain.
+ * There are no warranties, no guarantees, no promises, and no remedies.
+ */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKDEQUE_
+# include "morkDeque.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+/*=============================================================================
+ * morkNext: linked list node for very simple, singly-linked list
+ */
+
+// Construct a free-standing singly-linked node with no successor.
+morkNext::morkNext() : mNext_Link(0) {}
+
+/*static*/ void* morkNext::MakeNewNext(size_t inSize, nsIMdbHeap& ioHeap,
+                                       morkEnv* ev) {
+  // Heap-allocate inSize bytes for a morkNext.  Allocation failure is
+  // reported via the env (no exception), so callers must handle null.
+  void* next = 0;
+  ioHeap.Alloc(ev->AsMdbEnv(), inSize, (void**)&next);
+  if (!next) ev->OutOfMemoryError();
+
+  return next;
+}
+
+/*static*/
+void morkNext::ZapOldNext(morkEnv* ev, nsIMdbHeap* ioHeap) {
+  // Return this node's memory to ioHeap; a nil heap is reported as an error.
+  if (ioHeap) {
+    ioHeap->Free(ev->AsMdbEnv(), this);
+  } else
+    ev->NilPointerError();
+}
+
+/*=============================================================================
+ * morkList: simple, singly-linked list
+ */
+
+// Construct an empty list (both head and tail nil).
+morkList::morkList() : mList_Head(0), mList_Tail(0) {}
+
+void morkList::CutAndZapAllListMembers(morkEnv* ev, nsIMdbHeap* ioHeap)
+// make empty list, zapping every member by calling ZapOldNext()
+{
+  if (ioHeap) {
+    morkNext* next = 0;
+    while ((next = this->PopHead()) != 0) next->ZapOldNext(ev, ioHeap);
+
+    // PopHead already left both slots nil; re-clearing is harmless.
+    mList_Head = 0;
+    mList_Tail = 0;
+  } else
+    ev->NilPointerError();
+}
+
+void morkList::CutAllListMembers()
+// just make list empty, dropping members without zapping
+{
+  // Members are NOT freed here; the caller must own their storage.
+  while (this->PopHead())
+    ; /* empty */
+
+  mList_Head = 0;
+  mList_Tail = 0;
+}
+
+morkNext* morkList::PopHead()  // cut head of list
+{
+  // Returns the former head, or null when the list is empty.  The returned
+  // node's link is nilled so it cannot dangle into the list.
+  morkNext* outHead = mList_Head;
+  if (outHead)  // anything to cut from list?
+  {
+    morkNext* next = outHead->mNext_Link;
+    mList_Head = next;
+    if (!next)  // cut the last member, so tail no longer exists?
+      mList_Tail = 0;
+
+    outHead->mNext_Link = 0;  // nil outgoing node link; unnecessary, but tidy
+  }
+  return outHead;
+}
+
+void morkList::PushHead(morkNext* ioLink)  // add to head of list
+{
+  morkNext* head = mList_Head;  // old head of list
+  morkNext* tail = mList_Tail;  // old tail of list
+
+  // Invariant: head and tail are either both nil (empty) or both non-nil.
+  MORK_ASSERT((head && tail) || (!head && !tail));
+
+  ioLink->mNext_Link = head;  // make old head follow the new link
+  if (!head)                  // list was previously empty?
+    mList_Tail = ioLink;      // head is also tail for first member added
+
+  mList_Head = ioLink;  // head of list is the new link
+}
+
+void morkList::PushTail(morkNext* ioLink)  // add to tail of list
+{
+  morkNext* head = mList_Head;  // old head of list
+  morkNext* tail = mList_Tail;  // old tail of list
+
+  MORK_ASSERT((head && tail) || (!head && !tail));
+
+  ioLink->mNext_Link = 0;
+  if (tail) {
+    tail->mNext_Link = ioLink;
+    mList_Tail = ioLink;
+  } else  // list was previously empty?
+    mList_Head = mList_Tail =
+        ioLink;  // tail is also head for first member added
+}
+
+/*=============================================================================
+ * morkLink: linked list node embedded in objs to allow insertion in morkDeques
+ */
+
+// Construct a detached doubly-linked node (both link slots nil).
+morkLink::morkLink() : mLink_Next(0), mLink_Prev(0) {}
+
+/*static*/ void* morkLink::MakeNewLink(size_t inSize, nsIMdbHeap& ioHeap,
+                                       morkEnv* ev) {
+  // Heap-allocate inSize bytes for a morkLink; failure is reported via the
+  // env and a null pointer is returned.
+  void* alink = 0;
+  ioHeap.Alloc(ev->AsMdbEnv(), inSize, (void**)&alink);
+  if (!alink) ev->OutOfMemoryError();
+
+  return alink;
+}
+
+/*static*/
+void morkLink::ZapOldLink(morkEnv* ev, nsIMdbHeap* ioHeap) {
+  // Return this link's memory to ioHeap; a nil heap is reported as an error.
+  if (ioHeap) {
+    ioHeap->Free(ev->AsMdbEnv(), this);
+  } else
+    ev->NilPointerError();
+}
+
+/*=============================================================================
+ * morkDeque: doubly linked list modeled after VAX queue instructions
+ */
+
+// A deque starts empty: the sentinel head node points at itself both ways.
+morkDeque::morkDeque() { mDeque_Head.SelfRefer(); }
+
+/*| RemoveFirst:
+|*/
+morkLink* morkDeque::RemoveFirst() /*i*/
+{
+  // Unlink and return the first member, or null when only the sentinel
+  // remains (empty deque).
+  morkLink* alink = mDeque_Head.mLink_Next;
+  if (alink != &mDeque_Head) {
+    (mDeque_Head.mLink_Next = alink->mLink_Next)->mLink_Prev = &mDeque_Head;
+    return alink;
+  }
+  return (morkLink*)0;
+}
+
+/*| RemoveLast:
+|*/
+morkLink* morkDeque::RemoveLast() /*i*/
+{
+  // Mirror image of RemoveFirst: unlink and return the last member, or null
+  // when the deque is empty.
+  morkLink* alink = mDeque_Head.mLink_Prev;
+  if (alink != &mDeque_Head) {
+    (mDeque_Head.mLink_Prev = alink->mLink_Prev)->mLink_Next = &mDeque_Head;
+    return alink;
+  }
+  return (morkLink*)0;
+}
+
+/*| At:
+|*/
+morkLink* morkDeque::At(mork_pos index) const /*i*/
+/* indexes are one based (and not zero based) */
+{
+  // Linear walk from the front; returns null when index exceeds the length.
+  mork_num count = 0;
+  morkLink* alink;
+  for (alink = this->First(); alink; alink = this->After(alink)) {
+    if (++count == (mork_num)index) break;
+  }
+  return alink;
+}
+
+/*| IndexOf:
+|*/
+mork_pos morkDeque::IndexOf(const morkLink* member) const /*i*/
+/* indexes are one based (and not zero based) */
+/* zero means member is not in deque */
+{
+  // Identity (pointer) comparison, not value comparison.
+  mork_num count = 0;
+  const morkLink* alink;
+  for (alink = this->First(); alink; alink = this->After(alink)) {
+    ++count;
+    if (member == alink) return (mork_pos)count;
+  }
+  return 0;
+}
+
+/*| Length:
+|*/
+mork_num morkDeque::Length() const /*i*/
+{
+  // O(n) count of members (the deque stores no cached size).
+  mork_num count = 0;
+  morkLink* alink;
+  for (alink = this->First(); alink; alink = this->After(alink)) ++count;
+  return count;
+}
+
+/*| LengthCompare:
+|*/
+int morkDeque::LengthCompare(mork_num c) const /*i*/
+{
+  // Compare length against c without walking the whole list: stop as soon as
+  // more than c members are seen.  Returns -1 / 0 / 1 for < / == / >.
+  mork_num count = 0;
+  const morkLink* alink;
+  for (alink = this->First(); alink; alink = this->After(alink)) {
+    if (++count > c) return 1;
+  }
+  return (count == c) ? 0 : -1;
+}
diff --git a/comm/mailnews/db/mork/morkDeque.h b/comm/mailnews/db/mork/morkDeque.h
new file mode 100644
index 0000000000..54e5080254
--- /dev/null
+++ b/comm/mailnews/db/mork/morkDeque.h
@@ -0,0 +1,244 @@
+/*************************************************************************
+This software is part of a public domain IronDoc source code distribution,
+and is provided on an "AS IS" basis, with all risks borne by the consumers
+or users of the IronDoc software. There are no warranties, guarantees, or
+promises about quality of any kind; and no remedies for failure exist.
+
+Permission is hereby granted to use this IronDoc software for any purpose
+at all, without need for written agreements, without royalty or license
+fees, and without fees or obligations of any other kind. Anyone can use,
+copy, change and distribute this software for any purpose, and nothing is
+required, implicitly or otherwise, in exchange for this usage.
+
+You cannot apply your own copyright to this software, but otherwise you
+are encouraged to enjoy the use of this software in any way you see fit.
+However, it would be rude to remove names of developers from the code.
+(IronDoc is also known by the short name "Fe" and a longer name "Ferrum",
+which are used interchangeably with the name IronDoc in the sources.)
+*************************************************************************/
+/*
+ * File: morkDeque.h
+ * Contains: Ferrum deque (double ended queue (linked list))
+ *
+ * Copied directly from public domain IronDoc, with minor naming tweaks:
+ * Designed and written by David McCusker, but all this code is public domain.
+ * There are no warranties, no guarantees, no promises, and no remedies.
+ */
+
+#ifndef _MORKDEQUE_
+#define _MORKDEQUE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+/*=============================================================================
+ * morkNext: linked list node for very simple, singly-linked list
+ */
+
+// morkNext: minimal singly-linked node; carries one forward link only, so it
+// costs half the slot overhead of the doubly-linked morkLink.
+class morkNext /*d*/ {
+ public:
+  morkNext* mNext_Link;
+
+ public:
+  explicit morkNext(int inZero) : mNext_Link(0) {}
+
+  explicit morkNext(morkNext* ioLink) : mNext_Link(ioLink) {}
+
+  morkNext();  // mNext_Link( 0 ), { }
+
+ public:
+  morkNext* GetNextLink() const { return mNext_Link; }
+
+ public:  // link memory management methods
+  static void* MakeNewNext(size_t inSize, nsIMdbHeap& ioHeap, morkEnv* ev);
+  void ZapOldNext(morkEnv* ev, nsIMdbHeap* ioHeap);
+
+ public:  // link memory management operators
+  // Placement-style new that draws storage from an nsIMdbHeap.
+  void* operator new(size_t inSize, nsIMdbHeap& ioHeap,
+                     morkEnv* ev) noexcept(true) {
+    return morkNext::MakeNewNext(inSize, ioHeap, ev);
+  }
+
+  // Plain delete is deliberately unusable: instances belong to an
+  // nsIMdbHeap and must be released with ZapOldNext() instead.
+  void operator delete(void* ioAddress)  // DO NOT CALL THIS, hope to crash:
+  {
+    ((morkNext*)0)->ZapOldNext((morkEnv*)0, (nsIMdbHeap*)0);
+  }  // boom
+};
+
+/*=============================================================================
+ * morkList: simple, singly-linked list
+ */
+
+/*| morkList: a list of singly-linked members (instances of morkNext), where
+**| the number of list members might be so numerous that we must worry about
+**| the cost of two pointer link slots per member (as happens with morkLink).
+**|
+**|| morkList is intended to support lists of changes in morkTable, where we
+**| are worried about the space cost of representing such changes. (Later we
+**| can use an array instead, when we get even more worried, to avoid cost
+**| of link slots at all, per member).
+**|
+**|| Do NOT create cycles in links using this list class, since we do not
+**| deal with them very nicely.
+|*/
+// morkList: singly-linked list of morkNext nodes with head and tail pointers;
+// supports O(1) PushHead/PushTail/PopHead (no PopTail — use morkDeque).
+class morkList /*d*/ {
+ public:
+  morkNext* mList_Head;  // first link in the list
+  morkNext* mList_Tail;  // last link in the list
+
+ public:
+  morkNext* GetListHead() const { return mList_Head; }
+  morkNext* GetListTail() const { return mList_Tail; }
+
+  mork_bool IsListEmpty() const { return (mList_Head == 0); }
+  mork_bool HasListMembers() const { return (mList_Head != 0); }
+
+ public:
+  morkList();  // : mList_Head( 0 ), mList_Tail( 0 ) { }
+
+  void CutAndZapAllListMembers(morkEnv* ev, nsIMdbHeap* ioHeap);
+  // make empty list, zapping every member by calling ZapOldNext()
+
+  void CutAllListMembers();
+  // just make list empty, dropping members without zapping
+
+ public:
+  morkNext* PopHead();  // cut head of list
+
+  // Note we don't support PopTail(), so use morkDeque if you need that.
+
+  void PushHead(morkNext* ioLink);  // add to head of list
+  void PushTail(morkNext* ioLink);  // add to tail of list
+};
+
+/*=============================================================================
+ * morkLink: linked list node embedded in objs to allow insertion in morkDeques
+ */
+
+// morkLink: intrusive doubly-linked node embedded in objects so they can be
+// threaded onto a morkDeque without separate allocations.
+class morkLink /*d*/ {
+ public:
+  morkLink* mLink_Next;
+  morkLink* mLink_Prev;
+
+ public:
+  explicit morkLink(int inZero) : mLink_Next(0), mLink_Prev(0) {}
+
+  morkLink();  // mLink_Next( 0 ), mLink_Prev( 0 ) { }
+
+ public:
+  morkLink* Next() const { return mLink_Next; }
+  morkLink* Prev() const { return mLink_Prev; }
+
+  // Point both links at this node itself — used for deque sentinel heads.
+  void SelfRefer() { mLink_Next = mLink_Prev = this; }
+  void Clear() { mLink_Next = mLink_Prev = 0; }
+
+  // Splice this node immediately before `old` in old's list.
+  void AddBefore(morkLink* old) {
+    ((old)->mLink_Prev->mLink_Next = (this))->mLink_Prev = (old)->mLink_Prev;
+    ((this)->mLink_Next = (old))->mLink_Prev = this;
+  }
+
+  // Splice this node immediately after `old` in old's list.
+  void AddAfter(morkLink* old) {
+    ((old)->mLink_Next->mLink_Prev = (this))->mLink_Next = (old)->mLink_Next;
+    ((this)->mLink_Prev = (old))->mLink_Next = this;
+  }
+
+  // Unlink from the surrounding list; own link slots are left untouched.
+  void Remove() {
+    (mLink_Prev->mLink_Next = mLink_Next)->mLink_Prev = mLink_Prev;
+  }
+
+ public:  // link memory management methods
+  static void* MakeNewLink(size_t inSize, nsIMdbHeap& ioHeap, morkEnv* ev);
+  void ZapOldLink(morkEnv* ev, nsIMdbHeap* ioHeap);
+
+ public:  // link memory management operators
+  // Placement-style new drawing storage from an nsIMdbHeap.
+  void* operator new(size_t inSize, nsIMdbHeap& ioHeap,
+                     morkEnv* ev) noexcept(true) {
+    return morkLink::MakeNewLink(inSize, ioHeap, ev);
+  }
+};
+
+/*=============================================================================
+ * morkDeque: doubly linked list modeled after VAX queue instructions
+ */
+
+// morkDeque: circular doubly-linked list with an embedded sentinel head,
+// modeled after VAX queue instructions.  An empty deque is one whose
+// sentinel refers to itself; traversal helpers translate the sentinel back
+// to null for callers.
+class morkDeque /*d*/ {
+ public:
+  morkLink mDeque_Head;
+
+ public:  // construction
+  morkDeque();  // { mDeque_Head.SelfRefer(); }
+
+ public:  // methods
+  morkLink* RemoveFirst();
+
+  morkLink* RemoveLast();
+
+  morkLink* At(mork_pos index) const; /* one-based, not zero-based */
+
+  mork_pos IndexOf(const morkLink* inMember) const;
+  /* one-based index ; zero means member is not in deque */
+
+  mork_num Length() const;
+
+  /* the following method is more efficient for long lists: */
+  int LengthCompare(mork_num inCount) const;
+  /* -1: length < count, 0: length == count, 1: length > count */
+
+ public:  // inlines
+  mork_bool IsEmpty() const {
+    return (mDeque_Head.mLink_Next == (morkLink*)&mDeque_Head);
+  }
+
+  // Successor of `old`, or null once the walk wraps back to the sentinel.
+  morkLink* After(const morkLink* old) const {
+    return (((old)->mLink_Next != &mDeque_Head) ? (old)->mLink_Next
+                                                : (morkLink*)0);
+  }
+
+  // Predecessor of `old`, or null at the front of the deque.
+  morkLink* Before(const morkLink* old) const {
+    return (((old)->mLink_Prev != &mDeque_Head) ? (old)->mLink_Prev
+                                                : (morkLink*)0);
+  }
+
+  morkLink* First() const {
+    return ((mDeque_Head.mLink_Next != &mDeque_Head) ? mDeque_Head.mLink_Next
+                                                     : (morkLink*)0);
+  }
+
+  morkLink* Last() const {
+    return ((mDeque_Head.mLink_Prev != &mDeque_Head) ? mDeque_Head.mLink_Prev
+                                                     : (morkLink*)0);
+  }
+
+  /*
+  From IronDoc documentation for AddFirst:
+  +--------+   +--------+      +--------+   +--------+   +--------+
+  | h.next |-->| b.next |      | h.next |-->| a.next |-->| b.next |
+  +--------+   +--------+  ==> +--------+   +--------+   +--------+
+  | h.prev |<--| b.prev |      | h.prev |<--| a.prev |<--| b.prev |
+  +--------+   +--------+      +--------+   +--------+   +--------+
+  */
+
+  void AddFirst(morkLink* in) /*i*/
+  {
+    (mDeque_Head.mLink_Next->mLink_Prev = in)->mLink_Next =
+        mDeque_Head.mLink_Next;
+    (in->mLink_Prev = &mDeque_Head)->mLink_Next = in;
+  }
+  /*
+  From IronDoc documentation for AddLast:
+  +--------+   +--------+      +--------+   +--------+   +--------+
+  | y.next |-->| h.next |      | y.next |-->| z.next |-->| h.next |
+  +--------+   +--------+  ==> +--------+   +--------+   +--------+
+  | y.prev |<--| h.prev |      | y.prev |<--| z.prev |<--| h.prev |
+  +--------+   +--------+      +--------+   +--------+   +--------+
+  */
+
+  void AddLast(morkLink* in) {
+    (mDeque_Head.mLink_Prev->mLink_Next = in)->mLink_Prev =
+        mDeque_Head.mLink_Prev;
+    (in->mLink_Next = &mDeque_Head)->mLink_Prev = in;
+  }
+};
+
+#endif /* _MORKDEQUE_ */
diff --git a/comm/mailnews/db/mork/morkEnv.cpp b/comm/mailnews/db/mork/morkEnv.cpp
new file mode 100644
index 0000000000..c7c67f1e9e
--- /dev/null
+++ b/comm/mailnews/db/mork/morkEnv.cpp
@@ -0,0 +1,519 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKCH_
+# include "morkCh.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKFACTORY_
+# include "morkFactory.h"
+#endif
+
+#include "mozilla/Char16.h"
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkEnv::CloseMorkNode(
+    morkEnv* ev) /*i*/  // CloseEnv() only if open
+{
+  // Standard morkNode close protocol: only transition once, bracketing the
+  // real teardown (CloseEnv) with the closing/shut state marks.
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseEnv(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkEnv::~morkEnv() /*i*/  // assert CloseEnv() executed earlier
+{
+  // Ensure teardown ran even if the owner forgot to close explicitly.
+  CloseMorkNode(mMorkEnv);
+  if (mEnv_Heap) {
+    mork_bool ownsHeap = mEnv_OwnsHeap;
+    nsIMdbHeap* saveHeap = mEnv_Heap;
+
+    if (ownsHeap) {
+#ifdef MORK_DEBUG_HEAP_STATS
+      printf("%d blocks remaining \n",
+             ((orkinHeap*)saveHeap)->HeapBlockCount());
+      mork_u4* array = (mork_u4*)this;
+      array -= 3;
+      // null out heap ptr in mem block so we won't crash trying to use it to
+      // delete the env.
+      // NOTE(review): `*array = nullptr` assigns nullptr to a mork_u4 lvalue,
+      // which looks like it would not compile when MORK_DEBUG_HEAP_STATS is
+      // defined — confirm (probably intended `*array = 0`).
+      *array = nullptr;
+#endif  // MORK_DEBUG_HEAP_STATS
+      // whoops, this is our heap - hmm. Can't delete it, or not allocate env's
+      // from an orkinHeap.
+      delete saveHeap;
+    }
+  }
+  // MORK_ASSERT(mEnv_SelfAsMdbEnv==0);
+  MORK_ASSERT(mEnv_ErrorHook == 0);
+}
+
+/* choose morkBool_kTrue or morkBool_kFalse for kBeVerbose: */
+#define morkEnv_kBeVerbose morkBool_kFalse
+
+/*public non-poly*/
+// Bootstrap constructor used by the factory: builds an env with no backing
+// nsIMdbEnv, allocating the handle pool directly with global usage.
+// mEnv_Heap is deliberately NOT refcounted (see commented-out slot call).
+morkEnv::morkEnv(const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                 morkFactory* ioFactory, nsIMdbHeap* ioSlotHeap)
+    : morkObject(inUsage, ioHeap, morkColor_kNone),
+      mEnv_Factory(ioFactory),
+      mEnv_Heap(ioSlotHeap)
+
+      ,
+      mEnv_SelfAsMdbEnv(0),
+      mEnv_ErrorHook(0),
+      mEnv_HandlePool(0)
+
+      ,
+      mEnv_ErrorCount(0),
+      mEnv_WarningCount(0)
+
+      ,
+      mEnv_ErrorCode(NS_OK)
+
+      ,
+      mEnv_DoTrace(morkBool_kFalse),
+      mEnv_AutoClear(morkAble_kDisabled),
+      mEnv_ShouldAbort(morkBool_kFalse),
+      mEnv_BeVerbose(morkEnv_kBeVerbose),
+      mEnv_OwnsHeap(morkBool_kFalse) {
+  MORK_ASSERT(ioSlotHeap && ioFactory);
+  if (ioSlotHeap) {
+    // mEnv_Heap is NOT refcounted:
+    // nsIMdbHeap_SlotStrongHeap(ioSlotHeap, this, &mEnv_Heap);
+
+    mEnv_HandlePool =
+        new morkPool(morkUsage::kGlobal, (nsIMdbHeap*)0, ioSlotHeap);
+
+    MORK_ASSERT(mEnv_HandlePool);
+    if (mEnv_HandlePool && this->Good()) {
+      // Brand the node as an env and bias the weak count so the env tends
+      // not to be destroyed prematurely (bonus is currently 0).
+      mNode_Derived = morkDerived_kEnv;
+      mNode_Refs += morkEnv_kWeakRefCountEnvBonus;
+    }
+  }
+}
+
+/*public non-poly*/
+// Normal constructor: wraps an existing nsIMdbEnv facade (inSelfAsMdbEnv) and
+// allocates the handle pool from ioSlotHeap via the heap placement-new.
+morkEnv::morkEnv(morkEnv* ev, /*i*/
+                 const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                 nsIMdbEnv* inSelfAsMdbEnv, morkFactory* ioFactory,
+                 nsIMdbHeap* ioSlotHeap)
+    : morkObject(ev, inUsage, ioHeap, morkColor_kNone, (morkHandle*)0),
+      mEnv_Factory(ioFactory),
+      mEnv_Heap(ioSlotHeap)
+
+      ,
+      mEnv_SelfAsMdbEnv(inSelfAsMdbEnv),
+      mEnv_ErrorHook(0),
+      mEnv_HandlePool(0)
+
+      ,
+      mEnv_ErrorCount(0),
+      mEnv_WarningCount(0)
+
+      ,
+      mEnv_ErrorCode(NS_OK)
+
+      ,
+      mEnv_DoTrace(morkBool_kFalse),
+      mEnv_AutoClear(morkAble_kDisabled),
+      mEnv_ShouldAbort(morkBool_kFalse),
+      mEnv_BeVerbose(morkEnv_kBeVerbose),
+      mEnv_OwnsHeap(morkBool_kFalse) {
+  // $$$ do we need to refcount the inSelfAsMdbEnv nsIMdbEnv??
+
+  if (ioFactory && inSelfAsMdbEnv && ioSlotHeap) {
+    // mEnv_Heap is NOT refcounted:
+    // nsIMdbHeap_SlotStrongHeap(ioSlotHeap, ev, &mEnv_Heap);
+
+    mEnv_HandlePool = new (*ioSlotHeap, ev)
+        morkPool(ev, morkUsage::kHeap, ioSlotHeap, ioSlotHeap);
+
+    MORK_ASSERT(mEnv_HandlePool);
+    if (mEnv_HandlePool && ev->Good()) {
+      mNode_Derived = morkDerived_kEnv;
+      mNode_Refs += morkEnv_kWeakRefCountEnvBonus;
+    }
+  } else
+    ev->NilPointerError();
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkEnv, morkObject, nsIMdbEnv)
+/*public non-poly*/ void morkEnv::CloseEnv(
+    morkEnv* ev) /*i*/  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    // $$$ release mEnv_SelfAsMdbEnv??
+    // $$$ release mEnv_ErrorHook??
+
+    mEnv_SelfAsMdbEnv = 0;
+    mEnv_ErrorHook = 0;
+
+    morkPool* savePool = mEnv_HandlePool;
+    morkPool::SlotStrongPool((morkPool*)0, ev, &mEnv_HandlePool);
+    // free the pool
+    // NOTE(review): mEnv_SelfAsMdbEnv was zeroed just above, so this branch
+    // can never be taken and the heap-Free path for savePool looks dead —
+    // confirm whether the test was meant to use the pre-zero value.
+    if (mEnv_SelfAsMdbEnv) {
+      if (savePool && mEnv_Heap) mEnv_Heap->Free(this->AsMdbEnv(), savePool);
+    } else {
+      if (savePool) {
+        if (savePool->IsOpenNode()) savePool->CloseMorkNode(ev);
+        delete savePool;
+      }
+      // how do we free this? might need to get rid of asserts.
+    }
+    // mEnv_Factory is NOT refcounted
+
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Format an mdbOid into outBuf as "<id-hex>:<scope>", where the scope is
+// written as a literal name character when it is a printable 7-bit name char,
+// or as "^<scope-hex>" otherwise.  Returns the number of characters written
+// (excluding the NUL).  Caller must supply a buffer large enough for two hex
+// tokens plus separators (see TokenAsHex's 33-byte requirement).
+mork_size morkEnv::OidAsHex(void* outBuf, const mdbOid& inOid)
+// sprintf(buf, "%lX:^%lX", (long) inOid.mOid_Id, (long) inOid.mOid_Scope);
+{
+  mork_u1* p = (mork_u1*)outBuf;
+  mork_size outSize = this->TokenAsHex(p, inOid.mOid_Id);
+  p += outSize;
+  *p++ = ':';
+
+  mork_scope scope = inOid.mOid_Scope;
+  if (scope < 0x80 && morkCh_IsName((mork_ch)scope)) {
+    *p++ = (mork_u1)scope;
+    *p = 0;  // null termination
+    outSize += 2;
+  } else {
+    *p++ = '^';
+    mork_size scopeSize = this->TokenAsHex(p, scope);
+    outSize += scopeSize + 2;
+  }
+  return outSize;
+}
+
+// Convert a two-character hex pair (case-insensitive) to one byte.  Any
+// non-hex character silently contributes zero for its nibble.
+mork_u1 morkEnv::HexToByte(mork_ch inFirstHex, mork_ch inSecondHex) {
+  mork_u1 hi = 0;  // high four hex bits
+  mork_flags f = morkCh_GetFlags(inFirstHex);
+  if (morkFlags_IsDigit(f))
+    hi = (mork_u1)(inFirstHex - (mork_ch)'0');
+  else if (morkFlags_IsUpper(f))
+    hi = (mork_u1)((inFirstHex - (mork_ch)'A') + 10);
+  else if (morkFlags_IsLower(f))
+    hi = (mork_u1)((inFirstHex - (mork_ch)'a') + 10);
+
+  mork_u1 lo = 0;  // low four hex bits
+  f = morkCh_GetFlags(inSecondHex);
+  if (morkFlags_IsDigit(f))
+    lo = (mork_u1)(inSecondHex - (mork_ch)'0');
+  else if (morkFlags_IsUpper(f))
+    lo = (mork_u1)((inSecondHex - (mork_ch)'A') + 10);
+  else if (morkFlags_IsLower(f))
+    lo = (mork_u1)((inSecondHex - (mork_ch)'a') + 10);
+
+  return (mork_u1)((hi << 4) | lo);
+}
+
+// TokenAsHex() is the same as sprintf(outBuf, "%lX", (long) inToken);
+// Writes up to 32 hex digits, plus a NUL-terminator. So outBuf must
+// be at least 33 bytes.
+// Return value is number of characters written, excluding the NUL.
+mork_size morkEnv::TokenAsHex(void* outBuf, mork_token inToken) {
+  static const char morkEnv_kHexDigits[] = "0123456789ABCDEF";
+  char* p = (char*)outBuf;
+  char* end = p + 32;  // write no more than 32 digits for safety
+  if (inToken) {
+    // first write all the hex digits in backwards order:
+    while (p < end && inToken)  // more digits to write?
+    {
+      *p++ = morkEnv_kHexDigits[inToken & 0x0F];  // low four bits
+      inToken >>= 4;  // we fervently hope this does not sign extend
+    }
+    *p = 0;                  // end the string with a null byte
+    char* s = (char*)outBuf;  // first byte in string
+    mork_size size = (mork_size)(p - s);  // distance from start
+
+    // now reverse the string in place:
+    // note that p starts on the null byte, so we need predecrement:
+    while (--p > s)  // need to swap another byte in the string?
+    {
+      char c = *p;  // temp for swap
+      *p = *s;
+      *s++ = c;  // move s forward here, and p backward in the test
+    }
+    return size;
+  } else  // special case for zero integer
+  {
+    *p++ = '0';  // write a zero digit
+    *p = 0;      // end with a null byte
+    return 1;    // one digit in hex representation
+  }
+}
+
+// Copy a NUL-terminated PathChar string into an mdbYarn, truncating to the
+// yarn's buffer size.  Bytes that do not fit are recorded in mYarn_More;
+// mYarn_Fill receives the number of bytes actually copied.  A nil outYarn is
+// reported as an error; a nil inString yields an empty yarn.
+void morkEnv::StringToYarn(const PathChar* inString, mdbYarn* outYarn) {
+  if (outYarn) {
+    mdb_fill fill =
+        (inString) ? (mdb_fill)MORK_STRLEN(inString) * sizeof(PathChar) : 0;
+
+    if (fill)  // have nonempty content?
+    {
+      mdb_size size = outYarn->mYarn_Size;  // max dest size
+      if (fill > size)                      // too much string content?
+      {
+        outYarn->mYarn_More = fill - size;  // extra string bytes omitted
+        fill = size;  // copy no more bytes than size of yarn buffer
+      }
+      void* dest = outYarn->mYarn_Buf;  // where bytes are going
+      if (!dest)                        // nil destination address buffer?
+        fill = 0;                       // we can't write any content at all
+
+      if (fill)  // anything to copy?
+        MORK_MEMCPY(dest, inString, fill);  // copy fill bytes to yarn
+
+      outYarn->mYarn_Fill = fill;  // tell yarn size of copied content
+    } else  // no content to put into the yarn
+    {
+      outYarn->mYarn_Fill = 0;  // tell yarn that string has no bytes
+    }
+    outYarn->mYarn_Form = 0;  // always update the form slot
+  } else
+    this->NilPointerError();
+}
+
+// Duplicate a NUL-terminated PathChar string into storage from ioHeap.
+// Returns null (after reporting an error) when either argument is nil or the
+// allocation fails; caller frees with FreeString().
+morkEnv::PathChar* morkEnv::CopyString(nsIMdbHeap* ioHeap,
+                                       const PathChar* inString) {
+  PathChar* outString = nullptr;
+  if (ioHeap && inString) {
+    mork_size size = (MORK_STRLEN(inString) + 1) * sizeof(PathChar);
+    ioHeap->Alloc(this->AsMdbEnv(), size, (void**)&outString);
+    if (outString) MORK_STRCPY(outString, inString);
+  } else
+    this->NilPointerError();
+  return outString;
+}
+
+// Release a string obtained from CopyString(); a nil string is tolerated,
+// but a nil heap is reported as an error.
+void morkEnv::FreeString(nsIMdbHeap* ioHeap, PathChar* ioString) {
+  if (ioHeap) {
+    if (ioString) ioHeap->Free(this->AsMdbEnv(), ioString);
+  } else
+    this->NilPointerError();
+}
+
+// Record an error on this env: bump the error count, latch a generic failure
+// code, and notify any installed nsIMdbErrorHook.  The MORK_ASSERT makes
+// every error loud in debug builds.
+void morkEnv::NewError(const char* inString) {
+  MORK_ASSERT(morkBool_kFalse);  // get developer's attention
+
+  ++mEnv_ErrorCount;
+  mEnv_ErrorCode = NS_ERROR_FAILURE;
+
+  if (mEnv_ErrorHook) mEnv_ErrorHook->OnErrorString(this->AsMdbEnv(), inString);
+}
+
+// Record a warning: counts separately from errors and does not change the
+// env's error code.
+void morkEnv::NewWarning(const char* inString) {
+  MORK_ASSERT(morkBool_kFalse);  // get developer's attention
+
+  ++mEnv_WarningCount;
+  if (mEnv_ErrorHook)
+    mEnv_ErrorHook->OnWarningString(this->AsMdbEnv(), inString);
+}
+
+// Convenience wrappers for the common error messages used throughout Mork.
+void morkEnv::StubMethodOnlyError() { this->NewError("method is stub only"); }
+
+void morkEnv::OutOfMemoryError() { this->NewError("out of memory"); }
+
+void morkEnv::CantMakeWhenBadError() {
+  this->NewError("can't make an object when ev->Bad()");
+}
+
+static const char morkEnv_kNilPointer[] = "nil pointer";
+
+void morkEnv::NilPointerError() { this->NewError(morkEnv_kNilPointer); }
+
+void morkEnv::NilPointerWarning() { this->NewWarning(morkEnv_kNilPointer); }
+
+void morkEnv::NewNonEnvError() { this->NewError("non-env instance"); }
+
+// Report whichever required env slot is nil (pool and/or factory), or a
+// generic message if both are actually present.
+void morkEnv::NilEnvSlotError() {
+  if (!mEnv_HandlePool || !mEnv_Factory) {
+    if (!mEnv_HandlePool) this->NewError("nil mEnv_HandlePool");
+    if (!mEnv_Factory) this->NewError("nil mEnv_Factory");
+  } else
+    this->NewError("unknown nil env slot");
+}
+
+void morkEnv::NonEnvTypeError(morkEnv* ev) { ev->NewError("non morkEnv"); }
+
+// Reset all error/warning bookkeeping unconditionally.
+void morkEnv::ClearMorkErrorsAndWarnings() {
+  mEnv_ErrorCount = 0;
+  mEnv_WarningCount = 0;
+  mEnv_ErrorCode = NS_OK;
+  mEnv_ShouldAbort = morkBool_kFalse;
+}
+
+// Same reset, but only when auto-clear mode is enabled on this env.
+void morkEnv::AutoClearMorkErrorsAndWarnings() {
+  if (this->DoAutoClear()) {
+    mEnv_ErrorCount = 0;
+    mEnv_WarningCount = 0;
+    mEnv_ErrorCode = NS_OK;
+    mEnv_ShouldAbort = morkBool_kFalse;
+  }
+}
+
+// Downcast an nsIMdbEnv to the concrete morkEnv with a dynamic type check
+// (IsEnv), applying the auto-clear policy on success.  Returns null (after
+// asserting) for a nil pointer or a non-env object.
+/*static*/ morkEnv* morkEnv::FromMdbEnv(
+    nsIMdbEnv* ioEnv)  // dynamic type checking
+{
+  morkEnv* outEnv = 0;
+  if (ioEnv) {
+    // Note this cast is expected to perform some address adjustment of the
+    // pointer, so oenv likely does not equal ioEnv. Do not cast to void*
+    // first to force an exactly equal pointer (we tried it and it's wrong).
+    morkEnv* ev = (morkEnv*)ioEnv;
+    if (ev && ev->IsEnv()) {
+      if (ev->DoAutoClear()) {
+        ev->mEnv_ErrorCount = 0;
+        ev->mEnv_WarningCount = 0;
+        ev->mEnv_ErrorCode = NS_OK;
+      }
+      outEnv = ev;
+    } else
+      MORK_ASSERT(outEnv);
+  } else
+    MORK_ASSERT(outEnv);
+  return outEnv;
+}
+
+// ----- nsIMdbEnv attribute accessors -----
+// Error/warning count getters tolerate nil out params (each is written only
+// when supplied).
+NS_IMETHODIMP
+morkEnv::GetErrorCount(mdb_count* outCount, mdb_bool* outShouldAbort) {
+  if (outCount) *outCount = mEnv_ErrorCount;
+  if (outShouldAbort) *outShouldAbort = mEnv_ShouldAbort;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::GetWarningCount(mdb_count* outCount, mdb_bool* outShouldAbort) {
+  if (outCount) *outCount = mEnv_WarningCount;
+  if (outShouldAbort) *outShouldAbort = mEnv_ShouldAbort;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::GetEnvBeVerbose(mdb_bool* outBeVerbose) {
+  NS_ENSURE_ARG_POINTER(outBeVerbose);
+  *outBeVerbose = mEnv_BeVerbose;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::SetEnvBeVerbose(mdb_bool inBeVerbose) {
+  mEnv_BeVerbose = inBeVerbose;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::GetDoTrace(mdb_bool* outDoTrace) {
+  NS_ENSURE_ARG_POINTER(outDoTrace);
+  *outDoTrace = mEnv_DoTrace;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::SetDoTrace(mdb_bool inDoTrace) {
+  mEnv_DoTrace = inDoTrace;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::GetAutoClear(mdb_bool* outAutoClear) {
+  NS_ENSURE_ARG_POINTER(outAutoClear);
+  *outAutoClear = DoAutoClear();
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::SetAutoClear(mdb_bool inAutoClear) {
+  if (inAutoClear)
+    EnableAutoClear();
+  else
+    DisableAutoClear();
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::GetErrorHook(nsIMdbErrorHook** acqErrorHook) {
+  NS_ENSURE_ARG_POINTER(acqErrorHook);
+  // Hand out an owning reference per the acq* naming convention.
+  *acqErrorHook = mEnv_ErrorHook;
+  NS_IF_ADDREF(mEnv_ErrorHook);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::SetErrorHook(nsIMdbErrorHook* ioErrorHook)  // becomes referenced
+{
+  // NOTE(review): the comment says the hook "becomes referenced", but the
+  // pointer is stored without an AddRef here — confirm ownership against the
+  // release path (CloseEnv also just nils the slot).
+  mEnv_ErrorHook = ioErrorHook;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::GetHeap(nsIMdbHeap** acqHeap) {
+  NS_ENSURE_ARG_POINTER(acqHeap);
+  nsIMdbHeap* outHeap = mEnv_Heap;
+
+  // NOTE(review): acqHeap was already null-checked by NS_ENSURE_ARG_POINTER,
+  // so this second test is redundant.  No reference is added (mEnv_Heap is
+  // not refcounted — see the constructors).
+  if (acqHeap) *acqHeap = outHeap;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::SetHeap(nsIMdbHeap* ioHeap)  // becomes referenced
+{
+  // Slot helper swaps the strong heap reference in place.
+  nsIMdbHeap_SlotStrongHeap(ioHeap, this, &mEnv_Heap);
+  return NS_OK;
+}
+// } ----- end attribute methods -----
+
+NS_IMETHODIMP
+morkEnv::ClearErrors()  // clear errors before re-entering db API
+{
+  // Resets error state only; the warning count is left untouched.
+  mEnv_ErrorCount = 0;
+  mEnv_ErrorCode = NS_OK;
+  mEnv_ShouldAbort = morkBool_kFalse;
+
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::ClearWarnings()  // clear warnings
+{
+  mEnv_WarningCount = 0;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+morkEnv::ClearErrorsAndWarnings()  // clear both errors & warnings
+{
+  ClearMorkErrorsAndWarnings();
+  return NS_OK;
+}
+// } ===== end nsIMdbEnv methods =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkEnv.h b/comm/mailnews/db/mork/morkEnv.h
new file mode 100644
index 0000000000..e9b635051d
--- /dev/null
+++ b/comm/mailnews/db/mork/morkEnv.h
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKENV_
+#define _MORKENV_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+// sean was here
+#include "mozilla/Path.h"
+#include "nsError.h"
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kEnv /*i*/ 0x4576 /* ascii 'Ev' */
+
+// use NS error codes to make Mork easier to use with the rest of mozilla
+#define morkEnv_kNoError NS_SUCCEEDED /* no error has happened */
+#define morkEnv_kNonEnvTypeError \
+ NS_ERROR_FAILURE /* morkEnv::IsEnv() is false */
+
+#define morkEnv_kStubMethodOnlyError NS_ERROR_NO_INTERFACE
+#define morkEnv_kOutOfMemoryError NS_ERROR_OUT_OF_MEMORY
+#define morkEnv_kNilPointerError NS_ERROR_NULL_POINTER
+#define morkEnv_kNewNonEnvError NS_ERROR_FAILURE
+#define morkEnv_kNilEnvSlotError NS_ERROR_FAILURE
+
+#define morkEnv_kBadFactoryError NS_ERROR_FACTORY_NOT_LOADED
+#define morkEnv_kBadFactoryEnvError NS_ERROR_FACTORY_NOT_LOADED
+#define morkEnv_kBadEnvError NS_ERROR_FAILURE
+
+#define morkEnv_kNonHandleTypeError NS_ERROR_FAILURE
+#define morkEnv_kNonOpenNodeError NS_ERROR_FAILURE
+
+/* try NOT to leak all env instances */
+#define morkEnv_kWeakRefCountEnvBonus 0
+
+/*| morkEnv:
+|*/
+class morkEnv : public morkObject, public nsIMdbEnv {
+ using PathChar = mozilla::filesystem::Path::value_type;
+ NS_DECL_ISUPPORTS_INHERITED
+
+ // public: // slots inherited from morkObject (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ // mork_color mBead_Color; // ID for this bead
+ // morkHandle* mObject_Handle; // weak ref to handle for this object
+
+ public: // state is public because the entire Mork system is private
+ morkFactory* mEnv_Factory; // NON-refcounted factory
+ nsIMdbHeap* mEnv_Heap; // NON-refcounted heap
+
+ nsIMdbEnv* mEnv_SelfAsMdbEnv;
+ nsIMdbErrorHook* mEnv_ErrorHook;
+
+ morkPool* mEnv_HandlePool; // pool for re-using handles
+
+ mork_u2 mEnv_ErrorCount;
+ mork_u2 mEnv_WarningCount;
+
+ nsresult mEnv_ErrorCode;
+
+ mork_bool mEnv_DoTrace;
+ mork_able mEnv_AutoClear;
+ mork_bool mEnv_ShouldAbort;
+ mork_bool mEnv_BeVerbose;
+ mork_bool mEnv_OwnsHeap;
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(morkEnv* ev) override; // CloseEnv() only if open
+ virtual ~morkEnv(); // assert that CloseEnv() executed earlier
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD GetErrorCount(mdb_count* outCount,
+ mdb_bool* outShouldAbort) override;
+ NS_IMETHOD GetWarningCount(mdb_count* outCount,
+ mdb_bool* outShouldAbort) override;
+
+ NS_IMETHOD GetEnvBeVerbose(mdb_bool* outBeVerbose) override;
+ NS_IMETHOD SetEnvBeVerbose(mdb_bool inBeVerbose) override;
+
+ NS_IMETHOD GetDoTrace(mdb_bool* outDoTrace) override;
+ NS_IMETHOD SetDoTrace(mdb_bool inDoTrace) override;
+
+ NS_IMETHOD GetAutoClear(mdb_bool* outAutoClear) override;
+ NS_IMETHOD SetAutoClear(mdb_bool inAutoClear) override;
+
+ NS_IMETHOD GetErrorHook(nsIMdbErrorHook** acqErrorHook) override;
+ NS_IMETHOD SetErrorHook(
+ nsIMdbErrorHook* ioErrorHook) override; // becomes referenced
+
+ NS_IMETHOD GetHeap(nsIMdbHeap** acqHeap) override;
+ NS_IMETHOD SetHeap(nsIMdbHeap* ioHeap) override; // becomes referenced
+ // } ----- end attribute methods -----
+
+ NS_IMETHOD ClearErrors() override; // clear errors beore re-entering db API
+ NS_IMETHOD ClearWarnings() override; // clear warnings
+ NS_IMETHOD ClearErrorsAndWarnings() override; // clear both errors & warnings
+ // } ===== end nsIMdbEnv methods =====
+ public: // morkEnv construction & destruction
+ morkEnv(const morkUsage& inUsage, nsIMdbHeap* ioHeap, morkFactory* ioFactory,
+ nsIMdbHeap* ioSlotHeap);
+ morkEnv(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ nsIMdbEnv* inSelfAsMdbEnv, morkFactory* ioFactory,
+ nsIMdbHeap* ioSlotHeap);
+ void CloseEnv(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+ morkEnv(const morkEnv& other);
+ morkEnv& operator=(const morkEnv& other);
+
+ public: // dynamic type identification
+ mork_bool IsEnv() const {
+ return IsNode() && mNode_Derived == morkDerived_kEnv;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // utility env methods
+ mork_u1 HexToByte(mork_ch inFirstHex, mork_ch inSecondHex);
+
+ mork_size TokenAsHex(void* outBuf, mork_token inToken);
+ // TokenAsHex() is the same as sprintf(outBuf, "%lX", (long) inToken);
+
+ mork_size OidAsHex(void* outBuf, const mdbOid& inOid);
+ // sprintf(buf, "%lX:^%lX", (long) inOid.mOid_Id, (long) inOid.mOid_Scope);
+
+ PathChar* CopyString(nsIMdbHeap* ioHeap, const PathChar* inString);
+ void FreeString(nsIMdbHeap* ioHeap, PathChar* ioString);
+ void StringToYarn(const PathChar* inString, mdbYarn* outYarn);
+
+ public: // other env methods
+ morkHandleFace* NewHandle(mork_size inSize) {
+ return mEnv_HandlePool->NewHandle(this, inSize, (morkZone*)0);
+ }
+
+ void ZapHandle(morkHandleFace* ioHandle) {
+ mEnv_HandlePool->ZapHandle(this, ioHandle);
+ }
+
+ void EnableAutoClear() { mEnv_AutoClear = morkAble_kEnabled; }
+ void DisableAutoClear() { mEnv_AutoClear = morkAble_kDisabled; }
+
+ mork_bool DoAutoClear() const { return mEnv_AutoClear == morkAble_kEnabled; }
+
+ void NewError(const char* inString);
+ void NewWarning(const char* inString);
+
+ void ClearMorkErrorsAndWarnings(); // clear both errors & warnings
+ void AutoClearMorkErrorsAndWarnings(); // clear if auto is enabled
+
+ void StubMethodOnlyError();
+ void OutOfMemoryError();
+ void NilPointerError();
+ void NilPointerWarning();
+ void CantMakeWhenBadError();
+ void NewNonEnvError();
+ void NilEnvSlotError();
+
+ void NonEnvTypeError(morkEnv* ev);
+
+ // canonical env convenience methods to check for presence of errors:
+ mork_bool Good() const { return (mEnv_ErrorCount == 0); }
+ mork_bool Bad() const { return (mEnv_ErrorCount != 0); }
+
+ nsIMdbEnv* AsMdbEnv() { return (nsIMdbEnv*)this; }
+ static morkEnv* FromMdbEnv(nsIMdbEnv* ioEnv); // dynamic type checking
+
+ nsresult AsErr() const { return mEnv_ErrorCode; }
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakEnv(morkEnv* me, morkEnv* ev, morkEnv** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongEnv(morkEnv* me, morkEnv* ev, morkEnv** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+#undef MOZ_ASSERT_TYPE_OK_FOR_REFCOUNTING
+#ifdef MOZ_IS_DESTRUCTIBLE
+# define MOZ_ASSERT_TYPE_OK_FOR_REFCOUNTING(X) \
+ static_assert( \
+ !MOZ_IS_DESTRUCTIBLE(X) || mozilla::IsSame<X, morkEnv>::value, \
+ "Reference-counted class " #X \
+ " should not have a public destructor. " \
+ "Try to make this class's destructor non-public. If that is really " \
+ "not possible, you can whitelist this class by providing a " \
+ "HasDangerousPublicDestructor specialization for it.");
+#else
+# define MOZ_ASSERT_TYPE_OK_FOR_REFCOUNTING(X)
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKENV_ */
diff --git a/comm/mailnews/db/mork/morkFactory.cpp b/comm/mailnews/db/mork/morkFactory.cpp
new file mode 100644
index 0000000000..09a76ba86a
--- /dev/null
+++ b/comm/mailnews/db/mork/morkFactory.cpp
@@ -0,0 +1,521 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKFACTORY_
+# include "morkFactory.h"
+#endif
+
+#ifndef _ORKINHEAP_
+# include "orkinHeap.h"
+#endif
+
+#ifndef _MORKFILE_
+# include "morkFile.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKTHUMB_
+# include "morkThumb.h"
+#endif
+
+#ifndef _MORKWRITER_
+# include "morkWriter.h"
+#endif
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkFactory::CloseMorkNode(
+ morkEnv* ev) /*i*/ // CloseFactory() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseFactory(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkFactory::~morkFactory() /*i*/ // assert CloseFactory() executed earlier
+{
+ CloseFactory(&mFactory_Env);
+ MORK_ASSERT(mFactory_Env.IsShutNode());
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkFactory::morkFactory() // uses orkinHeap
+ : morkObject(morkUsage::kGlobal, (nsIMdbHeap*)0, morkColor_kNone),
+ mFactory_Env(morkUsage::kMember, (nsIMdbHeap*)0, this, new orkinHeap()),
+ mFactory_Heap() {
+ if (mFactory_Env.Good()) {
+ mNode_Derived = morkDerived_kFactory;
+ mNode_Refs += morkFactory_kWeakRefCountBonus;
+ }
+}
+
+/*public non-poly*/
+morkFactory::morkFactory(nsIMdbHeap* ioHeap)
+ : morkObject(morkUsage::kHeap, ioHeap, morkColor_kNone),
+ mFactory_Env(morkUsage::kMember, (nsIMdbHeap*)0, this, ioHeap),
+ mFactory_Heap() {
+ if (mFactory_Env.Good()) {
+ mNode_Derived = morkDerived_kFactory;
+ mNode_Refs += morkFactory_kWeakRefCountBonus;
+ }
+}
+
+/*public non-poly*/
+morkFactory::morkFactory(morkEnv* ev, /*i*/
+ const morkUsage& inUsage, nsIMdbHeap* ioHeap)
+ : morkObject(ev, inUsage, ioHeap, morkColor_kNone, (morkHandle*)0),
+ mFactory_Env(morkUsage::kMember, (nsIMdbHeap*)0, this, ioHeap),
+ mFactory_Heap() {
+ if (ev->Good()) {
+ mNode_Derived = morkDerived_kFactory;
+ mNode_Refs += morkFactory_kWeakRefCountBonus;
+ }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkFactory, morkObject, nsIMdbFactory)
+
+extern "C" nsIMdbFactory* MakeMdbFactory() {
+ return new morkFactory(new orkinHeap());
+}
+
+/*public non-poly*/ void morkFactory::CloseFactory(
+ morkEnv* ev) /*i*/ // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ mFactory_Env.CloseMorkNode(ev);
+ this->CloseObject(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+morkEnv* morkFactory::GetInternalFactoryEnv(nsresult* outErr) {
+ morkEnv* outEnv = 0;
+ if (IsNode() && IsOpenNode() && IsFactory()) {
+ morkEnv* fenv = &mFactory_Env;
+ if (fenv && fenv->IsNode() && fenv->IsOpenNode() && fenv->IsEnv()) {
+ fenv->ClearMorkErrorsAndWarnings(); // drop any earlier errors
+ outEnv = fenv;
+ } else
+ *outErr = morkEnv_kBadFactoryEnvError;
+ } else
+ *outErr = morkEnv_kBadFactoryError;
+
+ return outEnv;
+}
+
+void morkFactory::NonFactoryTypeError(morkEnv* ev) {
+ ev->NewError("non morkFactory");
+}
+
+NS_IMETHODIMP
+morkFactory::OpenOldFile(nsIMdbEnv* mev, nsIMdbHeap* ioHeap,
+ const PathChar* inFilePath, mork_bool inFrozen,
+ nsIMdbFile** acqFile)
+// Choose some subclass of nsIMdbFile to instantiate, in order to read
+// (and write if not frozen) the file known by inFilePath. The file
+// returned should be open and ready for use, and presumably positioned
+// at the first byte position of the file. The exact manner in which
+// files must be opened is considered a subclass specific detail, and
+// other portions or Mork source code don't want to know how it's done.
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ morkFile* file = nullptr;
+ if (ev) {
+ if (!ioHeap) ioHeap = &mFactory_Heap;
+
+ file = morkFile::OpenOldFile(ev, ioHeap, inFilePath, inFrozen);
+ NS_IF_ADDREF(file);
+
+ outErr = ev->AsErr();
+ }
+ if (acqFile) *acqFile = file;
+
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkFactory::CreateNewFile(nsIMdbEnv* mev, nsIMdbHeap* ioHeap,
+ const PathChar* inFilePath, nsIMdbFile** acqFile)
+// Choose some subclass of nsIMdbFile to instantiate, in order to read
+// (and write if not frozen) the file known by inFilePath. The file
+// returned should be created and ready for use, and presumably positioned
+// at the first byte position of the file. The exact manner in which
+// files must be opened is considered a subclass specific detail, and
+// other portions or Mork source code don't want to know how it's done.
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ morkFile* file = nullptr;
+ if (ev) {
+ if (!ioHeap) ioHeap = &mFactory_Heap;
+
+ file = morkFile::CreateNewFile(ev, ioHeap, inFilePath);
+ if (file) NS_ADDREF(file);
+
+ outErr = ev->AsErr();
+ }
+ if (acqFile) *acqFile = file;
+
+ return outErr;
+}
+// } ----- end file methods -----
+
+// { ----- begin env methods -----
+NS_IMETHODIMP
+morkFactory::MakeEnv(nsIMdbHeap* ioHeap, nsIMdbEnv** acqEnv)
+// ioHeap can be nil, causing a MakeHeap() style heap instance to be used
+{
+ nsresult outErr = NS_OK;
+ nsIMdbEnv* outEnv = 0;
+ mork_bool ownsHeap = (ioHeap == 0);
+ if (!ioHeap) ioHeap = new orkinHeap();
+
+ if (acqEnv && ioHeap) {
+ morkEnv* fenv = this->GetInternalFactoryEnv(&outErr);
+ if (fenv) {
+ morkEnv* newEnv =
+ new (*ioHeap, fenv) morkEnv(morkUsage::kHeap, ioHeap, this, ioHeap);
+
+ if (newEnv) {
+ newEnv->mEnv_OwnsHeap = ownsHeap;
+ newEnv->mNode_Refs += morkEnv_kWeakRefCountEnvBonus;
+ NS_ADDREF(newEnv);
+ newEnv->mEnv_SelfAsMdbEnv = newEnv;
+ outEnv = newEnv;
+ } else
+ outErr = morkEnv_kOutOfMemoryError;
+ }
+
+ *acqEnv = outEnv;
+ } else
+ outErr = morkEnv_kNilPointerError;
+
+ return outErr;
+}
+// } ----- end env methods -----
+
+// { ----- begin heap methods -----
+NS_IMETHODIMP
+morkFactory::MakeHeap(nsIMdbEnv* mev, nsIMdbHeap** acqHeap) {
+ nsresult outErr = NS_OK;
+ nsIMdbHeap* outHeap = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ outHeap = new orkinHeap();
+ if (!outHeap) ev->OutOfMemoryError();
+ }
+ MORK_ASSERT(acqHeap);
+ if (acqHeap) *acqHeap = outHeap;
+ return outErr;
+}
+// } ----- end heap methods -----
+
+// { ----- begin row methods -----
+NS_IMETHODIMP
+morkFactory::MakeRow(nsIMdbEnv* mev, nsIMdbHeap* ioHeap, nsIMdbRow** acqRow) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+// ioHeap can be nil, causing the heap associated with ev to be used
+// } ----- end row methods -----
+
+// { ----- begin port methods -----
+NS_IMETHODIMP
+morkFactory::CanOpenFilePort(
+ nsIMdbEnv* mev, // context
+ // const char* inFilePath, // the file to investigate
+ // const mdbYarn* inFirst512Bytes,
+ nsIMdbFile* ioFile, // db abstract file interface
+ mdb_bool* outCanOpen, // whether OpenFilePort() might succeed
+ mdbYarn* outFormatVersion) {
+ nsresult outErr = NS_OK;
+ if (outFormatVersion) {
+ outFormatVersion->mYarn_Fill = 0;
+ }
+ mdb_bool canOpenAsPort = morkBool_kFalse;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (ioFile && outCanOpen) {
+ canOpenAsPort = this->CanOpenMorkTextFile(ev, ioFile);
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+
+ if (outCanOpen) *outCanOpen = canOpenAsPort;
+
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkFactory::OpenFilePort(
+ nsIMdbEnv* mev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // the file to open for readonly import
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbThumb** acqThumb) {
+ NS_ASSERTION(false, "this doesn't look implemented");
+ MORK_USED_1(ioHeap);
+ nsresult outErr = NS_OK;
+ nsIMdbThumb* outThumb = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (ioFile && inOpenPolicy && acqThumb) {
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqThumb) *acqThumb = outThumb;
+ return outErr;
+}
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then call nsIMdbFactory::ThumbToOpenPort() to get the port instance.
+
+NS_IMETHODIMP
+morkFactory::ThumbToOpenPort( // redeeming a completed thumb from
+ // OpenFilePort()
+ nsIMdbEnv* mev, // context
+ nsIMdbThumb* ioThumb, // thumb from OpenFilePort() with done status
+ nsIMdbPort** acqPort) {
+ nsresult outErr = NS_OK;
+ nsIMdbPort* outPort = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (ioThumb && acqPort) {
+ morkThumb* thumb = (morkThumb*)ioThumb;
+ morkStore* store = thumb->ThumbToOpenStore(ev);
+ if (store) {
+ store->mStore_CanAutoAssignAtomIdentity = morkBool_kTrue;
+ store->mStore_CanDirty = morkBool_kTrue;
+ store->SetStoreAndAllSpacesCanDirty(ev, morkBool_kTrue);
+
+ NS_ADDREF(store);
+ outPort = store;
+ }
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqPort) *acqPort = outPort;
+ return outErr;
+}
+// } ----- end port methods -----
+
+mork_bool morkFactory::CanOpenMorkTextFile(morkEnv* ev,
+ // const mdbYarn* inFirst512Bytes,
+ nsIMdbFile* ioFile) {
+ MORK_USED_1(ev);
+ mork_bool outBool = morkBool_kFalse;
+ mork_size headSize = strlen(morkWriter_kFileHeader);
+
+ char localBuf[256 + 4]; // for extra for sloppy safety
+ mdbYarn localYarn;
+ mdbYarn* y = &localYarn;
+ y->mYarn_Buf = localBuf; // space to hold content
+ y->mYarn_Fill = 0; // no logical content yet
+ y->mYarn_Size = 256; // physical capacity is 256 bytes
+ y->mYarn_More = 0;
+ y->mYarn_Form = 0;
+ y->mYarn_Grow = 0;
+
+ if (ioFile) {
+ nsIMdbEnv* menv = ev->AsMdbEnv();
+ mdb_size actualSize = 0;
+ ioFile->Get(menv, y->mYarn_Buf, y->mYarn_Size, /*pos*/ 0, &actualSize);
+ y->mYarn_Fill = actualSize;
+
+ if (y->mYarn_Buf && actualSize >= headSize && ev->Good()) {
+ mork_u1* buf = (mork_u1*)y->mYarn_Buf;
+ outBool = (MORK_MEMCMP(morkWriter_kFileHeader, buf, headSize) == 0);
+ }
+ } else
+ ev->NilPointerError();
+
+ return outBool;
+}
+
+// { ----- begin store methods -----
+NS_IMETHODIMP
+morkFactory::CanOpenFileStore(
+ nsIMdbEnv* mev, // context
+ // const char* inFilePath, // the file to investigate
+ // const mdbYarn* inFirst512Bytes,
+ nsIMdbFile* ioFile, // db abstract file interface
+ mdb_bool* outCanOpenAsStore, // whether OpenFileStore() might succeed
+ mdb_bool* outCanOpenAsPort, // whether OpenFilePort() might succeed
+ mdbYarn* outFormatVersion) {
+ mdb_bool canOpenAsStore = morkBool_kFalse;
+ mdb_bool canOpenAsPort = morkBool_kFalse;
+ if (outFormatVersion) {
+ outFormatVersion->mYarn_Fill = 0;
+ }
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (ioFile && outCanOpenAsStore) {
+ // right now always say true; later we should look for magic patterns
+ canOpenAsStore = this->CanOpenMorkTextFile(ev, ioFile);
+ canOpenAsPort = canOpenAsStore;
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (outCanOpenAsStore) *outCanOpenAsStore = canOpenAsStore;
+
+ if (outCanOpenAsPort) *outCanOpenAsPort = canOpenAsPort;
+
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkFactory::OpenFileStore( // open an existing database
+ nsIMdbEnv* mev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // the file to open for general db usage
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbThumb** acqThumb) {
+ nsresult outErr = NS_OK;
+ nsIMdbThumb* outThumb = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (!ioHeap) // need to use heap from env?
+ ioHeap = ev->mEnv_Heap;
+
+ if (ioFile && inOpenPolicy && acqThumb) {
+ morkStore* store = new (*ioHeap, ev)
+ morkStore(ev, morkUsage::kHeap, ioHeap, this, ioHeap);
+
+ if (store) {
+ mork_bool frozen = morkBool_kFalse; // open store mutable access
+ if (store->OpenStoreFile(ev, frozen, ioFile, inOpenPolicy)) {
+ morkThumb* thumb = morkThumb::Make_OpenFileStore(ev, ioHeap, store);
+ if (thumb) {
+ outThumb = thumb;
+ thumb->AddRef();
+ }
+ }
+ // store->CutStrongRef(mev); // always cut ref (handle has its
+ // own ref)
+ }
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqThumb) *acqThumb = outThumb;
+ return outErr;
+}
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then call nsIMdbFactory::ThumbToOpenStore() to get the store instance.
+
+NS_IMETHODIMP
+morkFactory::ThumbToOpenStore( // redeem completed thumb from OpenFileStore()
+ nsIMdbEnv* mev, // context
+ nsIMdbThumb* ioThumb, // thumb from OpenFileStore() with done status
+ nsIMdbStore** acqStore) {
+ nsresult outErr = NS_OK;
+ nsIMdbStore* outStore = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (ioThumb && acqStore) {
+ morkThumb* thumb = (morkThumb*)ioThumb;
+ morkStore* store = thumb->ThumbToOpenStore(ev);
+ if (store) {
+ store->mStore_CanAutoAssignAtomIdentity = morkBool_kTrue;
+ store->mStore_CanDirty = morkBool_kTrue;
+ store->SetStoreAndAllSpacesCanDirty(ev, morkBool_kTrue);
+
+ outStore = store;
+ NS_ADDREF(store);
+ }
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqStore) *acqStore = outStore;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkFactory::CreateNewFileStore( // create a new db with minimal content
+ nsIMdbEnv* mev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // name of file which should not yet exist
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbStore** acqStore) {
+ nsresult outErr = NS_OK;
+ nsIMdbStore* outStore = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (!ioHeap) // need to use heap from env?
+ ioHeap = ev->mEnv_Heap;
+
+ if (ioFile && inOpenPolicy && acqStore && ioHeap) {
+ morkStore* store = new (*ioHeap, ev)
+ morkStore(ev, morkUsage::kHeap, ioHeap, this, ioHeap);
+
+ if (store) {
+ store->mStore_CanAutoAssignAtomIdentity = morkBool_kTrue;
+ store->mStore_CanDirty = morkBool_kTrue;
+ store->SetStoreAndAllSpacesCanDirty(ev, morkBool_kTrue);
+
+ if (store->CreateStoreFile(ev, ioFile, inOpenPolicy)) outStore = store;
+ NS_ADDREF(store);
+ }
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqStore) *acqStore = outStore;
+ return outErr;
+}
+// } ----- end store methods -----
+
+// } ===== end nsIMdbFactory methods =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkFactory.h b/comm/mailnews/db/mork/morkFactory.h
new file mode 100644
index 0000000000..c04d478edf
--- /dev/null
+++ b/comm/mailnews/db/mork/morkFactory.h
@@ -0,0 +1,214 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKFACTORY_
+#define _MORKFACTORY_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#ifndef _ORKINHEAP_
+# include "orkinHeap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class nsIMdbFactory;
+
+#define morkDerived_kFactory /*i*/ 0x4663 /* ascii 'Fc' */
+#define morkFactory_kWeakRefCountBonus 0 /* try NOT to leak all factories */
+
+/*| morkFactory:
+|*/
+class morkFactory : public morkObject, public nsIMdbFactory { // nsIMdbObject
+ using PathChar = mozilla::filesystem::Path::value_type;
+
+ // public: // slots inherited from morkObject (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ // mork_color mBead_Color; // ID for this bead
+ // morkHandle* mObject_Handle; // weak ref to handle for this object
+
+ public: // state is public because the entire Mork system is private
+ morkEnv mFactory_Env; // private env instance used internally
+ orkinHeap mFactory_Heap;
+
+ NS_DECL_ISUPPORTS_INHERITED
+ // { ===== begin morkNode interface =====
+ public: // morkFactory virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseFactory() only if open
+
+ // { ===== begin nsIMdbFactory methods =====
+
+ // { ----- begin file methods -----
+ NS_IMETHOD OpenOldFile(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+ const PathChar* inFilePath, mdb_bool inFrozen,
+ nsIMdbFile** acqFile) override;
+ // Choose some subclass of nsIMdbFile to instantiate, in order to read
+ // (and write if not frozen) the file known by inFilePath. The file
+ // returned should be open and ready for use, and presumably positioned
+ // at the first byte position of the file. The exact manner in which
+ // files must be opened is considered a subclass specific detail, and
+ // other portions or Mork source code don't want to know how it's done.
+
+ NS_IMETHOD CreateNewFile(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+ const PathChar* inFilePath,
+ nsIMdbFile** acqFile) override;
+ // Choose some subclass of nsIMdbFile to instantiate, in order to read
+ // (and write if not frozen) the file known by inFilePath. The file
+ // returned should be created and ready for use, and presumably positioned
+ // at the first byte position of the file. The exact manner in which
+ // files must be opened is considered a subclass specific detail, and
+ // other portions or Mork source code don't want to know how it's done.
+ // } ----- end file methods -----
+
+ // { ----- begin env methods -----
+ NS_IMETHOD MakeEnv(nsIMdbHeap* ioHeap,
+ nsIMdbEnv** acqEnv) override; // new env
+ // ioHeap can be nil, causing a MakeHeap() style heap instance to be used
+ // } ----- end env methods -----
+
+ // { ----- begin heap methods -----
+ NS_IMETHOD MakeHeap(nsIMdbEnv* ev,
+ nsIMdbHeap** acqHeap) override; // new heap
+ // } ----- end heap methods -----
+
+ // { ----- begin row methods -----
+ NS_IMETHOD MakeRow(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+ nsIMdbRow** acqRow) override; // new row
+ // ioHeap can be nil, causing the heap associated with ev to be used
+ // } ----- end row methods -----
+
+ // { ----- begin port methods -----
+ NS_IMETHOD CanOpenFilePort(
+ nsIMdbEnv* ev, // context
+ // const char* inFilePath, // the file to investigate
+ // const mdbYarn* inFirst512Bytes,
+ nsIMdbFile* ioFile, // db abstract file interface
+ mdb_bool* outCanOpen, // whether OpenFilePort() might succeed
+ mdbYarn* outFormatVersion) override; // informal file format description
+
+ NS_IMETHOD OpenFilePort(
+ nsIMdbEnv* ev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // the file to open for readonly import
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbThumb** acqThumb)
+ override; // acquire thumb for incremental port open
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then call nsIMdbFactory::ThumbToOpenPort() to get the port instance.
+
+ NS_IMETHOD
+ ThumbToOpenPort( // redeeming a completed thumb from OpenFilePort()
+ nsIMdbEnv* ev, // context
+ nsIMdbThumb* ioThumb, // thumb from OpenFilePort() with done status
+ nsIMdbPort** acqPort) override; // acquire new port object
+ // } ----- end port methods -----
+
+ // { ----- begin store methods -----
+ NS_IMETHOD CanOpenFileStore(
+ nsIMdbEnv* ev, // context
+ // const char* inFilePath, // the file to investigate
+ // const mdbYarn* inFirst512Bytes,
+ nsIMdbFile* ioFile, // db abstract file interface
+ mdb_bool* outCanOpenAsStore, // whether OpenFileStore() might succeed
+ mdb_bool* outCanOpenAsPort, // whether OpenFilePort() might succeed
+ mdbYarn* outFormatVersion) override; // informal file format description
+
+ NS_IMETHOD OpenFileStore( // open an existing database
+ nsIMdbEnv* ev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // the file to open for general db usage
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbThumb** acqThumb)
+ override; // acquire thumb for incremental store open
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then call nsIMdbFactory::ThumbToOpenStore() to get the store instance.
+
+ NS_IMETHOD
+ ThumbToOpenStore( // redeem completed thumb from OpenFileStore()
+ nsIMdbEnv* ev, // context
+ nsIMdbThumb* ioThumb, // thumb from OpenFileStore() with done status
+ nsIMdbStore** acqStore) override; // acquire new db store object
+
+ NS_IMETHOD CreateNewFileStore( // create a new db with minimal content
+ nsIMdbEnv* ev, // context
+ nsIMdbHeap* ioHeap, // can be nil to cause ev's heap attribute to be used
+ // const char* inFilePath, // name of file which should not yet exist
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy, // runtime policies for using db
+ nsIMdbStore** acqStore) override; // acquire new db store object
+
+ // } ----- end store methods -----
+
+ // } ===== end nsIMdbFactory methods =====
+
+ public: // morkYarn construction & destruction
+ morkFactory(); // uses orkinHeap
+ explicit morkFactory(nsIMdbHeap* ioHeap); // caller supplied heap
+ morkFactory(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap);
+ void CloseFactory(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // morkNode memory management operators
+ void* operator new(size_t inSize) noexcept(true) {
+ return ::operator new(inSize);
+ }
+
+ void* operator new(size_t inSize, nsIMdbHeap& ioHeap,
+ morkEnv* ev) noexcept(true) {
+ return morkNode::MakeNew(inSize, ioHeap, ev);
+ }
+
+ private: // copying is not allowed
+ morkFactory(const morkFactory& other);
+ morkFactory& operator=(const morkFactory& other);
+ virtual ~morkFactory(); // assert that CloseFactory() executed earlier
+
+ public: // dynamic type identification
+ mork_bool IsFactory() const {
+ return IsNode() && mNode_Derived == morkDerived_kFactory;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // other factory methods
+ void NonFactoryTypeError(morkEnv* ev);
+ morkEnv* GetInternalFactoryEnv(nsresult* outErr);
+ mork_bool CanOpenMorkTextFile(morkEnv* ev, nsIMdbFile* ioFile);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakFactory(morkFactory* me, morkEnv* ev,
+ morkFactory** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongFactory(morkFactory* me, morkEnv* ev,
+ morkFactory** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKFACTORY_ */
diff --git a/comm/mailnews/db/mork/morkFile.cpp b/comm/mailnews/db/mork/morkFile.cpp
new file mode 100644
index 0000000000..b7b7848cc2
--- /dev/null
+++ b/comm/mailnews/db/mork/morkFile.cpp
@@ -0,0 +1,738 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKFILE_
+# include "morkFile.h"
+#endif
+
+#ifdef MORK_WIN
+# include "io.h"
+# include <windows.h>
+#endif
+
+#include "mozilla/Unused.h"
+#include "nsString.h"
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkFile::CloseMorkNode(
+    morkEnv* ev)  // CloseFile() only if open
+{
+  // Idempotent shutdown hook: only a node still in the open state is
+  // transitioned through closing -> shut; repeated calls are no-ops.
+  if (!IsOpenNode()) return;
+  MarkClosing();
+  CloseFile(ev);
+  MarkShut();
+}
+
+/*public virtual*/
+morkFile::~morkFile()  // assert CloseFile() executed earlier
+{
+  // All four state flags are zeroed by CloseFile(); a nonzero flag here
+  // means the node was destroyed without an orderly close.
+  MORK_ASSERT(mFile_Frozen == 0);
+  MORK_ASSERT(mFile_DoTrace == 0);
+  MORK_ASSERT(mFile_IoOpen == 0);
+  MORK_ASSERT(mFile_Active == 0);
+}
+
+/*public non-poly*/
+// Construct a file node. ioSlotHeap (required) is strongly referenced and
+// used later for mFile_Name allocation; mNode_Derived is only stamped to
+// morkDerived_kFile when construction fully succeeds, so IsFile() stays
+// false on a partially constructed instance.
+morkFile::morkFile(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                   nsIMdbHeap* ioSlotHeap)
+    : morkObject(ev, inUsage, ioHeap, morkColor_kNone, (morkHandle*)0),
+      mFile_Frozen(0),
+      mFile_DoTrace(0),
+      mFile_IoOpen(0),
+      mFile_Active(0)
+
+      ,
+      mFile_SlotHeap(0),
+      mFile_Name(0),
+      mFile_Thief(0) {
+  if (ev->Good()) {
+    if (ioSlotHeap) {
+      nsIMdbHeap_SlotStrongHeap(ioSlotHeap, ev, &mFile_SlotHeap);
+      if (ev->Good()) mNode_Derived = morkDerived_kFile;
+    } else
+      ev->NilPointerError();
+  }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkFile, morkObject, nsIMdbFile)
+/*public non-poly*/ void morkFile::CloseFile(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  // Release everything this node owns: state flags, the copied file name,
+  // the slot heap reference, and any thief file acquired via Steal().
+  if (this->IsNode()) {
+    mFile_Frozen = 0;
+    mFile_DoTrace = 0;
+    mFile_IoOpen = 0;
+    mFile_Active = 0;
+
+    // SetFileName(nil) frees the name through the slot heap, so it must
+    // run before the slot heap reference is dropped below.
+    if (mFile_Name) this->SetFileName(ev, nullptr);
+
+    nsIMdbHeap_SlotStrongHeap((nsIMdbHeap*)0, ev, &mFile_SlotHeap);
+    nsIMdbFile_SlotStrongFile((nsIMdbFile*)0, ev, &mFile_Thief);
+
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+/*static*/ morkFile* morkFile::OpenOldFile(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                           const PathChar* inFilePath,
+                                           mork_bool inFrozen)
+// Choose some subclass of morkFile to instantiate, in order to read
+// (and write if not frozen) the file known by inFilePath. The file
+// returned should be open and ready for use, and presumably positioned
+// at the first byte position of the file. The exact manner in which
+// files must be opened is considered a subclass specific detail, and
+// other portions or Mork source code don't want to know how it's done.
+{
+  // Currently the only concrete subclass is morkStdioFile (C stdio).
+  return morkStdioFile::OpenOldStdioFile(ev, ioHeap, inFilePath, inFrozen);
+}
+
+/*static*/ morkFile* morkFile::CreateNewFile(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                             const PathChar* inFilePath)
+// Choose some subclass of morkFile to instantiate, in order to read
+// (and write if not frozen) the file known by inFilePath. The file
+// returned should be created and ready for use, and presumably positioned
+// at the first byte position of the file. The exact manner in which
+// files must be opened is considered a subclass specific detail, and
+// other portions or Mork source code don't want to know how it's done.
+{
+  // Currently the only concrete subclass is morkStdioFile (C stdio).
+  return morkStdioFile::CreateNewStdioFile(ev, ioHeap, inFilePath);
+}
+
+// Report that neither an io stream nor a thief file is available for io.
+void morkFile::NewMissingIoError(morkEnv* ev) const {
+  ev->NewError("file missing io");
+}
+
+// Report that a node expected to be a morkFile failed the IsFile() check.
+/*static*/ void morkFile::NonFileTypeError(morkEnv* ev) {
+  ev->NewError("non morkFile");
+}
+
+// Report that mFile_SlotHeap is unexpectedly nil (needed for name storage).
+/*static*/ void morkFile::NilSlotHeapError(morkEnv* ev) {
+  ev->NewError("nil mFile_SlotHeap");
+}
+
+// Report that mFile_Name is unexpectedly nil when a path was required.
+/*static*/ void morkFile::NilFileNameError(morkEnv* ev) {
+  ev->NewError("nil mFile_Name");
+}
+
+// Install (or clear, when ioThief is nil) the strong thief reference; the
+// slot helper handles releasing any previously held thief.
+void morkFile::SetThief(morkEnv* ev, nsIMdbFile* ioThief) {
+  nsIMdbFile_SlotStrongFile(ioThief, ev, &mFile_Thief);
+}
+
+void morkFile::SetFileName(morkEnv* ev,
+                           const PathChar* inName)  // inName can be nil
+{
+  // The name is owned by this node: the old copy is freed and the new one
+  // duplicated via the slot heap (morkEnv::CopyString()/FreeString()).
+  nsIMdbHeap* heap = mFile_SlotHeap;
+  if (heap) {
+    PathChar* name = mFile_Name;
+    if (name) {
+      // Clear the member before freeing so no stale pointer survives.
+      mFile_Name = 0;
+      ev->FreeString(heap, name);
+    }
+    if (ev->Good() && inName) mFile_Name = ev->CopyString(heap, inName);
+  } else
+    this->NilSlotHeapError(ev);
+}
+
+void morkFile::NewFileDownError(morkEnv* ev) const
+// call NewFileDownError() when either IsOpenAndActiveFile()
+// is false, or when IsOpenActiveAndMutableFile() is false.
+{
+  // Guard-clause form: report the most fundamental problem first.
+  if (!IsOpenNode()) {
+    ev->NewError("file not open");
+    return;
+  }
+  if (!FileActive()) {
+    ev->NewError("file not active");
+    return;
+  }
+  if (FileFrozen()) {
+    ev->NewError("file frozen");
+    return;
+  }
+  ev->NewError("unknown file problem");
+}
+
+void morkFile::NewFileErrnoError(morkEnv* ev) const
+// call NewFileErrnoError() to convert std C errno into AB fault
+{
+  // strerror() gives a human-readable message for the current errno value.
+  const char* errnoString = strerror(errno);
+  ev->NewError(errnoString);  // maybe pass value of strerror() instead
+}
+
+// ````` ````` ````` ````` newlines ````` ````` ````` `````
+
+// Per-platform buffer of repeated line terminators used by WriteNewlines():
+// classic Mac CR (0x0D), Windows CRLF, Unix LF. kNewlinesCount is the number
+// of complete newline sequences stored in the buffer.
+#if defined(MORK_MAC)
+static const char morkFile_kNewlines[] =
+    "\015\015\015\015\015\015\015\015\015\015\015\015\015\015\015\015";
+#  define morkFile_kNewlinesCount 16
+#else
+#  if defined(MORK_WIN)
+static const char morkFile_kNewlines[] =
+    "\015\012\015\012\015\012\015\012\015\012\015\012\015\012\015\012";
+#    define morkFile_kNewlinesCount 8
+#  else
+#    ifdef MORK_UNIX
+static const char morkFile_kNewlines[] =
+    "\012\012\012\012\012\012\012\012\012\012\012\012\012\012\012\012";
+#    define morkFile_kNewlinesCount 16
+#    endif /* MORK_UNIX */
+#  endif /* MORK_WIN */
+#endif /* MORK_MAC */
+
+mork_size morkFile::WriteNewlines(morkEnv* ev, mork_count inNewlines)
+// WriteNewlines() returns the number of bytes written.
+{
+  // Write inNewlines platform newlines by repeatedly emitting chunks of the
+  // precomputed morkFile_kNewlines buffer (at most kNewlinesCount per pass).
+  mork_size outSize = 0;
+  while (inNewlines && ev->Good())  // more newlines to write?
+  {
+    mork_u4 quantum = inNewlines;
+    if (quantum > morkFile_kNewlinesCount) quantum = morkFile_kNewlinesCount;
+
+    mork_size quantumSize = quantum * mork_kNewlineSize;
+    mdb_size bytesWritten;
+    // NOTE(review): outSize accumulates the requested quantumSize rather
+    // than bytesWritten; write failures are detected via ev status only.
+    this->Write(ev->AsMdbEnv(), morkFile_kNewlines, quantumSize, &bytesWritten);
+    outSize += quantumSize;
+    inNewlines -= quantum;
+  }
+  return outSize;
+}
+
+NS_IMETHODIMP
+morkFile::Eof(nsIMdbEnv* mev, mdb_pos* outPos) {
+  // The end-of-file position is simply the total file length.
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  mdb_pos pos = Length(ev);
+  nsresult outErr = ev->AsErr();
+  if (outPos) *outPos = pos;
+  return outErr;
+}
+
+NS_IMETHODIMP
+morkFile::Get(nsIMdbEnv* mev, void* outBuf, mdb_size inSize, mdb_pos inPos,
+              mdb_size* outActualSize) {
+  // Positioned read: seek to inPos, then read up to inSize bytes.
+  nsresult rv = NS_OK;
+  // Zero the out-count up front so callers see 0 on any failure path,
+  // mirroring the sibling Put() method.
+  if (outActualSize) *outActualSize = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    mdb_pos outPos;
+    Seek(mev, inPos, &outPos);
+    if (ev->Good())
+      rv = Read(mev, outBuf, inSize, outActualSize);
+    else
+      rv = ev->AsErr();  // propagate the seek failure, as Put() does
+  }
+  return rv;
+}
+
+NS_IMETHODIMP
+morkFile::Put(nsIMdbEnv* mev, const void* inBuf, mdb_size inSize, mdb_pos inPos,
+              mdb_size* outActualSize) {
+  // Positioned write: seek to inPos, then write inSize bytes from inBuf.
+  nsresult outErr = NS_OK;
+  *outActualSize = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    mdb_pos outPos;
+
+    Seek(mev, inPos, &outPos);
+    if (ev->Good()) Write(mev, inBuf, inSize, outActualSize);
+    // Any seek or write fault is reflected through the env's error state.
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+// { ----- begin path methods -----
+NS_IMETHODIMP
+morkFile::Path(nsIMdbEnv* mev, mdbYarn* outFilePath) {
+  // Copy the stored file name (possibly nil) into the caller's yarn.
+  nsresult outErr = NS_OK;
+  if (outFilePath) outFilePath->mYarn_Fill = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    ev->StringToYarn(GetFileNameString(), outFilePath);
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+// } ----- end path methods -----
+
+// { ----- begin replacement methods -----
+
+NS_IMETHODIMP
+morkFile::Thief(nsIMdbEnv* mev, nsIMdbFile** acqThief) {
+  // Hand out a new strong (AddRef'd) reference to the current thief file,
+  // or nil when no thief has been installed via Steal().
+  nsresult outErr = NS_OK;
+  nsIMdbFile* outThief = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    outThief = GetThief();
+    NS_IF_ADDREF(outThief);
+    outErr = ev->AsErr();
+  }
+  if (acqThief) *acqThief = outThief;
+  return outErr;
+}
+
+// } ----- end replacement methods -----
+
+// { ----- begin versioning methods -----
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkStdioFile::CloseMorkNode(
+    morkEnv* ev)  // CloseStdioFile() only if open
+{
+  // Standard open -> closing -> shut transition; no-op when already shut.
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseStdioFile(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkStdioFile::~morkStdioFile()  // assert CloseStdioFile() executed earlier
+{
+  // Last-chance cleanup: if the FILE* is still live, close it using the
+  // node's remembered env before asserting the handle was released.
+  if (mStdioFile_File) CloseStdioFile(mMorkEnv);
+  MORK_ASSERT(mStdioFile_File == 0);
+}
+
+/*public non-poly*/ void morkStdioFile::CloseStdioFile(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  // Close the underlying stdio stream (only when this node owns an open,
+  // active io stream), then run the base-class CloseFile() teardown.
+  if (this->IsNode()) {
+    if (mStdioFile_File && this->FileActive() && this->FileIoOpen()) {
+      this->CloseStdio(ev);
+    }
+
+    mStdioFile_File = 0;
+
+    this->CloseFile(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// compatible with the morkFile::MakeFile() entry point
+
+/*static*/ morkStdioFile* morkStdioFile::OpenOldStdioFile(
+    morkEnv* ev, nsIMdbHeap* ioHeap, const PathChar* inFilePath,
+    mork_bool inFrozen) {
+  // Open an existing file with C stdio; both a heap and a path are required.
+  if (!ioHeap || !inFilePath) {
+    ev->NilPointerError();
+    return (morkStdioFile*)0;
+  }
+
+  // Frozen (read-only) callers get "rb"; writable callers get "rb+".
+  const char* mode;
+  if (inFrozen)
+    mode = "rb";
+  else
+    mode = "rb+";
+
+  morkStdioFile* file = new (*ioHeap, ev)
+      morkStdioFile(ev, morkUsage::kHeap, ioHeap, ioHeap, inFilePath, mode);
+  if (file) {
+    file->SetFileFrozen(inFrozen);
+  }
+  return file;
+}
+
+/*static*/ morkStdioFile* morkStdioFile::CreateNewStdioFile(
+    morkEnv* ev, nsIMdbHeap* ioHeap, const PathChar* inFilePath) {
+  // Create (or truncate) a file for read/write using stdio mode "wb+".
+  morkStdioFile* outFile = 0;
+  if (ioHeap && inFilePath) {
+    const char* mode = "wb+";
+    outFile = new (*ioHeap, ev)
+        morkStdioFile(ev, morkUsage::kHeap, ioHeap, ioHeap, inFilePath, mode);
+  } else
+    ev->NilPointerError();
+
+  return outFile;
+}
+
+NS_IMETHODIMP
+morkStdioFile::BecomeTrunk(nsIMdbEnv* ev)
+// If this file is a file version branch created by calling AcquireBud(),
+// BecomeTrunk() causes this file's content to replace the original
+// file's content, typically by assuming the original file's identity.
+{
+  // This stdio subclass buds in place (see AcquireBud()), so becoming the
+  // trunk only requires flushing buffered output.
+  return Flush(ev);
+}
+
+NS_IMETHODIMP
+morkStdioFile::AcquireBud(nsIMdbEnv* mdbev, nsIMdbHeap* ioHeap,
+                          nsIMdbFile** acquiredFile)
+// AcquireBud() starts a new "branch" version of the file, empty of content,
+// so that a new version of the file can be written. This new file
+// can later be told to BecomeTrunk() the original file, so the branch
+// created by budding the file will replace the original file. Some
+// file subclasses might initially take the unsafe but expedient
+// approach of simply truncating this file down to zero length, and
+// then returning the same morkFile pointer as this, with an extra
+// reference count increment. Note that the caller of AcquireBud() is
+// expected to eventually call CutStrongRef() on the returned file
+// in order to release the strong reference. High quality versions
+// of morkFile subclasses will create entirely new files which later
+// are renamed to become the old file, so that better transactional
+// behavior is exhibited by the file, so crashes protect old files.
+// Note that AcquireBud() is an illegal operation on readonly files.
+{
+  NS_ENSURE_ARG(acquiredFile);
+  MORK_USED_1(ioHeap);
+  nsresult rv = NS_OK;
+  morkFile* outFile = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mdbev);
+
+  if (this->IsOpenAndActiveFile()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (file) {
+      // This subclass takes the "expedient" approach described above: it
+      // closes the current stream and reopens the same path truncated to
+      // zero, then returns itself as the bud.
+      // #ifdef MORK_WIN
+      //    truncate(file, /*eof*/ 0);
+      // #else /*MORK_WIN*/
+      PathChar* name = mFile_Name;
+      if (name) {
+        if (MORK_FILECLOSE(file) >= 0) {
+          // Mark inactive/closed while the stream is down so a failed
+          // reopen leaves the flags describing reality.
+          this->SetFileActive(morkBool_kFalse);
+          this->SetFileIoOpen(morkBool_kFalse);
+          mStdioFile_File = 0;
+
+          file = MORK_FILEOPEN(
+              name, "wb+");  // open for write, discarding old content
+          if (file) {
+            mStdioFile_File = file;
+            this->SetFileActive(morkBool_kTrue);
+            this->SetFileIoOpen(morkBool_kTrue);
+            this->SetFileFrozen(morkBool_kFalse);
+          } else
+            this->new_stdio_file_fault(ev);
+        } else
+          this->new_stdio_file_fault(ev);
+      } else
+        this->NilFileNameError(ev);
+
+      // #endif /*MORK_WIN*/
+
+      // NOTE(review): both morkNode's AddStrongRef() and XPCOM AddRef()
+      // are bumped here, presumably one per refcounting scheme — confirm
+      // callers balance both when releasing the bud.
+      if (ev->Good() && this->AddStrongRef(ev->AsMdbEnv())) {
+        outFile = this;
+        AddRef();
+      }
+    } else if (mFile_Thief) {
+      // No native stream: delegate budding to the thief file.
+      rv = mFile_Thief->AcquireBud(ev->AsMdbEnv(), ioHeap, acquiredFile);
+    } else
+      this->NewMissingIoError(ev);
+  } else
+    this->NewFileDownError(ev);
+
+  *acquiredFile = outFile;
+  return rv;
+}
+
+mork_pos morkStdioFile::Length(morkEnv* ev) const {
+  // Compute the file size by seeking to the end and back, restoring the
+  // original stream position afterwards. Returns 0 on any failure (with
+  // the fault recorded in ev).
+  mork_pos outPos = 0;
+
+  if (this->IsOpenAndActiveFile()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (file) {
+      long start = MORK_FILETELL(file);
+      if (start >= 0) {
+        long fore = MORK_FILESEEK(file, 0, SEEK_END);
+        if (fore >= 0) {
+          long eof = MORK_FILETELL(file);
+          if (eof >= 0) {
+            // Restore the caller-visible position before reporting size.
+            long back = MORK_FILESEEK(file, start, SEEK_SET);
+            if (back >= 0)
+              outPos = eof;
+            else
+              this->new_stdio_file_fault(ev);
+          } else
+            this->new_stdio_file_fault(ev);
+        } else
+          this->new_stdio_file_fault(ev);
+      } else
+        this->new_stdio_file_fault(ev);
+    } else if (mFile_Thief)
+      mFile_Thief->Eof(ev->AsMdbEnv(), &outPos);
+    else
+      this->NewMissingIoError(ev);
+  } else
+    this->NewFileDownError(ev);
+
+  return outPos;
+}
+
+NS_IMETHODIMP
+morkStdioFile::Tell(nsIMdbEnv* ev, mork_pos* outPos) const {
+  // Report the current stream position via ftell, or delegate to the
+  // thief file when the native stream has been stolen.
+  // NOTE(review): faults are recorded only in the morkEnv; rv stays NS_OK
+  // on failure — confirm callers check the env rather than the nsresult.
+  nsresult rv = NS_OK;
+  NS_ENSURE_ARG(outPos);
+  morkEnv* mev = morkEnv::FromMdbEnv(ev);
+  if (this->IsOpenAndActiveFile()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (file) {
+      long where = MORK_FILETELL(file);
+      if (where >= 0)
+        *outPos = where;
+      else
+        this->new_stdio_file_fault(mev);
+    } else if (mFile_Thief)
+      mFile_Thief->Tell(ev, outPos);
+    else
+      this->NewMissingIoError(mev);
+  } else
+    this->NewFileDownError(mev);
+
+  return rv;
+}
+
+NS_IMETHODIMP
+morkStdioFile::Read(nsIMdbEnv* ev, void* outBuf, mork_size inSize,
+                    mork_num* outCount) {
+  // Read up to inSize bytes at the current position into outBuf, storing
+  // the byte count actually read in *outCount. Delegates to the thief
+  // file when the native stream has been stolen.
+  nsresult rv = NS_OK;
+  morkEnv* mev = morkEnv::FromMdbEnv(ev);
+  if (this->IsOpenAndActiveFile()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (file) {
+      long count = (long)MORK_FILEREAD(outBuf, inSize, file);
+      if (count >= 0) {
+        *outCount = (mork_num)count;
+      } else
+        this->new_stdio_file_fault(mev);
+    } else if (mFile_Thief)
+      mFile_Thief->Read(ev, outBuf, inSize, outCount);
+    else
+      this->NewMissingIoError(mev);
+  } else
+    this->NewFileDownError(mev);
+
+  return rv;
+}
+
+NS_IMETHODIMP
+morkStdioFile::Seek(nsIMdbEnv* mdbev, mork_pos inPos, mork_pos* aOutPos) {
+  // Absolute seek (SEEK_SET) to inPos; *aOutPos echoes inPos on success
+  // and stays 0 on failure. Allowed while the node is closing, since
+  // shutdown paths may still need to position the stream.
+  mork_pos outPos = 0;
+  nsresult rv = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mdbev);
+
+  if (this->IsOpenOrClosingNode() && this->FileActive()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (file) {
+      long where = MORK_FILESEEK(file, inPos, SEEK_SET);
+      if (where >= 0)
+        outPos = inPos;
+      else
+        this->new_stdio_file_fault(ev);
+    } else if (mFile_Thief)
+      mFile_Thief->Seek(mdbev, inPos, aOutPos);
+    else
+      this->NewMissingIoError(ev);
+  } else
+    this->NewFileDownError(ev);
+
+  *aOutPos = outPos;
+  return rv;
+}
+
+NS_IMETHODIMP
+morkStdioFile::Write(nsIMdbEnv* mdbev, const void* inBuf, mork_size inSize,
+                     mork_size* aOutSize) {
+  // Write inSize bytes from inBuf at the current position; requires the
+  // file to be open, active, and not frozen (read-only).
+  mork_num outCount = 0;
+  nsresult rv = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mdbev);
+  if (this->IsOpenActiveAndMutableFile()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (file) {
+      // fwrite's count is deliberately ignored; ferror() decides success,
+      // and on success the full inSize is reported as written.
+      mozilla::Unused << fwrite(inBuf, 1, inSize, file);
+      if (!ferror(file))
+        outCount = inSize;
+      else
+        this->new_stdio_file_fault(ev);
+    } else if (mFile_Thief)
+      mFile_Thief->Write(mdbev, inBuf, inSize, &outCount);
+    else
+      this->NewMissingIoError(ev);
+  } else
+    this->NewFileDownError(ev);
+
+  *aOutSize = outCount;
+  return rv;
+}
+
+NS_IMETHODIMP
+morkStdioFile::Flush(nsIMdbEnv* mdbev) {
+  // Flush buffered stdio output (or the thief's). Permitted while the
+  // node is closing so shutdown can still drain buffers.
+  morkEnv* ev = morkEnv::FromMdbEnv(mdbev);
+  if (this->IsOpenOrClosingNode() && this->FileActive()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (file) {
+      MORK_FILEFLUSH(file);
+
+    } else if (mFile_Thief)
+      mFile_Thief->Flush(mdbev);
+    else
+      this->NewMissingIoError(ev);
+  } else
+    this->NewFileDownError(ev);
+  return NS_OK;
+}
+
+// ````` ````` ````` ````` ````` ````` ````` `````
+// protected: // protected non-poly morkStdioFile methods
+
+// Record a stdio io fault in ev, deriving the message from errno; when
+// errno is clear, fall back to the stream's ferror() indicator.
+void morkStdioFile::new_stdio_file_fault(morkEnv* ev) const {
+  FILE* file = (FILE*)mStdioFile_File;
+
+  int copyErrno = errno;  // facilitate seeing error in debugger
+
+  // bunch of stuff not ported here
+  if (!copyErrno && file) {
+    copyErrno = ferror(file);
+    errno = copyErrno;
+  }
+
+  this->NewFileErrnoError(ev);
+}
+
+// ````` ````` ````` ````` ````` ````` ````` `````
+// public: // public non-poly morkStdioFile methods
+
+/*public non-poly*/
+// Construct an unopened stdio file node; call OpenStdio()/UseStdio() later.
+morkStdioFile::morkStdioFile(morkEnv* ev, const morkUsage& inUsage,
+                             nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+    : morkFile(ev, inUsage, ioHeap, ioSlotHeap), mStdioFile_File(0) {
+  if (ev->Good()) mNode_Derived = morkDerived_kStdioFile;
+}
+
+// Construct and immediately open the named file with the given stdio mode.
+morkStdioFile::morkStdioFile(morkEnv* ev, const morkUsage& inUsage,
+                             nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap,
+                             const PathChar* inName, const char* inMode)
+    // calls OpenStdio() after construction
+    : morkFile(ev, inUsage, ioHeap, ioSlotHeap), mStdioFile_File(0) {
+  if (ev->Good()) this->OpenStdio(ev, inName, inMode);
+}
+
+// Construct around an already-open FILE* (e.g. stdin/stdout) that this
+// node must not close; see UseStdio() for the ownership rules.
+morkStdioFile::morkStdioFile(morkEnv* ev, const morkUsage& inUsage,
+                             nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap,
+                             void* ioFile, const PathChar* inName,
+                             mork_bool inFrozen)
+    // calls UseStdio() after construction
+    : morkFile(ev, inUsage, ioHeap, ioSlotHeap), mStdioFile_File(0) {
+  if (ev->Good()) this->UseStdio(ev, ioFile, inName, inFrozen);
+}
+
+void morkStdioFile::OpenStdio(morkEnv* ev, const PathChar* inName,
+                              const char* inMode)
+// Open a new FILE with name inName, using mode flags from inMode.
+{
+  if (ev->Good()) {
+    if (!inMode) inMode = "";
+
+    // A mode starting with 'r' implies read-only; this only freezes for
+    // plain "r" style modes ("rb+" also matches — cursory by design).
+    mork_bool frozen = (*inMode == 'r');  // cursory attempt to note readonly
+
+    if (this->IsOpenNode()) {
+      if (!this->FileActive()) {
+        this->SetFileIoOpen(morkBool_kFalse);
+        if (inName && *inName) {
+          // Record the name first so Path() and reopen logic can use it.
+          this->SetFileName(ev, inName);
+          if (ev->Good()) {
+            FILE* file = MORK_FILEOPEN(inName, inMode);
+            if (file) {
+              mStdioFile_File = file;
+              this->SetFileActive(morkBool_kTrue);
+              this->SetFileIoOpen(morkBool_kTrue);
+              this->SetFileFrozen(frozen);
+            } else
+              this->new_stdio_file_fault(ev);
+          }
+        } else
+          ev->NewError("no file name");
+      } else
+        ev->NewError("file already active");
+    } else
+      this->NewFileDownError(ev);
+  }
+}
+
+void morkStdioFile::UseStdio(morkEnv* ev, void* ioFile, const PathChar* inName,
+                             mork_bool inFrozen)
+// Use an existing file, like stdin/stdout/stderr, which should not
+// have the io stream closed when the file is closed. The ioFile
+// parameter must actually be of type FILE (but we don't want to make
+// this header file include the stdio.h header file).
+{
+  if (ev->Good()) {
+    if (this->IsOpenNode()) {
+      if (!this->FileActive()) {
+        if (ioFile) {
+          // IoOpen stays false: CloseStdio() must never fclose a borrowed
+          // stream such as stdin/stdout.
+          this->SetFileIoOpen(morkBool_kFalse);
+          this->SetFileName(ev, inName);
+          if (ev->Good()) {
+            mStdioFile_File = ioFile;
+            this->SetFileActive(morkBool_kTrue);
+            this->SetFileFrozen(inFrozen);
+          }
+        } else
+          ev->NilPointerError();
+      } else
+        ev->NewError("file already active");
+    } else
+      this->NewFileDownError(ev);
+  }
+}
+
+void morkStdioFile::CloseStdio(morkEnv* ev)
+// Close the stream io if both and FileActive() and FileIoOpen(), but
+// this does not close this instances (like CloseStdioFile() does).
+// If stream io was made active by means of calling UseStdio(),
+// then this method does little beyond marking the stream inactive
+// because FileIoOpen() is false.
+{
+  if (mStdioFile_File && this->FileActive() && this->FileIoOpen()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (MORK_FILECLOSE(file) < 0) this->new_stdio_file_fault(ev);
+
+    // Clear state even when fclose failed: the FILE* is unusable either way.
+    mStdioFile_File = 0;
+    this->SetFileActive(morkBool_kFalse);
+    this->SetFileIoOpen(morkBool_kFalse);
+  }
+}
+
+NS_IMETHODIMP
+morkStdioFile::Steal(nsIMdbEnv* ev, nsIMdbFile* ioThief)
+// If this file is a file version branch created by calling AcquireBud(),
+// BecomeTrunk() causes this file's content to replace the original
+// file's content, typically by assuming the original file's identity.
+{
+  // Close our own stream (if we own one) and route all further io to
+  // ioThief via the mFile_Thief delegation paths in Read/Write/etc.
+  morkEnv* mev = morkEnv::FromMdbEnv(ev);
+  if (mStdioFile_File && FileActive() && FileIoOpen()) {
+    FILE* file = (FILE*)mStdioFile_File;
+    if (MORK_FILECLOSE(file) < 0) new_stdio_file_fault(mev);
+
+    mStdioFile_File = 0;
+  }
+  SetThief(mev, ioThief);
+  return NS_OK;
+}
+
+#if defined(MORK_WIN)
+
+// Windows out-of-line helper backing the MORK_FILEFLUSH macro.
+void mork_fileflush(FILE* file) { fflush(file); }
+
+#endif /*MORK_WIN*/
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkFile.h b/comm/mailnews/db/mork/morkFile.h
new file mode 100644
index 0000000000..1a2933643f
--- /dev/null
+++ b/comm/mailnews/db/mork/morkFile.h
@@ -0,0 +1,360 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MORKFILE_
+#define _MORKFILE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#include "mozilla/Path.h"
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*=============================================================================
+ * morkFile: abstract file interface
+ */
+
+#define morkDerived_kFile /*i*/ 0x4669 /* ascii 'Fi' */
+
+// Abstract base for Mork's file io: pairs the morkNode/morkObject lifecycle
+// with the public nsIMdbFile interface. Concrete io (stdio) lives in the
+// morkStdioFile subclass; pure-virtual members below mark the subclass API.
+class morkFile /*d*/ : public morkObject,
+                       public nsIMdbFile {  /* ````` simple file API ````` */
+  using PathChar = mozilla::filesystem::Path::value_type;
+
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap*    mNode_Heap;
+
+  // mork_base      mNode_Base;     // must equal morkBase_kNode
+  // mork_derived   mNode_Derived;  // depends on specific node subclass
+
+  // mork_access    mNode_Access;   // kOpen, kClosing, kShut, or kDead
+  // mork_usage     mNode_Usage;    // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able      mNode_Mutable;  // can this node be modified?
+  // mork_load      mNode_Load;     // is this node clean or dirty?
+
+  // mork_uses      mNode_Uses;     // refcount for strong refs
+  // mork_refs      mNode_Refs;     // refcount for strong refs + weak refs
+
+  // public: // slots inherited from morkObject (meant to inform only)
+
+  // mork_color   mBead_Color;   // ID for this bead
+  // morkHandle*  mObject_Handle;  // weak ref to handle for this object
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected:  // protected morkFile members (similar to public domain IronDoc)
+  virtual ~morkFile();  // assert that CloseFile() executed earlier
+
+  // State flags use a character value when set ('F'/'T'/'O'/'A') and 0
+  // when clear; see the FileFrozen()/SetFileFrozen() family below.
+  mork_u1 mFile_Frozen;   // 'F' => file allows only read access
+  mork_u1 mFile_DoTrace;  // 'T' trace if ev->DoTrace()
+  mork_u1 mFile_IoOpen;   // 'O' => io stream is open (& needs a close)
+  mork_u1 mFile_Active;   // 'A' => file is active and usable
+
+  nsIMdbHeap* mFile_SlotHeap;  // heap for Name and other allocated slots
+  PathChar* mFile_Name;  // can be nil if SetFileName() is never called
+  // mFile_Name convention: managed with morkEnv::CopyString()/FreeString()
+
+  nsIMdbFile* mFile_Thief;  // from a call to orkinFile::Steal()
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  NS_DECL_ISUPPORTS_INHERITED
+  virtual void CloseMorkNode(morkEnv* ev) override;  // CloseFile() only if open
+
+ public:  // morkFile construction & destruction
+  morkFile(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+           nsIMdbHeap* ioSlotHeap);
+  void CloseFile(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkFile(const morkFile& other);
+  morkFile& operator=(const morkFile& other);
+
+ public:  // dynamic type identification
+  mork_bool IsFile() const {
+    return IsNode() && mNode_Derived == morkDerived_kFile;
+  }
+  // } ===== end morkNode methods =====
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // public static standard file creation entry point
+  static morkFile* OpenOldFile(morkEnv* ev, nsIMdbHeap* ioHeap,
+                               const PathChar* inFilePath, mork_bool inFrozen);
+  // Choose some subclass of morkFile to instantiate, in order to read
+  // (and write if not frozen) the file known by inFilePath. The file
+  // returned should be open and ready for use, and presumably positioned
+  // at the first byte position of the file. The exact manner in which
+  // files must be opened is considered a subclass specific detail, and
+  // other portions or Mork source code don't want to know how it's done.
+
+  static morkFile* CreateNewFile(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                 const PathChar* inFilePath);
+  // Choose some subclass of morkFile to instantiate, in order to read
+  // (and write if not frozen) the file known by inFilePath. The file
+  // returned should be created and ready for use, and presumably positioned
+  // at the first byte position of the file. The exact manner in which
+  // files must be opened is considered a subclass specific detail, and
+  // other portions or Mork source code don't want to know how it's done.
+
+ public:  // non-poly morkFile methods
+  mork_bool FileFrozen() const { return mFile_Frozen == 'F'; }
+  mork_bool FileDoTrace() const { return mFile_DoTrace == 'T'; }
+  mork_bool FileIoOpen() const { return mFile_IoOpen == 'O'; }
+  mork_bool FileActive() const { return mFile_Active == 'A'; }
+
+  void SetFileFrozen(mork_bool b) { mFile_Frozen = (mork_u1)((b) ? 'F' : 0); }
+  void SetFileDoTrace(mork_bool b) { mFile_DoTrace = (mork_u1)((b) ? 'T' : 0); }
+  void SetFileIoOpen(mork_bool b) { mFile_IoOpen = (mork_u1)((b) ? 'O' : 0); }
+  void SetFileActive(mork_bool b) { mFile_Active = (mork_u1)((b) ? 'A' : 0); }
+
+  mork_bool IsOpenActiveAndMutableFile() const {
+    return (IsOpenNode() && FileActive() && !FileFrozen());
+  }
+  // call IsOpenActiveAndMutableFile() before writing a file
+
+  mork_bool IsOpenAndActiveFile() const {
+    return (this->IsOpenNode() && this->FileActive());
+  }
+  // call IsOpenAndActiveFile() before using a file
+
+  nsIMdbFile* GetThief() const { return mFile_Thief; }
+  void SetThief(morkEnv* ev, nsIMdbFile* ioThief);  // ioThief can be nil
+
+  const PathChar* GetFileNameString() const { return mFile_Name; }
+  void SetFileName(morkEnv* ev, const PathChar* inName);  // inName can be nil
+  static void NilSlotHeapError(morkEnv* ev);
+  static void NilFileNameError(morkEnv* ev);
+  static void NonFileTypeError(morkEnv* ev);
+
+  void NewMissingIoError(morkEnv* ev) const;
+
+  void NewFileDownError(morkEnv* ev) const;
+  // call NewFileDownError() when either IsOpenAndActiveFile()
+  // is false, or when IsOpenActiveAndMutableFile() is false.
+
+  void NewFileErrnoError(morkEnv* ev) const;
+  // call NewFileErrnoError() to convert std C errno into AB fault
+
+  mork_size WriteNewlines(morkEnv* ev, mork_count inNewlines);
+  // WriteNewlines() returns the number of bytes written.
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakFile(morkFile* me, morkEnv* ev, morkFile** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongFile(morkFile* me, morkEnv* ev, morkFile** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+ public:
+  virtual mork_pos Length(morkEnv* ev) const = 0;  // eof
+  // nsIMdbFile methods
+  NS_IMETHOD Tell(nsIMdbEnv* ev, mdb_pos* outPos) const override = 0;
+  NS_IMETHOD Seek(nsIMdbEnv* ev, mdb_pos inPos, mdb_pos* outPos) override = 0;
+  NS_IMETHOD Eof(nsIMdbEnv* ev, mdb_pos* outPos) override;
+  // } ----- end pos methods -----
+
+  // { ----- begin read methods -----
+  NS_IMETHOD Read(nsIMdbEnv* ev, void* outBuf, mdb_size inSize,
+                  mdb_size* outActualSize) override = 0;
+  NS_IMETHOD Get(nsIMdbEnv* ev, void* outBuf, mdb_size inSize, mdb_pos inPos,
+                 mdb_size* outActualSize) override;
+  // } ----- end read methods -----
+
+  // { ----- begin write methods -----
+  NS_IMETHOD Write(nsIMdbEnv* ev, const void* inBuf, mdb_size inSize,
+                   mdb_size* outActualSize) override = 0;
+  NS_IMETHOD Put(nsIMdbEnv* ev, const void* inBuf, mdb_size inSize,
+                 mdb_pos inPos, mdb_size* outActualSize) override;
+  NS_IMETHOD Flush(nsIMdbEnv* ev) override = 0;
+  // } ----- end attribute methods -----
+
+  // { ----- begin path methods -----
+  NS_IMETHOD Path(nsIMdbEnv* ev, mdbYarn* outFilePath) override;
+  // } ----- end path methods -----
+
+  // { ----- begin replacement methods -----
+  NS_IMETHOD Steal(nsIMdbEnv* ev, nsIMdbFile* ioThief) override = 0;
+  NS_IMETHOD Thief(nsIMdbEnv* ev, nsIMdbFile** acqThief) override;
+  // } ----- end replacement methods -----
+
+  // { ----- begin versioning methods -----
+  NS_IMETHOD BecomeTrunk(nsIMdbEnv* ev) override = 0;
+
+  NS_IMETHOD AcquireBud(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+                        nsIMdbFile** acqBud) override = 0;
+  // } ----- end versioning methods -----
+
+  // } ===== end nsIMdbFile methods =====
+};
+
+/*=============================================================================
+ * morkStdioFile: concrete file using standard C file io
+ */
+
+#define morkDerived_kStdioFile /*i*/ 0x7346 /* ascii 'sF' */
+
+// Concrete morkFile backed by C stdio. mStdioFile_File holds the FILE*
+// (as an opaque void* so this header need not include stdio.h); io may
+// alternatively be delegated to a "thief" file after Steal().
+class morkStdioFile /*d*/ : public morkFile {  /* `` copied from IronDoc `` */
+  using PathChar = mozilla::filesystem::Path::value_type;
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected:  // protected morkStdioFile members
+  void* mStdioFile_File;
+  // actually type FILE*, but using opaque void* type
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseStdioFile() only if open
+  virtual ~morkStdioFile();  // assert that CloseStdioFile() executed earlier
+
+ public:  // morkStdioFile construction & destruction
+  morkStdioFile(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                nsIMdbHeap* ioSlotHeap);
+  void CloseStdioFile(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkStdioFile(const morkStdioFile& other);
+  morkStdioFile& operator=(const morkStdioFile& other);
+
+ public:  // dynamic type identification
+  mork_bool IsStdioFile() const {
+    return IsNode() && mNode_Derived == morkDerived_kStdioFile;
+  }
+  // } ===== end morkNode methods =====
+
+ public:  // typing
+  static void NonStdioFileTypeError(morkEnv* ev);
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // compatible with the morkFile::OpenOldFile() entry point
+  static morkStdioFile* OpenOldStdioFile(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                         const PathChar* inFilePath,
+                                         mork_bool inFrozen);
+
+  static morkStdioFile* CreateNewStdioFile(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                           const PathChar* inFilePath);
+
+  virtual mork_pos Length(morkEnv* ev) const override;  // eof
+
+  NS_IMETHOD Tell(nsIMdbEnv* ev, mdb_pos* outPos) const override;
+  NS_IMETHOD Seek(nsIMdbEnv* ev, mdb_pos inPos, mdb_pos* outPos) override;
+  //  NS_IMETHOD Eof(nsIMdbEnv* ev, mdb_pos* outPos);
+  // } ----- end pos methods -----
+
+  // { ----- begin read methods -----
+  NS_IMETHOD Read(nsIMdbEnv* ev, void* outBuf, mdb_size inSize,
+                  mdb_size* outActualSize) override;
+
+  // { ----- begin write methods -----
+  NS_IMETHOD Write(nsIMdbEnv* ev, const void* inBuf, mdb_size inSize,
+                   mdb_size* outActualSize) override;
+  //  NS_IMETHOD Put(nsIMdbEnv* ev, const void* inBuf, mdb_size inSize,
+  //            mdb_pos inPos, mdb_size* outActualSize);
+  NS_IMETHOD Flush(nsIMdbEnv* ev) override;
+  // } ----- end attribute methods -----
+
+  NS_IMETHOD Steal(nsIMdbEnv* ev, nsIMdbFile* ioThief) override;
+
+  // { ----- begin versioning methods -----
+  NS_IMETHOD BecomeTrunk(nsIMdbEnv* ev) override;
+
+  NS_IMETHOD AcquireBud(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+                        nsIMdbFile** acqBud) override;
+  // } ----- end versioning methods -----
+
+  // } ===== end nsIMdbFile methods =====
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected:  // protected non-poly morkStdioFile methods
+  void new_stdio_file_fault(morkEnv* ev) const;
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // public non-poly morkStdioFile methods
+  morkStdioFile(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                nsIMdbHeap* ioSlotHeap, const PathChar* inName,
+                const char* inMode);
+  // calls OpenStdio() after construction
+
+  morkStdioFile(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                nsIMdbHeap* ioSlotHeap, void* ioFile, const PathChar* inName,
+                mork_bool inFrozen);
+  // calls UseStdio() after construction
+
+  void OpenStdio(morkEnv* ev, const PathChar* inName, const char* inMode);
+  // Open a new FILE with name inName, using mode flags from inMode.
+
+  void UseStdio(morkEnv* ev, void* ioFile, const PathChar* inName,
+                mork_bool inFrozen);
+  // Use an existing file, like stdin/stdout/stderr, which should not
+  // have the io stream closed when the file is closed. The ioFile
+  // parameter must actually be of type FILE (but we don't want to make
+  // this header file include the stdio.h header file).
+
+  void CloseStdio(morkEnv* ev);
+  // Close the stream io if both and FileActive() and FileIoOpen(), but
+  // this does not close this instances (like CloseStdioFile() does).
+  // If stream io was made active by means of calling UseStdio(),
+  // then this method does little beyond marking the stream inactive
+  // because FileIoOpen() is false.
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakStdioFile(morkStdioFile* me, morkEnv* ev,
+                                morkStdioFile** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongStdioFile(morkStdioFile* me, morkEnv* ev,
+                                  morkStdioFile** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKFILE_ */
diff --git a/comm/mailnews/db/mork/morkHandle.cpp b/comm/mailnews/db/mork/morkHandle.cpp
new file mode 100644
index 0000000000..e838f8b997
--- /dev/null
+++ b/comm/mailnews/db/mork/morkHandle.cpp
@@ -0,0 +1,357 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKFACTORY_
+# include "morkFactory.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKHANDLE_
+# include "morkHandle.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkHandle::CloseMorkNode(
+ morkEnv* ev) // CloseHandle() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseHandle(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkHandle::~morkHandle() // assert CloseHandle() executed earlier
+{
+ MORK_ASSERT(mHandle_Env == 0);
+ MORK_ASSERT(mHandle_Face == 0);
+ MORK_ASSERT(mHandle_Object == 0);
+ MORK_ASSERT(mHandle_Magic == 0);
+ MORK_ASSERT(mHandle_Tag == morkHandle_kTag); // should still have correct tag
+}
+
+/*public non-poly*/
+morkHandle::morkHandle(
+ morkEnv* ev, // note morkUsage is always morkUsage_kPool
+ morkHandleFace* ioFace, // must not be nil, cookie for this handle
+ morkObject* ioObject, // must not be nil, the object for this handle
+ mork_magic inMagic) // magic sig to denote specific subclass
+ : morkNode(ev, morkUsage::kPool, (nsIMdbHeap*)0L),
+ mHandle_Tag(0),
+ mHandle_Env(ev),
+ mHandle_Face(ioFace),
+ mHandle_Object(0),
+ mHandle_Magic(0) {
+ if (ioFace && ioObject) {
+ if (ev->Good()) {
+ mHandle_Tag = morkHandle_kTag;
+ morkObject::SlotStrongObject(ioObject, ev, &mHandle_Object);
+ morkHandle::SlotWeakHandle(this, ev, &ioObject->mObject_Handle);
+ if (ev->Good()) {
+ mHandle_Magic = inMagic;
+ mNode_Derived = morkDerived_kHandle;
+ }
+ } else
+ ev->CantMakeWhenBadError();
+ } else
+ ev->NilPointerError();
+}
+
+/*public non-poly*/ void morkHandle::CloseHandle(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ morkObject* obj = mHandle_Object;
+ mork_bool objDidRefSelf = (obj && obj->mObject_Handle == this);
+ if (objDidRefSelf) obj->mObject_Handle = 0; // drop the reference
+
+ morkObject::SlotStrongObject((morkObject*)0, ev, &mHandle_Object);
+ mHandle_Magic = 0;
+ // note mHandle_Tag MUST stay morkHandle_kTag for morkNode::ZapOld()
+ this->MarkShut();
+
+ if (objDidRefSelf)
+ this->CutWeakRef(ev); // do last, because it might self destroy
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+void morkHandle::NilFactoryError(morkEnv* ev) const {
+ ev->NewError("nil mHandle_Factory");
+}
+
+void morkHandle::NilHandleObjectError(morkEnv* ev) const {
+ ev->NewError("nil mHandle_Object");
+}
+
+void morkHandle::NonNodeObjectError(morkEnv* ev) const {
+ ev->NewError("non-node mHandle_Object");
+}
+
+void morkHandle::NonOpenObjectError(morkEnv* ev) const {
+ ev->NewError("non-open mHandle_Object");
+}
+
+void morkHandle::NewBadMagicHandleError(morkEnv* ev, mork_magic inMagic) const {
+ MORK_USED_1(inMagic);
+ ev->NewError("wrong mHandle_Magic");
+}
+
+void morkHandle::NewDownHandleError(morkEnv* ev) const {
+ if (this->IsHandle()) {
+ if (this->GoodHandleTag()) {
+ if (this->IsOpenNode())
+ ev->NewError("unknown down morkHandle error");
+ else
+ this->NonOpenNodeError(ev);
+ } else
+ ev->NewError("wrong morkHandle tag");
+ } else
+ ev->NewError("non morkHandle");
+}
+
+morkObject* morkHandle::GetGoodHandleObject(morkEnv* ev, mork_bool inMutable,
+ mork_magic inMagicType,
+ mork_bool inClosedOkay) const {
+ morkObject* outObject = 0;
+ if (this->IsHandle() && this->GoodHandleTag() &&
+ (inClosedOkay || this->IsOpenNode())) {
+ if (!inMagicType || mHandle_Magic == inMagicType) {
+ morkObject* obj = this->mHandle_Object;
+ if (obj) {
+ if (obj->IsNode()) {
+ if (inClosedOkay || obj->IsOpenNode()) {
+ if (this->IsMutable() || !inMutable)
+ outObject = obj;
+ else
+ this->NonMutableNodeError(ev);
+ } else
+ this->NonOpenObjectError(ev);
+ } else
+ this->NonNodeObjectError(ev);
+ } else if (!inClosedOkay)
+ this->NilHandleObjectError(ev);
+ } else
+ this->NewBadMagicHandleError(ev, inMagicType);
+ } else
+ this->NewDownHandleError(ev);
+
+ MORK_ASSERT(outObject || inClosedOkay);
+ return outObject;
+}
+
+morkEnv* morkHandle::CanUseHandle(nsIMdbEnv* mev, mork_bool inMutable,
+ mork_bool inClosedOkay,
+ nsresult* outErr) const {
+ morkEnv* outEnv = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ morkObject* obj = this->GetGoodHandleObject(ev, inMutable,
+ /*magic*/ 0, inClosedOkay);
+ if (obj) {
+ outEnv = ev;
+ }
+ *outErr = ev->AsErr();
+ }
+ MORK_ASSERT(outEnv || inClosedOkay);
+ return outEnv;
+}
+
+// { ===== begin nsIMdbObject methods =====
+
+// { ----- begin attribute methods -----
+/*virtual*/ nsresult morkHandle::Handle_IsFrozenMdbObject(
+ nsIMdbEnv* mev, mdb_bool* outIsReadonly) {
+ nsresult outErr = NS_OK;
+ mdb_bool readOnly = mdbBool_kTrue;
+
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ readOnly = mHandle_Object->IsFrozen();
+
+ outErr = ev->AsErr();
+ }
+ MORK_ASSERT(outIsReadonly);
+ if (outIsReadonly) *outIsReadonly = readOnly;
+
+ return outErr;
+}
+// same as nsIMdbPort::GetIsPortReadonly() when this object is inside a port.
+// } ----- end attribute methods -----
+
+// { ----- begin factory methods -----
+/*virtual*/ nsresult morkHandle::Handle_GetMdbFactory(
+ nsIMdbEnv* mev, nsIMdbFactory** acqFactory) {
+ nsresult outErr = NS_OK;
+ nsIMdbFactory* handle = 0;
+
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ morkFactory* factory = ev->mEnv_Factory;
+ if (factory) {
+ handle = factory;
+ NS_ADDREF(handle);
+ } else
+ this->NilFactoryError(ev);
+
+ outErr = ev->AsErr();
+ }
+
+ MORK_ASSERT(acqFactory);
+ if (acqFactory) *acqFactory = handle;
+
+ return outErr;
+}
+// } ----- end factory methods -----
+
+// { ----- begin ref counting for well-behaved cyclic graphs -----
+/*virtual*/ nsresult morkHandle::Handle_GetWeakRefCount(
+ nsIMdbEnv* mev, // weak refs
+ mdb_count* outCount) {
+ nsresult outErr = NS_OK;
+ mdb_count count = 0;
+
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ count = this->WeakRefsOnly();
+
+ outErr = ev->AsErr();
+ }
+ MORK_ASSERT(outCount);
+ if (outCount) *outCount = count;
+
+ return outErr;
+}
+/*virtual*/ nsresult morkHandle::Handle_GetStrongRefCount(
+ nsIMdbEnv* mev, // strong refs
+ mdb_count* outCount) {
+ nsresult outErr = NS_OK;
+ mdb_count count = 0;
+
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ count = this->StrongRefsOnly();
+
+ outErr = ev->AsErr();
+ }
+ MORK_ASSERT(outCount);
+ if (outCount) *outCount = count;
+
+ return outErr;
+}
+
+/*virtual*/ nsresult morkHandle::Handle_AddWeakRef(nsIMdbEnv* mev) {
+ nsresult outErr = NS_OK;
+
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ this->AddWeakRef(ev);
+ outErr = ev->AsErr();
+ }
+
+ return outErr;
+}
+/*virtual*/ nsresult morkHandle::Handle_AddStrongRef(nsIMdbEnv* mev) {
+ nsresult outErr = NS_OK;
+
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ this->AddStrongRef(ev);
+ outErr = ev->AsErr();
+ }
+
+ return outErr;
+}
+
+/*virtual*/ nsresult morkHandle::Handle_CutWeakRef(nsIMdbEnv* mev) {
+ nsresult outErr = NS_OK;
+
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ this->CutWeakRef(ev);
+ outErr = ev->AsErr();
+ }
+
+ return outErr;
+}
+/*virtual*/ nsresult morkHandle::Handle_CutStrongRef(nsIMdbEnv* mev) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ this->CutStrongRef(ev);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+/*virtual*/ nsresult morkHandle::Handle_CloseMdbObject(nsIMdbEnv* mev)
+// called at strong refs zero
+{
+ // if only one ref, Handle_CutStrongRef will clean up better.
+ if (mNode_Uses == 1) return Handle_CutStrongRef(mev);
+
+ nsresult outErr = NS_OK;
+
+ if (this->IsNode() && this->IsOpenNode()) {
+ morkEnv* ev = CanUseHandle(mev, /*inMutable*/ morkBool_kFalse,
+ /*inClosedOkay*/ morkBool_kTrue, &outErr);
+ if (ev) {
+ morkObject* object = mHandle_Object;
+ if (object && object->IsNode() && object->IsOpenNode())
+ object->CloseMorkNode(ev);
+
+ this->CloseMorkNode(ev);
+ outErr = ev->AsErr();
+ }
+ }
+ return outErr;
+}
+
+/*virtual*/ nsresult morkHandle::Handle_IsOpenMdbObject(nsIMdbEnv* mev,
+ mdb_bool* outOpen) {
+ MORK_USED_1(mev);
+ nsresult outErr = NS_OK;
+
+ MORK_ASSERT(outOpen);
+ if (outOpen) *outOpen = this->IsOpenNode();
+
+ return outErr;
+}
+// } ----- end ref counting -----
+
+// } ===== end nsIMdbObject methods =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkHandle.h b/comm/mailnews/db/mork/morkHandle.h
new file mode 100644
index 0000000000..5089e629c8
--- /dev/null
+++ b/comm/mailnews/db/mork/morkHandle.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKHANDLE_
+#define _MORKHANDLE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKDEQUE_
+# include "morkDeque.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class morkPool;
+class morkObject;
+class morkFactory;
+
+#define morkDerived_kHandle /*i*/ 0x486E /* ascii 'Hn' */
+#define morkHandle_kTag 0x68416E44 /* ascii 'hAnD' */
+
+/*| morkHandle:
+|*/
+class morkHandle : public morkNode {
+ // public: // slots inherited from morkNode (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ public: // state is public because the entire Mork system is private
+ mork_u4 mHandle_Tag; // must equal morkHandle_kTag
+ morkEnv* mHandle_Env; // pool that allocated this handle
+ morkHandleFace* mHandle_Face; // cookie from pool containing this
+ morkObject* mHandle_Object; // object this handle wraps for MDB API
+ mork_magic mHandle_Magic; // magic sig different in each subclass
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseHandle() only if open
+ virtual ~morkHandle(); // assert that CloseHandle() executed earlier
+
+ public: // morkHandle construction & destruction
+ morkHandle(
+ morkEnv* ev, // note morkUsage is always morkUsage_kPool
+ morkHandleFace* ioFace, // must not be nil, cookie for this handle
+ morkObject* ioObject, // must not be nil, the object for this handle
+ mork_magic inMagic); // magic sig to denote specific subclass
+ void CloseHandle(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+ morkHandle(const morkHandle& other);
+ morkHandle& operator=(const morkHandle& other);
+
+ protected: // special case empty construction for morkHandleFrame
+ friend class morkHandleFrame;
+ morkHandle() {}
+
+ public: // dynamic type identification
+ mork_bool IsHandle() const {
+ return IsNode() && mNode_Derived == morkDerived_kHandle;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // morkHandle memory management operators
+ void* operator new(size_t inSize, morkPool& ioPool, morkZone& ioZone,
+ morkEnv* ev) noexcept(true) {
+ return ioPool.NewHandle(ev, inSize, &ioZone);
+ }
+
+ void* operator new(size_t inSize, morkPool& ioPool,
+ morkEnv* ev) noexcept(true) {
+ return ioPool.NewHandle(ev, inSize, (morkZone*)0);
+ }
+
+ void* operator new(size_t inSize, morkHandleFace* ioFace) noexcept(true) {
+ MORK_USED_1(inSize);
+ return ioFace;
+ }
+
+ public: // other handle methods
+ mork_bool GoodHandleTag() const { return mHandle_Tag == morkHandle_kTag; }
+
+ void NewBadMagicHandleError(morkEnv* ev, mork_magic inMagic) const;
+ void NewDownHandleError(morkEnv* ev) const;
+ void NilFactoryError(morkEnv* ev) const;
+ void NilHandleObjectError(morkEnv* ev) const;
+ void NonNodeObjectError(morkEnv* ev) const;
+ void NonOpenObjectError(morkEnv* ev) const;
+
+ morkObject* GetGoodHandleObject(morkEnv* ev, mork_bool inMutable,
+ mork_magic inMagicType,
+ mork_bool inClosedOkay) const;
+
+ public: // interface supporting mdbObject methods
+ morkEnv* CanUseHandle(nsIMdbEnv* mev, mork_bool inMutable,
+ mork_bool inClosedOkay, nsresult* outErr) const;
+
+ // { ----- begin mdbObject style methods -----
+ nsresult Handle_IsFrozenMdbObject(nsIMdbEnv* ev, mdb_bool* outIsReadonly);
+
+ nsresult Handle_GetMdbFactory(nsIMdbEnv* ev, nsIMdbFactory** acqFactory);
+ nsresult Handle_GetWeakRefCount(nsIMdbEnv* ev, mdb_count* outCount);
+ nsresult Handle_GetStrongRefCount(nsIMdbEnv* ev, mdb_count* outCount);
+
+ nsresult Handle_AddWeakRef(nsIMdbEnv* ev);
+ nsresult Handle_AddStrongRef(nsIMdbEnv* ev);
+
+ nsresult Handle_CutWeakRef(nsIMdbEnv* ev);
+ nsresult Handle_CutStrongRef(nsIMdbEnv* ev);
+
+ nsresult Handle_CloseMdbObject(nsIMdbEnv* ev);
+ nsresult Handle_IsOpenMdbObject(nsIMdbEnv* ev, mdb_bool* outOpen);
+ // } ----- end mdbObject style methods -----
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakHandle(morkHandle* me, morkEnv* ev, morkHandle** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongHandle(morkHandle* me, morkEnv* ev,
+ morkHandle** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+#define morkHandleFrame_kPadSlotCount 4
+
+/*| morkHandleFrame: an object format used for allocating and maintaining
+**| instances of morkHandle, with leading slots used to maintain this in a
+**| linked list, and following slots to provide extra footprint that might
+**| be needed by any morkHandle subclasses that include very little extra
+**| space (by virtue of the fact that each morkHandle subclass is expected
+**| to multiply inherit from another base class that has only abstract methods
+**| for space overhead related only to some vtable representation).
+|*/
+class morkHandleFrame {
+ public:
+ morkLink mHandleFrame_Link; // list storage without trampling Handle
+ morkHandle mHandleFrame_Handle;
+ mork_ip mHandleFrame_Padding[morkHandleFrame_kPadSlotCount];
+
+ public:
+ morkHandle* AsHandle() { return &mHandleFrame_Handle; }
+
+ morkHandleFrame() {} // actually, morkHandleFrame never gets constructed
+
+ private: // copying is not allowed
+ morkHandleFrame(const morkHandleFrame& other);
+ morkHandleFrame& operator=(const morkHandleFrame& other);
+};
+
+#define morkHandleFrame_kHandleOffset \
+ mork_OffsetOf(morkHandleFrame, mHandleFrame_Handle)
+
+#define morkHandle_AsHandleFrame(h) \
+ ((h)->mHandle_Block, \
+ ((morkHandleFrame*)(((mork_u1*)(h)) - morkHandleFrame_kHandleOffset)))
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKHANDLE_ */
diff --git a/comm/mailnews/db/mork/morkIntMap.cpp b/comm/mailnews/db/mork/morkIntMap.cpp
new file mode 100644
index 0000000000..69caa867a1
--- /dev/null
+++ b/comm/mailnews/db/mork/morkIntMap.cpp
@@ -0,0 +1,212 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKINTMAP_
+# include "morkIntMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkIntMap::CloseMorkNode(
+ morkEnv* ev) // CloseIntMap() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseIntMap(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkIntMap::~morkIntMap() // assert CloseIntMap() executed earlier
+{
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkIntMap::morkIntMap(morkEnv* ev, const morkUsage& inUsage,
+ mork_size inValSize, nsIMdbHeap* ioHeap,
+ nsIMdbHeap* ioSlotHeap, mork_bool inHoldChanges)
+ : morkMap(ev, inUsage, ioHeap, sizeof(mork_u4), inValSize,
+ morkIntMap_kStartSlotCount, ioSlotHeap, inHoldChanges) {
+ if (ev->Good()) mNode_Derived = morkDerived_kIntMap;
+}
+
+/*public non-poly*/ void morkIntMap::CloseIntMap(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ this->CloseMap(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// { ===== begin morkMap poly interface =====
+/*virtual*/ mork_bool // *((mork_u4*) inKeyA) == *((mork_u4*) inKeyB)
+morkIntMap::Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const {
+ MORK_USED_1(ev);
+ return *((const mork_u4*)inKeyA) == *((const mork_u4*)inKeyB);
+}
+
+/*virtual*/ mork_u4 // some integer function of *((mork_u4*) inKey)
+morkIntMap::Hash(morkEnv* ev, const void* inKey) const {
+ MORK_USED_1(ev);
+ return *((const mork_u4*)inKey);
+}
+// } ===== end morkMap poly interface =====
+
+mork_bool morkIntMap::AddInt(morkEnv* ev, mork_u4 inKey, void* ioAddress)
+// the AddInt() method return value equals ev->Good().
+{
+ if (ev->Good()) {
+ this->Put(ev, &inKey, &ioAddress,
+ /*key*/ (void*)0, /*val*/ (void*)0, (mork_change**)0);
+ }
+
+ return ev->Good();
+}
+
+mork_bool morkIntMap::CutInt(morkEnv* ev, mork_u4 inKey) {
+ return this->Cut(ev, &inKey, /*key*/ (void*)0, /*val*/ (void*)0,
+ (mork_change**)0);
+}
+
+void* morkIntMap::GetInt(morkEnv* ev, mork_u4 inKey)
+// Note the returned val does NOT have an increase in refcount for this.
+{
+ void* val = 0; // old val in the map
+ this->Get(ev, &inKey, /*key*/ (void*)0, &val, (mork_change**)0);
+
+ return val;
+}
+
+mork_bool morkIntMap::HasInt(morkEnv* ev, mork_u4 inKey)
+// Note the returned val does NOT have an increase in refcount for this.
+{
+ return this->Get(ev, &inKey, /*key*/ (void*)0, /*val*/ (void*)0,
+ (mork_change**)0);
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#ifdef MORK_POINTER_MAP_IMPL
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkPointerMap::CloseMorkNode(
+ morkEnv* ev) // ClosePointerMap() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->ClosePointerMap(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkPointerMap::~morkPointerMap() // assert ClosePointerMap() executed earlier
+{
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkPointerMap::morkPointerMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+ : morkMap(ev, inUsage, ioHeap, sizeof(void*), sizeof(void*),
+ morkPointerMap_kStartSlotCount, ioSlotHeap,
+ /*inHoldChanges*/ morkBool_kFalse) {
+ if (ev->Good()) mNode_Derived = morkDerived_kPointerMap;
+}
+
+/*public non-poly*/ void morkPointerMap::ClosePointerMap(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ this->CloseMap(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// { ===== begin morkMap poly interface =====
+/*virtual*/ mork_bool // *((void**) inKeyA) == *((void**) inKeyB)
+morkPointerMap::Equal(morkEnv* ev, const void* inKeyA,
+ const void* inKeyB) const {
+ MORK_USED_1(ev);
+ return *((const void**)inKeyA) == *((const void**)inKeyB);
+}
+
+/*virtual*/ mork_u4 // some integer function of *((mork_u4*) inKey)
+morkPointerMap::Hash(morkEnv* ev, const void* inKey) const {
+ MORK_USED_1(ev);
+ return *((const mork_u4*)inKey);
+}
+// } ===== end morkMap poly interface =====
+
+mork_bool morkPointerMap::AddPointer(morkEnv* ev, void* inKey, void* ioAddress)
+// the AddPointer() method return value equals ev->Good().
+{
+ if (ev->Good()) {
+ this->Put(ev, &inKey, &ioAddress,
+ /*key*/ (void*)0, /*val*/ (void*)0, (mork_change**)0);
+ }
+
+ return ev->Good();
+}
+
+mork_bool morkPointerMap::CutPointer(morkEnv* ev, void* inKey) {
+ return this->Cut(ev, &inKey, /*key*/ (void*)0, /*val*/ (void*)0,
+ (mork_change**)0);
+}
+
+void* morkPointerMap::GetPointer(morkEnv* ev, void* inKey)
+// Note the returned val does NOT have an increase in refcount for this.
+{
+ void* val = 0; // old val in the map
+ this->Get(ev, &inKey, /*key*/ (void*)0, &val, (mork_change**)0);
+
+ return val;
+}
+
+mork_bool morkPointerMap::HasPointer(morkEnv* ev, void* inKey)
+// Note the returned val does NOT have an increase in refcount for this.
+{
+ return this->Get(ev, &inKey, /*key*/ (void*)0, /*val*/ (void*)0,
+ (mork_change**)0);
+}
+#endif /*MORK_POINTER_MAP_IMPL*/
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkIntMap.h b/comm/mailnews/db/mork/morkIntMap.h
new file mode 100644
index 0000000000..97f9c4b977
--- /dev/null
+++ b/comm/mailnews/db/mork/morkIntMap.h
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKINTMAP_
+#define _MORKINTMAP_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kIntMap /*i*/ 0x694D /* ascii 'iM' */
+
+#define morkIntMap_kStartSlotCount 256
+
+/*| morkIntMap: maps mork_token -> morkNode
+|*/
+class morkIntMap : public morkMap { // for mapping tokens to maps
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseIntMap() only if open
+ virtual ~morkIntMap(); // assert that CloseIntMap() executed earlier
+
+ public: // morkMap construction & destruction
+ // keySize for morkIntMap equals sizeof(mork_u4)
+ morkIntMap(morkEnv* ev, const morkUsage& inUsage, mork_size inValSize,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap,
+ mork_bool inHoldChanges);
+ void CloseIntMap(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+ mork_bool IsIntMap() const {
+ return IsNode() && mNode_Derived == morkDerived_kIntMap;
+ }
+ // } ===== end morkNode methods =====
+
+ // { ===== begin morkMap poly interface =====
+ virtual mork_bool // *((mork_u4*) inKeyA) == *((mork_u4*) inKeyB)
+ Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const override;
+
+ virtual mork_u4 // some integer function of *((mork_u4*) inKey)
+ Hash(morkEnv* ev, const void* inKey) const override;
+ // } ===== end morkMap poly interface =====
+
+ public: // other map methods
+ mork_bool AddInt(morkEnv* ev, mork_u4 inKey, void* ioAddress);
+ // the AddInt() boolean return equals ev->Good().
+
+ mork_bool CutInt(morkEnv* ev, mork_u4 inKey);
+ // The CutInt() boolean return indicates whether removal happened.
+
+ void* GetInt(morkEnv* ev, mork_u4 inKey);
+ // Note the returned node does NOT have an increase in refcount for this.
+
+ mork_bool HasInt(morkEnv* ev, mork_u4 inKey);
+ // Note the returned node does NOT have an increase in refcount for this.
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#ifdef MORK_POINTER_MAP_IMPL
+
+# define morkDerived_kPointerMap /*i*/ 0x704D /* ascii 'pM' */
+
+# define morkPointerMap_kStartSlotCount 256
+
+/*| morkPointerMap: maps void* -> void*
+**|
+**| This pointer map class is equivalent to morkIntMap when sizeof(mork_u4)
+**| equals sizeof(void*). However, when these two sizes are different,
+**| then we cannot use the same hash table structure very easily without
+**| being very careful about the size and usage assumptions of those
+**| clients using the smaller data type. So we just go ahead and use
+**| morkPointerMap for hash tables using pointer key types.
+|*/
+class morkPointerMap : public morkMap {  // for mapping pointers to pointers
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // ClosePointerMap() only if open
+ virtual ~morkPointerMap(); // assert that ClosePointerMap() executed earlier
+
+ public: // morkMap construction & destruction
+  // keySize for morkPointerMap equals sizeof(void*)
+ morkPointerMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ nsIMdbHeap* ioSlotHeap);
+ void ClosePointerMap(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+ mork_bool IsPointerMap() const {
+ return IsNode() && mNode_Derived == morkDerived_kPointerMap;
+ }
+ // } ===== end morkNode methods =====
+
+ // { ===== begin morkMap poly interface =====
+ virtual mork_bool // *((void**) inKeyA) == *((void**) inKeyB)
+ Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const;
+
+ virtual mork_u4 // some integer function of *((mork_u4*) inKey)
+ Hash(morkEnv* ev, const void* inKey) const;
+ // } ===== end morkMap poly interface =====
+
+ public: // other map methods
+ mork_bool AddPointer(morkEnv* ev, void* inKey, void* ioAddress);
+ // the AddPointer() boolean return equals ev->Good().
+
+ mork_bool CutPointer(morkEnv* ev, void* inKey);
+ // The CutPointer() boolean return indicates whether removal happened.
+
+ void* GetPointer(morkEnv* ev, void* inKey);
+ // Note the returned node does NOT have an increase in refcount for this.
+
+ mork_bool HasPointer(morkEnv* ev, void* inKey);
+ // Note the returned node does NOT have an increase in refcount for this.
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakIntMap(morkIntMap* me, morkEnv* ev, morkIntMap** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongIntMap(morkIntMap* me, morkEnv* ev,
+ morkIntMap** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+#endif /*MORK_POINTER_MAP_IMPL*/
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKINTMAP_ */
diff --git a/comm/mailnews/db/mork/morkMap.cpp b/comm/mailnews/db/mork/morkMap.cpp
new file mode 100644
index 0000000000..22e57b4df5
--- /dev/null
+++ b/comm/mailnews/db/mork/morkMap.cpp
@@ -0,0 +1,852 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+// This code is mostly a port to C++ from public domain IronDoc C sources.
+// Note many code comments here come verbatim from cut-and-pasted IronDoc.
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+class morkHashArrays {
+ public:
+ nsIMdbHeap* mHashArrays_Heap; // copy of mMap_Heap
+ mork_count mHashArrays_Slots; // copy of mMap_Slots
+
+ mork_u1* mHashArrays_Keys; // copy of mMap_Keys
+ mork_u1* mHashArrays_Vals; // copy of mMap_Vals
+ morkAssoc* mHashArrays_Assocs; // copy of mMap_Assocs
+ mork_change* mHashArrays_Changes; // copy of mMap_Changes
+ morkAssoc** mHashArrays_Buckets; // copy of mMap_Buckets
+ morkAssoc* mHashArrays_FreeList; // copy of mMap_FreeList
+
+ public:
+ void finalize(morkEnv* ev);
+};
+
+void morkHashArrays::finalize(morkEnv* ev) {
+ nsIMdbEnv* menv = ev->AsMdbEnv();
+ nsIMdbHeap* heap = mHashArrays_Heap;
+
+ if (heap) {
+ if (mHashArrays_Keys) heap->Free(menv, mHashArrays_Keys);
+ if (mHashArrays_Vals) heap->Free(menv, mHashArrays_Vals);
+ if (mHashArrays_Assocs) heap->Free(menv, mHashArrays_Assocs);
+ if (mHashArrays_Changes) heap->Free(menv, mHashArrays_Changes);
+ if (mHashArrays_Buckets) heap->Free(menv, mHashArrays_Buckets);
+ }
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkMap::CloseMorkNode(
+ morkEnv* ev) // CloseMap() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseMap(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkMap::~morkMap() // assert CloseMap() executed earlier
+{
+ MORK_ASSERT(mMap_FreeList == 0);
+ MORK_ASSERT(mMap_Buckets == 0);
+ MORK_ASSERT(mMap_Keys == 0);
+ MORK_ASSERT(mMap_Vals == 0);
+ MORK_ASSERT(mMap_Changes == 0);
+ MORK_ASSERT(mMap_Assocs == 0);
+}
+
+/*public non-poly*/ void morkMap::CloseMap(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ nsIMdbHeap* heap = mMap_Heap;
+ if (heap) /* need to free the arrays? */
+ {
+ nsIMdbEnv* menv = ev->AsMdbEnv();
+
+ if (mMap_Keys) heap->Free(menv, mMap_Keys);
+
+ if (mMap_Vals) heap->Free(menv, mMap_Vals);
+
+ if (mMap_Assocs) heap->Free(menv, mMap_Assocs);
+
+ if (mMap_Buckets) heap->Free(menv, mMap_Buckets);
+
+ if (mMap_Changes) heap->Free(menv, mMap_Changes);
+ }
+ mMap_Keys = 0;
+ mMap_Vals = 0;
+ mMap_Buckets = 0;
+ mMap_Assocs = 0;
+ mMap_Changes = 0;
+ mMap_FreeList = 0;
+ MORK_MEMSET(&mMap_Form, 0, sizeof(morkMapForm));
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+void morkMap::clear_map(morkEnv* ev, nsIMdbHeap* ioSlotHeap) {
+ mMap_Tag = 0;
+ mMap_Seed = 0;
+ mMap_Slots = 0;
+ mMap_Fill = 0;
+ mMap_Keys = 0;
+ mMap_Vals = 0;
+ mMap_Assocs = 0;
+ mMap_Changes = 0;
+ mMap_Buckets = 0;
+ mMap_FreeList = 0;
+ MORK_MEMSET(&mMap_Form, 0, sizeof(morkMapForm));
+
+ mMap_Heap = 0;
+ if (ioSlotHeap) {
+ nsIMdbHeap_SlotStrongHeap(ioSlotHeap, ev, &mMap_Heap);
+ } else
+ ev->NilPointerError();
+}
+
+morkMap::morkMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ mork_size inKeySize, mork_size inValSize, mork_size inSlots,
+ nsIMdbHeap* ioSlotHeap, mork_bool inHoldChanges)
+ : morkNode(ev, inUsage, ioHeap), mMap_Heap(0) {
+ if (ev->Good()) {
+ this->clear_map(ev, ioSlotHeap);
+ if (ev->Good()) {
+ mMap_Form.mMapForm_HoldChanges = inHoldChanges;
+
+ mMap_Form.mMapForm_KeySize = inKeySize;
+ mMap_Form.mMapForm_ValSize = inValSize;
+ mMap_Form.mMapForm_KeyIsIP = (inKeySize == sizeof(mork_ip));
+ mMap_Form.mMapForm_ValIsIP = (inValSize == sizeof(mork_ip));
+
+ this->InitMap(ev, inSlots);
+ if (ev->Good()) mNode_Derived = morkDerived_kMap;
+ }
+ }
+}
+
+void morkMap::NewIterOutOfSyncError(morkEnv* ev) {
+ ev->NewError("map iter out of sync");
+}
+
+void morkMap::NewBadMapError(morkEnv* ev) { ev->NewError("bad morkMap tag"); }
+
+void morkMap::NewSlotsUnderflowWarning(morkEnv* ev) {
+ ev->NewWarning("member count underflow");
+}
+
+void morkMap::InitMap(morkEnv* ev, mork_size inSlots) {
+ if (ev->Good()) {
+ morkHashArrays old;
+ // MORK_MEMCPY(&mMap_Form, &inForm, sizeof(morkMapForm));
+ if (inSlots < 3) /* requested capacity absurdly small? */
+ inSlots = 3; /* bump it up to a minimum practical level */
+ else if (inSlots > (128 * 1024)) /* requested slots absurdly big? */
+ inSlots = (128 * 1024); /* decrease it to a maximum practical level */
+
+ if (this->new_arrays(ev, &old, inSlots)) mMap_Tag = morkMap_kTag;
+
+ MORK_MEMSET(&old, 0, sizeof(morkHashArrays)); // do NOT finalize
+ }
+}
+
+morkAssoc** morkMap::find(morkEnv* ev, const void* inKey,
+ mork_u4 inHash) const {
+ mork_u1* keys = mMap_Keys;
+ mork_num keySize = this->FormKeySize();
+ // morkMap_mEqual equal = this->FormEqual();
+
+ morkAssoc** ref = mMap_Buckets + (inHash % mMap_Slots);
+ morkAssoc* assoc = *ref;
+ while (assoc) /* look at another assoc in the bucket? */
+ {
+ mork_pos i = assoc - mMap_Assocs; /* index of this assoc */
+ if (this->Equal(ev, keys + (i * keySize), inKey)) /* found? */
+ return ref;
+
+ ref = &assoc->mAssoc_Next; /* consider next assoc slot in bucket */
+ assoc = *ref; /* if this is null, then we are done */
+ }
+ return 0;
+}
+
+/*| get_assoc: read the key and/or value at index inPos
+|*/
+void morkMap::get_assoc(void* outKey, void* outVal, mork_pos inPos) const {
+ mork_num valSize = this->FormValSize();
+ if (valSize && outVal) /* map holds values? caller wants the value? */
+ {
+ const mork_u1* value = mMap_Vals + (valSize * inPos);
+ if (valSize == sizeof(mork_ip) && this->FormValIsIP()) /* ip case? */
+ *((mork_ip*)outVal) = *((const mork_ip*)value);
+ else
+ MORK_MEMCPY(outVal, value, valSize);
+ }
+ if (outKey) /* caller wants the key? */
+ {
+ mork_num keySize = this->FormKeySize();
+ const mork_u1* key = mMap_Keys + (keySize * inPos);
+ if (keySize == sizeof(mork_ip) && this->FormKeyIsIP()) /* ip case? */
+ *((mork_ip*)outKey) = *((const mork_ip*)key);
+ else
+ MORK_MEMCPY(outKey, key, keySize);
+ }
+}
+
+/*| put_assoc: write the key and/or value at index inPos
+|*/
+void morkMap::put_assoc(const void* inKey, const void* inVal,
+ mork_pos inPos) const {
+ mork_num valSize = this->FormValSize();
+ if (valSize && inVal) /* map holds values? caller sends a value? */
+ {
+ mork_u1* value = mMap_Vals + (valSize * inPos);
+ if (valSize == sizeof(mork_ip) && this->FormValIsIP()) /* ip case? */
+ *((mork_ip*)value) = *((const mork_ip*)inVal);
+ else
+ MORK_MEMCPY(value, inVal, valSize);
+ }
+ if (inKey) /* caller sends a key? */
+ {
+ mork_num keySize = this->FormKeySize();
+ mork_u1* key = mMap_Keys + (keySize * inPos);
+ if (keySize == sizeof(mork_ip) && this->FormKeyIsIP()) /* ip case? */
+ *((mork_ip*)key) = *((const mork_ip*)inKey);
+ else
+ MORK_MEMCPY(key, inKey, keySize);
+ }
+}
+
+void* morkMap::clear_alloc(morkEnv* ev, mork_size inSize) {
+ void* p = 0;
+ nsIMdbHeap* heap = mMap_Heap;
+ if (heap) {
+ if (NS_SUCCEEDED(heap->Alloc(ev->AsMdbEnv(), inSize, (void**)&p)) && p) {
+ MORK_MEMSET(p, 0, inSize);
+ return p;
+ }
+ } else
+ ev->NilPointerError();
+
+ return (void*)0;
+}
+
+void* morkMap::alloc(morkEnv* ev, mork_size inSize) {
+ void* p = 0;
+ nsIMdbHeap* heap = mMap_Heap;
+ if (heap) {
+ if (NS_SUCCEEDED(heap->Alloc(ev->AsMdbEnv(), inSize, (void**)&p)) && p)
+ return p;
+ } else
+ ev->NilPointerError();
+
+ return (void*)0;
+}
+
+/*| new_keys: allocate an array of inSlots new keys filled with zero.
+|*/
+mork_u1* morkMap::new_keys(morkEnv* ev, mork_num inSlots) {
+ mork_num size = inSlots * this->FormKeySize();
+ return (mork_u1*)this->clear_alloc(ev, size);
+}
+
+/*| new_values: allocate an array of inSlots new values filled with zero.
+**| When values are zero sized, we just return a null pointer.
+|*/
+mork_u1* morkMap::new_values(morkEnv* ev, mork_num inSlots) {
+ mork_u1* values = 0;
+ mork_num size = inSlots * this->FormValSize();
+ if (size) values = (mork_u1*)this->clear_alloc(ev, size);
+ return values;
+}
+
+mork_change* morkMap::new_changes(morkEnv* ev, mork_num inSlots) {
+ mork_change* changes = 0;
+ mork_num size = inSlots * sizeof(mork_change);
+ if (size && mMap_Form.mMapForm_HoldChanges)
+ changes = (mork_change*)this->clear_alloc(ev, size);
+ return changes;
+}
+
+/*| new_buckets: allocate an array of inSlots new buckets filled with zero.
+|*/
+morkAssoc** morkMap::new_buckets(morkEnv* ev, mork_num inSlots) {
+ mork_num size = inSlots * sizeof(morkAssoc*);
+ return (morkAssoc**)this->clear_alloc(ev, size);
+}
+
+/*| new_assocs: allocate an array of inSlots new assocs, with each assoc
+**| linked together in a list with the first array element at the list head
+**| and the last element at the list tail. (morkMap::grow() needs this.)
+|*/
+morkAssoc* morkMap::new_assocs(morkEnv* ev, mork_num inSlots) {
+ mork_num size = inSlots * sizeof(morkAssoc);
+ morkAssoc* assocs = (morkAssoc*)this->alloc(ev, size);
+ if (assocs) /* able to allocate the array? */
+ {
+ morkAssoc* a = assocs + (inSlots - 1); /* the last array element */
+ a->mAssoc_Next = 0; /* terminate tail element of the list with null */
+ while (--a >= assocs) /* another assoc to link into the list? */
+ a->mAssoc_Next = a + 1; /* each points to the following assoc */
+ }
+ return assocs;
+}
+
+mork_bool morkMap::new_arrays(morkEnv* ev, morkHashArrays* old,
+ mork_num inSlots) {
+ mork_bool outNew = morkBool_kFalse;
+
+ /* see if we can allocate all the new arrays before we go any further: */
+ morkAssoc** newBuckets = this->new_buckets(ev, inSlots);
+ morkAssoc* newAssocs = this->new_assocs(ev, inSlots);
+ mork_u1* newKeys = this->new_keys(ev, inSlots);
+ mork_u1* newValues = this->new_values(ev, inSlots);
+ mork_change* newChanges = this->new_changes(ev, inSlots);
+
+ /* it is okay for newChanges to be null when changes are not held: */
+ mork_bool okayChanges = (newChanges || !this->FormHoldChanges());
+
+ /* it is okay for newValues to be null when values are zero sized: */
+ mork_bool okayValues = (newValues || !this->FormValSize());
+
+ if (newBuckets && newAssocs && newKeys && okayChanges && okayValues) {
+ outNew = morkBool_kTrue; /* yes, we created all the arrays we need */
+
+ /* init the old hashArrays with slots from this hash table: */
+ old->mHashArrays_Heap = mMap_Heap;
+
+ old->mHashArrays_Slots = mMap_Slots;
+ old->mHashArrays_Keys = mMap_Keys;
+ old->mHashArrays_Vals = mMap_Vals;
+ old->mHashArrays_Assocs = mMap_Assocs;
+ old->mHashArrays_Buckets = mMap_Buckets;
+ old->mHashArrays_Changes = mMap_Changes;
+
+ /* now replace all our array slots with the new structures: */
+ ++mMap_Seed; /* note the map is now changed */
+ mMap_Keys = newKeys;
+ mMap_Vals = newValues;
+ mMap_Buckets = newBuckets;
+ mMap_Assocs = newAssocs;
+ mMap_FreeList = newAssocs; /* all are free to start with */
+ mMap_Changes = newChanges;
+ mMap_Slots = inSlots;
+ } else /* free the partial set of arrays that were actually allocated */
+ {
+ nsIMdbEnv* menv = ev->AsMdbEnv();
+ nsIMdbHeap* heap = mMap_Heap;
+ if (newBuckets) heap->Free(menv, newBuckets);
+ if (newAssocs) heap->Free(menv, newAssocs);
+ if (newKeys) heap->Free(menv, newKeys);
+ if (newValues) heap->Free(menv, newValues);
+ if (newChanges) heap->Free(menv, newChanges);
+
+ MORK_MEMSET(old, 0, sizeof(morkHashArrays));
+ }
+
+ return outNew;
+}
+
+/*| grow: make the map arrays bigger by 33%. The old map is completely
+**| full, or else we would not have called grow() to get more space. This
+**| means the free list is empty, and also means every old key and value is in
+**| use in the old arrays. So every key and value must be copied to the new
+**| arrays, and since they are contiguous in the old arrays, we can efficiently
+**| bitwise copy them in bulk from the old arrays to the new arrays, without
+**| paying any attention to the structure of the old arrays.
+**|
+**|| The content of the old bucket and assoc arrays need not be copied because
+**| this information is going to be completely rebuilt by rehashing all the
+**| keys into their new buckets, given the new larger map capacity. The new
+**| bucket array from new_arrays() is assumed to contain all zeroes, which is
+**| necessary to ensure the lists in each bucket stay null terminated as we
+**| build the new linked lists. (Note no old bucket ordering is preserved.)
+**|
+**|| If the old capacity is N, then in the new arrays the assocs with indexes
+**| from zero to N-1 are still allocated and must be rehashed into the map.
+**| The new free list contains all the following assocs, starting with the new
+**| assoc link at index N. (We call the links in the link array "assocs"
+**| because allocating a link is the same as allocating the key/value pair
+**| with the same index as the link.)
+**|
+**|| The new free list is initialized simply by pointing at the first new link
+**| in the assoc array after the size of the old assoc array. This assumes
+**| that FeHashTable_new_arrays() has already linked all the new assocs into a
+**| list with the first at the head of the list and the last at the tail of the
+**| list. So by making the free list point to the first of the new uncopied
+**| assocs, the list is already well-formed.
+|*/
+mork_bool morkMap::grow(morkEnv* ev) {
+ if (mMap_Heap) /* can we grow the map? */
+ {
+ mork_num newSlots = (mMap_Slots * 2); /* +100% */
+ morkHashArrays old; /* a place to temporarily hold all the old arrays */
+ if (this->new_arrays(ev, &old, newSlots)) /* have more? */
+ {
+ // morkMap_mHash hash = this->FormHash(); /* for terse loop use */
+
+ /* figure out the bulk volume sizes of old keys and values to move: */
+ mork_num oldSlots = old.mHashArrays_Slots; /* number of old assocs */
+ mork_num keyBulk = oldSlots * this->FormKeySize(); /* key volume */
+ mork_num valBulk = oldSlots * this->FormValSize(); /* values */
+
+ /* convenient variables for new arrays that need to be rehashed: */
+ morkAssoc** newBuckets = mMap_Buckets; /* new all zeroes */
+ morkAssoc* newAssocs = mMap_Assocs; /* hash into buckets */
+ morkAssoc* newFreeList = newAssocs + oldSlots; /* new room is free */
+ mork_u1* key = mMap_Keys; /* the first key to rehash */
+ --newAssocs; /* back up one before preincrement used in while loop */
+
+ /* move all old keys and values to the new arrays: */
+ MORK_MEMCPY(mMap_Keys, old.mHashArrays_Keys, keyBulk);
+ if (valBulk) /* are values nonzero sized? */
+ MORK_MEMCPY(mMap_Vals, old.mHashArrays_Vals, valBulk);
+
+ mMap_FreeList = newFreeList; /* remaining assocs are free */
+
+ while (++newAssocs < newFreeList) /* rehash another old assoc? */
+ {
+ morkAssoc** top = newBuckets + (this->Hash(ev, key) % newSlots);
+ key += this->FormKeySize(); /* the next key to rehash */
+ newAssocs->mAssoc_Next = *top; /* link to prev bucket top */
+ *top = newAssocs; /* push assoc on top of bucket */
+ }
+ ++mMap_Seed; /* note the map has changed */
+ old.finalize(ev); /* remember to free the old arrays */
+ }
+ } else
+ ev->OutOfMemoryError();
+
+ return ev->Good();
+}
+
+mork_bool morkMap::Put(morkEnv* ev, const void* inKey, const void* inVal,
+ void* outKey, void* outVal, mork_change** outChange) {
+ mork_bool outPut = morkBool_kFalse;
+
+ if (this->GoodMap()) /* looks good? */
+ {
+ mork_u4 hash = this->Hash(ev, inKey);
+ morkAssoc** ref = this->find(ev, inKey, hash);
+ if (ref) /* assoc was found? reuse an existing assoc slot? */
+ {
+ outPut = morkBool_kTrue; /* inKey was indeed already inside the map */
+ } else /* assoc not found -- need to allocate a new assoc slot */
+ {
+ morkAssoc* assoc = this->pop_free_assoc();
+ if (!assoc) /* no slots remaining in free list? must grow map? */
+ {
+ if (this->grow(ev)) /* successfully made map larger? */
+ assoc = this->pop_free_assoc();
+ }
+ if (assoc) /* allocated new assoc without error? */
+ {
+ ref = mMap_Buckets + (hash % mMap_Slots);
+ assoc->mAssoc_Next = *ref; /* link to prev bucket top */
+ *ref = assoc; /* push assoc on top of bucket */
+
+ ++mMap_Fill; /* one more member in the collection */
+ ++mMap_Seed; /* note the map has changed */
+ }
+ }
+ if (ref) /* did not have an error during possible growth? */
+ {
+ mork_pos i = (*ref) - mMap_Assocs; /* index of assoc */
+ if (outPut && (outKey || outVal)) /* copy old before cobbering? */
+ this->get_assoc(outKey, outVal, i);
+
+ this->put_assoc(inKey, inVal, i);
+ ++mMap_Seed; /* note the map has changed */
+
+ if (outChange) {
+ if (mMap_Changes)
+ *outChange = mMap_Changes + i;
+ else
+ *outChange = this->FormDummyChange();
+ }
+ }
+ } else
+ this->NewBadMapError(ev);
+
+ return outPut;
+}
+
+mork_num morkMap::CutAll(morkEnv* ev) {
+ mork_num outCutAll = 0;
+
+ if (this->GoodMap()) /* map looks good? */
+ {
+ mork_num slots = mMap_Slots;
+ morkAssoc* before = mMap_Assocs - 1; /* before first member */
+ morkAssoc* assoc = before + slots; /* the very last member */
+
+ ++mMap_Seed; /* note the map is changed */
+
+ /* make the assoc array a linked list headed by first & tailed by last: */
+ assoc->mAssoc_Next = 0; /* null terminate the new free list */
+ while (--assoc > before) /* another assoc to link into the list? */
+ assoc->mAssoc_Next = assoc + 1;
+ mMap_FreeList = mMap_Assocs; /* all are free */
+
+ outCutAll = mMap_Fill; /* we'll cut all of them of course */
+
+ mMap_Fill = 0; /* the map is completely empty */
+ } else
+ this->NewBadMapError(ev);
+
+ return outCutAll;
+}
+
+mork_bool morkMap::Cut(morkEnv* ev, const void* inKey, void* outKey,
+ void* outVal, mork_change** outChange) {
+ mork_bool outCut = morkBool_kFalse;
+
+ if (this->GoodMap()) /* looks good? */
+ {
+ mork_u4 hash = this->Hash(ev, inKey);
+ morkAssoc** ref = this->find(ev, inKey, hash);
+ if (ref) /* found an assoc for key? */
+ {
+ outCut = morkBool_kTrue;
+ morkAssoc* assoc = *ref;
+ mork_pos i = assoc - mMap_Assocs; /* index of assoc */
+ if (outKey || outVal) this->get_assoc(outKey, outVal, i);
+
+ *ref = assoc->mAssoc_Next; /* unlink the found assoc */
+ this->push_free_assoc(assoc); /* and put it in free list */
+
+ if (outChange) {
+ if (mMap_Changes)
+ *outChange = mMap_Changes + i;
+ else
+ *outChange = this->FormDummyChange();
+ }
+
+ ++mMap_Seed; /* note the map has changed */
+ if (mMap_Fill) /* the count shows nonzero members? */
+ --mMap_Fill; /* one less member in the collection */
+ else
+ this->NewSlotsUnderflowWarning(ev);
+ }
+ } else
+ this->NewBadMapError(ev);
+
+ return outCut;
+}
+
+mork_bool morkMap::Get(morkEnv* ev, const void* inKey, void* outKey,
+ void* outVal, mork_change** outChange) {
+ mork_bool outGet = morkBool_kFalse;
+
+ if (this->GoodMap()) /* looks good? */
+ {
+ mork_u4 hash = this->Hash(ev, inKey);
+ morkAssoc** ref = this->find(ev, inKey, hash);
+ if (ref) /* found an assoc for inKey? */
+ {
+ mork_pos i = (*ref) - mMap_Assocs; /* index of assoc */
+ outGet = morkBool_kTrue;
+ this->get_assoc(outKey, outVal, i);
+ if (outChange) {
+ if (mMap_Changes)
+ *outChange = mMap_Changes + i;
+ else
+ *outChange = this->FormDummyChange();
+ }
+ }
+ } else
+ this->NewBadMapError(ev);
+
+ return outGet;
+}
+
+morkMapIter::morkMapIter()
+ : mMapIter_Map(0),
+ mMapIter_Seed(0)
+
+ ,
+ mMapIter_Bucket(0),
+ mMapIter_AssocRef(0),
+ mMapIter_Assoc(0),
+ mMapIter_Next(0) {}
+
+void morkMapIter::InitMapIter(morkEnv* ev, morkMap* ioMap) {
+ mMapIter_Map = 0;
+ mMapIter_Seed = 0;
+
+ mMapIter_Bucket = 0;
+ mMapIter_AssocRef = 0;
+ mMapIter_Assoc = 0;
+ mMapIter_Next = 0;
+
+ if (ioMap) {
+ if (ioMap->GoodMap()) {
+ mMapIter_Map = ioMap;
+ mMapIter_Seed = ioMap->mMap_Seed;
+ } else
+ ioMap->NewBadMapError(ev);
+ } else
+ ev->NilPointerError();
+}
+
+morkMapIter::morkMapIter(morkEnv* ev, morkMap* ioMap)
+ : mMapIter_Map(0),
+ mMapIter_Seed(0)
+
+ ,
+ mMapIter_Bucket(0),
+ mMapIter_AssocRef(0),
+ mMapIter_Assoc(0),
+ mMapIter_Next(0) {
+ if (ioMap) {
+ if (ioMap->GoodMap()) {
+ mMapIter_Map = ioMap;
+ mMapIter_Seed = ioMap->mMap_Seed;
+ } else
+ ioMap->NewBadMapError(ev);
+ } else
+ ev->NilPointerError();
+}
+
+void morkMapIter::CloseMapIter(morkEnv* ev) {
+ MORK_USED_1(ev);
+ mMapIter_Map = 0;
+ mMapIter_Seed = 0;
+
+ mMapIter_Bucket = 0;
+ mMapIter_AssocRef = 0;
+ mMapIter_Assoc = 0;
+ mMapIter_Next = 0;
+}
+
+mork_change* morkMapIter::First(morkEnv* ev, void* outKey, void* outVal) {
+ mork_change* outFirst = 0;
+
+ morkMap* map = mMapIter_Map;
+
+ if (map && map->GoodMap()) /* map looks good? */
+ {
+ morkAssoc** bucket = map->mMap_Buckets;
+ morkAssoc** end = bucket + map->mMap_Slots; /* one past last */
+
+ mMapIter_Seed = map->mMap_Seed; /* sync the seeds */
+
+ while (bucket < end) /* another bucket in which to look for assocs? */
+ {
+ morkAssoc* assoc = *bucket++;
+ if (assoc) /* found the first map assoc in use? */
+ {
+ mork_pos i = assoc - map->mMap_Assocs;
+ mork_change* c = map->mMap_Changes;
+ outFirst = (c) ? (c + i) : map->FormDummyChange();
+
+ mMapIter_Assoc = assoc; /* current assoc in iteration */
+ mMapIter_Next = assoc->mAssoc_Next; /* more in bucket */
+ mMapIter_Bucket = --bucket; /* bucket for this assoc */
+ mMapIter_AssocRef = bucket; /* slot referencing assoc */
+
+ map->get_assoc(outKey, outVal, i);
+
+ break; /* end while loop */
+ }
+ }
+ } else
+ map->NewBadMapError(ev);
+
+ return outFirst;
+}
+
+mork_change* morkMapIter::Next(morkEnv* ev, void* outKey, void* outVal) {
+ mork_change* outNext = 0;
+
+ morkMap* map = mMapIter_Map;
+
+ if (map && map->GoodMap()) /* map looks good? */
+ {
+ if (mMapIter_Seed == map->mMap_Seed) /* in sync? */
+ {
+ morkAssoc* here = mMapIter_Assoc; /* current assoc */
+ if (here) /* iteration is not yet concluded? */
+ {
+ morkAssoc* next = mMapIter_Next;
+ morkAssoc* assoc = next; /* default new mMapIter_Assoc */
+ if (next) /* there are more assocs in the same bucket after Here? */
+ {
+ morkAssoc** ref = mMapIter_AssocRef;
+
+ /* (*HereRef) equals Here, except when Here has been cut, after
+ ** which (*HereRef) always equals Next. So if (*HereRef) is not
+ ** equal to Next, then HereRef still needs to be updated to point
+ ** somewhere else other than Here. Otherwise it is fine.
+ */
+ if (*ref != next) /* Here was not cut? must update HereRef? */
+ mMapIter_AssocRef = &here->mAssoc_Next;
+
+ mMapIter_Next = next->mAssoc_Next;
+ } else /* look for the next assoc in the next nonempty bucket */
+ {
+ morkAssoc** bucket = map->mMap_Buckets;
+ morkAssoc** end = bucket + map->mMap_Slots; /* beyond */
+ mMapIter_Assoc = 0; /* default to no more assocs */
+ bucket = mMapIter_Bucket; /* last exhausted bucket */
+ mMapIter_Assoc = 0; /* default to iteration ended */
+
+ while (++bucket < end) /* another bucket to search for assocs? */
+ {
+ assoc = *bucket;
+ if (assoc) /* found another map assoc in use? */
+ {
+ mMapIter_Bucket = bucket;
+ mMapIter_AssocRef = bucket; /* ref to assoc */
+ mMapIter_Next = assoc->mAssoc_Next; /* more */
+
+ break; /* end while loop */
+ }
+ }
+ }
+ if (assoc) /* did we find another assoc in the iteration? */
+ {
+ mMapIter_Assoc = assoc; /* current assoc */
+ mork_pos i = assoc - map->mMap_Assocs;
+ mork_change* c = map->mMap_Changes;
+ outNext = (c) ? (c + i) : map->FormDummyChange();
+
+ map->get_assoc(outKey, outVal, i);
+ }
+ }
+ } else
+ map->NewIterOutOfSyncError(ev);
+ } else
+ map->NewBadMapError(ev);
+
+ return outNext;
+}
+
+mork_change* morkMapIter::Here(morkEnv* ev, void* outKey, void* outVal) {
+ mork_change* outHere = 0;
+
+ morkMap* map = mMapIter_Map;
+
+ if (map && map->GoodMap()) /* map looks good? */
+ {
+ if (mMapIter_Seed == map->mMap_Seed) /* in sync? */
+ {
+ morkAssoc* here = mMapIter_Assoc; /* current assoc */
+ if (here) /* iteration is not yet concluded? */
+ {
+ mork_pos i = here - map->mMap_Assocs;
+ mork_change* c = map->mMap_Changes;
+ outHere = (c) ? (c + i) : map->FormDummyChange();
+
+ map->get_assoc(outKey, outVal, i);
+ }
+ } else
+ map->NewIterOutOfSyncError(ev);
+ } else
+ map->NewBadMapError(ev);
+
+ return outHere;
+}
+
+mork_change* morkMapIter::CutHere(morkEnv* ev, void* outKey, void* outVal) {
+ mork_change* outCutHere = 0;
+ morkMap* map = mMapIter_Map;
+
+ if (map && map->GoodMap()) /* map looks good? */
+ {
+ if (mMapIter_Seed == map->mMap_Seed) /* in sync? */
+ {
+ morkAssoc* here = mMapIter_Assoc; /* current assoc */
+ if (here) /* iteration is not yet concluded? */
+ {
+ morkAssoc** ref = mMapIter_AssocRef;
+ if (*ref != mMapIter_Next) /* not already cut? */
+ {
+ mork_pos i = here - map->mMap_Assocs;
+ mork_change* c = map->mMap_Changes;
+ outCutHere = (c) ? (c + i) : map->FormDummyChange();
+ if (outKey || outVal) map->get_assoc(outKey, outVal, i);
+
+ map->push_free_assoc(here); /* add to free list */
+ *ref = mMapIter_Next; /* unlink here from bucket list */
+
+ /* note the map has changed, but we are still in sync: */
+ mMapIter_Seed = ++map->mMap_Seed; /* sync */
+
+ if (map->mMap_Fill) /* still has nonzero value? */
+ --map->mMap_Fill; /* one less member in the collection */
+ else
+ map->NewSlotsUnderflowWarning(ev);
+ }
+ }
+ } else
+ map->NewIterOutOfSyncError(ev);
+ } else
+ map->NewBadMapError(ev);
+
+ return outCutHere;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkMap.h b/comm/mailnews/db/mork/morkMap.h
new file mode 100644
index 0000000000..0275755c4f
--- /dev/null
+++ b/comm/mailnews/db/mork/morkMap.h
@@ -0,0 +1,379 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MORKMAP_
+#define _MORKMAP_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/* (These hash methods closely resemble those in public domain IronDoc.) */
+
+/*| Equal: equal for hash table. Note equal(a,b) implies hash(a)==hash(b).
+|*/
+typedef mork_bool (*morkMap_mEqual)(const morkMap* self, morkEnv* ev,
+ const void* inKeyA, const void* inKeyB);
+
+/*| Hash: hash for hash table. Note equal(a,b) implies hash(a)==hash(b).
+|*/
+typedef mork_u4 (*morkMap_mHash)(const morkMap* self, morkEnv* ev,
+ const void* inKey);
+
+/*| IsNil: whether a key slot contains a "null" value denoting "no such key".
+|*/
+typedef mork_bool (*morkMap_mIsNil)(const morkMap* self, morkEnv* ev,
+ const void* inKey);
+
+/*| Note: notify regarding a refcounting change for a key or a value.
+|*/
+// typedef void (* morkMap_mNote)
+//(morkMap* self, morkEnv* ev, void* inKeyOrVal);
+
+/*| morkMapForm: slots need to initialize a new dict. (This is very similar
+**| to the config object for public domain IronDoc hash tables.)
+|*/
+class morkMapForm { // a struct of callback method pointers for morkMap
+ public:
+  // const void* mMapForm_NilKey; // externally defined 'nil' bit pattern
+
+  // void* mMapForm_NilBuf[ 8 ]; // potential place to put NilKey
+  // If keys are no larger than 8*sizeof(void*), NilKey can be put in NilBuf.
+  // Note this should be true for all Mork subclasses, and we plan usage so.
+
+  // These three methods must always be provided, so zero will cause errors:
+
+  // morkMap_mEqual mMapForm_Equal; // for comparing two keys for identity
+  // morkMap_mHash mMapForm_Hash; // deterministic key to hash method
+  // morkMap_mIsNil mMapForm_IsNil; // to query whether a key equals 'nil'
+
+  // If any of these method slots are nonzero, then morkMap will call the
+  // appropriate one to notify dict users when a key or value is added or cut.
+  // Presumably a caller wants to know this in order to perform refcounting or
+  // some other kind of memory management. These methods are definitely only
+  // called when references to keys or values are inserted or removed, and are
+  // never called when the actual number of references does not change (such
+  // as when added keys are already present or cut keys are already missing).
+  //
+  // morkMap_mNote mMapForm_AddKey; // if nonzero, notify about add key
+  // morkMap_mNote mMapForm_CutKey; // if nonzero, notify about cut key
+  // morkMap_mNote mMapForm_AddVal; // if nonzero, notify about add val
+  // morkMap_mNote mMapForm_CutVal; // if nonzero, notify about cut val
+  //
+  // These note methods have been removed because it seems difficult to
+  // guarantee suitable alignment of objects passed to notification methods.
+
+  // Note dict clients should pick key and val sizes that provide whatever
+  // alignment will be required for an array of such keys and values.
+  mork_size mMapForm_KeySize; // size of every key (cannot be zero)
+  mork_size mMapForm_ValSize; // size of every val (can indeed be zero)
+
+  mork_bool mMapForm_HoldChanges; // support changes array in the map
+  mork_change mMapForm_DummyChange; // change used for false HoldChanges
+  mork_bool mMapForm_KeyIsIP; // key is mork_ip sized
+  mork_bool mMapForm_ValIsIP; // val is mork_ip sized
+};
+
+/*| morkAssoc: a canonical association slot in a morkMap. A single assoc
+**| instance does nothing except point to the next assoc in the same bucket
+**| of a hash table. Each assoc has only two interesting attributes: 1) the
+**| address of the assoc, and 2) the next assoc in a bucket's list. The assoc
+**| address is interesting because in the context of an array of such assocs,
+**| one can determine the index of a particular assoc in the array by address
+**| arithmetic, subtracting the array address from the assoc address. And the
+**| index of each assoc is the same index as the associated key and val slots
+**| in the associated arrays
+**|
+**|| Think of an assoc instance as really also containing a key slot and a val
+**| slot, where each key is mMap_Form.mMapForm_KeySize bytes in size, and
+**| each val is mMap_Form.mMapForm_ValSize in size. But the key and val
+**| slots are stored in separate arrays with indexes that are parallel to the
+**| indexes in the array of morkAssoc instances. We have taken the variable
+**| sized slots out of the morkAssoc structure, and put them into parallel
+**| arrays associated with each morkAssoc by array index. And this leaves us
+**| with only the link field to the next assoc in each assoc instance.
+|*/
+class morkAssoc {
+ public:
+ morkAssoc* mAssoc_Next;
+};
+
+#define morkDerived_kMap /*i*/ 0x4D70 /* ascii 'Mp' */
+
+#define morkMap_kTag /*i*/ 0x6D4D6150 /* ascii 'mMaP' */
+
+/*| morkMap: a hash table based on the public domain IronDoc hash table
+**| (which is in turn rather like a similar OpenDoc hash table).
+|*/
+class morkMap : public morkNode {
+ // public: // slots inherited from morkNode (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ public: // state is public because the entire Mork system is private
+ nsIMdbHeap* mMap_Heap; // strong ref to heap allocating all space
+ mork_u4 mMap_Tag; // must equal morkMap_kTag
+
+ // When a morkMap instance is constructed, the dict form slots must be
+ // provided in order to properly configure a dict with all runtime needs:
+
+ morkMapForm mMap_Form; // construction time parameterization
+
+ // Whenever the dict changes structure in a way that would affect any
+ // iteration of the dict associations, the seed increments to show this:
+
+ mork_seed mMap_Seed; // counter for member and structural changes
+
+ // The current total assoc capacity of the dict is mMap_Slots, where
+ // mMap_Fill of these slots are actually holding content, so mMap_Fill
+ // is the actual membership count, and mMap_Slots is how larger membership
+ // can become before the hash table must grow the buffers being used.
+
+ mork_count mMap_Slots; // count of slots in the hash table
+ mork_fill mMap_Fill; // number of used slots in the hash table
+
+ // Key and value slots are bound to corresponding mMap_Assocs array slots.
+ // Instead of having a single array like this: {key,val,next}[ mMap_Slots ]
+ // we have instead three parallel arrays with essentially the same meaning:
+ // {key}[ mMap_Slots ], {val}[ mMap_Slots ], {assocs}[ mMap_Slots ]
+
+ mork_u1* mMap_Keys; // mMap_Slots * mMapForm_KeySize buffer
+ mork_u1* mMap_Vals; // mMap_Slots * mMapForm_ValSize buffer
+
+ // An assoc is "used" when it appears in a bucket's linked list of assocs.
+ // Until an assoc is used, it appears in the FreeList linked list. Every
+ // assoc that becomes used goes into the bucket determined by hashing the
+ // key associated with a new assoc. The key associated with a new assoc
+ // goes in to the slot in mMap_Keys which occupies exactly the same array
+ // index as the array index of the used assoc in the mMap_Assocs array.
+
+ morkAssoc* mMap_Assocs; // mMap_Slots * sizeof(morkAssoc) buffer
+
+  // The changes array is only needed when the map was constructed to hold
+  // changes (mMapForm_HoldChanges); otherwise mMap_Changes may be nil and
+  // mMapForm_DummyChange stands in for every change slot.
+
+ mork_change* mMap_Changes; // mMap_Slots * sizeof(mork_change) buffer
+
+ // The Buckets array need not be the same length as the Assocs array, but we
+ // usually do it that way so the average bucket depth is no more than one.
+ // (We could pick a different policy, or make it parameterizable, but that's
+ // tuning we can do some other time.)
+
+ morkAssoc** mMap_Buckets; // mMap_Slots * sizeof(morkAssoc*) buffer
+
+ // The length of the mMap_FreeList should equal (mMap_Slots - mMap_Fill).
+ // We need a free list instead of a simpler representation because assocs
+ // can be cut and returned to availability in any kind of unknown pattern.
+ // (However, when assocs are first allocated, or when the dict is grown, we
+ // know all new assocs are contiguous and can chain together adjacently.)
+
+ morkAssoc* mMap_FreeList; // list of unused mMap_Assocs array slots
+
+ public: // getters (morkProbeMap compatibility)
+ mork_fill MapFill() const { return mMap_Fill; }
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(morkEnv* ev) override; // CloseMap() only if open
+ virtual ~morkMap(); // assert that CloseMap() executed earlier
+
+ public: // morkMap construction & destruction
+ morkMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioNodeHeap,
+ mork_size inKeySize, mork_size inValSize, mork_size inSlots,
+ nsIMdbHeap* ioSlotHeap, mork_bool inHoldChanges);
+
+ void CloseMap(morkEnv* ev); // called by
+
+ public: // dynamic type identification
+ mork_bool IsMap() const {
+ return IsNode() && mNode_Derived == morkDerived_kMap;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // poly map hash table methods
+ // { ===== begin morkMap poly interface =====
+ virtual mork_bool // note: equal(a,b) implies hash(a) == hash(b)
+ Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const = 0;
+
+ virtual mork_u4 // note: equal(a,b) implies hash(a) == hash(b)
+ Hash(morkEnv* ev, const void* inKey) const = 0;
+ // } ===== end morkMap poly interface =====
+
+ public: // open utility methods
+ mork_bool GoodMapTag() const { return mMap_Tag == morkMap_kTag; }
+ mork_bool GoodMap() const { return (IsNode() && GoodMapTag()); }
+
+ void NewIterOutOfSyncError(morkEnv* ev);
+ void NewBadMapError(morkEnv* ev);
+ void NewSlotsUnderflowWarning(morkEnv* ev);
+ void InitMap(morkEnv* ev, mork_size inSlots);
+
+ protected: // internal utility methods
+ friend class morkMapIter;
+ void clear_map(morkEnv* ev, nsIMdbHeap* ioHeap);
+
+ void* alloc(morkEnv* ev, mork_size inSize);
+ void* clear_alloc(morkEnv* ev, mork_size inSize);
+
+ void push_free_assoc(morkAssoc* ioAssoc) {
+ ioAssoc->mAssoc_Next = mMap_FreeList;
+ mMap_FreeList = ioAssoc;
+ }
+
+ morkAssoc* pop_free_assoc() {
+ morkAssoc* assoc = mMap_FreeList;
+ if (assoc) mMap_FreeList = assoc->mAssoc_Next;
+ return assoc;
+ }
+
+ morkAssoc** find(morkEnv* ev, const void* inKey, mork_u4 inHash) const;
+
+ mork_u1* new_keys(morkEnv* ev, mork_num inSlots);
+ mork_u1* new_values(morkEnv* ev, mork_num inSlots);
+ mork_change* new_changes(morkEnv* ev, mork_num inSlots);
+ morkAssoc** new_buckets(morkEnv* ev, mork_num inSlots);
+ morkAssoc* new_assocs(morkEnv* ev, mork_num inSlots);
+ mork_bool new_arrays(morkEnv* ev, morkHashArrays* old, mork_num inSlots);
+
+ mork_bool grow(morkEnv* ev);
+
+ void get_assoc(void* outKey, void* outVal, mork_pos inPos) const;
+ void put_assoc(const void* inKey, const void* inVal, mork_pos inPos) const;
+
+ public: // inlines to form slots
+ // const void* FormNilKey() const { return mMap_Form.mMapForm_NilKey; }
+
+ // morkMap_mEqual FormEqual() const { return mMap_Form.mMapForm_Equal; }
+ // morkMap_mHash FormHash() const { return mMap_Form.mMapForm_Hash; }
+  // morkMap_mIsNil FormIsNil() const { return mMap_Form.mMapForm_IsNil; }
+
+ // morkMap_mNote FormAddKey() const { return mMap_Form.mMapForm_AddKey; }
+ // morkMap_mNote FormCutKey() const { return mMap_Form.mMapForm_CutKey; }
+ // morkMap_mNote FormAddVal() const { return mMap_Form.mMapForm_AddVal; }
+ // morkMap_mNote FormCutVal() const { return mMap_Form.mMapForm_CutVal; }
+
+ mork_size FormKeySize() const { return mMap_Form.mMapForm_KeySize; }
+ mork_size FormValSize() const { return mMap_Form.mMapForm_ValSize; }
+
+ mork_bool FormKeyIsIP() const { return mMap_Form.mMapForm_KeyIsIP; }
+ mork_bool FormValIsIP() const { return mMap_Form.mMapForm_ValIsIP; }
+
+ mork_bool FormHoldChanges() const { return mMap_Form.mMapForm_HoldChanges; }
+
+ mork_change* FormDummyChange() { return &mMap_Form.mMapForm_DummyChange; }
+
+ public: // other map methods
+ mork_bool Put(morkEnv* ev, const void* inKey, const void* inVal, void* outKey,
+ void* outVal, mork_change** outChange);
+
+ mork_bool Cut(morkEnv* ev, const void* inKey, void* outKey, void* outVal,
+ mork_change** outChange);
+
+ mork_bool Get(morkEnv* ev, const void* inKey, void* outKey, void* outVal,
+ mork_change** outChange);
+
+ mork_num CutAll(morkEnv* ev);
+
+ private: // copying is not allowed
+ morkMap(const morkMap& other);
+ morkMap& operator=(const morkMap& other);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakMap(morkMap* me, morkEnv* ev, morkMap** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongMap(morkMap* me, morkEnv* ev, morkMap** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+/*| morkMapIter: an iterator for morkMap and subclasses. This is not a node,
+**| and expected usage is as a member of some other node subclass, such as in
+**| a cursor subclass or a thumb subclass. Also, iters might be as temp stack
+**| objects when scanning the content of a map.
+|*/
+class morkMapIter { // iterator for hash table map
+
+ protected:
+  morkMap* mMapIter_Map; // map to iterate, NOT refcounted
+  mork_seed mMapIter_Seed; // cached copy of map's seed
+
+  morkAssoc** mMapIter_Bucket; // one bucket in mMap_Buckets array
+  morkAssoc** mMapIter_AssocRef; // usually *mMapIter_AssocRef == mMapIter_Assoc
+  morkAssoc* mMapIter_Assoc; // the current assoc in an iteration
+  morkAssoc* mMapIter_Next; // cached mMapIter_Assoc->mAssoc_Next
+
+ public:
+  morkMapIter(morkEnv* ev, morkMap* ioMap);
+  void CloseMapIter(morkEnv* ev);
+
+  morkMapIter(); // everything set to zero -- need to call InitMapIter()
+
+ protected: // we want all subclasses to provide typesafe wrappers:
+  void InitMapIter(morkEnv* ev, morkMap* ioMap);
+
+  // Each mork_change* returned below is either a real change slot or
+  // else nil (when there is no such assoc). We return a pointer to
+  // the change rather than a simple bool, because callers might
+  // want to access change info associated with an assoc.
+
+  mork_change* First(morkEnv* ev, void* outKey, void* outVal);
+  mork_change* Next(morkEnv* ev, void* outKey, void* outVal);
+  mork_change* Here(morkEnv* ev, void* outKey, void* outVal);
+
+  mork_change* CutHere(morkEnv* ev, void* outKey, void* outVal);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKMAP_ */
diff --git a/comm/mailnews/db/mork/morkNode.cpp b/comm/mailnews/db/mork/morkNode.cpp
new file mode 100644
index 0000000000..c12fc6a0fd
--- /dev/null
+++ b/comm/mailnews/db/mork/morkNode.cpp
@@ -0,0 +1,550 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKHANDLE_
+# include "morkHandle.h"
+#endif
+
+/*3456789_123456789_123456789_123456789_123456789_123456789_123456789_12345678*/
+
+/* ===== ===== ===== ===== morkUsage ===== ===== ===== ===== */
+
+static morkUsage morkUsage_gHeap; // ensure EnsureReadyStaticUsage()
+const morkUsage& morkUsage::kHeap = morkUsage_gHeap;
+
+static morkUsage morkUsage_gStack; // ensure EnsureReadyStaticUsage()
+const morkUsage& morkUsage::kStack = morkUsage_gStack;
+
+static morkUsage morkUsage_gMember; // ensure EnsureReadyStaticUsage()
+const morkUsage& morkUsage::kMember = morkUsage_gMember;
+
+static morkUsage morkUsage_gGlobal; // ensure EnsureReadyStaticUsage()
+const morkUsage& morkUsage::kGlobal = morkUsage_gGlobal;
+
+static morkUsage morkUsage_gPool; // ensure EnsureReadyStaticUsage()
+const morkUsage& morkUsage::kPool = morkUsage_gPool;
+
+static morkUsage morkUsage_gNone; // ensure EnsureReadyStaticUsage()
+const morkUsage& morkUsage::kNone = morkUsage_gNone;
+
+// This must be structured to allow for non-zero values in global variables
+// just before static init time. We can only safely check for whether a
+// global has the address of some other global. Please, do not initialize
+// either of the variables below to zero, because this could break when a zero
+// is assigned at static init time, but after EnsureReadyStaticUsage() runs.
+
+static mork_u4 morkUsage_g_static_init_target; // only address of this matters
+static mork_u4* morkUsage_g_static_init_done; // is address of target above?
+
+#define morkUsage_do_static_init() \
+ (morkUsage_g_static_init_done = &morkUsage_g_static_init_target)
+
+#define morkUsage_need_static_init() \
+ (morkUsage_g_static_init_done != &morkUsage_g_static_init_target)
+
+/*static*/
+// Idempotently give the six static morkUsage globals their usage codes.
+// Safe to call at any point during static initialization, because the
+// done-flag is an address comparison rather than a zero-initialized bool
+// (see the comment above morkUsage_g_static_init_target).
+void morkUsage::EnsureReadyStaticUsage() {
+  if (morkUsage_need_static_init()) {
+    morkUsage_do_static_init();
+
+    morkUsage_gHeap.InitUsage(morkUsage_kHeap);
+    morkUsage_gStack.InitUsage(morkUsage_kStack);
+    morkUsage_gMember.InitUsage(morkUsage_kMember);
+    morkUsage_gGlobal.InitUsage(morkUsage_kGlobal);
+    morkUsage_gPool.InitUsage(morkUsage_kPool);
+    morkUsage_gNone.InitUsage(morkUsage_kNone);
+  }
+}
+
+/*static*/
+const morkUsage& morkUsage::GetHeap() // kHeap safe at static init time
+{
+ EnsureReadyStaticUsage();
+ return morkUsage_gHeap;
+}
+
+/*static*/
+const morkUsage& morkUsage::GetStack() // kStack safe at static init time
+{
+ EnsureReadyStaticUsage();
+ return morkUsage_gStack;
+}
+
+/*static*/
+const morkUsage& morkUsage::GetMember() // kMember safe at static init time
+{
+ EnsureReadyStaticUsage();
+ return morkUsage_gMember;
+}
+
+/*static*/
+const morkUsage& morkUsage::GetGlobal() // kGlobal safe at static init time
+{
+ EnsureReadyStaticUsage();
+ return morkUsage_gGlobal;
+}
+
+/*static*/
+const morkUsage& morkUsage::GetPool() // kPool safe at static init time
+{
+ EnsureReadyStaticUsage();
+ return morkUsage_gPool;
+}
+
+/*static*/
+const morkUsage& morkUsage::GetNone() // kNone safe at static init time
+{
+ EnsureReadyStaticUsage();
+ return morkUsage_gNone;
+}
+
+// Default ctor: note mUsage_Code is deliberately left unset here; this
+// ctor serves the static globals above, whose codes are assigned later
+// by EnsureReadyStaticUsage() via InitUsage().
+morkUsage::morkUsage() {
+  if (morkUsage_need_static_init()) {
+    morkUsage::EnsureReadyStaticUsage();
+  }
+}
+
+// Code-bearing ctor: also forces the static usage globals ready, so that
+// kHeap et al. are usable even before this file's static init has run.
+morkUsage::morkUsage(mork_usage code) : mUsage_Code(code) {
+  if (morkUsage_need_static_init()) {
+    morkUsage::EnsureReadyStaticUsage();
+  }
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*static*/ void* morkNode::MakeNew(size_t inSize, nsIMdbHeap& ioHeap,
+                                   morkEnv* ev) {
+  // Allocate inSize raw bytes from ioHeap (for placement construction of
+  // a node); reports OutOfMemoryError on ev and returns nil on failure.
+  void* node = 0;
+  ioHeap.Alloc(ev->AsMdbEnv(), inSize, (void**)&node);
+  if (!node) ev->OutOfMemoryError();
+
+  return node;
+}
+
+/*public non-poly*/ void morkNode::ZapOld(morkEnv* ev, nsIMdbHeap* ioHeap) {
+  // Destroy a node whose last reference is gone: run the destructor
+  // explicitly, then return the storage either to ioHeap (heap-allocated
+  // nodes) or to a handle pool (pool-usage morkHandle nodes).
+  if (this->IsNode()) {
+    mork_usage usage = mNode_Usage; // mNode_Usage before ~morkNode
+    // NOTE(review): the qualified call below runs only morkNode's dtor
+    // (qualified dtor calls suppress virtual dispatch); presumably derived
+    // teardown happened earlier via CloseMorkNode() -- confirm.
+    this->morkNode::~morkNode(); // first call polymorphic destructor
+    if (ioHeap) // was this node heap allocated?
+      ioHeap->Free(ev->AsMdbEnv(), this);
+    else if (usage == morkUsage_kPool) // mNode_Usage before ~morkNode
+    {
+      // Pool nodes are really handles; give the face back to its pool,
+      // preferring the env's pool, falling back to the handle's own env.
+      morkHandle* h = (morkHandle*)this;
+      if (h->IsHandle() && h->GoodHandleTag()) {
+        if (h->mHandle_Face) {
+          if (ev->mEnv_HandlePool)
+            ev->mEnv_HandlePool->ZapHandle(ev, h->mHandle_Face);
+          else if (h->mHandle_Env && h->mHandle_Env->mEnv_HandlePool)
+            h->mHandle_Env->mEnv_HandlePool->ZapHandle(ev, h->mHandle_Face);
+        } else
+          ev->NilPointerError();
+      }
+    }
+  } else
+    this->NonNodeError(ev);
+}
+
+/*public virtual*/ void morkNode::CloseMorkNode(
+    morkEnv* ev) // CloseNode() only if open
+{
+  // Polymorphic close: does nothing unless the node is currently open,
+  // and marks the node closing for the duration so re-entry is visible.
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseNode(ev);
+    this->MarkShut();
+  }
+}
+// nsIMdbEnv flavor simply forwards to the morkEnv flavor below.
+NS_IMETHODIMP
+morkNode::CloseMdbObject(nsIMdbEnv* mev) {
+  return morkNode::CloseMdbObject((morkEnv*)mev);
+}
+
+// Close the node on behalf of the public MDB interface. With exactly one
+// use outstanding, dropping that strong ref both closes and releases.
+nsresult morkNode::CloseMdbObject(morkEnv* ev) {
+  // if only one ref, Handle_CutStrongRef will clean up better.
+  if (mNode_Uses == 1)
+    // XXX Casting mork_uses to nsresult
+    return static_cast<nsresult>(CutStrongRef(ev));
+
+  nsresult outErr = NS_OK;
+
+  if (IsNode() && IsOpenNode()) {
+    if (ev) {
+      CloseMorkNode(ev);
+      outErr = ev->AsErr();
+    }
+  }
+  return outErr;
+}
+
+/*public virtual*/
+morkNode::~morkNode() // assert that CloseNode() executed earlier
+{
+ MORK_ASSERT(this->IsShutNode() ||
+ IsDeadNode()); // sometimes we call destructor explicitly w/o
+ // freeing object.
+ mNode_Access = morkAccess_kDead;
+ mNode_Usage = morkUsage_kNone;
+}
+
+/*public virtual*/
+// void CloseMorkNode(morkEnv* ev) = 0; // CloseNode() only if open
+// CloseMorkNode() is the polymorphic close method called when uses==0,
+// which must do NOTHING at all when IsOpenNode() is not true. Otherwise,
+// CloseMorkNode() should call a static close method specific to an object.
+// Each such static close method should either call inherited static close
+// methods, or else perform the consolidated effect of calling them, where
+// subclasses should closely track any changes in base classes with care.
+
+/*public non-poly*/
+morkNode::morkNode(mork_usage inCode)
+ : mNode_Heap(0),
+ mNode_Base(morkBase_kNode),
+ mNode_Derived(0) // until subclass sets appropriately
+ ,
+ mNode_Access(morkAccess_kOpen),
+ mNode_Usage(inCode),
+ mNode_Mutable(morkAble_kEnabled),
+ mNode_Load(morkLoad_kClean),
+ mNode_Uses(1),
+ mNode_Refs(1) {}
+
+/*public non-poly*/
+// Construct an open node with one strong and one weak reference, bound to
+// the given heap (which may be nil for non-heap usages).
+morkNode::morkNode(const morkUsage& inUsage, nsIMdbHeap* ioHeap)
+    : mNode_Heap(ioHeap),
+      mNode_Base(morkBase_kNode),
+      mNode_Derived(0) // until subclass sets appropriately
+      ,
+      mNode_Access(morkAccess_kOpen),
+      mNode_Usage(inUsage.Code()),
+      mNode_Mutable(morkAble_kEnabled),
+      mNode_Load(morkLoad_kClean),
+      mNode_Uses(1),
+      mNode_Refs(1) {
+  // Heap-usage nodes require a heap; with no env available to report an
+  // error, a debug assert is the only recourse here (cf. the env-taking
+  // ctor, which calls NilHeapError instead).
+  if (!ioHeap && mNode_Usage == morkUsage_kHeap) MORK_ASSERT(ioHeap);
+}
+
+/*public non-poly*/
+morkNode::morkNode(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap)
+ : mNode_Heap(ioHeap),
+ mNode_Base(morkBase_kNode),
+ mNode_Derived(0) // until subclass sets appropriately
+ ,
+ mNode_Access(morkAccess_kOpen),
+ mNode_Usage(inUsage.Code()),
+ mNode_Mutable(morkAble_kEnabled),
+ mNode_Load(morkLoad_kClean),
+ mNode_Uses(1),
+ mNode_Refs(1) {
+ if (!ioHeap && mNode_Usage == morkUsage_kHeap) {
+ this->NilHeapError(ev);
+ }
+}
+
+/*protected non-poly*/ void morkNode::RefsUnderUsesWarning(morkEnv* ev) const {
+ ev->NewError("mNode_Refs < mNode_Uses");
+}
+
+/*protected non-poly*/ void morkNode::NonNodeError(
+ morkEnv* ev) const // called when IsNode() is false
+{
+ ev->NewError("non-morkNode");
+}
+
+/*protected non-poly*/ void morkNode::NonOpenNodeError(
+ morkEnv* ev) const // when IsOpenNode() is false
+{
+ ev->NewError("non-open-morkNode");
+}
+
+/*protected non-poly*/ void morkNode::NonMutableNodeError(
+ morkEnv* ev) const // when IsMutable() is false
+{
+ ev->NewError("non-mutable-morkNode");
+}
+
+/*protected non-poly*/ void morkNode::NilHeapError(
+ morkEnv* ev) const // zero mNode_Heap w/ kHeap usage
+{
+ ev->NewError("nil mNode_Heap");
+}
+
+/*protected non-poly*/ void morkNode::RefsOverflowWarning(
+ morkEnv* ev) const // mNode_Refs overflow
+{
+ ev->NewWarning("mNode_Refs overflow");
+}
+
+/*protected non-poly*/ void morkNode::UsesOverflowWarning(
+ morkEnv* ev) const // mNode_Uses overflow
+{
+ ev->NewWarning("mNode_Uses overflow");
+}
+
+/*protected non-poly*/ void morkNode::RefsUnderflowWarning(
+ morkEnv* ev) const // mNode_Refs underflow
+{
+ ev->NewWarning("mNode_Refs underflow");
+}
+
+/*protected non-poly*/ void morkNode::UsesUnderflowWarning(
+ morkEnv* ev) const // mNode_Uses underflow
+{
+ ev->NewWarning("mNode_Uses underflow");
+}
+
+/*public non-poly*/ void morkNode::CloseNode(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode())
+ this->MarkShut();
+ else
+ this->NonNodeError(ev);
+}
+
+extern void // utility method very similar to morkNode::SlotStrongNode():
+nsIMdbFile_SlotStrongFile(nsIMdbFile* self, morkEnv* ev, nsIMdbFile** ioSlot)
+// If *ioSlot is non-nil, that file is released by CutStrongRef() and
+// then zeroed out. Then if self is non-nil, this is acquired by
+// calling AddStrongRef(), and if the return value shows success,
+// then self is put into slot *ioSlot. Note self can be nil, so we take
+// expression 'nsIMdbFile_SlotStrongFile(0, ev, &slot)'.
+{
+ nsIMdbFile* file = *ioSlot;
+ if (self != file) {
+ if (file) {
+ *ioSlot = 0;
+ NS_RELEASE(file);
+ }
+ if (self && ev->Good()) NS_ADDREF(*ioSlot = self);
+ }
+}
+
+void // utility method very similar to morkNode::SlotStrongNode():
+nsIMdbHeap_SlotStrongHeap(nsIMdbHeap* self, morkEnv* ev, nsIMdbHeap** ioSlot)
+// If *ioSlot is non-nil it is zeroed out, and then if self is non-nil and
+// ev->Good(), self is put into slot *ioSlot. Note that unlike the file and
+// node variants, no refcounting calls are made here: pointers are merely
+// swapped. Note self can be nil, so we permit expression
+// 'nsIMdbHeap_SlotStrongHeap(0, ev, &slot)'.
+{
+  nsIMdbHeap* heap = *ioSlot;
+  if (self != heap) {
+    if (heap) *ioSlot = 0;
+
+    if (self && ev->Good()) *ioSlot = self;
+  }
+}
+
+/*public static*/ void morkNode::SlotStrongNode(morkNode* me, morkEnv* ev,
+ morkNode** ioSlot)
+// If *ioSlot is non-nil, that node is released by CutStrongRef() and
+// then zeroed out. Then if me is non-nil, this is acquired by
+// calling AddStrongRef(), and if positive is returned to show success,
+// then me is put into slot *ioSlot. Note me can be nil, so we take
+// expression 'morkNode::SlotStrongNode((morkNode*) 0, ev, &slot)'.
+{
+ morkNode* node = *ioSlot;
+ if (me != node) {
+ if (node) {
+ // what if this nulls out the ev and causes asserts?
+ // can we move this after the CutStrongRef()?
+ *ioSlot = 0;
+ node->CutStrongRef(ev);
+ }
+ if (me && me->AddStrongRef(ev)) *ioSlot = me;
+ }
+}
+
+/*public static*/ void morkNode::SlotWeakNode(morkNode* me, morkEnv* ev,
+ morkNode** ioSlot)
+// If *ioSlot is non-nil, that node is released by CutWeakRef() and
+// then zeroed out. Then if me is non-nil, this is acquired by
+// calling AddWeakRef(), and if positive is returned to show success,
+// then me is put into slot *ioSlot. Note me can be nil, so we
+// expression 'morkNode::SlotWeakNode((morkNode*) 0, ev, &slot)'.
+{
+ morkNode* node = *ioSlot;
+ if (me != node) {
+ if (node) {
+ *ioSlot = 0;
+ node->CutWeakRef(ev);
+ }
+ if (me && me->AddWeakRef(ev)) *ioSlot = me;
+ }
+}
+
+/*public non-poly*/ mork_uses morkNode::AddStrongRef(morkEnv* ev) {
+  // Add one strong reference: bumps both mNode_Uses and mNode_Refs, since
+  // every strong ref is also counted among the weak refs. Returns the new
+  // use count, or zero when this is not a well-formed node.
+  mork_uses outUses = 0;
+  if (this->IsNode()) {
+    mork_uses uses = mNode_Uses;
+    mork_refs refs = mNode_Refs;
+    if (refs < uses) // need to fix broken refs/uses relation?
+    {
+      // Repair the invariant refs >= uses before counting further.
+      this->RefsUnderUsesWarning(ev);
+      mNode_Refs = mNode_Uses = refs = uses;
+    }
+    if (refs < morkNode_kMaxRefCount) // not too great?
+    {
+      mNode_Refs = ++refs;
+      mNode_Uses = ++uses;
+    } else
+      this->RefsOverflowWarning(ev);
+
+    outUses = uses;
+  } else
+    this->NonNodeError(ev);
+  return outUses;
+}
+
+/*private non-poly*/ mork_bool morkNode::cut_use_count(
+    morkEnv* ev) // just one part of CutStrongRef()
+{
+  // Drop one use; when the last use goes away, close the node. Closing is
+  // distinct from destroying -- destruction waits until the last weak ref
+  // is cut in CutWeakRef(). Returns true when this is a well-formed node.
+  mork_bool didCut = morkBool_kFalse;
+  if (this->IsNode()) {
+    mork_uses uses = mNode_Uses;
+    if (uses) // not yet zero?
+      mNode_Uses = --uses;
+    else
+      this->UsesUnderflowWarning(ev);
+
+    didCut = morkBool_kTrue;
+    if (!mNode_Uses) // last use gone? time to close node?
+    {
+      if (this->IsOpenNode()) {
+        if (!mNode_Refs) // no outstanding reference?
+        {
+          this->RefsUnderflowWarning(ev);
+          ++mNode_Refs; // prevent potential crash during close
+        }
+        this->CloseMorkNode(ev); // polymorphic self close
+        // (Note CutNode() is not polymorphic -- so don't call that.)
+      }
+    }
+  } else
+    this->NonNodeError(ev);
+  return didCut;
+}
+
+/*public non-poly*/ mork_uses morkNode::CutStrongRef(morkEnv* ev) {
+ mork_refs outRefs = 0;
+ if (this->IsNode()) {
+ if (this->cut_use_count(ev)) outRefs = this->CutWeakRef(ev);
+ } else
+ this->NonNodeError(ev);
+
+ return outRefs;
+}
+
+/*public non-poly*/ mork_refs morkNode::AddWeakRef(morkEnv* ev) {
+ mork_refs outRefs = 0;
+ if (this->IsNode()) {
+ mork_refs refs = mNode_Refs;
+ if (refs < morkNode_kMaxRefCount) // not too great?
+ mNode_Refs = ++refs;
+ else
+ this->RefsOverflowWarning(ev);
+
+ outRefs = refs;
+ } else
+ this->NonNodeError(ev);
+
+ return outRefs;
+}
+
+/*public non-poly*/ mork_refs morkNode::CutWeakRef(morkEnv* ev) {
+  // Drop one weak reference; when the count reaches zero the node storage
+  // is reclaimed via ZapOld(). Returns the new ref count; zero means this
+  // object has destroyed itself and must not be touched again.
+  mork_refs outRefs = 0;
+  if (this->IsNode()) {
+    mork_uses uses = mNode_Uses;
+    mork_refs refs = mNode_Refs;
+    if (refs) // not yet zero?
+      mNode_Refs = --refs;
+    else
+      this->RefsUnderflowWarning(ev);
+
+    if (refs < uses) // need to fix broken refs/uses relation?
+    {
+      this->RefsUnderUsesWarning(ev);
+      mNode_Refs = mNode_Uses = refs = uses;
+    }
+
+    outRefs = refs;
+    if (!refs) // last reference gone? time to destroy node?
+      this->ZapOld(ev, mNode_Heap); // self destroy, use this no longer
+  } else
+    this->NonNodeError(ev);
+
+  return outRefs;
+}
+
+static const char morkNode_kBroken[] = "broken";
+
+// Map mNode_Access to a short literal for logging; "broken" for any
+// unrecognized value.
+/*public non-poly*/ const char* morkNode::GetNodeAccessAsString()
+    const // e.g. "open", "shut", etc.
+{
+  const char* outString = morkNode_kBroken;
+  switch (mNode_Access) {
+    case morkAccess_kOpen:
+      outString = "open";
+      break;
+    case morkAccess_kClosing:
+      outString = "closing";
+      break;
+    case morkAccess_kShut:
+      outString = "shut";
+      break;
+    case morkAccess_kDead:
+      outString = "dead";
+      break;
+  }
+  return outString;
+}
+
+// Map mNode_Usage to a short literal for logging; "broken" for any
+// unrecognized value.
+/*public non-poly*/ const char* morkNode::GetNodeUsageAsString()
+    const // e.g. "heap", "stack", etc.
+{
+  const char* outString = morkNode_kBroken;
+  switch (mNode_Usage) {
+    case morkUsage_kHeap:
+      outString = "heap";
+      break;
+    case morkUsage_kStack:
+      outString = "stack";
+      break;
+    case morkUsage_kMember:
+      outString = "member";
+      break;
+    case morkUsage_kGlobal:
+      outString = "global";
+      break;
+    case morkUsage_kPool:
+      outString = "pool";
+      break;
+    case morkUsage_kNone:
+      outString = "none";
+      break;
+  }
+  return outString;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkNode.h b/comm/mailnews/db/mork/morkNode.h
new file mode 100644
index 0000000000..6518487a30
--- /dev/null
+++ b/comm/mailnews/db/mork/morkNode.h
@@ -0,0 +1,290 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MORKNODE_
+#define _MORKNODE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkUsage_kHeap 'h'
+#define morkUsage_kStack 's'
+#define morkUsage_kMember 'm'
+#define morkUsage_kGlobal 'g'
+#define morkUsage_kPool 'p'
+#define morkUsage_kNone 'n'
+
+/*| morkUsage: a tiny wrapper around a single mork_usage code recording how
+**| an object was allocated (heap, stack, member, global, pool, or none).
+**| Static singleton instances exist for each code, plus GetXxx() accessors
+**| documented as safe to call during static initialization.
+|*/
+class morkUsage {
+ public:
+  mork_usage mUsage_Code; // kHeap, kStack, kMember, or kGhost
+
+ public:
+  explicit morkUsage(mork_usage inCode);
+
+  morkUsage(); // does nothing except maybe call EnsureReadyStaticUsage()
+  void InitUsage(mork_usage inCode) { mUsage_Code = inCode; }
+
+  ~morkUsage() {}
+  mork_usage Code() const { return mUsage_Code; }
+
+  // Make sure the kHeap..kNone singletons below are ready for use even
+  // before static constructors have all run (see GetXxx() accessors).
+  static void EnsureReadyStaticUsage();
+
+ public:
+  static const morkUsage& kHeap;   // morkUsage_kHeap
+  static const morkUsage& kStack;  // morkUsage_kStack
+  static const morkUsage& kMember; // morkUsage_kMember
+  static const morkUsage& kGlobal; // morkUsage_kGlobal
+  static const morkUsage& kPool;   // morkUsage_kPool
+  static const morkUsage& kNone;   // morkUsage_kNone
+
+  static const morkUsage& GetHeap();   // kHeap, safe at static init time
+  static const morkUsage& GetStack();  // kStack, safe at static init time
+  static const morkUsage& GetMember(); // kMember, safe at static init time
+  static const morkUsage& GetGlobal(); // kGlobal, safe at static init time
+  static const morkUsage& GetPool();   // kPool, safe at static init time
+  static const morkUsage& GetNone();   // kNone, safe at static init time
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkNode_kMaxRefCount 0x0FFFF /* count sticks if it hits this */
+
+#define morkBase_kNode /*i*/ 0x4E64 /* ascii 'Nd' */
+
+/*| morkNode: several groups of two-byte integers that track the basic
+**| status of an object that can be used to compose in-memory graphs.
+**| This is the base class for nsIMdbObject (which adds members that fit
+**| the needs of an nsIMdbObject subclass). The morkNode class is also used
+**| as the base class for other Mork db classes with no strong relation to
+**| the MDB class hierarchy.
+**|
+**|| Heap: the heap in which this node was allocated, when the usage equals
+**| morkUsage_kHeap to show dynamic allocation. Note this heap is NOT ref-
+**| counted, because that would be too great and complex a burden for all
+**| the nodes allocated in that heap. So heap users should take care to
+**| understand that nodes allocated in that heap are considered protected
+**| by some inclusive context in which all those nodes are allocated, and
+**| that context must maintain at least one strong refcount for the heap.
+**| Occasionally a node subclass will indeed wish to hold a refcounted
+**| reference to a heap, and possibly the same heap that is in mNode_Heap,
+**| but this is always done in a separate slot that explicitly refcounts,
+**| so we avoid confusing what is meant by the mNode_Heap slot.
+|*/
+class morkNode /*: public nsISupports */ { // base class for constructing Mork
+  // object graphs
+
+ public: // state is public because the entire Mork system is private
+  // NS_DECL_ISUPPORTS;
+  nsIMdbHeap* mNode_Heap; // NON-refcounted heap pointer
+
+  mork_base mNode_Base;       // must equal morkBase_kNode
+  mork_derived mNode_Derived; // depends on specific node subclass
+
+  mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  mork_usage mNode_Usage;   // kHeap, kStack, kMember, kGlobal, kNone
+  mork_able mNode_Mutable;  // can this node be modified?
+  mork_load mNode_Load;     // is this node clean or dirty?
+
+  mork_uses mNode_Uses; // refcount for strong refs
+  mork_refs mNode_Refs; // refcount for strong refs + weak refs
+  // Invariant: mNode_Refs >= mNode_Uses; weak-only count is the difference
+  // (see GoodRefs()/BadRefs()/WeakRefsOnly() below).
+
+ protected: // special case empty construction for morkHandleFrame
+  friend class morkHandleFrame;
+  morkNode() {}
+
+ public: // inlines for weird mNode_Mutable and mNode_Load constants
+  void SetFrozen() { mNode_Mutable = morkAble_kDisabled; }
+  void SetMutable() { mNode_Mutable = morkAble_kEnabled; }
+  void SetAsleep() { mNode_Mutable = morkAble_kAsleep; }
+
+  mork_bool IsFrozen() const { return mNode_Mutable == morkAble_kDisabled; }
+  mork_bool IsMutable() const { return mNode_Mutable == morkAble_kEnabled; }
+  mork_bool IsAsleep() const { return mNode_Mutable == morkAble_kAsleep; }
+
+  void SetNodeClean() { mNode_Load = morkLoad_kClean; }
+  void SetNodeDirty() { mNode_Load = morkLoad_kDirty; }
+
+  mork_bool IsNodeClean() const { return mNode_Load == morkLoad_kClean; }
+  mork_bool IsNodeDirty() const { return mNode_Load == morkLoad_kDirty; }
+
+ public: // morkNode memory management methods
+  // Allocate inSize bytes from ioHeap (backs operator new below).
+  static void* MakeNew(size_t inSize, nsIMdbHeap& ioHeap, morkEnv* ev);
+
+  void ZapOld(morkEnv* ev, nsIMdbHeap* ioHeap); // replaces operator delete()
+  // this->morkNode::~morkNode(); // first call polymorphic destructor
+  // if ( ioHeap ) // was this node heap allocated?
+  // ioHeap->Free(ev->AsMdbEnv(), this);
+
+ public: // morkNode memory management operators
+  void* operator new(size_t inSize, nsIMdbHeap& ioHeap,
+                     morkEnv* ev) noexcept(true) {
+    return morkNode::MakeNew(inSize, ioHeap, ev);
+  }
+
+ protected: // construction without an env needed for first env constructed:
+  morkNode(const morkUsage& inUsage, nsIMdbHeap* ioHeap);
+
+  explicit morkNode(mork_usage inCode); // usage == inCode, heap == nil
+
+  // { ===== begin basic node interface =====
+ public: // morkNode virtual methods
+  // virtual FlushMorkNode(morkEnv* ev, morkStream* ioStream);
+  // virtual WriteMorkNode(morkEnv* ev, morkStream* ioStream);
+
+  virtual ~morkNode();                     // assert that CloseNode() executed earlier
+  virtual void CloseMorkNode(morkEnv* ev); // CloseNode() only if open
+
+  // CloseMorkNode() is the polymorphic close method called when uses==0,
+  // which must do NOTHING at all when IsOpenNode() is not true. Otherwise,
+  // CloseMorkNode() should call a static close method specific to an object.
+  // Each such static close method should either call inherited static close
+  // methods, or else perform the consolidated effect of calling them, where
+  // subclasses should closely track any changes in base classes with care.
+
+ public: // morkNode construction
+  morkNode(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap);
+  void CloseNode(morkEnv* ev); // called by CloseMorkNode();
+  nsresult CloseMdbObject(morkEnv* ev);
+  NS_IMETHOD CloseMdbObject(nsIMdbEnv* ev);
+
+ private: // copying is not allowed
+  morkNode(const morkNode& other);
+  morkNode& operator=(const morkNode& other);
+
+ public: // dynamic type identification
+  mork_bool IsNode() const { return mNode_Base == morkBase_kNode; }
+  // } ===== end basic node methods =====
+
+ public: // public error & warning methods
+  void RefsUnderUsesWarning(
+      morkEnv* ev) const;                   // call if mNode_Refs < mNode_Uses
+  void NonNodeError(morkEnv* ev) const;     // call when IsNode() is false
+  void NilHeapError(morkEnv* ev) const;     // zero mNode_Heap when usage is kHeap
+  void NonOpenNodeError(morkEnv* ev) const; // call when IsOpenNode() is false
+
+  void NonMutableNodeError(morkEnv* ev) const; // when IsMutable() is false
+
+  void RefsOverflowWarning(morkEnv* ev) const;  // call on mNode_Refs overflow
+  void UsesOverflowWarning(morkEnv* ev) const;  // call on mNode_Uses overflow
+  void RefsUnderflowWarning(morkEnv* ev) const; // call on mNode_Refs underflow
+  void UsesUnderflowWarning(morkEnv* ev) const; // call on mNode_Uses underflow
+
+ private: // private refcounting methods
+  mork_bool cut_use_count(morkEnv* ev); // just one part of CutStrongRef()
+
+ public: // other morkNode methods
+  mork_bool GoodRefs() const { return mNode_Refs >= mNode_Uses; }
+  mork_bool BadRefs() const { return mNode_Refs < mNode_Uses; }
+
+  mork_uses StrongRefsOnly() const { return mNode_Uses; }
+  mork_refs WeakRefsOnly() const {
+    return (mork_refs)(mNode_Refs - mNode_Uses);
+  }
+
+  // (this refcounting derives from public domain IronDoc node refcounts)
+  virtual mork_uses AddStrongRef(morkEnv* ev);
+  virtual mork_uses CutStrongRef(morkEnv* ev);
+  mork_refs AddWeakRef(morkEnv* ev);
+  mork_refs CutWeakRef(morkEnv* ev);
+
+  const char* GetNodeAccessAsString() const; // e.g. "open", "shut", etc.
+  const char* GetNodeUsageAsString() const;  // e.g. "heap", "stack", etc.
+
+  mork_usage NodeUsage() const { return mNode_Usage; }
+
+  mork_bool IsHeapNode() const { return mNode_Usage == morkUsage_kHeap; }
+
+  mork_bool IsOpenNode() const { return mNode_Access == morkAccess_kOpen; }
+
+  mork_bool IsShutNode() const { return mNode_Access == morkAccess_kShut; }
+
+  mork_bool IsDeadNode() const { return mNode_Access == morkAccess_kDead; }
+
+  mork_bool IsClosingNode() const {
+    return mNode_Access == morkAccess_kClosing;
+  }
+
+  mork_bool IsOpenOrClosingNode() const {
+    return IsOpenNode() || IsClosingNode();
+  }
+
+  mork_bool HasNodeAccess() const {
+    return (IsOpenNode() || IsShutNode() || IsClosingNode());
+  }
+
+  void MarkShut() { mNode_Access = morkAccess_kShut; }
+  void MarkClosing() { mNode_Access = morkAccess_kClosing; }
+  void MarkDead() { mNode_Access = morkAccess_kDead; }
+
+ public: // refcounting for typesafe subclass inline methods
+  static void SlotWeakNode(morkNode* me, morkEnv* ev, morkNode** ioSlot);
+  // If *ioSlot is non-nil, that node is released by CutWeakRef() and
+  // then zeroed out. Then if me is non-nil, this is acquired by
+  // calling AddWeakRef(), and if positive is returned to show success,
+  // then this is put into slot *ioSlot. Note me can be nil, so we
+  // permit expression '((morkNode*) 0L)->SlotWeakNode(ev, &slot)'.
+
+  static void SlotStrongNode(morkNode* me, morkEnv* ev, morkNode** ioSlot);
+  // If *ioSlot is non-nil, that node is released by CutStrongRef() and
+  // then zeroed out. Then if me is non-nil, this is acquired by
+  // calling AddStrongRef(), and if positive is returned to show success,
+  // then me is put into slot *ioSlot. Note me can be nil, so we take
+  // expression 'morkNode::SlotStrongNode((morkNode*) 0, ev, &slot)'.
+};
+
+extern void // utility method very similar to morkNode::SlotStrongNode():
+nsIMdbHeap_SlotStrongHeap(nsIMdbHeap* self, morkEnv* ev, nsIMdbHeap** ioSlot);
+// If *ioSlot is non-nil, that heap is released by CutStrongRef() and
+// then zeroed out. Then if self is non-nil, this is acquired by
+// calling AddStrongRef(), and if the return value shows success,
+// then self is put into slot *ioSlot. Note self can be nil, so we take
+// expression 'nsIMdbHeap_SlotStrongHeap(0, ev, &slot)'.
+
+extern void // utility method very similar to morkNode::SlotStrongNode():
+nsIMdbFile_SlotStrongFile(nsIMdbFile* self, morkEnv* ev, nsIMdbFile** ioSlot);
+// If *ioSlot is non-nil, that file is released by CutStrongRef() and
+// then zeroed out. Then if self is non-nil, this is acquired by
+// calling AddStrongRef(), and if the return value shows success,
+// then self is put into slot *ioSlot. Note self can be nil, so we take
+// expression 'nsIMdbFile_SlotStrongFile(0, ev, &slot)'.
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKNODE_ */
diff --git a/comm/mailnews/db/mork/morkNodeMap.cpp b/comm/mailnews/db/mork/morkNodeMap.cpp
new file mode 100644
index 0000000000..a01b688b16
--- /dev/null
+++ b/comm/mailnews/db/mork/morkNodeMap.cpp
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKINTMAP_
+# include "morkIntMap.h"
+#endif
+
+#ifndef _MORKNODEMAP_
+# include "morkNodeMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkNodeMap::CloseMorkNode(
+    morkEnv* ev) // CloseNodeMap() only if open
+{
+  // Polymorphic close: act only while the node is still open, bracketing
+  // the real close method with the closing/shut access marks.
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseNodeMap(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkNodeMap::~morkNodeMap() // assert CloseNodeMap() executed earlier
+{
+  MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkNodeMap::morkNodeMap(morkEnv* ev, const morkUsage& inUsage,
+                         nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+    : morkIntMap(ev, inUsage, /*valsize*/ sizeof(morkNode*), ioHeap, ioSlotHeap,
+                 /*inHoldChanges*/ morkBool_kTrue) {
+  // Tag the node subtype only if base construction left ev in a good state.
+  if (ev->Good()) mNode_Derived = morkDerived_kNodeMap;
+}
+
+/*public non-poly*/ void morkNodeMap::CloseNodeMap(
+    morkEnv* ev) // called by CloseMorkNode();
+{
+  // Release the strong references held on all node values, then close the
+  // underlying map storage.
+  if (this->IsNode()) {
+    this->CutAllNodes(ev);
+    this->CloseMap(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+mork_bool morkNodeMap::AddNode(morkEnv* ev, mork_token inToken,
+                               morkNode* ioNode)
+// the AddNode() method return value equals ev->Good().
+{
+  // Store ioNode under key inToken and take a strong reference on it.
+  // If a different node was already stored under inToken, that old node's
+  // strong reference is released.  On any failure (Put error or refcount
+  // failure) the key is cut from the map so no un-owned entry remains.
+  if (ioNode && ev->Good()) {
+    morkNode* node = 0; // old val in the map
+
+    mork_bool put = this->Put(ev, &inToken, &ioNode,
+                              /*key*/ (void*)0, &node, (mork_change**)0);
+
+    if (put) // replaced an existing value for key inToken?
+    {
+      if (node && node != ioNode) // need to release old node?
+        node->CutStrongRef(ev);
+    }
+
+    if (ev->Bad() || !ioNode->AddStrongRef(ev)) {
+      // problems adding node or increasing refcount?
+      this->Cut(ev, &inToken, // make sure not in map
+                /*key*/ (void*)0, /*val*/ (void*)0, (mork_change**)0);
+    }
+  } else if (!ioNode)
+    ev->NilPointerError();
+
+  return ev->Good();
+}
+
+mork_bool morkNodeMap::CutNode(morkEnv* ev, mork_token inToken) {
+  // Remove the entry for inToken (if present) and release the map's strong
+  // reference on the removed node.  Returns whether an entry was removed.
+  morkNode* node = 0; // old val in the map
+  mork_bool outCutNode = this->Cut(ev, &inToken,
+                                   /*key*/ (void*)0, &node, (mork_change**)0);
+  if (node) node->CutStrongRef(ev);
+
+  return outCutNode;
+}
+
+morkNode* morkNodeMap::GetNode(morkEnv* ev, mork_token inToken)
+// Note the returned node does NOT have an increase in refcount for this.
+{
+  // Lookup only; returns nil when inToken has no entry.
+  morkNode* node = 0; // old val in the map
+  this->Get(ev, &inToken, /*key*/ (void*)0, &node, (mork_change**)0);
+
+  return node;
+}
+
+mork_num morkNodeMap::CutAllNodes(morkEnv* ev)
+// CutAllNodes() releases all the reference node values.
+{
+  // Iterate every entry, dropping the strong ref held on each node and
+  // cutting the entry itself.  Returns the slot count sampled before the
+  // purge began.
+  mork_num outSlots = mMap_Slots;
+  mork_token key = 0; // old key token in the map
+  morkNode* val = 0;  // old val node in the map
+
+  mork_change* c = 0;
+  morkNodeMapIter i(ev, this);
+  for (c = i.FirstNode(ev, &key, &val); c; c = i.NextNode(ev, &key, &val)) {
+    if (val) val->CutStrongRef(ev);
+    i.CutHereNode(ev, /*key*/ (mork_token*)0, /*val*/ (morkNode**)0);
+  }
+
+  return outSlots;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkNodeMap.h b/comm/mailnews/db/mork/morkNodeMap.h
new file mode 100644
index 0000000000..c2edc7007e
--- /dev/null
+++ b/comm/mailnews/db/mork/morkNodeMap.h
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKNODEMAP_
+#define _MORKNODEMAP_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKINTMAP_
+# include "morkIntMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kNodeMap /*i*/ 0x6E4D /* ascii 'nM' */
+
+#define morkNodeMap_kStartSlotCount 512
+
+/*| morkNodeMap: maps mork_token -> morkNode
+**| Entries hold a strong reference to their node values; the typesafe
+**| wrappers below manage acquiring and releasing those references.
+|*/
+class morkNodeMap : public morkIntMap { // for mapping tokens to nodes
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override; // CloseNodeMap() only if open
+  virtual ~morkNodeMap();    // assert that CloseNodeMap() executed earlier
+
+ public: // morkMap construction & destruction
+  morkNodeMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+              nsIMdbHeap* ioSlotHeap);
+  void CloseNodeMap(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+  mork_bool IsNodeMap() const {
+    return IsNode() && mNode_Derived == morkDerived_kNodeMap;
+  }
+  // } ===== end morkNode methods =====
+
+  // { ===== begin morkMap poly interface =====
+  // use the Equal() and Hash() for mork_u4 inherited from morkIntMap
+  // } ===== end morkMap poly interface =====
+
+ protected: // we want all subclasses to provide typesafe wrappers:
+  mork_bool AddNode(morkEnv* ev, mork_token inToken, morkNode* ioNode);
+  // the AddNode() boolean return equals ev->Good().
+
+  mork_bool CutNode(morkEnv* ev, mork_token inToken);
+  // The CutNode() boolean return indicates whether removal happened.
+
+  morkNode* GetNode(morkEnv* ev, mork_token inToken);
+  // Note the returned node does NOT have an increase in refcount for this.
+
+  mork_num CutAllNodes(morkEnv* ev);
+  // CutAllNodes() releases all the reference node values.
+};
+
+/*| morkNodeMapIter: typesafe wrapper around morkMapIter for walking a
+**| morkNodeMap's (token, node) entries; each method simply forwards to the
+**| corresponding untyped morkMapIter method.
+|*/
+class morkNodeMapIter : public morkMapIter { // typesafe wrapper class
+
+ public:
+  morkNodeMapIter(morkEnv* ev, morkNodeMap* ioMap) : morkMapIter(ev, ioMap) {}
+
+  morkNodeMapIter() : morkMapIter() {}
+  void InitNodeMapIter(morkEnv* ev, morkNodeMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  mork_change* FirstNode(morkEnv* ev, mork_token* outToken,
+                         morkNode** outNode) {
+    return this->First(ev, outToken, outNode);
+  }
+
+  mork_change* NextNode(morkEnv* ev, mork_token* outToken, morkNode** outNode) {
+    return this->Next(ev, outToken, outNode);
+  }
+
+  mork_change* HereNode(morkEnv* ev, mork_token* outToken, morkNode** outNode) {
+    return this->Here(ev, outToken, outNode);
+  }
+
+  mork_change* CutHereNode(morkEnv* ev, mork_token* outToken,
+                           morkNode** outNode) {
+    return this->CutHere(ev, outToken, outNode);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKNODEMAP_ */
diff --git a/comm/mailnews/db/mork/morkObject.cpp b/comm/mailnews/db/mork/morkObject.cpp
new file mode 100644
index 0000000000..227fa81f08
--- /dev/null
+++ b/comm/mailnews/db/mork/morkObject.cpp
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#ifndef _MORKHANDLE_
+# include "morkHandle.h"
+#endif
+
+#include "nsCOMPtr.h"
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+NS_IMPL_ISUPPORTS(morkObject, nsIMdbObject)
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkObject::CloseMorkNode(
+    morkEnv* ev) // CloseObject() only if open
+{
+  // Act only while still open, bracketing the real close with the
+  // closing/shut access marks.
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseObject(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkObject::~morkObject() // assert CloseObject() executed earlier
+{
+  // Close late (using the remembered creating env) if the owner never did;
+  // the weak handle must already be gone by destruction time.
+  if (!IsShutNode()) CloseMorkNode(this->mMorkEnv);
+  MORK_ASSERT(mObject_Handle == 0);
+}
+
+/*public non-poly*/
+morkObject::morkObject(const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                       mork_color inBeadColor)
+    : morkBead(inUsage, ioHeap, inBeadColor), mObject_Handle(0) {
+  // Special-case construction used before any morkEnv exists.
+  mMorkEnv = nullptr;
+}
+
+/*public non-poly*/
+morkObject::morkObject(morkEnv* ev, const morkUsage& inUsage,
+                       nsIMdbHeap* ioHeap, mork_color inBeadColor,
+                       morkHandle* ioHandle)
+    : morkBead(ev, inUsage, ioHeap, inBeadColor), mObject_Handle(0) {
+  // Remember the creating env (weak ref) and optionally bind a weak handle;
+  // only tag the node subtype if everything so far succeeded.
+  mMorkEnv = ev;
+  if (ev->Good()) {
+    if (ioHandle) morkHandle::SlotWeakHandle(ioHandle, ev, &mObject_Handle);
+
+    if (ev->Good()) mNode_Derived = morkDerived_kObject;
+  }
+}
+
+/*public non-poly*/ void morkObject::CloseObject(
+    morkEnv* ev) // called by CloseMorkNode();
+{
+  // Drop the weak handle reference and clear the bead color, then mark shut.
+  if (this->IsNode()) {
+    if (!this->IsShutNode()) {
+      if (mObject_Handle)
+        morkHandle::SlotWeakHandle((morkHandle*)0L, ev, &mObject_Handle);
+
+      mBead_Color = 0; // this->CloseBead(ev);
+      this->MarkShut();
+    }
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// { ----- begin factory methods -----
+NS_IMETHODIMP
+morkObject::GetMdbFactory(nsIMdbEnv* mev, nsIMdbFactory** acqFactory) {
+  // Delegate to the environment's own GetMdbFactory when the env also
+  // implements nsIMdbObject; otherwise report that no such interface exists.
+  nsresult rv;
+  nsCOMPtr<nsIMdbObject> obj = do_QueryInterface(mev);
+  if (obj)
+    rv = obj->GetMdbFactory(mev, acqFactory);
+  else
+    return NS_ERROR_NO_INTERFACE;
+
+  return rv;
+}
+// } ----- end factory methods -----
+
+// { ----- begin ref counting for well-behaved cyclic graphs -----
+// Thin nsIMdbObject wrappers over the morkNode refcounting primitives.
+// NOTE(review): the nsIMdbEnv* -> morkEnv* C-style casts assume the env
+// passed in is always a morkEnv — inherited from the original code.
+NS_IMETHODIMP
+morkObject::GetWeakRefCount(nsIMdbEnv* mev, // weak refs
+                            mdb_count* outCount) {
+  // Weak-only count = total refs minus strong uses (see morkNode).
+  *outCount = WeakRefsOnly();
+  return NS_OK;
+}
+NS_IMETHODIMP
+morkObject::GetStrongRefCount(nsIMdbEnv* mev, // strong refs
+                              mdb_count* outCount) {
+  *outCount = StrongRefsOnly();
+  return NS_OK;
+}
+// ### TODO - clean up this cast, if required
+NS_IMETHODIMP
+morkObject::AddWeakRef(nsIMdbEnv* mev) {
+  // XXX Casting mork_refs to nsresult
+  return static_cast<nsresult>(morkNode::AddWeakRef((morkEnv*)mev));
+}
+
+#ifndef _MSC_VER
+// Extra morkEnv* overload for non-MSVC builds; it exists to suppress
+// -Werror,-Woverloaded-virtual (see the declaration in morkObject.h).
+NS_IMETHODIMP_(mork_uses)
+morkObject::AddStrongRef(morkEnv* mev) { return morkNode::AddStrongRef(mev); }
+#endif
+
+NS_IMETHODIMP_(mork_uses)
+morkObject::AddStrongRef(nsIMdbEnv* mev) {
+  return morkNode::AddStrongRef((morkEnv*)mev);
+}
+
+NS_IMETHODIMP
+morkObject::CutWeakRef(nsIMdbEnv* mev) {
+  // XXX Casting mork_refs to nsresult
+  return static_cast<nsresult>(morkNode::CutWeakRef((morkEnv*)mev));
+}
+
+#ifndef _MSC_VER
+// Extra morkEnv* overload for non-MSVC builds (see morkObject.h).
+NS_IMETHODIMP_(mork_uses)
+morkObject::CutStrongRef(morkEnv* mev) { return morkNode::CutStrongRef(mev); }
+#endif
+
+NS_IMETHODIMP
+morkObject::CutStrongRef(nsIMdbEnv* mev) {
+  // XXX Casting mork_refs to nsresult
+  return static_cast<nsresult>(morkNode::CutStrongRef((morkEnv*)mev));
+}
+
+NS_IMETHODIMP
+morkObject::CloseMdbObject(nsIMdbEnv* mev) {
+  return morkNode::CloseMdbObject((morkEnv*)mev);
+}
+
+NS_IMETHODIMP
+morkObject::IsOpenMdbObject(nsIMdbEnv* mev, mdb_bool* outOpen) {
+  *outOpen = IsOpenNode();
+  return NS_OK;
+}
+NS_IMETHODIMP
+morkObject::IsFrozenMdbObject(nsIMdbEnv* mev, mdb_bool* outIsReadonly) {
+  *outIsReadonly = IsFrozen();
+  return NS_OK;
+}
+
+// void morkObject::NewNilHandleError(morkEnv* ev) // mObject_Handle is nil
+//{
+// ev->NewError("nil mObject_Handle");
+//}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkObject.h b/comm/mailnews/db/mork/morkObject.h
new file mode 100644
index 0000000000..9548c779d1
--- /dev/null
+++ b/comm/mailnews/db/mork/morkObject.h
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKOBJECT_
+#define _MORKOBJECT_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKBEAD_
+# include "morkBead.h"
+#endif
+
+#ifndef _MORKCONFIG_
+# include "morkConfig.h"
+#endif
+
+#ifndef _ORKINHEAP_
+# include "orkinHeap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kObject /*i*/ 0x6F42 /* ascii 'oB' */
+
+/*| morkObject: subclass of morkNode that adds knowledge of db suite factory
+**| and containing port to those objects that are exposed as instances of
+**| nsIMdbObject in the public interface.
+|*/
+class morkObject : public morkBead, public nsIMdbObject {
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+  // mork_color mBead_Color; // ID for this bead
+
+ public: // state is public because the entire Mork system is private
+  morkHandle* mObject_Handle; // weak ref to handle for this object
+
+  morkEnv* mMorkEnv; // weak ref to environment this object created in.
+
+  // { ===== begin morkNode interface =====
+ public:
+  virtual void CloseMorkNode(
+      morkEnv* ev) override; // CloseObject() only if open
+#ifdef MORK_DEBUG_HEAP_STATS
+  // Debug-only delete: walks back three mork_u4 words from the object to
+  // recover the owning heap pointer and frees through it.  NOTE(review):
+  // assumes the debug allocator's header layout — confirm against orkinHeap.
+  void operator delete(void* ioAddress, size_t size) {
+    mork_u4* array = (mork_u4*)ioAddress;
+    array -= 3;
+    orkinHeap* heap = (orkinHeap*)*array;
+    if (heap) heap->Free(nullptr, ioAddress);
+  }
+#endif
+
+  NS_DECL_ISUPPORTS
+
+  // { ----- begin attribute methods -----
+  NS_IMETHOD IsFrozenMdbObject(nsIMdbEnv* ev, mdb_bool* outIsReadonly) override;
+  // same as nsIMdbPort::GetIsPortReadonly() when this object is inside a port.
+  // } ----- end attribute methods -----
+
+  // { ----- begin factory methods -----
+  NS_IMETHOD GetMdbFactory(nsIMdbEnv* ev, nsIMdbFactory** acqFactory) override;
+  // } ----- end factory methods -----
+
+  // { ----- begin ref counting for well-behaved cyclic graphs -----
+  NS_IMETHOD GetWeakRefCount(nsIMdbEnv* ev, // weak refs
+                             mdb_count* outCount) override;
+  NS_IMETHOD GetStrongRefCount(nsIMdbEnv* ev, // strong refs
+                               mdb_count* outCount) override;
+
+  NS_IMETHOD AddWeakRef(nsIMdbEnv* ev) override;
+#ifndef _MSC_VER
+  // The first declaration of AddStrongRef is to suppress
+  // -Werror,-Woverloaded-virtual.
+  NS_IMETHOD_(mork_uses) AddStrongRef(morkEnv* ev) override;
+#endif
+  NS_IMETHOD_(mork_uses) AddStrongRef(nsIMdbEnv* ev) override;
+
+  NS_IMETHOD CutWeakRef(nsIMdbEnv* ev) override;
+#ifndef _MSC_VER
+  // The first declaration of CutStrongRef is to suppress
+  // -Werror,-Woverloaded-virtual.
+  NS_IMETHOD_(mork_uses) CutStrongRef(morkEnv* ev) override;
+#endif
+  NS_IMETHOD CutStrongRef(nsIMdbEnv* ev) override;
+
+  NS_IMETHOD CloseMdbObject(
+      nsIMdbEnv* ev) override; // called at strong refs zero
+  NS_IMETHOD IsOpenMdbObject(nsIMdbEnv* ev, mdb_bool* outOpen) override;
+  // } ----- end ref counting -----
+
+ protected: // special case construction of first env without preceding env
+  morkObject(const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+             mork_color inBeadColor);
+  virtual ~morkObject(); // assert that CloseObject() executed earlier
+
+ public: // morkEnv construction & destruction
+  morkObject(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+             mork_color inBeadColor,
+             morkHandle* ioHandle); // ioHandle can be nil
+  void CloseObject(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkObject(const morkObject& other);
+  morkObject& operator=(const morkObject& other);
+
+ public: // dynamic type identification
+  mork_bool IsObject() const {
+    return IsNode() && mNode_Derived == morkDerived_kObject;
+  }
+  // } ===== end morkNode methods =====
+
+  // void NewNilHandleError(morkEnv* ev); // mObject_Handle is nil
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakObject(morkObject* me, morkEnv* ev, morkObject** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongObject(morkObject* me, morkEnv* ev,
+                               morkObject** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKOBJECT_ */
diff --git a/comm/mailnews/db/mork/morkParser.cpp b/comm/mailnews/db/mork/morkParser.cpp
new file mode 100644
index 0000000000..8ca635014f
--- /dev/null
+++ b/comm/mailnews/db/mork/morkParser.cpp
@@ -0,0 +1,1331 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKPARSER_
+# include "morkParser.h"
+#endif
+
+#ifndef _MORKSTREAM_
+# include "morkStream.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+#ifndef _MORKSINK_
+# include "morkSink.h"
+#endif
+
+#ifndef _MORKCH_
+# include "morkCh.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkParser::CloseMorkNode(
+ morkEnv* ev) // CloseParser() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseParser(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkParser::~morkParser() // assert CloseParser() executed earlier
+{
+ MORK_ASSERT(mParser_Heap == 0);
+ MORK_ASSERT(mParser_Stream == 0);
+}
+
+/*public non-poly*/
+morkParser::morkParser(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, morkStream* ioStream,
+ mdb_count inBytesPerParseSegment, nsIMdbHeap* ioSlotHeap)
+ : morkNode(ev, inUsage, ioHeap),
+ mParser_Heap(0),
+ mParser_Stream(0),
+ mParser_MoreGranularity(inBytesPerParseSegment),
+ mParser_State(morkParser_kStartState)
+
+ ,
+ mParser_GroupContentStartPos(0)
+
+ ,
+ mParser_TableMid(),
+ mParser_RowMid(),
+ mParser_CellMid()
+
+ ,
+ mParser_InPort(morkBool_kFalse),
+ mParser_InDict(morkBool_kFalse),
+ mParser_InCell(morkBool_kFalse),
+ mParser_InMeta(morkBool_kFalse)
+
+ ,
+ mParser_InPortRow(morkBool_kFalse),
+ mParser_InRow(morkBool_kFalse),
+ mParser_InTable(morkBool_kFalse),
+ mParser_InGroup(morkBool_kFalse)
+
+ ,
+ mParser_AtomChange(morkChange_kNil),
+ mParser_CellChange(morkChange_kNil),
+ mParser_RowChange(morkChange_kNil),
+ mParser_TableChange(morkChange_kNil)
+
+ ,
+ mParser_Change(morkChange_kNil),
+ mParser_IsBroken(morkBool_kFalse),
+ mParser_IsDone(morkBool_kFalse),
+ mParser_DoMore(morkBool_kTrue)
+
+ ,
+ mParser_Mid()
+
+ ,
+ mParser_ScopeCoil(ev, ioSlotHeap),
+ mParser_ValueCoil(ev, ioSlotHeap),
+ mParser_ColumnCoil(ev, ioSlotHeap),
+ mParser_StringCoil(ev, ioSlotHeap)
+
+ ,
+ mParser_ScopeSpool(ev, &mParser_ScopeCoil),
+ mParser_ValueSpool(ev, &mParser_ValueCoil),
+ mParser_ColumnSpool(ev, &mParser_ColumnCoil),
+ mParser_StringSpool(ev, &mParser_StringCoil)
+
+ ,
+ mParser_MidYarn(ev, morkUsage(morkUsage_kMember), ioSlotHeap) {
+ if (inBytesPerParseSegment < morkParser_kMinGranularity)
+ inBytesPerParseSegment = morkParser_kMinGranularity;
+ else if (inBytesPerParseSegment > morkParser_kMaxGranularity)
+ inBytesPerParseSegment = morkParser_kMaxGranularity;
+
+ mParser_MoreGranularity = inBytesPerParseSegment;
+
+ if (ioSlotHeap && ioStream) {
+ nsIMdbHeap_SlotStrongHeap(ioSlotHeap, ev, &mParser_Heap);
+ morkStream::SlotStrongStream(ioStream, ev, &mParser_Stream);
+
+ if (ev->Good()) {
+ mParser_Tag = morkParser_kTag;
+ mNode_Derived = morkDerived_kParser;
+ }
+ } else
+ ev->NilPointerError();
+}
+
+/*public non-poly*/ void morkParser::CloseParser(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ if (!this->IsShutNode()) {
+ mParser_ScopeCoil.CloseCoil(ev);
+ mParser_ValueCoil.CloseCoil(ev);
+ mParser_ColumnCoil.CloseCoil(ev);
+ mParser_StringCoil.CloseCoil(ev);
+ nsIMdbHeap_SlotStrongHeap((nsIMdbHeap*)0, ev, &mParser_Heap);
+ morkStream::SlotStrongStream((morkStream*)0, ev, &mParser_Stream);
+ this->MarkShut();
+ }
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+/*protected non-poly*/ void morkParser::NonGoodParserError(
+ morkEnv* ev) // when GoodParserTag() is false
+{
+ ev->NewError("non-morkNode");
+}
+
+/*protected non-poly*/ void morkParser::NonUsableParserError(morkEnv* ev) //
+{
+ if (this->IsNode()) {
+ if (this->IsOpenNode()) {
+ if (this->GoodParserTag()) {
+ // okay
+ } else
+ this->NonGoodParserError(ev);
+ } else
+ this->NonOpenNodeError(ev);
+ } else
+ this->NonNodeError(ev);
+}
+
+/*protected non-poly*/ void morkParser::StartParse(morkEnv* ev) {
+ MORK_USED_1(ev);
+ mParser_InCell = morkBool_kFalse;
+ mParser_InMeta = morkBool_kFalse;
+ mParser_InDict = morkBool_kFalse;
+ mParser_InPortRow = morkBool_kFalse;
+
+ mParser_RowMid.ClearMid();
+ mParser_TableMid.ClearMid();
+ mParser_CellMid.ClearMid();
+
+ mParser_GroupId = 0;
+ mParser_InPort = morkBool_kTrue;
+
+ mParser_GroupSpan.ClearSpan();
+ mParser_DictSpan.ClearSpan();
+ mParser_AliasSpan.ClearSpan();
+ mParser_MetaSpan.ClearSpan();
+ mParser_TableSpan.ClearSpan();
+ mParser_RowSpan.ClearSpan();
+ mParser_CellSpan.ClearSpan();
+ mParser_ColumnSpan.ClearSpan();
+ mParser_SlotSpan.ClearSpan();
+
+ mParser_PortSpan.ClearSpan();
+}
+
+/*protected non-poly*/ void morkParser::StopParse(morkEnv* ev) {
+ if (mParser_InCell) {
+ mParser_InCell = morkBool_kFalse;
+ mParser_CellSpan.SetEndWithEnd(mParser_PortSpan);
+ this->OnCellEnd(ev, mParser_CellSpan);
+ }
+ if (mParser_InMeta) {
+ mParser_InMeta = morkBool_kFalse;
+ mParser_MetaSpan.SetEndWithEnd(mParser_PortSpan);
+ this->OnMetaEnd(ev, mParser_MetaSpan);
+ }
+ if (mParser_InDict) {
+ mParser_InDict = morkBool_kFalse;
+ mParser_DictSpan.SetEndWithEnd(mParser_PortSpan);
+ this->OnDictEnd(ev, mParser_DictSpan);
+ }
+ if (mParser_InPortRow) {
+ mParser_InPortRow = morkBool_kFalse;
+ mParser_RowSpan.SetEndWithEnd(mParser_PortSpan);
+ this->OnPortRowEnd(ev, mParser_RowSpan);
+ }
+ if (mParser_InRow) {
+ mParser_InRow = morkBool_kFalse;
+ mParser_RowMid.ClearMid();
+ mParser_RowSpan.SetEndWithEnd(mParser_PortSpan);
+ this->OnRowEnd(ev, mParser_RowSpan);
+ }
+ if (mParser_InTable) {
+ mParser_InTable = morkBool_kFalse;
+ mParser_TableMid.ClearMid();
+ mParser_TableSpan.SetEndWithEnd(mParser_PortSpan);
+ this->OnTableEnd(ev, mParser_TableSpan);
+ }
+ if (mParser_GroupId) {
+ mParser_GroupId = 0;
+ mParser_GroupSpan.SetEndWithEnd(mParser_PortSpan);
+ this->OnGroupAbortEnd(ev, mParser_GroupSpan);
+ }
+ if (mParser_InPort) {
+ mParser_InPort = morkBool_kFalse;
+ this->OnPortEnd(ev, mParser_PortSpan);
+ }
+}
+
+int morkParser::eat_comment(morkEnv* ev) // last char was '/'
+{
+ morkStream* s = mParser_Stream;
+ // Note morkStream::Getc() returns EOF when an error occurs, so
+ // we don't need to check for both c != EOF and ev->Good() below.
+
+ int c = s->Getc(ev);
+ if (c == '/') // C++ style comment?
+ {
+ while ((c = s->Getc(ev)) != EOF && c != 0xA && c != 0xD)
+ ; /* empty */
+
+ if (c == 0xA || c == 0xD) c = this->eat_line_break(ev, c);
+ } else if (c == '*') /* C style comment? */
+ {
+ int depth = 1; // count depth of comments until depth reaches zero
+
+ while (depth > 0 && c != EOF) // still looking for comment end(s)?
+ {
+ while ((c = s->Getc(ev)) != EOF && c != '/' && c != '*') {
+ if (c == 0xA || c == 0xD) // need to count a line break?
+ {
+ c = this->eat_line_break(ev, c);
+ if (c == '/' || c == '*') break; // end while loop
+ }
+ }
+
+ if (c == '*') // maybe end of a comment, if next char is '/'?
+ {
+ if ((c = s->Getc(ev)) == '/') // end of comment?
+ {
+ --depth; // depth of comments has decreased by one
+ if (!depth) // comments all done?
+ c = s->Getc(ev); // return the byte after end of comment
+ } else if (c != EOF) // need to put the char back?
+ s->Ungetc(c); // especially need to put back '*', 0xA, or 0xD
+ } else if (c == '/') // maybe nested comment, if next char is '*'?
+ {
+ if ((c = s->Getc(ev)) == '*') // nested comment?
+ ++depth; // depth of comments has increased by one
+ else if (c != EOF) // need to put the char back?
+ s->Ungetc(c); // especially need to put back '/', 0xA, or 0xD
+ }
+
+ if (ev->Bad()) c = EOF;
+ }
+ if (c == EOF && depth > 0) ev->NewWarning("EOF before end of comment");
+ } else
+ ev->NewWarning("expected / or *");
+
+ return c;
+}
+
+int morkParser::eat_line_break(morkEnv* ev, int inLast) {
+ morkStream* s = mParser_Stream;
+ int c = s->Getc(ev); // get next char after 0xA or 0xD
+ this->CountLineBreak();
+ if (c == 0xA || c == 0xD) // another line break character?
+ {
+ if (c != inLast) // not the same as the last one?
+ c = s->Getc(ev); // get next char after two-byte linebreak
+ }
+ return c;
+}
+
+int morkParser::eat_line_continue(morkEnv* ev) // last char was '\'
+{
+ morkStream* s = mParser_Stream;
+ int c = s->Getc(ev);
+ if (c == 0xA || c == 0xD) // linebreak follows \ as expected?
+ {
+ c = this->eat_line_break(ev, c);
+ } else
+ ev->NewWarning("expected linebreak");
+
+ return c;
+}
+
+int morkParser::NextChar(morkEnv* ev) // next non-white content
+{
+ morkStream* s = mParser_Stream;
+ int c = s->Getc(ev);
+ while (c > 0 && ev->Good()) {
+ if (c == '/')
+ c = this->eat_comment(ev);
+ else if (c == 0xA || c == 0xD)
+ c = this->eat_line_break(ev, c);
+ else if (c == '\\')
+ c = this->eat_line_continue(ev);
+ else if (morkCh_IsWhite(c))
+ c = s->Getc(ev);
+ else
+ break; // end while loop when return c is acceptable
+ }
+ if (ev->Bad()) {
+ mParser_State = morkParser_kBrokenState;
+ mParser_DoMore = morkBool_kFalse;
+ mParser_IsDone = morkBool_kTrue;
+ mParser_IsBroken = morkBool_kTrue;
+ c = EOF;
+ } else if (c == EOF) {
+ mParser_DoMore = morkBool_kFalse;
+ mParser_IsDone = morkBool_kTrue;
+ }
+ return c;
+}
+
+void morkParser::OnCellState(morkEnv* ev) { ev->StubMethodOnlyError(); }
+
+void morkParser::OnMetaState(morkEnv* ev) { ev->StubMethodOnlyError(); }
+
+void morkParser::OnRowState(morkEnv* ev) { ev->StubMethodOnlyError(); }
+
+void morkParser::OnTableState(morkEnv* ev) { ev->StubMethodOnlyError(); }
+
+void morkParser::OnDictState(morkEnv* ev) { ev->StubMethodOnlyError(); }
+
+morkBuf* morkParser::ReadName(morkEnv* ev, int c) {
+ morkBuf* outBuf = 0;
+
+ if (!morkCh_IsName(c)) ev->NewError("not a name char");
+
+ morkCoil* coil = &mParser_ColumnCoil;
+ coil->ClearBufFill();
+
+ morkSpool* spool = &mParser_ColumnSpool;
+ spool->Seek(ev, /*pos*/ 0);
+
+ if (ev->Good()) {
+ spool->Putc(ev, c);
+
+ morkStream* s = mParser_Stream;
+ while ((c = s->Getc(ev)) != EOF && morkCh_IsMore(c) && ev->Good())
+ spool->Putc(ev, c);
+
+ if (ev->Good()) {
+ if (c != EOF) {
+ s->Ungetc(c);
+ spool->FlushSink(ev); // update coil->mBuf_Fill
+ } else
+ this->UnexpectedEofError(ev);
+
+ if (ev->Good()) outBuf = coil;
+ }
+ }
+ return outBuf;
+}
+
+mork_bool morkParser::ReadMid(morkEnv* ev, morkMid* outMid) {
+ outMid->ClearMid();
+
+ morkStream* s = mParser_Stream;
+ int next;
+ outMid->mMid_Oid.mOid_Id = this->ReadHex(ev, &next);
+ int c = next;
+ if (c == ':') {
+ if ((c = s->Getc(ev)) != EOF && ev->Good()) {
+ if (c == '^') {
+ outMid->mMid_Oid.mOid_Scope = this->ReadHex(ev, &next);
+ if (ev->Good()) s->Ungetc(next);
+ } else if (morkCh_IsName(c)) {
+ outMid->mMid_Buf = this->ReadName(ev, c);
+ } else
+ ev->NewError("expected name or hex after ':' following ID");
+ }
+
+ if (c == EOF && ev->Good()) this->UnexpectedEofError(ev);
+ } else
+ s->Ungetc(c);
+
+ return ev->Good();
+}
+
+void morkParser::ReadCell(morkEnv* ev) {
+ mParser_CellMid.ClearMid();
+ // this->StartSpanOnLastByte(ev, &mParser_CellSpan);
+ morkMid* cellMid = 0; // if mid syntax is used for column
+ morkBuf* cellBuf = 0; // if naked string is used for column
+
+ morkStream* s = mParser_Stream;
+ int c;
+ if ((c = s->Getc(ev)) != EOF && ev->Good()) {
+ // this->StartSpanOnLastByte(ev, &mParser_ColumnSpan);
+ if (c == '^') {
+ cellMid = &mParser_CellMid;
+ this->ReadMid(ev, cellMid);
+ // if ( !mParser_CellMid.mMid_Oid.mOid_Scope )
+ // mParser_CellMid.mMid_Oid.mOid_Scope = (mork_scope) 'c';
+ } else {
+ if (mParser_InMeta && c == morkStore_kFormColumn) {
+ ReadCellForm(ev, c);
+ return;
+ } else
+ cellBuf = this->ReadName(ev, c);
+ }
+ if (ev->Good()) {
+ // this->EndSpanOnThisByte(ev, &mParser_ColumnSpan);
+
+ mParser_InCell = morkBool_kTrue;
+ this->OnNewCell(ev, *mParser_CellSpan.AsPlace(), cellMid,
+ cellBuf); // , mParser_CellChange
+
+ mParser_CellChange = morkChange_kNil;
+ if ((c = this->NextChar(ev)) != EOF && ev->Good()) {
+ // this->StartSpanOnLastByte(ev, &mParser_SlotSpan);
+ if (c == '=') {
+ morkBuf* buf = this->ReadValue(ev);
+ if (buf) {
+ // this->EndSpanOnThisByte(ev, &mParser_SlotSpan);
+ this->OnValue(ev, mParser_SlotSpan, *buf);
+ }
+ } else if (c == '^') {
+ if (this->ReadMid(ev, &mParser_Mid)) {
+ // this->EndSpanOnThisByte(ev, &mParser_SlotSpan);
+ if ((c = this->NextChar(ev)) != EOF && ev->Good()) {
+ if (c != ')') ev->NewError("expected ')' after cell ^ID value");
+ } else if (c == EOF)
+ this->UnexpectedEofError(ev);
+
+ if (ev->Good()) this->OnValueMid(ev, mParser_SlotSpan, mParser_Mid);
+ }
+ } else if (c == 'r' || c == 't' || c == '"' || c == '\'') {
+ ev->NewError("cell syntax not yet supported");
+ } else {
+ ev->NewError("unknown cell syntax");
+ }
+ }
+
+ // this->EndSpanOnThisByte(ev, &mParser_CellSpan);
+ mParser_InCell = morkBool_kFalse;
+ this->OnCellEnd(ev, mParser_CellSpan);
+ }
+ }
+ mParser_CellChange = morkChange_kNil;
+
+ if (c == EOF && ev->Good()) this->UnexpectedEofError(ev);
+}
+
+void morkParser::ReadRowPos(morkEnv* ev) {
+ int c; // next character
+ mork_pos rowPos = this->ReadHex(ev, &c);
+
+ if (ev->Good() && c != EOF) // should put back byte after hex?
+ mParser_Stream->Ungetc(c);
+
+ this->OnRowPos(ev, rowPos);
+}
+
+void morkParser::ReadRow(morkEnv* ev, int c)
+// zm:Row ::= zm:S? '[' zm:S? zm:Id zm:RowItem* zm:S? ']'
+// zm:RowItem ::= zm:MetaRow | zm:Cell
+// zm:MetaRow ::= zm:S? '[' zm:S? zm:Cell* zm:S? ']' /* meta attributes */
+// zm:Cell ::= zm:S? '(' zm:Column zm:S? zm:Slot? ')'
+{
+ if (ev->Good()) {
+ // this->StartSpanOnLastByte(ev, &mParser_RowSpan);
+ if (mParser_Change) mParser_RowChange = mParser_Change;
+
+ mork_bool cutAllRowCols = morkBool_kFalse;
+
+ if (c == '[') {
+ if ((c = this->NextChar(ev)) == '-')
+ cutAllRowCols = morkBool_kTrue;
+ else if (ev->Good() && c != EOF)
+ mParser_Stream->Ungetc(c);
+
+ if (this->ReadMid(ev, &mParser_RowMid)) {
+ mParser_InRow = morkBool_kTrue;
+ this->OnNewRow(ev, *mParser_RowSpan.AsPlace(), mParser_RowMid,
+ cutAllRowCols);
+
+ mParser_Change = mParser_RowChange = morkChange_kNil;
+
+ while ((c = this->NextChar(ev)) != EOF && ev->Good() && c != ']') {
+ switch (c) {
+ case '(': // cell
+ this->ReadCell(ev);
+ break;
+
+ case '[': // meta
+ this->ReadMeta(ev, ']');
+ break;
+
+ // case '+': // plus
+ // mParser_CellChange = morkChange_kAdd;
+ // break;
+
+ case '-': // minus
+ // mParser_CellChange = morkChange_kCut;
+ this->OnMinusCell(ev);
+ break;
+
+ // case '!': // bang
+ // mParser_CellChange = morkChange_kSet;
+ // break;
+
+ default:
+ ev->NewWarning("unexpected byte in row");
+ break;
+ } // switch
+ } // while
+
+ if (ev->Good()) {
+ if ((c = this->NextChar(ev)) == '!')
+ this->ReadRowPos(ev);
+ else if (c != EOF && ev->Good())
+ mParser_Stream->Ungetc(c);
+ }
+
+ // this->EndSpanOnThisByte(ev, &mParser_RowSpan);
+ mParser_InRow = morkBool_kFalse;
+ this->OnRowEnd(ev, mParser_RowSpan);
+
+ } // if ReadMid
+ } // if '['
+
+ else // c != '['
+ {
+ morkStream* s = mParser_Stream;
+ s->Ungetc(c);
+ if (this->ReadMid(ev, &mParser_RowMid)) {
+ mParser_InRow = morkBool_kTrue;
+ this->OnNewRow(ev, *mParser_RowSpan.AsPlace(), mParser_RowMid,
+ cutAllRowCols);
+
+ mParser_Change = mParser_RowChange = morkChange_kNil;
+
+ if (ev->Good()) {
+ if ((c = this->NextChar(ev)) == '!')
+ this->ReadRowPos(ev);
+ else if (c != EOF && ev->Good())
+ s->Ungetc(c);
+ }
+
+ // this->EndSpanOnThisByte(ev, &mParser_RowSpan);
+ mParser_InRow = morkBool_kFalse;
+ this->OnRowEnd(ev, mParser_RowSpan);
+ }
+ }
+ }
+
+ if (ev->Bad())
+ mParser_State = morkParser_kBrokenState;
+ else if (c == EOF)
+ mParser_State = morkParser_kDoneState;
+}
+
+void morkParser::ReadTable(morkEnv* ev)
+// zm:Table ::= zm:S? '{' zm:S? zm:Id zm:TableItem* zm:S? '}'
+// zm:TableItem ::= zm:MetaTable | zm:RowRef | zm:Row
+// zm:MetaTable ::= zm:S? '{' zm:S? zm:Cell* zm:S? '}' /* meta attributes */
+{
+ // this->StartSpanOnLastByte(ev, &mParser_TableSpan);
+
+ if (mParser_Change) mParser_TableChange = mParser_Change;
+
+ mork_bool cutAllTableRows = morkBool_kFalse;
+
+ int c = this->NextChar(ev);
+ if (c == '-')
+ cutAllTableRows = morkBool_kTrue;
+ else if (ev->Good() && c != EOF)
+ mParser_Stream->Ungetc(c);
+
+ if (ev->Good() && this->ReadMid(ev, &mParser_TableMid)) {
+ mParser_InTable = morkBool_kTrue;
+ this->OnNewTable(ev, *mParser_TableSpan.AsPlace(), mParser_TableMid,
+ cutAllTableRows);
+
+ mParser_Change = mParser_TableChange = morkChange_kNil;
+
+ while ((c = this->NextChar(ev)) != EOF && ev->Good() && c != '}') {
+ if (morkCh_IsHex(c)) {
+ this->ReadRow(ev, c);
+ } else {
+ switch (c) {
+ case '[': // row
+ this->ReadRow(ev, '[');
+ break;
+
+ case '{': // meta
+ this->ReadMeta(ev, '}');
+ break;
+
+ // case '+': // plus
+ // mParser_RowChange = morkChange_kAdd;
+ // break;
+
+ case '-': // minus
+ // mParser_RowChange = morkChange_kCut;
+ this->OnMinusRow(ev);
+ break;
+
+ // case '!': // bang
+ // mParser_RowChange = morkChange_kSet;
+ // break;
+
+ default:
+ ev->NewWarning("unexpected byte in table");
+ break;
+ }
+ }
+ }
+
+ // this->EndSpanOnThisByte(ev, &mParser_TableSpan);
+ mParser_InTable = morkBool_kFalse;
+ this->OnTableEnd(ev, mParser_TableSpan);
+
+ if (ev->Bad())
+ mParser_State = morkParser_kBrokenState;
+ else if (c == EOF)
+ mParser_State = morkParser_kDoneState;
+ }
+}
+
+mork_id morkParser::ReadHex(morkEnv* ev, int* outNextChar)
+// zm:Hex ::= [0-9a-fA-F] /* a single hex digit */
+// zm:Hex+ ::= zm:Hex | zm:Hex zm:Hex+
+{
+ mork_id hex = 0;
+
+ morkStream* s = mParser_Stream;
+ int c = this->NextChar(ev);
+
+ if (ev->Good()) {
+ if (c != EOF) {
+ if (morkCh_IsHex(c)) {
+ do {
+ if (morkCh_IsDigit(c)) // '0' through '9'?
+ c -= '0';
+ else if (morkCh_IsUpper(c)) // 'A' through 'F'?
+ c -= ('A' - 10); // c = (c - 'A') + 10;
+ else // 'a' through 'f'?
+ c -= ('a' - 10); // c = (c - 'a') + 10;
+
+ hex = (hex << 4) + c;
+ } while ((c = s->Getc(ev)) != EOF && ev->Good() && morkCh_IsHex(c));
+ } else
+ this->ExpectedHexDigitError(ev, c);
+ }
+ }
+ if (c == EOF) this->EofInsteadOfHexError(ev);
+
+ *outNextChar = c;
+ return hex;
+}
+
+/*static*/ void morkParser::EofInsteadOfHexError(morkEnv* ev) {
+ ev->NewWarning("eof instead of hex");
+}
+
+/*static*/ void morkParser::ExpectedHexDigitError(morkEnv* ev, int c) {
+ MORK_USED_1(c);
+ ev->NewWarning("expected hex digit");
+}
+
+/*static*/ void morkParser::ExpectedEqualError(morkEnv* ev) {
+ ev->NewWarning("expected '='");
+}
+
+/*static*/ void morkParser::UnexpectedEofError(morkEnv* ev) {
+ ev->NewWarning("unexpected eof");
+}
+
+morkBuf* morkParser::ReadValue(morkEnv* ev) {
+ morkBuf* outBuf = 0;
+
+ morkCoil* coil = &mParser_ValueCoil;
+ coil->ClearBufFill();
+
+ morkSpool* spool = &mParser_ValueSpool;
+ spool->Seek(ev, /*pos*/ 0);
+
+ if (ev->Good()) {
+ morkStream* s = mParser_Stream;
+ int c;
+ while ((c = s->Getc(ev)) != EOF && c != ')' && ev->Good()) {
+ if (c == '\\') // next char is escaped by '\'?
+ {
+ if ((c = s->Getc(ev)) == 0xA || c == 0xD) // linebreak after \?
+ {
+ c = this->eat_line_break(ev, c);
+ if (c == ')' || c == '\\' || c == '$') {
+ s->Ungetc(c); // just let while loop test read this again
+ continue; // goto next iteration of while loop
+ }
+ }
+ if (c == EOF || ev->Bad()) break; // end while loop
+ } else if (c == '$') // "$" escapes next two hex digits?
+ {
+ if ((c = s->Getc(ev)) != EOF && ev->Good()) {
+ mork_ch first = (mork_ch)c; // first hex digit
+ if ((c = s->Getc(ev)) != EOF && ev->Good()) {
+ mork_ch second = (mork_ch)c; // second hex digit
+ c = ev->HexToByte(first, second);
+ } else
+ break; // end while loop
+ } else
+ break; // end while loop
+ }
+ spool->Putc(ev, c);
+ }
+
+ if (ev->Good()) {
+ if (c != EOF)
+ spool->FlushSink(ev); // update coil->mBuf_Fill
+ else
+ this->UnexpectedEofError(ev);
+
+ if (ev->Good()) outBuf = coil;
+ }
+ }
+ return outBuf;
+}
+
+void morkParser::ReadDictForm(morkEnv* ev) {
+ int nextChar;
+ nextChar = this->NextChar(ev);
+ if (nextChar == '(') {
+ nextChar = this->NextChar(ev);
+ if (nextChar == morkStore_kFormColumn) {
+ int dictForm;
+
+ nextChar = this->NextChar(ev);
+ if (nextChar == '=') {
+ dictForm = this->NextChar(ev);
+ nextChar = this->NextChar(ev);
+ } else if (nextChar == '^') {
+ dictForm = this->ReadHex(ev, &nextChar);
+ } else {
+ ev->NewWarning("unexpected byte in dict form");
+ return;
+ }
+ mParser_ValueCoil.mText_Form = dictForm;
+ if (nextChar == ')') {
+ nextChar = this->NextChar(ev);
+ if (nextChar == '>') return;
+ }
+ }
+ }
+ ev->NewWarning("unexpected byte in dict form");
+}
+
+void morkParser::ReadCellForm(morkEnv* ev, int c) {
+ MORK_ASSERT(c == morkStore_kFormColumn);
+ int nextChar;
+ nextChar = this->NextChar(ev);
+ int cellForm;
+
+ if (nextChar == '=') {
+ cellForm = this->NextChar(ev);
+ nextChar = this->NextChar(ev);
+ } else if (nextChar == '^') {
+ cellForm = this->ReadHex(ev, &nextChar);
+ } else {
+ ev->NewWarning("unexpected byte in cell form");
+ return;
+ }
+ // ### not sure about this. Which form should we set?
+ // mBuilder_CellForm = mBuilder_RowForm = cellForm;
+ if (nextChar == ')') {
+ OnCellForm(ev, cellForm);
+ return;
+ }
+ ev->NewWarning("unexpected byte in cell form");
+}
+
+void morkParser::ReadAlias(morkEnv* ev)
+// zm:Alias ::= zm:S? '(' ('#')? zm:Hex+ zm:S? zm:Value ')'
+// zm:Value ::= '=' ([^)$\] | '\' zm:NonCRLF | zm:Continue | zm:Dollar)*
+{
+ // this->StartSpanOnLastByte(ev, &mParser_AliasSpan);
+
+ int nextChar;
+ mork_id hex = this->ReadHex(ev, &nextChar);
+ int c = nextChar;
+
+ mParser_Mid.ClearMid();
+ mParser_Mid.mMid_Oid.mOid_Id = hex;
+
+ if (morkCh_IsWhite(c) && ev->Good()) c = this->NextChar(ev);
+
+ if (ev->Good()) {
+ if (c == '<') {
+ ReadDictForm(ev);
+ if (ev->Good()) c = this->NextChar(ev);
+ }
+ if (c == '=') {
+ mParser_Mid.mMid_Buf = this->ReadValue(ev);
+ if (mParser_Mid.mMid_Buf) {
+ // this->EndSpanOnThisByte(ev, &mParser_AliasSpan);
+ this->OnAlias(ev, mParser_AliasSpan, mParser_Mid);
+ // need to reset this somewhere.
+ mParser_ValueCoil.mText_Form = 0;
+ }
+ } else
+ this->ExpectedEqualError(ev);
+ }
+}
+
+void morkParser::ReadMeta(morkEnv* ev, int inEndMeta)
+// zm:MetaDict ::= zm:S? '<' zm:S? zm:Cell* zm:S? '>' /* meta attributes */
+// zm:MetaTable ::= zm:S? '{' zm:S? zm:Cell* zm:S? '}' /* meta attributes */
+// zm:MetaRow ::= zm:S? '[' zm:S? zm:Cell* zm:S? ']' /* meta attributes */
+{
+ // this->StartSpanOnLastByte(ev, &mParser_MetaSpan);
+ mParser_InMeta = morkBool_kTrue;
+ this->OnNewMeta(ev, *mParser_MetaSpan.AsPlace());
+
+ mork_bool more = morkBool_kTrue; // until end meta
+ int c;
+ while (more && (c = this->NextChar(ev)) != EOF && ev->Good()) {
+ switch (c) {
+ case '(': // cell
+ this->ReadCell(ev);
+ break;
+
+ case '>': // maybe end meta?
+ if (inEndMeta == '>')
+ more = morkBool_kFalse; // stop reading meta
+ else
+ this->UnexpectedByteInMetaWarning(ev);
+ break;
+
+ case '}': // maybe end meta?
+ if (inEndMeta == '}')
+ more = morkBool_kFalse; // stop reading meta
+ else
+ this->UnexpectedByteInMetaWarning(ev);
+ break;
+
+ case ']': // maybe end meta?
+ if (inEndMeta == ']')
+ more = morkBool_kFalse; // stop reading meta
+ else
+ this->UnexpectedByteInMetaWarning(ev);
+ break;
+
+ case '[': // maybe table meta row?
+ if (mParser_InTable)
+ this->ReadRow(ev, '[');
+ else
+ this->UnexpectedByteInMetaWarning(ev);
+ break;
+
+ default:
+ if (mParser_InTable && morkCh_IsHex(c))
+ this->ReadRow(ev, c);
+ else
+ this->UnexpectedByteInMetaWarning(ev);
+ break;
+ }
+ }
+
+ // this->EndSpanOnThisByte(ev, &mParser_MetaSpan);
+ mParser_InMeta = morkBool_kFalse;
+ this->OnMetaEnd(ev, mParser_MetaSpan);
+}
+
+/*static*/ void morkParser::UnexpectedByteInMetaWarning(morkEnv* ev) {
+ ev->NewWarning("unexpected byte in meta");
+}
+
+/*static*/ void morkParser::NonParserTypeError(morkEnv* ev) {
+ ev->NewError("non morkParser");
+}
+
+mork_bool morkParser::MatchPattern(morkEnv* ev, const char* inPattern) {
+ // if an error occurs, we want original inPattern in the debugger:
+ const char* pattern = inPattern; // mutable copy of pointer
+ morkStream* s = mParser_Stream;
+ int c;
+ while (*pattern && ev->Good()) {
+ char byte = *pattern++;
+ if ((c = s->Getc(ev)) != byte) {
+ ev->NewError("byte not in expected pattern");
+ }
+ }
+ return ev->Good();
+}
+
+mork_bool morkParser::FindGroupEnd(morkEnv* ev) {
+ mork_bool foundEnd = morkBool_kFalse;
+
+ // char gidBuf[ 64 ]; // to hold hex pattern we want
+ // (void) ev->TokenAsHex(gidBuf, mParser_GroupId);
+
+ morkStream* s = mParser_Stream;
+ int c;
+
+ while ((c = s->Getc(ev)) != EOF && ev->Good() && !foundEnd) {
+ if (c == '@') // maybe start of group ending?
+ {
+ // this->EndSpanOnThisByte(ev, &mParser_GroupSpan);
+ if ((c = s->Getc(ev)) == '$') // '$' follows '@' ?
+ {
+ if ((c = s->Getc(ev)) == '$') // '$' follows "@$" ?
+ {
+ if ((c = s->Getc(ev)) == '}') {
+ foundEnd = this->ReadEndGroupId(ev);
+ // this->EndSpanOnThisByte(ev, &mParser_GroupSpan);
+
+ } else
+ ev->NewError("expected '}' after @$$");
+ }
+ }
+ if (!foundEnd && c == '@') s->Ungetc(c);
+ }
+ }
+
+ return foundEnd && ev->Good();
+}
+
+void morkParser::ReadGroup(morkEnv* mev) {
+ nsIMdbEnv* ev = mev->AsMdbEnv();
+ int next = 0;
+ mParser_GroupId = this->ReadHex(mev, &next);
+ if (next == '{') {
+ morkStream* s = mParser_Stream;
+ int c;
+ if ((c = s->Getc(mev)) == '@') {
+ // we really need the following span inside morkBuilder::OnNewGroup():
+ this->StartSpanOnThisByte(mev, &mParser_GroupSpan);
+ mork_pos startPos = mParser_GroupSpan.mSpan_Start.mPlace_Pos;
+
+ // if ( !store->mStore_FirstCommitGroupPos )
+ // store->mStore_FirstCommitGroupPos = startPos;
+ // else if ( !store->mStore_SecondCommitGroupPos )
+ // store->mStore_SecondCommitGroupPos = startPos;
+
+ if (this->FindGroupEnd(mev)) {
+ mork_pos outPos;
+ s->Seek(ev, startPos, &outPos);
+ if (mev->Good()) {
+ this->OnNewGroup(mev, mParser_GroupSpan.mSpan_Start, mParser_GroupId);
+
+ this->ReadContent(mev, /*inInsideGroup*/ morkBool_kTrue);
+
+ this->OnGroupCommitEnd(mev, mParser_GroupSpan);
+ }
+ }
+ } else
+ mev->NewError("expected '@' after @$${id{");
+ } else
+ mev->NewError("expected '{' after @$$id");
+}
+
+mork_bool morkParser::ReadAt(morkEnv* ev, mork_bool inInsideGroup)
+/* groups must be ignored until properly terminated */
+// zm:Group ::= zm:GroupStart zm:Content zm:GroupEnd /* transaction */
+// zm:GroupStart ::= zm:S? '@$${' zm:Hex+ '{@' /* xaction id has own space */
+// zm:GroupEnd ::= zm:GroupCommit | zm:GroupAbort
+// zm:GroupCommit ::= zm:S? '@$$}' zm:Hex+ '}@' /* id matches start id */
+// zm:GroupAbort ::= zm:S? '@$$}~~}@' /* id matches start id */
+/* We must allow started transactions to be aborted in summary files. */
+/* Note '$$' will never occur unescaped in values we will see in Mork. */
+{
+ if (this->MatchPattern(ev, "$$")) {
+ morkStream* s = mParser_Stream;
+ int c;
+ if (((c = s->Getc(ev)) == '{' || c == '}') && ev->Good()) {
+ if (c == '{') // start of new group?
+ {
+ if (!inInsideGroup)
+ this->ReadGroup(ev);
+ else
+ ev->NewError("nested @$${ inside another group");
+ } else // c == '}' // end of old group?
+ {
+ if (inInsideGroup) {
+ this->ReadEndGroupId(ev);
+ mParser_GroupId = 0;
+ } else
+ ev->NewError("unmatched @$$} outside any group");
+ }
+ } else
+ ev->NewError("expected '{' or '}' after @$$");
+ }
+ return ev->Good();
+}
+
+mork_bool morkParser::ReadEndGroupId(morkEnv* ev) {
+ mork_bool outSawGroupId = morkBool_kFalse;
+ morkStream* s = mParser_Stream;
+ int c;
+ if ((c = s->Getc(ev)) != EOF && ev->Good()) {
+ if (c == '~') // transaction is aborted?
+ {
+ this->MatchPattern(ev, "~}@"); // finish rest of pattern
+ } else // push back byte and read expected trailing hex id
+ {
+ s->Ungetc(c);
+ int next = 0;
+ mork_gid endGroupId = this->ReadHex(ev, &next);
+ if (ev->Good()) {
+ if (endGroupId == mParser_GroupId) // matches start?
+ {
+ if (next == '}') // '}' after @$$}id ?
+ {
+ if ((c = s->Getc(ev)) == '@') // '@' after @$$}id} ?
+ {
+ // looks good, so return with no error
+ outSawGroupId = morkBool_kTrue;
+ mParser_InGroup = false;
+ } else
+ ev->NewError("expected '@' after @$$}id}");
+ } else
+ ev->NewError("expected '}' after @$$}id");
+ } else
+ ev->NewError("end group id mismatch");
+ }
+ }
+ }
+ return (outSawGroupId && ev->Good());
+}
+
+void morkParser::ReadDict(morkEnv* ev)
+// zm:Dict ::= zm:S? '<' zm:DictItem* zm:S? '>'
+// zm:DictItem ::= zm:MetaDict | zm:Alias
+// zm:MetaDict ::= zm:S? '<' zm:S? zm:Cell* zm:S? '>' /* meta attributes */
+// zm:Alias ::= zm:S? '(' ('#')? zm:Hex+ zm:S? zm:Value ')'
+{
+ mParser_Change = morkChange_kNil;
+ mParser_AtomChange = morkChange_kNil;
+
+ // this->StartSpanOnLastByte(ev, &mParser_DictSpan);
+ mParser_InDict = morkBool_kTrue;
+ this->OnNewDict(ev, *mParser_DictSpan.AsPlace());
+
+ int c;
+ while ((c = this->NextChar(ev)) != EOF && ev->Good() && c != '>') {
+ switch (c) {
+ case '(': // alias
+ this->ReadAlias(ev);
+ break;
+
+ case '<': // meta
+ this->ReadMeta(ev, '>');
+ break;
+
+ default:
+ ev->NewWarning("unexpected byte in dict");
+ break;
+ }
+ }
+
+ // this->EndSpanOnThisByte(ev, &mParser_DictSpan);
+ mParser_InDict = morkBool_kFalse;
+ this->OnDictEnd(ev, mParser_DictSpan);
+
+ if (ev->Bad())
+ mParser_State = morkParser_kBrokenState;
+ else if (c == EOF)
+ mParser_State = morkParser_kDoneState;
+}
+
+void morkParser::EndSpanOnThisByte(morkEnv* mev, morkSpan* ioSpan) {
+ mork_pos here;
+ nsIMdbEnv* ev = mev->AsMdbEnv();
+ nsresult rv = mParser_Stream->Tell(ev, &here);
+ if (NS_SUCCEEDED(rv) && mev->Good()) {
+ this->SetHerePos(here);
+ ioSpan->SetEndWithEnd(mParser_PortSpan);
+ }
+}
+
+void morkParser::EndSpanOnLastByte(morkEnv* mev, morkSpan* ioSpan) {
+ mork_pos here;
+ nsIMdbEnv* ev = mev->AsMdbEnv();
+ nsresult rv = mParser_Stream->Tell(ev, &here);
+ if (NS_SUCCEEDED(rv) && mev->Good()) {
+ if (here > 0)
+ --here;
+ else
+ here = 0;
+
+ this->SetHerePos(here);
+ ioSpan->SetEndWithEnd(mParser_PortSpan);
+ }
+}
+
+void morkParser::StartSpanOnLastByte(morkEnv* mev, morkSpan* ioSpan) {
+ mork_pos here;
+ nsIMdbEnv* ev = mev->AsMdbEnv();
+ nsresult rv = mParser_Stream->Tell(ev, &here);
+ if (NS_SUCCEEDED(rv) && mev->Good()) {
+ if (here > 0)
+ --here;
+ else
+ here = 0;
+
+ this->SetHerePos(here);
+ ioSpan->SetStartWithEnd(mParser_PortSpan);
+ ioSpan->SetEndWithEnd(mParser_PortSpan);
+ }
+}
+
+void morkParser::StartSpanOnThisByte(morkEnv* mev, morkSpan* ioSpan) {
+ mork_pos here;
+ nsIMdbEnv* ev = mev->AsMdbEnv();
+ nsresult rv = mParser_Stream->Tell(ev, &here);
+ if (NS_SUCCEEDED(rv) && mev->Good()) {
+ this->SetHerePos(here);
+ ioSpan->SetStartWithEnd(mParser_PortSpan);
+ ioSpan->SetEndWithEnd(mParser_PortSpan);
+ }
+}
+
+mork_bool morkParser::ReadContent(morkEnv* ev, mork_bool inInsideGroup) {
+ int c;
+ mork_bool keep_going = true;
+ while (keep_going && (c = this->NextChar(ev)) != EOF && ev->Good()) {
+ switch (c) {
+ case '[': // row
+ this->ReadRow(ev, '[');
+ keep_going = false;
+ break;
+
+ case '{': // table
+ this->ReadTable(ev);
+ keep_going = false;
+ break;
+
+ case '<': // dict
+ this->ReadDict(ev);
+ keep_going = false;
+ break;
+
+ case '@': // group
+ return this->ReadAt(ev, inInsideGroup);
+ // break;
+
+ // case '+': // plus
+ // mParser_Change = morkChange_kAdd;
+ // break;
+
+ // case '-': // minus
+ // mParser_Change = morkChange_kCut;
+ // break;
+
+ // case '!': // bang
+ // mParser_Change = morkChange_kSet;
+ // break;
+
+ default:
+ ev->NewWarning("unexpected byte in ReadContent()");
+ break;
+ }
+ }
+ if (ev->Bad())
+ mParser_State = morkParser_kBrokenState;
+ else if (c == EOF)
+ mParser_State = morkParser_kDoneState;
+
+ return (ev->Good() && c != EOF);
+}
+
+void morkParser::OnPortState(morkEnv* ev) {
+ mork_bool firstTime = !mParser_InPort;
+ mParser_InPort = morkBool_kTrue;
+ if (firstTime) this->OnNewPort(ev, *mParser_PortSpan.AsPlace());
+
+ mork_bool done = !this->ReadContent(ev, mParser_InGroup /*inInsideGroup*/);
+
+ if (done) {
+ mParser_InPort = morkBool_kFalse;
+ this->OnPortEnd(ev, mParser_PortSpan);
+ }
+
+ if (ev->Bad()) mParser_State = morkParser_kBrokenState;
+}
+
+void morkParser::OnStartState(morkEnv* mev) {
+ morkStream* s = mParser_Stream;
+ nsIMdbEnv* ev = mev->AsMdbEnv();
+ if (s && s->IsNode() && s->IsOpenNode()) {
+ mork_pos outPos;
+ nsresult rv = s->Seek(ev, 0, &outPos);
+ if (NS_SUCCEEDED(rv) && mev->Good()) {
+ this->StartParse(mev);
+ mParser_State = morkParser_kPortState;
+ }
+ } else
+ mev->NilPointerError();
+
+ if (mev->Bad()) mParser_State = morkParser_kBrokenState;
+}
+
+/*protected non-poly*/ void morkParser::ParseChunk(morkEnv* ev) {
+ mParser_Change = morkChange_kNil;
+ mParser_DoMore = morkBool_kTrue;
+
+ switch (mParser_State) {
+ case morkParser_kCellState: // 0
+ this->OnCellState(ev);
+ break;
+
+ case morkParser_kMetaState: // 1
+ this->OnMetaState(ev);
+ break;
+
+ case morkParser_kRowState: // 2
+ this->OnRowState(ev);
+ break;
+
+ case morkParser_kTableState: // 3
+ this->OnTableState(ev);
+ break;
+
+ case morkParser_kDictState: // 4
+ this->OnDictState(ev);
+ break;
+
+ case morkParser_kPortState: // 5
+ this->OnPortState(ev);
+ break;
+
+ case morkParser_kStartState: // 6
+ this->OnStartState(ev);
+ break;
+
+ case morkParser_kDoneState: // 7
+ mParser_DoMore = morkBool_kFalse;
+ mParser_IsDone = morkBool_kTrue;
+ this->StopParse(ev);
+ break;
+ case morkParser_kBrokenState: // 8
+ mParser_DoMore = morkBool_kFalse;
+ mParser_IsBroken = morkBool_kTrue;
+ this->StopParse(ev);
+ break;
+ default: // ?
+ MORK_ASSERT(morkBool_kFalse);
+ mParser_State = morkParser_kBrokenState;
+ break;
+ }
+}
+
+/*public non-poly*/ mdb_count
+morkParser::ParseMore( // return count of bytes consumed now
+ morkEnv* ev, // context
+ mork_pos* outPos, // current byte pos in the stream afterwards
+ mork_bool* outDone, // is parsing finished?
+ mork_bool* outBroken // is parsing irreparably dead and broken?
+) {
+ mdb_count outCount = 0;
+ if (this->IsNode() && this->GoodParserTag() && this->IsOpenNode()) {
+ mork_pos startPos = this->HerePos();
+
+ if (!mParser_IsDone && !mParser_IsBroken) this->ParseChunk(ev);
+
+ // HerePos is only updated for groups. I'd like it to be more accurate.
+
+ mork_pos here;
+ mParser_Stream->Tell(ev, &here);
+
+ if (outDone) *outDone = mParser_IsDone;
+ if (outBroken) *outBroken = mParser_IsBroken;
+ if (outPos) *outPos = here;
+
+ if (here > startPos) outCount = (mdb_count)(here - startPos);
+ } else {
+ this->NonUsableParserError(ev);
+ if (outDone) *outDone = morkBool_kTrue;
+ if (outBroken) *outBroken = morkBool_kTrue;
+ if (outPos) *outPos = 0;
+ }
+ return outCount;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkParser.h b/comm/mailnews/db/mork/morkParser.h
new file mode 100644
index 0000000000..61184ee995
--- /dev/null
+++ b/comm/mailnews/db/mork/morkParser.h
@@ -0,0 +1,547 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKPARSER_
+#define _MORKPARSER_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+#ifndef _MORKSINK_
+# include "morkSink.h"
+#endif
+
+#ifndef _MORKYARN_
+# include "morkYarn.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*=============================================================================
+ * morkPlace: stream byte position and stream line count
+ */
+
+class morkPlace {
+ public:
+  mork_pos mPlace_Pos; // byte offset in an input stream
+  mork_line mPlace_Line; // line count in an input stream
+
+  // Reset both coordinates to the start of a stream.
+  void ClearPlace() {
+    mPlace_Pos = 0;
+    mPlace_Line = 0;
+  }
+
+  // Set both coordinates at once.
+  void SetPlace(mork_pos inPos, mork_line inLine) {
+    mPlace_Pos = inPos;
+    mPlace_Line = inLine;
+  }
+
+  // Default construction starts at position zero, line zero.
+  morkPlace() {
+    mPlace_Pos = 0;
+    mPlace_Line = 0;
+  }
+
+  morkPlace(mork_pos inPos, mork_line inLine) {
+    mPlace_Pos = inPos;
+    mPlace_Line = inLine;
+  }
+
+  morkPlace(const morkPlace& inPlace)
+      : mPlace_Pos(inPlace.mPlace_Pos), mPlace_Line(inPlace.mPlace_Line) {}
+};
+
+/*=============================================================================
+ * morkGlitch: stream place and error comment describing a parsing error
+ */
+
+class morkGlitch {
+ public:
+  morkPlace mGlitch_Place; // place in stream where problem happened
+  const char* mGlitch_Comment; // null-terminated ASCII C string
+
+  // Default construction leaves the place zeroed and no comment.
+  morkGlitch() { mGlitch_Comment = 0; }
+
+  morkGlitch(const morkPlace& inPlace, const char* inComment)
+      : mGlitch_Place(inPlace), mGlitch_Comment(inComment) {}
+};
+
+/*=============================================================================
+ * morkMid: all possible ways needed to express an alias ID in Mork syntax
+ */
+
+/*| morkMid: an abstraction of all the variations we might need to support
+**| in order to present an ID through the parser interface most cheaply and
+**| with minimum transformation away from the original text format.
+**|
+**|| An ID can have one of four forms:
+**| 1) idHex (mMid_Oid.mOid_Id <- idHex)
+**| 2) idHex:^scopeHex (mMid_Oid.mOid_Id <- idHex, mOid_Scope <- scopeHex)
+**| 3) idHex:scopeName (mMid_Oid.mOid_Id <- idHex, mMid_Buf <- scopeName)
+**| 4) columnName (mMid_Buf <- columnName, for columns in cells only)
+**|
+**|| Typically, mMid_Oid.mOid_Id will hold a nonzero integer value for
+**| an ID, but we might have an optional scope specified by either an integer
+**| in hex format, or a string name. (Note that while the first ID can be
+**| scoped variably, any integer ID for a scope is assumed always located in
+**| the same scope, so the second ID need not be disambiguated.)
+**|
+**|| The only time mMid_Oid.mOid_Id is ever zero is when mMid_Buf alone
+**| is nonzero, to indicate an explicit string instead of an alias appeared.
+**| This case happens to make the representation of columns in cells somewhat
+**| easier to represent, since columns can just appear as a string name; and
+**| this unifies those interfaces with row and table APIs expecting IDs.
+**|
+**|| So when the parser passes an instance of morkMid to a subclass, the
+**| mMid_Oid.mOid_Id slot should usually be nonzero. And the other two
+**| slots, mMid_Oid.mOid_Scope and mMid_Buf, might both be zero, or at
+**| most one of them will be nonzero to indicate an explicit scope; the
+**| parser is responsible for ensuring at most one of these is nonzero.
+|*/
+class morkMid {
+ public:
+  mdbOid mMid_Oid; // mOid_Scope is zero when not specified
+  const morkBuf* mMid_Buf; // points to some specific buf subclass
+
+  // Default construction: no scope, no ID (morkId_kMinusOne), no buf.
+  morkMid() {
+    mMid_Oid.mOid_Scope = 0;
+    mMid_Oid.mOid_Id = morkId_kMinusOne;
+    mMid_Buf = 0;
+  }
+
+  // Reset to the no-ID state, but aim mMid_Buf at ioCoil (a morkBuf
+  // subclass) so string content can be accumulated there.
+  void InitMidWithCoil(morkCoil* ioCoil) {
+    mMid_Oid.mOid_Scope = 0;
+    mMid_Oid.mOid_Id = morkId_kMinusOne;
+    mMid_Buf = ioCoil;
+  }
+
+  // Reset all three slots to the unspecified state.
+  void ClearMid() {
+    mMid_Oid.mOid_Scope = 0;
+    mMid_Oid.mOid_Id = morkId_kMinusOne;
+    mMid_Buf = 0;
+  }
+
+  morkMid(const morkMid& other)
+      : mMid_Oid(other.mMid_Oid), mMid_Buf(other.mMid_Buf) {}
+
+  mork_bool HasNoId() const // ID is unspecified?
+  {
+    return (mMid_Oid.mOid_Id == morkId_kMinusOne);
+  }
+
+  mork_bool HasSomeId() const // ID is specified?
+  {
+    return (mMid_Oid.mOid_Id != morkId_kMinusOne);
+  }
+};
+
+/*=============================================================================
+ * morkSpan: start and end stream byte position and stream line count
+ */
+
+class morkSpan {
+ public:
+  morkPlace mSpan_Start;
+  morkPlace mSpan_End;
+
+ public: // methods
+ public: // inlines
+  morkSpan() {} // use inline empty constructor for each place
+
+  // View this span as just its starting place.
+  morkPlace* AsPlace() { return &mSpan_Start; }
+  const morkPlace* AsConstPlace() const { return &mSpan_Start; }
+
+  // Set both endpoints at once.
+  void SetSpan(mork_pos inFromPos, mork_line inFromLine, mork_pos inToPos,
+               mork_line inToLine) {
+    mSpan_Start.SetPlace(inFromPos, inFromLine);
+    mSpan_End.SetPlace(inToPos, inToLine);
+  }
+
+  // setting end, useful to terminate a span using current port span end:
+  void SetEndWithEnd(const morkSpan& inSpan) // end <- span.end
+  {
+    mSpan_End = inSpan.mSpan_End;
+  }
+
+  // setting start, useful to initiate a span using current port span end:
+  void SetStartWithEnd(const morkSpan& inSpan) // start <- span.end
+  {
+    mSpan_Start = inSpan.mSpan_End;
+  }
+
+  // Zero both endpoints.
+  void ClearSpan() {
+    mSpan_Start.mPlace_Pos = 0;
+    mSpan_Start.mPlace_Line = 0;
+    mSpan_End.mPlace_Pos = 0;
+    mSpan_End.mPlace_Line = 0;
+  }
+
+  morkSpan(mork_pos inFromPos, mork_line inFromLine, mork_pos inToPos,
+           mork_line inToLine)
+      : mSpan_Start(inFromPos, inFromLine),
+        mSpan_End(inToPos, inToLine) { /* empty implementation */
+  }
+};
+
+/*=============================================================================
+ * morkParser: for parsing Mork text syntax
+ */
+
+/* parse at least half 0.5K at once */
+#define morkParser_kMinGranularity 512
+/* parse at most 64 K at once */
+#define morkParser_kMaxGranularity (64 * 1024)
+
+#define morkDerived_kParser /*i*/ 0x5073 /* ascii 'Ps' */
+#define morkParser_kTag /*i*/ 0x70417253 /* ascii 'pArS' */
+
+// These are states for the simple parsing virtual machine. Needless to say,
+// these must be distinct, and preferably in a contiguous integer range.
+// Don't change these constants without looking at switch statements in code.
+#define morkParser_kCellState 0 /* cell is tightest scope */
+#define morkParser_kMetaState 1 /* meta is tightest scope */
+#define morkParser_kRowState 2 /* row is tightest scope */
+#define morkParser_kTableState 3 /* table is tightest scope */
+#define morkParser_kDictState 4 /* dict is tightest scope */
+#define morkParser_kPortState 5 /* port is tightest scope */
+
+#define morkParser_kStartState 6 /* parsing has not yet begun */
+#define morkParser_kDoneState 7 /* parsing is complete */
+#define morkParser_kBrokenState 8 /* parsing is to broken to work */
+
+class morkParser /*d*/ : public morkNode {
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected: // protected morkParser members
+  nsIMdbHeap* mParser_Heap; // refcounted heap used for allocation
+  morkStream* mParser_Stream; // refcounted input stream
+
+  mork_u4 mParser_Tag; // must equal morkParser_kTag
+  mork_count mParser_MoreGranularity; // constructor inBytesPerParseSegment
+
+  mork_u4 mParser_State; // state where parser should resume
+
+  // after finding ends of group transactions, we can re-seek the start:
+  mork_pos mParser_GroupContentStartPos; // start of this group
+
+  morkMid mParser_TableMid; // table mid if inside a table
+  morkMid mParser_RowMid; // row mid if inside a row
+  morkMid mParser_CellMid; // cell mid if inside a cell
+  mork_gid mParser_GroupId; // group ID if inside a group
+
+  mork_bool mParser_InPort; // called OnNewPort but not OnPortEnd?
+  mork_bool mParser_InDict; // called OnNewDict but not OnDictEnd?
+  mork_bool mParser_InCell; // called OnNewCell but not OnCellEnd?
+  mork_bool mParser_InMeta; // called OnNewMeta but not OnMetaEnd?
+
+  mork_bool mParser_InPortRow; // called OnNewPortRow but not OnPortRowEnd?
+  mork_bool mParser_InRow; // called OnNewRow but not OnRowEnd?
+  mork_bool mParser_InTable; // called OnNewTable but not OnTableEnd?
+  mork_bool mParser_InGroup; // called OnNewGroup but not OnGroupEnd?
+
+  mork_change mParser_AtomChange; // driven by mParser_Change
+  mork_change mParser_CellChange; // driven by mParser_Change
+  mork_change mParser_RowChange; // driven by mParser_Change
+  mork_change mParser_TableChange; // driven by mParser_Change
+
+  mork_change mParser_Change; // driven by modifier in text
+  mork_bool mParser_IsBroken; // has the parse become broken?
+  mork_bool mParser_IsDone; // has the parse finished?
+  mork_bool mParser_DoMore; // mParser_MoreGranularity not exhausted?
+
+  morkMid mParser_Mid; // current alias being parsed
+  // note that mParser_Mid.mMid_Buf points at mParser_ScopeCoil below:
+
+  // blob coils allocated in mParser_Heap
+  morkCoil mParser_ScopeCoil; // place to accumulate ID scope blobs
+  morkCoil mParser_ValueCoil; // place to accumulate value blobs
+  morkCoil mParser_ColumnCoil; // place to accumulate column blobs
+  morkCoil mParser_StringCoil; // place to accumulate string blobs
+
+  morkSpool mParser_ScopeSpool; // writes to mParser_ScopeCoil
+  morkSpool mParser_ValueSpool; // writes to mParser_ValueCoil
+  morkSpool mParser_ColumnSpool; // writes to mParser_ColumnCoil
+  morkSpool mParser_StringSpool; // writes to mParser_StringCoil
+
+  // yarns allocated in mParser_Heap
+  morkYarn mParser_MidYarn; // place to receive from MidToYarn()
+
+  // span showing current ongoing file position status:
+  morkSpan mParser_PortSpan; // span of current db port file
+
+  // various spans denoting nested subspaces inside the file's port span:
+  morkSpan mParser_GroupSpan; // span of current transaction group
+  morkSpan mParser_DictSpan;
+  morkSpan mParser_AliasSpan;
+  morkSpan mParser_MetaSpan;
+  morkSpan mParser_TableSpan;
+  morkSpan mParser_RowSpan;
+  morkSpan mParser_CellSpan;
+  morkSpan mParser_ColumnSpan;
+  morkSpan mParser_SlotSpan;
+
+ private: // convenience inlines
+  // current byte position == end of the port span
+  mork_pos HerePos() const { return mParser_PortSpan.mSpan_End.mPlace_Pos; }
+
+  void SetHerePos(mork_pos inPos) {
+    mParser_PortSpan.mSpan_End.mPlace_Pos = inPos;
+  }
+
+  void CountLineBreak() { ++mParser_PortSpan.mSpan_End.mPlace_Line; }
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override; // CloseParser() only if open
+  virtual ~morkParser(); // assert that CloseParser() executed earlier
+
+ public: // morkParser construction & destruction
+  morkParser(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+             morkStream* ioStream, // the readonly stream for input bytes
+             mdb_count inBytesPerParseSegment, // target for ParseMore()
+             nsIMdbHeap* ioSlotHeap);
+
+  void CloseParser(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkParser(const morkParser& other);
+  morkParser& operator=(const morkParser& other);
+
+ public: // dynamic type identification
+  mork_bool IsParser() const {
+    return IsNode() && mNode_Derived == morkDerived_kParser;
+  }
+
+  // } ===== end morkNode methods =====
+
+ public: // errors and warnings
+  static void UnexpectedEofError(morkEnv* ev);
+  static void EofInsteadOfHexError(morkEnv* ev);
+  static void ExpectedEqualError(morkEnv* ev);
+  static void ExpectedHexDigitError(morkEnv* ev, int c);
+  static void NonParserTypeError(morkEnv* ev);
+  static void UnexpectedByteInMetaWarning(morkEnv* ev);
+
+ public: // other type methods
+  mork_bool GoodParserTag() const { return mParser_Tag == morkParser_kTag; }
+  void NonGoodParserError(morkEnv* ev);
+  void NonUsableParserError(morkEnv* ev);
+  // call when IsNode() or GoodParserTag() is false
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // in virtual morkParser methods, data flow subclass to parser
+  virtual void MidToYarn(
+      morkEnv* ev,
+      const morkMid& inMid, // typically an alias to concat with strings
+      mdbYarn* outYarn) = 0;
+  // The parser might ask that some aliases be turned into yarns, so they
+  // can be concatenated into longer blobs under some circumstances. This
+  // is an alternative to using a long and complex callback for many parts
+  // for a single cell value.
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // out virtual morkParser methods, data flow parser to subclass
+  // The virtual methods below will be called in a pattern corresponding
+  // to the following grammar isomorphic to the Mork grammar. There should
+  // be no exceptions, so subclasses can rely on seeing an appropriate "end"
+  // method whenever some "new" method has been seen earlier. In the event
+  // that some error occurs that causes content to be flushed, or sudden early
+  // termination of a larger containing entity, we will always call a more
+  // enclosed "end" method before we call an "end" method with greater scope.
+
+  // Note the "mp" prefix stands for "Mork Parser":
+
+  // mp:Start ::= OnNewPort mp:PortItem* OnPortEnd
+  // mp:PortItem ::= mp:Content | mp:Group | OnPortGlitch
+  // mp:Group ::= OnNewGroup mp:GroupItem* mp:GroupEnd
+  // mp:GroupItem ::= mp:Content | OnGroupGlitch
+  // mp:GroupEnd ::= OnGroupCommitEnd | OnGroupAbortEnd
+  // mp:Content ::= mp:PortRow | mp:Dict | mp:Table | mp:Row
+  // mp:PortRow ::= OnNewPortRow mp:RowItem* OnPortRowEnd
+  // mp:Dict ::= OnNewDict mp:DictItem* OnDictEnd
+  // mp:DictItem ::= OnAlias | OnAliasGlitch | mp:Meta | OnDictGlitch
+  // mp:Table ::= OnNewTable mp:TableItem* OnTableEnd
+  // mp:TableItem ::= mp:Row | mp:MetaTable | OnTableGlitch
+  // mp:MetaTable ::= OnNewMeta mp:MetaItem* mp:Row OnMetaEnd
+  // mp:Meta ::= OnNewMeta mp:MetaItem* OnMetaEnd
+  // mp:MetaItem ::= mp:Cell | OnMetaGlitch
+  // mp:Row ::= OnMinusRow? OnNewRow mp:RowItem* OnRowEnd
+  // mp:RowItem ::= mp:Cell | mp:Meta | OnRowGlitch
+  // mp:Cell ::= OnMinusCell? OnNewCell mp:CellItem? OnCellEnd
+  // mp:CellItem ::= mp:Slot | OnCellForm | OnCellGlitch
+  // mp:Slot ::= OnValue | OnValueMid | OnRowMid | OnTableMid
+
+  // Note that in interfaces below, mork_change parameters kAdd and kNil
+  // both mean about the same thing by default. Only kCut is interesting,
+  // because this usually means to remove members instead of adding them.
+
+  virtual void OnNewPort(morkEnv* ev, const morkPlace& inPlace) = 0;
+  virtual void OnPortGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnPortEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnNewGroup(morkEnv* ev, const morkPlace& inPlace,
+                          mork_gid inGid) = 0;
+  virtual void OnGroupGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnGroupCommitEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+  virtual void OnGroupAbortEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnNewPortRow(morkEnv* ev, const morkPlace& inPlace,
+                            const morkMid& inMid, mork_change inChange) = 0;
+  virtual void OnPortRowGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnPortRowEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnNewTable(morkEnv* ev, const morkPlace& inPlace,
+                          const morkMid& inMid, mork_bool inCutAllRows) = 0;
+  virtual void OnTableGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnTableEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnNewMeta(morkEnv* ev, const morkPlace& inPlace) = 0;
+  virtual void OnMetaGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnMetaEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnMinusRow(morkEnv* ev) = 0;
+  virtual void OnNewRow(morkEnv* ev, const morkPlace& inPlace,
+                        const morkMid& inMid, mork_bool inCutAllCols) = 0;
+  virtual void OnRowPos(morkEnv* ev, mork_pos inRowPos) = 0;
+  virtual void OnRowGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnRowEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnNewDict(morkEnv* ev, const morkPlace& inPlace) = 0;
+  virtual void OnDictGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnDictEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnAlias(morkEnv* ev, const morkSpan& inSpan,
+                       const morkMid& inMid) = 0;
+
+  virtual void OnAliasGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+
+  virtual void OnMinusCell(morkEnv* ev) = 0;
+  virtual void OnNewCell(morkEnv* ev, const morkPlace& inPlace,
+                         const morkMid* inMid, const morkBuf* inBuf) = 0;
+  // Exactly one of inMid and inBuf is nil, and the other is non-nil.
+  // When hex ID syntax is used for a column, then inMid is not nil, and
+  // when a naked string names a column, then inBuf is not nil.
+
+  virtual void OnCellGlitch(morkEnv* ev, const morkGlitch& inGlitch) = 0;
+  virtual void OnCellForm(morkEnv* ev, mork_cscode inCharsetFormat) = 0;
+  virtual void OnCellEnd(morkEnv* ev, const morkSpan& inSpan) = 0;
+
+  virtual void OnValue(morkEnv* ev, const morkSpan& inSpan,
+                       const morkBuf& inBuf) = 0;
+
+  virtual void OnValueMid(morkEnv* ev, const morkSpan& inSpan,
+                          const morkMid& inMid) = 0;
+
+  virtual void OnRowMid(morkEnv* ev, const morkSpan& inSpan,
+                        const morkMid& inMid) = 0;
+
+  virtual void OnTableMid(morkEnv* ev, const morkSpan& inSpan,
+                          const morkMid& inMid) = 0;
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected: // protected parser helper methods
+  void ParseChunk(morkEnv* ev); // find parse continuation and resume
+
+  void StartParse(morkEnv* ev); // prepare for parsing
+  void StopParse(morkEnv* ev); // terminate parsing & call needed methods
+
+  int NextChar(morkEnv* ev); // next non-white content
+
+  // per-state handlers dispatched from mParser_State (see state constants):
+  void OnCellState(morkEnv* ev);
+  void OnMetaState(morkEnv* ev);
+  void OnRowState(morkEnv* ev);
+  void OnTableState(morkEnv* ev);
+  void OnDictState(morkEnv* ev);
+  void OnPortState(morkEnv* ev);
+  void OnStartState(morkEnv* ev);
+
+  void ReadCell(morkEnv* ev);
+  void ReadRow(morkEnv* ev, int c);
+  void ReadRowPos(morkEnv* ev);
+  void ReadTable(morkEnv* ev);
+  void ReadTableMeta(morkEnv* ev);
+  void ReadDict(morkEnv* ev);
+  mork_bool ReadContent(morkEnv* ev, mork_bool inInsideGroup);
+  void ReadGroup(morkEnv* ev);
+  mork_bool ReadEndGroupId(morkEnv* ev);
+  mork_bool ReadAt(morkEnv* ev, mork_bool inInsideGroup);
+  mork_bool FindGroupEnd(morkEnv* ev);
+  void ReadMeta(morkEnv* ev, int inEndMeta);
+  void ReadAlias(morkEnv* ev);
+  mork_id ReadHex(morkEnv* ev, int* outNextChar);
+  morkBuf* ReadValue(morkEnv* ev);
+  morkBuf* ReadName(morkEnv* ev, int c);
+  mork_bool ReadMid(morkEnv* ev, morkMid* outMid);
+  void ReadDictForm(morkEnv* ev);
+  void ReadCellForm(morkEnv* ev, int c);
+
+  mork_bool MatchPattern(morkEnv* ev, const char* inPattern);
+
+  void EndSpanOnThisByte(morkEnv* ev, morkSpan* ioSpan);
+  void EndSpanOnLastByte(morkEnv* ev, morkSpan* ioSpan);
+  void StartSpanOnLastByte(morkEnv* ev, morkSpan* ioSpan);
+
+  void StartSpanOnThisByte(morkEnv* ev, morkSpan* ioSpan);
+
+  // void EndSpanOnThisByte(morkEnv* ev, morkSpan* ioSpan)
+  // { MORK_USED_2(ev,ioSpan); }
+
+  // void EndSpanOnLastByte(morkEnv* ev, morkSpan* ioSpan)
+  // { MORK_USED_2(ev,ioSpan); }
+
+  // void StartSpanOnLastByte(morkEnv* ev, morkSpan* ioSpan)
+  // { MORK_USED_2(ev,ioSpan); }
+
+  // void StartSpanOnThisByte(morkEnv* ev, morkSpan* ioSpan)
+  // { MORK_USED_2(ev,ioSpan); }
+
+  int eat_line_break(morkEnv* ev, int inLast);
+  int eat_line_continue(morkEnv* ev); // last char was '\\'
+  int eat_comment(morkEnv* ev); // last char was '/'
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // public non-poly morkParser methods
+  mdb_count ParseMore( // return count of bytes consumed now
+      morkEnv* ev, // context
+      mork_pos* outPos, // current byte pos in the stream afterwards
+      mork_bool* outDone, // is parsing finished?
+      mork_bool* outBroken // is parsing irreparably dead and broken?
+  );
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakParser(morkParser* me, morkEnv* ev, morkParser** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongParser(morkParser* me, morkEnv* ev,
+                               morkParser** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKPARSER_ */
diff --git a/comm/mailnews/db/mork/morkPool.cpp b/comm/mailnews/db/mork/morkPool.cpp
new file mode 100644
index 0000000000..eb7d543395
--- /dev/null
+++ b/comm/mailnews/db/mork/morkPool.cpp
@@ -0,0 +1,483 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKATOM_
+# include "morkAtom.h"
+#endif
+
+#ifndef _MORKHANDLE_
+# include "morkHandle.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+#ifndef _MORKDEQUE_
+# include "morkDeque.h"
+#endif
+
+#ifndef _MORKZONE_
+# include "morkZone.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkPool::CloseMorkNode(
+    morkEnv* ev) // ClosePool() only if open
+{
+  // Standard morkNode close protocol: close at most once, bracketed by
+  // marking the node closing and then shut.
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->ClosePool(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkPool::~morkPool() // assert ClosePool() executed earlier
+{
+  // Destruction only verifies the close protocol already ran.
+  MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkPool::morkPool(const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                   nsIMdbHeap* ioSlotHeap)
+    : morkNode(inUsage, ioHeap),
+      mPool_Heap(ioSlotHeap),
+      mPool_UsedFramesCount(0),
+      mPool_FreeFramesCount(0) {
+  // mPool_Heap is NOT refcounted
+  // mNode_Derived marks successful construction; it stays unset when no
+  // slot heap was supplied, leaving the node non-pool.
+  MORK_ASSERT(ioSlotHeap);
+  if (ioSlotHeap) mNode_Derived = morkDerived_kPool;
+}
+
+/*public non-poly*/
+morkPool::morkPool(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                   nsIMdbHeap* ioSlotHeap)
+    : morkNode(ev, inUsage, ioHeap),
+      mPool_Heap(ioSlotHeap),
+      mPool_UsedFramesCount(0),
+      mPool_FreeFramesCount(0) {
+  // Env-aware variant: reports a nil slot heap through ev rather than
+  // asserting, and only marks the pool derived when ev stays good.
+  if (ioSlotHeap) {
+    // mPool_Heap is NOT refcounted:
+    // nsIMdbHeap_SlotStrongHeap(ioSlotHeap, ev, &mPool_Heap);
+    if (ev->Good()) mNode_Derived = morkDerived_kPool;
+  } else
+    ev->NilPointerError();
+}
+
+/*public non-poly*/ void morkPool::ClosePool(
+    morkEnv* ev) // called by CloseMorkNode();
+{
+  // Frees every handle frame the pool still tracks, both the recycled
+  // free list and the frames currently in use, then marks the node shut.
+  if (this->IsNode()) {
+#ifdef morkZone_CONFIG_ARENA
+#else /*morkZone_CONFIG_ARENA*/
+    // MORK_USED_1(ioZone);
+#endif /*morkZone_CONFIG_ARENA*/
+
+    nsIMdbHeap* heap = mPool_Heap;
+    nsIMdbEnv* mev = ev->AsMdbEnv();
+    morkLink* aLink;
+    morkDeque* d = &mPool_FreeHandleFrames;
+    while ((aLink = d->RemoveFirst()) != 0) heap->Free(mev, aLink);
+
+    // if the pool's closed, get rid of the frames in use too.
+    d = &mPool_UsedHandleFrames;
+    while ((aLink = d->RemoveFirst()) != 0) heap->Free(mev, aLink);
+
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// alloc and free individual instances of handles (inside hand frames):
+// alloc and free individual instances of handles (inside hand frames):
+morkHandleFace* morkPool::NewHandle(morkEnv* ev, mork_size inSize,
+                                    morkZone* ioZone) {
+  // Requests that fit in a morkHandleFrame are served from the recycled
+  // free-frame list when possible; otherwise (or for oversize requests,
+  // which also draw a warning) the block comes from mPool_Heap.
+  void* newBlock = 0;
+  if (inSize <= sizeof(morkHandleFrame)) {
+    morkLink* firstLink = mPool_FreeHandleFrames.RemoveFirst();
+    if (firstLink) {
+      newBlock = firstLink;
+      if (mPool_FreeFramesCount)
+        --mPool_FreeFramesCount;
+      else
+        ev->NewWarning("mPool_FreeFramesCount underflow");
+    } else
+      mPool_Heap->Alloc(ev->AsMdbEnv(), sizeof(morkHandleFrame),
+                        (void**)&newBlock);
+  } else {
+    ev->NewWarning("inSize > sizeof(morkHandleFrame)");
+    mPool_Heap->Alloc(ev->AsMdbEnv(), inSize, (void**)&newBlock);
+  }
+#ifdef morkZone_CONFIG_ARENA
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+#endif /*morkZone_CONFIG_ARENA*/
+
+  return (morkHandleFace*)newBlock;
+}
+
+void morkPool::ZapHandle(morkEnv* ev, morkHandleFace* ioHandle) {
+  // Handles are not freed to the heap here; the frame is recycled onto
+  // the free list for reuse by NewHandle().  A nil handle is ignored.
+  if (ioHandle) {
+    morkLink* handleLink = (morkLink*)ioHandle;
+    mPool_FreeHandleFrames.AddLast(handleLink);
+    ++mPool_FreeFramesCount;
+    // lets free all handles to track down leaks
+    // - uncomment out next 3 lines, comment out above 2
+    // nsIMdbHeap* heap = mPool_Heap;
+    // nsIMdbEnv* mev = ev->AsMdbEnv();
+    // heap->Free(mev, handleLink);
+  }
+}
+
+// alloc and free individual instances of rows:
+// alloc and free individual instances of rows:
+morkRow* morkPool::NewRow(morkEnv* ev,
+                          morkZone* ioZone) // allocate a new row instance
+{
+  // Returns a zero-filled morkRow, or nil on allocation failure.
+  morkRow* newRow = 0;
+
+#ifdef morkZone_CONFIG_ARENA
+  // a zone 'chip' remembers no size, and so cannot be deallocated:
+  newRow = (morkRow*)ioZone->ZoneNewChip(ev, sizeof(morkRow));
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  mPool_Heap->Alloc(ev->AsMdbEnv(), sizeof(morkRow), (void**)&newRow);
+#endif /*morkZone_CONFIG_ARENA*/
+
+  if (newRow) MORK_MEMSET(newRow, 0, sizeof(morkRow));
+
+  return newRow;
+}
+
+void morkPool::ZapRow(morkEnv* ev, morkRow* ioRow,
+                      morkZone* ioZone) // free old row instance
+{
+#ifdef morkZone_CONFIG_ARENA
+  if (!ioRow) ev->NilPointerWarning(); // a zone 'chip' cannot be freed
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  // heap build: return the row's memory to the pool's slot heap
+  if (ioRow) mPool_Heap->Free(ev->AsMdbEnv(), ioRow);
+#endif /*morkZone_CONFIG_ARENA*/
+}
+
+// alloc and free entire vectors of cells (not just one cell at a time)
+// alloc and free entire vectors of cells (not just one cell at a time)
+morkCell* morkPool::NewCells(morkEnv* ev, mork_size inSize, morkZone* ioZone) {
+  // Allocates a zero-filled vector of inSize cells; returns nil when
+  // inSize is zero or allocation fails.
+  morkCell* newCells = 0;
+
+  mork_size size = inSize * sizeof(morkCell);
+  if (size) {
+#ifdef morkZone_CONFIG_ARENA
+    // a zone 'run' knows its size, and can indeed be deallocated:
+    newCells = (morkCell*)ioZone->ZoneNewRun(ev, size);
+#else /*morkZone_CONFIG_ARENA*/
+    MORK_USED_1(ioZone);
+    mPool_Heap->Alloc(ev->AsMdbEnv(), size, (void**)&newCells);
+#endif /*morkZone_CONFIG_ARENA*/
+  }
+
+  // note morkAtom depends on having nil stored in all new mCell_Atom slots:
+  if (newCells) MORK_MEMSET(newCells, 0, size);
+  return newCells;
+}
+
+void morkPool::ZapCells(morkEnv* ev, morkCell* ioVector, mork_size inSize,
+                        morkZone* ioZone) {
+  // Releases a cell vector from NewCells(); a nil vector is ignored.
+  // inSize is unused because both the zone run and the heap remember size.
+  MORK_USED_1(inSize);
+
+  if (ioVector) {
+#ifdef morkZone_CONFIG_ARENA
+    // a zone 'run' knows its size, and can indeed be deallocated:
+    ioZone->ZoneZapRun(ev, ioVector);
+#else /*morkZone_CONFIG_ARENA*/
+    MORK_USED_1(ioZone);
+    mPool_Heap->Free(ev->AsMdbEnv(), ioVector);
+#endif /*morkZone_CONFIG_ARENA*/
+  }
+}
+
+// resize (grow or trim) cell vectors inside a containing row instance
+// resize (grow or trim) cell vectors inside a containing row instance
+mork_bool morkPool::AddRowCells(morkEnv* ev, morkRow* ioRow,
+                                mork_size inNewSize, morkZone* ioZone) {
+  // note strong implementation similarity to morkArray::Grow()
+  // Grows ioRow's cell vector to inNewSize: allocate a larger vector,
+  // bitwise-copy the old cells, swap it in (bumping the row seed), then
+  // release the old vector.  Returns whether the row now holds at least
+  // inNewSize cells.
+
+  MORK_USED_1(ioZone);
+#ifdef morkZone_CONFIG_ARENA
+#else /*morkZone_CONFIG_ARENA*/
+#endif /*morkZone_CONFIG_ARENA*/
+
+  mork_fill fill = ioRow->mRow_Length;
+  if (ev->Good() && fill < inNewSize) // need more cells?
+  {
+    morkCell* newCells = this->NewCells(ev, inNewSize, ioZone);
+    if (newCells) {
+      morkCell* c = newCells; // for iterating during copy
+      morkCell* oldCells = ioRow->mRow_Cells;
+      morkCell* end = oldCells + fill; // copy all the old cells
+      while (oldCells < end) {
+        *c++ = *oldCells++; // bitwise copy each old cell struct
+      }
+      oldCells = ioRow->mRow_Cells;
+      ioRow->mRow_Cells = newCells;
+      ioRow->mRow_Length = (mork_u2)inNewSize;
+      ++ioRow->mRow_Seed;
+
+      if (oldCells) this->ZapCells(ev, oldCells, fill, ioZone);
+    }
+  }
+  return (ev->Good() && ioRow->mRow_Length >= inNewSize);
+}
+
+mork_bool morkPool::CutRowCells(morkEnv* ev, morkRow* ioRow,
+                                mork_size inNewSize, morkZone* ioZone) {
+  // Shrinks ioRow's cell vector to inNewSize, unreferencing the atoms of
+  // every cut cell.  inNewSize of zero drops the whole vector.  Returns
+  // whether the row now holds at most inNewSize cells.
+  MORK_USED_1(ioZone);
+#ifdef morkZone_CONFIG_ARENA
+#else /*morkZone_CONFIG_ARENA*/
+#endif /*morkZone_CONFIG_ARENA*/
+
+  mork_fill fill = ioRow->mRow_Length;
+  if (ev->Good() && fill > inNewSize) // need fewer cells?
+  {
+    if (inNewSize) // want any row cells at all?
+    {
+      morkCell* newCells = this->NewCells(ev, inNewSize, ioZone);
+      if (newCells) {
+        morkCell* saveNewCells = newCells; // Keep newcell pos
+        morkCell* oldCells = ioRow->mRow_Cells;
+        morkCell* oldEnd = oldCells + fill; // one past all old cells
+        morkCell* newEnd = oldCells + inNewSize; // copy only kept old cells
+        while (oldCells < newEnd) {
+          *newCells++ = *oldCells++; // bitwise copy each old cell struct
+        }
+        // unreference atoms held by the cells being cut:
+        while (oldCells < oldEnd) {
+          if (oldCells->mCell_Atom) // need to unref old cell atom?
+            oldCells->SetAtom(ev, (morkAtom*)0, this); // unref cell atom
+          ++oldCells;
+        }
+        oldCells = ioRow->mRow_Cells;
+        ioRow->mRow_Cells = saveNewCells;
+        ioRow->mRow_Length = (mork_u2)inNewSize;
+        ++ioRow->mRow_Seed;
+
+        if (oldCells) this->ZapCells(ev, oldCells, fill, ioZone);
+      }
+    } else // get rid of all row cells
+    {
+      morkCell* oldCells = ioRow->mRow_Cells;
+      ioRow->mRow_Cells = 0;
+      ioRow->mRow_Length = 0;
+      ++ioRow->mRow_Seed;
+
+      if (oldCells) this->ZapCells(ev, oldCells, fill, ioZone);
+    }
+  }
+  return (ev->Good() && ioRow->mRow_Length <= inNewSize);
+}
+
+// alloc & free individual instances of atoms (lots of atom subclasses):
+// alloc & free individual instances of atoms (lots of atom subclasses):
+void morkPool::ZapAtom(morkEnv* ev, morkAtom* ioAtom,
+                       morkZone* ioZone) // any subclass (by kind)
+{
+#ifdef morkZone_CONFIG_ARENA
+  if (!ioAtom) ev->NilPointerWarning(); // a zone 'chip' cannot be freed
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  // heap build: return the atom's memory to the pool's slot heap
+  if (ioAtom) mPool_Heap->Free(ev->AsMdbEnv(), ioAtom);
+#endif /*morkZone_CONFIG_ARENA*/
+}
+
+morkOidAtom* morkPool::NewRowOidAtom(morkEnv* ev, const mdbOid& inOid,
+                                     morkZone* ioZone) {
+  // Allocates an oid atom initialized as a row-oid atom; nil on failure.
+  morkOidAtom* newAtom = 0;
+
+#ifdef morkZone_CONFIG_ARENA
+  // a zone 'chip' remembers no size, and so cannot be deallocated:
+  newAtom = (morkOidAtom*)ioZone->ZoneNewChip(ev, sizeof(morkOidAtom));
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  mPool_Heap->Alloc(ev->AsMdbEnv(), sizeof(morkOidAtom), (void**)&newAtom);
+#endif /*morkZone_CONFIG_ARENA*/
+
+  if (newAtom) newAtom->InitRowOidAtom(ev, inOid);
+  return newAtom;
+}
+
+morkOidAtom* morkPool::NewTableOidAtom(morkEnv* ev, const mdbOid& inOid,
+                                       morkZone* ioZone) {
+  // Allocates an oid atom initialized as a table-oid atom; nil on failure.
+  morkOidAtom* newAtom = 0;
+
+#ifdef morkZone_CONFIG_ARENA
+  // a zone 'chip' remembers no size, and so cannot be deallocated:
+  newAtom = (morkOidAtom*)ioZone->ZoneNewChip(ev, sizeof(morkOidAtom));
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  mPool_Heap->Alloc(ev->AsMdbEnv(), sizeof(morkOidAtom), (void**)&newAtom);
+#endif /*morkZone_CONFIG_ARENA*/
+  if (newAtom) newAtom->InitTableOidAtom(ev, inOid);
+  return newAtom;
+}
+
+morkAtom* morkPool::NewAnonAtom(morkEnv* ev, const morkBuf& inBuf,
+                                mork_cscode inForm, morkZone* ioZone)
+// if inForm is zero, and inBuf.mBuf_Fill is less than 256, then a 'wee'
+// anon atom will be created, and otherwise a 'big' anon atom.
+{
+  morkAtom* newAtom = 0;
+
+  // 'big' is required when a form is present or content exceeds 255 bytes:
+  mork_bool needBig = (inForm || inBuf.mBuf_Fill > 255);
+  mork_size size = (needBig) ? morkBigAnonAtom::SizeForFill(inBuf.mBuf_Fill)
+                             : morkWeeAnonAtom::SizeForFill(inBuf.mBuf_Fill);
+
+#ifdef morkZone_CONFIG_ARENA
+  // a zone 'chip' remembers no size, and so cannot be deallocated:
+  newAtom = (morkAtom*)ioZone->ZoneNewChip(ev, size);
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  mPool_Heap->Alloc(ev->AsMdbEnv(), size, (void**)&newAtom);
+#endif /*morkZone_CONFIG_ARENA*/
+  if (newAtom) {
+    if (needBig)
+      ((morkBigAnonAtom*)newAtom)->InitBigAnonAtom(ev, inBuf, inForm);
+    else
+      ((morkWeeAnonAtom*)newAtom)->InitWeeAnonAtom(ev, inBuf);
+  }
+  return newAtom;
+}
+
+// NewBookAtom: allocate a book (interned, space-registered) atom holding the
+// content of inBuf, bound to atom space ioSpace under atom id inAid.
+// Returns nil on allocation failure.
+morkBookAtom* morkPool::NewBookAtom(morkEnv* ev, const morkBuf& inBuf,
+                                    mork_cscode inForm, morkAtomSpace* ioSpace,
+                                    mork_aid inAid, morkZone* ioZone)
+// if inForm is zero, and inBuf.mBuf_Fill is less than 256, then a 'wee'
+// book atom will be created, and otherwise a 'big' book atom.
+{
+  morkBookAtom* newAtom = 0;
+
+  // a nonzero charset form or a body over 255 bytes forces the 'big' layout:
+  mork_bool needBig = (inForm || inBuf.mBuf_Fill > 255);
+  mork_size size = (needBig) ? morkBigBookAtom::SizeForFill(inBuf.mBuf_Fill)
+                             : morkWeeBookAtom::SizeForFill(inBuf.mBuf_Fill);
+
+#ifdef morkZone_CONFIG_ARENA
+  // a zone 'chip' remembers no size, and so cannot be deallocated:
+  newAtom = (morkBookAtom*)ioZone->ZoneNewChip(ev, size);
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  mPool_Heap->Alloc(ev->AsMdbEnv(), size, (void**)&newAtom);
+#endif /*morkZone_CONFIG_ARENA*/
+  if (newAtom) {
+    if (needBig)
+      ((morkBigBookAtom*)newAtom)
+          ->InitBigBookAtom(ev, inBuf, inForm, ioSpace, inAid);
+    else
+      ((morkWeeBookAtom*)newAtom)->InitWeeBookAtom(ev, inBuf, ioSpace, inAid);
+  }
+  return newAtom;
+}
+
+// NewBookAtomCopy: clone a big book atom into the smallest adequate
+// representation ('wee' when form is zero and body fits in 255 bytes),
+// preserving the source atom's space binding and atom id.
+morkBookAtom* morkPool::NewBookAtomCopy(morkEnv* ev,
+                                        const morkBigBookAtom& inAtom,
+                                        morkZone* ioZone)
+// make the smallest kind of book atom that can hold content in inAtom.
+// The inAtom parameter is often expected to be a staged book atom in
+// the store, which was used to search an atom space for existing atoms.
+{
+  morkBookAtom* newAtom = 0;
+
+  mork_cscode form = inAtom.mBigBookAtom_Form;
+  mork_fill fill = inAtom.mBigBookAtom_Size;
+  mork_bool needBig = (form || fill > 255);
+  mork_size size = (needBig) ? morkBigBookAtom::SizeForFill(fill)
+                             : morkWeeBookAtom::SizeForFill(fill);
+
+#ifdef morkZone_CONFIG_ARENA
+  // a zone 'chip' remembers no size, and so cannot be deallocated:
+  newAtom = (morkBookAtom*)ioZone->ZoneNewChip(ev, size);
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  mPool_Heap->Alloc(ev->AsMdbEnv(), size, (void**)&newAtom);
+#endif /*morkZone_CONFIG_ARENA*/
+  if (newAtom) {
+    // wrap the source body in a morkBuf so init can copy the content:
+    morkBuf buf(inAtom.mBigBookAtom_Body, fill);
+    if (needBig)
+      ((morkBigBookAtom*)newAtom)
+          ->InitBigBookAtom(ev, buf, form, inAtom.mBookAtom_Space,
+                            inAtom.mBookAtom_Id);
+    else
+      ((morkWeeBookAtom*)newAtom)
+          ->InitWeeBookAtom(ev, buf, inAtom.mBookAtom_Space,
+                            inAtom.mBookAtom_Id);
+  }
+  return newAtom;
+}
+
+// NewFarBookAtomCopy: like NewBookAtomCopy, but clones from a far book atom
+// (whose form/size/body live in mFarBookAtom_* slots) into the smallest
+// adequate book-atom representation, preserving space binding and atom id.
+morkBookAtom* morkPool::NewFarBookAtomCopy(morkEnv* ev,
+                                           const morkFarBookAtom& inAtom,
+                                           morkZone* ioZone)
+// make the smallest kind of book atom that can hold content in inAtom.
+// The inAtom parameter is often expected to be a staged book atom in
+// the store, which was used to search an atom space for existing atoms.
+{
+  morkBookAtom* newAtom = 0;
+
+  mork_cscode form = inAtom.mFarBookAtom_Form;
+  mork_fill fill = inAtom.mFarBookAtom_Size;
+  mork_bool needBig = (form || fill > 255);
+  mork_size size = (needBig) ? morkBigBookAtom::SizeForFill(fill)
+                             : morkWeeBookAtom::SizeForFill(fill);
+
+#ifdef morkZone_CONFIG_ARENA
+  // a zone 'chip' remembers no size, and so cannot be deallocated:
+  newAtom = (morkBookAtom*)ioZone->ZoneNewChip(ev, size);
+#else /*morkZone_CONFIG_ARENA*/
+  MORK_USED_1(ioZone);
+  mPool_Heap->Alloc(ev->AsMdbEnv(), size, (void**)&newAtom);
+#endif /*morkZone_CONFIG_ARENA*/
+  if (newAtom) {
+    // wrap the source body in a morkBuf so init can copy the content:
+    morkBuf buf(inAtom.mFarBookAtom_Body, fill);
+    if (needBig)
+      ((morkBigBookAtom*)newAtom)
+          ->InitBigBookAtom(ev, buf, form, inAtom.mBookAtom_Space,
+                            inAtom.mBookAtom_Id);
+    else
+      ((morkWeeBookAtom*)newAtom)
+          ->InitWeeBookAtom(ev, buf, inAtom.mBookAtom_Space,
+                            inAtom.mBookAtom_Id);
+  }
+  return newAtom;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkPool.h b/comm/mailnews/db/mork/morkPool.h
new file mode 100644
index 0000000000..dbad8593b7
--- /dev/null
+++ b/comm/mailnews/db/mork/morkPool.h
@@ -0,0 +1,162 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKPOOL_
+#define _MORKPOOL_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKDEQUE_
+# include "morkDeque.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class morkHandle;
+class morkHandleFrame;
+class morkHandleFace; // just an opaque cookie type
+class morkBigBookAtom;
+class morkFarBookAtom;
+
+#define morkDerived_kPool /*i*/ 0x706C /* ascii 'pl' */
+
+/*| morkPool: a place to manage pools of non-node objects that are memory
+**| managed out of large chunks of space, so that per-object management
+**| space overhead has no significant cost.
+|*/
+class morkPool : public morkNode {
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ public: // state is public because the entire Mork system is private
+  nsIMdbHeap* mPool_Heap; // NON-refcounted heap instance
+
+  morkDeque mPool_Blocks; // linked list of large blocks from heap
+
+  // These two lists contain instances of morkHandleFrame:
+  morkDeque mPool_UsedHandleFrames; // handle frames currently being used
+  morkDeque mPool_FreeHandleFrames; // handle frames currently in free list
+
+  mork_count mPool_UsedFramesCount; // length of mPool_UsedHandleFrames
+  mork_count mPool_FreeFramesCount; // length of mPool_FreeHandleFrames
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev); // ClosePool() only if open
+  virtual ~morkPool(); // assert that ClosePool() executed earlier
+
+ public: // morkPool construction & destruction
+  morkPool(const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+           nsIMdbHeap* ioSlotHeap);
+  morkPool(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+           nsIMdbHeap* ioSlotHeap);
+  void ClosePool(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkPool(const morkPool& other);
+  morkPool& operator=(const morkPool& other);
+
+ public: // dynamic type identification
+  mork_bool IsPool() const {
+    return IsNode() && mNode_Derived == morkDerived_kPool;
+  }
+  // } ===== end morkNode methods =====
+
+ public: // typing
+  void NonPoolTypeError(morkEnv* ev);
+
+ public: // morkNode memory management operators
+  void* operator new(size_t inSize, nsIMdbHeap& ioHeap,
+                     morkEnv* ev) noexcept(true) {
+    return morkNode::MakeNew(inSize, ioHeap, ev);
+  }
+
+  void* operator new(size_t inSize) noexcept(true) {
+    return ::operator new(inSize);
+  }
+
+ public: // other pool methods
+  // alloc and free individual instances of handles (inside hand frames):
+  morkHandleFace* NewHandle(morkEnv* ev, mork_size inSize, morkZone* ioZone);
+  void ZapHandle(morkEnv* ev, morkHandleFace* ioHandle);
+
+  // alloc and free individual instances of rows:
+  morkRow* NewRow(morkEnv* ev, morkZone* ioZone); // alloc new row instance
+  void ZapRow(morkEnv* ev, morkRow* ioRow,
+              morkZone* ioZone); // free old row instance
+
+  // alloc and free entire vectors of cells (not just one cell at a time)
+  morkCell* NewCells(morkEnv* ev, mork_size inSize, morkZone* ioZone);
+  void ZapCells(morkEnv* ev, morkCell* ioVector, mork_size inSize,
+                morkZone* ioZone);
+
+  // resize (grow or trim) cell vectors inside a containing row instance
+  mork_bool AddRowCells(morkEnv* ev, morkRow* ioRow, mork_size inNewSize,
+                        morkZone* ioZone);
+  mork_bool CutRowCells(morkEnv* ev, morkRow* ioRow, mork_size inNewSize,
+                        morkZone* ioZone);
+
+  // alloc & free individual instances of atoms (lots of atom subclasses):
+  void ZapAtom(morkEnv* ev, morkAtom* ioAtom,
+               morkZone* ioZone); // any subclass (by kind)
+
+  morkOidAtom* NewRowOidAtom(morkEnv* ev, const mdbOid& inOid,
+                             morkZone* ioZone);
+  morkOidAtom* NewTableOidAtom(morkEnv* ev, const mdbOid& inOid,
+                               morkZone* ioZone);
+
+  morkAtom* NewAnonAtom(morkEnv* ev, const morkBuf& inBuf, mork_cscode inForm,
+                        morkZone* ioZone);
+  // if inForm is zero, and inBuf.mBuf_Fill is less than 256, then a 'wee'
+  // anon atom will be created, and otherwise a 'big' anon atom.
+
+  morkBookAtom* NewBookAtom(morkEnv* ev, const morkBuf& inBuf,
+                            mork_cscode inForm, morkAtomSpace* ioSpace,
+                            mork_aid inAid, morkZone* ioZone);
+  // if inForm is zero, and inBuf.mBuf_Fill is less than 256, then a 'wee'
+  // book atom will be created, and otherwise a 'big' book atom.
+
+  morkBookAtom* NewBookAtomCopy(morkEnv* ev, const morkBigBookAtom& inAtom,
+                                morkZone* ioZone);
+  // make the smallest kind of book atom that can hold content in inAtom.
+  // The inAtom parameter is often expected to be a staged book atom in
+  // the store, which was used to search an atom space for existing atoms.
+
+  morkBookAtom* NewFarBookAtomCopy(morkEnv* ev, const morkFarBookAtom& inAtom,
+                                   morkZone* ioZone);
+  // make the smallest kind of book atom that can hold content in inAtom.
+  // The inAtom parameter is often expected to be a staged book atom in
+  // the store, which was used to search an atom space for existing atoms.
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakPool(morkPool* me, morkEnv* ev, morkPool** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongPool(morkPool* me, morkEnv* ev, morkPool** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKPOOL_ */
diff --git a/comm/mailnews/db/mork/morkPortTableCursor.cpp b/comm/mailnews/db/mork/morkPortTableCursor.cpp
new file mode 100644
index 0000000000..8f4f62411d
--- /dev/null
+++ b/comm/mailnews/db/mork/morkPortTableCursor.cpp
@@ -0,0 +1,381 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKPORTTABLECURSOR_
+# include "morkPortTableCursor.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// CloseMorkNode: standard morkNode teardown protocol — transition the node
+// through kClosing, run the subclass close method, then mark it kShut.
+// Safe to call repeatedly; does nothing once the node is no longer open.
+/*public virtual*/ void morkPortTableCursor::CloseMorkNode(
+    morkEnv* ev) // ClosePortTableCursor() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->ClosePortTableCursor(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+// Destructor: ensures teardown runs even if the owner never closed the
+// cursor explicitly, using the env remembered at construction (mMorkEnv).
+morkPortTableCursor::~morkPortTableCursor() // ClosePortTableCursor() executed
+                                            // earlier
+{
+  CloseMorkNode(mMorkEnv);
+}
+
+/*public non-poly*/
+// Construct a cursor over tables in ioStore filtered by inRowScope and
+// inTableKind (per the nsIMdbPortTableCursor contract, zero means "all
+// scopes" / "all kinds").  The store is held via a weak slot; SetRowScope()
+// and SetTableKind() prime the space/table iterators.  mNode_Derived is
+// only stamped on success, so IsPortTableCursor() fails for a bad build.
+morkPortTableCursor::morkPortTableCursor(morkEnv* ev, const morkUsage& inUsage,
+                                         nsIMdbHeap* ioHeap, morkStore* ioStore,
+                                         mdb_scope inRowScope,
+                                         mdb_kind inTableKind,
+                                         nsIMdbHeap* ioSlotHeap)
+    : morkCursor(ev, inUsage, ioHeap),
+      mPortTableCursor_Store(0),
+      mPortTableCursor_RowScope((mdb_scope)-1) // we want != inRowScope
+      ,
+      mPortTableCursor_TableKind((mdb_kind)-1) // we want != inTableKind
+      ,
+      mPortTableCursor_LastTable(0) // not refcounted
+      ,
+      mPortTableCursor_RowSpace(0) // strong ref to row space
+      ,
+      mPortTableCursor_TablesDidEnd(morkBool_kFalse),
+      mPortTableCursor_SpacesDidEnd(morkBool_kFalse) {
+  if (ev->Good()) {
+    if (ioStore && ioSlotHeap) {
+      mCursor_Pos = -1;
+      mCursor_Seed = 0; // let the iterator do its own seed handling
+      morkStore::SlotWeakStore(ioStore, ev, &mPortTableCursor_Store);
+
+      // only bother with the kind filter if the scope was accepted:
+      if (this->SetRowScope(ev, inRowScope))
+        this->SetTableKind(ev, inTableKind);
+
+      if (ev->Good()) mNode_Derived = morkDerived_kPortTableCursor;
+    } else
+      ev->NilPointerError();
+  }
+}
+
+// XPCOM boilerplate: AddRef/Release/QueryInterface delegating to morkCursor,
+// additionally exposing the nsIMdbPortTableCursor interface.
+NS_IMPL_ISUPPORTS_INHERITED(morkPortTableCursor, morkCursor,
+                            nsIMdbPortTableCursor)
+
+// CanUsePortTableCursor: common entry guard for the nsIMdb* methods below.
+// Unwraps mev into a morkEnv and type-checks this object; returns the env
+// on success and nil otherwise, with the env's error folded into *outErr.
+// (inMutable is accepted for signature symmetry but not examined here.)
+morkEnv* morkPortTableCursor::CanUsePortTableCursor(nsIMdbEnv* mev,
+                                                    mork_bool inMutable,
+                                                    nsresult* outErr) const {
+  morkEnv* outEnv = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (IsPortTableCursor())
+      outEnv = ev;
+    else
+      NonPortTableCursorTypeError(ev);
+    *outErr = ev->AsErr();
+  }
+  MORK_ASSERT(outEnv);
+  return outEnv;
+}
+
+// ClosePortTableCursor: drop the weak store ref and strong row-space ref,
+// reset cursor position/seed, then close the base cursor and mark shut.
+// Called by CloseMorkNode(); errors if this is not a live morkNode.
+/*public non-poly*/ void morkPortTableCursor::ClosePortTableCursor(
+    morkEnv* ev) {
+  if (this->IsNode()) {
+    mCursor_Pos = -1;
+    mCursor_Seed = 0;
+    mPortTableCursor_LastTable = 0;
+    morkStore::SlotWeakStore((morkStore*)0, ev, &mPortTableCursor_Store);
+    morkRowSpace::SlotStrongRowSpace((morkRowSpace*)0, ev,
+                                     &mPortTableCursor_RowSpace);
+    this->CloseCursor(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Error helper: raised when the weak store slot is unexpectedly nil.
+/*static*/ void morkPortTableCursor::NilCursorStoreError(morkEnv* ev) {
+  ev->NewError("nil mPortTableCursor_Store");
+}
+
+// Error helper: raised when a caller invokes cursor methods on an object
+// whose mNode_Derived tag is not morkDerived_kPortTableCursor.
+/*static*/ void morkPortTableCursor::NonPortTableCursorTypeError(morkEnv* ev) {
+  ev->NewError("non morkPortTableCursor");
+}
+
+// SetRowScope: (re)target the cursor at inRowScope and restart iteration.
+// A nonzero scope pins the cursor to that one row space (SpacesDidEnd stays
+// true so NextSpace() will not advance); a zero scope primes the space-map
+// iterator to walk every scope in the store.  Returns ev->Good().
+mork_bool morkPortTableCursor::SetRowScope(morkEnv* ev, mork_scope inRowScope) {
+  mPortTableCursor_RowScope = inRowScope;
+  mPortTableCursor_LastTable = 0; // restart iteration of space
+
+  // assume exhaustion until a space is actually found below:
+  mPortTableCursor_TableIter.CloseMapIter(ev);
+  mPortTableCursor_TablesDidEnd = morkBool_kTrue;
+  mPortTableCursor_SpacesDidEnd = morkBool_kTrue;
+
+  morkStore* store = mPortTableCursor_Store;
+  if (store) {
+    morkRowSpace* space = mPortTableCursor_RowSpace;
+
+    if (inRowScope) // intend to cover a specific scope only?
+    {
+      space = store->LazyGetRowSpace(ev, inRowScope);
+      morkRowSpace::SlotStrongRowSpace(space, ev, &mPortTableCursor_RowSpace);
+
+      // We want mPortTableCursor_SpacesDidEnd == morkBool_kTrue
+      // to show this is the only space to be covered.
+    } else // prepare space map iter to cover all space scopes
+    {
+      morkRowSpaceMapIter* rsi = &mPortTableCursor_SpaceIter;
+      rsi->InitRowSpaceMapIter(ev, &store->mStore_RowSpaces);
+
+      space = 0;
+      (void)rsi->FirstRowSpace(ev, (mork_scope*)0, &space);
+      morkRowSpace::SlotStrongRowSpace(space, ev, &mPortTableCursor_RowSpace);
+
+      if (space) // found first space in store
+        mPortTableCursor_SpacesDidEnd = morkBool_kFalse;
+    }
+
+    this->init_space_tables_map(ev);
+  } else
+    this->NilCursorStoreError(ev);
+
+  return ev->Good();
+}
+
+// init_space_tables_map: point the table iterator at the current row
+// space's table map, and clear TablesDidEnd on success.  No-op when there
+// is no current space or the env already carries an error.
+void morkPortTableCursor::init_space_tables_map(morkEnv* ev) {
+  morkRowSpace* space = mPortTableCursor_RowSpace;
+  if (space && ev->Good()) {
+    morkTableMapIter* ti = &mPortTableCursor_TableIter;
+    ti->InitTableMapIter(ev, &space->mRowSpace_Tables);
+    if (ev->Good()) mPortTableCursor_TablesDidEnd = morkBool_kFalse;
+  }
+}
+
+// SetTableKind: set the table-kind filter (zero means all kinds) and restart
+// table iteration within the current space.  If no space is selected yet and
+// the scope filter is "all scopes", re-prime spaces via SetRowScope(ev, 0).
+// Returns ev->Good().
+mork_bool morkPortTableCursor::SetTableKind(morkEnv* ev,
+                                            mork_kind inTableKind) {
+  mPortTableCursor_TableKind = inTableKind;
+  mPortTableCursor_LastTable = 0; // restart iteration of space
+
+  mPortTableCursor_TablesDidEnd = morkBool_kTrue;
+
+  morkRowSpace* space = mPortTableCursor_RowSpace;
+  if (!space && mPortTableCursor_RowScope == 0) {
+    this->SetRowScope(ev, 0);
+  }
+  this->init_space_tables_map(ev);
+
+  return ev->Good();
+}
+
+// NextSpace: advance to the next row space when iterating all scopes
+// (RowScope == 0).  Returns the new current space, or nil when spaces are
+// exhausted, when a single fixed scope is in use, or on error.  Updates the
+// strong mPortTableCursor_RowSpace slot and re-primes the table iterator.
+morkRowSpace* morkPortTableCursor::NextSpace(morkEnv* ev) {
+  morkRowSpace* outSpace = 0;
+  mPortTableCursor_LastTable = 0;
+  // assume exhaustion until the iterator yields another space:
+  mPortTableCursor_SpacesDidEnd = morkBool_kTrue;
+  mPortTableCursor_TablesDidEnd = morkBool_kTrue;
+
+  if (!mPortTableCursor_RowScope) // not just one scope?
+  {
+    morkStore* store = mPortTableCursor_Store;
+    if (store) {
+      morkRowSpaceMapIter* rsi = &mPortTableCursor_SpaceIter;
+
+      (void)rsi->NextRowSpace(ev, (mork_scope*)0, &outSpace);
+      morkRowSpace::SlotStrongRowSpace(outSpace, ev,
+                                       &mPortTableCursor_RowSpace);
+
+      if (outSpace) // found next space in store
+      {
+        mPortTableCursor_SpacesDidEnd = morkBool_kFalse;
+
+        this->init_space_tables_map(ev);
+
+        if (ev->Bad()) outSpace = 0;
+      }
+    } else
+      this->NilCursorStoreError(ev);
+  }
+
+  return outSpace;
+}
+
+// NextTable: core iteration — scan the current space's table map for the
+// next table matching the kind filter (kind == 0 matches all), advancing
+// to the next space via NextSpace() whenever the current one is exhausted.
+// Returns nil when every space has been covered or on error.  The matched
+// table is remembered in mPortTableCursor_LastTable (unrefcounted) so the
+// next call resumes with ti->NextTable() instead of ti->FirstTable().
+morkTable* morkPortTableCursor::NextTable(morkEnv* ev) {
+  mork_kind kind = mPortTableCursor_TableKind;
+
+  do // until spaces end, or until we find a table in a space
+  {
+    morkRowSpace* space = mPortTableCursor_RowSpace;
+    if (mPortTableCursor_TablesDidEnd) // current space exhausted?
+      space = this->NextSpace(ev); // go on to the next space
+
+    if (space) // have a space remaining that might hold tables?
+    {
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+      morkTableMapIter* ti = &mPortTableCursor_TableIter;
+      morkTable* table =
+          (mPortTableCursor_LastTable) ? ti->NextTable(ev) : ti->FirstTable(ev);
+
+      for (; table && ev->Good(); table = ti->NextTable(ev))
+#else /*MORK_BEAD_OVER_NODE_MAPS*/
+      mork_tid* key = 0; // ignore keys in table map
+      morkTable* table = 0; // old value table in the map
+      morkTableMapIter* ti = &mPortTableCursor_TableIter;
+      mork_change* c = (mPortTableCursor_LastTable)
+                           ? ti->NextTable(ev, key, &table)
+                           : ti->FirstTable(ev, key, &table);
+
+      for (; c && ev->Good(); c = ti->NextTable(ev, key, &table))
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+      {
+        if (table && table->IsTable()) {
+          if (!kind || kind == table->mTable_Kind) {
+            mPortTableCursor_LastTable = table; // ti->NextTable() hence
+            return table;
+          }
+        } else
+          // NOTE(review): in the non-bead branch 'table' can be nil here,
+          // so this call goes through a nil pointer — presumably only safe
+          // if NonTableTypeWarning never touches 'this'; confirm.
+          table->NonTableTypeWarning(ev);
+      }
+      mPortTableCursor_TablesDidEnd = morkBool_kTrue; // space is done
+      mPortTableCursor_LastTable = 0; // make sure next space starts fresh
+    }
+
+  } while (ev->Good() && !mPortTableCursor_SpacesDidEnd);
+
+  return (morkTable*)0;
+}
+
+// { ----- begin table iteration methods -----
+
+// { ===== begin nsIMdbPortTableCursor methods =====
+
+// { ----- begin attribute methods -----
+NS_IMETHODIMP
+morkPortTableCursor::SetPort(nsIMdbEnv* mev, nsIMdbPort* ioPort) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkPortTableCursor::GetPort(nsIMdbEnv* mev, nsIMdbPort** acqPort) {
+ nsresult outErr = NS_OK;
+ nsIMdbPort* outPort = 0;
+ morkEnv* ev =
+ this->CanUsePortTableCursor(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ if (mPortTableCursor_Store)
+ outPort = mPortTableCursor_Store->AcquireStoreHandle(ev);
+ outErr = ev->AsErr();
+ }
+ if (acqPort) *acqPort = outPort;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkPortTableCursor::SetRowScope(nsIMdbEnv* mev, // sets pos to -1
+ mdb_scope inRowScope) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev =
+ this->CanUsePortTableCursor(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ mCursor_Pos = -1;
+
+ SetRowScope(ev, inRowScope);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkPortTableCursor::GetRowScope(nsIMdbEnv* mev, mdb_scope* outRowScope) {
+ nsresult outErr = NS_OK;
+ mdb_scope rowScope = 0;
+ morkEnv* ev =
+ this->CanUsePortTableCursor(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ rowScope = mPortTableCursor_RowScope;
+ outErr = ev->AsErr();
+ }
+ *outRowScope = rowScope;
+ return outErr;
+}
+// setting row scope to zero iterates over all row scopes in port
+
+NS_IMETHODIMP
+morkPortTableCursor::SetTableKind(nsIMdbEnv* mev, // sets pos to -1
+ mdb_kind inTableKind) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev =
+ this->CanUsePortTableCursor(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ mCursor_Pos = -1;
+
+ SetTableKind(ev, inTableKind);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkPortTableCursor::GetTableKind(nsIMdbEnv* mev, mdb_kind* outTableKind)
+// setting table kind to zero iterates over all table kinds in row scope
+{
+ nsresult outErr = NS_OK;
+ mdb_kind tableKind = 0;
+ morkEnv* ev =
+ this->CanUsePortTableCursor(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ tableKind = mPortTableCursor_TableKind;
+ outErr = ev->AsErr();
+ }
+ *outTableKind = tableKind;
+ return outErr;
+}
+// } ----- end attribute methods -----
+
+// { ----- begin table iteration methods -----
+NS_IMETHODIMP
+morkPortTableCursor::NextTable( // get table at next position in the db
+ nsIMdbEnv* mev, // context
+ nsIMdbTable** acqTable) {
+ nsresult outErr = NS_OK;
+ nsIMdbTable* outTable = 0;
+ morkEnv* ev =
+ CanUsePortTableCursor(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkTable* table = NextTable(ev);
+ if (table && ev->Good()) outTable = table->AcquireTableHandle(ev);
+
+ outErr = ev->AsErr();
+ }
+ if (acqTable) *acqTable = outTable;
+ return outErr;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkPortTableCursor.h b/comm/mailnews/db/mork/morkPortTableCursor.h
new file mode 100644
index 0000000000..2a7bf3d0e9
--- /dev/null
+++ b/comm/mailnews/db/mork/morkPortTableCursor.h
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKPORTTABLECURSOR_
+#define _MORKPORTTABLECURSOR_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class orkinPortTableCursor;
+#define morkDerived_kPortTableCursor /*i*/ 0x7443 /* ascii 'tC' */
+
+class morkPortTableCursor : public morkCursor,
+                            public nsIMdbPortTableCursor { // table iterator
+ public:
+  NS_DECL_ISUPPORTS_INHERITED
+  // public: // slots inherited from morkObject (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+  // morkFactory* mObject_Factory; // weak ref to suite factory
+
+  // mork_seed mCursor_Seed;
+  // mork_pos mCursor_Pos;
+  // mork_bool mCursor_DoFailOnSeedOutOfSync;
+  // mork_u1 mCursor_Pad[ 3 ]; // explicitly pad to u4 alignment
+
+ public: // state is public because the entire Mork system is private
+  // { ----- begin attribute methods -----
+  NS_IMETHOD SetPort(nsIMdbEnv* ev,
+                     nsIMdbPort* ioPort) override; // sets pos to -1
+  NS_IMETHOD GetPort(nsIMdbEnv* ev, nsIMdbPort** acqPort) override;
+
+  NS_IMETHOD SetRowScope(nsIMdbEnv* ev, // sets pos to -1
+                         mdb_scope inRowScope) override;
+  NS_IMETHOD GetRowScope(nsIMdbEnv* ev, mdb_scope* outRowScope) override;
+  // setting row scope to zero iterates over all row scopes in port
+
+  NS_IMETHOD SetTableKind(nsIMdbEnv* ev, // sets pos to -1
+                          mdb_kind inTableKind) override;
+  NS_IMETHOD GetTableKind(nsIMdbEnv* ev, mdb_kind* outTableKind) override;
+  // setting table kind to zero iterates over all table kinds in row scope
+  // } ----- end attribute methods -----
+
+  // { ----- begin table iteration methods -----
+  NS_IMETHOD NextTable( // get table at next position in the db
+      nsIMdbEnv* ev, // context
+      nsIMdbTable** acqTable) override; // the next table in the iteration
+  // } ----- end table iteration methods -----
+  morkStore* mPortTableCursor_Store; // weak ref to store
+
+  mdb_scope mPortTableCursor_RowScope;
+  mdb_kind mPortTableCursor_TableKind;
+
+  // We only care if LastTable is non-nil, so it is not refcounted;
+  // so you must never access table state or methods using LastTable:
+
+  morkTable* mPortTableCursor_LastTable; // nil or last table (no refcount)
+  morkRowSpace* mPortTableCursor_RowSpace; // current space (strong ref)
+
+  morkRowSpaceMapIter mPortTableCursor_SpaceIter; // iter over spaces
+  morkTableMapIter mPortTableCursor_TableIter; // iter over tables
+
+  // these booleans indicate when the table or space iterator is exhausted:
+
+  mork_bool mPortTableCursor_TablesDidEnd; // no more tables?
+  mork_bool mPortTableCursor_SpacesDidEnd; // no more spaces?
+  mork_u1 mPortTableCursor_Pad[2]; // for u4 alignment
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev) override; // ClosePortTableCursor()
+
+ public: // morkPortTableCursor construction & destruction
+  morkPortTableCursor(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                      morkStore* ioStore, mdb_scope inRowScope,
+                      mdb_kind inTableKind, nsIMdbHeap* ioSlotHeap);
+  void ClosePortTableCursor(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkPortTableCursor(const morkPortTableCursor& other);
+  morkPortTableCursor& operator=(const morkPortTableCursor& other);
+
+ public: // dynamic type identification
+  mork_bool IsPortTableCursor() const {
+    return IsNode() && mNode_Derived == morkDerived_kPortTableCursor;
+  }
+  // } ===== end morkNode methods =====
+
+ protected: // utilities
+  virtual ~morkPortTableCursor(); // assert that close executed earlier
+
+  void init_space_tables_map(morkEnv* ev);
+
+ public: // other cursor methods
+  static void NilCursorStoreError(morkEnv* ev);
+  static void NonPortTableCursorTypeError(morkEnv* ev);
+
+  morkRowSpace* NextSpace(morkEnv* ev);
+  morkTable* NextTable(morkEnv* ev);
+
+  mork_bool SetRowScope(morkEnv* ev, mork_scope inRowScope);
+  mork_bool SetTableKind(morkEnv* ev, mork_kind inTableKind);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakPortTableCursor(morkPortTableCursor* me, morkEnv* ev,
+                                      morkPortTableCursor** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongPortTableCursor(morkPortTableCursor* me, morkEnv* ev,
+                                        morkPortTableCursor** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKPORTTABLECURSOR_ */
diff --git a/comm/mailnews/db/mork/morkProbeMap.cpp b/comm/mailnews/db/mork/morkProbeMap.cpp
new file mode 100644
index 0000000000..6c9c3ecb6c
--- /dev/null
+++ b/comm/mailnews/db/mork/morkProbeMap.cpp
@@ -0,0 +1,1107 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+// This code is a port to NS Mork from public domain Mithril C++ sources.
+// Note many code comments here come verbatim from cut-and-pasted Mithril.
+// In many places, code is identical; Mithril versions stay public domain.
+// Changes in porting are mainly class type and scalar type name changes.
+
+#include "nscore.h"
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKPROBEMAP_
+# include "morkProbeMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+/*============================================================================*/
+/* morkMapScratch */
+
+// halt_map_scratch: release the scratch key/val arrays (if any) back to the
+// heap recorded in this scratch block. NOTE(review): the freed pointers are
+// not nulled here, so a scratch must not be reused without re-initialization.
+void morkMapScratch::halt_map_scratch(morkEnv* ev) {
+  nsIMdbHeap* heap = sMapScratch_Heap;
+
+  if (heap) {
+    if (sMapScratch_Keys) heap->Free(ev->AsMdbEnv(), sMapScratch_Keys);
+    if (sMapScratch_Vals) heap->Free(ev->AsMdbEnv(), sMapScratch_Vals);
+  }
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*============================================================================*/
+/* morkProbeMap */
+
+// Error-reporting helpers: each records a fixed named error on the morkEnv.
+void morkProbeMap::ProbeMapBadTagError(morkEnv* ev) const {
+  ev->NewError("bad sProbeMap_Tag");
+}
+
+void morkProbeMap::WrapWithNoVoidSlotError(morkEnv* ev) const {
+  ev->NewError("wrap without void morkProbeMap slot");
+}
+
+// NOTE(review): message text reads oddly; grow_probe_map() raises this when
+// the post-grow invariant maxFill > sMap_Fill does not hold.
+void morkProbeMap::GrowFailsMaxFillError(morkEnv* ev) const {
+  ev->NewError("grow fails morkEnv > sMap_Fill");
+}
+
+void morkProbeMap::MapKeyIsNotIPError(morkEnv* ev) const {
+  ev->NewError("not sMap_KeyIsIP");
+}
+
+void morkProbeMap::MapValIsNotIPError(morkEnv* ev) const {
+  ev->NewError("not sMap_ValIsIP");
+}
+
+/*| rehash_old_map: move every member association out of the old arrays held
+**| in ioScratch into the (larger) arrays now installed in the map's slots,
+**| re-probing each non-nil key into its new home bucket. Also double-checks
+**| the recorded sMap_Fill against the number of members actually moved.
+|*/
+void morkProbeMap::rehash_old_map(morkEnv* ev, morkMapScratch* ioScratch) {
+  mork_size keySize = sMap_KeySize;  // size of every key bucket
+  mork_size valSize = sMap_ValSize;  // size of every associated value
+
+  mork_count slots = sMap_Slots;  // number of new buckets
+  mork_u1* keys = sMap_Keys;      // destination for rehashed keys
+  mork_u1* vals = sMap_Vals;      // destination for any copied values
+
+  mork_bool keyIsIP = (keys && keySize == sizeof(mork_ip) && sMap_KeyIsIP);
+  mork_bool valIsIP = (vals && valSize == sizeof(mork_ip) && sMap_ValIsIP);
+
+  mork_count oldSlots = ioScratch->sMapScratch_Slots;  // sMap_Slots
+  mork_u1* oldKeys = ioScratch->sMapScratch_Keys;      // sMap_Keys
+  mork_u1* oldVals = ioScratch->sMapScratch_Vals;      // sMap_Vals
+  mork_u1* firstOldKey = oldKeys;  // remember array start for index math
+  mork_u1* end = oldKeys + (keySize * oldSlots);  // one byte past last key
+
+  mork_fill fill = 0;  // let's count the actual fill for a double check
+
+  while (oldKeys < end)  // another old key bucket to rehash if non-nil?
+  {
+    if (!this->ProbeMapIsKeyNil(ev, oldKeys))  // need to rehash?
+    {
+      ++fill;  // this had better match sMap_Fill when we are all done
+      mork_u4 hash = this->ProbeMapHashMapKey(ev, oldKeys);
+
+      mork_pos i = hash % slots;  // target hash bucket
+      mork_pos startPos = i;      // remember start to detect
+
+      mork_u1* k = keys + (i * keySize);
+      while (!this->ProbeMapIsKeyNil(ev, k)) {
+        if (++i >=
+            (mork_pos)slots)  // advanced past end? need to wrap around now?
+          i = 0;  // wrap around to first slot in map's hash table
+
+        if (i == startPos)  // no void slots were found anywhere in map?
+        {
+          this->WrapWithNoVoidSlotError(ev);  // should never happen
+          return;  // this is bad, and we can't go on with the rehash
+        }
+        k = keys + (i * keySize);
+      }
+      if (keyIsIP)  // int special case?
+        *((mork_ip*)k) = *((const mork_ip*)oldKeys);  // fast bitwise copy
+      else
+        MORK_MEMCPY(k, oldKeys, keySize);  // slow bitwise copy
+
+      if (oldVals)  // need to copy values as well?
+      {
+        mork_u1* v = vals + (i * valSize);  // new value slot at new index i
+        // BUGFIX: the old value lives at the OLD slot index of the key we
+        // are moving, not at the new hash position i; indexing oldVals by
+        // i read the wrong (possibly out-of-bounds) old value slot.
+        mork_pos oldIx = (mork_pos)((oldKeys - firstOldKey) / keySize);
+        mork_u1* ov = oldVals + (oldIx * valSize);
+        if (valIsIP)  // int special case?
+          *((mork_ip*)v) = *((const mork_ip*)ov);  // fast bitwise copy
+        else
+          MORK_MEMCPY(v, ov, valSize);  // slow bitwise copy
+      }
+    }
+    oldKeys += keySize;  // advance to next key bucket in old map
+  }
+  if (fill != sMap_Fill)  // is the recorded value of sMap_Fill wrong?
+  {
+    ev->NewWarning("fill != sMap_Fill");
+    sMap_Fill = fill;
+  }
+}
+
+// grow_probe_map: enlarge the hash arrays by ~25% plus one, rehash every old
+// member into the new arrays, and recompute the max-fill threshold (keeping
+// roughly 1/7 of slots empty). On rehash failure the old arrays are swapped
+// back in via revert_map(). Returns ev->Good() as the success indication.
+// NOTE(review): the else branch reports OutOfMemoryError when sMap_Heap is
+// nil -- a nil-heap error might be the clearer diagnostic; confirm intent.
+mork_bool morkProbeMap::grow_probe_map(morkEnv* ev) {
+  if (sMap_Heap)  // can we grow the map?
+  {
+    mork_num newSlots = ((sMap_Slots * 4) / 3) + 1;  // +25%
+    morkMapScratch old;  // a place to temporarily hold all the old arrays
+    if (this->new_slots(ev, &old, newSlots))  // have more?
+    {
+      ++sMap_Seed;  // note the map has changed
+      this->rehash_old_map(ev, &old);
+
+      if (ev->Good()) {
+        mork_count slots = sMap_Slots;
+        mork_num emptyReserve = (slots / 7) + 1;   // keep this many empty
+        mork_fill maxFill = slots - emptyReserve;  // new max occupancy
+        if (maxFill > sMap_Fill)  // new max is bigger than old occupancy?
+          sProbeMap_MaxFill = maxFill;  // we can install new max for fill
+        else
+          this->GrowFailsMaxFillError(ev);  // we have invariant failure
+      }
+
+      if (ev->Bad())  // rehash failed? need to revert map to last state?
+        this->revert_map(ev, &old);  // swap the vectors back again
+
+      old.halt_map_scratch(ev);  // remember to free the old arrays
+    }
+  } else
+    ev->OutOfMemoryError();
+
+  return ev->Good();
+}
+
+// revert_map: undo a failed grow by exchanging this map's current arrays
+// with the old arrays saved in ioScratch, restoring the pre-grow state.
+void morkProbeMap::revert_map(morkEnv* ev, morkMapScratch* ioScratch) {
+  mork_count swapSlots = sMap_Slots;
+  mork_u1* swapKeys = sMap_Keys;
+  mork_u1* swapVals = sMap_Vals;
+
+  sMap_Slots = ioScratch->sMapScratch_Slots;
+  sMap_Keys = ioScratch->sMapScratch_Keys;
+  sMap_Vals = ioScratch->sMapScratch_Vals;
+
+  ioScratch->sMapScratch_Slots = swapSlots;
+  ioScratch->sMapScratch_Keys = swapKeys;
+  ioScratch->sMapScratch_Vals = swapVals;
+}
+
+// put_probe_kv: store (inAppKey, inAppVal) into bucket inPos. mork_ip-sized
+// slots are copied inline; otherwise storage is delegated to the virtual
+// ProbeMapPushIn() in case 'app' and 'map' formats differ. A nil inAppKey is
+// reported as an error, but the value may already have been written and the
+// growth check below still runs. Grows the map once fill exceeds max fill.
+void morkProbeMap::put_probe_kv(morkEnv* ev, const void* inAppKey,
+                                const void* inAppVal, mork_pos inPos) {
+  mork_u1* mapVal = 0;
+  mork_u1* mapKey = 0;
+
+  mork_num valSize = sMap_ValSize;
+  if (valSize && inAppVal)  // map holds values? caller sends value?
+  {
+    mork_u1* val = sMap_Vals + (valSize * inPos);
+    if (valSize == sizeof(mork_ip) && sMap_ValIsIP)  // int special case?
+      *((mork_ip*)val) = *((const mork_ip*)inAppVal);
+    else
+      mapVal = val;  // show possible need to call ProbeMapPushIn()
+  }
+  if (inAppKey)  // caller sends the key?
+  {
+    mork_num keySize = sMap_KeySize;
+    mork_u1* key = sMap_Keys + (keySize * inPos);
+    if (keySize == sizeof(mork_ip) && sMap_KeyIsIP)  // int special case?
+      *((mork_ip*)key) = *((const mork_ip*)inAppKey);
+    else
+      mapKey = key;  // show possible need to call ProbeMapPushIn()
+  } else
+    ev->NilPointerError();
+
+  if ((inAppVal && mapVal) || (inAppKey && mapKey))
+    this->ProbeMapPushIn(ev, inAppKey, inAppVal, mapKey, mapVal);
+
+  if (sMap_Fill > sProbeMap_MaxFill) this->grow_probe_map(ev);
+}
+
+// get_probe_kv: copy the (key, val) stored in bucket inPos out through
+// outAppKey/outAppVal (either may be nil to skip it). mork_ip-sized slots
+// are copied inline; otherwise extraction is delegated to the virtual
+// ProbeMapPullOut() in case 'app' and 'map' formats differ.
+void morkProbeMap::get_probe_kv(morkEnv* ev, void* outAppKey, void* outAppVal,
+                                mork_pos inPos) const {
+  const mork_u1* mapVal = 0;
+  const mork_u1* mapKey = 0;
+
+  mork_num valSize = sMap_ValSize;
+  if (valSize && outAppVal)  // map holds values? caller wants value?
+  {
+    const mork_u1* val = sMap_Vals + (valSize * inPos);
+    if (valSize == sizeof(mork_ip) && sMap_ValIsIP)  // int special case?
+      *((mork_ip*)outAppVal) = *((const mork_ip*)val);
+    else
+      mapVal = val;  // show possible need to call ProbeMapPullOut()
+  }
+  if (outAppKey)  // caller wants the key?
+  {
+    mork_num keySize = sMap_KeySize;
+    const mork_u1* key = sMap_Keys + (keySize * inPos);
+    if (keySize == sizeof(mork_ip) && sMap_KeyIsIP)  // int special case?
+      *((mork_ip*)outAppKey) = *((const mork_ip*)key);
+    else
+      mapKey = key;  // show possible need to call ProbeMapPullOut()
+  }
+  if ((outAppVal && mapVal) || (outAppKey && mapKey))
+    this->ProbeMapPullOut(ev, mapKey, mapVal, outAppKey, outAppVal);
+}
+
+// find_key_pos: linear-probe from bucket (inHash % slots) until MapTest()
+// reports either a hit (key already present) or a void slot (first free
+// bucket where the key could be stored); *outPos receives that bucket.
+// Returns kMiss only if the probe wraps all the way around without finding
+// a void slot, which is reported as WrapWithNoVoidSlotError.
+mork_test morkProbeMap::find_key_pos(morkEnv* ev, const void* inAppKey,
+                                     mork_u4 inHash, mork_pos* outPos) const {
+  mork_u1* k = sMap_Keys;  // array of keys, each of size sMap_KeySize
+  mork_num size = sMap_KeySize;  // number of bytes in each key
+  mork_count slots = sMap_Slots;  // total number of key buckets
+  mork_pos i = inHash % slots;    // target hash bucket
+  mork_pos startPos = i;          // remember start to detect
+
+  mork_test outTest = this->MapTest(ev, k + (i * size), inAppKey);
+  while (outTest == morkTest_kMiss) {
+    if (++i >=
+        (mork_pos)slots)  // advancing goes beyond end? need to wrap around now?
+      i = 0;  // wrap around to first slot in map's hash table
+
+    if (i == startPos)  // no void slots were found anywhere in map?
+    {
+      this->WrapWithNoVoidSlotError(ev);  // should never happen
+      break;  // end loop on kMiss; note caller expects either kVoid or kHit
+    }
+    outTest = this->MapTest(ev, k + (i * size), inAppKey);
+  }
+  *outPos = i;
+
+  return outTest;
+}
+
+// probe_map_lazy_init: perform the deferred clearing of the key array that
+// the constructor could not do (virtual ProbeMapClearKey() is not callable
+// from a constructor). Called by MapAtPut()/MapAt()/iterators before first
+// use. NOTE(review): sProbeMap_LazyClearOnAdd is reset unconditionally, even
+// when the guard above was false (e.g. sMap_Fill != 0) -- confirm intended.
+void morkProbeMap::probe_map_lazy_init(morkEnv* ev) {
+  if (this->need_lazy_init() && sMap_Fill == 0)  // pending lazy action?
+  {
+    // The constructor cannot successfully call virtual ProbeMapClearKey(),
+    // so we lazily do so now, when we add the first member to the map.
+
+    mork_u1* keys = sMap_Keys;
+    if (keys)  // okay to call lazy virtual clear method on new map keys?
+    {
+      if (sProbeMap_ZeroIsClearKey)  // zero is good enough to clear keys?
+      {
+        mork_num keyVolume = sMap_Slots * sMap_KeySize;
+        if (keyVolume) MORK_MEMSET(keys, 0, keyVolume);
+      } else
+        this->ProbeMapClearKey(ev, keys, sMap_Slots);
+    } else
+      this->MapNilKeysError(ev);
+  }
+  sProbeMap_LazyClearOnAdd = 0;  // don't do this ever again
+}
+
+/*| MapAtPut: associate inAppVal with inAppKey in the map, returning true
+**| when an existing association was replaced (copying the old pair out
+**| through outAppKey/outAppVal when those are non-nil, before clobbering).
+**| Lazily clears the key array on first use; put_probe_kv() grows the map
+**| when occupancy passes sProbeMap_MaxFill.
+|*/
+mork_bool morkProbeMap::MapAtPut(morkEnv* ev, const void* inAppKey,
+                                 const void* inAppVal, void* outAppKey,
+                                 void* outAppVal) {
+  mork_bool outPut = morkBool_kFalse;
+
+  if (this->GoodProbeMap()) /* looks good? */
+  {
+    if (this->need_lazy_init() && sMap_Fill == 0)  // pending lazy action?
+      this->probe_map_lazy_init(ev);
+
+    if (ev->Good()) {
+      mork_pos slotPos = 0;
+      mork_u4 hash = this->MapHash(ev, inAppKey);
+      mork_test test = this->find_key_pos(ev, inAppKey, hash, &slotPos);
+      outPut = (test == morkTest_kHit);
+
+      if (outPut)  // replacing an old assoc? no change in member count?
+      {
+        if (outAppKey || outAppVal) /* copy old before clobber? */
+          this->get_probe_kv(ev, outAppKey, outAppVal, slotPos);
+      }
+
+      if (test != morkTest_kMiss) /* found slot to hold new assoc? */
+      {
+        // BUGFIX: count the new member only when the assoc is actually
+        // stored; previously sMap_Fill was incremented even on a kMiss
+        // (full-wrap error) where nothing was written, corrupting fill.
+        if (!outPut)   // adding a new assoc increases membership by one
+          ++sMap_Fill; /* one more member in the collection */
+
+        ++sMap_Seed; /* note the map has changed */
+        this->put_probe_kv(ev, inAppKey, inAppVal, slotPos);
+      }
+    }
+  } else
+    this->ProbeMapBadTagError(ev);
+
+  return outPut;
+}
+
+// MapAt: look up inAppKey in the map. On a hit, copy the stored (key, val)
+// out through outAppKey/outAppVal (either may be nil) and return true;
+// otherwise return false. Performs the lazy key-array clear on first use.
+mork_bool morkProbeMap::MapAt(morkEnv* ev, const void* inAppKey,
+                              void* outAppKey, void* outAppVal) {
+  if (this->GoodProbeMap()) /* looks good? */
+  {
+    if (this->need_lazy_init() && sMap_Fill == 0)  // pending lazy action?
+      this->probe_map_lazy_init(ev);
+
+    mork_pos slotPos = 0;
+    mork_u4 hash = this->MapHash(ev, inAppKey);
+    mork_test test = this->find_key_pos(ev, inAppKey, hash, &slotPos);
+    if (test == morkTest_kHit) /* found an assoc pair for inAppKey? */
+    {
+      this->get_probe_kv(ev, outAppKey, outAppVal, slotPos);
+      return morkBool_kTrue;
+    }
+  } else
+    this->ProbeMapBadTagError(ev);
+
+  return morkBool_kFalse;
+}
+
+/*| MapCutAll: remove every association from the map, returning how many
+**| members were cut. Key buckets are reset to their 'nil' representation
+**| so later probes cannot hit stale keys.
+|*/
+mork_num morkProbeMap::MapCutAll(morkEnv* ev) {
+  mork_num outCutAll = 0;
+
+  if (this->GoodProbeMap()) /* looks good? */
+  {
+    outCutAll = sMap_Fill; /* number of members cut, which is all of them */
+
+    if (sMap_Keys) {
+      if (sProbeMap_ZeroIsClearKey)  // zero bytes represent a nil key?
+      {
+        // BUGFIX: zero-is-clear maps previously skipped clearing entirely,
+        // leaving stale keys in the buckets while sMap_Fill claimed zero
+        // members -- cut keys could still be found by MapAt().
+        mork_num keyVolume = sMap_Slots * sMap_KeySize;
+        if (keyVolume) MORK_MEMSET(sMap_Keys, 0, keyVolume);
+      } else
+        this->ProbeMapClearKey(ev, sMap_Keys, sMap_Slots);
+    }
+
+    sMap_Fill = 0; /* map now has no members */
+  } else
+    this->ProbeMapBadTagError(ev);
+
+  return outCutAll;
+}
+
+// { ===== node interface =====
+
+/*virtual*/
+// Destructor only asserts that CloseProbeMap()/NodeStop() already ran and
+// released the arrays; it frees nothing itself.
+morkProbeMap::~morkProbeMap()  // assert NodeStop() finished earlier
+{
+  MORK_ASSERT(sMap_Keys == 0);
+  MORK_ASSERT(sProbeMap_Tag == 0);
+}
+
+// CloseMorkNode: standard morkNode close protocol -- close the probe map
+// exactly once, bracketing the close with MarkClosing()/MarkShut().
+/*public virtual*/ void morkProbeMap::CloseMorkNode(
+    morkEnv* ev)  // CloseMap() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseProbeMap(ev);
+    this->MarkShut();
+  }
+}
+
+// CloseProbeMap: free the key/val arrays back to the map heap, zero the map
+// tag and max fill, and close the underlying node. NOTE(review): sMap_Keys
+// and sMap_Vals are nulled both inside the heap branch and again after it --
+// the second pair of assignments is redundant but harmless.
+void morkProbeMap::CloseProbeMap(morkEnv* ev) {
+  if (this->IsNode()) {
+    nsIMdbHeap* heap = sMap_Heap;
+    if (heap)  // able to free map arrays?
+    {
+      void* block = sMap_Keys;
+      if (block) {
+        heap->Free(ev->AsMdbEnv(), block);
+        sMap_Keys = 0;
+      }
+
+      block = sMap_Vals;
+      if (block) {
+        heap->Free(ev->AsMdbEnv(), block);
+        sMap_Vals = 0;
+      }
+    }
+    sMap_Keys = 0;
+    sMap_Vals = 0;
+
+    this->CloseNode(ev);
+    sProbeMap_Tag = 0;
+    sProbeMap_MaxFill = 0;
+
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// clear_alloc: allocate inSize bytes from the map heap and zero-fill them.
+// Returns nil on failure; a nil heap is reported as NilPointerError, while
+// an Alloc failure is assumed to be reported by the heap itself.
+void* morkProbeMap::clear_alloc(morkEnv* ev, mork_size inSize) {
+  void* p = 0;
+  nsIMdbHeap* heap = sMap_Heap;
+  if (heap) {
+    if (NS_SUCCEEDED(heap->Alloc(ev->AsMdbEnv(), inSize, (void**)&p)) && p) {
+      MORK_MEMSET(p, 0, inSize);
+      return p;
+    }
+  } else
+    ev->NilPointerError();
+
+  return (void*)0;
+}
+
+/*| map_new_keys: allocate an array of inSlots new keys filled with zero.
+**| Returns nil on allocation failure (see clear_alloc).
+**| (cf IronDoc's FeHashTable_new_keys())
+|*/
+mork_u1* morkProbeMap::map_new_keys(morkEnv* ev, mork_num inSlots) {
+  mork_num size = inSlots * sMap_KeySize;
+  return (mork_u1*)this->clear_alloc(ev, size);
+}
+
+/*| map_new_vals: allocate an array of inSlots new values filled with zero.
+**| When values are zero sized, we just return a null pointer.
+**| (Callers must therefore treat a nil result as success when
+**| sMap_ValSize is zero -- see new_slots()'s okayValues check.)
+**|
+**| (cf IronDoc's FeHashTable_new_values())
+|*/
+mork_u1* morkProbeMap::map_new_vals(morkEnv* ev, mork_num inSlots) {
+  mork_u1* values = 0;
+  mork_num size = inSlots * sMap_ValSize;
+  if (size) values = (mork_u1*)this->clear_alloc(ev, size);
+  return values;
+}
+
+// Diagnostic helpers: record named errors or warnings on the morkEnv.
+void morkProbeMap::MapSeedOutOfSyncError(morkEnv* ev) {
+  ev->NewError("sMap_Seed out of sync");
+}
+
+void morkProbeMap::MapFillUnderflowWarning(morkEnv* ev) {
+  ev->NewWarning("sMap_Fill underflow");
+}
+
+void morkProbeMap::MapNilKeysError(morkEnv* ev) {
+  ev->NewError("nil sMap_Keys");
+}
+
+void morkProbeMap::MapZeroKeySizeError(morkEnv* ev) {
+  ev->NewError("zero sMap_KeySize");
+}
+
+/*static*/
+// Probe maps do not support cutting individual members.
+void morkProbeMap::ProbeMapCutError(morkEnv* ev) {
+  ev->NewError("morkProbeMap cannot cut");
+}
+
+// init_probe_map: clamp the requested capacity to [7, 128K] slots, allocate
+// the arrays via new_slots(), tag the map on success, and compute the
+// max-fill threshold (keep roughly 1/7 of slots empty). The 'old' scratch
+// is only zeroed, not halted: on the constructor path the previous arrays
+// are nil, so there is nothing to free -- TODO confirm no other caller
+// reaches here with live arrays.
+void morkProbeMap::init_probe_map(morkEnv* ev, mork_size inSlots) {
+  // Note we cannot successfully call virtual ProbeMapClearKey() when we
+  // call init_probe_map() inside the constructor; so we leave this problem
+  // to the caller. (The constructor will call ProbeMapClearKey() later
+  // after setting a suitable lazy flag to show this action is pending.)
+
+  if (ev->Good()) {
+    morkMapScratch old;
+
+    if (inSlots < 7)  // capacity too small?
+      inSlots = 7;    // increase to reasonable minimum
+    else if (inSlots > (128 * 1024))  // requested capacity too big?
+      inSlots = (128 * 1024);         // decrease to reasonable maximum
+
+    if (this->new_slots(ev, &old, inSlots)) sProbeMap_Tag = morkProbeMap_kTag;
+
+    mork_count slots = sMap_Slots;
+    mork_num emptyReserve = (slots / 7) + 1;  // keep this many empty
+    sProbeMap_MaxFill = slots - emptyReserve;
+
+    MORK_MEMSET(&old, 0, sizeof(morkMapScratch));  // don't bother halting
+  }
+}
+
+// new_slots: allocate fresh key/val arrays of inSlots buckets. On success,
+// the map's current arrays are parked in *old (for rehash or revert) and
+// replaced by the new ones; on partial failure every new allocation is
+// freed, *old is zeroed, and false is returned.
+mork_bool morkProbeMap::new_slots(morkEnv* ev, morkMapScratch* old,
+                                  mork_num inSlots) {
+  mork_bool outNew = morkBool_kFalse;
+
+  // Note we cannot successfully call virtual ProbeMapClearKey() when we
+  // call new_slots() inside the constructor; so we leave this problem
+  // to the caller. (The constructor will call ProbeMapClearKey() later
+  // after setting a suitable lazy flag to show this action is pending.)
+
+  // allocate every new array before we continue:
+  mork_u1* newKeys = this->map_new_keys(ev, inSlots);
+  mork_u1* newVals = this->map_new_vals(ev, inSlots);
+
+  // okay for newVals to be null when values are zero sized?
+  mork_bool okayValues = (newVals || !sMap_ValSize);
+
+  if (newKeys && okayValues) {
+    outNew = morkBool_kTrue;  // we created every array needed
+
+    // init mapScratch using slots from current map:
+    old->sMapScratch_Heap = sMap_Heap;
+
+    old->sMapScratch_Slots = sMap_Slots;
+    old->sMapScratch_Keys = sMap_Keys;
+    old->sMapScratch_Vals = sMap_Vals;
+
+    // replace all map array slots using the newly allocated members:
+    ++sMap_Seed;  // the map has changed
+    sMap_Keys = newKeys;
+    sMap_Vals = newVals;
+    sMap_Slots = inSlots;
+  } else  // free any allocations if only partially successful
+  {
+    // note: allocations can only succeed when sMap_Heap is non-nil,
+    // so heap is safe to use whenever newKeys/newVals are non-nil here
+    nsIMdbHeap* heap = sMap_Heap;
+    if (newKeys) heap->Free(ev->AsMdbEnv(), newKeys);
+    if (newVals) heap->Free(ev->AsMdbEnv(), newVals);
+
+    MORK_MEMSET(old, 0, sizeof(morkMapScratch));  // zap scratch space
+  }
+
+  return outNew;
+}
+
+// clear_probe_map: reset every member slot of this map to its empty state,
+// then install ioMapHeap as the map heap (reporting an error if it is nil).
+void morkProbeMap::clear_probe_map(morkEnv* ev, nsIMdbHeap* ioMapHeap) {
+  sProbeMap_Tag = 0;
+  sProbeMap_MaxFill = 0;
+
+  sMap_Seed = 0;
+  sMap_Fill = 0;
+  sMap_Slots = 0;
+  sMap_Keys = 0;
+  sMap_Vals = 0;
+
+  sMap_Heap = ioMapHeap;
+  if (!ioMapHeap) ev->NilPointerError();
+}
+
+// Constructor: zero all map slots, record key/val sizes and the fast
+// mork_ip special-case flags, then allocate the initial arrays via
+// init_probe_map(). Because virtual ProbeMapClearKey() cannot be called
+// from a constructor, maps whose nil key is not all-zero set the
+// LazyClearOnAdd flag so the first use clears the key array instead.
+// mNode_Derived is only tagged on full success, so GoodProbeMap() fails
+// for a partially constructed map.
+morkProbeMap::morkProbeMap(morkEnv* ev, const morkUsage& inUsage,
+                           nsIMdbHeap* ioNodeHeap, mork_size inKeySize,
+                           mork_size inValSize, nsIMdbHeap* ioMapHeap,
+                           mork_size inSlots, mork_bool inZeroIsClearKey)
+
+    : morkNode(ev, inUsage, ioNodeHeap),
+      sMap_Heap(ioMapHeap)
+
+      ,
+      sMap_Keys(0),
+      sMap_Vals(0)
+
+      ,
+      sMap_Seed(0)  // change count of members or structure
+
+      ,
+      sMap_Slots(0)  // count of slots in the hash table
+      ,
+      sMap_Fill(0)  // number of used slots in the hash table
+
+      ,
+      sMap_KeySize(0)  // size of each key (cannot be zero)
+      ,
+      sMap_ValSize(0)  // size of each val (zero allowed)
+
+      ,
+      sMap_KeyIsIP(morkBool_kFalse)  // sMap_KeySize == sizeof(mork_ip)
+      ,
+      sMap_ValIsIP(morkBool_kFalse)  // sMap_ValSize == sizeof(mork_ip)
+
+      ,
+      sProbeMap_MaxFill(0),
+      sProbeMap_LazyClearOnAdd(0),
+      sProbeMap_ZeroIsClearKey(inZeroIsClearKey),
+      sProbeMap_Tag(0) {
+  // Note we cannot successfully call virtual ProbeMapClearKey() when we
+  // call init_probe_map() inside the constructor; so we leave this problem
+  // to the caller. (The constructor will call ProbeMapClearKey() later
+  // after setting a suitable lazy flag to show this action is pending.)
+
+  if (ev->Good()) {
+    this->clear_probe_map(ev, ioMapHeap);
+    if (ev->Good()) {
+      sMap_KeySize = inKeySize;
+      sMap_ValSize = inValSize;
+      sMap_KeyIsIP = (inKeySize == sizeof(mork_ip));
+      sMap_ValIsIP = (inValSize == sizeof(mork_ip));
+
+      this->init_probe_map(ev, inSlots);
+      if (ev->Good()) {
+        if (!inZeroIsClearKey)  // must lazy clear later with virtual method?
+          sProbeMap_LazyClearOnAdd = morkProbeMap_kLazyClearOnAdd;
+
+        mNode_Derived = morkDerived_kProbeMap;
+      }
+    }
+  }
+}
+
+/*============================================================================*/
+
+/*virtual*/ mork_test  // hit(a,b) implies hash(a) == hash(b)
+morkProbeMap::MapTest(morkEnv* ev, const void* inMapKey,
+                      const void* inAppKey) const
+// Note inMapKey is always a key already stored in the map, while inAppKey
+// is always a method argument parameter from a client method call.
+// This matters the most in morkProbeMap subclasses, which have the
+// responsibility of putting 'app' keys into slots for 'map' keys, and
+// the bit pattern representation might be different in such cases.
+// morkTest_kHit means that inMapKey equals inAppKey (and this had better
+// also imply that hash(inMapKey) == hash(inAppKey)).
+// morkTest_kMiss means that inMapKey does NOT equal inAppKey (but this
+// implies nothing at all about hash(inMapKey) and hash(inAppKey)).
+// morkTest_kVoid means that inMapKey is not a valid key bit pattern,
+// which means that key slot in the map is not being used. Note that
+// kVoid is only expected as a return value in morkProbeMap subclasses,
+// because morkProbeMap must ask whether a key slot is used or not.
+// morkChainMap however, always knows when a key slot is used, so only
+// key slots expected to have valid bit patterns will be presented to
+// the MapTest() methods for morkChainMap subclasses.
+//
+// NOTE: it is very important that subclasses correctly return the value
+// morkTest_kVoid whenever the slot for inMapKey contains a bit pattern
+// that means the slot is not being used, because this is the only way a
+// probe map can terminate an unsuccessful search for a key in the map.
+{
+  mork_size keySize = sMap_KeySize;
+  if (keySize == sizeof(mork_ip) && sMap_KeyIsIP) {
+    mork_ip mapKey = *((const mork_ip*)inMapKey);
+    if (mapKey == *((const mork_ip*)inAppKey))
+      return morkTest_kHit;
+    else {
+      // zero is the nil pattern in the default map, so a zero map key
+      // means the bucket is unused rather than holding a different key:
+      return (mapKey) ? morkTest_kMiss : morkTest_kVoid;
+    }
+  } else {
+    // Generic path: one pass over the key bytes decides both questions --
+    // "equal to the app key?" (allSame) and "all-zero, i.e. unused bucket?"
+    // (allZero) -- so hit/void/miss falls out of a single scan.
+    mork_bool allSame = morkBool_kTrue;
+    mork_bool allZero = morkBool_kTrue;
+    const mork_u1* ak = (const mork_u1*)inAppKey;
+    const mork_u1* mk = (const mork_u1*)inMapKey;
+    const mork_u1* end = mk + keySize;
+    --mk;  // prepare for preincrement:
+    while (++mk < end) {
+      mork_u1 byte = *mk;
+      if (byte)  // any nonzero byte in map key means slot is not nil?
+        allZero = morkBool_kFalse;
+      if (byte != *ak++)  // bytes differ in map and app keys?
+        allSame = morkBool_kFalse;
+    }
+    if (allSame)
+      return morkTest_kHit;
+    else
+      return (allZero) ? morkTest_kVoid : morkTest_kMiss;
+  }
+}
+
+/*virtual*/ mork_u4  // hit(a,b) implies hash(a) == hash(b)
+// MapHash: default hash for 'app' format keys. For mork_ip sized keys the
+// value itself is the hash. NOTE(review): the generic else branch appears
+// to be copy-pasted from the nil-key test -- it returns morkBool_kFalse (0)
+// for any key with a nonzero byte and morkBool_kTrue (1) otherwise. That
+// still satisfies "hit implies equal hash" (so the map stays correct), but
+// it degenerates probing to O(n) since all real keys hash to 0; a genuine
+// byte hash was probably intended. The trailing NS_PTR_TO_INT32 return is
+// unreachable because both branches above return.
+morkProbeMap::MapHash(morkEnv* ev, const void* inAppKey) const {
+  mork_size keySize = sMap_KeySize;
+  if (keySize == sizeof(mork_ip) && sMap_KeyIsIP) {
+    return *((const mork_ip*)inAppKey);
+  } else {
+    const mork_u1* key = (const mork_u1*)inAppKey;
+    const mork_u1* end = key + keySize;
+    --key;  // prepare for preincrement:
+    while (++key < end) {
+      if (*key)  // any nonzero byte in map key means slot is not nil?
+        return morkBool_kFalse;
+    }
+    return morkBool_kTrue;
+  }
+  return (mork_u4)NS_PTR_TO_INT32(inAppKey);
+}
+
+/*============================================================================*/
+
+/*virtual*/ mork_u4 morkProbeMap::ProbeMapHashMapKey(morkEnv* ev,
+                                                     const void* inMapKey) const
+// ProbeMapHashMapKey() does logically the same thing as MapHash(), and
+// the default implementation actually calls virtual MapHash(). However,
+// Subclasses must override this method whenever the formats of keys in
+// the map differ from app keys outside the map, because MapHash() only
+// works on keys in 'app' format, while ProbeMapHashMapKey() only works
+// on keys in 'map' format. This method is called in order to rehash all
+// map keys when a map is grown, and this causes all old map members to
+// move into new slot locations.
+//
+// Note it is absolutely imperative that a hash for a key in 'map' format
+// be exactly the same the hash of the same key in 'app' format, or else
+// maps will seem corrupt later when keys in 'app' format cannot be found.
+{
+  // Default: 'map' and 'app' formats are assumed identical here.
+  return this->MapHash(ev, inMapKey);
+}
+
+/*virtual*/ mork_bool morkProbeMap::ProbeMapIsKeyNil(morkEnv* ev,
+                                                     void* ioMapKey)
+// ProbeMapIsKeyNil() must say whether the representation of logical 'nil'
+// is currently found inside the key at ioMapKey, for a key found within
+// the map. The the map iterator uses this method to find map keys that
+// are actually being used for valid map associations; otherwise the
+// iterator cannot determine which map slots actually denote used keys.
+// The default method version returns true if all the bits equal zero.
+// (NOTE(review): ioMapKey is declared non-const but only read here.)
+{
+  if (sMap_KeySize == sizeof(mork_ip) && sMap_KeyIsIP) {
+    return !*((const mork_ip*)ioMapKey);
+  } else {
+    const mork_u1* key = (const mork_u1*)ioMapKey;
+    const mork_u1* end = key + sMap_KeySize;
+    --key;  // prepare for preincrement:
+    while (++key < end) {
+      if (*key)  // any nonzero byte in map key means slot is not nil?
+        return morkBool_kFalse;
+    }
+    return morkBool_kTrue;
+  }
+}
+
+/*virtual*/ void morkProbeMap::ProbeMapClearKey(
+    morkEnv* ev,  // put 'nil' into all keys inside map
+    void* ioMapKey, mork_count inKeyCount)  // array of keys inside map
+// ProbeMapClearKey() must put some representation of logical 'nil' into
+// every key slot in the map, such that MapTest() will later recognize
+// that this bit pattern shows each key slot is not actually being used.
+//
+// This method is typically called whenever the map is either created or
+// grown into a larger size, where ioMapKey is a pointer to an array of
+// inKeyCount keys, where each key is this->MapKeySize() bytes in size.
+// Note that keys are assumed immediately adjacent with no padding, so
+// if any alignment requirements must be met, then subclasses should have
+// already accounted for this when specifying a key size in the map.
+//
+// Since this method will be called when a map is being grown in size,
+// nothing should be assumed about the state slots of the map, since the
+// ioMapKey array might not yet live in sMap_Keys, and the array length
+// inKeyCount might not yet live in sMap_Slots. However, the value kept
+// in sMap_KeySize never changes, so this->MapKeySize() is always correct.
+{
+  // Default: the nil pattern is all-zero bytes, matching MapTest()'s
+  // allZero convention above.
+  if (ioMapKey && inKeyCount) {
+    MORK_MEMSET(ioMapKey, 0, (inKeyCount * sMap_KeySize));
+  } else
+    ev->NilPointerWarning();
+}
+
+/*virtual*/ void morkProbeMap::ProbeMapPushIn(
+    morkEnv* ev,  // move (key,val) into the map
+    const void* inAppKey, const void* inAppVal,  // (key,val) outside map
+    void* outMapKey, void* outMapVal)  // (key,val) inside map
+// This method actually puts keys and vals in the map in suitable format.
+//
+// ProbeMapPushIn() must copy a caller key and value in 'app' format
+// into the map slots provided, which are in 'map' format. When the
+// 'app' and 'map' formats are identical, then this is just a bitwise
+// copy of this->MapKeySize() key bytes and this->MapValSize() val bytes,
+// and this is exactly what the default implementation performs. However,
+// if 'app' and 'map' formats are different, and MapTest() depends on this
+// difference in format, then subclasses must override this method to do
+// whatever is necessary to store the input app key in output map format.
+//
+// Do NOT write more than this->MapKeySize() bytes of a map key, or more
+// than this->MapValSize() bytes of a map val, or corruption might ensue.
+//
+// The inAppKey and inAppVal parameters are the same ones passed into a
+// call to MapAtPut(), and the outMapKey and outMapVal parameters are ones
+// determined by how the map currently positions key inAppKey in the map.
+//
+// Note any key or val parameter can be a null pointer, in which case
+// this method must do nothing with those parameters. In particular, do
+// no key move at all when either inAppKey or outMapKey is nil, and do
+// no val move at all when either inAppVal or outMapVal is nil. Note that
+// outMapVal should always be nil when this->MapValSize() is nil.
+{
+  MORK_USED_1(ev);
+  // BUGFIX: the contract above promises a bitwise copy as the default
+  // behavior, but the body was empty, so put_probe_kv() silently dropped
+  // any key or val that was not mork_ip sized. Perform the documented copy.
+  if (inAppKey && outMapKey) MORK_MEMCPY(outMapKey, inAppKey, sMap_KeySize);
+  if (inAppVal && outMapVal) MORK_MEMCPY(outMapVal, inAppVal, sMap_ValSize);
+}
+
+/*virtual*/ void morkProbeMap::ProbeMapPullOut(
+    morkEnv* ev,  // move (key,val) out from the map
+    const void* inMapKey, const void* inMapVal,  // (key,val) inside map
+    void* outAppKey, void* outAppVal) const  // (key,val) outside map
+// This method actually gets keys and vals from the map in suitable format.
+//
+// ProbeMapPullOut() must copy a key and val in 'map' format into the
+// caller key and val slots provided, which are in 'app' format. When the
+// 'app' and 'map' formats are identical, then this is just a bitwise
+// copy of this->MapKeySize() key bytes and this->MapValSize() val bytes,
+// and this is exactly what the default implementation performs. However,
+// if 'app' and 'map' formats are different, and MapTest() depends on this
+// difference in format, then subclasses must override this method to do
+// whatever is necessary to store the input map key in output app format.
+//
+// The outAppKey and outAppVal parameters are the same ones passed into a
+// call to either MapAtPut() or MapAt(), while inMapKey and inMapVal are
+// determined by how the map currently positions the target key in the map.
+//
+// Note any key or val parameter can be a null pointer, in which case
+// this method must do nothing with those parameters. In particular, do
+// no key move at all when either inMapKey or outAppKey is nil, and do
+// no val move at all when either inMapVal or outAppVal is nil. Note that
+// inMapVal should always be nil when this->MapValSize() is nil.
+{
+  MORK_USED_1(ev);
+  // BUGFIX: as documented above, the default must copy bitwise from map
+  // slots to app slots; the empty body returned nothing for keys or vals
+  // that were not mork_ip sized. Perform the documented copy.
+  if (inMapKey && outAppKey) MORK_MEMCPY(outAppKey, inMapKey, sMap_KeySize);
+  if (inMapVal && outAppVal) MORK_MEMCPY(outAppVal, inMapVal, sMap_ValSize);
+}
+
+/*============================================================================*/
+/* morkProbeMapIter */
+
+// Iterator constructor: bind to ioMap (triggering its lazy key-array clear
+// if still pending), record the map seed so later stale iteration can be
+// detected, and start positioned before the first bucket.
+// NOTE(review): duplicates InitProbeMapIter() -- consider delegating.
+morkProbeMapIter::morkProbeMapIter(morkEnv* ev, morkProbeMap* ioMap)
+    : sProbeMapIter_Map(0),
+      sProbeMapIter_Seed(0),
+      sProbeMapIter_HereIx(morkProbeMapIter_kBeforeIx) {
+  if (ioMap) {
+    if (ioMap->GoodProbeMap()) {
+      if (ioMap->need_lazy_init())  // pending lazy action?
+        ioMap->probe_map_lazy_init(ev);
+
+      sProbeMapIter_Map = ioMap;
+      sProbeMapIter_Seed = ioMap->sMap_Seed;
+    } else
+      ioMap->ProbeMapBadTagError(ev);
+  } else
+    ev->NilPointerError();
+}
+
+// CloseMapIter: detach from the map and park the cursor after the end, so
+// any further IterNext()/IterHere() calls fail cleanly.
+void morkProbeMapIter::CloseMapIter(morkEnv* ev) {
+  MORK_USED_1(ev);
+  sProbeMapIter_Map = 0;
+  sProbeMapIter_Seed = 0;
+
+  sProbeMapIter_HereIx = morkProbeMapIter_kAfterIx;
+}
+
+morkProbeMapIter::morkProbeMapIter()
+// zero most slots; caller must call InitProbeMapIter()
+// (two-phase construction: this form binds to no map at all)
+{
+  sProbeMapIter_Map = 0;
+  sProbeMapIter_Seed = 0;
+
+  sProbeMapIter_HereIx = morkProbeMapIter_kBeforeIx;
+}
+
+// InitProbeMapIter: second phase of two-phase construction; mirrors the
+// (ev, ioMap) constructor -- bind to ioMap, run its lazy clear if pending,
+// sync the seed, and position before the first bucket.
+void morkProbeMapIter::InitProbeMapIter(morkEnv* ev, morkProbeMap* ioMap) {
+  sProbeMapIter_Map = 0;
+  sProbeMapIter_Seed = 0;
+
+  sProbeMapIter_HereIx = morkProbeMapIter_kBeforeIx;
+
+  if (ioMap) {
+    if (ioMap->GoodProbeMap()) {
+      if (ioMap->need_lazy_init())  // pending lazy action?
+        ioMap->probe_map_lazy_init(ev);
+
+      sProbeMapIter_Map = ioMap;
+      sProbeMapIter_Seed = ioMap->sMap_Seed;
+    } else
+      ioMap->ProbeMapBadTagError(ev);
+  } else
+    ev->NilPointerError();
+}
+
+/*| IterFirst: position the iterator on the first used bucket in the map,
+**| copying that member's (key,val) out through outAppKey/outAppVal and
+**| returning true; returns false when the map is empty, nil, or bad.
+|*/
+mork_bool morkProbeMapIter::IterFirst(morkEnv* ev, void* outAppKey,
+                                      void* outAppVal) {
+  sProbeMapIter_HereIx = morkProbeMapIter_kAfterIx;  // default to done
+  morkProbeMap* map = sProbeMapIter_Map;
+
+  if (map && map->GoodProbeMap()) /* looks good? */
+  {
+    sProbeMapIter_Seed = map->sMap_Seed; /* sync the seeds */
+
+    mork_u1* k = map->sMap_Keys;  // array of keys, each of size sMap_KeySize
+    mork_num size = map->sMap_KeySize;   // number of bytes in each key
+    mork_count slots = map->sMap_Slots;  // total number of key buckets
+    mork_pos here = 0;                   // first hash bucket
+
+    while (here < (mork_pos)slots) {
+      if (!map->ProbeMapIsKeyNil(ev, k + (here * size))) {
+        map->get_probe_kv(ev, outAppKey, outAppVal, here);
+
+        sProbeMapIter_HereIx = (mork_i4)here;
+        return morkBool_kTrue;
+      }
+      ++here;  // next bucket
+    }
+  } else if (map)  // BUGFIX: only report bad tag through a non-nil map;
+    map->ProbeMapBadTagError(ev);  // the old else branch derefed nil maps
+  else
+    ev->NilPointerError();
+
+  return morkBool_kFalse;
+}
+
+/*| IterNext: advance to the next used bucket after the current position,
+**| copying that member's (key,val) out and returning true; returns false
+**| when exhausted, when the map changed since the iterator synced its
+**| seed, or when the map is nil or bad.
+|*/
+mork_bool morkProbeMapIter::IterNext(morkEnv* ev, void* outAppKey,
+                                     void* outAppVal) {
+  morkProbeMap* map = sProbeMapIter_Map;
+
+  if (map && map->GoodProbeMap()) /* looks good? */
+  {
+    if (sProbeMapIter_Seed == map->sMap_Seed) /* in sync? */
+    {
+      if (sProbeMapIter_HereIx != morkProbeMapIter_kAfterIx) {
+        mork_pos here = (mork_pos)sProbeMapIter_HereIx;
+        if (sProbeMapIter_HereIx < 0)  // still before first bucket?
+          here = 0;
+        else
+          ++here;
+
+        sProbeMapIter_HereIx = morkProbeMapIter_kAfterIx;  // default to done
+
+        mork_u1* k = map->sMap_Keys;  // key array, each of size sMap_KeySize
+        mork_num size = map->sMap_KeySize;   // number of bytes in each key
+        mork_count slots = map->sMap_Slots;  // total number of key buckets
+
+        while (here < (mork_pos)slots) {
+          if (!map->ProbeMapIsKeyNil(ev, k + (here * size))) {
+            map->get_probe_kv(ev, outAppKey, outAppVal, here);
+
+            sProbeMapIter_HereIx = (mork_i4)here;
+            return morkBool_kTrue;
+          }
+          ++here;  // next bucket
+        }
+      }
+    } else
+      map->MapSeedOutOfSyncError(ev);
+  } else if (map)  // BUGFIX: only report bad tag through a non-nil map;
+    map->ProbeMapBadTagError(ev);  // the old else branch derefed nil maps
+  else
+    ev->NilPointerError();
+
+  return morkBool_kFalse;
+}
+
+// Re-fetch the (key,val) at the iterator's current bucket without moving.
+// Returns true only when the position is a valid, still-used bucket and
+// the cached seed matches the map's seed. BUGFIX: guard the bad-tag error
+// path against a nil map (formerly a nil pointer dereference).
+mork_bool morkProbeMapIter::IterHere(morkEnv* ev, void* outAppKey,
+                                     void* outAppVal) {
+  morkProbeMap* map = sProbeMapIter_Map;
+
+  if (map && map->GoodProbeMap()) /* looks good? */
+  {
+    if (sProbeMapIter_Seed == map->sMap_Seed) /* in sync? */
+    {
+      mork_pos here = (mork_pos)sProbeMapIter_HereIx;
+      mork_count slots = map->sMap_Slots;  // total number of key buckets
+      if (sProbeMapIter_HereIx >= 0 && (here < (mork_pos)slots)) {
+        mork_u1* k = map->sMap_Keys;  // key array, each of size sMap_KeySize
+        mork_num size = map->sMap_KeySize;  // number of bytes in each key
+
+        if (!map->ProbeMapIsKeyNil(ev, k + (here * size))) {
+          map->get_probe_kv(ev, outAppKey, outAppVal, here);
+          return morkBool_kTrue;
+        }
+      }
+    } else
+      map->MapSeedOutOfSyncError(ev);
+  } else if (map)  // guard: only a non-nil map can report a bad tag
+    map->ProbeMapBadTagError(ev);
+
+  return morkBool_kFalse;
+}
+
+// morkMapIter-compatible wrapper: a nonzero change pointer means "found".
+mork_change* morkProbeMapIter::First(morkEnv* ev, void* outKey, void* outVal) {
+  return this->IterFirst(ev, outKey, outVal) ? &sProbeMapIter_Change
+                                             : (mork_change*)0;
+}
+
+// morkMapIter-compatible wrapper: a nonzero change pointer means "found".
+mork_change* morkProbeMapIter::Next(morkEnv* ev, void* outKey, void* outVal) {
+  return this->IterNext(ev, outKey, outVal) ? &sProbeMapIter_Change
+                                            : (mork_change*)0;
+}
+
+// morkMapIter-compatible wrapper: a nonzero change pointer means "found".
+mork_change* morkProbeMapIter::Here(morkEnv* ev, void* outKey, void* outVal) {
+  return this->IterHere(ev, outKey, outVal) ? &sProbeMapIter_Change
+                                            : (mork_change*)0;
+}
+
+// Cutting members during probe-map iteration is unsupported (probe maps
+// cannot remove an association without disturbing the probe sequence), so
+// this always reports ProbeMapCutError through ev and returns nil.
+mork_change* morkProbeMapIter::CutHere(morkEnv* ev, void* outKey,
+                                       void* outVal) {
+  morkProbeMap::ProbeMapCutError(ev);
+
+  return (mork_change*)0;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// NOTE: the following methods ONLY work for sMap_ValIsIP pointer values.
+// (Note the implied assumption that zero is never a good value pattern.)
+
+void* morkProbeMapIter::IterFirstVal(morkEnv* ev, void* outKey)
+// equivalent to { void* v=0; this->IterFirst(ev, outKey, &v); return v; }
+{
+  morkProbeMap* map = sProbeMapIter_Map;
+  if (!map) return (void*)0;
+
+  if (!map->sMap_ValIsIP) {  // vals must be pointer-sized to return one
+    map->MapValIsNotIPError(ev);
+    return (void*)0;
+  }
+
+  void* v = 0;
+  this->IterFirst(ev, outKey, &v);
+  return v;
+}
+
+void* morkProbeMapIter::IterNextVal(morkEnv* ev, void* outKey)
+// equivalent to { void* v=0; this->IterNext(ev, outKey, &v); return v; }
+{
+  morkProbeMap* map = sProbeMapIter_Map;
+  if (!map) return (void*)0;
+
+  if (!map->sMap_ValIsIP) {  // vals must be pointer-sized to return one
+    map->MapValIsNotIPError(ev);
+    return (void*)0;
+  }
+
+  void* v = 0;
+  this->IterNext(ev, outKey, &v);
+  return v;
+}
+
+void* morkProbeMapIter::IterHereVal(morkEnv* ev, void* outKey)
+// equivalent to { void* v=0; this->IterHere(ev, outKey, &v); return v; }
+{
+  morkProbeMap* map = sProbeMapIter_Map;
+  if (!map) return (void*)0;
+
+  if (!map->sMap_ValIsIP) {  // vals must be pointer-sized to return one
+    map->MapValIsNotIPError(ev);
+    return (void*)0;
+  }
+
+  void* v = 0;
+  this->IterHere(ev, outKey, &v);
+  return v;
+}
+
+// NOTE: the following methods ONLY work for sMap_KeyIsIP pointer values.
+// (Note the implied assumption that zero is never a good key pattern.)
+
+void* morkProbeMapIter::IterFirstKey(morkEnv* ev)
+// equivalent to { void* k=0; this->IterFirst(ev, &k, 0); return k; }
+{
+  morkProbeMap* map = sProbeMapIter_Map;
+  if (!map) return (void*)0;
+
+  if (!map->sMap_KeyIsIP) {  // keys must be pointer-sized to return one
+    map->MapKeyIsNotIPError(ev);
+    return (void*)0;
+  }
+
+  void* k = 0;
+  this->IterFirst(ev, &k, (void*)0);
+  return k;
+}
+
+void* morkProbeMapIter::IterNextKey(morkEnv* ev)
+// equivalent to { void* k=0; this->IterNext(ev, &k, 0); return k; }
+{
+  morkProbeMap* map = sProbeMapIter_Map;
+  if (!map) return (void*)0;
+
+  if (!map->sMap_KeyIsIP) {  // keys must be pointer-sized to return one
+    map->MapKeyIsNotIPError(ev);
+    return (void*)0;
+  }
+
+  void* k = 0;
+  this->IterNext(ev, &k, (void*)0);
+  return k;
+}
+
+void* morkProbeMapIter::IterHereKey(morkEnv* ev)
+// equivalent to { void* k=0; this->IterHere(ev, &k, 0); return k; }
+{
+  morkProbeMap* map = sProbeMapIter_Map;
+  if (!map) return (void*)0;
+
+  if (!map->sMap_KeyIsIP) {  // keys must be pointer-sized to return one
+    map->MapKeyIsNotIPError(ev);
+    return (void*)0;
+  }
+
+  void* k = 0;
+  this->IterHere(ev, &k, (void*)0);
+  return k;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkProbeMap.h b/comm/mailnews/db/mork/morkProbeMap.h
new file mode 100644
index 0000000000..01068a1c82
--- /dev/null
+++ b/comm/mailnews/db/mork/morkProbeMap.h
@@ -0,0 +1,423 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+// This code is a port to NS Mork from public domain Mithril C++ sources.
+// Note many code comments here come verbatim from cut-and-pasted Mithril.
+// In many places, code is identical; Mithril versions stay public domain.
+// Changes in porting are mainly class type and scalar type name changes.
+
+#ifndef _MORKPROBEMAP_
+#define _MORKPROBEMAP_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// morkMapScratch caches a map's storage (heap, slot count, and the key and
+// val arrays) outside the map itself while the map reorganizes; see
+// rehash_old_map() and revert_map() in morkProbeMap, which consume a
+// morkMapScratch*.
+class morkMapScratch { // utility class used by map subclasses
+ public:
+  nsIMdbHeap* sMapScratch_Heap; // cached sMap_Heap
+  mork_count sMapScratch_Slots; // cached sMap_Slots
+
+  mork_u1* sMapScratch_Keys; // cached sMap_Keys
+  mork_u1* sMapScratch_Vals; // cached sMap_Vals
+
+ public:
+  // Releases this scratch's cached state (see morkProbeMap.cpp for body).
+  void halt_map_scratch(morkEnv* ev);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kProbeMap 0x7072 /* ascii 'pr' */
+#define morkProbeMap_kTag 0x70724D50 /* ascii 'prMP' */
+
+#define morkProbeMap_kLazyClearOnAdd ((mork_u1)'c')
+
+// morkProbeMap is Mork's open-addressing ("probe") hash map node: keys and
+// vals live in the parallel arrays sMap_Keys and sMap_Vals of sMap_Slots
+// buckets each. Subclasses supply hashing (MapHash/ProbeMapHashMapKey),
+// equality (MapTest), and the 'nil key' bit pattern that marks an unused
+// bucket (ProbeMapIsKeyNil / ProbeMapClearKey).
+class morkProbeMap : public morkNode {
+ protected:
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ protected:
+  // { begin morkMap slots
+  nsIMdbHeap* sMap_Heap; // strong ref to heap allocating all space
+
+  mork_u1* sMap_Keys;
+  mork_u1* sMap_Vals;
+
+  mork_count sMap_Seed; // change count of members or structure
+
+  mork_count sMap_Slots; // count of slots in the hash table
+  mork_fill sMap_Fill; // number of used slots in the hash table
+
+  mork_size sMap_KeySize; // size of each key (cannot be zero)
+  mork_size sMap_ValSize; // size of each val (zero allowed)
+
+  mork_bool sMap_KeyIsIP; // sMap_KeySize == sizeof(mork_ip)
+  mork_bool sMap_ValIsIP; // sMap_ValSize == sizeof(mork_ip)
+  mork_u1 sMap_Pad[2]; // for u4 alignment
+  // } end morkMap slots
+
+  friend class morkProbeMapIter; // for access to protected slots
+
+ public: // getters
+  mork_count MapSeed() const { return sMap_Seed; }
+
+  mork_count MapSlots() const { return sMap_Slots; }
+  mork_fill MapFill() const { return sMap_Fill; }
+
+  mork_size MapKeySize() const { return sMap_KeySize; }
+  mork_size MapValSize() const { return sMap_ValSize; }
+
+  mork_bool MapKeyIsIP() const { return sMap_KeyIsIP; }
+  mork_bool MapValIsIP() const { return sMap_ValIsIP; }
+
+ protected: // slots
+  // { begin morkProbeMap slots
+
+  mork_fill sProbeMap_MaxFill; // max sMap_Fill before map must grow
+
+  mork_u1 sProbeMap_LazyClearOnAdd; // true if kLazyClearOnAdd
+  mork_bool sProbeMap_ZeroIsClearKey; // zero is adequate to clear keys
+  mork_u1 sProbeMap_Pad[2]; // for u4 alignment
+
+  mork_u4 sProbeMap_Tag; // equals morkProbeMap_kTag while map is valid
+
+  // } end morkProbeMap slots
+
+ public: // lazy clear on add
+  mork_bool need_lazy_init() const {
+    return sProbeMap_LazyClearOnAdd == morkProbeMap_kLazyClearOnAdd;
+  }
+
+ public: // typing
+  mork_bool GoodProbeMap() const { return sProbeMap_Tag == morkProbeMap_kTag; }
+
+ protected: // utilities
+  void* clear_alloc(morkEnv* ev, mork_size inSize);
+
+  mork_u1* map_new_vals(morkEnv* ev, mork_num inSlots);
+  mork_u1* map_new_keys(morkEnv* ev, mork_num inSlots);
+
+  void clear_probe_map(morkEnv* ev, nsIMdbHeap* ioMapHeap);
+  void init_probe_map(morkEnv* ev, mork_size inSlots);
+  void probe_map_lazy_init(morkEnv* ev);
+
+  mork_bool new_slots(morkEnv* ev, morkMapScratch* old, mork_num inSlots);
+
+  mork_test find_key_pos(morkEnv* ev, const void* inAppKey, mork_u4 inHash,
+                         mork_pos* outPos) const;
+
+  void put_probe_kv(morkEnv* ev, const void* inAppKey, const void* inAppVal,
+                    mork_pos inPos);
+  void get_probe_kv(morkEnv* ev, void* outAppKey, void* outAppVal,
+                    mork_pos inPos) const;
+
+  mork_bool grow_probe_map(morkEnv* ev);
+  void rehash_old_map(morkEnv* ev, morkMapScratch* ioScratch);
+  void revert_map(morkEnv* ev, morkMapScratch* ioScratch);
+
+ public: // errors
+  void ProbeMapBadTagError(morkEnv* ev) const;
+  void WrapWithNoVoidSlotError(morkEnv* ev) const;
+  void GrowFailsMaxFillError(morkEnv* ev) const;
+  void MapKeyIsNotIPError(morkEnv* ev) const;
+  void MapValIsNotIPError(morkEnv* ev) const;
+
+  void MapNilKeysError(morkEnv* ev);
+  void MapZeroKeySizeError(morkEnv* ev);
+
+  void MapSeedOutOfSyncError(morkEnv* ev);
+  void MapFillUnderflowWarning(morkEnv* ev);
+
+  static void ProbeMapCutError(morkEnv* ev);
+
+  // { ===== begin morkMap methods =====
+ public:
+  virtual mork_test // hit(a,b) implies hash(a) == hash(b)
+  MapTest(morkEnv* ev, const void* inMapKey, const void* inAppKey) const;
+  // Note inMapKey is always a key already stored in the map, while inAppKey
+  // is always a method argument parameter from a client method call.
+  // This matters the most in morkProbeMap subclasses, which have the
+  // responsibility of putting 'app' keys into slots for 'map' keys, and
+  // the bit pattern representation might be different in such cases.
+  // morkTest_kHit means that inMapKey equals inAppKey (and this had better
+  // also imply that hash(inMapKey) == hash(inAppKey)).
+  // morkTest_kMiss means that inMapKey does NOT equal inAppKey (but this
+  // implies nothing at all about hash(inMapKey) and hash(inAppKey)).
+  // morkTest_kVoid means that inMapKey is not a valid key bit pattern,
+  // which means that key slot in the map is not being used. Note that
+  // kVoid is only expected as a return value in morkProbeMap subclasses,
+  // because morkProbeMap must ask whether a key slot is used or not.
+  // morkChainMap however, always knows when a key slot is used, so only
+  // key slots expected to have valid bit patterns will be presented to
+  // the MapTest() methods for morkChainMap subclasses.
+  //
+  // NOTE: it is very important that subclasses correctly return the value
+  // morkTest_kVoid whenever the slot for inMapKey contains a bit pattern
+  // that means the slot is not being used, because this is the only way a
+  // probe map can terminate an unsuccessful search for a key in the map.
+
+  virtual mork_u4 // hit(a,b) implies hash(a) == hash(b)
+  MapHash(morkEnv* ev, const void* inAppKey) const;
+
+  virtual mork_bool MapAtPut(morkEnv* ev, const void* inAppKey,
+                             const void* inAppVal, void* outAppKey,
+                             void* outAppVal);
+
+  virtual mork_bool MapAt(morkEnv* ev, const void* inAppKey, void* outAppKey,
+                          void* outAppVal);
+
+  virtual mork_num MapCutAll(morkEnv* ev);
+  // } ===== end morkMap methods =====
+
+  // { ===== begin morkProbeMap methods =====
+ public:
+  virtual mork_u4 ProbeMapHashMapKey(morkEnv* ev, const void* inMapKey) const;
+  // ProbeMapHashMapKey() does logically the same thing as MapHash(), and
+  // the default implementation actually calls virtual MapHash(). However,
+  // Subclasses must override this method whenever the formats of keys in
+  // the map differ from app keys outside the map, because MapHash() only
+  // works on keys in 'app' format, while ProbeMapHashMapKey() only works
+  // on keys in 'map' format. This method is called in order to rehash all
+  // map keys when a map is grown, and this causes all old map members to
+  // move into new slot locations.
+  //
+  // Note it is absolutely imperative that a hash for a key in 'map' format
+  // be exactly the same the hash of the same key in 'app' format, or else
+  // maps will seem corrupt later when keys in 'app' format cannot be found.
+
+  virtual mork_bool ProbeMapIsKeyNil(morkEnv* ev, void* ioMapKey);
+  // ProbeMapIsKeyNil() must say whether the representation of logical 'nil'
+  // is currently found inside the key at ioMapKey, for a key found within
+  // the map. The map iterator uses this method to find map keys that
+  // are actually being used for valid map associations; otherwise the
+  // iterator cannot determine which map slots actually denote used keys.
+  // The default method version returns true if all the bits equal zero.
+
+  virtual void ProbeMapClearKey(
+      morkEnv* ev, // put 'nil' into all keys inside map
+      void* ioMapKey, mork_count inKeyCount); // array of keys inside map
+  // ProbeMapClearKey() must put some representation of logical 'nil' into
+  // every key slot in the map, such that MapTest() will later recognize
+  // that this bit pattern shows each key slot is not actually being used.
+  //
+  // This method is typically called whenever the map is either created or
+  // grown into a larger size, where ioMapKey is a pointer to an array of
+  // inKeyCount keys, where each key is this->MapKeySize() bytes in size.
+  // Note that keys are assumed immediately adjacent with no padding, so
+  // if any alignment requirements must be met, then subclasses should have
+  // already accounted for this when specifying a key size in the map.
+  //
+  // Since this method will be called when a map is being grown in size,
+  // nothing should be assumed about the state slots of the map, since the
+  // ioMapKey array might not yet live in sMap_Keys, and the array length
+  // inKeyCount might not yet live in sMap_Slots. However, the value kept
+  // in sMap_KeySize never changes, so this->MapKeySize() is always correct.
+
+  virtual void ProbeMapPushIn(morkEnv* ev, // move (key,val) into the map
+                              const void* inAppKey,
+                              const void* inAppVal, // (key,val) outside map
+                              void* outMapKey,
+                              void* outMapVal); // (key,val) inside map
+  // This method actually puts keys and vals in the map in suitable format.
+  //
+  // ProbeMapPushIn() must copy a caller key and value in 'app' format
+  // into the map slots provided, which are in 'map' format. When the
+  // 'app' and 'map' formats are identical, then this is just a bitwise
+  // copy of this->MapKeySize() key bytes and this->MapValSize() val bytes,
+  // and this is exactly what the default implementation performs. However,
+  // if 'app' and 'map' formats are different, and MapTest() depends on this
+  // difference in format, then subclasses must override this method to do
+  // whatever is necessary to store the input app key in output map format.
+  //
+  // Do NOT write more than this->MapKeySize() bytes of a map key, or more
+  // than this->MapValSize() bytes of a map val, or corruption might ensue.
+  //
+  // The inAppKey and inAppVal parameters are the same ones passed into a
+  // call to MapAtPut(), and the outMapKey and outMapVal parameters are ones
+  // determined by how the map currently positions key inAppKey in the map.
+  //
+  // Note any key or val parameter can be a null pointer, in which case
+  // this method must do nothing with those parameters. In particular, do
+  // no key move at all when either inAppKey or outMapKey is nil, and do
+  // no val move at all when either inAppVal or outMapVal is nil. Note that
+  // outMapVal should always be nil when this->MapValSize() is nil.
+
+  virtual void ProbeMapPullOut(morkEnv* ev, // move (key,val) out from the map
+                               const void* inMapKey,
+                               const void* inMapVal, // (key,val) inside map
+                               void* outAppKey,
+                               void* outAppVal) const; // (key,val) outside map
+  // This method actually gets keys and vals from the map in suitable format.
+  //
+  // ProbeMapPullOut() must copy a key and val in 'map' format into the
+  // caller key and val slots provided, which are in 'app' format. When the
+  // 'app' and 'map' formats are identical, then this is just a bitwise
+  // copy of this->MapKeySize() key bytes and this->MapValSize() val bytes,
+  // and this is exactly what the default implementation performs. However,
+  // if 'app' and 'map' formats are different, and MapTest() depends on this
+  // difference in format, then subclasses must override this method to do
+  // whatever is necessary to store the input map key in output app format.
+  //
+  // The outAppKey and outAppVal parameters are the same ones passed into a
+  // call to either MapAtPut() or MapAt(), while inMapKey and inMapVal are
+  // determined by how the map currently positions the target key in the map.
+  //
+  // Note any key or val parameter can be a null pointer, in which case
+  // this method must do nothing with those parameters. In particular, do
+  // no key move at all when either inMapKey or outAppKey is nil, and do
+  // no val move at all when either inMapVal or outAppVal is nil. Note that
+  // inMapVal should always be nil when this->MapValSize() is nil.
+
+  // } ===== end morkProbeMap methods =====
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override; // CloseProbeMap() only if open
+  virtual ~morkProbeMap(); // assert that CloseProbeMap() executed earlier
+
+ public: // morkProbeMap construction & destruction
+  morkProbeMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioNodeHeap,
+               mork_size inKeySize, mork_size inValSize, nsIMdbHeap* ioMapHeap,
+               mork_size inSlots, mork_bool inZeroIsClearKey);
+
+  void CloseProbeMap(morkEnv* ev); // called by
+
+ public: // dynamic type identification
+  mork_bool IsProbeMap() const {
+    return IsNode() && mNode_Derived == morkDerived_kProbeMap;
+  }
+  // } ===== end morkNode methods =====
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakMap(morkMap* me, morkEnv* ev, morkMap** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongMap(morkMap* me, morkEnv* ev, morkMap** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+/*============================================================================*/
+/* morkProbeMapIter */
+
+#define morkProbeMapIter_kBeforeIx ((mork_i4)-1) /* before first member */
+#define morkProbeMapIter_kAfterIx ((mork_i4)-2) /* after last member */
+
+// morkProbeMapIter walks the used buckets of a morkProbeMap in slot order.
+// It holds a non-owning map pointer plus a cached copy of the map's seed;
+// iteration methods report MapSeedOutOfSyncError() when the map changes
+// underneath the iterator. sProbeMapIter_HereIx is the current bucket
+// index, or kBeforeIx/kAfterIx at either end of the iteration.
+class morkProbeMapIter {
+ protected:
+  morkProbeMap* sProbeMapIter_Map; // nonref
+  mork_num sProbeMapIter_Seed; // iter's cached copy of map's seed
+
+  mork_i4 sProbeMapIter_HereIx;
+
+  mork_change sProbeMapIter_Change; // morkMapIter API simulation dummy
+  mork_u1 sProbeMapIter_Pad[3]; // for u4 alignment
+
+ public:
+  morkProbeMapIter(morkEnv* ev, morkProbeMap* ioMap);
+  void CloseMapIter(morkEnv* ev);
+
+  morkProbeMapIter(); // zero most slots; caller must call InitProbeMapIter()
+
+ protected: // protected so subclasses must provide suitable typesafe inlines:
+  void InitProbeMapIter(morkEnv* ev, morkProbeMap* ioMap);
+
+  void InitMapIter(morkEnv* ev,
+                   morkProbeMap* ioMap) // morkMapIter compatibility
+  {
+    this->InitProbeMapIter(ev, ioMap);
+  }
+
+  mork_bool IterFirst(morkEnv* ev, void* outKey, void* outVal);
+  mork_bool IterNext(morkEnv* ev, void* outKey, void* outVal);
+  mork_bool IterHere(morkEnv* ev, void* outKey, void* outVal);
+
+  // NOTE: the following methods ONLY work for sMap_ValIsIP pointer values.
+  // (Note the implied assumption that zero is never a good value pattern.)
+
+  void* IterFirstVal(morkEnv* ev, void* outKey);
+  // equivalent to { void* v=0; this->IterFirst(ev, outKey, &v); return v; }
+
+  void* IterNextVal(morkEnv* ev, void* outKey);
+  // equivalent to { void* v=0; this->IterNext(ev, outKey, &v); return v; }
+
+  void* IterHereVal(morkEnv* ev, void* outKey);
+  // equivalent to { void* v=0; this->IterHere(ev, outKey, &v); return v; }
+
+  // NOTE: the following methods ONLY work for sMap_KeyIsIP pointer values.
+  // (Note the implied assumption that zero is never a good key pattern.)
+
+  void* IterFirstKey(morkEnv* ev);
+  // equivalent to { void* k=0; this->IterFirst(ev, &k, 0); return k; }
+
+  void* IterNextKey(morkEnv* ev);
+  // equivalent to { void* k=0; this->IterNext(ev, &k, 0); return k; }
+
+  void* IterHereKey(morkEnv* ev);
+  // equivalent to { void* k=0; this->IterHere(ev, &k, 0); return k; }
+
+ public: // simulation of the morkMapIter API for morkMap compatibility:
+  mork_change* First(morkEnv* ev, void* outKey, void* outVal);
+  mork_change* Next(morkEnv* ev, void* outKey, void* outVal);
+  mork_change* Here(morkEnv* ev, void* outKey, void* outVal);
+
+  mork_change* CutHere(morkEnv* ev, void* outKey, void* outVal);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKPROBEMAP_ */
diff --git a/comm/mailnews/db/mork/morkQuickSort.cpp b/comm/mailnews/db/mork/morkQuickSort.cpp
new file mode 100644
index 0000000000..6fd211ee5c
--- /dev/null
+++ b/comm/mailnews/db/mork/morkQuickSort.cpp
@@ -0,0 +1,182 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKQUICKSORT_
+# include "morkQuickSort.h"
+#endif
+
+#if !defined(DEBUG) && (defined(__cplusplus) || defined(__gcc))
+# ifndef INLINE
+# define INLINE inline
+# endif
+#else
+# define INLINE
+#endif
+
+static INLINE mork_u1* morkQS_med3(mork_u1*, mork_u1*, mork_u1*, mdbAny_Order,
+                                   void*);
+
+static INLINE void morkQS_swapfunc(mork_u1*, mork_u1*, int, int);
+
+/*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+// Swap n bytes between parmi and parmj, one TYPE unit at a time.
+#define morkQS_swapcode(TYPE, parmi, parmj, n) \
+  { \
+    long i = (n) / sizeof(TYPE); \
+    TYPE* pi = (TYPE*)(parmi); \
+    TYPE* pj = (TYPE*)(parmj); \
+    do { \
+      TYPE t = *pi; \
+      *pi++ = *pj; \
+      *pj++ = t; \
+    } while (--i > 0); \
+  }
+
+// Pick a swap strategy: 0 = single long, 1 = long-sized chunks,
+// 2 = byte-by-byte (when the base pointer or element size is not
+// long-aligned). NOTE(review): `(a - (mork_u1*)0)` turns a pointer into an
+// offset to test alignment; formally undefined behavior, kept verbatim
+// from the original BSD qsort.
+#define morkQS_SwapInit(a, es) \
+  swaptype = (a - (mork_u1*)0) % sizeof(long) || es % sizeof(long) ? 2 \
+             : es == sizeof(long) ? 0 \
+                                  : 1;
+
+// Out-of-line element swap used by the morkQS_swap/morkQS_vecswap macros.
+static INLINE void morkQS_swapfunc(mork_u1* a, mork_u1* b, int n,
+                                   int swaptype) {
+  if (swaptype <= 1)
+    morkQS_swapcode(long, a, b, n) else morkQS_swapcode(mork_u1, a, b, n)
+}
+
+// Swap one element of inSize bytes; fast path when swaptype == 0.
+#define morkQS_swap(a, b) \
+  if (swaptype == 0) { \
+    long t = *(long*)(a); \
+    *(long*)(a) = *(long*)(b); \
+    *(long*)(b) = t; \
+  } else \
+    morkQS_swapfunc(a, b, (int)inSize, swaptype)
+
+// Swap n bytes between two element ranges (no-op when n is zero).
+#define morkQS_vecswap(a, b, n) \
+  if ((n) > 0) morkQS_swapfunc(a, b, (int)n, swaptype)
+
+// Return the median of a, b, c under cmp (classic median-of-3 pivot pick).
+static INLINE mork_u1* morkQS_med3(mork_u1* a, mork_u1* b, mork_u1* c,
+                                   mdbAny_Order cmp, void* closure) {
+  return (*cmp)(a, b, closure) < 0
+             ? ((*cmp)(b, c, closure) < 0 ? b
+                                          : ((*cmp)(a, c, closure) < 0 ? c : a))
+             : ((*cmp)(b, c, closure) > 0
+                    ? b
+                    : ((*cmp)(a, c, closure) < 0 ? a : c));
+}
+
+#define morkQS_MIN(x, y) ((x) < (y) ? (x) : (y))
+
+// In-place sort of inCount elements of inSize bytes each in ioVec, ordered
+// by inOrder (which receives ioClosure as its closure argument). This is
+// Bentley & McIlroy's "Engineering a Sort Function" qsort: insertion sort
+// for tiny ranges, median-of-3 pivot (ninther for large arrays), three-way
+// partitioning with equal-key blocks swapped back to the middle via
+// vecswap, recursion on the first partition, and a goto to tailCall on the
+// second partition to bound stack depth.
+void morkQuickSort(mork_u1* ioVec, mork_u4 inCount, mork_u4 inSize,
+                   mdbAny_Order inOrder, void* ioClosure) {
+  mork_u1 *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+  int d, r, swaptype, swap_cnt;
+
+tailCall:
+  morkQS_SwapInit(ioVec, inSize);
+  swap_cnt = 0;
+  if (inCount < 7) { // small range: insertion sort is cheaper
+    for (pm = ioVec + inSize; pm < ioVec + inCount * inSize; pm += inSize)
+      for (pl = pm; pl > ioVec && (*inOrder)(pl - inSize, pl, ioClosure) > 0;
+           pl -= inSize)
+        morkQS_swap(pl, pl - inSize);
+    return;
+  }
+  pm = ioVec + (inCount / 2) * inSize;
+  if (inCount > 7) {
+    pl = ioVec;
+    pn = ioVec + (inCount - 1) * inSize;
+    if (inCount > 40) { // big array: pseudo-median of nine samples
+      d = (inCount / 8) * inSize;
+      pl = morkQS_med3(pl, pl + d, pl + 2 * d, inOrder, ioClosure);
+      pm = morkQS_med3(pm - d, pm, pm + d, inOrder, ioClosure);
+      pn = morkQS_med3(pn - 2 * d, pn - d, pn, inOrder, ioClosure);
+    }
+    pm = morkQS_med3(pl, pm, pn, inOrder, ioClosure);
+  }
+  morkQS_swap(ioVec, pm); // pivot moves to slot 0
+  pa = pb = ioVec + inSize;
+
+  pc = pd = ioVec + (inCount - 1) * inSize;
+  for (;;) {
+    while (pb <= pc && (r = (*inOrder)(pb, ioVec, ioClosure)) <= 0) {
+      if (r == 0) { // pivot-equal keys collect at the left edge
+        swap_cnt = 1;
+        morkQS_swap(pa, pb);
+        pa += inSize;
+      }
+      pb += inSize;
+    }
+    while (pb <= pc && (r = (*inOrder)(pc, ioVec, ioClosure)) >= 0) {
+      if (r == 0) { // pivot-equal keys collect at the right edge
+        swap_cnt = 1;
+        morkQS_swap(pc, pd);
+        pd -= inSize;
+      }
+      pc -= inSize;
+    }
+    if (pb > pc) break;
+    morkQS_swap(pb, pc);
+    swap_cnt = 1;
+    pb += inSize;
+    pc -= inSize;
+  }
+  if (swap_cnt == 0) { /* Switch to insertion sort */
+    for (pm = ioVec + inSize; pm < ioVec + inCount * inSize; pm += inSize)
+      for (pl = pm; pl > ioVec && (*inOrder)(pl - inSize, pl, ioClosure) > 0;
+           pl -= inSize)
+        morkQS_swap(pl, pl - inSize);
+    return;
+  }
+
+  pn = ioVec + inCount * inSize;
+  r = morkQS_MIN(pa - ioVec, pb - pa);
+  morkQS_vecswap(ioVec, pb - r, r); // move pivot-equal block to the middle
+  r = morkQS_MIN(pd - pc, (int)(pn - pd - inSize));
+  morkQS_vecswap(pb, pn - r, r);
+  if ((r = pb - pa) > (int)inSize)
+    morkQuickSort(ioVec, r / inSize, inSize, inOrder, ioClosure);
+  if ((r = pd - pc) > (int)inSize) {
+    /* Iterate rather than recurse to save stack space */
+    ioVec = pn - r;
+    inCount = r / inSize;
+    goto tailCall;
+  }
+  /* morkQuickSort(pn - r, r / inSize, inSize, inOrder, ioClosure);*/
+}
diff --git a/comm/mailnews/db/mork/morkQuickSort.h b/comm/mailnews/db/mork/morkQuickSort.h
new file mode 100644
index 0000000000..98561d5758
--- /dev/null
+++ b/comm/mailnews/db/mork/morkQuickSort.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKQUICKSORT_
+#define _MORKQUICKSORT_ 1
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Sort inCount elements of inSize bytes each in ioVec, in place, ordered
+// by inOrder (called with ioClosure). Implemented in morkQuickSort.cpp
+// (Bentley & McIlroy qsort).
+extern void morkQuickSort(mork_u1* ioVec, mork_u4 inCount, mork_u4 inSize,
+                          mdbAny_Order inOrder, void* ioClosure);
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKQUICKSORT_ */
diff --git a/comm/mailnews/db/mork/morkRow.cpp b/comm/mailnews/db/mork/morkRow.cpp
new file mode 100644
index 0000000000..2af5e9adcd
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRow.cpp
@@ -0,0 +1,769 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKROWOBJECT_
+# include "morkRowObject.h"
+#endif
+
+#ifndef _MORKCELLOBJECT_
+# include "morkCellObject.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKROWCELLCURSOR_
+# include "morkRowCellCursor.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// notifications regarding row changes:
+
+void morkRow::NoteRowAddCol(morkEnv* ev, mork_column inColumn) {
+ if (!this->IsRowRewrite()) {
+ mork_delta newDelta;
+ morkDelta_Init(newDelta, inColumn, morkChange_kAdd);
+
+ if (newDelta != mRow_Delta) // not repeating existing data?
+ {
+ if (this->HasRowDelta()) // already have one change recorded?
+ this->SetRowRewrite(); // just plan to write all row cells
+ else
+ this->SetRowDelta(inColumn, morkChange_kAdd);
+ }
+ } else
+ this->ClearRowDelta();
+}
+
+void morkRow::NoteRowCutCol(morkEnv* ev, mork_column inColumn) {
+ if (!this->IsRowRewrite()) {
+ mork_delta newDelta;
+ morkDelta_Init(newDelta, inColumn, morkChange_kCut);
+
+ if (newDelta != mRow_Delta) // not repeating existing data?
+ {
+ if (this->HasRowDelta()) // already have one change recorded?
+ this->SetRowRewrite(); // just plan to write all row cells
+ else
+ this->SetRowDelta(inColumn, morkChange_kCut);
+ }
+ } else
+ this->ClearRowDelta();
+}
+
+void morkRow::NoteRowSetCol(morkEnv* ev, mork_column inColumn) {
+ if (!this->IsRowRewrite()) {
+ if (this->HasRowDelta()) // already have one change recorded?
+ this->SetRowRewrite(); // just plan to write all row cells
+ else
+ this->SetRowDelta(inColumn, morkChange_kSet);
+ } else
+ this->ClearRowDelta();
+}
+
+void morkRow::NoteRowSetAll(morkEnv* ev) {
+ this->SetRowRewrite(); // just plan to write all row cells
+ this->ClearRowDelta();
+}
+
+mork_u2 morkRow::AddRowGcUse(morkEnv* ev) {
+ if (this->IsRow()) {
+ if (mRow_GcUses < morkRow_kMaxGcUses) // not already maxed out?
+ ++mRow_GcUses;
+ } else
+ this->NonRowTypeError(ev);
+
+ return mRow_GcUses;
+}
+
+mork_u2 morkRow::CutRowGcUse(morkEnv* ev) {
+ if (this->IsRow()) {
+ if (mRow_GcUses) // any outstanding uses to cut?
+ {
+ if (mRow_GcUses < morkRow_kMaxGcUses) // not frozen at max?
+ --mRow_GcUses;
+ } else
+ this->GcUsesUnderflowWarning(ev);
+ } else
+ this->NonRowTypeError(ev);
+
+ return mRow_GcUses;
+}
+
+/*static*/ void morkRow::GcUsesUnderflowWarning(morkEnv* ev) {
+ ev->NewWarning("mRow_GcUses underflow");
+}
+
+/*static*/ void morkRow::NonRowTypeError(morkEnv* ev) {
+ ev->NewError("non morkRow");
+}
+
+/*static*/ void morkRow::NonRowTypeWarning(morkEnv* ev) {
+ ev->NewWarning("non morkRow");
+}
+
+/*static*/ void morkRow::LengthBeyondMaxError(morkEnv* ev) {
+ ev->NewError("mRow_Length over max");
+}
+
+/*static*/ void morkRow::ZeroColumnError(morkEnv* ev) {
+ ev->NewError(" zero mork_column");
+}
+
+/*static*/ void morkRow::NilCellsError(morkEnv* ev) {
+ ev->NewError("nil mRow_Cells");
+}
+
+void morkRow::InitRow(morkEnv* ev, const mdbOid* inOid, morkRowSpace* ioSpace,
+ mork_size inLength, morkPool* ioPool)
+// if inLength is nonzero, cells will be allocated from ioPool
+{
+ if (ioSpace && ioPool && inOid) {
+ if (inLength <= morkRow_kMaxLength) {
+ if (inOid->mOid_Id != morkRow_kMinusOneRid) {
+ mRow_Space = ioSpace;
+ mRow_Object = 0;
+ mRow_Cells = 0;
+ mRow_Oid = *inOid;
+
+ mRow_Length = (mork_u2)inLength;
+ mRow_Seed = (mork_u2)(mork_ip)this; // "random" assignment
+
+ mRow_GcUses = 0;
+ mRow_Pad = 0;
+ mRow_Flags = 0;
+ mRow_Tag = morkRow_kTag;
+
+ morkZone* zone = &ioSpace->mSpace_Store->mStore_Zone;
+
+ if (inLength) mRow_Cells = ioPool->NewCells(ev, inLength, zone);
+
+ if (this->MaybeDirtySpaceStoreAndRow()) // new row might dirty store
+ {
+ this->SetRowRewrite();
+ this->NoteRowSetAll(ev);
+ }
+ } else
+ ioSpace->MinusOneRidError(ev);
+ } else
+ this->LengthBeyondMaxError(ev);
+ } else
+ ev->NilPointerError();
+}
+
+morkRowObject* morkRow::AcquireRowObject(morkEnv* ev, morkStore* ioStore) {
+ morkRowObject* ro = mRow_Object;
+ if (ro) // need new row object?
+ ro->AddRef();
+ else {
+ nsIMdbHeap* heap = ioStore->mPort_Heap;
+ ro = new (*heap, ev)
+ morkRowObject(ev, morkUsage::kHeap, heap, this, ioStore);
+ if (!ro) return (morkRowObject*)0;
+
+ morkRowObject::SlotWeakRowObject(ro, ev, &mRow_Object);
+ ro->AddRef();
+ }
+ return ro;
+}
+
+nsIMdbRow* morkRow::AcquireRowHandle(morkEnv* ev, morkStore* ioStore) {
+ return AcquireRowObject(ev, ioStore);
+}
+
+nsIMdbCell* morkRow::AcquireCellHandle(morkEnv* ev, morkCell* ioCell,
+ mdb_column inCol, mork_pos inPos) {
+ nsIMdbHeap* heap = ev->mEnv_Heap;
+ morkCellObject* cellObj = new (*heap, ev)
+ morkCellObject(ev, morkUsage::kHeap, heap, this, ioCell, inCol, inPos);
+ if (cellObj) {
+ nsIMdbCell* cellHandle = cellObj->AcquireCellHandle(ev);
+ // cellObj->CutStrongRef(ev->AsMdbEnv());
+ return cellHandle;
+ }
+ return (nsIMdbCell*)0;
+}
+
+mork_count morkRow::CountOverlap(morkEnv* ev, morkCell* ioVector,
+ mork_fill inFill)
+// Count cells in ioVector that change existing cells in this row when
+// ioVector is added to the row (as in TakeCells()). This is the set
+// of cells with the same columns in ioVector and mRow_Cells, which do
+// not have exactly the same value in mCell_Atom, and which do not both
+// have change status equal to morkChange_kCut (because cutting a cut
+// cell still yields a cell that has been cut). CountOverlap() also
+// modifies the change attribute of any cell in ioVector to kDup when
+// the change was previously kCut and the same column cell was found
+// in this row with change also equal to kCut; this tells callers later
+// they need not look for that cell in the row again on a second pass.
+{
+ mork_count outCount = 0;
+ mork_pos pos = 0; // needed by GetCell()
+ morkCell* cells = ioVector;
+ morkCell* end = cells + inFill;
+ --cells; // prepare for preincrement
+ while (++cells < end && ev->Good()) {
+ mork_column col = cells->GetColumn();
+
+ morkCell* old = this->GetCell(ev, col, &pos);
+ if (old) // same column?
+ {
+ mork_change newChg = cells->GetChange();
+ mork_change oldChg = old->GetChange();
+ if (newChg != morkChange_kCut || oldChg != newChg) // not cut+cut?
+ {
+ if (cells->mCell_Atom != old->mCell_Atom) // not same atom?
+ ++outCount; // cells will replace old significantly when added
+ } else
+ cells->SetColumnAndChange(col, morkChange_kDup); // note dup status
+ }
+ }
+ return outCount;
+}
+
+void morkRow::MergeCells(morkEnv* ev, morkCell* ioVector, mork_fill inVecLength,
+ mork_fill inOldRowFill, mork_fill inOverlap)
+// MergeCells() is the part of TakeCells() that does the insertion.
+// inOldRowFill is the old value of mRow_Length, and inOverlap is the
+// number of cells in the intersection that must be updated.
+{
+ morkCell* newCells = mRow_Cells + inOldRowFill; // 1st new cell in row
+ morkCell* newEnd = newCells + mRow_Length; // one past last cell
+
+ morkCell* srcCells = ioVector;
+ morkCell* srcEnd = srcCells + inVecLength;
+
+ --srcCells; // prepare for preincrement
+ while (++srcCells < srcEnd && ev->Good()) {
+ mork_change srcChg = srcCells->GetChange();
+ if (srcChg != morkChange_kDup) // anything to be done?
+ {
+ morkCell* dstCell = 0;
+ if (inOverlap) {
+ mork_pos pos = 0; // needed by GetCell()
+ dstCell = this->GetCell(ev, srcCells->GetColumn(), &pos);
+ }
+ if (dstCell) {
+ --inOverlap; // one fewer intersections to resolve
+ // swap the atoms in the cells to avoid ref counting here:
+ morkAtom* dstAtom = dstCell->mCell_Atom;
+ *dstCell = *srcCells; // bitwise copy, taking src atom
+ srcCells->mCell_Atom = dstAtom; // forget cell ref, if any
+ } else if (newCells < newEnd) // another new cell exists?
+ {
+ dstCell = newCells++; // alloc another new cell
+ // take atom from source cell, transferring ref to this row:
+ *dstCell = *srcCells; // bitwise copy, taking src atom
+ srcCells->mCell_Atom = 0; // forget cell ref, if any
+ } else // oops, we ran out...
+ ev->NewError("out of new cells");
+ }
+ }
+}
+
+void morkRow::TakeCells(morkEnv* ev, morkCell* ioVector, mork_fill inVecLength,
+ morkStore* ioStore) {
+ if (ioVector && inVecLength && ev->Good()) {
+ ++mRow_Seed; // intend to change structure of mRow_Cells
+ mork_size length = (mork_size)mRow_Length;
+
+ mork_count overlap = this->CountOverlap(ev, ioVector, inVecLength);
+
+ mork_size growth = inVecLength - overlap; // cells to add
+ mork_size newLength = length + growth;
+
+ if (growth && ev->Good()) // need to add any cells?
+ {
+ morkZone* zone = &ioStore->mStore_Zone;
+ morkPool* pool = ioStore->StorePool();
+ if (!pool->AddRowCells(ev, this, length + growth, zone))
+ ev->NewError("cannot take cells");
+ }
+ if (ev->Good()) {
+ if (mRow_Length >= newLength)
+ this->MergeCells(ev, ioVector, inVecLength, length, overlap);
+ else
+ ev->NewError("not enough new cells");
+ }
+ }
+}
+
+mork_bool morkRow::MaybeDirtySpaceStoreAndRow() {
+ morkRowSpace* rowSpace = mRow_Space;
+ if (rowSpace) {
+ morkStore* store = rowSpace->mSpace_Store;
+ if (store && store->mStore_CanDirty) {
+ store->SetStoreDirty();
+ rowSpace->mSpace_CanDirty = morkBool_kTrue;
+ }
+
+ if (rowSpace->mSpace_CanDirty) {
+ this->SetRowDirty();
+ rowSpace->SetRowSpaceDirty();
+ return morkBool_kTrue;
+ }
+ }
+ return morkBool_kFalse;
+}
+
+morkCell* morkRow::NewCell(morkEnv* ev, mdb_column inColumn, mork_pos* outPos,
+ morkStore* ioStore) {
+ ++mRow_Seed; // intend to change structure of mRow_Cells
+ mork_size length = (mork_size)mRow_Length;
+ *outPos = (mork_pos)length;
+ morkPool* pool = ioStore->StorePool();
+ morkZone* zone = &ioStore->mStore_Zone;
+
+ mork_bool canDirty = this->MaybeDirtySpaceStoreAndRow();
+
+ if (pool->AddRowCells(ev, this, length + 1, zone)) {
+ morkCell* cell = mRow_Cells + length;
+ // next line equivalent to inline morkCell::SetCellDirty():
+ if (canDirty)
+ cell->SetCellColumnDirty(inColumn);
+ else
+ cell->SetCellColumnClean(inColumn);
+
+ if (canDirty && !this->IsRowRewrite()) this->NoteRowAddCol(ev, inColumn);
+
+ return cell;
+ }
+
+ return (morkCell*)0;
+}
+
+void morkRow::SeekColumn(morkEnv* ev, mdb_pos inPos, mdb_column* outColumn,
+ mdbYarn* outYarn) {
+ morkCell* cells = mRow_Cells;
+ if (cells && inPos < mRow_Length && inPos >= 0) {
+ morkCell* c = cells + inPos;
+ if (outColumn) *outColumn = c->GetColumn();
+ if (outYarn) morkAtom::GetYarn(c->mCell_Atom, outYarn);
+ } else {
+ if (outColumn) *outColumn = 0;
+ if (outYarn) morkAtom::GetYarn((morkAtom*)0, outYarn);
+ }
+}
+
+void morkRow::NextColumn(morkEnv* ev, mdb_column* ioColumn, mdbYarn* outYarn) {
+ morkCell* cells = mRow_Cells;
+ if (cells) {
+ mork_column last = 0;
+ mork_column inCol = *ioColumn;
+ morkCell* end = cells + mRow_Length;
+ while (cells < end) {
+ if (inCol == last) // found column?
+ {
+ if (outYarn) morkAtom::GetYarn(cells->mCell_Atom, outYarn);
+ *ioColumn = cells->GetColumn();
+ return; // stop, we are done
+ } else {
+ last = cells->GetColumn();
+ ++cells;
+ }
+ }
+ }
+ *ioColumn = 0;
+ if (outYarn) morkAtom::GetYarn((morkAtom*)0, outYarn);
+}
+
+morkCell* morkRow::CellAt(morkEnv* ev, mork_pos inPos) const {
+ MORK_USED_1(ev);
+ morkCell* cells = mRow_Cells;
+ if (cells && inPos < mRow_Length && inPos >= 0) {
+ return cells + inPos;
+ }
+ return (morkCell*)0;
+}
+
+morkCell* morkRow::GetCell(morkEnv* ev, mdb_column inColumn,
+ mork_pos* outPos) const {
+ MORK_USED_1(ev);
+ morkCell* cells = mRow_Cells;
+ if (cells) {
+ morkCell* end = cells + mRow_Length;
+ while (cells < end) {
+ mork_column col = cells->GetColumn();
+ if (col == inColumn) // found the desired column?
+ {
+ *outPos = cells - mRow_Cells;
+ return cells;
+ } else
+ ++cells;
+ }
+ }
+ *outPos = -1;
+ return (morkCell*)0;
+}
+
+mork_aid morkRow::GetCellAtomAid(morkEnv* ev, mdb_column inColumn) const
+// GetCellAtomAid() finds the cell with column inColumn, and sees if the
+// atom has a token ID, and returns the atom's ID if there is one. Or
+// else zero is returned if there is no such column, or no atom, or if
+// the atom has no ID to return. This method is intended to support
+// efficient updating of column indexes for rows in a row space.
+{
+ if (this->IsRow()) {
+ morkCell* cells = mRow_Cells;
+ if (cells) {
+ morkCell* end = cells + mRow_Length;
+ while (cells < end) {
+ mork_column col = cells->GetColumn();
+ if (col == inColumn) // found desired column?
+ {
+ morkAtom* atom = cells->mCell_Atom;
+ if (atom && atom->IsBook())
+ return ((morkBookAtom*)atom)->mBookAtom_Id;
+ else
+ return 0;
+ } else
+ ++cells;
+ }
+ }
+ } else
+ this->NonRowTypeError(ev);
+
+ return 0;
+}
+
+void morkRow::EmptyAllCells(morkEnv* ev) {
+ morkCell* cells = mRow_Cells;
+ if (cells) {
+ morkStore* store = this->GetRowSpaceStore(ev);
+ if (store) {
+ if (this->MaybeDirtySpaceStoreAndRow()) {
+ this->SetRowRewrite();
+ this->NoteRowSetAll(ev);
+ }
+ morkPool* pool = store->StorePool();
+ morkCell* end = cells + mRow_Length;
+ --cells; // prepare for preincrement:
+ while (++cells < end) {
+ if (cells->mCell_Atom) cells->SetAtom(ev, (morkAtom*)0, pool);
+ }
+ }
+ }
+}
+
+void morkRow::cut_all_index_entries(morkEnv* ev) {
+ morkRowSpace* rowSpace = mRow_Space;
+ if (rowSpace->mRowSpace_IndexCount) // any indexes?
+ {
+ morkCell* cells = mRow_Cells;
+ if (cells) {
+ morkCell* end = cells + mRow_Length;
+ --cells; // prepare for preincrement:
+ while (++cells < end) {
+ morkAtom* atom = cells->mCell_Atom;
+ if (atom) {
+ mork_aid atomAid = atom->GetBookAtomAid();
+ if (atomAid) {
+ mork_column col = cells->GetColumn();
+ morkAtomRowMap* map = rowSpace->FindMap(ev, col);
+ if (map) // cut row from index for this column?
+ map->CutAid(ev, atomAid);
+ }
+ }
+ }
+ }
+ }
+}
+
+void morkRow::CutAllColumns(morkEnv* ev) {
+ morkStore* store = this->GetRowSpaceStore(ev);
+ if (store) {
+ if (this->MaybeDirtySpaceStoreAndRow()) {
+ this->SetRowRewrite();
+ this->NoteRowSetAll(ev);
+ }
+ morkRowSpace* rowSpace = mRow_Space;
+ if (rowSpace->mRowSpace_IndexCount) // any indexes?
+ this->cut_all_index_entries(ev);
+
+ morkPool* pool = store->StorePool();
+ pool->CutRowCells(ev, this, /*newSize*/ 0, &store->mStore_Zone);
+ }
+}
+
+void morkRow::SetRow(morkEnv* ev, const morkRow* inSourceRow) {
+ // note inSourceRow might be in another DB, with a different store...
+ morkStore* store = this->GetRowSpaceStore(ev);
+ morkStore* srcStore = inSourceRow->GetRowSpaceStore(ev);
+ if (store && srcStore) {
+ if (this->MaybeDirtySpaceStoreAndRow()) {
+ this->SetRowRewrite();
+ this->NoteRowSetAll(ev);
+ }
+ morkRowSpace* rowSpace = mRow_Space;
+ mork_count indexes = rowSpace->mRowSpace_IndexCount; // any indexes?
+
+ mork_bool sameStore = (store == srcStore); // identical stores?
+ morkPool* pool = store->StorePool();
+ if (pool->CutRowCells(ev, this, /*newSize*/ 0, &store->mStore_Zone)) {
+ mork_fill fill = inSourceRow->mRow_Length;
+ if (pool->AddRowCells(ev, this, fill, &store->mStore_Zone)) {
+ morkCell* dst = mRow_Cells;
+ morkCell* dstEnd = dst + mRow_Length;
+
+ const morkCell* src = inSourceRow->mRow_Cells;
+ const morkCell* srcEnd = src + fill;
+ --dst;
+ --src; // prepare both for preincrement:
+
+ while (++dst < dstEnd && ++src < srcEnd && ev->Good()) {
+ morkAtom* atom = src->mCell_Atom;
+ mork_column dstCol = src->GetColumn();
+ // Note we modify the mCell_Atom slot directly instead of using
+ // morkCell::SetAtom(), because we know it starts equal to nil.
+
+ if (sameStore) // source and dest in same store?
+ {
+ // next line equivalent to inline morkCell::SetCellDirty():
+ dst->SetCellColumnDirty(dstCol);
+ dst->mCell_Atom = atom;
+ if (atom) // another ref to non-nil atom?
+ atom->AddCellUse(ev);
+ } else // need to dup items from src store in a dest store
+ {
+ dstCol = store->CopyToken(ev, dstCol, srcStore);
+ if (dstCol) {
+ // next line equivalent to inline morkCell::SetCellDirty():
+ dst->SetCellColumnDirty(dstCol);
+ atom = store->CopyAtom(ev, atom);
+ dst->mCell_Atom = atom;
+ if (atom) // another ref?
+ atom->AddCellUse(ev);
+ }
+ }
+ if (indexes && atom) {
+ mork_aid atomAid = atom->GetBookAtomAid();
+ if (atomAid) {
+ morkAtomRowMap* map = rowSpace->FindMap(ev, dstCol);
+ if (map) map->AddAid(ev, atomAid, this);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void morkRow::AddRow(morkEnv* ev, const morkRow* inSourceRow) {
+ if (mRow_Length) // any existing cells we might need to keep?
+ {
+ ev->StubMethodOnlyError();
+ } else
+ this->SetRow(ev, inSourceRow); // just exactly duplicate inSourceRow
+}
+
+void morkRow::OnZeroRowGcUse(morkEnv* ev)
+// OnZeroRowGcUse() is called when CutRowGcUse() returns zero.
+{
+ MORK_USED_1(ev);
+ // ev->NewWarning("need to implement OnZeroRowGcUse");
+}
+
+void morkRow::DirtyAllRowContent(morkEnv* ev) {
+ MORK_USED_1(ev);
+
+ if (this->MaybeDirtySpaceStoreAndRow()) {
+ this->SetRowRewrite();
+ this->NoteRowSetAll(ev);
+ }
+ morkCell* cells = mRow_Cells;
+ if (cells) {
+ morkCell* end = cells + mRow_Length;
+ --cells; // prepare for preincrement:
+ while (++cells < end) {
+ cells->SetCellDirty();
+ }
+ }
+}
+
+morkStore* morkRow::GetRowSpaceStore(morkEnv* ev) const {
+ morkRowSpace* rowSpace = mRow_Space;
+ if (rowSpace) {
+ morkStore* store = rowSpace->mSpace_Store;
+ if (store) {
+ if (store->IsStore()) {
+ return store;
+ } else
+ store->NonStoreTypeError(ev);
+ } else
+ ev->NilPointerError();
+ } else
+ ev->NilPointerError();
+
+ return (morkStore*)0;
+}
+
+void morkRow::CutColumn(morkEnv* ev, mdb_column inColumn) {
+ mork_pos pos = -1;
+ morkCell* cell = this->GetCell(ev, inColumn, &pos);
+ if (cell) {
+ morkStore* store = this->GetRowSpaceStore(ev);
+ if (store) {
+ if (this->MaybeDirtySpaceStoreAndRow() && !this->IsRowRewrite())
+ this->NoteRowCutCol(ev, inColumn);
+
+ morkRowSpace* rowSpace = mRow_Space;
+ morkAtomRowMap* map = (rowSpace->mRowSpace_IndexCount)
+ ? rowSpace->FindMap(ev, inColumn)
+ : (morkAtomRowMap*)0;
+ if (map) // this row attribute is indexed by row space?
+ {
+ morkAtom* oldAtom = cell->mCell_Atom;
+ if (oldAtom) // need to cut an entry from the index?
+ {
+ mork_aid oldAid = oldAtom->GetBookAtomAid();
+ if (oldAid) // cut old row attribute from row index in space?
+ map->CutAid(ev, oldAid);
+ }
+ }
+
+ morkPool* pool = store->StorePool();
+ cell->SetAtom(ev, (morkAtom*)0, pool);
+
+ mork_fill fill = mRow_Length; // should not be zero
+ MORK_ASSERT(fill);
+ if (fill) // index < fill for last cell exists?
+ {
+ mork_fill last = fill - 1; // index of last cell in row
+
+ if (pos < (mork_pos)last) // need to move cells following cut cell?
+ {
+ morkCell* lastCell = mRow_Cells + last;
+ mork_count after = last - pos; // cell count after cut cell
+ morkCell* next = cell + 1; // next cell after cut cell
+ MORK_MEMMOVE(cell, next, after * sizeof(morkCell));
+ lastCell->SetColumnAndChange(0, 0);
+ lastCell->mCell_Atom = 0;
+ }
+
+ if (ev->Good())
+ pool->CutRowCells(ev, this, fill - 1, &store->mStore_Zone);
+ }
+ }
+ }
+}
+
+morkAtom* morkRow::GetColumnAtom(morkEnv* ev, mdb_column inColumn) {
+ if (ev->Good()) {
+ mork_pos pos = -1;
+ morkCell* cell = this->GetCell(ev, inColumn, &pos);
+ if (cell) return cell->mCell_Atom;
+ }
+ return (morkAtom*)0;
+}
+
+void morkRow::AddColumn(morkEnv* ev, mdb_column inColumn, const mdbYarn* inYarn,
+ morkStore* ioStore) {
+ if (ev->Good()) {
+ mork_pos pos = -1;
+ morkCell* cell = this->GetCell(ev, inColumn, &pos);
+ morkCell* oldCell = cell; // need to know later whether new
+ if (!cell) // column does not yet exist?
+ cell = this->NewCell(ev, inColumn, &pos, ioStore);
+
+ if (cell) {
+ morkAtom* oldAtom = cell->mCell_Atom;
+
+ morkAtom* atom = ioStore->YarnToAtom(ev, inYarn, true /* create */);
+ if (atom && atom != oldAtom) {
+ morkRowSpace* rowSpace = mRow_Space;
+ morkAtomRowMap* map = (rowSpace->mRowSpace_IndexCount)
+ ? rowSpace->FindMap(ev, inColumn)
+ : (morkAtomRowMap*)0;
+
+ if (map) // inColumn is indexed by row space?
+ {
+ if (oldAtom && oldAtom != atom) // cut old cell from index?
+ {
+ mork_aid oldAid = oldAtom->GetBookAtomAid();
+ if (oldAid) // cut old row attribute from row index in space?
+ map->CutAid(ev, oldAid);
+ }
+ }
+
+ cell->SetAtom(ev, atom, ioStore->StorePool()); // refcounts atom
+
+ if (oldCell) // we changed a pre-existing cell in the row?
+ {
+ ++mRow_Seed;
+ if (this->MaybeDirtySpaceStoreAndRow() && !this->IsRowRewrite())
+ this->NoteRowAddCol(ev, inColumn);
+ }
+
+ if (map) // inColumn is indexed by row space?
+ {
+ mork_aid newAid = atom->GetBookAtomAid();
+ if (newAid) // add new row attribute to row index in space?
+ map->AddAid(ev, newAid, this);
+ }
+ }
+ }
+ }
+}
+
+morkRowCellCursor* morkRow::NewRowCellCursor(morkEnv* ev, mdb_pos inPos) {
+ morkRowCellCursor* outCursor = 0;
+ if (ev->Good()) {
+ morkStore* store = this->GetRowSpaceStore(ev);
+ if (store) {
+ morkRowObject* rowObj = this->AcquireRowObject(ev, store);
+ if (rowObj) {
+ nsIMdbHeap* heap = store->mPort_Heap;
+ morkRowCellCursor* cursor = new (*heap, ev)
+ morkRowCellCursor(ev, morkUsage::kHeap, heap, rowObj);
+
+ if (cursor) {
+ if (ev->Good()) {
+ cursor->mRowCellCursor_Col = inPos;
+ outCursor = cursor;
+ } else
+ cursor->CutStrongRef(ev->mEnv_SelfAsMdbEnv);
+ }
+ rowObj->Release(); // always cut ref (cursor has its own)
+ }
+ }
+ }
+ return outCursor;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkRow.h b/comm/mailnews/db/mork/morkRow.h
new file mode 100644
index 0000000000..e8a8c728ac
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRow.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKROW_
+#define _MORKROW_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class nsIMdbRow;
+class nsIMdbCell;
+#define morkDerived_kRow /*i*/ 0x5277 /* ascii 'Rw' */
+
+#define morkRow_kMaxGcUses 0x0FF /* max for 8-bit unsigned int */
+#define morkRow_kMaxLength 0x0FFFF /* max for 16-bit unsigned int */
+#define morkRow_kMinusOneRid ((mork_rid)-1)
+
+#define morkRow_kTag 'r' /* magic signature for mRow_Tag */
+
+#define morkRow_kNotedBit ((mork_u1)(1 << 0)) /* space has change notes */
+#define morkRow_kRewriteBit ((mork_u1)(1 << 1)) /* must rewrite all cells */
+#define morkRow_kDirtyBit ((mork_u1)(1 << 2)) /* row has been changed */
+
+class morkRow { // row of cells
+
+ public: // state is public because the entire Mork system is private
+ morkRowSpace* mRow_Space; // mRow_Space->SpaceScope() is the row scope
+ morkRowObject* mRow_Object; // refcount & other state for object sharing
+ morkCell* mRow_Cells;
+ mdbOid mRow_Oid;
+
+ mork_delta mRow_Delta; // space to note a single column change
+
+ mork_u2 mRow_Length; // physical count of cells in mRow_Cells
+ mork_u2 mRow_Seed; // count changes in mRow_Cells structure
+
+ mork_u1 mRow_GcUses; // persistent references from tables
+ mork_u1 mRow_Pad; // for u1 alignment
+ mork_u1 mRow_Flags; // one-byte flags slot
+ mork_u1 mRow_Tag; // one-byte tag (need u4 alignment pad)
+
+ public: // interpreting mRow_Delta
+ mork_bool HasRowDelta() const { return (mRow_Delta != 0); }
+
+ void ClearRowDelta() { mRow_Delta = 0; }
+
+ void SetRowDelta(mork_column inCol, mork_change inChange) {
+ morkDelta_Init(mRow_Delta, inCol, inChange);
+ }
+
+ mork_column GetDeltaColumn() const { return morkDelta_Column(mRow_Delta); }
+ mork_change GetDeltaChange() const { return morkDelta_Change(mRow_Delta); }
+
+ public: // noting row changes
+ void NoteRowSetAll(morkEnv* ev);
+ void NoteRowSetCol(morkEnv* ev, mork_column inCol);
+ void NoteRowAddCol(morkEnv* ev, mork_column inCol);
+ void NoteRowCutCol(morkEnv* ev, mork_column inCol);
+
+ public: // flags bit twiddling
+ void SetRowNoted() { mRow_Flags |= morkRow_kNotedBit; }
+ void SetRowRewrite() { mRow_Flags |= morkRow_kRewriteBit; }
+ void SetRowDirty() { mRow_Flags |= morkRow_kDirtyBit; }
+
+ void ClearRowNoted() { mRow_Flags &= (mork_u1)~morkRow_kNotedBit; }
+ void ClearRowRewrite() { mRow_Flags &= (mork_u1)~morkRow_kRewriteBit; }
+ void SetRowClean() {
+ mRow_Flags = 0;
+ mRow_Delta = 0;
+ }
+
+ mork_bool IsRowNoted() const { return (mRow_Flags & morkRow_kNotedBit) != 0; }
+
+ mork_bool IsRowRewrite() const {
+ return (mRow_Flags & morkRow_kRewriteBit) != 0;
+ }
+
+ mork_bool IsRowClean() const { return (mRow_Flags & morkRow_kDirtyBit) == 0; }
+
+ mork_bool IsRowDirty() const { return (mRow_Flags & morkRow_kDirtyBit) != 0; }
+
+ mork_bool IsRowUsed() const { return mRow_GcUses != 0; }
+
+ public: // other row methods
+ morkRow() {}
+ explicit morkRow(const mdbOid* inOid) : mRow_Oid(*inOid) {}
+ void InitRow(morkEnv* ev, const mdbOid* inOid, morkRowSpace* ioSpace,
+ mork_size inLength, morkPool* ioPool);
+ // if inLength is nonzero, cells will be allocated from ioPool
+
+ morkRowObject* AcquireRowObject(morkEnv* ev, morkStore* ioStore);
+ nsIMdbRow* AcquireRowHandle(morkEnv* ev, morkStore* ioStore);
+ nsIMdbCell* AcquireCellHandle(morkEnv* ev, morkCell* ioCell,
+ mdb_column inColumn, mork_pos inPos);
+
+ mork_u2 AddRowGcUse(morkEnv* ev);
+ mork_u2 CutRowGcUse(morkEnv* ev);
+
+ mork_bool MaybeDirtySpaceStoreAndRow();
+
+ public: // internal row methods
+ void cut_all_index_entries(morkEnv* ev);
+
+ // void cut_cell_from_space_index(morkEnv* ev, morkCell* ioCell);
+
+ mork_count CountOverlap(morkEnv* ev, morkCell* ioVector, mork_fill inFill);
+ // Count cells in ioVector that change existing cells in this row when
+ // ioVector is added to the row (as in TakeCells()). This is the set
+ // of cells with the same columns in ioVector and mRow_Cells, which do
+ // not have exactly the same value in mCell_Atom, and which do not both
+ // have change status equal to morkChange_kCut (because cutting a cut
+ // cell still yields a cell that has been cut). CountOverlap() also
+ // modifies the change attribute of any cell in ioVector to kDup when
+ // the change was previously kCut and the same column cell was found
+ // in this row with change also equal to kCut; this tells callers later
+ // they need not look for that cell in the row again on a second pass.
+
+ void MergeCells(morkEnv* ev, morkCell* ioVector, mork_fill inVecLength,
+ mork_fill inOldRowFill, mork_fill inOverlap);
+ // MergeCells() is the part of TakeCells() that does the insertion.
+ // inOldRowFill is the old value of mRow_Length, and inOverlap is the
+ // number of cells in the intersection that must be updated.
+
+ void TakeCells(morkEnv* ev, morkCell* ioVector, mork_fill inVecLength,
+ morkStore* ioStore);
+
+ morkCell* NewCell(morkEnv* ev, mdb_column inColumn, mork_pos* outPos,
+ morkStore* ioStore);
+ morkCell* GetCell(morkEnv* ev, mdb_column inColumn, mork_pos* outPos) const;
+ morkCell* CellAt(morkEnv* ev, mork_pos inPos) const;
+
+ mork_aid GetCellAtomAid(morkEnv* ev, mdb_column inColumn) const;
+ // GetCellAtomAid() finds the cell with column inColumn, and sees if the
+ // atom has a token ID, and returns the atom's ID if there is one. Or
+ // else zero is returned if there is no such column, or no atom, or if
+ // the atom has no ID to return. This method is intended to support
+ // efficient updating of column indexes for rows in a row space.
+
+ public: // external row methods
+ void DirtyAllRowContent(morkEnv* ev);
+
+ morkStore* GetRowSpaceStore(morkEnv* ev) const;
+
+ void AddColumn(morkEnv* ev, mdb_column inColumn, const mdbYarn* inYarn,
+ morkStore* ioStore);
+
+ morkAtom* GetColumnAtom(morkEnv* ev, mdb_column inColumn);
+
+ void NextColumn(morkEnv* ev, mdb_column* ioColumn, mdbYarn* outYarn);
+
+ void SeekColumn(morkEnv* ev, mdb_pos inPos, mdb_column* outColumn,
+ mdbYarn* outYarn);
+
+ void CutColumn(morkEnv* ev, mdb_column inColumn);
+
+ morkRowCellCursor* NewRowCellCursor(morkEnv* ev, mdb_pos inPos);
+
+ void EmptyAllCells(morkEnv* ev);
+ void AddRow(morkEnv* ev, const morkRow* inSourceRow);
+ void SetRow(morkEnv* ev, const morkRow* inSourceRow);
+ void CutAllColumns(morkEnv* ev);
+
+ void OnZeroRowGcUse(morkEnv* ev);
+ // OnZeroRowGcUse() is called when CutRowGcUse() returns zero.
+
+ public: // dynamic typing
+ mork_bool IsRow() const { return mRow_Tag == morkRow_kTag; }
+
+ public: // hash and equal
+ mork_u4 HashRow() const {
+ return (mRow_Oid.mOid_Scope << 16) ^ mRow_Oid.mOid_Id;
+ }
+
+ mork_bool EqualRow(const morkRow* ioRow) const {
+ return ((mRow_Oid.mOid_Scope == ioRow->mRow_Oid.mOid_Scope) &&
+ (mRow_Oid.mOid_Id == ioRow->mRow_Oid.mOid_Id));
+ }
+
+ mork_bool EqualOid(const mdbOid* ioOid) const {
+ return ((mRow_Oid.mOid_Scope == ioOid->mOid_Scope) &&
+ (mRow_Oid.mOid_Id == ioOid->mOid_Id));
+ }
+
+ public: // errors
+ static void ZeroColumnError(morkEnv* ev);
+ static void LengthBeyondMaxError(morkEnv* ev);
+ static void NilCellsError(morkEnv* ev);
+ static void NonRowTypeError(morkEnv* ev);
+ static void NonRowTypeWarning(morkEnv* ev);
+ static void GcUsesUnderflowWarning(morkEnv* ev);
+
+ private: // copying is not allowed
+ morkRow(const morkRow& other);
+ morkRow& operator=(const morkRow& other);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKROW_ */
diff --git a/comm/mailnews/db/mork/morkRowCellCursor.cpp b/comm/mailnews/db/mork/morkRowCellCursor.cpp
new file mode 100644
index 0000000000..edd6ebfd19
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowCellCursor.cpp
@@ -0,0 +1,220 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKROWCELLCURSOR_
+# include "morkRowCellCursor.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKROWOBJECT_
+# include "morkRowObject.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Standard morkNode teardown: mark the node as closing, run the
+// class-specific close routine, then mark it shut so repeated closes
+// become no-ops (see IsOpenNode() guard below).
+/*public virtual*/ void morkRowCellCursor::CloseMorkNode(
+ morkEnv* ev) // CloseRowCellCursor() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseRowCellCursor(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkRowCellCursor::~morkRowCellCursor() // CloseRowCellCursor() executed
+ // earlier
+{
+ // Close with the env captured at construction, then assert the node
+ // really reached the shut state before destruction completes.
+ CloseMorkNode(mMorkEnv);
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+// Construct a cell cursor over ioRowObject's row. On success this takes a
+// strong ref to the row object, snapshots the row's seed (for change
+// detection), and stamps mNode_Derived; any nil/typing problem is reported
+// on ev instead of throwing.
+morkRowCellCursor::morkRowCellCursor(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap,
+ morkRowObject* ioRowObject)
+ : morkCursor(ev, inUsage, ioHeap),
+ mRowCellCursor_RowObject(0),
+ mRowCellCursor_Col(0) {
+ if (ev->Good()) {
+ if (ioRowObject) {
+ morkRow* row = ioRowObject->mRowObject_Row;
+ if (row) {
+ if (row->IsRow()) {
+ mCursor_Pos = -1;
+ mCursor_Seed = row->mRow_Seed;
+
+ morkRowObject::SlotStrongRowObject(ioRowObject, ev,
+ &mRowCellCursor_RowObject);
+ if (ev->Good()) mNode_Derived = morkDerived_kRowCellCursor;
+ } else
+ row->NonRowTypeError(ev);
+ } else
+ ioRowObject->NilRowError(ev);
+ } else
+ ev->NilPointerError();
+ }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkRowCellCursor, morkCursor, nsIMdbRowCellCursor)
+
+// Release the strong row-object ref and reset cursor state; called by
+// CloseMorkNode() (and thus by the destructor).
+/*public non-poly*/ void morkRowCellCursor::CloseRowCellCursor(morkEnv* ev) {
+ if (this->IsNode()) {
+ mCursor_Pos = -1;
+ mCursor_Seed = 0;
+ morkRowObject::SlotStrongRowObject((morkRowObject*)0, ev,
+ &mRowCellCursor_RowObject);
+ this->CloseCursor(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Error reporters: record a descriptive error on the environment (callers
+// later observe it via ev->AsErr()) rather than throwing.
+/*static*/ void morkRowCellCursor::NilRowObjectError(morkEnv* ev) {
+ ev->NewError("nil mRowCellCursor_RowObject");
+}
+
+/*static*/ void morkRowCellCursor::NonRowCellCursorTypeError(morkEnv* ev) {
+ ev->NewError("non morkRowCellCursor");
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+// { ----- begin attribute methods -----
+// Retarget the cursor at a different row: swap the strong row-object ref,
+// resync the seed, and reposition via the remembered column.
+NS_IMETHODIMP
+morkRowCellCursor::SetRow(nsIMdbEnv* mev, nsIMdbRow* ioRow) {
+ nsresult outErr = NS_OK;
+ morkRow* row = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ // NOTE(review): unchecked downcast — assumes ioRow is backed by a
+ // morkRow; no nil check on ioRow either. Confirm caller contract.
+ row = (morkRow*)ioRow;
+ morkStore* store = row->GetRowSpaceStore(ev);
+ if (store) {
+ morkRowObject* rowObj = row->AcquireRowObject(ev, store);
+ if (rowObj) {
+ // Drop the old strong ref before installing the acquired one.
+ morkRowObject::SlotStrongRowObject((morkRowObject*)0, ev,
+ &mRowCellCursor_RowObject);
+
+ mRowCellCursor_RowObject = rowObj; // take this strong ref
+ mCursor_Seed = row->mRow_Seed;
+
+ row->GetCell(ev, mRowCellCursor_Col, &mCursor_Pos);
+ }
+ }
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+// Return (addref'd) the row this cursor currently iterates, or nullptr in
+// *acqRow when no row object is held.
+NS_IMETHODIMP
+morkRowCellCursor::GetRow(nsIMdbEnv* mev, nsIMdbRow** acqRow) {
+ nsresult outErr = NS_OK;
+ nsIMdbRow* outRow = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ morkRowObject* rowObj = mRowCellCursor_RowObject;
+ if (rowObj) outRow = rowObj->AcquireRowHandle(ev);
+
+ outErr = ev->AsErr();
+ }
+ if (acqRow) *acqRow = outRow;
+ return outErr;
+}
+// } ----- end attribute methods -----
+
+// { ----- begin cell seeking methods -----
+// Unimplemented stub: position-based seeking is not supported by this
+// cursor; callers get NS_ERROR_NOT_IMPLEMENTED.
+NS_IMETHODIMP
+morkRowCellCursor::SeekCell(
+ nsIMdbEnv* mev, // context
+ mdb_pos inPos, // position of cell in row sequence
+ mdb_column* outColumn, // column for this particular cell
+ nsIMdbCell** acqCell) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+// } ----- end cell seeking methods -----
+
+// { ----- begin cell iteration methods -----
+// Advance to the next cell. On success *acqCell gets an addref'd cell
+// handle and pos/col are reported; past the end *acqCell is nullptr and
+// pos becomes -1.
+NS_IMETHODIMP
+morkRowCellCursor::NextCell( // get next cell in the row
+ nsIMdbEnv* mev, // context
+ nsIMdbCell** acqCell, // changes to the next cell in the iteration
+ mdb_column* outColumn, // column for this particular cell
+ mdb_pos* outPos) {
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ mdb_column col = 0;
+ // NOTE(review): mRowCellCursor_Col is used here as an ordinal position,
+ // while SetRow() above passes it to GetCell() as a column id — confirm
+ // which semantics are intended.
+ mdb_pos pos = mRowCellCursor_Col;
+ if (pos < 0)
+ pos = 0;
+ else
+ ++pos;
+
+ morkCell* cell = mRowCellCursor_RowObject->mRowObject_Row->CellAt(ev, pos);
+ if (cell) {
+ col = cell->GetColumn();
+ *acqCell = mRowCellCursor_RowObject->mRowObject_Row->AcquireCellHandle(
+ ev, cell, col, pos);
+ } else {
+ *acqCell = nullptr;
+ pos = -1;
+ }
+ if (outPos) *outPos = pos;
+ if (outColumn) *outColumn = col;
+
+ mRowCellCursor_Col = pos;
+ return NS_OK;
+}
+
+// Unimplemented stub: filtered iteration is not supported.
+NS_IMETHODIMP
+morkRowCellCursor::PickNextCell( // get next cell in row within filter set
+ nsIMdbEnv* mev, // context
+ nsIMdbCell* ioCell, // changes to the next cell in the iteration
+ const mdbColumnSet* inFilterSet, // col set of actual caller interest
+ mdb_column* outColumn, // column for this particular cell
+ mdb_pos* outPos)
+// Note that inFilterSet should not have too many (many more than 10?)
+// cols, since this might imply a potential excessive consumption of time
+// over many cursor calls when looking for column and filter intersection.
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// } ----- end cell iteration methods -----
+
+// } ===== end nsIMdbRowCellCursor methods =====
diff --git a/comm/mailnews/db/mork/morkRowCellCursor.h b/comm/mailnews/db/mork/morkRowCellCursor.h
new file mode 100644
index 0000000000..91e032d2bc
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowCellCursor.h
@@ -0,0 +1,118 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKROWCELLCURSOR_
+#define _MORKROWCELLCURSOR_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class orkinRowCellCursor;
+#define morkDerived_kRowCellCursor /*i*/ 0x6343 /* ascii 'cC' */
+
+// Cursor over the cells of a single row: holds a strong ref to the row's
+// morkRowObject and tracks the last-visited cell in mRowCellCursor_Col
+// (see NextCell() in morkRowCellCursor.cpp).
+class morkRowCellCursor : public morkCursor,
+ public nsIMdbRowCellCursor { // row iterator
+
+ // public: // slots inherited from morkObject (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ // morkFactory* mObject_Factory; // weak ref to suite factory
+
+ // mork_seed mCursor_Seed;
+ // mork_pos mCursor_Pos;
+ // mork_bool mCursor_DoFailOnSeedOutOfSync;
+ // mork_u1 mCursor_Pad[ 3 ]; // explicitly pad to u4 alignment
+
+ public: // state is public because the entire Mork system is private
+ NS_DECL_ISUPPORTS_INHERITED
+ morkRowObject* mRowCellCursor_RowObject; // strong ref to row
+ mork_column mRowCellCursor_Col; // col of cell last at mCursor_Pos
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(morkEnv* ev) override; // CloseRowCellCursor()
+
+ public: // morkRowCellCursor construction & destruction
+ morkRowCellCursor(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ morkRowObject* ioRowObject);
+ void CloseRowCellCursor(morkEnv* ev); // called by CloseMorkNode();
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD SetRow(nsIMdbEnv* ev,
+ nsIMdbRow* ioRow) override; // sets pos to -1
+ NS_IMETHOD GetRow(nsIMdbEnv* ev, nsIMdbRow** acqRow) override;
+ // } ----- end attribute methods -----
+
+ // { ----- begin cell seeking methods -----
+ // SeekCell() is a stub returning NS_ERROR_NOT_IMPLEMENTED (see .cpp).
+ NS_IMETHOD SeekCell(nsIMdbEnv* ev, // context
+ mdb_pos inPos, // position of cell in row sequence
+ mdb_column* outColumn, // column for this particular cell
+ nsIMdbCell** acqCell) override; // the cell at inPos
+ // } ----- end cell seeking methods -----
+
+ // { ----- begin cell iteration methods -----
+ NS_IMETHOD NextCell( // get next cell in the row
+ nsIMdbEnv* ev, // context
+ nsIMdbCell** acqCell, // changes to the next cell in the iteration
+ mdb_column* outColumn, // column for this particular cell
+ mdb_pos* outPos) override; // position of cell in row sequence
+
+ // PickNextCell() is a stub returning NS_ERROR_NOT_IMPLEMENTED (see .cpp).
+ NS_IMETHOD PickNextCell( // get next cell in row within filter set
+ nsIMdbEnv* ev, // context
+ nsIMdbCell* ioCell, // changes to the next cell in the iteration
+ const mdbColumnSet* inFilterSet, // col set of actual caller interest
+ mdb_column* outColumn, // column for this particular cell
+ mdb_pos* outPos) override; // position of cell in row sequence
+
+ // Note that inFilterSet should not have too many (many more than 10?)
+ // cols, since this might imply a potential excessive consumption of time
+ // over many cursor calls when looking for column and filter intersection.
+ // } ----- end cell iteration methods -----
+
+ private: // copying is not allowed
+ morkRowCellCursor(const morkRowCellCursor& other);
+ morkRowCellCursor& operator=(const morkRowCellCursor& other);
+ virtual ~morkRowCellCursor(); // assert that close executed earlier
+
+ public: // dynamic type identification
+ mork_bool IsRowCellCursor() const {
+ return IsNode() && mNode_Derived == morkDerived_kRowCellCursor;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // errors
+ static void NilRowObjectError(morkEnv* ev);
+ static void NonRowCellCursorTypeError(morkEnv* ev);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakRowCellCursor(morkRowCellCursor* me, morkEnv* ev,
+ morkRowCellCursor** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongRowCellCursor(morkRowCellCursor* me, morkEnv* ev,
+ morkRowCellCursor** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKROWCELLCURSOR_ */
diff --git a/comm/mailnews/db/mork/morkRowMap.cpp b/comm/mailnews/db/mork/morkRowMap.cpp
new file mode 100644
index 0000000000..a1d415f8d2
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowMap.cpp
@@ -0,0 +1,250 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKROWMAP_
+# include "morkRowMap.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Standard morkNode teardown for the row map (same closing protocol as
+// the other mork nodes in this patch).
+/*public virtual*/ void morkRowMap::CloseMorkNode(
+ morkEnv* ev) // CloseRowMap() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseRowMap(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkRowMap::~morkRowMap() // assert CloseRowMap() executed earlier
+{
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+// Keys are morkRow* pointers (sizeof(morkRow*)); values are unused
+// (inValSize 0) — the map is effectively a set of rows keyed by Oid.
+morkRowMap::morkRowMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap,
+ mork_size inSlots)
+ : morkMap(ev, inUsage, ioHeap,
+ /*inKeySize*/ sizeof(morkRow*), /*inValSize*/ 0, inSlots,
+ ioSlotHeap, /*inHoldChanges*/ morkBool_kFalse) {
+ if (ev->Good()) mNode_Derived = morkDerived_kRowMap;
+}
+
+/*public non-poly*/ void morkRowMap::CloseRowMap(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ this->CloseMap(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// { ===== begin morkMap poly interface =====
+// Key equality/hash delegate to morkRow, which compares/hashes by Oid
+// (scope + id) — see morkRow::EqualRow()/HashRow() in morkRow.h.
+/*virtual*/ mork_bool //
+morkRowMap::Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const {
+ MORK_USED_1(ev);
+ return (*(const morkRow**)inKeyA)->EqualRow(*(const morkRow**)inKeyB);
+}
+
+/*virtual*/ mork_u4 //
+morkRowMap::Hash(morkEnv* ev, const void* inKey) const {
+ MORK_USED_1(ev);
+ return (*(const morkRow**)inKey)->HashRow();
+}
+// } ===== end morkMap poly interface =====
+
+// Insert ioRow into the map (no value stored); returns ev->Good().
+mork_bool morkRowMap::AddRow(morkEnv* ev, morkRow* ioRow) {
+ if (ev->Good()) {
+ this->Put(ev, &ioRow, /*val*/ (void*)0,
+ /*key*/ (void*)0, /*val*/ (void*)0, (mork_change**)0);
+ }
+ return ev->Good();
+}
+
+// Remove the row whose Oid equals *inOid, using a stack-constructed probe
+// row as the lookup key; returns the removed row or nil.
+morkRow* morkRowMap::CutOid(morkEnv* ev, const mdbOid* inOid) {
+ morkRow row(inOid);
+ morkRow* key = &row;
+ morkRow* oldKey = 0;
+ this->Cut(ev, &key, &oldKey, /*val*/ (void*)0, (mork_change**)0);
+
+ return oldKey;
+}
+
+morkRow* morkRowMap::CutRow(morkEnv* ev, const morkRow* ioRow) {
+ morkRow* oldKey = 0;
+ this->Cut(ev, &ioRow, &oldKey, /*val*/ (void*)0, (mork_change**)0);
+
+ return oldKey;
+}
+
+// Find the row whose Oid equals *inOid (same probe-row trick as CutOid);
+// returns the stored row or nil.
+morkRow* morkRowMap::GetOid(morkEnv* ev, const mdbOid* inOid) {
+ morkRow row(inOid);
+ morkRow* key = &row;
+ morkRow* oldKey = 0;
+ this->Get(ev, &key, &oldKey, /*val*/ (void*)0, (mork_change**)0);
+
+ return oldKey;
+}
+
+morkRow* morkRowMap::GetRow(morkEnv* ev, const morkRow* ioRow) {
+ morkRow* oldKey = 0;
+ this->Get(ev, &ioRow, &oldKey, /*val*/ (void*)0, (mork_change**)0);
+
+ return oldKey;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Probe-map variant of the row map; same teardown protocol as morkRowMap.
+/*public virtual*/ void morkRowProbeMap::CloseMorkNode(
+ morkEnv* ev) // CloseRowProbeMap() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseRowProbeMap(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkRowProbeMap::~morkRowProbeMap() // assert CloseRowProbeMap() executed
+ // earlier
+{
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+// Note the morkProbeMap base ctor takes (ioSlotHeap, inSlots) in the
+// opposite order from morkMap's (inSlots, ioSlotHeap), and holds changes
+// (morkBool_kTrue) where morkRowMap does not.
+morkRowProbeMap::morkRowProbeMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap,
+ mork_size inSlots)
+ : morkProbeMap(ev, inUsage, ioHeap,
+ /*inKeySize*/ sizeof(morkRow*), /*inValSize*/ 0, ioSlotHeap,
+ inSlots,
+ /*inHoldChanges*/ morkBool_kTrue) {
+ if (ev->Good()) mNode_Derived = morkDerived_kRowProbeMap;
+}
+
+/*public non-poly*/ void morkRowProbeMap::CloseRowProbeMap(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ this->CloseProbeMap(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Three-way probe test: kHit on Oid equality, kMiss on a live non-equal
+// key, kVoid for an empty (nil) slot so probing can stop.
+/*virtual*/ mork_test // hit(a,b) implies hash(a) == hash(b)
+morkRowProbeMap::MapTest(morkEnv* ev, const void* inMapKey,
+ const void* inAppKey) const {
+ MORK_USED_1(ev);
+ const morkRow* key = *(const morkRow**)inMapKey;
+ if (key) {
+ mork_bool hit = key->EqualRow(*(const morkRow**)inAppKey);
+ return (hit) ? morkTest_kHit : morkTest_kMiss;
+ } else
+ return morkTest_kVoid;
+}
+
+// Hash an application-side key; a nil pointer is only a warning and
+// hashes to 0.
+/*virtual*/ mork_u4 // hit(a,b) implies hash(a) == hash(b)
+morkRowProbeMap::MapHash(morkEnv* ev, const void* inAppKey) const {
+ const morkRow* key = *(const morkRow**)inAppKey;
+ if (key)
+ return key->HashRow();
+ else {
+ ev->NilPointerWarning();
+ return 0;
+ }
+}
+
+// Same hashing, but for keys already stored inside the map.
+/*virtual*/ mork_u4 morkRowProbeMap::ProbeMapHashMapKey(
+ morkEnv* ev, const void* inMapKey) const {
+ const morkRow* key = *(const morkRow**)inMapKey;
+ if (key)
+ return key->HashRow();
+ else {
+ ev->NilPointerWarning();
+ return 0;
+ }
+}
+
+mork_bool morkRowProbeMap::AddRow(morkEnv* ev, morkRow* ioRow) {
+ if (ev->Good()) {
+ this->MapAtPut(ev, &ioRow, /*val*/ (void*)0,
+ /*key*/ (void*)0, /*val*/ (void*)0);
+ }
+ return ev->Good();
+}
+
+// Probe maps do not support removal: both Cut methods report
+// ProbeMapCutError on ev and return nil.
+morkRow* morkRowProbeMap::CutOid(morkEnv* ev, const mdbOid* inOid) {
+ MORK_USED_1(inOid);
+ morkProbeMap::ProbeMapCutError(ev);
+
+ return 0;
+}
+
+morkRow* morkRowProbeMap::CutRow(morkEnv* ev, const morkRow* ioRow) {
+ MORK_USED_1(ioRow);
+ morkProbeMap::ProbeMapCutError(ev);
+
+ return 0;
+}
+
+// Lookup by Oid via a stack-constructed probe row, as in morkRowMap.
+morkRow* morkRowProbeMap::GetOid(morkEnv* ev, const mdbOid* inOid) {
+ morkRow row(inOid);
+ morkRow* key = &row;
+ morkRow* oldKey = 0;
+ this->MapAt(ev, &key, &oldKey, /*val*/ (void*)0);
+
+ return oldKey;
+}
+
+morkRow* morkRowProbeMap::GetRow(morkEnv* ev, const morkRow* ioRow) {
+ morkRow* oldKey = 0;
+ this->MapAt(ev, &ioRow, &oldKey, /*val*/ (void*)0);
+
+ return oldKey;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkRowMap.h b/comm/mailnews/db/mork/morkRowMap.h
new file mode 100644
index 0000000000..d58515d4db
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowMap.h
@@ -0,0 +1,228 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKROWMAP_
+#define _MORKROWMAP_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKPROBEMAP_
+# include "morkProbeMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kRowMap /*i*/ 0x724D /* ascii 'rM' */
+
+/*| morkRowMap: maps a set of morkRow by contained Oid
+|*/
+class morkRowMap : public morkMap { // for mapping row IDs to rows
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseRowMap() only if open
+ virtual ~morkRowMap(); // assert that CloseRowMap() executed earlier
+
+ public: // morkMap construction & destruction
+ morkRowMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ nsIMdbHeap* ioSlotHeap, mork_size inSlots);
+ void CloseRowMap(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+ mork_bool IsRowMap() const {
+ return IsNode() && mNode_Derived == morkDerived_kRowMap;
+ }
+ // } ===== end morkNode methods =====
+
+ // { ===== begin morkMap poly interface =====
+ virtual mork_bool // note: equal(a,b) implies hash(a) == hash(b)
+ Equal(morkEnv* ev, const void* inKeyA, const void* inKeyB) const override;
+ // implemented using morkRow::EqualRow()
+
+ virtual mork_u4 // note: equal(a,b) implies hash(a) == hash(b)
+ Hash(morkEnv* ev, const void* inKey) const override;
+ // implemented using morkRow::HashRow()
+ // } ===== end morkMap poly interface =====
+
+ public: // other map methods
+ mork_bool AddRow(morkEnv* ev, morkRow* ioRow);
+ // AddRow() returns ev->Good()
+
+ morkRow* CutOid(morkEnv* ev, const mdbOid* inOid);
+ // CutRid() returns the row removed equal to inRid, if there was one
+
+ morkRow* CutRow(morkEnv* ev, const morkRow* ioRow);
+ // CutRow() returns the row removed equal to ioRow, if there was one
+
+ morkRow* GetOid(morkEnv* ev, const mdbOid* inOid);
+ // GetOid() returns the row equal to inRid, or else nil
+
+ morkRow* GetRow(morkEnv* ev, const morkRow* ioRow);
+ // GetRow() returns the row equal to ioRow, or else nil
+
+ // note the rows are owned elsewhere, usually by morkRowSpace
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakRowMap(morkRowMap* me, morkEnv* ev, morkRowMap** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongRowMap(morkRowMap* me, morkEnv* ev,
+ morkRowMap** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+// Typesafe iterator wrapper over morkMapIter: the First/Next/Here/CutHere
+// methods fill *outRowPtr with the row key and ignore the (empty) value.
+class morkRowMapIter : public morkMapIter { // typesafe wrapper class
+
+ public:
+ morkRowMapIter(morkEnv* ev, morkRowMap* ioMap) : morkMapIter(ev, ioMap) {}
+
+ morkRowMapIter() : morkMapIter() {}
+ void InitRowMapIter(morkEnv* ev, morkRowMap* ioMap) {
+ this->InitMapIter(ev, ioMap);
+ }
+
+ mork_change* FirstRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->First(ev, outRowPtr, /*val*/ (void*)0);
+ }
+
+ mork_change* NextRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->Next(ev, outRowPtr, /*val*/ (void*)0);
+ }
+
+ mork_change* HereRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->Here(ev, outRowPtr, /*val*/ (void*)0);
+ }
+
+ mork_change* CutHereRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->CutHere(ev, outRowPtr, /*val*/ (void*)0);
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kRowProbeMap /*i*/ 0x726D /* ascii 'rm' */
+
+/*| morkRowProbeMap: maps a set of morkRow by contained Oid
+|*/
+class morkRowProbeMap : public morkProbeMap { // for mapping row IDs to rows
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseRowProbeMap() only if open
+ virtual ~morkRowProbeMap(); // assert CloseRowProbeMap() executed earlier
+
+ public: // morkMap construction & destruction
+ morkRowProbeMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ nsIMdbHeap* ioSlotHeap, mork_size inSlots);
+ void CloseRowProbeMap(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+ // NOTE(review): this tests morkDerived_kRowMap, but the constructor in
+ // morkRowMap.cpp sets mNode_Derived = morkDerived_kRowProbeMap, so
+ // IsRowMap() appears to be false for a live morkRowProbeMap — confirm
+ // whether that is intentional.
+ mork_bool IsRowMap() const {
+ return IsNode() && mNode_Derived == morkDerived_kRowMap;
+ }
+ // } ===== end morkNode methods =====
+
+ // { ===== begin morkProbeMap methods =====
+ virtual mork_test // hit(a,b) implies hash(a) == hash(b)
+ MapTest(morkEnv* ev, const void* inMapKey,
+ const void* inAppKey) const override;
+
+ virtual mork_u4 // hit(a,b) implies hash(a) == hash(b)
+ MapHash(morkEnv* ev, const void* inAppKey) const override;
+
+ virtual mork_u4 ProbeMapHashMapKey(morkEnv* ev,
+ const void* inMapKey) const override;
+
+ // virtual mork_bool ProbeMapIsKeyNil(morkEnv* ev, void* ioMapKey);
+
+ // virtual void ProbeMapClearKey(morkEnv* ev, // put 'nil' into all keys
+ // inside map
+ // void* ioMapKey, mork_count inKeyCount); // array of keys inside map
+
+ // virtual void ProbeMapPushIn(morkEnv* ev, // move (key,val) into the map
+ // const void* inAppKey, const void* inAppVal, // (key,val) outside map
+ // void* outMapKey, void* outMapVal); // (key,val) inside map
+
+ // virtual void ProbeMapPullOut(morkEnv* ev, // move (key,val) out from the
+ // map
+ // const void* inMapKey, const void* inMapVal, // (key,val) inside map
+ // void* outAppKey, void* outAppVal) const; // (key,val) outside map
+ // } ===== end morkProbeMap methods =====
+
+ public: // other map methods
+ mork_bool AddRow(morkEnv* ev, morkRow* ioRow);
+ // AddRow() returns ev->Good()
+
+ // CutOid()/CutRow() always report ProbeMapCutError and return nil —
+ // probe maps do not support removal (see morkRowMap.cpp).
+ morkRow* CutOid(morkEnv* ev, const mdbOid* inOid);
+ // CutRid() returns the row removed equal to inRid, if there was one
+
+ morkRow* CutRow(morkEnv* ev, const morkRow* ioRow);
+ // CutRow() returns the row removed equal to ioRow, if there was one
+
+ morkRow* GetOid(morkEnv* ev, const mdbOid* inOid);
+ // GetOid() returns the row equal to inRid, or else nil
+
+ morkRow* GetRow(morkEnv* ev, const morkRow* ioRow);
+ // GetRow() returns the row equal to ioRow, or else nil
+
+ // note the rows are owned elsewhere, usually by morkRowSpace
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakRowProbeMap(morkRowProbeMap* me, morkEnv* ev,
+ morkRowProbeMap** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongRowProbeMap(morkRowProbeMap* me, morkEnv* ev,
+ morkRowProbeMap** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+// Typesafe iterator wrapper over morkProbeMapIter; mirrors morkRowMapIter
+// above (row key out, no value).
+class morkRowProbeMapIter : public morkProbeMapIter { // typesafe wrapper class
+
+ public:
+ morkRowProbeMapIter(morkEnv* ev, morkRowProbeMap* ioMap)
+ : morkProbeMapIter(ev, ioMap) {}
+
+ morkRowProbeMapIter() : morkProbeMapIter() {}
+ void InitRowMapIter(morkEnv* ev, morkRowProbeMap* ioMap) {
+ this->InitMapIter(ev, ioMap);
+ }
+
+ mork_change* FirstRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->First(ev, outRowPtr, /*val*/ (void*)0);
+ }
+
+ mork_change* NextRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->Next(ev, outRowPtr, /*val*/ (void*)0);
+ }
+
+ mork_change* HereRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->Here(ev, outRowPtr, /*val*/ (void*)0);
+ }
+
+ mork_change* CutHereRow(morkEnv* ev, morkRow** outRowPtr) {
+ return this->CutHere(ev, outRowPtr, /*val*/ (void*)0);
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKROWMAP_ */
diff --git a/comm/mailnews/db/mork/morkRowObject.cpp b/comm/mailnews/db/mork/morkRowObject.cpp
new file mode 100644
index 0000000000..39844172b8
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowObject.cpp
@@ -0,0 +1,530 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKROWOBJECT_
+# include "morkRowObject.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKROWCELLCURSOR_
+# include "morkRowCellCursor.h"
+#endif
+
+#ifndef _MORKCELLOBJECT_
+# include "morkCellObject.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Standard morkNode teardown for the row object wrapper.
+/*public virtual*/ void morkRowObject::CloseMorkNode(
+ morkEnv* ev) // CloseRowObject() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseRowObject(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkRowObject::~morkRowObject() // assert CloseRowObject() executed earlier
+{
+ CloseMorkNode(mMorkEnv);
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+// Wraps a morkRow (and its owning store) in an nsIMdbRow-implementing
+// object. Both pointers are required; the store is held as a weak pointer
+// by design (see inline comment).
+morkRowObject::morkRowObject(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, morkRow* ioRow,
+ morkStore* ioStore)
+ : morkObject(ev, inUsage, ioHeap, morkColor_kNone, (morkHandle*)0),
+ mRowObject_Row(0),
+ mRowObject_Store(0) {
+ if (ev->Good()) {
+ if (ioRow && ioStore) {
+ mRowObject_Row = ioRow;
+ mRowObject_Store =
+ ioStore; // morkRowObjects don't ref-cnt the owning store.
+
+ if (ev->Good()) mNode_Derived = morkDerived_kRowObject;
+ } else
+ ev->NilPointerError();
+ }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkRowObject, morkObject, nsIMdbRow)
+// { ===== begin nsIMdbCollection methods =====
+
+// { ----- begin attribute methods -----
+// Report the wrapped row's change seed.
+// NOTE(review): mRowObject_Row is dereferenced without a nil check in the
+// accessors below — confirm a closed row object cannot reach these.
+NS_IMETHODIMP
+morkRowObject::GetSeed(nsIMdbEnv* mev, mdb_seed* outSeed) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ *outSeed = (mdb_seed)mRowObject_Row->mRow_Seed;
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// Report the number of cells in the wrapped row.
+NS_IMETHODIMP
+morkRowObject::GetCount(nsIMdbEnv* mev, mdb_count* outCount) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ *outCount = (mdb_count)mRowObject_Row->mRow_Length;
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+// Return (addref'd) the store that owns this row's space, as an
+// nsIMdbPort handle; nil space/store is reported as a nil-pointer error.
+NS_IMETHODIMP
+morkRowObject::GetPort(nsIMdbEnv* mev, nsIMdbPort** acqPort) {
+ nsresult outErr = NS_OK;
+ nsIMdbPort* outPort = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ morkRowSpace* rowSpace = mRowObject_Row->mRow_Space;
+ if (rowSpace && rowSpace->mSpace_Store) {
+ morkStore* store = mRowObject_Row->GetRowSpaceStore(ev);
+ if (store) outPort = store->AcquireStoreHandle(ev);
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqPort) *acqPort = outPort;
+
+ return outErr;
+}
+// } ----- end attribute methods -----
+
+// { ----- begin cursor methods -----
+// Collection-level cursor request; delegates to GetRowCellCursor() below.
+NS_IMETHODIMP
+morkRowObject::GetCursor( // make a cursor starting iter at inMemberPos
+ nsIMdbEnv* mev, // context
+ mdb_pos inMemberPos, // zero-based ordinal pos of member in collection
+ nsIMdbCursor** acqCursor) {
+ return this->GetRowCellCursor(mev, inMemberPos,
+ (nsIMdbRowCellCursor**)acqCursor);
+}
+// } ----- end cursor methods -----
+
+// { ----- begin ID methods -----
+NS_IMETHODIMP
+morkRowObject::GetOid(nsIMdbEnv* mev, mdbOid* outOid) {
+ *outOid = mRowObject_Row->mRow_Oid;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ return (ev) ? ev->AsErr() : NS_ERROR_FAILURE;
+}
+
+// Unimplemented stub.
+NS_IMETHODIMP
+morkRowObject::BecomeContent(nsIMdbEnv* mev, const mdbOid* inOid) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ // remember row->MaybeDirtySpaceStoreAndRow();
+}
+// } ----- end ID methods -----
+
+// { ----- begin activity dropping methods -----
+// Unimplemented stub.
+NS_IMETHODIMP
+morkRowObject::DropActivity( // tell collection usage no longer expected
+ nsIMdbEnv* mev) {
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+// } ----- end activity dropping methods -----
+
+// } ===== end nsIMdbCollection methods =====
+
+// { ===== begin nsIMdbRow methods =====
+
+// { ----- begin cursor methods -----
+// Create an addref'd cell cursor over this row, starting at inPos.
+NS_IMETHODIMP
+morkRowObject::GetRowCellCursor( // make a cursor starting iteration at
+ // inCellPos
+ nsIMdbEnv* mev, // context
+ mdb_pos inPos, // zero-based ordinal position of cell in row
+ nsIMdbRowCellCursor** acqCursor) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ nsIMdbRowCellCursor* outCursor = 0;
+ if (ev) {
+ morkRowCellCursor* cursor = mRowObject_Row->NewRowCellCursor(ev, inPos);
+ if (cursor) {
+ if (ev->Good()) {
+ // NOTE(review): inPos is stored into mCursor_Seed (not
+ // mCursor_Pos) — confirm this is the intended slot.
+ cursor->mCursor_Seed = (mork_seed)inPos;
+ outCursor = cursor;
+ NS_ADDREF(cursor);
+ }
+ }
+ outErr = ev->AsErr();
+ }
+ if (acqCursor) *acqCursor = outCursor;
+ return outErr;
+}
+// } ----- end cursor methods -----
+
+// { ----- begin column methods -----
+// Write inYarn into inColumn of the row, creating the cell if absent;
+// silently does nothing when row or store pointer is nil.
+NS_IMETHODIMP
+morkRowObject::AddColumn( // make sure a particular column is inside row
+ nsIMdbEnv* mev, // context
+ mdb_column inColumn, // column to add
+ const mdbYarn* inYarn) {
+ nsresult outErr = NS_ERROR_FAILURE;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (mRowObject_Store && mRowObject_Row)
+ mRowObject_Row->AddColumn(ev, inColumn, inYarn, mRowObject_Store);
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::CutColumn( // make sure a column is absent from the row
+ nsIMdbEnv* mev, // context
+ mdb_column inColumn) {
+ nsresult outErr = NS_ERROR_FAILURE;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ mRowObject_Row->CutColumn(ev, inColumn);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::CutAllColumns( // remove all columns from the row
+ nsIMdbEnv* mev) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ mRowObject_Row->CutAllColumns(ev);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// } ----- end column methods -----
+
+// { ----- begin cell methods -----
+// Fetch the cell for inColumn, first creating it (with an empty yarn)
+// when it does not yet exist.
+NS_IMETHODIMP
+morkRowObject::NewCell( // get cell for specified column, or add new one
+ nsIMdbEnv* mev, // context
+ mdb_column inColumn, // column to add
+ nsIMdbCell** acqCell) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ GetCell(mev, inColumn, acqCell);
+ if (!*acqCell) {
+ if (mRowObject_Store) {
+ mdbYarn yarn; // to pass empty yarn into morkRowObject::AddColumn()
+ yarn.mYarn_Buf = 0;
+ yarn.mYarn_Fill = 0;
+ yarn.mYarn_Size = 0;
+ yarn.mYarn_More = 0;
+ yarn.mYarn_Form = 0;
+ yarn.mYarn_Grow = 0;
+ AddColumn(ev, inColumn, &yarn);
+ GetCell(mev, inColumn, acqCell);
+ }
+ }
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+// Copy a cell from another row into this one, translating the column
+// token between stores when the source row lives in a different store.
+NS_IMETHODIMP
+morkRowObject::AddCell( // copy a cell from another row to this row
+ nsIMdbEnv* mev, // context
+ const nsIMdbCell* inCell) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ morkCell* cell = 0;
+ // NOTE(review): const is cast away and inCell is assumed to be a
+ // morkCellObject — confirm caller contract.
+ morkCellObject* cellObj = (morkCellObject*)inCell;
+ if (cellObj->CanUseCell(mev, morkBool_kFalse, &outErr, &cell)) {
+ morkRow* cellRow = cellObj->mCellObject_Row;
+ if (cellRow) {
+ // Copying a cell from this very row would be a no-op; skip it.
+ if (mRowObject_Row != cellRow) {
+ morkStore* store = mRowObject_Row->GetRowSpaceStore(ev);
+ morkStore* cellStore = cellRow->GetRowSpaceStore(ev);
+ if (store && cellStore) {
+ mork_column col = cell->GetColumn();
+ morkAtom* atom = cell->mCell_Atom;
+ mdbYarn yarn;
+ morkAtom::AliasYarn(atom, &yarn); // works even when atom is nil
+
+ if (store != cellStore) col = store->CopyToken(ev, col, cellStore);
+ if (ev->Good()) AddColumn(ev, col, &yarn);
+ } else
+ ev->NilPointerError();
+ }
+ } else
+ ev->NilPointerError();
+ }
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::GetCell( // find a cell in this row
+ nsIMdbEnv* mev, // context
+ mdb_column inColumn, // column to find
+ nsIMdbCell** acqCell) {
+ nsresult outErr = NS_OK;
+ nsIMdbCell* outCell = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+
+ if (ev) {
+ if (inColumn) {
+ mork_pos pos = 0;
+ morkCell* cell = mRowObject_Row->GetCell(ev, inColumn, &pos);
+ if (cell) {
+ outCell = mRowObject_Row->AcquireCellHandle(ev, cell, inColumn, pos);
+ }
+ } else
+ mRowObject_Row->ZeroColumnError(ev);
+
+ outErr = ev->AsErr();
+ }
+ if (acqCell) *acqCell = outCell;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::EmptyAllCells( // make all cells in row empty of content
+ nsIMdbEnv* mev) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ EmptyAllCells(ev);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// } ----- end cell methods -----
+
+// { ----- begin row methods -----
+NS_IMETHODIMP
+morkRowObject::AddRow( // add all cells in another row to this one
+ nsIMdbEnv* mev, // context
+ nsIMdbRow* ioSourceRow) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ morkRow* unsafeSource = (morkRow*)ioSourceRow; // unsafe cast
+ // if ( unsafeSource->CanUseRow(mev, morkBool_kFalse, &outErr, &source) )
+ { mRowObject_Row->AddRow(ev, unsafeSource); }
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::SetRow( // make exact duplicate of another row
+ nsIMdbEnv* mev, // context
+ nsIMdbRow* ioSourceRow) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ morkRowObject* sourceObject = (morkRowObject*)ioSourceRow; // unsafe cast
+ morkRow* unsafeSource = sourceObject->mRowObject_Row;
+ // if ( unsafeSource->CanUseRow(mev, morkBool_kFalse, &outErr, &source) )
+ { mRowObject_Row->SetRow(ev, unsafeSource); }
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// } ----- end row methods -----
+
+// { ----- begin blob methods -----
+NS_IMETHODIMP
+morkRowObject::SetCellYarn( // synonym for AddColumn()
+ nsIMdbEnv* mev, // context
+ mdb_column inColumn, // column to add
+ const mdbYarn* inYarn) {
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (mRowObject_Store) AddColumn(ev, inColumn, inYarn);
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+NS_IMETHODIMP
+morkRowObject::GetCellYarn(nsIMdbEnv* mev, // context
+ mdb_column inColumn, // column to read
+ mdbYarn* outYarn) // writes some yarn slots
+// copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (mRowObject_Store && mRowObject_Row) {
+ morkAtom* atom = mRowObject_Row->GetColumnAtom(ev, inColumn);
+ morkAtom::GetYarn(atom, outYarn);
+ }
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::AliasCellYarn(nsIMdbEnv* mev, // context
+ mdb_column inColumn, // column to alias
+ mdbYarn* outYarn) // writes ALL yarn slots
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (mRowObject_Store && mRowObject_Row) {
+ morkAtom* atom = mRowObject_Row->GetColumnAtom(ev, inColumn);
+ morkAtom::AliasYarn(atom, outYarn);
+ // note nil atom works and sets yarn correctly
+ }
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::NextCellYarn(
+ nsIMdbEnv* mev, // iterative version of GetCellYarn()
+ mdb_column* ioColumn, // next column to read
+ mdbYarn* outYarn) // writes some yarn slots
+// copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+//
+// The ioColumn argument is an inout parameter which initially contains the
+// last column accessed and returns the next column corresponding to the
+// content read into the yarn. Callers should start with a zero column
+// value to say 'no previous column', which causes the first column to be
+// read. Then the value returned in ioColumn is perfect for the next call
+// to NextCellYarn(), since it will then be the previous column accessed.
+// Callers need only examine the column token returned to see which cell
+// in the row is being read into the yarn. When no more columns remain,
+// and the iteration has ended, ioColumn will return a zero token again.
+// So iterating over cells starts and ends with a zero column token.
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (mRowObject_Store && mRowObject_Row)
+ mRowObject_Row->NextColumn(ev, ioColumn, outYarn);
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkRowObject::SeekCellYarn( // resembles nsIMdbRowCellCursor::SeekCell()
+ nsIMdbEnv* mev, // context
+ mdb_pos inPos, // position of cell in row sequence
+ mdb_column* outColumn, // column for this particular cell
+ mdbYarn* outYarn) // writes some yarn slots
+// copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+// Callers can pass nil for outYarn to indicate no interest in content, so
+// only the outColumn value is returned. NOTE to subclasses: you must be
+// able to ignore outYarn when the pointer is nil; please do not crash.
+
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (mRowObject_Store && mRowObject_Row)
+ mRowObject_Row->SeekColumn(ev, inPos, outColumn, outYarn);
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+// } ----- end blob methods -----
+
+// } ===== end nsIMdbRow methods =====
+
+/*public non-poly*/ void morkRowObject::CloseRowObject(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ morkRow* row = mRowObject_Row;
+ mRowObject_Row = 0;
+ this->CloseObject(ev);
+ this->MarkShut();
+
+ if (row) {
+ MORK_ASSERT(row->mRow_Object == this);
+ if (row->mRow_Object == this) {
+ row->mRow_Object = 0; // just nil this slot -- cut ref down below
+
+ mRowObject_Store = 0; // morkRowObjects don't ref-cnt the owning store.
+
+ this->CutWeakRef(
+ ev->AsMdbEnv()); // do last, because it might self destroy
+ }
+ }
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+/*static*/ void morkRowObject::NonRowObjectTypeError(morkEnv* ev) {
+ ev->NewError("non morkRowObject");
+}
+
+/*static*/ void morkRowObject::NilRowError(morkEnv* ev) {
+ ev->NewError("nil mRowObject_Row");
+}
+
+/*static*/ void morkRowObject::NilStoreError(morkEnv* ev) {
+ ev->NewError("nil mRowObject_Store");
+}
+
+/*static*/ void morkRowObject::RowObjectRowNotSelfError(morkEnv* ev) {
+ ev->NewError("mRowObject_Row->mRow_Object != self");
+}
+
+nsIMdbRow* morkRowObject::AcquireRowHandle(morkEnv* ev) // mObject_Handle
+{
+ AddRef();
+ return this;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkRowObject.h b/comm/mailnews/db/mork/morkRowObject.h
new file mode 100644
index 0000000000..7af5642a3f
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowObject.h
@@ -0,0 +1,204 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKROWOBJECT_
+#define _MORKROWOBJECT_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class nsIMdbRow;
+#define morkDerived_kRowObject /*i*/ 0x724F /* ascii 'rO' */
+
+class morkRowObject : public morkObject, public nsIMdbRow { //
+
+ public: // state is public because the entire Mork system is private
+ NS_DECL_ISUPPORTS_INHERITED
+
+ morkRow* mRowObject_Row; // non-refcounted alias to morkRow
+ morkStore* mRowObject_Store; // non-refcounted ptr to store containing row
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseRowObject() only if open
+
+ public: // morkRowObject construction & destruction
+ morkRowObject(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+ morkRow* ioRow, morkStore* ioStore);
+ void CloseRowObject(morkEnv* ev); // called by CloseMorkNode();
+
+ // { ===== begin nsIMdbCollection methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD GetSeed(nsIMdbEnv* ev,
+ mdb_seed* outSeed) override; // member change count
+ NS_IMETHOD GetCount(nsIMdbEnv* ev,
+ mdb_count* outCount) override; // member count
+
+ NS_IMETHOD GetPort(nsIMdbEnv* ev,
+ nsIMdbPort** acqPort) override; // collection container
+ // } ----- end attribute methods -----
+
+ // { ----- begin cursor methods -----
+ NS_IMETHOD GetCursor( // make a cursor starting iter at inMemberPos
+ nsIMdbEnv* ev, // context
+ mdb_pos inMemberPos, // zero-based ordinal pos of member in collection
+ nsIMdbCursor** acqCursor) override; // acquire new cursor instance
+ // } ----- end cursor methods -----
+
+ // { ----- begin ID methods -----
+ NS_IMETHOD GetOid(nsIMdbEnv* ev,
+ mdbOid* outOid) override; // read object identity
+ NS_IMETHOD BecomeContent(nsIMdbEnv* ev,
+ const mdbOid* inOid) override; // exchange content
+ // } ----- end ID methods -----
+
+ // { ----- begin activity dropping methods -----
+ NS_IMETHOD DropActivity( // tell collection usage no longer expected
+ nsIMdbEnv* ev) override;
+ // } ----- end activity dropping methods -----
+
+ // } ===== end nsIMdbCollection methods =====
+ // { ===== begin nsIMdbRow methods =====
+
+ // { ----- begin cursor methods -----
+ NS_IMETHOD GetRowCellCursor( // make a cursor starting iteration at inRowPos
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ nsIMdbRowCellCursor** acqCursor) override; // acquire new cursor instance
+ // } ----- end cursor methods -----
+
+ // { ----- begin column methods -----
+ NS_IMETHOD AddColumn( // make sure a particular column is inside row
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to add
+ const mdbYarn* inYarn) override; // cell value to install
+
+ NS_IMETHOD CutColumn( // make sure a column is absent from the row
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn) override; // column to ensure absent from row
+
+ NS_IMETHOD CutAllColumns( // remove all columns from the row
+ nsIMdbEnv* ev) override; // context
+ // } ----- end column methods -----
+
+ // { ----- begin cell methods -----
+ NS_IMETHOD NewCell( // get cell for specified column, or add new one
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to add
+ nsIMdbCell** acqCell) override; // cell column and value
+
+ NS_IMETHOD AddCell( // copy a cell from another row to this row
+ nsIMdbEnv* ev, // context
+ const nsIMdbCell* inCell) override; // cell column and value
+
+ NS_IMETHOD GetCell( // find a cell in this row
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to find
+ nsIMdbCell** acqCell) override; // cell for specified column, or null
+
+ NS_IMETHOD EmptyAllCells( // make all cells in row empty of content
+ nsIMdbEnv* ev) override; // context
+ // } ----- end cell methods -----
+
+ // { ----- begin row methods -----
+ NS_IMETHOD AddRow( // add all cells in another row to this one
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioSourceRow) override; // row to union with
+
+ NS_IMETHOD SetRow( // make exact duplicate of another row
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioSourceRow) override; // row to duplicate
+ // } ----- end row methods -----
+
+ // { ----- begin blob methods -----
+ NS_IMETHOD SetCellYarn(
+ nsIMdbEnv* ev, // synonym for AddColumn()
+ mdb_column inColumn, // column to write
+ const mdbYarn* inYarn) override; // reads from yarn slots
+ // make this text object contain content from the yarn's buffer
+
+ NS_IMETHOD GetCellYarn(nsIMdbEnv* ev,
+ mdb_column inColumn, // column to read
+ mdbYarn* outYarn) override; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+
+ NS_IMETHOD AliasCellYarn(nsIMdbEnv* ev,
+ mdb_column inColumn, // column to alias
+ mdbYarn* outYarn) override; // writes ALL yarn slots
+
+ NS_IMETHOD NextCellYarn(nsIMdbEnv* ev, // iterative version of GetCellYarn()
+ mdb_column* ioColumn, // next column to read
+ mdbYarn* outYarn) override; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+ //
+ // The ioColumn argument is an inout parameter which initially contains the
+ // last column accessed and returns the next column corresponding to the
+ // content read into the yarn. Callers should start with a zero column
+ // value to say 'no previous column', which causes the first column to be
+ // read. Then the value returned in ioColumn is perfect for the next call
+ // to NextCellYarn(), since it will then be the previous column accessed.
+ // Callers need only examine the column token returned to see which cell
+ // in the row is being read into the yarn. When no more columns remain,
+ // and the iteration has ended, ioColumn will return a zero token again.
+ // So iterating over cells starts and ends with a zero column token.
+
+ NS_IMETHOD SeekCellYarn( // resembles nsIMdbRowCellCursor::SeekCell()
+ nsIMdbEnv* ev, // context
+ mdb_pos inPos, // position of cell in row sequence
+ mdb_column* outColumn, // column for this particular cell
+ mdbYarn* outYarn) override; // writes some yarn slots
+ // copy content into the yarn buffer, and update mYarn_Fill and mYarn_Form
+ // Callers can pass nil for outYarn to indicate no interest in content, so
+ // only the outColumn value is returned. NOTE to subclasses: you must be
+ // able to ignore outYarn when the pointer is nil; please do not crash.
+
+ // } ----- end blob methods -----
+
+ // } ===== end nsIMdbRow methods =====
+
+ private: // copying is not allowed
+ morkRowObject(const morkRowObject& other);
+ morkRowObject& operator=(const morkRowObject& other);
+ virtual ~morkRowObject(); // assert that CloseRowObject() executed earlier
+
+ public: // dynamic type identification
+ mork_bool IsRowObject() const {
+ return IsNode() && mNode_Derived == morkDerived_kRowObject;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // typing
+ static void NonRowObjectTypeError(morkEnv* ev);
+ static void NilRowError(morkEnv* ev);
+ static void NilStoreError(morkEnv* ev);
+ static void RowObjectRowNotSelfError(morkEnv* ev);
+
+ public: // other row node methods
+ nsIMdbRow* AcquireRowHandle(morkEnv* ev); // mObject_Handle
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakRowObject(morkRowObject* me, morkEnv* ev,
+ morkRowObject** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongRowObject(morkRowObject* me, morkEnv* ev,
+ morkRowObject** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKROWOBJECT_ */
diff --git a/comm/mailnews/db/mork/morkRowSpace.cpp b/comm/mailnews/db/mork/morkRowSpace.cpp
new file mode 100644
index 0000000000..4ee9d5aead
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowSpace.cpp
@@ -0,0 +1,540 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKSPACE_
+# include "morkSpace.h"
+#endif
+
+#ifndef _MORKNODEMAP_
+# include "morkNodeMap.h"
+#endif
+
+#ifndef _MORKROWMAP_
+# include "morkRowMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKATOMMAP_
+# include "morkAtomMap.h"
+#endif
+
+#ifndef _MORKROWOBJECT_
+# include "morkRowObject.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkRowSpace::CloseMorkNode(
+ morkEnv* ev) // CloseRowSpace() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseRowSpace(ev);
+ this->MarkShut();
+ }
+}
+
+/*public virtual*/
+morkRowSpace::~morkRowSpace() // assert CloseRowSpace() executed earlier
+{
+ MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+morkRowSpace::morkRowSpace(morkEnv* ev, const morkUsage& inUsage,
+ mork_scope inScope, morkStore* ioStore,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+ : morkSpace(ev, inUsage, inScope, ioStore, ioHeap, ioSlotHeap),
+ mRowSpace_SlotHeap(ioSlotHeap),
+ mRowSpace_Rows(ev, morkUsage::kMember, (nsIMdbHeap*)0, ioSlotHeap,
+ morkRowSpace_kStartRowMapSlotCount),
+ mRowSpace_Tables(ev, morkUsage::kMember, (nsIMdbHeap*)0, ioSlotHeap),
+ mRowSpace_NextTableId(1),
+ mRowSpace_NextRowId(1)
+
+ ,
+ mRowSpace_IndexCount(0) {
+ morkAtomRowMap** cache = mRowSpace_IndexCache;
+ morkAtomRowMap** cacheEnd = cache + morkRowSpace_kPrimeCacheSize;
+ while (cache < cacheEnd)
+ *cache++ = 0; // put nil into every slot of cache table
+
+ if (ev->Good()) {
+ if (ioSlotHeap) {
+ mNode_Derived = morkDerived_kRowSpace;
+
+ // the morkSpace base constructor handles any dirty propagation
+ } else
+ ev->NilPointerError();
+ }
+}
+
+/*public non-poly*/ void morkRowSpace::CloseRowSpace(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ morkAtomRowMap** cache = mRowSpace_IndexCache;
+ morkAtomRowMap** cacheEnd = cache + morkRowSpace_kPrimeCacheSize;
+ --cache; // prepare for preincrement:
+ while (++cache < cacheEnd) {
+ if (*cache) morkAtomRowMap::SlotStrongAtomRowMap(0, ev, cache);
+ }
+
+ mRowSpace_Tables.CloseMorkNode(ev);
+
+ morkStore* store = mSpace_Store;
+ if (store) this->CutAllRows(ev, &store->mStore_Pool);
+
+ mRowSpace_Rows.CloseMorkNode(ev);
+ this->CloseSpace(ev);
+ } else
+ this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+/*static*/ void morkRowSpace::NonRowSpaceTypeError(morkEnv* ev) {
+ ev->NewError("non morkRowSpace");
+}
+
+/*static*/ void morkRowSpace::ZeroKindError(morkEnv* ev) {
+ ev->NewError("zero table kind");
+}
+
+/*static*/ void morkRowSpace::ZeroScopeError(morkEnv* ev) {
+ ev->NewError("zero row scope");
+}
+
+/*static*/ void morkRowSpace::ZeroTidError(morkEnv* ev) {
+ ev->NewError("zero table ID");
+}
+
+/*static*/ void morkRowSpace::MinusOneRidError(morkEnv* ev) {
+ ev->NewError("row ID is -1");
+}
+
+///*static*/ void
+// morkRowSpace::ExpectAutoIdOnlyError(morkEnv* ev)
+//{
+// ev->NewError("zero row ID");
+//}
+
+///*static*/ void
+// morkRowSpace::ExpectAutoIdNeverError(morkEnv* ev)
+//{
+//}
+
+mork_num morkRowSpace::CutAllRows(morkEnv* ev, morkPool* ioPool) {
+ if (this->IsRowSpaceClean()) this->MaybeDirtyStoreAndSpace();
+
+#ifdef MORK_ENABLE_ZONE_ARENAS
+ MORK_USED_2(ev, ioPool);
+ return 0;
+#else /*MORK_ENABLE_ZONE_ARENAS*/
+ mork_num outSlots = mRowSpace_Rows.MapFill();
+ morkZone* zone = &mSpace_Store->mStore_Zone;
+ morkRow* r = 0; // old key row in the map
+ mork_change* c = 0;
+
+# ifdef MORK_ENABLE_PROBE_MAPS
+ morkRowProbeMapIter i(ev, &mRowSpace_Rows);
+# else /*MORK_ENABLE_PROBE_MAPS*/
+ morkRowMapIter i(ev, &mRowSpace_Rows);
+# endif /*MORK_ENABLE_PROBE_MAPS*/
+
+ for (c = i.FirstRow(ev, &r); c && ev->Good(); c = i.NextRow(ev, &r)) {
+ if (r) {
+ if (r->IsRow()) {
+ if (r->mRow_Object) {
+ morkRowObject::SlotWeakRowObject((morkRowObject*)0, ev,
+ &r->mRow_Object);
+ }
+ ioPool->ZapRow(ev, r, zone);
+ } else
+ r->NonRowTypeWarning(ev);
+ } else
+ ev->NilPointerError();
+
+# ifdef MORK_ENABLE_PROBE_MAPS
+ // cut nothing from the map
+# else /*MORK_ENABLE_PROBE_MAPS*/
+ i.CutHereRow(ev, /*key*/ (morkRow**)0);
+# endif /*MORK_ENABLE_PROBE_MAPS*/
+ }
+
+ return outSlots;
+#endif /*MORK_ENABLE_ZONE_ARENAS*/
+}
+
+morkTable* morkRowSpace::FindTableByKind(morkEnv* ev, mork_kind inTableKind) {
+ if (inTableKind) {
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+
+ morkTableMapIter i(ev, &mRowSpace_Tables);
+ morkTable* table = i.FirstTable(ev);
+ for (; table && ev->Good(); table = i.NextTable(ev))
+#else /*MORK_BEAD_OVER_NODE_MAPS*/
+ mork_tid* key = 0; // nil pointer to suppress key access
+ morkTable* table = 0; // old table in the map
+
+ mork_change* c = 0;
+ morkTableMapIter i(ev, &mRowSpace_Tables);
+ for (c = i.FirstTable(ev, key, &table); c && ev->Good();
+ c = i.NextTable(ev, key, &table))
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+ {
+ if (table->mTable_Kind == inTableKind) return table;
+ }
+ } else
+ this->ZeroKindError(ev);
+
+ return (morkTable*)0;
+}
+
+morkTable* morkRowSpace::NewTableWithTid(
+ morkEnv* ev, mork_tid inTid, mork_kind inTableKind,
+ const mdbOid* inOptionalMetaRowOid) // can be nil to avoid specifying
+{
+ morkTable* outTable = 0;
+ morkStore* store = mSpace_Store;
+
+ if (inTableKind && store) {
+ mdb_bool mustBeUnique = morkBool_kFalse;
+ nsIMdbHeap* heap = store->mPort_Heap;
+ morkTable* table = new (*heap, ev)
+ morkTable(ev, morkUsage::kHeap, heap, store, heap, this,
+ inOptionalMetaRowOid, inTid, inTableKind, mustBeUnique);
+ if (table) {
+ if (mRowSpace_Tables.AddTable(ev, table)) {
+ outTable = table;
+ if (mRowSpace_NextTableId <= inTid) mRowSpace_NextTableId = inTid + 1;
+ }
+
+ if (this->IsRowSpaceClean() && store->mStore_CanDirty)
+ this->MaybeDirtyStoreAndSpace(); // morkTable does already
+ }
+ } else if (store)
+ this->ZeroKindError(ev);
+ else
+ this->NilSpaceStoreError(ev);
+
+ return outTable;
+}
+
+morkTable* morkRowSpace::NewTable(
+ morkEnv* ev, mork_kind inTableKind, mdb_bool inMustBeUnique,
+ const mdbOid* inOptionalMetaRowOid) // can be nil to avoid specifying
+{
+ morkTable* outTable = 0;
+ morkStore* store = mSpace_Store;
+
+ if (inTableKind && store) {
+ if (inMustBeUnique) // need to look for existing table first?
+ outTable = this->FindTableByKind(ev, inTableKind);
+
+ if (!outTable && ev->Good()) {
+ mork_tid id = this->MakeNewTableId(ev);
+ if (id) {
+ nsIMdbHeap* heap = mSpace_Store->mPort_Heap;
+ morkTable* table = new (*heap, ev)
+ morkTable(ev, morkUsage::kHeap, heap, mSpace_Store, heap, this,
+ inOptionalMetaRowOid, id, inTableKind, inMustBeUnique);
+ if (table) {
+ if (mRowSpace_Tables.AddTable(ev, table))
+ outTable = table;
+ else
+ table->Release();
+
+ if (this->IsRowSpaceClean() && store->mStore_CanDirty)
+ this->MaybeDirtyStoreAndSpace(); // morkTable does already
+ }
+ }
+ }
+ } else if (store)
+ this->ZeroKindError(ev);
+ else
+ this->NilSpaceStoreError(ev);
+
+ return outTable;
+}
+
+mork_tid morkRowSpace::MakeNewTableId(morkEnv* ev) {
+ mork_tid outTid = 0;
+ mork_tid id = mRowSpace_NextTableId;
+ mork_num count = 9; // try up to eight times
+
+ while (!outTid && --count) // still trying to find an unused table ID?
+ {
+ if (!mRowSpace_Tables.GetTable(ev, id))
+ outTid = id;
+ else {
+ MORK_ASSERT(morkBool_kFalse); // alert developer about ID problems
+ ++id;
+ }
+ }
+
+ mRowSpace_NextTableId = id + 1;
+ return outTid;
+}
+
+#define MAX_AUTO_ID (morkRow_kMinusOneRid - 2)
+mork_rid morkRowSpace::MakeNewRowId(morkEnv* ev) {
+ mork_rid outRid = 0;
+ mork_rid id = mRowSpace_NextRowId;
+ mork_num count = 9; // try up to eight times
+ mdbOid oid;
+ oid.mOid_Scope = this->SpaceScope();
+
+ // still trying to find an unused row ID?
+ while (!outRid && --count && id <= MAX_AUTO_ID) {
+ oid.mOid_Id = id;
+ if (!mRowSpace_Rows.GetOid(ev, &oid))
+ outRid = id;
+ else {
+ MORK_ASSERT(morkBool_kFalse); // alert developer about ID problems
+ ++id;
+ }
+ }
+
+ if (id < MAX_AUTO_ID) mRowSpace_NextRowId = id + 1;
+ return outRid;
+}
+
+morkAtomRowMap* morkRowSpace::make_index(morkEnv* ev, mork_column inCol) {
+ morkAtomRowMap* outMap = 0;
+ nsIMdbHeap* heap = mRowSpace_SlotHeap;
+ if (heap) // have expected heap for allocations?
+ {
+ morkAtomRowMap* map =
+ new (*heap, ev) morkAtomRowMap(ev, morkUsage::kHeap, heap, heap, inCol);
+
+ if (map) // able to create new map index?
+ {
+ if (ev->Good()) // no errors during construction?
+ {
+#ifdef MORK_ENABLE_PROBE_MAPS
+ morkRowProbeMapIter i(ev, &mRowSpace_Rows);
+#else /*MORK_ENABLE_PROBE_MAPS*/
+ morkRowMapIter i(ev, &mRowSpace_Rows);
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+ mork_change* c = 0;
+ morkRow* row = 0;
+ mork_aid aidKey = 0;
+
+ for (c = i.FirstRow(ev, &row); c && ev->Good();
+ c = i.NextRow(ev, &row)) // another row in space?
+ {
+ aidKey = row->GetCellAtomAid(ev, inCol);
+ if (aidKey) // row has indexed attribute?
+ map->AddAid(ev, aidKey, row); // include in map
+ }
+ }
+ if (ev->Good()) // no errors constructing index?
+ outMap = map; // return from function
+ else
+ map->CutStrongRef(ev); // discard map on error
+ }
+ } else
+ ev->NilPointerError();
+
+ return outMap;
+}
+
+morkAtomRowMap* morkRowSpace::ForceMap(morkEnv* ev, mork_column inCol) {
+ morkAtomRowMap* outMap = this->FindMap(ev, inCol);
+
+ if (!outMap && ev->Good()) // no such existing index?
+ {
+ if (mRowSpace_IndexCount < morkRowSpace_kMaxIndexCount) {
+ morkAtomRowMap* map = this->make_index(ev, inCol);
+ if (map) // created a new index for col?
+ {
+ mork_count wrap = 0; // count times wrap-around occurs
+ morkAtomRowMap** slot = mRowSpace_IndexCache; // table
+ morkAtomRowMap** end = slot + morkRowSpace_kPrimeCacheSize;
+ slot += (inCol % morkRowSpace_kPrimeCacheSize); // hash
+ while (*slot) // empty slot not yet found?
+ {
+ if (++slot >= end) // wrap around?
+ {
+ slot = mRowSpace_IndexCache; // back to table start
+ if (++wrap > 1) // wrapped more than once?
+ {
+ ev->NewError("no free cache slots"); // disaster
+ break; // end while loop
+ }
+ }
+ }
+ if (ev->Good()) // everything went just fine?
+ {
+ ++mRowSpace_IndexCount; // note another new map
+ *slot = map; // install map in the hash table
+ outMap = map; // return the new map from function
+ } else
+ map->CutStrongRef(ev); // discard map on error
+ }
+ } else
+ ev->NewError("too many indexes"); // why so many indexes?
+ }
+ return outMap;
+}
+
+morkAtomRowMap* morkRowSpace::FindMap(morkEnv* ev, mork_column inCol) {
+ if (mRowSpace_IndexCount && ev->Good()) {
+ mork_count wrap = 0; // count times wrap-around occurs
+ morkAtomRowMap** slot = mRowSpace_IndexCache; // table
+ morkAtomRowMap** end = slot + morkRowSpace_kPrimeCacheSize;
+ slot += (inCol % morkRowSpace_kPrimeCacheSize); // hash
+ morkAtomRowMap* map = *slot;
+ while (map) // another used slot to examine?
+ {
+ if (inCol == map->mAtomRowMap_IndexColumn) // found col?
+ return map;
+ if (++slot >= end) // wrap around?
+ {
+ slot = mRowSpace_IndexCache;
+ if (++wrap > 1) // wrapped more than once?
+ return (morkAtomRowMap*)0; // stop searching
+ }
+ map = *slot;
+ }
+ }
+ return (morkAtomRowMap*)0;
+}
+
+morkRow* morkRowSpace::FindRow(morkEnv* ev, mork_column inCol,
+ const mdbYarn* inYarn) {
+ morkRow* outRow = 0;
+
+ // if yarn hasn't been atomized, there can't be a corresponding row,
+ // so pass in false to not create the row - should help history bloat
+ morkAtom* atom = mSpace_Store->YarnToAtom(ev, inYarn, false);
+ if (atom) // have or created an atom corresponding to input yarn?
+ {
+ mork_aid atomAid = atom->GetBookAtomAid();
+ if (atomAid) // atom has an identity for use in hash table?
+ {
+ morkAtomRowMap* map = this->ForceMap(ev, inCol);
+ if (map) // able to find or create index for col?
+ {
+ outRow = map->GetAid(ev, atomAid); // search for row
+ }
+ }
+ }
+
+ return outRow;
+}
+
+morkRow* morkRowSpace::NewRowWithOid(morkEnv* ev, const mdbOid* inOid) {
+ morkRow* outRow = mRowSpace_Rows.GetOid(ev, inOid);
+ MORK_ASSERT(outRow == 0);
+ if (!outRow && ev->Good()) {
+ morkStore* store = mSpace_Store;
+ if (store) {
+ morkPool* pool = this->GetSpaceStorePool();
+ morkRow* row = pool->NewRow(ev, &store->mStore_Zone);
+ if (row) {
+ row->InitRow(ev, inOid, this, /*length*/ 0, pool);
+
+ if (ev->Good() && mRowSpace_Rows.AddRow(ev, row)) {
+ outRow = row;
+ mork_rid rid = inOid->mOid_Id;
+ if (mRowSpace_NextRowId <= rid) mRowSpace_NextRowId = rid + 1;
+ } else
+ pool->ZapRow(ev, row, &store->mStore_Zone);
+
+ if (this->IsRowSpaceClean() && store->mStore_CanDirty)
+ this->MaybeDirtyStoreAndSpace(); // InitRow() does already
+ }
+ } else
+ this->NilSpaceStoreError(ev);
+ }
+ return outRow;
+}
+
+morkRow* morkRowSpace::NewRow(morkEnv* ev) {
+ morkRow* outRow = 0;
+ if (ev->Good()) {
+ mork_rid id = this->MakeNewRowId(ev);
+ if (id) {
+ morkStore* store = mSpace_Store;
+ if (store) {
+ mdbOid oid;
+ oid.mOid_Scope = this->SpaceScope();
+ oid.mOid_Id = id;
+ morkPool* pool = this->GetSpaceStorePool();
+ morkRow* row = pool->NewRow(ev, &store->mStore_Zone);
+ if (row) {
+ row->InitRow(ev, &oid, this, /*length*/ 0, pool);
+
+ if (ev->Good() && mRowSpace_Rows.AddRow(ev, row))
+ outRow = row;
+ else
+ pool->ZapRow(ev, row, &store->mStore_Zone);
+
+ if (this->IsRowSpaceClean() && store->mStore_CanDirty)
+ this->MaybeDirtyStoreAndSpace(); // InitRow() does already
+ }
+ } else
+ this->NilSpaceStoreError(ev);
+ }
+ }
+ return outRow;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+morkRowSpaceMap::~morkRowSpaceMap() {}
+
+morkRowSpaceMap::morkRowSpaceMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+ : morkNodeMap(ev, inUsage, ioHeap, ioSlotHeap) {
+ if (ev->Good()) mNode_Derived = morkDerived_kRowSpaceMap;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkRowSpace.h b/comm/mailnews/db/mork/morkRowSpace.h
new file mode 100644
index 0000000000..81386576a1
--- /dev/null
+++ b/comm/mailnews/db/mork/morkRowSpace.h
@@ -0,0 +1,243 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKROWSPACE_
+#define _MORKROWSPACE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKSPACE_
+# include "morkSpace.h"
+#endif
+
+#ifndef _MORKNODEMAP_
+# include "morkNodeMap.h"
+#endif
+
+#ifndef _MORKROWMAP_
+# include "morkRowMap.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKARRAY_
+# include "morkArray.h"
+#endif
+
+#ifndef _MORKDEQUE_
+# include "morkDeque.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kRowSpace /*i*/ 0x7253 /* ascii 'rS' */
+
+#define morkRowSpace_kStartRowMapSlotCount 11
+
+#define morkRowSpace_kMaxIndexCount 8 /* no more indexes than this */
+#define morkRowSpace_kPrimeCacheSize 17 /* should be prime number */
+
+class morkAtomRowMap;
+
+/*| morkRowSpace: holds all rows and tables belonging to one row scope
+**| inside a store.  It owns the hash table of morkRow instances, the map
+**| of tables keyed by table id, auto-id counters for new rows/tables, and
+**| an optional small cache of per-column row indexes (morkAtomRowMap).
+|*/
+class morkRowSpace : public morkSpace { //
+
+  // public: // slots inherited from morkSpace (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+  // morkStore* mSpace_Store; // weak ref to containing store
+
+  // mork_bool mSpace_DoAutoIDs; // whether db should assign member IDs
+  // mork_bool mSpace_HaveDoneAutoIDs; // whether actually auto assigned IDs
+  // mork_u1 mSpace_Pad[ 2 ]; // pad to u4 alignment
+
+ public: // state is public because the entire Mork system is private
+  nsIMdbHeap* mRowSpace_SlotHeap; // heap used for internal slot storage
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+  morkRowProbeMap mRowSpace_Rows; // hash table of morkRow instances
+#else /*MORK_ENABLE_PROBE_MAPS*/
+  morkRowMap mRowSpace_Rows; // hash table of morkRow instances
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+  morkTableMap mRowSpace_Tables; // all the tables in this row scope
+
+  mork_tid mRowSpace_NextTableId; // for auto-assigning table IDs
+  mork_rid mRowSpace_NextRowId; // for auto-assigning row IDs
+
+  mork_count mRowSpace_IndexCount; // if nonzero, row indexes exist
+
+  // every nonzero slot in IndexCache is a strong ref to a morkAtomRowMap:
+  morkAtomRowMap* mRowSpace_IndexCache[morkRowSpace_kPrimeCacheSize];
+
+  // tables bucketed by commit priority, one deque per priority level:
+  morkDeque mRowSpace_TablesByPriority[morkPriority_kCount];
+
+ public: // more specific dirty methods for row space:
+  void SetRowSpaceDirty() { this->SetNodeDirty(); }
+  void SetRowSpaceClean() { this->SetNodeClean(); }
+
+  mork_bool IsRowSpaceClean() const { return this->IsNodeClean(); }
+  mork_bool IsRowSpaceDirty() const { return this->IsNodeDirty(); }
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override; // CloseRowSpace() only if open
+  virtual ~morkRowSpace(); // assert that CloseRowSpace() executed earlier
+
+ public: // morkMap construction & destruction
+  morkRowSpace(morkEnv* ev, const morkUsage& inUsage, mork_scope inScope,
+               morkStore* ioStore, nsIMdbHeap* ioNodeHeap,
+               nsIMdbHeap* ioSlotHeap);
+  void CloseRowSpace(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+  mork_bool IsRowSpace() const {
+    return IsNode() && mNode_Derived == morkDerived_kRowSpace;
+  }
+  // } ===== end morkNode methods =====
+
+ public: // typing (each reports one specific error condition into ev)
+  static void NonRowSpaceTypeError(morkEnv* ev);
+  static void ZeroScopeError(morkEnv* ev);
+  static void ZeroKindError(morkEnv* ev);
+  static void ZeroTidError(morkEnv* ev);
+  static void MinusOneRidError(morkEnv* ev);
+
+  // static void ExpectAutoIdOnlyError(morkEnv* ev);
+  // static void ExpectAutoIdNeverError(morkEnv* ev);
+
+ public: // other space methods
+  mork_num CutAllRows(morkEnv* ev, morkPool* ioPool);
+  // CutAllRows() puts all rows and cells back into the pool.
+
+  // Create a table with an auto-assigned id (NewTable) or a caller-chosen
+  // id (NewTableWithTid); the meta-row oid is optional in both cases.
+  morkTable* NewTable(morkEnv* ev, mork_kind inTableKind,
+                      mdb_bool inMustBeUnique,
+                      const mdbOid* inOptionalMetaRowOid);
+
+  morkTable* NewTableWithTid(morkEnv* ev, mork_tid inTid, mork_kind inTableKind,
+                             const mdbOid* inOptionalMetaRowOid);
+
+  morkTable* FindTableByKind(morkEnv* ev, mork_kind inTableKind);
+  morkTable* FindTableByTid(morkEnv* ev, mork_tid inTid) {
+    return mRowSpace_Tables.GetTable(ev, inTid);
+  }
+
+  // Mint fresh ids for tables/rows, advancing the Next*Id counters.
+  mork_tid MakeNewTableId(morkEnv* ev);
+  mork_rid MakeNewRowId(morkEnv* ev);
+
+  // morkRow* FindRowByRid(morkEnv* ev, mork_rid inRid)
+  // { return (morkRow*) mRowSpace_Rows.GetRow(ev, inRid); }
+
+  // Create rows with a caller-supplied oid, or an auto-assigned id.
+  morkRow* NewRowWithOid(morkEnv* ev, const mdbOid* inOid);
+  morkRow* NewRow(morkEnv* ev);
+
+  // Look up a row whose cell in inColumn equals inYarn's content.
+  morkRow* FindRow(morkEnv* ev, mork_column inColumn, const mdbYarn* inYarn);
+
+  // ForceMap builds the per-column index on demand; FindMap only looks.
+  morkAtomRowMap* ForceMap(morkEnv* ev, mork_column inColumn);
+  morkAtomRowMap* FindMap(morkEnv* ev, mork_column inColumn);
+
+ protected: // internal utilities
+  morkAtomRowMap* make_index(morkEnv* ev, mork_column inColumn);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakRowSpace(morkRowSpace* me, morkEnv* ev,
+                               morkRowSpace** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongRowSpace(morkRowSpace* me, morkEnv* ev,
+                                 morkRowSpace** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kRowSpaceMap /*i*/ 0x725A /* ascii 'rZ' */
+
+/*| morkRowSpaceMap: maps mork_scope -> morkRowSpace.  A thin typesafe
+**| facade over morkNodeMap; keys are row scopes, values are the spaces
+**| holding all rows/tables for that scope.
+|*/
+class morkRowSpaceMap : public morkNodeMap { // for mapping tokens to tables
+
+ public:
+  virtual ~morkRowSpaceMap();
+  morkRowSpaceMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                  nsIMdbHeap* ioSlotHeap);
+
+ public: // other map methods
+  mork_bool AddRowSpace(morkEnv* ev, morkRowSpace* ioRowSpace) {
+    // keyed by the space's own scope (its bead color)
+    return this->AddNode(ev, ioRowSpace->SpaceScope(), ioRowSpace);
+  }
+  // the AddRowSpace() boolean return equals ev->Good().
+
+  mork_bool CutRowSpace(morkEnv* ev, mork_scope inScope) {
+    return this->CutNode(ev, inScope);
+  }
+  // The CutRowSpace() boolean return indicates whether removal happened.
+
+  morkRowSpace* GetRowSpace(morkEnv* ev, mork_scope inScope) {
+    return (morkRowSpace*)this->GetNode(ev, inScope);
+  }
+  // Note the returned space does NOT have an increase in refcount for this.
+
+  mork_num CutAllRowSpaces(morkEnv* ev) { return this->CutAllNodes(ev); }
+  // CutAllRowSpaces() releases all the referenced table values.
+};
+
+// Typesafe iterator over a morkRowSpaceMap; each method forwards to the
+// corresponding morkMapIter primitive, casting out-params to row-space
+// types.  First/Next/Here/CutHere return the map's change token, or a
+// value indicating iteration is exhausted (see morkMapIter).
+class morkRowSpaceMapIter : public morkMapIter { // typesafe wrapper class
+
+ public:
+  morkRowSpaceMapIter(morkEnv* ev, morkRowSpaceMap* ioMap)
+      : morkMapIter(ev, ioMap) {}
+
+  // Two-phase construction: default-construct, then InitRowSpaceMapIter().
+  morkRowSpaceMapIter() : morkMapIter() {}
+  void InitRowSpaceMapIter(morkEnv* ev, morkRowSpaceMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  mork_change* FirstRowSpace(morkEnv* ev, mork_scope* outScope,
+                             morkRowSpace** outRowSpace) {
+    return this->First(ev, outScope, outRowSpace);
+  }
+
+  mork_change* NextRowSpace(morkEnv* ev, mork_scope* outScope,
+                            morkRowSpace** outRowSpace) {
+    return this->Next(ev, outScope, outRowSpace);
+  }
+
+  mork_change* HereRowSpace(morkEnv* ev, mork_scope* outScope,
+                            morkRowSpace** outRowSpace) {
+    return this->Here(ev, outScope, outRowSpace);
+  }
+
+  // CutHere removes the current entry while continuing iteration.
+  mork_change* CutHereRowSpace(morkEnv* ev, mork_scope* outScope,
+                               morkRowSpace** outRowSpace) {
+    return this->CutHere(ev, outScope, outRowSpace);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKROWSPACE_ */
diff --git a/comm/mailnews/db/mork/morkSearchRowCursor.cpp b/comm/mailnews/db/mork/morkSearchRowCursor.cpp
new file mode 100644
index 0000000000..f7cc4c56d6
--- /dev/null
+++ b/comm/mailnews/db/mork/morkSearchRowCursor.cpp
@@ -0,0 +1,153 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKSEARCHROWCURSOR_
+# include "morkSearchRowCursor.h"
+#endif
+
+#ifndef _MORKUNIQROWCURSOR_
+# include "morkUniqRowCursor.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Standard Mork two-phase close: mark the node closing, run the
+// subclass-specific close, then mark it shut.  Idempotent: does nothing
+// unless the node is currently open.
+/*public virtual*/ void morkSearchRowCursor::CloseMorkNode(
+    morkEnv* ev) // CloseSearchRowCursor() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseSearchRowCursor(ev);
+    this->MarkShut();
+  }
+}
+
+// Destructor only asserts the close protocol already ran; it must not do
+// real teardown work itself.
+/*public virtual*/
+morkSearchRowCursor::~morkSearchRowCursor() // CloseSearchRowCursor() executed
+                                            // earlier
+{
+  MORK_ASSERT(this->IsShutNode());
+}
+
+// Construct a search row cursor over ioTable starting at inRowPos.  All
+// real state lives in the morkTableRowCursor base; the sorting-related
+// member is commented out, so the body only validates ioTable.
+// NOTE(review): mNode_Derived is deliberately left at the base class's
+// kTableRowCursor value (see comment below) — confirm that is intended
+// before adding a derived tag here.
+/*public non-poly*/
+morkSearchRowCursor::morkSearchRowCursor(morkEnv* ev, const morkUsage& inUsage,
+                                         nsIMdbHeap* ioHeap, morkTable* ioTable,
+                                         mork_pos inRowPos)
+    : morkTableRowCursor(ev, inUsage, ioHeap, ioTable, inRowPos)
+// , mSortingRowCursor_Sorting( 0 )
+{
+  if (ev->Good()) {
+    if (ioTable) {
+      // morkSorting::SlotWeakSorting(ioSorting, ev,
+      // &mSortingRowCursor_Sorting);
+      if (ev->Good()) {
+        // mNode_Derived = morkDerived_kTableRowCursor;
+        // mNode_Derived must stay equal to kTableRowCursor
+      }
+    } else
+      ev->NilPointerError();
+  }
+}
+
+// Subclass-specific close called from CloseMorkNode(): release nothing of
+// our own (the sorting slot is commented out) and delegate to the base
+// table-row-cursor close.
+/*public non-poly*/ void morkSearchRowCursor::CloseSearchRowCursor(
+    morkEnv* ev) {
+  if (this->IsNode()) {
+    // morkSorting::SlotWeakSorting((morkSorting*) 0, ev,
+    // &mSortingRowCursor_Sorting);
+    this->CloseTableRowCursor(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Report a type mismatch when a node expected to be a search row cursor
+// is not one.
+/*static*/ void morkSearchRowCursor::NonSearchRowCursorTypeError(morkEnv* ev) {
+  ev->NewError("non morkSearchRowCursor");
+}
+
+// NOTE(review): this is an unimplemented stub — it always returns null
+// and never touches ev.  Callers must handle the null result.
+morkUniqRowCursor* morkSearchRowCursor::MakeUniqCursor(morkEnv* ev) {
+  morkUniqRowCursor* outCursor = 0;
+
+  return outCursor;
+}
+
+#if 0
+orkinTableRowCursor*
+morkSearchRowCursor::AcquireUniqueRowCursorHandle(morkEnv* ev)
+{
+ orkinTableRowCursor* outCursor = 0;
+
+ morkUniqRowCursor* uniqCursor = this->MakeUniqCursor(ev);
+ if ( uniqCursor )
+ {
+ outCursor = uniqCursor->AcquireTableRowCursorHandle(ev);
+ uniqCursor->CutStrongRef(ev);
+ }
+ return outCursor;
+}
+#endif
+// A search cursor may visit the same row more than once, so duplicate
+// members are allowed.
+mork_bool morkSearchRowCursor::CanHaveDupRowMembers(morkEnv* ev) {
+  return morkBool_kTrue; // true is correct
+}
+
+// Member count is simply the fill of the underlying table's row array;
+// zero when the weak table reference is gone.
+mork_count morkSearchRowCursor::GetMemberCount(morkEnv* ev) {
+  morkTable* table = mTableRowCursor_Table;
+  if (table)
+    return table->mTable_RowArray.mArray_Fill;
+  else
+    return 0;
+}
+
+// NOTE(review): unimplemented stub.  The table branch is empty, so this
+// always yields outRow == null and *outPos == -1 (or records a
+// NilPointerError when the table weak ref is gone); outOid is never
+// written.  Confirm callers tolerate this before relying on it.
+morkRow* morkSearchRowCursor::NextRow(morkEnv* ev, mdbOid* outOid,
+                                      mdb_pos* outPos) {
+  morkRow* outRow = 0;
+  mork_pos pos = -1;
+
+  morkTable* table = mTableRowCursor_Table;
+  if (table) {
+  } else
+    ev->NilPointerError();
+
+  *outPos = pos;
+  return outRow;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkSearchRowCursor.h b/comm/mailnews/db/mork/morkSearchRowCursor.h
new file mode 100644
index 0000000000..c267f35bf6
--- /dev/null
+++ b/comm/mailnews/db/mork/morkSearchRowCursor.h
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKSEARCHROWCURSOR_
+#define _MORKSEARCHROWCURSOR_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKTABLEROWCURSOR_
+# include "morkTableRowCursor.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class morkUniqRowCursor;
+class orkinTableRowCursor;
+// #define morkDerived_kSearchRowCursor /*i*/ 0x7352 /* ascii 'sR' */
+
+// morkSearchRowCursor: a table row cursor variant used for search results.
+// It permits duplicate row members and (in this revision) leaves NextRow
+// and MakeUniqCursor as stubs — see the .cpp for NOTE(review) details.
+class morkSearchRowCursor : public morkTableRowCursor { // row iterator
+
+  // public: // slots inherited from morkObject (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+  // morkFactory* mObject_Factory; // weak ref to suite factory
+
+  // mork_seed mCursor_Seed;
+  // mork_pos mCursor_Pos;
+  // mork_bool mCursor_DoFailOnSeedOutOfSync;
+  // mork_u1 mCursor_Pad[ 3 ]; // explicitly pad to u4 alignment
+
+  // morkTable* mTableRowCursor_Table; // weak ref to table
+
+  // { ===== begin morkNode interface =====
+ public:
+  virtual void CloseMorkNode(morkEnv* ev); // CloseSearchRowCursor()
+  virtual ~morkSearchRowCursor(); // assert that close executed earlier
+
+ public: // morkSearchRowCursor construction & destruction
+  morkSearchRowCursor(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                      morkTable* ioTable, mork_pos inRowPos);
+  void CloseSearchRowCursor(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed (declared, never defined)
+  morkSearchRowCursor(const morkSearchRowCursor& other);
+  morkSearchRowCursor& operator=(const morkSearchRowCursor& other);
+
+  // } ===== end morkNode methods =====
+
+ public: // typing
+  static void NonSearchRowCursorTypeError(morkEnv* ev);
+
+ public: // uniquify
+  morkUniqRowCursor* MakeUniqCursor(morkEnv* ev);
+
+ public: // other search row cursor methods
+  virtual mork_bool CanHaveDupRowMembers(morkEnv* ev);
+  virtual mork_count GetMemberCount(morkEnv* ev);
+
+#if 0
+  virtual orkinTableRowCursor* AcquireUniqueRowCursorHandle(morkEnv* ev);
+#endif
+
+  // virtual mdb_pos NextRowOid(morkEnv* ev, mdbOid* outOid);
+  virtual morkRow* NextRow(morkEnv* ev, mdbOid* outOid, mdb_pos* outPos);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakSearchRowCursor(morkSearchRowCursor* me, morkEnv* ev,
+                                      morkSearchRowCursor** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongSearchRowCursor(morkSearchRowCursor* me, morkEnv* ev,
+                                        morkSearchRowCursor** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKSEARCHROWCURSOR_ */
diff --git a/comm/mailnews/db/mork/morkSink.cpp b/comm/mailnews/db/mork/morkSink.cpp
new file mode 100644
index 0000000000..daf1bc1b9c
--- /dev/null
+++ b/comm/mailnews/db/mork/morkSink.cpp
@@ -0,0 +1,247 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKSINK_
+# include "morkSink.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Zero both cursor pointers so a destroyed sink is visibly disabled;
+// Putc() on a dead sink then falls through to SpillPutc().
+/*virtual*/ morkSink::~morkSink() {
+  mSink_At = 0;
+  mSink_End = 0;
+}
+
+// Flush for a spool is only bookkeeping: bytes are already in the coil's
+// buffer, so we just recompute mBuf_Fill from the write cursor, after
+// validating that the cursor still points inside the coil body.
+/*virtual*/ void morkSpool::FlushSink(
+    morkEnv* ev) // sync mSpool_Coil->mBuf_Fill
+{
+  morkCoil* coil = mSpool_Coil;
+  if (coil) {
+    mork_u1* body = (mork_u1*)coil->mBuf_Body;
+    if (body) {
+      mork_u1* at = mSink_At;
+      mork_u1* end = mSink_End;
+      if (at >= body && at <= end) // expected cursor order?
+      {
+        mork_fill fill = (mork_fill)(at - body); // current content size
+        if (fill <= coil->mBlob_Size)
+          coil->mBuf_Fill = fill;
+        else {
+          coil->BlobFillOverSizeError(ev);
+          coil->mBuf_Fill = coil->mBlob_Size; // make it safe
+        }
+      } else
+        this->BadSpoolCursorOrderError(ev);
+    } else
+      coil->NilBufBodyError(ev);
+  } else
+    this->NilSpoolCoilError(ev);
+}
+
+// Slow path of Putc(): called when the write cursor has reached the end
+// of the coil's buffer.  Grows the coil (by 512 bytes above 2K, else by
+// ~33% with a 64-byte floor), re-derives the cursor pointers from the
+// possibly-relocated body, then writes the single byte c and updates
+// mBuf_Fill.  All failure modes are reported through ev.
+/*virtual*/ void morkSpool::SpillPutc(morkEnv* ev,
+                                      int c) // grow coil and write byte
+{
+  morkCoil* coil = mSpool_Coil;
+  if (coil) {
+    mork_u1* body = (mork_u1*)coil->mBuf_Body;
+    if (body) {
+      mork_u1* at = mSink_At;
+      mork_u1* end = mSink_End;
+      if (at >= body && at <= end) // expected cursor order?
+      {
+        mork_size size = coil->mBlob_Size;
+        mork_fill fill = (mork_fill)(at - body); // current content size
+        if (fill <= size) // less content than medium size?
+        {
+          coil->mBuf_Fill = fill;
+          if (at >= end) // need to grow the coil?
+          {
+            if (size > 2048) // grow slower over 2K?
+              size += 512;
+            else {
+              mork_size growth = (size * 4) / 3; // grow by 33%
+              if (growth < 64) // grow faster under (64 * 3)?
+                growth = 64;
+              size += growth;
+            }
+            if (coil->GrowCoil(ev, size)) // made coil bigger?
+            {
+              // body may have been reallocated; refresh both cursors
+              body = (mork_u1*)coil->mBuf_Body;
+              if (body) // have a coil body?
+              {
+                mSink_At = at = body + fill;
+                mSink_End = end = body + coil->mBlob_Size;
+              } else
+                coil->NilBufBodyError(ev);
+            }
+          }
+          if (ev->Good()) // seem ready to write byte c?
+          {
+            if (at < end) // morkSink::Putc() would succeed?
+            {
+              *at++ = (mork_u1)c;
+              mSink_At = at;
+              coil->mBuf_Fill = fill + 1;
+            } else
+              this->BadSpoolCursorOrderError(ev);
+          }
+        } else // fill exceeds size
+        {
+          coil->BlobFillOverSizeError(ev);
+          coil->mBuf_Fill = coil->mBlob_Size; // make it safe
+        }
+      } else
+        this->BadSpoolCursorOrderError(ev);
+    } else
+      coil->NilBufBodyError(ev);
+  } else
+    this->NilSpoolCoilError(ev);
+}
+
+// ````` ````` ````` ````` ````` ````` ````` `````
+// public: // public non-poly morkSink methods
+
+/*virtual*/
+morkSpool::~morkSpool()
+// Zero all slots to show this sink is disabled, but destroy no memory.
+// Note it is typically unnecessary to flush this coil sink, since all
+// content is written directly to the coil without any buffering.
+{
+  mSink_At = 0;
+  mSink_End = 0;
+  mSpool_Coil = 0; // weak reference only; the coil's owner destroys it
+}
+
+// Bind the spool to ioCoil (not owned) and position the write cursor at
+// offset 0 via Seek(); a null coil is reported as NilPointerError.
+morkSpool::morkSpool(morkEnv* ev, morkCoil* ioCoil)
+    // After installing the coil, calls Seek(ev, 0) to prepare for writing.
+    : morkSink(), mSpool_Coil(0) {
+  mSink_At = 0; // set correctly later in Seek()
+  mSink_End = 0; // set correctly later in Seek()
+
+  if (ev->Good()) {
+    if (ioCoil) {
+      mSpool_Coil = ioCoil;
+      this->Seek(ev, /*pos*/ 0);
+    } else
+      ev->NilPointerError();
+  }
+}
+
+// ----- All boolean return values below are equal to ev->Good(): -----
+
+// Error reporters for the two spool invariant violations: a write cursor
+// outside the coil body, and a missing coil reference.
+/*static*/ void morkSpool::BadSpoolCursorOrderError(morkEnv* ev) {
+  ev->NewError("bad morkSpool cursor order");
+}
+
+/*static*/ void morkSpool::NilSpoolCoilError(morkEnv* ev) {
+  ev->NewError("nil mSpool_Coil");
+}
+
+mork_bool morkSpool::Seek(morkEnv* ev, mork_pos inPos)
+// Changed the current write position in coil's buffer to inPos.
+// For example, to start writing the coil from scratch, use inPos==0.
+{
+  morkCoil* coil = mSpool_Coil;
+  if (coil) {
+    // guarantee at least 64 writable bytes beyond the seek position
+    mork_size minSize = (mork_size)(inPos + 64);
+
+    if (coil->mBlob_Size < minSize) coil->GrowCoil(ev, minSize);
+
+    if (ev->Good()) {
+      // seeking truncates: fill is reset to the new position
+      coil->mBuf_Fill = (mork_fill)inPos;
+      mork_u1* body = (mork_u1*)coil->mBuf_Body;
+      if (body) {
+        mSink_At = body + inPos;
+        mSink_End = body + coil->mBlob_Size;
+      } else
+        coil->NilBufBodyError(ev);
+    }
+  } else
+    this->NilSpoolCoilError(ev);
+
+  return ev->Good();
+}
+
+mork_bool morkSpool::Write(morkEnv* ev, const void* inBuf, mork_size inSize)
+// write inSize bytes of inBuf to current position inside coil's buffer
+{
+  // This method is conceptually very similar to morkStream::Write(),
+  // and this code was written while looking at that method for clues.
+
+  morkCoil* coil = mSpool_Coil;
+  if (coil) {
+    mork_u1* body = (mork_u1*)coil->mBuf_Body;
+    if (body) {
+      if (inBuf && inSize) // anything to write?
+      {
+        mork_u1* at = mSink_At;
+        mork_u1* end = mSink_End;
+        if (at >= body && at <= end) // expected cursor order?
+        {
+          // note coil->mBuf_Fill can be stale after morkSink::Putc():
+          mork_pos fill = at - body; // current content size
+          mork_num space = (mork_num)(end - at); // space left in body
+          if (space < inSize) // not enough to hold write?
+          {
+            // NOTE(review): growth is sized from the remaining space,
+            // not from inSize; if inSize still exceeds the space after
+            // one GrowCoil() the write fails below with "insufficient
+            // morkSpool space".  Confirm this bound is intentional.
+            mork_size minGrowth = space + 16;
+            mork_size minSize = coil->mBlob_Size + minGrowth;
+            if (coil->GrowCoil(ev, minSize)) {
+              // body may have been reallocated; refresh cursors
+              body = (mork_u1*)coil->mBuf_Body;
+              if (body) {
+                mSink_At = at = body + fill;
+                mSink_End = end = body + coil->mBlob_Size;
+                space = (mork_num)(end - at); // space left in body
+              } else
+                coil->NilBufBodyError(ev);
+            }
+          }
+          if (ev->Good()) {
+            if (space >= inSize) // enough room to hold write?
+            {
+              MORK_MEMCPY(at, inBuf, inSize); // into body
+              mSink_At = at + inSize; // advance past written bytes
+              coil->mBuf_Fill = fill + inSize; // "flush" to fix fill
+            } else
+              ev->NewError("insufficient morkSpool space");
+          }
+        } else
+          this->BadSpoolCursorOrderError(ev);
+      }
+    } else
+      coil->NilBufBodyError(ev);
+  } else
+    this->NilSpoolCoilError(ev);
+
+  return ev->Good();
+}
+
+mork_bool morkSpool::PutString(morkEnv* ev, const char* inString)
+// call Write() with inBuf=inString and inSize=strlen(inString),
+// unless inString is null, in which case we then do nothing at all.
+{
+  if (inString) {
+    mork_size size = strlen(inString);
+    this->Write(ev, inString, size);
+  }
+  return ev->Good();
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkSink.h b/comm/mailnews/db/mork/morkSink.h
new file mode 100644
index 0000000000..9803b5c6da
--- /dev/null
+++ b/comm/mailnews/db/mork/morkSink.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKSINK_
+#define _MORKSINK_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*| morkSink is intended to be a very cheap buffered i/o sink which
+**| writes to bufs and other strings a single byte at a time. The
+**| basic idea is that writing a single byte has a very cheap average
+**| cost, because a polymophic function call need only occur when the
+**| space between At and End is exhausted. The rest of the time a
+**| very cheap inline method will write a byte, and then bump a pointer.
+**|
+**|| At: the current position in some sequence of bytes at which to
+**| write the next byte put into the sink. Presumably At points into
+**| the private storage of some space which is not yet filled (except
+**| when At reaches End, and the overflow must then spill). Note both
+**| At and End are zeroed in the destructor to help show that a sink
+**| is no longer usable; this is safe because At==End causes the case
+**| where SpillPutc() is called to handled an exhausted buffer space.
+**|
+**|| End: an address one byte past the last byte which can be written
+**| without needing to make a buffer larger than previously. When At
+**| and End are equal, this means there is no space to write a byte,
+**| and that some underlying buffer space must be grown before another
+**| byte can be written. Note At must always be less than or equal to
+**| End, and otherwise an important invariant has failed severely.
+**|
+**|| Buf: this original class slot has been commented out in the new
+**| and more abstract version of this sink class, but the general idea
+**| behind this slot should be explained to help design subclasses.
+**| Each subclass should provide space into which At and End can point,
+**| where End is beyond the last writable byte, and At is less than or
+**| equal to this point inside the same buffer. With some kinds of
+**| medium, such as writing to an instance of morkBlob, it is feasible
+**| to point directly into the final resting place for all the content
+**| written to the medium. Other mediums such as files, which write
+**| only through function calls, will typically need a local buffer
+**| to efficiently accumulate many bytes between such function calls.
+**|
+**|| FlushSink: this flush method should move any buffered content to
+**| its final destination. For example, for buffered writes to a
+**| string medium, where string methods are function calls and not just
+**| inline macros, it is faster to accumulate many bytes in a small
+**| local buffer and then move these en masse later in a single call.
+**|
+**|| SpillPutc: when At is greater than or equal to End, this means an
+**| underlying buffer has become full, so the buffer must be flushed
+**| before a new byte can be written. The intention is that SpillPutc()
+**| will be equivalent to calling FlushSink() followed by another call
+**| to Putc(), where the flush is expected to make At less then End once
+**| again. Except that FlushSink() need not make the underlying buffer
+**| any larger, and SpillPutc() typically must make room for more bytes.
+**| Note subclasses might want to guard against the case that both At
+**| and End are null, which happens when a sink is destroyed, which sets
+**| both these pointers to null as an indication the sink is disabled.
+|*/
+// Abstract single-byte output sink; see the block comment above for the
+// full contract of At/End/FlushSink/SpillPutc.
+class morkSink {
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // public sink virtual methods
+  virtual void FlushSink(morkEnv* ev) = 0;
+  virtual void SpillPutc(morkEnv* ev, int c) = 0;
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // member variables
+  mork_u1* mSink_At; // pointer into mSink_Buf
+  mork_u1* mSink_End; // one byte past last content byte
+
+  // define morkSink_kBufSize 256 /* small enough to go on stack */
+
+  // mork_u1 mSink_Buf[ morkSink_kBufSize + 4 ];
+  // want plus one for any needed end null byte; use plus 4 for alignment
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // public non-poly morkSink methods
+  virtual ~morkSink(); // zero both At and End; virtual for subclasses
+  morkSink() {} // does nothing; subclasses must set At and End suitably
+
+  // Fast path: inline byte write while room remains; otherwise delegate
+  // to the subclass's SpillPutc() to grow/flush the underlying medium.
+  void Putc(morkEnv* ev, int c) {
+    if (mSink_At < mSink_End)
+      *mSink_At++ = (mork_u1)c;
+    else
+      this->SpillPutc(ev, c);
+  }
+};
+
+/*| morkSpool: an output sink that efficiently writes individual bytes
+**| or entire byte sequences to a coil instance, which grows as needed by
+**| using the heap instance in the coil to grow the internal buffer.
+**|
+**|| Note we do not "own" the coil referenced by mSpool_Coil, and
+**| the lifetime of the coil is expected to equal or exceed that of this
+**| sink by some external means. Typical usage might involve keeping an
+**| instance of morkCoil and an instance of morkSpool in the same
+**| owning parent object, which uses the spool with the associated coil.
+|*/
+// Concrete sink writing directly into a morkCoil's buffer; see the block
+// comment above regarding (non-)ownership of mSpool_Coil.
+class morkSpool : public morkSink { // for buffered i/o to a morkCoil
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // public sink virtual methods
+  // when morkSink::Putc() moves mSink_At, mSpool_Coil->mBuf_Fill is wrong:
+
+  virtual void FlushSink(morkEnv* ev); // sync mSpool_Coil->mBuf_Fill
+  virtual void SpillPutc(morkEnv* ev, int c); // grow coil and write byte
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // member variables
+  morkCoil* mSpool_Coil; // destination medium for written bytes
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public: // public non-poly morkSink methods
+  static void BadSpoolCursorOrderError(morkEnv* ev);
+  static void NilSpoolCoilError(morkEnv* ev);
+
+  virtual ~morkSpool();
+  // Zero all slots to show this sink is disabled, but destroy no memory.
+  // Note it is typically unnecessary to flush this coil sink, since all
+  // content is written directly to the coil without any buffering.
+
+  morkSpool(morkEnv* ev, morkCoil* ioCoil);
+  // After installing the coil, calls Seek(ev, 0) to prepare for writing.
+
+  // ----- All boolean return values below are equal to ev->Good(): -----
+
+  mork_bool Seek(morkEnv* ev, mork_pos inPos);
+  // Changed the current write position in coil's buffer to inPos.
+  // For example, to start writing the coil from scratch, use inPos==0.
+
+  mork_bool Write(morkEnv* ev, const void* inBuf, mork_size inSize);
+  // write inSize bytes of inBuf to current position inside coil's buffer
+
+  // convenience wrapper writing an entire morkBuf's current content
+  mork_bool PutBuf(morkEnv* ev, const morkBuf& inBuffer) {
+    return this->Write(ev, inBuffer.mBuf_Body, inBuffer.mBuf_Fill);
+  }
+
+  mork_bool PutString(morkEnv* ev, const char* inString);
+  // call Write() with inBuf=inString and inSize=strlen(inString),
+  // unless inString is null, in which case we then do nothing at all.
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKSINK_ */
diff --git a/comm/mailnews/db/mork/morkSpace.cpp b/comm/mailnews/db/mork/morkSpace.cpp
new file mode 100644
index 0000000000..d3b1980089
--- /dev/null
+++ b/comm/mailnews/db/mork/morkSpace.cpp
@@ -0,0 +1,136 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKSPACE_
+# include "morkSpace.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Standard Mork two-phase close: only acts when the node is still open,
+// bracketing the subclass close with MarkClosing()/MarkShut().
+/*public virtual*/ void morkSpace::CloseMorkNode(
+    morkEnv* ev) // CloseSpace() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseSpace(ev);
+    this->MarkShut();
+  }
+}
+
+// Destructor asserts CloseSpace() already ran: scope cleared, store weak
+// ref released, node shut.  No teardown work happens here.
+/*public virtual*/
+morkSpace::~morkSpace() // assert CloseSpace() executed earlier
+{
+  MORK_ASSERT(SpaceScope() == 0);
+  MORK_ASSERT(mSpace_Store == 0);
+  MORK_ASSERT(this->IsShutNode());
+}
+
+/*public non-poly*/
+// morkSpace::morkSpace(morkEnv* ev, const morkUsage& inUsage,
+// nsIMdbHeap* ioNodeHeap, const morkMapForm& inForm,
+// nsIMdbHeap* ioSlotHeap)
+//: morkNode(ev, inUsage, ioNodeHeap)
+//, mSpace_Map(ev, morkUsage::kMember, (nsIMdbHeap*) 0, ioSlotHeap)
+//{
+// ev->StubMethodOnlyError();
+//}
+
+// Construct a space in scope inScope belonging to ioStore.  Takes a WEAK
+// reference to the store, inherits the store's can-dirty flag, and if the
+// store is dirtiable immediately marks both store and space dirty (a new
+// space is itself a change).  Null ioStore or ioSlotHeap is an error.
+/*public non-poly*/
+morkSpace::morkSpace(morkEnv* ev, const morkUsage& inUsage, mork_scope inScope,
+                     morkStore* ioStore, nsIMdbHeap* ioHeap,
+                     nsIMdbHeap* ioSlotHeap)
+    : morkBead(ev, inUsage, ioHeap, inScope),
+      mSpace_Store(0),
+      mSpace_DoAutoIDs(morkBool_kFalse),
+      mSpace_HaveDoneAutoIDs(morkBool_kFalse),
+      mSpace_CanDirty(morkBool_kFalse) // only when store can be dirtied
+{
+  if (ev->Good()) {
+    if (ioStore && ioSlotHeap) {
+      morkStore::SlotWeakStore(ioStore, ev, &mSpace_Store);
+
+      mSpace_CanDirty = ioStore->mStore_CanDirty;
+      if (mSpace_CanDirty) // this new space dirties the store?
+        this->MaybeDirtyStoreAndSpace();
+
+      if (ev->Good()) mNode_Derived = morkDerived_kSpace;
+    } else
+      ev->NilPointerError();
+  }
+}
+
+// Release the weak store reference and clear the bead color (the scope),
+// matching the destructor's assertions above.
+/*public non-poly*/ void morkSpace::CloseSpace(
+    morkEnv* ev) // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    morkStore::SlotWeakStore((morkStore*)0, ev, &mSpace_Store);
+    mBead_Color = 0; // this->CloseBead();
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Error reporters for space-level invariant violations.
+/*static*/ void morkSpace::NonAsciiSpaceScopeName(morkEnv* ev) {
+  ev->NewError("SpaceScope() > 0x7F");
+}
+
+/*static*/ void morkSpace::NilSpaceStoreError(morkEnv* ev) {
+  ev->NewError("nil mSpace_Store");
+}
+
+// NOTE(review): dereferences mSpace_Store without a null check; callers
+// in this patch (e.g. morkRowSpace::NewRow) only call this inside an
+// `if (store)` guard — confirm all other callers do the same.
+morkPool* morkSpace::GetSpaceStorePool() const {
+  return &mSpace_Store->mStore_Pool;
+}
+
+// Mark the store dirty if it currently allows dirtying, and then mark
+// this space dirty whenever dirtying is enabled.  Returns kTrue exactly
+// when the space was marked dirty.
+mork_bool morkSpace::MaybeDirtyStoreAndSpace() {
+  morkStore* store = mSpace_Store;
+  if (store && store->mStore_CanDirty) {
+    store->SetStoreDirty();
+    mSpace_CanDirty = morkBool_kTrue;
+  }
+
+  if (mSpace_CanDirty) {
+    this->SetSpaceDirty();
+    return morkBool_kTrue;
+  }
+
+  return morkBool_kFalse;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkSpace.h b/comm/mailnews/db/mork/morkSpace.h
new file mode 100644
index 0000000000..baf74ee677
--- /dev/null
+++ b/comm/mailnews/db/mork/morkSpace.h
@@ -0,0 +1,108 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKSPACE_
+#define _MORKSPACE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKBEAD_
+# include "morkBead.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkSpace_kInitialSpaceSlots /*i*/ 1024 /* default */
+#define morkDerived_kSpace /*i*/ 0x5370 /* ascii 'Sp' */
+
+/*| morkSpace:
+|*/
+class morkSpace : public morkBead { //
+
+ // public: // slots inherited from morkNode (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ // mork_color mBead_Color; // ID for this bead
+
+ public: // bead color setter & getter replace obsolete member mTable_Id:
+ mork_tid SpaceScope() const { return mBead_Color; }
+ void SetSpaceScope(mork_scope inScope) { mBead_Color = inScope; }
+
+ public: // state is public because the entire Mork system is private
+ morkStore* mSpace_Store; // weak ref to containing store
+
+ mork_bool mSpace_DoAutoIDs; // whether db should assign member IDs
+ mork_bool mSpace_HaveDoneAutoIDs; // whether actually auto assigned IDs
+ mork_bool mSpace_CanDirty; // changes imply the store becomes dirty?
+ mork_u1 mSpace_Pad; // pad to u4 alignment
+
+ public: // more specific dirty methods for space:
+ void SetSpaceDirty() { this->SetNodeDirty(); }
+ void SetSpaceClean() { this->SetNodeClean(); }
+
+ mork_bool IsSpaceClean() const { return this->IsNodeClean(); }
+ mork_bool IsSpaceDirty() const { return this->IsNodeDirty(); }
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(morkEnv* ev); // CloseSpace() only if open
+ virtual ~morkSpace(); // assert that CloseSpace() executed earlier
+
+ public: // morkMap construction & destruction
+ // morkSpace(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioNodeHeap,
+ // const morkMapForm& inForm, nsIMdbHeap* ioSlotHeap);
+
+ morkSpace(morkEnv* ev, const morkUsage& inUsage, mork_scope inScope,
+ morkStore* ioStore, nsIMdbHeap* ioNodeHeap, nsIMdbHeap* ioSlotHeap);
+ void CloseSpace(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+ mork_bool IsSpace() const {
+ return IsNode() && mNode_Derived == morkDerived_kSpace;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // other space methods
+ mork_bool MaybeDirtyStoreAndSpace();
+
+ static void NonAsciiSpaceScopeName(morkEnv* ev);
+ static void NilSpaceStoreError(morkEnv* ev);
+
+ morkPool* GetSpaceStorePool() const;
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakSpace(morkSpace* me, morkEnv* ev, morkSpace** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongSpace(morkSpace* me, morkEnv* ev, morkSpace** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKSPACE_ */
diff --git a/comm/mailnews/db/mork/morkStore.cpp b/comm/mailnews/db/mork/morkStore.cpp
new file mode 100644
index 0000000000..9356864c35
--- /dev/null
+++ b/comm/mailnews/db/mork/morkStore.cpp
@@ -0,0 +1,1981 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKFACTORY_
+# include "morkFactory.h"
+#endif
+
+#ifndef _MORKNODEMAP_
+# include "morkNodeMap.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKTHUMB_
+# include "morkThumb.h"
+#endif
+// #ifndef _MORKFILE_
+// #include "morkFile.h"
+// #endif
+
+#ifndef _MORKBUILDER_
+# include "morkBuilder.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+#ifndef _MORKSTREAM_
+# include "morkStream.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+#ifndef _MORKPORTTABLECURSOR_
+# include "morkPortTableCursor.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKROWMAP_
+# include "morkRowMap.h"
+#endif
+
+#ifndef _MORKPARSER_
+# include "morkParser.h"
+#endif
+
+#include "nsCOMPtr.h"
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkStore::CloseMorkNode(
+ morkEnv* ev) // ClosePort() only if open
+{
+ if (this->IsOpenNode()) {
+ this->MarkClosing();
+ this->CloseStore(ev);
+ this->MarkShut();
+ }
+}
+
+/*public non-poly*/ void morkStore::ClosePort(
+ morkEnv* ev) // called by CloseMorkNode();
+{
+ if (this->IsNode()) {
+ morkFactory::SlotWeakFactory((morkFactory*)0, ev, &mPort_Factory);
+ nsIMdbHeap_SlotStrongHeap((nsIMdbHeap*)0, ev, &mPort_Heap);
+ this->CloseObject(ev);
+ this->MarkShut();
+ } else
+ this->NonNodeError(ev);
+}
+
+/*public virtual*/
+morkStore::~morkStore() // assert CloseStore() executed earlier
+{
+ if (IsOpenNode()) CloseMorkNode(mMorkEnv);
+ MORK_ASSERT(this->IsShutNode());
+ MORK_ASSERT(mStore_File == 0);
+ MORK_ASSERT(mStore_InStream == 0);
+ MORK_ASSERT(mStore_OutStream == 0);
+ MORK_ASSERT(mStore_Builder == 0);
+ MORK_ASSERT(mStore_OidAtomSpace == 0);
+ MORK_ASSERT(mStore_GroundAtomSpace == 0);
+ MORK_ASSERT(mStore_GroundColumnSpace == 0);
+ MORK_ASSERT(mStore_RowSpaces.IsShutNode());
+ MORK_ASSERT(mStore_AtomSpaces.IsShutNode());
+ MORK_ASSERT(mStore_Pool.IsShutNode());
+}
+
+/*public non-poly*/
+morkStore::morkStore(
+ morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioNodeHeap, // the heap (if any) for this node instance
+ morkFactory* inFactory, // the factory for this
+ nsIMdbHeap* ioPortHeap // the heap to hold all content in the port
+ )
+ : morkObject(ev, inUsage, ioNodeHeap, morkColor_kNone, (morkHandle*)0),
+ mPort_Env(ev),
+ mPort_Factory(0),
+ mPort_Heap(0),
+ mStore_OidAtomSpace(0),
+ mStore_GroundAtomSpace(0),
+ mStore_GroundColumnSpace(0)
+
+ ,
+ mStore_File(0),
+ mStore_InStream(0),
+ mStore_Builder(0),
+ mStore_OutStream(0)
+
+ ,
+ mStore_RowSpaces(ev, morkUsage::kMember, (nsIMdbHeap*)0, ioPortHeap),
+ mStore_AtomSpaces(ev, morkUsage::kMember, (nsIMdbHeap*)0, ioPortHeap),
+ mStore_Zone(ev, morkUsage::kMember, (nsIMdbHeap*)0, ioPortHeap),
+ mStore_Pool(ev, morkUsage::kMember, (nsIMdbHeap*)0, ioPortHeap)
+
+ ,
+ mStore_CommitGroupIdentity(0)
+
+ ,
+ mStore_FirstCommitGroupPos(0),
+ mStore_SecondCommitGroupPos(0)
+
+ // disable auto-assignment of atom IDs until someone knows it is okay:
+ ,
+ mStore_CanAutoAssignAtomIdentity(morkBool_kFalse),
+ mStore_CanDirty(morkBool_kFalse) // not until the store is open
+ ,
+ mStore_CanWriteIncremental(morkBool_kTrue) // always with few exceptions
+{
+ if (ev->Good()) {
+ if (inFactory && ioPortHeap) {
+ morkFactory::SlotWeakFactory(inFactory, ev, &mPort_Factory);
+ nsIMdbHeap_SlotStrongHeap(ioPortHeap, ev, &mPort_Heap);
+ if (ev->Good()) mNode_Derived = morkDerived_kPort;
+ } else
+ ev->NilPointerError();
+ }
+ if (ev->Good()) {
+ mNode_Derived = morkDerived_kStore;
+ }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkStore, morkObject, nsIMdbStore)
+
+/*public non-poly*/ void morkStore::CloseStore(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    nsIMdbFile* file = mStore_File;  // may be null if store never got a file
+    if (file) file->AddRef();  // hold file alive while its slot is cleared
+
+    morkFactory::SlotWeakFactory((morkFactory*)0, ev, &mPort_Factory);
+    nsIMdbHeap_SlotStrongHeap((nsIMdbHeap*)0, ev, &mPort_Heap);
+    morkAtomSpace::SlotStrongAtomSpace((morkAtomSpace*)0, ev,
+                                       &mStore_OidAtomSpace);
+    morkAtomSpace::SlotStrongAtomSpace((morkAtomSpace*)0, ev,
+                                       &mStore_GroundAtomSpace);
+    morkAtomSpace::SlotStrongAtomSpace((morkAtomSpace*)0, ev,
+                                       &mStore_GroundColumnSpace);
+    mStore_RowSpaces.CloseMorkNode(ev);
+    mStore_AtomSpaces.CloseMorkNode(ev);
+    morkBuilder::SlotStrongBuilder((morkBuilder*)0, ev, &mStore_Builder);
+
+    nsIMdbFile_SlotStrongFile((nsIMdbFile*)0, ev, &mStore_File);
+
+    if (file) file->Release();  // drop the temporary ref taken above
+
+    morkStream::SlotStrongStream((morkStream*)0, ev, &mStore_InStream);
+    morkStream::SlotStrongStream((morkStream*)0, ev, &mStore_OutStream);
+
+    mStore_Pool.CloseMorkNode(ev);
+    mStore_Zone.CloseMorkNode(ev);
+    this->ClosePort(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+mork_bool morkStore::DoPreferLargeOverCompressCommit(morkEnv* ev)
+// true when mStore_CanWriteIncremental && store has file large enough
+{
+ nsIMdbFile* file = mStore_File;
+ if (file && mStore_CanWriteIncremental) {
+ mdb_pos fileEof = 0;
+ file->Eof(ev->AsMdbEnv(), &fileEof);
+ if (ev->Good() && fileEof > 128) return morkBool_kTrue;
+ }
+ return morkBool_kFalse;
+}
+
+mork_percent morkStore::PercentOfStoreWasted(morkEnv* ev) {
+ mork_percent outPercent = 0;
+ nsIMdbFile* file = mStore_File;
+
+ if (file) {
+ mork_pos firstPos = mStore_FirstCommitGroupPos;
+ mork_pos secondPos = mStore_SecondCommitGroupPos;
+ if (firstPos || secondPos) {
+ if (firstPos < 512 && secondPos > firstPos)
+ firstPos = secondPos; // better approximation of first commit
+
+ mork_pos fileLength = 0;
+ file->Eof(ev->AsMdbEnv(), &fileLength); // end of file
+ if (ev->Good() && fileLength > firstPos) {
+ mork_size groupContent = fileLength - firstPos;
+ outPercent = (groupContent * 100) / fileLength;
+ }
+ }
+ } else
+ this->NilStoreFileError(ev);
+
+ return outPercent;
+}
+
+void morkStore::SetStoreAndAllSpacesCanDirty(morkEnv* ev,
+ mork_bool inCanDirty) {
+ mStore_CanDirty = inCanDirty;
+
+ mork_change* c = 0;
+ mork_scope* key = 0; // ignore keys in maps
+
+ if (ev->Good()) {
+ morkAtomSpaceMapIter asi(ev, &mStore_AtomSpaces);
+
+ morkAtomSpace* atomSpace = 0; // old val node in the map
+
+ for (c = asi.FirstAtomSpace(ev, key, &atomSpace); c && ev->Good();
+ c = asi.NextAtomSpace(ev, key, &atomSpace)) {
+ if (atomSpace) {
+ if (atomSpace->IsAtomSpace())
+ atomSpace->mSpace_CanDirty = inCanDirty;
+ else
+ atomSpace->NonAtomSpaceTypeError(ev);
+ } else
+ ev->NilPointerError();
+ }
+ }
+
+ if (ev->Good()) {
+ morkRowSpaceMapIter rsi(ev, &mStore_RowSpaces);
+ morkRowSpace* rowSpace = 0; // old val node in the map
+
+ for (c = rsi.FirstRowSpace(ev, key, &rowSpace); c && ev->Good();
+ c = rsi.NextRowSpace(ev, key, &rowSpace)) {
+ if (rowSpace) {
+ if (rowSpace->IsRowSpace())
+ rowSpace->mSpace_CanDirty = inCanDirty;
+ else
+ rowSpace->NonRowSpaceTypeError(ev);
+ }
+ }
+ }
+}
+
+void morkStore::RenumberAllCollectableContent(morkEnv* ev) {
+ MORK_USED_1(ev);
+ // do nothing currently
+}
+
+nsIMdbStore* morkStore::AcquireStoreHandle(morkEnv* ev) { return this; }
+
+morkFarBookAtom* morkStore::StageAliasAsFarBookAtom(morkEnv* ev,
+ const morkMid* inMid,
+ morkAtomSpace* ioSpace,
+ mork_cscode inForm) {
+ if (inMid && inMid->mMid_Buf) {
+ const morkBuf* buf = inMid->mMid_Buf;
+ mork_size length = buf->mBuf_Fill;
+ if (length <= morkBookAtom_kMaxBodySize) {
+ mork_aid dummyAid = 1;
+ // mStore_BookAtom.InitMaxBookAtom(ev, *buf,
+ // inForm, ioSpace, dummyAid);
+
+ mStore_FarBookAtom.InitFarBookAtom(ev, *buf, inForm, ioSpace, dummyAid);
+ return &mStore_FarBookAtom;
+ }
+ } else
+ ev->NilPointerError();
+
+ return (morkFarBookAtom*)0;
+}
+
+morkFarBookAtom* morkStore::StageYarnAsFarBookAtom(morkEnv* ev,
+ const mdbYarn* inYarn,
+ morkAtomSpace* ioSpace) {
+ if (inYarn && inYarn->mYarn_Buf) {
+ mork_size length = inYarn->mYarn_Fill;
+ if (length <= morkBookAtom_kMaxBodySize) {
+ morkBuf buf(inYarn->mYarn_Buf, length);
+ mork_aid dummyAid = 1;
+ // mStore_BookAtom.InitMaxBookAtom(ev, buf,
+ // inYarn->mYarn_Form, ioSpace, dummyAid);
+ mStore_FarBookAtom.InitFarBookAtom(ev, buf, inYarn->mYarn_Form, ioSpace,
+ dummyAid);
+ return &mStore_FarBookAtom;
+ }
+ } else
+ ev->NilPointerError();
+
+ return (morkFarBookAtom*)0;
+}
+
+morkFarBookAtom* morkStore::StageStringAsFarBookAtom(morkEnv* ev,
+ const char* inString,
+ mork_cscode inForm,
+ morkAtomSpace* ioSpace) {
+ if (inString) {
+ mork_size length = strlen(inString);
+ if (length <= morkBookAtom_kMaxBodySize) {
+ morkBuf buf(inString, length);
+ mork_aid dummyAid = 1;
+ // mStore_BookAtom.InitMaxBookAtom(ev, buf, inForm, ioSpace, dummyAid);
+ mStore_FarBookAtom.InitFarBookAtom(ev, buf, inForm, ioSpace, dummyAid);
+ return &mStore_FarBookAtom;
+ }
+ } else
+ ev->NilPointerError();
+
+ return (morkFarBookAtom*)0;
+}
+
+morkAtomSpace* morkStore::LazyGetOidAtomSpace(morkEnv* ev) {
+ MORK_USED_1(ev);
+ if (!mStore_OidAtomSpace) {
+ }
+ return mStore_OidAtomSpace;
+}
+
+morkAtomSpace* morkStore::LazyGetGroundAtomSpace(morkEnv* ev) {
+ if (!mStore_GroundAtomSpace) {
+ mork_scope atomScope = morkStore_kValueSpaceScope;
+ nsIMdbHeap* heap = mPort_Heap;
+ morkAtomSpace* space = new (*heap, ev)
+ morkAtomSpace(ev, morkUsage::kHeap, atomScope, this, heap, heap);
+
+ if (space) // successful space creation?
+ {
+ this->MaybeDirtyStore();
+
+ mStore_GroundAtomSpace = space; // transfer strong ref to this slot
+ mStore_AtomSpaces.AddAtomSpace(ev, space);
+ }
+ }
+ return mStore_GroundAtomSpace;
+}
+
+morkAtomSpace* morkStore::LazyGetGroundColumnSpace(morkEnv* ev) {
+ if (!mStore_GroundColumnSpace) {
+ mork_scope atomScope = morkStore_kGroundColumnSpace;
+ nsIMdbHeap* heap = mPort_Heap;
+ morkAtomSpace* space = new (*heap, ev)
+ morkAtomSpace(ev, morkUsage::kHeap, atomScope, this, heap, heap);
+
+ if (space) // successful space creation?
+ {
+ this->MaybeDirtyStore();
+
+ mStore_GroundColumnSpace = space; // transfer strong ref to this slot
+ mStore_AtomSpaces.AddAtomSpace(ev, space);
+ }
+ }
+ return mStore_GroundColumnSpace;
+}
+
+morkStream* morkStore::LazyGetInStream(morkEnv* ev) {
+ if (!mStore_InStream) {
+ nsIMdbFile* file = mStore_File;
+ if (file) {
+ morkStream* stream = new (*mPort_Heap, ev)
+ morkStream(ev, morkUsage::kHeap, mPort_Heap, file,
+ morkStore_kStreamBufSize, /*frozen*/ morkBool_kTrue);
+ if (stream) {
+ this->MaybeDirtyStore();
+ mStore_InStream = stream; // transfer strong ref to this slot
+ }
+ } else
+ this->NilStoreFileError(ev);
+ }
+ return mStore_InStream;
+}
+
+morkStream* morkStore::LazyGetOutStream(morkEnv* ev) {
+  if (!mStore_OutStream) {
+    nsIMdbFile* file = mStore_File;
+    if (file) {
+      morkStream* stream = new (*mPort_Heap, ev)
+          morkStream(ev, morkUsage::kHeap, mPort_Heap, file,
+                     morkStore_kStreamBufSize, /*frozen*/ morkBool_kFalse);
+      if (stream) {
+        this->MaybeDirtyStore();
+        mStore_OutStream = stream;  // transfer strong ref to this slot
+      }
+    } else
+      this->NilStoreFileError(ev);
+  }
+  return mStore_OutStream;
+}
+
+void morkStore::ForgetBuilder(morkEnv* ev) {
+ if (mStore_Builder)
+ morkBuilder::SlotStrongBuilder((morkBuilder*)0, ev, &mStore_Builder);
+ if (mStore_InStream)
+ morkStream::SlotStrongStream((morkStream*)0, ev, &mStore_InStream);
+}
+
+morkBuilder* morkStore::LazyGetBuilder(morkEnv* ev) {
+ if (!mStore_Builder) {
+ morkStream* stream = this->LazyGetInStream(ev);
+ if (stream) {
+ nsIMdbHeap* heap = mPort_Heap;
+ morkBuilder* builder = new (*heap, ev)
+ morkBuilder(ev, morkUsage::kHeap, heap, stream,
+ morkBuilder_kDefaultBytesPerParseSegment, heap, this);
+ if (builder) {
+ mStore_Builder = builder; // transfer strong ref to this slot
+ }
+ }
+ }
+ return mStore_Builder;
+}
+
+morkRowSpace* morkStore::LazyGetRowSpace(morkEnv* ev, mdb_scope inRowScope) {
+ morkRowSpace* outSpace = mStore_RowSpaces.GetRowSpace(ev, inRowScope);
+ if (!outSpace && ev->Good()) // try to make new space?
+ {
+ nsIMdbHeap* heap = mPort_Heap;
+ outSpace = new (*heap, ev)
+ morkRowSpace(ev, morkUsage::kHeap, inRowScope, this, heap, heap);
+
+ if (outSpace) // successful space creation?
+ {
+ this->MaybeDirtyStore();
+
+ // note adding to node map creates its own strong ref...
+ if (mStore_RowSpaces.AddRowSpace(ev, outSpace))
+ outSpace->CutStrongRef(ev); // ...so we can drop our strong ref
+ }
+ }
+ return outSpace;
+}
+
+morkAtomSpace* morkStore::LazyGetAtomSpace(morkEnv* ev, mdb_scope inAtomScope) {
+ morkAtomSpace* outSpace = mStore_AtomSpaces.GetAtomSpace(ev, inAtomScope);
+ if (!outSpace && ev->Good()) // try to make new space?
+ {
+ if (inAtomScope == morkStore_kValueSpaceScope)
+ outSpace = this->LazyGetGroundAtomSpace(ev);
+
+ else if (inAtomScope == morkStore_kGroundColumnSpace)
+ outSpace = this->LazyGetGroundColumnSpace(ev);
+ else {
+ nsIMdbHeap* heap = mPort_Heap;
+ outSpace = new (*heap, ev)
+ morkAtomSpace(ev, morkUsage::kHeap, inAtomScope, this, heap, heap);
+
+ if (outSpace) // successful space creation?
+ {
+ this->MaybeDirtyStore();
+
+ // note adding to node map creates its own strong ref...
+ if (mStore_AtomSpaces.AddAtomSpace(ev, outSpace))
+ outSpace->CutStrongRef(ev); // ...so we can drop our strong ref
+ }
+ }
+ }
+ return outSpace;
+}
+
+/*static*/ void morkStore::NonStoreTypeError(morkEnv* ev) {
+ ev->NewError("non morkStore");
+}
+
+/*static*/ void morkStore::NilStoreFileError(morkEnv* ev) {
+ ev->NewError("nil mStore_File");
+}
+
+/*static*/ void morkStore::CannotAutoAssignAtomIdentityError(morkEnv* ev) {
+ ev->NewError("false mStore_CanAutoAssignAtomIdentity");
+}
+
+mork_bool morkStore::OpenStoreFile(
+ morkEnv* ev, mork_bool inFrozen,
+ // const char* inFilePath,
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy) {
+ MORK_USED_2(inOpenPolicy, inFrozen);
+ nsIMdbFile_SlotStrongFile(ioFile, ev, &mStore_File);
+
+ // if ( ev->Good() )
+ // {
+ // morkFile* file = morkFile::OpenOldFile(ev, mPort_Heap,
+ // inFilePath, inFrozen);
+ // if ( ioFile )
+ // {
+ // if ( ev->Good() )
+ // morkFile::SlotStrongFile(file, ev, &mStore_File);
+ // else
+ // file->CutStrongRef(ev);
+ //
+ // }
+ // }
+ return ev->Good();
+}
+
+mork_bool morkStore::CreateStoreFile(
+ morkEnv* ev,
+ // const char* inFilePath,
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy) {
+ MORK_USED_1(inOpenPolicy);
+ nsIMdbFile_SlotStrongFile(ioFile, ev, &mStore_File);
+
+ return ev->Good();
+}
+
+morkAtom* morkStore::CopyAtom(morkEnv* ev, const morkAtom* inAtom)
+// copy inAtom (from some other store) over to this store
+{
+ morkAtom* outAtom = 0;
+ if (inAtom) {
+ mdbYarn yarn;
+ if (morkAtom::AliasYarn(inAtom, &yarn))
+ outAtom = this->YarnToAtom(ev, &yarn, true /* create */);
+ }
+ return outAtom;
+}
+
+morkAtom* morkStore::YarnToAtom(morkEnv* ev, const mdbYarn* inYarn,
+ bool createIfMissing /* = true */) {
+ morkAtom* outAtom = 0;
+ if (ev->Good()) {
+ morkAtomSpace* groundSpace = this->LazyGetGroundAtomSpace(ev);
+ if (groundSpace) {
+ morkFarBookAtom* keyAtom =
+ this->StageYarnAsFarBookAtom(ev, inYarn, groundSpace);
+
+ if (keyAtom) {
+ morkAtomBodyMap* map = &groundSpace->mAtomSpace_AtomBodies;
+ outAtom = map->GetAtom(ev, keyAtom);
+ if (!outAtom && createIfMissing) {
+ this->MaybeDirtyStore();
+ outAtom = groundSpace->MakeBookAtomCopy(ev, *keyAtom);
+ }
+ } else if (ev->Good()) {
+ morkBuf b(inYarn->mYarn_Buf, inYarn->mYarn_Fill);
+ morkZone* z = &mStore_Zone;
+ outAtom = mStore_Pool.NewAnonAtom(ev, b, inYarn->mYarn_Form, z);
+ }
+ }
+ }
+ return outAtom;
+}
+
+mork_bool morkStore::MidToOid(morkEnv* ev, const morkMid& inMid,
+ mdbOid* outOid) {
+ *outOid = inMid.mMid_Oid;
+ const morkBuf* buf = inMid.mMid_Buf;
+ if (buf && !outOid->mOid_Scope) {
+ if (buf->mBuf_Fill <= morkBookAtom_kMaxBodySize) {
+ if (buf->mBuf_Fill == 1) {
+ mork_u1* name = (mork_u1*)buf->mBuf_Body;
+ if (name) {
+ outOid->mOid_Scope = (mork_scope)*name;
+ return ev->Good();
+ }
+ }
+ morkAtomSpace* groundSpace = this->LazyGetGroundColumnSpace(ev);
+ if (groundSpace) {
+ mork_cscode form = 0; // default
+ mork_aid aid = 1; // dummy
+ mStore_FarBookAtom.InitFarBookAtom(ev, *buf, form, groundSpace, aid);
+ morkFarBookAtom* keyAtom = &mStore_FarBookAtom;
+ morkAtomBodyMap* map = &groundSpace->mAtomSpace_AtomBodies;
+ morkBookAtom* bookAtom = map->GetAtom(ev, keyAtom);
+ if (bookAtom)
+ outOid->mOid_Scope = bookAtom->mBookAtom_Id;
+ else {
+ this->MaybeDirtyStore();
+ bookAtom = groundSpace->MakeBookAtomCopy(ev, *keyAtom);
+ if (bookAtom) {
+ outOid->mOid_Scope = bookAtom->mBookAtom_Id;
+ bookAtom->MakeCellUseForever(ev);
+ }
+ }
+ }
+ }
+ }
+ return ev->Good();
+}
+
+morkRow* morkStore::MidToRow(morkEnv* ev, const morkMid& inMid) {
+ mdbOid tempOid;
+ this->MidToOid(ev, inMid, &tempOid);
+ return this->OidToRow(ev, &tempOid);
+}
+
+morkTable* morkStore::MidToTable(morkEnv* ev, const morkMid& inMid) {
+ mdbOid tempOid;
+ this->MidToOid(ev, inMid, &tempOid);
+ return this->OidToTable(ev, &tempOid, /*metarow*/ (mdbOid*)0);
+}
+
+mork_bool morkStore::MidToYarn(morkEnv* ev, const morkMid& inMid,
+ mdbYarn* outYarn) {
+ mdbOid tempOid;
+ this->MidToOid(ev, inMid, &tempOid);
+ return this->OidToYarn(ev, tempOid, outYarn);
+}
+
+mork_bool morkStore::OidToYarn(morkEnv* ev, const mdbOid& inOid,
+ mdbYarn* outYarn) {
+ morkBookAtom* atom = 0;
+
+ morkAtomSpace* atomSpace =
+ mStore_AtomSpaces.GetAtomSpace(ev, inOid.mOid_Scope);
+ if (atomSpace) {
+ morkAtomAidMap* map = &atomSpace->mAtomSpace_AtomAids;
+ atom = map->GetAid(ev, (mork_aid)inOid.mOid_Id);
+ }
+ morkAtom::GetYarn(atom, outYarn);
+
+ return ev->Good();
+}
+
+morkBookAtom* morkStore::MidToAtom(morkEnv* ev, const morkMid& inMid) {
+ morkBookAtom* outAtom = 0;
+ mdbOid oid;
+ if (this->MidToOid(ev, inMid, &oid)) {
+ morkAtomSpace* atomSpace =
+ mStore_AtomSpaces.GetAtomSpace(ev, oid.mOid_Scope);
+ if (atomSpace) {
+ morkAtomAidMap* map = &atomSpace->mAtomSpace_AtomAids;
+ outAtom = map->GetAid(ev, (mork_aid)oid.mOid_Id);
+ }
+ }
+ return outAtom;
+}
+
+/*static*/ void morkStore::SmallTokenToOneByteYarn(morkEnv* ev,
+ mdb_token inToken,
+ mdbYarn* outYarn) {
+ MORK_USED_1(ev);
+ if (outYarn->mYarn_Buf && outYarn->mYarn_Size) // any space in yarn at all?
+ {
+ mork_u1* buf = (mork_u1*)outYarn->mYarn_Buf; // for byte arithmetic
+ buf[0] = (mork_u1)inToken; // write the single byte
+ outYarn->mYarn_Fill = 1;
+ outYarn->mYarn_More = 0;
+ } else // just record we could not write the single byte
+ {
+ outYarn->mYarn_More = 1;
+ outYarn->mYarn_Fill = 0;
+ }
+}
+
+void morkStore::TokenToString(morkEnv* ev, mdb_token inToken,
+ mdbYarn* outTokenName) {
+ if (inToken > morkAtomSpace_kMaxSevenBitAid) {
+ morkBookAtom* atom = 0;
+ morkAtomSpace* space = mStore_GroundColumnSpace;
+ if (space) atom = space->mAtomSpace_AtomAids.GetAid(ev, (mork_aid)inToken);
+
+ morkAtom::GetYarn(atom, outTokenName);
+ } else // token is an "immediate" single byte string representation?
+ this->SmallTokenToOneByteYarn(ev, inToken, outTokenName);
+}
+
+// void
+// morkStore::SyncTokenIdChange(morkEnv* ev, const morkBookAtom* inAtom,
+// const mdbOid* inOid)
+// {
+// mork_token mStore_MorkNoneToken; // token for "mork:none" // fill=9
+// mork_column mStore_CharsetToken; // token for "charset" // fill=7
+// mork_column mStore_AtomScopeToken; // token for "atomScope" // fill=9
+// mork_column mStore_RowScopeToken; // token for "rowScope" // fill=8
+// mork_column mStore_TableScopeToken; // token for "tableScope" // fill=10
+// mork_column mStore_ColumnScopeToken; // token for "columnScope" // fill=11
+// mork_kind mStore_TableKindToken; // token for "tableKind" // fill=9
+// ---------------------ruler-for-token-length-above---123456789012
+//
+// if ( inOid->mOid_Scope == morkStore_kColumnSpaceScope &&
+// inAtom->IsWeeBook() )
+// {
+// const mork_u1* body = ((const morkWeeBookAtom*)
+// inAtom)->mWeeBookAtom_Body; mork_size size = inAtom->mAtom_Size;
+//
+// if ( size >= 7 && size <= 11 )
+// {
+// if ( size == 9 )
+// {
+// if ( *body == 'm' )
+// {
+// if ( MORK_MEMCMP(body, "mork:none", 9) == 0 )
+// mStore_MorkNoneToken = inAtom->mBookAtom_Id;
+// }
+// else if ( *body == 'a' )
+// {
+// if ( MORK_MEMCMP(body, "atomScope", 9) == 0 )
+// mStore_AtomScopeToken = inAtom->mBookAtom_Id;
+// }
+// else if ( *body == 't' )
+// {
+// if ( MORK_MEMCMP(body, "tableKind", 9) == 0 )
+// mStore_TableKindToken = inAtom->mBookAtom_Id;
+// }
+// }
+// else if ( size == 7 && *body == 'c' )
+// {
+// if ( MORK_MEMCMP(body, "charset", 7) == 0 )
+// mStore_CharsetToken = inAtom->mBookAtom_Id;
+// }
+// else if ( size == 8 && *body == 'r' )
+// {
+// if ( MORK_MEMCMP(body, "rowScope", 8) == 0 )
+// mStore_RowScopeToken = inAtom->mBookAtom_Id;
+// }
+// else if ( size == 10 && *body == 't' )
+// {
+// if ( MORK_MEMCMP(body, "tableScope", 10) == 0 )
+// mStore_TableScopeToken = inAtom->mBookAtom_Id;
+// }
+// else if ( size == 11 && *body == 'c' )
+// {
+// if ( MORK_MEMCMP(body, "columnScope", 11) == 0 )
+// mStore_ColumnScopeToken = inAtom->mBookAtom_Id;
+// }
+// }
+// }
+// }
+
+morkAtom* morkStore::AddAlias(morkEnv* ev, const morkMid& inMid,
+ mork_cscode inForm) {
+ morkBookAtom* outAtom = 0;
+ if (ev->Good()) {
+ const mdbOid* oid = &inMid.mMid_Oid;
+ morkAtomSpace* atomSpace = this->LazyGetAtomSpace(ev, oid->mOid_Scope);
+ if (atomSpace) {
+ morkFarBookAtom* keyAtom =
+ this->StageAliasAsFarBookAtom(ev, &inMid, atomSpace, inForm);
+ if (keyAtom) {
+ morkAtomAidMap* map = &atomSpace->mAtomSpace_AtomAids;
+ outAtom = map->GetAid(ev, (mork_aid)oid->mOid_Id);
+ if (outAtom) {
+ if (!outAtom->EqualFormAndBody(ev, keyAtom))
+ ev->NewError("duplicate alias ID");
+ } else {
+ this->MaybeDirtyStore();
+ keyAtom->mBookAtom_Id = oid->mOid_Id;
+ outAtom = atomSpace->MakeBookAtomCopyWithAid(ev, *keyAtom,
+ (mork_aid)oid->mOid_Id);
+
+ // if ( outAtom && outAtom->IsWeeBook() )
+ // {
+ // if ( oid->mOid_Scope == morkStore_kColumnSpaceScope )
+ // {
+ // mork_size size = outAtom->mAtom_Size;
+ // if ( size >= 7 && size <= 11 )
+ // this->SyncTokenIdChange(ev, outAtom, oid);
+ // }
+ // }
+ }
+ }
+ }
+ }
+ return outAtom;
+}
+
+#define morkStore_kMaxCopyTokenSize 512 /* if larger, cannot be copied */
+
+mork_token morkStore::CopyToken(morkEnv* ev, mdb_token inToken,
+ morkStore* inStore)
+// copy inToken from inStore over to this store
+{
+ mork_token outToken = 0;
+ if (inStore == this) // same store?
+ outToken = inToken; // just return token unchanged
+ else {
+ char yarnBuf[morkStore_kMaxCopyTokenSize];
+ mdbYarn yarn;
+ yarn.mYarn_Buf = yarnBuf;
+ yarn.mYarn_Fill = 0;
+ yarn.mYarn_Size = morkStore_kMaxCopyTokenSize;
+ yarn.mYarn_More = 0;
+ yarn.mYarn_Form = 0;
+ yarn.mYarn_Grow = 0;
+
+ inStore->TokenToString(ev, inToken, &yarn);
+ if (ev->Good()) {
+ morkBuf buf(yarn.mYarn_Buf, yarn.mYarn_Fill);
+ outToken = this->BufToToken(ev, &buf);
+ }
+ }
+ return outToken;
+}
+
+mork_token morkStore::BufToToken(morkEnv* ev, const morkBuf* inBuf) {
+ mork_token outToken = 0;
+ if (ev->Good()) {
+ const mork_u1* s = (const mork_u1*)inBuf->mBuf_Body;
+ mork_bool nonAscii = (*s > 0x7F);
+ mork_size length = inBuf->mBuf_Fill;
+ if (nonAscii || length > 1) // more than one byte?
+ {
+ mork_cscode form = 0; // default charset
+ morkAtomSpace* space = this->LazyGetGroundColumnSpace(ev);
+ if (space) {
+ morkFarBookAtom* keyAtom = 0;
+ if (length <= morkBookAtom_kMaxBodySize) {
+ mork_aid aid = 1; // dummy
+ // mStore_BookAtom.InitMaxBookAtom(ev, *inBuf, form, space, aid);
+ mStore_FarBookAtom.InitFarBookAtom(ev, *inBuf, form, space, aid);
+ keyAtom = &mStore_FarBookAtom;
+ }
+ if (keyAtom) {
+ morkAtomBodyMap* map = &space->mAtomSpace_AtomBodies;
+ morkBookAtom* bookAtom = map->GetAtom(ev, keyAtom);
+ if (bookAtom)
+ outToken = bookAtom->mBookAtom_Id;
+ else {
+ this->MaybeDirtyStore();
+ bookAtom = space->MakeBookAtomCopy(ev, *keyAtom);
+ if (bookAtom) {
+ outToken = bookAtom->mBookAtom_Id;
+ bookAtom->MakeCellUseForever(ev);
+ }
+ }
+ }
+ }
+ } else // only a single byte in inTokenName string:
+ outToken = *s;
+ }
+
+ return outToken;
+}
+
+mork_token morkStore::StringToToken(morkEnv* ev, const char* inTokenName) {
+ mork_token outToken = 0;
+ if (ev->Good()) {
+ const mork_u1* s = (const mork_u1*)inTokenName;
+ mork_bool nonAscii = (*s > 0x7F);
+ if (nonAscii || (*s && s[1])) // more than one byte?
+ {
+ mork_cscode form = 0; // default charset
+ morkAtomSpace* groundSpace = this->LazyGetGroundColumnSpace(ev);
+ if (groundSpace) {
+ morkFarBookAtom* keyAtom =
+ this->StageStringAsFarBookAtom(ev, inTokenName, form, groundSpace);
+ if (keyAtom) {
+ morkAtomBodyMap* map = &groundSpace->mAtomSpace_AtomBodies;
+ morkBookAtom* bookAtom = map->GetAtom(ev, keyAtom);
+ if (bookAtom)
+ outToken = bookAtom->mBookAtom_Id;
+ else {
+ this->MaybeDirtyStore();
+ bookAtom = groundSpace->MakeBookAtomCopy(ev, *keyAtom);
+ if (bookAtom) {
+ outToken = bookAtom->mBookAtom_Id;
+ bookAtom->MakeCellUseForever(ev);
+ }
+ }
+ }
+ }
+ } else // only a single byte in inTokenName string:
+ outToken = *s;
+ }
+
+ return outToken;
+}
+
+mork_token morkStore::QueryToken(morkEnv* ev, const char* inTokenName) {
+ mork_token outToken = 0;
+ if (ev->Good()) {
+ const mork_u1* s = (const mork_u1*)inTokenName;
+ mork_bool nonAscii = (*s > 0x7F);
+ if (nonAscii || (*s && s[1])) // more than one byte?
+ {
+ mork_cscode form = 0; // default charset
+ morkAtomSpace* groundSpace = this->LazyGetGroundColumnSpace(ev);
+ if (groundSpace) {
+ morkFarBookAtom* keyAtom =
+ this->StageStringAsFarBookAtom(ev, inTokenName, form, groundSpace);
+ if (keyAtom) {
+ morkAtomBodyMap* map = &groundSpace->mAtomSpace_AtomBodies;
+ morkBookAtom* bookAtom = map->GetAtom(ev, keyAtom);
+ if (bookAtom) {
+ outToken = bookAtom->mBookAtom_Id;
+ bookAtom->MakeCellUseForever(ev);
+ }
+ }
+ }
+ } else // only a single byte in inTokenName string:
+ outToken = *s;
+ }
+
+ return outToken;
+}
+
+mork_bool morkStore::HasTableKind(morkEnv* ev, mdb_scope inRowScope,
+ mdb_kind inTableKind,
+ mdb_count* outTableCount) {
+ MORK_USED_2(inRowScope, inTableKind);
+ mork_bool outBool = morkBool_kFalse;
+ mdb_count tableCount = 0;
+
+ ev->StubMethodOnlyError();
+
+ if (outTableCount) *outTableCount = tableCount;
+ return outBool;
+}
+
+morkTable* morkStore::GetTableKind(morkEnv* ev, mdb_scope inRowScope,
+ mdb_kind inTableKind,
+ mdb_count* outTableCount,
+ mdb_bool* outMustBeUnique) {
+ morkTable* outTable = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inRowScope);
+ if (rowSpace) {
+ outTable = rowSpace->FindTableByKind(ev, inTableKind);
+ if (outTable) {
+ if (outTableCount) *outTableCount = outTable->GetRowCount();
+ if (outMustBeUnique) *outMustBeUnique = outTable->IsTableUnique();
+ }
+ }
+ }
+ return outTable;
+}
+
+morkRow* morkStore::FindRow(morkEnv* ev, mdb_scope inScope, mdb_column inColumn,
+ const mdbYarn* inYarn) {
+ morkRow* outRow = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inScope);
+ if (rowSpace) {
+ outRow = rowSpace->FindRow(ev, inColumn, inYarn);
+ }
+ }
+ return outRow;
+}
+
+morkRow* morkStore::GetRow(morkEnv* ev, const mdbOid* inOid) {
+ morkRow* outRow = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inOid->mOid_Scope);
+ if (rowSpace) {
+ outRow = rowSpace->mRowSpace_Rows.GetOid(ev, inOid);
+ }
+ }
+ return outRow;
+}
+
+morkTable* morkStore::GetTable(morkEnv* ev, const mdbOid* inOid) {
+ morkTable* outTable = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inOid->mOid_Scope);
+ if (rowSpace) {
+ outTable = rowSpace->FindTableByTid(ev, inOid->mOid_Id);
+ }
+ }
+ return outTable;
+}
+
+morkTable* morkStore::NewTable(
+ morkEnv* ev, mdb_scope inRowScope, mdb_kind inTableKind,
+ mdb_bool inMustBeUnique,
+ const mdbOid* inOptionalMetaRowOid) // can be nil to avoid specifying
+{
+ morkTable* outTable = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inRowScope);
+ if (rowSpace)
+ outTable = rowSpace->NewTable(ev, inTableKind, inMustBeUnique,
+ inOptionalMetaRowOid);
+ }
+ return outTable;
+}
+
+morkPortTableCursor* morkStore::GetPortTableCursor(morkEnv* ev,
+ mdb_scope inRowScope,
+ mdb_kind inTableKind) {
+ morkPortTableCursor* outCursor = 0;
+ if (ev->Good()) {
+ nsIMdbHeap* heap = mPort_Heap;
+ outCursor = new (*heap, ev) morkPortTableCursor(
+ ev, morkUsage::kHeap, heap, this, inRowScope, inTableKind, heap);
+ }
+ NS_IF_ADDREF(outCursor);
+ return outCursor;
+}
+
+morkRow* morkStore::NewRow(morkEnv* ev, mdb_scope inRowScope) {
+ morkRow* outRow = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inRowScope);
+ if (rowSpace) outRow = rowSpace->NewRow(ev);
+ }
+ return outRow;
+}
+
+morkRow* morkStore::NewRowWithOid(morkEnv* ev, const mdbOid* inOid) {
+ morkRow* outRow = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inOid->mOid_Scope);
+ if (rowSpace) outRow = rowSpace->NewRowWithOid(ev, inOid);
+ }
+ return outRow;
+}
+
+morkRow* morkStore::OidToRow(morkEnv* ev, const mdbOid* inOid)
+// OidToRow() finds old row with oid, or makes new one if not found.
+{
+ morkRow* outRow = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inOid->mOid_Scope);
+ if (rowSpace) {
+ outRow = rowSpace->mRowSpace_Rows.GetOid(ev, inOid);
+ if (!outRow && ev->Good()) outRow = rowSpace->NewRowWithOid(ev, inOid);
+ }
+ }
+ return outRow;
+}
+
+morkTable* morkStore::OidToTable(
+ morkEnv* ev, const mdbOid* inOid,
+ const mdbOid* inOptionalMetaRowOid) // can be nil to avoid specifying
+// OidToTable() finds old table with oid, or makes new one if not found.
+{
+ morkTable* outTable = 0;
+ if (ev->Good()) {
+ morkRowSpace* rowSpace = this->LazyGetRowSpace(ev, inOid->mOid_Scope);
+ if (rowSpace) {
+ outTable = rowSpace->mRowSpace_Tables.GetTable(ev, inOid->mOid_Id);
+ if (!outTable && ev->Good()) {
+ mork_kind tableKind = morkStore_kNoneToken;
+ outTable = rowSpace->NewTableWithTid(ev, inOid->mOid_Id, tableKind,
+ inOptionalMetaRowOid);
+ }
+ }
+ }
+ return outTable;
+}
+
+// { ===== begin nsIMdbObject methods =====
+
+// { ----- begin ref counting for well-behaved cyclic graphs -----
+NS_IMETHODIMP
+morkStore::GetWeakRefCount(nsIMdbEnv* mev, // weak refs
+ mdb_count* outCount) {
+ *outCount = WeakRefsOnly();
+ return NS_OK;
+}
+NS_IMETHODIMP
+morkStore::GetStrongRefCount(nsIMdbEnv* mev, // strong refs
+ mdb_count* outCount) {
+ *outCount = StrongRefsOnly();
+ return NS_OK;
+}
+// ### TODO - clean up this cast, if required
+NS_IMETHODIMP
+morkStore::AddWeakRef(nsIMdbEnv* mev) {
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ // XXX Casting mork_refs to nsresult
+ return static_cast<nsresult>(morkNode::AddWeakRef(ev));
+}
+#ifndef _MSC_VER
+NS_IMETHODIMP_(mork_uses)
+morkStore::AddStrongRef(morkEnv* mev) { return AddRef(); }
+#endif
+NS_IMETHODIMP_(mork_uses)
+morkStore::AddStrongRef(nsIMdbEnv* mev) { return AddRef(); }
+NS_IMETHODIMP
+morkStore::CutWeakRef(nsIMdbEnv* mev) {
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ // XXX Casting mork_refs to nsresult
+ return static_cast<nsresult>(morkNode::CutWeakRef(ev));
+}
+#ifndef _MSC_VER
+NS_IMETHODIMP_(mork_uses)
+morkStore::CutStrongRef(morkEnv* mev) { return Release(); }
+#endif
+NS_IMETHODIMP
+morkStore::CutStrongRef(nsIMdbEnv* mev) {
+ // XXX Casting nsrefcnt to nsresult
+ return static_cast<nsresult>(Release());
+}
+
+NS_IMETHODIMP
+morkStore::CloseMdbObject(nsIMdbEnv* mev) {
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ CloseMorkNode(ev);
+ Release();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+morkStore::IsOpenMdbObject(nsIMdbEnv* mev, mdb_bool* outOpen) {
+ *outOpen = IsOpenNode();
+ return NS_OK;
+}
+// } ----- end ref counting -----
+
+// } ===== end nsIMdbObject methods =====
+
+// { ===== begin nsIMdbPort methods =====
+
+// { ----- begin attribute methods -----
+NS_IMETHODIMP
+morkStore::GetIsPortReadonly(nsIMdbEnv* mev, mdb_bool* outBool) {
+ nsresult outErr = NS_OK;
+ mdb_bool isReadOnly = morkBool_kFalse;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ ev->StubMethodOnlyError();
+ outErr = ev->AsErr();
+ }
+ if (outBool) *outBool = isReadOnly;
+ return outErr;
+}
+
+morkEnv* morkStore::CanUseStore(nsIMdbEnv* mev, mork_bool inMutable,
+ nsresult* outErr) const {
+ morkEnv* outEnv = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (IsStore())
+ outEnv = ev;
+ else
+ NonStoreTypeError(ev);
+ *outErr = ev->AsErr();
+ }
+ MORK_ASSERT(outEnv);
+ return outEnv;
+}
+
+NS_IMETHODIMP
+morkStore::GetIsStore(nsIMdbEnv* mev, mdb_bool* outBool) {
+ MORK_USED_1(mev);
+ if (outBool) *outBool = morkBool_kTrue;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+morkStore::GetIsStoreAndDirty(nsIMdbEnv* mev, mdb_bool* outBool) {
+ nsresult outErr = NS_OK;
+ mdb_bool isStoreAndDirty = morkBool_kFalse;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ ev->StubMethodOnlyError();
+ outErr = ev->AsErr();
+ }
+ if (outBool) *outBool = isStoreAndDirty;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::GetUsagePolicy(nsIMdbEnv* mev, mdbUsagePolicy* ioUsagePolicy) {
+ MORK_USED_1(ioUsagePolicy);
+ nsresult outErr = NS_OK;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ ev->StubMethodOnlyError();
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::SetUsagePolicy(nsIMdbEnv* mev, const mdbUsagePolicy* inUsagePolicy) {
+ MORK_USED_1(inUsagePolicy);
+ nsresult outErr = NS_OK;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ // ev->StubMethodOnlyError(); // okay to do nothing?
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// } ----- end attribute methods -----
+
+// { ----- begin memory policy methods -----
+NS_IMETHODIMP
+morkStore::IdleMemoryPurge( // do memory management already scheduled
+ nsIMdbEnv* mev, // context
+ mdb_size* outEstimatedBytesFreed) // approximate bytes actually freed
+{
+ nsresult outErr = NS_OK;
+ mdb_size estimatedBytesFreed = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ // ev->StubMethodOnlyError(); // okay to do nothing?
+ outErr = ev->AsErr();
+ }
+ if (outEstimatedBytesFreed) *outEstimatedBytesFreed = estimatedBytesFreed;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::SessionMemoryPurge( // request specific footprint decrease
+ nsIMdbEnv* mev, // context
+ mdb_size inDesiredBytesFreed, // approximate number of bytes wanted
+ mdb_size* outEstimatedBytesFreed) // approximate bytes actually freed
+{
+ MORK_USED_1(inDesiredBytesFreed);
+ nsresult outErr = NS_OK;
+ mdb_size estimate = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ // ev->StubMethodOnlyError(); // okay to do nothing?
+ outErr = ev->AsErr();
+ }
+ if (outEstimatedBytesFreed) *outEstimatedBytesFreed = estimate;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::PanicMemoryPurge( // desperately free all possible memory
+ nsIMdbEnv* mev, // context
+ mdb_size* outEstimatedBytesFreed) // approximate bytes actually freed
+{
+ nsresult outErr = NS_OK;
+ mdb_size estimate = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ // ev->StubMethodOnlyError(); // okay to do nothing?
+ outErr = ev->AsErr();
+ }
+ if (outEstimatedBytesFreed) *outEstimatedBytesFreed = estimate;
+ return outErr;
+}
+// } ----- end memory policy methods -----
+
+// { ----- begin filepath methods -----
+NS_IMETHODIMP
+morkStore::GetPortFilePath(
+ nsIMdbEnv* mev, // context
+ mdbYarn* outFilePath, // name of file holding port content
+ mdbYarn* outFormatVersion) // file format description
+{
+ nsresult outErr = NS_OK;
+ if (outFormatVersion) outFormatVersion->mYarn_Fill = 0;
+ if (outFilePath) outFilePath->mYarn_Fill = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ if (mStore_File)
+ mStore_File->Path(mev, outFilePath);
+ else
+ NilStoreFileError(ev);
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::GetPortFile(
+ nsIMdbEnv* mev, // context
+ nsIMdbFile** acqFile) // acquire file used by port or store
+{
+ nsresult outErr = NS_OK;
+ if (acqFile) *acqFile = 0;
+
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ if (mStore_File) {
+ if (acqFile) {
+ mStore_File->AddRef();
+ if (ev->Good()) *acqFile = mStore_File;
+ }
+ } else
+ NilStoreFileError(ev);
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// } ----- end filepath methods -----
+
+// { ----- begin export methods -----
+NS_IMETHODIMP
+morkStore::BestExportFormat( // determine preferred export format
+ nsIMdbEnv* mev, // context
+ mdbYarn* outFormatVersion) // file format description
+{
+ nsresult outErr = NS_OK;
+ if (outFormatVersion) outFormatVersion->mYarn_Fill = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ ev->StubMethodOnlyError();
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::CanExportToFormat( // can export content in given specific format?
+ nsIMdbEnv* mev, // context
+ const char* inFormatVersion, // file format description
+ mdb_bool* outCanExport) // whether ExportSource() might succeed
+{
+ MORK_USED_1(inFormatVersion);
+ mdb_bool canExport = morkBool_kFalse;
+ nsresult outErr = NS_OK;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ ev->StubMethodOnlyError();
+ outErr = ev->AsErr();
+ }
+ if (outCanExport) *outCanExport = canExport;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::ExportToFormat( // export content in given specific format
+ nsIMdbEnv* mev, // context
+ // const char* inFilePath, // the file to receive exported content
+ nsIMdbFile* ioFile, // destination abstract file interface
+ const char* inFormatVersion, // file format description
+ nsIMdbThumb** acqThumb) // acquire thumb for incremental export
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the export will be finished.
+{
+ nsresult outErr = NS_OK;
+ nsIMdbThumb* outThumb = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ if (ioFile && inFormatVersion && acqThumb) {
+ ev->StubMethodOnlyError();
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ if (acqThumb) *acqThumb = outThumb;
+ return outErr;
+}
+
+// } ----- end export methods -----
+
+// { ----- begin token methods -----
+NS_IMETHODIMP
+morkStore::TokenToString( // return a string name for an integer token
+ nsIMdbEnv* mev, // context
+ mdb_token inToken, // token for inTokenName inside this port
+ mdbYarn* outTokenName) // the type of table to access
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ TokenToString(ev, inToken, outTokenName);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::StringToToken( // return an integer token for scope name
+ nsIMdbEnv* mev, // context
+ const char* inTokenName, // Latin1 string to tokenize if possible
+ mdb_token* outToken) // token for inTokenName inside this port
+// String token zero is never used and never supported. If the port
+// is a mutable store, then StringToToken() to create a new
+// association of inTokenName with a new integer token if possible.
+// But a readonly port will return zero for an unknown scope name.
+{
+ nsresult outErr = NS_OK;
+ mdb_token token = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ token = StringToToken(ev, inTokenName);
+ outErr = ev->AsErr();
+ }
+ if (outToken) *outToken = token;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::QueryToken( // like StringToToken(), but without adding
+ nsIMdbEnv* mev, // context
+ const char* inTokenName, // Latin1 string to tokenize if possible
+ mdb_token* outToken) // token for inTokenName inside this port
+// QueryToken() will return a string token if one already exists,
+// but unlike StringToToken(), will not assign a new token if not
+// already in use.
+{
+ nsresult outErr = NS_OK;
+ mdb_token token = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ token = QueryToken(ev, inTokenName);
+ outErr = ev->AsErr();
+ }
+ if (outToken) *outToken = token;
+ return outErr;
+}
+
+// } ----- end token methods -----
+
+// { ----- begin row methods -----
+NS_IMETHODIMP
+morkStore::HasRow( // contains a row with the specified oid?
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ mdb_bool* outHasRow) // whether GetRow() might succeed
+{
+ nsresult outErr = NS_OK;
+ mdb_bool hasRow = morkBool_kFalse;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkRow* row = GetRow(ev, inOid);
+ if (row) hasRow = morkBool_kTrue;
+
+ outErr = ev->AsErr();
+ }
+ if (outHasRow) *outHasRow = hasRow;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::GetRow( // access one row with specific oid
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ nsIMdbRow** acqRow) // acquire specific row (or null)
+{
+ nsresult outErr = NS_OK;
+ nsIMdbRow* outRow = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkRow* row = GetRow(ev, inOid);
+ if (row && ev->Good()) outRow = row->AcquireRowHandle(ev, this);
+
+ outErr = ev->AsErr();
+ }
+ if (acqRow) *acqRow = outRow;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::GetRowRefCount( // get number of tables that contain a row
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ mdb_count* outRefCount) // number of tables containing inRowKey
+{
+ nsresult outErr = NS_OK;
+ mdb_count count = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkRow* row = GetRow(ev, inOid);
+ if (row && ev->Good()) count = row->mRow_GcUses;
+
+ outErr = ev->AsErr();
+ }
+ if (outRefCount) *outRefCount = count;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::FindRow(
+ nsIMdbEnv* mev, // search for row with matching cell
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_column inColumn, // the column to search (and maintain an index)
+ const mdbYarn* inTargetCellValue, // cell value for which to search
+ mdbOid* outRowOid, // out row oid on match (or {0,-1} for no match)
+ nsIMdbRow** acqRow) // acquire matching row (or nil for no match)
+// FindRow() searches for one row that has a cell in column inColumn with
+// a contained value with the same form (i.e. charset) and is byte-wise
+// identical to the blob described by yarn inTargetCellValue. Both content
+// and form of the yarn must be an exact match to find a matching row.
+//
+// (In other words, both a yarn's blob bytes and form are significant. The
+// form is not expected to vary in columns used for identity anyway. This
+// is intended to make the cost of FindRow() cheaper for MDB implementors,
+// since any cell value atomization performed internally must necessarily
+// make yarn form significant in order to avoid data loss in atomization.)
+//
+// FindRow() can lazily create an index on attribute inColumn for all rows
+// with that attribute in row space scope inRowScope, so that subsequent
+// calls to FindRow() will perform faster. Such an index might or might
+// not be persistent (but this seems desirable if it is cheap to do so).
+// Note that lazy index creation in readonly DBs is not very feasible.
+//
+// This FindRow() interface assumes that attribute inColumn is effectively
+// an alternative means of unique identification for a row in a rowspace,
+// so correct behavior is only guaranteed when no duplicates for this col
+// appear in the given set of rows. (If more than one row has the same cell
+// value in this column, no more than one will be found; and cutting one of
+// two duplicate rows can cause the index to assume no other such row lives
+// in the row space, so future calls return nil for negative search results
+// even though some duplicate row might still live within the rowspace.)
+//
+// In other words, the FindRow() implementation is allowed to assume simple
+// hash tables mapping unique column keys to associated row values will be
+// sufficient, where any duplication is not recorded because only one copy
+// of a given key need be remembered. Implementors are not required to sort
+// all rows by the specified column.
+{
+ nsresult outErr = NS_OK;
+ nsIMdbRow* outRow = 0;
+ mdbOid rowOid;
+ rowOid.mOid_Scope = 0;
+ rowOid.mOid_Id = (mdb_id)-1;
+
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkRow* row = FindRow(ev, inRowScope, inColumn, inTargetCellValue);
+ if (row && ev->Good()) {
+ rowOid = row->mRow_Oid;
+ if (acqRow) outRow = row->AcquireRowHandle(ev, this);
+ }
+ outErr = ev->AsErr();
+ }
+ if (acqRow) *acqRow = outRow;
+ if (outRowOid) *outRowOid = rowOid;
+
+ return outErr;
+}
+
+// } ----- end row methods -----
+
+// { ----- begin table methods -----
+NS_IMETHODIMP
+morkStore::HasTable( // supports a table with the specified oid?
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid, // hypothetical table oid
+ mdb_bool* outHasTable) // whether GetTable() might succeed
+{
+ nsresult outErr = NS_OK;
+ mork_bool hasTable = morkBool_kFalse;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkTable* table = GetTable(ev, inOid);
+ if (table) hasTable = morkBool_kTrue;
+
+ outErr = ev->AsErr();
+ }
+ if (outHasTable) *outHasTable = hasTable;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::GetTable( // access one table with specific oid
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid, // hypothetical table oid
+ nsIMdbTable** acqTable) // acquire specific table (or null)
+{
+ nsresult outErr = NS_OK;
+ nsIMdbTable* outTable = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkTable* table = GetTable(ev, inOid);
+ if (table && ev->Good()) outTable = table->AcquireTableHandle(ev);
+ outErr = ev->AsErr();
+ }
+ if (acqTable) *acqTable = outTable;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::HasTableKind( // supports a table of the specified type?
+ nsIMdbEnv* mev, // context
+ mdb_scope inRowScope, // rid scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_count* outTableCount, // current number of such tables
+ mdb_bool* outSupportsTable) // whether GetTableKind() might succeed
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ *outSupportsTable =
+ HasTableKind(ev, inRowScope, inTableKind, outTableCount);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::GetTableKind( // access one (random) table of specific type
+ nsIMdbEnv* mev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_count* outTableCount, // current number of such tables
+ mdb_bool* outMustBeUnique, // whether port can hold only one of these
+ nsIMdbTable** acqTable) // acquire scoped collection of rows
+{
+ nsresult outErr = NS_OK;
+ nsIMdbTable* outTable = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkTable* table = GetTableKind(ev, inRowScope, inTableKind, outTableCount,
+ outMustBeUnique);
+ if (table && ev->Good()) outTable = table->AcquireTableHandle(ev);
+ outErr = ev->AsErr();
+ }
+ if (acqTable) *acqTable = outTable;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::GetPortTableCursor( // get cursor for all tables of specific type
+ nsIMdbEnv* mev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ nsIMdbPortTableCursor** acqCursor) // all such tables in the port
+{
+ nsresult outErr = NS_OK;
+ nsIMdbPortTableCursor* outCursor = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkPortTableCursor* cursor =
+ GetPortTableCursor(ev, inRowScope, inTableKind);
+ if (cursor && ev->Good()) outCursor = cursor;
+
+ outErr = ev->AsErr();
+ }
+ if (acqCursor) *acqCursor = outCursor;
+ return outErr;
+}
+// } ----- end table methods -----
+
+// { ----- begin commit methods -----
+
+NS_IMETHODIMP
+morkStore::ShouldCompress( // store wastes at least inPercentWaste?
+ nsIMdbEnv* mev, // context
+ mdb_percent inPercentWaste, // 0..100 percent file size waste threshold
+ mdb_percent* outActualWaste, // 0..100 percent of file actually wasted
+ mdb_bool* outShould) // true when about inPercentWaste% is wasted
+{
+ mdb_percent actualWaste = 0;
+ mdb_bool shouldCompress = morkBool_kFalse;
+ nsresult outErr = NS_OK;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ actualWaste = PercentOfStoreWasted(ev);
+ if (inPercentWaste > 100) inPercentWaste = 100;
+ shouldCompress = (actualWaste >= inPercentWaste);
+ outErr = ev->AsErr();
+ }
+ if (outActualWaste) *outActualWaste = actualWaste;
+ if (outShould) *outShould = shouldCompress;
+ return outErr;
+}
+
+// } ===== end nsIMdbPort methods =====
+
+NS_IMETHODIMP
+morkStore::NewTable( // make one new table of specific type
+ nsIMdbEnv* mev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_bool inMustBeUnique, // whether store can hold only one of these
+ const mdbOid* inOptionalMetaRowOid, // can be nil to avoid specifying
+ nsIMdbTable** acqTable) // acquire scoped collection of rows
+{
+ nsresult outErr = NS_OK;
+ nsIMdbTable* outTable = 0;
+ morkEnv* ev = this->CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkTable* table = NewTable(ev, inRowScope, inTableKind, inMustBeUnique,
+ inOptionalMetaRowOid);
+ if (table && ev->Good()) outTable = table->AcquireTableHandle(ev);
+ outErr = ev->AsErr();
+ }
+ if (acqTable) *acqTable = outTable;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::NewTableWithOid( // make one new table of specific type
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid, // caller assigned oid
+ mdb_kind inTableKind, // the type of table to access
+ mdb_bool inMustBeUnique, // whether store can hold only one of these
+ const mdbOid* inOptionalMetaRowOid, // can be nil to avoid specifying
+ nsIMdbTable** acqTable) // acquire scoped collection of rows
+{
+ nsresult outErr = NS_OK;
+ nsIMdbTable* outTable = 0;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkTable* table = OidToTable(ev, inOid, inOptionalMetaRowOid);
+ if (table && ev->Good()) {
+ table->mTable_Kind = inTableKind;
+ if (inMustBeUnique) table->SetTableUnique();
+ outTable = table->AcquireTableHandle(ev);
+ }
+ outErr = ev->AsErr();
+ }
+ if (acqTable) *acqTable = outTable;
+ return outErr;
+}
+// } ----- end table methods -----
+
+// { ----- begin row scope methods -----
+NS_IMETHODIMP
+morkStore::RowScopeHasAssignedIds(
+ nsIMdbEnv* mev,
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_bool* outCallerAssigned, // nonzero if caller assigned specified
+ mdb_bool* outStoreAssigned) // nonzero if store db assigned specified
+{
+ NS_ASSERTION(false, " not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkStore::SetCallerAssignedIds(
+ nsIMdbEnv* mev,
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_bool* outCallerAssigned, // nonzero if caller assigned specified
+ mdb_bool* outStoreAssigned) // nonzero if store db assigned specified
+{
+ NS_ASSERTION(false, " not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkStore::SetStoreAssignedIds(
+ nsIMdbEnv* mev,
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_bool* outCallerAssigned, // nonzero if caller assigned specified
+ mdb_bool* outStoreAssigned) // nonzero if store db assigned specified
+{
+ NS_ASSERTION(false, " not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+// } ----- end row scope methods -----
+
+// { ----- begin row methods -----
+NS_IMETHODIMP
+morkStore::NewRowWithOid(nsIMdbEnv* mev, // new row w/ caller assigned oid
+ const mdbOid* inOid, // caller assigned oid
+ nsIMdbRow** acqRow) // create new row
+{
+ nsresult outErr = NS_OK;
+ nsIMdbRow* outRow = 0;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkRow* row = NewRowWithOid(ev, inOid);
+ if (row && ev->Good()) outRow = row->AcquireRowHandle(ev, this);
+
+ outErr = ev->AsErr();
+ }
+ if (acqRow) *acqRow = outRow;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::NewRow(nsIMdbEnv* mev, // new row with db assigned oid
+ mdb_scope inRowScope, // row scope for row ids
+ nsIMdbRow** acqRow) // create new row
+// Note this row must be added to some table or cell child before the
+// store is closed in order to make this row persist across sessions.
+{
+ nsresult outErr = NS_OK;
+ nsIMdbRow* outRow = 0;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkRow* row = NewRow(ev, inRowScope);
+ if (row && ev->Good()) outRow = row->AcquireRowHandle(ev, this);
+
+ outErr = ev->AsErr();
+ }
+ if (acqRow) *acqRow = outRow;
+ return outErr;
+}
+// } ----- end row methods -----
+
+// { ----- begin import/export methods -----
+NS_IMETHODIMP
+morkStore::ImportContent( // import content from port
+ nsIMdbEnv* mev, // context
+ mdb_scope inRowScope, // scope for rows (or zero for all?)
+ nsIMdbPort* ioPort, // the port with content to add to store
+ nsIMdbThumb** acqThumb) // acquire thumb for incremental import
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the import will be finished.
+{
+ NS_ASSERTION(false, " not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkStore::ImportFile( // import content from port
+ nsIMdbEnv* mev, // context
+ nsIMdbFile* ioFile, // the file with content to add to store
+ nsIMdbThumb** acqThumb) // acquire thumb for incremental import
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the import will be finished.
+{
+ NS_ASSERTION(false, " not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+// } ----- end import/export methods -----
+
+// { ----- begin hinting methods -----
+NS_IMETHODIMP
+morkStore::ShareAtomColumnsHint( // advise re shared col content atomizing
+ nsIMdbEnv* mev, // context
+ mdb_scope inScopeHint, // zero, or suggested shared namespace
+ const mdbColumnSet* inColumnSet) // cols desired tokenized together
+{
+ MORK_USED_2(inColumnSet, inScopeHint);
+ nsresult outErr = NS_OK;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ // ev->StubMethodOnlyError(); // okay to do nothing for a hint method
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::AvoidAtomColumnsHint( // advise col w/ poor atomizing prospects
+ nsIMdbEnv* mev, // context
+ const mdbColumnSet* inColumnSet) // cols with poor atomizing prospects
+{
+ MORK_USED_1(inColumnSet);
+ nsresult outErr = NS_OK;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ // ev->StubMethodOnlyError(); // okay to do nothing for a hint method
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// } ----- end hinting methods -----
+
+// { ----- begin commit methods -----
+NS_IMETHODIMP
+morkStore::LargeCommit( // save important changes if at all possible
+ nsIMdbEnv* mev, // context
+ nsIMdbThumb** acqThumb) // acquire thumb for incremental commit
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the commit will be finished. Note the store is effectively write
+// locked until commit is finished or canceled through the thumb instance.
+// Until the commit is done, the store will report it has readonly status.
+{
+ nsresult outErr = NS_OK;
+ nsIMdbThumb* outThumb = 0;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkThumb* thumb = 0;
+ // morkFile* file = store->mStore_File;
+ if (DoPreferLargeOverCompressCommit(ev)) {
+ thumb = morkThumb::Make_LargeCommit(ev, mPort_Heap, this);
+ } else {
+ mork_bool doCollect = morkBool_kFalse;
+ thumb = morkThumb::Make_CompressCommit(ev, mPort_Heap, this, doCollect);
+ }
+
+ if (thumb) {
+ outThumb = thumb;
+ thumb->AddRef();
+ }
+
+ outErr = ev->AsErr();
+ }
+ if (acqThumb) *acqThumb = outThumb;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::SessionCommit( // save all changes if large commits delayed
+ nsIMdbEnv* mev, // context
+ nsIMdbThumb** acqThumb) // acquire thumb for incremental commit
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the commit will be finished. Note the store is effectively write
+// locked until commit is finished or canceled through the thumb instance.
+// Until the commit is done, the store will report it has readonly status.
+{
+ nsresult outErr = NS_OK;
+ nsIMdbThumb* outThumb = 0;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ morkThumb* thumb = 0;
+ if (DoPreferLargeOverCompressCommit(ev)) {
+ thumb = morkThumb::Make_LargeCommit(ev, mPort_Heap, this);
+ } else {
+ mork_bool doCollect = morkBool_kFalse;
+ thumb = morkThumb::Make_CompressCommit(ev, mPort_Heap, this, doCollect);
+ }
+
+ if (thumb) {
+ outThumb = thumb;
+ thumb->AddRef();
+ }
+ outErr = ev->AsErr();
+ }
+ if (acqThumb) *acqThumb = outThumb;
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkStore::CompressCommit( // commit and make db smaller if possible
+ nsIMdbEnv* mev, // context
+ nsIMdbThumb** acqThumb) // acquire thumb for incremental commit
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the commit will be finished. Note the store is effectively write
+// locked until commit is finished or canceled through the thumb instance.
+// Until the commit is done, the store will report it has readonly status.
+{
+ nsresult outErr = NS_OK;
+ nsIMdbThumb* outThumb = 0;
+ morkEnv* ev = CanUseStore(mev, /*inMutable*/ morkBool_kFalse, &outErr);
+ if (ev) {
+ mork_bool doCollect = morkBool_kFalse;
+ morkThumb* thumb =
+ morkThumb::Make_CompressCommit(ev, mPort_Heap, this, doCollect);
+ if (thumb) {
+ outThumb = thumb;
+ thumb->AddRef();
+ mStore_CanWriteIncremental = morkBool_kTrue;
+ }
+
+ outErr = ev->AsErr();
+ }
+ if (acqThumb) *acqThumb = outThumb;
+ return outErr;
+}
+
+// } ----- end commit methods -----
+
+// } ===== end nsIMdbStore methods =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkStore.h b/comm/mailnews/db/mork/morkStore.h
new file mode 100644
index 0000000000..c2a08cb7d2
--- /dev/null
+++ b/comm/mailnews/db/mork/morkStore.h
@@ -0,0 +1,770 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKSTORE_
+#define _MORKSTORE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#ifndef _MORKNODEMAP_
+# include "morkNodeMap.h"
+#endif
+
+#ifndef _MORKPOOL_
+# include "morkPool.h"
+#endif
+
+#ifndef _MORKZONE_
+# include "morkZone.h"
+#endif
+
+#ifndef _MORKATOM_
+# include "morkAtom.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kPort /*i*/ 0x7054 /* ascii 'pT' */
+
+#define morkDerived_kStore /*i*/ 0x7354 /* ascii 'sT' */
+
+/*| kGroundColumnSpace: we use the 'column space' as the default scope
+**| for grounding column name IDs, and this is also the default scope for
+**| all other explicitly tokenized strings.
+|*/
+#define morkStore_kGroundColumnSpace 'c' /* for mStore_GroundColumnSpace*/
+#define morkStore_kColumnSpaceScope ((mork_scope)'c') /*kGroundColumnSpace*/
+#define morkStore_kValueSpaceScope ((mork_scope)'v')
+#define morkStore_kStreamBufSize (8 * 1024) /* okay buffer size */
+
+#define morkStore_kReservedColumnCount 0x20 /* for well-known columns */
+
+#define morkStore_kNoneToken ((mork_token)'n')
+#define morkStore_kFormColumn ((mork_column)'f')
+#define morkStore_kAtomScopeColumn ((mork_column)'a')
+#define morkStore_kRowScopeColumn ((mork_column)'r')
+#define morkStore_kMetaScope ((mork_scope)'m')
+#define morkStore_kKindColumn ((mork_column)'k')
+#define morkStore_kStatusColumn ((mork_column)'s')
+
+/*| morkStore:
+|*/
+class morkStore : public morkObject, public nsIMdbStore {
+ public: // state is public because the entire Mork system is private
+ NS_DECL_ISUPPORTS_INHERITED
+
+ morkEnv* mPort_Env; // non-refcounted env which created port
+ morkFactory* mPort_Factory; // weak ref to suite factory
+ nsIMdbHeap* mPort_Heap; // heap in which this port allocs objects
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ void ClosePort(morkEnv* ev); // called by CloseMorkNode();
+
+ public: // dynamic type identification
+ mork_bool IsPort() const {
+ return IsNode() && mNode_Derived == morkDerived_kPort;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // other port methods
+ // { ----- begin attribute methods -----
+ // NS_IMETHOD IsFrozenMdbObject(nsIMdbEnv* ev, mdb_bool* outIsReadonly);
+ // same as nsIMdbPort::GetIsPortReadonly() when this object is inside a port.
+ // } ----- end attribute methods -----
+
+ // { ----- begin factory methods -----
+ // NS_IMETHOD GetMdbFactory(nsIMdbEnv* ev, nsIMdbFactory** acqFactory);
+ // } ----- end factory methods -----
+
+ // { ----- begin ref counting for well-behaved cyclic graphs -----
+ NS_IMETHOD GetWeakRefCount(nsIMdbEnv* ev, // weak refs
+ mdb_count* outCount) override;
+ NS_IMETHOD GetStrongRefCount(nsIMdbEnv* ev, // strong refs
+ mdb_count* outCount) override;
+
+ NS_IMETHOD AddWeakRef(nsIMdbEnv* ev) override;
+#ifndef _MSC_VER
+ // The first declaration of AddStrongRef is to suppress
+ // -Werror,-Woverloaded-virtual.
+ NS_IMETHOD_(mork_uses) AddStrongRef(morkEnv* ev) override;
+#endif
+ NS_IMETHOD_(mork_uses) AddStrongRef(nsIMdbEnv* ev) override;
+
+ NS_IMETHOD CutWeakRef(nsIMdbEnv* ev) override;
+#ifndef _MSC_VER
+ // The first declaration of CutStrongRef is to suppress
+ // -Werror,-Woverloaded-virtual.
+ NS_IMETHOD_(mork_uses) CutStrongRef(morkEnv* ev) override;
+#endif
+ NS_IMETHOD CutStrongRef(nsIMdbEnv* ev) override;
+
+ NS_IMETHOD CloseMdbObject(
+ nsIMdbEnv* ev) override; // called at strong refs zero
+ NS_IMETHOD IsOpenMdbObject(nsIMdbEnv* ev, mdb_bool* outOpen) override;
+ // } ----- end ref counting -----
+
+ // } ===== end nsIMdbObject methods =====
+
+ // { ===== begin nsIMdbPort methods =====
+
+ // { ----- begin attribute methods -----
+ NS_IMETHOD GetIsPortReadonly(nsIMdbEnv* ev, mdb_bool* outBool) override;
+ NS_IMETHOD GetIsStore(nsIMdbEnv* ev, mdb_bool* outBool) override;
+ NS_IMETHOD GetIsStoreAndDirty(nsIMdbEnv* ev, mdb_bool* outBool) override;
+
+ NS_IMETHOD GetUsagePolicy(nsIMdbEnv* ev,
+ mdbUsagePolicy* ioUsagePolicy) override;
+
+ NS_IMETHOD SetUsagePolicy(nsIMdbEnv* ev,
+ const mdbUsagePolicy* inUsagePolicy) override;
+ // } ----- end attribute methods -----
+
+ // { ----- begin memory policy methods -----
+ NS_IMETHOD IdleMemoryPurge( // do memory management already scheduled
+ nsIMdbEnv* ev, // context
+ mdb_size* outEstimatedBytesFreed)
+ override; // approximate bytes actually freed
+
+ NS_IMETHOD SessionMemoryPurge( // request specific footprint decrease
+ nsIMdbEnv* ev, // context
+ mdb_size inDesiredBytesFreed, // approximate number of bytes wanted
+ mdb_size* outEstimatedBytesFreed)
+ override; // approximate bytes actually freed
+
+ NS_IMETHOD PanicMemoryPurge( // desperately free all possible memory
+ nsIMdbEnv* ev, // context
+ mdb_size* outEstimatedBytesFreed)
+ override; // approximate bytes actually freed
+ // } ----- end memory policy methods -----
+
+ // { ----- begin filepath methods -----
+ NS_IMETHOD GetPortFilePath(
+ nsIMdbEnv* ev, // context
+ mdbYarn* outFilePath, // name of file holding port content
+ mdbYarn* outFormatVersion) override; // file format description
+
+ NS_IMETHOD GetPortFile(
+ nsIMdbEnv* ev, // context
+ nsIMdbFile** acqFile) override; // acquire file used by port or store
+ // } ----- end filepath methods -----
+
+ // { ----- begin export methods -----
+ NS_IMETHOD BestExportFormat( // determine preferred export format
+ nsIMdbEnv* ev, // context
+ mdbYarn* outFormatVersion) override; // file format description
+
+ NS_IMETHOD
+ CanExportToFormat( // can export content in given specific format?
+ nsIMdbEnv* ev, // context
+ const char* inFormatVersion, // file format description
+ mdb_bool* outCanExport) override; // whether ExportSource() might succeed
+
+ NS_IMETHOD ExportToFormat( // export content in given specific format
+ nsIMdbEnv* ev, // context
+ // const char* inFilePath, // the file to receive exported content
+ nsIMdbFile* ioFile, // destination abstract file interface
+ const char* inFormatVersion, // file format description
+ nsIMdbThumb** acqThumb) override; // acquire thumb for incremental export
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the export will be finished.
+
+ // } ----- end export methods -----
+
+ // { ----- begin token methods -----
+ NS_IMETHOD TokenToString( // return a string name for an integer token
+ nsIMdbEnv* ev, // context
+ mdb_token inToken, // token for inTokenName inside this port
+ mdbYarn* outTokenName) override; // the type of table to access
+
+ NS_IMETHOD StringToToken( // return an integer token for scope name
+ nsIMdbEnv* ev, // context
+ const char* inTokenName, // Latin1 string to tokenize if possible
+ mdb_token* outToken) override; // token for inTokenName inside this port
+
+ // String token zero is never used and never supported. If the port
+ // is a mutable store, then StringToToken() to create a new
+ // association of inTokenName with a new integer token if possible.
+ // But a readonly port will return zero for an unknown scope name.
+
+ NS_IMETHOD QueryToken( // like StringToToken(), but without adding
+ nsIMdbEnv* ev, // context
+ const char* inTokenName, // Latin1 string to tokenize if possible
+ mdb_token* outToken) override; // token for inTokenName inside this port
+
+ // QueryToken() will return a string token if one already exists,
+ // but unlike StringToToken(), will not assign a new token if not
+ // already in use.
+
+ // } ----- end token methods -----
+
+ // { ----- begin row methods -----
+ NS_IMETHOD HasRow( // contains a row with the specified oid?
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ mdb_bool* outHasRow) override; // whether GetRow() might succeed
+
+ NS_IMETHOD GetRowRefCount( // get number of tables that contain a row
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ mdb_count* outRefCount) override; // number of tables containing inRowKey
+
+ NS_IMETHOD GetRow( // access one row with specific oid
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical row oid
+ nsIMdbRow** acqRow) override; // acquire specific row (or null)
+
+ NS_IMETHOD FindRow(
+ nsIMdbEnv* ev, // search for row with matching cell
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_column inColumn, // the column to search (and maintain an index)
+ const mdbYarn* inTargetCellValue, // cell value for which to search
+ mdbOid* outRowOid, // out row oid on match (or {0,-1} for no match)
+ nsIMdbRow** acqRow)
+ override; // acquire matching row (or nil for no match)
+ // can be null if you only want the oid
+ // FindRow() searches for one row that has a cell in column inColumn with
+ // a contained value with the same form (i.e. charset) and is byte-wise
+ // identical to the blob described by yarn inTargetCellValue. Both content
+ // and form of the yarn must be an exact match to find a matching row.
+ //
+ // (In other words, both a yarn's blob bytes and form are significant. The
+ // form is not expected to vary in columns used for identity anyway. This
+ // is intended to make the cost of FindRow() cheaper for MDB implementors,
+ // since any cell value atomization performed internally must necessarily
+ // make yarn form significant in order to avoid data loss in atomization.)
+ //
+ // FindRow() can lazily create an index on attribute inColumn for all rows
+ // with that attribute in row space scope inRowScope, so that subsequent
+ // calls to FindRow() will perform faster. Such an index might or might
+ // not be persistent (but this seems desirable if it is cheap to do so).
+ // Note that lazy index creation in readonly DBs is not very feasible.
+ //
+ // This FindRow() interface assumes that attribute inColumn is effectively
+ // an alternative means of unique identification for a row in a rowspace,
+ // so correct behavior is only guaranteed when no duplicates for this col
+ // appear in the given set of rows. (If more than one row has the same cell
+ // value in this column, no more than one will be found; and cutting one of
+ // two duplicate rows can cause the index to assume no other such row lives
+ // in the row space, so future calls return nil for negative search results
+ // even though some duplicate row might still live within the rowspace.)
+ //
+ // In other words, the FindRow() implementation is allowed to assume simple
+ // hash tables mapping unique column keys to associated row values will be
+ // sufficient, where any duplication is not recorded because only one copy
+ // of a given key need be remembered. Implementors are not required to sort
+ // all rows by the specified column.
+ // } ----- end row methods -----
+
+ // { ----- begin table methods -----
+ NS_IMETHOD HasTable( // supports a table with the specified oid?
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical table oid
+ mdb_bool* outHasTable) override; // whether GetTable() might succeed
+
+ NS_IMETHOD GetTable( // access one table with specific oid
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // hypothetical table oid
+ nsIMdbTable** acqTable) override; // acquire specific table (or null)
+
+ NS_IMETHOD HasTableKind( // supports a table of the specified type?
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // rid scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_count* outTableCount, // current number of such tables
+ mdb_bool* outSupportsTable)
+ override; // whether GetTableKind() might succeed
+
+ NS_IMETHOD GetTableKind( // access one (random) table of specific type
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_count* outTableCount, // current number of such tables
+ mdb_bool* outMustBeUnique, // whether port can hold only one of these
+ nsIMdbTable** acqTable) override; // acquire scoped collection of rows
+
+ NS_IMETHOD
+ GetPortTableCursor( // get cursor for all tables of specific type
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ nsIMdbPortTableCursor** acqCursor)
+ override; // all such tables in the port
+ // } ----- end table methods -----
+
+ // { ----- begin commit methods -----
+
+ NS_IMETHOD ShouldCompress( // store wastes at least inPercentWaste?
+ nsIMdbEnv* ev, // context
+ mdb_percent inPercentWaste, // 0..100 percent file size waste threshold
+ mdb_percent* outActualWaste, // 0..100 percent of file actually wasted
+ mdb_bool* outShould)
+ override; // true when about inPercentWaste% is wasted
+ // ShouldCompress() returns true if the store can determine that the file
+ // will shrink by an estimated percentage of inPercentWaste% (or more) if
+ // CompressCommit() is called, because that percentage of the file seems
+ // to be recoverable free space. The granularity is only in terms of
+ // percentage points, and any value over 100 is considered equal to 100.
+ //
+ // If a store only has an approximate idea how much space might be saved
+ // during a compress, then a best guess should be made. For example, the
+ // Mork implementation might keep track of how much file space began with
+ // text content before the first updating transaction, and then consider
+ // all content following the start of the first transaction as potentially
+ // wasted space if it is all updates and not just new content. (This is
+ // a safe assumption in the sense that behavior will stabilize on a low
+ // estimate of wastage after a commit removes all transaction updates.)
+ //
+ // Some db formats might attempt to keep a very accurate reckoning of free
+ // space size, so a very accurate determination can be made. But other db
+ // formats might have difficulty determining size of free space, and might
+ // require some lengthy calculation to answer. This is the reason for
+ // passing in the percentage threshold of interest, so that such lengthy
+ // computations can terminate early as soon as at least inPercentWaste is
+ // found, so that the entire file need not be groveled when unnecessary.
+ // However, we hope implementations will always favor fast but imprecise
+ // heuristic answers instead of extremely slow but very precise answers.
+ //
+ // If the outActualWaste parameter is non-nil, it will be used to return
+ // the actual estimated space wasted as a percentage of file size. (This
+ // parameter is provided so callers need not call repeatedly with altered
+ // inPercentWaste values to isolate the actual wastage figure.) Note the
+ // actual wastage figure returned can exactly equal inPercentWaste even
+ // when this grossly underestimates the real figure involved, if the db
+ // finds it very expensive to determine the extent of wastage after it is
+ // known to at least exceed inPercentWaste. Note we expect that whenever
+ // outShould returns true, that outActualWaste returns >= inPercentWaste.
+ //
+ // The effect of different inPercentWaste values is not very uniform over
+ // the permitted range. For example, 50 represents 50% wastage, or a file
+ // that is about double what it should be ideally. But 99 represents 99%
+ // wastage, or a file that is about ninety-nine times as big as it should
+ // be ideally. In the smaller direction, 25 represents 25% wastage, or
+ // a file that is only 33% larger than it should be ideally.
+ //
+ // Callers can determine what policy they want to use for considering when
+ // a file holds too much wasted space, and express this as a percentage
+ // of total file size to pass as in the inPercentWaste parameter. A zero
+ // likely returns always trivially true, and 100 always trivially false.
+ // The great majority of callers are expected to use values from 25 to 75,
+ // since most plausible thresholds for compressing might fall between the
+ // extremes of 133% of ideal size and 400% of ideal size. (Presumably the
+ // larger a file gets, the more important the percentage waste involved, so
+ // a sliding scale for compress thresholds might use smaller numbers for
+ // much bigger file sizes.)
+
+ // } ----- end commit methods -----
+
+ // } ===== end nsIMdbPort methods =====
+
+ // { ===== begin nsIMdbStore methods =====
+
+ // { ----- begin table methods -----
+ NS_IMETHOD NewTable( // make one new table of specific type
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_kind inTableKind, // the type of table to access
+ mdb_bool inMustBeUnique, // whether store can hold only one of these
+ const mdbOid* inOptionalMetaRowOid, // can be nil to avoid specifying
+ nsIMdbTable** acqTable) override; // acquire scoped collection of rows
+
+ NS_IMETHOD NewTableWithOid( // make one new table of specific type
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // caller assigned oid
+ mdb_kind inTableKind, // the type of table to access
+ mdb_bool inMustBeUnique, // whether store can hold only one of these
+ const mdbOid* inOptionalMetaRowOid, // can be nil to avoid specifying
+ nsIMdbTable** acqTable) override; // acquire scoped collection of rows
+ // } ----- end table methods -----
+
+ // { ----- begin row scope methods -----
+ NS_IMETHOD RowScopeHasAssignedIds(
+ nsIMdbEnv* ev,
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_bool* outCallerAssigned, // nonzero if caller assigned specified
+ mdb_bool* outStoreAssigned)
+ override; // nonzero if store db assigned specified
+
+ NS_IMETHOD SetCallerAssignedIds(
+ nsIMdbEnv* ev,
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_bool* outCallerAssigned, // nonzero if caller assigned specified
+ mdb_bool* outStoreAssigned)
+ override; // nonzero if store db assigned specified
+
+ NS_IMETHOD SetStoreAssignedIds(
+ nsIMdbEnv* ev,
+ mdb_scope inRowScope, // row scope for row ids
+ mdb_bool* outCallerAssigned, // nonzero if caller assigned specified
+ mdb_bool* outStoreAssigned)
+ override; // nonzero if store db assigned specified
+ // } ----- end row scope methods -----
+
+ // { ----- begin row methods -----
+ NS_IMETHOD NewRowWithOid(nsIMdbEnv* ev, // new row w/ caller assigned oid
+ const mdbOid* inOid, // caller assigned oid
+ nsIMdbRow** acqRow) override; // create new row
+
+ NS_IMETHOD NewRow(nsIMdbEnv* ev, // new row with db assigned oid
+ mdb_scope inRowScope, // row scope for row ids
+ nsIMdbRow** acqRow) override; // create new row
+ // Note this row must be added to some table or cell child before the
+ // store is closed in order to make this row persist across sessions.
+
+ // } ----- end row methods -----
+
+ // { ----- begin import/export methods -----
+ NS_IMETHOD ImportContent( // import content from port
+ nsIMdbEnv* ev, // context
+ mdb_scope inRowScope, // scope for rows (or zero for all?)
+ nsIMdbPort* ioPort, // the port with content to add to store
+ nsIMdbThumb** acqThumb) override; // acquire thumb for incremental import
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the import will be finished.
+
+ NS_IMETHOD ImportFile( // import content from port
+ nsIMdbEnv* ev, // context
+ nsIMdbFile* ioFile, // the file with content to add to store
+ nsIMdbThumb** acqThumb) override; // acquire thumb for incremental import
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the import will be finished.
+ // } ----- end import/export methods -----
+
+ // { ----- begin hinting methods -----
+ NS_IMETHOD
+ ShareAtomColumnsHint( // advise re shared column content atomizing
+ nsIMdbEnv* ev, // context
+ mdb_scope inScopeHint, // zero, or suggested shared namespace
+ const mdbColumnSet* inColumnSet)
+ override; // cols desired tokenized together
+
+ NS_IMETHOD
+ AvoidAtomColumnsHint( // advise column with poor atomizing prospects
+ nsIMdbEnv* ev, // context
+ const mdbColumnSet* inColumnSet)
+ override; // cols with poor atomizing prospects
+ // } ----- end hinting methods -----
+
+ // { ----- begin commit methods -----
+ NS_IMETHOD LargeCommit( // save important changes if at all possible
+ nsIMdbEnv* ev, // context
+ nsIMdbThumb** acqThumb) override; // acquire thumb for incremental commit
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the commit will be finished. Note the store is effectively write
+ // locked until commit is finished or canceled through the thumb instance.
+ // Until the commit is done, the store will report it has readonly status.
+
+ NS_IMETHOD SessionCommit( // save all changes if large commits delayed
+ nsIMdbEnv* ev, // context
+ nsIMdbThumb** acqThumb) override; // acquire thumb for incremental commit
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the commit will be finished. Note the store is effectively write
+ // locked until commit is finished or canceled through the thumb instance.
+ // Until the commit is done, the store will report it has readonly status.
+
+ NS_IMETHOD
+ CompressCommit( // commit and make db physically smaller if possible
+ nsIMdbEnv* ev, // context
+ nsIMdbThumb** acqThumb) override; // acquire thumb for incremental commit
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the commit will be finished. Note the store is effectively write
+ // locked until commit is finished or canceled through the thumb instance.
+ // Until the commit is done, the store will report it has readonly status.
+
+ // } ----- end commit methods -----
+
+ // } ===== end nsIMdbStore methods =====
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakPort(morkPort* me, morkEnv* ev, morkPort** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongPort(morkPort* me, morkEnv* ev, morkPort** ioSlot) {
+ morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+ // public: // slots inherited from morkPort (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ // morkEnv* mPort_Env; // non-refcounted env which created port
+ // morkFactory* mPort_Factory; // weak ref to suite factory
+ // nsIMdbHeap* mPort_Heap; // heap in which this port allocs objects
+
+ public: // state is public because the entire Mork system is private
+ // mStore_OidAtomSpace might be unnecessary; I don't remember why I wanted it.
+ morkAtomSpace* mStore_OidAtomSpace; // ground atom space for oids
+ morkAtomSpace* mStore_GroundAtomSpace; // ground atom space for scopes
+ morkAtomSpace* mStore_GroundColumnSpace; // ground column space for scopes
+
+ nsIMdbFile* mStore_File; // the file containing Mork text
+ morkStream* mStore_InStream; // stream using file used by the builder
+ morkBuilder* mStore_Builder; // to parse Mork text and build structures
+
+ morkStream* mStore_OutStream; // stream using file used by the writer
+
+ morkRowSpaceMap mStore_RowSpaces; // maps mork_scope -> morkSpace
+ morkAtomSpaceMap mStore_AtomSpaces; // maps mork_scope -> morkSpace
+
+ morkZone mStore_Zone;
+
+ morkPool mStore_Pool;
+
+ // we alloc a max size book atom to reuse space for atom map key searches:
+ // morkMaxBookAtom mStore_BookAtom; // staging area for atom map searches
+
+ morkFarBookAtom mStore_FarBookAtom; // staging area for atom map searches
+
+ // GroupIdentity should be one more than largest seen in a parsed db file:
+ mork_gid mStore_CommitGroupIdentity; // transaction ID number
+
+ // group positions are used to help compute PercentOfStoreWasted():
+ mork_pos mStore_FirstCommitGroupPos; // start of first group
+ mork_pos mStore_SecondCommitGroupPos; // start of second group
+ // If the first commit group is very near the start of the file (say less
+ // than 512 bytes), then we might assume the file started nearly empty and
+ // that most of the first group is not wasted. In that case, the pos of
+ // the second commit group might make a better estimate of the start of
+ // transaction space that might represent wasted file space. That's why
+ // we support fields for both first and second commit group positions.
+ //
+ // We assume that a zero in either group pos means that the slot has not
+ // yet been given a valid value, since the file will always start with a
+ // tag, and a commit group cannot actually start at position zero.
+ //
+ // Either or both the first or second commit group positions might be
+ // supplied by either morkWriter (while committing) or morkBuilder (while
+ // parsing), since either reading or writing the file might encounter the
+ // first transaction groups which came into existence either in the past
+ // or in the very recent present.
+
+ mork_bool mStore_CanAutoAssignAtomIdentity;
+ mork_bool mStore_CanDirty; // changes imply the store becomes dirty?
+ mork_u1 mStore_CanWriteIncremental; // compress not required?
+ mork_u1 mStore_Pad; // for u4 alignment
+
+ // mStore_CanDirty should be FALSE when parsing a file while building the
+ // content going into the store, because such data structure modifications
+ // are actually in sync with the file. So content read from a file must
+ // be clean with respect to the file. After a file is finished parsing,
+ // the mStore_CanDirty slot should become TRUE, so that any additional
+ // changes at runtime cause structures to be marked dirty with respect to
+ // the file which must later be updated with changes during a commit.
+ //
+ // It might also make sense to set mStore_CanDirty to FALSE while a commit
+ // is in progress, lest some internal transformations make more content
+ // appear dirty when it should not. So anyone modifying content during a
+ // commit should think about the intended significance regarding dirty.
+
+ public: // more specific dirty methods for store:
+ void SetStoreDirty() { this->SetNodeDirty(); }
+ void SetStoreClean() { this->SetNodeClean(); }
+
+ mork_bool IsStoreClean() const { return this->IsNodeClean(); }
+ mork_bool IsStoreDirty() const { return this->IsNodeDirty(); }
+
+ public: // setting dirty based on CanDirty:
+ void MaybeDirtyStore() { // mark dirty only when dirtying is currently enabled
+ if (mStore_CanDirty) this->SetStoreDirty(); // see mStore_CanDirty notes: false while parsing a file
+ }
+
+ public: // space waste analysis
+ mork_percent PercentOfStoreWasted(morkEnv* ev);
+
+ public: // setting store and all subspaces canDirty:
+ void SetStoreAndAllSpacesCanDirty(morkEnv* ev, mork_bool inCanDirty);
+
+ public: // building an atom inside mStore_FarBookAtom from a char* string
+ morkFarBookAtom* StageAliasAsFarBookAtom(morkEnv* ev, const morkMid* inMid,
+ morkAtomSpace* ioSpace,
+ mork_cscode inForm);
+
+ morkFarBookAtom* StageYarnAsFarBookAtom(morkEnv* ev, const mdbYarn* inYarn,
+ morkAtomSpace* ioSpace);
+
+ morkFarBookAtom* StageStringAsFarBookAtom(morkEnv* ev, const char* inString,
+ mork_cscode inForm,
+ morkAtomSpace* ioSpace);
+
+ public: // determining whether incremental writing is a good use of time:
+ mork_bool DoPreferLargeOverCompressCommit(morkEnv* ev);
+ // true when mStore_CanWriteIncremental && store has file large enough
+
+ public: // lazy creation of members and nested row or atom spaces
+ morkAtomSpace* LazyGetOidAtomSpace(morkEnv* ev);
+ morkAtomSpace* LazyGetGroundAtomSpace(morkEnv* ev);
+ morkAtomSpace* LazyGetGroundColumnSpace(morkEnv* ev);
+
+ morkStream* LazyGetInStream(morkEnv* ev);
+ morkBuilder* LazyGetBuilder(morkEnv* ev);
+ void ForgetBuilder(morkEnv* ev);
+
+ morkStream* LazyGetOutStream(morkEnv* ev);
+
+ morkRowSpace* LazyGetRowSpace(morkEnv* ev, mdb_scope inRowScope);
+ morkAtomSpace* LazyGetAtomSpace(morkEnv* ev, mdb_scope inAtomScope);
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(
+ morkEnv* ev) override; // CloseStore() only if open
+
+ public: // morkStore construction & destruction
+ morkStore(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioNodeHeap, // the heap (if any) for this node instance
+ morkFactory* inFactory, // the factory for this
+ nsIMdbHeap* ioPortHeap // the heap to hold all content in the port
+ );
+ void CloseStore(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+ morkStore(const morkStore& other);
+ morkStore& operator=(const morkStore& other);
+ virtual ~morkStore(); // assert that CloseStore() executed earlier
+
+ public: // dynamic type identification
+ morkEnv* CanUseStore(nsIMdbEnv* mev, mork_bool inMutable,
+ nsresult* outErr) const;
+ mork_bool IsStore() const {
+ return IsNode() && mNode_Derived == morkDerived_kStore;
+ }
+ // } ===== end morkNode methods =====
+
+ public: // typing
+ static void NonStoreTypeError(morkEnv* ev);
+ static void NilStoreFileError(morkEnv* ev);
+ static void CannotAutoAssignAtomIdentityError(morkEnv* ev);
+
+ public: // store utilities
+ morkAtom* YarnToAtom(morkEnv* ev, const mdbYarn* inYarn,
+ bool createIfMissing = true);
+ morkAtom* AddAlias(morkEnv* ev, const morkMid& inMid, mork_cscode inForm);
+
+ public: // other store methods
+ void RenumberAllCollectableContent(morkEnv* ev);
+
+ nsIMdbStore* AcquireStoreHandle(morkEnv* ev); // mObject_Handle
+
+ morkPool* StorePool() { return &mStore_Pool; }
+
+ mork_bool OpenStoreFile(morkEnv* ev, // return value equals ev->Good()
+ mork_bool inFrozen,
+ // const char* inFilePath,
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy);
+
+ mork_bool CreateStoreFile(morkEnv* ev, // return value equals ev->Good()
+ // const char* inFilePath,
+ nsIMdbFile* ioFile, // db abstract file interface
+ const mdbOpenPolicy* inOpenPolicy);
+
+ morkAtom* CopyAtom(morkEnv* ev, const morkAtom* inAtom);
+ // copy inAtom (from some other store) over to this store
+
+ mork_token CopyToken(morkEnv* ev, mdb_token inToken, morkStore* inStore);
+ // copy inToken from inStore over to this store
+
+ mork_token BufToToken(morkEnv* ev, const morkBuf* inBuf);
+ mork_token StringToToken(morkEnv* ev, const char* inTokenName);
+ mork_token QueryToken(morkEnv* ev, const char* inTokenName);
+ void TokenToString(morkEnv* ev, mdb_token inToken, mdbYarn* outTokenName);
+
+ mork_bool MidToOid(morkEnv* ev, const morkMid& inMid, mdbOid* outOid);
+ mork_bool OidToYarn(morkEnv* ev, const mdbOid& inOid, mdbYarn* outYarn);
+ mork_bool MidToYarn(morkEnv* ev, const morkMid& inMid, mdbYarn* outYarn);
+
+ morkBookAtom* MidToAtom(morkEnv* ev, const morkMid& inMid);
+ morkRow* MidToRow(morkEnv* ev, const morkMid& inMid);
+ morkTable* MidToTable(morkEnv* ev, const morkMid& inMid);
+
+ morkRow* OidToRow(morkEnv* ev, const mdbOid* inOid);
+ // OidToRow() finds old row with oid, or makes new one if not found.
+
+ morkTable* OidToTable(morkEnv* ev, const mdbOid* inOid,
+ const mdbOid* inOptionalMetaRowOid);
+ // OidToTable() finds old table with oid, or makes new one if not found.
+
+ static void SmallTokenToOneByteYarn(morkEnv* ev, mdb_token inToken,
+ mdbYarn* outYarn);
+
+ mork_bool HasTableKind(morkEnv* ev, mdb_scope inRowScope,
+ mdb_kind inTableKind, mdb_count* outTableCount);
+
+ morkTable* GetTableKind(morkEnv* ev, mdb_scope inRowScope,
+ mdb_kind inTableKind, mdb_count* outTableCount,
+ mdb_bool* outMustBeUnique);
+
+ morkRow* FindRow(morkEnv* ev, mdb_scope inScope, mdb_column inColumn,
+ const mdbYarn* inTargetCellValue);
+
+ morkRow* GetRow(morkEnv* ev, const mdbOid* inOid);
+ morkTable* GetTable(morkEnv* ev, const mdbOid* inOid);
+
+ morkTable* NewTable(morkEnv* ev, mdb_scope inRowScope, mdb_kind inTableKind,
+ mdb_bool inMustBeUnique,
+ const mdbOid* inOptionalMetaRowOid);
+
+ morkPortTableCursor* GetPortTableCursor(morkEnv* ev, mdb_scope inRowScope,
+ mdb_kind inTableKind);
+
+ morkRow* NewRowWithOid(morkEnv* ev, const mdbOid* inOid);
+ morkRow* NewRow(morkEnv* ev, mdb_scope inRowScope);
+
+ morkThumb* MakeCompressCommitThumb(morkEnv* ev, mork_bool inDoCollect);
+
+ public: // commit related methods
+ mork_bool MarkAllStoreContentDirty(morkEnv* ev);
+ // MarkAllStoreContentDirty() visits every object in the store and marks
+ // them dirty, including every table, row, cell, and atom. The return
+ // equals ev->Good(), to show whether any error happened. This method is
+ // intended for use in the beginning of a "compress commit" which writes
+ // all store content, whether dirty or not. We dirty everything first so
+ // that later iterations over content can mark things clean as they are
+ // written, and organize the process of serialization so that objects are
+ // written only at need (because of being dirty).
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+ static void SlotWeakStore(morkStore* me, morkEnv* ev, morkStore** ioSlot) {
+ morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+ }
+
+ static void SlotStrongStore(morkStore* me, morkEnv* ev, morkStore** ioSlot) { // replace strong ref in *ioSlot with me; ev is unused here
+ morkStore* store = *ioSlot; // previous occupant of the slot (may be nil)
+ if (me != store) { // no refcount churn when the slot already holds me
+ if (store) {
+ // what if this nulls out the ev and causes asserts?
+ // can we move this after the CutStrongRef()?
+ *ioSlot = 0; // clear the slot before releasing the old value
+ store->Release();
+ }
+ if (me && me->AddRef()) *ioSlot = me; // install me only if AddRef succeeds
+ }
+ }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKSTORE_ */
diff --git a/comm/mailnews/db/mork/morkStream.cpp b/comm/mailnews/db/mork/morkStream.cpp
new file mode 100644
index 0000000000..23fd7b91ae
--- /dev/null
+++ b/comm/mailnews/db/mork/morkStream.cpp
@@ -0,0 +1,790 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKFILE_
+# include "morkFile.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKSTREAM_
+# include "morkStream.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkStream::CloseMorkNode(
+    morkEnv* ev)  // CloseStream() only if open
+{
+  // Idempotent teardown: only an open node transitions through
+  // closing -> shut; calling this again on a shut node is a no-op.
+  if (!this->IsOpenNode()) return;
+
+  this->MarkClosing();
+  this->CloseStream(ev);
+  this->MarkShut();
+}
+
+/*public virtual*/
+morkStream::~morkStream()  // assert CloseStream() executed earlier
+{
+  // CloseStream() releases the content file and frees the buffer, nulling
+  // both slots; if either is still set here the stream was destroyed
+  // without being closed first.
+  MORK_ASSERT(mStream_ContentFile == 0);
+  MORK_ASSERT(mStream_Buf == 0);
+}
+
+/*public non-poly*/
+morkStream::morkStream(morkEnv* ev, const morkUsage& inUsage,
+                       nsIMdbHeap* ioHeap, nsIMdbFile* ioContentFile,
+                       mork_size inBufSize, mork_bool inFrozen)
+    // Build a buffered stream over ioContentFile.  inFrozen fixes the mode
+    // for the stream's whole lifetime: true = read-only source (only
+    // mStream_ReadEnd becomes nonzero), false = write-only sink (only
+    // mStream_WriteEnd becomes nonzero).  See the class comment in
+    // morkStream.h for why the two modes are mutually exclusive.
+    : morkFile(ev, inUsage, ioHeap, ioHeap),
+      mStream_At(0),
+      mStream_ReadEnd(0),
+      mStream_WriteEnd(0)
+
+      ,
+      mStream_ContentFile(0)
+
+      ,
+      mStream_Buf(0),
+      mStream_BufSize(inBufSize),
+      mStream_BufPos(0),
+      mStream_Dirty(morkBool_kFalse),
+      mStream_HitEof(morkBool_kFalse) {
+  if (ev->Good()) {
+    // Clamp the requested buffer size into [kMinBufSize, kMaxBufSize];
+    // note both the member and the local are updated together.
+    if (inBufSize < morkStream_kMinBufSize)
+      mStream_BufSize = inBufSize = morkStream_kMinBufSize;
+    else if (inBufSize > morkStream_kMaxBufSize)
+      mStream_BufSize = inBufSize = morkStream_kMaxBufSize;
+
+    if (ioContentFile && ioHeap) {
+      // if ( ioContentFile->FileFrozen() ) // forced to be readonly?
+      // inFrozen = morkBool_kTrue; // override the input value
+
+      // Hold a strong reference to the content file for our lifetime.
+      nsIMdbFile_SlotStrongFile(ioContentFile, ev, &mStream_ContentFile);
+      if (ev->Good()) {
+        mork_u1* buf = 0;
+        ioHeap->Alloc(ev->AsMdbEnv(), inBufSize, (void**)&buf);
+        if (buf) {
+          mStream_At = mStream_Buf = buf;
+
+          if (!inFrozen) {
+            // physical buffer end never moves:
+            mStream_WriteEnd = buf + inBufSize;
+          } else
+            mStream_WriteEnd = 0;  // no writing is allowed
+
+          if (inFrozen) {
+            // logical buffer end starts at Buf with no content:
+            mStream_ReadEnd = buf;
+            this->SetFileFrozen(inFrozen);
+          } else
+            mStream_ReadEnd = 0;  // no reading is allowed
+
+          this->SetFileActive(morkBool_kTrue);
+          this->SetFileIoOpen(morkBool_kTrue);
+        }
+        // Only a fully initialized stream receives the morkStream type tag.
+        if (ev->Good()) mNode_Derived = morkDerived_kStream;
+      }
+    } else
+      ev->NilPointerError();
+  }
+}
+
+/*public non-poly*/ void morkStream::CloseStream(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  // Release everything the constructor acquired: drop the strong reference
+  // to the content file, free the i/o buffer back to the slot heap, then
+  // close the underlying morkFile state.  Note buffered dirty bytes are
+  // NOT flushed here; callers must Flush() before closing if they care.
+  if (this->IsNode()) {
+    nsIMdbFile_SlotStrongFile((nsIMdbFile*)0, ev, &mStream_ContentFile);
+    nsIMdbHeap* heap = mFile_SlotHeap;
+    mork_u1* buf = mStream_Buf;
+    mStream_Buf = 0;  // null first so the destructor's assert holds
+
+    if (heap && buf) heap->Free(ev->AsMdbEnv(), buf);
+
+    this->CloseFile(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+#define morkStream_kSpacesPerIndent 1 /* one space per indent */
+#define morkStream_kMaxIndentDepth 70 /* max indent of 70 space bytes */
+static const char morkStream_kSpaces[] // next line to ease length perception
+ = " "
+ " ";
+// 123456789_123456789_123456789_123456789_123456789_123456789_123456789_
+// morkStream_kSpaces above must contain (at least) 70 spaces (ASCII 0x20)
+
+mork_size morkStream::PutIndent(morkEnv* ev, mork_count inDepth)
+// PutIndent() puts a linebreak, and then
+// "indents" by inDepth, and returns the line length after indentation.
+{
+  mork_size outLength = 0;
+  nsIMdbEnv* mev = ev->AsMdbEnv();
+
+  // BUGFIX: clamp the depth exactly as PutByteThenIndent() and
+  // PutStringThenIndent() already do.  morkStream_kSpaces holds only
+  // morkStream_kMaxIndentDepth (70) spaces, so writing a larger depth
+  // would read past the end of that static buffer.
+  if (inDepth > morkStream_kMaxIndentDepth)
+    inDepth = morkStream_kMaxIndentDepth;
+
+  if (ev->Good()) {
+    this->PutLineBreak(ev);
+    if (ev->Good()) {
+      outLength = inDepth;  // returned length counts only the indent
+      mdb_size bytesWritten;
+      if (inDepth) this->Write(mev, morkStream_kSpaces, inDepth, &bytesWritten);
+    }
+  }
+  return outLength;
+}
+
+mork_size morkStream::PutByteThenIndent(morkEnv* ev, int inByte,
+                                        mork_count inDepth)
+// PutByteThenIndent() puts the byte, then a linebreak, and then
+// "indents" by inDepth, and returns the line length after indentation.
+{
+  mork_size outLength = 0;
+  nsIMdbEnv* mev = ev->AsMdbEnv();
+
+  // morkStream_kSpaces holds only morkStream_kMaxIndentDepth spaces, so
+  // deeper indents are clamped before the Write() below.
+  if (inDepth > morkStream_kMaxIndentDepth)
+    inDepth = morkStream_kMaxIndentDepth;
+
+  this->Putc(ev, inByte);  // the byte is emitted before any ev check
+  if (ev->Good()) {
+    this->PutLineBreak(ev);
+    if (ev->Good()) {
+      outLength = inDepth;  // returned length excludes the byte and break
+      mdb_size bytesWritten;
+      if (inDepth) this->Write(mev, morkStream_kSpaces, inDepth, &bytesWritten);
+    }
+  }
+  return outLength;
+}
+
+mork_size morkStream::PutStringThenIndent(morkEnv* ev, const char* inString,
+                                          mork_count inDepth)
+// PutStringThenIndent() puts the string, then a linebreak, and then
+// "indents" by inDepth, and returns the line length after indentation.
+{
+  mork_size outLength = 0;
+  mdb_size bytesWritten;
+  nsIMdbEnv* mev = ev->AsMdbEnv();
+
+  // morkStream_kSpaces holds only morkStream_kMaxIndentDepth spaces, so
+  // deeper indents are clamped before being written below.
+  if (inDepth > morkStream_kMaxIndentDepth)
+    inDepth = morkStream_kMaxIndentDepth;
+
+  // A null string is silently treated as empty; the linebreak and indent
+  // are still emitted.
+  if (inString) {
+    mork_size length = strlen(inString);
+    if (length && ev->Good())  // any bytes to write?
+      this->Write(mev, inString, length, &bytesWritten);
+  }
+
+  if (ev->Good()) {
+    this->PutLineBreak(ev);
+    if (ev->Good()) {
+      outLength = inDepth;  // returned length counts only the indent
+      if (inDepth) this->Write(mev, morkStream_kSpaces, inDepth, &bytesWritten);
+    }
+  }
+  return outLength;
+}
+
+mork_size morkStream::PutString(morkEnv* ev, const char* inString) {
+  // PutString() returns the length of inString regardless of whether the
+  // bytes could actually be written; a null string writes nothing and
+  // returns 0.
+  if (!inString) return 0;
+
+  mork_size length = strlen(inString);
+  if (length && ev->Good())  // any bytes to write?
+  {
+    mdb_size bytesWritten;
+    this->Write(ev->AsMdbEnv(), inString, length, &bytesWritten);
+  }
+  return length;
+}
+
+mork_size morkStream::PutStringThenNewline(morkEnv* ev, const char* inString)
+// PutStringThenNewline() returns total number of bytes written.
+{
+  // A null or empty string writes nothing at all — not even the break.
+  if (!inString) return 0;
+
+  mork_size total = strlen(inString);
+  if (total && ev->Good())  // any bytes to write?
+  {
+    mdb_size bytesWritten;
+    this->Write(ev->AsMdbEnv(), inString, total, &bytesWritten);
+    if (ev->Good()) total += this->PutLineBreak(ev);
+  }
+  return total;
+}
+
+mork_size morkStream::PutByteThenNewline(morkEnv* ev, int inByte)
+// PutByteThenNewline() returns total number of bytes written.
+{
+  // The attempted byte always counts as one; the platform line break is
+  // appended (and counted) only if the byte went out cleanly.
+  this->Putc(ev, inByte);
+  if (!ev->Good()) return 1;
+
+  return 1 + this->PutLineBreak(ev);
+}
+
+mork_size morkStream::PutLineBreak(morkEnv* ev) {
+  // Emit the platform-native line ending and return its byte count.
+  // A flat #elif chain replaces the original nested #if blocks; exactly
+  // one of the three platform macros is expected to be defined.
+#if defined(MORK_MAC)
+  this->Putc(ev, mork_kCR);  // classic Mac: lone CR
+  return 1;
+#elif defined(MORK_WIN)
+  this->Putc(ev, mork_kCR);  // Windows: CR LF pair
+  this->Putc(ev, mork_kLF);
+  return 2;
+#elif defined(MORK_UNIX)
+  this->Putc(ev, mork_kLF);  // Unix: lone LF
+  return 1;
+#endif /* MORK_MAC / MORK_WIN / MORK_UNIX */
+}
+// ````` ````` ````` ````` ````` ````` ````` `````
+// public: // virtual morkFile methods
+
+NS_IMETHODIMP
+morkStream::Steal(nsIMdbEnv* mev, nsIMdbFile* ioThief)
+// Steal: tell this file to close any associated i/o stream in the file
+// system, because the file ioThief intends to reopen the file in order
+// to provide the MDB implementation with more exotic file access than is
+// offered by the nsIMdbFile alone. Presumably the thief knows enough
+// from Path() in order to know which file to reopen. If Steal() is
+// successful, this file should probably delegate all future calls to
+// the nsIMdbFile interface down to the thief files, so that even after
+// the file has been stolen, it can still be read, written, or forcibly
+// closed (by a call to CloseMdbObject()).
+{
+  // Not supported on buffered streams: record a stub-method error on the
+  // env and always fail.
+  MORK_USED_1(ioThief);
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  ev->StubMethodOnlyError();
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkStream::BecomeTrunk(nsIMdbEnv* mev)
+// If this file is a file version branch created by calling AcquireBud(),
+// BecomeTrunk() causes this file's content to replace the original
+// file's content, typically by assuming the original file's identity.
+{
+  // Not supported on buffered streams: record a stub-method error on the
+  // env and always fail.
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  ev->StubMethodOnlyError();
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkStream::AcquireBud(nsIMdbEnv* mev, nsIMdbHeap* ioHeap, nsIMdbFile** acqBud)
+// AcquireBud() starts a new "branch" version of the file, empty of content,
+// so that a new version of the file can be written. This new file
+// can later be told to BecomeTrunk() the original file, so the branch
+// created by budding the file will replace the original file. Some
+// file subclasses might initially take the unsafe but expedient
+// approach of simply truncating this file down to zero length, and
+// then returning the same morkFile pointer as this, with an extra
+// reference count increment. Note that the caller of AcquireBud() is
+// expected to eventually call CutStrongRef() on the returned file
+// in order to release the strong reference. High quality versions
+// of morkFile subclasses will create entirely new files which later
+// are renamed to become the old file, so that better transactional
+// behavior is exhibited by the file, so crashes protect old files.
+// Note that AcquireBud() is an illegal operation on readonly files.
+{
+  // Unimplemented stub: always stores null in *acqBud and returns
+  // NS_ERROR_NOT_IMPLEMENTED, recording either a stub-method error or a
+  // file-down error on the env depending on the stream's state.
+  MORK_USED_1(ioHeap);
+  morkFile* outFile = 0;  // stays null; only ever copied to *acqBud
+  nsIMdbFile* file = mStream_ContentFile;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (this->IsOpenAndActiveFile() && file) {
+    // figure out how this interacts with buffering and mStream_WriteEnd:
+    ev->StubMethodOnlyError();
+  } else
+    this->NewFileDownError(ev);
+
+  *acqBud = outFile;
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+mork_pos morkStream::Length(morkEnv* ev) const  // eof
+// Return the logical length of the stream: the content file's eof,
+// extended by any dirty bytes still sitting in the write buffer past
+// that eof.  Frozen (readonly) streams report the content file's eof
+// unchanged.  Returns 0 and records an error on ev on failure.
+{
+  mork_pos outPos = 0;
+
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenAndActiveFile() && file) {
+    mork_pos contentEof = 0;
+    file->Eof(ev->AsMdbEnv(), &contentEof);
+    if (ev->Good()) {
+      if (mStream_WriteEnd)  // this stream supports writing?
+      {
+        // the local buffer might have buffered content past content eof
+        if (ev->Good())  // no error happened during Length() above?
+        {
+          mork_u1* at = mStream_At;
+          mork_u1* buf = mStream_Buf;
+          if (at >= buf)  // expected cursor order?
+          {
+            // Logical end = base file position of the buffer plus the
+            // number of bytes currently buffered ahead of it.
+            mork_pos localContent = mStream_BufPos + (at - buf);
+            if (localContent > contentEof)  // buffered past eof?
+              contentEof = localContent;    // return new logical eof
+
+            outPos = contentEof;
+          } else
+            this->NewBadCursorOrderError(ev);
+        }
+      } else
+        outPos = contentEof;  // frozen files get length from content file
+    }
+  } else
+    this->NewFileDownError(ev);
+
+  return outPos;
+}
+
+// ````` error reporting helpers `````
+// Each helper records one distinct, fixed diagnostic message on the env;
+// none of them throws or returns a status — callers check ev afterwards.
+void morkStream::NewBadCursorSlotsError(morkEnv* ev) const {
+  ev->NewError("bad stream cursor slots");
+}
+
+void morkStream::NewNullStreamBufferError(morkEnv* ev) const {
+  ev->NewError("null stream buffer");
+}
+
+void morkStream::NewCantReadSinkError(morkEnv* ev) const {
+  ev->NewError("can't read stream sink");
+}
+
+void morkStream::NewCantWriteSourceError(morkEnv* ev) const {
+  ev->NewError("can't write stream source");
+}
+
+void morkStream::NewPosBeyondEofError(morkEnv* ev) const {
+  ev->NewError("stream pos beyond eof");
+}
+
+void morkStream::NewBadCursorOrderError(morkEnv* ev) const {
+  ev->NewError("bad stream cursor order");
+}
+
+NS_IMETHODIMP
+morkStream::Tell(nsIMdbEnv* mdbev, mork_pos* aOutPos) const {
+  // Report the logical stream position: the buffer's base file position
+  // plus the cursor offset within the buffer, for whichever mode (read
+  // or write) this stream was built in.
+  nsresult rv = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mdbev);
+
+  NS_ENSURE_ARG_POINTER(aOutPos);
+  // BUGFIX: always give the out-parameter a defined value, matching
+  // Seek(); previously the error branches below left *aOutPos holding
+  // whatever garbage the caller's variable contained.
+  *aOutPos = 0;
+
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenAndActiveFile() && file) {
+    mork_u1* buf = mStream_Buf;
+    mork_u1* at = mStream_At;
+
+    mork_u1* readEnd = mStream_ReadEnd;    // nonzero only if readonly
+    mork_u1* writeEnd = mStream_WriteEnd;  // nonzero only if writeonly
+
+    if (writeEnd) {
+      if (buf && at >= buf && at <= writeEnd) {
+        *aOutPos = mStream_BufPos + (at - buf);
+      } else
+        this->NewBadCursorOrderError(ev);
+    } else if (readEnd) {
+      if (buf && at >= buf && at <= readEnd) {
+        *aOutPos = mStream_BufPos + (at - buf);
+      } else
+        this->NewBadCursorOrderError(ev);
+    }
+  } else
+    this->NewFileDownError(ev);
+
+  return rv;
+}
+
+NS_IMETHODIMP
+morkStream::Read(nsIMdbEnv* mdbev, void* outBuf, mork_size inSize,
+                 mork_size* aOutSize) {
+  NS_ENSURE_ARG_POINTER(aOutSize);
+  // BUGFIX: *aOutSize is accumulated below with += and tested with
+  // !*aOutSize, so it must start from zero; previously it inherited
+  // whatever uninitialized value the caller's variable held.
+  *aOutSize = 0;
+
+  // First we satisfy the request from buffered bytes, if any. Then
+  // if additional bytes are needed, we satisfy these by direct reads
+  // from the content file without any local buffering (but we still need
+  // to adjust the buffer position to reflect the current i/o point).
+
+  morkEnv* ev = morkEnv::FromMdbEnv(mdbev);
+  nsresult rv = NS_OK;
+
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenAndActiveFile() && file) {
+    mork_u1* end = mStream_ReadEnd;  // byte after last buffered byte
+    if (end)                         // file is open for read access?
+    {
+      if (inSize)  // caller wants any output?
+      {
+        mork_u1* sink = (mork_u1*)outBuf;  // where we plan to write bytes
+        if (sink)                          // caller passed good buffer address?
+        {
+          mork_u1* at = mStream_At;
+          mork_u1* buf = mStream_Buf;
+          if (at >= buf && at <= end)  // expected cursor order?
+          {
+            mork_num remaining = (mork_num)(end - at);  // bytes left in buffer
+
+            mork_num quantum = inSize;  // number of bytes to copy
+            if (quantum > remaining)    // more than buffer content?
+              quantum = remaining;      // restrict to buffered bytes
+
+            if (quantum)  // any bytes left in the buffer?
+            {
+              MORK_MEMCPY(sink, at, quantum);  // from buffer bytes
+
+              at += quantum;  // advance past read bytes
+              mStream_At = at;
+              *aOutSize += quantum;  // this much copied so far
+
+              sink += quantum;    // in case we need to copy more
+              inSize -= quantum;  // filled this much of request
+              mStream_HitEof = morkBool_kFalse;
+            }
+
+            if (inSize)  // we still need to read more content?
+            {
+              // We need to read more bytes directly from the
+              // content file, without local buffering. We have
+              // exhausted the local buffer, so we need to show
+              // it is now empty, and adjust the current buf pos.
+
+              mork_num posDelta = (mork_num)(at - buf);  // old buf content
+              mStream_BufPos += posDelta;                // past now empty buf
+
+              mStream_At = mStream_ReadEnd = buf;  // empty buffer
+
+              mork_num actual = 0;
+              nsIMdbEnv* menv = ev->AsMdbEnv();
+              file->Get(menv, sink, inSize, mStream_BufPos, &actual);
+              if (ev->Good())  // no read error?
+              {
+                if (actual) {
+                  *aOutSize += actual;
+                  mStream_BufPos += actual;
+                  mStream_HitEof = morkBool_kFalse;
+                } else if (!*aOutSize)
+                  // nothing buffered and nothing read: genuine eof
+                  mStream_HitEof = morkBool_kTrue;
+              }
+            }
+          } else
+            this->NewBadCursorOrderError(ev);
+        } else
+          this->NewNullStreamBufferError(ev);
+      }
+    } else
+      this->NewCantReadSinkError(ev);
+  } else
+    this->NewFileDownError(ev);
+
+  if (ev->Bad()) *aOutSize = 0;
+
+  return rv;
+}
+
+NS_IMETHODIMP
+morkStream::Seek(nsIMdbEnv* mdbev, mork_pos inPos, mork_pos* aOutPos) {
+  // Reposition the stream to absolute file offset inPos.  Writable
+  // streams flush dirty buffered bytes first; readonly streams simply
+  // empty the buffer so the next read refills from the new position.
+  // Positions past the content file's eof record an error on ev.
+  NS_ENSURE_ARG_POINTER(aOutPos);
+  morkEnv* ev = morkEnv::FromMdbEnv(mdbev);
+  *aOutPos = 0;
+  nsresult rv = NS_OK;
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenOrClosingNode() && this->FileActive() && file) {
+    mork_u1* at = mStream_At;              // current position in buffer
+    mork_u1* buf = mStream_Buf;            // beginning of buffer
+    mork_u1* readEnd = mStream_ReadEnd;    // nonzero only if readonly
+    mork_u1* writeEnd = mStream_WriteEnd;  // nonzero only if writeonly
+
+    if (writeEnd)  // file is mutable/writeonly?
+    {
+      if (mStream_Dirty)  // need to commit buffer changes?
+        this->Flush(mdbev);
+
+      if (ev->Good())  // no errors during flush or earlier?
+      {
+        // NOTE(review): `at` was captured before the Flush() above, which
+        // resets mStream_At to buf after spilling a dirty buffer — so this
+        // compares a stale local.  Looks like it assumes the buffer was
+        // already empty whenever Seek() is called on a dirty stream;
+        // confirm against callers before changing.
+        if (at == buf)  // expected post flush cursor value?
+        {
+          if (mStream_BufPos != inPos)  // need to change pos?
+          {
+            mork_pos eof = 0;
+            nsIMdbEnv* menv = ev->AsMdbEnv();
+            file->Eof(menv, &eof);
+            if (ev->Good())  // no errors getting length?
+            {
+              if (inPos <= eof)  // acceptable new position?
+              {
+                mStream_BufPos = inPos;  // new stream position
+                *aOutPos = inPos;
+              } else
+                this->NewPosBeyondEofError(ev);
+            }
+          }
+        } else
+          this->NewBadCursorOrderError(ev);
+      }
+    } else if (readEnd)  // file is frozen/readonly?
+    {
+      if (at >= buf && at <= readEnd)  // expected cursor order?
+      {
+        mork_pos eof = 0;
+        nsIMdbEnv* menv = ev->AsMdbEnv();
+        file->Eof(menv, &eof);
+        if (ev->Good())  // no errors getting length?
+        {
+          if (inPos <= eof)  // acceptable new position?
+          {
+            *aOutPos = inPos;
+            mStream_BufPos = inPos;              // new stream position
+            mStream_At = mStream_ReadEnd = buf;  // empty buffer
+            if (inPos == eof)                    // notice eof reached?
+              mStream_HitEof = morkBool_kTrue;
+          } else
+            this->NewPosBeyondEofError(ev);
+        }
+      } else
+        this->NewBadCursorOrderError(ev);
+    }
+
+  } else
+    this->NewFileDownError(ev);
+
+  return rv;
+}
+
+NS_IMETHODIMP
+morkStream::Write(nsIMdbEnv* menv, const void* inBuf, mork_size inSize,
+                  mork_size* aOutSize) {
+  // Buffered write: fill the local buffer first; when it is full, flush
+  // it and either buffer the remainder (if it fits) or write the rest
+  // straight to the content file.  *aOutSize receives the number of
+  // bytes accepted; the return value is ev's accumulated error state.
+  mork_num outActual = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(menv);
+
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenActiveAndMutableFile() && file) {
+    mork_u1* end = mStream_WriteEnd;  // byte after last buffered byte
+    if (end)                          // file is open for write access?
+    {
+      if (inSize)  // caller provided any input?
+      {
+        const mork_u1* source = (const mork_u1*)inBuf;  // from where
+        if (source)  // caller passed good buffer address?
+        {
+          mork_u1* at = mStream_At;
+          mork_u1* buf = mStream_Buf;
+          if (at >= buf && at <= end)  // expected cursor order?
+          {
+            mork_num space = (mork_num)(end - at);  // space left in buffer
+
+            mork_num quantum = inSize;  // number of bytes to write
+            if (quantum > space)        // more than buffer size?
+              quantum = space;          // restrict to avail space
+
+            if (quantum)  // any space left in the buffer?
+            {
+              mStream_Dirty = morkBool_kTrue;   // to ensure later flush
+              MORK_MEMCPY(at, source, quantum);  // into buffer
+
+              mStream_At += quantum;  // advance past written bytes
+              outActual += quantum;   // this much written so far
+
+              source += quantum;  // in case we need to write more
+              inSize -= quantum;  // filled this much of request
+            }
+
+            if (inSize)  // we still need to write more content?
+            {
+              // We need to write more bytes directly to the
+              // content file, without local buffering. We have
+              // exhausted the local buffer, so we need to flush
+              // it and empty it, and adjust the current buf pos.
+              // After flushing, if the rest of the write fits
+              // inside the buffer, we will put bytes into the
+              // buffer rather than write them to content file.
+
+              if (mStream_Dirty)
+                this->Flush(menv);  // will update mStream_BufPos
+
+              // Re-read the cursor: Flush() resets it to buf on success.
+              at = mStream_At;
+              if (at < buf || at > end)  // bad cursor?
+                this->NewBadCursorOrderError(ev);
+
+              if (ev->Good())  // no errors?
+              {
+                space = (mork_num)(end - at);  // space left in buffer
+                if (space > inSize)            // write to buffer?
+                {
+                  mStream_Dirty = morkBool_kTrue;   // ensure flush
+                  MORK_MEMCPY(at, source, inSize);  // copy
+
+                  mStream_At += inSize;  // past written bytes
+                  outActual += inSize;   // this much written
+                } else  // directly to content file instead
+                {
+                  // file->Seek(ev, mStream_BufPos); // set pos
+                  // if ( ev->Good() ) // no seek error?
+                  // {
+                  // }
+
+                  mork_num actual = 0;
+                  file->Put(menv, source, inSize, mStream_BufPos, &actual);
+                  if (ev->Good())  // no write error?
+                  {
+                    outActual += actual;
+                    mStream_BufPos += actual;
+                  }
+                }
+              }
+            }
+          } else
+            this->NewBadCursorOrderError(ev);
+        } else
+          this->NewNullStreamBufferError(ev);
+      }
+    } else
+      this->NewCantWriteSourceError(ev);
+  } else
+    this->NewFileDownError(ev);
+
+  // Report zero bytes written whenever any error was recorded on ev.
+  if (ev->Bad()) outActual = 0;
+
+  *aOutSize = outActual;
+  return ev->AsErr();
+}
+
+NS_IMETHODIMP
+morkStream::Flush(nsIMdbEnv* ev) {
+  // Spill any dirty buffered bytes to the content file, then ask the
+  // content file itself to flush.  Note the local naming here is inverted
+  // relative to the rest of this file: `ev` is the nsIMdbEnv and `mev`
+  // is the morkEnv.
+  morkEnv* mev = morkEnv::FromMdbEnv(ev);
+  nsresult rv = NS_ERROR_FAILURE;
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenOrClosingNode() && this->FileActive() && file) {
+    // spill_buf() writes buffered bytes out and clears mStream_Dirty.
+    if (mStream_Dirty) this->spill_buf(mev);
+
+    rv = file->Flush(ev);
+  } else
+    this->NewFileDownError(mev);
+  return rv;
+}
+
+// ````` ````` ````` ````` ````` ````` ````` `````
+// protected: // protected non-poly morkStream methods (for char io)
+
+int morkStream::fill_getc(morkEnv* ev) {
+  // Slow path for Getc(): the buffer is exhausted, so refill it from the
+  // content file and return the first refilled byte, or EOF if nothing
+  // could be read (or on any error).
+  int c = EOF;
+
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenAndActiveFile() && file) {
+    mork_u1* buf = mStream_Buf;
+    mork_u1* end = mStream_ReadEnd;  // beyond buf after earlier read
+    if (end > buf)                   // any earlier read bytes buffered?
+    {
+      // Account for the bytes consumed from the previous fill before
+      // reading the next chunk at the advanced position.
+      mStream_BufPos += (end - buf);  // advance past old read
+    }
+
+    if (ev->Good())  // no errors yet?
+    {
+      // file->Seek(ev, mStream_BufPos); // set file pos
+      // if ( ev->Good() ) // no seek error?
+      // {
+      // }
+
+      nsIMdbEnv* menv = ev->AsMdbEnv();
+      mork_num actual = 0;
+      file->Get(menv, buf, mStream_BufSize, mStream_BufPos, &actual);
+      if (ev->Good())  // no read errors?
+      {
+        // Defensive clamp: never trust the file to report more bytes
+        // than the buffer can hold.
+        if (actual > mStream_BufSize)  // more than asked for??
+          actual = mStream_BufSize;
+
+        mStream_At = buf;
+        mStream_ReadEnd = buf + actual;
+        if (actual)  // any bytes actually read?
+        {
+          c = *mStream_At++;  // return first byte from buffer
+          mStream_HitEof = morkBool_kFalse;
+        } else
+          mStream_HitEof = morkBool_kTrue;
+      }
+    }
+  } else
+    this->NewFileDownError(ev);
+
+  return c;
+}
+
+void morkStream::spill_putc(morkEnv* ev, int c) {
+  // Slow path for Putc(): the write buffer is full, so flush it to the
+  // content file and retry the byte now that room should exist.
+  this->spill_buf(ev);
+  if (ev->Good() && mStream_At < mStream_WriteEnd) this->Putc(ev, c);
+}
+
+void morkStream::spill_buf(morkEnv* ev)  // spill/flush from buffer to file
+// Write all dirty buffered bytes to the content file at mStream_BufPos,
+// then reset the cursor to the buffer start and clear the dirty flag.
+// Spilling a clean buffer is a no-op (and a warning in debug builds).
+{
+  nsIMdbFile* file = mStream_ContentFile;
+  if (this->IsOpenOrClosingNode() && this->FileActive() && file) {
+    mork_u1* buf = mStream_Buf;
+    if (mStream_Dirty) {
+      mork_u1* at = mStream_At;
+      if (at >= buf && at <= mStream_WriteEnd)  // order?
+      {
+        mork_num count = (mork_num)(at - buf);  // bytes buffered
+        if (count)                              // anything to write to the string?
+        {
+          // Defensive clamp: the cursor should never be more than
+          // BufSize past the start; if it is, repair the slots and
+          // record an error before writing.
+          if (count > mStream_BufSize)  // no more than max?
+          {
+            count = mStream_BufSize;
+            mStream_WriteEnd = buf + mStream_BufSize;
+            this->NewBadCursorSlotsError(ev);
+          }
+          if (ev->Good()) {
+            // file->Seek(ev, mStream_BufPos);
+            // if ( ev->Good() )
+            // {
+            // }
+            nsIMdbEnv* menv = ev->AsMdbEnv();
+            mork_num actual = 0;
+
+            file->Put(menv, buf, count, mStream_BufPos, &actual);
+            if (ev->Good()) {
+              mStream_BufPos += actual;        // past bytes written
+              mStream_At = buf;                // reset buffer cursor
+              mStream_Dirty = morkBool_kFalse;
+            }
+          }
+        }
+      } else
+        this->NewBadCursorOrderError(ev);
+    } else {
+#ifdef MORK_DEBUG
+      ev->NewWarning("stream:spill:not:dirty");
+#endif /*MORK_DEBUG*/
+    }
+  } else
+    this->NewFileDownError(ev);
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkStream.h b/comm/mailnews/db/mork/morkStream.h
new file mode 100644
index 0000000000..be7a40c1dc
--- /dev/null
+++ b/comm/mailnews/db/mork/morkStream.h
@@ -0,0 +1,258 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _MORKSTREAM_
+#define _MORKSTREAM_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKFILE_
+# include "morkFile.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*=============================================================================
+ * morkStream: buffered file i/o
+ */
+
+/*| morkStream exists to define an morkFile subclass that provides buffered
+**| i/o for an underlying content file. Naturally this arrangement only makes
+**| sense when the underlying content file is itself not efficiently buffered
+**| (especially for character by character i/o).
+**|
+**|| morkStream is intended for either reading use or writing use, but not
+**| both simultaneously or interleaved. Pick one when the stream is created
+**| and don't change your mind. This restriction is intended to avoid obscure
+**| and complex bugs that might arise from interleaved reads and writes -- so
+**| just don't do it. A stream is either a sink or a source, but not both.
+**|
+**|| (When the underlying content file is intended to support both reading and
+**| writing, a developer might use two instances of morkStream where one is for
+**| reading and the other is for writing. In this case, a developer must take
+**| care to keep the two streams in sync because each will maintain a separate
+**| buffer representing a cache consistency problem for the other. A simple
+**| approach is to invalidate the buffer of one when one uses the other, with
+**| the assumption that closely mixed reading and writing is not expected, so
+**| that little cost is associated with changing read/write streaming modes.)
+**|
+**|| Exactly one of mStream_ReadEnd or mStream_WriteEnd must be a null pointer,
+**| and this will cause the right thing to occur when inlines use them, because
+**| mStream_At < mStream_WriteEnd (for example) will always be false and the
+**| else branch of the statement calls a function that raises an appropriate
+**| error to complain about either reading a sink or writing a source.
+**|
+**|| morkStream is a direct clone of ab_Stream from Communicator 4.5's
+**| address book code, which in turn was based on the stream class in the
+**| public domain Mithril programming language.
+|*/
+
+#define morkStream_kPrintBufSize /*i*/ 512 /* buffer size used by printf() */
+
+#define morkStream_kMinBufSize /*i*/ 512 /* buffer no fewer bytes */
+#define morkStream_kMaxBufSize /*i*/ (32 * 1024) /* buffer no more bytes */
+
+#define morkDerived_kStream /*i*/ 0x7A74 /* ascii 'zt' */
+
+class morkStream /*d*/ : public morkFile { /* from Mithril's AgStream class */
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected:  // protected morkStream members
+  mork_u1* mStream_At;        // pointer into mStream_Buf
+  mork_u1* mStream_ReadEnd;   // null or one byte past last readable byte
+  mork_u1* mStream_WriteEnd;  // null or mStream_Buf + mStream_BufSize
+
+  nsIMdbFile* mStream_ContentFile;  // where content is read and written
+
+  mork_u1* mStream_Buf;       // dynamically allocated memory to buffer io
+  mork_size mStream_BufSize;  // requested buf size (fixed by min and max)
+  mork_pos mStream_BufPos;    // logical position of byte at mStream_Buf
+  mork_bool mStream_Dirty;    // does the buffer need to be flushed?
+  mork_bool mStream_HitEof;   // has eof been reached? (only frozen streams)
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseStream() only if open
+  virtual ~morkStream();      // assert that CloseStream() executed earlier
+
+ public:  // morkStream construction & destruction
+  // inFrozen selects the stream's single mode for its whole lifetime:
+  // true = read-only source, false = write-only sink (see class comment).
+  morkStream(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+             nsIMdbFile* ioContentFile, mork_size inBufSize,
+             mork_bool inFrozen);
+  void CloseStream(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkStream(const morkStream& other);
+  morkStream& operator=(const morkStream& other);
+
+ public:  // dynamic type identification
+  mork_bool IsStream() const {
+    return IsNode() && mNode_Derived == morkDerived_kStream;
+  }
+  // } ===== end morkNode methods =====
+
+ public:  // typing
+  void NonStreamTypeError(morkEnv* ev);
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // virtual morkFile methods
+  NS_IMETHOD Steal(nsIMdbEnv* ev, nsIMdbFile* ioThief) override;
+  // Steal: tell this file to close any associated i/o stream in the file
+  // system, because the file ioThief intends to reopen the file in order
+  // to provide the MDB implementation with more exotic file access than is
+  // offered by the nsIMdbFile alone. Presumably the thief knows enough
+  // from Path() in order to know which file to reopen. If Steal() is
+  // successful, this file should probably delegate all future calls to
+  // the nsIMdbFile interface down to the thief files, so that even after
+  // the file has been stolen, it can still be read, written, or forcibly
+  // closed (by a call to CloseMdbObject()).
+
+  NS_IMETHOD BecomeTrunk(nsIMdbEnv* ev) override;
+  // If this file is a file version branch created by calling AcquireBud(),
+  // BecomeTrunk() causes this file's content to replace the original
+  // file's content, typically by assuming the original file's identity.
+
+  NS_IMETHOD AcquireBud(nsIMdbEnv* ev, nsIMdbHeap* ioHeap,
+                        nsIMdbFile** acqBud) override;
+  // AcquireBud() starts a new "branch" version of the file, empty of content,
+  // so that a new version of the file can be written. This new file
+  // can later be told to BecomeTrunk() the original file, so the branch
+  // created by budding the file will replace the original file. Some
+  // file subclasses might initially take the unsafe but expedient
+  // approach of simply truncating this file down to zero length, and
+  // then returning the same morkFile pointer as this, with an extra
+  // reference count increment. Note that the caller of AcquireBud() is
+  // expected to eventually call CutStrongRef() on the returned file
+  // in order to release the strong reference. High quality versions
+  // of morkFile subclasses will create entirely new files which later
+  // are renamed to become the old file, so that better transactional
+  // behavior is exhibited by the file, so crashes protect old files.
+  // Note that AcquireBud() is an illegal operation on readonly files.
+
+  virtual mork_pos Length(morkEnv* ev) const override;  // eof
+  NS_IMETHOD Tell(nsIMdbEnv* ev, mork_pos* aOutPos) const override;
+  NS_IMETHOD Read(nsIMdbEnv* ev, void* outBuf, mork_size inSize,
+                  mork_size* aOutCount) override;
+  NS_IMETHOD Seek(nsIMdbEnv* ev, mork_pos inPos, mork_pos* aOutPos) override;
+  NS_IMETHOD Write(nsIMdbEnv* ev, const void* inBuf, mork_size inSize,
+                   mork_size* aOutCount) override;
+  NS_IMETHOD Flush(nsIMdbEnv* ev) override;
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ protected:  // protected non-poly morkStream methods (for char io)
+  int fill_getc(morkEnv* ev);           // refill buffer, return next byte/EOF
+  void spill_putc(morkEnv* ev, int c);  // flush full buffer, retry byte
+  void spill_buf(morkEnv* ev);          // spill/flush from buffer to file
+
+  // ````` ````` ````` ````` ````` ````` ````` `````
+ public:  // public non-poly morkStream methods
+  void NewBadCursorSlotsError(morkEnv* ev) const;
+  void NewBadCursorOrderError(morkEnv* ev) const;
+  void NewNullStreamBufferError(morkEnv* ev) const;
+  void NewCantReadSinkError(morkEnv* ev) const;
+  void NewCantWriteSourceError(morkEnv* ev) const;
+  void NewPosBeyondEofError(morkEnv* ev) const;
+
+  nsIMdbFile* GetStreamContentFile() const { return mStream_ContentFile; }
+  mork_size GetStreamBufferSize() const { return mStream_BufSize; }
+
+  mork_size PutIndent(morkEnv* ev, mork_count inDepth);
+  // PutIndent() puts a linebreak, and then
+  // "indents" by inDepth, and returns the line length after indentation.
+
+  mork_size PutByteThenIndent(morkEnv* ev, int inByte, mork_count inDepth);
+  // PutByteThenIndent() puts the byte, then a linebreak, and then
+  // "indents" by inDepth, and returns the line length after indentation.
+
+  mork_size PutStringThenIndent(morkEnv* ev, const char* inString,
+                                mork_count inDepth);
+  // PutStringThenIndent() puts the string, then a linebreak, and then
+  // "indents" by inDepth, and returns the line length after indentation.
+
+  mork_size PutString(morkEnv* ev, const char* inString);
+  // PutString() returns the length of the string written.
+
+  mork_size PutStringThenNewline(morkEnv* ev, const char* inString);
+  // PutStringThenNewline() returns total number of bytes written.
+
+  mork_size PutByteThenNewline(morkEnv* ev, int inByte);
+  // PutByteThenNewline() returns total number of bytes written.
+
+  // ````` ````` stdio type methods ````` `````
+  // Single-byte pushback: silently ignored when the cursor is already at
+  // the buffer start or c is not a positive byte value.
+  void Ungetc(int c) /*i*/
+  {
+    if (mStream_At > mStream_Buf && c > 0) *--mStream_At = (mork_u1)c;
+  }
+
+  // Note Getc() returns EOF consistently after any fill_getc() error occurs.
+  int Getc(morkEnv* ev) /*i*/
+  {
+    return (mStream_At < mStream_ReadEnd) ? *mStream_At++ : fill_getc(ev);
+  }
+
+  // Buffered byte write; marks the stream dirty and spills to the content
+  // file only when the buffer is full.
+  void Putc(morkEnv* ev, int c) /*i*/
+  {
+    mStream_Dirty = morkBool_kTrue;
+    if (mStream_At < mStream_WriteEnd)
+      *mStream_At++ = (mork_u1)c;
+    else
+      spill_putc(ev, c);
+  }
+
+  mork_size PutLineBreak(morkEnv* ev);
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakStream(morkStream* me, morkEnv* ev, morkStream** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongStream(morkStream* me, morkEnv* ev,
+                               morkStream** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKSTREAM_ */
diff --git a/comm/mailnews/db/mork/morkTable.cpp b/comm/mailnews/db/mork/morkTable.cpp
new file mode 100644
index 0000000000..ee27ff1328
--- /dev/null
+++ b/comm/mailnews/db/mork/morkTable.cpp
@@ -0,0 +1,1415 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+#ifndef _MORKARRAY_
+# include "morkArray.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKTABLEROWCURSOR_
+# include "morkTableRowCursor.h"
+#endif
+
+#ifndef _MORKROWOBJECT_
+# include "morkRowObject.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkTable::CloseMorkNode(
+    morkEnv* ev) /*i*/  // CloseTable() only if open
+{
+  // Idempotent close: only an open node transitions closing -> shut, so
+  // repeated calls (e.g. from the destructor) are harmless no-ops.
+  if (this->IsOpenNode()) {
+    morkObject::CloseMorkNode(ev);  // give base class a chance.
+    this->MarkClosing();
+    this->CloseTable(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkTable::~morkTable() /*i*/  // assert CloseTable() executed earlier
+{
+  // Defensive close using the env captured at construction, then assert
+  // that CloseTable() already dropped the store/rowspace back-pointers.
+  CloseMorkNode(mMorkEnv);
+  MORK_ASSERT(this->IsShutNode());
+  MORK_ASSERT(mTable_Store == 0);
+  MORK_ASSERT(mTable_RowSpace == 0);
+}
+
+/*public non-poly*/
+// Construct a table bound to ioStore/ioRowSpace. inKind must be nonzero;
+// inOptionalMetaRowOid may be nil, in which case the meta row oid is left
+// unbound (scope 0, minus-one rid) until GetMetaRow() assigns one.
+// NOTE(review): mTable_Store and mTable_RowSpace are held as weak
+// back-pointers (zeroed in CloseTable without a release) — confirm against
+// the ownership model in morkTable.h.
+morkTable::morkTable(
+    morkEnv* ev, /*i*/
+    const morkUsage& inUsage, nsIMdbHeap* ioHeap, morkStore* ioStore,
+    nsIMdbHeap* ioSlotHeap, morkRowSpace* ioRowSpace,
+    const mdbOid* inOptionalMetaRowOid,  // can be nil to avoid specifying
+    mork_tid inTid, mork_kind inKind, mork_bool inMustBeUnique)
+    : morkObject(ev, inUsage, ioHeap, (mork_color)inTid, (morkHandle*)0),
+      mTable_Store(0),
+      mTable_RowSpace(0),
+      mTable_MetaRow(0)
+
+      ,
+      mTable_RowMap(0)
+      // , mTable_RowMap(ev, morkUsage::kMember, (nsIMdbHeap*) 0, ioSlotHeap,
+      // morkTable_kStartRowMapSlotCount)
+      ,
+      mTable_RowArray(ev, morkUsage::kMember, (nsIMdbHeap*)0,
+                      morkTable_kStartRowArraySize, ioSlotHeap)
+
+      ,
+      mTable_ChangeList(),
+      mTable_ChangesCount(0),
+      mTable_ChangesMax(3)  // any very small number greater than zero
+
+      ,
+      mTable_Kind(inKind)
+
+      ,
+      mTable_Flags(0),
+      mTable_Priority(morkPriority_kLo)  // NOT high priority
+      ,
+      mTable_GcUses(0),
+      mTable_Pad(0) {
+  // Not yet linked into any list of tables.
+  this->mLink_Next = 0;
+  this->mLink_Prev = 0;
+
+  if (ev->Good()) {
+    if (ioStore && ioSlotHeap && ioRowSpace) {
+      if (inKind) {
+        if (inMustBeUnique) this->SetTableUnique();
+        mTable_Store = ioStore;
+        mTable_RowSpace = ioRowSpace;
+        if (inOptionalMetaRowOid)
+          mTable_MetaRowOid = *inOptionalMetaRowOid;
+        else {
+          // Leave the meta row oid unbound until first GetMetaRow().
+          mTable_MetaRowOid.mOid_Scope = 0;
+          mTable_MetaRowOid.mOid_Id = morkRow_kMinusOneRid;
+        }
+        if (ev->Good()) {
+          if (this->MaybeDirtySpaceStoreAndTable())
+            this->SetTableRewrite();  // everything is dirty
+
+          // Only mark the node a well-formed table on success.
+          mNode_Derived = morkDerived_kTable;
+        }
+        this->MaybeDirtySpaceStoreAndTable();  // new table might dirty store
+      } else
+        ioRowSpace->ZeroKindError(ev);
+    } else
+      ev->NilPointerError();
+  }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkTable, morkObject, nsIMdbTable)
+
+/*public non-poly*/ void morkTable::CloseTable(
+    morkEnv* ev) /*i*/  // called by CloseMorkNode();
+{
+  // Release owned members and zero the weak store/rowspace back-pointers,
+  // then mark this node shut so the destructor's asserts hold.
+  if (this->IsNode()) {
+    // Dropping the strong ref by slotting nil releases mTable_RowMap.
+    morkRowMap::SlotStrongRowMap((morkRowMap*)0, ev, &mTable_RowMap);
+    // mTable_RowMap.CloseMorkNode(ev);
+    mTable_RowArray.CloseMorkNode(ev);
+    mTable_Store = 0;
+    mTable_RowSpace = 0;
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// { ===== begin nsIMdbCollection methods =====
+
+// { ----- begin attribute methods -----
+NS_IMETHODIMP
+morkTable::GetSeed(nsIMdbEnv* mev,
+                   mdb_seed* outSeed)  // member change count
+{
+  // Return the table's membership change count (the row array's seed),
+  // which cursors compare against to detect concurrent modification.
+  // Fix: validate outSeed like the sibling GetCount()/GetPort() methods
+  // do, instead of dereferencing a possibly-nil out-pointer.
+  NS_ENSURE_ARG_POINTER(outSeed);
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    *outSeed = mTable_RowArray.mArray_Seed;
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Return the number of member rows currently in the table.
+morkTable::GetCount(nsIMdbEnv* mev,
+                    mdb_count* outCount)  // member count
+{
+  NS_ENSURE_ARG_POINTER(outCount);
+  *outCount = mTable_RowArray.mArray_Fill;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+// Return the store containing this table as an nsIMdbPort.
+// NOTE(review): acqPort is handed out without an AddRef despite the
+// "acq" naming convention — confirm callers expect a borrowed pointer.
+morkTable::GetPort(nsIMdbEnv* mev,
+                   nsIMdbPort** acqPort)  // collection container
+{
+  (void)morkEnv::FromMdbEnv(mev);
+  NS_ENSURE_ARG_POINTER(acqPort);
+  *acqPort = mTable_Store;
+  return NS_OK;
+}
+// } ----- end attribute methods -----
+
+// { ----- begin cursor methods -----
+NS_IMETHODIMP
+// nsIMdbCollection cursor entry point; delegates to the row-cursor
+// factory below with the same starting position.
+morkTable::GetCursor(  // make a cursor starting iter at inMemberPos
+    nsIMdbEnv* mev,    // context
+    mdb_pos inMemberPos,       // zero-based ordinal pos of member in collection
+    nsIMdbCursor** acqCursor)  // acquire new cursor instance
+{
+  return this->GetTableRowCursor(mev, inMemberPos,
+                                 (nsIMdbTableRowCursor**)acqCursor);
+}
+// } ----- end cursor methods -----
+
+// { ----- begin ID methods -----
+NS_IMETHODIMP
+// Report this table's identity (rowspace scope + table id).
+morkTable::GetOid(nsIMdbEnv* mev,
+                  mdbOid* outOid)  // read object identity
+{
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  GetTableOid(ev, outOid);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+// Unimplemented nsIMdbCollection contract; kept as an assert + error.
+morkTable::BecomeContent(nsIMdbEnv* mev,
+                         const mdbOid* inOid)  // exchange content
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+  // remember table->MaybeDirtySpaceStoreAndTable();
+}
+
+// } ----- end ID methods -----
+
+// { ----- begin activity dropping methods -----
+NS_IMETHODIMP
+// Unimplemented: usage hints are ignored by this implementation.
+morkTable::DropActivity(  // tell collection usage no longer expected
+    nsIMdbEnv* mev) {
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// } ----- end activity dropping methods -----
+
+// } ===== end nsIMdbCollection methods =====
+
+// { ===== begin nsIMdbTable methods =====
+
+// { ----- begin attribute methods -----
+
+NS_IMETHODIMP
+// Set the table's priority, clamped into [0, morkPriority_kMax].
+morkTable::SetTablePriority(nsIMdbEnv* mev, mdb_priority inPrio) {
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (inPrio > morkPriority_kMax) inPrio = morkPriority_kMax;
+
+    mTable_Priority = inPrio;
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Read the table's priority; an out-of-range stored value is clamped
+// and written back so the member self-heals.
+morkTable::GetTablePriority(nsIMdbEnv* mev, mdb_priority* outPrio) {
+  nsresult outErr = NS_OK;
+  mork_priority prio = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    prio = mTable_Priority;
+    if (prio > morkPriority_kMax) {
+      prio = morkPriority_kMax;
+      mTable_Priority = prio;
+    }
+    outErr = ev->AsErr();
+  }
+  if (outPrio) *outPrio = prio;
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Report whether the table is flagged for verbose (non-compressed) output.
+morkTable::GetTableBeVerbose(nsIMdbEnv* mev, mdb_bool* outBeVerbose) {
+  NS_ENSURE_ARG_POINTER(outBeVerbose);
+  *outBeVerbose = IsTableVerbose();
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+// Toggle the table's verbose flag.
+morkTable::SetTableBeVerbose(nsIMdbEnv* mev, mdb_bool inBeVerbose) {
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (inBeVerbose)
+      SetTableVerbose();
+    else
+      ClearTableVerbose();
+
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Report whether the table enforces unique row membership.
+morkTable::GetTableIsUnique(nsIMdbEnv* mev, mdb_bool* outIsUnique) {
+  NS_ENSURE_ARG_POINTER(outIsUnique);
+  *outIsUnique = IsTableUnique();
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+// Report the table kind assigned at construction.
+morkTable::GetTableKind(nsIMdbEnv* mev, mdb_kind* outTableKind) {
+  NS_ENSURE_ARG_POINTER(outTableKind);
+  *outTableKind = mTable_Kind;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+// Report the scope of the rowspace containing this table; errors (and
+// returns scope 0) when the rowspace back-pointer is nil.
+morkTable::GetRowScope(nsIMdbEnv* mev, mdb_scope* outRowScope) {
+  nsresult outErr = NS_OK;
+  mdb_scope rowScope = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (mTable_RowSpace)
+      rowScope = mTable_RowSpace->SpaceScope();
+    else
+      NilRowSpaceError(ev);
+
+    outErr = ev->AsErr();
+  }
+  if (outRowScope) *outRowScope = rowScope;
+  return outErr;
+}
+
+NS_IMETHODIMP
+morkTable::GetMetaRow(
+    nsIMdbEnv* mev,
+    const mdbOid* inOptionalMetaRowOid,  // can be nil to avoid specifying
+    mdbOid* outOid,  // output meta row oid, can be nil to suppress output
+    nsIMdbRow** acqRow)  // acquire table's unique singleton meta row
+// The purpose of a meta row is to support the persistent recording of
+// meta info about a table as cells put into the distinguished meta row.
+// Each table has exactly one meta row, which is not considered a member
+// of the collection of rows inside the table.  The only way to tell
+// whether a row is a meta row is by the fact that it is returned by this
+// GetMetaRow() method from some table.  Otherwise nothing distinguishes
+// a meta row from any other row.  A meta row can be used anyplace that
+// any other row can be used, and can even be put into other tables (or
+// the same table) as a table member, if this is useful for some reason.
+// The first attempt to access a table's meta row using GetMetaRow() will
+// cause the meta row to be created if it did not already exist.  When the
+// meta row is created, it will have the row oid that was previously
+// requested for this table's meta row; or if no oid was ever explicitly
+// specified for this meta row, then a unique oid will be generated in
+// the row scope named "metaScope" (so obviously MDB clients should not
+// manually allocate any row IDs from that special meta scope namespace).
+// The meta row oid can be specified either when the table is created, or
+// else the first time that GetMetaRow() is called, by passing a non-nil
+// pointer to an oid for parameter inOptionalMetaRowOid.  The meta row's
+// actual oid is returned in outOid (if this is a non-nil pointer), and
+// it will be different from inOptionalMetaRowOid when the meta row was
+// already given a different oid earlier.
+{
+  nsresult outErr = NS_OK;
+  nsIMdbRow* outRow = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRow* row = GetMetaRow(ev, inOptionalMetaRowOid);
+    if (row && ev->Good()) {
+      if (outOid) *outOid = row->mRow_Oid;
+
+      outRow = row->AcquireRowHandle(ev, mTable_Store);
+    }
+    outErr = ev->AsErr();
+  }
+  if (acqRow) *acqRow = outRow;
+
+  // Fix: the original dereferenced ev unconditionally here, yet the
+  // `if (ev)` guard above shows FromMdbEnv() may return nil.  Poison the
+  // output oid both on a bad env and on a nil env, without crashing.
+  if (outOid && (!ev || ev->Bad())) {
+    outOid->mOid_Scope = 0;
+    outOid->mOid_Id = morkRow_kMinusOneRid;
+  }
+  return outErr;
+}
+
+// } ----- end attribute methods -----
+
+// { ----- begin cursor methods -----
+NS_IMETHODIMP
+// Create a row cursor positioned at inRowPos; the cursor is returned
+// with one reference for the caller (AddRef below).
+morkTable::GetTableRowCursor(  // make a cursor, starting iteration at inRowPos
+    nsIMdbEnv* mev,            // context
+    mdb_pos inRowPos,          // zero-based ordinal position of row in table
+    nsIMdbTableRowCursor** acqCursor)  // acquire new cursor instance
+{
+  nsresult outErr = NS_OK;
+  nsIMdbTableRowCursor* outCursor = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkTableRowCursor* cursor = NewTableRowCursor(ev, inRowPos);
+    if (cursor) {
+      if (ev->Good()) {
+        // cursor->mCursor_Seed = (mork_seed) inRowPos;
+        outCursor = cursor;
+        outCursor->AddRef();
+      }
+    }
+
+    outErr = ev->AsErr();
+  }
+  if (acqCursor) *acqCursor = outCursor;
+  return outErr;
+}
+// } ----- end row position methods -----
+
+// { ----- begin row position methods -----
+NS_IMETHODIMP
+// Map a table position to the oid of the row there; yields the sentinel
+// (scope 0, id -1) when the position is out of range.
+morkTable::PosToOid(  // get row member for a table position
+    nsIMdbEnv* mev,   // context
+    mdb_pos inRowPos,  // zero-based ordinal position of row in table
+    mdbOid* outOid)    // row oid at the specified position
+{
+  nsresult outErr = NS_OK;
+  mdbOid roid;
+  roid.mOid_Scope = 0;
+  roid.mOid_Id = (mork_id)-1;
+
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRow* row = SafeRowAt(ev, inRowPos);
+    if (row) roid = row->mRow_Oid;
+
+    outErr = ev->AsErr();
+  }
+  if (outOid) *outOid = roid;
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Map a row oid to its table position; -1 (from ArrayHasOid) means the
+// row is not a member.
+morkTable::OidToPos(   // test for the table position of a row member
+    nsIMdbEnv* mev,    // context
+    const mdbOid* inOid,  // row to find in table
+    mdb_pos* outPos)      // zero-based ordinal position of row in table
+{
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    mork_pos pos = ArrayHasOid(ev, inOid);
+    if (outPos) *outPos = pos;
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Map a table position to an acquired row handle (nil when out of range).
+morkTable::PosToRow(  // get row member for a table position
+    nsIMdbEnv* mev,   // context
+    mdb_pos inRowPos,    // zero-based ordinal position of row in table
+    nsIMdbRow** acqRow)  // acquire row at table position inRowPos
+{
+  nsresult outErr = NS_OK;
+  nsIMdbRow* outRow = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRow* row = SafeRowAt(ev, inRowPos);
+    if (row && mTable_Store) outRow = row->AcquireRowHandle(ev, mTable_Store);
+
+    outErr = ev->AsErr();
+  }
+  if (acqRow) *acqRow = outRow;
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Map a row handle to its table position (-1 when not a member).
+// NOTE(review): ioRow is downcast to morkRowObject* without a nil or
+// type check, unlike PosToRow above — confirm callers never pass nil.
+morkTable::RowToPos(  // test for the table position of a row member
+    nsIMdbEnv* mev,   // context
+    nsIMdbRow* ioRow,  // row to find in table
+    mdb_pos* outPos)   // zero-based ordinal position of row in table
+{
+  nsresult outErr = NS_OK;
+  mork_pos pos = -1;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRowObject* row = (morkRowObject*)ioRow;
+    pos = ArrayHasOid(ev, &row->mRowObject_Row->mRow_Oid);
+    outErr = ev->AsErr();
+  }
+  if (outPos) *outPos = pos;
+  return outErr;
+}
+
+// Note that HasRow() performs the inverse oid->pos mapping
+// } ----- end row position methods -----
+
+// { ----- begin oid set methods -----
+NS_IMETHODIMP
+morkTable::AddOid( // make sure the row with inOid is a table member
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid) // row to ensure membership in table
+{
+ NS_ASSERTION(false, "not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+morkTable::HasOid( // test for the table position of a row member
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid, // row to find in table
+ mdb_bool* outHasOid) // whether inOid is a member row
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (outHasOid) *outHasOid = MapHasOid(ev, inOid);
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+
+NS_IMETHODIMP
+morkTable::CutOid( // make sure the row with inOid is not a member
+ nsIMdbEnv* mev, // context
+ const mdbOid* inOid) // row to remove from table
+{
+ nsresult outErr = NS_OK;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ if (inOid && mTable_Store) {
+ morkRow* row = mTable_Store->GetRow(ev, inOid);
+ if (row) CutRow(ev, row);
+ } else
+ ev->NilPointerError();
+
+ outErr = ev->AsErr();
+ }
+ return outErr;
+}
+// } ----- end oid set methods -----
+
+// { ----- begin row set methods -----
+NS_IMETHODIMP
+// Create a new row in the store (db-assigned id when ioOid carries the
+// minus-one sentinel, else the requested oid), add it to this table, and
+// return an acquired handle to it.
+morkTable::NewRow(  // create a new row instance in table
+    nsIMdbEnv* mev,  // context
+    mdbOid* ioOid,  // please use zero (unbound) rowId for db-assigned IDs
+    nsIMdbRow** acqRow)  // create new row
+{
+  nsresult outErr = NS_OK;
+  nsIMdbRow* outRow = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (ioOid && mTable_Store) {
+      morkRow* row = 0;
+      if (ioOid->mOid_Id == morkRow_kMinusOneRid)
+        row = mTable_Store->NewRow(ev, ioOid->mOid_Scope);
+      else
+        row = mTable_Store->NewRowWithOid(ev, ioOid);
+
+      // Only hand out a handle when the row was also added to the table.
+      if (row && AddRow(ev, row))
+        outRow = row->AcquireRowHandle(ev, mTable_Store);
+    } else
+      ev->NilPointerError();
+
+    outErr = ev->AsErr();
+  }
+  if (acqRow) *acqRow = outRow;
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Ensure the given row handle's row is a member of this table.
+// NOTE(review): ioRow is downcast without a nil check, as in RowToPos.
+morkTable::AddRow(  // make sure the row with inOid is a table member
+    nsIMdbEnv* mev,  // context
+    nsIMdbRow* ioRow)  // row to ensure membership in table
+{
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRowObject* rowObj = (morkRowObject*)ioRow;
+    morkRow* row = rowObj->mRowObject_Row;
+    AddRow(ev, row);
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Test whether the given row handle's row is a member of this table.
+morkTable::HasRow(  // test for the table position of a row member
+    nsIMdbEnv* mev,  // context
+    nsIMdbRow* ioRow,  // row to find in table
+    mdb_bool* outBool)  // zero-based ordinal position of row in table
+{
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRowObject* rowObj = (morkRowObject*)ioRow;
+    morkRow* row = rowObj->mRowObject_Row;
+    if (outBool) *outBool = MapHasOid(ev, &row->mRow_Oid);
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Remove the given row handle's row from this table (row itself survives
+// in the store).
+morkTable::CutRow(  // make sure the row with inOid is not a member
+    nsIMdbEnv* mev,  // context
+    nsIMdbRow* ioRow)  // row to remove from table
+{
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRowObject* rowObj = (morkRowObject*)ioRow;
+    morkRow* row = rowObj->mRowObject_Row;
+    CutRow(ev, row);
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Remove every row from the table, delegating to the internal overload.
+morkTable::CutAllRows(  // remove all rows from the table
+    nsIMdbEnv* mev)     // context
+{
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    CutAllRows(ev);
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+// } ----- end row set methods -----
+
+// { ----- begin searching methods -----
+NS_IMETHODIMP
+// Unimplemented: prefix search over sorted columns is not supported by
+// this Mork table implementation.
+morkTable::FindRowMatches(  // search variable number of sorted cols
+    nsIMdbEnv* mev,         // context
+    const mdbYarn* inPrefix,  // content to find as prefix in row's column cell
+    nsIMdbTableRowCursor** acqCursor)  // set of matching rows
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented (see FindRowMatches above): there is no search-column
+// set to report.
+morkTable::GetSearchColumns(  // query columns used by FindRowMatches()
+    nsIMdbEnv* mev,           // context
+    mdb_count* outCount,      // context
+    mdbColumnSet* outColSet)  // caller supplied space to put columns
+// GetSearchColumns() returns the columns actually searched when the
+// FindRowMatches() method is called.  No more than mColumnSet_Count
+// slots of mColumnSet_Columns will be written, since mColumnSet_Count
+// indicates how many slots are present in the column array.  The
+// actual number of search column used by the table is returned in
+// the outCount parameter; if this number exceeds mColumnSet_Count,
+// then a caller needs a bigger array to read the entire column set.
+// The minimum of mColumnSet_Count and outCount is the number slots
+// in mColumnSet_Columns that were actually written by this method.
+//
+// Callers are expected to change this set of columns by calls to
+// nsIMdbTable::SearchColumnsHint() or SetSearchSorting(), or both.
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+// } ----- end searching methods -----
+
+// { ----- begin hinting methods -----
+NS_IMETHODIMP
+// Unimplemented hint: search columns cannot be configured here.
+morkTable::SearchColumnsHint(  // advise re future expected search cols
+    nsIMdbEnv* mev,            // context
+    const mdbColumnSet* inColumnSet)  // columns likely to be searched
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented hint: sort columns cannot be configured here.
+morkTable::SortColumnsHint(  // advise re future expected sort columns
+    nsIMdbEnv* mev,          // context
+    const mdbColumnSet* inColumnSet)  // columns for likely sort requests
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Batch-change hints are accepted but deliberately ignored: this table
+// maintains no sort orders that would benefit from batching.
+morkTable::StartBatchChangeHint(  // advise before many adds and cuts
+    nsIMdbEnv* mev,               // context
+    const void* inLabel)  // intend unique address to match end call
+// If batch starts nest by virtue of nesting calls in the stack, then
+// the address of a local variable makes a good batch start label that
+// can be used at batch end time, and such addresses remain unique.
+{
+  // we don't do anything here.
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+// Matching end of StartBatchChangeHint(); also deliberately a no-op.
+morkTable::EndBatchChangeHint(  // advise before many adds and cuts
+    nsIMdbEnv* mev,             // context
+    const void* inLabel)        // label matching start label
+// Suppose a table is maintaining one or many sort orders for a table,
+// so that every row added to the table must be inserted in each sort,
+// and every row cut must be removed from each sort.  If a db client
+// intends to make many such changes before needing any information
+// about the order or positions of rows inside a table, then a client
+// might tell the table to start batch changes in order to disable
+// sorting of rows for the interim.  Presumably a table will then do
+// a full sort of all rows at need when the batch changes end, or when
+// a surprise request occurs for row position during batch changes.
+{
+  // we don't do anything here.
+  return NS_OK;
+}
+// } ----- end hinting methods -----
+
+// { ----- begin sorting methods -----
+// sorting: note all rows are assumed sorted by row ID as a secondary
+// sort following the primary column sort, when table rows are sorted.
+
+NS_IMETHODIMP
+// Unimplemented: no column of a Mork table can be sorted via this API.
+morkTable::CanSortColumn(  // query which column is currently used for sorting
+    nsIMdbEnv* mev,        // context
+    mdb_column inColumn,   // column to query sorting potential
+    mdb_bool* outCanSort)  // whether the column can be sorted
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented: sortings are not supported by this implementation.
+morkTable::GetSorting(  // view same table in particular sorting
+    nsIMdbEnv* mev,      // context
+    mdb_column inColumn,  // requested new column for sorting table
+    nsIMdbSorting** acqSorting)  // acquire sorting for column
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented (no sortings, so nothing to attach to searching).
+morkTable::SetSearchSorting(  // use this sorting in FindRowMatches()
+    nsIMdbEnv* mev,           // context
+    mdb_column inColumn,  // often same as nsIMdbSorting::GetSortColumn()
+    nsIMdbSorting* ioSorting)  // requested sorting for some column
+// SetSearchSorting() attempts to inform the table that ioSorting
+// should be used during calls to FindRowMatches() for searching
+// the column which is actually sorted by ioSorting.  This method
+// is most useful in conjunction with nsIMdbSorting::SetCompare(),
+// because otherwise a caller would not be able to override the
+// comparison ordering method used during searches.  Note that some
+// database implementations might be unable to use an arbitrarily
+// specified sort order, either due to schema or runtime interface
+// constraints, in which case ioSorting might not actually be used.
+// Presumably ioSorting is an instance that was returned from some
+// earlier call to nsIMdbTable::GetSorting().  A caller can also
+// use nsIMdbTable::SearchColumnsHint() to specify desired change
+// in which columns are sorted and searched by FindRowMatches().
+//
+// A caller can pass a nil pointer for ioSorting to request that
+// column inColumn no longer be used at all by FindRowMatches().
+// But when ioSorting is non-nil, then inColumn should match the
+// column actually sorted by ioSorting; when these do not agree,
+// implementations are instructed to give precedence to the column
+// specified by ioSorting (so this means callers might just pass
+// zero for inColumn when ioSorting is also provided, since then
+// inColumn is both redundant and ignored).
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// } ----- end sorting methods -----
+
+// { ----- begin moving methods -----
+// moving a row does nothing unless a table is currently unsorted
+
+NS_IMETHODIMP
+// Move the row with inOid to position inToPos; outActualPos receives the
+// resulting position, or -1 when the row was not a table member.
+morkTable::MoveOid(  // change position of row in unsorted table
+    nsIMdbEnv* mev,  // context
+    const mdbOid* inOid,    // row oid to find in table
+    mdb_pos inHintFromPos,  // suggested hint regarding start position
+    mdb_pos inToPos,        // desired new position for row inOid
+    mdb_pos* outActualPos)  // actual new position of row in table
+{
+  nsresult outErr = NS_OK;
+  mdb_pos actualPos = -1;  // meaning it was never found in table
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (inOid && mTable_Store) {
+      morkRow* row = mTable_Store->GetRow(ev, inOid);
+      if (row) actualPos = MoveRow(ev, row, inHintFromPos, inToPos);
+    } else
+      ev->NilPointerError();
+
+    outErr = ev->AsErr();
+  }
+  if (outActualPos) *outActualPos = actualPos;
+  return outErr;
+}
+
+NS_IMETHODIMP
+// Same as MoveOid but taking a row handle directly.
+// NOTE(review): ioRow is downcast without a nil check, as elsewhere.
+morkTable::MoveRow(  // change position of row in unsorted table
+    nsIMdbEnv* mev,  // context
+    nsIMdbRow* ioRow,       // row oid to find in table
+    mdb_pos inHintFromPos,  // suggested hint regarding start position
+    mdb_pos inToPos,        // desired new position for row ioRow
+    mdb_pos* outActualPos)  // actual new position of row in table
+{
+  mdb_pos actualPos = -1;  // meaning it was never found in table
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    morkRowObject* rowObj = (morkRowObject*)ioRow;
+    morkRow* row = rowObj->mRowObject_Row;
+    actualPos = MoveRow(ev, row, inHintFromPos, inToPos);
+    outErr = ev->AsErr();
+  }
+  if (outActualPos) *outActualPos = actualPos;
+  return outErr;
+}
+// } ----- end moving methods -----
+
+// { ----- begin index methods -----
+NS_IMETHODIMP
+// Unimplemented: column indexes are not supported by this table.
+morkTable::AddIndex(  // create a sorting index for column if possible
+    nsIMdbEnv* mev,   // context
+    mdb_column inColumn,     // the column to sort by index
+    nsIMdbThumb** acqThumb)  // acquire thumb for incremental index building
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the index addition will be finished.
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented (no indexes exist to cut).
+morkTable::CutIndex(  // stop supporting a specific column index
+    nsIMdbEnv* mev,   // context
+    mdb_column inColumn,     // the column with index to be removed
+    nsIMdbThumb** acqThumb)  // acquire thumb for incremental index destroy
+// Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+// then the index removal will be finished.
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented (no indexes exist to query).
+morkTable::HasIndex(  // query for current presence of a column index
+    nsIMdbEnv* mev,   // context
+    mdb_column inColumn,    // the column to investigate
+    mdb_bool* outHasIndex)  // whether column has index for this column
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented index-on-sort toggle.
+morkTable::EnableIndexOnSort(  // create an index for col on first sort
+    nsIMdbEnv* mev,            // context
+    mdb_column inColumn)       // the column to index if ever sorted
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented index-on-sort query.
+morkTable::QueryIndexOnSort(  // check whether index on sort is enabled
+    nsIMdbEnv* mev,           // context
+    mdb_column inColumn,       // the column to investigate
+    mdb_bool* outIndexOnSort)  // whether column has index-on-sort enabled
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+// Unimplemented index-on-sort toggle.
+morkTable::DisableIndexOnSort(  // prevent future index creation on sort
+    nsIMdbEnv* mev,             // context
+    mdb_column inColumn)        // the column to index if ever sorted
+{
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+// } ----- end index methods -----
+
+// } ===== end nsIMdbTable methods =====
+
+// we override these so that we'll use the xpcom add and release ref.
+// The morkEnv/nsIMdbEnv overloads both forward to XPCOM AddRef()/Release();
+// the env parameter is unused.  The _MSC_VER guards mirror a declaration
+// difference in the header for MSVC builds.
+#ifndef _MSC_VER
+mork_refs morkTable::AddStrongRef(nsIMdbEnv* ev) { return (mork_refs)AddRef(); }
+#endif
+
+mork_refs morkTable::AddStrongRef(morkEnv* ev) { return (mork_refs)AddRef(); }
+
+#ifndef _MSC_VER
+// NOTE(review): Release() returns the remaining refcount, which is cast
+// to nsresult here — nonzero counts read as error codes; confirm callers
+// ignore this return value.
+nsresult morkTable::CutStrongRef(nsIMdbEnv* ev) { return (nsresult)Release(); }
+#endif
+
+mork_refs morkTable::CutStrongRef(morkEnv* ev) { return (mork_refs)Release(); }
+
+// Increment the GC-use count unless it is pinned at the max sentinel;
+// once maxed, the count freezes there permanently (see CutTableGcUse).
+mork_u2 morkTable::AddTableGcUse(morkEnv* ev) {
+  MORK_USED_1(ev);
+  if (mTable_GcUses < morkTable_kMaxTableGcUses)  // not already maxed out?
+    ++mTable_GcUses;
+
+  return mTable_GcUses;
+}
+
+// Decrement the GC-use count; a frozen (maxed) count stays frozen, and
+// decrementing past zero only warns rather than wrapping.
+mork_u2 morkTable::CutTableGcUse(morkEnv* ev) {
+  if (mTable_GcUses)  // any outstanding uses to cut?
+  {
+    if (mTable_GcUses < morkTable_kMaxTableGcUses)  // not frozen at max?
+      --mTable_GcUses;
+  } else
+    this->TableGcUsesUnderflowWarning(ev);
+
+  return mTable_GcUses;
+}
+
+// table dirty handling more complex than morkNode::SetNodeDirty() etc.
+
+// Discard all pending change records and clear every dirty/rewrite flag,
+// typically after the table's state has been committed to disk.
+void morkTable::SetTableClean(morkEnv* ev) {
+  if (mTable_ChangeList.HasListMembers()) {
+    nsIMdbHeap* heap = mTable_Store->mPort_Heap;
+    mTable_ChangeList.CutAndZapAllListMembers(ev, heap);  // forget changes
+  }
+  mTable_ChangesCount = 0;
+
+  mTable_Flags = 0;
+  this->SetNodeClean();
+}
+
+// notifications regarding table changes:
+
+// Record a row-move change for incremental commit.  When the table is
+// already slated for a full rewrite, or the change list has overflowed
+// mTable_ChangesMax, collapse to a whole-table rewrite instead of
+// accumulating more records.
+void morkTable::NoteTableMoveRow(morkEnv* ev, morkRow* ioRow, mork_pos inPos) {
+  nsIMdbHeap* heap = mTable_Store->mPort_Heap;
+  if (this->IsTableRewrite() || this->HasChangeOverflow())
+    this->NoteTableSetAll(ev);
+  else {
+    morkTableChange* tableChange =
+        new (*heap, ev) morkTableChange(ev, ioRow, inPos);
+    if (tableChange) {
+      if (ev->Good()) {
+        mTable_ChangeList.PushTail(tableChange);
+        ++mTable_ChangesCount;
+      } else {
+        // Allocation succeeded but construction failed: destroy the
+        // record and fall back to rewriting all rows.
+        tableChange->ZapOldNext(ev, heap);
+        this->SetTableRewrite();  // just plan to write all table rows
+      }
+    }
+  }
+}
+
+// Same recording scheme as NoteTableMoveRow, but the failure path goes
+// through NoteTableSetAll() (which also clears accumulated changes).
+void morkTable::note_row_move(morkEnv* ev, morkRow* ioRow, mork_pos inNewPos) {
+  if (this->IsTableRewrite() || this->HasChangeOverflow())
+    this->NoteTableSetAll(ev);
+  else {
+    nsIMdbHeap* heap = mTable_Store->mPort_Heap;
+    morkTableChange* tableChange =
+        new (*heap, ev) morkTableChange(ev, ioRow, inNewPos);
+    if (tableChange) {
+      if (ev->Good()) {
+        mTable_ChangeList.PushTail(tableChange);
+        ++mTable_ChangesCount;
+      } else {
+        tableChange->ZapOldNext(ev, heap);
+        this->NoteTableSetAll(ev);
+      }
+    }
+  }
+}
+
+// Record an add/cut membership change (inChange) for incremental commit,
+// with the same overflow collapse as note_row_move.
+void morkTable::note_row_change(morkEnv* ev, mork_change inChange,
+                                morkRow* ioRow) {
+  if (this->IsTableRewrite() || this->HasChangeOverflow())
+    this->NoteTableSetAll(ev);
+  else {
+    nsIMdbHeap* heap = mTable_Store->mPort_Heap;
+    morkTableChange* tableChange =
+        new (*heap, ev) morkTableChange(ev, inChange, ioRow);
+    if (tableChange) {
+      if (ev->Good()) {
+        mTable_ChangeList.PushTail(tableChange);
+        ++mTable_ChangesCount;
+      } else {
+        tableChange->ZapOldNext(ev, heap);
+        this->NoteTableSetAll(ev);
+      }
+    }
+  }
+}
+
+// Abandon incremental change tracking: drop all queued change records
+// and mark the whole table for rewrite on the next commit.
+void morkTable::NoteTableSetAll(morkEnv* ev) {
+  if (mTable_ChangeList.HasListMembers()) {
+    nsIMdbHeap* heap = mTable_Store->mPort_Heap;
+    mTable_ChangeList.CutAndZapAllListMembers(ev, heap);  // forget changes
+  }
+  mTable_ChangesCount = 0;
+  this->SetTableRewrite();
+}
+
+// Static error/warning helpers: each reports one fixed diagnostic
+// through the env, keeping call sites one-liners.
+/*static*/ void morkTable::TableGcUsesUnderflowWarning(morkEnv* ev) {
+  ev->NewWarning("mTable_GcUses underflow");
+}
+
+/*static*/ void morkTable::NonTableTypeError(morkEnv* ev) {
+  ev->NewError("non morkTable");
+}
+
+/*static*/ void morkTable::NonTableTypeWarning(morkEnv* ev) {
+  ev->NewWarning("non morkTable");
+}
+
+/*static*/ void morkTable::NilRowSpaceError(morkEnv* ev) {
+  ev->NewError("nil mTable_RowSpace");
+}
+
+// Propagate dirtiness upward (store, rowspace, table) when the store is
+// currently allowed to be dirtied.  Returns true when the table/rowspace
+// were marked dirty, false when dirtying is suppressed (e.g. during load).
+mork_bool morkTable::MaybeDirtySpaceStoreAndTable() {
+  morkRowSpace* rowSpace = mTable_RowSpace;
+  if (rowSpace) {
+    morkStore* store = rowSpace->mSpace_Store;
+    if (store && store->mStore_CanDirty) {
+      store->SetStoreDirty();
+      rowSpace->mSpace_CanDirty = morkBool_kTrue;
+    }
+
+    if (rowSpace->mSpace_CanDirty)  // first time being dirtied?
+    {
+      if (this->IsTableClean()) {
+        // Budget incremental change records before collapsing to a full
+        // rewrite: a quarter of the row count (despite the variable's
+        // historical "oneThird" name — the code divides by 4), capped so
+        // it fits comfortably in the u2 mTable_ChangesMax.
+        mork_count rowCount = this->GetRowCount();
+        mork_count oneThird = rowCount / 4;  // a quarter of rows
+        if (oneThird > 0x07FFF)  // more than half max u2?
+          oneThird = 0x07FFF;
+
+        mTable_ChangesMax = (mork_u2)oneThird;
+      }
+      this->SetTableDirty();
+      rowSpace->SetRowSpaceDirty();
+
+      return morkBool_kTrue;
+    }
+  }
+  return morkBool_kFalse;
+}
+
+// Internal lazy accessor for the singleton meta row.  On first use, bind
+// the oid (caller-supplied, previously recorded, or freshly generated in
+// the meta scope), cache the row, take a GC use on it, and mark the table
+// as having new meta data.
+morkRow* morkTable::GetMetaRow(morkEnv* ev,
+                               const mdbOid* inOptionalMetaRowOid) {
+  morkRow* outRow = mTable_MetaRow;
+  if (!outRow) {
+    morkStore* store = mTable_Store;
+    mdbOid* oid = &mTable_MetaRowOid;
+    // A zero scope means the oid was never bound; adopt the caller's.
+    if (inOptionalMetaRowOid && !oid->mOid_Scope) *oid = *inOptionalMetaRowOid;
+
+    if (oid->mOid_Scope)  // oid already recorded in table?
+      outRow = store->OidToRow(ev, oid);
+    else {
+      outRow = store->NewRow(ev, morkStore_kMetaScope);
+      if (outRow)  // need to record new oid in table?
+        *oid = outRow->mRow_Oid;
+    }
+    mTable_MetaRow = outRow;
+    if (outRow)  // need to note another use of this row?
+    {
+      outRow->AddRowGcUse(ev);
+
+      this->SetTableNewMeta();
+      if (this->IsTableClean())  // catch dirty status of meta row?
+        this->MaybeDirtySpaceStoreAndTable();
+    }
+  }
+
+  return outRow;
+}
+
+// Compose the table's oid from its rowspace scope and table id; errors
+// (leaving *outOid unwritten) when the rowspace back-pointer is nil.
+void morkTable::GetTableOid(morkEnv* ev, mdbOid* outOid) {
+  morkRowSpace* space = mTable_RowSpace;
+  if (space) {
+    outOid->mOid_Scope = space->SpaceScope();
+    outOid->mOid_Id = this->TableId();
+  } else
+    this->NilRowSpaceError(ev);
+}
+
+// Hand out this table as an nsIMdbTable, adding a reference for the caller.
+nsIMdbTable* morkTable::AcquireTableHandle(morkEnv* ev) {
+  AddRef();
+  return this;
+}
+
+mork_pos morkTable::ArrayHasOid(morkEnv* ev, const mdbOid* inOid) {
+ MORK_USED_1(ev);
+ mork_count count = mTable_RowArray.mArray_Fill;
+ mork_pos pos = -1;
+ while (++pos < (mork_pos)count) {
+ morkRow* row = (morkRow*)mTable_RowArray.At(pos);
+ MORK_ASSERT(row);
+ if (row && row->EqualOid(inOid)) {
+ return pos;
+ }
+ }
+ return -1;
+}
+
+mork_bool morkTable::MapHasOid(morkEnv* ev, const mdbOid* inOid) {
+ if (mTable_RowMap)
+ return (mTable_RowMap->GetOid(ev, inOid) != 0);
+ else
+ return (ArrayHasOid(ev, inOid) >= 0);
+}
+
+void morkTable::build_row_map(morkEnv* ev) {
+ morkRowMap* map = mTable_RowMap;
+ if (!map) {
+ mork_count count = mTable_RowArray.mArray_Fill + 3;
+ nsIMdbHeap* heap = mTable_Store->mPort_Heap;
+ map = new (*heap, ev) morkRowMap(ev, morkUsage::kHeap, heap, heap, count);
+ if (map) {
+ if (ev->Good()) {
+ mTable_RowMap = map; // put strong ref here
+ count = mTable_RowArray.mArray_Fill;
+ mork_pos pos = -1;
+ while (++pos < (mork_pos)count) {
+ morkRow* row = (morkRow*)mTable_RowArray.At(pos);
+ if (row && row->IsRow())
+ map->AddRow(ev, row);
+ else
+ row->NonRowTypeError(ev);
+ }
+ } else
+ map->CutStrongRef(ev);
+ }
+ }
+}
+
+morkRow* morkTable::find_member_row(morkEnv* ev, morkRow* ioRow) {
+ if (mTable_RowMap)
+ return mTable_RowMap->GetRow(ev, ioRow);
+ else {
+ mork_count count = mTable_RowArray.mArray_Fill;
+ mork_pos pos = -1;
+ while (++pos < (mork_pos)count) {
+ morkRow* row = (morkRow*)mTable_RowArray.At(pos);
+ if (row == ioRow) return row;
+ }
+ }
+ return (morkRow*)0;
+}
+
+mork_pos morkTable::MoveRow(
+ morkEnv* ev, morkRow* ioRow, // change row position
+ mork_pos inHintFromPos, // suggested hint regarding start position
+ mork_pos inToPos) // desired new position for row ioRow
+// MoveRow() returns the actual position of ioRow afterwards; this
+// position is -1 if and only if ioRow was not found as a member.
+{
+ mork_pos outPos = -1; // means ioRow was not a table member
+ mork_bool canDirty = (this->IsTableClean())
+ ? this->MaybeDirtySpaceStoreAndTable()
+ : morkBool_kTrue;
+
+ morkRow** rows = (morkRow**)mTable_RowArray.mArray_Slots;
+ mork_count count = mTable_RowArray.mArray_Fill;
+ if (count && rows && ev->Good()) // any members at all? no errors?
+ {
+ mork_pos lastPos = count - 1; // index of last row slot
+
+ if (inToPos > lastPos) // beyond last used array slot?
+ inToPos = lastPos; // put row into last available slot
+ else if (inToPos < 0) // before first usable slot?
+      inToPos = 0; // put row in very first slot
+
+ if (inHintFromPos > lastPos) // beyond last used array slot?
+ inHintFromPos = lastPos; // seek row in last available slot
+ else if (inHintFromPos < 0) // before first usable slot?
+      inHintFromPos = 0; // seek row in very first slot
+
+    morkRow** fromSlot = 0; // becomes nonzero if ioRow is ever found
+ morkRow** rowsEnd = rows + count; // one past last used array slot
+
+ if (inHintFromPos <= 0) // start of table? just scan for row?
+ {
+ morkRow** cursor = rows - 1; // before first array slot
+ while (++cursor < rowsEnd) {
+ if (*cursor == ioRow) {
+ fromSlot = cursor;
+ break; // end while loop
+ }
+ }
+ } else // search near the start position and work outwards
+ {
+ morkRow** lo = rows + inHintFromPos; // lowest search point
+ morkRow** hi = lo; // highest search point starts at lowest point
+
+ // Seek ioRow in spiral widening search below and above inHintFromPos.
+ // This is faster when inHintFromPos is at all accurate, but is slower
+ // than a straightforward scan when inHintFromPos is nearly random.
+
+ while (lo >= rows || hi < rowsEnd) // keep searching?
+ {
+ if (lo >= rows) // low direction search still feasible?
+ {
+ if (*lo == ioRow) // actually found the row?
+ {
+ fromSlot = lo;
+ break; // end while loop
+ }
+ --lo; // advance further lower
+ }
+ if (hi < rowsEnd) // high direction search still feasible?
+ {
+ if (*hi == ioRow) // actually found the row?
+ {
+ fromSlot = hi;
+ break; // end while loop
+ }
+ ++hi; // advance further higher
+ }
+ }
+ }
+
+ if (fromSlot) // ioRow was found as a table member?
+ {
+ outPos = fromSlot - rows; // actual position where row was found
+ if (outPos != inToPos) // actually need to move this row?
+ {
+ morkRow** toSlot = rows + inToPos; // slot where row must go
+
+ ++mTable_RowArray.mArray_Seed; // we modify the array now:
+
+ if (fromSlot < toSlot) // row is moving upwards?
+ {
+ morkRow** up = fromSlot; // leading pointer going upward
+ while (++up <= toSlot) // have not gone above destination?
+ {
+ *fromSlot = *up; // shift down one
+ fromSlot = up; // shift trailing pointer up
+ }
+ } else // ( fromSlot > toSlot ) // row is moving downwards
+ {
+ morkRow** down = fromSlot; // leading pointer going downward
+ while (--down >= toSlot) // have not gone below destination?
+ {
+ *fromSlot = *down; // shift up one
+ fromSlot = down; // shift trailing pointer
+ }
+ }
+ *toSlot = ioRow;
+ outPos = inToPos; // okay, we actually moved the row here
+
+ if (canDirty) this->note_row_move(ev, ioRow, inToPos);
+ }
+ }
+ }
+ return outPos;
+}
+
+mork_bool morkTable::AddRow(morkEnv* ev, morkRow* ioRow) {
+ morkRow* row = this->find_member_row(ev, ioRow);
+ if (!row && ev->Good()) {
+ mork_bool canDirty = (this->IsTableClean())
+ ? this->MaybeDirtySpaceStoreAndTable()
+ : morkBool_kTrue;
+
+ mork_pos pos = mTable_RowArray.AppendSlot(ev, ioRow);
+ if (ev->Good() && pos >= 0) {
+ ioRow->AddRowGcUse(ev);
+ if (mTable_RowMap) {
+ if (mTable_RowMap->AddRow(ev, ioRow)) {
+ // okay, anything else?
+ } else
+ mTable_RowArray.CutSlot(ev, pos);
+ } else if (mTable_RowArray.mArray_Fill >= morkTable_kMakeRowMapThreshold)
+ this->build_row_map(ev);
+
+ if (canDirty && ev->Good()) this->NoteTableAddRow(ev, ioRow);
+ }
+ }
+ return ev->Good();
+}
+
+mork_bool morkTable::CutRow(morkEnv* ev, morkRow* ioRow) {
+ morkRow* row = this->find_member_row(ev, ioRow);
+ if (row) {
+ mork_bool canDirty = (this->IsTableClean())
+ ? this->MaybeDirtySpaceStoreAndTable()
+ : morkBool_kTrue;
+
+ mork_count count = mTable_RowArray.mArray_Fill;
+ morkRow** rowSlots = (morkRow**)mTable_RowArray.mArray_Slots;
+ if (rowSlots) // array has vector as expected?
+ {
+ mork_pos pos = -1;
+ morkRow** end = rowSlots + count;
+ morkRow** slot = rowSlots - 1; // prepare for preincrement:
+ while (++slot < end) // another slot to check?
+ {
+ if (*slot == row) // found the slot containing row?
+ {
+ pos = slot - rowSlots; // record absolute position
+ break; // end while loop
+ }
+ }
+      if (pos >= 0) // need to cut it from the array?
+ mTable_RowArray.CutSlot(ev, pos);
+ else
+ ev->NewWarning("row not found in array");
+ } else
+ mTable_RowArray.NilSlotsAddressError(ev);
+
+ if (mTable_RowMap) mTable_RowMap->CutRow(ev, ioRow);
+
+ if (canDirty) this->NoteTableCutRow(ev, ioRow);
+
+ if (ioRow->CutRowGcUse(ev) == 0) ioRow->OnZeroRowGcUse(ev);
+ }
+ return ev->Good();
+}
+
+mork_bool morkTable::CutAllRows(morkEnv* ev) {
+ if (this->MaybeDirtySpaceStoreAndTable()) {
+ this->SetTableRewrite(); // everything is dirty
+ this->NoteTableSetAll(ev);
+ }
+
+ if (ev->Good()) {
+ mTable_RowArray.CutAllSlots(ev);
+ if (mTable_RowMap) {
+ morkRowMapIter i(ev, mTable_RowMap);
+ mork_change* c = 0;
+ morkRow* r = 0;
+
+ for (c = i.FirstRow(ev, &r); c; c = i.NextRow(ev, &r)) {
+ if (r) {
+ if (r->CutRowGcUse(ev) == 0) r->OnZeroRowGcUse(ev);
+
+ i.CutHereRow(ev, (morkRow**)0);
+ } else
+ ev->NewWarning("nil row in table map");
+ }
+ }
+ }
+ return ev->Good();
+}
+
+morkTableRowCursor* morkTable::NewTableRowCursor(morkEnv* ev,
+ mork_pos inRowPos) {
+ morkTableRowCursor* outCursor = 0;
+ if (ev->Good()) {
+ nsIMdbHeap* heap = mTable_Store->mPort_Heap;
+ morkTableRowCursor* cursor = new (*heap, ev)
+ morkTableRowCursor(ev, morkUsage::kHeap, heap, this, inRowPos);
+ if (cursor) {
+ if (ev->Good())
+ outCursor = cursor;
+ else
+ cursor->CutStrongRef((nsIMdbEnv*)ev);
+ }
+ }
+ return outCursor;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+morkTableChange::morkTableChange(morkEnv* ev, mork_change inChange,
+ morkRow* ioRow)
+ // use this constructor for inChange == morkChange_kAdd or morkChange_kCut
+ : morkNext(),
+ mTableChange_Row(ioRow),
+ mTableChange_Pos(morkTableChange_kNone) {
+ if (ioRow) {
+ if (ioRow->IsRow()) {
+ if (inChange == morkChange_kAdd)
+ mTableChange_Pos = morkTableChange_kAdd;
+ else if (inChange == morkChange_kCut)
+ mTableChange_Pos = morkTableChange_kCut;
+ else
+ this->UnknownChangeError(ev);
+ } else
+ ioRow->NonRowTypeError(ev);
+ } else
+ ev->NilPointerError();
+}
+
+morkTableChange::morkTableChange(morkEnv* ev, morkRow* ioRow, mork_pos inPos)
+ // use this constructor when the row is moved
+ : morkNext(), mTableChange_Row(ioRow), mTableChange_Pos(inPos) {
+ if (ioRow) {
+ if (ioRow->IsRow()) {
+ if (inPos < 0) this->NegativeMovePosError(ev);
+ } else
+ ioRow->NonRowTypeError(ev);
+ } else
+ ev->NilPointerError();
+}
+
+void morkTableChange::UnknownChangeError(morkEnv* ev) const
+// morkChange_kAdd or morkChange_kCut
+{
+ ev->NewError("mTableChange_Pos neither kAdd nor kCut");
+}
+
+void morkTableChange::NegativeMovePosError(morkEnv* ev) const
+// move must be non-neg position
+{
+ ev->NewError("negative mTableChange_Pos for row move");
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+morkTableMap::~morkTableMap() {}
+
+morkTableMap::morkTableMap(morkEnv* ev, const morkUsage& inUsage,
+ nsIMdbHeap* ioHeap, nsIMdbHeap* ioSlotHeap)
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+ : morkBeadMap(ev, inUsage, ioHeap, ioSlotHeap)
+#else /*MORK_BEAD_OVER_NODE_MAPS*/
+ : morkNodeMap(ev, inUsage, ioHeap, ioSlotHeap)
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+{
+ if (ev->Good()) mNode_Derived = morkDerived_kTableMap;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkTable.h b/comm/mailnews/db/mork/morkTable.h
new file mode 100644
index 0000000000..c0ca5ddd84
--- /dev/null
+++ b/comm/mailnews/db/mork/morkTable.h
@@ -0,0 +1,742 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKTABLE_
+#define _MORKTABLE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKDEQUE_
+# include "morkDeque.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+#ifndef _MORKARRAY_
+# include "morkArray.h"
+#endif
+
+#ifndef _MORKROWMAP_
+# include "morkRowMap.h"
+#endif
+
+#ifndef _MORKNODEMAP_
+# include "morkNodeMap.h"
+#endif
+
+#ifndef _MORKPROBEMAP_
+# include "morkProbeMap.h"
+#endif
+
+#ifndef _MORKBEAD_
+# include "morkBead.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class nsIMdbTable;
+#define morkDerived_kTable /*i*/ 0x5462 /* ascii 'Tb' */
+
+/*| kStartRowArraySize: starting physical size of array for mTable_RowArray.
+**| We want this number very small, so that a table containing exactly one
+**| row member will not pay too significantly in space overhead. But we want
+**| a number bigger than one, so there is some space for growth.
+|*/
+#define morkTable_kStartRowArraySize 3 /* modest starting size for array */
+
+/*| kMakeRowMapThreshold: this is the number of rows in a table which causes
+**| a hash table (mTable_RowMap) to be lazily created for faster member row
+**| identification, during such operations as cuts and adds. This number must
+**| be small enough that linear searches are not bad for member counts less
+**| than this; but this number must also be large enough that creating a hash
+**| table does not increase the per-row space overhead by a big percentage.
+**| For speed, numbers on the order of ten to twenty are all fine; for space,
+**| I believe a number as small as ten will have too much space overhead.
+|*/
+#define morkTable_kMakeRowMapThreshold 17 /* when to build mTable_RowMap */
+
+#define morkTable_kStartRowMapSlotCount 13
+#define morkTable_kMaxTableGcUses 0x0FF /* max for 8-bit unsigned int */
+
+#define morkTable_kUniqueBit ((mork_u1)(1 << 0))
+#define morkTable_kVerboseBit ((mork_u1)(1 << 1))
+#define morkTable_kNotedBit ((mork_u1)(1 << 2)) /* space has change notes */
+#define morkTable_kRewriteBit ((mork_u1)(1 << 3)) /* must rewrite all rows */
+#define morkTable_kNewMetaBit ((mork_u1)(1 << 4)) /* new table meta row */
+
+class morkTable : public morkObject, public morkLink, public nsIMdbTable {
+ // NOTE the morkLink base is for morkRowSpace::mRowSpace_TablesByPriority
+
+ // public: // slots inherited from morkObject (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ // mork_color mBead_Color; // ID for this bead
+ // morkHandle* mObject_Handle; // weak ref to handle for this object
+
+ public: // bead color setter & getter replace obsolete member mTable_Id:
+ NS_DECL_ISUPPORTS_INHERITED
+ mork_tid TableId() const { return mBead_Color; }
+ void SetTableId(mork_tid inTid) { mBead_Color = inTid; }
+
+ // we override these so we use xpcom ref-counting semantics.
+#ifndef _MSC_VER
+ // The first declaration of AddStrongRef is to suppress
+ // -Werror,-Woverloaded-virtual.
+ virtual mork_refs AddStrongRef(nsIMdbEnv* ev) override;
+#endif
+ virtual mork_refs AddStrongRef(morkEnv* ev) override;
+#ifndef _MSC_VER
+ // The first declaration of CutStrongRef is to suppress
+ // -Werror,-Woverloaded-virtual.
+ virtual nsresult CutStrongRef(nsIMdbEnv* ev) override;
+#endif
+ virtual mork_refs CutStrongRef(morkEnv* ev) override;
+
+ public:
+ // { ===== begin nsIMdbCollection methods =====
+ // { ----- begin attribute methods -----
+ NS_IMETHOD GetSeed(nsIMdbEnv* ev,
+ mdb_seed* outSeed) override; // member change count
+ NS_IMETHOD GetCount(nsIMdbEnv* ev,
+ mdb_count* outCount) override; // member count
+
+ NS_IMETHOD GetPort(nsIMdbEnv* ev,
+ nsIMdbPort** acqPort) override; // collection container
+ // } ----- end attribute methods -----
+
+ // { ----- begin cursor methods -----
+ NS_IMETHOD GetCursor( // make a cursor starting iter at inMemberPos
+ nsIMdbEnv* ev, // context
+ mdb_pos inMemberPos, // zero-based ordinal pos of member in collection
+ nsIMdbCursor** acqCursor) override; // acquire new cursor instance
+ // } ----- end cursor methods -----
+
+ // { ----- begin ID methods -----
+ NS_IMETHOD GetOid(nsIMdbEnv* ev,
+ mdbOid* outOid) override; // read object identity
+ NS_IMETHOD BecomeContent(nsIMdbEnv* ev,
+ const mdbOid* inOid) override; // exchange content
+ // } ----- end ID methods -----
+
+ // { ----- begin activity dropping methods -----
+ NS_IMETHOD DropActivity( // tell collection usage no longer expected
+ nsIMdbEnv* ev) override;
+ // } ----- end activity dropping methods -----
+
+ // } ===== end nsIMdbCollection methods =====
+ NS_IMETHOD SetTablePriority(nsIMdbEnv* ev, mdb_priority inPrio) override;
+ NS_IMETHOD GetTablePriority(nsIMdbEnv* ev, mdb_priority* outPrio) override;
+
+ NS_IMETHOD GetTableBeVerbose(nsIMdbEnv* ev, mdb_bool* outBeVerbose) override;
+ NS_IMETHOD SetTableBeVerbose(nsIMdbEnv* ev, mdb_bool inBeVerbose) override;
+
+ NS_IMETHOD GetTableIsUnique(nsIMdbEnv* ev, mdb_bool* outIsUnique) override;
+
+ NS_IMETHOD GetTableKind(nsIMdbEnv* ev, mdb_kind* outTableKind) override;
+ NS_IMETHOD GetRowScope(nsIMdbEnv* ev, mdb_scope* outRowScope) override;
+
+ NS_IMETHOD GetMetaRow(
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOptionalMetaRowOid, // can be nil to avoid specifying
+ mdbOid* outOid, // output meta row oid, can be nil to suppress output
+ nsIMdbRow** acqRow)
+ override; // acquire table's unique singleton meta row
+ // The purpose of a meta row is to support the persistent recording of
+ // meta info about a table as cells put into the distinguished meta row.
+ // Each table has exactly one meta row, which is not considered a member
+ // of the collection of rows inside the table. The only way to tell
+ // whether a row is a meta row is by the fact that it is returned by this
+ // GetMetaRow() method from some table. Otherwise nothing distinguishes
+ // a meta row from any other row. A meta row can be used anyplace that
+ // any other row can be used, and can even be put into other tables (or
+ // the same table) as a table member, if this is useful for some reason.
+ // The first attempt to access a table's meta row using GetMetaRow() will
+ // cause the meta row to be created if it did not already exist. When the
+ // meta row is created, it will have the row oid that was previously
+ // requested for this table's meta row; or if no oid was ever explicitly
+ // specified for this meta row, then a unique oid will be generated in
+ // the row scope named "m" (so obviously MDB clients should not
+ // manually allocate any row IDs from that special meta scope namespace).
+ // The meta row oid can be specified either when the table is created, or
+ // else the first time that GetMetaRow() is called, by passing a non-nil
+ // pointer to an oid for parameter inOptionalMetaRowOid. The meta row's
+ // actual oid is returned in outOid (if this is a non-nil pointer), and
+ // it will be different from inOptionalMetaRowOid when the meta row was
+ // already given a different oid earlier.
+ // } ----- end meta attribute methods -----
+
+ // { ----- begin cursor methods -----
+ NS_IMETHOD
+ GetTableRowCursor( // make a cursor, starting iteration at inRowPos
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ nsIMdbTableRowCursor** acqCursor)
+ override; // acquire new cursor instance
+ // } ----- end row position methods -----
+
+ // { ----- begin row position methods -----
+ NS_IMETHOD PosToOid( // get row member for a table position
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ mdbOid* outOid) override; // row oid at the specified position
+
+ NS_IMETHOD OidToPos( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // row to find in table
+ mdb_pos* outPos) override; // zero-based ordinal position of row in table
+
+ NS_IMETHOD PosToRow( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ mdb_pos inRowPos, // zero-based ordinal position of row in table
+ nsIMdbRow** acqRow) override; // acquire row at table position inRowPos
+
+ NS_IMETHOD RowToPos( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow, // row to find in table
+ mdb_pos* outPos) override; // zero-based ordinal position of row in table
+ // } ----- end row position methods -----
+
+ // { ----- begin oid set methods -----
+ NS_IMETHOD AddOid( // make sure the row with inOid is a table member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid) override; // row to ensure membership in table
+
+ NS_IMETHOD HasOid( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // row to find in table
+ mdb_bool* outHasOid) override; // whether inOid is a member row
+
+ NS_IMETHOD CutOid( // make sure the row with inOid is not a member
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid) override; // row to remove from table
+ // } ----- end oid set methods -----
+
+ // { ----- begin row set methods -----
+ NS_IMETHOD NewRow( // create a new row instance in table
+ nsIMdbEnv* ev, // context
+ mdbOid*
+ ioOid, // please use minus one (unbound) rowId for db-assigned IDs
+ nsIMdbRow** acqRow) override; // create new row
+
+ NS_IMETHOD AddRow( // make sure the row with inOid is a table member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow) override; // row to ensure membership in table
+
+ NS_IMETHOD HasRow( // test for the table position of a row member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow, // row to find in table
+ mdb_bool* outHasRow) override; // whether row is a table member
+
+ NS_IMETHOD CutRow( // make sure the row with inOid is not a member
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow) override; // row to remove from table
+
+ NS_IMETHOD CutAllRows( // remove all rows from the table
+ nsIMdbEnv* ev) override; // context
+ // } ----- end row set methods -----
+
+ // { ----- begin hinting methods -----
+ NS_IMETHOD SearchColumnsHint( // advise re future expected search cols
+ nsIMdbEnv* ev, // context
+ const mdbColumnSet* inColumnSet)
+ override; // columns likely to be searched
+
+ NS_IMETHOD SortColumnsHint( // advise re future expected sort columns
+ nsIMdbEnv* ev, // context
+ const mdbColumnSet* inColumnSet)
+ override; // columns for likely sort requests
+
+ NS_IMETHOD StartBatchChangeHint( // advise before many adds and cuts
+ nsIMdbEnv* ev, // context
+ const void* inLabel) override; // intend unique address to match end call
+ // If batch starts nest by virtue of nesting calls in the stack, then
+ // the address of a local variable makes a good batch start label that
+ // can be used at batch end time, and such addresses remain unique.
+
+ NS_IMETHOD EndBatchChangeHint( // advise before many adds and cuts
+ nsIMdbEnv* ev, // context
+ const void* inLabel) override; // label matching start label
+ // Suppose a table is maintaining one or many sort orders for a table,
+ // so that every row added to the table must be inserted in each sort,
+ // and every row cut must be removed from each sort. If a db client
+ // intends to make many such changes before needing any information
+ // about the order or positions of rows inside a table, then a client
+ // might tell the table to start batch changes in order to disable
+ // sorting of rows for the interim. Presumably a table will then do
+ // a full sort of all rows at need when the batch changes end, or when
+ // a surprise request occurs for row position during batch changes.
+ // } ----- end hinting methods -----
+
+ // { ----- begin searching methods -----
+ NS_IMETHOD FindRowMatches( // search variable number of sorted cols
+ nsIMdbEnv* ev, // context
+ const mdbYarn*
+ inPrefix, // content to find as prefix in row's column cell
+ nsIMdbTableRowCursor** acqCursor) override; // set of matching rows
+
+ NS_IMETHOD GetSearchColumns( // query columns used by FindRowMatches()
+ nsIMdbEnv* ev, // context
+ mdb_count* outCount, // context
+ mdbColumnSet* outColSet)
+ override; // caller supplied space to put columns
+ // GetSearchColumns() returns the columns actually searched when the
+ // FindRowMatches() method is called. No more than mColumnSet_Count
+ // slots of mColumnSet_Columns will be written, since mColumnSet_Count
+ // indicates how many slots are present in the column array. The
+ // actual number of search column used by the table is returned in
+ // the outCount parameter; if this number exceeds mColumnSet_Count,
+ // then a caller needs a bigger array to read the entire column set.
+ // The minimum of mColumnSet_Count and outCount is the number slots
+ // in mColumnSet_Columns that were actually written by this method.
+ //
+ // Callers are expected to change this set of columns by calls to
+ // nsIMdbTable::SearchColumnsHint() or SetSearchSorting(), or both.
+ // } ----- end searching methods -----
+
+ // { ----- begin sorting methods -----
+ // sorting: note all rows are assumed sorted by row ID as a secondary
+ // sort following the primary column sort, when table rows are sorted.
+
+ NS_IMETHOD
+ CanSortColumn( // query which column is currently used for sorting
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // column to query sorting potential
+ mdb_bool* outCanSort) override; // whether the column can be sorted
+
+ NS_IMETHOD GetSorting( // view same table in particular sorting
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // requested new column for sorting table
+ nsIMdbSorting** acqSorting) override; // acquire sorting for column
+
+ NS_IMETHOD SetSearchSorting( // use this sorting in FindRowMatches()
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // often same as nsIMdbSorting::GetSortColumn()
+ nsIMdbSorting* ioSorting) override; // requested sorting for some column
+ // SetSearchSorting() attempts to inform the table that ioSorting
+ // should be used during calls to FindRowMatches() for searching
+ // the column which is actually sorted by ioSorting. This method
+ // is most useful in conjunction with nsIMdbSorting::SetCompare(),
+ // because otherwise a caller would not be able to override the
+ // comparison ordering method used during searches. Note that some
+ // database implementations might be unable to use an arbitrarily
+ // specified sort order, either due to schema or runtime interface
+ // constraints, in which case ioSorting might not actually be used.
+ // Presumably ioSorting is an instance that was returned from some
+ // earlier call to nsIMdbTable::GetSorting(). A caller can also
+ // use nsIMdbTable::SearchColumnsHint() to specify desired change
+ // in which columns are sorted and searched by FindRowMatches().
+ //
+ // A caller can pass a nil pointer for ioSorting to request that
+ // column inColumn no longer be used at all by FindRowMatches().
+ // But when ioSorting is non-nil, then inColumn should match the
+ // column actually sorted by ioSorting; when these do not agree,
+ // implementations are instructed to give precedence to the column
+ // specified by ioSorting (so this means callers might just pass
+ // zero for inColumn when ioSorting is also provided, since then
+ // inColumn is both redundant and ignored).
+ // } ----- end sorting methods -----
+
+ // { ----- begin moving methods -----
+ // moving a row does nothing unless a table is currently unsorted
+
+ NS_IMETHOD MoveOid( // change position of row in unsorted table
+ nsIMdbEnv* ev, // context
+ const mdbOid* inOid, // row oid to find in table
+ mdb_pos inHintFromPos, // suggested hint regarding start position
+ mdb_pos inToPos, // desired new position for row inRowId
+ mdb_pos* outActualPos) override; // actual new position of row in table
+
+ NS_IMETHOD MoveRow( // change position of row in unsorted table
+ nsIMdbEnv* ev, // context
+ nsIMdbRow* ioRow, // row oid to find in table
+ mdb_pos inHintFromPos, // suggested hint regarding start position
+ mdb_pos inToPos, // desired new position for row inRowId
+ mdb_pos* outActualPos) override; // actual new position of row in table
+ // } ----- end moving methods -----
+
+ // { ----- begin index methods -----
+ NS_IMETHOD AddIndex( // create a sorting index for column if possible
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column to sort by index
+ nsIMdbThumb** acqThumb)
+ override; // acquire thumb for incremental index building
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the index addition will be finished.
+
+ NS_IMETHOD CutIndex( // stop supporting a specific column index
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column with index to be removed
+ nsIMdbThumb** acqThumb)
+ override; // acquire thumb for incremental index destroy
+ // Call nsIMdbThumb::DoMore() until done, or until the thumb is broken, and
+ // then the index removal will be finished.
+
+ NS_IMETHOD HasIndex( // query for current presence of a column index
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column to investigate
+ mdb_bool* outHasIndex)
+ override; // whether column has index for this column
+
+ NS_IMETHOD EnableIndexOnSort( // create an index for col on first sort
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn) override; // the column to index if ever sorted
+
+ NS_IMETHOD QueryIndexOnSort( // check whether index on sort is enabled
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn, // the column to investigate
+ mdb_bool* outIndexOnSort)
+ override; // whether column has index-on-sort enabled
+
+ NS_IMETHOD DisableIndexOnSort( // prevent future index creation on sort
+ nsIMdbEnv* ev, // context
+ mdb_column inColumn) override; // the column to index if ever sorted
+ // } ----- end index methods -----
+
+ morkStore* mTable_Store; // non-refcnted ptr to port
+
+ // mTable_RowSpace->SpaceScope() is row scope
+ morkRowSpace* mTable_RowSpace; // non-refcnted ptr to containing space
+
+ morkRow* mTable_MetaRow; // table's actual meta row
+ mdbOid mTable_MetaRowOid; // oid for meta row
+
+ morkRowMap* mTable_RowMap; // (strong ref) hash table of all members
+ morkArray mTable_RowArray; // array of morkRow pointers
+
+ morkList mTable_ChangeList; // list of table changes
+ mork_u2 mTable_ChangesCount; // length of changes list
+ mork_u2 mTable_ChangesMax; // max list length before rewrite
+
+ // mork_tid mTable_Id;
+ mork_kind mTable_Kind;
+
+ mork_u1 mTable_Flags; // bit flags
+ mork_priority mTable_Priority; // 0..9, any other value equals 9
+ mork_u1 mTable_GcUses; // persistent references from cells
+ mork_u1 mTable_Pad; // for u4 alignment
+
+ public: // flags bit twiddling
+ void SetTableUnique() { mTable_Flags |= morkTable_kUniqueBit; }
+ void SetTableVerbose() { mTable_Flags |= morkTable_kVerboseBit; }
+ void SetTableNoted() { mTable_Flags |= morkTable_kNotedBit; }
+ void SetTableRewrite() { mTable_Flags |= morkTable_kRewriteBit; }
+ void SetTableNewMeta() { mTable_Flags |= morkTable_kNewMetaBit; }
+
+ void ClearTableUnique() { mTable_Flags &= (mork_u1)~morkTable_kUniqueBit; }
+  // Bit-flag mutators: each clears one morkTable_k*Bit in mTable_Flags.
+  void ClearTableVerbose() { mTable_Flags &= (mork_u1)~morkTable_kVerboseBit; }
+  void ClearTableNoted() { mTable_Flags &= (mork_u1)~morkTable_kNotedBit; }
+  void ClearTableRewrite() { mTable_Flags &= (mork_u1)~morkTable_kRewriteBit; }
+  void ClearTableNewMeta() { mTable_Flags &= (mork_u1)~morkTable_kNewMetaBit; }
+
+  // Bit-flag accessors: each tests one morkTable_k*Bit in mTable_Flags.
+  mork_bool IsTableUnique() const {
+    return (mTable_Flags & morkTable_kUniqueBit) != 0;
+  }
+
+  mork_bool IsTableVerbose() const {
+    return (mTable_Flags & morkTable_kVerboseBit) != 0;
+  }
+
+  mork_bool IsTableNoted() const {
+    return (mTable_Flags & morkTable_kNotedBit) != 0;
+  }
+
+  mork_bool IsTableRewrite() const {
+    return (mTable_Flags & morkTable_kRewriteBit) != 0;
+  }
+
+  mork_bool IsTableNewMeta() const {
+    return (mTable_Flags & morkTable_kNewMetaBit) != 0;
+  }
+
+ public
+     : // table dirty handling more complex than morkNode::SetNodeDirty() etc.
+  void SetTableDirty() { this->SetNodeDirty(); }
+  void SetTableClean(morkEnv* ev);
+
+  mork_bool IsTableClean() const { return this->IsNodeClean(); }
+  mork_bool IsTableDirty() const { return this->IsNodeDirty(); }
+
+ public: // morkNode memory management operators
+  // Placement new delegating to morkNode::MakeNew() so instances are
+  // allocated from the supplied nsIMdbHeap rather than the global heap.
+  void* operator new(size_t inSize, nsIMdbHeap& ioHeap,
+                     morkEnv* ev) noexcept(true) {
+    return morkNode::MakeNew(inSize, ioHeap, ev);
+  }
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev) override;  // CloseTable() if open
+
+ public: // morkTable construction & destruction
+  morkTable(
+      morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioNodeHeap,
+      morkStore* ioStore, nsIMdbHeap* ioSlotHeap, morkRowSpace* ioRowSpace,
+      const mdbOid* inOptionalMetaRowOid,  // can be nil to avoid specifying
+      mork_tid inTableId, mork_kind inKind, mork_bool inMustBeUnique);
+  void CloseTable(morkEnv* ev);  // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkTable(const morkTable& other);
+  morkTable& operator=(const morkTable& other);
+  virtual ~morkTable();  // assert that close executed earlier
+
+ public: // dynamic type identification
+  mork_bool IsTable() const {
+    return IsNode() && mNode_Derived == morkDerived_kTable;
+  }
+  // } ===== end morkNode methods =====
+
+ public: // errors
+  static void NonTableTypeError(morkEnv* ev);
+  static void NonTableTypeWarning(morkEnv* ev);
+  static void NilRowSpaceError(morkEnv* ev);
+
+ public: // warnings
+  static void TableGcUsesUnderflowWarning(morkEnv* ev);
+
+ public: // noting table changes
+  // True when the queued change count has reached its configured maximum.
+  mork_bool HasChangeOverflow() const {
+    return mTable_ChangesCount >= mTable_ChangesMax;
+  }
+
+  void NoteTableSetAll(morkEnv* ev);
+  void NoteTableMoveRow(morkEnv* ev, morkRow* ioRow, mork_pos inPos);
+
+  void note_row_change(morkEnv* ev, mork_change inChange, morkRow* ioRow);
+  void note_row_move(morkEnv* ev, morkRow* ioRow, mork_pos inNewPos);
+
+  void NoteTableAddRow(morkEnv* ev, morkRow* ioRow) {
+    this->note_row_change(ev, morkChange_kAdd, ioRow);
+  }
+
+  void NoteTableCutRow(morkEnv* ev, morkRow* ioRow) {
+    this->note_row_change(ev, morkChange_kCut, ioRow);
+  }
+
+ protected: // internal row map methods
+  morkRow* find_member_row(morkEnv* ev, morkRow* ioRow);
+  void build_row_map(morkEnv* ev);
+
+ public: // other table methods
+  mork_bool MaybeDirtySpaceStoreAndTable();
+
+  morkRow* GetMetaRow(morkEnv* ev, const mdbOid* inOptionalMetaRowOid);
+
+  mork_u2 AddTableGcUse(morkEnv* ev);
+  mork_u2 CutTableGcUse(morkEnv* ev);
+
+  // void DirtyAllTableContent(morkEnv* ev);
+
+  // Seed changes whenever table membership changes; cursors use it to
+  // detect out-of-sync iteration.
+  mork_seed TableSeed() const { return mTable_RowArray.mArray_Seed; }
+
+  morkRow* SafeRowAt(morkEnv* ev, mork_pos inPos) {
+    return (morkRow*)mTable_RowArray.SafeAt(ev, inPos);
+  }
+
+  nsIMdbTable* AcquireTableHandle(morkEnv* ev);  // mObject_Handle
+
+  mork_count GetRowCount() const { return mTable_RowArray.mArray_Fill; }
+
+  // A table is "used" while it has GC uses or still holds member rows.
+  mork_bool IsTableUsed() const {
+    return (mTable_GcUses != 0 || this->GetRowCount() != 0);
+  }
+
+  void GetTableOid(morkEnv* ev, mdbOid* outOid);
+  mork_pos ArrayHasOid(morkEnv* ev, const mdbOid* inOid);
+  mork_bool MapHasOid(morkEnv* ev, const mdbOid* inOid);
+  mork_bool AddRow(morkEnv* ev, morkRow* ioRow);  // returns ev->Good()
+  mork_bool CutRow(morkEnv* ev, morkRow* ioRow);  // returns ev->Good()
+  mork_bool CutAllRows(morkEnv* ev);              // returns ev->Good()
+
+  mork_pos MoveRow(
+      morkEnv* ev, morkRow* ioRow,  // change row position
+      mork_pos inHintFromPos,       // suggested hint regarding start position
+      mork_pos inToPos);            // desired new position for row ioRow
+  // MoveRow() returns the actual position of ioRow afterwards; this
+  // position is -1 if and only if ioRow was not found as a member.
+
+  morkTableRowCursor* NewTableRowCursor(morkEnv* ev, mork_pos inRowPos);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakTable(morkTable* me, morkEnv* ev, morkTable** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongTable(morkTable* me, morkEnv* ev, morkTable** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// use negative values for kCut and kAdd, to keep non-neg move pos distinct:
+#define morkTableChange_kCut ((mork_pos)-1)  /* shows row was cut */
+#define morkTableChange_kAdd ((mork_pos)-2)  /* shows row was added */
+#define morkTableChange_kNone ((mork_pos)-3) /* unknown change */
+
+// morkTableChange: one recorded row change (add, cut, or move) for a table.
+// Instances chain through the inherited morkNext link; the change kind is
+// encoded in mTableChange_Pos using the sentinel values above.
+class morkTableChange : public morkNext {
+ public: // state is public because the entire Mork system is private
+  morkRow* mTableChange_Row;  // the row in the change
+
+  mork_pos mTableChange_Pos;  // kAdd, kCut, or non-neg for row move
+
+ public:
+  morkTableChange(morkEnv* ev, mork_change inChange, morkRow* ioRow);
+  // use this constructor for inChange == morkChange_kAdd or morkChange_kCut
+
+  morkTableChange(morkEnv* ev, morkRow* ioRow, mork_pos inPos);
+  // use this constructor when the row is moved
+
+ public:
+  void UnknownChangeError(
+      morkEnv* ev) const;  // morkChange_kAdd or morkChange_kCut
+  void NegativeMovePosError(
+      morkEnv* ev) const;  // move must be non-neg position
+
+ public: // predicates decoding the change kind from mTableChange_Pos
+  mork_bool IsAddRowTableChange() const {
+    return (mTableChange_Pos == morkTableChange_kAdd);
+  }
+
+  mork_bool IsCutRowTableChange() const {
+    return (mTableChange_Pos == morkTableChange_kCut);
+  }
+
+  mork_bool IsMoveRowTableChange() const { return (mTableChange_Pos >= 0); }
+
+ public:
+  mork_pos GetMovePos() const { return mTableChange_Pos; }
+  // GetMovePos() assumes that IsMoveRowTableChange() is true.
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kTableMap /*i*/ 0x744D /* ascii 'tM' */
+
+/*| morkTableMap: maps mork_token -> morkTable
+|*/
+// The base class is selected at compile time: a bead map or a node map,
+// depending on MORK_BEAD_OVER_NODE_MAPS; both variants expose the same
+// typesafe AddTable/CutTable/GetTable/CutAllTables wrappers below.
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+class morkTableMap : public morkBeadMap {
+#else  /*MORK_BEAD_OVER_NODE_MAPS*/
+class morkTableMap : public morkNodeMap {  // for mapping tokens to tables
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+
+ public:
+  virtual ~morkTableMap();
+  morkTableMap(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+               nsIMdbHeap* ioSlotHeap);
+
+ public: // other map methods
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+  mork_bool AddTable(morkEnv* ev, morkTable* ioTable) {
+    return this->AddBead(ev, ioTable);
+  }
+  // the AddTable() boolean return equals ev->Good().
+
+  mork_bool CutTable(morkEnv* ev, mork_tid inTid) {
+    return this->CutBead(ev, inTid);
+  }
+  // The CutTable() boolean return indicates whether removal happened.
+
+  morkTable* GetTable(morkEnv* ev, mork_tid inTid) {
+    return (morkTable*)this->GetBead(ev, inTid);
+  }
+  // Note the returned table does NOT have an increase in refcount for this.
+
+  mork_num CutAllTables(morkEnv* ev) { return this->CutAllBeads(ev); }
+  // CutAllTables() releases all the referenced table values.
+
+#else  /*MORK_BEAD_OVER_NODE_MAPS*/
+  mork_bool AddTable(morkEnv* ev, morkTable* ioTable) {
+    return this->AddNode(ev, ioTable->TableId(), ioTable);
+  }
+  // the AddTable() boolean return equals ev->Good().
+
+  mork_bool CutTable(morkEnv* ev, mork_tid inTid) {
+    return this->CutNode(ev, inTid);
+  }
+  // The CutTable() boolean return indicates whether removal happened.
+
+  morkTable* GetTable(morkEnv* ev, mork_tid inTid) {
+    return (morkTable*)this->GetNode(ev, inTid);
+  }
+  // Note the returned table does NOT have an increase in refcount for this.
+
+  mork_num CutAllTables(morkEnv* ev) { return this->CutAllNodes(ev); }
+  // CutAllTables() releases all the referenced table values.
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+};
+
+// morkTableMapIter: typesafe iterator over a morkTableMap.  Note the two
+// compile-time variants have DIFFERENT iteration signatures: the bead-map
+// variant returns morkTable* directly, while the node-map variant returns
+// a mork_change* and writes the tid/table through out-parameters.
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+class morkTableMapIter : public morkBeadMapIter {
+#else  /*MORK_BEAD_OVER_NODE_MAPS*/
+class morkTableMapIter : public morkMapIter {  // typesafe wrapper class
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+
+ public:
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+  morkTableMapIter(morkEnv* ev, morkTableMap* ioMap)
+      : morkBeadMapIter(ev, ioMap) {}
+
+  morkTableMapIter() : morkBeadMapIter() {}
+  void InitTableMapIter(morkEnv* ev, morkTableMap* ioMap) {
+    this->InitBeadMapIter(ev, ioMap);
+  }
+
+  morkTable* FirstTable(morkEnv* ev) { return (morkTable*)this->FirstBead(ev); }
+
+  morkTable* NextTable(morkEnv* ev) { return (morkTable*)this->NextBead(ev); }
+
+  morkTable* HereTable(morkEnv* ev) { return (morkTable*)this->HereBead(ev); }
+
+#else  /*MORK_BEAD_OVER_NODE_MAPS*/
+  morkTableMapIter(morkEnv* ev, morkTableMap* ioMap) : morkMapIter(ev, ioMap) {}
+
+  morkTableMapIter() : morkMapIter() {}
+  void InitTableMapIter(morkEnv* ev, morkTableMap* ioMap) {
+    this->InitMapIter(ev, ioMap);
+  }
+
+  mork_change* FirstTable(morkEnv* ev, mork_tid* outTid, morkTable** outTable) {
+    return this->First(ev, outTid, outTable);
+  }
+
+  mork_change* NextTable(morkEnv* ev, mork_tid* outTid, morkTable** outTable) {
+    return this->Next(ev, outTid, outTable);
+  }
+
+  mork_change* HereTable(morkEnv* ev, mork_tid* outTid, morkTable** outTable) {
+    return this->Here(ev, outTid, outTable);
+  }
+
+  // cutting while iterating hash map might dirty the parent table:
+  mork_change* CutHereTable(morkEnv* ev, mork_tid* outTid,
+                            morkTable** outTable) {
+    return this->CutHere(ev, outTid, outTable);
+  }
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKTABLE_ */
diff --git a/comm/mailnews/db/mork/morkTableRowCursor.cpp b/comm/mailnews/db/mork/morkTableRowCursor.cpp
new file mode 100644
index 0000000000..6644d2c2b3
--- /dev/null
+++ b/comm/mailnews/db/mork/morkTableRowCursor.cpp
@@ -0,0 +1,410 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKTABLEROWCURSOR_
+# include "morkTableRowCursor.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Close the cursor exactly once: only when still open, bracketed by
+// MarkClosing()/MarkShut() so re-entrant closes become no-ops.
+/*public virtual*/ void morkTableRowCursor::CloseMorkNode(
+    morkEnv* ev)  // CloseTableRowCursor() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseTableRowCursor(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkTableRowCursor::~morkTableRowCursor()  // CloseTableRowCursor() executed
+                                           // earlier
+{
+  CloseMorkNode(mMorkEnv);
+  MORK_ASSERT(this->IsShutNode());
+}
+
+// Construct a cursor over ioTable starting at inRowPos.  Holds only a WEAK
+// reference to the table, and snapshots the table seed so later membership
+// changes can be detected.  A nil ioTable is reported via NilPointerError().
+/*public non-poly*/
+morkTableRowCursor::morkTableRowCursor(morkEnv* ev, const morkUsage& inUsage,
+                                       nsIMdbHeap* ioHeap, morkTable* ioTable,
+                                       mork_pos inRowPos)
+    : morkCursor(ev, inUsage, ioHeap), mTableRowCursor_Table(0) {
+  if (ev->Good()) {
+    if (ioTable) {
+      mCursor_Pos = inRowPos;
+      mCursor_Seed = ioTable->TableSeed();
+      morkTable::SlotWeakTable(ioTable, ev, &mTableRowCursor_Table);
+      if (ev->Good()) mNode_Derived = morkDerived_kTableRowCursor;
+    } else
+      ev->NilPointerError();
+  }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkTableRowCursor, morkCursor,
+                            nsIMdbTableRowCursor)
+// Release the weak table slot and reset position/seed before closing the
+// underlying morkCursor state.
+/*public non-poly*/ void morkTableRowCursor::CloseTableRowCursor(morkEnv* ev) {
+  if (this->IsNode()) {
+    mCursor_Pos = -1;
+    mCursor_Seed = 0;
+    morkTable::SlotWeakTable((morkTable*)0, ev, &mTableRowCursor_Table);
+    this->CloseCursor(ev);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+// { ----- begin attribute methods -----
+// Report the member-row count of the underlying table (0 on failure);
+// outCount may be nil, in which case only the error code is returned.
+/*virtual*/ nsresult morkTableRowCursor::GetCount(nsIMdbEnv* mev,
+                                                  mdb_count* outCount) {
+  nsresult outErr = NS_OK;
+  mdb_count count = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    count = GetMemberCount(ev);
+    outErr = ev->AsErr();
+  }
+  if (outCount) *outCount = count;
+  return outErr;
+}
+
+/*virtual*/ nsresult morkTableRowCursor::GetSeed(nsIMdbEnv* mev,
+                                                 mdb_seed* outSeed) {
+  NS_ASSERTION(false, "not implemented");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// Set the cursor position directly; no range validation is performed here.
+/*virtual*/ nsresult morkTableRowCursor::SetPos(nsIMdbEnv* mev, mdb_pos inPos) {
+  mCursor_Pos = inPos;
+  return NS_OK;
+}
+
+// Return the current cursor position through *outPos.
+// Fix: guard the out-pointer before dereferencing, matching the style of
+// GetDoFailOnSeedOutOfSync() below; the original dereferenced outPos
+// unconditionally and would crash on a nil argument.
+/*virtual*/ nsresult morkTableRowCursor::GetPos(nsIMdbEnv* mev,
+                                                mdb_pos* outPos) {
+  NS_ENSURE_ARG_POINTER(outPos);
+  *outPos = mCursor_Pos;
+  return NS_OK;
+}
+
+// Record whether iteration should fail when the table seed no longer
+// matches the seed snapshot taken at cursor construction.
+/*virtual*/ nsresult morkTableRowCursor::SetDoFailOnSeedOutOfSync(
+    nsIMdbEnv* mev, mdb_bool inFail) {
+  mCursor_DoFailOnSeedOutOfSync = inFail;
+  return NS_OK;
+}
+
+/*virtual*/ nsresult morkTableRowCursor::GetDoFailOnSeedOutOfSync(
+    nsIMdbEnv* mev, mdb_bool* outFail) {
+  NS_ENSURE_ARG_POINTER(outFail);
+  *outFail = mCursor_DoFailOnSeedOutOfSync;
+  return NS_OK;
+}
+// } ----- end attribute methods -----
+
+// { ===== begin nsIMdbTableRowCursor methods =====
+
+// { ----- begin attribute methods -----
+
+// Hand out a STRONG nsIMdbTable handle for the cursor's table (caller owns
+// the acquired reference); *acqTable is nil when the weak table slot is gone.
+NS_IMETHODIMP
+morkTableRowCursor::GetTable(nsIMdbEnv* mev, nsIMdbTable** acqTable) {
+  nsresult outErr = NS_OK;
+  nsIMdbTable* outTable = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (mTableRowCursor_Table)
+      outTable = mTableRowCursor_Table->AcquireTableHandle(ev);
+
+    outErr = ev->AsErr();
+  }
+  if (acqTable) *acqTable = outTable;
+  return outErr;
+}
+// } ----- end attribute methods -----
+
+// { ----- begin oid iteration methods -----
+// Thin XPCOM wrapper over the morkEnv-level NextRowOid(); outOid is
+// required, outRowPos is optional and receives -1 on failure.
+NS_IMETHODIMP
+morkTableRowCursor::NextRowOid(  // get row id of next row in the table
+    nsIMdbEnv* mev,              // context
+    mdbOid* outOid,              // out row oid
+    mdb_pos* outRowPos) {
+  nsresult outErr = NS_OK;
+  mork_pos pos = -1;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (outOid) {
+      pos = NextRowOid(ev, outOid);
+    } else
+      ev->NilPointerError();
+    outErr = ev->AsErr();
+  }
+  if (outRowPos) *outRowPos = pos;
+  return outErr;
+}
+
+// Mirror of NextRowOid() above, walking backwards.
+NS_IMETHODIMP
+morkTableRowCursor::PrevRowOid(  // get row id of previous row in the table
+    nsIMdbEnv* mev,              // context
+    mdbOid* outOid,              // out row oid
+    mdb_pos* outRowPos) {
+  nsresult outErr = NS_OK;
+  mork_pos pos = -1;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    if (outOid) {
+      pos = PrevRowOid(ev, outOid);
+    } else
+      ev->NilPointerError();
+    outErr = ev->AsErr();
+  }
+  if (outRowPos) *outRowPos = pos;
+  return outErr;
+}
+// } ----- end oid iteration methods -----
+
+// { ----- begin row iteration methods -----
+// Advance to the next member row and hand out a STRONG nsIMdbRow handle
+// (nil when iteration is exhausted); the internal oid result is discarded.
+NS_IMETHODIMP
+morkTableRowCursor::NextRow(  // get row cells from table for cells already in
+                              // row
+    nsIMdbEnv* mev,           // context
+    nsIMdbRow** acqRow,       // acquire next row in table
+    mdb_pos* outRowPos) {
+  nsresult outErr = NS_OK;
+  nsIMdbRow* outRow = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    mdbOid oid;  // place to put oid we intend to ignore
+    morkRow* row = NextRow(ev, &oid, outRowPos);
+    if (row) {
+      morkStore* store = row->GetRowSpaceStore(ev);
+      if (store) outRow = row->AcquireRowHandle(ev, store);
+    }
+    outErr = ev->AsErr();
+  }
+  if (acqRow) *acqRow = outRow;
+  return outErr;
+}
+
+// Mirror of NextRow() above, walking backwards.
+NS_IMETHODIMP
+morkTableRowCursor::PrevRow(  // get row cells from table for cells already in
+                              // row
+    nsIMdbEnv* mev,           // context
+    nsIMdbRow** acqRow,       // acquire previous row in table
+    mdb_pos* outRowPos) {
+  nsresult outErr = NS_OK;
+  nsIMdbRow* outRow = 0;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    mdbOid oid;  // place to put oid we intend to ignore
+    morkRow* row = PrevRow(ev, &oid, outRowPos);
+    if (row) {
+      morkStore* store = row->GetRowSpaceStore(ev);
+      if (store) outRow = row->AcquireRowHandle(ev, store);
+    }
+    outErr = ev->AsErr();
+  }
+  if (acqRow) *acqRow = outRow;
+  return outErr;
+}
+
+// } ----- end row iteration methods -----
+
+// { ----- begin duplicate row removal methods -----
+// Table-backed cursors can never contain duplicates, so this delegates to
+// the morkEnv overload, which always reports false.
+NS_IMETHODIMP
+morkTableRowCursor::CanHaveDupRowMembers(
+    nsIMdbEnv* mev,  // cursor might hold dups?
+    mdb_bool* outCanHaveDups) {
+  nsresult outErr = NS_OK;
+  mdb_bool canHaveDups = mdbBool_kFalse;
+
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    canHaveDups = CanHaveDupRowMembers(ev);
+    outErr = ev->AsErr();
+  }
+  if (outCanHaveDups) *outCanHaveDups = canHaveDups;
+  return outErr;
+}
+
+NS_IMETHODIMP
+morkTableRowCursor::MakeUniqueCursor(  // clone cursor, removing duplicate rows
+    nsIMdbEnv* mev,                    // context
+    nsIMdbTableRowCursor** acqCursor)  // acquire clone with no dups
+// Note that MakeUniqueCursor() is never necessary for a cursor which was
+// created by table method nsIMdbTable::GetTableRowCursor(), because a table
+// never contains the same row as a member more than once. However, a cursor
+// created by table method nsIMdbTable::FindRowMatches() might contain the
+// same row more than once, because the same row can generate a hit by more
+// than one column with a matching string prefix. Note this method can
+// return the very same cursor instance with just an incremented refcount,
+// when the original cursor could not contain any duplicate rows (calling
+// CanHaveDupRowMembers() shows this case on a false return). Otherwise
+// this method returns a different cursor instance. Callers should not use
+// this MakeUniqueCursor() method lightly, because it tends to defeat the
+// purpose of lazy programming techniques, since it can force creation of
+// an explicit row collection in a new cursor's representation, in order to
+// inspect the row membership and remove any duplicates; this can have big
+// impact if a collection holds tens of thousands of rows or more, when
+// the original cursor with dups simply referenced rows indirectly by row
+// position ranges, without using an explicit row set representation.
+// Callers are encouraged to use nsIMdbCursor::GetCount() to determine
+// whether the row collection is very large (tens of thousands), and to
+// delay calling MakeUniqueCursor() when possible, until a user interface
+// element actually demands the creation of an explicit set representation.
+{
+  nsresult outErr = NS_OK;
+  nsIMdbTableRowCursor* outCursor = 0;
+
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    // This cursor can never hold dups, so return |this| with a new
+    // strong reference instead of building a clone.
+    AddRef();
+    outCursor = this;
+
+    outErr = ev->AsErr();
+  }
+  if (acqCursor) *acqCursor = outCursor;
+  return outErr;
+}
+// } ----- end duplicate row removal methods -----
+
+// } ===== end nsIMdbTableRowCursor methods =====
+
+/*static*/ void morkTableRowCursor::NonTableRowCursorTypeError(morkEnv* ev) {
+  ev->NewError("non morkTableRowCursor");
+}
+
+// Oid-only iteration helpers: delegate to NextRow()/PrevRow() and keep just
+// the position, ignoring the returned row pointer.
+mdb_pos morkTableRowCursor::NextRowOid(morkEnv* ev, mdbOid* outOid) {
+  mdb_pos outPos = -1;
+  (void)this->NextRow(ev, outOid, &outPos);
+  return outPos;
+}
+
+mdb_pos morkTableRowCursor::PrevRowOid(morkEnv* ev, mdbOid* outOid) {
+  mdb_pos outPos = -1;
+  (void)this->PrevRow(ev, outOid, &outPos);
+  return outPos;
+}
+
+mork_bool morkTableRowCursor::CanHaveDupRowMembers(morkEnv* ev) {
+  return morkBool_kFalse;  // false default is correct
+}
+
+// Member count comes straight from the table's row array fill; a dead weak
+// table reference yields zero.
+mork_count morkTableRowCursor::GetMemberCount(morkEnv* ev) {
+  morkTable* table = mTableRowCursor_Table;
+  if (table)
+    return table->mTable_RowArray.mArray_Fill;
+  else
+    return 0;
+}
+
+// Step the cursor backwards one row.  On success returns the row, writes
+// its oid to *outOid, and commits the new position to mCursor_Pos.  When
+// iteration runs off the front, returns nil, leaves mCursor_Pos unchanged,
+// and writes a sentinel oid (scope 0, id morkId_kMinusOne).  *outPos always
+// receives the attempted position (-1 on error paths).
+morkRow* morkTableRowCursor::PrevRow(morkEnv* ev, mdbOid* outOid,
+                                     mdb_pos* outPos) {
+  morkRow* outRow = 0;
+  mork_pos pos = -1;
+
+  morkTable* table = mTableRowCursor_Table;
+  if (table) {
+    if (table->IsOpenNode()) {
+      morkArray* array = &table->mTable_RowArray;
+      pos = mCursor_Pos - 1;
+
+      if (pos >= 0 && pos < (mork_pos)(array->mArray_Fill)) {
+        mCursor_Pos = pos;  // update for next time
+        morkRow* row = (morkRow*)array->At(pos);
+        if (row) {
+          if (row->IsRow()) {
+            outRow = row;
+            *outOid = row->mRow_Oid;
+          } else
+            row->NonRowTypeError(ev);
+        } else
+          ev->NilPointerError();
+      } else {
+        outOid->mOid_Scope = 0;
+        outOid->mOid_Id = morkId_kMinusOne;
+      }
+    } else
+      table->NonOpenNodeError(ev);
+  } else
+    ev->NilPointerError();
+
+  *outPos = pos;
+  return outRow;
+}
+
+// Step the cursor forwards one row.  A negative starting position is
+// normalized to 0 (so a fresh cursor at -1 yields the first row); otherwise
+// the position is pre-incremented.  Success/failure behavior mirrors
+// PrevRow() above, including the sentinel oid when iteration is exhausted.
+morkRow* morkTableRowCursor::NextRow(morkEnv* ev, mdbOid* outOid,
+                                     mdb_pos* outPos) {
+  morkRow* outRow = 0;
+  mork_pos pos = -1;
+
+  morkTable* table = mTableRowCursor_Table;
+  if (table) {
+    if (table->IsOpenNode()) {
+      morkArray* array = &table->mTable_RowArray;
+      pos = mCursor_Pos;
+      if (pos < 0)
+        pos = 0;
+      else
+        ++pos;
+
+      if (pos < (mork_pos)(array->mArray_Fill)) {
+        mCursor_Pos = pos;  // update for next time
+        morkRow* row = (morkRow*)array->At(pos);
+        if (row) {
+          if (row->IsRow()) {
+            outRow = row;
+            *outOid = row->mRow_Oid;
+          } else
+            row->NonRowTypeError(ev);
+        } else
+          ev->NilPointerError();
+      } else {
+        outOid->mOid_Scope = 0;
+        outOid->mOid_Id = morkId_kMinusOne;
+      }
+    } else
+      table->NonOpenNodeError(ev);
+  } else
+    ev->NilPointerError();
+
+  *outPos = pos;
+  return outRow;
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkTableRowCursor.h b/comm/mailnews/db/mork/morkTableRowCursor.h
new file mode 100644
index 0000000000..9801eb174b
--- /dev/null
+++ b/comm/mailnews/db/mork/morkTableRowCursor.h
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKTABLEROWCURSOR_
+#define _MORKTABLEROWCURSOR_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class orkinTableRowCursor;
+#define morkDerived_kTableRowCursor /*i*/ 0x7243 /* ascii 'rC' */
+
+// morkTableRowCursor: iterates the member rows of a morkTable by position,
+// holding only a weak reference to the table.  Implements the public
+// nsIMdbTableRowCursor interface on top of morkCursor.
+class morkTableRowCursor : public morkCursor,
+                           public nsIMdbTableRowCursor {  // row iterator
+
+  // public: // slots inherited from morkObject (meant to inform only)
+  // nsIMdbHeap*    mNode_Heap;
+  // mork_able    mNode_Mutable; // can this node be modified?
+  // mork_load    mNode_Load;    // is this node clean or dirty?
+  // mork_base    mNode_Base;    // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+  // mork_access  mNode_Access;  // kOpen, kClosing, kShut, or kDead
+  // mork_usage   mNode_Usage;   // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_uses    mNode_Uses;    // refcount for strong refs
+  // mork_refs    mNode_Refs;    // refcount for strong refs + weak refs
+
+  // morkFactory* mObject_Factory;  // weak ref to suite factory
+
+  // mork_seed  mCursor_Seed;
+  // mork_pos   mCursor_Pos;
+  // mork_bool  mCursor_DoFailOnSeedOutOfSync;
+  // mork_u1    mCursor_Pad[ 3 ]; // explicitly pad to u4 alignment
+
+ public: // state is public because the entire Mork system is private
+  morkTable* mTableRowCursor_Table;  // weak ref to table
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev) override;  // CloseTableRowCursor()
+
+ protected:
+  virtual ~morkTableRowCursor();  // assert that close executed earlier
+
+ public: // morkTableRowCursor construction & destruction
+  morkTableRowCursor(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                     morkTable* ioTable, mork_pos inRowPos);
+  void CloseTableRowCursor(morkEnv* ev);  // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkTableRowCursor(const morkTableRowCursor& other);
+  morkTableRowCursor& operator=(const morkTableRowCursor& other);
+
+ public:
+  NS_DECL_ISUPPORTS_INHERITED
+
+  // { ----- begin attribute methods -----
+  NS_IMETHOD GetCount(nsIMdbEnv* ev, mdb_count* outCount) override;  // readonly
+  NS_IMETHOD GetSeed(nsIMdbEnv* ev, mdb_seed* outSeed) override;     // readonly
+
+  NS_IMETHOD SetPos(nsIMdbEnv* ev, mdb_pos inPos) override;  // mutable
+  NS_IMETHOD GetPos(nsIMdbEnv* ev, mdb_pos* outPos) override;
+
+  NS_IMETHOD SetDoFailOnSeedOutOfSync(nsIMdbEnv* ev, mdb_bool inFail) override;
+  NS_IMETHOD GetDoFailOnSeedOutOfSync(nsIMdbEnv* ev,
+                                      mdb_bool* outFail) override;
+
+  // } ----- end attribute methods -----
+  NS_IMETHOD GetTable(nsIMdbEnv* ev, nsIMdbTable** acqTable) override;
+  // } ----- end attribute methods -----
+
+  // { ----- begin duplicate row removal methods -----
+  NS_IMETHOD CanHaveDupRowMembers(nsIMdbEnv* ev,  // cursor might hold dups?
+                                  mdb_bool* outCanHaveDups) override;
+
+  NS_IMETHOD MakeUniqueCursor(  // clone cursor, removing duplicate rows
+      nsIMdbEnv* ev,            // context
+      nsIMdbTableRowCursor** acqCursor) override;  // acquire clone with no dups
+  // } ----- end duplicate row removal methods -----
+
+  // { ----- begin oid iteration methods -----
+  NS_IMETHOD NextRowOid(  // get row id of next row in the table
+      nsIMdbEnv* ev,      // context
+      mdbOid* outOid,     // out row oid
+      mdb_pos* outRowPos) override;  // zero-based position of the row in table
+  NS_IMETHOD PrevRowOid(             // get row id of previous row in the table
+      nsIMdbEnv* ev,                 // context
+      mdbOid* outOid,                // out row oid
+      mdb_pos* outRowPos) override;  // zero-based position of the row in table
+  // } ----- end oid iteration methods -----
+
+  // { ----- begin row iteration methods -----
+  NS_IMETHOD NextRow(   // get row cells from table for cells already in row
+      nsIMdbEnv* ev,    // context
+      nsIMdbRow** acqRow,            // acquire next row in table
+      mdb_pos* outRowPos) override;  // zero-based position of the row in table
+  NS_IMETHOD PrevRow(  // get row cells from table for cells already in row
+      nsIMdbEnv* ev,   // context
+      nsIMdbRow** acqRow,            // acquire previous row in table
+      mdb_pos* outRowPos) override;  // zero-based position of the row in table
+
+  // } ----- end row iteration methods -----
+
+ public: // dynamic type identification
+  mork_bool IsTableRowCursor() const {
+    return IsNode() && mNode_Derived == morkDerived_kTableRowCursor;
+  }
+  // } ===== end morkNode methods =====
+
+ public: // typing
+  static void NonTableRowCursorTypeError(morkEnv* ev);
+
+ public: // oid only iteration
+  mdb_pos NextRowOid(morkEnv* ev, mdbOid* outOid);
+  mdb_pos PrevRowOid(morkEnv* ev, mdbOid* outOid);
+
+ public: // other table row cursor methods
+  virtual mork_bool CanHaveDupRowMembers(morkEnv* ev);
+  virtual mork_count GetMemberCount(morkEnv* ev);
+
+  virtual morkRow* NextRow(morkEnv* ev, mdbOid* outOid, mdb_pos* outPos);
+  virtual morkRow* PrevRow(morkEnv* ev, mdbOid* outOid, mdb_pos* outPos);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakTableRowCursor(morkTableRowCursor* me, morkEnv* ev,
+                                     morkTableRowCursor** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongTableRowCursor(morkTableRowCursor* me, morkEnv* ev,
+                                       morkTableRowCursor** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKTABLEROWCURSOR_ */
diff --git a/comm/mailnews/db/mork/morkThumb.cpp b/comm/mailnews/db/mork/morkThumb.cpp
new file mode 100644
index 0000000000..3076ca6f3e
--- /dev/null
+++ b/comm/mailnews/db/mork/morkThumb.cpp
@@ -0,0 +1,455 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKTHUMB_
+# include "morkThumb.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+// #ifndef _MORKFILE_
+// #include "morkFile.h"
+// #endif
+
+#ifndef _MORKWRITER_
+# include "morkWriter.h"
+#endif
+
+#ifndef _MORKPARSER_
+# include "morkParser.h"
+#endif
+
+#ifndef _MORKBUILDER_
+# include "morkBuilder.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Close the thumb exactly once: only when still open, bracketed by
+// MarkClosing()/MarkShut().
+/*public virtual*/ void morkThumb::CloseMorkNode(
+    morkEnv* ev)  // CloseThumb() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseThumb(ev);
+    this->MarkShut();
+  }
+}
+
+/*public virtual*/
+morkThumb::~morkThumb()  // assert CloseThumb() executed earlier
+{
+  CloseMorkNode(mMorkEnv);
+  MORK_ASSERT(mThumb_Magic == 0);
+  MORK_ASSERT(mThumb_Store == 0);
+  MORK_ASSERT(mThumb_File == 0);
+}
+
+// Construct a thumb with all slots zeroed; mThumb_Magic is set only after
+// ioSlotHeap is verified non-nil, so a magic of 0 marks a failed construct.
+// Callers (the Make_* factories below) fill in store/file/writer/builder.
+/*public non-poly*/
+morkThumb::morkThumb(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                     nsIMdbHeap* ioSlotHeap, mork_magic inMagic)
+    : morkObject(ev, inUsage, ioHeap, morkColor_kNone, (morkHandle*)0),
+      mThumb_Magic(0),
+      mThumb_Total(0),
+      mThumb_Current(0)
+
+      ,
+      mThumb_Done(morkBool_kFalse),
+      mThumb_Broken(morkBool_kFalse),
+      mThumb_Seed(0)
+
+      ,
+      mThumb_Store(0),
+      mThumb_File(0),
+      mThumb_Writer(0),
+      mThumb_Builder(0),
+      mThumb_SourcePort(0)
+
+      ,
+      mThumb_DoCollect(morkBool_kFalse) {
+  if (ev->Good()) {
+    if (ioSlotHeap) {
+      mThumb_Magic = inMagic;
+      mNode_Derived = morkDerived_kThumb;
+    } else
+      ev->NilPointerError();
+  }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(morkThumb, morkObject, nsIMdbThumb)
+
+// Release every strong slot (builder, writer, file, store, source port) and
+// zero the magic.  The store is told to forget its builder first so the
+// builder slot release below does not leave a dangling store-side pointer.
+/*public non-poly*/ void morkThumb::CloseThumb(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    mThumb_Magic = 0;
+    if (mThumb_Builder && mThumb_Store) mThumb_Store->ForgetBuilder(ev);
+    morkBuilder::SlotStrongBuilder((morkBuilder*)0, ev, &mThumb_Builder);
+
+    morkWriter::SlotStrongWriter((morkWriter*)0, ev, &mThumb_Writer);
+    nsIMdbFile_SlotStrongFile((nsIMdbFile*)0, ev, &mThumb_File);
+    morkStore::SlotStrongStore((morkStore*)0, ev, &mThumb_Store);
+    morkStore::SlotStrongPort((morkPort*)0, ev, &mThumb_SourcePort);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// { ===== begin nsIMdbThumb methods =====
+// XPCOM wrapper over the non-poly GetProgress(); all out-params optional.
+NS_IMETHODIMP
+morkThumb::GetProgress(nsIMdbEnv* mev, mdb_count* outTotal,
+                       mdb_count* outCurrent, mdb_bool* outDone,
+                       mdb_bool* outBroken) {
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    GetProgress(ev, outTotal, outCurrent, outDone, outBroken);
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+// XPCOM wrapper over the non-poly DoMore(): performs one work increment.
+NS_IMETHODIMP
+morkThumb::DoMore(nsIMdbEnv* mev, mdb_count* outTotal, mdb_count* outCurrent,
+                  mdb_bool* outDone, mdb_bool* outBroken) {
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    DoMore(ev, outTotal, outCurrent, outDone, outBroken);
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+
+// Mark the operation done-and-broken, then close the thumb node.
+NS_IMETHODIMP
+morkThumb::CancelAndBreakThumb(nsIMdbEnv* mev) {
+  nsresult outErr = NS_OK;
+  morkEnv* ev = morkEnv::FromMdbEnv(mev);
+  if (ev) {
+    mThumb_Done = morkBool_kTrue;
+    mThumb_Broken = morkBool_kTrue;
+    CloseMorkNode(ev);  // should I close this here?
+    outErr = ev->AsErr();
+  }
+  return outErr;
+}
+// } ===== end nsIMdbThumb methods =====
+
+// Error reporters: each records a fixed message on the environment; none
+// of them throws or returns a status.
+/*static*/ void morkThumb::NonThumbTypeError(morkEnv* ev) {
+  ev->NewError("non morkThumb");
+}
+
+/*static*/ void morkThumb::UnsupportedThumbMagicError(morkEnv* ev) {
+  ev->NewError("unsupported mThumb_Magic");
+}
+
+/*static*/ void morkThumb::NilThumbStoreError(morkEnv* ev) {
+  ev->NewError("nil mThumb_Store");
+}
+
+/*static*/ void morkThumb::NilThumbFileError(morkEnv* ev) {
+  ev->NewError("nil mThumb_File");
+}
+
+/*static*/ void morkThumb::NilThumbWriterError(morkEnv* ev) {
+  ev->NewError("nil mThumb_Writer");
+}
+
+/*static*/ void morkThumb::NilThumbBuilderError(morkEnv* ev) {
+  ev->NewError("nil mThumb_Builder");
+}
+
+/*static*/ void morkThumb::NilThumbSourcePortError(morkEnv* ev) {
+  ev->NewError("nil mThumb_SourcePort");
+}
+
+// Factory: build a thumb that tracks parsing ioStore's file into memory.
+// Progress total is the file EOF in bytes; the thumb takes strong refs on
+// the store and its lazily-created builder.  Returns nil (with an error
+// noted on ev) when the heap/store/file is missing or allocation fails.
+/*static*/ morkThumb* morkThumb::Make_OpenFileStore(morkEnv* ev,
+                                                    nsIMdbHeap* ioHeap,
+                                                    morkStore* ioStore) {
+  morkThumb* outThumb = 0;
+  if (ioHeap && ioStore) {
+    nsIMdbFile* file = ioStore->mStore_File;
+    if (file) {
+      mork_pos fileEof = 0;
+      file->Eof(ev->AsMdbEnv(), &fileEof);
+      if (ev->Good()) {
+        outThumb =
+            new (*ioHeap, ev) morkThumb(ev, morkUsage::kHeap, ioHeap, ioHeap,
+                                        morkThumb_kMagic_OpenFileStore);
+
+        if (outThumb) {
+          morkBuilder* builder = ioStore->LazyGetBuilder(ev);
+          if (builder) {
+            outThumb->mThumb_Total = (mork_count)fileEof;
+            morkStore::SlotStrongStore(ioStore, ev, &outThumb->mThumb_Store);
+            morkBuilder::SlotStrongBuilder(builder, ev,
+                                           &outThumb->mThumb_Builder);
+          }
+        }
+      }
+    } else
+      ioStore->NilStoreFileError(ev);
+  } else
+    ev->NilPointerError();
+
+  return outThumb;
+}
+
+// Factory: build a thumb driving an incremental (append-only) commit.
+// Allocates a writer with NeedDirtyAll off — only dirty content is written
+// as a new commit group — and stamps it with a fresh commit-group identity.
+// The thumb takes strong refs on store and file and owns the writer.
+/*static*/ morkThumb* morkThumb::Make_LargeCommit(morkEnv* ev,
+                                                  nsIMdbHeap* ioHeap,
+                                                  morkStore* ioStore) {
+  morkThumb* outThumb = 0;
+  if (ioHeap && ioStore) {
+    nsIMdbFile* file = ioStore->mStore_File;
+    if (file) {
+      outThumb = new (*ioHeap, ev) morkThumb(
+          ev, morkUsage::kHeap, ioHeap, ioHeap, morkThumb_kMagic_LargeCommit);
+
+      if (outThumb) {
+        morkWriter* writer = new (*ioHeap, ev)
+            morkWriter(ev, morkUsage::kHeap, ioHeap, ioStore, file, ioHeap);
+        if (writer) {
+          writer->mWriter_CommitGroupIdentity =
+              ++ioStore->mStore_CommitGroupIdentity;
+          writer->mWriter_NeedDirtyAll = morkBool_kFalse;
+          outThumb->mThumb_DoCollect = morkBool_kFalse;
+          morkStore::SlotStrongStore(ioStore, ev, &outThumb->mThumb_Store);
+
+          nsIMdbFile_SlotStrongFile(file, ev, &outThumb->mThumb_File);
+
+          outThumb->mThumb_Writer = writer;  // pass writer ownership to thumb
+        }
+      }
+    } else
+      ioStore->NilStoreFileError(ev);
+  } else
+    ev->NilPointerError();
+
+  return outThumb;
+}
+
+// Factory: build a thumb for a compress commit that rewrites the whole file
+// (magic morkThumb_kMagic_CompressCommit).  mWriter_NeedDirtyAll is true so
+// every store object is dirtied and re-serialized; inDoCollect additionally
+// lets unused rows/tables be dropped (see the long note in morkWriter.cpp).
+/*static*/ morkThumb* morkThumb::Make_CompressCommit(morkEnv* ev,
+                                                     nsIMdbHeap* ioHeap,
+                                                     morkStore* ioStore,
+                                                     mork_bool inDoCollect) {
+  morkThumb* outThumb = 0;
+  if (ioHeap && ioStore) {
+    nsIMdbFile* file = ioStore->mStore_File;
+    if (file) {
+      outThumb =
+          new (*ioHeap, ev) morkThumb(ev, morkUsage::kHeap, ioHeap, ioHeap,
+                                      morkThumb_kMagic_CompressCommit);
+
+      if (outThumb) {
+        morkWriter* writer = new (*ioHeap, ev)
+            morkWriter(ev, morkUsage::kHeap, ioHeap, ioStore, file, ioHeap);
+        if (writer) {
+          writer->mWriter_NeedDirtyAll = morkBool_kTrue;
+          outThumb->mThumb_DoCollect = inDoCollect;
+          morkStore::SlotStrongStore(ioStore, ev, &outThumb->mThumb_Store);
+          nsIMdbFile_SlotStrongFile(file, ev, &outThumb->mThumb_File);
+          outThumb->mThumb_Writer = writer;  // pass writer ownership to thumb
+
+          // cope with fact that parsed transaction groups are going away:
+          ioStore->mStore_FirstCommitGroupPos = 0;
+          ioStore->mStore_SecondCommitGroupPos = 0;
+        }
+      }
+    } else
+      ioStore->NilStoreFileError(ev);
+  } else
+    ev->NilPointerError();
+
+  return outThumb;
+}
+
+// { ===== begin non-poly methods imitating nsIMdbThumb =====
+// Report current progress counters.  Pure read-only snapshot; every out
+// parameter is optional (nil pointers are skipped).
+void morkThumb::GetProgress(morkEnv* ev, mdb_count* outTotal,
+                            mdb_count* outCurrent, mdb_bool* outDone,
+                            mdb_bool* outBroken) {
+  MORK_USED_1(ev);
+  if (outTotal) *outTotal = mThumb_Total;
+  if (outCurrent) *outCurrent = mThumb_Current;
+  if (outDone) *outDone = mThumb_Done;
+  if (outBroken) *outBroken = mThumb_Broken;
+}
+
+// Perform one increment of work, dispatched on mThumb_Magic, then report
+// progress through the (all optional) out parameters.  Once mThumb_Done or
+// mThumb_Broken is set, further calls only refresh the out parameters.
+void morkThumb::DoMore(morkEnv* ev, mdb_count* outTotal, mdb_count* outCurrent,
+                       mdb_bool* outDone, mdb_bool* outBroken) {
+  if (!mThumb_Done && !mThumb_Broken) {
+    switch (mThumb_Magic) {
+      case morkThumb_kMagic_OpenFilePort:  // 1 /* factory method */
+        this->DoMore_OpenFilePort(ev);
+        break;
+
+      case morkThumb_kMagic_OpenFileStore:  // 2 /* factory method */
+        this->DoMore_OpenFileStore(ev);
+        break;
+
+      case morkThumb_kMagic_ExportToFormat:  // 3 /* port method */
+        this->DoMore_ExportToFormat(ev);
+        break;
+
+      case morkThumb_kMagic_ImportContent:  // 4 /* store method */
+        this->DoMore_ImportContent(ev);
+        break;
+
+      case morkThumb_kMagic_LargeCommit:  // 5 /* store method */
+        this->DoMore_LargeCommit(ev);
+        break;
+
+      case morkThumb_kMagic_SessionCommit:  // 6 /* store method */
+        this->DoMore_SessionCommit(ev);
+        break;
+
+      case morkThumb_kMagic_CompressCommit:  // 7 /* store method */
+        this->DoMore_CompressCommit(ev);
+        break;
+
+      case morkThumb_kMagic_SearchManyColumns:  // 8 /* table method */
+        this->DoMore_SearchManyColumns(ev);
+        break;
+
+      case morkThumb_kMagic_NewSortColumn:  // 9 /* table method */
+        this->DoMore_NewSortColumn(ev);
+        break;
+
+      case morkThumb_kMagic_NewSortColumnWithCompare:  // 10 /* table method */
+        this->DoMore_NewSortColumnWithCompare(ev);
+        break;
+
+      case morkThumb_kMagic_CloneSortColumn:  // 11 /* table method */
+        this->DoMore_CloneSortColumn(ev);
+        break;
+
+      case morkThumb_kMagic_AddIndex:  // 12 /* table method */
+        this->DoMore_AddIndex(ev);
+        break;
+
+      case morkThumb_kMagic_CutIndex:  // 13 /* table method */
+        this->DoMore_CutIndex(ev);
+        break;
+
+      default:
+        this->UnsupportedThumbMagicError(ev);
+        break;
+    }
+  }
+  if (outTotal) *outTotal = mThumb_Total;
+  if (outCurrent) *outCurrent = mThumb_Current;
+  if (outDone) *outDone = mThumb_Done;
+  if (outBroken) *outBroken = mThumb_Broken;
+}
+
+// Abort the operation: mark the thumb broken so DoMore() becomes a no-op.
+void morkThumb::CancelAndBreakThumb(morkEnv* ev) {
+  MORK_USED_1(ev);
+  mThumb_Broken = morkBool_kTrue;
+}
+
+// } ===== end non-poly methods imitating nsIMdbThumb =====
+
+// Return the store this thumb opened (may be nil if never set).
+morkStore* morkThumb::ThumbToOpenStore(morkEnv* ev)
+// for orkinFactory::ThumbToOpenStore() after OpenFileStore()
+{
+  MORK_USED_1(ev);
+  return mThumb_Store;
+}
+
+// Per-magic work increments.  Only OpenFileStore and the three commit kinds
+// are implemented; the rest report UnsupportedThumbMagicError.
+void morkThumb::DoMore_OpenFilePort(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+// One parse increment: the builder consumes more of the file; progress is
+// the file position reached, measured against mThumb_Total (file EOF).
+void morkThumb::DoMore_OpenFileStore(morkEnv* ev) {
+  morkBuilder* builder = mThumb_Builder;
+  if (builder) {
+    mork_pos pos = 0;
+    builder->ParseMore(ev, &pos, &mThumb_Done, &mThumb_Broken);
+    // mThumb_Total = builder->mBuilder_TotalCount;
+    // mThumb_Current = builder->mBuilder_DoneCount;
+    mThumb_Current = (mork_count)pos;
+  } else {
+    // No builder means we cannot proceed; finish broken so callers stop.
+    this->NilThumbBuilderError(ev);
+    mThumb_Broken = morkBool_kTrue;
+    mThumb_Done = morkBool_kTrue;
+  }
+}
+
+void morkThumb::DoMore_ExportToFormat(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+void morkThumb::DoMore_ImportContent(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+void morkThumb::DoMore_LargeCommit(morkEnv* ev) { this->DoMore_Commit(ev); }
+
+void morkThumb::DoMore_SessionCommit(morkEnv* ev) { this->DoMore_Commit(ev); }
+
+// Shared commit increment: drive the writer one step and mirror its
+// counters into this thumb.  Any env error ends the commit (done + broken).
+void morkThumb::DoMore_Commit(morkEnv* ev) {
+  morkWriter* writer = mThumb_Writer;
+  if (writer) {
+    writer->WriteMore(ev);
+    mThumb_Total = writer->mWriter_TotalCount;
+    mThumb_Current = writer->mWriter_DoneCount;
+    mThumb_Done = (ev->Bad() || writer->IsWritingDone());
+    mThumb_Broken = ev->Bad();
+  } else {
+    this->NilThumbWriterError(ev);
+    mThumb_Broken = morkBool_kTrue;
+    mThumb_Done = morkBool_kTrue;
+  }
+}
+
+void morkThumb::DoMore_CompressCommit(morkEnv* ev) { this->DoMore_Commit(ev); }
+
+void morkThumb::DoMore_SearchManyColumns(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+void morkThumb::DoMore_NewSortColumn(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+void morkThumb::DoMore_NewSortColumnWithCompare(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+void morkThumb::DoMore_CloneSortColumn(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+void morkThumb::DoMore_AddIndex(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+void morkThumb::DoMore_CutIndex(morkEnv* ev) {
+  this->UnsupportedThumbMagicError(ev);
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkThumb.h b/comm/mailnews/db/mork/morkThumb.h
new file mode 100644
index 0000000000..0e4f9f4592
--- /dev/null
+++ b/comm/mailnews/db/mork/morkThumb.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKTHUMB_
+#define _MORKTHUMB_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKOBJECT_
+# include "morkObject.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// Magic codes stored in mThumb_Magic; DoMore() dispatches on these.
+// Only 2 (OpenFileStore), 5, 6, and 7 (commits) are implemented in
+// morkThumb.cpp; the rest currently report UnsupportedThumbMagicError.
+#define morkThumb_kMagic_OpenFilePort 1 /* factory method */
+#define morkThumb_kMagic_OpenFileStore 2 /* factory method */
+#define morkThumb_kMagic_ExportToFormat 3 /* port method */
+#define morkThumb_kMagic_ImportContent 4 /* store method */
+#define morkThumb_kMagic_LargeCommit 5 /* store method */
+#define morkThumb_kMagic_SessionCommit 6 /* store method */
+#define morkThumb_kMagic_CompressCommit 7 /* store method */
+#define morkThumb_kMagic_SearchManyColumns 8 /* table method */
+#define morkThumb_kMagic_NewSortColumn 9 /* table method */
+#define morkThumb_kMagic_NewSortColumnWithCompare 10 /* table method */
+#define morkThumb_kMagic_CloneSortColumn 11 /* table method */
+#define morkThumb_kMagic_AddIndex 12 /* table method */
+#define morkThumb_kMagic_CutIndex 13 /* table method */
+
+#define morkDerived_kThumb /*i*/ 0x5468 /* ascii 'Th' */
+
+/*| morkThumb: incremental-operation handle.  A thumb packages a long-running
+**| store operation (file open/parse, commit) so callers can repeatedly call
+**| DoMore() and poll progress, per the nsIMdbThumb contract.
+|*/
+class morkThumb : public morkObject, public nsIMdbThumb {
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap*    mNode_Heap;
+
+  // mork_base      mNode_Base;     // must equal morkBase_kNode
+  // mork_derived   mNode_Derived;  // depends on specific node subclass
+
+  // mork_access    mNode_Access;   // kOpen, kClosing, kShut, or kDead
+  // mork_usage     mNode_Usage;    // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able      mNode_Mutable;  // can this node be modified?
+  // mork_load      mNode_Load;     // is this node clean or dirty?
+
+  // mork_uses      mNode_Uses;     // refcount for strong refs
+  // mork_refs      mNode_Refs;     // refcount for strong refs + weak refs
+
+  // mork_color   mBead_Color;   // ID for this bead
+  // morkHandle*  mObject_Handle;  // weak ref to handle for this object
+
+ public:  // state is public because the entire Mork system is private
+  NS_DECL_ISUPPORTS_INHERITED
+
+  // { ===== begin nsIMdbThumb methods =====
+  NS_IMETHOD GetProgress(nsIMdbEnv* ev, mdb_count* outTotal,
+                         mdb_count* outCurrent, mdb_bool* outDone,
+                         mdb_bool* outBroken) override;
+
+  NS_IMETHOD DoMore(nsIMdbEnv* ev, mdb_count* outTotal, mdb_count* outCurrent,
+                    mdb_bool* outDone, mdb_bool* outBroken) override;
+
+  NS_IMETHOD CancelAndBreakThumb(nsIMdbEnv* ev) override;
+  // } ===== end nsIMdbThumb methods =====
+
+  // might as well include all the return values here:
+
+  mork_magic mThumb_Magic;  // magic sig different in each thumb type
+  mork_count mThumb_Total;
+  mork_count mThumb_Current;
+
+  mork_bool mThumb_Done;
+  mork_bool mThumb_Broken;
+  mork_u2 mThumb_Seed;  // optional seed for u4 alignment padding
+
+  // NOTE(review): described as a weak ref, but the Make_* factories in
+  // morkThumb.cpp fill this via morkStore::SlotStrongStore — confirm which
+  // ownership is intended before relying on either.
+  morkStore* mThumb_Store;     // weak ref to created store
+  nsIMdbFile* mThumb_File;     // strong ref to file (store, import, export)
+  morkWriter* mThumb_Writer;   // strong ref to writer (for commit)
+  morkBuilder* mThumb_Builder;  // strong ref to builder (for store open)
+  morkPort* mThumb_SourcePort;  // strong ref to port for import
+
+  mork_bool mThumb_DoCollect;  // influence whether a collect happens
+  mork_bool mThumb_Pad[3];     // padding for u4 alignment
+
+  // { ===== begin morkNode interface =====
+ public:  // morkNode virtual methods
+  virtual void CloseMorkNode(
+      morkEnv* ev) override;  // CloseThumb() only if open
+
+ public:  // morkThumb construction & destruction
+  morkThumb(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+            nsIMdbHeap* ioSlotHeap, mork_magic inMagic);
+  void CloseThumb(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkThumb(const morkThumb& other);
+  morkThumb& operator=(const morkThumb& other);
+  virtual ~morkThumb();  // assert that CloseThumb() executed earlier
+
+ public:  // dynamic type identification
+  mork_bool IsThumb() const {
+    return IsNode() && mNode_Derived == morkDerived_kThumb;
+  }
+  // } ===== end morkNode methods =====
+
+ public:  // typing
+  static void NonThumbTypeError(morkEnv* ev);
+  static void UnsupportedThumbMagicError(morkEnv* ev);
+
+  static void NilThumbStoreError(morkEnv* ev);
+  static void NilThumbFileError(morkEnv* ev);
+  static void NilThumbWriterError(morkEnv* ev);
+  static void NilThumbBuilderError(morkEnv* ev);
+  static void NilThumbSourcePortError(morkEnv* ev);
+
+ public:  // 'do more' methods
+  void DoMore_OpenFilePort(morkEnv* ev);
+  void DoMore_OpenFileStore(morkEnv* ev);
+  void DoMore_ExportToFormat(morkEnv* ev);
+  void DoMore_ImportContent(morkEnv* ev);
+  void DoMore_LargeCommit(morkEnv* ev);
+  void DoMore_SessionCommit(morkEnv* ev);
+  void DoMore_CompressCommit(morkEnv* ev);
+  void DoMore_Commit(morkEnv* ev);
+  void DoMore_SearchManyColumns(morkEnv* ev);
+  void DoMore_NewSortColumn(morkEnv* ev);
+  void DoMore_NewSortColumnWithCompare(morkEnv* ev);
+  void DoMore_CloneSortColumn(morkEnv* ev);
+  void DoMore_AddIndex(morkEnv* ev);
+  void DoMore_CutIndex(morkEnv* ev);
+
+ public:  // other thumb methods
+  morkStore* ThumbToOpenStore(morkEnv* ev);
+  // for orkinFactory::ThumbToOpenStore() after OpenFileStore()
+
+ public:  // assorted thumb constructors
+  static morkThumb* Make_OpenFileStore(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                       morkStore* ioStore);
+
+  static morkThumb* Make_CompressCommit(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                        morkStore* ioStore,
+                                        mork_bool inDoCollect);
+
+  static morkThumb* Make_LargeCommit(morkEnv* ev, nsIMdbHeap* ioHeap,
+                                     morkStore* ioStore);
+
+  // { ===== begin non-poly methods imitating nsIMdbThumb =====
+  void GetProgress(morkEnv* ev, mdb_count* outTotal, mdb_count* outCurrent,
+                   mdb_bool* outDone, mdb_bool* outBroken);
+
+  void DoMore(morkEnv* ev, mdb_count* outTotal, mdb_count* outCurrent,
+              mdb_bool* outDone, mdb_bool* outBroken);
+
+  void CancelAndBreakThumb(morkEnv* ev);
+  // } ===== end non-poly methods imitating nsIMdbThumb =====
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakThumb(morkThumb* me, morkEnv* ev, morkThumb** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongThumb(morkThumb* me, morkEnv* ev, morkThumb** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKTHUMB_ */
diff --git a/comm/mailnews/db/mork/morkUniqRowCursor.h b/comm/mailnews/db/mork/morkUniqRowCursor.h
new file mode 100644
index 0000000000..4099f19996
--- /dev/null
+++ b/comm/mailnews/db/mork/morkUniqRowCursor.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKUNIQROWCURSOR_
+#define _MORKUNIQROWCURSOR_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKCURSOR_
+# include "morkCursor.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+class orkinTableRowCursor;
+// #define morkDerived_kUniqRowCursor /*i*/ 0x7352 /* ascii 'sR' */
+
+// A table row cursor that (per its name) iterates rows without duplicate
+// members; see CanHaveDupRowMembers().  Declaration only — behavior details
+// live in the corresponding .cpp, not visible here.
+class morkUniqRowCursor : public morkTableRowCursor {  // row iterator
+
+  // public: // slots inherited from morkObject (meant to inform only)
+  // nsIMdbHeap*     mNode_Heap;
+  // mork_able    mNode_Mutable; // can this node be modified?
+  // mork_load    mNode_Load;    // is this node clean or dirty?
+  // mork_base    mNode_Base;    // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+  // mork_access  mNode_Access;  // kOpen, kClosing, kShut, or kDead
+  // mork_usage   mNode_Usage;   // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_uses    mNode_Uses;    // refcount for strong refs
+  // mork_refs    mNode_Refs;    // refcount for strong refs + weak refs
+
+  // morkFactory* mObject_Factory;  // weak ref to suite factory
+
+  // mork_seed  mCursor_Seed;
+  // mork_pos   mCursor_Pos;
+  // mork_bool  mCursor_DoFailOnSeedOutOfSync;
+  // mork_u1    mCursor_Pad[ 3 ]; // explicitly pad to u4 alignment
+
+  // morkTable*  mTableRowCursor_Table; // weak ref to table
+
+  // { ===== begin morkNode interface =====
+ public:
+  virtual void CloseMorkNode(morkEnv* ev) override;  // CloseUniqRowCursor()
+  virtual ~morkUniqRowCursor();  // assert that close executed earlier
+
+ public:  // morkUniqRowCursor construction & destruction
+  morkUniqRowCursor(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+                    morkTable* ioTable, mork_pos inRowPos);
+  void CloseUniqRowCursor(morkEnv* ev);  // called by CloseMorkNode();
+
+ private:  // copying is not allowed
+  morkUniqRowCursor(const morkUniqRowCursor& other);
+  morkUniqRowCursor& operator=(const morkUniqRowCursor& other);
+  // } ===== end morkNode methods =====
+
+ public:  // typing
+  static void NonUniqRowCursorTypeError(morkEnv* ev);
+
+ public:  // other search row cursor methods
+  virtual mork_bool CanHaveDupRowMembers(morkEnv* ev);
+  virtual mork_count GetMemberCount(morkEnv* ev);
+
+  virtual orkinTableRowCursor* AcquireUniqueRowCursorHandle(morkEnv* ev);
+
+  // virtual mdb_pos NextRowOid(morkEnv* ev, mdbOid* outOid);
+  virtual morkRow* NextRow(morkEnv* ev, mdbOid* outOid, mdb_pos* outPos);
+
+ public:  // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakUniqRowCursor(morkUniqRowCursor* me, morkEnv* ev,
+                                    morkUniqRowCursor** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongUniqRowCursor(morkUniqRowCursor* me, morkEnv* ev,
+                                      morkUniqRowCursor** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKUNIQROWCURSOR_ */
diff --git a/comm/mailnews/db/mork/morkWriter.cpp b/comm/mailnews/db/mork/morkWriter.cpp
new file mode 100644
index 0000000000..dc1bb1a1ed
--- /dev/null
+++ b/comm/mailnews/db/mork/morkWriter.cpp
@@ -0,0 +1,1936 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKBLOB_
+# include "morkBlob.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+// BUG FIX: this guard originally tested _MORKARRAY_ (a copy/paste slip from
+// a morkArray preamble), so morkWriter.h would have been skipped entirely if
+// morkArray.h had already been included.  Guard on the macro morkWriter.h
+// defines itself, matching the file's own convention (_MORKTHUMB_, etc.).
+#ifndef _MORKWRITER_
+# include "morkWriter.h"
+#endif
+
+// #ifndef _MORKFILE_
+// #include "morkFile.h"
+// #endif
+
+#ifndef _MORKSTREAM_
+# include "morkStream.h"
+#endif
+
+#ifndef _MORKSTORE_
+# include "morkStore.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+#ifndef _MORKROWMAP_
+# include "morkRowMap.h"
+#endif
+
+#ifndef _MORKATOMMAP_
+# include "morkAtomMap.h"
+#endif
+
+#ifndef _MORKROW_
+# include "morkRow.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKCELL_
+# include "morkCell.h"
+#endif
+
+#ifndef _MORKATOM_
+# include "morkAtom.h"
+#endif
+
+#ifndef _MORKCH_
+# include "morkCh.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+// Standard morkNode teardown protocol: close the writer exactly once,
+// bracketing CloseWriter() with closing/shut state transitions.
+/*public virtual*/ void morkWriter::CloseMorkNode(
+    morkEnv* ev)  // CloseTable() only if open
+{
+  if (this->IsOpenNode()) {
+    this->MarkClosing();
+    this->CloseWriter(ev);
+    this->MarkShut();
+  }
+}
+
+// Destructor only asserts that CloseWriter() already ran and released the
+// store slot; actual cleanup happens in CloseWriter(), not here.
+/*public virtual*/
+morkWriter::~morkWriter()  // assert CloseTable() executed earlier
+{
+  MORK_ASSERT(this->IsShutNode());
+  MORK_ASSERT(mWriter_Store == 0);
+}
+
+// Construct a writer that will serialize ioStore to ioFile.  Initializes
+// every member (counters, line-layout limits, form/scope trackers, phase
+// state) then wires the store (weak), file (strong) and slot heap (strong)
+// references; mNode_Derived is only set when all three were acquired cleanly.
+/*public non-poly*/
+morkWriter::morkWriter(morkEnv* ev, const morkUsage& inUsage,
+                       nsIMdbHeap* ioHeap, morkStore* ioStore,
+                       nsIMdbFile* ioFile, nsIMdbHeap* ioSlotHeap)
+    : morkNode(ev, inUsage, ioHeap),
+      mWriter_Store(0),
+      mWriter_File(0),
+      mWriter_Bud(0),
+      mWriter_Stream(0),
+      mWriter_SlotHeap(0)
+
+      ,
+      mWriter_CommitGroupIdentity(0)  // see mStore_CommitGroupIdentity
+      ,
+      mWriter_GroupBufFill(0)
+
+      ,
+      mWriter_TotalCount(morkWriter_kCountNumberOfPhases),
+      mWriter_DoneCount(0)
+
+      ,
+      mWriter_LineSize(0),
+      mWriter_MaxIndent(morkWriter_kMaxIndent),
+      mWriter_MaxLine(morkWriter_kMaxLine)
+
+      ,
+      mWriter_TableForm(0),
+      mWriter_TableAtomScope('v'),
+      mWriter_TableRowScope(0),
+      mWriter_TableKind(0)
+
+      ,
+      mWriter_RowForm(0),
+      mWriter_RowAtomScope(0),
+      mWriter_RowScope(0)
+
+      ,
+      mWriter_DictForm(0),
+      mWriter_DictAtomScope('v')
+
+      ,
+      mWriter_NeedDirtyAll(morkBool_kFalse),
+      mWriter_Incremental(morkBool_kTrue)  // opposite of mWriter_NeedDirtyAll
+      ,
+      mWriter_DidStartDict(morkBool_kFalse),
+      mWriter_DidEndDict(morkBool_kTrue)
+
+      ,
+      mWriter_SuppressDirtyRowNewline(morkBool_kFalse),
+      mWriter_DidStartGroup(morkBool_kFalse),
+      mWriter_DidEndGroup(morkBool_kTrue),
+      mWriter_Phase(morkWriter_kPhaseNothingDone)
+
+      ,
+      mWriter_BeVerbose(ev->mEnv_BeVerbose)
+
+      ,
+      mWriter_TableRowArrayPos(0)
+
+      // empty constructors for map iterators:
+      ,
+      mWriter_StoreAtomSpacesIter(),
+      mWriter_AtomSpaceAtomAidsIter()
+
+      ,
+      mWriter_StoreRowSpacesIter(),
+      mWriter_RowSpaceTablesIter(),
+      mWriter_RowSpaceRowsIter() {
+  mWriter_GroupBuf[0] = 0;
+
+  // Name staging buffers: both are NUL-terminated at their capacity so
+  // later fills can never run off the end.
+  mWriter_SafeNameBuf[0] = 0;
+  mWriter_SafeNameBuf[morkWriter_kMaxColumnNameSize * 2] = 0;
+  mWriter_ColNameBuf[0] = 0;
+  mWriter_ColNameBuf[morkWriter_kMaxColumnNameSize] = 0;
+
+  // Fixed-size yarns aliasing the buffers above; mYarn_Grow == 0 forbids
+  // any reallocation, so writes are clamped to the staging buffers.
+  mdbYarn* y = &mWriter_ColYarn;
+  y->mYarn_Buf = mWriter_ColNameBuf;  // where to put col bytes
+  y->mYarn_Fill = 0;                  // set later by writer
+  y->mYarn_Size = morkWriter_kMaxColumnNameSize;  // our buf size
+  y->mYarn_More = 0;                              // set later by writer
+  y->mYarn_Form = 0;                              // set later by writer
+  y->mYarn_Grow = 0;  // do not allow buffer growth
+
+  y = &mWriter_SafeYarn;
+  y->mYarn_Buf = mWriter_SafeNameBuf;  // where to put col bytes
+  y->mYarn_Fill = 0;                   // set later by writer
+  y->mYarn_Size = morkWriter_kMaxColumnNameSize * 2;  // our buf size
+  y->mYarn_More = 0;                                  // set later by writer
+  y->mYarn_Form = 0;                                  // set later by writer
+  y->mYarn_Grow = 0;  // do not allow buffer growth
+
+  if (ev->Good()) {
+    if (ioSlotHeap && ioFile && ioStore) {
+      morkStore::SlotWeakStore(ioStore, ev, &mWriter_Store);
+      nsIMdbFile_SlotStrongFile(ioFile, ev, &mWriter_File);
+      nsIMdbHeap_SlotStrongHeap(ioSlotHeap, ev, &mWriter_SlotHeap);
+      if (ev->Good()) {
+        mNode_Derived = morkDerived_kWriter;
+      }
+    } else
+      ev->NilPointerError();
+  }
+}
+
+// Lazily create mWriter_Stream.  Incremental commits stream straight onto
+// the store file; a compress commit instead acquires a "bud" file (a
+// replacement-in-progress copy) and streams onto that, so the original is
+// untouched until the rewrite completes.
+void morkWriter::MakeWriterStream(morkEnv* ev)  // give writer a suitable stream
+{
+  mWriter_Incremental = !mWriter_NeedDirtyAll;  // opposites
+
+  if (!mWriter_Stream && ev->Good()) {
+    if (mWriter_File) {
+      morkStream* stream = 0;
+      mork_bool frozen = morkBool_kFalse;  // need to modify
+      nsIMdbHeap* heap = mWriter_SlotHeap;
+
+      if (mWriter_Incremental) {
+        stream =
+            new (*heap, ev) morkStream(ev, morkUsage::kHeap, heap, mWriter_File,
+                                       morkWriter_kStreamBufSize, frozen);
+      } else  // compress commit
+      {
+        nsIMdbFile* bud = 0;
+        mWriter_File->AcquireBud(ev->AsMdbEnv(), heap, &bud);
+        if (bud) {
+          if (ev->Good()) {
+            mWriter_Bud = bud;
+            stream =
+                new (*heap, ev) morkStream(ev, morkUsage::kHeap, heap, bud,
+                                           morkWriter_kStreamBufSize, frozen);
+          } else
+            bud->Release();  // drop the bud if acquiring it left ev bad
+        }
+      }
+
+      if (stream) {
+        if (ev->Good())
+          mWriter_Stream = stream;
+        else
+          stream->CutStrongRef(ev->AsMdbEnv());  // undo on late error
+      }
+    } else
+      this->NilWriterFileError(ev);
+  }
+}
+
+// Release every slot this writer holds (store weak; file, bud, stream, and
+// heap strong) and mark the node shut.  Safe to reach via CloseMorkNode().
+/*public non-poly*/ void morkWriter::CloseWriter(
+    morkEnv* ev)  // called by CloseMorkNode();
+{
+  if (this->IsNode()) {
+    morkStore::SlotWeakStore((morkStore*)0, ev, &mWriter_Store);
+    nsIMdbFile_SlotStrongFile((nsIMdbFile*)0, ev, &mWriter_File);
+    nsIMdbFile_SlotStrongFile((nsIMdbFile*)0, ev, &mWriter_Bud);
+    morkStream::SlotStrongStream((morkStream*)0, ev, &mWriter_Stream);
+    nsIMdbHeap_SlotStrongHeap((nsIMdbHeap*)0, ev, &mWriter_SlotHeap);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// Error-reporting helpers: record a descriptive error on the morkEnv and
+// return; callers check ev->Good()/ev->Bad() afterwards.
+/*static*/ void morkWriter::NonWriterTypeError(morkEnv* ev) {
+  ev->NewError("non morkWriter");
+}
+
+/*static*/ void morkWriter::NilWriterStoreError(morkEnv* ev) {
+  ev->NewError("nil mWriter_Store");
+}
+
+/*static*/ void morkWriter::NilWriterBudError(morkEnv* ev) {
+  ev->NewError("nil mWriter_Bud");
+}
+
+/*static*/ void morkWriter::NilWriterFileError(morkEnv* ev) {
+  ev->NewError("nil mWriter_File");
+}
+
+/*static*/ void morkWriter::NilWriterStreamError(morkEnv* ev) {
+  ev->NewError("nil mWriter_Stream");
+}
+
+/*static*/ void morkWriter::UnsupportedPhaseError(morkEnv* ev) {
+  ev->NewError("unsupported mWriter_Phase");
+}
+
+// Drive one step of the phase state machine.  Each On*() handler performs a
+// bounded amount of work and advances mWriter_Phase; an env error forces the
+// phase to WritingDone so the commit winds down instead of looping.  Callers
+// repeat until IsWritingDone().  Returns ev->Good().
+mork_bool morkWriter::WriteMore(
+    morkEnv* ev)  // call until IsWritingDone() is true
+{
+  if (this->IsOpenNode()) {
+    if (this->IsWriter()) {
+      if (!mWriter_Stream) this->MakeWriterStream(ev);
+
+      if (mWriter_Stream) {
+        if (ev->Bad()) {
+          ev->NewWarning("writing stops on error");
+          mWriter_Phase = morkWriter_kPhaseWritingDone;
+        }
+        switch (mWriter_Phase) {
+          case morkWriter_kPhaseNothingDone:
+            OnNothingDone(ev);
+            break;
+
+          case morkWriter_kPhaseDirtyAllDone:
+            OnDirtyAllDone(ev);
+            break;
+
+          case morkWriter_kPhasePutHeaderDone:
+            OnPutHeaderDone(ev);
+            break;
+
+          case morkWriter_kPhaseRenumberAllDone:
+            OnRenumberAllDone(ev);
+            break;
+
+          case morkWriter_kPhaseStoreAtomSpaces:
+            OnStoreAtomSpaces(ev);
+            break;
+
+          case morkWriter_kPhaseAtomSpaceAtomAids:
+            OnAtomSpaceAtomAids(ev);
+            break;
+
+          case morkWriter_kPhaseStoreRowSpacesTables:
+            OnStoreRowSpacesTables(ev);
+            break;
+
+          case morkWriter_kPhaseRowSpaceTables:
+            OnRowSpaceTables(ev);
+            break;
+
+          case morkWriter_kPhaseTableRowArray:
+            OnTableRowArray(ev);
+            break;
+
+          case morkWriter_kPhaseStoreRowSpacesRows:
+            OnStoreRowSpacesRows(ev);
+            break;
+
+          case morkWriter_kPhaseRowSpaceRows:
+            OnRowSpaceRows(ev);
+            break;
+
+          case morkWriter_kPhaseContentDone:
+            OnContentDone(ev);
+            break;
+
+          case morkWriter_kPhaseWritingDone:
+            OnWritingDone(ev);
+            break;
+
+          default:
+            this->UnsupportedPhaseError(ev);
+        }
+      } else
+        this->NilWriterStreamError(ev);
+    } else
+      this->NonWriterTypeError(ev);
+  } else
+    this->NonOpenNodeError(ev);
+
+  return ev->Good();
+}
+
+static const char morkWriter_kHexDigits[] = "0123456789ABCDEF";
+
+// Write a yarn's bytes as Mork value text.  Bytes the format treats as
+// plain values pass through; ')' '$' '\' are backslash-escaped; everything
+// else becomes $XX hex.  When the line would exceed mWriter_MaxLine, a
+// "\<newline>" continuation is emitted and the per-line count restarts.
+mork_size morkWriter::WriteYarn(morkEnv* ev, const mdbYarn* inYarn)
+// return number of atom bytes written on the current line (which
+// implies that escaped line breaks will make the size value smaller
+// than the entire yarn's size, since only part goes on a last line).
+{
+  mork_size outSize = 0;
+  mork_size lineSize = mWriter_LineSize;
+  morkStream* stream = mWriter_Stream;
+
+  const mork_u1* b = (const mork_u1*)inYarn->mYarn_Buf;
+  if (b) {
+    int c;
+    mork_fill fill = inYarn->mYarn_Fill;
+
+    const mork_u1* end = b + fill;
+    while (b < end && ev->Good()) {
+      if (lineSize + outSize >= mWriter_MaxLine)  // continue line?
+      {
+        stream->PutByteThenNewline(ev, '\\');
+        mWriter_LineSize = lineSize = outSize = 0;
+      }
+
+      c = *b++;  // next byte to print
+      if (morkCh_IsValue(c)) {
+        stream->Putc(ev, c);
+        ++outSize;  // c
+      } else if (c == ')' || c == '$' || c == '\\') {
+        stream->Putc(ev, '\\');
+        stream->Putc(ev, c);
+        outSize += 2;  // '\' c
+      } else {
+        outSize += 3;  // '$' hex hex
+        stream->Putc(ev, '$');
+        stream->Putc(ev, morkWriter_kHexDigits[(c >> 4) & 0x0F]);
+        stream->Putc(ev, morkWriter_kHexDigits[c & 0x0F]);
+      }
+    }
+  }
+  mWriter_LineSize += outSize;
+
+  return outSize;
+}
+
+// Write one atom's content via WriteYarn(), first switching the active dict
+// form if this atom's form differs while a dict is open.
+// NOTE(review): if AliasYarn() can return false for a nil inAtom, the error
+// branch dereferences nil — confirm AliasYarn/BadAtomKindError semantics.
+mork_size morkWriter::WriteAtom(morkEnv* ev, const morkAtom* inAtom)
+// return number of atom bytes written on the current line (which
+// implies that escaped line breaks will make the size value smaller
+// than the entire atom's size, since only part goes on a last line).
+{
+  mork_size outSize = 0;
+  mdbYarn yarn;  // to ref content inside atom
+
+  if (morkAtom::AliasYarn(inAtom, &yarn)) {
+    if (mWriter_DidStartDict && yarn.mYarn_Form != mWriter_DictForm)
+      this->ChangeDictForm(ev, yarn.mYarn_Form);
+
+    outSize = this->WriteYarn(ev, &yarn);
+    // mWriter_LineSize += stream->Write(ev, inYarn->mYarn_Buf, outSize);
+  } else
+    inAtom->BadAtomKindError(ev);
+
+  return outSize;
+}
+
+// Serialize one atom space as a Mork dict: open "< <(a=SCOPE)> ...", then one
+// "(HEXID=value)" alias per dirty atom, then '>' to close.  Each written atom
+// is flipped back to clean and counted in mWriter_DoneCount; on success the
+// whole space is marked clean.  Only ASCII scopes (< 0x80) are supported.
+void morkWriter::WriteAtomSpaceAsDict(morkEnv* ev, morkAtomSpace* ioSpace) {
+  morkStream* stream = mWriter_Stream;
+  nsIMdbEnv* mdbev = ev->AsMdbEnv();
+  mork_scope scope = ioSpace->SpaceScope();
+  if (scope < 0x80) {
+    if (mWriter_LineSize) stream->PutLineBreak(ev);
+    stream->PutString(ev, "< <(a=");
+    stream->Putc(ev, (int)scope);
+    ++mWriter_LineSize;
+    stream->PutString(ev, ")> // (f=iso-8859-1)");
+    mWriter_LineSize = stream->PutIndent(ev, morkWriter_kDictAliasDepth);
+  } else
+    ioSpace->NonAsciiSpaceScopeName(ev);
+
+  if (ev->Good()) {
+    mdbYarn yarn;  // to ref content inside atom
+    char buf[64];  // buffer for staging the dict alias hex ID
+    char* idBuf = buf + 1;  // where the id always starts
+    buf[0] = '(';  // we always start with open paren
+    morkBookAtom* atom = 0;
+    morkAtomAidMapIter* ai = &mWriter_AtomSpaceAtomAidsIter;
+    ai->InitAtomAidMapIter(ev, &ioSpace->mAtomSpace_AtomAids);
+    mork_change* c = 0;
+
+    for (c = ai->FirstAtom(ev, &atom); c && ev->Good();
+         c = ai->NextAtom(ev, &atom)) {
+      if (atom) {
+        if (atom->IsAtomDirty()) {
+          atom->SetAtomClean();  // neutralize change
+
+          morkAtom::AliasYarn(atom, &yarn);
+          mork_size size = ev->TokenAsHex(idBuf, atom->mBookAtom_Id);
+
+          if (yarn.mYarn_Form != mWriter_DictForm)
+            this->ChangeDictForm(ev, yarn.mYarn_Form);
+
+          // Worst-case bytes for "(ID=value)" incl. escaping slop; used to
+          // decide whether to break the line before starting the alias.
+          mork_size pending =
+              yarn.mYarn_Fill + size + morkWriter_kYarnEscapeSlop + 4;
+          this->IndentOverMaxLine(ev, pending, morkWriter_kDictAliasDepth);
+          mork_size bytesWritten;
+          stream->Write(mdbev, buf, size + 1, &bytesWritten);  // + '('
+          mWriter_LineSize += bytesWritten;
+
+          pending -= (size + 1);
+          this->IndentOverMaxLine(ev, pending, morkWriter_kDictAliasValueDepth);
+          stream->Putc(ev, '=');  // start alias
+          ++mWriter_LineSize;
+
+          this->WriteYarn(ev, &yarn);
+          stream->Putc(ev, ')');  // end alias
+          ++mWriter_LineSize;
+
+          ++mWriter_DoneCount;
+        }
+      } else
+        ev->NilPointerError();
+    }
+    ai->CloseMapIter(ev);
+  }
+
+  if (ev->Good()) {
+    ioSpace->SetAtomSpaceClean();
+    // this->IndentAsNeeded(ev, 0);
+    // stream->PutByteThenNewline(ev, '>'); // end dict
+
+    stream->Putc(ev, '>');  // end dict
+    ++mWriter_LineSize;
+  }
+}
+
+/*
+(I'm putting the text of this message in file morkWriter.cpp.)
+
+I'm making a change which should cause rows and tables to go away
+when a Mork db is compress committed, when the rows and tables
+are no longer needed. Because this is subtle, I'm describing it
+here in case misbehavior is ever observed. Otherwise you'll have
+almost no hope of fixing a related bug.
+
+This is done entirely in morkWriter.cpp: morkWriter::DirtyAll(),
+which currently marks all rows and tables dirty so they will be
+written in a later phase of the commit. My change is to merely
+selectively not mark certain rows and tables dirty, when they seem
+to be superfluous.
+
+A row is no longer needed when the mRow_GcUses slot hits zero, and
+this is used by the following inline morkRow method:
+
+ mork_bool IsRowUsed() const { return mRow_GcUses != 0; }
+
+Naturally disaster ensues if mRow_GcUses is ever smaller than right.
+
+Similarly, we should drop tables when mTable_GcUses hits zero, but
+only when a table contains no row members. We consider tables to
+self reference (and prevent collection) when they contain content.
+Again, disaster ensues if mTable_GcUses is ever smaller than right.
+
+ mork_count GetRowCount() const
+ { return mTable_RowArray.mArray_Fill; }
+
+ mork_bool IsTableUsed() const
+ { return (mTable_GcUses != 0 || this->GetRowCount() != 0); }
+
+Now let's question why the design involves filtering what gets set
+to dirty. Why not apply a filter in the later phase when we write
+content? Because I'm afraid of missing some subtle interaction in
+updating table and row relationships. It seems safer to write a row
+or table when it starts out dirty, before morkWriter::DirtyAll() is
+called. So this design calls for writing out rows and tables when
+they are still clearly used, and additionally, <i>when we have just
+been actively writing to them right before this commit</i>.
+
+Presumably if they are truly useless, they will no longer be dirtied
+in later sessions and will get collected during the next compress
+commit. So we wait to collect them until they become all dead, and
+not just mostly dead. (At which time you can feel free to go through
+their pockets looking for loose change.)
+*/
+
+mork_bool morkWriter::DirtyAll(morkEnv* ev)
+// DirtyAll() visits every store sub-object and marks
+// them dirty, including every table, row, cell, and atom. The return
+// equals ev->Good(), to show whether any error happened. This method is
+// intended for use in the beginning of a "compress commit" which writes
+// all store content, whether dirty or not. We dirty everything first so
+// that later iterations over content can mark things clean as they are
+// written, and organize the process of serialization so that objects are
+// written only at need (because of being dirty). Note the method can
+// stop early when any error happens, since this will abort any commit.
+{
+  morkStore* store = mWriter_Store;
+  if (store) {
+    store->SetStoreDirty();
+    mork_change* c = 0;
+
+    if (ev->Good()) {
+      // First pass: dirty every atom in every atom space of the store.
+      morkAtomSpaceMapIter* asi = &mWriter_StoreAtomSpacesIter;
+      asi->InitAtomSpaceMapIter(ev, &store->mStore_AtomSpaces);
+
+      mork_scope* key = 0;      // ignore keys in map
+      morkAtomSpace* space = 0; // old val node in the map
+
+      for (c = asi->FirstAtomSpace(ev, key, &space); c && ev->Good();
+           c = asi->NextAtomSpace(ev, key, &space)) {
+        if (space) {
+          if (space->IsAtomSpace()) {
+            space->SetAtomSpaceDirty();
+            morkBookAtom* atom = 0;
+            morkAtomAidMapIter* ai = &mWriter_AtomSpaceAtomAidsIter;
+            ai->InitAtomAidMapIter(ev, &space->mAtomSpace_AtomAids);
+
+            for (c = ai->FirstAtom(ev, &atom); c && ev->Good();
+                 c = ai->NextAtom(ev, &atom)) {
+              if (atom) {
+                atom->SetAtomDirty();
+                ++mWriter_TotalCount; // progress accounting for this commit
+              } else
+                ev->NilPointerError();
+            }
+
+            ai->CloseMapIter(ev);
+          } else
+            space->NonAtomSpaceTypeError(ev);
+        } else
+          ev->NilPointerError();
+      }
+    }
+
+    if (ev->Good()) {
+      // Second pass: dirty rows and tables in every row space, but only
+      // those still used (or already dirty), so that superfluous content
+      // can be collected by the compress commit instead of rewritten.
+      morkRowSpaceMapIter* rsi = &mWriter_StoreRowSpacesIter;
+      rsi->InitRowSpaceMapIter(ev, &store->mStore_RowSpaces);
+
+      mork_scope* key = 0;     // ignore keys in map
+      morkRowSpace* space = 0; // old val node in the map
+
+      for (c = rsi->FirstRowSpace(ev, key, &space); c && ev->Good();
+           c = rsi->NextRowSpace(ev, key, &space)) {
+        if (space) {
+          if (space->IsRowSpace()) {
+            space->SetRowSpaceDirty();
+            if (ev->Good()) {
+#ifdef MORK_ENABLE_PROBE_MAPS
+              morkRowProbeMapIter* ri = &mWriter_RowSpaceRowsIter;
+#else  /*MORK_ENABLE_PROBE_MAPS*/
+              morkRowMapIter* ri = &mWriter_RowSpaceRowsIter;
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+              ri->InitRowMapIter(ev, &space->mRowSpace_Rows);
+
+              morkRow* row = 0; // old key row in the map
+
+              for (c = ri->FirstRow(ev, &row); c && ev->Good();
+                   c = ri->NextRow(ev, &row)) {
+                if (row && row->IsRow()) // need to dirty row?
+                {
+                  if (row->IsRowUsed() || row->IsRowDirty()) {
+                    row->DirtyAllRowContent(ev);
+                    ++mWriter_TotalCount;
+                  }
+                } else if (row)
+                  row->NonRowTypeWarning(ev);
+                else // bug fix: was a member call through a nil row pointer
+                  ev->NilPointerError();
+              }
+              ri->CloseMapIter(ev);
+            }
+
+            if (ev->Good()) {
+              morkTableMapIter* ti = &mWriter_RowSpaceTablesIter;
+              ti->InitTableMapIter(ev, &space->mRowSpace_Tables);
+
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+              morkTable* table = ti->FirstTable(ev);
+
+              for (; table && ev->Good(); table = ti->NextTable(ev))
+#else  /*MORK_BEAD_OVER_NODE_MAPS*/
+              mork_tid* tableKey = 0; // ignore keys in table map
+              morkTable* table = 0;   // old key row in the map
+
+              for (c = ti->FirstTable(ev, tableKey, &table); c && ev->Good();
+                   c = ti->NextTable(ev, tableKey, &table))
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+              {
+                if (table && table->IsTable()) // need to dirty table?
+                {
+                  if (table->IsTableUsed() || table->IsTableDirty()) {
+                    // table->DirtyAllTableContent(ev);
+                    // only necessary to mark table itself dirty:
+                    table->SetTableDirty();
+                    table->SetTableRewrite();
+                    ++mWriter_TotalCount;
+                  }
+                } else if (table)
+                  table->NonTableTypeWarning(ev);
+                else // bug fix: was a member call through a nil table pointer
+                  ev->NilPointerError();
+              }
+              ti->CloseMapIter(ev);
+            }
+          } else
+            space->NonRowSpaceTypeError(ev);
+        } else
+          ev->NilPointerError();
+      }
+    }
+  } else
+    this->NilWriterStoreError(ev);
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnNothingDone(morkEnv* ev) {
+  // First writer step: decide whether this commit is incremental (append a
+  // transaction group) or a compress commit (rewrite the whole store).
+  mWriter_Incremental = !mWriter_NeedDirtyAll; // opposites
+
+  // Clean store with no forced rewrite: there is nothing at all to write.
+  if (!mWriter_Store->IsStoreDirty() && !mWriter_NeedDirtyAll) {
+    mWriter_Phase = morkWriter_kPhaseWritingDone;
+    return morkBool_kTrue;
+  }
+
+  if (mWriter_NeedDirtyAll) // compress commit marks all content dirty first
+    this->DirtyAll(ev);
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseDirtyAllDone
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::StartGroup(morkEnv* ev) {
+  // Open an incremental transaction group by writing "@$${<id>{@" on its
+  // own line, and remember the hex group ID so CommitGroup()/AbortGroup()
+  // can later close the group with a matching marker.
+  nsIMdbEnv* mdbev = ev->AsMdbEnv();
+  morkStream* stream = mWriter_Stream;
+  mWriter_DidStartGroup = morkBool_kTrue;
+  mWriter_DidEndGroup = morkBool_kFalse;
+
+  char buf[4 + morkWriter_kGroupBufSize + 2]; // "@$${" + groupid + "{@"
+  char* p = buf;
+  *p++ = '@';
+  *p++ = '$';
+  *p++ = '$';
+  *p++ = '{';
+
+  mork_token groupID = mWriter_CommitGroupIdentity;
+  mork_fill idFill = ev->TokenAsHex(p, groupID);
+  mWriter_GroupBufFill = 0;
+  // ev->TokenAsHex(mWriter_GroupBuf, groupID);
+  // Save a copy of the hex ID for the group-closing marker, but only when
+  // it fits; otherwise leave the saved buffer empty.
+  if (idFill < morkWriter_kGroupBufSize) {
+    // TokenAsHex appends a '\0', but it's not included in idFill count.
+    MORK_MEMCPY(mWriter_GroupBuf, p, idFill + 1);
+    mWriter_GroupBufFill = idFill;
+  } else {
+    *mWriter_GroupBuf = '\0';
+  }
+
+  p += idFill;
+  *p++ = '{';
+  *p++ = '@';
+
+  stream->PutLineBreak(ev);
+
+  morkStore* store = mWriter_Store;
+  if (store) // might need to capture commit group position?
+  {
+    // Record the file offsets of the first and second commit groups, when
+    // not already set.
+    mork_pos groupPos;
+    stream->Tell(mdbev, &groupPos);
+    if (!store->mStore_FirstCommitGroupPos)
+      store->mStore_FirstCommitGroupPos = groupPos;
+    else if (!store->mStore_SecondCommitGroupPos)
+      store->mStore_SecondCommitGroupPos = groupPos;
+  }
+
+  mork_size bytesWritten;
+  stream->Write(mdbev, buf, 4 + idFill + 2,
+                &bytesWritten); // '@$${' + idFill + '{@'
+  stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::CommitGroup(morkEnv* ev) {
+  // Close an open transaction group by writing "@$$}<id>}@" on its own
+  // line, where <id> is the hex group ID that StartGroup() saved away.
+  if (mWriter_DidStartGroup) {
+    nsIMdbEnv* mdbev = ev->AsMdbEnv();
+    morkStream* out = mWriter_Stream;
+    mork_size bytesWritten;
+
+    if (mWriter_LineSize) // finish any partial output line first
+      out->PutLineBreak(ev);
+
+    static const char kGroupEndPrefix[] = "@$$}";
+    for (const char* s = kGroupEndPrefix; *s; ++s) out->Putc(ev, *s);
+
+    mork_fill idBytes = mWriter_GroupBufFill; // hex ID length, maybe zero
+    if (idBytes) out->Write(mdbev, mWriter_GroupBuf, idBytes, &bytesWritten);
+
+    out->Putc(ev, '}');
+    out->Putc(ev, '@');
+    out->PutLineBreak(ev);
+
+    mWriter_LineSize = 0;
+  }
+
+  mWriter_DidEndGroup = morkBool_kTrue;
+  mWriter_DidStartGroup = morkBool_kFalse;
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::AbortGroup(morkEnv* ev) {
+  // Terminate an open transaction group with the abort marker, so readers
+  // know to ignore the partially written group content.
+  if (mWriter_DidStartGroup) {
+    morkStream* out = mWriter_Stream;
+    out->PutLineBreak(ev);
+    out->PutStringThenNewline(ev, "@$$}~~}@");
+    mWriter_LineSize = 0;
+  }
+
+  mWriter_DidEndGroup = morkBool_kTrue;
+  mWriter_DidStartGroup = morkBool_kFalse;
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnDirtyAllDone(morkEnv* ev) {
+  // Position the stream for writing: a compress commit rewrites the file
+  // from the top (fresh header), while an incremental commit appends a new
+  // transaction group at the end of the existing file.
+  if (ev->Good()) {
+    nsIMdbEnv* mdbev = ev->AsMdbEnv();
+    morkStream* stream = mWriter_Stream;
+    mork_pos resultPos;
+    if (mWriter_NeedDirtyAll) // compress commit
+    {
+      stream->Seek(mdbev, 0, &resultPos); // beginning of stream
+      stream->PutStringThenNewline(ev, morkWriter_kFileHeader);
+      mWriter_LineSize = 0;
+    } else // else mWriter_Incremental
+    {
+      mork_pos eos = stream->Length(ev); // length is end of stream
+      if (ev->Good()) {
+        stream->Seek(mdbev, eos, &resultPos); // goto end of stream
+        if (eos < 128) // maybe need file header?
+        {
+          // A file this short presumably never received a header yet.
+          stream->PutStringThenNewline(ev, morkWriter_kFileHeader);
+          mWriter_LineSize = 0;
+        }
+        this->StartGroup(ev); // begin incremental transaction
+      }
+    }
+  }
+
+  if (ev->Good())
+    mWriter_Phase = morkWriter_kPhasePutHeaderDone;
+  else
+    mWriter_Phase = morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnPutHeaderDone(morkEnv* ev) {
+  // After the header is out, a compress commit renumbers all collectable
+  // store content so identities are compact; incremental commits skip this.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  if (mWriter_NeedDirtyAll) // compress commit
+  {
+    morkStore* store = mWriter_Store;
+    if (!store)
+      this->NilWriterStoreError(ev);
+    else
+      store->RenumberAllCollectableContent(ev);
+  }
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseRenumberAllDone
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnRenumberAllDone(morkEnv* ev) {
+  // Renumbering is finished; close any partial output line and advance the
+  // state machine to writing the store's atom spaces.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseStoreAtomSpaces
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnStoreAtomSpaces(morkEnv* ev) {
+  // Write the store's ground column atom space as a dict when it is dirty,
+  // then advance to the row-space tables phase.
+  morkStream* stream = mWriter_Stream;
+  if (mWriter_LineSize) stream->PutLineBreak(ev);
+
+  // if ( mWriter_NeedDirtyAll )
+  //   stream->PutStringThenNewline(ev, "// OnStoreAtomSpaces()");
+  mWriter_LineSize = 0;
+
+  if (mWriter_NeedDirtyAll) // compress commit
+  {
+    // (no compress-commit-only work is needed in this phase)
+  }
+
+  if (ev->Good()) {
+    morkStore* store = mWriter_Store;
+    if (store) {
+      morkAtomSpace* space = store->LazyGetGroundColumnSpace(ev);
+      if (space && space->IsAtomSpaceDirty()) {
+        // stream->PutStringThenNewline(ev, "// ground column space dict:");
+
+        if (mWriter_LineSize) {
+          stream->PutLineBreak(ev);
+          mWriter_LineSize = 0;
+        }
+        this->WriteAtomSpaceAsDict(ev, space);
+        space->SetAtomSpaceClean(); // it has been fully serialized
+      }
+    } else
+      this->NilWriterStoreError(ev);
+  }
+
+  if (ev->Good())
+    mWriter_Phase = morkWriter_kPhaseStoreRowSpacesTables;
+  else
+    mWriter_Phase = morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnAtomSpaceAtomAids(morkEnv* ev) {
+  // Placeholder phase (atom aids are written elsewhere): close any partial
+  // output line and advance to the row-space tables phase.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseStoreRowSpacesTables
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+void morkWriter::WriteAllStoreTables(morkEnv* ev) {
+  // Serialize every dirty table in every row space (dict first, then the
+  // table body), then serialize any remaining dirty rows that are not
+  // covered by a table context.
+  morkStore* store = mWriter_Store;
+  if (store && ev->Good()) {
+    morkRowSpaceMapIter* rsi = &mWriter_StoreRowSpacesIter;
+    rsi->InitRowSpaceMapIter(ev, &store->mStore_RowSpaces);
+
+    mork_scope* key = 0;     // ignore keys in map
+    morkRowSpace* space = 0; // old val node in the map
+    mork_change* c = 0;
+
+    for (c = rsi->FirstRowSpace(ev, key, &space); c && ev->Good();
+         c = rsi->NextRowSpace(ev, key, &space)) {
+      if (space) {
+        if (space->IsRowSpace()) {
+          space->SetRowSpaceClean();
+          if (ev->Good()) {
+            morkTableMapIter* ti = &mWriter_RowSpaceTablesIter;
+            ti->InitTableMapIter(ev, &space->mRowSpace_Tables);
+
+#ifdef MORK_BEAD_OVER_NODE_MAPS
+            morkTable* table = ti->FirstTable(ev);
+
+            for (; table && ev->Good(); table = ti->NextTable(ev))
+#else  /*MORK_BEAD_OVER_NODE_MAPS*/
+            mork_tid* key2 = 0;   // ignore keys in table map
+            morkTable* table = 0; // old key row in the map
+
+            for (c = ti->FirstTable(ev, key2, &table); c && ev->Good();
+                 c = ti->NextTable(ev, key2, &table))
+#endif /*MORK_BEAD_OVER_NODE_MAPS*/
+            {
+              if (table && table->IsTable()) {
+                if (table->IsTableDirty()) {
+                  mWriter_BeVerbose =
+                      (ev->mEnv_BeVerbose || table->IsTableVerbose());
+
+                  if (this->PutTableDict(ev, table)) this->PutTable(ev, table);
+
+                  table->SetTableClean(ev);
+                  mWriter_BeVerbose = ev->mEnv_BeVerbose;
+                }
+              } else if (table)
+                table->NonTableTypeWarning(ev);
+              else // bug fix: was a member call through a nil table pointer
+                ev->NilPointerError();
+            }
+            ti->CloseMapIter(ev);
+          }
+          if (ev->Good()) {
+            mWriter_TableRowScope = 0; // ensure no table context now
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+            morkRowProbeMapIter* ri = &mWriter_RowSpaceRowsIter;
+#else  /*MORK_ENABLE_PROBE_MAPS*/
+            morkRowMapIter* ri = &mWriter_RowSpaceRowsIter;
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+            ri->InitRowMapIter(ev, &space->mRowSpace_Rows);
+
+            morkRow* row = 0; // old row in the map
+
+            for (c = ri->FirstRow(ev, &row); c && ev->Good();
+                 c = ri->NextRow(ev, &row)) {
+              if (row && row->IsRow()) {
+                // later we should also check that table use count is nonzero:
+                if (row->IsRowDirty()) // && row->IsRowUsed() ??
+                {
+                  mWriter_BeVerbose = ev->mEnv_BeVerbose;
+                  if (this->PutRowDict(ev, row)) {
+                    if (ev->Good() && mWriter_DidStartDict) {
+                      this->EndDict(ev);
+                      if (mWriter_LineSize < 32 && ev->Good())
+                        mWriter_SuppressDirtyRowNewline = morkBool_kTrue;
+                    }
+
+                    if (ev->Good()) this->PutRow(ev, row);
+                  }
+                  mWriter_BeVerbose = ev->mEnv_BeVerbose;
+                }
+              } else if (row)
+                row->NonRowTypeWarning(ev);
+              else // bug fix: was a member call through a nil row pointer
+                ev->NilPointerError();
+            }
+            ri->CloseMapIter(ev);
+          }
+        } else
+          space->NonRowSpaceTypeError(ev);
+      } else
+        ev->NilPointerError();
+    }
+  }
+}
+
+mork_bool morkWriter::OnStoreRowSpacesTables(morkEnv* ev) {
+  // Close any partial line, then emit every dirty table (and stray dirty
+  // row) in the store. Later this might be broken into increments; today
+  // it is all written in one shot.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  this->WriteAllStoreTables(ev);
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseStoreRowSpacesRows
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnRowSpaceTables(morkEnv* ev) {
+  // Placeholder phase (tables are written in OnStoreRowSpacesTables()):
+  // close any partial output line and advance to the rows phase.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseStoreRowSpacesRows
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnTableRowArray(morkEnv* ev) {
+  // Placeholder phase (row arrays are written with their tables): close
+  // any partial output line and advance to the rows phase.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseStoreRowSpacesRows
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnStoreRowSpacesRows(morkEnv* ev) {
+  // Placeholder phase (rows were already written with the tables): close
+  // any partial output line and advance to the content-done phase.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseContentDone
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnRowSpaceRows(morkEnv* ev) {
+  // Placeholder phase (rows were already written with the tables): close
+  // any partial output line and advance to the content-done phase.
+  if (mWriter_LineSize) mWriter_Stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  mWriter_Phase = ev->Good() ? morkWriter_kPhaseContentDone
+                             : morkWriter_kPhaseWritingDone; // stop on error
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnContentDone(morkEnv* ev) {
+  // Final content phase: close (or abort) the transaction group for an
+  // incremental commit, flush the stream, and promote the bud file to be
+  // the new trunk when doing a whole-file rewrite.
+  morkStream* stream = mWriter_Stream;
+  if (mWriter_LineSize) stream->PutLineBreak(ev);
+
+  // if ( mWriter_NeedDirtyAll )
+  //   stream->PutStringThenNewline(ev, "// OnContentDone()");
+  mWriter_LineSize = 0;
+
+  if (mWriter_Incremental) {
+    if (ev->Good())
+      this->CommitGroup(ev);
+    else
+      this->AbortGroup(ev);
+  } else if (mWriter_Store && ev->Good()) {
+    // after rewriting everything, there are no transaction groups:
+    mWriter_Store->mStore_FirstCommitGroupPos = 0;
+    mWriter_Store->mStore_SecondCommitGroupPos = 0;
+  }
+
+  stream->Flush(ev->AsMdbEnv());
+  nsIMdbFile* bud = mWriter_Bud;
+  if (bud) {
+    bud->Flush(ev->AsMdbEnv());
+    bud->BecomeTrunk(ev->AsMdbEnv()); // replace the trunk file with the bud
+    nsIMdbFile_SlotStrongFile((nsIMdbFile*)0, ev, &mWriter_Bud); // drop ref
+  } else if (!mWriter_Incremental) // should have a bud?
+    this->NilWriterBudError(ev);
+
+  mWriter_Phase = morkWriter_kPhaseWritingDone; // stop always
+  mWriter_DoneCount = mWriter_TotalCount;
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::OnWritingDone(morkEnv* ev) {
+  // Terminal state: stepping a finished writer merits a warning, but is
+  // not an error; progress is reported as complete.
+  ev->NewWarning("writing is done");
+  mWriter_DoneCount = mWriter_TotalCount;
+  return ev->Good();
+}
+
+mork_bool morkWriter::PutTableChange(morkEnv* ev,
+                                     const morkTableChange* inChange) {
+  // Serialize one entry from a table's change list: a bare row means an
+  // add, a '-' prefix marks a cut, and an added "!<pos> " records a move
+  // to a new position.
+  nsIMdbEnv* mdbev = ev->AsMdbEnv();
+  if (inChange->IsAddRowTableChange()) {
+    this->PutRow(ev, inChange->mTableChange_Row); // row alone means add
+  } else if (inChange->IsCutRowTableChange()) {
+    mWriter_Stream->Putc(ev, '-'); // prefix '-' indicates cut row
+    ++mWriter_LineSize;
+    this->PutRow(ev, inChange->mTableChange_Row);
+  } else if (inChange->IsMoveRowTableChange()) {
+    this->PutRow(ev, inChange->mTableChange_Row);
+    char buf[64];
+    char* p = buf;
+    *p++ = '!'; // for moves, position is indicated by prefix '!'
+    mork_size posSize = ev->TokenAsHex(p, inChange->mTableChange_Pos);
+    p += posSize;
+    *p++ = ' ';
+    mork_size bytesWritten;
+    mWriter_Stream->Write(mdbev, buf, posSize + 2, &bytesWritten);
+    mWriter_LineSize += bytesWritten;
+  } else
+    inChange->UnknownChangeError(ev);
+
+  return ev->Good();
+}
+
+mork_bool morkWriter::PutTable(morkEnv* ev, morkTable* ioTable) {
+  // Serialize a table body. Rewrites (and compress commits) write the full
+  // row vector; otherwise only the accumulated change list is written.
+  // Either way the table is left clean on exit.
+  if (ev->Good()) this->StartTable(ev, ioTable);
+
+  if (ev->Good()) {
+    if (ioTable->IsTableRewrite() || mWriter_NeedDirtyAll) {
+      morkArray* array = &ioTable->mTable_RowArray; // vector of rows
+      mork_fill fill = array->mArray_Fill;          // count of rows
+      morkRow** rows = (morkRow**)array->mArray_Slots;
+      if (rows && fill) {
+        morkRow** end = rows + fill;
+        while (rows < end && ev->Good()) {
+          morkRow* r = *rows++; // next row to consider
+          this->PutRow(ev, r);
+        }
+      }
+    } else // incremental write only table changes
+    {
+      morkList* list = &ioTable->mTable_ChangeList;
+      morkNext* next = list->GetListHead();
+      while (next && ev->Good()) {
+        this->PutTableChange(ev, (morkTableChange*)next);
+        next = next->GetNextLink();
+      }
+    }
+  }
+
+  if (ev->Good()) this->EndTable(ev);
+
+  ioTable->SetTableClean(ev); // note this also cleans change list
+  mWriter_TableRowScope = 0;  // leave table context
+
+  ++mWriter_DoneCount;
+  return ev->Good();
+}
+
+mork_bool morkWriter::PutTableDict(morkEnv* ev, morkTable* ioTable) {
+  // Establish the writer's scope/form context for ioTable, then write the
+  // dict entries needed by the table's meta row, member rows, and any rows
+  // referenced from its change list.
+  morkRowSpace* space = ioTable->mTable_RowSpace;
+  mWriter_TableRowScope = space->SpaceScope();
+  mWriter_TableForm = 0;        // (f=iso-8859-1)
+  mWriter_TableAtomScope = 'v'; // (a=v)
+  mWriter_TableKind = ioTable->mTable_Kind;
+
+  mWriter_RowForm = mWriter_TableForm;
+  mWriter_RowAtomScope = mWriter_TableAtomScope;
+  mWriter_RowScope = mWriter_TableRowScope;
+
+  mWriter_DictForm = mWriter_TableForm;
+  mWriter_DictAtomScope = mWriter_TableAtomScope;
+
+  // if ( ev->Good() )
+  //   this->StartDict(ev); // delay as long as possible
+
+  if (ev->Good()) {
+    morkRow* r = ioTable->mTable_MetaRow;
+    if (r) {
+      if (r->IsRow())
+        this->PutRowDict(ev, r);
+      else
+        r->NonRowTypeError(ev);
+    }
+    morkArray* array = &ioTable->mTable_RowArray; // vector of rows
+    mork_fill fill = array->mArray_Fill;          // count of rows
+    morkRow** rows = (morkRow**)array->mArray_Slots;
+    if (rows && fill) {
+      morkRow** end = rows + fill;
+      while (rows < end && ev->Good()) {
+        r = *rows++; // next row to consider
+        if (r && r->IsRow())
+          this->PutRowDict(ev, r);
+        else
+          r->NonRowTypeError(ev);
+      }
+    }
+    // we may have a change for a row which is no longer in the
+    // table, but contains a cell with something not in the dictionary.
+    // So, loop through the rows in the change log, writing out any
+    // dirty dictionary elements.
+    morkList* list = &ioTable->mTable_ChangeList;
+    morkNext* next = list->GetListHead();
+    while (next && ev->Good()) {
+      r = ((morkTableChange*)next)->mTableChange_Row;
+      if (r && r->IsRow()) this->PutRowDict(ev, r);
+      next = next->GetNextLink();
+    }
+  }
+  if (ev->Good()) this->EndDict(ev);
+
+  return ev->Good();
+}
+
+void morkWriter::WriteTokenToTokenMetaCell(morkEnv* ev, mork_token inCol,
+                                           mork_token inValue) {
+  // Write a meta cell whose column and value are both tokens. The
+  // table-kind column uses the "(col^value:c)" hex-reference form; other
+  // columns use "(col=value)" with the value written as a string.
+  morkStream* stream = mWriter_Stream;
+  mork_bool isKindCol = (morkStore_kKindColumn == inCol);
+  mork_u1 valSep = (mork_u1)((isKindCol) ? '^' : '=');
+
+  char buf[128]; // buffer for staging the two hex IDs
+  char* p = buf;
+
+  mork_size bytesWritten;
+  if (inCol < 0x80) {
+    stream->Putc(ev, '(');
+    stream->Putc(ev, (char)inCol);
+    stream->Putc(ev, valSep);
+    mWriter_LineSize += 3; // bug fix: these three bytes were never counted
+  } else {
+    *p++ = '('; // we always start with open paren
+
+    *p++ = '^'; // indicates col is hex ID
+    mork_size colSize = ev->TokenAsHex(p, inCol);
+    p += colSize;
+    *p++ = (char)valSep;
+    stream->Write(ev->AsMdbEnv(), buf, colSize + 3, &bytesWritten);
+
+    mWriter_LineSize += bytesWritten;
+  }
+
+  if (isKindCol) {
+    p = buf;
+    mork_size valSize = ev->TokenAsHex(p, inValue);
+    p += valSize;
+    *p++ = ':';
+    *p++ = 'c';
+    *p++ = ')';
+    stream->Write(ev->AsMdbEnv(), buf, valSize + 3, &bytesWritten);
+    mWriter_LineSize += bytesWritten;
+  } else {
+    this->IndentAsNeeded(ev, morkWriter_kTableMetaCellValueDepth);
+    mdbYarn* yarn = &mWriter_ColYarn;
+    // mork_u1* yarnBuf = (mork_u1*) yarn->mYarn_Buf;
+    mWriter_Store->TokenToString(ev, inValue, yarn);
+    this->WriteYarn(ev, yarn);
+    stream->Putc(ev, ')');
+    ++mWriter_LineSize;
+  }
+
+  // mork_fill fill = yarn->mYarn_Fill;
+  // yarnBuf[ fill ] = ')'; // append terminator
+  // mWriter_LineSize += stream->Write(ev, yarnBuf, fill + 1); // +1 for ')'
+}
+
+void morkWriter::WriteStringToTokenDictCell(morkEnv* ev, const char* inCol,
+                                            mork_token inValue)
+// Note inCol should begin with '(' and end with '=', with col in between.
+{
+  // Write a dict meta cell: the literal column prefix (e.g. "(f=" or
+  // "(a=") followed by the string form of the token value and ')'.
+  morkStream* stream = mWriter_Stream;
+  mWriter_LineSize += stream->PutString(ev, inCol);
+
+  this->IndentAsNeeded(ev, morkWriter_kDictMetaCellValueDepth);
+  mdbYarn* yarn = &mWriter_ColYarn;
+  // mork_u1* yarnBuf = (mork_u1*) yarn->mYarn_Buf;
+  mWriter_Store->TokenToString(ev, inValue, yarn);
+  this->WriteYarn(ev, yarn);
+  stream->Putc(ev, ')');
+  ++mWriter_LineSize;
+
+  // mork_fill fill = yarn->mYarn_Fill;
+  // yarnBuf[ fill ] = ')'; // append terminator
+  // mWriter_LineSize += stream->Write(ev, yarnBuf, fill + 1); // +1 for ')'
+}
+
+void morkWriter::ChangeDictAtomScope(morkEnv* ev, mork_scope inScope) {
+  // Emit a "<(a=scope)>" (or "<(a^hexid)>") annotation switching the dict
+  // atom scope, then remember the new scope.
+  if (inScope != mWriter_DictAtomScope) {
+    // NOTE(review): scope changes inside a dict appear to be unexpected by
+    // design, hence the warning before writing the annotation anyway.
+    ev->NewWarning("unexpected atom scope change");
+
+    morkStream* stream = mWriter_Stream;
+    if (mWriter_LineSize) stream->PutLineBreak(ev);
+    mWriter_LineSize = 0;
+
+    char buf[128]; // buffer for staging the two hex IDs
+    char* p = buf;
+    *p++ = '<'; // we always start with open angle
+    *p++ = '('; // then the open paren of the scope cell
+    *p++ = (char)morkStore_kAtomScopeColumn;
+
+    mork_size scopeSize = 1; // default to one byte
+    if (inScope >= 0x80) {
+      *p++ = '^'; // indicates col is hex ID
+      scopeSize = ev->TokenAsHex(p, inScope);
+      p += scopeSize;
+    } else {
+      *p++ = '='; // indicates col is imm byte
+      *p++ = (char)(mork_u1)inScope;
+    }
+
+    *p++ = ')';
+    *p++ = '>';
+    *p = 0;
+
+    mork_size pending = scopeSize + 6; // punctuation around the scope bytes
+    this->IndentOverMaxLine(ev, pending, morkWriter_kDictAliasDepth);
+    mork_size bytesWritten;
+
+    stream->Write(ev->AsMdbEnv(), buf, pending, &bytesWritten);
+    mWriter_LineSize += bytesWritten;
+
+    mWriter_DictAtomScope = inScope;
+  }
+}
+
+void morkWriter::ChangeRowForm(morkEnv* ev, mork_cscode inNewForm) {
+  // Emit a "[(f=form)]" (or "[(f^hexid)]") annotation switching the row
+  // charset form, then remember the new form.
+  if (inNewForm != mWriter_RowForm) {
+    morkStream* stream = mWriter_Stream;
+    if (mWriter_LineSize) stream->PutLineBreak(ev);
+    mWriter_LineSize = 0;
+
+    char buf[128]; // buffer for staging the two hex IDs
+    char* p = buf;
+    *p++ = '['; // we always start with open bracket
+    *p++ = '('; // then the open paren of the form cell
+    *p++ = (char)morkStore_kFormColumn;
+
+    mork_size formSize = 1; // default to one byte
+    if (!morkCh_IsValue(inNewForm)) {
+      *p++ = '^'; // indicates col is hex ID
+      formSize = ev->TokenAsHex(p, inNewForm);
+      p += formSize;
+    } else {
+      *p++ = '='; // indicates col is imm byte
+      *p++ = (char)(mork_u1)inNewForm;
+    }
+
+    *p++ = ')';
+    *p++ = ']';
+    *p = 0;
+
+    mork_size pending = formSize + 6; // punctuation around the form bytes
+    this->IndentOverMaxLine(ev, pending, morkWriter_kRowCellDepth);
+    mork_size bytesWritten;
+    stream->Write(ev->AsMdbEnv(), buf, pending, &bytesWritten);
+    mWriter_LineSize += bytesWritten;
+
+    mWriter_RowForm = inNewForm;
+  }
+}
+
+void morkWriter::ChangeDictForm(morkEnv* ev, mork_cscode inNewForm) {
+  // Emit a "<(f=form)>" (or "<(f^hexid)>") annotation switching the dict
+  // charset form, then remember the new form.
+  if (inNewForm != mWriter_DictForm) {
+    morkStream* stream = mWriter_Stream;
+    if (mWriter_LineSize) stream->PutLineBreak(ev);
+    mWriter_LineSize = 0;
+
+    char buf[128]; // buffer for staging the two hex IDs
+    char* p = buf;
+    *p++ = '<'; // we always start with open angle
+    *p++ = '('; // then the open paren of the form cell
+    *p++ = (char)morkStore_kFormColumn;
+
+    mork_size formSize = 1; // default to one byte
+    if (!morkCh_IsValue(inNewForm)) {
+      *p++ = '^'; // indicates col is hex ID
+      formSize = ev->TokenAsHex(p, inNewForm);
+      p += formSize;
+    } else {
+      *p++ = '='; // indicates col is imm byte
+      *p++ = (char)(mork_u1)inNewForm;
+    }
+
+    *p++ = ')';
+    *p++ = '>';
+    *p = 0;
+
+    mork_size pending = formSize + 6; // punctuation around the form bytes
+    this->IndentOverMaxLine(ev, pending, morkWriter_kDictAliasDepth);
+
+    mork_size bytesWritten;
+    stream->Write(ev->AsMdbEnv(), buf, pending, &bytesWritten);
+    mWriter_LineSize += bytesWritten;
+
+    mWriter_DictForm = inNewForm;
+  }
+}
+
+void morkWriter::StartDict(morkEnv* ev) {
+  // Open a new dict on a fresh line, closing any dict already open. When
+  // the current form or atom scope differs from the defaults, the dict is
+  // opened with a meta section declaring them.
+  morkStream* stream = mWriter_Stream;
+  if (mWriter_DidStartDict) {
+    stream->Putc(ev, '>'); // end dict
+    ++mWriter_LineSize;
+  }
+  mWriter_DidStartDict = morkBool_kTrue;
+  mWriter_DidEndDict = morkBool_kFalse;
+
+  if (mWriter_LineSize) stream->PutLineBreak(ev);
+  mWriter_LineSize = 0;
+
+  if (mWriter_TableRowScope) // blank line before table's dict?
+    stream->PutLineBreak(ev);
+
+  if (mWriter_DictForm || mWriter_DictAtomScope != 'v') {
+    // non-default form or scope: open with a meta section "< <(f=..)(a=..)>"
+    stream->Putc(ev, '<');
+    stream->Putc(ev, ' ');
+    stream->Putc(ev, '<');
+    mWriter_LineSize = 3;
+    if (mWriter_DictForm)
+      this->WriteStringToTokenDictCell(ev, "(f=", mWriter_DictForm);
+    if (mWriter_DictAtomScope != 'v')
+      this->WriteStringToTokenDictCell(ev, "(a=", mWriter_DictAtomScope);
+
+    stream->Putc(ev, '>');
+    ++mWriter_LineSize;
+
+    mWriter_LineSize = stream->PutIndent(ev, morkWriter_kDictAliasDepth);
+  } else {
+    stream->Putc(ev, '<');
+    // stream->Putc(ev, ' ');
+    ++mWriter_LineSize;
+  }
+}
+
+void morkWriter::EndDict(morkEnv* ev) {
+  // Emit the dict terminator only when a dict is actually open, then
+  // record that no dict is in progress.
+  if (mWriter_DidStartDict) {
+    mWriter_Stream->Putc(ev, '>'); // end dict
+    ++mWriter_LineSize;
+  }
+  mWriter_DidEndDict = morkBool_kTrue;
+  mWriter_DidStartDict = morkBool_kFalse;
+}
+
+void morkWriter::StartTable(morkEnv* ev, morkTable* ioTable) {
+  // Open a table: "{<oid> {" plus a meta section holding the table kind,
+  // status flags (priority/unique/verbose), and the meta row, ending with
+  // the closing '}' of the meta section.
+  mdbOid toid; // to receive table oid
+  ioTable->GetTableOid(ev, &toid);
+
+  if (ev->Good()) {
+    morkStream* stream = mWriter_Stream;
+    if (mWriter_LineSize) stream->PutLineBreak(ev);
+    mWriter_LineSize = 0;
+    // stream->PutLineBreak(ev);
+
+    char buf[64 + 16]; // buffer for staging hex
+    char* p = buf;
+    *p++ = '{'; // punct 1
+    mork_size punctSize =
+        (mWriter_BeVerbose) ? 10 : 3; // counting "{ {/*r=*/ "
+
+    if (ioTable->IsTableRewrite() && mWriter_Incremental) {
+      *p++ = '-'; // '-' prefix: incremental full rewrite of this table
+      ++punctSize; // counting '-' // punct ++
+      ++mWriter_LineSize;
+    }
+    mork_size oidSize = ev->OidAsHex(p, toid);
+    p += oidSize;
+    *p++ = ' '; // punct 2
+    *p++ = '{'; // punct 3
+    if (mWriter_BeVerbose) {
+      // verbose mode adds a "/*r=<uses>*/" comment showing table GC uses
+      *p++ = '/'; // punct=4
+      *p++ = '*'; // punct=5
+      *p++ = 'r'; // punct=6
+      *p++ = '='; // punct=7
+
+      mork_token tableUses = (mork_token)ioTable->mTable_GcUses;
+      mork_size usesSize = ev->TokenAsHex(p, tableUses);
+      punctSize += usesSize;
+      p += usesSize;
+
+      *p++ = '*'; // punct=8
+      *p++ = '/'; // punct=9
+      *p++ = ' '; // punct=10
+    }
+    mork_size bytesWritten;
+
+    stream->Write(ev->AsMdbEnv(), buf, oidSize + punctSize, &bytesWritten);
+    mWriter_LineSize += bytesWritten;
+
+    mork_kind tk = mWriter_TableKind;
+    if (tk) {
+      this->IndentAsNeeded(ev, morkWriter_kTableMetaCellDepth);
+      this->WriteTokenToTokenMetaCell(ev, morkStore_kKindColumn, tk);
+    }
+
+    stream->Putc(ev, '('); // start 's' col cell
+    stream->Putc(ev, 's'); // column
+    stream->Putc(ev, '='); // column
+    mWriter_LineSize += 3;
+
+    int prio = (int)ioTable->mTable_Priority;
+    if (prio > 9) // need to force down to max decimal digit?
+      prio = 9;
+    prio += '0'; // add base digit zero
+    stream->Putc(ev, prio); // priority: (s=0
+    ++mWriter_LineSize;
+
+    if (ioTable->IsTableUnique()) {
+      stream->Putc(ev, 'u'); // (s=0u
+      ++mWriter_LineSize;
+    }
+    if (ioTable->IsTableVerbose()) {
+      stream->Putc(ev, 'v'); // (s=0uv
+      ++mWriter_LineSize;
+    }
+
+    // stream->Putc(ev, ':'); // (s=0uv:
+    // stream->Putc(ev, 'c'); // (s=0uv:c
+    stream->Putc(ev, ')'); // end 's' col cell (s=0uv:c)
+    mWriter_LineSize += 1; // maybe 3 if we add ':' and 'c'
+
+    morkRow* r = ioTable->mTable_MetaRow;
+    if (r) {
+      if (r->IsRow()) {
+        mWriter_SuppressDirtyRowNewline = morkBool_kTrue;
+        this->PutRow(ev, r); // meta row lives inside the meta section
+      } else
+        r->NonRowTypeError(ev);
+    }
+
+    stream->Putc(ev, '}'); // end meta
+    ++mWriter_LineSize;
+
+    if (mWriter_LineSize < mWriter_MaxIndent) {
+      stream->Putc(ev, ' '); // nice white space
+      ++mWriter_LineSize;
+    }
+  }
+}
+
+void morkWriter::EndTable(morkEnv* ev) {
+  // Close the table body and restore the default atom scope (a=v).
+  mWriter_Stream->Putc(ev, '}'); // end table
+  ++mWriter_LineSize;
+
+  mWriter_TableAtomScope = 'v'; // (a=v)
+}
+
+mork_bool morkWriter::PutRowDict(morkEnv* ev, morkRow* ioRow) {
+  // Write a dict alias "(<hexid>=<value>)" for every dirty book atom
+  // referenced from ioRow's cells, opening the dict lazily on first need
+  // and marking each written atom clean.
+  mWriter_RowForm = mWriter_TableForm;
+
+  morkCell* cells = ioRow->mRow_Cells;
+  if (cells) {
+    morkStream* stream = mWriter_Stream;
+    mdbYarn yarn;           // to ref content inside atom
+    char buf[64];           // buffer for staging the dict alias hex ID
+    char* idBuf = buf + 1;  // where the id always starts
+    buf[0] = '(';           // we always start with open paren
+
+    morkCell* end = cells + ioRow->mRow_Length;
+    --cells; // prepare for preincrement:
+    while (++cells < end && ev->Good()) {
+      morkAtom* atom = cells->GetAtom();
+      if (atom && atom->IsAtomDirty()) {
+        if (atom->IsBook()) // is it possible to write atom ID?
+        {
+          if (!this->DidStartDict()) {
+            this->StartDict(ev);
+            if (ev->Bad()) break;
+          }
+          atom->SetAtomClean(); // neutralize change
+
+          this->IndentAsNeeded(ev, morkWriter_kDictAliasDepth);
+          morkBookAtom* ba = (morkBookAtom*)atom;
+          mork_size size = ev->TokenAsHex(idBuf, ba->mBookAtom_Id);
+          mork_size bytesWritten;
+          stream->Write(ev->AsMdbEnv(), buf, size + 1, &bytesWritten); // '('
+          mWriter_LineSize += bytesWritten;
+
+          if (morkAtom::AliasYarn(atom, &yarn)) {
+            // switch dict scope/form annotations when the atom differs:
+            mork_scope atomScope = atom->GetBookAtomSpaceScope(ev);
+            if (atomScope && atomScope != mWriter_DictAtomScope)
+              this->ChangeDictAtomScope(ev, atomScope);
+
+            if (mWriter_DidStartDict && yarn.mYarn_Form != mWriter_DictForm)
+              this->ChangeDictForm(ev, yarn.mYarn_Form);
+
+            mork_size pending =
+                yarn.mYarn_Fill + morkWriter_kYarnEscapeSlop + 1;
+            this->IndentOverMaxLine(ev, pending,
+                                    morkWriter_kDictAliasValueDepth);
+
+            stream->Putc(ev, '='); // start value
+            ++mWriter_LineSize;
+
+            this->WriteYarn(ev, &yarn);
+
+            stream->Putc(ev, ')'); // end value
+            ++mWriter_LineSize;
+          } else
+            atom->BadAtomKindError(ev);
+
+          ++mWriter_DoneCount;
+        }
+      }
+    }
+  }
+  return ev->Good();
+}
+
+mork_bool morkWriter::IsYarnAllValue(const mdbYarn* inYarn) {
+  // True when every content byte of the yarn is a "value" character that
+  // can be written without escaping.
+  const mork_u1* bytes = (const mork_u1*)inYarn->mYarn_Buf;
+  mork_fill count = inYarn->mYarn_Fill;
+  for (mork_fill i = 0; i < count; ++i) {
+    if (!morkCh_IsValue(bytes[i])) return morkBool_kFalse;
+  }
+  return morkBool_kTrue;
+}
+
+mork_bool morkWriter::PutVerboseCell(morkEnv* ev, morkCell* ioCell,
+                                     mork_bool inWithVal) {
+  // Write one cell in verbose form "(column=value)", spelling out the
+  // column name as a string instead of a hex token reference. When
+  // inWithVal is false the value is written as empty.
+  morkStream* stream = mWriter_Stream;
+  morkStore* store = mWriter_Store;
+
+  mdbYarn* colYarn = &mWriter_ColYarn;
+
+  morkAtom* atom = (inWithVal) ? ioCell->GetAtom() : (morkAtom*)0;
+
+  mork_column col = ioCell->GetColumn();
+  store->TokenToString(ev, col, colYarn);
+
+  mdbYarn yarn; // to ref content inside atom
+  morkAtom::AliasYarn(atom, &yarn); // works even when atom==nil
+
+  if (yarn.mYarn_Form != mWriter_RowForm)
+    this->ChangeRowForm(ev, yarn.mYarn_Form);
+
+  // break the line early if the whole cell cannot fit:
+  mork_size pending =
+      yarn.mYarn_Fill + colYarn->mYarn_Fill + morkWriter_kYarnEscapeSlop + 3;
+  this->IndentOverMaxLine(ev, pending, morkWriter_kRowCellDepth);
+
+  stream->Putc(ev, '('); // start cell
+  ++mWriter_LineSize;
+
+  this->WriteYarn(ev, colYarn); // column
+
+  pending = yarn.mYarn_Fill + morkWriter_kYarnEscapeSlop;
+  this->IndentOverMaxLine(ev, pending, morkWriter_kRowCellValueDepth);
+  stream->Putc(ev, '=');
+  ++mWriter_LineSize;
+
+  this->WriteYarn(ev, &yarn); // value
+
+  stream->Putc(ev, ')'); // end cell
+  ++mWriter_LineSize;
+
+  return ev->Good();
+}
+
+// Write every valued cell of ioRow in verbose (named-column) notation,
+// stopping early as soon as the environment reports an error.
+mork_bool morkWriter::PutVerboseRowCells(morkEnv* ev, morkRow* ioRow) {
+  morkCell* walk = ioRow->mRow_Cells;
+  if (walk) {
+    morkCell* past = walk + ioRow->mRow_Length;
+    for (; walk < past && ev->Good(); ++walk) {
+      // cells without any value are deliberately skipped:
+      if (walk->GetAtom())
+        this->PutVerboseCell(ev, walk, /*inWithVal*/ morkBool_kTrue);
+    }
+  }
+  return ev->Good();
+}
+
+// Write one cell in terse hex notation.  The column is always emitted as
+// "^colHex".  For a book atom (one registered in a dict, so it has an ID)
+// the value is emitted as "^atomHex" — unless the literal content is no
+// longer than its hex ID and needs no escaping, in which case the value
+// is inlined as "=value".  Anonymous (non-book) atoms are always inlined.
+mork_bool morkWriter::PutCell(morkEnv* ev, morkCell* ioCell,
+                              mork_bool inWithVal) {
+  morkStream* stream = mWriter_Stream;
+  char buf[128]; // buffer for staging hex ids
+  char* idBuf = buf + 2; // where the id always starts
+  buf[0] = '('; // we always start with open paren
+  buf[1] = '^'; // column is always a hex ID
+
+  mork_size colSize = 0; // the size of col hex ID
+  mork_size bytesWritten;
+
+  morkAtom* atom = (inWithVal) ? ioCell->GetAtom() : (morkAtom*)0;
+
+  mork_column col = ioCell->GetColumn();
+  char* p = idBuf;
+  colSize = ev->TokenAsHex(p, col); // stage col hex right after "(^"
+  p += colSize;
+
+  mdbYarn yarn; // to ref content inside atom
+  morkAtom::AliasYarn(atom, &yarn); // works even when atom==nil
+
+  if (yarn.mYarn_Form != mWriter_RowForm)
+    this->ChangeRowForm(ev, yarn.mYarn_Form);
+
+  if (atom && atom->IsBook()) // is it possible to write atom ID?
+  {
+    this->IndentAsNeeded(ev, morkWriter_kRowCellDepth);
+    *p++ = '^';
+    morkBookAtom* ba = (morkBookAtom*)atom;
+
+    mork_size valSize = ev->TokenAsHex(p, ba->mBookAtom_Id);
+    mork_fill yarnFill = yarn.mYarn_Fill;
+    // inline the literal value only if it is no longer than the hex ID
+    // AND contains nothing that would need escaping:
+    mork_bool putImmYarn = (yarnFill <= valSize);
+    if (putImmYarn) putImmYarn = this->IsYarnAllValue(&yarn);
+
+    if (putImmYarn) // value no bigger than id?
+    {
+      p[-1] = '='; // go back and clobber '^' with '=' instead
+      if (yarnFill) {
+        MORK_MEMCPY(p, yarn.mYarn_Buf, yarnFill);
+        p += yarnFill;
+      }
+      *p++ = ')';
+      mork_size distance = (mork_size)(p - buf);
+      stream->Write(ev->AsMdbEnv(), buf, distance, &bytesWritten);
+      mWriter_LineSize += bytesWritten;
+    } else {
+      p += valSize;
+      *p = ')';
+      // total is "(^col^val)": colSize + valSize + 4 punctuation bytes
+      stream->Write(ev->AsMdbEnv(), buf, colSize + valSize + 4, &bytesWritten);
+      mWriter_LineSize += bytesWritten;
+    }
+
+    if (atom->IsAtomDirty()) {
+      atom->SetAtomClean();
+      ++mWriter_DoneCount;
+    }
+  } else // must write an anonymous atom
+  {
+    mork_size pending =
+        yarn.mYarn_Fill + colSize + morkWriter_kYarnEscapeSlop + 2;
+    this->IndentOverMaxLine(ev, pending, morkWriter_kRowCellDepth);
+
+    // NOTE(review): this declaration shadows bytesWritten declared above;
+    // harmless, but the outer variable would suffice.
+    mork_size bytesWritten;
+    stream->Write(ev->AsMdbEnv(), buf, colSize + 2, &bytesWritten); // "(^col"
+    mWriter_LineSize += bytesWritten;
+
+    pending -= (colSize + 2);
+    this->IndentOverMaxLine(ev, pending, morkWriter_kRowCellDepth);
+    stream->Putc(ev, '=');
+    ++mWriter_LineSize;
+
+    this->WriteYarn(ev, &yarn);
+    stream->Putc(ev, ')'); // end cell
+    ++mWriter_LineSize;
+  }
+  return ev->Good();
+}
+
+// Write every valued cell of ioRow in terse hex notation, stopping early
+// as soon as the environment reports an error.
+mork_bool morkWriter::PutRowCells(morkEnv* ev, morkRow* ioRow) {
+  morkCell* walk = ioRow->mRow_Cells;
+  if (walk) {
+    morkCell* past = walk + ioRow->mRow_Length;
+    for (; walk < past && ev->Good(); ++walk) {
+      // cells without any value are deliberately skipped:
+      if (walk->GetAtom())
+        this->PutCell(ev, walk, /*inWithVal*/ morkBool_kTrue);
+    }
+  }
+  return ev->Good();
+}
+
+// Write one row.  A dirty row is written in full as "[rid cells...]"
+// (or, for an incremental commit carrying a single-column delta, just
+// that one cell); a clean row is referenced by id only.  The row id is
+// written bare when its scope matches the enclosing table's row scope,
+// otherwise as a full oid "scope:id".  Marks the row clean when done.
+mork_bool morkWriter::PutRow(morkEnv* ev, morkRow* ioRow) {
+  if (ioRow && ioRow->IsRow()) {
+    mWriter_RowForm = mWriter_TableForm;
+
+    mork_size bytesWritten;
+    morkStream* stream = mWriter_Stream;
+    char buf[128 + 16]; // buffer for staging hex
+    char* p = buf;
+    mdbOid* roid = &ioRow->mRow_Oid;
+    mork_size ridSize = 0;
+
+    mork_scope tableScope = mWriter_TableRowScope;
+
+    if (ioRow->IsRowDirty()) {
+      // the newline suppression flag is one-shot; consume it here:
+      if (mWriter_SuppressDirtyRowNewline || !mWriter_LineSize)
+        mWriter_SuppressDirtyRowNewline = morkBool_kFalse;
+      else {
+        if (tableScope) // in a table?
+          mWriter_LineSize = stream->PutIndent(ev, morkWriter_kRowDepth);
+        else
+          mWriter_LineSize = stream->PutIndent(ev, 0); // no indent
+      }
+
+      // mork_rid rid = roid->mOid_Id;
+      *p++ = '['; // start row punct=1
+      mork_size punctSize =
+          (mWriter_BeVerbose) ? 9 : 1; // counting "[ /*r=*/ "
+
+      mork_bool rowRewrite = ioRow->IsRowRewrite();
+
+      // incremental rewrite is emitted as a row cut "[-rid ...]":
+      if (rowRewrite && mWriter_Incremental) {
+        *p++ = '-';
+        ++punctSize; // counting '-'
+        ++mWriter_LineSize;
+      }
+
+      if (tableScope && roid->mOid_Scope == tableScope)
+        ridSize = ev->TokenAsHex(p, roid->mOid_Id);
+      else
+        ridSize = ev->OidAsHex(p, *roid);
+
+      p += ridSize;
+
+      // verbose mode appends a " /*r=uses*/ " gc-use-count annotation:
+      if (mWriter_BeVerbose) {
+        *p++ = ' '; // punct=2
+        *p++ = '/'; // punct=3
+        *p++ = '*'; // punct=4
+        *p++ = 'r'; // punct=5
+        *p++ = '='; // punct=6
+
+        mork_size usesSize = ev->TokenAsHex(p, (mork_token)ioRow->mRow_GcUses);
+        punctSize += usesSize;
+        p += usesSize;
+
+        *p++ = '*'; // punct=7
+        *p++ = '/'; // punct=8
+        *p++ = ' '; // punct=9
+      }
+      stream->Write(ev->AsMdbEnv(), buf, ridSize + punctSize, &bytesWritten);
+      mWriter_LineSize += bytesWritten;
+
+      // special case situation where row puts exactly one column:
+      if (!rowRewrite && mWriter_Incremental && ioRow->HasRowDelta()) {
+        mork_column col = ioRow->GetDeltaColumn();
+        morkCell dummy(col, morkChange_kNil, (morkAtom*)0);
+        morkCell* cell = 0;
+
+        // a cut delta writes the cell with no value at all:
+        mork_bool withVal = (ioRow->GetDeltaChange() != morkChange_kCut);
+
+        if (withVal) {
+          mork_pos cellPos = 0; // dummy pos
+          cell = ioRow->GetCell(ev, col, &cellPos);
+        }
+        if (!cell) cell = &dummy;
+
+        if (mWriter_BeVerbose)
+          this->PutVerboseCell(ev, cell, withVal);
+        else
+          this->PutCell(ev, cell, withVal);
+      } else // put entire row?
+      {
+        if (mWriter_BeVerbose)
+          this->PutVerboseRowCells(ev, ioRow); // write all, verbosely
+        else
+          this->PutRowCells(ev, ioRow); // write all, hex notation
+      }
+
+      stream->Putc(ev, ']'); // end row
+      ++mWriter_LineSize;
+    } else {
+      // clean row: reference it by id only, no cells:
+      this->IndentAsNeeded(ev, morkWriter_kRowDepth);
+
+      if (tableScope && roid->mOid_Scope == tableScope)
+        ridSize = ev->TokenAsHex(p, roid->mOid_Id);
+      else
+        ridSize = ev->OidAsHex(p, *roid);
+
+      stream->Write(ev->AsMdbEnv(), buf, ridSize, &bytesWritten);
+      mWriter_LineSize += bytesWritten;
+      stream->Putc(ev, ' ');
+      ++mWriter_LineSize;
+    }
+
+    ++mWriter_DoneCount;
+
+    ioRow->SetRowClean(); // try to do this at the very last
+  } else
+    ioRow->NonRowTypeWarning(ev);
+
+  return ev->Good();
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkWriter.h b/comm/mailnews/db/mork/morkWriter.h
new file mode 100644
index 0000000000..7e716bec6a
--- /dev/null
+++ b/comm/mailnews/db/mork/morkWriter.h
@@ -0,0 +1,340 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKWRITER_
+#define _MORKWRITER_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKMAP_
+# include "morkMap.h"
+#endif
+
+#ifndef _MORKROWMAP_
+# include "morkRowMap.h"
+#endif
+
+#ifndef _MORKTABLE_
+# include "morkTable.h"
+#endif
+
+#ifndef _MORKATOMMAP_
+# include "morkAtomMap.h"
+#endif
+
+#ifndef _MORKATOMSPACE_
+# include "morkAtomSpace.h"
+#endif
+
+#ifndef _MORKROWSPACE_
+# include "morkRowSpace.h"
+#endif
+
+#ifndef _MORKSTREAM_
+# include "morkStream.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/* buffer size for stream */
+#define morkWriter_kStreamBufSize /*i*/ (16 * 1024)
+
+#define morkDerived_kWriter /*i*/ 0x5772 /* ascii 'Wr' */
+
+#define morkWriter_kPhaseNothingDone 0 /* nothing has yet been done */
+#define morkWriter_kPhaseDirtyAllDone 1 /* DirtyAll() is done */
+#define morkWriter_kPhasePutHeaderDone 2 /* PutHeader() is done */
+
+#define morkWriter_kPhaseRenumberAllDone 3 /* RenumberAll() is done */
+
+#define morkWriter_kPhaseStoreAtomSpaces 4 /*mWriter_StoreAtomSpacesIter*/
+#define morkWriter_kPhaseAtomSpaceAtomAids 5 /*mWriter_AtomSpaceAtomAidsIter*/
+
+#define morkWriter_kPhaseStoreRowSpacesTables 6 /*mWriter_StoreRowSpacesIter*/
+#define morkWriter_kPhaseRowSpaceTables 7 /*mWriter_RowSpaceTablesIter*/
+#define morkWriter_kPhaseTableRowArray 8 /*mWriter_TableRowArrayPos */
+
+#define morkWriter_kPhaseStoreRowSpacesRows 9 /*mWriter_StoreRowSpacesIter*/
+#define morkWriter_kPhaseRowSpaceRows 10 /*mWriter_RowSpaceRowsIter*/
+
+#define morkWriter_kPhaseContentDone 11 /* all content written */
+#define morkWriter_kPhaseWritingDone 12 /* everything has been done */
+
+#define morkWriter_kCountNumberOfPhases 13 /* part of mWrite_TotalCount */
+
+#define morkWriter_kMaxColumnNameSize 128 /* longest writable col name */
+
+#define morkWriter_kMaxIndent 66 /* default value for mWriter_MaxIndent */
+#define morkWriter_kMaxLine 78 /* default value for mWriter_MaxLine */
+
+#define morkWriter_kYarnEscapeSlop 4 /* guess average yarn escape overhead */
+
+#define morkWriter_kTableMetaCellDepth 4 /* */
+#define morkWriter_kTableMetaCellValueDepth 6 /* */
+
+#define morkWriter_kDictMetaCellDepth 4 /* */
+#define morkWriter_kDictMetaCellValueDepth 6 /* */
+
+#define morkWriter_kDictAliasDepth 2 /* */
+#define morkWriter_kDictAliasValueDepth 4 /* */
+
+#define morkWriter_kRowDepth 2 /* */
+#define morkWriter_kRowCellDepth 4 /* */
+#define morkWriter_kRowCellValueDepth 6 /* */
+
+#define morkWriter_kGroupBufSize 64 /* */
+
+// v=1.1 retired on 23-Mar-99 (for metainfo one char column names)
+// v=1.2 retired on 20-Apr-99 (for ":c" suffix on table kind hex refs)
+// v=1.3 retired on 20-Apr-99 (for 1CE:m instead of ill-formed 1CE:^6D)
+#define morkWriter_kFileHeader "// <!-- <mdb:mork:z v=\"1.4\"/> -->"
+
+class morkWriter : public morkNode { // serializes a store to a stream
+
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ public: // state is public because the entire Mork system is private
+  morkStore* mWriter_Store; // weak ref to committing store
+  nsIMdbFile* mWriter_File; // strong ref to store's file
+  nsIMdbFile* mWriter_Bud; // strong ref to bud of mWriter_File
+  morkStream* mWriter_Stream; // strong ref to stream on bud file
+  nsIMdbHeap* mWriter_SlotHeap; // strong ref to slot heap
+
+  // GroupIdentity should be based on mStore_CommitGroupIdentity:
+  mork_gid mWriter_CommitGroupIdentity; // transaction ID number
+
+  // GroupBuf holds a hex version of mWriter_CommitGroupIdentity:
+  char mWriter_GroupBuf[morkWriter_kGroupBufSize];
+  mork_fill mWriter_GroupBufFill; // actual bytes in GroupBuf
+
+  mork_count mWriter_TotalCount; // count of all things to be written
+  mork_count mWriter_DoneCount; // count of things already written
+
+  mork_size mWriter_LineSize; // length of current line being written
+  mork_size mWriter_MaxIndent; // line size forcing a line break
+  mork_size mWriter_MaxLine; // line size forcing a value continuation
+
+  mork_cscode mWriter_TableForm; // current charset metainfo
+  mork_scope mWriter_TableAtomScope; // current atom scope
+  mork_scope mWriter_TableRowScope; // current row scope
+  mork_kind mWriter_TableKind; // current table kind
+
+  mork_cscode mWriter_RowForm; // current charset metainfo
+  mork_scope mWriter_RowAtomScope; // current atom scope
+  mork_scope mWriter_RowScope; // current row scope
+
+  mork_cscode mWriter_DictForm; // current charset metainfo
+  mork_scope mWriter_DictAtomScope; // current atom scope
+
+  mork_bool mWriter_NeedDirtyAll; // need to call DirtyAll()
+  mork_bool mWriter_Incremental; // opposite of mWriter_NeedDirtyAll
+  mork_bool mWriter_DidStartDict; // true when a dict has been started
+  mork_bool mWriter_DidEndDict; // true when a dict has been ended
+
+  mork_bool mWriter_SuppressDirtyRowNewline; // for table meta rows
+  mork_bool mWriter_DidStartGroup; // true when a group has been started
+  mork_bool mWriter_DidEndGroup; // true when a group has been ended
+  mork_u1 mWriter_Phase; // status of writing process
+
+  mork_bool mWriter_BeVerbose; // driven by env and table verbose settings:
+  // mWriter_BeVerbose equals ( ev->mEnv_BeVerbose || table->IsTableVerbose() )
+
+  mork_u1 mWriter_Pad[3]; // for u4 alignment
+
+  mork_pos mWriter_TableRowArrayPos; // index into mTable_RowArray
+
+  char mWriter_SafeNameBuf[(morkWriter_kMaxColumnNameSize * 2) + 4];
+  // Note: extra four bytes in SafeNameBuf means we can always append to yarn
+
+  char mWriter_ColNameBuf[morkWriter_kMaxColumnNameSize + 4];
+  // Note: extra four bytes in ColNameBuf means we can always append to yarn
+
+  mdbYarn mWriter_ColYarn; // a yarn to describe space in ColNameBuf:
+  // mYarn_Buf == mWriter_ColNameBuf, mYarn_Size ==
+  // morkWriter_kMaxColumnNameSize
+
+  mdbYarn mWriter_SafeYarn; // a yarn to describe space in SafeNameBuf:
+  // mYarn_Buf == mWriter_SafeNameBuf, mYarn_Size == (kMaxColumnNameSize * 2)
+
+  morkAtomSpaceMapIter mWriter_StoreAtomSpacesIter; // for mStore_AtomSpaces
+  morkAtomAidMapIter mWriter_AtomSpaceAtomAidsIter; // for AtomSpace_AtomAids
+
+  morkRowSpaceMapIter mWriter_StoreRowSpacesIter; // for mStore_RowSpaces
+  morkTableMapIter mWriter_RowSpaceTablesIter; // for mRowSpace_Tables
+
+#ifdef MORK_ENABLE_PROBE_MAPS
+  morkRowProbeMapIter mWriter_RowSpaceRowsIter; // for mRowSpace_Rows
+#else /*MORK_ENABLE_PROBE_MAPS*/
+  morkRowMapIter mWriter_RowSpaceRowsIter; // for mRowSpace_Rows
+#endif /*MORK_ENABLE_PROBE_MAPS*/
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev) override; // CloseWriter()
+  virtual ~morkWriter(); // assert that close executed earlier
+
+ public: // morkWriter construction & destruction
+  morkWriter(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap,
+             morkStore* ioStore, nsIMdbFile* ioFile, nsIMdbHeap* ioSlotHeap);
+  void CloseWriter(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkWriter(const morkWriter& other);
+  morkWriter& operator=(const morkWriter& other);
+
+ public: // dynamic type identification
+  mork_bool IsWriter() const {
+    return IsNode() && mNode_Derived == morkDerived_kWriter;
+  }
+  // } ===== end morkNode methods =====
+
+ public: // typing & errors
+  static void NonWriterTypeError(morkEnv* ev);
+  static void NilWriterStoreError(morkEnv* ev);
+  static void NilWriterBudError(morkEnv* ev);
+  static void NilWriterStreamError(morkEnv* ev);
+  static void NilWriterFileError(morkEnv* ev);
+  static void UnsupportedPhaseError(morkEnv* ev);
+
+ public: // utilities
+  void ChangeRowForm(morkEnv* ev, mork_cscode inNewForm);
+  void ChangeDictForm(morkEnv* ev, mork_cscode inNewForm);
+  void ChangeDictAtomScope(morkEnv* ev, mork_scope inScope);
+
+ public: // inlines
+  mork_bool DidStartDict() const { return mWriter_DidStartDict; }
+  mork_bool DidEndDict() const { return mWriter_DidEndDict; }
+
+  // break the line when it has already grown past the indent threshold:
+  void IndentAsNeeded(morkEnv* ev, mork_size inDepth) {
+    if (mWriter_LineSize > mWriter_MaxIndent)
+      mWriter_LineSize = mWriter_Stream->PutIndent(ev, inDepth);
+  }
+
+  // break the line when pending output would push it past the max width:
+  void IndentOverMaxLine(morkEnv* ev, mork_size inPendingSize,
+                         mork_size inDepth) {
+    if (mWriter_LineSize + inPendingSize > mWriter_MaxLine)
+      mWriter_LineSize = mWriter_Stream->PutIndent(ev, inDepth);
+  }
+
+ public: // delayed construction
+  void MakeWriterStream(morkEnv* ev); // give writer a suitable stream
+
+ public: // iterative/asynchronous writing
+  mork_bool WriteMore(morkEnv* ev); // call until IsWritingDone() is true
+
+  mork_bool IsWritingDone() const // don't call WriteMore() any longer?
+  {
+    return mWriter_Phase == morkWriter_kPhaseWritingDone;
+  }
+
+ public: // marking all content dirty
+  mork_bool DirtyAll(morkEnv* ev);
+  // DirtyAll() visits every store sub-object and marks
+  // them dirty, including every table, row, cell, and atom. The return
+  // equals ev->Good(), to show whether any error happened. This method is
+  // intended for use in the beginning of a "compress commit" which writes
+  // all store content, whether dirty or not. We dirty everything first so
+  // that later iterations over content can mark things clean as they are
+  // written, and organize the process of serialization so that objects are
+  // written only at need (because of being dirty). Note the method can
+  // stop early when any error happens, since this will abort any commit.
+
+ public: // group commit transactions
+  mork_bool StartGroup(morkEnv* ev);
+  mork_bool CommitGroup(morkEnv* ev);
+  mork_bool AbortGroup(morkEnv* ev);
+
+ public: // phase methods
+  mork_bool OnNothingDone(morkEnv* ev);
+  mork_bool OnDirtyAllDone(morkEnv* ev);
+  mork_bool OnPutHeaderDone(morkEnv* ev);
+
+  mork_bool OnRenumberAllDone(morkEnv* ev);
+
+  mork_bool OnStoreAtomSpaces(morkEnv* ev);
+  mork_bool OnAtomSpaceAtomAids(morkEnv* ev);
+
+  mork_bool OnStoreRowSpacesTables(morkEnv* ev);
+  mork_bool OnRowSpaceTables(morkEnv* ev);
+  mork_bool OnTableRowArray(morkEnv* ev);
+
+  mork_bool OnStoreRowSpacesRows(morkEnv* ev);
+  mork_bool OnRowSpaceRows(morkEnv* ev);
+
+  mork_bool OnContentDone(morkEnv* ev);
+  mork_bool OnWritingDone(morkEnv* ev);
+
+ public: // writing dict items first pass
+  mork_bool PutTableDict(morkEnv* ev, morkTable* ioTable);
+  mork_bool PutRowDict(morkEnv* ev, morkRow* ioRow);
+
+ public: // writing node content second pass
+  mork_bool PutTable(morkEnv* ev, morkTable* ioTable);
+  mork_bool PutRow(morkEnv* ev, morkRow* ioRow);
+  mork_bool PutRowCells(morkEnv* ev, morkRow* ioRow);
+  mork_bool PutVerboseRowCells(morkEnv* ev, morkRow* ioRow);
+
+  mork_bool PutCell(morkEnv* ev, morkCell* ioCell, mork_bool inWithVal);
+  mork_bool PutVerboseCell(morkEnv* ev, morkCell* ioCell, mork_bool inWithVal);
+
+  mork_bool PutTableChange(morkEnv* ev, const morkTableChange* inChange);
+
+ public: // other writer methods
+  mork_bool IsYarnAllValue(const mdbYarn* inYarn);
+
+  mork_size WriteYarn(morkEnv* ev, const mdbYarn* inYarn);
+  // return number of atom bytes written on the current line (which
+  // implies that escaped line breaks will make the size value smaller
+  // than the entire yarn's size, since only part goes on a last line).
+
+  mork_size WriteAtom(morkEnv* ev, const morkAtom* inAtom);
+  // return number of atom bytes written on the current line (which
+  // implies that escaped line breaks will make the size value smaller
+  // than the entire atom's size, since only part goes on a last line).
+
+  void WriteAllStoreTables(morkEnv* ev);
+  void WriteAtomSpaceAsDict(morkEnv* ev, morkAtomSpace* ioSpace);
+
+  void WriteTokenToTokenMetaCell(morkEnv* ev, mork_token inCol,
+                                 mork_token inValue);
+  void WriteStringToTokenDictCell(morkEnv* ev, const char* inCol,
+                                  mork_token inValue);
+  // Note inCol should begin with '(' and end with '=', with col in between.
+
+  void StartDict(morkEnv* ev);
+  void EndDict(morkEnv* ev);
+
+  void StartTable(morkEnv* ev, morkTable* ioTable);
+  void EndTable(morkEnv* ev);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakWriter(morkWriter* me, morkEnv* ev, morkWriter** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongWriter(morkWriter* me, morkEnv* ev,
+                               morkWriter** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKWRITER_ */
diff --git a/comm/mailnews/db/mork/morkYarn.cpp b/comm/mailnews/db/mork/morkYarn.cpp
new file mode 100644
index 0000000000..013b36c754
--- /dev/null
+++ b/comm/mailnews/db/mork/morkYarn.cpp
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#ifndef _MORKYARN_
+# include "morkYarn.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// ````` ````` ````` ````` `````
+// { ===== begin morkNode interface =====
+
+/*public virtual*/ void morkYarn::CloseMorkNode(
+    morkEnv* ev) /*i*/ // CloseYarn() only if open
+{
+  if (!this->IsOpenNode()) return; // already closing, shut, or dead
+  this->MarkClosing();
+  this->CloseYarn(ev);
+  this->MarkShut();
+}
+
+/*public virtual*/
+morkYarn::~morkYarn() /*i*/ // assert CloseYarn() executed earlier
+{
+  // the wrapped yarn buffer is expected to be released before destruction:
+  MORK_ASSERT(mYarn_Body.mYarn_Buf == 0);
+}
+
+/*public non-poly*/
+morkYarn::morkYarn(morkEnv* ev, /*i*/
+                   const morkUsage& inUsage, nsIMdbHeap* ioHeap)
+    : morkNode(ev, inUsage, ioHeap) {
+  // tag the node as a yarn only if base construction succeeded:
+  if (ev->Good()) mNode_Derived = morkDerived_kYarn;
+}
+
+/*public non-poly*/ void morkYarn::CloseYarn(
+    morkEnv* ev) /*i*/ // called by CloseMorkNode();
+{
+  if (!this->IsNode()) {
+    this->NonNodeError(ev);
+    return;
+  }
+  this->MarkShut();
+}
+
+// } ===== end morkNode methods =====
+// ````` ````` ````` ````` `````
+
+// report that a node expected to be a morkYarn is not one:
+/*static*/ void morkYarn::NonYarnTypeError(morkEnv* ev) {
+  ev->NewError("non morkYarn");
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/morkYarn.h b/comm/mailnews/db/mork/morkYarn.h
new file mode 100644
index 0000000000..e4cca8a843
--- /dev/null
+++ b/comm/mailnews/db/mork/morkYarn.h
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKYARN_
+#define _MORKYARN_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkDerived_kYarn /*i*/ 0x7952 /* ascii 'yR' */
+
+/*| morkYarn: a reference counted nsIMdbYarn C struct. This is for use in those
+**| few cases where single instances of reference counted buffers are needed
+**| in Mork, and we expect few enough instances that overhead is not a factor
+**| in deciding whether to use such a thing.
+|*/
+class morkYarn : public morkNode { // refcounted yarn
+
+  // public: // slots inherited from morkNode (meant to inform only)
+  // nsIMdbHeap* mNode_Heap;
+
+  // mork_base mNode_Base; // must equal morkBase_kNode
+  // mork_derived mNode_Derived; // depends on specific node subclass
+
+  // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+  // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+  // mork_able mNode_Mutable; // can this node be modified?
+  // mork_load mNode_Load; // is this node clean or dirty?
+
+  // mork_uses mNode_Uses; // refcount for strong refs
+  // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ public: // state is public because the entire Mork system is private
+  mdbYarn mYarn_Body; // the wrapped yarn; mYarn_Buf must be nil by dtor time
+
+  // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+  virtual void CloseMorkNode(morkEnv* ev) override; // CloseYarn() only if open
+  virtual ~morkYarn(); // assert that CloseYarn() executed earlier
+
+ public: // morkYarn construction & destruction
+  morkYarn(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioHeap);
+  void CloseYarn(morkEnv* ev); // called by CloseMorkNode();
+
+ private: // copying is not allowed
+  morkYarn(const morkYarn& other);
+  morkYarn& operator=(const morkYarn& other);
+
+ public: // dynamic type identification
+  mork_bool IsYarn() const {
+    return IsNode() && mNode_Derived == morkDerived_kYarn;
+  }
+  // } ===== end morkNode methods =====
+
+ public: // typing
+  static void NonYarnTypeError(morkEnv* ev);
+
+ public: // typesafe refcounting inlines calling inherited morkNode methods
+  static void SlotWeakYarn(morkYarn* me, morkEnv* ev, morkYarn** ioSlot) {
+    morkNode::SlotWeakNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+
+  static void SlotStrongYarn(morkYarn* me, morkEnv* ev, morkYarn** ioSlot) {
+    morkNode::SlotStrongNode((morkNode*)me, ev, (morkNode**)ioSlot);
+  }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKYARN_ */
diff --git a/comm/mailnews/db/mork/morkZone.cpp b/comm/mailnews/db/mork/morkZone.cpp
new file mode 100644
index 0000000000..6ee3032f48
--- /dev/null
+++ b/comm/mailnews/db/mork/morkZone.cpp
@@ -0,0 +1,487 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKZONE_
+# include "morkZone.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// { ===== begin morkNode interface =====
+// public: // morkNode virtual methods
+void morkZone::CloseMorkNode(morkEnv* ev) // CloseZone() only if open
+{
+  if (!this->IsOpenNode()) return; // already closing, shut, or dead
+  this->MarkClosing();
+  this->CloseZone(ev);
+  this->MarkShut();
+}
+
+morkZone::~morkZone() // assert that CloseZone() executed earlier
+{
+  // CloseZone() must have run (and freed all hunks) before destruction:
+  MORK_ASSERT(this->IsShutNode());
+}
+
+// public: // morkMap construction & destruction
+// Construct a zone allocator over ioZoneHeap.  All counters, volumes,
+// lists, and the current suballocation cursor start at zero; a nil
+// ioZoneHeap is reported as an error rather than tolerated.
+morkZone::morkZone(morkEnv* ev, const morkUsage& inUsage,
+                   nsIMdbHeap* ioNodeHeap, nsIMdbHeap* ioZoneHeap)
+    : morkNode(ev, inUsage, ioNodeHeap),
+      mZone_Heap(0),
+      mZone_HeapVolume(0),
+      mZone_BlockVolume(0),
+      mZone_RunVolume(0),
+      mZone_ChipVolume(0),
+      mZone_FreeOldRunVolume(0),
+      mZone_HunkCount(0),
+      mZone_FreeOldRunCount(0),
+      mZone_HunkList(0),
+      mZone_FreeOldRunList(0),
+      mZone_At(0),
+      mZone_AtSize(0)
+// morkRun* mZone_FreeRuns[ morkZone_kBuckets + 1 ]; // cleared in body
+{
+  // zero every free-run bucket (plus the one extra slot past the last):
+  morkRun** bucket = mZone_FreeRuns;
+  morkRun** past = bucket + (morkZone_kBuckets + 1);
+  for (; bucket < past; ++bucket) *bucket = 0;
+
+  if (ev->Good()) {
+    if (ioZoneHeap) {
+      nsIMdbHeap_SlotStrongHeap(ioZoneHeap, ev, &mZone_Heap);
+      if (ev->Good()) mNode_Derived = morkDerived_kZone;
+    } else
+      ev->NilPointerError();
+  }
+}
+
+// Release the zone: free every hunk in mZone_HunkList back to the zone
+// heap, then drop the strong ref to the heap itself and mark this shut.
+void morkZone::CloseZone(morkEnv* ev) // called by CloseMorkNode()
+{
+  if (this->IsNode()) {
+    nsIMdbHeap* heap = mZone_Heap;
+    if (heap) {
+      morkHunk* hunk = 0;
+      nsIMdbEnv* mev = ev->AsMdbEnv();
+
+      morkHunk* next = mZone_HunkList;
+      while ((hunk = next) != 0) {
+#ifdef morkHunk_USE_TAG_SLOT
+        if (!hunk->HunkGoodTag()) hunk->BadHunkTagWarning(ev);
+#endif /* morkHunk_USE_TAG_SLOT */
+
+        // fetch the link BEFORE freeing the hunk that holds it:
+        next = hunk->HunkNext();
+        heap->Free(mev, hunk);
+      }
+    }
+    nsIMdbHeap_SlotStrongHeap((nsIMdbHeap*)0, ev, &mZone_Heap);
+    this->MarkShut();
+  } else
+    this->NonNodeError(ev);
+}
+
+// } ===== end morkNode methods =====
+
+// report that a node expected to be a morkZone is not one:
+/*static*/ void morkZone::NonZoneTypeError(morkEnv* ev) {
+  ev->NewError("non morkZone");
+}
+
+// report a zone whose mZone_Heap slot is unexpectedly nil:
+/*static*/ void morkZone::NilZoneHeapError(morkEnv* ev) {
+  ev->NewError("nil mZone_Heap");
+}
+
+// warn (non-fatal) that a hunk's debug tag slot looks corrupted:
+/*static*/ void morkHunk::BadHunkTagWarning(morkEnv* ev) {
+  ev->NewWarning("bad mHunk_Tag");
+}
+
+// report that a run's debug tag slot looks corrupted:
+/*static*/ void morkRun::BadRunTagError(morkEnv* ev) {
+  ev->NewError("bad mRun_Tag");
+}
+
+// report a run whose size does not meet the zone's alignment rules:
+/*static*/ void morkRun::RunSizeAlignError(morkEnv* ev) {
+  ev->NewError("bad RunSize() alignment");
+}
+
+// { ===== begin morkZone methods =====
+
+// Repoint the suballocation cursor (mZone_At / mZone_AtSize) at memory
+// with at least inNeededSize bytes: first try to reuse a run from the
+// free-old-run list, otherwise allocate a brand new hunk from the heap.
+// Returns the resulting mZone_AtSize (0 when allocation failed).
+mork_size morkZone::zone_grow_at(morkEnv* ev, mork_size inNeededSize) {
+  mZone_At = 0; // remove any ref to current hunk
+  mZone_AtSize = 0; // zero available bytes in current hunk
+
+  mork_size runSize = 0; // actual size of a particular run
+
+  // try to find a run in old run list with at least inNeededSize bytes:
+  morkRun* run = mZone_FreeOldRunList; // cursor in list scan
+  morkRun* prev = 0; // the node before run in the list scan
+
+  while (run) // another run in list to check?
+  {
+    morkOldRun* oldRun = (morkOldRun*)run;
+    mork_size oldSize = oldRun->OldSize();
+    if (oldSize >= inNeededSize) // found one big enough?
+    {
+      runSize = oldSize;
+      break; // end while loop early
+    }
+    prev = run; // remember last position in singly linked list
+    run = run->RunNext(); // advance cursor to next node in list
+  }
+  if (runSize && run) // found a usable old run?
+  {
+    morkRun* next = run->RunNext();
+    if (prev) // another node in free list precedes run?
+      prev->RunSetNext(next); // unlink run
+    else
+      mZone_FreeOldRunList = next; // unlink run from head of list
+
+    morkOldRun* oldRun = (morkOldRun*)run;
+    oldRun->OldSetSize(runSize);
+    mZone_At = (mork_u1*)run;
+    mZone_AtSize = runSize;
+
+#ifdef morkZone_CONFIG_DEBUG
+# ifdef morkZone_CONFIG_ALIGN_8
+    mork_ip lowThree = ((mork_ip)mZone_At) & 7;
+    if (lowThree) // not 8 byte aligned?
+# else /*morkZone_CONFIG_ALIGN_8*/
+    mork_ip lowTwo = ((mork_ip)mZone_At) & 3;
+    if (lowTwo) // not 4 byte aligned?
+# endif /*morkZone_CONFIG_ALIGN_8*/
+      ev->NewWarning("mZone_At not aligned");
+#endif /*morkZone_CONFIG_DEBUG*/
+  } else // need to allocate a brand new run
+  {
+    inNeededSize += 7; // allow for possible alignment padding
+    mork_size newSize = (inNeededSize > morkZone_kNewHunkSize)
+                            ? inNeededSize
+                            : morkZone_kNewHunkSize;
+
+    morkHunk* hunk = this->zone_new_hunk(ev, newSize);
+    if (hunk) {
+      morkRun* hunkRun = hunk->HunkRun();
+      mork_u1* at = (mork_u1*)hunkRun->RunAsBlock();
+      mork_ip lowBits = ((mork_ip)at) & 7;
+      if (lowBits) // not 8 byte aligned?
+      {
+        mork_ip skip = (8 - lowBits); // skip the complement to align
+        at += skip;
+        newSize -= skip;
+      }
+      mZone_At = at;
+      mZone_AtSize = newSize;
+    }
+  }
+
+  return mZone_AtSize;
+}
+
+// Allocate a new hunk from the zone heap big enough to carry inSize
+// payload bytes plus the morkHunk header, push it onto mZone_HunkList,
+// and initialize its run.  Returns nil (after reporting out-of-memory
+// if no error is pending) when the heap allocation fails.
+morkHunk* morkZone::zone_new_hunk(morkEnv* ev, mdb_size inSize) // alloc
+{
+  mdb_size hunkSize = inSize + sizeof(morkHunk);
+  void* outBlock = 0; // we are going straight to the heap:
+  mZone_Heap->Alloc(ev->AsMdbEnv(), hunkSize, &outBlock);
+  if (outBlock) {
+#ifdef morkZone_CONFIG_VOL_STATS
+    mZone_HeapVolume += hunkSize; // track all heap allocations
+#endif /* morkZone_CONFIG_VOL_STATS */
+
+    morkHunk* hunk = (morkHunk*)outBlock;
+#ifdef morkHunk_USE_TAG_SLOT
+    hunk->HunkInitTag();
+#endif /* morkHunk_USE_TAG_SLOT */
+
+    // push onto the front of the zone's singly linked hunk list:
+    hunk->HunkSetNext(mZone_HunkList);
+    mZone_HunkList = hunk;
+    ++mZone_HunkCount;
+
+    morkRun* run = hunk->HunkRun();
+    run->RunSetSize(inSize);
+#ifdef morkRun_USE_TAG_SLOT
+    run->RunInitTag();
+#endif /* morkRun_USE_TAG_SLOT */
+
+    return hunk;
+  }
+  if (ev->Good()) // got this far without any error reported yet?
+    ev->OutOfMemoryError();
+  return (morkHunk*)0;
+}
+
+// Suballocate inSize bytes from the zone's current hunk cursor.  If the
+// cursor cannot satisfy the request: either give the request its own
+// dedicated hunk (when the leftover cursor space is still worth keeping)
+// or grow the cursor with a fresh hunk and retry.  Returns nil on
+// failure after reporting out-of-memory if no error is pending.
+void* morkZone::zone_new_chip(morkEnv* ev, mdb_size inSize) // alloc
+{
+#ifdef morkZone_CONFIG_VOL_STATS
+  mZone_BlockVolume += inSize; // sum sizes of both chips and runs
+#endif /* morkZone_CONFIG_VOL_STATS */
+
+  mork_u1* at = mZone_At;
+  mork_size atSize = mZone_AtSize; // available bytes in current hunk
+  if (atSize >= inSize) // current hunk can satisfy request?
+  {
+    mZone_At = at + inSize;
+    mZone_AtSize = atSize - inSize;
+    return at;
+  } else if (atSize > morkZone_kMaxHunkWaste) // over max waste allowed?
+  {
+    // keep the (still sizable) cursor; satisfy this request separately:
+    morkHunk* hunk = this->zone_new_hunk(ev, inSize);
+    if (hunk) return hunk->HunkRun();
+
+    return (void*)0; // show allocation has failed
+  } else // get ourselves a new hunk for suballocation:
+  {
+    atSize = this->zone_grow_at(ev, inSize); // get a new hunk
+  }
+
+  if (atSize >= inSize) // current hunk can satisfy request?
+  {
+    at = mZone_At;
+    mZone_At = at + inSize;
+    mZone_AtSize = atSize - inSize;
+    return at;
+  }
+
+  if (ev->Good()) // got this far without any error reported yet?
+    ev->OutOfMemoryError();
+
+  return (void*)0; // show allocation has failed
+}
+
+void* morkZone::ZoneNewChip(morkEnv* ev, mdb_size inSize) // alloc
+{
+#ifdef morkZone_CONFIG_ARENA
+
+# ifdef morkZone_CONFIG_DEBUG
+ if (!this->IsZone())
+ this->NonZoneTypeError(ev);
+ else if (!mZone_Heap)
+ this->NilZoneHeapError(ev);
+# endif /*morkZone_CONFIG_DEBUG*/
+
+# ifdef morkZone_CONFIG_ALIGN_8
+ inSize += 7;
+ inSize &= ~((mork_ip)7); // force to multiple of 8 bytes
+# else /*morkZone_CONFIG_ALIGN_8*/
+ inSize += 3;
+ inSize &= ~((mork_ip)3); // force to multiple of 4 bytes
+# endif /*morkZone_CONFIG_ALIGN_8*/
+
+# ifdef morkZone_CONFIG_VOL_STATS
+ mZone_ChipVolume += inSize; // sum sizes of chips only
+# endif /* morkZone_CONFIG_VOL_STATS */
+
+ return this->zone_new_chip(ev, inSize);
+
+#else /*morkZone_CONFIG_ARENA*/
+ void* outBlock = 0;
+ mZone_Heap->Alloc(ev->AsMdbEnv(), inSize, &outBlock);
+ return outBlock;
+#endif /*morkZone_CONFIG_ARENA*/
+}
+
+// public: // ...but runs do indeed know how big they are
+void* morkZone::ZoneNewRun(morkEnv* ev, mdb_size inSize) // alloc
+{
+#ifdef morkZone_CONFIG_ARENA
+
+# ifdef morkZone_CONFIG_DEBUG
+ if (!this->IsZone())
+ this->NonZoneTypeError(ev);
+ else if (!mZone_Heap)
+ this->NilZoneHeapError(ev);
+# endif /*morkZone_CONFIG_DEBUG*/
+
+ inSize += morkZone_kRoundAdd;
+ inSize &= morkZone_kRoundMask;
+ if (inSize <= morkZone_kMaxCachedRun) {
+ morkRun** bucket = mZone_FreeRuns + (inSize >> morkZone_kRoundBits);
+ morkRun* hit = *bucket;
+ if (hit) // cache hit?
+ {
+ *bucket = hit->RunNext();
+ hit->RunSetSize(inSize);
+ return hit->RunAsBlock();
+ }
+ }
+ mdb_size blockSize = inSize + sizeof(morkRun); // plus run overhead
+# ifdef morkZone_CONFIG_VOL_STATS
+ mZone_RunVolume += blockSize; // sum sizes of runs only
+# endif /* morkZone_CONFIG_VOL_STATS */
+ morkRun* run = (morkRun*)this->zone_new_chip(ev, blockSize);
+ if (run) {
+ run->RunSetSize(inSize);
+# ifdef morkRun_USE_TAG_SLOT
+ run->RunInitTag();
+# endif /* morkRun_USE_TAG_SLOT */
+ return run->RunAsBlock();
+ }
+
+ if (ev->Good()) // got this far without any error reported yet?
+ ev->OutOfMemoryError();
+
+ return (void*)0; // indicate failed allocation
+
+#else /*morkZone_CONFIG_ARENA*/
+ void* outBlock = 0;
+ mZone_Heap->Alloc(ev->AsMdbEnv(), inSize, &outBlock);
+ return outBlock;
+#endif /*morkZone_CONFIG_ARENA*/
+}
+
+void morkZone::ZoneZapRun(morkEnv* ev, void* ioRunBlock) // free
+{
+#ifdef morkZone_CONFIG_ARENA
+
+ morkRun* run = morkRun::BlockAsRun(ioRunBlock);
+ mdb_size runSize = run->RunSize();
+# ifdef morkZone_CONFIG_VOL_STATS
+ mZone_BlockVolume -= runSize; // tracking sizes of both chips and runs
+# endif /* morkZone_CONFIG_VOL_STATS */
+
+# ifdef morkZone_CONFIG_DEBUG
+ if (!this->IsZone())
+ this->NonZoneTypeError(ev);
+ else if (!mZone_Heap)
+ this->NilZoneHeapError(ev);
+ else if (!ioRunBlock)
+ ev->NilPointerError();
+ else if (runSize & morkZone_kRoundAdd)
+ run->RunSizeAlignError(ev);
+# ifdef morkRun_USE_TAG_SLOT
+ else if (!run->RunGoodTag())
+ run->BadRunTagError(ev);
+# endif /* morkRun_USE_TAG_SLOT */
+# endif /*morkZone_CONFIG_DEBUG*/
+
+ if (runSize <= morkZone_kMaxCachedRun) // goes into free run list?
+ {
+ morkRun** bucket = mZone_FreeRuns + (runSize >> morkZone_kRoundBits);
+ run->RunSetNext(*bucket); // push onto free run list
+ *bucket = run;
+ } else // free old run list
+ {
+ run->RunSetNext(mZone_FreeOldRunList); // push onto free old run list
+ mZone_FreeOldRunList = run;
+ ++mZone_FreeOldRunCount;
+# ifdef morkZone_CONFIG_VOL_STATS
+ mZone_FreeOldRunVolume += runSize;
+# endif /* morkZone_CONFIG_VOL_STATS */
+
+ morkOldRun* oldRun = (morkOldRun*)run; // to access extra size slot
+ oldRun->OldSetSize(runSize); // so we know how big this is later
+ }
+
+#else /*morkZone_CONFIG_ARENA*/
+ mZone_Heap->Free(ev->AsMdbEnv(), ioRunBlock);
+#endif /*morkZone_CONFIG_ARENA*/
+}
+
+void* morkZone::ZoneGrowRun(morkEnv* ev, void* ioRunBlock, mdb_size inSize) {
+#ifdef morkZone_CONFIG_ARENA
+
+ morkRun* run = morkRun::BlockAsRun(ioRunBlock);
+ mdb_size runSize = run->RunSize();
+
+# ifdef morkZone_CONFIG_DEBUG
+ if (!this->IsZone())
+ this->NonZoneTypeError(ev);
+ else if (!mZone_Heap)
+ this->NilZoneHeapError(ev);
+# endif /*morkZone_CONFIG_DEBUG*/
+
+# ifdef morkZone_CONFIG_ALIGN_8
+ inSize += 7;
+ inSize &= ~((mork_ip)7); // force to multiple of 8 bytes
+# else /*morkZone_CONFIG_ALIGN_8*/
+ inSize += 3;
+ inSize &= ~((mork_ip)3); // force to multiple of 4 bytes
+# endif /*morkZone_CONFIG_ALIGN_8*/
+
+ if (inSize > runSize) {
+ void* newBuf = this->ZoneNewRun(ev, inSize);
+ if (newBuf) {
+ MORK_MEMCPY(newBuf, ioRunBlock, runSize);
+ this->ZoneZapRun(ev, ioRunBlock);
+
+ return newBuf;
+ }
+ } else
+ return ioRunBlock; // old size is big enough
+
+ if (ev->Good()) // got this far without any error reported yet?
+ ev->OutOfMemoryError();
+
+ return (void*)0; // indicate failed allocation
+
+#else /*morkZone_CONFIG_ARENA*/
+ void* outBlock = 0;
+ mZone_Heap->Free(ev->AsMdbEnv(), ioRunBlock);
+ return outBlock;
+#endif /*morkZone_CONFIG_ARENA*/
+}
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+// { ===== begin nsIMdbHeap methods =====
+/*virtual*/ nsresult morkZone::Alloc(
+ nsIMdbEnv* mev, // allocate a piece of memory
+ mdb_size inSize, // requested size of new memory block
+ void** outBlock) // memory block of inSize bytes, or nil
+{
+ nsresult outErr = NS_OK;
+ void* block = 0;
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ block = this->ZoneNewRun(ev, inSize);
+ outErr = ev->AsErr();
+ } else
+ outErr = morkEnv_kOutOfMemoryError;
+
+ if (outBlock) *outBlock = block;
+
+ return outErr;
+}
+
+/*virtual*/ nsresult morkZone::Free(
+ nsIMdbEnv* mev, // free block allocated earlier by Alloc()
+ void* inBlock) {
+ nsresult outErr = NS_OK;
+ if (inBlock) {
+ morkEnv* ev = morkEnv::FromMdbEnv(mev);
+ if (ev) {
+ this->ZoneZapRun(ev, inBlock);
+ outErr = ev->AsErr();
+ } else
+ // XXX 1 is not a valid nsresult
+ outErr = static_cast<nsresult>(1);
+ }
+
+ return outErr;
+}
+
+// } ===== end nsIMdbHeap methods =====
diff --git a/comm/mailnews/db/mork/morkZone.h b/comm/mailnews/db/mork/morkZone.h
new file mode 100644
index 0000000000..5399bb9827
--- /dev/null
+++ b/comm/mailnews/db/mork/morkZone.h
@@ -0,0 +1,313 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MORKZONE_
+#define _MORKZONE_ 1
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _MORKNODE_
+# include "morkNode.h"
+#endif
+
+#ifndef _MORKDEQUE_
+# include "morkDeque.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*| CONFIG_DEBUG: do paranoid debug checks if defined.
+|*/
+#ifdef MORK_DEBUG
+# define morkZone_CONFIG_DEBUG 1 /* debug paranoid if defined */
+#endif /*MORK_DEBUG*/
+
+/*| CONFIG_STATS: keep volume and usage statistics.
+|*/
+#define morkZone_CONFIG_VOL_STATS 1 /* count space used by zone instance */
+
+/*| CONFIG_ARENA: if this is defined, then the morkZone class will alloc big
+**| blocks from the zone's heap, and suballocate from these. If undefined,
+**| then morkZone will just pass all calls through to the zone's heap.
+|*/
+#ifdef MORK_ENABLE_ZONE_ARENAS
+# define morkZone_CONFIG_ARENA 1 /* be arena, if defined; otherwise no-op */
+#endif /*MORK_ENABLE_ZONE_ARENAS*/
+
+/*| CONFIG_ALIGN_8: if this is defined, then the morkZone class will give
+**| blocks 8 byte alignment instead of only 4 byte alignment.
+|*/
+#ifdef MORK_CONFIG_ALIGN_8
+# define morkZone_CONFIG_ALIGN_8 1 /* ifdef: align to 8 bytes, otherwise 4 */
+#endif /*MORK_CONFIG_ALIGN_8*/
+
+/*| CONFIG_PTR_SIZE_4: if this is defined, then the morkZone class will
+**| assume sizeof(void*) == 4, so a tag slot for padding is needed.
+|*/
+#ifdef MORK_CONFIG_PTR_SIZE_4
+# define morkZone_CONFIG_PTR_SIZE_4 1 /* ifdef: sizeof(void*) == 4 */
+#endif /*MORK_CONFIG_PTR_SIZE_4*/
+
+/*| morkRun_USE_TAG_SLOT: if this is defined, then define slots mRun_Tag and
+**| mHunk_Tag in order to achieve eight byte alignment after the next slots.
+|*/
+#if defined(morkZone_CONFIG_ALIGN_8) && defined(morkZone_CONFIG_PTR_SIZE_4)
+# define morkRun_USE_TAG_SLOT 1 /* need mRun_Tag slot inside morkRun */
+# define morkHunk_USE_TAG_SLOT 1 /* need mHunk_Tag slot inside morkHunk */
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkRun_kTag ((mork_u4)0x6D52754E) /* ascii 'mRuN' */
+
+/*| morkRun: structure used by morkZone for sized blocks
+|*/
+class morkRun {
+ protected: // member variable slots
+#ifdef morkRun_USE_TAG_SLOT
+ mork_u4 mRun_Tag; // force 8 byte alignment after mRun_Next
+#endif /* morkRun_USE_TAG_SLOT */
+
+ morkRun* mRun_Next;
+
+ public: // pointer interpretation of mRun_Next (when inside a list):
+ morkRun* RunNext() const { return mRun_Next; }
+ void RunSetNext(morkRun* ioNext) { mRun_Next = ioNext; }
+
+ public: // size interpretation of mRun_Next (when not inside a list):
+ mork_size RunSize() const { return (mork_size)((mork_ip)mRun_Next); }
+ void RunSetSize(mork_size inSize) { mRun_Next = (morkRun*)((mork_ip)inSize); }
+
+ public: // maintenance and testing of optional tag magic signature slot:
+#ifdef morkRun_USE_TAG_SLOT
+ void RunInitTag() { mRun_Tag = morkRun_kTag; }
+ mork_bool RunGoodTag() { return (mRun_Tag == morkRun_kTag); }
+#endif /* morkRun_USE_TAG_SLOT */
+
+ public: // conversion back and forth to inline block following run instance:
+ void* RunAsBlock() { return (((mork_u1*)this) + sizeof(morkRun)); }
+
+ static morkRun* BlockAsRun(void* ioBlock) {
+ return (morkRun*)(((mork_u1*)ioBlock) - sizeof(morkRun));
+ }
+
+ public: // typing & errors
+ static void BadRunTagError(morkEnv* ev);
+ static void RunSizeAlignError(morkEnv* ev);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*| morkOldRun: more space to record size when run is put into old free list
+|*/
+class morkOldRun : public morkRun {
+ protected: // need another size field when mRun_Next is used for linkage:
+ mdb_size mOldRun_Size;
+
+ public: // size getter/setter
+ mork_size OldSize() const { return mOldRun_Size; }
+ void OldSetSize(mork_size inSize) { mOldRun_Size = inSize; }
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define morkHunk_kTag ((mork_u4)0x68556E4B) /* ascii 'hUnK' */
+
+/*| morkHunk: structure used by morkZone for heap allocations.
+|*/
+class morkHunk {
+ protected: // member variable slots
+#ifdef morkHunk_USE_TAG_SLOT
+ mork_u4 mHunk_Tag; // force 8 byte alignment after mHunk_Next
+#endif /* morkHunk_USE_TAG_SLOT */
+
+ morkHunk* mHunk_Next;
+
+ morkRun mHunk_Run;
+
+ public: // setters
+ void HunkSetNext(morkHunk* ioNext) { mHunk_Next = ioNext; }
+
+ public: // getters
+ morkHunk* HunkNext() const { return mHunk_Next; }
+
+ morkRun* HunkRun() { return &mHunk_Run; }
+
+ public: // maintenance and testing of optional tag magic signature slot:
+#ifdef morkHunk_USE_TAG_SLOT
+ void HunkInitTag() { mHunk_Tag = morkHunk_kTag; }
+ mork_bool HunkGoodTag() { return (mHunk_Tag == morkHunk_kTag); }
+#endif /* morkHunk_USE_TAG_SLOT */
+
+ public: // typing & errors
+ static void BadHunkTagWarning(morkEnv* ev);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+/*| kNewHunkSize: the default size for a hunk, assuming we must allocate
+**| a new one whenever the free hunk list does not already have. Note this
+**| number should not be changed without also considering suitable changes
+**| in the related kMaxHunkWaste and kMinHunkSize constants.
+|*/
+#define morkZone_kNewHunkSize ((mork_size)(64 * 1024)) /* 64K per hunk */
+
+/*| kMaxFreeVolume: some number of bytes of free space in the free hunk list
+**| over which we no longer want to add more free hunks to the list, for fear
+**| of accumulating too much unused, fragmented free space. This should be a
+**| small multiple of kNewHunkSize, say about two to four times as great, to
+**| allow for no more free hunk space than fits in a handful of new hunks.
+**| This strategy will let us usefully accumulate "some" free space in the
+**| free hunk list, but without accumulating "too much" free space that way.
+|*/
+#define morkZone_kMaxFreeVolume (morkZone_kNewHunkSize * 3)
+
+/*| kMaxHunkWaste: if a current request is larger than this, and we cannot
+**| satisfy the request with the current hunk, then we just allocate the
+**| block from the heap without changing the current hunk. Basically this
+**| number represents the largest amount of memory we are willing to waste,
+**| since a block request barely less than this can cause the current hunk
+**| to be retired (with any unused space wasted) as well as get a new hunk.
+|*/
+#define morkZone_kMaxHunkWaste ((mork_size)4096) /* 1/16 kNewHunkSize */
+
+/*| kRound*: the algorithm for rounding up allocation sizes for caching
+**| in free lists works like the following. We add kRoundAdd to any size
+**| requested, and then bitwise AND with kRoundMask, and this will give us
+**| the smallest multiple of kRoundSize that is at least as large as the
+**| requested size. Then if we rightshift this number by kRoundBits, we
+**| will have the index into the mZone_FreeRuns array which will hold any
+**| cache runs of that size. So 4 bits of shift gives us a granularity
+**| of 16 bytes, so that free lists will hold successive runs that are
+**| 16 bytes greater than the next smaller run size. If we have 256 free
+**| lists of nonzero sized runs altogether, then the largest run that can
+**| be cached is 4096, or 4K (since 4096 == 16 * 256). A larger run that
+**| gets freed will go in to the free hunk list (or back to the heap).
+|*/
+#define morkZone_kRoundBits 4 /* bits to round-up size for free lists */
+#define morkZone_kRoundSize (1 << morkZone_kRoundBits)
+#define morkZone_kRoundAdd ((1 << morkZone_kRoundBits) - 1)
+#define morkZone_kRoundMask (~((mork_ip)morkZone_kRoundAdd))
+
+#define morkZone_kBuckets 256 /* number of distinct free lists */
+
+/*| kMaxCachedRun: the largest run that will be stored inside a free
+**| list of old zapped runs. A run larger than this cannot be put in
+**| a free list, and must be allocated from the heap at need, and put
+**| into the free hunk list when discarded.
+|*/
+#define morkZone_kMaxCachedRun (morkZone_kBuckets * morkZone_kRoundSize)
+
+#define morkDerived_kZone /*i*/ 0x5A6E /* ascii 'Zn' */
+
+/*| morkZone: a pooled memory allocator like an NSPR arena. The term 'zone'
+**| is roughly synonymous with 'heap'. I avoid calling this class a "heap"
+**| to avoid any confusion with nsIMdbHeap, and I avoid calling this class
+**| an arena to avoid confusion with NSPR usage.
+|*/
+class morkZone : public morkNode, public nsIMdbHeap {
+ // public: // slots inherited from morkNode (meant to inform only)
+ // nsIMdbHeap* mNode_Heap;
+
+ // mork_base mNode_Base; // must equal morkBase_kNode
+ // mork_derived mNode_Derived; // depends on specific node subclass
+
+ // mork_access mNode_Access; // kOpen, kClosing, kShut, or kDead
+ // mork_usage mNode_Usage; // kHeap, kStack, kMember, kGlobal, kNone
+ // mork_able mNode_Mutable; // can this node be modified?
+ // mork_load mNode_Load; // is this node clean or dirty?
+
+ // mork_uses mNode_Uses; // refcount for strong refs
+ // mork_refs mNode_Refs; // refcount for strong refs + weak refs
+
+ public: // state is public because the entire Mork system is private
+ nsIMdbHeap* mZone_Heap; // strong ref to heap allocating all space
+
+ mork_size mZone_HeapVolume; // total bytes allocated from heap
+ mork_size mZone_BlockVolume; // total bytes in all zone blocks
+ mork_size mZone_RunVolume; // total bytes in all zone runs
+ mork_size mZone_ChipVolume; // total bytes in all zone chips
+
+ mork_size mZone_FreeOldRunVolume; // total bytes in all free old runs
+
+ mork_count mZone_HunkCount; // total number of used hunks
+ mork_count mZone_FreeOldRunCount; // total free old runs
+
+ morkHunk* mZone_HunkList; // linked list of all used hunks
+ morkRun* mZone_FreeOldRunList; // linked list of free old runs
+
+ // note mZone_At is a byte pointer for single byte address arithmetic:
+ mork_u1* mZone_At; // current position in most recent hunk
+ mork_size mZone_AtSize; // number of bytes remaining in this hunk
+
+ // kBuckets+1 so indexes zero through kBuckets are all okay to use:
+
+ morkRun* mZone_FreeRuns[morkZone_kBuckets + 1];
+ // Each piece of memory stored in list mZone_FreeRuns[ i ] has an
+ // allocation size equal to sizeof(morkRun) + (i * kRoundSize), so
+ // that callers can be given a piece of memory with (i * kRoundSize)
+ // bytes of writeable space while reserving the first sizeof(morkRun)
+ // bytes to keep track of size information for later re-use. Note
+ // that mZone_FreeRuns[ 0 ] is unused because no run will be zero
+ // bytes in size (and morkZone plans to complain about zero sizes).
+
+ protected: // zone utilities
+ mork_size zone_grow_at(morkEnv* ev, mork_size inNeededSize);
+
+ void* zone_new_chip(morkEnv* ev, mdb_size inSize); // alloc
+ morkHunk* zone_new_hunk(morkEnv* ev, mdb_size inRunSize); // alloc
+
+ // { ===== begin nsIMdbHeap methods =====
+ public:
+ NS_IMETHOD Alloc(
+ nsIMdbEnv* ev, // allocate a piece of memory
+ mdb_size inSize, // requested size of new memory block
+ void** outBlock) override; // memory block of inSize bytes, or nil
+
+ NS_IMETHOD Free(nsIMdbEnv* ev, // free block allocated earlier by Alloc()
+ void* inBlock) override;
+
+ virtual size_t GetUsedSize() override { return mZone_Heap->GetUsedSize(); }
+ // } ===== end nsIMdbHeap methods =====
+
+ // { ===== begin morkNode interface =====
+ public: // morkNode virtual methods
+ virtual void CloseMorkNode(morkEnv* ev) override; // CloseZone() only if open
+ virtual ~morkZone(); // assert that CloseMap() executed earlier
+
+ public: // morkMap construction & destruction
+ morkZone(morkEnv* ev, const morkUsage& inUsage, nsIMdbHeap* ioNodeHeap,
+ nsIMdbHeap* ioZoneHeap);
+
+ void CloseZone(morkEnv* ev); // called by CloseMorkNode()
+
+ public: // dynamic type identification
+ mork_bool IsZone() const {
+ return IsNode() && mNode_Derived == morkDerived_kZone;
+ }
+ // } ===== end morkNode methods =====
+
+ // { ===== begin morkZone methods =====
+ public: // chips do not know how big they are...
+ void* ZoneNewChip(morkEnv* ev, mdb_size inSize); // alloc
+
+ public: // ...but runs do indeed know how big they are
+ void* ZoneNewRun(morkEnv* ev, mdb_size inSize); // alloc
+ void ZoneZapRun(morkEnv* ev, void* ioRunBody); // free
+ void* ZoneGrowRun(morkEnv* ev, void* ioRunBody, mdb_size inSize); // realloc
+
+ // } ===== end morkZone methods =====
+
+ public: // typing & errors
+ static void NonZoneTypeError(morkEnv* ev);
+ static void NilZoneHeapError(morkEnv* ev);
+ static void BadZoneTagError(morkEnv* ev);
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _MORKZONE_ */
diff --git a/comm/mailnews/db/mork/moz.build b/comm/mailnews/db/mork/moz.build
new file mode 100644
index 0000000000..4d97c3e562
--- /dev/null
+++ b/comm/mailnews/db/mork/moz.build
@@ -0,0 +1,68 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ "mdb.h",
+ "nsIMdbFactoryFactory.h",
+]
+
+SOURCES += [
+ "morkArray.cpp",
+ "morkAtom.cpp",
+ "morkAtomMap.cpp",
+ "morkAtomSpace.cpp",
+ "morkBead.cpp",
+ "morkBlob.cpp",
+ "morkBuilder.cpp",
+ "morkCell.cpp",
+ "morkCellObject.cpp",
+ "morkCh.cpp",
+ "morkConfig.cpp",
+ "morkCursor.cpp",
+ "morkDeque.cpp",
+ "morkEnv.cpp",
+ "morkFactory.cpp",
+ "morkFile.cpp",
+ "morkHandle.cpp",
+ "morkIntMap.cpp",
+ "morkMap.cpp",
+ "morkNode.cpp",
+ "morkNodeMap.cpp",
+ "morkObject.cpp",
+ "morkParser.cpp",
+ "morkPool.cpp",
+ "morkPortTableCursor.cpp",
+ "morkProbeMap.cpp",
+ "morkRow.cpp",
+ "morkRowCellCursor.cpp",
+ "morkRowMap.cpp",
+ "morkRowObject.cpp",
+ "morkRowSpace.cpp",
+ "morkSink.cpp",
+ "morkSpace.cpp",
+ "morkStore.cpp",
+ "morkStream.cpp",
+ "morkTable.cpp",
+ "morkTableRowCursor.cpp",
+ "morkThumb.cpp",
+ "morkWriter.cpp",
+ "morkYarn.cpp",
+ "morkZone.cpp",
+ "nsMorkFactory.cpp",
+ "orkinHeap.cpp",
+]
+
+if CONFIG["OS_ARCH"] == "WINNT":
+ SOURCES += ["morkSearchRowCursor.cpp"]
+
+Library("mork")
+FINAL_LIBRARY = "mail"
+# clang-cl complains about this.
+if CONFIG["CC_TYPE"] == "clang-cl":
+ CXXFLAGS += ["-Wno-overloaded-virtual"]
+
+XPCOM_MANIFESTS += [
+ "components.conf",
+]
diff --git a/comm/mailnews/db/mork/nsIMdbFactoryFactory.h b/comm/mailnews/db/mork/nsIMdbFactoryFactory.h
new file mode 100644
index 0000000000..06a362078d
--- /dev/null
+++ b/comm/mailnews/db/mork/nsIMdbFactoryFactory.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsIMdbFactoryFactory_h__
+#define nsIMdbFactoryFactory_h__
+
+#include "nsISupports.h"
+#include "nsIFactory.h"
+#include "nsIComponentManager.h"
+
+class nsIMdbFactory;
+
+// 2794D0B7-E740-47a4-91C0-3E4FCB95B806
+#define NS_IMDBFACTORYFACTORY_IID \
+ { \
+ 0x2794d0b7, 0xe740, 0x47a4, { \
+ 0x91, 0xc0, 0x3e, 0x4f, 0xcb, 0x95, 0xb8, 0x6 \
+ } \
+ }
+
+// because Mork doesn't support XPCOM, we have to wrap the mdb factory interface
+// with an interface that gives you an mdb factory.
+class nsIMdbFactoryService : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IMDBFACTORYFACTORY_IID)
+ NS_IMETHOD GetMdbFactory(nsIMdbFactory** aFactory) = 0;
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIMdbFactoryService, NS_IMDBFACTORYFACTORY_IID)
+
+#endif
diff --git a/comm/mailnews/db/mork/nsMorkFactory.cpp b/comm/mailnews/db/mork/nsMorkFactory.cpp
new file mode 100644
index 0000000000..f1354699df
--- /dev/null
+++ b/comm/mailnews/db/mork/nsMorkFactory.cpp
@@ -0,0 +1,14 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsMorkFactory.h"
+
+NS_IMPL_ISUPPORTS(nsMorkFactoryService, nsIMdbFactoryService)
+
+NS_IMETHODIMP nsMorkFactoryService::GetMdbFactory(nsIMdbFactory** aFactory) {
+ if (!mMdbFactory) mMdbFactory = MakeMdbFactory();
+ NS_IF_ADDREF(*aFactory = mMdbFactory);
+ return *aFactory ? NS_OK : NS_ERROR_OUT_OF_MEMORY;
+}
diff --git a/comm/mailnews/db/mork/nsMorkFactory.h b/comm/mailnews/db/mork/nsMorkFactory.h
new file mode 100644
index 0000000000..582ffe3a83
--- /dev/null
+++ b/comm/mailnews/db/mork/nsMorkFactory.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsMorkFactory_h__
+#define nsMorkFactory_h__
+
+#include "mozilla/ModuleUtils.h"
+#include "nsCOMPtr.h"
+#include "nsIMdbFactoryFactory.h"
+#include "mdb.h"
+
+class nsMorkFactoryService final : public nsIMdbFactoryService {
+ public:
+ nsMorkFactoryService(){};
+ // nsISupports methods
+ NS_DECL_ISUPPORTS
+
+ NS_IMETHOD GetMdbFactory(nsIMdbFactory** aFactory) override;
+
+ protected:
+ ~nsMorkFactoryService() {}
+ nsCOMPtr<nsIMdbFactory> mMdbFactory;
+};
+
+#endif
diff --git a/comm/mailnews/db/mork/orkinHeap.cpp b/comm/mailnews/db/mork/orkinHeap.cpp
new file mode 100644
index 0000000000..0bd2545a7b
--- /dev/null
+++ b/comm/mailnews/db/mork/orkinHeap.cpp
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+#ifndef _ORKINHEAP_
+# include "orkinHeap.h"
+#endif
+
+#ifndef _MORKENV_
+# include "morkEnv.h"
+#endif
+
+#include "nsIMemoryReporter.h"
+
+#include <stdlib.h>
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+orkinHeap::orkinHeap() // does nothing
+ : mUsedSize(0) {}
+
+/*virtual*/
+orkinHeap::~orkinHeap() // does nothing
+{}
+
+MOZ_DEFINE_MALLOC_SIZE_OF_ON_ALLOC(MorkSizeOfOnAlloc)
+MOZ_DEFINE_MALLOC_SIZE_OF_ON_FREE(MorkSizeOfOnFree)
+
+// { ===== begin nsIMdbHeap methods =====
+/*virtual*/ nsresult orkinHeap::Alloc(
+ nsIMdbEnv* mev, // allocate a piece of memory
+ mdb_size inSize, // requested size of new memory block
+ void** outBlock) // memory block of inSize bytes, or nil
+{
+ MORK_USED_1(mev);
+ nsresult outErr = NS_OK;
+ void* block = malloc(inSize);
+ if (!block)
+ outErr = morkEnv_kOutOfMemoryError;
+ else
+ mUsedSize += MorkSizeOfOnAlloc(block);
+
+ MORK_ASSERT(outBlock);
+ if (outBlock) *outBlock = block;
+ return outErr;
+}
+
+/*virtual*/ nsresult orkinHeap::Free(
+ nsIMdbEnv* mev, // free block allocated earlier by Alloc()
+ void* inBlock) {
+ MORK_USED_1(mev);
+ MORK_ASSERT(inBlock);
+ if (inBlock) {
+ mUsedSize -= MorkSizeOfOnFree(inBlock);
+ free(inBlock);
+ }
+ return NS_OK;
+}
+
+size_t orkinHeap::GetUsedSize() { return mUsedSize; }
+// } ===== end nsIMdbHeap methods =====
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
diff --git a/comm/mailnews/db/mork/orkinHeap.h b/comm/mailnews/db/mork/orkinHeap.h
new file mode 100644
index 0000000000..f431d6fe82
--- /dev/null
+++ b/comm/mailnews/db/mork/orkinHeap.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _ORKINHEAP_
+#define _ORKINHEAP_ 1
+
+#ifndef _MDB_
+# include "mdb.h"
+#endif
+
+#ifndef _MORK_
+# include "mork.h"
+#endif
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#define orkinHeap_kTag 0x68456150 /* ascii 'hEaP' */
+
+/*| orkinHeap:
+|*/
+class orkinHeap : public nsIMdbHeap { //
+ protected:
+ size_t mUsedSize;
+
+ public:
+ orkinHeap(); // does nothing
+ virtual ~orkinHeap(); // does nothing
+
+ private: // copying is not allowed
+ orkinHeap(const orkinHeap& other);
+ orkinHeap& operator=(const orkinHeap& other);
+
+ public:
+ // { ===== begin nsIMdbHeap methods =====
+ NS_IMETHOD Alloc(nsIMdbEnv* ev, // allocate a piece of memory
+ mdb_size inSize, // requested size of new memory block
+ void** outBlock); // memory block of inSize bytes, or nil
+
+ NS_IMETHOD Free(nsIMdbEnv* ev, // free block allocated earlier by Alloc()
+ void* inBlock);
+
+ virtual size_t GetUsedSize();
+ // } ===== end nsIMdbHeap methods =====
+};
+
+// 456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789
+
+#endif /* _ORKINHEAP_ */
diff --git a/comm/mailnews/db/moz.build b/comm/mailnews/db/moz.build
new file mode 100644
index 0000000000..deeb047fbc
--- /dev/null
+++ b/comm/mailnews/db/moz.build
@@ -0,0 +1,9 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DIRS += [
+ "msgdb",
+ "gloda",
+]
diff --git a/comm/mailnews/db/msgdb/.eslintrc.js b/comm/mailnews/db/msgdb/.eslintrc.js
new file mode 100644
index 0000000000..5816519fbb
--- /dev/null
+++ b/comm/mailnews/db/msgdb/.eslintrc.js
@@ -0,0 +1,5 @@
+"use strict";
+
+module.exports = {
+ extends: ["plugin:mozilla/valid-jsdoc"],
+};
diff --git a/comm/mailnews/db/msgdb/moz.build b/comm/mailnews/db/msgdb/moz.build
new file mode 100644
index 0000000000..a49689ab64
--- /dev/null
+++ b/comm/mailnews/db/msgdb/moz.build
@@ -0,0 +1,11 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DIRS += [
+ "public",
+ "src",
+]
+
+TEST_DIRS += ["test"]
diff --git a/comm/mailnews/db/msgdb/public/moz.build b/comm/mailnews/db/msgdb/public/moz.build
new file mode 100644
index 0000000000..5bed71ce9c
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/moz.build
@@ -0,0 +1,25 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+XPIDL_SOURCES += [
+ "nsIDBChangeAnnouncer.idl",
+ "nsIDBChangeListener.idl",
+ "nsIDBFolderInfo.idl",
+ "nsIMsgDatabase.idl",
+ "nsIMsgOfflineImapOperation.idl",
+ "nsINewsDatabase.idl",
+]
+
+XPIDL_MODULE = "msgdb"
+
+EXPORTS += [
+ "nsDBFolderInfo.h",
+ "nsImapMailDatabase.h",
+ "nsMailDatabase.h",
+ "nsMsgDatabase.h",
+ "nsMsgHdr.h",
+ "nsMsgThread.h",
+ "nsNewsDatabase.h",
+]
diff --git a/comm/mailnews/db/msgdb/public/nsDBFolderInfo.h b/comm/mailnews/db/msgdb/public/nsDBFolderInfo.h
new file mode 100644
index 0000000000..5b1150e4e7
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsDBFolderInfo.h
@@ -0,0 +1,151 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This class encapsulates the global information about a folder stored in the
+ summary file.
+*/
+#ifndef _nsDBFolderInfo_H
+#define _nsDBFolderInfo_H
+
+#include "mozilla/MemoryReporting.h"
+#include "nsString.h"
+#include "MailNewsTypes.h"
+#include "mdb.h"
+#include "nsTArray.h"
+#include "nsIDBFolderInfo.h"
+#include <time.h>
+
+class nsMsgDatabase;
+
+// again, this could inherit from nsISupports, but I don't see the need as of
+// yet. I'm not sure it needs to be ref-counted (but I think it does).
+
+// I think these getters and setters really need to go through mdb and not rely
+// on the object caching the values. If this somehow turns out to be
+// prohibitively expensive, we can invent some sort of dirty mechanism, but I
+// think it turns out that these values will be cached by the MSG_FolderInfo's
+// anyway.
+class nsDBFolderInfo : public nsIDBFolderInfo {
+ public:
+ friend class nsMsgDatabase;
+
+ explicit nsDBFolderInfo(nsMsgDatabase* mdb);
+
+ NS_DECL_ISUPPORTS
+ // interface methods.
+ NS_DECL_NSIDBFOLDERINFO
+ // create the appropriate table and row in a new db.
+ nsresult AddToNewMDB();
+ // accessor methods.
+
+ bool TestFlag(int32_t flags);
+ int16_t GetIMAPHierarchySeparator();
+ void SetIMAPHierarchySeparator(int16_t hierarchyDelimiter);
+ void ChangeImapTotalPendingMessages(int32_t delta);
+ void ChangeImapUnreadPendingMessages(int32_t delta);
+
+ nsresult InitFromExistingDB();
+ // get and set arbitrary property, aka row cell value.
+ nsresult SetPropertyWithToken(mdb_token aProperty,
+ const nsAString& propertyStr);
+ nsresult SetUint32PropertyWithToken(mdb_token aProperty,
+ uint32_t propertyValue);
+ nsresult SetInt64PropertyWithToken(mdb_token aProperty,
+ int64_t propertyValue);
+ nsresult SetInt32PropertyWithToken(mdb_token aProperty,
+ int32_t propertyValue);
+ nsresult GetPropertyWithToken(mdb_token aProperty, nsAString& propertyValue);
+ nsresult GetUint32PropertyWithToken(mdb_token aProperty,
+ uint32_t& propertyValue,
+ uint32_t defaultValue = 0);
+ nsresult GetInt32PropertyWithToken(mdb_token aProperty,
+ int32_t& propertyValue,
+ int32_t defaultValue = 0);
+ nsresult GetInt64PropertyWithToken(mdb_token aProperty,
+ int64_t& propertyValue,
+ int64_t defaultValue = 0);
+
+ nsTArray<nsMsgKey> m_lateredKeys; // list of latered messages
+
+ virtual size_t SizeOfExcludingThis(
+ mozilla::MallocSizeOf aMallocSizeOf) const {
+ return m_lateredKeys.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ }
+ virtual size_t SizeOfIncludingThis(
+ mozilla::MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ protected:
+ virtual ~nsDBFolderInfo();
+
+ // initialize from appropriate table and row in existing db.
+ nsresult InitMDBInfo();
+ nsresult LoadMemberVariables();
+
+ nsresult AdjustHighWater(nsMsgKey highWater, bool force);
+
+ void
+ ReleaseExternalReferences(); // let go of any references to other objects.
+
+ int64_t m_folderSize;
+ int64_t m_expungedBytes; // sum of size of deleted messages in folder
+ uint32_t m_folderDate;
+ nsMsgKey m_highWaterMessageKey; // largest news article number or imap uid
+ // whose header we've seen
+
+ // m_numUnreadMessages and m_numMessages can never be negative. 0 means 'no
+ // msgs'.
+ int32_t m_numUnreadMessages;
+ int32_t m_numMessages; // includes expunged and ignored messages
+
+ int32_t m_flags; // folder specific flags. This holds things like re-use
+ // thread pane,
+ // configured for off-line use, use default retrieval, purge article/header
+ // options
+
+ uint16_t m_version; // for upgrading...
+ int16_t m_IMAPHierarchySeparator; // imap path separator
+
+ // mail only (for now)
+
+ // IMAP only
+ int32_t m_ImapUidValidity;
+ int32_t m_totalPendingMessages;
+ int32_t m_unreadPendingMessages;
+
+ // news only (for now)
+ nsMsgKey
+ m_expiredMark; // Highest invalid article number in group - for expiring
+ // the db folder info will have to know what db and row it belongs to, since
+ // it is really just a wrapper around the singleton folder info row in the
+ // mdb.
+ nsMsgDatabase* m_mdb;
+ nsIMdbTable* m_mdbTable; // singleton table in db
+ nsIMdbRow* m_mdbRow; // singleton row in table;
+
+ bool m_mdbTokensInitialized;
+
+ mdb_token m_rowScopeToken;
+ mdb_token m_tableKindToken;
+ // tokens for the pre-set columns - we cache these for speed, which may be
+ // silly
+ mdb_token m_mailboxNameColumnToken;
+ mdb_token m_numMessagesColumnToken;
+ mdb_token m_numUnreadMessagesColumnToken;
+ mdb_token m_flagsColumnToken;
+ mdb_token m_folderSizeColumnToken;
+ mdb_token m_expungedBytesColumnToken;
+ mdb_token m_folderDateColumnToken;
+ mdb_token m_highWaterMessageKeyColumnToken;
+
+ mdb_token m_imapUidValidityColumnToken;
+ mdb_token m_totalPendingMessagesColumnToken;
+ mdb_token m_unreadPendingMessagesColumnToken;
+ mdb_token m_expiredMarkColumnToken;
+ mdb_token m_versionColumnToken;
+};
+
+#endif
diff --git a/comm/mailnews/db/msgdb/public/nsIDBChangeAnnouncer.idl b/comm/mailnews/db/msgdb/public/nsIDBChangeAnnouncer.idl
new file mode 100644
index 0000000000..afdb06f8ca
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsIDBChangeAnnouncer.idl
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "MailNewsTypes2.idl"
+
+interface nsIDBChangeListener;
+interface nsIMsgDBHdr;
+
+[scriptable, uuid(22baf00b-939d-42c3-ac51-21d99dfa1f05)]
+interface nsIDBChangeAnnouncer : nsISupports {
+ void addListener(in nsIDBChangeListener listener);
+ void removeListener(in nsIDBChangeListener listener);
+
+ void notifyHdrChangeAll(in nsIMsgDBHdr aHdrChanged,
+ in unsigned long aOldFlags,
+ in unsigned long aNewFlags,
+ in nsIDBChangeListener instigator);
+
+ void notifyHdrAddedAll(in nsIMsgDBHdr aHdrAdded,
+ in nsMsgKey parentKey,
+ in long flags,
+ in nsIDBChangeListener instigator);
+
+ void notifyHdrDeletedAll(in nsIMsgDBHdr aHdrDeleted,
+ in nsMsgKey parentKey,
+ in long flags,
+ in nsIDBChangeListener instigator);
+
+ void notifyParentChangedAll(in nsMsgKey keyReparented,
+ in nsMsgKey oldParent,
+ in nsMsgKey newParent,
+ in nsIDBChangeListener instigator);
+
+ void notifyReadChanged(in nsIDBChangeListener instigator);
+
+ void notifyJunkScoreChanged(in nsIDBChangeListener aInstigator);
+
+ void notifyAnnouncerGoingAway();
+};
diff --git a/comm/mailnews/db/msgdb/public/nsIDBChangeListener.idl b/comm/mailnews/db/msgdb/public/nsIDBChangeListener.idl
new file mode 100644
index 0000000000..30c50db49c
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsIDBChangeListener.idl
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "MailNewsTypes2.idl"
+
+interface nsIDBChangeAnnouncer;
+interface nsIMsgDBHdr;
+interface nsIMsgDatabase;
+
+/**
+ * These callbacks are provided to allow listeners to the message database
+ * to update their status when changes occur.
+ */
+[scriptable, uuid(21c56d34-71b9-42bb-9606-331a6a5f8210)]
+
+interface nsIDBChangeListener : nsISupports {
+ /**
+ * Callback when message flags are changed.
+ *
+ * @param aHdrChanged The changed header.
+ * @param aOldFlags Message flags prior to change.
+ * @param aNewFlags Message flags after change.
+ * @param aInstigator Object that initiated the change.
+ */
+ void onHdrFlagsChanged(in nsIMsgDBHdr aHdrChanged, in unsigned long aOldFlags,
+ in unsigned long aNewFlags, in nsIDBChangeListener aInstigator);
+
+ /**
+ * Callback when message is marked as deleted.
+ *
+ * @param aHdrChanged The message header that is going to be deleted.
+ * @param aParentKey Key of parent.
+ * @param aFlags Flags that message has before delete.
+ * @param aInstigator Object that initiated the change. Can be null.
+ */
+ void onHdrDeleted(in nsIMsgDBHdr aHdrChanged, in nsMsgKey aParentKey, in long aFlags,
+ in nsIDBChangeListener aInstigator);
+
+ /**
+ * Callback when message is added.
+ *
+ * @param aHdrChanged The message header that is added.
+ * @param aParentKey Parent key of message.
+ * @param aFlags Flags that new message will have.
+ * @param aInstigator Object that initiated the change. Can be null.
+ */
+ void onHdrAdded(in nsIMsgDBHdr aHdrChanged, in nsMsgKey aParentKey, in long aFlags,
+ in nsIDBChangeListener aInstigator);
+
+ /**
+ * Callback when message parent is changed. Parent is changed when message is deleted or moved.
+ *
+ * @param aKeyChanged The message key that parent key was changed.
+ * @param oldParent Old parent key.
+ * @param newParent New parent key.
+ * @param aInstigator Object that initiated the change. Can be null.
+ */
+ void onParentChanged(in nsMsgKey aKeyChanged, in nsMsgKey oldParent, in nsMsgKey newParent,
+ in nsIDBChangeListener aInstigator);
+
+ /**
+ * Callback when announcer is going away. This is good place to release strong pointers to announcer.
+ *
+ * @param instigator Object that initiated the change. Can be null.
+ */
+ void onAnnouncerGoingAway(in nsIDBChangeAnnouncer instigator);
+
+ /**
+ * Callback when read flag is changed.
+ *
+ * @param aInstigator Object that initiated the change. Can be null.
+ */
+ void onReadChanged(in nsIDBChangeListener aInstigator);
+
+ /**
+ * Callback used in case when "junkscore" property is changed.
+ *
+ * @param aInstigator Object that initiated the change. Can be null.
+ */
+ void onJunkScoreChanged(in nsIDBChangeListener aInstigator);
+
+ /**
+ * Callback used in the general case where any field may have changed.
+ * OnHdrPropertyChanged is called twice per change. On the first call, aPreChange
+ * is true, and aStatus is undefined. OnHdrPropertyChanged saves any required status in aStatus
+ * (such as a filter match). The calling function stores the value of aStatus, changes the
+ * header aHdrToChange, then calls OnHdrPropertyChanged again with aPreChange false. On this
+ * second call, the stored value of aStatus is provided, so that any changes may be noted.
+ *
+ * @param aHdrToChange the message header that is changing.
+ * @param aPreChange true on first call before change, false on second call after change
+ * @param aStatus storage location provided by calling routine for status
+ * @param aInstigator object that initiated the change
+ */
+ void onHdrPropertyChanged(in nsIMsgDBHdr aHdrToChange,
+ in AUTF8String property,
+ in boolean aPreChange,
+ inout uint32_t aStatus,
+ in nsIDBChangeListener aInstigator);
+
+ /**
+ * Generic notification for extensibility. Common events should be documented
+ * here so we have a hope of keeping the documentation up to date.
+ * Current events are:
+ * "DBOpened" - When a pending listener becomes real. This can happen when
+ * the existing db is force closed and a new one opened. Only
+ * registered pending listeners are notified.
+ *
+ * @param aDB the db for this event.
+ * @param aEvent type of event.
+ *
+ */
+ void onEvent(in nsIMsgDatabase aDB, in string aEvent);
+};
diff --git a/comm/mailnews/db/msgdb/public/nsIDBFolderInfo.idl b/comm/mailnews/db/msgdb/public/nsIDBFolderInfo.idl
new file mode 100644
index 0000000000..cb41041268
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsIDBFolderInfo.idl
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "MailNewsTypes2.idl"
+
+[scriptable, uuid(a72dab4b-b3bd-471e-9a38-1b242b385459)]
+interface nsIDBFolderInfo : nsISupports {
+ attribute long flags;
+
+ /**
+ * Or's aFlags into flags.
+ *
+ * @param - the flags(s) to set
+ *
+ * @return - the resulting flags.
+ */
+ long orFlags(in long aFlags);
+ /**
+ * And's aFlags with flags, set flags to the result
+ *
+ * @param the flags(s) to AND
+ *
+ * @return the resulting flags.
+ */
+ long andFlags(in long aFlags);
+
+ /**
+ * Allows us to keep track of the highwater mark
+ *
+ * @param aNewKey If larger than the current highwater
+ * mark, sets the highwater mark to aNewKey.
+ */
+ void onKeyAdded(in nsMsgKey aNewKey);
+
+ attribute nsMsgKey highWater;
+ attribute nsMsgKey expiredMark;
+ attribute long long folderSize;
+ attribute unsigned long folderDate;
+ void changeNumUnreadMessages(in long aDelta);
+ void changeNumMessages(in long aDelta);
+
+ // numUnreadMessages and numMessages will never return negative numbers. 0 means 'no msgs'.
+ attribute long numUnreadMessages;
+ attribute long numMessages;
+
+ attribute long long expungedBytes;
+ attribute long imapUidValidity;
+ attribute unsigned long version;
+ attribute long imapTotalPendingMessages;
+ attribute long imapUnreadPendingMessages;
+
+ attribute nsMsgViewTypeValue viewType;
+ attribute nsMsgViewFlagsTypeValue viewFlags;
+ attribute nsMsgViewSortTypeValue sortType;
+ attribute nsMsgViewSortOrderValue sortOrder;
+
+ void changeExpungedBytes(in long aDelta);
+
+ /**
+ * Gets a string property from the folder. Also used for URIs, hence the AUTF8String type.
+ *
+ * @param propertyName The name of the property for the value to retrieve.
+ */
+ AUTF8String getCharProperty(in string propertyName);
+
+ /**
+ * Sets a string property from the folder. Also used for URIs, hence the AUTF8String type.
+ *
+ * @param propertyName The name of the property for which to set a value
+ * @param propertyValue The new value of the property.
+ */
+ void setCharProperty(in string aPropertyName, in AUTF8String aPropertyValue);
+ void setUint32Property(in string propertyName, in unsigned long propertyValue);
+ void setInt64Property(in string propertyName, in long long propertyValue);
+ unsigned long getUint32Property(in string propertyName, in unsigned long defaultValue);
+ long long getInt64Property(in string propertyName, in long long defaultValue);
+ boolean getBooleanProperty(in string propertyName, in boolean defaultValue);
+ void setBooleanProperty(in string propertyName, in boolean aPropertyValue);
+ nsIDBFolderInfo GetTransferInfo();
+ void initFromTransferInfo(in nsIDBFolderInfo transferInfo);
+
+ attribute AString locale;
+ attribute AString mailboxName;
+
+
+ AString getProperty(in string propertyName);
+ void setProperty(in string propertyName, in AString propertyStr);
+
+ attribute string knownArtsSet;
+ attribute ACString folderName;
+};
diff --git a/comm/mailnews/db/msgdb/public/nsIMsgDatabase.idl b/comm/mailnews/db/msgdb/public/nsIMsgDatabase.idl
new file mode 100644
index 0000000000..e54c938c15
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsIMsgDatabase.idl
@@ -0,0 +1,506 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * @defgroup msgdb Mailnews message database
+ * This module is the access point to locally-stored databases.
+ *
+ * These databases are stored in .msf files. Each file contains useful cached
+ * information, like the message id or references, as well as the cc header or
+ * tag information. This cached information is encapsulated in nsIMsgDBHdr.
+ *
+ * Also included is threading information, mostly encapsulated in nsIMsgThread.
+ * The final component is the database folder info, which contains information
+ * on the view and basic information also stored in the folder cache such as the
+ * name or most recent update.
+ *
+ * What this module does not do is access individual messages. Access is
+ * strictly controlled by the nsIMsgFolder objects and their backends.
+ * @{
+ */
+#include "nsISupports.idl"
+#include "nsIDBChangeAnnouncer.idl"
+
+interface nsIMsgDatabase;
+interface nsIDBChangeListener;
+interface nsIMsgDBHdr;
+interface nsIMsgEnumerator;
+interface nsIMsgThread;
+interface nsIMsgThreadEnumerator;
+interface nsIDBFolderInfo;
+interface nsIMsgOfflineImapOperation;
+interface nsIMsgFolder;
+interface nsIFile;
+interface nsIMsgSearchTerm;
+
+typedef unsigned long nsMsgRetainByPreference;
+
+
+[scriptable, uuid(fe8b7cec-eec8-4bcd-82ff-d8bb23cef3da)]
+
+interface nsIMsgRetentionSettings : nsISupports
+{
+ const unsigned long nsMsgRetainAll = 1;
+ const unsigned long nsMsgRetainByAge = 2;
+ const unsigned long nsMsgRetainByNumHeaders = 3;
+
+ attribute boolean useServerDefaults;
+ attribute nsMsgRetainByPreference retainByPreference;
+ attribute unsigned long daysToKeepHdrs;
+ attribute unsigned long numHeadersToKeep;
+
+ // this is for keeping offline bodies.
+ attribute boolean cleanupBodiesByDays;
+ attribute unsigned long daysToKeepBodies;
+
+ /**
+ * Should retention settings be applied to flagged/starred messages?
+ * If false, flagged messages are never automatically deleted.
+ */
+ attribute boolean applyToFlaggedMessages;
+};
+
+[scriptable, uuid(86a9da90-14f1-11d5-a5c0-0060b0fc04b7)]
+interface nsIMsgDownloadSettings : nsISupports
+{
+ attribute boolean useServerDefaults;
+ attribute boolean downloadByDate;
+ attribute boolean downloadUnreadOnly;
+ attribute unsigned long ageLimitOfMsgsToDownload;
+};
+
+typedef long nsMsgDBCommit;
+
+[scriptable, uuid(15431853-e448-45dc-8978-9958bf74d9b7)]
+interface nsMsgDBCommitType : nsISupports
+{
+ const long kLargeCommit = 1;
+ const long kSessionCommit = 2;
+ const long kCompressCommit = 3;
+};
+
+/**
+ * A service to open mail databases and manipulate listeners automatically.
+ *
+ * The contract ID for this component is
+ * <tt>\@mozilla.org/msgDatabase/msgDBService;1</tt>.
+ */
+[scriptable, uuid(4cbbf024-3760-402d-89f3-6ababafeb07d)]
+interface nsIMsgDBService : nsISupports
+{
+ /**
+ * Opens a database for a given folder.
+ *
+ * This method is preferred over nsIMsgDBService::openMailDBFromFile if the
+ * caller has an actual nsIMsgFolder around. If the database detects that it
+ * is unreadable or out of date (using nsIMsgDatabase::outOfDate) it will
+ * destroy itself and prepare to be rebuilt, unless aLeaveInvalidDB is true.
+ *
+ * If one gets a NS_MSG_ERROR_FOLDER_SUMMARY_MISSING message, then one
+ * should call nsIMsgDBService::createNewDB to create the new database.
+ *
+ * @param aFolder The folder whose database should be returned.
+ * @param aLeaveInvalidDB Whether or not the database should be deleted if it
+ * is invalid.
+ * @return A new nsIMsgDatabase object representing the folder
+ * database that was opened.
+ * @exception NS_ERROR_FILE_NOT_FOUND
+ * The file could not be created.
+ * @exception NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE
+ * The database is present (and was opened), but the
+ * summary file is out of date.
+ * @exception NS_MSG_ERROR_FOLDER_SUMMARY_MISSING
+ * The database is present, but the summary file is
+ * missing.
+ * @see nsIMsgDatabase::Open
+ * @see nsIMsgDBService::createNewDB
+ */
+ nsIMsgDatabase openFolderDB(in nsIMsgFolder aFolder,
+ in boolean aLeaveInvalidDB);
+
+ /**
+ * Creates a new database for the given folder.
+ *
+ * If the database already exists, it will return the database, emit a
+ * warning, but not fully initialize it. For this reason, it should only be
+ * used when it is known that the database does not exist, such as when
+ * nsIMsgDBService::openFolderDB throws an error.
+ *
+ * @see nsIMsgDBService::openFolderDB
+ */
+ nsIMsgDatabase createNewDB(in nsIMsgFolder aFolder);
+
+ /**
+ * Opens or creates a database for a given file.
+ *
+ * This method should only be used if the caller does not have a folder
+ * instance, because the resulting db and message headers retrieved from the
+ * database would not know their owning folder, which limits their usefulness.
+ * For this reason, one should use nsIMsgDBService::openFolderDB instead
+ * except under special circumstances.
+ *
+ * Unlike nsIMsgDBService::openFolderDB, there is no corresponding method to
+ * create a new database if opening the database failed. However, this method
+ * will never throw NS_MSG_ERROR_FOLDER_SUMMARY_MISSING, so no corresponding
+ * method is needed.
+ *
+ * @param aFile The file for which the database should be returned.
+ * @param aFolder Folder the db corresponds to (may be null)
+ * @param aCreate Whether or not the file should be created.
+ * @param aLeaveInvalidDB Whether or not the database should be deleted if it
+ * is invalid.
+ * @return A new nsIMsgDatabase object encapsulating the file
+ * passed in.
+ * @exception NS_ERROR_FILE_NOT_FOUND
+ * The file could not be created.
+ * @see nsIMsgDBService::openFolderDB
+ * @see nsIMsgDatabase::Open
+ */
+ nsIMsgDatabase openMailDBFromFile(in nsIFile aFile,
+ in nsIMsgFolder aFolder,
+ in boolean aCreate,
+ in boolean aLeaveInvalidDB);
+ /**
+ * Adds the given listener to the listener set for the folder.
+ *
+ * Since the message database will likely be opened and closed many times, by
+ * registering using this method, one will be guaranteed to see all subsequent
+ * modifications. This will also add the listener to the database if it is
+ * already opened.
+ *
+ * @param aFolder The folder to add a listener to.
+ * @param aListener The listener to add the folder to.
+ */
+ void registerPendingListener(in nsIMsgFolder aFolder,
+ in nsIDBChangeListener aListener);
+ /**
+ * Removes the listener from all folder listener sets.
+ *
+ * @param aListener The listener to remove.
+ * @exception NS_ERROR_FAILURE
+ * The listener is not registered.
+ */
+ void unregisterPendingListener(in nsIDBChangeListener aListener);
+
+ /**
+ * Get the db for a folder, if already open.
+ *
+ * @param aFolder The folder to get the cached (open) db for.
+ *
+ * @returns null if the db isn't open, otherwise the db.
+ */
+ nsIMsgDatabase cachedDBForFolder(in nsIMsgFolder aFolder);
+
+ /**
+ * Close the db for a folder, if already open.
+ *
+ * @param aFolder The folder to close the cached (open) db for.
+ */
+ void forceFolderDBClosed(in nsIMsgFolder aFolder);
+
+ /// an enumerator to iterate over the open dbs.
+ readonly attribute Array<nsIMsgDatabase> openDBs;
+};
+
+[scriptable, uuid(b64e66f8-4717-423a-be42-482658fb2199)]
+interface nsIMsgDatabase : nsIDBChangeAnnouncer {
+ void close(in boolean aForceCommit);
+
+ void commit(in nsMsgDBCommit commitType);
+ // Force closed is evil, and we should see if we can do without it.
+ // In 4.x, it was mainly used to remove corrupted databases.
+ void forceClosed();
+ void clearCachedHdrs();
+ void resetHdrCacheSize(in unsigned long size);
+
+ readonly attribute nsIDBFolderInfo dBFolderInfo;
+
+ /// Size of the database file in bytes.
+ readonly attribute long long databaseSize;
+
+ /// Folder this db was opened on.
+ readonly attribute nsIMsgFolder folder;
+
+ /**
+ * This is used when deciding which db's to close to free up memory
+ * and other resources in an LRU manner. It doesn't track every operation
+ * on every object from the db, but high level things like open, commit,
+ * and perhaps some of the list methods. Commit should be a proxy for all
+ * the mutation methods.
+ *
+ * I'm allowing clients to set the last use time as well, so that
+ * nsIMsgFolder.msgDatabase can set the last use time.
+ */
+ attribute PRTime lastUseTime;
+
+ // get a message header for the given key. Caller must release()!
+
+ nsIMsgDBHdr getMsgHdrForKey(in nsMsgKey key);
+ nsIMsgDBHdr getMsgHdrForMessageID(in string messageID);
+
+ /**
+ * Get a message header for a Gmail message with the given X-GM-MSGID.
+ * @param {string} aGmailMessageID - The ID of the message to find.
+ *
+ * @returns the message, or null if not found (without throwing an error).
+ */
+ nsIMsgDBHdr getMsgHdrForGMMsgID(in string aGmailMessageID);
+ //Returns whether or not this database contains the given key
+ boolean containsKey(in nsMsgKey key);
+
+/**
+ * Must call AddNewHdrToDB after creating. The idea is that you create
+ * a new header, fill in its properties, and then call AddNewHdrToDB.
+ * AddNewHdrToDB will send notifications to any listeners.
+ *
+ * @param aKey msgKey for the new header. If aKey is nsMsgKey_None,
+ * we will auto-assign a new key.
+ */
+ nsIMsgDBHdr createNewHdr(in nsMsgKey aKey);
+
+ void addNewHdrToDB(in nsIMsgDBHdr newHdr, in boolean notify);
+
+ nsIMsgDBHdr copyHdrFromExistingHdr(in nsMsgKey key, in nsIMsgDBHdr existingHdr, in boolean addHdrToDB);
+
+ /**
+ * Returns all message keys stored in the database.
+ * Keys are returned in the order as stored in the database.
+ * The caller should sort them if it needs to.
+ */
+ Array<nsMsgKey> listAllKeys();
+
+ nsIMsgEnumerator enumerateMessages();
+ nsIMsgEnumerator reverseEnumerateMessages();
+ nsIMsgThreadEnumerator enumerateThreads();
+
+ /**
+ * Get an enumerator of messages matching the passed-in search terms.
+ *
+ * @param searchTerms Array of search terms to evaluate.
+ * @param reverse Start at the end, defaults to false.
+ *
+ * @returns An enumerator to iterate over matching messages.
+ */
+ nsIMsgEnumerator getFilterEnumerator(in Array<nsIMsgSearchTerm> searchTerms,
+ [optional] in boolean reverse);
+
+ // count the total and unread msgs, and adjust global count if needed
+ void syncCounts();
+
+ nsIMsgThread getThreadContainingMsgHdr(in nsIMsgDBHdr msgHdr) ;
+
+ // helpers for user command functions like delete, mark read, etc.
+
+ void markHdrRead(in nsIMsgDBHdr msgHdr, in boolean bRead,
+ in nsIDBChangeListener instigator);
+
+ void markHdrReplied(in nsIMsgDBHdr msgHdr, in boolean bReplied,
+ in nsIDBChangeListener instigator);
+
+ void markHdrMarked(in nsIMsgDBHdr msgHdr, in boolean mark,
+ in nsIDBChangeListener instigator);
+ /**
+ * Remove the new status from a message.
+ *
+ * @param aMsgHdr The database reference header for the message
+ * @param aInstigator Reference to original calling object
+ */
+ void markHdrNotNew(in nsIMsgDBHdr aMsgHdr,
+ in nsIDBChangeListener aInstigator);
+
+ // MDN support
+ void markMDNNeeded(in nsMsgKey key, in boolean bNeeded,
+ in nsIDBChangeListener instigator);
+
+ void markMDNSent(in nsMsgKey key, in boolean bNeeded,
+ in nsIDBChangeListener instigator);
+ boolean isMDNSent(in nsMsgKey key);
+
+ void markRead(in nsMsgKey key, in boolean bRead,
+ in nsIDBChangeListener instigator);
+
+ void markReplied(in nsMsgKey key, in boolean bReplied,
+ in nsIDBChangeListener instigator);
+
+ void markForwarded(in nsMsgKey key, in boolean bForwarded,
+ in nsIDBChangeListener instigator);
+
+ void markRedirected(in nsMsgKey key, in boolean bRedirected,
+ in nsIDBChangeListener instigator);
+
+ void markHasAttachments(in nsMsgKey key, in boolean bHasAttachments,
+ in nsIDBChangeListener instigator);
+
+ Array<nsMsgKey> markThreadRead(in nsIMsgThread thread, in nsIDBChangeListener instigator);
+
+ /// Mark the specified thread ignored.
+ void markThreadIgnored(in nsIMsgThread thread, in nsMsgKey threadKey,
+ in boolean bIgnored,
+ in nsIDBChangeListener instigator);
+
+ /// Mark the specified thread watched.
+ void markThreadWatched(in nsIMsgThread thread, in nsMsgKey threadKey,
+ in boolean bWatched,
+ in nsIDBChangeListener instigator);
+
+ /// Mark the specified subthread ignored.
+ void markHeaderKilled(in nsIMsgDBHdr msg, in boolean bIgnored,
+ in nsIDBChangeListener instigator);
+
+ /// Is the message read.
+ boolean isRead(in nsMsgKey key);
+ /// Is the message part of an ignored thread.
+ boolean isIgnored(in nsMsgKey key);
+ /// Is the message part of a watched thread.
+ boolean isWatched(in nsMsgKey key);
+ /// Is the message flagged/starred.
+ boolean isMarked(in nsMsgKey key);
+ /// Does the message have attachments.
+ boolean hasAttachments(in nsMsgKey key);
+
+ Array<nsMsgKey> markAllRead();
+
+ void deleteMessages(in Array<nsMsgKey> nsMsgKeys,
+ in nsIDBChangeListener instigator);
+ void deleteMessage(in nsMsgKey key,
+ in nsIDBChangeListener instigator,
+ in boolean commit);
+ void deleteHeader(in nsIMsgDBHdr msgHdr, in nsIDBChangeListener instigator,
+ in boolean commit, in boolean notify);
+
+ /// Lower level routine that doesn't remove hdr from thread or adjust counts.
+ void removeHeaderMdbRow(in nsIMsgDBHdr msgHdr);
+
+ void undoDelete(in nsIMsgDBHdr msgHdr);
+
+ void markMarked(in nsMsgKey key, in boolean mark,
+ in nsIDBChangeListener instigator);
+ void markOffline(in nsMsgKey key, in boolean offline,
+ in nsIDBChangeListener instigator);
+ void setStringProperty(in nsMsgKey aKey, in string aProperty, in AUTF8String aValue);
+ /**
+ * Set the value of a string property in a message header
+ *
+ * @param msgHdr Header of the message whose property will be changed
+ * @param aProperty the property to change
+ * @param aValue new value for the property
+ */
+ void setStringPropertyByHdr(in nsIMsgDBHdr msgHdr, in string aProperty, in AUTF8String aValue);
+
+ /**
+ * Set the value of a uint32 property in a message header.
+ *
+ * @param aMsgHdr header of the message whose property will be changed
+ * @param aProperty the property to change
+ * @param aValue new value for the property
+ */
+ void setUint32PropertyByHdr(in nsIMsgDBHdr aMsgHdr,
+ in string aProperty, in unsigned long aValue);
+
+ void markImapDeleted(in nsMsgKey key, in boolean deleted,
+ in nsIDBChangeListener instigator);
+
+ readonly attribute nsMsgKey firstNew;
+
+ attribute nsIMsgRetentionSettings msgRetentionSettings;
+ // Purge unwanted message headers and/or bodies. If deleteViaFolder is
+ // true, we'll call nsIMsgFolder::DeleteMessages to delete the messages.
+ // Otherwise, we'll just delete them from the db.
+ void applyRetentionSettings(in nsIMsgRetentionSettings aMsgRetentionSettings,
+ in boolean aDeleteViaFolder);
+
+ attribute nsIMsgDownloadSettings msgDownloadSettings;
+
+ boolean hasNew();
+ void clearNewList(in boolean notify);
+ void addToNewList(in nsMsgKey key);
+
+ // Used mainly to force the timestamp of a local mail folder db to
+ // match the time stamp of the corresponding berkeley mail folder,
+ // but also useful to tell the summary to mark itself invalid
+ // Also, if a local folder is being reparsed, summary will be invalid
+ // until the reparsing is done.
+ attribute boolean summaryValid;
+
+ Array<nsMsgKey> listAllOfflineMsgs();
+
+ void setAttributeOnPendingHdr(in nsIMsgDBHdr pendingHdr, in string property,
+ in string propertyVal);
+
+ void setUint32AttributeOnPendingHdr(in nsIMsgDBHdr pendingHdr, in string property,
+ in unsigned long propertyVal);
+
+ /**
+ * Sets a pending 64 bit attribute, which tells the DB that when a message
+ * which looks like the pendingHdr (e.g., same message-id) is added to the
+ * db, set the passed in property and value on the new header. This is
+ * usually because we've copied an imap message to a different folder, and
+ * want to carry forward attributes from the original message to the copy,
+ * but don't have the message hdr for the copy yet so we can't set
+ * attributes directly.
+ *
+ * @param aPendingHdr usually the source of the copy.
+ * @param aProperty name of property to set.
+ * @param aPropertyVal 64 bit value of property to set.
+ */
+ void setUint64AttributeOnPendingHdr(in nsIMsgDBHdr aPendingHdr,
+ in string aProperty,
+ in unsigned long long aPropertyVal);
+
+ /**
+ * Given a message header with its message-id set, update any pending
+ * attributes on the header.
+ *
+ * @param aNewHdr a new header that may have pending attributes.
+ */
+ void updatePendingAttributes(in nsIMsgDBHdr aNewHdr);
+
+ readonly attribute nsMsgKey lowWaterArticleNum;
+ readonly attribute nsMsgKey highWaterArticleNum;
+ attribute nsMsgKey nextPseudoMsgKey; //for undo-redo of move pop->imap
+ readonly attribute nsMsgKey nextFakeOfflineMsgKey; // for saving "fake" offline msg hdrs
+ // for sorting
+ Array<octet> createCollationKey(in AString sourceString);
+ long compareCollationKeys(in Array<octet> key1, in Array<octet> key2);
+
+ // when creating a view, the default sort order and view flags
+ // use these for the default. (this allows news to override, so that
+ // news can be threaded by default)
+ readonly attribute nsMsgViewFlagsTypeValue defaultViewFlags;
+ readonly attribute nsMsgViewSortTypeValue defaultSortType;
+ readonly attribute nsMsgViewSortOrderValue defaultSortOrder;
+
+ // for msg hdr hash table allocation. controllable by caller to improve folder loading performance.
+ attribute unsigned long msgHdrCacheSize;
+
+ /**
+ * The list of messages currently in the NEW state.
+ */
+ Array<nsMsgKey> getNewList();
+
+ // These are used for caching search hits in a db, to speed up saved search folders.
+ nsIMsgEnumerator getCachedHits(in AUTF8String aSearchFolderUri);
+
+ /**
+ * Update search cache to ensure it contains aNewHits.
+ *
+ * @param aSearchFolderUri the target folder.
+ * @param aNewHits sorted list of new message keys.
+ * @returns list of keys of messages removed from cache.
+ */
+ Array<nsMsgKey> refreshCache(in AUTF8String aSearchFolderUri, in Array<nsMsgKey> aNewHits);
+ void updateHdrInCache(in AUTF8String aSearchFolderUri, in nsIMsgDBHdr aHdr, in boolean aAdd);
+ boolean hdrIsInCache(in AUTF8String aSearchFolderUri, in nsIMsgDBHdr aHdr);
+};
+
+[scriptable, uuid(7f98410c-41b7-4a55-8e0c-02107e7f4c0f)]
+interface nsIMsgOfflineOpsDatabase : nsIMsgDatabase {
+ // Has to be in nsMailDatabase, since local folders can be move destinations.
+
+ nsIMsgOfflineImapOperation getOfflineOpForKey(in nsMsgKey messageKey, in boolean create);
+ void removeOfflineOp(in nsIMsgOfflineImapOperation op);
+ Array<nsMsgKey> listAllOfflineOpIds();
+ Array<nsMsgKey> listAllOfflineDeletes();
+};
diff --git a/comm/mailnews/db/msgdb/public/nsIMsgOfflineImapOperation.idl b/comm/mailnews/db/msgdb/public/nsIMsgOfflineImapOperation.idl
new file mode 100644
index 0000000000..bc2aecfa28
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsIMsgOfflineImapOperation.idl
@@ -0,0 +1,50 @@
+/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "MailNewsTypes2.idl"
+#include "nsIImapUrl.idl" // for imapMessageFlagsType
+
+typedef long nsOfflineImapOperationType;
+
+// A single queued offline IMAP operation (flag change, move, copy, delete,
+// keyword change, ...) recorded in the message database while offline.
+[scriptable, uuid(b5229a55-22bb-444b-be92-13d719353828)]
+
+interface nsIMsgOfflineImapOperation : nsISupports
+{
+// type of stored imap operations (bit values for the |operation| bitmask)
+  const long kFlagsChanged = 0x1;
+  const long kMsgMoved = 0x2;
+  const long kMsgCopy = 0x4;
+  const long kMoveResult = 0x8;
+  const long kAppendDraft = 0x10;
+  const long kAddedHeader = 0x20;
+  const long kDeletedMsg = 0x40;
+  const long kMsgMarkedDeleted = 0x80;
+  const long kAppendTemplate = 0x100;
+  const long kDeleteAllMsgs = 0x200;
+  const long kAddKeywords = 0x400;
+  const long kRemoveKeywords = 0x800;
+
+  // Bitmask of the k* operation type constants above.
+  attribute nsOfflineImapOperationType operation;
+  // Clear the given operation type bit(s) from |operation|.
+  void clearOperation(in nsOfflineImapOperationType operation);
+  // Key of the message this operation applies to.
+  attribute nsMsgKey messageKey;
+
+  // for move/copy operations, the msg key of the source msg.
+  attribute nsMsgKey srcMessageKey;
+
+  attribute imapMessageFlagsType flagOperation;
+  attribute imapMessageFlagsType newFlags; // for kFlagsChanged
+  attribute AUTF8String destinationFolderURI; // for move or copy
+  attribute AUTF8String sourceFolderURI;
+  void addKeywordToAdd(in string aKeyword);
+  void addKeywordToRemove(in string aKeyword);
+  readonly attribute string keywordsToAdd;
+  readonly attribute string keywordsToRemove;
+  // Number of copy destinations queued via addMessageCopyOperation.
+  readonly attribute long numberOfCopies;
+  // Queue an additional copy of this message to |destinationBox|.
+  void addMessageCopyOperation(in AUTF8String destinationBox);
+  // Destination folder URI of the copyIndex-th queued copy.
+  string getCopyDestination(in long copyIndex);
+  attribute unsigned long msgSize;
+  // True while this operation is being played back (see |playingBack| users).
+  attribute boolean playingBack;
+};
diff --git a/comm/mailnews/db/msgdb/public/nsINewsDatabase.idl b/comm/mailnews/db/msgdb/public/nsINewsDatabase.idl
new file mode 100644
index 0000000000..151a42f019
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsINewsDatabase.idl
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+%{C++
+#include "nsMsgKeySet.h"
+%}
+
+[ptr] native nsMsgKeySetPtr(nsMsgKeySet);
+
+// News-specific database access: exposes the set of read article keys.
+[scriptable, uuid(f700208a-1dd1-11b2-b947-e4e1e4fdf278)]
+
+interface nsINewsDatabase : nsISupports {
+  // The key set of read articles (raw nsMsgKeySet pointer, C++ only).
+  // NOTE(review): per nsNewsDatabase.h this is owned by the news folder,
+  // which outlives the db.
+  [noscript] attribute nsMsgKeySetPtr readSet;
+};
diff --git a/comm/mailnews/db/msgdb/public/nsImapMailDatabase.h b/comm/mailnews/db/msgdb/public/nsImapMailDatabase.h
new file mode 100644
index 0000000000..307ad2ed0a
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsImapMailDatabase.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef _nsImapMailDatabase_H_
+#define _nsImapMailDatabase_H_
+
+#include "mozilla/Attributes.h"
+#include "nsMailDatabase.h"
+
+// Message database subclass for IMAP folders. Adds a "pending headers" table
+// holding attributes set for messages whose headers are not in the db yet;
+// presumably applied when the real header arrives (see
+// UpdatePendingAttributes) — confirm in the implementation.
+class nsImapMailDatabase : public nsMailDatabase {
+ public:
+  // OK, it's dumb that this should require a fileSpec, since there is no file
+  // for the folder. This is mainly because we're deriving from nsMailDatabase;
+  // Perhaps we shouldn't...
+  nsImapMailDatabase();
+  virtual ~nsImapMailDatabase();
+
+  NS_IMETHOD GetSummaryValid(bool* aResult) override;
+  NS_IMETHOD SetSummaryValid(bool valid = true) override;
+  virtual nsresult AdjustExpungedBytesOnDelete(nsIMsgDBHdr* msgHdr) override;
+
+  NS_IMETHOD ForceClosed() override;
+  NS_IMETHOD AddNewHdrToDB(nsIMsgDBHdr* newHdr, bool notify) override;
+  // Set a string/uint32/uint64 attribute on a pending (not-yet-arrived) hdr.
+  NS_IMETHOD SetAttributeOnPendingHdr(nsIMsgDBHdr* pendingHdr,
+                                      const char* property,
+                                      const char* propertyVal) override;
+  NS_IMETHOD SetUint32AttributeOnPendingHdr(nsIMsgDBHdr* pendingHdr,
+                                            const char* property,
+                                            uint32_t propertyVal) override;
+  NS_IMETHOD SetUint64AttributeOnPendingHdr(nsIMsgDBHdr* aPendingHdr,
+                                            const char* aProperty,
+                                            uint64_t aPropertyVal) override;
+  NS_IMETHOD DeleteMessages(nsTArray<nsMsgKey> const& nsMsgKeys,
+                            nsIDBChangeListener* instigator) override;
+  NS_IMETHOD UpdatePendingAttributes(nsIMsgDBHdr* aNewHdr) override;
+
+ protected:
+  nsresult GetRowForPendingHdr(nsIMsgDBHdr* pendingHdr, nsIMdbRow** row);
+  nsresult GetAllPendingHdrsTable();
+  // mdb tokens and table backing the pending-headers store.
+  mdb_token m_pendingHdrsRowScopeToken;
+  mdb_token m_pendingHdrsTableKindToken;
+  nsCOMPtr<nsIMdbTable> m_mdbAllPendingHdrsTable;
+};
+
+#endif
diff --git a/comm/mailnews/db/msgdb/public/nsMailDatabase.h b/comm/mailnews/db/msgdb/public/nsMailDatabase.h
new file mode 100644
index 0000000000..4e29b26322
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsMailDatabase.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsMailDatabase_H_
+#define _nsMailDatabase_H_
+
+#include "mozilla/Attributes.h"
+#include "nsMsgDatabase.h"
+#include "nsTArray.h"
+
+#include "nsIDBChangeListener.h"
+#include "nsIMsgOfflineImapOperation.h"
+#include "nsISimpleEnumerator.h"
+#include "nsIFile.h"
+
+// This is the subclass of nsMsgDatabase that handles local mail messages.
+
+// This is the subclass of nsMsgDatabase that handles local mail messages.
+// It implements the offline-ops storage (a dedicated mdb table) used when
+// local folders are move/copy destinations for offline IMAP operations.
+class nsMailDatabase : public nsMsgDatabase {
+ public:
+  nsMailDatabase();
+  virtual ~nsMailDatabase();
+  NS_IMETHOD ForceClosed() override;
+  NS_IMETHOD DeleteMessages(nsTArray<nsMsgKey> const& nsMsgKeys,
+                            nsIDBChangeListener* instigator) override;
+
+  nsresult Open(nsMsgDBService* aDBService, nsIFile* aSummaryFile, bool create,
+                bool upgrading) override;
+  virtual nsMailDatabase* GetMailDB() { return this; }
+
+  virtual uint32_t GetCurVersion() override { return kMsgDBVersion; }
+
+  NS_IMETHOD GetOfflineOpForKey(nsMsgKey opKey, bool create,
+                                nsIMsgOfflineImapOperation** op) override;
+  NS_IMETHOD RemoveOfflineOp(nsIMsgOfflineImapOperation* op) override;
+
+  NS_IMETHOD SetSummaryValid(bool valid) override;
+  NS_IMETHOD GetSummaryValid(bool* valid) override;
+
+  NS_IMETHOD ListAllOfflineOpIds(nsTArray<nsMsgKey>& offlineOpIds) override;
+  NS_IMETHOD ListAllOfflineDeletes(nsTArray<nsMsgKey>& offlineDeletes) override;
+
+  friend class nsMsgOfflineOpEnumerator;
+
+ protected:
+  nsresult GetAllOfflineOpsTable(); // get this on demand
+
+  // get the time and date of the mailbox file
+  void GetMailboxModProperties(int64_t* aSize, uint32_t* aDate);
+
+  // mdb table and tokens backing the offline-operations store.
+  nsCOMPtr<nsIMdbTable> m_mdbAllOfflineOpsTable;
+  mdb_token m_offlineOpsRowScopeToken;
+  mdb_token m_offlineOpsTableKindToken;
+
+  virtual void SetReparse(bool reparse);
+
+ protected:
+  // Whether the folder should be reparsed (set via SetReparse).
+  bool m_reparse;
+};
+
+#endif
diff --git a/comm/mailnews/db/msgdb/public/nsMsgDatabase.h b/comm/mailnews/db/msgdb/public/nsMsgDatabase.h
new file mode 100644
index 0000000000..f61b9a7b25
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsMsgDatabase.h
@@ -0,0 +1,447 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsMsgDatabase_H_
+#define _nsMsgDatabase_H_
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Path.h"
+#include "nsIFile.h"
+#include "nsIMsgDatabase.h"
+#include "nsMsgHdr.h"
+#include "nsString.h"
+#include "nsIDBChangeAnnouncer.h"
+#include "nsMsgMessageFlags.h"
+#include "nsIMsgFolder.h"
+#include "nsDBFolderInfo.h"
+#include "mozilla/intl/Collator.h"
+#include "nsIMimeConverter.h"
+#include "nsCOMPtr.h"
+#include "nsCOMArray.h"
+#include "PLDHashTable.h"
+#include "nsTArray.h"
+#include "nsTObserverArray.h"
+
+using mozilla::intl::Collator;
+
+class nsMsgThread;
+class nsMsgDatabase;
+class nsIMsgOfflineOpsDatabase;
+class nsIMsgThread;
+class nsMsgDBEnumerator;
+class nsMsgDBThreadEnumerator;
+
+const int32_t kMsgDBVersion = 1;
+
+// Hopefully we're not opening up lots of databases at the same time, however
+// this will give us a buffer before we need to start reallocating the cache
+// array.
+const uint32_t kInitialMsgDBCacheSize = 20;
+
+// Service that opens message databases and maintains a cache of open
+// nsMsgDatabase instances, plus change listeners registered before a
+// folder's db has been opened.
+class nsMsgDBService final : public nsIMsgDBService {
+ public:
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSIMSGDBSERVICE
+
+  nsMsgDBService();
+
+  void AddToCache(nsMsgDatabase* pMessageDB);
+  void DumpCache();
+  // Add to the cache only if not already present.
+  void EnsureCached(nsMsgDatabase* pMessageDB) {
+    if (!m_dbCache.Contains(pMessageDB)) m_dbCache.AppendElement(pMessageDB);
+  }
+  void RemoveFromCache(nsMsgDatabase* pMessageDB) {
+    m_dbCache.RemoveElement(pMessageDB);
+  }
+
+ protected:
+  ~nsMsgDBService();
+  // Attach listeners queued for |folder| to its now-open |db|.
+  void HookupPendingListeners(nsIMsgDatabase* db, nsIMsgFolder* folder);
+  void FinishDBOpen(nsIMsgFolder* aFolder, nsMsgDatabase* aMsgDB);
+  // Find an already-open db for the given summary file, or null.
+  nsMsgDatabase* FindInCache(nsIFile* dbName);
+
+  // Listeners registered before their folder's db was opened (parallel
+  // arrays: folder at index i goes with listener at index i).
+  nsCOMArray<nsIMsgFolder> m_foldersPendingListeners;
+  nsCOMArray<nsIDBChangeListener> m_pendingListeners;
+  // Raw (non-owning) pointers — NOTE(review): confirm dbs unregister
+  // themselves on destruction.
+  AutoTArray<nsMsgDatabase*, kInitialMsgDBCacheSize> m_dbCache;
+};
+
+namespace mozilla {
+namespace mailnews {
+class MsgDBReporter;
+}
+} // namespace mozilla
+
+// nsMsgDatabase is the nsIMdb-backed implementation of the message database:
+// message headers and threads live in mdb tables (accessed via cached column
+// tokens), with in-memory caches for instantiated header and thread objects.
+class nsMsgDatabase : public nsIMsgOfflineOpsDatabase {
+ public:
+  friend class nsMsgDBService;
+  friend class nsMsgPropertyEnumerator; // accesses m_mdbEnv and m_mdbStore
+
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSIDBCHANGEANNOUNCER
+  NS_DECL_NSIMSGDATABASE
+  NS_DECL_NSIMSGOFFLINEOPSDATABASE
+
+  /**
+   * Opens a database folder.
+   *
+   * @param aFolderName The name of the folder to create.
+   * @param aCreate Whether or not the file should be created.
+   * @param aLeaveInvalidDB Set to true if you do not want the database to be
+   *                        deleted if it is invalid.
+   * @exception NS_ERROR_FILE_NOT_FOUND
+   *                        The file could not be created.
+   * @exception NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE
+   *                        The database is present (and was opened), but the
+   *                        summary file is out of date.
+   * @exception NS_MSG_ERROR_FOLDER_SUMMARY_MISSING
+   *                        The database is present (and was opened), but the
+   *                        summary file is missing.
+   */
+  virtual nsresult Open(nsMsgDBService* aDBService, nsIFile* aFolderName,
+                        bool aCreate, bool aLeaveInvalidDB);
+  virtual nsresult IsHeaderRead(nsIMsgDBHdr* hdr, bool* pRead);
+  virtual nsresult MarkHdrReadInDB(nsIMsgDBHdr* msgHdr, bool bRead,
+                                   nsIDBChangeListener* instigator);
+  // Common open path; |sync| selects synchronous vs. asynchronous open
+  // (async opens keep m_thumb alive until completion, see below).
+  nsresult OpenInternal(nsMsgDBService* aDBService, nsIFile* aFolderName,
+                        bool aCreate, bool aLeaveInvalidDB, bool sync);
+  nsresult CheckForErrors(nsresult err, bool sync, nsMsgDBService* aDBService,
+                          nsIFile* summaryFile);
+  virtual nsresult OpenMDB(nsIFile* dbfile, bool create, bool sync);
+  virtual nsresult CloseMDB(bool commit);
+  virtual nsresult CreateMsgHdr(nsIMdbRow* hdrRow, nsMsgKey key,
+                                nsIMsgDBHdr** result);
+  virtual nsresult GetThreadForMsgKey(nsMsgKey msgKey, nsIMsgThread** result);
+  virtual nsresult EnumerateMessagesWithFlag(nsIMsgEnumerator** result,
+                                             uint32_t* pFlag);
+  nsresult GetSearchResultsTable(const nsACString& searchFolderUri,
+                                 bool createIfMissing, nsIMdbTable** table);
+
+  //////////////////////////////////////////////////////////////////////////////
+  // nsMsgDatabase methods:
+  nsMsgDatabase();
+
+  nsresult GetMDBFactory(nsIMdbFactory** aMdbFactory);
+  nsIMdbEnv* GetEnv() { return m_mdbEnv; }
+  nsIMdbStore* GetStore() { return m_mdbStore; }
+  virtual uint32_t GetCurVersion();
+  nsresult GetCollationKeyGenerator();
+  nsIMimeConverter* GetMimeConverter();
+
+  nsresult GetTableCreateIfMissing(const char* scope, const char* kind,
+                                   nsIMdbTable** table, mdb_token& scopeToken,
+                                   mdb_token& kindToken);
+
+  // helper function to fill in nsStrings from hdr row cell contents.
+  nsresult RowCellColumnTonsString(nsIMdbRow* row, mdb_token columnToken,
+                                   nsAString& resultStr);
+  nsresult RowCellColumnToUInt32(nsIMdbRow* row, mdb_token columnToken,
+                                 uint32_t* uint32Result,
+                                 uint32_t defaultValue = 0);
+  nsresult RowCellColumnToUInt32(nsIMdbRow* row, mdb_token columnToken,
+                                 uint32_t& uint32Result,
+                                 uint32_t defaultValue = 0);
+  nsresult RowCellColumnToUInt64(nsIMdbRow* row, mdb_token columnToken,
+                                 uint64_t* uint64Result,
+                                 uint64_t defaultValue = 0);
+  nsresult RowCellColumnToMime2DecodedString(nsIMdbRow* row,
+                                             mdb_token columnToken,
+                                             nsAString& resultStr);
+  nsresult RowCellColumnToCollationKey(nsIMdbRow* row, mdb_token columnToken,
+                                       nsTArray<uint8_t>& result);
+  nsresult RowCellColumnToConstCharPtr(nsIMdbRow* row, mdb_token columnToken,
+                                       const char** ptr);
+  nsresult RowCellColumnToAddressCollationKey(nsIMdbRow* row,
+                                              mdb_token colToken,
+                                              nsTArray<uint8_t>& result);
+
+  nsresult GetEffectiveCharset(nsIMdbRow* row, nsACString& resultCharset);
+
+  // these methods take the property name as a string, not a token.
+  // they should be used when the properties aren't accessed a lot
+  nsresult GetProperty(nsIMdbRow* row, const char* propertyName, char** result);
+  nsresult SetProperty(nsIMdbRow* row, const char* propertyName,
+                       const char* propertyVal);
+  nsresult GetPropertyAsNSString(nsIMdbRow* row, const char* propertyName,
+                                 nsAString& result);
+  nsresult SetPropertyFromNSString(nsIMdbRow* row, const char* propertyName,
+                                   const nsAString& propertyVal);
+  nsresult GetUint32Property(nsIMdbRow* row, const char* propertyName,
+                             uint32_t* result, uint32_t defaultValue = 0);
+  nsresult GetUint64Property(nsIMdbRow* row, const char* propertyName,
+                             uint64_t* result, uint64_t defaultValue = 0);
+  nsresult SetUint32Property(nsIMdbRow* row, const char* propertyName,
+                             uint32_t propertyVal);
+  nsresult SetUint64Property(nsIMdbRow* row, const char* propertyName,
+                             uint64_t propertyVal);
+  nsresult GetBooleanProperty(nsIMdbRow* row, const char* propertyName,
+                              bool* result, bool defaultValue = false);
+  nsresult SetBooleanProperty(nsIMdbRow* row, const char* propertyName,
+                              bool propertyVal);
+  // helper function for once we have the token.
+  nsresult SetNSStringPropertyWithToken(nsIMdbRow* row, mdb_token aProperty,
+                                        const nsAString& propertyStr);
+
+  // helper functions to put values in cells for the passed-in row
+  nsresult UInt32ToRowCellColumn(nsIMdbRow* row, mdb_token columnToken,
+                                 uint32_t value);
+  nsresult CharPtrToRowCellColumn(nsIMdbRow* row, mdb_token columnToken,
+                                  const char* charPtr);
+  nsresult RowCellColumnToCharPtr(nsIMdbRow* row, mdb_token columnToken,
+                                  char** result);
+  nsresult UInt64ToRowCellColumn(nsIMdbRow* row, mdb_token columnToken,
+                                 uint64_t value);
+
+  // helper functions to copy an nsString to a yarn, int32 to yarn, and vice
+  // versa.
+  static struct mdbYarn* nsStringToYarn(struct mdbYarn* yarn,
+                                        const nsAString& str);
+  static struct mdbYarn* UInt32ToYarn(struct mdbYarn* yarn, uint32_t i);
+  static struct mdbYarn* UInt64ToYarn(struct mdbYarn* yarn, uint64_t i);
+  static void YarnTonsString(struct mdbYarn* yarn, nsAString& str);
+  static void YarnTonsCString(struct mdbYarn* yarn, nsACString& str);
+  static void YarnToUInt32(struct mdbYarn* yarn, uint32_t* i);
+  static void YarnToUInt64(struct mdbYarn* yarn, uint64_t* i);
+
+#ifdef DEBUG
+  virtual nsresult DumpContents();
+#endif
+
+  friend class nsMsgHdr;  // use this to get access to cached tokens for hdr
+                          // fields
+  friend class nsMsgThread;  // use this to get access to cached tokens for hdr
+                             // fields
+
+  friend class nsMsgDBEnumerator;
+  friend class nsMsgDBThreadEnumerator;
+
+ protected:
+  virtual ~nsMsgDatabase();
+
+  // prefs stuff - in future, we might want to cache the prefs interface
+  nsresult GetBoolPref(const char* prefName, bool* result);
+  nsresult GetIntPref(const char* prefName, int32_t* result);
+  virtual void GetGlobalPrefs();
+  // retrieval methods
+  nsIMsgThread* GetThreadForReference(nsCString& msgID, nsIMsgDBHdr** pMsgHdr);
+  nsIMsgThread* GetThreadForSubject(nsCString& subject);
+  nsIMsgThread* GetThreadForMessageId(nsCString& msgId);
+  nsIMsgThread* GetThreadForThreadId(nsMsgKey threadId);
+  nsMsgHdr* GetMsgHdrForReference(nsCString& reference);
+  nsIMsgDBHdr* GetMsgHdrForSubject(nsCString& subject);
+  // threading interfaces
+  virtual nsresult CreateNewThread(nsMsgKey key, const char* subject,
+                                   nsMsgThread** newThread);
+  virtual bool ThreadBySubjectWithoutRe();
+  virtual bool UseStrictThreading();
+  virtual bool UseCorrectThreading();
+  virtual nsresult ThreadNewHdr(nsMsgHdr* hdr, bool& newThread);
+  virtual nsresult AddNewThread(nsMsgHdr* msgHdr);
+  virtual nsresult AddToThread(nsMsgHdr* newHdr, nsIMsgThread* thread,
+                               nsIMsgDBHdr* pMsgHdr, bool threadInThread);
+
+  static PRTime gLastUseTime; // global last use time
+  PRTime m_lastUseTime;       // last use time for this db
+  // inline to make instrumentation as cheap as possible
+  inline void RememberLastUseTime() { gLastUseTime = m_lastUseTime = PR_Now(); }
+
+  bool MatchDbName(nsIFile* dbName); // returns TRUE if they match
+
+  // Flag handling routines
+  virtual nsresult SetKeyFlag(nsMsgKey key, bool set, nsMsgMessageFlagType flag,
+                              nsIDBChangeListener* instigator = nullptr);
+  virtual nsresult SetMsgHdrFlag(nsIMsgDBHdr* msgHdr, bool set,
+                                 nsMsgMessageFlagType flag,
+                                 nsIDBChangeListener* instigator);
+
+  virtual bool SetHdrFlag(nsIMsgDBHdr*, bool bSet, nsMsgMessageFlagType flag);
+  virtual bool SetHdrReadFlag(nsIMsgDBHdr*, bool pRead);
+  virtual uint32_t GetStatusFlags(nsIMsgDBHdr* msgHdr,
+                                  nsMsgMessageFlagType origFlags);
+  // helper function which doesn't involve thread object
+
+  virtual nsresult RemoveHeaderFromDB(nsMsgHdr* msgHdr);
+  virtual nsresult RemoveHeaderFromThread(nsMsgHdr* msgHdr);
+  virtual nsresult AdjustExpungedBytesOnDelete(nsIMsgDBHdr* msgHdr);
+
+  mozilla::UniquePtr<mozilla::intl::Collator> m_collationKeyGenerator = nullptr;
+  nsCOMPtr<nsIMimeConverter> m_mimeConverter;
+  nsCOMPtr<nsIMsgRetentionSettings> m_retentionSettings;
+  nsCOMPtr<nsIMsgDownloadSettings> m_downloadSettings;
+
+  nsresult FindMessagesOlderThan(uint32_t daysToKeepHdrs,
+                                 bool applyToFlaggedMessages,
+                                 nsTArray<RefPtr<nsIMsgDBHdr>>& hdrsToDelete);
+  nsresult FindExcessMessages(uint32_t numHeadersToKeep,
+                              bool applyToFlaggedMessages,
+                              nsTArray<RefPtr<nsIMsgDBHdr>>& hdrsToDelete);
+
+  // mdb bookkeeping stuff
+  virtual nsresult InitExistingDB();
+  virtual nsresult InitNewDB();
+  virtual nsresult InitMDBInfo();
+
+  nsCOMPtr<nsIMsgFolder> m_folder;
+  RefPtr<nsDBFolderInfo> m_dbFolderInfo;
+  // NOTE(review): next key for pseudo headers — confirm exact semantics in
+  // the implementation.
+  nsMsgKey m_nextPseudoMsgKey;
+  nsIMdbEnv* m_mdbEnv; // to be used in all the db calls.
+  nsIMdbStore* m_mdbStore;
+  nsIMdbTable* m_mdbAllMsgHeadersTable;
+  nsIMdbTable* m_mdbAllThreadsTable;
+
+  // Used for asynchronous db opens. If non-null, we're still opening
+  // the underlying mork database. If null, the db has been completely opened.
+  nsCOMPtr<nsIMdbThumb> m_thumb;
+  // used to remember the args to Open for async open.
+  bool m_create;
+  bool m_leaveInvalidDB;
+
+  nsCOMPtr<nsIFile> m_dbFile;
+  nsTArray<nsMsgKey> m_newSet; // new messages since last open.
+  bool m_mdbTokensInitialized;
+  nsTObserverArray<nsCOMPtr<nsIDBChangeListener>> m_ChangeListeners;
+  // Cached mdb tokens for the header/thread row scopes, table kinds, and
+  // the individual header/thread columns (set up by InitMDBInfo).
+  mdb_token m_hdrRowScopeToken;
+  mdb_token m_threadRowScopeToken;
+  mdb_token m_hdrTableKindToken;
+  mdb_token m_threadTableKindToken;
+  mdb_token m_allThreadsTableKindToken;
+  mdb_token m_subjectColumnToken;
+  mdb_token m_senderColumnToken;
+  mdb_token m_messageIdColumnToken;
+  mdb_token m_referencesColumnToken;
+  mdb_token m_recipientsColumnToken;
+  mdb_token m_dateColumnToken;
+  mdb_token m_messageSizeColumnToken;
+  mdb_token m_flagsColumnToken;
+  mdb_token m_priorityColumnToken;
+  mdb_token m_labelColumnToken;
+  mdb_token m_numLinesColumnToken;
+  mdb_token m_ccListColumnToken;
+  mdb_token m_bccListColumnToken;
+  mdb_token m_threadFlagsColumnToken;
+  mdb_token m_threadIdColumnToken;
+  mdb_token m_threadChildrenColumnToken;
+  mdb_token m_threadUnreadChildrenColumnToken;
+  mdb_token m_messageThreadIdColumnToken;
+  mdb_token m_threadSubjectColumnToken;
+  mdb_token m_messageCharSetColumnToken;
+  mdb_token m_threadParentColumnToken;
+  mdb_token m_threadRootKeyColumnToken;
+  mdb_token m_threadNewestMsgDateColumnToken;
+  mdb_token m_offlineMsgOffsetColumnToken;
+  mdb_token m_offlineMessageSizeColumnToken;
+
+  // header caching stuff - MRU headers, keeps them around in memory
+  nsresult AddHdrToCache(nsIMsgDBHdr* hdr, nsMsgKey key);
+  nsresult ClearHdrCache(bool reInit);
+  nsresult RemoveHdrFromCache(nsIMsgDBHdr* hdr, nsMsgKey key);
+  // all headers currently instantiated, doesn't hold refs
+  // these get added when msg hdrs get constructed, and removed when they get
+  // destroyed.
+  nsresult GetHdrFromUseCache(nsMsgKey key, nsIMsgDBHdr** result);
+  nsresult AddHdrToUseCache(nsIMsgDBHdr* hdr, nsMsgKey key);
+  nsresult ClearUseHdrCache();
+  nsresult RemoveHdrFromUseCache(nsIMsgDBHdr* hdr, nsMsgKey key);
+
+  // not-reference holding array of threads we've handed out.
+  // If a db goes away, it will clean up the outstanding threads.
+  // We use an nsTArray because we don't expect to ever have very many
+  // of these, rarely more than 5.
+  nsTArray<nsMsgThread*> m_threads;
+  // Clear outstanding thread objects
+  void ClearThreads();
+  nsMsgThread* FindExistingThread(nsMsgKey threadId);
+
+  mdb_pos FindInsertIndexInSortedTable(nsIMdbTable* table, mdb_id idToInsert);
+
+  void ClearCachedObjects(bool dbGoingAway);
+  void InvalidateEnumerators();
+  // all instantiated headers, but doesn't hold refs.
+  PLDHashTable* m_headersInUse;
+  static PLDHashNumber HashKey(const void* aKey);
+  static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey);
+  static void MoveEntry(PLDHashTable* aTable, const PLDHashEntryHdr* aFrom,
+                        PLDHashEntryHdr* aTo);
+  static void ClearEntry(PLDHashTable* aTable, PLDHashEntryHdr* aEntry);
+  static PLDHashTableOps gMsgDBHashTableOps;
+  struct MsgHdrHashElement : public PLDHashEntryHdr {
+    nsMsgKey mKey;
+    nsIMsgDBHdr* mHdr;
+  };
+  PLDHashTable* m_cachedHeaders;
+  bool m_bCacheHeaders;
+  nsMsgKey m_cachedThreadId;
+  nsCOMPtr<nsIMsgThread> m_cachedThread;
+  nsCOMPtr<nsIMdbFactory> mMdbFactory;
+
+  // Message reference hash table
+  static PLDHashTableOps gRefHashTableOps;
+  struct RefHashElement : public PLDHashEntryHdr {
+    const char* mRef; // Hash entry key, must come first
+    nsMsgKey mThreadId;
+    uint32_t mCount;
+  };
+  PLDHashTable* m_msgReferences;
+  nsresult GetRefFromHash(nsCString& reference, nsMsgKey* threadId);
+  nsresult AddRefToHash(nsCString& reference, nsMsgKey threadId);
+  nsresult AddMsgRefsToHash(nsIMsgDBHdr* msgHdr);
+  nsresult RemoveRefFromHash(nsCString& reference);
+  nsresult RemoveMsgRefsFromHash(nsIMsgDBHdr* msgHdr);
+  nsresult InitRefHash();
+
+  // The enumerators add themselves to these lists.
+  // If a db goes away - via destruction or ForceClosed() - it needs to
+  // invalidate any outstanding enumerators.
+  nsTArray<nsMsgDBEnumerator*> m_msgEnumerators;
+  nsTArray<nsMsgDBThreadEnumerator*> m_threadEnumerators;
+
+  // Memory reporter details
+ public:
+  static size_t HeaderHashSizeOf(PLDHashEntryHdr* hdr,
+                                 mozilla::MallocSizeOf aMallocSizeOf,
+                                 void* arg);
+  virtual size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+  virtual size_t SizeOfIncludingThis(
+      mozilla::MallocSizeOf aMallocSizeOf) const {
+    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+  }
+
+ private:
+  uint32_t m_cacheSize;
+  RefPtr<mozilla::mailnews::MsgDBReporter> mMemReporter;
+};
+
+// Holds a folder's message retention policy (how long / how many headers and
+// bodies to keep); implements nsIMsgRetentionSettings.
+class nsMsgRetentionSettings : public nsIMsgRetentionSettings {
+ public:
+  nsMsgRetentionSettings();
+
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSIMSGRETENTIONSETTINGS
+ protected:
+  virtual ~nsMsgRetentionSettings();
+  // Which retention criterion to apply (days vs. count — see
+  // nsMsgRetainByPreference values).
+  nsMsgRetainByPreference m_retainByPreference;
+  uint32_t m_daysToKeepHdrs;
+  uint32_t m_numHeadersToKeep;
+  bool m_useServerDefaults;
+  bool m_cleanupBodiesByDays;
+  uint32_t m_daysToKeepBodies;
+  // Whether retention cleanup also applies to flagged (starred) messages.
+  bool m_applyToFlaggedMessages;
+};
+
+// Holds a folder's offline download policy; implements
+// nsIMsgDownloadSettings.
+class nsMsgDownloadSettings : public nsIMsgDownloadSettings {
+ public:
+  nsMsgDownloadSettings();
+
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSIMSGDOWNLOADSETTINGS
+ protected:
+  virtual ~nsMsgDownloadSettings();
+  bool m_useServerDefaults;
+  bool m_downloadUnreadOnly;
+  bool m_downloadByDate;
+  // Maximum age (in days, presumably — confirm against the interface) of
+  // messages to download when m_downloadByDate is set.
+  int32_t m_ageLimitOfMsgsToDownload;
+};
+
+#endif
diff --git a/comm/mailnews/db/msgdb/public/nsMsgHdr.h b/comm/mailnews/db/msgdb/public/nsMsgHdr.h
new file mode 100644
index 0000000000..94e5b1b8c8
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsMsgHdr.h
@@ -0,0 +1,92 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsMsgHdr_H
+#define _nsMsgHdr_H
+
+#include "mozilla/MemoryReporting.h"
+#include "nsIMsgHdr.h"
+#include "nsString.h"
+#include "MailNewsTypes.h"
+#include "mdb.h"
+#include "nsTArray.h"
+
+class nsMsgDatabase;
+class nsIMsgThread;
+
+// A message header object: a thin wrapper around one mdb row in the owning
+// nsMsgDatabase, with a few cached fields (key, flags, date, ...) to avoid
+// re-reading the row.
+class nsMsgHdr : public nsIMsgDBHdr {
+ public:
+  NS_DECL_NSIMSGDBHDR
+  friend class nsMsgDatabase;
+  friend class nsImapMailDatabase;
+  friend class nsMsgPropertyEnumerator;
+  friend class nsMsgThread;
+
+  ////////////////////////////////////////////////////////////////////////////
+  ////////////////////////////////////////////////////////////////////////////
+  // nsMsgHdr methods:
+  nsMsgHdr(nsMsgDatabase* db, nsIMdbRow* dbRow);
+
+  NS_DECL_ISUPPORTS
+
+  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOfFun) const {
+    return m_references.ShallowSizeOfExcludingThis(aMallocSizeOfFun);
+  }
+  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOfFun) const {
+    return aMallocSizeOfFun(this) + SizeOfExcludingThis(aMallocSizeOfFun);
+  }
+
+ protected:
+  nsIMdbRow* GetMDBRow() { return m_mdbRow; }
+  void ReleaseMDBRow() { NS_IF_RELEASE(m_mdbRow); }
+  nsMsgDatabase* GetMdb() { return m_mdb; }
+  // Invalidate all cached field values so they are re-read from the row.
+  void ClearCachedValues() { m_initedValues = 0; }
+
+  virtual nsresult GetRawFlags(uint32_t* result);
+
+  // Direct-parent / transitive-ancestor checks used for threading.
+  bool IsParentOf(nsIMsgDBHdr* possibleChild);
+  bool IsAncestorOf(nsIMsgDBHdr* possibleChild);
+
+ private:
+  virtual ~nsMsgHdr();
+
+  void Init();
+  virtual nsresult InitFlags();
+  virtual nsresult InitCachedValues();
+
+  bool IsAncestorKilled(uint32_t ancestorsToCheck);
+  void ReparentInThread(nsIMsgThread* thread);
+
+  // Typed accessors for individual row cells, by column token.
+  nsresult SetStringColumn(const char* str, mdb_token token);
+  nsresult SetUInt32Column(uint32_t value, mdb_token token);
+  nsresult GetUInt32Column(mdb_token token, uint32_t* pvalue,
+                           uint32_t defaultValue = 0);
+  nsresult SetUInt64Column(uint64_t value, mdb_token token);
+  nsresult GetUInt64Column(mdb_token token, uint64_t* pvalue,
+                           uint64_t defaultValue = 0);
+
+  // reference and threading stuff.
+  nsresult ParseReferences(const char* references);
+  const char* GetNextReference(const char* startNextRef, nsCString& reference,
+                               bool acceptNonDelimitedReferences);
+
+  nsMsgKey m_threadId;
+  nsMsgKey m_messageKey; // news: article number, local mail: key, imap: uid...
+  nsMsgKey m_threadParent; // message this is a reply to, in thread.
+  PRTime m_date;
+  uint32_t m_messageSize; // lines for news articles, bytes for mail messages
+  uint32_t m_flags;
+  // avoid parsing references every time we want one
+  nsTArray<nsCString> m_references;
+
+  // nsMsgHdrs will have to know what db and row they belong to, since they are
+  // really just a wrapper around the msg row in the mdb. This could cause
+  // problems, though I hope not.
+  nsMsgDatabase* m_mdb;
+  nsIMdbRow* m_mdbRow;
+  // Bitmask recording which cached values have been initialized.
+  uint32_t m_initedValues;
+};
+
+#endif
diff --git a/comm/mailnews/db/msgdb/public/nsMsgThread.h b/comm/mailnews/db/msgdb/public/nsMsgThread.h
new file mode 100644
index 0000000000..37add58082
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsMsgThread.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsMsgThread_H
+#define _nsMsgThread_H
+
+#include "nsIMsgThread.h"
+#include "nsString.h"
+#include "MailNewsTypes.h"
+#include "mdb.h"
+
+class nsIMdbTable;
+class nsIMsgDBHdr;
+class nsMsgDatabase;
+
+// A conversation thread: wraps an mdb table of the thread's message rows plus
+// a meta row of cached counts/keys, owned by an nsMsgDatabase.
+class nsMsgThread : public nsIMsgThread {
+ public:
+  nsMsgThread();
+  nsMsgThread(nsMsgDatabase* db, nsIMdbTable* table);
+
+  friend class nsMsgThreadEnumerator;
+  friend class nsMsgDatabase;
+
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSIMSGTHREAD
+
+  RefPtr<nsMsgDatabase> m_mdbDB;
+
+ protected:
+  virtual ~nsMsgThread();
+
+  void Init();
+  void Clear();
+  virtual nsresult InitCachedValues();
+  // Adjust the cached total / unread child counts by |delta|.
+  nsresult ChangeChildCount(int32_t delta);
+  nsresult ChangeUnreadChildCount(int32_t delta);
+  nsresult RemoveChild(nsMsgKey msgKey);
+  nsresult SetThreadRootKey(nsMsgKey threadRootKey);
+  nsresult GetChildHdrForKey(nsMsgKey desiredKey, nsIMsgDBHdr** result,
+                             int32_t* resultIndex);
+  // Make newParentOfOldRoot the thread root, reparenting the old root.
+  nsresult RerootThread(nsIMsgDBHdr* newParentOfOldRoot, nsIMsgDBHdr* oldRoot,
+                        nsIDBChangeAnnouncer* announcer);
+  nsresult ReparentChildrenOf(nsMsgKey oldParent, nsMsgKey newParent,
+                              nsIDBChangeAnnouncer* announcer);
+
+  nsresult ReparentNonReferenceChildrenOf(nsIMsgDBHdr* topLevelHdr,
+                                          nsMsgKey newParentKey,
+                                          nsIDBChangeAnnouncer* announcer);
+  nsresult ReparentMsgsWithInvalidParent(uint32_t numChildren,
+                                         nsMsgKey threadParentKey);
+
+  nsMsgKey m_threadKey;
+  uint32_t m_numChildren;
+  uint32_t m_numUnreadChildren;
+  uint32_t m_flags;
+  // The thread's message table and its meta row (cached counts, root key...).
+  nsCOMPtr<nsIMdbTable> m_mdbTable;
+  nsCOMPtr<nsIMdbRow> m_metaRow;
+  bool m_cachedValuesInitialized;
+  nsMsgKey m_threadRootKey;
+  uint32_t m_newestMsgDate;
+};
+
+#endif
diff --git a/comm/mailnews/db/msgdb/public/nsNewsDatabase.h b/comm/mailnews/db/msgdb/public/nsNewsDatabase.h
new file mode 100644
index 0000000000..4a804d0d69
--- /dev/null
+++ b/comm/mailnews/db/msgdb/public/nsNewsDatabase.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef _nsNewsDatabase_H_
+#define _nsNewsDatabase_H_
+
+#include "mozilla/Attributes.h"
+#include "nsMsgDatabase.h"
+#include "nsINewsDatabase.h"
+#include "nsTArray.h"
+#include "nsIMsgHdr.h"
+
+// news group database
+
+// news group database
+// Read state is tracked in an external nsMsgKeySet (the folder's read set)
+// rather than per-header flags; SyncWithReadSet reconciles the two.
+class nsNewsDatabase : public nsMsgDatabase, public nsINewsDatabase {
+ public:
+  nsNewsDatabase();
+
+  NS_DECL_ISUPPORTS_INHERITED
+  NS_DECL_NSINEWSDATABASE
+
+  NS_IMETHOD Close(bool forceCommit) override;
+  NS_IMETHOD ForceClosed() override;
+  NS_IMETHOD Commit(nsMsgDBCommit commitType) override;
+  virtual uint32_t GetCurVersion() override;
+
+  // methods to get and set docsets for ids.
+  NS_IMETHOD IsRead(nsMsgKey key, bool* pRead) override;
+  virtual nsresult IsHeaderRead(nsIMsgDBHdr* msgHdr, bool* pRead) override;
+
+  NS_IMETHOD GetHighWaterArticleNum(nsMsgKey* key) override;
+  NS_IMETHOD GetLowWaterArticleNum(nsMsgKey* key) override;
+  NS_IMETHOD MarkAllRead(nsTArray<nsMsgKey>& thoseMarked) override;
+
+  // Expire (remove) articles up to, or within, the given key range.
+  virtual nsresult ExpireUpTo(nsMsgKey expireKey);
+  virtual nsresult ExpireRange(nsMsgKey startRange, nsMsgKey endRange);
+
+  virtual bool SetHdrReadFlag(nsIMsgDBHdr* msgHdr, bool bRead) override;
+
+  virtual nsresult AdjustExpungedBytesOnDelete(nsIMsgDBHdr* msgHdr) override;
+  // Reconcile header read flags with m_readSet.
+  nsresult SyncWithReadSet();
+
+  NS_IMETHOD GetDefaultViewFlags(
+      nsMsgViewFlagsTypeValue* aDefaultViewFlags) override;
+  NS_IMETHOD GetDefaultSortType(
+      nsMsgViewSortTypeValue* aDefaultSortType) override;
+  NS_IMETHOD GetDefaultSortOrder(
+      nsMsgViewSortOrderValue* aDefaultSortOrder) override;
+
+ protected:
+  virtual ~nsNewsDatabase();
+  // this is owned by the nsNewsFolder, which lives longer than the db.
+  nsMsgKeySet* m_readSet;
+};
+
+#endif
diff --git a/comm/mailnews/db/msgdb/src/components.conf b/comm/mailnews/db/msgdb/src/components.conf
new file mode 100644
index 0000000000..1d65e685f2
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/components.conf
@@ -0,0 +1,44 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Classes = [
+ {
+ "cid": "{a86c86ae-e97f-11d2-a506-0060b0fc04b7}",
+ "contract_ids": ["@mozilla.org/nsMsgDatabase/msgDB-mailbox"],
+ "type": "nsMailDatabase",
+ "headers": ["/comm/mailnews/db/msgdb/public/nsMailDatabase.h"],
+ },
+ {
+ "cid": "{36414aa0-e980-11d2-a506-0060b0fc04b7}",
+ "contract_ids": ["@mozilla.org/nsMsgDatabase/msgDB-news"],
+ "type": "nsNewsDatabase",
+ "headers": ["/comm/mailnews/db/msgdb/public/nsNewsDatabase.h"],
+ },
+ {
+ "cid": "{9e4b07ee-e980-11d2-a506-0060b0fc04b7}",
+ "contract_ids": ["@mozilla.org/nsMsgDatabase/msgDB-imap"],
+ "type": "nsImapMailDatabase",
+ "headers": ["/comm/mailnews/db/msgdb/public/nsImapMailDatabase.h"],
+ },
+ {
+ "cid": "{1bd976d6-df44-11d4-a5b6-0060b0fc04b7}",
+ "contract_ids": ["@mozilla.org/msgDatabase/retentionSettings;1"],
+ "type": "nsMsgRetentionSettings",
+ "headers": ["/comm/mailnews/db/msgdb/public/nsMsgDatabase.h"],
+ },
+ {
+ "cid": "{4e3dae5a-157a-11d5-a5c0-0060b0fc04b7}",
+ "contract_ids": ["@mozilla.org/msgDatabase/downloadSettings;1"],
+ "type": "nsMsgDownloadSettings",
+ "headers": ["/comm/mailnews/db/msgdb/public/nsMsgDatabase.h"],
+ },
+ {
+ "cid": "{03223c50-1e88-45e8-ba1a-7ce792dc3fc3}",
+ "contract_ids": ["@mozilla.org/msgDatabase/msgDBService;1"],
+ "type": "nsMsgDBService",
+ "headers": ["/comm/mailnews/db/msgdb/public/nsMsgDatabase.h"],
+ "name": "DB",
+ "interfaces": ["nsIMsgDBService"],
+ },
+]
diff --git a/comm/mailnews/db/msgdb/src/moz.build b/comm/mailnews/db/msgdb/src/moz.build
new file mode 100644
index 0000000000..06c2b92475
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/moz.build
@@ -0,0 +1,22 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ "nsDBFolderInfo.cpp",
+ "nsImapMailDatabase.cpp",
+ "nsMailDatabase.cpp",
+ "nsMsgDatabase.cpp",
+ "nsMsgDatabaseEnumerators.cpp",
+ "nsMsgHdr.cpp",
+ "nsMsgOfflineImapOperation.cpp",
+ "nsMsgThread.cpp",
+ "nsNewsDatabase.cpp",
+]
+
+FINAL_LIBRARY = "mail"
+
+XPCOM_MANIFESTS += [
+ "components.conf",
+]
diff --git a/comm/mailnews/db/msgdb/src/nsDBFolderInfo.cpp b/comm/mailnews/db/msgdb/src/nsDBFolderInfo.cpp
new file mode 100644
index 0000000000..57118ffaf8
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsDBFolderInfo.cpp
@@ -0,0 +1,749 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "msgCore.h"
+#include "nsDBFolderInfo.h"
+#include "nsMsgDatabase.h"
+#include "nsMsgFolderFlags.h"
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+#include "nsIPrefLocalizedString.h"
+#include "nsIObserver.h"
+#include "nsIObserverService.h"
+#include "nsIMsgDBView.h"
+#include "nsServiceManagerUtils.h"
+#include "nsImapCore.h"
+
+static const char* kDBFolderInfoScope = "ns:msg:db:row:scope:dbfolderinfo:all";
+static const char* kDBFolderInfoTableKind = "ns:msg:db:table:kind:dbfolderinfo";
+
+struct mdbOid gDBFolderInfoOID;
+
+static const char* kNumMessagesColumnName = "numMsgs";
+// have to leave this as numNewMsgs even though it's numUnread Msgs
+static const char* kNumUnreadMessagesColumnName = "numNewMsgs";
+static const char* kFlagsColumnName = "flags";
+static const char* kFolderSizeColumnName = "folderSize";
+static const char* kExpungedBytesColumnName = "expungedBytes";
+static const char* kFolderDateColumnName = "folderDate";
+static const char* kHighWaterMessageKeyColumnName = "highWaterKey";
+
+static const char* kImapUidValidityColumnName = "UIDValidity";
+static const char* kTotalPendingMessagesColumnName = "totPendingMsgs";
+static const char* kUnreadPendingMessagesColumnName = "unreadPendingMsgs";
+static const char* kMailboxNameColumnName = "mailboxName";
+static const char* kKnownArtsSetColumnName = "knownArts";
+static const char* kExpiredMarkColumnName = "expiredMark";
+static const char* kVersionColumnName = "version";
+static const char* kLocaleColumnName = "locale";
+
+NS_IMPL_ADDREF(nsDBFolderInfo)
+NS_IMPL_RELEASE(nsDBFolderInfo)
+
+NS_IMETHODIMP
+nsDBFolderInfo::QueryInterface(REFNSIID iid, void** result) {
+ if (!result) return NS_ERROR_NULL_POINTER;
+
+ *result = nullptr;
+ if (iid.Equals(NS_GET_IID(nsIDBFolderInfo)) ||
+ iid.Equals(NS_GET_IID(nsISupports))) {
+ *result = static_cast<nsIDBFolderInfo*>(this);
+ AddRef();
+ return NS_OK;
+ }
+ return NS_NOINTERFACE;
+}
+
+nsDBFolderInfo::nsDBFolderInfo(nsMsgDatabase* mdb)
+ : m_flags(0),
+ m_expiredMark(0),
+ m_tableKindToken(0),
+ m_expiredMarkColumnToken(0) {
+ m_mdbTable = NULL;
+ m_mdbRow = NULL;
+ m_version = 1; // for upgrading...
+ m_IMAPHierarchySeparator = 0; // imap path separator
+ // mail only (for now)
+ m_folderSize = 0;
+ m_folderDate = 0;
+ m_expungedBytes = 0; // sum of size of deleted messages in folder
+ m_highWaterMessageKey = 0;
+
+ m_numUnreadMessages = 0;
+ m_numMessages = 0;
+ // IMAP only
+ m_ImapUidValidity = kUidUnknown;
+ m_totalPendingMessages = 0;
+ m_unreadPendingMessages = 0;
+
+ m_mdbTokensInitialized = false;
+
+ m_mdb = mdb;
+ if (mdb) {
+ nsresult err;
+
+ err = m_mdb->GetStore()->StringToToken(mdb->GetEnv(), kDBFolderInfoScope,
+ &m_rowScopeToken);
+ if (NS_SUCCEEDED(err)) {
+ err = m_mdb->GetStore()->StringToToken(
+ mdb->GetEnv(), kDBFolderInfoTableKind, &m_tableKindToken);
+ if (NS_SUCCEEDED(err)) {
+ gDBFolderInfoOID.mOid_Scope = m_rowScopeToken;
+ gDBFolderInfoOID.mOid_Id = 1;
+ }
+ }
+ InitMDBInfo();
+ }
+}
+
+nsDBFolderInfo::~nsDBFolderInfo() {
+ // nsMsgDatabase strictly owns nsDBFolderInfo, so don't ref-count db.
+ ReleaseExternalReferences();
+}
+
+// Release any objects we're holding onto. This needs to be safe
+// to call multiple times.
+void nsDBFolderInfo::ReleaseExternalReferences() {
+ if (m_mdb) {
+ if (m_mdbTable) {
+ NS_RELEASE(m_mdbTable);
+ m_mdbTable = nullptr;
+ }
+ if (m_mdbRow) {
+ NS_RELEASE(m_mdbRow);
+ m_mdbRow = nullptr;
+ }
+ m_mdb = nullptr;
+ }
+}
+
+// this routine sets up a new db to know about the dbFolderInfo stuff...
+nsresult nsDBFolderInfo::AddToNewMDB() {
+ nsresult ret = NS_OK;
+ if (m_mdb && m_mdb->GetStore()) {
+ nsIMdbStore* store = m_mdb->GetStore();
+ // create the unique table for the dbFolderInfo.
+ nsresult err =
+ store->NewTable(m_mdb->GetEnv(), m_rowScopeToken, m_tableKindToken,
+ true, nullptr, &m_mdbTable);
+
+ // create the singleton row for the dbFolderInfo.
+ err = store->NewRowWithOid(m_mdb->GetEnv(), &gDBFolderInfoOID, &m_mdbRow);
+
+ // add the row to the singleton table.
+ if (m_mdbRow && NS_SUCCEEDED(err))
+ err = m_mdbTable->AddRow(m_mdb->GetEnv(), m_mdbRow);
+
+ ret = err; // what are we going to do about nsresult's?
+ }
+ return ret;
+}
+
+nsresult nsDBFolderInfo::InitFromExistingDB() {
+ nsresult ret = NS_OK;
+ if (m_mdb && m_mdb->GetStore()) {
+ nsIMdbStore* store = m_mdb->GetStore();
+ if (store) {
+ mdb_pos rowPos;
+ mdb_count outTableCount; // current number of such tables
+ mdb_bool mustBeUnique; // whether port can hold only one of these
+ mdb_bool hasOid;
+ ret = store->GetTableKind(m_mdb->GetEnv(), m_rowScopeToken,
+ m_tableKindToken, &outTableCount, &mustBeUnique,
+ &m_mdbTable);
+ // NS_ASSERTION(mustBeUnique && outTableCount == 1, "only one global db
+ // info allowed");
+
+ if (m_mdbTable) {
+ // find singleton row for global info.
+ ret = m_mdbTable->HasOid(m_mdb->GetEnv(), &gDBFolderInfoOID, &hasOid);
+ if (NS_SUCCEEDED(ret)) {
+ nsIMdbTableRowCursor* rowCursor;
+ rowPos = -1;
+ ret = m_mdbTable->GetTableRowCursor(m_mdb->GetEnv(), rowPos,
+ &rowCursor);
+ if (NS_SUCCEEDED(ret)) {
+ ret = rowCursor->NextRow(m_mdb->GetEnv(), &m_mdbRow, &rowPos);
+ NS_RELEASE(rowCursor);
+ if (!m_mdbRow) ret = NS_ERROR_FAILURE;
+ if (NS_SUCCEEDED(ret)) LoadMemberVariables();
+ }
+ }
+ } else
+ ret = NS_ERROR_FAILURE;
+ }
+ }
+ return ret;
+}
+
+nsresult nsDBFolderInfo::InitMDBInfo() {
+ nsresult ret = NS_OK;
+ if (!m_mdbTokensInitialized && m_mdb && m_mdb->GetStore()) {
+ nsIMdbStore* store = m_mdb->GetStore();
+ nsIMdbEnv* env = m_mdb->GetEnv();
+
+ store->StringToToken(env, kNumMessagesColumnName,
+ &m_numMessagesColumnToken);
+ store->StringToToken(env, kNumUnreadMessagesColumnName,
+ &m_numUnreadMessagesColumnToken);
+ store->StringToToken(env, kFlagsColumnName, &m_flagsColumnToken);
+ store->StringToToken(env, kFolderSizeColumnName, &m_folderSizeColumnToken);
+ store->StringToToken(env, kExpungedBytesColumnName,
+ &m_expungedBytesColumnToken);
+ store->StringToToken(env, kFolderDateColumnName, &m_folderDateColumnToken);
+
+ store->StringToToken(env, kHighWaterMessageKeyColumnName,
+ &m_highWaterMessageKeyColumnToken);
+ store->StringToToken(env, kMailboxNameColumnName,
+ &m_mailboxNameColumnToken);
+
+ store->StringToToken(env, kImapUidValidityColumnName,
+ &m_imapUidValidityColumnToken);
+ store->StringToToken(env, kTotalPendingMessagesColumnName,
+ &m_totalPendingMessagesColumnToken);
+ store->StringToToken(env, kUnreadPendingMessagesColumnName,
+ &m_unreadPendingMessagesColumnToken);
+ store->StringToToken(env, kExpiredMarkColumnName,
+ &m_expiredMarkColumnToken);
+ store->StringToToken(env, kVersionColumnName, &m_versionColumnToken);
+ m_mdbTokensInitialized = true;
+ }
+
+ return ret;
+}
+
+nsresult nsDBFolderInfo::LoadMemberVariables() {
+ // it's really not an error for these properties to not exist...
+ GetInt32PropertyWithToken(m_numMessagesColumnToken, m_numMessages);
+ GetInt32PropertyWithToken(m_numUnreadMessagesColumnToken,
+ m_numUnreadMessages);
+ GetInt32PropertyWithToken(m_flagsColumnToken, m_flags);
+ GetInt64PropertyWithToken(m_folderSizeColumnToken, m_folderSize);
+ GetUint32PropertyWithToken(m_folderDateColumnToken, m_folderDate);
+ GetInt32PropertyWithToken(m_imapUidValidityColumnToken, m_ImapUidValidity,
+ kUidUnknown);
+ GetUint32PropertyWithToken(m_expiredMarkColumnToken, m_expiredMark);
+ GetInt64PropertyWithToken(m_expungedBytesColumnToken, m_expungedBytes);
+ GetUint32PropertyWithToken(m_highWaterMessageKeyColumnToken,
+ m_highWaterMessageKey);
+ int32_t version;
+
+ GetInt32PropertyWithToken(m_versionColumnToken, version);
+ m_version = (uint16_t)version;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetVersion(uint32_t version) {
+ m_version = version;
+ return SetUint32PropertyWithToken(m_versionColumnToken, (uint32_t)m_version);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetVersion(uint32_t* version) {
+ *version = m_version;
+ return NS_OK;
+}
+
+nsresult nsDBFolderInfo::AdjustHighWater(nsMsgKey highWater, bool force) {
+ if (force || m_highWaterMessageKey < highWater) {
+ m_highWaterMessageKey = highWater;
+ SetUint32PropertyWithToken(m_highWaterMessageKeyColumnToken, highWater);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetHighWater(nsMsgKey highWater) {
+ return AdjustHighWater(highWater, true);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::OnKeyAdded(nsMsgKey aNewKey) {
+ return AdjustHighWater(aNewKey, false);
+}
+
+NS_IMETHODIMP
+nsDBFolderInfo::GetFolderSize(int64_t* size) {
+ NS_ENSURE_ARG_POINTER(size);
+ *size = m_folderSize;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetFolderSize(int64_t size) {
+ m_folderSize = size;
+ return SetInt64Property(kFolderSizeColumnName, m_folderSize);
+}
+
+NS_IMETHODIMP
+nsDBFolderInfo::GetFolderDate(uint32_t* folderDate) {
+ NS_ENSURE_ARG_POINTER(folderDate);
+ *folderDate = m_folderDate;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetFolderDate(uint32_t folderDate) {
+ m_folderDate = folderDate;
+ return SetUint32PropertyWithToken(m_folderDateColumnToken, folderDate);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetHighWater(nsMsgKey* result) {
+ // Sanity check highwater - if it gets too big, other code
+ // can fail. Look through last 100 messages to recalculate
+ // the highwater mark.
+ *result = m_highWaterMessageKey;
+ if (m_highWaterMessageKey > 0xFFFFFF00 && m_mdb) {
+ nsCOMPtr<nsIMsgEnumerator> hdrs;
+ nsresult rv = m_mdb->ReverseEnumerateMessages(getter_AddRefs(hdrs));
+ if (NS_FAILED(rv)) return rv;
+ bool hasMore = false;
+ nsCOMPtr<nsIMsgDBHdr> pHeader;
+ nsMsgKey recalculatedHighWater = 1;
+ int32_t i = 0;
+ while (i++ < 100 && NS_SUCCEEDED(rv = hdrs->HasMoreElements(&hasMore)) &&
+ hasMore) {
+ (void)hdrs->GetNext(getter_AddRefs(pHeader));
+ if (pHeader) {
+ nsMsgKey msgKey;
+ pHeader->GetMessageKey(&msgKey);
+ if (msgKey > recalculatedHighWater) recalculatedHighWater = msgKey;
+ }
+ }
+ NS_ASSERTION(m_highWaterMessageKey >= recalculatedHighWater,
+ "highwater incorrect");
+ m_highWaterMessageKey = recalculatedHighWater;
+ }
+ *result = m_highWaterMessageKey;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetExpiredMark(nsMsgKey expiredKey) {
+ m_expiredMark = expiredKey;
+ return SetUint32PropertyWithToken(m_expiredMarkColumnToken, expiredKey);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetExpiredMark(nsMsgKey* result) {
+ *result = m_expiredMark;
+ return NS_OK;
+}
+
+// The size of the argument depends on the maximum size of a single message
+NS_IMETHODIMP nsDBFolderInfo::ChangeExpungedBytes(int32_t delta) {
+ return SetExpungedBytes(m_expungedBytes + delta);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetMailboxName(const nsAString& newBoxName) {
+ return SetPropertyWithToken(m_mailboxNameColumnToken, newBoxName);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetMailboxName(nsAString& boxName) {
+ return GetPropertyWithToken(m_mailboxNameColumnToken, boxName);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::ChangeNumUnreadMessages(int32_t delta) {
+ m_numUnreadMessages += delta;
+ // m_numUnreadMessages can never be set to negative.
+ if (m_numUnreadMessages < 0) {
+#ifdef DEBUG_bienvenu1
+ NS_ASSERTION(false, "Hardcoded assertion");
+#endif
+ m_numUnreadMessages = 0;
+ }
+ return SetUint32PropertyWithToken(m_numUnreadMessagesColumnToken,
+ m_numUnreadMessages);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::ChangeNumMessages(int32_t delta) {
+ m_numMessages += delta;
+ // m_numMessages can never be set to negative.
+ if (m_numMessages < 0) {
+#ifdef DEBUG_bienvenu
+ NS_ASSERTION(false, "num messages can't be < 0");
+#endif
+ m_numMessages = 0;
+ }
+ return SetUint32PropertyWithToken(m_numMessagesColumnToken, m_numMessages);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetNumUnreadMessages(int32_t* result) {
+ *result = m_numUnreadMessages;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetNumUnreadMessages(int32_t numUnreadMessages) {
+ m_numUnreadMessages = numUnreadMessages;
+ return SetUint32PropertyWithToken(m_numUnreadMessagesColumnToken,
+ m_numUnreadMessages);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetNumMessages(int32_t* result) {
+ *result = m_numMessages;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetNumMessages(int32_t numMessages) {
+ m_numMessages = numMessages;
+ return SetUint32PropertyWithToken(m_numMessagesColumnToken, m_numMessages);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetExpungedBytes(int64_t* result) {
+ *result = m_expungedBytes;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetExpungedBytes(int64_t expungedBytes) {
+ m_expungedBytes = expungedBytes;
+ return SetInt64PropertyWithToken(m_expungedBytesColumnToken, m_expungedBytes);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetFlags(int32_t* result) {
+ *result = m_flags;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetFlags(int32_t flags) {
+ nsresult ret = NS_OK;
+
+ if (m_flags != flags) {
+ NS_ASSERTION((m_flags & nsMsgFolderFlags::Inbox) == 0 ||
+ (flags & nsMsgFolderFlags::Inbox) != 0,
+ "lost inbox flag");
+ m_flags = flags;
+ ret = SetInt32PropertyWithToken(m_flagsColumnToken, m_flags);
+ }
+ return ret;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::OrFlags(int32_t flags, int32_t* result) {
+ m_flags |= flags;
+ *result = m_flags;
+ return SetInt32PropertyWithToken(m_flagsColumnToken, m_flags);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::AndFlags(int32_t flags, int32_t* result) {
+ m_flags &= flags;
+ *result = m_flags;
+ return SetInt32PropertyWithToken(m_flagsColumnToken, m_flags);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetImapUidValidity(int32_t* result) {
+ *result = m_ImapUidValidity;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetImapUidValidity(int32_t uidValidity) {
+ m_ImapUidValidity = uidValidity;
+ return SetUint32PropertyWithToken(m_imapUidValidityColumnToken,
+ m_ImapUidValidity);
+}
+
+bool nsDBFolderInfo::TestFlag(int32_t flags) { return (m_flags & flags) != 0; }
+
+NS_IMETHODIMP
+nsDBFolderInfo::GetLocale(nsAString& result) {
+ GetProperty(kLocaleColumnName, result);
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetLocale(const nsAString& locale) {
+ return SetProperty(kLocaleColumnName, locale);
+}
+
+NS_IMETHODIMP
+nsDBFolderInfo::GetImapTotalPendingMessages(int32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ *result = m_totalPendingMessages;
+ return NS_OK;
+}
+
+void nsDBFolderInfo::ChangeImapTotalPendingMessages(int32_t delta) {
+ m_totalPendingMessages += delta;
+ SetInt32PropertyWithToken(m_totalPendingMessagesColumnToken,
+ m_totalPendingMessages);
+}
+
+NS_IMETHODIMP
+nsDBFolderInfo::GetImapUnreadPendingMessages(int32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ *result = m_unreadPendingMessages;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetImapUnreadPendingMessages(
+ int32_t numUnreadPendingMessages) {
+ m_unreadPendingMessages = numUnreadPendingMessages;
+ return SetUint32PropertyWithToken(m_unreadPendingMessagesColumnToken,
+ m_unreadPendingMessages);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetImapTotalPendingMessages(
+ int32_t numTotalPendingMessages) {
+ m_totalPendingMessages = numTotalPendingMessages;
+ return SetUint32PropertyWithToken(m_totalPendingMessagesColumnToken,
+ m_totalPendingMessages);
+}
+
+void nsDBFolderInfo::ChangeImapUnreadPendingMessages(int32_t delta) {
+ m_unreadPendingMessages += delta;
+ SetInt32PropertyWithToken(m_unreadPendingMessagesColumnToken,
+ m_unreadPendingMessages);
+}
+
+/* attribute nsMsgViewTypeValue viewType; */
+NS_IMETHODIMP nsDBFolderInfo::GetViewType(nsMsgViewTypeValue* aViewType) {
+ uint32_t viewTypeValue;
+ nsresult rv = GetUint32Property("viewType", nsMsgViewType::eShowAllThreads,
+ &viewTypeValue);
+ *aViewType = viewTypeValue;
+ return rv;
+}
+NS_IMETHODIMP nsDBFolderInfo::SetViewType(nsMsgViewTypeValue aViewType) {
+ return SetUint32Property("viewType", aViewType);
+}
+
+/* attribute nsMsgViewFlagsTypeValue viewFlags; */
+NS_IMETHODIMP nsDBFolderInfo::GetViewFlags(
+ nsMsgViewFlagsTypeValue* aViewFlags) {
+ nsMsgViewFlagsTypeValue defaultViewFlags;
+ nsresult rv = m_mdb->GetDefaultViewFlags(&defaultViewFlags);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t viewFlagsValue;
+ rv = GetUint32Property("viewFlags", defaultViewFlags, &viewFlagsValue);
+ *aViewFlags = viewFlagsValue;
+ return rv;
+}
+NS_IMETHODIMP nsDBFolderInfo::SetViewFlags(nsMsgViewFlagsTypeValue aViewFlags) {
+ return SetUint32Property("viewFlags", aViewFlags);
+}
+
+/* attribute nsMsgViewSortTypeValue sortType; */
+NS_IMETHODIMP nsDBFolderInfo::GetSortType(nsMsgViewSortTypeValue* aSortType) {
+ nsMsgViewSortTypeValue defaultSortType;
+ nsresult rv = m_mdb->GetDefaultSortType(&defaultSortType);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t sortTypeValue;
+ rv = GetUint32Property("sortType", defaultSortType, &sortTypeValue);
+ *aSortType = sortTypeValue;
+ return rv;
+}
+NS_IMETHODIMP nsDBFolderInfo::SetSortType(nsMsgViewSortTypeValue aSortType) {
+ return SetUint32Property("sortType", aSortType);
+}
+
+/* attribute nsMsgViewSortOrderValue sortOrder; */
+NS_IMETHODIMP nsDBFolderInfo::GetSortOrder(
+ nsMsgViewSortOrderValue* aSortOrder) {
+ nsMsgViewSortOrderValue defaultSortOrder;
+ nsresult rv = m_mdb->GetDefaultSortOrder(&defaultSortOrder);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ uint32_t sortOrderValue;
+ rv = GetUint32Property("sortOrder", defaultSortOrder, &sortOrderValue);
+ *aSortOrder = sortOrderValue;
+ return rv;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetSortOrder(nsMsgViewSortOrderValue aSortOrder) {
+ return SetUint32Property("sortOrder", aSortOrder);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetKnownArtsSet(const char* newsArtSet) {
+ return m_mdb->SetProperty(m_mdbRow, kKnownArtsSetColumnName, newsArtSet);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetKnownArtsSet(char** newsArtSet) {
+ return m_mdb->GetProperty(m_mdbRow, kKnownArtsSetColumnName, newsArtSet);
+}
+
+// get arbitrary property, aka row cell value.
+NS_IMETHODIMP nsDBFolderInfo::GetProperty(const char* propertyName,
+ nsAString& resultProperty) {
+ return m_mdb->GetPropertyAsNSString(m_mdbRow, propertyName, resultProperty);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetCharProperty(
+ const char* aPropertyName, const nsACString& aPropertyValue) {
+ return m_mdb->SetProperty(m_mdbRow, aPropertyName,
+ PromiseFlatCString(aPropertyValue).get());
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetCharProperty(const char* propertyName,
+ nsACString& resultProperty) {
+ nsCString result;
+ nsresult rv =
+ m_mdb->GetProperty(m_mdbRow, propertyName, getter_Copies(result));
+ if (NS_SUCCEEDED(rv)) resultProperty.Assign(result);
+ return rv;
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetUint32Property(const char* propertyName,
+ uint32_t propertyValue) {
+ return m_mdb->SetUint32Property(m_mdbRow, propertyName, propertyValue);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetInt64Property(const char* propertyName,
+ int64_t propertyValue) {
+ return m_mdb->SetUint64Property(m_mdbRow, propertyName,
+ (uint64_t)propertyValue);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetProperty(const char* propertyName,
+ const nsAString& propertyStr) {
+ return m_mdb->SetPropertyFromNSString(m_mdbRow, propertyName, propertyStr);
+}
+
+nsresult nsDBFolderInfo::SetPropertyWithToken(mdb_token aProperty,
+ const nsAString& propertyStr) {
+ return m_mdb->SetNSStringPropertyWithToken(m_mdbRow, aProperty, propertyStr);
+}
+
+nsresult nsDBFolderInfo::SetUint32PropertyWithToken(mdb_token aProperty,
+ uint32_t propertyValue) {
+ return m_mdb->UInt32ToRowCellColumn(m_mdbRow, aProperty, propertyValue);
+}
+
+nsresult nsDBFolderInfo::SetInt64PropertyWithToken(mdb_token aProperty,
+ int64_t propertyValue) {
+ return m_mdb->UInt64ToRowCellColumn(m_mdbRow, aProperty,
+ (uint64_t)propertyValue);
+}
+
+nsresult nsDBFolderInfo::SetInt32PropertyWithToken(mdb_token aProperty,
+ int32_t propertyValue) {
+ nsAutoString propertyStr;
+ propertyStr.AppendInt(propertyValue, 16);
+ return SetPropertyWithToken(aProperty, propertyStr);
+}
+
+nsresult nsDBFolderInfo::GetPropertyWithToken(mdb_token aProperty,
+ nsAString& resultProperty) {
+ return m_mdb->RowCellColumnTonsString(m_mdbRow, aProperty, resultProperty);
+}
+
+nsresult nsDBFolderInfo::GetUint32PropertyWithToken(mdb_token aProperty,
+ uint32_t& propertyValue,
+ uint32_t defaultValue) {
+ return m_mdb->RowCellColumnToUInt32(m_mdbRow, aProperty, propertyValue,
+ defaultValue);
+}
+
+nsresult nsDBFolderInfo::GetInt32PropertyWithToken(mdb_token aProperty,
+ int32_t& propertyValue,
+ int32_t defaultValue) {
+ return m_mdb->RowCellColumnToUInt32(m_mdbRow, aProperty,
+ (uint32_t&)propertyValue, defaultValue);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetUint32Property(const char* propertyName,
+ uint32_t defaultValue,
+ uint32_t* propertyValue) {
+ return m_mdb->GetUint32Property(m_mdbRow, propertyName, propertyValue,
+ defaultValue);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetInt64Property(const char* propertyName,
+                                               int64_t defaultValue,
+                                               int64_t* propertyValue) {
+  // propertyValue is already the caller's out-pointer; pass it through
+  // directly (taking &propertyValue would write over the local pointer).
+  return m_mdb->GetUint64Property(m_mdbRow, propertyName,
+                                  (uint64_t*)propertyValue, defaultValue);
+}
+
+nsresult nsDBFolderInfo::GetInt64PropertyWithToken(mdb_token aProperty,
+ int64_t& propertyValue,
+ int64_t defaultValue) {
+ return m_mdb->RowCellColumnToUInt64(m_mdbRow, aProperty,
+ (uint64_t*)&propertyValue, defaultValue);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetBooleanProperty(const char* propertyName,
+ bool defaultValue,
+ bool* propertyValue) {
+ uint32_t defaultUint32Value = (defaultValue) ? 1 : 0;
+ uint32_t returnValue;
+ nsresult rv = m_mdb->GetUint32Property(m_mdbRow, propertyName, &returnValue,
+ defaultUint32Value);
+ *propertyValue = (returnValue != 0);
+ return rv;
+}
+NS_IMETHODIMP nsDBFolderInfo::SetBooleanProperty(const char* propertyName,
+ bool propertyValue) {
+ return m_mdb->SetUint32Property(m_mdbRow, propertyName,
+ propertyValue ? 1 : 0);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::GetFolderName(nsACString& folderName) {
+ return GetCharProperty("folderName", folderName);
+}
+
+NS_IMETHODIMP nsDBFolderInfo::SetFolderName(const nsACString& folderName) {
+ return SetCharProperty("folderName", folderName);
+}
+
+class nsTransferDBFolderInfo : public nsDBFolderInfo {
+ public:
+ nsTransferDBFolderInfo();
+ virtual ~nsTransferDBFolderInfo();
+ // parallel arrays of properties and values
+ nsTArray<nsCString> m_properties;
+ nsTArray<nsCString> m_values;
+};
+
+nsTransferDBFolderInfo::nsTransferDBFolderInfo() : nsDBFolderInfo(nullptr) {}
+
+nsTransferDBFolderInfo::~nsTransferDBFolderInfo() {}
+
+/* void GetTransferInfo (out nsIDBFolderInfo transferInfo); */
+NS_IMETHODIMP nsDBFolderInfo::GetTransferInfo(nsIDBFolderInfo** transferInfo) {
+ NS_ENSURE_ARG_POINTER(transferInfo);
+ NS_ENSURE_STATE(m_mdbRow);
+
+ RefPtr<nsTransferDBFolderInfo> newInfo = new nsTransferDBFolderInfo;
+
+ mdb_count numCells;
+ mdbYarn cellYarn;
+ mdb_column cellColumn;
+ char columnName[100];
+ mdbYarn cellName = {columnName, 0, sizeof(columnName), 0, 0, nullptr};
+
+ m_mdbRow->GetCount(m_mdb->GetEnv(), &numCells);
+ // iterate over the cells in the dbfolderinfo remembering attribute names and
+ // values.
+ for (mdb_count cellIndex = 0; cellIndex < numCells; cellIndex++) {
+ nsresult err = m_mdbRow->SeekCellYarn(m_mdb->GetEnv(), cellIndex,
+ &cellColumn, nullptr);
+ if (NS_SUCCEEDED(err)) {
+ err = m_mdbRow->AliasCellYarn(m_mdb->GetEnv(), cellColumn, &cellYarn);
+ if (NS_SUCCEEDED(err)) {
+ m_mdb->GetStore()->TokenToString(m_mdb->GetEnv(), cellColumn,
+ &cellName);
+ newInfo->m_values.AppendElement(
+ Substring((const char*)cellYarn.mYarn_Buf,
+ (const char*)cellYarn.mYarn_Buf + cellYarn.mYarn_Fill));
+ newInfo->m_properties.AppendElement(
+ Substring((const char*)cellName.mYarn_Buf,
+ (const char*)cellName.mYarn_Buf + cellName.mYarn_Fill));
+ }
+ }
+ }
+
+ newInfo.forget(transferInfo);
+ return NS_OK;
+}
+
+/* void InitFromTransferInfo (in nsIDBFolderInfo transferInfo); */
+NS_IMETHODIMP nsDBFolderInfo::InitFromTransferInfo(
+ nsIDBFolderInfo* aTransferInfo) {
+ NS_ENSURE_ARG(aTransferInfo);
+
+ nsTransferDBFolderInfo* transferInfo =
+ static_cast<nsTransferDBFolderInfo*>(aTransferInfo);
+
+ for (uint32_t i = 0; i < transferInfo->m_values.Length(); i++)
+ SetCharProperty(transferInfo->m_properties[i].get(),
+ transferInfo->m_values[i]);
+
+ LoadMemberVariables();
+ return NS_OK;
+}
diff --git a/comm/mailnews/db/msgdb/src/nsImapMailDatabase.cpp b/comm/mailnews/db/msgdb/src/nsImapMailDatabase.cpp
new file mode 100644
index 0000000000..7103d0c475
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsImapMailDatabase.cpp
@@ -0,0 +1,217 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <sys/stat.h>
+
+#include "msgCore.h"
+#include "nsImapMailDatabase.h"
+#include "nsDBFolderInfo.h"
+#include "nsMsgMessageFlags.h"
+
+const char* kPendingHdrsScope =
+    "ns:msg:db:row:scope:pending:all";  // scope for all offline ops table
+const char* kPendingHdrsTableKind = "ns:msg:db:table:kind:pending";
+struct mdbOid gAllPendingHdrsTableOID;
+
+nsImapMailDatabase::nsImapMailDatabase()
+ : m_pendingHdrsRowScopeToken(0), m_pendingHdrsTableKindToken(0) {}
+
+nsImapMailDatabase::~nsImapMailDatabase() {}
+
+NS_IMETHODIMP nsImapMailDatabase::GetSummaryValid(bool* aResult) {
+ NS_ENSURE_ARG_POINTER(aResult);
+ if (m_dbFolderInfo) {
+ uint32_t version;
+ m_dbFolderInfo->GetVersion(&version);
+ *aResult = (GetCurVersion() == version);
+ } else
+ *aResult = false;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsImapMailDatabase::SetSummaryValid(bool valid) {
+ if (m_dbFolderInfo) {
+ m_dbFolderInfo->SetVersion(valid ? GetCurVersion() : 0);
+ Commit(nsMsgDBCommitType::kLargeCommit);
+ }
+ return NS_OK;
+}
+
+// We override this to avoid our parent class (nsMailDatabase)'s
+// grabbing of the folder semaphore, and bailing on failure.
+NS_IMETHODIMP nsImapMailDatabase::DeleteMessages(
+ nsTArray<nsMsgKey> const& nsMsgKeys, nsIDBChangeListener* instigator) {
+ return nsMsgDatabase::DeleteMessages(nsMsgKeys, instigator);
+}
+
+nsresult nsImapMailDatabase::AdjustExpungedBytesOnDelete(nsIMsgDBHdr* msgHdr) {
+ uint32_t msgFlags;
+ msgHdr->GetFlags(&msgFlags);
+ if (msgFlags & nsMsgMessageFlags::Offline && m_dbFolderInfo) {
+ uint32_t size = 0;
+ (void)msgHdr->GetOfflineMessageSize(&size);
+ return m_dbFolderInfo->ChangeExpungedBytes(size);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsImapMailDatabase::ForceClosed() {
+ m_mdbAllPendingHdrsTable = nullptr;
+ return nsMailDatabase::ForceClosed();
+}
+
+nsresult nsImapMailDatabase::GetAllPendingHdrsTable() {
+ nsresult rv = NS_OK;
+ if (!m_mdbAllPendingHdrsTable)
+ rv = GetTableCreateIfMissing(kPendingHdrsScope, kPendingHdrsTableKind,
+ getter_AddRefs(m_mdbAllPendingHdrsTable),
+ m_pendingHdrsRowScopeToken,
+ m_pendingHdrsTableKindToken);
+ return rv;
+}
+
+NS_IMETHODIMP nsImapMailDatabase::AddNewHdrToDB(nsIMsgDBHdr* newHdr,
+ bool notify) {
+ nsresult rv = nsMsgDatabase::AddNewHdrToDB(newHdr, notify);
+ if (NS_SUCCEEDED(rv)) rv = UpdatePendingAttributes(newHdr);
+ return rv;
+}
+
+NS_IMETHODIMP nsImapMailDatabase::UpdatePendingAttributes(
+ nsIMsgDBHdr* aNewHdr) {
+ nsresult rv = GetAllPendingHdrsTable();
+ NS_ENSURE_SUCCESS(rv, rv);
+ mdb_count numPendingHdrs = 0;
+ m_mdbAllPendingHdrsTable->GetCount(GetEnv(), &numPendingHdrs);
+ if (numPendingHdrs > 0) {
+ mdbYarn messageIdYarn;
+ nsCOMPtr<nsIMdbRow> pendingRow;
+ mdbOid outRowId;
+
+ nsCString messageId;
+ aNewHdr->GetMessageId(getter_Copies(messageId));
+ messageIdYarn.mYarn_Buf = (void*)messageId.get();
+ messageIdYarn.mYarn_Fill = messageId.Length();
+ messageIdYarn.mYarn_Form = 0;
+ messageIdYarn.mYarn_Size = messageIdYarn.mYarn_Fill;
+
+ m_mdbStore->FindRow(GetEnv(), m_pendingHdrsRowScopeToken,
+ m_messageIdColumnToken, &messageIdYarn, &outRowId,
+ getter_AddRefs(pendingRow));
+ if (pendingRow) {
+ mdb_count numCells;
+ mdbYarn cellYarn;
+ mdb_column cellColumn;
+ uint32_t existingFlags;
+
+ pendingRow->GetCount(GetEnv(), &numCells);
+ aNewHdr->GetFlags(&existingFlags);
+ // iterate over the cells in the pending hdr setting properties on the
+ // aNewHdr. we skip cell 0, which is the messageId;
+ nsMsgHdr* msgHdr =
+ static_cast<nsMsgHdr*>(aNewHdr); // closed system, cast ok
+ nsIMdbRow* row = msgHdr->GetMDBRow();
+ for (mdb_count cellIndex = 1; cellIndex < numCells; cellIndex++) {
+ nsresult err =
+ pendingRow->SeekCellYarn(GetEnv(), cellIndex, &cellColumn, nullptr);
+ if (NS_SUCCEEDED(err)) {
+ err = pendingRow->AliasCellYarn(GetEnv(), cellColumn, &cellYarn);
+ if (NS_SUCCEEDED(err)) {
+ if (row) row->AddColumn(GetEnv(), cellColumn, &cellYarn);
+ }
+ }
+ }
+ // We might have changed some cached values, so force a refresh.
+ msgHdr->ClearCachedValues();
+ uint32_t resultFlags;
+ msgHdr->OrFlags(existingFlags, &resultFlags);
+ m_mdbAllPendingHdrsTable->CutRow(GetEnv(), pendingRow);
+ pendingRow->CutAllColumns(GetEnv());
+ }
+ }
+ return rv;
+}
+
+nsresult nsImapMailDatabase::GetRowForPendingHdr(nsIMsgDBHdr* pendingHdr,
+ nsIMdbRow** row) {
+ nsresult rv = GetAllPendingHdrsTable();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mdbYarn messageIdYarn;
+ nsCOMPtr<nsIMdbRow> pendingRow;
+ mdbOid outRowId;
+ nsCString messageId;
+ pendingHdr->GetMessageId(getter_Copies(messageId));
+ messageIdYarn.mYarn_Buf = (void*)messageId.get();
+ messageIdYarn.mYarn_Fill = messageId.Length();
+ messageIdYarn.mYarn_Form = 0;
+ messageIdYarn.mYarn_Size = messageIdYarn.mYarn_Fill;
+
+ rv = m_mdbStore->FindRow(GetEnv(), m_pendingHdrsRowScopeToken,
+ m_messageIdColumnToken, &messageIdYarn, &outRowId,
+ getter_AddRefs(pendingRow));
+
+ if (!pendingRow)
+ rv = m_mdbStore->NewRow(GetEnv(), m_pendingHdrsRowScopeToken,
+ getter_AddRefs(pendingRow));
+
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (pendingRow) {
+ // now we need to add cells to the row to remember the messageid, property
+ // and property value, and flags. Then, when hdrs are added to the db, we'll
+ // check if they have a matching message-id, and if so, set the property and
+ // flags
+ // XXX we already fetched messageId from the pending hdr, could it have
+ // changed by the time we get here?
+ nsCString messageId;
+ pendingHdr->GetMessageId(getter_Copies(messageId));
+ // we're just going to ignore messages without a message-id. They should be
+ // rare. If SPAM messages often didn't have message-id's, they'd be filtered
+ // on the server, most likely, and spammers would then start putting in
+ // message-id's.
+ if (!messageId.IsEmpty()) {
+ extern const char* kMessageIdColumnName;
+ m_mdbAllPendingHdrsTable->AddRow(GetEnv(), pendingRow);
+ // make sure this is the first cell so that when we ignore the first
+ // cell in nsImapMailDatabase::AddNewHdrToDB, we're ignoring the right one
+ (void)SetProperty(pendingRow, kMessageIdColumnName, messageId.get());
+ pendingRow.forget(row);
+ } else
+ return NS_ERROR_FAILURE;
+ }
+ return rv;
+}
+
+NS_IMETHODIMP nsImapMailDatabase::SetAttributeOnPendingHdr(
+ nsIMsgDBHdr* pendingHdr, const char* property, const char* propertyVal) {
+ NS_ENSURE_ARG_POINTER(pendingHdr);
+ nsCOMPtr<nsIMdbRow> pendingRow;
+ nsresult rv = GetRowForPendingHdr(pendingHdr, getter_AddRefs(pendingRow));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return SetProperty(pendingRow, property, propertyVal);
+}
+
+NS_IMETHODIMP
+nsImapMailDatabase::SetUint32AttributeOnPendingHdr(nsIMsgDBHdr* pendingHdr,
+ const char* property,
+ uint32_t propertyVal) {
+ NS_ENSURE_ARG_POINTER(pendingHdr);
+ nsCOMPtr<nsIMdbRow> pendingRow;
+ nsresult rv = GetRowForPendingHdr(pendingHdr, getter_AddRefs(pendingRow));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return SetUint32Property(pendingRow, property, propertyVal);
+}
+
+NS_IMETHODIMP
+nsImapMailDatabase::SetUint64AttributeOnPendingHdr(nsIMsgDBHdr* aPendingHdr,
+ const char* aProperty,
+ uint64_t aPropertyVal) {
+ NS_ENSURE_ARG_POINTER(aPendingHdr);
+ nsCOMPtr<nsIMdbRow> pendingRow;
+ nsresult rv = GetRowForPendingHdr(aPendingHdr, getter_AddRefs(pendingRow));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return SetUint64Property(pendingRow, aProperty, aPropertyVal);
+}
diff --git a/comm/mailnews/db/msgdb/src/nsMailDatabase.cpp b/comm/mailnews/db/msgdb/src/nsMailDatabase.cpp
new file mode 100644
index 0000000000..c07294785a
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMailDatabase.cpp
@@ -0,0 +1,380 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "msgCore.h"
+#include "nsMailDatabase.h"
+#include "nsDBFolderInfo.h"
+#include "nsMsgLocalFolderHdrs.h"
+#include "nsNetUtil.h"
+#include "nsMsgOfflineImapOperation.h"
+#include "nsMsgFolderFlags.h"
+#include "mozilla/Logging.h"
+#include "prprf.h"
+#include "nsMsgUtils.h"
+#include "nsIMsgPluggableStore.h"
+#include "nsSimpleEnumerator.h"
+
+using namespace mozilla;
+
+extern LazyLogModule IMAPOffline; // defined in nsMsgOfflineImapOperation.cpp
+
+// scope for all offline ops tables
+const char* kOfflineOpsScope = "ns:msg:db:row:scope:ops:all";
+const char* kOfflineOpsTableKind = "ns:msg:db:table:kind:ops";
+struct mdbOid gAllOfflineOpsTableOID;
+
+nsMailDatabase::nsMailDatabase() : m_reparse(false) {
+ m_mdbAllOfflineOpsTable = nullptr;
+ m_offlineOpsRowScopeToken = 0;
+ m_offlineOpsTableKindToken = 0;
+}
+
+nsMailDatabase::~nsMailDatabase() {}
+
+// caller passes in upgrading==true if they want back a db even if the db is out
+// of date. If so, they'll extract out the interesting info from the db, close
+// it, delete it, and then try to open the db again, prior to reparsing.
+nsresult nsMailDatabase::Open(nsMsgDBService* aDBService, nsIFile* aSummaryFile,
+ bool aCreate, bool aUpgrading) {
+#ifdef DEBUG
+ nsString leafName;
+ aSummaryFile->GetLeafName(leafName);
+ if (!StringEndsWith(leafName, NS_LITERAL_STRING_FROM_CSTRING(SUMMARY_SUFFIX),
+ nsCaseInsensitiveStringComparator))
+ NS_ERROR("non summary file passed into open");
+#endif
+ return nsMsgDatabase::Open(aDBService, aSummaryFile, aCreate, aUpgrading);
+}
+
+NS_IMETHODIMP nsMailDatabase::ForceClosed() {
+ m_mdbAllOfflineOpsTable = nullptr;
+ return nsMsgDatabase::ForceClosed();
+}
+
+// get this on demand so that only db's that have offline ops will
+// create the table.
+nsresult nsMailDatabase::GetAllOfflineOpsTable() {
+ nsresult rv = NS_OK;
+ if (!m_mdbAllOfflineOpsTable)
+ rv = GetTableCreateIfMissing(kOfflineOpsScope, kOfflineOpsTableKind,
+ getter_AddRefs(m_mdbAllOfflineOpsTable),
+ m_offlineOpsRowScopeToken,
+ m_offlineOpsTableKindToken);
+ return rv;
+}
+
+NS_IMETHODIMP nsMailDatabase::DeleteMessages(
+ nsTArray<nsMsgKey> const& nsMsgKeys, nsIDBChangeListener* instigator) {
+ nsresult rv;
+ if (m_folder) {
+ bool isLocked;
+ m_folder->GetLocked(&isLocked);
+ if (isLocked) {
+ NS_ASSERTION(false, "Some other operation is in progress");
+ return NS_MSG_FOLDER_BUSY;
+ }
+ }
+
+ rv = nsMsgDatabase::DeleteMessages(nsMsgKeys, instigator);
+ SetSummaryValid(true);
+ return rv;
+}
+
+NS_IMETHODIMP nsMailDatabase::GetSummaryValid(bool* aResult) {
+ uint32_t version;
+ m_dbFolderInfo->GetVersion(&version);
+ if (GetCurVersion() != version) {
+ *aResult = false;
+ return NS_OK;
+ }
+ if (!m_folder) {
+ // If the folder is not set, we just return without checking the validity
+ // of the summary file. For now, this is an expected condition when the
+ // message database is being opened from a URL in
+ // nsMailboxUrl::GetMsgHdrForKey() which calls
+ // nsMsgDBService::OpenMailDBFromFile() without a folder.
+ // Returning an error here would lead to the deletion of the MSF in the
+ // caller nsMsgDatabase::CheckForErrors().
+ *aResult = true;
+ return NS_OK;
+ }
+
+ // If this is a virtual folder, there is no storage.
+ bool isVirtual = false;
+ m_folder->GetFlag(nsMsgFolderFlags::Virtual, &isVirtual);
+ if (isVirtual) {
+ *aResult = true;
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIMsgPluggableStore> msgStore;
+ nsresult rv = m_folder->GetMsgStore(getter_AddRefs(msgStore));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return msgStore->IsSummaryFileValid(m_folder, this, aResult);
+}
+
+NS_IMETHODIMP nsMailDatabase::SetSummaryValid(bool aValid) {
+ nsMsgDatabase::SetSummaryValid(aValid);
+
+ if (!m_folder) return NS_ERROR_NULL_POINTER;
+
+ // If this is a virtual folder, there is no storage.
+ bool flag;
+ m_folder->GetFlag(nsMsgFolderFlags::Virtual, &flag);
+ if (flag) return NS_OK;
+
+ nsCOMPtr<nsIMsgPluggableStore> msgStore;
+ nsresult rv = m_folder->GetMsgStore(getter_AddRefs(msgStore));
+ NS_ENSURE_SUCCESS(rv, rv);
+ return msgStore->SetSummaryFileValid(m_folder, this, aValid);
+}
+
+NS_IMETHODIMP nsMailDatabase::RemoveOfflineOp(nsIMsgOfflineImapOperation* op) {
+ nsresult rv = GetAllOfflineOpsTable();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!op || !m_mdbAllOfflineOpsTable) return NS_ERROR_NULL_POINTER;
+ nsMsgOfflineImapOperation* offlineOp =
+ static_cast<nsMsgOfflineImapOperation*>(
+ op); // closed system, so this is ok
+ nsIMdbRow* row = offlineOp->GetMDBRow();
+ rv = m_mdbAllOfflineOpsTable->CutRow(GetEnv(), row);
+ row->CutAllColumns(GetEnv());
+ return rv;
+}
+
+NS_IMETHODIMP nsMailDatabase::GetOfflineOpForKey(
+ nsMsgKey msgKey, bool create, nsIMsgOfflineImapOperation** offlineOp) {
+ mdb_bool hasOid;
+ mdbOid rowObjectId;
+ nsresult err;
+
+ nsresult rv = GetAllOfflineOpsTable();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ if (!offlineOp || !m_mdbAllOfflineOpsTable) return NS_ERROR_NULL_POINTER;
+
+ *offlineOp = NULL;
+
+ rowObjectId.mOid_Id = msgKey;
+ rowObjectId.mOid_Scope = m_offlineOpsRowScopeToken;
+ err = m_mdbAllOfflineOpsTable->HasOid(GetEnv(), &rowObjectId, &hasOid);
+ if (NS_SUCCEEDED(err) && m_mdbStore && (hasOid || create)) {
+ nsCOMPtr<nsIMdbRow> offlineOpRow;
+ err = m_mdbStore->GetRow(GetEnv(), &rowObjectId,
+ getter_AddRefs(offlineOpRow));
+
+ if (create) {
+ if (!offlineOpRow) {
+ err = m_mdbStore->NewRowWithOid(GetEnv(), &rowObjectId,
+ getter_AddRefs(offlineOpRow));
+ NS_ENSURE_SUCCESS(err, err);
+ }
+ if (offlineOpRow && !hasOid)
+ m_mdbAllOfflineOpsTable->AddRow(GetEnv(), offlineOpRow);
+ }
+
+ if (NS_SUCCEEDED(err) && offlineOpRow) {
+ NS_IF_ADDREF(*offlineOp =
+ new nsMsgOfflineImapOperation(this, offlineOpRow));
+ (*offlineOp)->SetMessageKey(msgKey);
+ }
+ if (!hasOid && m_dbFolderInfo) {
+ // set initial value for flags so we don't lose them.
+ nsCOMPtr<nsIMsgDBHdr> msgHdr;
+ GetMsgHdrForKey(msgKey, getter_AddRefs(msgHdr));
+ if (msgHdr) {
+ uint32_t flags;
+ msgHdr->GetFlags(&flags);
+ (*offlineOp)->SetNewFlags(flags);
+ }
+ int32_t newFlags;
+ m_dbFolderInfo->OrFlags(nsMsgFolderFlags::OfflineEvents, &newFlags);
+ }
+ }
+
+ return err;
+}
+
+NS_IMETHODIMP nsMailDatabase::ListAllOfflineOpIds(
+ nsTArray<nsMsgKey>& offlineOpIds) {
+ nsresult rv = GetAllOfflineOpsTable();
+ NS_ENSURE_SUCCESS(rv, rv);
+ nsIMdbTableRowCursor* rowCursor;
+
+ if (m_mdbAllOfflineOpsTable) {
+ nsresult err =
+ m_mdbAllOfflineOpsTable->GetTableRowCursor(GetEnv(), -1, &rowCursor);
+ while (NS_SUCCEEDED(err) && rowCursor) {
+ mdbOid outOid;
+ mdb_pos outPos;
+
+ err = rowCursor->NextRowOid(GetEnv(), &outOid, &outPos);
+      // is this right? Mork is returning a 0 id, but that should be valid.
+ if (outPos < 0 || outOid.mOid_Id == (mdb_id)-1) break;
+ if (NS_SUCCEEDED(err)) {
+ offlineOpIds.AppendElement(outOid.mOid_Id);
+ if (MOZ_LOG_TEST(IMAPOffline, LogLevel::Info)) {
+ nsCOMPtr<nsIMsgOfflineImapOperation> offlineOp;
+ GetOfflineOpForKey(outOid.mOid_Id, false, getter_AddRefs(offlineOp));
+ if (offlineOp) {
+ nsMsgOfflineImapOperation* logOp =
+ static_cast<nsMsgOfflineImapOperation*>(
+ static_cast<nsIMsgOfflineImapOperation*>(offlineOp.get()));
+ if (logOp) logOp->Log();
+ }
+ }
+ }
+ }
+ // TODO: would it cause a problem to replace this with "rv = err;" ?
+ rv = (NS_SUCCEEDED(err)) ? NS_OK : NS_ERROR_FAILURE;
+ rowCursor->Release();
+ }
+
+ offlineOpIds.Sort();
+ return rv;
+}
+
+NS_IMETHODIMP nsMailDatabase::ListAllOfflineDeletes(
+ nsTArray<nsMsgKey>& offlineDeletes) {
+ nsresult rv = GetAllOfflineOpsTable();
+ NS_ENSURE_SUCCESS(rv, rv);
+ nsIMdbTableRowCursor* rowCursor;
+ if (m_mdbAllOfflineOpsTable) {
+ nsresult err =
+ m_mdbAllOfflineOpsTable->GetTableRowCursor(GetEnv(), -1, &rowCursor);
+ while (NS_SUCCEEDED(err) && rowCursor) {
+ mdbOid outOid;
+ mdb_pos outPos;
+ nsIMdbRow* offlineOpRow;
+
+ err = rowCursor->NextRow(GetEnv(), &offlineOpRow, &outPos);
+      // is this right? Mork is returning a 0 id, but that should be valid.
+ if (outPos < 0 || offlineOpRow == nullptr) break;
+ if (NS_SUCCEEDED(err)) {
+ offlineOpRow->GetOid(GetEnv(), &outOid);
+ RefPtr<nsIMsgOfflineImapOperation> offlineOp =
+ new nsMsgOfflineImapOperation(this, offlineOpRow);
+ imapMessageFlagsType newFlags;
+ nsOfflineImapOperationType opType;
+
+ offlineOp->GetOperation(&opType);
+ offlineOp->GetNewFlags(&newFlags);
+ if (opType & nsIMsgOfflineImapOperation::kMsgMoved ||
+ ((opType & nsIMsgOfflineImapOperation::kFlagsChanged) &&
+ (newFlags & nsIMsgOfflineImapOperation::kMsgMarkedDeleted)))
+ offlineDeletes.AppendElement(outOid.mOid_Id);
+
+ offlineOpRow->Release();
+ }
+ }
+ // TODO: would it cause a problem to replace this with "rv = err;" ?
+ rv = (NS_SUCCEEDED(err)) ? NS_OK : NS_ERROR_FAILURE;
+ rowCursor->Release();
+ }
+ return rv;
+}
+
+// This is used to remember that the db is out of sync with the mail folder
+// and needs to be regenerated.
+void nsMailDatabase::SetReparse(bool reparse) { m_reparse = reparse; }
+
+class nsMsgOfflineOpEnumerator : public nsSimpleEnumerator {
+ public:
+ const nsID& DefaultInterface() override {
+ return NS_GET_IID(nsIMsgOfflineImapOperation);
+ }
+
+ // nsISimpleEnumerator methods:
+ NS_DECL_NSISIMPLEENUMERATOR
+
+ explicit nsMsgOfflineOpEnumerator(nsMailDatabase* db);
+
+ protected:
+ ~nsMsgOfflineOpEnumerator() override;
+ nsresult GetRowCursor();
+ nsresult PrefetchNext();
+ nsMailDatabase* mDB;
+ nsIMdbTableRowCursor* mRowCursor;
+ nsCOMPtr<nsIMsgOfflineImapOperation> mResultOp;
+ bool mDone;
+ bool mNextPrefetched;
+};
+
+nsMsgOfflineOpEnumerator::nsMsgOfflineOpEnumerator(nsMailDatabase* db)
+ : mDB(db), mRowCursor(nullptr), mDone(false) {
+ NS_ADDREF(mDB);
+ mNextPrefetched = false;
+}
+
+nsMsgOfflineOpEnumerator::~nsMsgOfflineOpEnumerator() {
+ NS_IF_RELEASE(mRowCursor);
+ NS_RELEASE(mDB);
+}
+
+nsresult nsMsgOfflineOpEnumerator::GetRowCursor() {
+ nsresult rv = NS_OK;
+ mDone = false;
+
+ if (!mDB || !mDB->m_mdbAllOfflineOpsTable) return NS_ERROR_NULL_POINTER;
+
+ rv = mDB->m_mdbAllOfflineOpsTable->GetTableRowCursor(mDB->GetEnv(), -1,
+ &mRowCursor);
+ return rv;
+}
+
+NS_IMETHODIMP nsMsgOfflineOpEnumerator::GetNext(nsISupports** aItem) {
+ NS_ENSURE_ARG_POINTER(aItem);
+
+ nsresult rv = NS_OK;
+ if (!mNextPrefetched) rv = PrefetchNext();
+ if (NS_SUCCEEDED(rv)) {
+ if (mResultOp) {
+ NS_ADDREF(*aItem = mResultOp);
+ mNextPrefetched = false;
+ }
+ }
+ return rv;
+}
+
+nsresult nsMsgOfflineOpEnumerator::PrefetchNext() {
+ nsresult rv = NS_OK;
+ nsIMdbRow* offlineOpRow;
+ mdb_pos rowPos;
+
+ if (!mRowCursor) {
+ rv = GetRowCursor();
+ if (NS_FAILED(rv)) return rv;
+ }
+
+ rv = mRowCursor->NextRow(mDB->GetEnv(), &offlineOpRow, &rowPos);
+ if (!offlineOpRow) {
+ mDone = true;
+ return NS_ERROR_FAILURE;
+ }
+ if (NS_FAILED(rv)) {
+ mDone = true;
+ return rv;
+ }
+
+ nsIMsgOfflineImapOperation* op =
+ new nsMsgOfflineImapOperation(mDB, offlineOpRow);
+ mResultOp = op;
+ if (!op) return NS_ERROR_OUT_OF_MEMORY;
+
+ if (mResultOp) {
+ mNextPrefetched = true;
+ return NS_OK;
+ }
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP nsMsgOfflineOpEnumerator::HasMoreElements(bool* aResult) {
+ NS_ENSURE_ARG_POINTER(aResult);
+
+ if (!mNextPrefetched) PrefetchNext();
+ *aResult = !mDone;
+ return NS_OK;
+}
diff --git a/comm/mailnews/db/msgdb/src/nsMsgDatabase.cpp b/comm/mailnews/db/msgdb/src/nsMsgDatabase.cpp
new file mode 100644
index 0000000000..779ca400a9
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMsgDatabase.cpp
@@ -0,0 +1,4730 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// this file implements the nsMsgDatabase interface using the MDB Interface.
+
+#include "nscore.h"
+#include "msgCore.h"
+#include "nsIFile.h"
+#include "nsMailDatabase.h"
+#include "nsDBFolderInfo.h"
+#include "nsIMsgNewsFolder.h"
+#include "nsMsgThread.h"
+#include "nsIMsgSearchTerm.h"
+#include "nsIMdbFactoryFactory.h"
+#include "mozilla/Logging.h"
+#include "mozilla/Telemetry.h"
+#include "prprf.h"
+#include "nsMsgFolderFlags.h"
+#include "nsIMsgAccountManager.h"
+#include "nsIMsgDBView.h"
+#include "nsIMsgFolderCache.h"
+#include "nsIMsgFolderCacheElement.h"
+#include "MailNewsTypes2.h"
+#include "nsMsgUtils.h"
+#include "nsComponentManagerUtils.h"
+#include "nsServiceManagerUtils.h"
+#include "nsMemory.h"
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+#include "nsPrintfCString.h"
+#include "nsMsgDatabaseEnumerators.h"
+#include "nsIMemoryReporter.h"
+#include "nsIWeakReferenceUtils.h"
+#include "nsMailDirServiceDefs.h"
+#include "mozilla/Components.h"
+#include "mozilla/mailnews/MimeHeaderParser.h"
+#include "mozilla/intl/LocaleService.h"
+
+using namespace mozilla::mailnews;
+using namespace mozilla;
+
+#if defined(DEBUG_sspitzer_) || defined(DEBUG_seth_)
+# define DEBUG_MSGKEYSET 1
+#endif
+
+#define MSG_HASH_SIZE 512
+
+// This will be used on discovery, since we don't know total.
+const int32_t kMaxHdrsInCache = 512;
+
+// special keys
+static const nsMsgKey kAllMsgHdrsTableKey = 1;
+static const nsMsgKey kTableKeyForThreadOne = 0xfffffffe;
+static const nsMsgKey kAllThreadsTableKey = 0xfffffffd;
+static const nsMsgKey kFirstPseudoKey = 0xfffffff0;
+static const nsMsgKey kIdStartOfFake = 0xffffff80;
+static const nsMsgKey kForceReparseKey = 0xfffffff0;
+
+LazyLogModule DBLog("MsgDB");
+
+PRTime nsMsgDatabase::gLastUseTime;
+
+/**
+ * mozilla::intl APIs require sizeable buffers. This class abstracts over
+ * the nsTArray.
+ */
+class nsTArrayU8Buffer {
+ public:
+ using CharType = uint8_t;
+
+ // Do not allow copy or move. Move could be added in the future if needed.
+ nsTArrayU8Buffer(const nsTArrayU8Buffer&) = delete;
+ nsTArrayU8Buffer& operator=(const nsTArrayU8Buffer&) = delete;
+
+ explicit nsTArrayU8Buffer(nsTArray<CharType>& aArray) : mArray(aArray) {}
+
+ /**
+ * Ensures the buffer has enough space to accommodate |size| elements.
+ */
+ [[nodiscard]] bool reserve(size_t size) {
+ mArray.SetCapacity(size);
+ // nsTArray::SetCapacity returns void, return true to keep the API the same
+ // as the other Buffer implementations.
+ return true;
+ }
+
+ /**
+ * Returns the raw data inside the buffer.
+ */
+ CharType* data() { return mArray.Elements(); }
+
+ /**
+ * Returns the count of elements written into the buffer.
+ */
+ size_t length() const { return mArray.Length(); }
+
+ /**
+ * Returns the buffer's overall capacity.
+ */
+ size_t capacity() const { return mArray.Capacity(); }
+
+ /**
+ * Resizes the buffer to the given amount of written elements.
+ */
+ void written(size_t amount) {
+ MOZ_ASSERT(amount <= mArray.Capacity());
+ // This sets |mArray|'s internal size so that it matches how much was
+ // written. This is necessary because the write happens across FFI
+ // boundaries.
+ mArray.SetLengthAndRetainStorage(amount);
+ }
+
+ private:
+ nsTArray<CharType>& mArray;
+};
+
+NS_IMPL_ISUPPORTS(nsMsgDBService, nsIMsgDBService)
+
+nsMsgDBService::nsMsgDBService() {}
+
+nsMsgDBService::~nsMsgDBService() {
+#ifdef DEBUG
+ // If you hit this warning, it means that some code is holding onto
+ // a db at shutdown.
+ NS_WARNING_ASSERTION(!m_dbCache.Length(), "some msg dbs left open");
+# ifndef MOZILLA_OFFICIAL
+ // Only print this on local builds since it causes crashes,
+ // see bug 1468691, bug 1377692 and bug 1342858.
+ for (uint32_t i = 0; i < m_dbCache.Length(); i++) {
+ nsMsgDatabase* pMessageDB = m_dbCache.ElementAt(i);
+ if (pMessageDB)
+ printf("db left open %s\n",
+ pMessageDB->m_dbFile->HumanReadablePath().get());
+ }
+# endif
+#endif
+}
+
+NS_IMETHODIMP nsMsgDBService::OpenFolderDB(nsIMsgFolder* aFolder,
+ bool aLeaveInvalidDB,
+ nsIMsgDatabase** _retval) {
+ NS_ENSURE_ARG(aFolder);
+ nsCOMPtr<nsIMsgIncomingServer> incomingServer;
+ nsresult rv = aFolder->GetServer(getter_AddRefs(incomingServer));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIFile> summaryFilePath;
+ rv = aFolder->GetSummaryFile(getter_AddRefs(summaryFilePath));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsMsgDatabase* cacheDB = FindInCache(summaryFilePath);
+ if (cacheDB) {
+ // this db could have ended up in the folder cache w/o an m_folder pointer
+ // via OpenMailDBFromFile. If so, take this chance to fix the folder.
+ if (!cacheDB->m_folder) cacheDB->m_folder = aFolder;
+ cacheDB->RememberLastUseTime();
+ *_retval = cacheDB; // FindInCache already addRefed.
+ // if m_thumb is set, someone is asynchronously opening the db. But our
+ // caller wants to synchronously open it, so just do it.
+ if (cacheDB->m_thumb)
+ return cacheDB->Open(this, summaryFilePath, false, aLeaveInvalidDB);
+ return NS_OK;
+ }
+
+ nsCString localDatabaseType;
+ incomingServer->GetLocalDatabaseType(localDatabaseType);
+ nsAutoCString dbContractID("@mozilla.org/nsMsgDatabase/msgDB-");
+ dbContractID.Append(localDatabaseType.get());
+ nsCOMPtr<nsIMsgDatabase> msgDB = do_CreateInstance(dbContractID.get(), &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Don't try to create the database yet--let the createNewDB call do that.
+ nsMsgDatabase* msgDatabase = static_cast<nsMsgDatabase*>(msgDB.get());
+ msgDatabase->m_folder = aFolder;
+ rv = msgDatabase->Open(this, summaryFilePath, false, aLeaveInvalidDB);
+ if (NS_FAILED(rv) && rv != NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE) return rv;
+
+ NS_ADDREF(*_retval = msgDB);
+
+ if (NS_FAILED(rv)) {
+#ifdef DEBUG
+ // Doing these checks for debug only as we don't want to report certain
+ // errors in debug mode, but in release mode we wouldn't report them either
+
+ // These errors are expected.
+ if (rv == NS_MSG_ERROR_FOLDER_SUMMARY_MISSING ||
+ rv == NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE)
+ return rv;
+
+ // If it isn't one of the expected errors, throw a warning.
+ NS_ENSURE_SUCCESS(rv, rv);
+#endif
+ return rv;
+ }
+
+ FinishDBOpen(aFolder, msgDatabase);
+ return rv;
+}
+
+/**
+ * When a db is opened, we need to hook up any pending listeners for
+ * that db, and notify them.
+ */
+void nsMsgDBService::HookupPendingListeners(nsIMsgDatabase* db,
+ nsIMsgFolder* folder) {
+ for (int32_t listenerIndex = 0;
+ listenerIndex < m_foldersPendingListeners.Count(); listenerIndex++) {
+ // check if we have a pending listener on this db, and if so, add it.
+ if (m_foldersPendingListeners[listenerIndex] == folder) {
+ db->AddListener(m_pendingListeners.ObjectAt(listenerIndex));
+ m_pendingListeners.ObjectAt(listenerIndex)->OnEvent(db, "DBOpened");
+ }
+ }
+}
+
+void nsMsgDBService::FinishDBOpen(nsIMsgFolder* aFolder,
+ nsMsgDatabase* aMsgDB) {
+ uint32_t folderFlags;
+ aFolder->GetFlags(&folderFlags);
+
+ if (!(folderFlags & nsMsgFolderFlags::Virtual) &&
+ aMsgDB->m_mdbAllMsgHeadersTable) {
+ mdb_count numHdrsInTable = 0;
+ int32_t numMessages;
+ aMsgDB->m_mdbAllMsgHeadersTable->GetCount(aMsgDB->GetEnv(),
+ &numHdrsInTable);
+ aMsgDB->m_dbFolderInfo->GetNumMessages(&numMessages);
+ if (numMessages != (int32_t)numHdrsInTable) aMsgDB->SyncCounts();
+ }
+ HookupPendingListeners(aMsgDB, aFolder);
+ aMsgDB->RememberLastUseTime();
+}
+
+//----------------------------------------------------------------------
+// FindInCache - this addrefs the db it finds.
+//----------------------------------------------------------------------
+nsMsgDatabase* nsMsgDBService::FindInCache(nsIFile* dbName) {
+ for (uint32_t i = 0; i < m_dbCache.Length(); i++) {
+ nsMsgDatabase* pMessageDB = m_dbCache[i];
+ if (pMessageDB->MatchDbName(dbName)) {
+ if (pMessageDB->m_mdbStore) // don't return db without store
+ {
+ NS_ADDREF(pMessageDB);
+ return pMessageDB;
+ }
+ }
+ }
+ return nullptr;
+}
+
+// This method is called when the caller is trying to create a db without
+// having a corresponding nsIMsgFolder object. This happens in a few
+// situations, including imap folder discovery, compacting local folders,
+// and copying local folders.
+NS_IMETHODIMP nsMsgDBService::OpenMailDBFromFile(nsIFile* aFolderName,
+ nsIMsgFolder* aFolder,
+ bool aCreate,
+ bool aLeaveInvalidDB,
+ nsIMsgDatabase** pMessageDB) {
+ if (!aFolderName) return NS_ERROR_NULL_POINTER;
+
+ nsCOMPtr<nsIFile> dbPath;
+ nsresult rv = GetSummaryFileLocation(aFolderName, getter_AddRefs(dbPath));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ *pMessageDB = FindInCache(dbPath);
+ if (*pMessageDB) return NS_OK;
+
+ RefPtr<nsMailDatabase> msgDB = new nsMailDatabase;
+ NS_ENSURE_TRUE(msgDB, NS_ERROR_OUT_OF_MEMORY);
+ rv = msgDB->Open(this, dbPath, aCreate, aLeaveInvalidDB);
+ if (rv == NS_ERROR_FILE_NOT_FOUND) return rv;
+ NS_IF_ADDREF(*pMessageDB = msgDB);
+ if (aCreate && msgDB && rv == NS_MSG_ERROR_FOLDER_SUMMARY_MISSING) rv = NS_OK;
+ if (NS_SUCCEEDED(rv)) msgDB->m_folder = aFolder;
+ return rv;
+}
+
+NS_IMETHODIMP nsMsgDBService::CreateNewDB(nsIMsgFolder* aFolder,
+ nsIMsgDatabase** _retval) {
+ NS_ENSURE_ARG(aFolder);
+
+ nsCOMPtr<nsIMsgIncomingServer> incomingServer;
+ nsresult rv = aFolder->GetServer(getter_AddRefs(incomingServer));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<nsIFile> summaryFilePath;
+ rv = aFolder->GetSummaryFile(getter_AddRefs(summaryFilePath));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCString localDatabaseType;
+ incomingServer->GetLocalDatabaseType(localDatabaseType);
+ nsAutoCString dbContractID("@mozilla.org/nsMsgDatabase/msgDB-");
+ dbContractID.Append(localDatabaseType.get());
+
+ nsCOMPtr<nsIMsgDatabase> msgDB = do_CreateInstance(dbContractID.get(), &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsMsgDatabase* msgDatabase = static_cast<nsMsgDatabase*>(msgDB.get());
+
+ msgDatabase->m_folder = aFolder;
+ rv = msgDatabase->Open(this, summaryFilePath, true, true);
+
+ // We are trying to create a new database, but that implies that it did not
+ // already exist. Open returns NS_MSG_ERROR_FOLDER_SUMMARY_MISSING for the
+ // successful creation of a new database. But if it existed for some
+ // reason, then we would get rv = NS_OK instead. That is a "failure"
+ // from our perspective, so we want to return a failure since we are not
+ // returning a valid database object.
+ NS_ENSURE_TRUE(rv == NS_MSG_ERROR_FOLDER_SUMMARY_MISSING,
+ NS_SUCCEEDED(rv) ? NS_ERROR_FILE_ALREADY_EXISTS : rv);
+
+ NS_ADDREF(*_retval = msgDB);
+
+ HookupPendingListeners(msgDB, aFolder);
+
+ msgDatabase->RememberLastUseTime();
+
+ return NS_OK;
+}
+
+/* void registerPendingListener (in nsIMsgFolder aFolder, in nsIDBChangeListener
+ * aListener); */
+NS_IMETHODIMP nsMsgDBService::RegisterPendingListener(
+ nsIMsgFolder* aFolder, nsIDBChangeListener* aListener) {
+ // need to make sure we don't hold onto these forever. Maybe a shutdown
+ // listener? if there is a db open on this folder already, we should register
+ // the listener.
+ m_foldersPendingListeners.AppendObject(aFolder);
+ m_pendingListeners.AppendObject(aListener);
+ nsCOMPtr<nsIMsgDatabase> openDB;
+ CachedDBForFolder(aFolder, getter_AddRefs(openDB));
+ if (openDB) openDB->AddListener(aListener);
+ return NS_OK;
+}
+
+/* void unregisterPendingListener (in nsIDBChangeListener aListener); */
+NS_IMETHODIMP nsMsgDBService::UnregisterPendingListener(
+ nsIDBChangeListener* aListener) {
+ int32_t listenerIndex = m_pendingListeners.IndexOfObject(aListener);
+ if (listenerIndex != -1) {
+ nsCOMPtr<nsIMsgDatabase> msgDB;
+ CachedDBForFolder(m_foldersPendingListeners[listenerIndex],
+ getter_AddRefs(msgDB));
+ if (msgDB) msgDB->RemoveListener(aListener);
+ m_foldersPendingListeners.RemoveObjectAt(listenerIndex);
+ m_pendingListeners.RemoveObjectAt(listenerIndex);
+ return NS_OK;
+ }
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP nsMsgDBService::CachedDBForFolder(nsIMsgFolder* aFolder,
+ nsIMsgDatabase** aRetDB) {
+ NS_ENSURE_ARG_POINTER(aFolder);
+ NS_ENSURE_ARG_POINTER(aRetDB);
+
+ nsCOMPtr<nsIFile> summaryFilePath;
+ nsresult rv = aFolder->GetSummaryFile(getter_AddRefs(summaryFilePath));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ *aRetDB = FindInCache(summaryFilePath);
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDBService::ForceFolderDBClosed(nsIMsgFolder* aFolder) {
+ nsCOMPtr<nsIMsgDatabase> mailDB;
+ nsresult rv = CachedDBForFolder(aFolder, getter_AddRefs(mailDB));
+ if (mailDB) {
+ mailDB->ForceClosed();
+ }
+ return rv;
+}
+
+NS_IMETHODIMP nsMsgDBService::GetOpenDBs(
+ nsTArray<RefPtr<nsIMsgDatabase>>& aOpenDBs) {
+ aOpenDBs.Clear();
+ aOpenDBs.SetCapacity(m_dbCache.Length());
+ for (auto db : m_dbCache) {
+ aOpenDBs.AppendElement(db);
+ }
+ return NS_OK;
+}
+
+static bool gGotGlobalPrefs = false;
+static bool gThreadWithoutRe = true;
+static bool gStrictThreading = false;
+static bool gCorrectThreading = false;
+
+void nsMsgDatabase::GetGlobalPrefs() {
+ if (!gGotGlobalPrefs) {
+ GetBoolPref("mail.thread_without_re", &gThreadWithoutRe);
+ GetBoolPref("mail.strict_threading", &gStrictThreading);
+ GetBoolPref("mail.correct_threading", &gCorrectThreading);
+ gGotGlobalPrefs = true;
+ }
+}
+
+nsresult nsMsgDatabase::AddHdrToCache(
+ nsIMsgDBHdr* hdr, nsMsgKey key) // do we want key? We could get it from hdr
+{
+ if (m_bCacheHeaders) {
+ if (!m_cachedHeaders)
+ m_cachedHeaders = new PLDHashTable(
+ &gMsgDBHashTableOps, sizeof(struct MsgHdrHashElement), m_cacheSize);
+ if (m_cachedHeaders) {
+ if (key == nsMsgKey_None) hdr->GetMessageKey(&key);
+ if (m_cachedHeaders->EntryCount() > m_cacheSize) ClearHdrCache(true);
+ PLDHashEntryHdr* entry =
+ m_cachedHeaders->Add((void*)(uintptr_t)key, mozilla::fallible);
+ if (!entry) return NS_ERROR_OUT_OF_MEMORY; // XXX out of memory
+
+ MsgHdrHashElement* element = static_cast<MsgHdrHashElement*>(entry);
+ element->mHdr = hdr;
+ element->mKey = key;
+ NS_ADDREF(hdr); // make the cache hold onto the header
+ return NS_OK;
+ }
+ }
+ return NS_ERROR_FAILURE;
+}
+
+NS_IMETHODIMP nsMsgDatabase::SetMsgHdrCacheSize(uint32_t aSize) {
+ m_cacheSize = aSize;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDatabase::GetMsgHdrCacheSize(uint32_t* aSize) {
+ NS_ENSURE_ARG_POINTER(aSize);
+ *aSize = m_cacheSize;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDatabase::GetLastUseTime(PRTime* aTime) {
+ NS_ENSURE_ARG_POINTER(aTime);
+ *aTime = m_lastUseTime;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDatabase::SetLastUseTime(PRTime aTime) {
+ gLastUseTime = m_lastUseTime = aTime;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDatabase::GetDatabaseSize(int64_t* _retval) {
+ NS_ENSURE_ARG_POINTER(_retval);
+
+ nsresult rv;
+ bool exists;
+ NS_ENSURE_TRUE(m_dbFile, NS_ERROR_NULL_POINTER);
+ rv = m_dbFile->Exists(&exists);
+ if (NS_SUCCEEDED(rv)) {
+ if (exists)
+ rv = m_dbFile->GetFileSize(_retval);
+ else
+ *_retval = 0;
+ }
+
+ return rv;
+}
+
+NS_IMETHODIMP nsMsgDatabase::ClearCachedHdrs() {
+ ClearCachedObjects(false);
+#ifdef DEBUG_bienvenu1
+ if (mRefCnt > 1) {
+ NS_ASSERTION(false, "");
+ printf("someone's holding onto db - refs = %ld\n", mRefCnt);
+ }
+#endif
+ return NS_OK;
+}
+
+// Invalidate any outstanding message enumerators using this db.
+void nsMsgDatabase::InvalidateEnumerators() {
+ RefPtr<nsMsgDatabase> kungFuDeathGrip(this);
+ // Work in reverse, as the enumerators remove themselves from the list.
+ {
+ auto n = m_msgEnumerators.Length();
+ for (auto i = n; i > 0; --i) {
+ m_msgEnumerators[i - 1]->Invalidate();
+ }
+ }
+ // And again for thread enumerators.
+ {
+ auto n = m_threadEnumerators.Length();
+ for (auto i = n; i > 0; --i) {
+ m_threadEnumerators[i - 1]->Invalidate();
+ }
+ }
+}
+
+nsMsgThread* nsMsgDatabase::FindExistingThread(nsMsgKey threadId) {
+ uint32_t numThreads = m_threads.Length();
+ for (uint32_t i = 0; i < numThreads; i++)
+ if (m_threads[i]->m_threadKey == threadId) return m_threads[i];
+
+ return nullptr;
+}
+
+void nsMsgDatabase::ClearThreads() {
+ // clear out existing threads
+ nsTArray<nsMsgThread*> copyThreads;
+ copyThreads.SwapElements(m_threads);
+
+ uint32_t numThreads = copyThreads.Length();
+ for (uint32_t i = 0; i < numThreads; i++) copyThreads[i]->Clear();
+}
+
+void nsMsgDatabase::ClearCachedObjects(bool dbGoingAway) {
+ ClearHdrCache(false);
+#ifdef DEBUG_DavidBienvenu
+ if (m_headersInUse && m_headersInUse->EntryCount() > 0) {
+ NS_ASSERTION(false, "leaking headers");
+ printf("leaking %d headers in %s\n", m_headersInUse->EntryCount(),
+ m_dbFile->HumanReadablePath().get());
+ }
+#endif
+ m_cachedThread = nullptr;
+ m_cachedThreadId = nsMsgKey_None;
+ // We should only clear the use hdr cache when the db is going away, or we
+ // could end up with multiple copies of the same logical msg hdr, which will
+ // lead to ref-counting problems.
+ if (dbGoingAway) {
+ ClearUseHdrCache();
+ ClearThreads();
+ }
+ m_thumb = nullptr;
+}
+
+nsresult nsMsgDatabase::ClearHdrCache(bool reInit) {
+ if (m_cachedHeaders) {
+    // save this away in case we re-enter this code.
+ PLDHashTable* saveCachedHeaders = m_cachedHeaders;
+ m_cachedHeaders = nullptr;
+ for (auto iter = saveCachedHeaders->Iter(); !iter.Done(); iter.Next()) {
+ auto element = static_cast<MsgHdrHashElement*>(iter.Get());
+ if (element) NS_IF_RELEASE(element->mHdr);
+ }
+
+ if (reInit) {
+ saveCachedHeaders->ClearAndPrepareForLength(m_cacheSize);
+ m_cachedHeaders = saveCachedHeaders;
+ } else {
+ delete saveCachedHeaders;
+ }
+ }
+ return NS_OK;
+}
+
+nsresult nsMsgDatabase::RemoveHdrFromCache(nsIMsgDBHdr* hdr, nsMsgKey key) {
+ if (m_cachedHeaders) {
+ if (key == nsMsgKey_None) hdr->GetMessageKey(&key);
+
+ PLDHashEntryHdr* entry =
+ m_cachedHeaders->Search((const void*)(uintptr_t)key);
+ if (entry) {
+ m_cachedHeaders->Remove((void*)(uintptr_t)key);
+ NS_RELEASE(hdr); // get rid of extra ref the cache was holding.
+ }
+ }
+ return NS_OK;
+}
+
+nsresult nsMsgDatabase::GetHdrFromUseCache(nsMsgKey key, nsIMsgDBHdr** result) {
+ if (!result) return NS_ERROR_NULL_POINTER;
+
+ nsresult rv = NS_ERROR_FAILURE;
+
+ *result = nullptr;
+
+ if (m_headersInUse) {
+ PLDHashEntryHdr* entry =
+ m_headersInUse->Search((const void*)(uintptr_t)key);
+ if (entry) {
+ MsgHdrHashElement* element = static_cast<MsgHdrHashElement*>(entry);
+ *result = element->mHdr;
+ }
+ if (*result) {
+ NS_ADDREF(*result);
+ rv = NS_OK;
+ }
+ }
+ return rv;
+}
+
+PLDHashTableOps nsMsgDatabase::gMsgDBHashTableOps = {
+ HashKey, MatchEntry, MoveEntry, ClearEntry, nullptr};
+
+// HashKey is supposed to maximize entropy in the low order bits, and the key
+// as is, should do that.
+PLDHashNumber nsMsgDatabase::HashKey(const void* aKey) {
+ return PLDHashNumber(NS_PTR_TO_INT32(aKey));
+}
+
+bool nsMsgDatabase::MatchEntry(const PLDHashEntryHdr* aEntry,
+ const void* aKey) {
+ const MsgHdrHashElement* hdr = static_cast<const MsgHdrHashElement*>(aEntry);
+ return aKey == (const void*)(uintptr_t)
+ hdr->mKey; // ### or get the key from the hdr...
+}
+
+void nsMsgDatabase::MoveEntry(PLDHashTable* aTable,
+ const PLDHashEntryHdr* aFrom,
+ PLDHashEntryHdr* aTo) {
+ new (KnownNotNull, aTo)
+ MsgHdrHashElement(std::move(*((MsgHdrHashElement*)aFrom)));
+}
+
+void nsMsgDatabase::ClearEntry(PLDHashTable* aTable, PLDHashEntryHdr* aEntry) {
+ MsgHdrHashElement* element = static_cast<MsgHdrHashElement*>(aEntry);
+ element->mHdr = nullptr; // eh? Need to release this or not?
+ element->mKey = nsMsgKey_None; // eh?
+}
+
+// Register |hdr| in the in-use header cache keyed by message key, creating
+// the hash table lazily on first use. Takes a strong reference to |hdr|
+// (explicit NS_ADDREF below). Pass key == nsMsgKey_None to have the key read
+// from the header itself.
+nsresult nsMsgDatabase::AddHdrToUseCache(nsIMsgDBHdr* hdr, nsMsgKey key) {
+  if (!m_headersInUse) {
+    // Size the table for the larger of the fixed minimum and the current
+    // header count, to avoid growing while it fills.
+    mdb_count numHdrs = MSG_HASH_SIZE;
+    if (m_mdbAllMsgHeadersTable)
+      m_mdbAllMsgHeadersTable->GetCount(GetEnv(), &numHdrs);
+    m_headersInUse =
+        new PLDHashTable(&gMsgDBHashTableOps, sizeof(struct MsgHdrHashElement),
+                         std::max((mdb_count)MSG_HASH_SIZE, numHdrs));
+  }
+  if (m_headersInUse) {
+    if (key == nsMsgKey_None) hdr->GetMessageKey(&key);
+    // Fallible add: the only failure mode is allocation failure.
+    PLDHashEntryHdr* entry =
+        m_headersInUse->Add((void*)(uintptr_t)key, mozilla::fallible);
+    if (!entry) return NS_ERROR_OUT_OF_MEMORY;  // XXX out of memory
+
+    MsgHdrHashElement* element = static_cast<MsgHdrHashElement*>(entry);
+    element->mHdr = hdr;
+    element->mKey = key;
+    // the hash table won't add ref, we'll do it ourselves
+    // stand for the addref that CreateMsgHdr normally does.
+    NS_ADDREF(hdr);
+    return NS_OK;
+  }
+
+  return NS_ERROR_OUT_OF_MEMORY;
+}
+
+// Tear down the in-use header cache, first detaching every still-live header
+// from the underlying Mork rows, which are about to go away.
+nsresult nsMsgDatabase::ClearUseHdrCache() {
+  if (m_headersInUse) {
+    // clear mdb row pointers of any headers still in use, because the
+    // underlying db is going away.
+    for (auto iter = m_headersInUse->Iter(); !iter.Done(); iter.Next()) {
+      auto element = static_cast<const MsgHdrHashElement*>(iter.Get());
+      if (element && element->mHdr) {
+        nsMsgHdr* msgHdr = static_cast<nsMsgHdr*>(
+            element->mHdr);  // closed system, so this is ok
+        // clear out m_mdbRow member variable - the db is going away, which
+        // means that this member variable might very well point to a mork db
+        // that is gone.
+        NS_IF_RELEASE(msgHdr->m_mdbRow);
+        // NS_IF_RELEASE(msgHdr->m_mdb);
+      }
+    }
+    delete m_headersInUse;
+    m_headersInUse = nullptr;
+  }
+  return NS_OK;
+}
+
+// Drop |hdr|'s entry from the in-use cache. A key of nsMsgKey_None means
+// "ask the header for its key". Removing a nonexistent key is a no-op.
+nsresult nsMsgDatabase::RemoveHdrFromUseCache(nsIMsgDBHdr* hdr, nsMsgKey key) {
+  if (!m_headersInUse) return NS_OK;
+
+  if (key == nsMsgKey_None) hdr->GetMessageKey(&key);
+  m_headersInUse->Remove((void*)(uintptr_t)key);
+  return NS_OK;
+}
+
+// Produce an nsIMsgDBHdr for |hdrRow|/|key|, reusing an already-live header
+// from the in-use cache when possible. Takes over the caller's reference to
+// |hdrRow|; the returned header in |*result| is AddRef'd for the caller.
+nsresult nsMsgDatabase::CreateMsgHdr(nsIMdbRow* hdrRow, nsMsgKey key,
+                                     nsIMsgDBHdr** result) {
+  NS_ENSURE_ARG_POINTER(hdrRow);
+  NS_ENSURE_ARG_POINTER(result);
+
+  // Cache hit: GetHdrFromUseCache AddRefs *result for us; we no longer need
+  // the row reference that was transferred in.
+  nsresult rv = GetHdrFromUseCache(key, result);
+  if (NS_SUCCEEDED(rv) && *result) {
+    hdrRow->Release();
+    return rv;
+  }
+
+  // Mozilla's operator new is infallible (it aborts on OOM), so the old
+  // `if (!msgHdr) return NS_ERROR_OUT_OF_MEMORY;` branch was dead code.
+  nsMsgHdr* msgHdr = new nsMsgHdr(this, hdrRow);
+  msgHdr->SetMessageKey(key);
+  // don't need to addref here; the caching path performs the addref that a
+  // cache hit via GetHdrFromUseCache would normally supply.
+  *result = msgHdr;
+
+  AddHdrToCache(msgHdr, key);
+
+  return NS_OK;
+}
+
+// Register a change listener; each listener is kept at most once.
+NS_IMETHODIMP nsMsgDatabase::AddListener(nsIDBChangeListener* aListener) {
+  NS_ENSURE_ARG_POINTER(aListener);
+  m_ChangeListeners.AppendElementUnlessExists(aListener);
+  return NS_OK;
+}
+
+// Unregister a change listener; removing one that was never added is benign.
+NS_IMETHODIMP nsMsgDatabase::RemoveListener(nsIDBChangeListener* aListener) {
+  NS_ENSURE_ARG_POINTER(aListener);
+  m_ChangeListeners.RemoveElement(aListener);
+  return NS_OK;
+}
+
+// XXX should we return rv for listener->propertyfunc_?
+// Broadcast helper: iterate m_ChangeListeners with a ForwardIterator (safe
+// against add/remove during notification) and invoke |propertyfunc_| with
+// |params_| on each listener. Listener return values are ignored.
+#define NOTIFY_LISTENERS(propertyfunc_, params_) \
+  PR_BEGIN_MACRO \
+  nsTObserverArray<nsCOMPtr<nsIDBChangeListener>>::ForwardIterator iter( \
+      m_ChangeListeners); \
+  nsCOMPtr<nsIDBChangeListener> listener; \
+  while (iter.HasMore()) { \
+    listener = iter.GetNext(); \
+    listener->propertyfunc_ params_; \
+  } \
+  PR_END_MACRO
+
+// change announcer methods - just broadcast to all listeners.
+NS_IMETHODIMP nsMsgDatabase::NotifyHdrChangeAll(
+    nsIMsgDBHdr* aHdrChanged, uint32_t aOldFlags, uint32_t aNewFlags,
+    nsIDBChangeListener* aInstigator) {
+  // We will only notify the change if the header exists in the database.
+  // This allows database functions to be usable in both the case where the
+  // header is in the db, or the header is not so no notifications should be
+  // given.
+  nsMsgKey key;
+  bool inDb = false;
+  if (aHdrChanged) {
+    // key is only assigned (and only meaningful) on this branch; inDb stays
+    // false when aHdrChanged is null, so no notification fires.
+    aHdrChanged->GetMessageKey(&key);
+    ContainsKey(key, &inDb);
+  }
+  if (inDb)
+    NOTIFY_LISTENERS(OnHdrFlagsChanged,
+                     (aHdrChanged, aOldFlags, aNewFlags, aInstigator));
+  return NS_OK;
+}
+
+// Broadcast a read-state change to all registered listeners.
+NS_IMETHODIMP nsMsgDatabase::NotifyReadChanged(
+    nsIDBChangeListener* aInstigator) {
+  NOTIFY_LISTENERS(OnReadChanged, (aInstigator));
+  return NS_OK;
+}
+
+// Broadcast a junk-score change to all registered listeners.
+NS_IMETHODIMP nsMsgDatabase::NotifyJunkScoreChanged(
+    nsIDBChangeListener* aInstigator) {
+  NOTIFY_LISTENERS(OnJunkScoreChanged, (aInstigator));
+  return NS_OK;
+}
+
+// Broadcast a header deletion to all registered listeners.
+NS_IMETHODIMP nsMsgDatabase::NotifyHdrDeletedAll(
+    nsIMsgDBHdr* aHdrDeleted, nsMsgKey aParentKey, int32_t aFlags,
+    nsIDBChangeListener* aInstigator) {
+  NOTIFY_LISTENERS(OnHdrDeleted,
+                   (aHdrDeleted, aParentKey, aFlags, aInstigator));
+  return NS_OK;
+}
+
+// Broadcast a header addition to all registered listeners.
+NS_IMETHODIMP nsMsgDatabase::NotifyHdrAddedAll(
+    nsIMsgDBHdr* aHdrAdded, nsMsgKey aParentKey, int32_t aFlags,
+    nsIDBChangeListener* aInstigator) {
+#ifdef DEBUG_bienvenu1
+  // Fixed: this printf referenced undeclared `keyAdded`/`parentKey` (stale
+  // names from an older signature) and would not compile with the define on.
+  nsMsgKey keyAdded = nsMsgKey_None;
+  if (aHdrAdded) aHdrAdded->GetMessageKey(&keyAdded);
+  printf("notifying add of %ld parent %ld\n", (long)keyAdded,
+         (long)aParentKey);
+#endif
+  NOTIFY_LISTENERS(OnHdrAdded, (aHdrAdded, aParentKey, aFlags, aInstigator));
+  return NS_OK;
+}
+
+// Broadcast a thread-parent change (reparenting) to all registered listeners.
+NS_IMETHODIMP nsMsgDatabase::NotifyParentChangedAll(
+    nsMsgKey aKeyReparented, nsMsgKey aOldParent, nsMsgKey aNewParent,
+    nsIDBChangeListener* aInstigator) {
+  NOTIFY_LISTENERS(OnParentChanged,
+                   (aKeyReparented, aOldParent, aNewParent, aInstigator));
+  return NS_OK;
+}
+
+// Tell all listeners this announcer (the database) is shutting down, so they
+// can drop their references to it.
+NS_IMETHODIMP nsMsgDatabase::NotifyAnnouncerGoingAway(void) {
+  NOTIFY_LISTENERS(OnAnnouncerGoingAway, (this));
+  return NS_OK;
+}
+
+// Returns true if |dbFile| refers to the same on-disk file that backs this
+// database; false as well when no backing file has been set yet.
+bool nsMsgDatabase::MatchDbName(nsIFile* dbFile) {
+  NS_ENSURE_TRUE(m_dbFile, false);
+  return m_dbFile->NativePath().Equals(dbFile->NativePath());
+}
+
+// Add an open database to the service-wide cache. Debug builds assert that
+// the same folder's database is not cached twice and warn when an unusually
+// large number of databases are open at once.
+void nsMsgDBService::AddToCache(nsMsgDatabase* pMessageDB) {
+#ifdef DEBUG_David_Bienvenu
+  NS_ASSERTION(m_dbCache.Length() < 50, "50 or more open db's");
+#endif
+#ifdef DEBUG
+  if (pMessageDB->m_folder) {
+    nsCOMPtr<nsIMsgDatabase> msgDB;
+    CachedDBForFolder(pMessageDB->m_folder, getter_AddRefs(msgDB));
+    NS_ASSERTION(!msgDB, "shouldn't have db in cache");
+  }
+#endif
+  m_dbCache.AppendElement(pMessageDB);
+}
+
+/**
+ * Log the open db's, and how many headers are in memory.
+ * Diagnostic only; emitted at LogLevel::Info on the DBLog module.
+ */
+void nsMsgDBService::DumpCache() {
+  nsMsgDatabase* db = nullptr;
+  MOZ_LOG(DBLog, LogLevel::Info, ("%zu open DBs", m_dbCache.Length()));
+  for (uint32_t i = 0; i < m_dbCache.Length(); i++) {
+    db = m_dbCache.ElementAt(i);
+    MOZ_LOG(DBLog, LogLevel::Info,
+            ("%s - %" PRIu32 " hdrs in use",
+             db->m_dbFile->HumanReadablePath().get(),
+             db->m_headersInUse ? db->m_headersInUse->EntryCount() : 0));
+  }
+}
+
+// Memory Reporting implementations
+
+// Sum heap memory attributable to this database: folder info, the Mork heap,
+// listener/thread/new-message arrays, live headers, and the references map.
+size_t nsMsgDatabase::SizeOfExcludingThis(
+    mozilla::MallocSizeOf aMallocSizeOf) const {
+  size_t totalSize = 0;
+  if (m_dbFolderInfo)
+    totalSize += m_dbFolderInfo->SizeOfExcludingThis(aMallocSizeOf);
+  if (m_mdbEnv) {
+    nsIMdbHeap* morkHeap = nullptr;
+    m_mdbEnv->GetHeap(&morkHeap);
+    if (morkHeap) totalSize += morkHeap->GetUsedSize();
+  }
+  totalSize += m_newSet.ShallowSizeOfExcludingThis(aMallocSizeOf);
+  totalSize += m_ChangeListeners.ShallowSizeOfExcludingThis(aMallocSizeOf);
+  totalSize += m_threads.ShallowSizeOfExcludingThis(aMallocSizeOf);
+  // We have two tables of header objects, but every header in m_cachedHeaders
+  // should be in m_headersInUse.
+  // double-counting...
+  size_t headerSize = 0;
+  if (m_headersInUse) {
+    headerSize = m_headersInUse->ShallowSizeOfIncludingThis(aMallocSizeOf);
+    for (auto iter = m_headersInUse->Iter(); !iter.Done(); iter.Next()) {
+      auto entry = static_cast<MsgHdrHashElement*>(iter.Get());
+      // Sigh, this is dangerous, but so long as this is a closed system, this
+      // is safe.
+      // NOTE(review): entry->mHdr is dereferenced without a null check here,
+      // although ClearUseHdrCache guards against null -- confirm entries can
+      // never be iterated in the cleared state.
+      headerSize += static_cast<nsMsgHdr*>(entry->mHdr)
+                        ->SizeOfIncludingThis(aMallocSizeOf);
+    }
+  }
+  totalSize += headerSize;
+  if (m_msgReferences)
+    totalSize += m_msgReferences->ShallowSizeOfIncludingThis(aMallocSizeOf);
+  return totalSize;
+}
+
+namespace mozilla {
+namespace mailnews {
+
+MOZ_DEFINE_MALLOC_SIZE_OF(GetMallocSize)
+
+// Memory reporter for a single nsMsgDatabase. Holds only a weak reference so
+// the reporter never keeps a database alive; if the database has already
+// gone away the report shows 0 bytes.
+class MsgDBReporter final : public nsIMemoryReporter {
+  nsWeakPtr mDatabase;
+
+ public:
+  explicit MsgDBReporter(nsMsgDatabase* db)
+      : mDatabase(do_GetWeakReference(db)) {}
+
+  NS_DECL_ISUPPORTS
+  NS_IMETHOD GetName(nsACString& aName) {
+    aName.AssignLiteral("msg-database-objects");
+    return NS_OK;
+  }
+
+  NS_IMETHOD CollectReports(nsIHandleReportCallback* aCb, nsISupports* aClosure,
+                            bool aAnonymize) override {
+    nsCString path;
+    GetPath(path, aAnonymize);
+    nsCOMPtr<nsIMsgDatabase> database = do_QueryReferent(mDatabase);
+    nsMsgDatabase* db =
+        database ? static_cast<nsMsgDatabase*>(database.get()) : nullptr;
+    return aCb->Callback(EmptyCString(), path, nsIMemoryReporter::KIND_HEAP,
+                         nsIMemoryReporter::UNITS_BYTES,
+                         db ? db->SizeOfIncludingThis(GetMallocSize) : 0,
+                         "Memory used for the folder database."_ns, aClosure);
+  }
+
+  // Build the memory-report path for this database's folder. Slashes in the
+  // folder URL are flipped to backslashes so the URL doesn't introduce extra
+  // levels into the memory-report tree.
+  void GetPath(nsACString& memoryPath, bool aAnonymize) {
+    memoryPath.AssignLiteral("explicit/maildb/database(");
+    nsCOMPtr<nsIMsgDatabase> database = do_QueryReferent(mDatabase);
+    nsCOMPtr<nsIMsgFolder> folder;
+    if (database) database->GetFolder(getter_AddRefs(folder));
+    if (folder) {
+      if (aAnonymize)
+        memoryPath.AppendLiteral("<anonymized>");
+      else {
+        nsAutoCString folderURL;
+        folder->GetFolderURL(folderURL);
+        folderURL.ReplaceChar('/', '\\');
+        memoryPath += folderURL;
+      }
+    } else {
+      memoryPath.AppendLiteral("UNKNOWN-FOLDER");
+    }
+    memoryPath.Append(')');
+  }
+
+ private:
+  ~MsgDBReporter() {}
+};
+
+NS_IMPL_ISUPPORTS(MsgDBReporter, nsIMemoryReporter)
+}  // namespace mailnews
+}  // namespace mozilla
+
+// Constructor: zero/null all Mork tokens, tables and caches. The real token
+// values are assigned later by InitMDBInfo() once a store is open. Also
+// registers a weak memory reporter for this database.
+nsMsgDatabase::nsMsgDatabase()
+    : m_dbFolderInfo(nullptr),
+      m_nextPseudoMsgKey(kFirstPseudoKey),
+      m_mdbEnv(nullptr),
+      m_mdbStore(nullptr),
+      m_mdbAllMsgHeadersTable(nullptr),
+      m_mdbAllThreadsTable(nullptr),
+      m_create(false),
+      m_leaveInvalidDB(false),
+      m_mdbTokensInitialized(false),
+      m_hdrRowScopeToken(0),
+      m_hdrTableKindToken(0),
+      m_threadTableKindToken(0),
+      m_subjectColumnToken(0),
+      m_senderColumnToken(0),
+      m_messageIdColumnToken(0),
+      m_referencesColumnToken(0),
+      m_recipientsColumnToken(0),
+      m_dateColumnToken(0),
+      m_messageSizeColumnToken(0),
+      m_flagsColumnToken(0),
+      m_priorityColumnToken(0),
+      m_labelColumnToken(0),
+      m_numLinesColumnToken(0),
+      m_ccListColumnToken(0),
+      m_bccListColumnToken(0),
+      m_threadFlagsColumnToken(0),
+      m_threadIdColumnToken(0),
+      m_threadChildrenColumnToken(0),
+      m_threadUnreadChildrenColumnToken(0),
+      m_messageThreadIdColumnToken(0),
+      m_threadSubjectColumnToken(0),
+      m_messageCharSetColumnToken(0),
+      m_threadParentColumnToken(0),
+      m_threadRootKeyColumnToken(0),
+      m_threadNewestMsgDateColumnToken(0),
+      m_offlineMsgOffsetColumnToken(0),
+      m_offlineMessageSizeColumnToken(0),
+      m_headersInUse(nullptr),
+      m_cachedHeaders(nullptr),
+      m_bCacheHeaders(true),
+      m_cachedThreadId(nsMsgKey_None),
+      m_msgReferences(nullptr),
+      m_cacheSize(kMaxHdrsInCache) {
+  mMemReporter = new mozilla::mailnews::MsgDBReporter(this);
+  mozilla::RegisterWeakMemoryReporter(mMemReporter);
+}
+
+// Destructor: unregister the memory reporter, drop cached headers and
+// enumerators, remove ourselves from the service cache, and release all Mork
+// objects (tables, store, env).
+nsMsgDatabase::~nsMsgDatabase() {
+  mozilla::UnregisterWeakMemoryReporter(mMemReporter);
+  mMemReporter = nullptr;
+  // Close(FALSE); // better have already been closed.
+  ClearCachedObjects(true);
+  InvalidateEnumerators();
+  delete m_cachedHeaders;
+  delete m_headersInUse;
+
+  if (m_msgReferences) {
+    delete m_msgReferences;
+    m_msgReferences = nullptr;
+  }
+
+  // NOTE(review): m_dbFile is dereferenced unconditionally here; it appears
+  // to be set only once OpenMDB ran -- confirm a never-opened database can't
+  // reach this log line with m_dbFile null.
+  MOZ_LOG(DBLog, LogLevel::Info,
+          ("closing database %s", m_dbFile->HumanReadablePath().get()));
+
+  nsCOMPtr<nsIMsgDBService> serv(
+      do_GetService("@mozilla.org/msgDatabase/msgDBService;1"));
+  if (serv) static_cast<nsMsgDBService*>(serv.get())->RemoveFromCache(this);
+
+  // if the db folder info refers to the mdb db, we must clear it because
+  // the reference will be a dangling one soon.
+  if (m_dbFolderInfo) m_dbFolderInfo->ReleaseExternalReferences();
+  m_dbFolderInfo = nullptr;
+
+  if (m_mdbAllMsgHeadersTable) m_mdbAllMsgHeadersTable->Release();
+
+  if (m_mdbAllThreadsTable) m_mdbAllThreadsTable->Release();
+
+  if (m_mdbStore) m_mdbStore->Release();
+
+  if (m_mdbEnv) {
+    m_mdbEnv->Release();  //??? is this right?
+    m_mdbEnv = nullptr;
+  }
+  m_ChangeListeners.Clear();
+}
+
+// XPCOM boilerplate: AddRef/Release/QueryInterface for the interfaces this
+// class implements.
+NS_IMPL_ISUPPORTS(nsMsgDatabase, nsIMsgDatabase, nsIMsgOfflineOpsDatabase,
+                  nsIDBChangeAnnouncer)
+
+// Lazily fetch and cache the Mork factory, returning an AddRef'd pointer in
+// |*aMdbFactory|. Previously, if the factory service lookup failed, control
+// fell through to NS_ADDREF on a null mMdbFactory (null deref); now every
+// failure path returns an error before the AddRef.
+nsresult nsMsgDatabase::GetMDBFactory(nsIMdbFactory** aMdbFactory) {
+  if (!mMdbFactory) {
+    nsresult rv;
+    nsCOMPtr<nsIMdbFactoryService> mdbFactoryService =
+        do_GetService("@mozilla.org/db/mork;1", &rv);
+    NS_ENSURE_SUCCESS(rv, rv);
+    NS_ENSURE_TRUE(mdbFactoryService, NS_ERROR_FAILURE);
+    rv = mdbFactoryService->GetMdbFactory(getter_AddRefs(mMdbFactory));
+    NS_ENSURE_SUCCESS(rv, rv);
+    if (!mMdbFactory) return NS_ERROR_FAILURE;
+  }
+  NS_ADDREF(*aMdbFactory = mMdbFactory);
+  return NS_OK;
+}
+
+// aLeaveInvalidDB: true if caller wants back a db even out of date.
+// If so, they'll extract out the interesting info from the db, close it,
+// delete it, and then try to open the db again, prior to reparsing.
+// Thin synchronous wrapper over OpenInternal.
+nsresult nsMsgDatabase::Open(nsMsgDBService* aDBService, nsIFile* aFolderName,
+                             bool aCreate, bool aLeaveInvalidDB) {
+  const bool openSynchronously = true;
+  return OpenInternal(aDBService, aFolderName, aCreate, aLeaveInvalidDB,
+                      openSynchronously);
+}
+
+// Shared open path for sync and async callers: open the Mork file, then
+// either (async) cache this DB and let the caller finish later, or (sync)
+// run the full error triage in CheckForErrors.
+nsresult nsMsgDatabase::OpenInternal(nsMsgDBService* aDBService,
+                                     nsIFile* summaryFile, bool aCreate,
+                                     bool aLeaveInvalidDB, bool sync) {
+  MOZ_LOG(DBLog, LogLevel::Info,
+          ("nsMsgDatabase::Open(%s, %s, %p, %s)",
+           summaryFile->HumanReadablePath().get(), aCreate ? "TRUE" : "FALSE",
+           this, aLeaveInvalidDB ? "TRUE" : "FALSE"));
+
+  nsresult rv = OpenMDB(summaryFile, aCreate, sync);
+  if (NS_FAILED(rv))
+    MOZ_LOG(DBLog, LogLevel::Info,
+            ("error opening db %" PRIx32, static_cast<uint32_t>(rv)));
+
+  if (MOZ_LOG_TEST(DBLog, LogLevel::Debug)) aDBService->DumpCache();
+
+  // A missing file is reported as-is; no fallback triage applies.
+  if (rv == NS_ERROR_FILE_NOT_FOUND) return rv;
+
+  m_create = aCreate;
+  m_leaveInvalidDB = aLeaveInvalidDB;
+  if (!sync && NS_SUCCEEDED(rv)) {
+    aDBService->AddToCache(this);
+    // remember open options for when the parsing is complete.
+    return rv;
+  }
+  return CheckForErrors(rv, true, aDBService, summaryFile);
+}
+
+// Triage the result of an open attempt: decide whether the summary file is
+// usable, out of date (and should be deleted), or missing. The caller reacts
+// to NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE / _MISSING by reparsing.
+nsresult nsMsgDatabase::CheckForErrors(nsresult err, bool sync,
+                                       nsMsgDBService* aDBService,
+                                       nsIFile* summaryFile) {
+  nsCOMPtr<nsIDBFolderInfo> folderInfo;
+  bool summaryFileExists;
+  bool newFile = false;
+  bool deleteInvalidDB = false;
+
+  bool exists;
+  int64_t fileSize = 0;
+  summaryFile->Exists(&exists);
+  if (exists) summaryFile->GetFileSize(&fileSize);
+  // if the old summary doesn't exist, we're creating a new one.
+  if ((!exists || !fileSize) && m_create) newFile = true;
+
+  // An empty (zero-length) summary file counts as nonexistent.
+  summaryFileExists = exists && fileSize > 0;
+
+  if (NS_SUCCEEDED(err)) {
+    if (!m_dbFolderInfo) {
+      err = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+    } else {
+      if (!newFile && summaryFileExists) {
+        bool valid = false;
+        nsresult rv = GetSummaryValid(&valid);
+        if (NS_FAILED(rv) || !valid)
+          err = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+      }
+      // compare current version of db versus filed out version info.
+      uint32_t version;
+      m_dbFolderInfo->GetVersion(&version);
+      if (GetCurVersion() != version)
+        err = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+
+      // Check if we should force a reparse because, for example, we have
+      // reached the key limit.
+      bool forceReparse;
+      m_dbFolderInfo->GetBooleanProperty("forceReparse", false, &forceReparse);
+      if (forceReparse) {
+        NS_WARNING("Forcing a reparse presumably because key limit reached");
+        err = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+      }
+    }
+    if (NS_FAILED(err) && !m_leaveInvalidDB) deleteInvalidDB = true;
+  } else if (err != NS_MSG_ERROR_FOLDER_SUMMARY_MISSING) {
+    // No point declaring it out-of-date and trying to delete it
+    // if it's missing.
+    // We get here with NS_ERROR_FAILURE when Mork can't open the
+    // file due to too many open files. In this case there is no
+    // point to blow away the MSF file.
+    err = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+    if (!m_leaveInvalidDB) deleteInvalidDB = true;
+  }
+
+  if (deleteInvalidDB) {
+    // this will make the db folder info release its ref to the mail db...
+    m_dbFolderInfo = nullptr;
+    ForceClosed();
+    if (err == NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE)
+      summaryFile->Remove(false);
+  }
+  if (NS_FAILED(err) || newFile) {
+    // if we couldn't open file, or we have a blank one, and we're supposed
+    // to upgrade, upgrade it.
+    if (newFile && !m_leaveInvalidDB)  // caller is upgrading, and we have empty
+                                       // summary file,
+    {  // leave db around and open so caller can upgrade it.
+      err = NS_MSG_ERROR_FOLDER_SUMMARY_MISSING;
+    } else if (NS_FAILED(err) &&
+               err != NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE) {
+      Close(false);
+      summaryFile->Remove(false);  // blow away the db if it's corrupt.
+    }
+  }
+  // Synchronous opens cache the DB here; async opens cached it earlier in
+  // OpenInternal.
+  if (sync && (NS_SUCCEEDED(err) || err == NS_MSG_ERROR_FOLDER_SUMMARY_MISSING))
+    aDBService->AddToCache(this);
+  return (summaryFileExists) ? err : NS_MSG_ERROR_FOLDER_SUMMARY_MISSING;
+}
+
+/**
+ * Open the MDB database synchronously or async based on sync argument.
+ * If successful, this routine will set up the m_mdbStore and m_mdbEnv of
+ * the database object so other database calls can work.
+ *
+ * Flow: make a Mork env, try to open an existing file into a "thumb"
+ * (incremental parse handle); for sync opens, drive the thumb to completion
+ * here; otherwise, with create==true, build a brand-new file store.
+ */
+nsresult nsMsgDatabase::OpenMDB(nsIFile* dbFile, bool create, bool sync) {
+  nsCOMPtr<nsIMdbFactory> mdbFactory;
+  nsresult ret = GetMDBFactory(getter_AddRefs(mdbFactory));
+  NS_ENSURE_SUCCESS(ret, ret);
+
+  ret = mdbFactory->MakeEnv(NULL, &m_mdbEnv);
+  if (NS_SUCCEEDED(ret)) {
+    nsIMdbHeap* dbHeap = nullptr;
+
+    if (m_mdbEnv) m_mdbEnv->SetAutoClear(true);
+    PathString dbName = dbFile->NativePath();
+    // Remember our backing file for later (MatchDbName, logging, cache keys).
+    ret = dbFile->Clone(getter_AddRefs(m_dbFile));
+    NS_ENSURE_SUCCESS(ret, ret);
+    bool exists = false;
+    ret = dbFile->Exists(&exists);
+    if (!exists) {
+      ret = NS_MSG_ERROR_FOLDER_SUMMARY_MISSING;
+    }
+    // If m_thumb is set, we're asynchronously opening the db already.
+    else if (!m_thumb) {
+      mdbOpenPolicy inOpenPolicy;
+      mdb_bool canOpen;
+      mdbYarn outFormatVersion;
+
+      nsIMdbFile* oldFile = nullptr;
+      ret = mdbFactory->OpenOldFile(
+          m_mdbEnv, dbHeap, dbName.get(),
+          mdbBool_kFalse,  // not readonly, we want modifiable
+          &oldFile);
+      if (oldFile) {
+        if (NS_SUCCEEDED(ret)) {
+          // Verify the on-disk format before starting a full open.
+          ret = mdbFactory->CanOpenFilePort(m_mdbEnv,
+                                            oldFile,  // the file to investigate
+                                            &canOpen, &outFormatVersion);
+          if (NS_SUCCEEDED(ret) && canOpen) {
+            inOpenPolicy.mOpenPolicy_ScopePlan.mScopeStringSet_Count = 0;
+            inOpenPolicy.mOpenPolicy_MinMemory = 0;
+            inOpenPolicy.mOpenPolicy_MaxLazy = 0;
+
+            ret = mdbFactory->OpenFileStore(m_mdbEnv, dbHeap, oldFile,
+                                            &inOpenPolicy,
+                                            getter_AddRefs(m_thumb));
+          } else
+            ret = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+        }
+        NS_RELEASE(oldFile);  // always release our file ref, store has own
+      }
+    }
+    if (NS_SUCCEEDED(ret) && m_thumb && sync) {
+      // Synchronous open: pump the thumb until the parse finishes or fails.
+      mdb_count outTotal;    // total somethings to do in operation
+      mdb_count outCurrent;  // subportion of total completed so far
+      mdb_bool outDone = false;  // is operation finished?
+      mdb_bool outBroken;        // is operation irreparably dead and broken?
+      do {
+        ret = m_thumb->DoMore(m_mdbEnv, &outTotal, &outCurrent, &outDone,
+                              &outBroken);
+        if (NS_FAILED(ret)) {  // mork isn't really doing NS errors yet.
+          outDone = true;
+          break;
+        }
+      } while (NS_SUCCEEDED(ret) && !outBroken && !outDone);
+      // m_mdbEnv->ClearErrors(); // ### temporary...
+      // only 0 is a non-error return.
+      if (NS_SUCCEEDED(ret) && outDone) {
+        ret = mdbFactory->ThumbToOpenStore(m_mdbEnv, m_thumb, &m_mdbStore);
+        if (NS_SUCCEEDED(ret))
+          ret = (m_mdbStore) ? InitExistingDB() : NS_ERROR_FAILURE;
+      }
+#ifdef DEBUG_bienvenu1
+      DumpContents();
+#endif
+      m_thumb = nullptr;
+    } else if (create)  // ### need error code saying why open file store failed
+    {
+      nsIMdbFile* newFile = 0;
+      ret = mdbFactory->CreateNewFile(m_mdbEnv, dbHeap, dbName.get(), &newFile);
+      if (NS_FAILED(ret)) ret = NS_ERROR_FILE_NOT_FOUND;
+      if (newFile) {
+        if (NS_SUCCEEDED(ret)) {
+          mdbOpenPolicy inOpenPolicy;
+
+          inOpenPolicy.mOpenPolicy_ScopePlan.mScopeStringSet_Count = 0;
+          inOpenPolicy.mOpenPolicy_MinMemory = 0;
+          inOpenPolicy.mOpenPolicy_MaxLazy = 0;
+
+          ret = mdbFactory->CreateNewFileStore(m_mdbEnv, dbHeap, newFile,
+                                               &inOpenPolicy, &m_mdbStore);
+          if (NS_SUCCEEDED(ret))
+            ret = (m_mdbStore) ? InitNewDB() : NS_ERROR_FAILURE;
+        }
+        NS_RELEASE(newFile);  // always release our file ref, store has own
+      }
+    }
+  }
+
+  return ret;
+}
+
+// Close the underlying Mork store, optionally flushing pending changes with
+// a session commit first.
+nsresult nsMsgDatabase::CloseMDB(bool commit) {
+  if (commit) {
+    Commit(nsMsgDBCommitType::kSessionCommit);
+  }
+  return NS_OK;
+}
+
+// force the database to close - this'll flush out anybody holding onto
+// a database without having a listener!
+// This is evil in the com world, but there are times we need to delete the
+// file.
+NS_IMETHODIMP nsMsgDatabase::ForceClosed() {
+  nsresult err = NS_OK;
+
+  // make sure someone has a reference so object won't get deleted out from
+  // under us.
+  NS_ADDREF_THIS();
+  NotifyAnnouncerGoingAway();
+  // make sure dbFolderInfo isn't holding onto mork stuff because mork db is
+  // going away
+  if (m_dbFolderInfo) m_dbFolderInfo->ReleaseExternalReferences();
+  m_dbFolderInfo = nullptr;
+
+  err = CloseMDB(true);  // Backup DB will try to recover info, so commit
+  ClearCachedObjects(true);
+  InvalidateEnumerators();
+  // Release all Mork tables and the store itself so the file can be deleted.
+  if (m_mdbAllMsgHeadersTable) {
+    m_mdbAllMsgHeadersTable->Release();
+    m_mdbAllMsgHeadersTable = nullptr;
+  }
+  if (m_mdbAllThreadsTable) {
+    m_mdbAllThreadsTable->Release();
+    m_mdbAllThreadsTable = nullptr;
+  }
+  if (m_mdbStore) {
+    m_mdbStore->Release();
+    m_mdbStore = nullptr;
+  }
+
+  // There'd better not be any listeners, because we're going away.
+  NS_ASSERTION(m_ChangeListeners.IsEmpty(),
+               "shouldn't have any listeners left");
+  m_ChangeListeners.Clear();
+
+  NS_RELEASE_THIS();
+  return err;
+}
+
+// Return an AddRef'd pointer to the per-folder info object. Fails with
+// NS_ERROR_NULL_POINTER when the info is missing (corrupt/uninitialized db).
+NS_IMETHODIMP nsMsgDatabase::GetDBFolderInfo(nsIDBFolderInfo** result) {
+  if (!m_dbFolderInfo) {
+    NS_ERROR("db must be corrupt");
+    return NS_ERROR_NULL_POINTER;
+  }
+  NS_ADDREF(*result = m_dbFolderInfo);
+  return NS_OK;
+}
+
+// Return the owning folder (may be null), AddRef'd when present.
+NS_IMETHODIMP nsMsgDatabase::GetFolder(nsIMsgFolder** aFolder) {
+  NS_ENSURE_ARG_POINTER(aFolder);
+  *aFolder = m_folder;
+  NS_IF_ADDREF(*aFolder);
+  return NS_OK;
+}
+
+// Commit pending changes to the Mork store. Large/session commits may be
+// upgraded to a compress commit when the store reports enough wasted space.
+// After committing, mirror the message counts into the folder cache.
+NS_IMETHODIMP nsMsgDatabase::Commit(nsMsgDBCommit commitType) {
+  nsresult err = NS_OK;
+  nsCOMPtr<nsIMdbThumb> commitThumb;
+
+  RememberLastUseTime();
+  if (commitType == nsMsgDBCommitType::kLargeCommit ||
+      commitType == nsMsgDBCommitType::kSessionCommit) {
+    mdb_percent outActualWaste = 0;
+    mdb_bool outShould;
+    if (m_mdbStore) {
+      // Ask the store whether >=30% waste makes compression worthwhile.
+      err =
+          m_mdbStore->ShouldCompress(GetEnv(), 30, &outActualWaste, &outShould);
+      if (NS_SUCCEEDED(err) && outShould)
+        commitType = nsMsgDBCommitType::kCompressCommit;
+    }
+  }
+  // commitType = nsMsgDBCommitType::kCompressCommit; // ### until incremental
+  // writing works.
+
+  if (m_mdbStore) {
+    switch (commitType) {
+      case nsMsgDBCommitType::kLargeCommit:
+        err = m_mdbStore->LargeCommit(GetEnv(), getter_AddRefs(commitThumb));
+        break;
+      case nsMsgDBCommitType::kSessionCommit:
+        err = m_mdbStore->SessionCommit(GetEnv(), getter_AddRefs(commitThumb));
+        break;
+      case nsMsgDBCommitType::kCompressCommit:
+        err = m_mdbStore->CompressCommit(GetEnv(), getter_AddRefs(commitThumb));
+        break;
+    }
+  }
+  if (commitThumb) {
+    // Drive the incremental commit to completion synchronously.
+    mdb_count outTotal = 0;    // total somethings to do in operation
+    mdb_count outCurrent = 0;  // subportion of total completed so far
+    mdb_bool outDone = false;  // is operation finished?
+    mdb_bool outBroken = false;  // is operation irreparably dead and broken?
+    while (!outDone && !outBroken && NS_SUCCEEDED(err)) {
+      err = commitThumb->DoMore(GetEnv(), &outTotal, &outCurrent, &outDone,
+                                &outBroken);
+    }
+  }
+  // ### do something with error, but clear it now because mork errors out on
+  // commits.
+  if (GetEnv()) GetEnv()->ClearErrors();
+
+  // Best effort: push the (possibly changed) message counts into the folder
+  // cache so folder panes stay accurate without opening the db.
+  nsresult rv;
+  nsCOMPtr<nsIMsgAccountManager> accountManager =
+      do_GetService("@mozilla.org/messenger/account-manager;1", &rv);
+  if (NS_SUCCEEDED(rv) && accountManager) {
+    nsCOMPtr<nsIMsgFolderCache> folderCache;
+
+    rv = accountManager->GetFolderCache(getter_AddRefs(folderCache));
+    if (NS_SUCCEEDED(rv) && folderCache) {
+      nsCOMPtr<nsIMsgFolderCacheElement> cacheElement;
+      nsCString persistentPath;
+      NS_ENSURE_TRUE(m_dbFile, NS_ERROR_NULL_POINTER);
+      rv = m_dbFile->GetPersistentDescriptor(persistentPath);
+      NS_ENSURE_SUCCESS(rv, err);
+      rv = folderCache->GetCacheElement(persistentPath, false,
+                                        getter_AddRefs(cacheElement));
+      if (NS_SUCCEEDED(rv) && cacheElement && m_dbFolderInfo) {
+        int32_t totalMessages, unreadMessages, pendingMessages,
+            pendingUnreadMessages;
+
+        m_dbFolderInfo->GetNumMessages(&totalMessages);
+        m_dbFolderInfo->GetNumUnreadMessages(&unreadMessages);
+        m_dbFolderInfo->GetImapUnreadPendingMessages(&pendingUnreadMessages);
+        m_dbFolderInfo->GetImapTotalPendingMessages(&pendingMessages);
+        cacheElement->SetCachedInt32("totalMsgs", totalMessages);
+        cacheElement->SetCachedInt32("totalUnreadMsgs", unreadMessages);
+        cacheElement->SetCachedInt32("pendingMsgs", pendingMessages);
+        cacheElement->SetCachedInt32("pendingUnreadMsgs",
+                                     pendingUnreadMessages);
+      }
+    }
+  }
+
+  return err;
+}
+
+// Close the database, invalidating outstanding enumerators first and
+// committing pending changes unless forceCommit is false.
+NS_IMETHODIMP nsMsgDatabase::Close(bool forceCommit /* = TRUE */) {
+  InvalidateEnumerators();
+  return CloseMDB(forceCommit);
+}
+
+// Mork schema vocabulary: scope/kind names for the header and thread tables,
+// and the column names used for message/thread properties. These strings are
+// interned into tokens by InitMDBInfo().
+const char* kMsgHdrsScope =
+    "ns:msg:db:row:scope:msgs:all";  // scope for all headers table
+const char* kMsgHdrsTableKind = "ns:msg:db:table:kind:msgs";
+const char* kThreadTableKind = "ns:msg:db:table:kind:thread";
+const char* kThreadHdrsScope =
+    "ns:msg:db:row:scope:threads:all";  // scope for all threads table
+const char* kAllThreadsTableKind =
+    "ns:msg:db:table:kind:allthreads";  // kind for table of all threads
+const char* kSubjectColumnName = "subject";
+const char* kSenderColumnName = "sender";
+const char* kMessageIdColumnName = "message-id";
+const char* kReferencesColumnName = "references";
+const char* kRecipientsColumnName = "recipients";
+const char* kDateColumnName = "date";
+const char* kMessageSizeColumnName = "size";
+const char* kFlagsColumnName = "flags";
+const char* kPriorityColumnName = "priority";
+const char* kLabelColumnName = "label";
+const char* kNumLinesColumnName = "numLines";
+const char* kCCListColumnName = "ccList";
+const char* kBCCListColumnName = "bccList";
+const char* kMessageThreadIdColumnName = "msgThreadId";
+const char* kThreadFlagsColumnName = "threadFlags";
+const char* kThreadIdColumnName = "threadId";
+const char* kThreadChildrenColumnName = "children";
+const char* kThreadUnreadChildrenColumnName = "unreadChildren";
+const char* kThreadSubjectColumnName = "threadSubject";
+const char* kMessageCharSetColumnName = "msgCharSet";
+const char* kThreadParentColumnName = "threadParent";
+const char* kThreadRootColumnName = "threadRoot";
+const char* kThreadNewestMsgDateColumnName = "threadNewestMsgDate";
+const char* kOfflineMsgOffsetColumnName = "msgOffset";
+const char* kOfflineMsgSizeColumnName = "offlineMsgSize";
+// Well-known OIDs for the all-headers / all-threads tables (filled in by
+// InitMDBInfo), plus the flag marking the one-time bad-references repair.
+struct mdbOid gAllMsgHdrsTableOID;
+struct mdbOid gAllThreadsTableOID;
+const char* kFixedBadRefThreadingProp = "fixedBadRefThreading";
+
+// set up empty tables, dbFolderInfo, etc.
+// Creates the all-headers and all-threads tables with their well-known OIDs
+// and stamps the current schema version on the new folder info.
+nsresult nsMsgDatabase::InitNewDB() {
+  nsresult err = NS_OK;
+
+  err = InitMDBInfo();
+  if (NS_SUCCEEDED(err)) {
+    nsDBFolderInfo* dbFolderInfo = new nsDBFolderInfo(this);
+    if (dbFolderInfo) {
+      err = dbFolderInfo->AddToNewMDB();
+      dbFolderInfo->SetVersion(GetCurVersion());
+      dbFolderInfo->SetBooleanProperty("forceReparse", false);
+      // New DBs never had the bad-references bug, so mark it fixed up front.
+      dbFolderInfo->SetBooleanProperty(kFixedBadRefThreadingProp, true);
+      nsIMdbStore* store = GetStore();
+      // create the unique table for the dbFolderInfo.
+      struct mdbOid allMsgHdrsTableOID;
+      struct mdbOid allThreadsTableOID;
+      if (!store) return NS_ERROR_NULL_POINTER;
+
+      allMsgHdrsTableOID.mOid_Scope = m_hdrRowScopeToken;
+      allMsgHdrsTableOID.mOid_Id = kAllMsgHdrsTableKey;
+      allThreadsTableOID.mOid_Scope = m_threadRowScopeToken;
+      allThreadsTableOID.mOid_Id = kAllThreadsTableKey;
+
+      // TODO: check this error value?
+      (void)store->NewTableWithOid(GetEnv(), &allMsgHdrsTableOID,
+                                   m_hdrTableKindToken, false, nullptr,
+                                   &m_mdbAllMsgHeadersTable);
+
+      // error here is not fatal.
+      (void)store->NewTableWithOid(GetEnv(), &allThreadsTableOID,
+                                   m_allThreadsTableKindToken, false, nullptr,
+                                   &m_mdbAllThreadsTable);
+
+      m_dbFolderInfo = dbFolderInfo;
+
+    } else
+      err = NS_ERROR_OUT_OF_MEMORY;
+  }
+  return err;
+}
+
+// Look up the table identified by |scope|/|kind| (OID id 1), creating it if
+// absent, and return the interned tokens through the out-params. Generic
+// helper; callers include the offline-ops table setup.
+nsresult nsMsgDatabase::GetTableCreateIfMissing(const char* scope,
+                                                const char* kind,
+                                                nsIMdbTable** table,
+                                                mdb_token& scopeToken,
+                                                mdb_token& kindToken) {
+  struct mdbOid tableOID;
+
+  if (!m_mdbStore) return NS_ERROR_FAILURE;
+  (void)m_mdbStore->StringToToken(GetEnv(), scope, &scopeToken);
+  (void)m_mdbStore->StringToToken(GetEnv(), kind, &kindToken);
+  tableOID.mOid_Scope = scopeToken;
+  tableOID.mOid_Id = 1;
+
+  nsresult rv = m_mdbStore->GetTable(GetEnv(), &tableOID, table);
+  NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);
+
+  // Create the table if it doesn't exist yet. (NS_ENSURE_SUCCESS above has
+  // already returned on failure, so the old `NS_SUCCEEDED(rv) &&` re-check
+  // here was dead code.)
+  if (!*table) {
+    rv = m_mdbStore->NewTable(GetEnv(), scopeToken, kindToken, false, nullptr,
+                              table);
+    if (NS_FAILED(rv) || !*table) rv = NS_ERROR_FAILURE;
+  }
+  NS_ASSERTION(NS_SUCCEEDED(rv), "couldn't get/create table");
+  return rv;
+}
+
+// Initialize from an already-parsed store: fetch (or re-create) the
+// all-headers and all-threads tables, load the folder info, and run the
+// one-time repair scan for messages whose first reference is their own
+// message-id (the old "bad ref threading" bug).
+nsresult nsMsgDatabase::InitExistingDB() {
+  nsresult err = NS_OK;
+
+  err = InitMDBInfo();
+  if (NS_SUCCEEDED(err)) {
+    err = GetStore()->GetTable(GetEnv(), &gAllMsgHdrsTableOID,
+                               &m_mdbAllMsgHeadersTable);
+    if (NS_SUCCEEDED(err)) {
+      m_dbFolderInfo = new nsDBFolderInfo(this);
+      if (m_dbFolderInfo) {
+        err = m_dbFolderInfo->InitFromExistingDB();
+      }
+    } else
+      err = NS_ERROR_FAILURE;
+
+    NS_ASSERTION(NS_SUCCEEDED(err), "failed initing existing db");
+    NS_ENSURE_SUCCESS(err, err);
+    // create new all msg hdrs table, if it doesn't exist.
+    if (NS_SUCCEEDED(err) && !m_mdbAllMsgHeadersTable) {
+      struct mdbOid allMsgHdrsTableOID;
+      allMsgHdrsTableOID.mOid_Scope = m_hdrRowScopeToken;
+      allMsgHdrsTableOID.mOid_Id = kAllMsgHdrsTableKey;
+
+      nsresult mdberr = GetStore()->NewTableWithOid(
+          GetEnv(), &allMsgHdrsTableOID, m_hdrTableKindToken, false, nullptr,
+          &m_mdbAllMsgHeadersTable);
+      if (NS_FAILED(mdberr) || !m_mdbAllMsgHeadersTable) err = NS_ERROR_FAILURE;
+    }
+    // Same for the all-threads table.
+    struct mdbOid allThreadsTableOID;
+    allThreadsTableOID.mOid_Scope = m_threadRowScopeToken;
+    allThreadsTableOID.mOid_Id = kAllThreadsTableKey;
+    err = GetStore()->GetTable(GetEnv(), &gAllThreadsTableOID,
+                               &m_mdbAllThreadsTable);
+    if (!m_mdbAllThreadsTable) {
+      nsresult mdberr = GetStore()->NewTableWithOid(
+          GetEnv(), &allThreadsTableOID, m_allThreadsTableKindToken, false,
+          nullptr, &m_mdbAllThreadsTable);
+      if (NS_FAILED(mdberr) || !m_mdbAllThreadsTable) err = NS_ERROR_FAILURE;
+    }
+  }
+  if (NS_SUCCEEDED(err) && m_dbFolderInfo) {
+    bool fixedBadRefThreading;
+    m_dbFolderInfo->GetBooleanProperty(kFixedBadRefThreadingProp, false,
+                                       &fixedBadRefThreading);
+    if (!fixedBadRefThreading) {
+      nsCOMPtr<nsIMsgEnumerator> enumerator;
+      err = EnumerateMessages(getter_AddRefs(enumerator));
+      if (NS_SUCCEEDED(err) && enumerator) {
+        bool hasMore;
+
+        while (NS_SUCCEEDED(err = enumerator->HasMoreElements(&hasMore)) &&
+               hasMore) {
+          nsCOMPtr<nsIMsgDBHdr> msgHdr;
+          err = enumerator->GetNext(getter_AddRefs(msgHdr));
+          if (msgHdr && NS_SUCCEEDED(err)) {
+            nsCString messageId;
+            nsAutoCString firstReference;
+            msgHdr->GetMessageId(getter_Copies(messageId));
+            msgHdr->GetStringReference(0, firstReference);
+            // A self-referencing first reference marks a damaged summary:
+            // force a reparse by reporting it out of date.
+            if (messageId.Equals(firstReference)) {
+              err = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+              break;
+            }
+          }
+        }
+      }
+
+      m_dbFolderInfo->SetBooleanProperty(kFixedBadRefThreadingProp, true);
+    }
+  }
+  return err;
+}
+
+// initialize the various tokens and tables in our db's env
+// Resolves the well-known column/scope/table-kind names to mdb tokens and
+// caches them in member variables. Runs at most once per database instance
+// (guarded by m_mdbTokensInitialized) and must complete before rows or
+// tables are accessed, since the table OIDs set up at the end depend on the
+// scope tokens resolved here.
+nsresult nsMsgDatabase::InitMDBInfo() {
+  nsresult err = NS_OK;
+
+  if (!m_mdbTokensInitialized && GetStore()) {
+    // Set the flag up front so a partial failure is not retried forever.
+    m_mdbTokensInitialized = true;
+    err =
+        GetStore()->StringToToken(GetEnv(), kMsgHdrsScope, &m_hdrRowScopeToken);
+    if (NS_SUCCEEDED(err)) {
+      GetStore()->StringToToken(GetEnv(), kSubjectColumnName,
+                                &m_subjectColumnToken);
+      GetStore()->StringToToken(GetEnv(), kSenderColumnName,
+                                &m_senderColumnToken);
+      GetStore()->StringToToken(GetEnv(), kMessageIdColumnName,
+                                &m_messageIdColumnToken);
+      // if we just store references as a string, we won't get any savings from
+      // the fact there's a lot of duplication. So we may want to break them up
+      // into multiple columns, r1, r2, etc.
+      GetStore()->StringToToken(GetEnv(), kReferencesColumnName,
+                                &m_referencesColumnToken);
+      // similarly, recipients could be tokenized properties
+      GetStore()->StringToToken(GetEnv(), kRecipientsColumnName,
+                                &m_recipientsColumnToken);
+      GetStore()->StringToToken(GetEnv(), kDateColumnName, &m_dateColumnToken);
+      GetStore()->StringToToken(GetEnv(), kMessageSizeColumnName,
+                                &m_messageSizeColumnToken);
+      GetStore()->StringToToken(GetEnv(), kFlagsColumnName,
+                                &m_flagsColumnToken);
+      GetStore()->StringToToken(GetEnv(), kPriorityColumnName,
+                                &m_priorityColumnToken);
+      GetStore()->StringToToken(GetEnv(), kLabelColumnName,
+                                &m_labelColumnToken);
+      GetStore()->StringToToken(GetEnv(), kNumLinesColumnName,
+                                &m_numLinesColumnToken);
+      GetStore()->StringToToken(GetEnv(), kCCListColumnName,
+                                &m_ccListColumnToken);
+      GetStore()->StringToToken(GetEnv(), kBCCListColumnName,
+                                &m_bccListColumnToken);
+      GetStore()->StringToToken(GetEnv(), kMessageThreadIdColumnName,
+                                &m_messageThreadIdColumnToken);
+      GetStore()->StringToToken(GetEnv(), kThreadIdColumnName,
+                                &m_threadIdColumnToken);
+      GetStore()->StringToToken(GetEnv(), kThreadFlagsColumnName,
+                                &m_threadFlagsColumnToken);
+      GetStore()->StringToToken(GetEnv(), kThreadNewestMsgDateColumnName,
+                                &m_threadNewestMsgDateColumnToken);
+      GetStore()->StringToToken(GetEnv(), kThreadChildrenColumnName,
+                                &m_threadChildrenColumnToken);
+      GetStore()->StringToToken(GetEnv(), kThreadUnreadChildrenColumnName,
+                                &m_threadUnreadChildrenColumnToken);
+      GetStore()->StringToToken(GetEnv(), kThreadSubjectColumnName,
+                                &m_threadSubjectColumnToken);
+      GetStore()->StringToToken(GetEnv(), kMessageCharSetColumnName,
+                                &m_messageCharSetColumnToken);
+      err = GetStore()->StringToToken(GetEnv(), kMsgHdrsTableKind,
+                                      &m_hdrTableKindToken);
+      if (NS_SUCCEEDED(err))
+        err = GetStore()->StringToToken(GetEnv(), kThreadTableKind,
+                                        &m_threadTableKindToken);
+      // NOTE(review): each assignment below overwrites err without checking
+      // the previous result, so only the last StringToToken outcome is ever
+      // tested by the NS_SUCCEEDED check that follows.
+      err = GetStore()->StringToToken(GetEnv(), kAllThreadsTableKind,
+                                      &m_allThreadsTableKindToken);
+      err = GetStore()->StringToToken(GetEnv(), kThreadHdrsScope,
+                                      &m_threadRowScopeToken);
+      err = GetStore()->StringToToken(GetEnv(), kThreadParentColumnName,
+                                      &m_threadParentColumnToken);
+      err = GetStore()->StringToToken(GetEnv(), kThreadRootColumnName,
+                                      &m_threadRootKeyColumnToken);
+      err = GetStore()->StringToToken(GetEnv(), kOfflineMsgOffsetColumnName,
+                                      &m_offlineMsgOffsetColumnToken);
+      err = GetStore()->StringToToken(GetEnv(), kOfflineMsgSizeColumnName,
+                                      &m_offlineMessageSizeColumnToken);
+
+      if (NS_SUCCEEDED(err)) {
+        // The table of all message hdrs will have table id 1.
+        gAllMsgHdrsTableOID.mOid_Scope = m_hdrRowScopeToken;
+        gAllMsgHdrsTableOID.mOid_Id = kAllMsgHdrsTableKey;
+        gAllThreadsTableOID.mOid_Scope = m_threadRowScopeToken;
+        gAllThreadsTableOID.mOid_Id = kAllThreadsTableKey;
+      }
+    }
+  }
+  return err;
+}
+
+// Returns if the db contains this key
+NS_IMETHODIMP nsMsgDatabase::ContainsKey(nsMsgKey key, bool* containsKey) {
+  // Reject a null out-param or a database whose header table never opened.
+  if (!containsKey || !m_mdbAllMsgHeadersTable) return NS_ERROR_NULL_POINTER;
+  *containsKey = false;
+
+  // Build the row OID for this key in the header scope and probe the table.
+  mdbOid rowObjectId;
+  rowObjectId.mOid_Scope = m_hdrRowScopeToken;
+  rowObjectId.mOid_Id = key;
+
+  mdb_bool hasOid;
+  nsresult err =
+      m_mdbAllMsgHeadersTable->HasOid(GetEnv(), &rowObjectId, &hasOid);
+  if (NS_SUCCEEDED(err)) *containsKey = hasOid;
+  return err;
+}
+
+// get a message header for the given key. Caller must release()!
+// Consults the header use-cache first, then falls back to the mdb store.
+NS_IMETHODIMP nsMsgDatabase::GetMsgHdrForKey(nsMsgKey key,
+                                             nsIMsgDBHdr** pmsgHdr) {
+  // Validate the out-param *before* writing through it: the previous code
+  // nulled *pmsgHdr ahead of NS_ENSURE_ARG_POINTER, dereferencing a
+  // potentially-null pointer.
+  NS_ENSURE_ARG_POINTER(pmsgHdr);
+  *pmsgHdr = nullptr;
+  NS_ENSURE_STATE(m_folder);
+  NS_ENSURE_STATE(m_mdbAllMsgHeadersTable);
+  NS_ENSURE_STATE(m_mdbStore);
+
+  // Because this may be called a lot, and we don't want gettimeofday() to show
+  // up in trace logs, we just remember the most recent time any db was used,
+  // which should be close enough for our purposes.
+  m_lastUseTime = gLastUseTime;
+
+  // Fast path: the header may already be cached.
+  nsresult rv = GetHdrFromUseCache(key, pmsgHdr);
+  if (NS_SUCCEEDED(rv) && *pmsgHdr) return rv;
+
+  mdbOid rowObjectId;
+  rowObjectId.mOid_Id = key;
+  rowObjectId.mOid_Scope = m_hdrRowScopeToken;
+  mdb_bool hasOid;
+  rv = m_mdbAllMsgHeadersTable->HasOid(GetEnv(), &rowObjectId, &hasOid);
+  if (NS_SUCCEEDED(rv) /* && hasOid */) {
+    nsIMdbRow* hdrRow;
+    rv = m_mdbStore->GetRow(GetEnv(), &rowObjectId, &hdrRow);
+    if (NS_SUCCEEDED(rv)) {
+      if (!hdrRow) {
+        rv = NS_ERROR_NULL_POINTER;
+      } else {
+        // CreateMsgHdr takes ownership of the row and fills in *pmsgHdr.
+        rv = CreateMsgHdr(hdrRow, key, pmsgHdr);
+      }
+    }
+  }
+  return rv;
+}
+
+// Delete the message with the given key, notifying listeners and optionally
+// committing the transaction.
+NS_IMETHODIMP nsMsgDatabase::DeleteMessage(nsMsgKey key,
+                                           nsIDBChangeListener* instigator,
+                                           bool commit) {
+  // Resolve the key to a header; a miss means the message isn't in this db.
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+  return msgHdr ? DeleteHeader(msgHdr, instigator, commit, true)
+                : NS_MSG_MESSAGE_NOT_FOUND;
+}
+
+// Delete every message in nsMsgKeys that exists in this db, stopping (and
+// returning the error) at the first failure. Commits periodically so a large
+// batch does not accumulate into one giant uncommitted transaction.
+NS_IMETHODIMP nsMsgDatabase::DeleteMessages(nsTArray<nsMsgKey> const& nsMsgKeys,
+                                            nsIDBChangeListener* instigator) {
+  nsresult err = NS_OK;
+
+  uint32_t kindex;
+  for (kindex = 0; kindex < nsMsgKeys.Length(); kindex++) {
+    nsMsgKey key = nsMsgKeys[kindex];
+    nsCOMPtr<nsIMsgDBHdr> msgHdr;
+
+    bool hasKey;
+
+    // Keys missing from the db are silently skipped; a key that is present
+    // but whose header cannot be fetched is treated as an error.
+    if (NS_SUCCEEDED(ContainsKey(key, &hasKey)) && hasKey) {
+      GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+      if (!msgHdr) {
+        err = NS_MSG_MESSAGE_NOT_FOUND;
+        break;
+      }
+      // commit=true on every 300th index (including the very first one).
+      err = DeleteHeader(msgHdr, instigator, kindex % 300 == 0, true);
+      if (NS_FAILED(err)) break;
+    }
+  }
+  return err;
+}
+
+// Credit a deleted message's size to the folder's expunged-bytes counter.
+// The size fetch is best-effort; a failed GetMessageSize leaves it at 0.
+nsresult nsMsgDatabase::AdjustExpungedBytesOnDelete(nsIMsgDBHdr* msgHdr) {
+  uint32_t msgSize = 0;
+  (void)msgHdr->GetMessageSize(&msgSize);
+  return m_dbFolderInfo->ChangeExpungedBytes(msgSize);
+}
+
+// Delete a header from the db: flag it expunged, update folder counts,
+// detach it from its thread, notify listeners, and finally cut its row.
+// The ordering here is load-bearing — counts are adjusted while the header
+// is still readable, and flags/threadParent are captured before removal
+// destroys them.
+NS_IMETHODIMP nsMsgDatabase::DeleteHeader(nsIMsgDBHdr* msg,
+                                          nsIDBChangeListener* instigator,
+                                          bool commit, bool notify) {
+  if (!msg) return NS_ERROR_NULL_POINTER;
+
+  nsMsgHdr* msgHdr =
+      static_cast<nsMsgHdr*>(msg);  // closed system, so this is ok
+  nsMsgKey key;
+  (void)msg->GetMessageKey(&key);
+  // only need to do this for mail - will this speed up news expiration?
+  SetHdrFlag(msg, true, nsMsgMessageFlags::Expunged);  // tell mailbox (mail)
+
+  // Remember whether this key was in the new-message set before removing it.
+  bool hdrWasNew = m_newSet.BinaryIndexOf(key) != m_newSet.NoIndex;
+  m_newSet.RemoveElement(key);
+
+  // Adjust folder totals/unread/expunged-bytes while the hdr is still valid.
+  if (m_dbFolderInfo) {
+    bool isRead;
+    m_dbFolderInfo->ChangeNumMessages(-1);
+    IsRead(key, &isRead);
+    if (!isRead) m_dbFolderInfo->ChangeNumUnreadMessages(-1);
+    AdjustExpungedBytesOnDelete(msg);
+  }
+
+  uint32_t flags;
+  nsMsgKey threadParent;
+
+  // Save off flags and threadparent since they will no longer exist after we
+  // remove the header from the db.
+  if (notify) {
+    (void)msg->GetFlags(&flags);
+    msg->GetThreadParent(&threadParent);
+  }
+
+  RemoveHeaderFromThread(msgHdr);
+  if (notify) {
+    // If deleted hdr was new, restore the new flag on flags
+    // so saved searches will know to reduce their new msg count.
+    if (hdrWasNew) flags |= nsMsgMessageFlags::New;
+    NotifyHdrDeletedAll(msg, threadParent, flags,
+                        instigator);  // tell listeners
+  }
+  // if (!onlyRemoveFromThread) // to speed up expiration, try this. But
+  // really need to do this in RemoveHeaderFromDB
+  nsresult ret = RemoveHeaderFromDB(msgHdr);
+
+  if (commit)
+    Commit(nsMsgDBCommitType::kLargeCommit);  // ### dmb is this a good time to
+                                              // commit?
+  return ret;
+}
+
+NS_IMETHODIMP
+nsMsgDatabase::UndoDelete(nsIMsgDBHdr* aMsgHdr) {
+  // A null header is a no-op, not an error.
+  if (!aMsgHdr) return NS_OK;
+
+  // SetHdrFlag bails out when asked to clear a flag that isn't set, so
+  // force the Expunged bit on first, then clear it through the normal path.
+  uint32_t unusedFlags;
+  aMsgHdr->OrFlags(nsMsgMessageFlags::Expunged, &unusedFlags);
+  SetHdrFlag(aMsgHdr, false,
+             nsMsgMessageFlags::Expunged);  // Clear deleted flag in db.
+  return NS_OK;
+}
+
+// Detach a header from whatever thread currently contains it.
+nsresult nsMsgDatabase::RemoveHeaderFromThread(nsMsgHdr* msgHdr) {
+  if (!msgHdr) return NS_ERROR_NULL_POINTER;
+
+  nsCOMPtr<nsIMsgThread> thread;
+  nsresult ret = GetThreadContainingMsgHdr(msgHdr, getter_AddRefs(thread));
+  // Nothing to do if the header has no thread (or the lookup failed).
+  if (NS_FAILED(ret) || !thread) return ret;
+  return thread->RemoveChildHdr(msgHdr, this);
+}
+
+// Public wrapper over RemoveHeaderFromDB for nsIMsgDBHdr callers.
+NS_IMETHODIMP nsMsgDatabase::RemoveHeaderMdbRow(nsIMsgDBHdr* msg) {
+  NS_ENSURE_ARG_POINTER(msg);
+  // Closed system: every nsIMsgDBHdr in this db is an nsMsgHdr.
+  return RemoveHeaderFromDB(static_cast<nsMsgHdr*>(msg));
+}
+
+// This is a lower level routine which doesn't send notifications or
+// update folder info. One use is when a rule fires moving a header
+// from one db to another, to remove it from the first db.
+// Order matters: the hdr is evicted from the use-cache and (when correct
+// threading is on) the message-id/reference hashes before its row is cut.
+
+nsresult nsMsgDatabase::RemoveHeaderFromDB(nsMsgHdr* msgHdr) {
+  if (!msgHdr) return NS_ERROR_NULL_POINTER;
+  nsresult ret = NS_OK;
+
+  RemoveHdrFromCache(msgHdr, nsMsgKey_None);
+  if (UseCorrectThreading()) RemoveMsgRefsFromHash(msgHdr);
+  nsIMdbRow* row = msgHdr->GetMDBRow();
+  if (row) {
+    // Cut the row out of the all-headers table, then blank its columns.
+    ret = m_mdbAllMsgHeadersTable->CutRow(GetEnv(), row);
+    row->CutAllColumns(GetEnv());
+  }
+  msgHdr->ClearCachedValues();
+  return ret;
+}
+
+// Report whether the message with this key carries the Read flag.
+nsresult nsMsgDatabase::IsRead(nsMsgKey key, bool* pRead) {
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+  return msgHdr ? IsHeaderRead(msgHdr, pRead) : NS_MSG_MESSAGE_NOT_FOUND;
+}
+
+// Compute a header's effective flags: the supplied stored flags, plus New
+// if the key is in the in-memory new-message set, plus Read per the header.
+uint32_t nsMsgDatabase::GetStatusFlags(nsIMsgDBHdr* msgHdr,
+                                       nsMsgMessageFlagType origFlags) {
+  uint32_t statusFlags = origFlags;
+  bool isRead = true;
+
+  nsMsgKey key;
+  (void)msgHdr->GetMessageKey(&key);
+  // Fast path: check the most recently appended new key before falling back
+  // to a binary search (AddToNewList keeps the set in increasing order).
+  if ((!m_newSet.IsEmpty() && m_newSet[m_newSet.Length() - 1] == key) ||
+      (m_newSet.BinaryIndexOf(key) != m_newSet.NoIndex))
+    statusFlags |= nsMsgMessageFlags::New;
+  if (NS_SUCCEEDED(IsHeaderRead(msgHdr, &isRead)) && isRead)
+    statusFlags |= nsMsgMessageFlags::Read;
+  return statusFlags;
+}
+
+// Report whether a header's stored Read bit is set.
+nsresult nsMsgDatabase::IsHeaderRead(nsIMsgDBHdr* msgHdr, bool* pRead) {
+  if (!msgHdr) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  // Use the raw flags: going through GetFlags would recurse back into us
+  // via GetStatusFlags.
+  nsMsgHdr* hdr = static_cast<nsMsgHdr*>(msgHdr);  // closed system, cast ok
+  uint32_t rawFlags;
+  hdr->GetRawFlags(&rawFlags);
+  *pRead = (rawFlags & nsMsgMessageFlags::Read) != 0;
+  return NS_OK;
+}
+
+// Report whether the message with this key carries the Marked (flagged) bit.
+NS_IMETHODIMP nsMsgDatabase::IsMarked(nsMsgKey key, bool* pMarked) {
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+  if (!msgHdr) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  uint32_t hdrFlags;
+  (void)msgHdr->GetFlags(&hdrFlags);
+  *pMarked = (hdrFlags & nsMsgMessageFlags::Marked) != 0;
+  return NS_OK;
+}
+
+// Report whether the thread containing this key is flagged Ignored.
+NS_IMETHODIMP nsMsgDatabase::IsIgnored(nsMsgKey key, bool* pIgnored) {
+  NS_ENSURE_ARG_POINTER(pIgnored);
+
+  // Ignored is a per-thread flag, so resolve the key to its thread first.
+  nsCOMPtr<nsIMsgThread> threadHdr;
+  nsresult rv = GetThreadForMsgKey(key, getter_AddRefs(threadHdr));
+  // This should be very surprising, but we leave that up to the caller
+  // to determine for now.
+  if (!threadHdr) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  uint32_t threadFlags;
+  threadHdr->GetFlags(&threadFlags);
+  *pIgnored = (threadFlags & nsMsgMessageFlags::Ignored) != 0;
+  return rv;
+}
+
+// Report whether the thread containing this key is flagged Watched.
+NS_IMETHODIMP nsMsgDatabase::IsWatched(nsMsgKey key, bool* pWatched) {
+  NS_ENSURE_ARG_POINTER(pWatched);
+
+  // Watched is a per-thread flag, so resolve the key to its thread first.
+  nsCOMPtr<nsIMsgThread> threadHdr;
+  nsresult rv = GetThreadForMsgKey(key, getter_AddRefs(threadHdr));
+  // This should be very surprising, but we leave that up to the caller
+  // to determine for now.
+  if (!threadHdr) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  uint32_t threadFlags;
+  threadHdr->GetFlags(&threadFlags);
+  *pWatched = (threadFlags & nsMsgMessageFlags::Watched) != 0;
+  return rv;
+}
+
+// Report whether the message with this key carries the Attachment bit.
+nsresult nsMsgDatabase::HasAttachments(nsMsgKey key, bool* pHasThem) {
+  NS_ENSURE_ARG_POINTER(pHasThem);
+
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+  if (!msgHdr) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  uint32_t hdrFlags;
+  (void)msgHdr->GetFlags(&hdrFlags);
+  *pHasThem = (hdrFlags & nsMsgMessageFlags::Attachment) != 0;
+  return NS_OK;
+}
+
+// Low-level Read-bit setter; returns true if the stored flags changed.
+bool nsMsgDatabase::SetHdrReadFlag(nsIMsgDBHdr* msgHdr, bool bRead) {
+  return SetHdrFlag(msgHdr, bRead, nsMsgMessageFlags::Read);
+}
+
+// Flip a header's Read state in the db: drop it from the new set, adjust
+// the folder's unread count, set the flag, clear New, and notify listeners
+// (unless the flags did not actually change).
+nsresult nsMsgDatabase::MarkHdrReadInDB(nsIMsgDBHdr* msgHdr, bool bRead,
+                                        nsIDBChangeListener* instigator) {
+  nsresult rv;
+  nsMsgKey key;
+  uint32_t oldFlags;
+  bool hdrInDB;
+  (void)msgHdr->GetMessageKey(&key);
+  msgHdr->GetFlags(&oldFlags);
+
+  m_newSet.RemoveElement(key);
+  // Only adjust folder counts for headers actually stored in the db.
+  (void)ContainsKey(key, &hdrInDB);
+  if (hdrInDB && m_dbFolderInfo) {
+    if (bRead)
+      m_dbFolderInfo->ChangeNumUnreadMessages(-1);
+    else
+      m_dbFolderInfo->ChangeNumUnreadMessages(1);
+  }
+
+  SetHdrReadFlag(msgHdr, bRead);  // this will cause a commit, at least for
+                                  // local mail, so do it after we change
+  // the folder counts above, so they will get committed too.
+  uint32_t flags;
+  rv = msgHdr->GetFlags(&flags);
+  // Reading a message always clears its New status.
+  flags &= ~nsMsgMessageFlags::New;
+  msgHdr->SetFlags(flags);
+  if (NS_FAILED(rv)) return rv;
+
+  // No effective change — skip listener notification.
+  if (oldFlags == flags) return NS_OK;
+
+  return NotifyHdrChangeAll(msgHdr, oldFlags, flags, instigator);
+}
+
+// Key-based convenience wrapper over MarkHdrRead.
+NS_IMETHODIMP nsMsgDatabase::MarkRead(nsMsgKey key, bool bRead,
+                                      nsIDBChangeListener* instigator) {
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+  return msgHdr ? MarkHdrRead(msgHdr, bRead, instigator)
+                : NS_MSG_MESSAGE_NOT_FOUND;
+}
+
+// Set/clear the Replied flag on the message with the given key.
+NS_IMETHODIMP nsMsgDatabase::MarkReplied(
+    nsMsgKey key, bool bReplied, nsIDBChangeListener* instigator /* = NULL */) {
+  return SetKeyFlag(key, bReplied, nsMsgMessageFlags::Replied, instigator);
+}
+
+// Set/clear the Forwarded flag on the message with the given key.
+NS_IMETHODIMP nsMsgDatabase::MarkForwarded(
+    nsMsgKey key, bool bForwarded,
+    nsIDBChangeListener* instigator /* = NULL */) {
+  return SetKeyFlag(key, bForwarded, nsMsgMessageFlags::Forwarded, instigator);
+}
+
+// Set/clear the Redirected flag on the message with the given key.
+NS_IMETHODIMP nsMsgDatabase::MarkRedirected(
+    nsMsgKey key, bool bRedirected,
+    nsIDBChangeListener* instigator /* = NULL */) {
+  return SetKeyFlag(key, bRedirected, nsMsgMessageFlags::Redirected,
+                    instigator);
+}
+
+// Set/clear the Attachment flag on the message with the given key.
+NS_IMETHODIMP nsMsgDatabase::MarkHasAttachments(
+    nsMsgKey key, bool bHasAttachments, nsIDBChangeListener* instigator) {
+  return SetKeyFlag(key, bHasAttachments, nsMsgMessageFlags::Attachment,
+                    instigator);
+}
+
+NS_IMETHODIMP
+nsMsgDatabase::MarkThreadRead(nsIMsgThread* thread,
+                              nsIDBChangeListener* instigator,
+                              nsTArray<nsMsgKey>& aThoseMarkedRead) {
+  NS_ENSURE_ARG_POINTER(thread);
+  aThoseMarkedRead.ClearAndRetainStorage();
+
+  uint32_t numChildren;
+  thread->GetNumChildren(&numChildren);
+  aThoseMarkedRead.SetCapacity(numChildren);
+
+  // Walk every child of the thread; mark each unread one as read and
+  // report its key back to the caller.
+  nsresult rv = NS_OK;
+  for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+    nsCOMPtr<nsIMsgDBHdr> child;
+    rv = thread->GetChildHdrAt(childIndex, getter_AddRefs(child));
+    if (NS_FAILED(rv) || !child) continue;
+
+    bool isRead = true;
+    IsHeaderRead(child, &isRead);
+    if (isRead) continue;
+
+    nsMsgKey childKey;
+    if (NS_SUCCEEDED(child->GetMessageKey(&childKey)))
+      aThoseMarkedRead.AppendElement(childKey);
+    MarkHdrRead(child, true, instigator);
+  }
+  return rv;
+}
+
+// Set/clear the Ignored flag on a whole thread. Setting Ignored implicitly
+// clears Watched. Listeners are notified through the thread-root header,
+// with thread flags merged into the message flags.
+NS_IMETHODIMP
+nsMsgDatabase::MarkThreadIgnored(nsIMsgThread* thread, nsMsgKey threadKey,
+                                 bool bIgnored,
+                                 nsIDBChangeListener* instigator) {
+  NS_ENSURE_ARG(thread);
+  uint32_t threadFlags;
+  thread->GetFlags(&threadFlags);
+  uint32_t oldThreadFlags =
+      threadFlags;  // not quite right, since we probably want msg hdr flags.
+  if (bIgnored) {
+    threadFlags |= nsMsgMessageFlags::Ignored;
+    threadFlags &= ~nsMsgMessageFlags::Watched;  // ignore is implicit un-watch
+  } else
+    threadFlags &= ~nsMsgMessageFlags::Ignored;
+  thread->SetFlags(threadFlags);
+
+  nsCOMPtr<nsIMsgDBHdr> msg;
+  GetMsgHdrForKey(threadKey, getter_AddRefs(msg));
+  NS_ENSURE_TRUE(msg, NS_MSG_MESSAGE_NOT_FOUND);
+
+  // We'll add the message flags to the thread flags when notifying, since
+  // notifications are supposed to be about messages, not threads.
+  uint32_t msgFlags;
+  msg->GetFlags(&msgFlags);
+
+  return NotifyHdrChangeAll(msg, oldThreadFlags | msgFlags,
+                            threadFlags | msgFlags, instigator);
+}
+
+// Set/clear the Ignored ("killed") flag on a single header and notify.
+NS_IMETHODIMP
+nsMsgDatabase::MarkHeaderKilled(nsIMsgDBHdr* msg, bool bIgnored,
+                                nsIDBChangeListener* instigator) {
+  uint32_t oldFlags;
+  msg->GetFlags(&oldFlags);
+
+  // Toggle the Ignored bit on the header itself.
+  uint32_t newFlags = bIgnored ? (oldFlags | nsMsgMessageFlags::Ignored)
+                               : (oldFlags & ~nsMsgMessageFlags::Ignored);
+  msg->SetFlags(newFlags);
+
+  return NotifyHdrChangeAll(msg, oldFlags, newFlags, instigator);
+}
+
+// Set/clear the Watched flag on a whole thread. Setting Watched implicitly
+// clears Ignored — the mirror image of MarkThreadIgnored. Listeners are
+// notified through the thread-root header, with thread flags merged in.
+NS_IMETHODIMP
+nsMsgDatabase::MarkThreadWatched(nsIMsgThread* thread, nsMsgKey threadKey,
+                                 bool bWatched,
+                                 nsIDBChangeListener* instigator) {
+  NS_ENSURE_ARG(thread);
+  uint32_t threadFlags;
+  thread->GetFlags(&threadFlags);
+  uint32_t oldThreadFlags =
+      threadFlags;  // not quite right, since we probably want msg hdr flags.
+  if (bWatched) {
+    threadFlags |= nsMsgMessageFlags::Watched;
+    threadFlags &= ~nsMsgMessageFlags::Ignored;  // watch is implicit un-ignore
+  } else
+    threadFlags &= ~nsMsgMessageFlags::Watched;
+  thread->SetFlags(threadFlags);
+
+  nsCOMPtr<nsIMsgDBHdr> msg;
+  GetMsgHdrForKey(threadKey, getter_AddRefs(msg));
+  if (!msg) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  // We'll add the message flags to the thread flags when notifying, since
+  // notifications are supposed to be about messages, not threads.
+  uint32_t msgFlags;
+  msg->GetFlags(&msgFlags);
+
+  return NotifyHdrChangeAll(msg, oldThreadFlags | msgFlags,
+                            threadFlags | msgFlags, instigator);
+}
+
+// Set/clear the Marked (flagged/starred) flag on the given key.
+NS_IMETHODIMP nsMsgDatabase::MarkMarked(nsMsgKey key, bool mark,
+                                        nsIDBChangeListener* instigator) {
+  return SetKeyFlag(key, mark, nsMsgMessageFlags::Marked, instigator);
+}
+
+// Set/clear the Offline flag on the given key.
+NS_IMETHODIMP nsMsgDatabase::MarkOffline(nsMsgKey key, bool offline,
+                                         nsIDBChangeListener* instigator) {
+  return SetKeyFlag(key, offline, nsMsgMessageFlags::Offline, instigator);
+}
+
+// Key-based convenience wrapper: resolve the key, then delegate to the
+// header-based variant.
+NS_IMETHODIMP nsMsgDatabase::SetStringProperty(nsMsgKey aKey,
+                                               const char* aProperty,
+                                               const nsACString& aValue) {
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(aKey, getter_AddRefs(msgHdr));
+  return msgHdr ? SetStringPropertyByHdr(msgHdr, aProperty, aValue)
+                : NS_MSG_MESSAGE_NOT_FOUND;
+}
+
+// Set a string property on a header, notifying listeners before and after
+// the change. Each listener's pre-call status value is remembered in
+// statusArray and handed back to the same listener in the post-call pass.
+NS_IMETHODIMP nsMsgDatabase::SetStringPropertyByHdr(nsIMsgDBHdr* msgHdr,
+                                                    const char* aProperty,
+                                                    const nsACString& aValue) {
+  // don't do notifications if message not yet added to database.
+  // Ignore errors (consequences of failure are minor).
+  bool notify = true;
+  nsMsgKey key = nsMsgKey_None;
+  msgHdr->GetMessageKey(&key);
+  ContainsKey(key, &notify);
+
+  nsCString oldValue;
+  nsresult rv = msgHdr->GetStringProperty(aProperty, oldValue);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // if no change to this string property, bail out
+  if (oldValue.Equals(aValue)) return NS_OK;
+
+  // Precall OnHdrPropertyChanged to store prechange status
+  nsTArray<uint32_t> statusArray(m_ChangeListeners.Length());
+  nsCOMPtr<nsIDBChangeListener> listener;
+  if (notify) {
+    nsTObserverArray<nsCOMPtr<nsIDBChangeListener>>::ForwardIterator listeners(
+        m_ChangeListeners);
+    while (listeners.HasMore()) {
+      listener = listeners.GetNext();
+      // initialize |status| because some implementations of
+      // OnHdrPropertyChanged does not set the value.
+      uint32_t status = 0;
+      (void)listener->OnHdrPropertyChanged(msgHdr, nsCString(aProperty), true,
+                                           &status, nullptr);
+      // ignore errors, but append element to keep arrays in sync
+      statusArray.AppendElement(status);
+    }
+  }
+
+  rv = msgHdr->SetStringProperty(aProperty, aValue);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // Postcall OnHdrPropertyChanged to process the change
+  if (notify) {
+    // if this is the junk score property notify, as long as we're not going
+    // from no value to non junk
+    if (!strcmp(aProperty, "junkscore") &&
+        !(oldValue.IsEmpty() && aValue.Equals("0")))
+      NotifyJunkScoreChanged(nullptr);
+
+    // The loop is bounded by statusArray too, in case a listener was added
+    // between the pre- and post-call passes.
+    nsTObserverArray<nsCOMPtr<nsIDBChangeListener>>::ForwardIterator listeners(
+        m_ChangeListeners);
+    for (uint32_t i = 0; listeners.HasMore() && i < statusArray.Length(); i++) {
+      listener = listeners.GetNext();
+      uint32_t status = statusArray[i];
+      (void)listener->OnHdrPropertyChanged(msgHdr, nsCString(aProperty), false,
+                                           &status, nullptr);
+      // ignore errors
+    }
+  }
+
+  return NS_OK;
+}
+
+// Set a uint32 property on a header, notifying listeners before and after
+// the change. Mirrors SetStringPropertyByHdr: each listener's pre-call
+// status is remembered in statusArray and handed back in the post-call pass.
+NS_IMETHODIMP
+nsMsgDatabase::SetUint32PropertyByHdr(nsIMsgDBHdr* aMsgHdr,
+                                      const char* aProperty, uint32_t aValue) {
+  // If no change to this property, bail out.
+  uint32_t oldValue;
+  nsresult rv = aMsgHdr->GetUint32Property(aProperty, &oldValue);
+  NS_ENSURE_SUCCESS(rv, rv);
+  if (oldValue == aValue) return NS_OK;
+
+  // Don't do notifications if message not yet added to database.
+  bool notify = true;
+  nsMsgKey key = nsMsgKey_None;
+  aMsgHdr->GetMessageKey(&key);
+  ContainsKey(key, &notify);
+
+  // Precall OnHdrPropertyChanged to store prechange status.
+  nsTArray<uint32_t> statusArray(m_ChangeListeners.Length());
+  nsCOMPtr<nsIDBChangeListener> listener;
+  if (notify) {
+    nsTObserverArray<nsCOMPtr<nsIDBChangeListener>>::ForwardIterator listeners(
+        m_ChangeListeners);
+    while (listeners.HasMore()) {
+      listener = listeners.GetNext();
+      // initialize |status| because some implementations of
+      // OnHdrPropertyChanged does not set the value.
+      uint32_t status = 0;
+      (void)listener->OnHdrPropertyChanged(aMsgHdr, nsCString(aProperty), true,
+                                           &status, nullptr);
+      // Ignore errors, but append element to keep arrays in sync.
+      statusArray.AppendElement(status);
+    }
+  }
+
+  rv = aMsgHdr->SetUint32Property(aProperty, aValue);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // Postcall OnHdrPropertyChanged to process the change.
+  if (notify) {
+    nsTObserverArray<nsCOMPtr<nsIDBChangeListener>>::ForwardIterator listeners(
+        m_ChangeListeners);
+    // Bound the loop by statusArray as well (as SetStringPropertyByHdr
+    // does): a listener added between the pre- and post-call passes would
+    // otherwise make statusArray[i] read past the end of the array.
+    for (uint32_t i = 0; listeners.HasMore() && i < statusArray.Length(); i++) {
+      listener = listeners.GetNext();
+      uint32_t status = statusArray[i];
+      (void)listener->OnHdrPropertyChanged(aMsgHdr, nsCString(aProperty), false,
+                                           &status, nullptr);
+      // Ignore errors.
+    }
+  }
+
+  return NS_OK;
+}
+
+// Set/clear the IMAPDeleted flag on the given key.
+NS_IMETHODIMP nsMsgDatabase::MarkImapDeleted(nsMsgKey key, bool deleted,
+                                             nsIDBChangeListener* instigator) {
+  return SetKeyFlag(key, deleted, nsMsgMessageFlags::IMAPDeleted, instigator);
+}
+
+// Set/clear the MDNReportNeeded flag (return-receipt requested) on the key.
+NS_IMETHODIMP nsMsgDatabase::MarkMDNNeeded(
+    nsMsgKey key, bool bNeeded, nsIDBChangeListener* instigator /* = NULL */) {
+  return SetKeyFlag(key, bNeeded, nsMsgMessageFlags::MDNReportNeeded,
+                    instigator);
+}
+
+// Set/clear the MDNReportSent flag (return receipt sent) on the given key.
+nsresult nsMsgDatabase::MarkMDNSent(
+    nsMsgKey key, bool bSent, nsIDBChangeListener* instigator /* = NULL */) {
+  return SetKeyFlag(key, bSent, nsMsgMessageFlags::MDNReportSent, instigator);
+}
+
+// Report whether a return receipt has already been sent for this key.
+nsresult nsMsgDatabase::IsMDNSent(nsMsgKey key, bool* pSent) {
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+  if (!msgHdr) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  uint32_t hdrFlags;
+  (void)msgHdr->GetFlags(&hdrFlags);
+  *pSent = (hdrFlags & nsMsgMessageFlags::MDNReportSent) != 0;
+  return NS_OK;
+}
+
+// Resolve a key to its header and set/clear a flag on it via SetMsgHdrFlag.
+nsresult nsMsgDatabase::SetKeyFlag(nsMsgKey key, bool set,
+                                   nsMsgMessageFlagType flag,
+                                   nsIDBChangeListener* instigator) {
+  nsCOMPtr<nsIMsgDBHdr> msgHdr;
+  GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+  return msgHdr ? SetMsgHdrFlag(msgHdr, set, flag, instigator)
+                : NS_MSG_MESSAGE_NOT_FOUND;
+}
+
+// Set/clear a flag on a header and notify listeners — but only if the flag
+// actually changed (SetHdrFlag returns false for a no-op).
+nsresult nsMsgDatabase::SetMsgHdrFlag(nsIMsgDBHdr* msgHdr, bool set,
+                                      nsMsgMessageFlagType flag,
+                                      nsIDBChangeListener* instigator) {
+  uint32_t oldFlags;
+  (void)msgHdr->GetFlags(&oldFlags);
+
+  // No change means no notification.
+  if (!SetHdrFlag(msgHdr, set, flag)) return NS_OK;
+
+  uint32_t flags;
+  (void)msgHdr->GetFlags(&flags);
+
+  return NotifyHdrChangeAll(msgHdr, oldFlags, flags, instigator);
+}
+
+// Helper routine - lowest level of flag setting - returns true if flags change,
+// false otherwise.
+bool nsMsgDatabase::SetHdrFlag(nsIMsgDBHdr* msgHdr, bool bSet,
+                               nsMsgMessageFlagType flag) {
+  uint32_t storedFlags;
+  (void)msgHdr->GetFlags(&storedFlags);
+  // Compare against the *effective* flags — stored flags augmented by
+  // GetStatusFlags — so a no-op request is detected correctly.
+  uint32_t effectiveFlags = GetStatusFlags(msgHdr, storedFlags);
+
+  // Already in the requested state? Nothing to do.
+  if (((effectiveFlags & flag) != 0) == bSet) return false;
+
+  uint32_t resultFlags;
+  if (bSet)
+    msgHdr->OrFlags(flag, &resultFlags);
+  else
+    msgHdr->AndFlags(~flag, &resultFlags);
+  return true;
+}
+
+// Set a header's read state, updating its thread and the db, but only when
+// the state actually differs. Note the two IsHeaderRead calls: the first is
+// the explicitly-qualified nsMsgDatabase:: base implementation, the second
+// goes through virtual dispatch so subclass overrides are consulted too.
+NS_IMETHODIMP nsMsgDatabase::MarkHdrRead(nsIMsgDBHdr* msgHdr, bool bRead,
+                                         nsIDBChangeListener* instigator) {
+  bool isReadInDB = true;
+  nsresult rv = nsMsgDatabase::IsHeaderRead(msgHdr, &isReadInDB);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  bool isRead = true;
+  rv = IsHeaderRead(msgHdr, &isRead);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // if the flag is already correct in the db, don't change it.
+  // Check msg flags as well as IsHeaderRead in case it's a newsgroup
+  // and the msghdr flags are out of sync with the newsrc settings.
+  // (we could override this method for news db's, but it's a trivial fix here.
+  if (bRead != isRead || isRead != isReadInDB) {
+    nsMsgKey msgKey;
+    msgHdr->GetMessageKey(&msgKey);
+
+    // Let the containing thread adjust its unread-children bookkeeping,
+    // but only for headers actually stored in this db.
+    bool inDB = false;
+    (void)ContainsKey(msgKey, &inDB);
+
+    if (inDB) {
+      nsCOMPtr<nsIMsgThread> threadHdr;
+      rv = GetThreadForMsgKey(msgKey, getter_AddRefs(threadHdr));
+      if (threadHdr) threadHdr->MarkChildRead(bRead);
+    }
+
+#ifndef MOZ_SUITE
+    if (bRead) {
+      Telemetry::ScalarAdd(Telemetry::ScalarID::TB_MAILS_READ, 1);
+    }
+#endif
+
+    return MarkHdrReadInDB(msgHdr, bRead, instigator);
+  }
+  return NS_OK;
+}
+
+// Set/clear the Replied flag directly on a header.
+NS_IMETHODIMP nsMsgDatabase::MarkHdrReplied(nsIMsgDBHdr* msgHdr, bool bReplied,
+                                            nsIDBChangeListener* instigator) {
+  return SetMsgHdrFlag(msgHdr, bReplied, nsMsgMessageFlags::Replied,
+                       instigator);
+}
+
+// Set/clear the Marked (flagged/starred) flag directly on a header.
+NS_IMETHODIMP nsMsgDatabase::MarkHdrMarked(nsIMsgDBHdr* msgHdr, bool mark,
+                                           nsIDBChangeListener* instigator) {
+  return SetMsgHdrFlag(msgHdr, mark, nsMsgMessageFlags::Marked, instigator);
+}
+
+NS_IMETHODIMP
+nsMsgDatabase::MarkHdrNotNew(nsIMsgDBHdr* aMsgHdr,
+                             nsIDBChangeListener* aInstigator) {
+  NS_ENSURE_ARG_POINTER(aMsgHdr);
+
+  // Drop the key from the in-memory new set first, then clear the flag on
+  // the header itself (which also handles listener notification).
+  nsMsgKey key;
+  aMsgHdr->GetMessageKey(&key);
+  m_newSet.RemoveElement(key);
+  return SetMsgHdrFlag(aMsgHdr, false, nsMsgMessageFlags::New, aInstigator);
+}
+
+// Mark every unread message in this db as read, returning the keys that
+// were flipped, and force the folder's unread count to zero.
+NS_IMETHODIMP nsMsgDatabase::MarkAllRead(nsTArray<nsMsgKey>& aThoseMarked) {
+  aThoseMarked.ClearAndRetainStorage();
+
+  nsCOMPtr<nsIMsgEnumerator> hdrs;
+  nsresult rv = EnumerateMessages(getter_AddRefs(hdrs));
+  NS_ENSURE_SUCCESS(rv, rv);
+  bool hasMore = false;
+
+  // Walk every header, marking the unread ones and recording their keys.
+  while (NS_SUCCEEDED(rv = hdrs->HasMoreElements(&hasMore)) && hasMore) {
+    nsCOMPtr<nsIMsgDBHdr> msg;
+    rv = hdrs->GetNext(getter_AddRefs(msg));
+    if (NS_FAILED(rv)) break;
+
+    bool isRead;
+    IsHeaderRead(msg, &isRead);
+
+    if (!isRead) {
+      nsMsgKey key;
+      (void)msg->GetMessageKey(&key);
+      aThoseMarked.AppendElement(key);
+      rv = MarkHdrRead(msg, true, nullptr);  // ### dmb - blow off error?
+    }
+  }
+
+  // force num new to 0.
+  // Guard m_dbFolderInfo as the other mutators (DeleteHeader,
+  // MarkHdrReadInDB) do, instead of dereferencing it unconditionally.
+  if (m_dbFolderInfo) {
+    int32_t numUnreadMessages;
+    rv = m_dbFolderInfo->GetNumUnreadMessages(&numUnreadMessages);
+    if (NS_SUCCEEDED(rv))
+      m_dbFolderInfo->ChangeNumUnreadMessages(-numUnreadMessages);
+  }
+  // caller will Commit the db, so no need to do it here.
+  return rv;
+}
+
+// Record a key as "new". Keys are only appended when strictly greater than
+// the current last element, which keeps m_newSet sorted (required by the
+// BinaryIndexOf lookups elsewhere); duplicate or out-of-order keys are
+// silently dropped.
+NS_IMETHODIMP nsMsgDatabase::AddToNewList(nsMsgKey key) {
+  // we add new keys in increasing order...
+  if (m_newSet.IsEmpty() || (m_newSet[m_newSet.Length() - 1] < key))
+    m_newSet.AppendElement(key);
+  return NS_OK;
+}
+
+// Empty the new-message set. When notify is true, walk the saved keys
+// (newest first) and tell listeners each header lost its New status.
+NS_IMETHODIMP nsMsgDatabase::ClearNewList(bool notify /* = FALSE */) {
+  if (notify && !m_newSet.IsEmpty())  // need to update view
+  {
+    nsTArray<nsMsgKey> saveNewSet;
+    // clear m_newSet so that the code that's listening to the key change
+    // doesn't think we have new messages and send notifications all over
+    // that we have new messages.
+    saveNewSet.SwapElements(m_newSet);
+    // Reverse iteration with an unsigned index: the explicit
+    // `elementIndex == 0` break at the bottom prevents wrap-around.
+    for (uint32_t elementIndex = saveNewSet.Length() - 1;; elementIndex--) {
+      nsMsgKey lastNewKey = saveNewSet.ElementAt(elementIndex);
+      nsCOMPtr<nsIMsgDBHdr> msgHdr;
+      GetMsgHdrForKey(lastNewKey, getter_AddRefs(msgHdr));
+      if (msgHdr) {
+        uint32_t flags;
+        (void)msgHdr->GetFlags(&flags);
+
+        // Only notify when the stored New bit is clear (AddNewHdrToDB
+        // clears the stored bit while tracking New-ness via m_newSet).
+        if ((flags | nsMsgMessageFlags::New) != flags) {
+          msgHdr->AndFlags(~nsMsgMessageFlags::New, &flags);
+          NotifyHdrChangeAll(msgHdr, flags | nsMsgMessageFlags::New, flags,
+                             nullptr);
+        }
+      }
+      if (elementIndex == 0) break;
+    }
+  }
+  return NS_OK;
+}
+
+// Report whether any messages are currently tracked as new.
+NS_IMETHODIMP nsMsgDatabase::HasNew(bool* _retval) {
+  if (!_retval) return NS_ERROR_NULL_POINTER;
+  *_retval = !m_newSet.IsEmpty();
+  return NS_OK;
+}
+
+// Return the oldest tracked new key, or nsMsgKey_None if there is none.
+NS_IMETHODIMP nsMsgDatabase::GetFirstNew(nsMsgKey* result) {
+  bool hasNew;
+  nsresult rv = HasNew(&hasNew);
+  if (NS_FAILED(rv)) return rv;
+
+  // m_newSet is kept in increasing key order, so element 0 is the first.
+  *result = hasNew ? m_newSet.ElementAt(0) : nsMsgKey_None;
+  return NS_OK;
+}
+
+// Create a forward enumerator over every header in the all-headers table.
+NS_IMETHODIMP
+nsMsgDatabase::EnumerateMessages(nsIMsgEnumerator** result) {
+  RememberLastUseTime();
+  NS_ENSURE_ARG_POINTER(result);
+  NS_ADDREF(*result = new nsMsgDBEnumerator(this, m_mdbAllMsgHeadersTable,
+                                            nullptr, nullptr));
+  return NS_OK;
+}
+
+// Create a reverse (last-to-first) enumerator over every header.
+NS_IMETHODIMP
+nsMsgDatabase::ReverseEnumerateMessages(nsIMsgEnumerator** result) {
+  NS_ENSURE_ARG_POINTER(result);
+  NS_ADDREF(*result = new nsMsgDBEnumerator(this, m_mdbAllMsgHeadersTable,
+                                            nullptr, nullptr, false));
+  return NS_OK;
+}
+
+// Create an enumerator that yields only headers matching the given search
+// terms, optionally iterating in reverse.
+NS_IMETHODIMP
+nsMsgDatabase::GetFilterEnumerator(
+    const nsTArray<RefPtr<nsIMsgSearchTerm>>& searchTerms, bool aReverse,
+    nsIMsgEnumerator** aResult) {
+  NS_ENSURE_ARG_POINTER(aResult);
+  RefPtr<nsMsgFilteredDBEnumerator> e =
+      new nsMsgFilteredDBEnumerator(this, m_mdbAllMsgHeadersTable, aReverse);
+
+  NS_ENSURE_TRUE(e, NS_ERROR_OUT_OF_MEMORY);
+  // The search session evaluates the terms against this db's folder.
+  nsresult rv = e->InitSearchSession(searchTerms, m_folder);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  e.forget(aResult);
+  return NS_OK;
+}
+
+// Recount total and unread messages by walking every header, then adjust
+// the folder-info counters to match the recount.
+NS_IMETHODIMP
+nsMsgDatabase::SyncCounts() {
+  nsCOMPtr<nsIMsgEnumerator> hdrs;
+  nsresult rv = EnumerateMessages(getter_AddRefs(hdrs));
+  if (NS_FAILED(rv)) return rv;
+  bool hasMore = false;
+
+  mdb_count numHdrsInTable = 0;
+  int32_t numUnread = 0;
+  int32_t numHdrs = 0;
+
+  if (m_mdbAllMsgHeadersTable)
+    m_mdbAllMsgHeadersTable->GetCount(GetEnv(), &numHdrsInTable);
+  else
+    return NS_ERROR_NULL_POINTER;
+
+  // Count every header and every unread header.
+  while (NS_SUCCEEDED(rv = hdrs->HasMoreElements(&hasMore)) && hasMore) {
+    nsCOMPtr<nsIMsgDBHdr> header;
+    rv = hdrs->GetNext(getter_AddRefs(header));
+    NS_ASSERTION(NS_SUCCEEDED(rv), "nsMsgDBEnumerator broken");
+    if (NS_FAILED(rv)) break;
+
+    bool isRead;
+    IsHeaderRead(header, &isRead);
+    if (!isRead) numUnread++;
+    numHdrs++;
+  }
+
+  // Apply only the delta between stored and recounted values.
+  int32_t oldTotal, oldUnread;
+  (void)m_dbFolderInfo->GetNumUnreadMessages(&oldUnread);
+  (void)m_dbFolderInfo->GetNumMessages(&oldTotal);
+  if (oldUnread != numUnread)
+    m_dbFolderInfo->ChangeNumUnreadMessages(numUnread - oldUnread);
+  if (oldTotal != numHdrs)
+    m_dbFolderInfo->ChangeNumMessages(numHdrs - oldTotal);
+  return NS_OK;
+}
+
+// Collect every message key in the all-headers table into |keys|, in
+// table-row order.
+NS_IMETHODIMP nsMsgDatabase::ListAllKeys(nsTArray<nsMsgKey>& keys) {
+  nsresult rv = NS_OK;
+  nsCOMPtr<nsIMdbTableRowCursor> rowCursor;
+  RememberLastUseTime();
+  keys.Clear();
+
+  if (m_mdbAllMsgHeadersTable) {
+    // Pre-size the array from the table's row count to avoid reallocations.
+    uint32_t numMsgs = 0;
+    m_mdbAllMsgHeadersTable->GetCount(GetEnv(), &numMsgs);
+    keys.SetCapacity(numMsgs);
+    rv = m_mdbAllMsgHeadersTable->GetTableRowCursor(GetEnv(), -1,
+                                                    getter_AddRefs(rowCursor));
+    while (NS_SUCCEEDED(rv) && rowCursor) {
+      mdbOid outOid;
+      mdb_pos outPos;
+
+      rv = rowCursor->NextRowOid(GetEnv(), &outOid, &outPos);
+      // is this right? Mork is returning a 0 id, but that should valid.
+      // End-of-table is signalled by a negative position or sentinel id.
+      if (outPos < 0 || outOid.mOid_Id == (mdb_id)-1) break;
+      if (NS_SUCCEEDED(rv)) keys.AppendElement(outOid.mOid_Id);
+    }
+  }
+  return rv;
+}
+
+// Create an enumerator over every thread in this db.
+NS_IMETHODIMP
+nsMsgDatabase::EnumerateThreads(nsIMsgThreadEnumerator** result) {
+  RememberLastUseTime();
+  NS_ADDREF(*result = new nsMsgDBThreadEnumerator(this, nullptr));
+  return NS_OK;
+}
+
+// only return headers with a particular flag set
+// Enumerator filter callback: |closure| points at the desired flag mask.
+static nsresult nsMsgFlagSetFilter(nsIMsgDBHdr* msg, void* closure) {
+  uint32_t desiredFlags = *static_cast<uint32_t*>(closure);
+  uint32_t msgFlags;
+  msg->GetFlags(&msgFlags);
+  return (msgFlags & desiredFlags) ? NS_OK : NS_ERROR_FAILURE;
+}
+
+// Create an enumerator that yields only headers carrying *pFlag; the flag
+// pointer must stay alive for the enumerator's lifetime.
+nsresult nsMsgDatabase::EnumerateMessagesWithFlag(nsIMsgEnumerator** result,
+                                                  uint32_t* pFlag) {
+  RememberLastUseTime();
+  NS_ADDREF(*result = new nsMsgDBEnumerator(this, m_mdbAllMsgHeadersTable,
+                                            nsMsgFlagSetFilter, pFlag));
+  return NS_OK;
+}
+
+// Create (but don't yet add to the table) a header row for |key|. With
+// nsMsgKey_None, Mork picks the next available id; running out of ids
+// triggers a forced folder reparse.
+NS_IMETHODIMP nsMsgDatabase::CreateNewHdr(nsMsgKey key, nsIMsgDBHdr** pnewHdr) {
+  nsresult err = NS_OK;
+  nsIMdbRow* hdrRow = nullptr;
+  struct mdbOid allMsgHdrsTableOID;
+
+  if (!pnewHdr || !m_mdbAllMsgHeadersTable || !m_mdbStore)
+    return NS_ERROR_NULL_POINTER;
+
+  if (key != nsMsgKey_None) {
+    allMsgHdrsTableOID.mOid_Scope = m_hdrRowScopeToken;
+    allMsgHdrsTableOID.mOid_Id = key;  // presumes 0 is valid key value
+
+    // Reuse an existing row with this oid if there is one; otherwise mint it.
+    err = m_mdbStore->GetRow(GetEnv(), &allMsgHdrsTableOID, &hdrRow);
+    if (!hdrRow)
+      err = m_mdbStore->NewRowWithOid(GetEnv(), &allMsgHdrsTableOID, &hdrRow);
+  } else {
+    // Mork will assign an ID to the new row, generally the next available ID.
+    err = m_mdbStore->NewRow(GetEnv(), m_hdrRowScopeToken, &hdrRow);
+    if (hdrRow) {
+      struct mdbOid oid;
+      hdrRow->GetOid(GetEnv(), &oid);
+      key = oid.mOid_Id;
+    } else {
+      // We failed to create a new row. That can happen if we run out of keys,
+      // which will force a reparse.
+      nsTArray<nsMsgKey> keys;
+      if (NS_SUCCEEDED(ListAllKeys(keys))) {
+        for (nsMsgKey key : keys) {
+          if (key >= kForceReparseKey) {
+            // Force a reparse.
+            if (m_dbFolderInfo)
+              m_dbFolderInfo->SetBooleanProperty("forceReparse", true);
+            break;
+          }
+        }
+      }
+      err = NS_MSG_ERROR_FOLDER_SUMMARY_OUT_OF_DATE;
+    }
+  }
+  if (NS_FAILED(err)) return err;
+  err = CreateMsgHdr(hdrRow, key, pnewHdr);
+  return err;
+}
+
+// Thread a freshly created header and commit it to the all-headers table,
+// updating folder counts (total/unread), the new-message list and — when
+// correct threading is enabled — the reference hash. Optionally fires the
+// hdr-added notification. Fails if the key already exists in the DB.
+NS_IMETHODIMP nsMsgDatabase::AddNewHdrToDB(nsIMsgDBHdr* newHdr, bool notify) {
+  NS_ENSURE_ARG_POINTER(newHdr);
+  nsMsgHdr* hdr = static_cast<nsMsgHdr*>(newHdr); // closed system, cast ok
+  bool newThread;
+  bool hasKey = false;
+  nsMsgKey msgKey = nsMsgKey_None;
+  (void)hdr->GetMessageKey(&msgKey);
+  (void)ContainsKey(msgKey, &hasKey);
+  if (hasKey) {
+    NS_ERROR("adding hdr that already exists");
+    return NS_ERROR_FAILURE;
+  }
+  nsresult err = ThreadNewHdr(hdr, newThread);
+  // we thread header before we add it to the all headers table
+  // so that subject and reference threading will work (otherwise,
+  // when we try to find the first header with the same subject or
+  // reference, we get the new header!)
+  if (NS_SUCCEEDED(err)) {
+    nsMsgKey key;
+    uint32_t flags;
+
+    newHdr->GetMessageKey(&key);
+    hdr->GetRawFlags(&flags);
+    // use raw flags instead of GetFlags, because GetFlags will
+    // pay attention to what's in m_newSet, and this new hdr isn't
+    // in m_newSet yet.
+    if (flags & nsMsgMessageFlags::New) {
+      uint32_t newFlags;
+      newHdr->AndFlags(~nsMsgMessageFlags::New,
+                       &newFlags); // make sure not filed out
+      AddToNewList(key);
+    }
+    if (m_dbFolderInfo) {
+      // Keep the folder's message/unread counts in sync with the new entry.
+      m_dbFolderInfo->ChangeNumMessages(1);
+      bool isRead = true;
+      IsHeaderRead(newHdr, &isRead);
+      if (!isRead) m_dbFolderInfo->ChangeNumUnreadMessages(1);
+      m_dbFolderInfo->OnKeyAdded(key);
+    }
+
+    err = m_mdbAllMsgHeadersTable->AddRow(GetEnv(), hdr->GetMDBRow());
+    if (notify) {
+      nsMsgKey threadParent;
+
+      newHdr->GetThreadParent(&threadParent);
+      NotifyHdrAddedAll(newHdr, threadParent, flags, NULL);
+    }
+
+    if (UseCorrectThreading()) err = AddMsgRefsToHash(newHdr);
+  }
+  NS_ASSERTION(NS_SUCCEEDED(err), "error creating thread");
+  return err;
+}
+
+// Clone an existing header's row cells into a new header stored under the
+// given key. When addHdrToDB is true the copy is also threaded and added to
+// the database. newHdr is optional; when non-null it receives an owning
+// (addref'ed) reference to the copy.
+// Fix: CreateNewHdr() returns an addref'ed header, but the previous code
+// never released it on the !sourceRow / !destRow / SetRow-failure paths, or
+// when the caller passed newHdr == nullptr. Holding it in an nsCOMPtr and
+// forget()-ing it into *newHdr closes those reference leaks without changing
+// any observable behavior.
+NS_IMETHODIMP nsMsgDatabase::CopyHdrFromExistingHdr(nsMsgKey key,
+                                                    nsIMsgDBHdr* existingHdr,
+                                                    bool addHdrToDB,
+                                                    nsIMsgDBHdr** newHdr) {
+  nsresult err = NS_OK;
+
+  if (existingHdr) {
+    nsMsgHdr* sourceMsgHdr =
+        static_cast<nsMsgHdr*>(existingHdr); // closed system, cast ok
+    nsCOMPtr<nsIMsgDBHdr> destHdr;
+    CreateNewHdr(key, getter_AddRefs(destHdr));
+    nsIMdbRow* sourceRow = sourceMsgHdr->GetMDBRow();
+    if (!destHdr || !sourceRow) return NS_MSG_MESSAGE_NOT_FOUND;
+
+    nsMsgHdr* destMsgHdr =
+        static_cast<nsMsgHdr*>(destHdr.get()); // closed system, cast ok
+    nsIMdbRow* destRow = destMsgHdr->GetMDBRow();
+    if (!destRow) return NS_ERROR_UNEXPECTED;
+
+    err = destRow->SetRow(GetEnv(), sourceRow);
+    if (NS_SUCCEEDED(err)) {
+      // we may have gotten the header from a cache - calling SetRow
+      // basically invalidates any cached values, so invalidate them.
+      destMsgHdr->ClearCachedValues();
+      if (addHdrToDB) err = AddNewHdrToDB(destMsgHdr, true);
+      if (NS_SUCCEEDED(err) && newHdr) destHdr.forget(newHdr);
+    }
+  }
+  return err;
+}
+
+// Read the cell for columnToken from hdrRow and convert its UTF-8 bytes to
+// a UTF-16 nsAString. resultStr is emptied when the cell has no buffer.
+nsresult nsMsgDatabase::RowCellColumnTonsString(nsIMdbRow* hdrRow,
+                                                mdb_token columnToken,
+                                                nsAString& resultStr) {
+  NS_ENSURE_ARG_POINTER(hdrRow);
+
+  struct mdbYarn yarn;
+  nsresult rv = hdrRow->AliasCellYarn(GetEnv(), columnToken, &yarn);
+  NS_ENSURE_SUCCESS(rv, rv);
+  YarnTonsString(&yarn, resultStr);
+  return NS_OK;
+}
+
+// as long as the row still exists, and isn't changed, the returned const char
+// ** will be valid. But be very careful using this data - the caller should
+// never return it in turn to another caller.
+// NOTE(review): the buffer aliases Mork's internal cell storage and is not
+// guaranteed to be null-terminated by this function itself — confirm callers
+// rely only on Mork's own termination.
+nsresult nsMsgDatabase::RowCellColumnToConstCharPtr(nsIMdbRow* hdrRow,
+                                                    mdb_token columnToken,
+                                                    const char** ptr) {
+  NS_ENSURE_ARG_POINTER(hdrRow);
+
+  struct mdbYarn yarn;
+  nsresult rv = hdrRow->AliasCellYarn(GetEnv(), columnToken, &yarn);
+  NS_ENSURE_SUCCESS(rv, rv);
+  *ptr = (const char*)yarn.mYarn_Buf;
+  return NS_OK;
+}
+
+// Lazily acquire and cache the MIME converter service used for decoding
+// RFC 2047 encoded headers. May return null if the service is unavailable.
+nsIMimeConverter* nsMsgDatabase::GetMimeConverter() {
+  if (!m_mimeConverter) {
+    // apply mime decode
+    m_mimeConverter = do_GetService("@mozilla.org/messenger/mimeconverter;1");
+  }
+  return m_mimeConverter;
+}
+
+// Determine the charset to use when decoding this row's headers: the row's
+// stored charset, falling back to UTF-8 (or the news folder's charset) when
+// the cell is missing, empty, or "us-ascii".
+// NOTE(review): on the fallback path resultCharset is still usable even
+// though the original (failed) rv is returned — callers appear to ignore rv.
+nsresult nsMsgDatabase::GetEffectiveCharset(nsIMdbRow* row,
+                                            nsACString& resultCharset) {
+  resultCharset.Truncate();
+  nsresult rv = RowCellColumnToCharPtr(row, m_messageCharSetColumnToken,
+                                       getter_Copies(resultCharset));
+  if (NS_FAILED(rv) || resultCharset.IsEmpty() ||
+      resultCharset.EqualsLiteral("us-ascii")) {
+    resultCharset.AssignLiteral("UTF-8");
+    nsCOMPtr<nsIMsgNewsFolder> newsfolder(do_QueryInterface(m_folder));
+    if (newsfolder) newsfolder->GetCharset(resultCharset);
+  }
+  return rv;
+}
+
+// Read the cell for columnToken and MIME-decode it (RFC 2047) into UTF-16,
+// using the row's effective charset. resultStr is untouched when the cell is
+// absent/empty or the MIME converter is unavailable.
+nsresult nsMsgDatabase::RowCellColumnToMime2DecodedString(
+    nsIMdbRow* row, mdb_token columnToken, nsAString& resultStr) {
+  nsresult err = NS_OK;
+  const char* nakedString = nullptr;
+  err = RowCellColumnToConstCharPtr(row, columnToken, &nakedString);
+  if (NS_SUCCEEDED(err) && nakedString && strlen(nakedString)) {
+    GetMimeConverter();
+    if (m_mimeConverter) {
+      nsAutoString decodedStr;
+      nsCString charSet;
+      GetEffectiveCharset(row, charSet);
+
+      err = m_mimeConverter->DecodeMimeHeader(nakedString, charSet.get(), false,
+                                              true, resultStr);
+    }
+  }
+  return err;
+}
+
+// Build a collation key for an address-valued column: decode the header,
+// extract the display name from the address, then derive the sort key.
+nsresult nsMsgDatabase::RowCellColumnToAddressCollationKey(
+    nsIMdbRow* row, mdb_token colToken, nsTArray<uint8_t>& result) {
+  nsString sender;
+  nsresult rv = RowCellColumnToMime2DecodedString(row, colToken, sender);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  nsString name;
+  ExtractName(DecodedHeader(sender), name);
+  return CreateCollationKey(name, result);
+}
+
+// Lazily create and cache the Intl collator used for sort-key generation,
+// configured for case-/accent-insensitive ("Base" sensitivity) comparison.
+nsresult nsMsgDatabase::GetCollationKeyGenerator() {
+  if (!m_collationKeyGenerator) {
+    auto result = mozilla::intl::LocaleService::TryCreateComponent<Collator>();
+    if (result.isErr()) {
+      NS_WARNING("Could not create mozilla::intl::Collation.");
+      return NS_ERROR_FAILURE;
+    }
+
+    m_collationKeyGenerator = result.unwrap();
+
+    // Sort in a case-insensitive way, where "base" letters are considered
+    // equal, e.g: a = á, a = A, a ≠ b.
+    Collator::Options options{};
+    options.sensitivity = Collator::Sensitivity::Base;
+    auto optResult = m_collationKeyGenerator->SetOptions(options);
+
+    if (optResult.isErr()) {
+      NS_WARNING("Could not configure the mozilla::intl::Collation.");
+      // Drop the half-configured collator so a later call can retry.
+      m_collationKeyGenerator = nullptr;
+      return NS_ERROR_FAILURE;
+    }
+  }
+  return NS_OK;
+}
+
+// Build a collation key for a string-valued column: MIME-decode the cell to
+// UTF-8 using the row's effective charset, then derive the sort key.
+nsresult nsMsgDatabase::RowCellColumnToCollationKey(nsIMdbRow* row,
+                                                    mdb_token columnToken,
+                                                    nsTArray<uint8_t>& result) {
+  const char* nakedString = nullptr;
+  nsresult err;
+
+  err = RowCellColumnToConstCharPtr(row, columnToken, &nakedString);
+  // Treat a missing cell as the empty string so we still produce a key.
+  if (!nakedString) nakedString = "";
+  if (NS_SUCCEEDED(err)) {
+    GetMimeConverter();
+    if (m_mimeConverter) {
+      nsCString decodedStr;
+      nsCString charSet;
+      GetEffectiveCharset(row, charSet);
+
+      err = m_mimeConverter->DecodeMimeHeaderToUTF8(
+          nsDependentCString(nakedString), charSet.get(), false, true,
+          decodedStr);
+      if (NS_SUCCEEDED(err))
+        err = CreateCollationKey(NS_ConvertUTF8toUTF16(decodedStr), result);
+    }
+  }
+  return err;
+}
+
+// Compare two previously generated collation sort keys; *result follows the
+// usual <0 / 0 / >0 comparison convention of the collator.
+NS_IMETHODIMP
+nsMsgDatabase::CompareCollationKeys(const nsTArray<uint8_t>& key1,
+                                    const nsTArray<uint8_t>& key2,
+                                    int32_t* result) {
+  nsresult rv = GetCollationKeyGenerator();
+  NS_ENSURE_SUCCESS(rv, rv);
+  if (!m_collationKeyGenerator) return NS_ERROR_FAILURE;
+
+  *result = m_collationKeyGenerator->CompareSortKeys(key1, key2);
+  return NS_OK;
+}
+
+// Generate a locale-aware sort key for sourceString into `key`, suitable for
+// later use with CompareCollationKeys().
+NS_IMETHODIMP
+nsMsgDatabase::CreateCollationKey(const nsAString& sourceString,
+                                  nsTArray<uint8_t>& key) {
+  nsresult err = GetCollationKeyGenerator();
+  NS_ENSURE_SUCCESS(err, err);
+  if (!m_collationKeyGenerator) return NS_ERROR_FAILURE;
+
+  nsTArrayU8Buffer buffer(key);
+
+  auto result = m_collationKeyGenerator->GetSortKey(sourceString, buffer);
+  NS_ENSURE_TRUE(result.isOk(), NS_ERROR_FAILURE);
+
+  return NS_OK;
+}
+
+// Reference-style convenience wrapper over the pointer-based overload below.
+nsresult nsMsgDatabase::RowCellColumnToUInt32(nsIMdbRow* hdrRow,
+                                              mdb_token columnToken,
+                                              uint32_t& uint32Result,
+                                              uint32_t defaultValue) {
+  return RowCellColumnToUInt32(hdrRow, columnToken, &uint32Result,
+                               defaultValue);
+}
+
+// Read a hex-encoded uint32 cell. *uint32Result is preset to defaultValue
+// and only overwritten when the cell holds data (see YarnToUInt32).
+nsresult nsMsgDatabase::RowCellColumnToUInt32(nsIMdbRow* hdrRow,
+                                              mdb_token columnToken,
+                                              uint32_t* uint32Result,
+                                              uint32_t defaultValue) {
+  nsresult err = NS_OK;
+
+  if (uint32Result) *uint32Result = defaultValue;
+  if (hdrRow) // ### probably should be an error if hdrRow is NULL...
+  {
+    struct mdbYarn yarn;
+    err = hdrRow->AliasCellYarn(GetEnv(), columnToken, &yarn);
+    if (NS_SUCCEEDED(err)) YarnToUInt32(&yarn, uint32Result);
+  }
+  return err;
+}
+
+// Store a uint32 in the given column as lowercase hex text.
+nsresult nsMsgDatabase::UInt32ToRowCellColumn(nsIMdbRow* row,
+                                              mdb_token columnToken,
+                                              uint32_t value) {
+  struct mdbYarn yarn;
+  char yarnBuf[100];
+
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  yarn.mYarn_Buf = (void*)yarnBuf;
+  yarn.mYarn_Size = sizeof(yarnBuf);
+  // mYarn_Fill is recomputed to the printed length by UInt32ToYarn below.
+  yarn.mYarn_Fill = yarn.mYarn_Size;
+  yarn.mYarn_Form = 0;
+  yarn.mYarn_Grow = NULL;
+  return row->AddColumn(GetEnv(), columnToken, UInt32ToYarn(&yarn, value));
+}
+
+// Store a uint64 in the given column as lowercase hex text.
+nsresult nsMsgDatabase::UInt64ToRowCellColumn(nsIMdbRow* row,
+                                              mdb_token columnToken,
+                                              uint64_t value) {
+  NS_ENSURE_ARG_POINTER(row);
+  struct mdbYarn yarn;
+  char yarnBuf[17]; // max string is 16 bytes, + 1 for null.
+
+  yarn.mYarn_Buf = (void*)yarnBuf;
+  yarn.mYarn_Size = sizeof(yarnBuf);
+  yarn.mYarn_Form = 0;
+  yarn.mYarn_Grow = NULL;
+  PR_snprintf((char*)yarn.mYarn_Buf, yarn.mYarn_Size, "%llx", value);
+  yarn.mYarn_Fill = PL_strlen((const char*)yarn.mYarn_Buf);
+  return row->AddColumn(GetEnv(), columnToken, &yarn);
+}
+
+// Read a hex-encoded uint64 cell. *uint64Result is preset to defaultValue
+// and only overwritten when the cell holds data (see YarnToUInt64).
+nsresult nsMsgDatabase::RowCellColumnToUInt64(nsIMdbRow* hdrRow,
+                                              mdb_token columnToken,
+                                              uint64_t* uint64Result,
+                                              uint64_t defaultValue) {
+  nsresult err = NS_OK;
+
+  if (uint64Result) *uint64Result = defaultValue;
+  if (hdrRow) // ### probably should be an error if hdrRow is NULL...
+  {
+    struct mdbYarn yarn;
+    err = hdrRow->AliasCellYarn(GetEnv(), columnToken, &yarn);
+    if (NS_SUCCEEDED(err)) YarnToUInt64(&yarn, uint64Result);
+  }
+  return err;
+}
+
+// Store a null-terminated C string in the given column (the terminator is
+// included in mYarn_Size but not mYarn_Fill).
+nsresult nsMsgDatabase::CharPtrToRowCellColumn(nsIMdbRow* row,
+                                               mdb_token columnToken,
+                                               const char* charPtr) {
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  struct mdbYarn yarn;
+  yarn.mYarn_Buf = (void*)charPtr;
+  yarn.mYarn_Size = PL_strlen((const char*)yarn.mYarn_Buf) + 1;
+  yarn.mYarn_Fill = yarn.mYarn_Size - 1;
+  yarn.mYarn_Form =
+      0; // what to do with this? we're storing csid in the msg hdr...
+
+  return row->AddColumn(GetEnv(), columnToken, &yarn);
+}
+
+// caller must free result
+// Copies the cell's bytes into a freshly allocated, null-terminated buffer.
+nsresult nsMsgDatabase::RowCellColumnToCharPtr(nsIMdbRow* row,
+                                               mdb_token columnToken,
+                                               char** result) {
+  nsresult err = NS_ERROR_NULL_POINTER;
+
+  if (row && result) {
+    struct mdbYarn yarn;
+    err = row->AliasCellYarn(GetEnv(), columnToken, &yarn);
+    if (NS_SUCCEEDED(err)) {
+      *result = (char*)moz_xmalloc(yarn.mYarn_Fill + 1);
+      // NOTE(review): moz_xmalloc is documented as infallible elsewhere in
+      // Gecko, which would make this OOM branch dead code — confirm.
+      if (*result) {
+        if (yarn.mYarn_Fill > 0)
+          memcpy(*result, yarn.mYarn_Buf, yarn.mYarn_Fill);
+        (*result)[yarn.mYarn_Fill] = '\0';
+      } else
+        err = NS_ERROR_OUT_OF_MEMORY;
+    }
+  }
+  return err;
+}
+
+// Fill a yarn from a UTF-16 string; allocates a UTF-8 buffer that the
+// caller must free() (see SetNSStringPropertyWithToken).
+/* static */ struct mdbYarn* nsMsgDatabase::nsStringToYarn(
+    struct mdbYarn* yarn, const nsAString& str) {
+  yarn->mYarn_Buf = ToNewCString(NS_ConvertUTF16toUTF8(str));
+  yarn->mYarn_Size = strlen((const char*)yarn->mYarn_Buf) + 1;
+  yarn->mYarn_Fill = yarn->mYarn_Size - 1;
+  yarn->mYarn_Form =
+      0; // what to do with this? we're storing csid in the msg hdr...
+  return yarn;
+}
+
+// Print a uint32 as lowercase hex into the yarn's caller-supplied buffer.
+/* static */ struct mdbYarn* nsMsgDatabase::UInt32ToYarn(struct mdbYarn* yarn,
+                                                         uint32_t i) {
+  PR_snprintf((char*)yarn->mYarn_Buf, yarn->mYarn_Size, "%lx", i);
+  yarn->mYarn_Fill = PL_strlen((const char*)yarn->mYarn_Buf);
+  yarn->mYarn_Form =
+      0; // what to do with this? Should be parsed out of the mime2 header?
+  return yarn;
+}
+
+// Print a uint64 as lowercase hex into the yarn's caller-supplied buffer.
+/* static */ struct mdbYarn* nsMsgDatabase::UInt64ToYarn(struct mdbYarn* yarn,
+                                                         uint64_t i) {
+  PR_snprintf((char*)yarn->mYarn_Buf, yarn->mYarn_Size, "%llx", i);
+  yarn->mYarn_Fill = PL_strlen((const char*)yarn->mYarn_Buf);
+  yarn->mYarn_Form = 0;
+  return yarn;
+}
+
+// Convert a yarn's UTF-8 bytes to UTF-16; empties str for a null buffer.
+/* static */ void nsMsgDatabase::YarnTonsString(struct mdbYarn* yarn,
+                                                nsAString& str) {
+  const char* buf = (const char*)yarn->mYarn_Buf;
+  if (buf)
+    CopyUTF8toUTF16(Substring(buf, buf + yarn->mYarn_Fill), str);
+  else
+    str.Truncate();
+}
+
+// Copy a yarn's raw bytes into a narrow string; empties str for a null
+// buffer.
+/* static */ void nsMsgDatabase::YarnTonsCString(struct mdbYarn* yarn,
+                                                 nsACString& str) {
+  const char* buf = (const char*)yarn->mYarn_Buf;
+  if (buf)
+    str.Assign(buf, yarn->mYarn_Fill);
+  else
+    str.Truncate();
+}
+
+// WARNING - if yarn is empty, *pResult will not be changed!!!!
+// this is so we can leave default values as they were.
+// Parses at most 8 hex characters from the yarn.
+/* static */ void nsMsgDatabase::YarnToUInt32(struct mdbYarn* yarn,
+                                              uint32_t* pResult) {
+  uint8_t numChars = std::min<mdb_fill>(8, yarn->mYarn_Fill);
+
+  if (numChars == 0) return;
+
+  *pResult = MsgUnhex((char*)yarn->mYarn_Buf, numChars);
+}
+
+// WARNING - if yarn is empty, *pResult will not be changed!!!!
+// this is so we can leave default values as they were.
+// Parses at most 16 hex characters from the yarn.
+/* static */ void nsMsgDatabase::YarnToUInt64(struct mdbYarn* yarn,
+                                              uint64_t* pResult) {
+  uint8_t numChars = std::min<mdb_fill>(16, yarn->mYarn_Fill);
+
+  if (numChars == 0) return;
+
+  *pResult = MsgUnhex((char*)yarn->mYarn_Buf, numChars);
+}
+
+// Fetch a named string property from a row; *result is heap-allocated and
+// must be freed by the caller (see RowCellColumnToCharPtr).
+nsresult nsMsgDatabase::GetProperty(nsIMdbRow* row, const char* propertyName,
+                                    char** result) {
+  nsresult err = NS_OK;
+  mdb_token property_token;
+
+  if (m_mdbStore)
+    err = m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  else
+    err = NS_ERROR_NULL_POINTER;
+  if (NS_SUCCEEDED(err))
+    err = RowCellColumnToCharPtr(row, property_token, result);
+
+  return err;
+}
+
+// Store a named C-string property on a row.
+// NOTE(review): the result of CharPtrToRowCellColumn is discarded, so a
+// failed column write still returns success — confirm this is intentional.
+nsresult nsMsgDatabase::SetProperty(nsIMdbRow* row, const char* propertyName,
+                                    const char* propertyVal) {
+  nsresult err = NS_OK;
+  mdb_token property_token;
+
+  NS_ENSURE_STATE(m_mdbStore); // db might have been closed out from under us.
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  err = m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  if (NS_SUCCEEDED(err))
+    CharPtrToRowCellColumn(row, property_token, propertyVal);
+  return err;
+}
+
+// Fetch a named property as a UTF-16 string.
+nsresult nsMsgDatabase::GetPropertyAsNSString(nsIMdbRow* row,
+                                              const char* propertyName,
+                                              nsAString& result) {
+  nsresult err = NS_OK;
+  mdb_token property_token;
+
+  NS_ENSURE_STATE(m_mdbStore); // db might have been closed out from under us.
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  err = m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  if (NS_SUCCEEDED(err))
+    err = RowCellColumnTonsString(row, property_token, result);
+
+  return err;
+}
+
+// Store a named property from a UTF-16 string (converted to UTF-8).
+nsresult nsMsgDatabase::SetPropertyFromNSString(nsIMdbRow* row,
+                                                const char* propertyName,
+                                                const nsAString& propertyVal) {
+  nsresult err = NS_OK;
+  mdb_token property_token;
+
+  NS_ENSURE_STATE(m_mdbStore); // db might have been closed out from under us.
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  err = m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  if (NS_SUCCEEDED(err))
+    return SetNSStringPropertyWithToken(row, property_token, propertyVal);
+
+  return err;
+}
+
+// Fetch a named uint32 property (stored as hex text); *result gets
+// defaultValue when the cell is absent or empty.
+nsresult nsMsgDatabase::GetUint32Property(nsIMdbRow* row,
+                                          const char* propertyName,
+                                          uint32_t* result,
+                                          uint32_t defaultValue) {
+  nsresult err = NS_OK;
+  mdb_token property_token;
+
+  NS_ENSURE_STATE(m_mdbStore); // db might have been closed out from under us.
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  err = m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  if (NS_SUCCEEDED(err))
+    err = RowCellColumnToUInt32(row, property_token, result, defaultValue);
+
+  return err;
+}
+
+// Fetch a named uint64 property (stored as hex text); *result gets
+// defaultValue when the cell is absent or empty.
+nsresult nsMsgDatabase::GetUint64Property(nsIMdbRow* row,
+                                          const char* propertyName,
+                                          uint64_t* result,
+                                          uint64_t defaultValue) {
+  nsresult err = NS_OK;
+  mdb_token property_token;
+
+  NS_ENSURE_STATE(m_mdbStore); // db might have been closed out from under us.
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  err = m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  if (NS_SUCCEEDED(err))
+    err = RowCellColumnToUInt64(row, property_token, result, defaultValue);
+
+  return err;
+}
+
+// Store a named uint32 property as hex text.
+nsresult nsMsgDatabase::SetUint32Property(nsIMdbRow* row,
+                                          const char* propertyName,
+                                          uint32_t propertyVal) {
+  struct mdbYarn yarn;
+  char int32StrBuf[20];
+  yarn.mYarn_Buf = int32StrBuf;
+  yarn.mYarn_Size = sizeof(int32StrBuf);
+  // Fill is recomputed by UInt32ToYarn before the column is written.
+  yarn.mYarn_Fill = sizeof(int32StrBuf);
+
+  NS_ENSURE_STATE(m_mdbStore); // db might have been closed out from under us.
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  mdb_token property_token;
+
+  nsresult err =
+      m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  if (NS_SUCCEEDED(err)) {
+    UInt32ToYarn(&yarn, propertyVal);
+    err = row->AddColumn(GetEnv(), property_token, &yarn);
+  }
+  return err;
+}
+
+// Store a named uint64 property as hex text.
+nsresult nsMsgDatabase::SetUint64Property(nsIMdbRow* row,
+                                          const char* propertyName,
+                                          uint64_t propertyVal) {
+  struct mdbYarn yarn;
+  char int64StrBuf[100];
+  yarn.mYarn_Buf = int64StrBuf;
+  yarn.mYarn_Size = sizeof(int64StrBuf);
+  // Fill is recomputed by UInt64ToYarn before the column is written.
+  yarn.mYarn_Fill = sizeof(int64StrBuf);
+
+  NS_ENSURE_STATE(m_mdbStore); // db might have been closed out from under us.
+  if (!row) return NS_ERROR_NULL_POINTER;
+
+  mdb_token property_token;
+
+  nsresult err =
+      m_mdbStore->StringToToken(GetEnv(), propertyName, &property_token);
+  if (NS_SUCCEEDED(err)) {
+    UInt64ToYarn(&yarn, propertyVal);
+    err = row->AddColumn(GetEnv(), property_token, &yarn);
+  }
+  return err;
+}
+
+// Fetch a named boolean property, stored as a uint32 (0/1).
+nsresult nsMsgDatabase::GetBooleanProperty(nsIMdbRow* row,
+                                           const char* propertyName,
+                                           bool* result,
+                                           bool defaultValue /* = false */) {
+  uint32_t res;
+  nsresult rv =
+      GetUint32Property(row, propertyName, &res, (uint32_t)defaultValue);
+  *result = !!res;
+  return rv;
+}
+
+// Store a named boolean property as a uint32 (0/1).
+nsresult nsMsgDatabase::SetBooleanProperty(nsIMdbRow* row,
+                                           const char* propertyName,
+                                           bool propertyVal) {
+  return SetUint32Property(row, propertyName, (uint32_t)propertyVal);
+}
+
+// Store a UTF-16 string property using an already-resolved column token.
+nsresult nsMsgDatabase::SetNSStringPropertyWithToken(
+    nsIMdbRow* row, mdb_token aProperty, const nsAString& propertyStr) {
+  NS_ENSURE_ARG(row);
+  struct mdbYarn yarn;
+
+  yarn.mYarn_Grow = NULL;
+  nsresult err =
+      row->AddColumn(GetEnv(), aProperty, nsStringToYarn(&yarn, propertyStr));
+  // nsStringToYarn heap-allocated the UTF-8 buffer; release it here.
+  free((char*)yarn.mYarn_Buf); // won't need this when we have nsCString
+  return err;
+}
+
+// Current on-disk summary format version.
+uint32_t nsMsgDatabase::GetCurVersion() { return kMsgDBVersion; }
+
+// Mark the summary file valid or invalid by writing the version into the
+// folder info (0 means invalid).
+NS_IMETHODIMP nsMsgDatabase::SetSummaryValid(bool valid /* = true */) {
+  // If the file was invalid when opened (for example in folder compact), it
+  // may not have been added to the cache. Add it now if missing.
+  if (valid) {
+    nsCOMPtr<nsIMsgDBService> serv(mozilla::components::DB::Service());
+    static_cast<nsMsgDBService*>(serv.get())->EnsureCached(this);
+  }
+  // setting the version to 0 ought to make it pretty invalid.
+  if (m_dbFolderInfo) m_dbFolderInfo->SetVersion(valid ? GetCurVersion() : 0);
+
+  // for the default db (and news), there's nothing to set to make it valid.
+  return NS_OK;
+}
+
+// Base implementation: the summary is always considered valid; subclasses
+// that track validity override this.
+NS_IMETHODIMP nsMsgDatabase::GetSummaryValid(bool* aResult) {
+  NS_ENSURE_ARG_POINTER(aResult);
+  *aResult = true;
+  return NS_OK;
+}
+
+// protected routines
+
+// should we thread messages with common subjects that don't start with Re:
+// together? I imagine we might have separate preferences for mail and news, so
+// this is a virtual method.
+bool nsMsgDatabase::ThreadBySubjectWithoutRe() {
+  GetGlobalPrefs();
+  return gThreadWithoutRe;
+}
+
+// Should we thread ONLY by references headers (no subject fallback)?
+bool nsMsgDatabase::UseStrictThreading() {
+  GetGlobalPrefs();
+  return gStrictThreading;
+}
+
+// Should we make sure messages are always threaded correctly (see bug 181446)
+bool nsMsgDatabase::UseCorrectThreading() {
+  GetGlobalPrefs();
+  return gCorrectThreading;
+}
+
+// adapted from removed PL_DHashFreeStringKey
+// Entry destructor for the reference hash: frees the strdup'ed key string
+// then clears the stub entry.
+static void msg_DHashFreeStringKey(PLDHashTable* aTable,
+                                   PLDHashEntryHdr* aEntry) {
+  const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
+  free((void*)stub->key);
+  PLDHashTable::ClearEntryStub(aTable, aEntry);
+}
+
+// Hash-table ops for m_msgReferences: C-string keyed, owning its keys.
+PLDHashTableOps nsMsgDatabase::gRefHashTableOps = {
+    PLDHashTable::HashStringKey, PLDHashTable::MatchStringKey,
+    PLDHashTable::MoveEntryStub, msg_DHashFreeStringKey, nullptr};
+
+// Look up the thread id recorded for a message-id reference in the in-memory
+// reference hash, building the hash on first use. Returns NS_ERROR_FAILURE
+// when the reference is unknown.
+nsresult nsMsgDatabase::GetRefFromHash(nsCString& reference,
+                                       nsMsgKey* threadId) {
+  // Initialize the reference hash
+  if (!m_msgReferences) {
+    nsresult rv = InitRefHash();
+    if (NS_FAILED(rv)) return rv;
+  }
+
+  // Find reference from the hash
+  PLDHashEntryHdr* entry =
+      m_msgReferences->Search((const void*)reference.get());
+  if (entry) {
+    RefHashElement* element = static_cast<RefHashElement*>(entry);
+    *threadId = element->mThreadId;
+    return NS_OK;
+  }
+
+  return NS_ERROR_FAILURE;
+}
+
+// Record (or bump the refcount of) a message-id reference -> thread id
+// mapping in the reference hash. No-op when the hash isn't built yet.
+nsresult nsMsgDatabase::AddRefToHash(nsCString& reference, nsMsgKey threadId) {
+  if (m_msgReferences) {
+    PLDHashEntryHdr* entry =
+        m_msgReferences->Add((void*)reference.get(), mozilla::fallible);
+    if (!entry) return NS_ERROR_OUT_OF_MEMORY; // XXX out of memory
+
+    RefHashElement* element = static_cast<RefHashElement*>(entry);
+    if (!element->mRef) {
+      // Fresh entry: take an owned copy of the key and start the count.
+      element->mRef =
+          ToNewCString(reference); // Will be freed in msg_DHashFreeStringKey()
+      element->mThreadId = threadId;
+      element->mCount = 1;
+    } else
+      element->mCount++;
+  }
+
+  return NS_OK;
+}
+
+// Add every references-header entry of msgHdr to the reference hash, mapped
+// to the header's thread id. Stops at the first empty reference or failure.
+nsresult nsMsgDatabase::AddMsgRefsToHash(nsIMsgDBHdr* msgHdr) {
+  uint16_t numReferences = 0;
+  nsMsgKey threadId;
+  nsresult rv = NS_OK;
+
+  msgHdr->GetThreadId(&threadId);
+  msgHdr->GetNumReferences(&numReferences);
+
+  for (int32_t i = 0; i < numReferences; i++) {
+    nsAutoCString reference;
+
+    msgHdr->GetStringReference(i, reference);
+    if (reference.IsEmpty()) break;
+
+    rv = AddRefToHash(reference, threadId);
+    if (NS_FAILED(rv)) break;
+  }
+
+  return rv;
+}
+
+// Drop one refcount for a reference in the hash, removing the entry entirely
+// when the count reaches zero. No-op when the hash isn't built yet.
+nsresult nsMsgDatabase::RemoveRefFromHash(nsCString& reference) {
+  if (m_msgReferences) {
+    PLDHashEntryHdr* entry =
+        m_msgReferences->Search((const void*)reference.get());
+    if (entry) {
+      RefHashElement* element = static_cast<RefHashElement*>(entry);
+      if (--element->mCount == 0)
+        m_msgReferences->Remove((void*)reference.get());
+    }
+  }
+  return NS_OK;
+}
+
+// Remove all of msgHdr's references-header entries from the reference hash
+// (the inverse of AddMsgRefsToHash). Stops at the first empty reference.
+nsresult nsMsgDatabase::RemoveMsgRefsFromHash(nsIMsgDBHdr* msgHdr) {
+  uint16_t numReferences = 0;
+  nsresult rv = NS_OK;
+
+  msgHdr->GetNumReferences(&numReferences);
+
+  for (int32_t i = 0; i < numReferences; i++) {
+    nsAutoCString reference;
+
+    msgHdr->GetStringReference(i, reference);
+    if (reference.IsEmpty()) break;
+
+    rv = RemoveRefFromHash(reference);
+    if (NS_FAILED(rv)) break;
+  }
+
+  return rv;
+}
+
+// Enumerator filter: accept only messages carrying at least one
+// references-header entry; every other message is skipped.
+static nsresult nsReferencesOnlyFilter(nsIMsgDBHdr* msg, void* closure) {
+  uint16_t refCount = 0;
+  msg->GetNumReferences(&refCount);
+  if (refCount > 0) return NS_OK;
+  return NS_ERROR_FAILURE;
+}
+
+// Build (or rebuild) the in-memory reference hash by walking every message
+// that has references and recording reference -> thread id mappings.
+nsresult nsMsgDatabase::InitRefHash() {
+  // Delete an existing table just in case
+  if (m_msgReferences) delete m_msgReferences;
+
+  // Create new table
+  m_msgReferences = new PLDHashTable(
+      &gRefHashTableOps, sizeof(struct RefHashElement), MSG_HASH_SIZE);
+  if (!m_msgReferences) return NS_ERROR_OUT_OF_MEMORY;
+
+  // Create enumerator to go through all messages with references
+  nsCOMPtr<nsIMsgEnumerator> enumerator;
+  enumerator = new nsMsgDBEnumerator(this, m_mdbAllMsgHeadersTable,
+                                     nsReferencesOnlyFilter, nullptr);
+  if (enumerator == nullptr) return NS_ERROR_OUT_OF_MEMORY;
+
+  // Populate table with references of existing messages
+  bool hasMore;
+  nsresult rv = NS_OK;
+  while (NS_SUCCEEDED(rv = enumerator->HasMoreElements(&hasMore)) && hasMore) {
+    nsCOMPtr<nsIMsgDBHdr> msgHdr;
+    rv = enumerator->GetNext(getter_AddRefs(msgHdr));
+    if (msgHdr && NS_SUCCEEDED(rv)) rv = AddMsgRefsToHash(msgHdr);
+    if (NS_FAILED(rv)) break;
+  }
+
+  return rv;
+}
+
+// Create a new thread object with its own Mork table (keyed by threadId) and
+// a row in the all-threads table mapping the thread to its subject. The new
+// thread becomes the cached thread. *pnewThread is a raw (non-addref'ed here
+// beyond construction) nsMsgThread owned per this file's conventions.
+nsresult nsMsgDatabase::CreateNewThread(nsMsgKey threadId, const char* subject,
+                                        nsMsgThread** pnewThread) {
+  nsresult err = NS_OK;
+  nsCOMPtr<nsIMdbTable> threadTable;
+  struct mdbOid threadTableOID;
+  struct mdbOid allThreadsTableOID;
+
+  if (!pnewThread || !m_mdbStore) return NS_ERROR_NULL_POINTER;
+
+  threadTableOID.mOid_Scope = m_hdrRowScopeToken;
+  threadTableOID.mOid_Id = threadId;
+
+  // Under some circumstances, mork seems to reuse an old table when we create
+  // one. Prevent problems from that by finding any old table first, and
+  // deleting its rows.
+  nsresult res = GetStore()->GetTable(GetEnv(), &threadTableOID,
+                                      getter_AddRefs(threadTable));
+  if (NS_SUCCEEDED(res) && threadTable) threadTable->CutAllRows(GetEnv());
+
+  err = GetStore()->NewTableWithOid(GetEnv(), &threadTableOID,
+                                    m_threadTableKindToken, false, nullptr,
+                                    getter_AddRefs(threadTable));
+  if (NS_FAILED(err)) return err;
+
+  allThreadsTableOID.mOid_Scope = m_threadRowScopeToken;
+  allThreadsTableOID.mOid_Id = threadId;
+
+  // add a row for this thread in the table of all threads that we'll use
+  // to do our mapping between subject strings and threads.
+  nsCOMPtr<nsIMdbRow> threadRow;
+
+  err = m_mdbStore->GetRow(GetEnv(), &allThreadsTableOID,
+                           getter_AddRefs(threadRow));
+  if (!threadRow) {
+    err = m_mdbStore->NewRowWithOid(GetEnv(), &allThreadsTableOID,
+                                    getter_AddRefs(threadRow));
+    if (NS_SUCCEEDED(err) && threadRow) {
+      if (m_mdbAllThreadsTable)
+        m_mdbAllThreadsTable->AddRow(GetEnv(), threadRow);
+      err = CharPtrToRowCellColumn(threadRow, m_threadSubjectColumnToken,
+                                   subject);
+    }
+  } else {
+#ifdef DEBUG_David_Bienvenu
+    NS_WARNING("odd that thread row already exists");
+#endif
+    // A stale row already existed: wipe it (and its meta row) before reusing
+    // it for the new thread's subject.
+    threadRow->CutAllColumns(GetEnv());
+    nsCOMPtr<nsIMdbRow> metaRow;
+    threadTable->GetMetaRow(GetEnv(), nullptr, nullptr,
+                            getter_AddRefs(metaRow));
+    if (metaRow) metaRow->CutAllColumns(GetEnv());
+
+    CharPtrToRowCellColumn(threadRow, m_threadSubjectColumnToken, subject);
+  }
+
+  *pnewThread = new nsMsgThread(this, threadTable);
+  if (*pnewThread) {
+    (*pnewThread)->SetThreadKey(threadId);
+    // Remember this thread as the most recently used one.
+    m_cachedThread = *pnewThread;
+    m_cachedThreadId = threadId;
+  }
+  return err;
+}
+
+// Find the thread containing the message with the given message-id. When the
+// referenced message itself isn't in the DB and correct threading is on, fall
+// back to the reference hash (messages that cite the same id). On success the
+// matched header is optionally returned addref'ed via pMsgHdr.
+nsIMsgThread* nsMsgDatabase::GetThreadForReference(nsCString& msgID,
+                                                   nsIMsgDBHdr** pMsgHdr) {
+  nsMsgKey threadId;
+  nsIMsgDBHdr* msgHdr = nullptr;
+  GetMsgHdrForMessageID(msgID.get(), &msgHdr);
+  nsIMsgThread* thread = NULL;
+
+  if (msgHdr != NULL) {
+    if (NS_SUCCEEDED(msgHdr->GetThreadId(&threadId))) {
+      // find thread header for header whose message id we matched.
+      thread = GetThreadForThreadId(threadId);
+    }
+    if (pMsgHdr)
+      *pMsgHdr = msgHdr;
+    else
+      msgHdr->Release();
+  }
+  // Referenced message not found, check if there are messages that reference
+  // same message
+  else if (UseCorrectThreading()) {
+    if (NS_SUCCEEDED(GetRefFromHash(msgID, &threadId)))
+      thread = GetThreadForThreadId(threadId);
+  }
+
+  return thread;
+}
+
+// Find an existing thread whose stored subject exactly matches `subject`, by
+// looking up the subject in the all-threads table via Mork's FindRow. Returns
+// null when no such thread exists.
+nsIMsgThread* nsMsgDatabase::GetThreadForSubject(nsCString& subject) {
+  nsIMsgThread* thread = nullptr;
+
+  mdbYarn subjectYarn;
+
+  // Wrap the subject bytes in a yarn for the FindRow lookup (no copy).
+  subjectYarn.mYarn_Buf = (void*)subject.get();
+  subjectYarn.mYarn_Fill = PL_strlen(subject.get());
+  subjectYarn.mYarn_Form = 0;
+  subjectYarn.mYarn_Size = subjectYarn.mYarn_Fill;
+
+  nsCOMPtr<nsIMdbRow> threadRow;
+  mdbOid outRowId;
+  if (m_mdbStore) {
+    nsresult result = m_mdbStore->FindRow(
+        GetEnv(), m_threadRowScopeToken, m_threadSubjectColumnToken,
+        &subjectYarn, &outRowId, getter_AddRefs(threadRow));
+    if (NS_SUCCEEDED(result) && threadRow) {
+      // Get key from row
+      mdbOid outOid;
+      nsMsgKey key = nsMsgKey_None;
+      if (NS_SUCCEEDED(threadRow->GetOid(GetEnv(), &outOid)))
+        key = outOid.mOid_Id;
+      // find thread header for header whose message id we matched.
+      // It is fine if key was not found,
+      // GetThreadForThreadId(nsMsgKey_None) returns nullptr.
+      thread = GetThreadForThreadId(key);
+    }
+#ifdef DEBUG_bienvenu1
+    else {
+      // Debug-only consistency check: walk every thread table and verify no
+      // thread with this subject was missed by FindRow.
+      nsresult rv;
+      RefPtr<nsMsgThread> pThread;
+
+      nsCOMPtr<nsIMdbPortTableCursor> tableCursor;
+      m_mdbStore->GetPortTableCursor(GetEnv(), m_hdrRowScopeToken,
+                                     m_threadTableKindToken,
+                                     getter_AddRefs(tableCursor));
+
+      nsCOMPtr<nsIMdbTable> table;
+
+      while (true) {
+        rv = tableCursor->NextTable(GetEnv(), getter_AddRefs(table));
+        if (!table) break;
+        if (NS_FAILED(rv)) break;
+
+        pThread = new nsMsgThread(this, table);
+        if (pThread) {
+          nsCString curSubject;
+          pThread->GetSubject(curSubject);
+          if (subject.Equals(curSubject)) {
+            NS_ERROR("thread with subject exists, but FindRow didn't find it");
+            break;
+          }
+        } else
+          break;
+      }
+    }
+#endif
+  }
+  return thread;
+}
+
+// Returns the thread (if any) containing a message that references the
+// passed message ID, consulting the in-memory reference hash. Yields
+// nullptr when the id is unknown.
+nsIMsgThread* nsMsgDatabase::GetThreadForMessageId(nsCString& msgId) {
+  nsMsgKey threadId;
+  if (NS_FAILED(GetRefFromHash(msgId, &threadId))) return nullptr;
+  return GetThreadForThreadId(threadId);
+}
+
+// Decide which thread a new header belongs to, trying in order: references
+// headers, subject (unless strict threading), and — with correct threading —
+// existing messages that reference this header's own message-id. Falls back
+// to starting a brand-new thread. newThread reports whether a thread was
+// created.
+nsresult nsMsgDatabase::ThreadNewHdr(nsMsgHdr* newHdr, bool& newThread) {
+  nsresult result = NS_ERROR_UNEXPECTED;
+  nsCOMPtr<nsIMsgThread> thread;
+  nsCOMPtr<nsIMsgDBHdr> replyToHdr;
+  nsMsgKey threadId = nsMsgKey_None, newHdrKey;
+
+  if (!newHdr) return NS_ERROR_NULL_POINTER;
+
+  newHdr->SetThreadParent(
+      nsMsgKey_None); // if we're undoing, could have a thread parent
+  uint16_t numReferences = 0;
+  uint32_t newHdrFlags = 0;
+
+  // use raw flags instead of GetFlags, because GetFlags will
+  // pay attention to what's in m_newSet, and this new hdr isn't
+  // in m_newSet yet.
+  newHdr->GetRawFlags(&newHdrFlags);
+  newHdr->GetNumReferences(&numReferences);
+  newHdr->GetMessageKey(&newHdrKey);
+
+  // try reference threading first
+  // Walk references newest-first (last reference is the immediate parent).
+  for (int32_t i = numReferences - 1; i >= 0; i--) {
+    nsAutoCString reference;
+
+    newHdr->GetStringReference(i, reference);
+    // first reference we have hdr for is best top-level hdr.
+    // but we have to handle case of promoting new header to top-level
+    // in case the top-level header comes after a reply.
+
+    if (reference.IsEmpty()) break;
+
+    thread = dont_AddRef(
+        GetThreadForReference(reference, getter_AddRefs(replyToHdr)));
+    if (thread) {
+      if (replyToHdr) {
+        nsMsgKey replyToKey;
+        replyToHdr->GetMessageKey(&replyToKey);
+        // message claims to be a reply to itself - ignore that since it leads
+        // to corrupt threading.
+        if (replyToKey == newHdrKey) {
+          // bad references - throw them all away.
+          newHdr->SetMessageId("");
+          thread = nullptr;
+          break;
+        }
+      }
+      thread->GetThreadKey(&threadId);
+      newHdr->SetThreadId(threadId);
+      result = AddToThread(newHdr, thread, replyToHdr, true);
+      break;
+    }
+  }
+  // if user hasn't said "only thread by ref headers", thread by subject
+  if (!thread && !UseStrictThreading()) {
+    // try subject threading if we couldn't find a reference and the subject
+    // starts with Re:
+    nsCString subject;
+    newHdr->GetSubject(subject);
+    if (ThreadBySubjectWithoutRe() ||
+        (newHdrFlags & nsMsgMessageFlags::HasRe)) {
+      nsAutoCString cSubject(subject);
+      thread = dont_AddRef(GetThreadForSubject(cSubject));
+      if (thread) {
+        thread->GetThreadKey(&threadId);
+        newHdr->SetThreadId(threadId);
+        // TRACE("threading based on subject %s\n", (const char *)
+        // msgHdr->m_subject);
+        // if we move this and do subject threading after, ref threading,
+        // don't thread within children, since we know it won't work. But for
+        // now, pass TRUE.
+        result = AddToThread(newHdr, thread, nullptr, true);
+      }
+    }
+  }
+
+  // Check if this is a new parent to an existing message (that has a reference
+  // to this message)
+  if (!thread && UseCorrectThreading()) {
+    nsCString msgId;
+    newHdr->GetMessageId(getter_Copies(msgId));
+
+    thread = dont_AddRef(GetThreadForMessageId(msgId));
+    if (thread) {
+      thread->GetThreadKey(&threadId);
+      newHdr->SetThreadId(threadId);
+      result = AddToThread(newHdr, thread, nullptr, true);
+    }
+  }
+
+  if (!thread) {
+    // Not a parent or child, make it a new thread for now
+    result = AddNewThread(newHdr);
+    newThread = true;
+  } else {
+    newThread = false;
+  }
+  return result;
+}
+
+// Attach newHdr to an existing thread, optionally under inReplyTo; delegates
+// all the real work to nsIMsgThread::AddChild.
+nsresult nsMsgDatabase::AddToThread(nsMsgHdr* newHdr, nsIMsgThread* thread,
+                                    nsIMsgDBHdr* inReplyTo,
+                                    bool threadInThread) {
+  // don't worry about real threading yet.
+  return thread->AddChild(newHdr, inReplyTo, threadInThread, this);
+}
+
+// Unimplemented stub: asserts in debug builds and always returns null.
+nsMsgHdr* nsMsgDatabase::GetMsgHdrForReference(nsCString& reference) {
+  NS_ASSERTION(false, "not implemented yet.");
+  return nullptr;
+}
+
+// Look up a header by its Message-ID via Mork's FindRow on the message-id
+// column. *aHdr is null (with NS_OK) when no message matches.
+NS_IMETHODIMP nsMsgDatabase::GetMsgHdrForMessageID(const char* aMsgID,
+                                                   nsIMsgDBHdr** aHdr) {
+  NS_ENSURE_ARG_POINTER(aHdr);
+  NS_ENSURE_ARG_POINTER(aMsgID);
+  nsIMsgDBHdr* msgHdr = nullptr;
+  nsresult rv = NS_OK;
+  mdbYarn messageIdYarn;
+
+  // Wrap the id bytes in a yarn for the FindRow lookup (no copy).
+  messageIdYarn.mYarn_Buf = (void*)aMsgID;
+  messageIdYarn.mYarn_Fill = PL_strlen(aMsgID);
+  messageIdYarn.mYarn_Form = 0;
+  messageIdYarn.mYarn_Size = messageIdYarn.mYarn_Fill;
+
+  nsIMdbRow* hdrRow;
+  mdbOid outRowId;
+  nsresult result;
+  if (m_mdbStore)
+    result = m_mdbStore->FindRow(GetEnv(), m_hdrRowScopeToken,
+                                 m_messageIdColumnToken, &messageIdYarn,
+                                 &outRowId, &hdrRow);
+  else
+    return NS_ERROR_FAILURE;
+  if (NS_SUCCEEDED(result) && hdrRow) {
+    // Get key from row
+    mdbOid outOid;
+    nsMsgKey key = nsMsgKey_None;
+    rv = hdrRow->GetOid(GetEnv(), &outOid);
+    // NOTE(review): returning here leaves the raw hdrRow unreleased —
+    // presumably CreateMsgHdr adopts the row on the success path; confirm
+    // whether these early returns leak a row reference.
+    if (NS_WARN_IF(NS_FAILED(rv))) return rv;
+    key = outOid.mOid_Id;
+
+    rv = CreateMsgHdr(hdrRow, key, &msgHdr);
+    if (NS_WARN_IF(NS_FAILED(rv))) return rv;
+  }
+  *aHdr = msgHdr; // already addreffed above.
+  return NS_OK; // it's not an error not to find a msg hdr.
+}
+
+// Look up a header by its Gmail message id, stored as the per-header
+// "X-GM-MSGID" string property. Same contract as GetMsgHdrForMessageID:
+// *aHdr is null (with NS_OK) when there is no match.
+NS_IMETHODIMP nsMsgDatabase::GetMsgHdrForGMMsgID(const char* aGMMsgId,
+                                                 nsIMsgDBHdr** aHdr) {
+  NS_ENSURE_ARG_POINTER(aGMMsgId);
+  NS_ENSURE_ARG_POINTER(aHdr);
+  nsIMsgDBHdr* msgHdr = nullptr;
+  nsresult rv = NS_OK;
+  mdbYarn gMailMessageIdYarn;
+  // Non-owning yarn over the caller's string; see GetMsgHdrForMessageID.
+  gMailMessageIdYarn.mYarn_Buf = (void*)aGMMsgId;
+  gMailMessageIdYarn.mYarn_Fill = strlen(aGMMsgId);
+  gMailMessageIdYarn.mYarn_Form = 0;
+  gMailMessageIdYarn.mYarn_Size = gMailMessageIdYarn.mYarn_Fill;
+
+  nsIMdbRow* hdrRow;
+  mdbOid outRowId;
+  nsresult result;
+  mdb_token property_token;
+  NS_ENSURE_TRUE(m_mdbStore, NS_ERROR_NULL_POINTER);
+  // Resolve the column token for the X-GM-MSGID property before searching.
+  result = m_mdbStore->StringToToken(GetEnv(), "X-GM-MSGID", &property_token);
+  NS_ENSURE_SUCCESS(result, result);
+  result = m_mdbStore->FindRow(GetEnv(), m_hdrRowScopeToken, property_token,
+                               &gMailMessageIdYarn, &outRowId, &hdrRow);
+  if (NS_SUCCEEDED(result) && hdrRow) {
+    // Get key from row
+    mdbOid outOid;
+    rv = hdrRow->GetOid(GetEnv(), &outOid);
+    NS_ENSURE_SUCCESS(rv, rv);
+    nsMsgKey key = outOid.mOid_Id;
+    rv = CreateMsgHdr(hdrRow, key, &msgHdr);
+    if (NS_WARN_IF(NS_FAILED(rv))) return rv;
+  }
+  *aHdr = msgHdr;
+  return NS_OK; // it's not an error not to find a msg hdr.
+}
+
+// Find a header whose subject column exactly matches `subject`.
+// Returns an addref'd header (via CreateMsgHdr) that the caller must
+// release, or nullptr on no match / failure.
+nsIMsgDBHdr* nsMsgDatabase::GetMsgHdrForSubject(nsCString& subject) {
+  nsIMsgDBHdr* msgHdr = nullptr;
+  nsresult rv = NS_OK;
+  mdbYarn subjectYarn;
+
+  // Non-owning yarn over the subject string.
+  subjectYarn.mYarn_Buf = (void*)subject.get();
+  subjectYarn.mYarn_Fill = PL_strlen(subject.get());
+  subjectYarn.mYarn_Form = 0;
+  subjectYarn.mYarn_Size = subjectYarn.mYarn_Fill;
+
+  nsIMdbRow* hdrRow;
+  mdbOid outRowId;
+  nsresult result =
+      GetStore()->FindRow(GetEnv(), m_hdrRowScopeToken, m_subjectColumnToken,
+                          &subjectYarn, &outRowId, &hdrRow);
+  if (NS_SUCCEEDED(result) && hdrRow) {
+    // Get key from row
+    mdbOid outOid;
+    nsMsgKey key = nsMsgKey_None;
+    rv = hdrRow->GetOid(GetEnv(), &outOid);
+    if (NS_WARN_IF(NS_FAILED(rv))) return nullptr;
+    key = outOid.mOid_Id;
+
+    rv = CreateMsgHdr(hdrRow, key, &msgHdr);
+    if (NS_WARN_IF(NS_FAILED(rv))) return nullptr;
+  }
+  return msgHdr;
+}
+
+// Find the thread the given header belongs to, first by its stored thread
+// id and then — as a fallback for headers with a stale/unset thread id —
+// by treating the header's own message key as a thread id. Returns
+// NS_ERROR_FAILURE when neither lookup succeeds (normal after deletion).
+NS_IMETHODIMP nsMsgDatabase::GetThreadContainingMsgHdr(nsIMsgDBHdr* msgHdr,
+                                                       nsIMsgThread** result) {
+  NS_ENSURE_ARG_POINTER(msgHdr);
+  NS_ENSURE_ARG_POINTER(result);
+
+  *result = nullptr;
+  nsMsgKey threadId = nsMsgKey_None;
+  (void)msgHdr->GetThreadId(&threadId);
+  if (threadId != nsMsgKey_None) *result = GetThreadForThreadId(threadId);
+
+  // if we can't find the thread, try using the msg key as the thread id,
+  // because the msg hdr might not have the thread id set correctly
+  // Or maybe the message was deleted?
+  if (!*result) {
+    nsMsgKey msgKey;
+    msgHdr->GetMessageKey(&msgKey);
+    *result = GetThreadForThreadId(msgKey);
+  }
+  // failure is normal when message was deleted
+  return (*result) ? NS_OK : NS_ERROR_FAILURE;
+}
+
+// Convenience wrapper: resolve msgKey to a header, then delegate to
+// GetThreadContainingMsgHdr. Returns NS_MSG_MESSAGE_NOT_FOUND when the
+// key has no header in this database.
+nsresult nsMsgDatabase::GetThreadForMsgKey(nsMsgKey msgKey,
+                                           nsIMsgThread** aResult) {
+  NS_ENSURE_ARG_POINTER(aResult);
+
+  nsCOMPtr<nsIMsgDBHdr> msg;
+  GetMsgHdrForKey(msgKey, getter_AddRefs(msg));
+  if (!msg) return NS_MSG_MESSAGE_NOT_FOUND;
+
+  return GetThreadContainingMsgHdr(msg, aResult);
+}
+
+// Return the thread object for a thread id, or nullptr if no such thread
+// table exists. Checks a one-entry cache (m_cachedThread/m_cachedThreadId)
+// and the live-thread list before hitting the Mork store; a thread built
+// from the store is placed in the cache. The returned pointer is addref'd:
+// caller needs to unrefer.
+nsIMsgThread* nsMsgDatabase::GetThreadForThreadId(nsMsgKey threadId) {
+  nsIMsgThread* retThread = (threadId == m_cachedThreadId && m_cachedThread)
+                                ? m_cachedThread.get()
+                                : FindExistingThread(threadId);
+  if (retThread) {
+    NS_ADDREF(retThread);
+    return retThread;
+  }
+  if (m_mdbStore) {
+    // Thread tables live in the header row scope, keyed by the thread id.
+    mdbOid tableId;
+    tableId.mOid_Id = threadId;
+    tableId.mOid_Scope = m_hdrRowScopeToken;
+
+    nsCOMPtr<nsIMdbTable> threadTable;
+    nsresult res =
+        m_mdbStore->GetTable(GetEnv(), &tableId, getter_AddRefs(threadTable));
+
+    if (NS_SUCCEEDED(res) && threadTable) {
+      retThread = new nsMsgThread(this, threadTable);
+      if (retThread) {
+        NS_ADDREF(retThread);
+        m_cachedThread = retThread;
+        m_cachedThreadId = threadId;
+      }
+    }
+  }
+  return retThread;
+}
+
+// make the passed in header a thread header: create a new thread keyed by
+// the header's message key (remapped if it collides with the all-msg-hdrs
+// table key) and add the header as its first child.
+nsresult nsMsgDatabase::AddNewThread(nsMsgHdr* msgHdr) {
+  if (!msgHdr) return NS_ERROR_NULL_POINTER;
+
+  nsMsgThread* threadHdr = nullptr;
+
+  nsCString subject;
+  nsMsgKey threadKey;
+  msgHdr->GetMessageKey(&threadKey);
+  // can't have a thread with key 1 since that's the table id of the all msg hdr
+  // table, so give it kTableKeyForThreadOne (0xfffffffe).
+  if (threadKey == kAllMsgHdrsTableKey) threadKey = kTableKeyForThreadOne;
+
+  // NOTE(review): err from GetSubject is immediately overwritten by
+  // CreateNewThread below; a GetSubject failure is silently ignored.
+  nsresult err = msgHdr->GetSubject(subject);
+
+  err = CreateNewThread(threadKey, subject.get(), &threadHdr);
+  msgHdr->SetThreadId(threadKey);
+  if (threadHdr) {
+    NS_ADDREF(threadHdr);
+    // err = msgHdr->GetSubject(subject);
+    // threadHdr->SetThreadKey(msgHdr->m_messageKey);
+    // threadHdr->SetSubject(subject.get());
+    // need to add the thread table to the db.
+    AddToThread(msgHdr, threadHdr, nullptr, false);
+    NS_RELEASE(threadHdr);
+  }
+  return err;
+}
+
+// Small helpers to read a boolean / integer preference through the pref
+// service. On failure *result is only written if the branch lookup itself
+// succeeded; callers should pre-initialize their out value.
+nsresult nsMsgDatabase::GetBoolPref(const char* prefName, bool* result) {
+  bool prefValue = false;
+  nsresult rv;
+  nsCOMPtr<nsIPrefBranch> pPrefBranch(
+      do_GetService(NS_PREFSERVICE_CONTRACTID, &rv));
+  if (pPrefBranch) {
+    rv = pPrefBranch->GetBoolPref(prefName, &prefValue);
+    *result = prefValue;
+  }
+  return rv;
+}
+
+nsresult nsMsgDatabase::GetIntPref(const char* prefName, int32_t* result) {
+  int32_t prefValue = 0;
+  nsresult rv;
+  nsCOMPtr<nsIPrefBranch> pPrefBranch(
+      do_GetService(NS_PREFSERVICE_CONTRACTID, &rv));
+  if (pPrefBranch) {
+    rv = pPrefBranch->GetIntPref(prefName, &prefValue);
+    *result = prefValue;
+  }
+  return rv;
+}
+
+// Pending-header attribute API: not supported by the base class.
+// Subclasses that track pending hdrs (e.g. IMAP) override these.
+NS_IMETHODIMP nsMsgDatabase::SetAttributeOnPendingHdr(nsIMsgDBHdr* pendingHdr,
+                                                      const char* property,
+                                                      const char* propertyVal) {
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP nsMsgDatabase::SetUint32AttributeOnPendingHdr(
+    nsIMsgDBHdr* pendingHdr, const char* property, uint32_t propertyVal) {
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsMsgDatabase::SetUint64AttributeOnPendingHdr(nsIMsgDBHdr* aPendingHdr,
+                                              const char* aProperty,
+                                              uint64_t aPropertyVal) {
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// No pending attributes in the base class; succeeding is a harmless no-op.
+NS_IMETHODIMP
+nsMsgDatabase::UpdatePendingAttributes(nsIMsgDBHdr* aNewHdr) { return NS_OK; }
+
+// Offline IMAP operations are only meaningful for nsMailDatabase, which
+// overrides both of these; the base class asserts and fails.
+NS_IMETHODIMP nsMsgDatabase::GetOfflineOpForKey(
+    nsMsgKey msgKey, bool create, nsIMsgOfflineImapOperation** offlineOp) {
+  NS_ASSERTION(false, "overridden by nsMailDatabase");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP nsMsgDatabase::RemoveOfflineOp(nsIMsgOfflineImapOperation* op) {
+  NS_ASSERTION(false, "overridden by nsMailDatabase");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+// Fill `keys` with the message keys of every header that has the Offline
+// flag set. The output array is cleared first; enumeration errors on
+// individual headers are skipped rather than aborting the listing.
+NS_IMETHODIMP nsMsgDatabase::ListAllOfflineMsgs(nsTArray<nsMsgKey>& keys) {
+  keys.Clear();
+  nsCOMPtr<nsIMsgEnumerator> enumerator;
+  uint32_t flag = nsMsgMessageFlags::Offline;
+  // if we change this routine to return an enumerator that generates the keys
+  // one by one, we'll need to somehow make a copy of flag for the enumerator
+  // to own, since the enumerator will persist past the life of flag on the
+  // stack.
+  nsresult rv = EnumerateMessagesWithFlag(getter_AddRefs(enumerator), &flag);
+  if (NS_SUCCEEDED(rv) && enumerator) {
+    bool hasMoreElements;
+    while (NS_SUCCEEDED(enumerator->HasMoreElements(&hasMoreElements)) &&
+           hasMoreElements) {
+      // clear out db hdr, because it won't be valid when we get rid of the .msf
+      // file
+      nsCOMPtr<nsIMsgDBHdr> dbMessage;
+      rv = enumerator->GetNext(getter_AddRefs(dbMessage));
+      if (NS_SUCCEEDED(rv) && dbMessage) {
+        nsMsgKey msgKey;
+        dbMessage->GetMessageKey(&msgKey);
+        keys.AppendElement(msgKey);
+      }
+    }
+  }
+  return rv;
+}
+
+// More offline-op stubs (real implementations live in nsMailDatabase),
+// plus the high/low water article-number accessors.
+NS_IMETHODIMP nsMsgDatabase::ListAllOfflineOpIds(
+    nsTArray<nsMsgKey>& offlineOpIds) {
+  NS_ASSERTION(false, "overridden by nsMailDatabase");
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP nsMsgDatabase::ListAllOfflineDeletes(
+    nsTArray<nsMsgKey>& offlineDeletes) {
+  // technically, notimplemented, but no one's putting offline ops in anyway.
+  return NS_OK;
+}
+// High water mark is delegated to the folder info; low water is unsupported.
+NS_IMETHODIMP nsMsgDatabase::GetHighWaterArticleNum(nsMsgKey* key) {
+  if (!m_dbFolderInfo) return NS_ERROR_NULL_POINTER;
+  return m_dbFolderInfo->GetHighWater(key);
+}
+
+NS_IMETHODIMP nsMsgDatabase::GetLowWaterArticleNum(nsMsgKey* key) {
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+/* attribute nsMsgKey NextPseudoMsgKey */
+
+// Pseudo keys are handed out counting downward from m_nextPseudoMsgKey so
+// they never collide with real (ascending) message keys.
+NS_IMETHODIMP nsMsgDatabase::GetNextPseudoMsgKey(nsMsgKey* nextPseudoMsgKey) {
+  NS_ENSURE_ARG_POINTER(nextPseudoMsgKey);
+  *nextPseudoMsgKey = m_nextPseudoMsgKey--;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDatabase::SetNextPseudoMsgKey(nsMsgKey nextPseudoMsgKey) {
+  m_nextPseudoMsgKey = nextPseudoMsgKey;
+  return NS_OK;
+}
+
+// Scan downward from kIdStartOfFake for the first key not present in the
+// database; used to fabricate keys for offline-created messages.
+NS_IMETHODIMP nsMsgDatabase::GetNextFakeOfflineMsgKey(
+    nsMsgKey* nextFakeOfflineMsgKey) {
+  NS_ENSURE_ARG_POINTER(nextFakeOfflineMsgKey);
+  // iterate over hdrs looking for first non-existent fake offline msg key
+  nsMsgKey fakeMsgKey = kIdStartOfFake;
+
+  bool containsKey;
+  do {
+    ContainsKey(fakeMsgKey, &containsKey);
+    if (!containsKey) break;
+    fakeMsgKey--;
+  } while (containsKey);
+
+  *nextFakeOfflineMsgKey = fakeMsgKey;
+  return NS_OK;
+}
+
+#ifdef DEBUG
+// Debug-only diagnostic: print every header (key/author/subject) and every
+// thread key to stdout. Not compiled into release builds.
+nsresult nsMsgDatabase::DumpContents() {
+  nsTArray<nsMsgKey> keys;
+  nsresult rv = ListAllKeys(keys);
+  NS_ENSURE_SUCCESS(rv, rv);
+  for (nsMsgKey key : keys) {
+    nsCOMPtr<nsIMsgDBHdr> msgHdr;
+    GetMsgHdrForKey(key, getter_AddRefs(msgHdr));
+    if (msgHdr) {
+      nsCString author;
+      nsCString subject;
+
+      msgHdr->GetMessageKey(&key);
+      msgHdr->GetAuthor(getter_Copies(author));
+      msgHdr->GetSubject(subject);
+      printf("hdr key = %u, author = %s subject = %s\n", key, author.get(),
+             subject.get());
+    }
+  }
+
+  nsCOMPtr<nsIMsgThreadEnumerator> threads;
+  rv = EnumerateThreads(getter_AddRefs(threads));
+  NS_ENSURE_SUCCESS(rv, rv);
+  bool hasMore = false;
+  while (NS_SUCCEEDED(rv = threads->HasMoreElements(&hasMore)) && hasMore) {
+    nsCOMPtr<nsIMsgThread> thread;
+    rv = threads->GetNext(getter_AddRefs(thread));
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    nsMsgKey key;
+    thread->GetThreadKey(&key);
+    printf("thread key = %u\n", key);
+    // DumpThread(key);
+  }
+  return NS_OK;
+}
+#endif /* DEBUG */
+
+// Cache the retention settings object and, when both it and the folder
+// info exist, persist each field as a dbfolderinfo property. Note the
+// commit at the end runs unconditionally, even when nothing was written.
+NS_IMETHODIMP nsMsgDatabase::SetMsgRetentionSettings(
+    nsIMsgRetentionSettings* retentionSettings) {
+  m_retentionSettings = retentionSettings;
+  if (retentionSettings && m_dbFolderInfo) {
+    nsresult rv;
+
+    nsMsgRetainByPreference retainByPreference;
+    uint32_t daysToKeepHdrs;
+    uint32_t numHeadersToKeep;
+    uint32_t daysToKeepBodies;
+    bool cleanupBodiesByDays;
+    bool useServerDefaults;
+    bool applyToFlaggedMessages;
+
+    rv = retentionSettings->GetRetainByPreference(&retainByPreference);
+    NS_ENSURE_SUCCESS(rv, rv);
+    rv = retentionSettings->GetDaysToKeepHdrs(&daysToKeepHdrs);
+    NS_ENSURE_SUCCESS(rv, rv);
+    rv = retentionSettings->GetNumHeadersToKeep(&numHeadersToKeep);
+    NS_ENSURE_SUCCESS(rv, rv);
+    rv = retentionSettings->GetDaysToKeepBodies(&daysToKeepBodies);
+    NS_ENSURE_SUCCESS(rv, rv);
+    (void)retentionSettings->GetCleanupBodiesByDays(&cleanupBodiesByDays);
+    (void)retentionSettings->GetUseServerDefaults(&useServerDefaults);
+    rv = retentionSettings->GetApplyToFlaggedMessages(&applyToFlaggedMessages);
+    NS_ENSURE_SUCCESS(rv, rv);
+    // need to write this to the db. We'll just use the dbfolderinfo to write
+    // properties.
+    m_dbFolderInfo->SetUint32Property("retainBy", retainByPreference);
+    m_dbFolderInfo->SetUint32Property("daysToKeepHdrs", daysToKeepHdrs);
+    m_dbFolderInfo->SetUint32Property("numHdrsToKeep", numHeadersToKeep);
+    m_dbFolderInfo->SetUint32Property("daysToKeepBodies", daysToKeepBodies);
+    m_dbFolderInfo->SetBooleanProperty("cleanupBodies", cleanupBodiesByDays);
+    m_dbFolderInfo->SetBooleanProperty("useServerDefaults", useServerDefaults);
+    m_dbFolderInfo->SetBooleanProperty("applyToFlaggedMessages",
+                                       applyToFlaggedMessages);
+  }
+  Commit(nsMsgDBCommitType::kLargeCommit);
+  return NS_OK;
+}
+
+// Lazily build the retention-settings object from the dbfolderinfo
+// properties on first call; later calls return the cached object.
+// Defaults mirror the property defaults passed to the getters below.
+NS_IMETHODIMP nsMsgDatabase::GetMsgRetentionSettings(
+    nsIMsgRetentionSettings** retentionSettings) {
+  NS_ENSURE_ARG_POINTER(retentionSettings);
+  if (!m_retentionSettings) {
+    // create a new one, and initialize it from the db.
+    m_retentionSettings = new nsMsgRetentionSettings;
+    if (m_retentionSettings && m_dbFolderInfo) {
+      nsMsgRetainByPreference retainByPreference;
+      uint32_t daysToKeepHdrs = 0;
+      uint32_t numHeadersToKeep = 0;
+      bool useServerDefaults;
+      uint32_t daysToKeepBodies = 0;
+      bool cleanupBodiesByDays = false;
+      bool applyToFlaggedMessages;
+
+      m_dbFolderInfo->GetUint32Property("retainBy",
+                                        nsIMsgRetentionSettings::nsMsgRetainAll,
+                                        &retainByPreference);
+      m_dbFolderInfo->GetUint32Property("daysToKeepHdrs", 0, &daysToKeepHdrs);
+      m_dbFolderInfo->GetUint32Property("numHdrsToKeep", 0, &numHeadersToKeep);
+      m_dbFolderInfo->GetUint32Property("daysToKeepBodies", 0,
+                                        &daysToKeepBodies);
+      m_dbFolderInfo->GetBooleanProperty("useServerDefaults", true,
+                                         &useServerDefaults);
+      m_dbFolderInfo->GetBooleanProperty("cleanupBodies", false,
+                                         &cleanupBodiesByDays);
+      m_dbFolderInfo->GetBooleanProperty("applyToFlaggedMessages", false,
+                                         &applyToFlaggedMessages);
+      m_retentionSettings->SetRetainByPreference(retainByPreference);
+      m_retentionSettings->SetDaysToKeepHdrs(daysToKeepHdrs);
+      m_retentionSettings->SetNumHeadersToKeep(numHeadersToKeep);
+      m_retentionSettings->SetDaysToKeepBodies(daysToKeepBodies);
+      m_retentionSettings->SetUseServerDefaults(useServerDefaults);
+      m_retentionSettings->SetCleanupBodiesByDays(cleanupBodiesByDays);
+      m_retentionSettings->SetApplyToFlaggedMessages(applyToFlaggedMessages);
+    }
+  }
+  NS_IF_ADDREF(*retentionSettings = m_retentionSettings);
+  return NS_OK;
+}
+
+// Cache the download settings object and persist its fields as
+// dbfolderinfo properties. Unlike SetMsgRetentionSettings, no commit is
+// performed here.
+NS_IMETHODIMP nsMsgDatabase::SetMsgDownloadSettings(
+    nsIMsgDownloadSettings* downloadSettings) {
+  m_downloadSettings = downloadSettings;
+  if (downloadSettings && m_dbFolderInfo) {
+    nsresult rv;
+
+    bool useServerDefaults;
+    bool downloadByDate;
+    uint32_t ageLimitOfMsgsToDownload;
+    bool downloadUnreadOnly;
+
+    rv = downloadSettings->GetUseServerDefaults(&useServerDefaults);
+    NS_ENSURE_SUCCESS(rv, rv);
+    rv = downloadSettings->GetDownloadByDate(&downloadByDate);
+    NS_ENSURE_SUCCESS(rv, rv);
+    rv = downloadSettings->GetDownloadUnreadOnly(&downloadUnreadOnly);
+    NS_ENSURE_SUCCESS(rv, rv);
+    rv = downloadSettings->GetAgeLimitOfMsgsToDownload(
+        &ageLimitOfMsgsToDownload);
+    NS_ENSURE_SUCCESS(rv, rv);
+    // need to write this to the db. We'll just use the dbfolderinfo to write
+    // properties.
+    m_dbFolderInfo->SetBooleanProperty("useServerDefaults", useServerDefaults);
+    m_dbFolderInfo->SetBooleanProperty("downloadByDate", downloadByDate);
+    m_dbFolderInfo->SetBooleanProperty("downloadUnreadOnly",
+                                       downloadUnreadOnly);
+    m_dbFolderInfo->SetUint32Property("ageLimit", ageLimitOfMsgsToDownload);
+  }
+  return NS_OK;
+}
+
+// Lazily build the download-settings object from dbfolderinfo properties
+// on first call; later calls return the cached object.
+NS_IMETHODIMP nsMsgDatabase::GetMsgDownloadSettings(
+    nsIMsgDownloadSettings** downloadSettings) {
+  NS_ENSURE_ARG_POINTER(downloadSettings);
+  if (!m_downloadSettings) {
+    // create a new one, and initialize it from the db.
+    m_downloadSettings = new nsMsgDownloadSettings;
+    if (m_downloadSettings && m_dbFolderInfo) {
+      bool useServerDefaults;
+      bool downloadByDate;
+      uint32_t ageLimitOfMsgsToDownload;
+      bool downloadUnreadOnly;
+
+      m_dbFolderInfo->GetBooleanProperty("useServerDefaults", true,
+                                         &useServerDefaults);
+      m_dbFolderInfo->GetBooleanProperty("downloadByDate", false,
+                                         &downloadByDate);
+      m_dbFolderInfo->GetBooleanProperty("downloadUnreadOnly", false,
+                                         &downloadUnreadOnly);
+      m_dbFolderInfo->GetUint32Property("ageLimit", 0,
+                                        &ageLimitOfMsgsToDownload);
+
+      m_downloadSettings->SetUseServerDefaults(useServerDefaults);
+      m_downloadSettings->SetDownloadByDate(downloadByDate);
+      m_downloadSettings->SetDownloadUnreadOnly(downloadUnreadOnly);
+      m_downloadSettings->SetAgeLimitOfMsgsToDownload(ageLimitOfMsgsToDownload);
+    }
+  }
+  NS_IF_ADDREF(*downloadSettings = m_downloadSettings);
+  return NS_OK;
+}
+
+// Purge old/excess messages from this folder per aMsgRetentionSettings.
+// Drafts/Templates/Outbox folders are never purged. If aDeleteViaFolder is
+// true, the folder performs the deletion (which also removes DB headers);
+// otherwise only the DB headers are deleted, with periodic commits.
+// Records the purge attempt time in the folder's "LastPurgeTime" property
+// regardless of outcome. Requires m_folder; returns its first failure.
+NS_IMETHODIMP nsMsgDatabase::ApplyRetentionSettings(
+    nsIMsgRetentionSettings* aMsgRetentionSettings, bool aDeleteViaFolder) {
+  NS_ENSURE_ARG_POINTER(aMsgRetentionSettings);
+  nsresult rv = NS_OK;
+
+  if (!m_folder) return NS_ERROR_NULL_POINTER;
+
+  bool isDraftsTemplatesOutbox;
+  uint32_t dtoFlags = nsMsgFolderFlags::Drafts | nsMsgFolderFlags::Templates |
+                      nsMsgFolderFlags::Queue;
+  (void)m_folder->IsSpecialFolder(dtoFlags, true, &isDraftsTemplatesOutbox);
+  // Never apply retention settings to Drafts/Templates/Outbox.
+  if (isDraftsTemplatesOutbox) return NS_OK;
+
+  nsTArray<RefPtr<nsIMsgDBHdr>> msgHdrsToDelete;
+  nsMsgRetainByPreference retainByPreference;
+  aMsgRetentionSettings->GetRetainByPreference(&retainByPreference);
+
+  bool applyToFlaggedMessages = false;
+  aMsgRetentionSettings->GetApplyToFlaggedMessages(&applyToFlaggedMessages);
+
+  // Collect the doomed headers according to the retention mode.
+  uint32_t daysToKeepHdrs = 0;
+  uint32_t numHeadersToKeep = 0;
+  switch (retainByPreference) {
+    case nsIMsgRetentionSettings::nsMsgRetainAll:
+      break;
+    case nsIMsgRetentionSettings::nsMsgRetainByAge:
+      aMsgRetentionSettings->GetDaysToKeepHdrs(&daysToKeepHdrs);
+      rv = FindMessagesOlderThan(daysToKeepHdrs, applyToFlaggedMessages,
+                                 msgHdrsToDelete);
+      break;
+    case nsIMsgRetentionSettings::nsMsgRetainByNumHeaders:
+      aMsgRetentionSettings->GetNumHeadersToKeep(&numHeadersToKeep);
+      rv = FindExcessMessages(numHeadersToKeep, applyToFlaggedMessages,
+                              msgHdrsToDelete);
+      break;
+  }
+  if (m_folder) {
+    // update the time we attempted to purge this folder
+    char dateBuf[100];
+    dateBuf[0] = '\0';
+    PRExplodedTime exploded;
+    PR_ExplodeTime(PR_Now(), PR_LocalTimeParameters, &exploded);
+    PR_FormatTimeUSEnglish(dateBuf, sizeof(dateBuf), "%a %b %d %H:%M:%S %Y",
+                           &exploded);
+    m_folder->SetStringProperty("LastPurgeTime", nsDependentCString(dateBuf));
+  }
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  if (msgHdrsToDelete.IsEmpty()) {
+    return NS_OK; // No action required.
+  }
+
+  if (aDeleteViaFolder) {
+    // The folder delete will also delete headers from the DB.
+    rv = m_folder->DeleteMessages(msgHdrsToDelete, nullptr, true, false,
+                                  nullptr, false);
+  } else {
+    // We're just deleting headers in the DB.
+    uint32_t kindex = 0;
+    for (nsIMsgDBHdr* hdr : msgHdrsToDelete) {
+      // Commit after every 300 deletions so a large purge doesn't build up
+      // one enormous uncommitted transaction.
+      // BUGFIX: `kindex` was never incremented, so the per-header commit
+      // flag (`kindex % 300`) was always 0/false and no periodic commit
+      // ever happened.
+      rv = DeleteHeader(hdr, nullptr, ++kindex % 300 == 0, true);
+      if (NS_FAILED(rv)) {
+        break;
+      }
+    }
+    // compress commit if we deleted more than 10
+    if (msgHdrsToDelete.Length() > 10) {
+      Commit(nsMsgDBCommitType::kCompressCommit);
+    } else {
+      Commit(nsMsgDBCommitType::kLargeCommit);
+    }
+  }
+  return rv;
+}
+
+// Collect every header whose date is older than `daysToKeepHdrs` days ago
+// into hdrsToDelete (cleared first). Flagged (Marked) messages are skipped
+// unless applyToFlaggedMessages is set. Always returns NS_OK unless the
+// enumerator itself fails mid-iteration.
+nsresult nsMsgDatabase::FindMessagesOlderThan(
+    uint32_t daysToKeepHdrs, bool applyToFlaggedMessages,
+    nsTArray<RefPtr<nsIMsgDBHdr>>& hdrsToDelete) {
+  nsresult rv = NS_OK;
+  hdrsToDelete.Clear();
+
+  nsCOMPtr<nsIMsgEnumerator> hdrs;
+  rv = EnumerateMessages(getter_AddRefs(hdrs));
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // cutOffDay is the PRTime cut-off point. Any msg with a date less than
+  // that will get purged.
+  PRTime cutOffDay = PR_Now() - daysToKeepHdrs * PR_USEC_PER_DAY;
+
+  bool hasMore = false;
+  while (NS_SUCCEEDED(rv = hdrs->HasMoreElements(&hasMore)) && hasMore) {
+    nsCOMPtr<nsIMsgDBHdr> msg;
+    rv = hdrs->GetNext(getter_AddRefs(msg));
+    NS_ASSERTION(NS_SUCCEEDED(rv), "nsMsgDBEnumerator broken");
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    // Skip flagged messages unless the settings say to include them.
+    if (!applyToFlaggedMessages) {
+      uint32_t flags;
+      (void)msg->GetFlags(&flags);
+      if (flags & nsMsgMessageFlags::Marked) {
+        continue;
+      }
+    }
+
+    PRTime date;
+    msg->GetDate(&date);
+    if (date < cutOffDay) {
+      hdrsToDelete.AppendElement(msg);
+    }
+  }
+
+  return NS_OK;
+}
+
+// Collect headers beyond the first `numHeadersToKeep` into hdrsToDelete
+// (cleared first), skipping flagged messages unless applyToFlaggedMessages.
+// Iterates in enumerator order, so the messages kept are the last N seen.
+nsresult nsMsgDatabase::FindExcessMessages(
+    uint32_t numHeadersToKeep, bool applyToFlaggedMessages,
+    nsTArray<RefPtr<nsIMsgDBHdr>>& hdrsToDelete) {
+  nsresult rv = NS_OK;
+  hdrsToDelete.Clear();
+
+  nsCOMPtr<nsIMsgEnumerator> hdrs;
+  rv = EnumerateMessages(getter_AddRefs(hdrs));
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // Start from the total header count and delete until it reaches the cap.
+  mdb_count numHdrs = 0;
+  if (m_mdbAllMsgHeadersTable)
+    m_mdbAllMsgHeadersTable->GetCount(GetEnv(), &numHdrs);
+  else
+    return NS_ERROR_NULL_POINTER;
+
+  bool hasMore = false;
+  while (NS_SUCCEEDED(rv = hdrs->HasMoreElements(&hasMore)) && hasMore) {
+    nsCOMPtr<nsIMsgDBHdr> msg;
+    rv = hdrs->GetNext(getter_AddRefs(msg));
+    NS_ASSERTION(NS_SUCCEEDED(rv), "nsMsgDBEnumerator broken");
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    if (!applyToFlaggedMessages) {
+      uint32_t flags;
+      (void)msg->GetFlags(&flags);
+      if (flags & nsMsgMessageFlags::Marked) {
+        continue;
+      }
+    }
+
+    // this isn't quite right - we want to prefer unread messages (keep all of
+    // those we can)
+    if (numHdrs > numHeadersToKeep) {
+      numHdrs--;
+      hdrsToDelete.AppendElement(msg);
+    }
+  }
+
+  return NS_OK;
+}
+
+NS_IMPL_ISUPPORTS(nsMsgRetentionSettings, nsIMsgRetentionSettings)
+
+// Initialise the member variables to reasonable defaults.
+// (m_retainByPreference 1 corresponds to the "retain all" preference.)
+nsMsgRetentionSettings::nsMsgRetentionSettings()
+    : m_retainByPreference(1),
+      m_daysToKeepHdrs(0),
+      m_numHeadersToKeep(0),
+      m_useServerDefaults(true),
+      m_cleanupBodiesByDays(false),
+      m_daysToKeepBodies(0),
+      m_applyToFlaggedMessages(false) {}
+
+nsMsgRetentionSettings::~nsMsgRetentionSettings() {}
+
+// --- nsMsgRetentionSettings attribute accessors ---
+// Plain getters/setters over member variables; no validation and no
+// persistence here (values are written to the DB by
+// nsMsgDatabase::SetMsgRetentionSettings).
+
+/* attribute unsigned long retainByPreference */
+
+NS_IMETHODIMP nsMsgRetentionSettings::GetRetainByPreference(
+    nsMsgRetainByPreference* retainByPreference) {
+  NS_ENSURE_ARG_POINTER(retainByPreference);
+  *retainByPreference = m_retainByPreference;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgRetentionSettings::SetRetainByPreference(
+    nsMsgRetainByPreference retainByPreference) {
+  m_retainByPreference = retainByPreference;
+  return NS_OK;
+}
+
+/* attribute long daysToKeepHdrs; */
+NS_IMETHODIMP nsMsgRetentionSettings::GetDaysToKeepHdrs(
+    uint32_t* aDaysToKeepHdrs) {
+  NS_ENSURE_ARG_POINTER(aDaysToKeepHdrs);
+  *aDaysToKeepHdrs = m_daysToKeepHdrs;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgRetentionSettings::SetDaysToKeepHdrs(
+    uint32_t aDaysToKeepHdrs) {
+  m_daysToKeepHdrs = aDaysToKeepHdrs;
+  return NS_OK;
+}
+
+/* attribute long numHeadersToKeep; */
+NS_IMETHODIMP nsMsgRetentionSettings::GetNumHeadersToKeep(
+    uint32_t* aNumHeadersToKeep) {
+  NS_ENSURE_ARG_POINTER(aNumHeadersToKeep);
+  *aNumHeadersToKeep = m_numHeadersToKeep;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgRetentionSettings::SetNumHeadersToKeep(
+    uint32_t aNumHeadersToKeep) {
+  m_numHeadersToKeep = aNumHeadersToKeep;
+  return NS_OK;
+}
+/* attribute boolean useServerDefaults; */
+NS_IMETHODIMP nsMsgRetentionSettings::GetUseServerDefaults(
+    bool* aUseServerDefaults) {
+  NS_ENSURE_ARG_POINTER(aUseServerDefaults);
+  *aUseServerDefaults = m_useServerDefaults;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgRetentionSettings::SetUseServerDefaults(
+    bool aUseServerDefaults) {
+  m_useServerDefaults = aUseServerDefaults;
+  return NS_OK;
+}
+
+/* attribute boolean cleanupBodiesByDays; */
+NS_IMETHODIMP nsMsgRetentionSettings::GetCleanupBodiesByDays(
+    bool* aCleanupBodiesByDays) {
+  NS_ENSURE_ARG_POINTER(aCleanupBodiesByDays);
+  *aCleanupBodiesByDays = m_cleanupBodiesByDays;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgRetentionSettings::SetCleanupBodiesByDays(
+    bool aCleanupBodiesByDays) {
+  m_cleanupBodiesByDays = aCleanupBodiesByDays;
+  return NS_OK;
+}
+
+/* attribute long daysToKeepBodies; */
+NS_IMETHODIMP nsMsgRetentionSettings::GetDaysToKeepBodies(
+    uint32_t* aDaysToKeepBodies) {
+  NS_ENSURE_ARG_POINTER(aDaysToKeepBodies);
+  *aDaysToKeepBodies = m_daysToKeepBodies;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgRetentionSettings::SetDaysToKeepBodies(
+    uint32_t aDaysToKeepBodies) {
+  m_daysToKeepBodies = aDaysToKeepBodies;
+  return NS_OK;
+}
+
+/* attribute boolean applyToFlaggedMessages; */
+NS_IMETHODIMP nsMsgRetentionSettings::GetApplyToFlaggedMessages(
+    bool* aApplyToFlaggedMessages) {
+  NS_ENSURE_ARG_POINTER(aApplyToFlaggedMessages);
+  *aApplyToFlaggedMessages = m_applyToFlaggedMessages;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgRetentionSettings::SetApplyToFlaggedMessages(
+    bool aApplyToFlaggedMessages) {
+  m_applyToFlaggedMessages = aApplyToFlaggedMessages;
+  return NS_OK;
+}
+
+NS_IMPL_ISUPPORTS(nsMsgDownloadSettings, nsIMsgDownloadSettings)
+
+// --- nsMsgDownloadSettings: simple value object with plain accessors ---
+// Persistence is handled by nsMsgDatabase::SetMsgDownloadSettings.
+nsMsgDownloadSettings::nsMsgDownloadSettings() {
+  m_useServerDefaults = false;
+  m_downloadUnreadOnly = false;
+  m_downloadByDate = false;
+  m_ageLimitOfMsgsToDownload = 0;
+}
+
+nsMsgDownloadSettings::~nsMsgDownloadSettings() {}
+
+/* attribute boolean useServerDefaults; */
+NS_IMETHODIMP nsMsgDownloadSettings::GetUseServerDefaults(
+    bool* aUseServerDefaults) {
+  NS_ENSURE_ARG_POINTER(aUseServerDefaults);
+  *aUseServerDefaults = m_useServerDefaults;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgDownloadSettings::SetUseServerDefaults(
+    bool aUseServerDefaults) {
+  m_useServerDefaults = aUseServerDefaults;
+  return NS_OK;
+}
+
+/* attribute boolean downloadUnreadOnly; */
+NS_IMETHODIMP nsMsgDownloadSettings::GetDownloadUnreadOnly(
+    bool* aDownloadUnreadOnly) {
+  NS_ENSURE_ARG_POINTER(aDownloadUnreadOnly);
+  *aDownloadUnreadOnly = m_downloadUnreadOnly;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgDownloadSettings::SetDownloadUnreadOnly(
+    bool aDownloadUnreadOnly) {
+  m_downloadUnreadOnly = aDownloadUnreadOnly;
+  return NS_OK;
+}
+
+/* attribute boolean downloadByDate; */
+NS_IMETHODIMP nsMsgDownloadSettings::GetDownloadByDate(bool* aDownloadByDate) {
+  NS_ENSURE_ARG_POINTER(aDownloadByDate);
+  *aDownloadByDate = m_downloadByDate;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDownloadSettings::SetDownloadByDate(bool aDownloadByDate) {
+  m_downloadByDate = aDownloadByDate;
+  return NS_OK;
+}
+
+/* attribute long ageLimitOfMsgsToDownload; */
+NS_IMETHODIMP nsMsgDownloadSettings::GetAgeLimitOfMsgsToDownload(
+    uint32_t* ageLimitOfMsgsToDownload) {
+  NS_ENSURE_ARG_POINTER(ageLimitOfMsgsToDownload);
+  *ageLimitOfMsgsToDownload = m_ageLimitOfMsgsToDownload;
+  return NS_OK;
+}
+NS_IMETHODIMP nsMsgDownloadSettings::SetAgeLimitOfMsgsToDownload(
+    uint32_t ageLimitOfMsgsToDownload) {
+  m_ageLimitOfMsgsToDownload = ageLimitOfMsgsToDownload;
+  return NS_OK;
+}
+
+// Read the default view flags / sort type / sort order from preferences,
+// clamping each to a sane value if the pref is out of range.
+NS_IMETHODIMP nsMsgDatabase::GetDefaultViewFlags(
+    nsMsgViewFlagsTypeValue* aDefaultViewFlags) {
+  NS_ENSURE_ARG_POINTER(aDefaultViewFlags);
+  GetIntPref("mailnews.default_view_flags", aDefaultViewFlags);
+  // Reject any bit outside the set of known view flags.
+  if (*aDefaultViewFlags < nsMsgViewFlagsType::kNone ||
+      *aDefaultViewFlags >
+          (nsMsgViewFlagsType::kThreadedDisplay |
+           nsMsgViewFlagsType::kShowIgnored | nsMsgViewFlagsType::kUnreadOnly |
+           nsMsgViewFlagsType::kExpandAll | nsMsgViewFlagsType::kGroupBySort))
+    *aDefaultViewFlags = nsMsgViewFlagsType::kNone;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDatabase::GetDefaultSortType(
+    nsMsgViewSortTypeValue* aDefaultSortType) {
+  NS_ENSURE_ARG_POINTER(aDefaultSortType);
+  GetIntPref("mailnews.default_sort_type", aDefaultSortType);
+  if (*aDefaultSortType < nsMsgViewSortType::byDate ||
+      *aDefaultSortType > nsMsgViewSortType::byAccount)
+    *aDefaultSortType = nsMsgViewSortType::byDate;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgDatabase::GetDefaultSortOrder(
+    nsMsgViewSortOrderValue* aDefaultSortOrder) {
+  NS_ENSURE_ARG_POINTER(aDefaultSortOrder);
+  GetIntPref("mailnews.default_sort_order", aDefaultSortOrder);
+  // Anything that isn't explicitly descending falls back to ascending.
+  if (*aDefaultSortOrder != nsMsgViewSortOrder::descending)
+    *aDefaultSortOrder = nsMsgViewSortOrder::ascending;
+  return NS_OK;
+}
+
+// Shrink the header cache to aSize (growing is a no-op); shrinking also
+// flushes the current cache contents.
+NS_IMETHODIMP nsMsgDatabase::ResetHdrCacheSize(uint32_t aSize) {
+  if (m_cacheSize > aSize) {
+    m_cacheSize = aSize;
+    ClearHdrCache(false);
+  }
+  return NS_OK;
+}
+
+// Return a copy of the set of keys flagged as "new" since the last clear.
+NS_IMETHODIMP
+nsMsgDatabase::GetNewList(nsTArray<nsMsgKey>& aNewKeys) {
+  aNewKeys = m_newSet.Clone();
+  return NS_OK;
+}
+
+// Find (or optionally create) the Mork table that caches search hits for
+// the given search-folder URI; the URI string itself is used as the table
+// kind token. Returns NS_ERROR_FAILURE when no table could be obtained.
+nsresult nsMsgDatabase::GetSearchResultsTable(const nsACString& searchFolderUri,
+                                              bool createIfMissing,
+                                              nsIMdbTable** table) {
+  mdb_kind kindToken;
+  mdb_count numTables;
+  mdb_bool mustBeUnique;
+  NS_ENSURE_TRUE(m_mdbStore, NS_ERROR_NULL_POINTER);
+
+  // NOTE(review): err from StringToToken is overwritten without being
+  // checked; a token failure would surface via GetTableKind instead.
+  nsresult err = m_mdbStore->StringToToken(
+      GetEnv(), PromiseFlatCString(searchFolderUri).get(), &kindToken);
+  err = m_mdbStore->GetTableKind(GetEnv(), m_hdrRowScopeToken, kindToken,
+                                 &numTables, &mustBeUnique, table);
+  if ((!*table || NS_FAILED(err)) && createIfMissing)
+    err = m_mdbStore->NewTable(GetEnv(), m_hdrRowScopeToken, kindToken, true,
+                               nullptr, table);
+
+  return *table ? err : NS_ERROR_FAILURE;
+}
+
+// Return an enumerator over the cached search hits for a search folder.
+// Failing because no cache table exists yet is the expected cold-start
+// path, not a real error condition.
+NS_IMETHODIMP
+nsMsgDatabase::GetCachedHits(const nsACString& aSearchFolderUri,
+                             nsIMsgEnumerator** aEnumerator) {
+  nsCOMPtr<nsIMdbTable> table;
+  (void)GetSearchResultsTable(aSearchFolderUri, false, getter_AddRefs(table));
+  if (!table) return NS_ERROR_FAILURE; // expected result for no cached hits
+  NS_ADDREF(*aEnumerator =
+                new nsMsgDBEnumerator(this, table, nullptr, nullptr));
+  return NS_OK;
+}
+
+// Synchronize the cached-hits table for aSearchFolderUri with aNewHits.
+// Performs a single merge pass over two key-sorted sequences (the table
+// rows and aNewHits): matching keys are kept, keys only in aNewHits are
+// inserted at the matching position, and keys only in the table are cut
+// and reported back in aStaleHits. Commits at the end.
+NS_IMETHODIMP nsMsgDatabase::RefreshCache(const nsACString& aSearchFolderUri,
+                                          nsTArray<nsMsgKey> const& aNewHits,
+                                          nsTArray<nsMsgKey>& aStaleHits) {
+  nsCOMPtr<nsIMdbTable> table;
+  nsresult err =
+      GetSearchResultsTable(aSearchFolderUri, true, getter_AddRefs(table));
+  NS_ENSURE_SUCCESS(err, err);
+  // update the table so that it just contains aNewHits.
+  // And, keep track of the headers in the original table but not in aNewHits,
+  // so we can put those in aStaleHits. both aNewHits and the db table are
+  // sorted by uid/key. So, start at the beginning of the table and the aNewHits
+  // array.
+  uint32_t newHitIndex = 0;
+  uint32_t tableRowIndex = 0;
+
+  uint32_t rowCount;
+  table->GetCount(GetEnv(), &rowCount);
+  aStaleHits.Clear();
+
+#ifdef DEBUG
+  // The merge below is only correct if aNewHits is strictly ascending.
+  for (uint64_t i = 1; i < aNewHits.Length(); i++) {
+    NS_ASSERTION(aNewHits[i - 1] < aNewHits[i],
+                 "cached hits for storage not sorted correctly");
+  }
+#endif
+
+  while (newHitIndex < aNewHits.Length() || tableRowIndex < rowCount) {
+    mdbOid oid;
+    nsMsgKey tableRowKey = nsMsgKey_None;
+    if (tableRowIndex < rowCount) {
+      nsresult ret = table->PosToOid(GetEnv(), tableRowIndex, &oid);
+      if (NS_FAILED(ret)) {
+        tableRowIndex++;
+        continue;
+      }
+      tableRowKey =
+          oid.mOid_Id; // ### TODO need the real key for the 0th key problem.
+    }
+
+    // Case 1: key present in both — advance both cursors.
+    if (newHitIndex < aNewHits.Length() &&
+        aNewHits[newHitIndex] == tableRowKey) {
+      newHitIndex++;
+      tableRowIndex++;
+      continue;
+    } else if (tableRowIndex >= rowCount ||
+               (newHitIndex < aNewHits.Length() &&
+                aNewHits[newHitIndex] < tableRowKey)) {
+      // Case 2: new hit missing from the table — insert its hdr row here.
+      nsCOMPtr<nsIMdbRow> hdrRow;
+      mdbOid rowObjectId;
+
+      rowObjectId.mOid_Id = aNewHits[newHitIndex];
+      rowObjectId.mOid_Scope = m_hdrRowScopeToken;
+      err = m_mdbStore->GetRow(GetEnv(), &rowObjectId, getter_AddRefs(hdrRow));
+      if (hdrRow) {
+        // AddRow appends; MoveRow then relocates it to keep sort order.
+        table->AddRow(GetEnv(), hdrRow);
+        mdb_pos newPos;
+        table->MoveRow(GetEnv(), hdrRow, rowCount, tableRowIndex, &newPos);
+        rowCount++;
+        tableRowIndex++;
+      }
+      newHitIndex++;
+      continue;
+    } else if (newHitIndex >= aNewHits.Length() ||
+               aNewHits[newHitIndex] > tableRowKey) {
+      // Case 3: table row no longer a hit — cut it and report it stale.
+      aStaleHits.AppendElement(tableRowKey);
+      table->CutOid(GetEnv(), &oid);
+      rowCount--;
+      continue; // don't increment tableRowIndex since we removed that row.
+    }
+  }
+
+#ifdef DEBUG_David_Bienvenu
+  printf("after refreshing cache\n");
+  // iterate over table and assert that it's in id order
+  table->GetCount(GetEnv(), &rowCount);
+  mdbOid oid;
+  tableRowIndex = 0;
+  mdb_id prevId = 0;
+  while (tableRowIndex < rowCount) {
+    nsresult ret = table->PosToOid(m_mdbEnv, tableRowIndex++, &oid);
+    if (tableRowIndex > 1 && oid.mOid_Id <= prevId) {
+      NS_ASSERTION(
+          false, "inserting row into cached hits table, not sorted correctly");
+      printf("key %lx is before or equal %lx\n", prevId, oid.mOid_Id);
+    }
+    prevId = oid.mOid_Id;
+  }
+
+#endif
+  Commit(nsMsgDBCommitType::kLargeCommit);
+  return NS_OK;
+}
+
+// search sorted table: binary-search a table whose rows are sorted by oid
+// id and return the position at which idToInsert should be inserted to
+// keep the table sorted. Asserts (but still returns a usable position) if
+// the id is already present.
+mdb_pos nsMsgDatabase::FindInsertIndexInSortedTable(nsIMdbTable* table,
+                                                    mdb_id idToInsert) {
+  mdb_pos searchPos = 0;
+  uint32_t rowCount;
+  table->GetCount(GetEnv(), &rowCount);
+  mdb_pos hi = rowCount;
+  mdb_pos lo = 0;
+
+  while (hi > lo) {
+    mdbOid outOid;
+    searchPos = (lo + hi - 1) / 2;
+    table->PosToOid(GetEnv(), searchPos, &outOid);
+    if (outOid.mOid_Id == idToInsert) {
+      NS_ASSERTION(false, "id shouldn't be in table");
+      return hi;
+    }
+    if (outOid.mOid_Id > idToInsert)
+      hi = searchPos;
+    else // if (outOid.mOid_Id < idToInsert)
+      lo = searchPos + 1;
+  }
+  return hi;
+}
+// Add (aAdd=true, inserted in key-sorted position) or remove (aAdd=false)
+// a single header's row in the cached-hits table for a search folder.
+NS_IMETHODIMP
+nsMsgDatabase::UpdateHdrInCache(const nsACString& aSearchFolderUri,
+                                nsIMsgDBHdr* aHdr, bool aAdd) {
+  nsCOMPtr<nsIMdbTable> table;
+  nsresult err =
+      GetSearchResultsTable(aSearchFolderUri, true, getter_AddRefs(table));
+  NS_ENSURE_SUCCESS(err, err);
+  nsMsgKey key;
+  err = aHdr->GetMessageKey(&key);
+  nsMsgHdr* msgHdr =
+      static_cast<nsMsgHdr*>(aHdr); // closed system, so this is ok
+  nsIMdbRow* hdrRow = msgHdr->GetMDBRow();
+  if (NS_SUCCEEDED(err) && m_mdbStore && hdrRow) {
+    if (!aAdd) {
+      table->CutRow(m_mdbEnv, hdrRow);
+    } else {
+      mdbOid rowId;
+      hdrRow->GetOid(m_mdbEnv, &rowId);
+      // AddRow appends; MoveRow then places the row at the sorted position.
+      mdb_pos insertPos = FindInsertIndexInSortedTable(table, rowId.mOid_Id);
+      uint32_t rowCount;
+      table->GetCount(m_mdbEnv, &rowCount);
+      table->AddRow(m_mdbEnv, hdrRow);
+      mdb_pos newPos;
+      table->MoveRow(m_mdbEnv, hdrRow, rowCount, insertPos, &newPos);
+    }
+  }
+
+  // if (aAdd)
+  // if we need to add this hdr, we need to insert it in key order.
+  return NS_OK;
+}
+// Test whether a header's key is present in the cached-hits table for a
+// search folder (creating the table if missing). *aResult receives the
+// membership test outcome.
+NS_IMETHODIMP
+nsMsgDatabase::HdrIsInCache(const nsACString& aSearchFolderUri,
+                            nsIMsgDBHdr* aHdr, bool* aResult) {
+  NS_ENSURE_ARG_POINTER(aResult);
+  nsCOMPtr<nsIMdbTable> table;
+  nsresult err =
+      GetSearchResultsTable(aSearchFolderUri, true, getter_AddRefs(table));
+  NS_ENSURE_SUCCESS(err, err);
+  nsMsgKey key;
+  aHdr->GetMessageKey(&key);
+  // The row oid in the hdr scope is keyed directly by the message key.
+  mdbOid rowObjectId;
+  rowObjectId.mOid_Id = key;
+  rowObjectId.mOid_Scope = m_hdrRowScopeToken;
+  mdb_bool hasOid;
+  err = table->HasOid(GetEnv(), &rowObjectId, &hasOid);
+  *aResult = hasOid;
+  return err;
+}
diff --git a/comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.cpp b/comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.cpp
new file mode 100644
index 0000000000..a64ef46f57
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.cpp
@@ -0,0 +1,317 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsMsgDatabaseEnumerators.h"
+#include "nsMsgDatabase.h"
+#include "nsIMsgHdr.h"
+#include "nsMsgThread.h"
+
+/*
+ * nsMsgDBEnumerator implementation
+ */
+
+// Construct an enumerator over the rows of `table`, optionally filtered by
+// `filter` (called with `closure`), iterating forward or backward.
+nsMsgDBEnumerator::nsMsgDBEnumerator(nsMsgDatabase* db, nsIMdbTable* table,
+ nsMsgDBEnumeratorFilter filter,
+ void* closure, bool iterateForwards)
+ : mDB(db),
+ mDone(false),
+ mIterateForwards(iterateForwards),
+ mFilter(filter),
+ mClosure(closure) {
+ mTable = table;
+ mRowPos = 0;
+ // Register with the DB so it can Invalidate() us if it is destroyed or
+ // force-closed while we're still alive.
+ mDB->m_msgEnumerators.AppendElement(this);
+}
+
+nsMsgDBEnumerator::~nsMsgDBEnumerator() { Invalidate(); }
+
+// Detach from the database: drop Mork objects, mark the enumeration done,
+// and deregister so the DB stops tracking us. Safe to call more than once.
+void nsMsgDBEnumerator::Invalidate() {
+ // Order is important here. If the database is destroyed first, releasing
+ // the cursor will crash (due, I think, to a disconnect between XPCOM and
+ // Mork internal memory management).
+ mRowCursor = nullptr;
+ mTable = nullptr;
+ mResultHdr = nullptr;
+ mDone = true;
+ if (mDB) {
+ mDB->m_msgEnumerators.RemoveElement(this);
+ mDB = nullptr;
+ }
+}
+
+// Lazily create the Mork row cursor, positioned just outside the table on
+// the side we start iterating from.
+nsresult nsMsgDBEnumerator::GetRowCursor() {
+ mDone = false;
+
+ if (!mDB || !mTable) return NS_ERROR_NULL_POINTER;
+
+ if (mIterateForwards) {
+ // Position before row 0 so the first advance lands on the first row.
+ mRowPos = -1;
+ } else {
+ // Position after the last row for backward iteration.
+ mdb_count numRows;
+ mTable->GetCount(mDB->GetEnv(), &numRows);
+ mRowPos = numRows; // startPos is 0 relative.
+ }
+ return mTable->GetTableRowCursor(mDB->GetEnv(), mRowPos,
+ getter_AddRefs(mRowCursor));
+}
+
+// nsIMsgEnumerator: hand out the next header, consuming one prefetched by
+// HasMoreElements() if present.
+NS_IMETHODIMP nsMsgDBEnumerator::GetNext(nsIMsgDBHdr** aItem) {
+ if (!aItem) return NS_ERROR_NULL_POINTER;
+ *aItem = nullptr;
+
+ // If we've already got one ready, return it.
+ if (mResultHdr) {
+ mResultHdr.forget(aItem);
+ return NS_OK;
+ }
+
+ // Bail out if enumerator has been invalidated.
+ if (!mDB) {
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult rv = InternalGetNext(aItem);
+ NS_ENSURE_SUCCESS(rv, rv);
+ // InternalGetNext() signals exhaustion with a null header and NS_OK; map
+ // that to a failure code for callers of GetNext().
+ return *aItem ? NS_OK : NS_ERROR_FAILURE;
+}
+
+// Advance the cursor to the next row that survives the expunged-flag check
+// and the optional filter. On success *nextHdr is the header (addref'd) or
+// nullptr when the table is exhausted.
+nsresult nsMsgDBEnumerator::InternalGetNext(nsIMsgDBHdr** nextHdr) {
+ nsresult rv;
+
+ *nextHdr = nullptr;
+
+ if (!mRowCursor) {
+ rv = GetRowCursor();
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ while (true) {
+ // NOTE(review): hdrRow is a raw pointer from the Mork cursor; ownership
+ // follows the nsIMdbTableRowCursor contract — confirm no ref is leaked
+ // on the `continue` paths below.
+ nsIMdbRow* hdrRow;
+ if (mIterateForwards) {
+ rv = mRowCursor->NextRow(mDB->GetEnv(), &hdrRow, &mRowPos);
+ } else {
+ rv = mRowCursor->PrevRow(mDB->GetEnv(), &hdrRow, &mRowPos);
+ }
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (!hdrRow) {
+ // No more rows, so we're done.
+ *nextHdr = nullptr;
+ return NS_OK;
+ }
+
+ // Get key from row
+ mdbOid outOid;
+ nsMsgKey key = nsMsgKey_None;
+ rv = hdrRow->GetOid(mDB->GetEnv(), &outOid);
+ NS_ENSURE_SUCCESS(rv, rv);
+ key = outOid.mOid_Id;
+
+ nsCOMPtr<nsIMsgDBHdr> hdr;
+ rv = mDB->CreateMsgHdr(hdrRow, key, getter_AddRefs(hdr));
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Ignore expunged messages.
+ uint32_t flags;
+ hdr->GetFlags(&flags);
+ if (flags & nsMsgMessageFlags::Expunged) {
+ continue;
+ }
+
+ // Ignore anything which doesn't pass the filter func (if there is one).
+ if (mFilter && NS_FAILED(mFilter(hdr, mClosure))) {
+ continue;
+ }
+
+ // If we get this far, we've found it.
+ hdr.forget(nextHdr);
+ return NS_OK;
+ }
+}
+
+// nsIMsgEnumerator: prefetch the next header into mResultHdr (consumed later
+// by GetNext()) and report whether one exists.
+NS_IMETHODIMP nsMsgDBEnumerator::HasMoreElements(bool* aResult) {
+ if (!aResult) return NS_ERROR_NULL_POINTER;
+
+ if (!mResultHdr) {
+ // Bail out if enumerator has been invalidated.
+ if (!mDB) {
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult rv = InternalGetNext(getter_AddRefs(mResultHdr));
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (!mResultHdr) {
+ mDone = true;
+ }
+ }
+
+ *aResult = !mDone;
+ return NS_OK;
+}
+
+/*
+ * nsMsgFilteredDBEnumerator implementation
+ */
+
+// No C-style filter callback here: filtering is done against the search
+// session in this class's InternalGetNext(). `reverse` flips the base
+// class's iteration direction.
+nsMsgFilteredDBEnumerator::nsMsgFilteredDBEnumerator(nsMsgDatabase* db,
+ nsIMdbTable* table,
+ bool reverse)
+ : nsMsgDBEnumerator(db, table, nullptr, nullptr, !reverse) {}
+
+nsMsgFilteredDBEnumerator::~nsMsgFilteredDBEnumerator() {}
+
+/**
+ * Create the search session for the enumerator,
+ * add the scope term for "folder" to the search session, and add the search
+ * terms in the array to the search session.
+ *
+ * @param searchTerms Terms each candidate header must match (evaluated by
+ *                    MatchHdr() in InternalGetNext()).
+ * @param folder      Folder used for the offlineMail scope term.
+ */
+nsresult nsMsgFilteredDBEnumerator::InitSearchSession(
+ const nsTArray<RefPtr<nsIMsgSearchTerm>>& searchTerms,
+ nsIMsgFolder* folder) {
+ nsresult rv;
+ m_searchSession =
+ do_CreateInstance("@mozilla.org/messenger/searchSession;1", &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // NOTE(review): return values of AddScopeTerm()/AppendTerm() are ignored.
+ m_searchSession->AddScopeTerm(nsMsgSearchScope::offlineMail, folder);
+ for (auto searchTerm : searchTerms) {
+ m_searchSession->AppendTerm(searchTerm);
+ }
+ return NS_OK;
+}
+
+// Pull headers from the base enumerator until one matches the search
+// session. Yields null in *nextHdr (with NS_OK) when exhausted, mirroring
+// the base class contract.
+nsresult nsMsgFilteredDBEnumerator::InternalGetNext(nsIMsgDBHdr** nextHdr) {
+ nsCOMPtr<nsIMsgDBHdr> hdr;
+ while (true) {
+ nsresult rv = nsMsgDBEnumerator::InternalGetNext(getter_AddRefs(hdr));
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (!hdr) {
+ break; // No more.
+ }
+ bool matches;
+ rv = m_searchSession->MatchHdr(hdr, mDB, &matches);
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (matches) {
+ break; // Found one!
+ }
+ }
+ hdr.forget(nextHdr);
+ return NS_OK;
+}
+
+/*
+ * nsMsgDBThreadEnumerator implementation
+ */
+
+// Construct a thread enumerator over `db`, optionally filtered by `filter`.
+// Registers itself with the DB so it can be invalidated on close.
+nsMsgDBThreadEnumerator::nsMsgDBThreadEnumerator(
+ nsMsgDatabase* db, nsMsgDBThreadEnumeratorFilter filter)
+ : mDB(db),
+ mTableCursor(nullptr),
+ mResultThread(nullptr),
+ mDone(false),
+ mFilter(filter) {
+ mDB->m_threadEnumerators.AppendElement(this);
+ mNextPrefetched = false;
+}
+
+nsMsgDBThreadEnumerator::~nsMsgDBThreadEnumerator() { Invalidate(); }
+
+// Detach from the database and deregister; mirrors
+// nsMsgDBEnumerator::Invalidate(). Safe to call more than once.
+void nsMsgDBThreadEnumerator::Invalidate() {
+ // Order is important here. If the database is destroyed first, releasing
+ // the cursor will crash (due, I think, to a disconnect between XPCOM and
+ // Mork internal memory management).
+ mTableCursor = nullptr;
+ mResultThread = nullptr;
+ mDone = true;
+ if (mDB) {
+ mDB->m_threadEnumerators.RemoveElement(this);
+ mDB = nullptr;
+ }
+}
+
+// NOTE(review): vestigial — rv is never set after initialization, so the
+// NS_FAILED check below is dead code. The only live effect is validating
+// that the DB and its store still exist. The real cursor is created lazily
+// in PrefetchNext().
+nsresult nsMsgDBThreadEnumerator::GetTableCursor(void) {
+ nsresult rv = NS_OK;
+
+ // DB might have disappeared.
+ if (!mDB || !mDB->m_mdbStore) return NS_ERROR_NULL_POINTER;
+ if (NS_FAILED(rv)) return rv;
+ return NS_OK;
+}
+
+// nsIMsgThreadEnumerator: prefetch the next thread (into mResultThread) if
+// needed, then report whether one is available.
+NS_IMETHODIMP nsMsgDBThreadEnumerator::HasMoreElements(bool* aResult) {
+ NS_ENSURE_ARG_POINTER(aResult);
+
+ if (!mNextPrefetched) {
+ PrefetchNext();
+ }
+ *aResult = !mDone;
+ return NS_OK;
+}
+
+// nsIMsgThreadEnumerator: return the prefetched thread (addref'd) and mark
+// it consumed so the next call prefetches again.
+NS_IMETHODIMP nsMsgDBThreadEnumerator::GetNext(nsIMsgThread** aItem) {
+ NS_ENSURE_ARG_POINTER(aItem);
+
+ *aItem = nullptr;
+ nsresult rv = NS_OK;
+ if (!mNextPrefetched) rv = PrefetchNext();
+ if (NS_SUCCEEDED(rv)) {
+ if (mResultThread) {
+ NS_ADDREF(*aItem = mResultThread);
+ mNextPrefetched = false;
+ }
+ }
+ return rv;
+}
+
+// Advance to the next non-empty thread table that passes the filter, caching
+// the resulting thread in mResultThread. Returns NS_ERROR_FAILURE (and sets
+// mDone) when the cursor is exhausted.
+nsresult nsMsgDBThreadEnumerator::PrefetchNext() {
+ nsresult rv;
+
+ // DB might have disappeared.
+ if (!mDB || !mDB->m_mdbStore) {
+ return NS_ERROR_NULL_POINTER;
+ }
+
+ // Lazily create the port-table cursor over thread tables.
+ if (!mTableCursor) {
+ rv = mDB->m_mdbStore->GetPortTableCursor(
+ mDB->GetEnv(), mDB->m_hdrRowScopeToken, mDB->m_threadTableKindToken,
+ getter_AddRefs(mTableCursor));
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ nsCOMPtr<nsIMdbTable> table;
+ while (true) {
+ mResultThread = nullptr;
+ rv = mTableCursor->NextTable(mDB->GetEnv(), getter_AddRefs(table));
+ if (!table) {
+ mDone = true;
+ return NS_ERROR_FAILURE;
+ }
+ if (NS_FAILED(rv)) {
+ mDone = true;
+ return rv;
+ }
+
+ mdbOid tableId;
+ table->GetOid(mDB->GetEnv(), &tableId);
+
+ // Reuse a live thread object for this table if the DB has one cached.
+ mResultThread = mDB->FindExistingThread(tableId.mOid_Id);
+ if (!mResultThread) mResultThread = new nsMsgThread(mDB, table);
+
+ if (mResultThread) {
+ uint32_t numChildren = 0;
+ mResultThread->GetNumChildren(&numChildren);
+ // we've got empty thread; don't tell caller about it.
+ if (numChildren == 0) continue;
+ }
+ if (mFilter && NS_FAILED(mFilter(mResultThread)))
+ continue;
+ else
+ break;
+ }
+ if (mResultThread) {
+ mNextPrefetched = true;
+ return NS_OK;
+ }
+ return NS_ERROR_FAILURE;
+}
diff --git a/comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.h b/comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.h
new file mode 100644
index 0000000000..f68b084611
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMsgDatabaseEnumerators.h
@@ -0,0 +1,133 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsMsgDatabaseEnumerators_H_
+#define _nsMsgDatabaseEnumerators_H_
+
+/*
+ * This file provides some enumerator classes, private to nsMsgDatabase.
+ * The outside world would only ever see these as nsIMsgEnumerator or
+ * nsIMsgThreadEnumerator.
+ *
+ * These enumerators automatically register themselves with the nsMsgDatabase
+ * during construction/destruction. This lets the database track all
+ * outstanding enumerators, so they can be invalidated if the database is
+ * destroyed or ForceClosed().
+ * Due to this lifecycle coupling, we try to avoid using refcounted pointers
+ * here, as we don't want outstanding enumerators to lock an otherwise unused
+ * database in existence.
+ */
+
+#include "nsMsgEnumerator.h"
+#include "nsCOMPtr.h"
+#include "nsTArray.h"
+#include "mdb.h"
+#include "nsIDBChangeListener.h"
+
+#include "nsIMsgSearchTerm.h"
+#include "nsIMsgSearchSession.h"
+
+class nsMsgDatabase;
+class nsIMdbTable;
+class nsIMdbTableRowCursor;
+class nsIMsgFolder;
+
+/**
+ * Enumerates over messages, forwards or backward, with an optional filter fn.
+ */
+class nsMsgDBEnumerator : public nsBaseMsgEnumerator {
+ public:
+ // nsIMsgEnumerator support.
+ NS_IMETHOD GetNext(nsIMsgDBHdr** aItem) override;
+ NS_IMETHOD HasMoreElements(bool* aResult) override;
+
+ // Function type for filtering which messages are enumerated.
+ // Returning a failure code from the filter skips the header.
+ typedef nsresult (*nsMsgDBEnumeratorFilter)(nsIMsgDBHdr* hdr, void* closure);
+
+ nsMsgDBEnumerator(nsMsgDatabase* db, nsIMdbTable* table,
+ nsMsgDBEnumeratorFilter filter, void* closure,
+ bool iterateForwards = true);
+ // Called by db when no longer valid (db is being destroyed or ForcedClosed).
+ void Invalidate();
+
+ protected:
+ // internals
+ // Lazily create mRowCursor, positioned per mIterateForwards.
+ nsresult GetRowCursor();
+
+ // Returns next message or nullptr if none more.
+ virtual nsresult InternalGetNext(nsIMsgDBHdr** nextHdr);
+
+ // Our source DB. Not refcounted, because we don't want to lock the DB
+ // in existence. The enumerator is registered with the DB, and the DB will
+ // call Invalidate() if it is destroyed or ForceClosed().
+ nsMsgDatabase* mDB;
+ nsCOMPtr<nsIMdbTableRowCursor> mRowCursor;
+ mdb_pos mRowPos;
+ // Header prefetched by HasMoreElements(), consumed by GetNext().
+ nsCOMPtr<nsIMsgDBHdr> mResultHdr;
+ bool mDone;
+ bool mIterateForwards;
+ nsMsgDBEnumeratorFilter mFilter;
+ // Not refcounted (see mDB comment); cleared in Invalidate().
+ nsIMdbTable* mTable;
+ void* mClosure;
+
+ virtual ~nsMsgDBEnumerator() override;
+};
+
+/**
+ * Enumerate over messages which match the given search terms.
+ */
+class nsMsgFilteredDBEnumerator : public nsMsgDBEnumerator {
+ public:
+ nsMsgFilteredDBEnumerator(nsMsgDatabase* db, nsIMdbTable* table,
+ bool reverse);
+ virtual ~nsMsgFilteredDBEnumerator() override;
+ // Must be called after construction to set up m_searchSession before the
+ // enumerator is used.
+ nsresult InitSearchSession(
+ const nsTArray<RefPtr<nsIMsgSearchTerm>>& searchTerms,
+ nsIMsgFolder* folder);
+
+ protected:
+ // Skips headers that don't match m_searchSession.
+ virtual nsresult InternalGetNext(nsIMsgDBHdr** nextHdr) override;
+
+ nsCOMPtr<nsIMsgSearchSession> m_searchSession;
+};
+
+/**
+ * Helper class for fetching message threads from a database.
+ * This enumerator automatically registers itself with the nsMsgDatabase.
+ * If the DB is destroyed or ForceClosed() it will call the enumerators
+ * Invalidate() method.
+ */
+class nsMsgDBThreadEnumerator : public nsBaseMsgThreadEnumerator {
+ public:
+ // nsIMsgThreadEnumerator support.
+ NS_IMETHOD GetNext(nsIMsgThread** aItem) override;
+ NS_IMETHOD HasMoreElements(bool* aResult) override;
+
+ // Function type for filtering threads that appear in the enumeration.
+ // Returning a failure code from the filter skips the thread.
+ typedef nsresult (*nsMsgDBThreadEnumeratorFilter)(nsIMsgThread* thread);
+
+ nsMsgDBThreadEnumerator(nsMsgDatabase* db,
+ nsMsgDBThreadEnumeratorFilter filter);
+
+ // Called by DB when being destroyed or ForcedClosed.
+ void Invalidate();
+
+ protected:
+ virtual ~nsMsgDBThreadEnumerator();
+ // NOTE(review): vestigial validity check; the cursor itself is created
+ // lazily in PrefetchNext().
+ nsresult GetTableCursor(void);
+ nsresult PrefetchNext();
+
+ // Our source DB. Not refcounted, because we don't want to lock the DB
+ // in existence. The enumerator is registered with the DB, and the DB will
+ // call Invalidate() if it is destroyed or ForceClosed().
+ nsMsgDatabase* mDB;
+ nsCOMPtr<nsIMdbPortTableCursor> mTableCursor;
+ // Thread prefetched by PrefetchNext(), handed out by GetNext().
+ RefPtr<nsIMsgThread> mResultThread;
+ bool mDone;
+ bool mNextPrefetched;
+ nsMsgDBThreadEnumeratorFilter mFilter;
+};
+
+#endif // _nsMsgDatabaseEnumerators_H_
diff --git a/comm/mailnews/db/msgdb/src/nsMsgHdr.cpp b/comm/mailnews/db/msgdb/src/nsMsgHdr.cpp
new file mode 100644
index 0000000000..c3d36d9b9b
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMsgHdr.cpp
@@ -0,0 +1,936 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "msgCore.h"
+#include "mozilla/mailnews/MimeHeaderParser.h"
+#include "nsMsgHdr.h"
+#include "nsMsgDatabase.h"
+#include "nsMsgUtils.h"
+#include "nsMsgMessageFlags.h"
+#include "nsIMsgThread.h"
+#include "mozilla/Attributes.h"
+#include "nsStringEnumerator.h"
+#ifdef DEBUG
+# include "nsPrintfCString.h"
+#endif
+using namespace mozilla::mailnews;
+
+NS_IMPL_ISUPPORTS(nsMsgHdr, nsIMsgDBHdr)
+
+// Lazy-initialization bits for m_initedValues: each flag records that the
+// corresponding cached member(s) have been loaded from the underlying row.
+#define FLAGS_INITED 0x1
+#define CACHED_VALUES_INITED 0x2
+#define REFERENCES_INITED 0x4
+#define THREAD_PARENT_INITED 0x8
+
+// Wrap a database row as a message header. The message key is derived from
+// the row's oid, and the header is entered into the DB's use cache.
+nsMsgHdr::nsMsgHdr(nsMsgDatabase* db, nsIMdbRow* dbRow) {
+ m_mdb = db;
+ Init();
+ m_mdbRow = dbRow;
+ if (m_mdb) {
+ NS_ADDREF(m_mdb); // Released in DTOR.
+ mdbOid outOid;
+ if (dbRow && NS_SUCCEEDED(dbRow->GetOid(m_mdb->GetEnv(), &outOid))) {
+ m_messageKey = outOid.mOid_Id;
+ m_mdb->AddHdrToUseCache((nsIMsgDBHdr*)this, m_messageKey);
+ }
+ }
+}
+
+// Reset all cached members to their "not loaded" defaults.
+void nsMsgHdr::Init() {
+ m_initedValues = 0;
+ m_messageKey = nsMsgKey_None;
+ m_messageSize = 0;
+ m_date = 0;
+ m_flags = 0;
+ m_mdbRow = NULL;
+ m_threadId = nsMsgKey_None;
+ m_threadParent = nsMsgKey_None;
+}
+
+// Load key, size, date and thread id from the row into the cached members.
+// NOTE(review): err is overwritten by each successive column fetch, so only
+// the last fetch (thread id) decides whether CACHED_VALUES_INITED is set.
+nsresult nsMsgHdr::InitCachedValues() {
+ nsresult err = NS_OK;
+
+ if (!m_mdb || !m_mdbRow) return NS_ERROR_NULL_POINTER;
+
+ if (!(m_initedValues & CACHED_VALUES_INITED)) {
+ uint32_t uint32Value;
+ mdbOid outOid;
+ if (NS_SUCCEEDED(m_mdbRow->GetOid(m_mdb->GetEnv(), &outOid)))
+ m_messageKey = outOid.mOid_Id;
+
+ err = GetUInt32Column(m_mdb->m_messageSizeColumnToken, &m_messageSize);
+
+ // Dates are stored as seconds; cached as PRTime.
+ err = GetUInt32Column(m_mdb->m_dateColumnToken, &uint32Value);
+ Seconds2PRTime(uint32Value, &m_date);
+
+ err = GetUInt32Column(m_mdb->m_messageThreadIdColumnToken, &m_threadId);
+
+ if (NS_SUCCEEDED(err)) m_initedValues |= CACHED_VALUES_INITED;
+ }
+ return err;
+}
+
+// Load the flags column into m_flags, masking out nsMsgMessageFlags::New,
+// which is never persisted.
+nsresult nsMsgHdr::InitFlags() {
+ nsresult err = NS_OK;
+
+ if (!m_mdb) return NS_ERROR_NULL_POINTER;
+
+ if (!(m_initedValues & FLAGS_INITED)) {
+ err = GetUInt32Column(m_mdb->m_flagsColumnToken, &m_flags);
+ m_flags &= ~nsMsgMessageFlags::New; // don't get new flag from MDB
+
+ if (NS_SUCCEEDED(err)) m_initedValues |= FLAGS_INITED;
+ }
+
+ return err;
+}
+
+nsMsgHdr::~nsMsgHdr() {
+ // The row is only released when the DB is still alive (see the matching
+ // comment in nsMsgDBEnumerator::Invalidate about Mork teardown order).
+ if (m_mdbRow) {
+ if (m_mdb) {
+ NS_RELEASE(m_mdbRow);
+ m_mdb->RemoveHdrFromUseCache((nsIMsgDBHdr*)this, m_messageKey);
+ }
+ }
+ NS_IF_RELEASE(m_mdb);
+}
+
+// Return the message key, lazily recovering it from the row oid if needed.
+NS_IMETHODIMP nsMsgHdr::GetMessageKey(nsMsgKey* result) {
+ if (m_messageKey == nsMsgKey_None && m_mdbRow != NULL) {
+ mdbOid outOid;
+ if (NS_SUCCEEDED(m_mdbRow->GetOid(m_mdb->GetEnv(), &outOid)))
+ m_messageKey = outOid.mOid_Id;
+ }
+ *result = m_messageKey;
+ return NS_OK;
+}
+
+// --- Thread-id, key and flag accessors. Cached members are lazily loaded
+// from the row; setters write through to the row column. ---
+
+NS_IMETHODIMP nsMsgHdr::GetThreadId(nsMsgKey* result) {
+ if (!(m_initedValues & CACHED_VALUES_INITED)) InitCachedValues();
+
+ if (result) {
+ *result = m_threadId;
+ return NS_OK;
+ }
+ return NS_ERROR_NULL_POINTER;
+}
+
+NS_IMETHODIMP nsMsgHdr::SetThreadId(nsMsgKey inKey) {
+ m_threadId = inKey;
+ return SetUInt32Column(m_threadId, m_mdb->m_messageThreadIdColumnToken);
+}
+
+// Only updates the cached member; the key itself lives in the row oid and is
+// not written back here.
+NS_IMETHODIMP nsMsgHdr::SetMessageKey(nsMsgKey value) {
+ m_messageKey = value;
+ return NS_OK;
+}
+
+// Flags as stored, without the DB's computed status bits (cf. GetFlags).
+nsresult nsMsgHdr::GetRawFlags(uint32_t* result) {
+ if (!(m_initedValues & FLAGS_INITED)) InitFlags();
+ *result = m_flags;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgHdr::GetFlags(uint32_t* result) {
+ if (!(m_initedValues & FLAGS_INITED)) InitFlags();
+ if (m_mdb)
+ *result = m_mdb->GetStatusFlags(this, m_flags);
+ else
+ *result = m_flags;
+#ifdef DEBUG_bienvenu
+ NS_ASSERTION(!(*result & (nsMsgMessageFlags::Elided)),
+ "shouldn't be set in db");
+#endif
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgHdr::SetFlags(uint32_t flags) {
+#ifdef DEBUG_bienvenu
+ NS_ASSERTION(!(flags & (nsMsgMessageFlags::Elided)),
+ "shouldn't set this flag on db");
+#endif
+ m_initedValues |= FLAGS_INITED;
+ m_flags = flags;
+ // don't write out nsMsgMessageFlags::New to MDB.
+ return SetUInt32Column(m_flags & ~nsMsgMessageFlags::New,
+ m_mdb->m_flagsColumnToken);
+}
+
+// Or/And write through only when the stored flags would actually change.
+NS_IMETHODIMP nsMsgHdr::OrFlags(uint32_t flags, uint32_t* result) {
+ if (!(m_initedValues & FLAGS_INITED)) InitFlags();
+ if ((m_flags & flags) != flags) SetFlags(m_flags | flags);
+ *result = m_flags;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgHdr::AndFlags(uint32_t flags, uint32_t* result) {
+ if (!(m_initedValues & FLAGS_INITED)) InitFlags();
+ if ((m_flags & flags) != m_flags) SetFlags(m_flags & flags);
+ *result = m_flags;
+ return NS_OK;
+}
+
+// --- Mark* methods delegate to the owning database, keyed by message key.
+// Property accessors delegate to the database's generic row-property API. ---
+
+NS_IMETHODIMP nsMsgHdr::MarkHasAttachments(bool bHasAttachments) {
+ nsresult rv = NS_OK;
+
+ if (m_mdb) {
+ nsMsgKey key;
+ rv = GetMessageKey(&key);
+ if (NS_SUCCEEDED(rv))
+ rv = m_mdb->MarkHasAttachments(key, bHasAttachments, nullptr);
+ }
+ return rv;
+}
+
+NS_IMETHODIMP nsMsgHdr::MarkRead(bool bRead) {
+ nsresult rv = NS_OK;
+
+ if (m_mdb) {
+ nsMsgKey key;
+ rv = GetMessageKey(&key);
+ if (NS_SUCCEEDED(rv)) rv = m_mdb->MarkRead(key, bRead, nullptr);
+ }
+ return rv;
+}
+
+NS_IMETHODIMP nsMsgHdr::MarkFlagged(bool bFlagged) {
+ nsresult rv = NS_OK;
+
+ if (m_mdb) {
+ nsMsgKey key;
+ rv = GetMessageKey(&key);
+ if (NS_SUCCEEDED(rv)) rv = m_mdb->MarkMarked(key, bFlagged, nullptr);
+ }
+ return rv;
+}
+
+NS_IMETHODIMP nsMsgHdr::SetStringProperty(const char* propertyName,
+ const nsACString& propertyValue) {
+ NS_ENSURE_ARG_POINTER(propertyName);
+ if (!m_mdb || !m_mdbRow) return NS_ERROR_NULL_POINTER;
+ return m_mdb->SetProperty(m_mdbRow, propertyName,
+ PromiseFlatCString(propertyValue).get());
+}
+
+NS_IMETHODIMP nsMsgHdr::GetStringProperty(const char* propertyName,
+ nsACString& aPropertyValue) {
+ NS_ENSURE_ARG_POINTER(propertyName);
+ if (!m_mdb || !m_mdbRow) return NS_ERROR_NULL_POINTER;
+ return m_mdb->GetProperty(m_mdbRow, propertyName,
+ getter_Copies(aPropertyValue));
+}
+
+NS_IMETHODIMP nsMsgHdr::GetUint32Property(const char* propertyName,
+ uint32_t* pResult) {
+ NS_ENSURE_ARG_POINTER(propertyName);
+ if (!m_mdb || !m_mdbRow) return NS_ERROR_NULL_POINTER;
+ return m_mdb->GetUint32Property(m_mdbRow, propertyName, pResult);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetUint32Property(const char* propertyName,
+ uint32_t value) {
+ NS_ENSURE_ARG_POINTER(propertyName);
+ if (!m_mdb || !m_mdbRow) return NS_ERROR_NULL_POINTER;
+ return m_mdb->SetUint32Property(m_mdbRow, propertyName, value);
+}
+
+// Lazily parse the references column into m_references, then report the
+// count. A null `result` is allowed (used to force the lazy parse).
+NS_IMETHODIMP nsMsgHdr::GetNumReferences(uint16_t* result) {
+ if (!(m_initedValues & REFERENCES_INITED)) {
+ const char* references;
+ if (NS_SUCCEEDED(m_mdb->RowCellColumnToConstCharPtr(
+ GetMDBRow(), m_mdb->m_referencesColumnToken, &references)))
+ ParseReferences(references);
+ m_initedValues |= REFERENCES_INITED;
+ }
+
+ if (result) *result = m_references.Length();
+ // there is no real failure here; if there are no references, there are no
+ // references.
+ return NS_OK;
+}
+
+// Split a raw References header value into individual message-ids (see
+// GetNextReference for the parsing rules), appending them to m_references.
+nsresult nsMsgHdr::ParseReferences(const char* references) {
+ const char* startNextRef = references;
+ nsAutoCString resultReference;
+ nsCString messageId;
+ GetMessageId(getter_Copies(messageId));
+
+ while (startNextRef && *startNextRef) {
+ startNextRef = GetNextReference(startNextRef, resultReference,
+ startNextRef == references);
+ // Don't add self-references.
+ if (!resultReference.IsEmpty() && !resultReference.Equals(messageId))
+ m_references.AppendElement(resultReference);
+ }
+ return NS_OK;
+}
+
+// Fetch the refNum'th parsed reference; NS_ERROR_ILLEGAL_VALUE when out of
+// range.
+NS_IMETHODIMP nsMsgHdr::GetStringReference(int32_t refNum,
+ nsACString& resultReference) {
+ nsresult err = NS_OK;
+
+ if (!(m_initedValues & REFERENCES_INITED))
+ GetNumReferences(nullptr); // it can handle the null
+
+ if ((uint32_t)refNum < m_references.Length())
+ resultReference = m_references.ElementAt(refNum);
+ else
+ err = NS_ERROR_ILLEGAL_VALUE;
+ return err;
+}
+
+// --- Date, id/address, size, line-count and priority accessors. Setters
+// write through to the corresponding row column. ---
+
+NS_IMETHODIMP nsMsgHdr::GetDate(PRTime* result) {
+ if (!(m_initedValues & CACHED_VALUES_INITED)) InitCachedValues();
+
+ *result = m_date;
+ return NS_OK;
+}
+
+// Reads the column directly (seconds granularity), bypassing the PRTime
+// cache used by GetDate().
+NS_IMETHODIMP nsMsgHdr::GetDateInSeconds(uint32_t* aResult) {
+ return GetUInt32Column(m_mdb->m_dateColumnToken, aResult);
+}
+
+// Stores the message-id with the surrounding '<' and '>' stripped, when
+// present.
+NS_IMETHODIMP nsMsgHdr::SetMessageId(const char* messageId) {
+ if (messageId && *messageId == '<') {
+ nsAutoCString tempMessageID(messageId + 1);
+ if (tempMessageID.CharAt(tempMessageID.Length() - 1) == '>')
+ tempMessageID.SetLength(tempMessageID.Length() - 1);
+ return SetStringColumn(tempMessageID.get(), m_mdb->m_messageIdColumnToken);
+ }
+ return SetStringColumn(messageId, m_mdb->m_messageIdColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetSubject(const nsACString& subject) {
+ return SetStringColumn(PromiseFlatCString(subject).get(),
+ m_mdb->m_subjectColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetAuthor(const char* author) {
+ return SetStringColumn(author, m_mdb->m_senderColumnToken);
+}
+
+// Re-parses the list into m_references and persists the raw string.
+NS_IMETHODIMP nsMsgHdr::SetReferences(const nsACString& references) {
+ m_references.Clear();
+ ParseReferences(PromiseFlatCString(references).get());
+
+ m_initedValues |= REFERENCES_INITED;
+
+ return SetStringColumn(PromiseFlatCString(references).get(),
+ m_mdb->m_referencesColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetRecipients(const char* recipients) {
+ // need to put in rfc822 address parsing code here (or make caller do it...)
+ return SetStringColumn(recipients, m_mdb->m_recipientsColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetCcList(const char* ccList) {
+ return SetStringColumn(ccList, m_mdb->m_ccListColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetBccList(const char* bccList) {
+ return SetStringColumn(bccList, m_mdb->m_bccListColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetMessageSize(uint32_t messageSize) {
+ SetUInt32Column(messageSize, m_mdb->m_messageSizeColumnToken);
+ m_messageSize = messageSize;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgHdr::GetOfflineMessageSize(uint32_t* result) {
+ uint32_t size;
+ nsresult res = GetUInt32Column(m_mdb->m_offlineMessageSizeColumnToken, &size);
+
+ *result = size;
+ return res;
+}
+
+NS_IMETHODIMP nsMsgHdr::SetOfflineMessageSize(uint32_t messageSize) {
+ return SetUInt32Column(messageSize, m_mdb->m_offlineMessageSizeColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetLineCount(uint32_t lineCount) {
+ SetUInt32Column(lineCount, m_mdb->m_numLinesColumnToken);
+ return NS_OK;
+}
+
+// Persisted at seconds granularity; the PRTime value is cached in m_date.
+NS_IMETHODIMP nsMsgHdr::SetDate(PRTime date) {
+ m_date = date;
+ uint32_t seconds;
+ PRTime2Seconds(date, &seconds);
+ return SetUInt32Column((uint32_t)seconds, m_mdb->m_dateColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetPriority(nsMsgPriorityValue priority) {
+ SetUInt32Column((uint32_t)priority, m_mdb->m_priorityColumnToken);
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgHdr::GetPriority(nsMsgPriorityValue* result) {
+ if (!result) return NS_ERROR_NULL_POINTER;
+
+ uint32_t priority = 0;
+ nsresult rv = GetUInt32Column(m_mdb->m_priorityColumnToken, &priority);
+ if (NS_FAILED(rv)) return rv;
+
+ *result = (nsMsgPriorityValue)priority;
+ return NS_OK;
+}
+
+// I'd like to not store the account key, if the msg is in
+// the same account as it was received in, to save disk space and memory.
+// This might be problematic when a message gets moved...
+// And I'm not sure if we should short circuit it here,
+// or at a higher level where it might be more efficient.
+NS_IMETHODIMP nsMsgHdr::SetAccountKey(const char* aAccountKey) {
+ return SetStringProperty("account", nsDependentCString(aAccountKey));
+}
+
+NS_IMETHODIMP nsMsgHdr::GetAccountKey(char** aResult) {
+ NS_ENSURE_ARG_POINTER(aResult);
+
+ nsCString key;
+ nsresult rv = GetStringProperty("account", key);
+ NS_ENSURE_SUCCESS(rv, rv);
+ *aResult = ToNewCString(key);
+ return NS_OK;
+}
+
+// Offline message offset. Deliberately tolerant of the column being unset
+// (returns a sentinel rather than failing) — see comment below.
+NS_IMETHODIMP nsMsgHdr::GetMessageOffset(uint64_t* result) {
+ NS_ENSURE_ARG(result);
+
+ (void)GetUInt64Column(m_mdb->m_offlineMsgOffsetColumnToken, result,
+ (unsigned)-1);
+ if (*result == (unsigned)-1) {
+ // It's unset. Unfortunately there's not much we can do here. There's
+ // a lot of code which relies on being able to read .messageOffset,
+ // even if it doesn't require it to return anything sensible.
+ // (For example - in js unit tests - Assert.equals() stringifies the
+ // attributes of it's expected/actual values to produce an error
+ // message even if the assert passes).
+#ifdef DEBUG
+ nsCString tok;
+ GetStringProperty("storeToken", tok);
+ nsPrintfCString err("Missing .messageOffset (key=%u, storeToken='%s')",
+ m_messageKey, tok.get());
+ NS_WARNING(err.get());
+#endif
+ // Return something obviously broken.
+ *result = 12345678;
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgHdr::SetMessageOffset(uint64_t offset) {
+ SetUInt64Column(offset, m_mdb->m_offlineMsgOffsetColumnToken);
+ return NS_OK;
+}
+
+// --- Simple getters: each delegates to the database's row-cell conversion
+// helpers for the corresponding column. ---
+
+NS_IMETHODIMP nsMsgHdr::GetMessageSize(uint32_t* result) {
+ uint32_t size;
+ nsresult res = GetUInt32Column(m_mdb->m_messageSizeColumnToken, &size);
+
+ *result = size;
+ return res;
+}
+
+NS_IMETHODIMP nsMsgHdr::GetLineCount(uint32_t* result) {
+ uint32_t linecount;
+ nsresult res = GetUInt32Column(m_mdb->m_numLinesColumnToken, &linecount);
+ *result = linecount;
+ return res;
+}
+
+NS_IMETHODIMP nsMsgHdr::GetAuthor(char** resultAuthor) {
+ return m_mdb->RowCellColumnToCharPtr(GetMDBRow(), m_mdb->m_senderColumnToken,
+ resultAuthor);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetSubject(nsACString& resultSubject) {
+ return m_mdb->RowCellColumnToCharPtr(GetMDBRow(), m_mdb->m_subjectColumnToken,
+ getter_Copies(resultSubject));
+}
+
+NS_IMETHODIMP nsMsgHdr::GetRecipients(char** resultRecipients) {
+ return m_mdb->RowCellColumnToCharPtr(
+ GetMDBRow(), m_mdb->m_recipientsColumnToken, resultRecipients);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetCcList(char** resultCCList) {
+ return m_mdb->RowCellColumnToCharPtr(GetMDBRow(), m_mdb->m_ccListColumnToken,
+ resultCCList);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetBccList(char** resultBCCList) {
+ return m_mdb->RowCellColumnToCharPtr(GetMDBRow(), m_mdb->m_bccListColumnToken,
+ resultBCCList);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetMessageId(char** resultMessageId) {
+ return m_mdb->RowCellColumnToCharPtr(
+ GetMDBRow(), m_mdb->m_messageIdColumnToken, resultMessageId);
+}
+
+// MIME-2 (RFC 2047) decoded variants of the header fields.
+NS_IMETHODIMP nsMsgHdr::GetMime2DecodedAuthor(nsAString& resultAuthor) {
+ return m_mdb->RowCellColumnToMime2DecodedString(
+ GetMDBRow(), m_mdb->m_senderColumnToken, resultAuthor);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetMime2DecodedSubject(nsAString& resultSubject) {
+ return m_mdb->RowCellColumnToMime2DecodedString(
+ GetMDBRow(), m_mdb->m_subjectColumnToken, resultSubject);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetMime2DecodedRecipients(nsAString& resultRecipients) {
+ return m_mdb->RowCellColumnToMime2DecodedString(
+ GetMDBRow(), m_mdb->m_recipientsColumnToken, resultRecipients);
+}
+
+// Collation keys for locale-aware sorting.
+NS_IMETHODIMP nsMsgHdr::GetAuthorCollationKey(nsTArray<uint8_t>& resultAuthor) {
+ return m_mdb->RowCellColumnToAddressCollationKey(
+ GetMDBRow(), m_mdb->m_senderColumnToken, resultAuthor);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetSubjectCollationKey(
+ nsTArray<uint8_t>& resultSubject) {
+ return m_mdb->RowCellColumnToCollationKey(
+ GetMDBRow(), m_mdb->m_subjectColumnToken, resultSubject);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetRecipientsCollationKey(
+ nsTArray<uint8_t>& resultRecipients) {
+ return m_mdb->RowCellColumnToCollationKey(
+ GetMDBRow(), m_mdb->m_recipientsColumnToken, resultRecipients);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetCharset(char** aCharset) {
+ return m_mdb->RowCellColumnToCharPtr(
+ GetMDBRow(), m_mdb->m_messageCharSetColumnToken, aCharset);
+}
+
+NS_IMETHODIMP nsMsgHdr::SetCharset(const char* aCharset) {
+ return SetStringColumn(aCharset, m_mdb->m_messageCharSetColumnToken);
+}
+
+NS_IMETHODIMP nsMsgHdr::GetEffectiveCharset(nsACString& resultCharset) {
+ return m_mdb->GetEffectiveCharset(m_mdbRow, resultCharset);
+}
+
+// Persist the thread parent key; asserts (debug) against self-parenting but
+// still writes the value.
+NS_IMETHODIMP nsMsgHdr::SetThreadParent(nsMsgKey inKey) {
+ m_threadParent = inKey;
+ if (inKey == m_messageKey) NS_ASSERTION(false, "can't be your own parent");
+ SetUInt32Column(m_threadParent, m_mdb->m_threadParentColumnToken);
+ m_initedValues |= THREAD_PARENT_INITED;
+ return NS_OK;
+}
+
+// Lazily loads the thread parent (defaulting to nsMsgKey_None) and caches it.
+NS_IMETHODIMP nsMsgHdr::GetThreadParent(nsMsgKey* result) {
+ nsresult res;
+ if (!(m_initedValues & THREAD_PARENT_INITED)) {
+ res = GetUInt32Column(m_mdb->m_threadParentColumnToken, &m_threadParent,
+ nsMsgKey_None);
+ if (NS_SUCCEEDED(res)) m_initedValues |= THREAD_PARENT_INITED;
+ }
+ *result = m_threadParent;
+ return NS_OK;
+}
+
+// Returns the DB's owning folder, or null (with NS_OK) when unavailable.
+NS_IMETHODIMP nsMsgHdr::GetFolder(nsIMsgFolder** result) {
+ NS_ENSURE_ARG(result);
+
+ if (m_mdb && m_mdb->m_folder) {
+ NS_ADDREF(*result = m_mdb->m_folder);
+ } else
+ *result = nullptr;
+ return NS_OK;
+}
+
+// --- Thin wrappers around the database's typed row-cell accessors, bound to
+// this header's row. ---
+
+nsresult nsMsgHdr::SetStringColumn(const char* str, mdb_token token) {
+ NS_ENSURE_ARG_POINTER(str);
+ return m_mdb->CharPtrToRowCellColumn(m_mdbRow, token, str);
+}
+
+nsresult nsMsgHdr::SetUInt32Column(uint32_t value, mdb_token token) {
+ return m_mdb->UInt32ToRowCellColumn(m_mdbRow, token, value);
+}
+
+nsresult nsMsgHdr::GetUInt32Column(mdb_token token, uint32_t* pvalue,
+ uint32_t defaultValue) {
+ return m_mdb->RowCellColumnToUInt32(GetMDBRow(), token, pvalue, defaultValue);
+}
+
+nsresult nsMsgHdr::SetUInt64Column(uint64_t value, mdb_token token) {
+ return m_mdb->UInt64ToRowCellColumn(m_mdbRow, token, value);
+}
+
+nsresult nsMsgHdr::GetUInt64Column(mdb_token token, uint64_t* pvalue,
+ uint64_t defaultValue) {
+ return m_mdb->RowCellColumnToUInt64(GetMDBRow(), token, pvalue, defaultValue);
+}
+
+/**
+ * Roughly speaking, get the next message-id (starts with a '<' ends with a
+ * '>'). Except, we also try to handle the case where your reference is of
+ * a prehistoric vintage that just stuck any old random junk in there. Our
+ * old logic would (unintentionally?) just trim the whitespace off the front
+ * and hand you everything after that. We change things at all because that
+ * same behaviour does not make sense if we have already seen a proper message
+ * id. We keep the old behaviour at all because it would seem to have
+ * benefits. (See jwz's non-zero stats: http://www.jwz.org/doc/threading.html)
+ * So, to re-state, if there is a valid message-id in there at all, we only
+ * return valid message-id's (sans bracketing '<' and '>'). If there isn't,
+ * our result (via "references") is a left-trimmed copy of the string. If
+ * there is nothing in there, our result is an empty string. We do require
+ * that you pass acceptNonDelimitedReferences what it demands, though.
+ * For example: "<valid@stuff> this stuff is invalid" would net you
+ * "valid@stuff" and "this stuff is invalid" as results. We now only would
+ * provide "valid@stuff" and an empty string (which you should ignore) as
+ * results. However "this stuff is invalid" would return itself, allowing
+ * anything relying on that behaviour to keep working.
+ *
+ * Note: We accept anything inside the '<' and '>'; technically, we should want
+ * at least a '@' in there (per rfc 2822). But since we're going out of our
+ * way to support weird things...
+ *
+ * @param startNextRef The position to start at; this should either be the start
+ *     of your references string or our return value from a previous call.
+ * @param reference You pass a nsCString by reference, we put the reference we
+ *     find in it, if we find one. It may be empty! Beware!
+ * @param acceptNonDelimitedReferences Should we support the
+ *     pre-reasonable-standards form of In-Reply-To where it could be any
+ *     arbitrary string and our behaviour was just to take off leading
+ *     whitespace. It only makes sense to pass true for your first call to this
+ *     function, as if you are around to make a second call, it means we found
+ *     a properly formatted message-id and so we should only look for more
+ *     properly formatted message-ids.
+ *     NOTE: this option will also strip off a single leading '<' if there is
+ *     one. Some examples:
+ *       " foo"      => "foo"
+ *       " <bar"     => "bar"
+ *       "<<<foo"    => "<<foo"
+ *       "<foo@bar>" => "foo@bar" (completed message-id)
+ * @returns The next starting position of this routine, which may be pointing at
+ *     a nul '\0' character to indicate termination.
+ */
+const char* nsMsgHdr::GetNextReference(const char* startNextRef,
+                                       nsCString& reference,
+                                       bool acceptNonDelimitedReferences) {
+  const char* ptr = startNextRef;
+  const char* whitespaceEndedAt = nullptr;
+  const char* firstMessageIdChar = nullptr;
+
+  // make the reference result string empty by default; we will set it to
+  // something valid if the time comes.
+  reference.Truncate();
+
+  // walk until we find a '<', but keep track of the first point we found that
+  // was not whitespace (as defined by previous versions of this code.)
+  for (bool foundLessThan = false; !foundLessThan; ptr++) {
+    switch (*ptr) {
+      case '\0':
+        // if we are at the end of the string, we found some non-whitespace, and
+        // the caller requested that we accept non-delimited whitespace,
+        // give them that as their reference. (otherwise, leave it empty)
+        if (acceptNonDelimitedReferences && whitespaceEndedAt)
+          reference = whitespaceEndedAt;
+        return ptr;
+      case ' ':
+      case '\r':
+      case '\n':
+      case '\t':
+        // do nothing, make default case mean you didn't get whitespace
+        break;
+      case '<':
+        firstMessageIdChar = ptr + 1;  // skip over the '<'
+        foundLessThan = true;          // (flag to stop)
+        // Ensure whitespaceEndedAt skips the leading '<' and is set to
+        // a non-NULL value, just in case the message-id is not valid (no '>')
+        // and the old-school support is desired.
+        if (!whitespaceEndedAt) whitespaceEndedAt = ptr + 1;
+        break;
+      default:
+        if (!whitespaceEndedAt) whitespaceEndedAt = ptr;
+        break;
+    }
+  }
+
+  // keep going until we hit a '>' or hit the end of the string
+  for (; *ptr; ptr++) {
+    if (*ptr == '>') {
+      // it's valid, update reference, making sure to stop before the '>'
+      reference.Assign(firstMessageIdChar, ptr - firstMessageIdChar);
+      // and return a start point just after the '>'
+      return ++ptr;
+    }
+  }
+
+  // we did not have a fully-formed, valid message-id, so consider falling back
+  // to the legacy whitespace-trimmed behaviour described above.
+  if (acceptNonDelimitedReferences && whitespaceEndedAt)
+    reference = whitespaceEndedAt;
+  return ptr;
+}
+
+/**
+ * Return true if this header is the effective parent of possibleChild:
+ * walk possibleChild's references from last (closest ancestor) to first,
+ * skipping references whose headers are not present in the database, and
+ * succeed if our message-id is hit before any reference that does resolve
+ * to an existing header.
+ */
+bool nsMsgHdr::IsParentOf(nsIMsgDBHdr* possibleChild) {
+  uint16_t referenceToCheck = 0;
+  possibleChild->GetNumReferences(&referenceToCheck);
+  nsAutoCString reference;
+
+  nsCString messageId;
+  GetMessageId(getter_Copies(messageId));
+
+  while (referenceToCheck > 0) {
+    possibleChild->GetStringReference(referenceToCheck - 1, reference);
+
+    if (reference.Equals(messageId)) return true;
+    // if reference didn't match, check if this ref is for a non-existent
+    // header. If it is, continue looking at ancestors.
+    nsCOMPtr<nsIMsgDBHdr> refHdr;
+    if (!m_mdb) break;
+    (void)m_mdb->GetMsgHdrForMessageID(reference.get(), getter_AddRefs(refHdr));
+    // a resolvable reference that isn't us means some other header is the
+    // closer parent, so stop looking.
+    if (refHdr) break;
+    referenceToCheck--;
+  }
+  return false;
+}
+
+/**
+ * Return true if this header's message-id occurs anywhere in
+ * possibleChild's raw references column. This is a plain strstr substring
+ * test (see note below about the missing '<' '>' bracketing), so it can in
+ * principle match a message-id that merely contains ours as a substring.
+ */
+bool nsMsgHdr::IsAncestorOf(nsIMsgDBHdr* possibleChild) {
+  const char* references;
+  nsMsgHdr* curHdr =
+      static_cast<nsMsgHdr*>(possibleChild);  // closed system, cast ok
+  m_mdb->RowCellColumnToConstCharPtr(
+      curHdr->GetMDBRow(), m_mdb->m_referencesColumnToken, &references);
+  if (!references) return false;
+
+  nsCString messageId;
+  // should put < > around message id to make strstr strictly match
+  GetMessageId(getter_Copies(messageId));
+  return (strstr(references, messageId.get()) != nullptr);
+}
+
+/**
+ * Report whether this message carries the Read flag.
+ */
+NS_IMETHODIMP nsMsgHdr::GetIsRead(bool* isRead) {
+  NS_ENSURE_ARG_POINTER(isRead);
+  // Flags are pulled lazily from the mdb row on first access.
+  if (!(m_initedValues & FLAGS_INITED)) {
+    InitFlags();
+  }
+  *isRead = (m_flags & nsMsgMessageFlags::Read) != 0;
+  return NS_OK;
+}
+
+/**
+ * Report whether this message carries the Marked (starred/flagged) flag.
+ */
+NS_IMETHODIMP nsMsgHdr::GetIsFlagged(bool* isFlagged) {
+  NS_ENSURE_ARG_POINTER(isFlagged);
+  // Flags are pulled lazily from the mdb row on first access.
+  if (!(m_initedValues & FLAGS_INITED)) {
+    InitFlags();
+  }
+  *isFlagged = (m_flags & nsMsgMessageFlags::Marked) != 0;
+  return NS_OK;
+}
+
+/**
+ * Repair a broken thread-parent link for this header: scan the thread for a
+ * header that IsParentOf() us and adopt it; otherwise fall back to the
+ * thread's root header (or nsMsgKey_None if we ARE the root / a singleton).
+ */
+void nsMsgHdr::ReparentInThread(nsIMsgThread* thread) {
+  NS_WARNING("Borked message header, attempting to fix!");
+  uint32_t numChildren;
+  thread->GetNumChildren(&numChildren);
+  // bail out early for the singleton thread case.
+  if (numChildren == 1) {
+    SetThreadParent(nsMsgKey_None);
+    return;
+  } else {
+    nsCOMPtr<nsIMsgDBHdr> curHdr;
+    // loop through thread, looking for our proper parent.
+    for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+      thread->GetChildHdrAt(childIndex, getter_AddRefs(curHdr));
+      // closed system, cast ok
+      nsMsgHdr* curMsgHdr = static_cast<nsMsgHdr*>(curHdr.get());
+      if (curHdr && curMsgHdr->IsParentOf(this)) {
+        nsMsgKey curHdrKey;
+        curHdr->GetMessageKey(&curHdrKey);
+        SetThreadParent(curHdrKey);
+        return;
+      }
+    }
+    // we didn't find it. So either the root header is our parent,
+    // or we're the root.
+    nsCOMPtr<nsIMsgDBHdr> rootHdr;
+    thread->GetRootHdr(getter_AddRefs(rootHdr));
+    NS_ASSERTION(rootHdr, "thread has no root hdr - shouldn't happen");
+    if (rootHdr) {
+      nsMsgKey rootKey;
+      rootHdr->GetMessageKey(&rootKey);
+      // if we're the root, our thread parent is -1.
+      SetThreadParent(rootKey == m_messageKey ? nsMsgKey_None : rootKey);
+    }
+  }
+}
+
+/**
+ * Return true if this header or any of its thread ancestors carries the
+ * Ignored (killed) flag. Recurses up the thread-parent chain, with
+ * ancestorsToCheck bounding the recursion depth so cycles in a corrupted
+ * parent chain cannot loop forever; detected corruption is repaired via
+ * ReparentInThread() and treated as not-killed (err on the side of showing
+ * the message).
+ */
+bool nsMsgHdr::IsAncestorKilled(uint32_t ancestorsToCheck) {
+  if (!(m_initedValues & FLAGS_INITED)) InitFlags();
+  bool isKilled = m_flags & nsMsgMessageFlags::Ignored;
+
+  if (!isKilled) {
+    nsMsgKey threadParent;
+    GetThreadParent(&threadParent);
+
+    if (threadParent == m_messageKey) {
+      // isKilled is false by virtue of the enclosing if statement
+      NS_ERROR("Thread is parent of itself, please fix!");
+      nsCOMPtr<nsIMsgThread> thread;
+      (void)m_mdb->GetThreadContainingMsgHdr(this, getter_AddRefs(thread));
+      if (!thread) return false;
+      ReparentInThread(thread);
+      // Something's wrong, but the problem happened some time ago, so erroring
+      // out now is probably not a good idea. Ergo, we'll pretend to be OK, show
+      // the user the thread (err on the side of caution), and let the assertion
+      // alert debuggers to a problem.
+      return false;
+    }
+    if (threadParent != nsMsgKey_None) {
+      nsCOMPtr<nsIMsgDBHdr> parentHdr;
+      (void)m_mdb->GetMsgHdrForKey(threadParent, getter_AddRefs(parentHdr));
+
+      if (parentHdr) {
+        // More proofing against crashers. This crasher was derived from the
+        // fact that something got borked, leaving is in hand with a circular
+        // reference to borked headers inducing these loops. The defining
+        // characteristic of these headers is that they don't actually seat
+        // themselves in the thread properly.
+        nsCOMPtr<nsIMsgThread> thread;
+        (void)m_mdb->GetThreadContainingMsgHdr(this, getter_AddRefs(thread));
+        if (thread) {
+          nsCOMPtr<nsIMsgDBHdr> claimant;
+          (void)thread->GetChild(threadParent, getter_AddRefs(claimant));
+          if (!claimant) {
+            // attempt to reparent, and say the thread isn't killed,
+            // erring on the side of safety.
+            ReparentInThread(thread);
+            return false;
+          }
+        }
+
+        if (!ancestorsToCheck) {
+          // We think we have a parent, but we have no more ancestors to check
+          NS_ASSERTION(false, "cycle in parent relationship, please fix!");
+          return false;
+        }
+        // closed system, cast ok
+        nsMsgHdr* parent = static_cast<nsMsgHdr*>(parentHdr.get());
+        return parent->IsAncestorKilled(ancestorsToCheck - 1);
+      }
+    }
+  }
+  return isKilled;
+}
+
+/**
+ * Report whether this message is effectively killed, i.e. it or one of its
+ * thread ancestors is flagged Ignored. The ancestor walk is bounded by the
+ * thread's child count to defend against corrupted parent cycles.
+ */
+NS_IMETHODIMP nsMsgHdr::GetIsKilled(bool* isKilled) {
+  NS_ENSURE_ARG_POINTER(isKilled);
+  *isKilled = false;
+  nsCOMPtr<nsIMsgThread> thread;
+  (void)m_mdb->GetThreadContainingMsgHdr(this, getter_AddRefs(thread));
+  // if we can't find the thread, let's at least check one level; maybe
+  // the header hasn't been added to a thread yet.
+  uint32_t numChildren = 1;
+  if (thread) thread->GetNumChildren(&numChildren);
+  if (!numChildren) return NS_ERROR_FAILURE;
+  // We can't have as many ancestors as there are messages in the thread,
+  // so tell IsAncestorKilled to only check numChildren - 1 ancestors.
+  *isKilled = IsAncestorKilled(numChildren - 1);
+  return NS_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#include "nsIStringEnumerator.h"
+#define NULL_MORK_COLUMN 0
+/**
+ * Enumerates the property names (mdb column tokens rendered as strings) of
+ * a message header's underlying mork row, via a row-cell cursor.
+ */
+class nsMsgPropertyEnumerator : public nsStringEnumeratorBase {
+ public:
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSIUTF8STRINGENUMERATOR
+
+  using nsStringEnumeratorBase::GetNext;
+
+  explicit nsMsgPropertyEnumerator(nsMsgHdr* aHdr);
+  // Advance the cursor to the next cell if not already done (idempotent).
+  void PrefetchNext();
+
+ protected:
+  virtual ~nsMsgPropertyEnumerator();
+  nsCOMPtr<nsIMdbRowCellCursor> mRowCellCursor;
+  nsCOMPtr<nsIMdbEnv> m_mdbEnv;
+  nsCOMPtr<nsIMdbStore> m_mdbStore;
+  // Hold a reference to the hdr so it will hold an xpcom reference to the
+  // underlying mdb row. The row cell cursor will crash if the underlying
+  // row goes away.
+  RefPtr<nsMsgHdr> m_hdr;
+  bool mNextPrefetched;
+  mdb_column mNextColumn;
+};
+
+/**
+ * Acquire the row-cell cursor for aHdr's row. The condition below
+ * deliberately chains assignments: each && term both null-checks a link in
+ * the hdr -> row -> db -> env/store chain and caches it in a member; the
+ * cursor is only created if every link is non-null.
+ */
+nsMsgPropertyEnumerator::nsMsgPropertyEnumerator(nsMsgHdr* aHdr)
+    : mNextPrefetched(false), mNextColumn(NULL_MORK_COLUMN) {
+  RefPtr<nsMsgDatabase> mdb;
+  nsCOMPtr<nsIMdbRow> mdbRow;
+
+  if (aHdr && (mdbRow = aHdr->GetMDBRow()) && (m_hdr = aHdr) &&
+      (mdb = aHdr->GetMdb()) && (m_mdbEnv = mdb->m_mdbEnv) &&
+      (m_mdbStore = mdb->m_mdbStore)) {
+    mdbRow->GetRowCellCursor(m_mdbEnv, -1, getter_AddRefs(mRowCellCursor));
+  }
+}
+
+nsMsgPropertyEnumerator::~nsMsgPropertyEnumerator() {
+  // Need to clear this before the nsMsgHdr and its corresponding
+  // nsIMdbRow potentially go away.
+  mRowCellCursor = nullptr;
+}
+
+NS_IMPL_ISUPPORTS(nsMsgPropertyEnumerator, nsIUTF8StringEnumerator,
+                  nsIStringEnumerator)
+
+/**
+ * Return the next property (column) name, consuming the cell prefetched by
+ * HasMore()/PrefetchNext(). Fails with NS_ERROR_FAILURE once exhausted.
+ */
+NS_IMETHODIMP nsMsgPropertyEnumerator::GetNext(nsACString& aItem) {
+  PrefetchNext();
+  if (mNextColumn == NULL_MORK_COLUMN)
+    return NS_ERROR_FAILURE;  // call HasMore first
+  if (!m_mdbStore || !m_mdbEnv) return NS_ERROR_NOT_INITIALIZED;
+  mNextPrefetched = false;
+  char columnName[100];
+  struct mdbYarn colYarn = {columnName, 0, sizeof(columnName), 0, 0, nullptr};
+  // Get the column of the cell
+  nsresult rv = m_mdbStore->TokenToString(m_mdbEnv, mNextColumn, &colYarn);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  aItem.Assign(static_cast<char*>(colYarn.mYarn_Buf), colYarn.mYarn_Fill);
+  return NS_OK;
+}
+
+/**
+ * Report whether another property column remains; prefetches the next cell
+ * so a subsequent GetNext() can consume it.
+ */
+NS_IMETHODIMP nsMsgPropertyEnumerator::HasMore(bool* aResult) {
+  NS_ENSURE_ARG_POINTER(aResult);
+
+  // Make sure the cursor has advanced so mNextColumn is meaningful.
+  PrefetchNext();
+  const bool morePending = mNextColumn != NULL_MORK_COLUMN;
+  *aResult = morePending;
+  return NS_OK;
+}
+
+/**
+ * Advance the cursor to the next cell unless one is already pending. When
+ * the cursor is exhausted (NULL_MORK_COLUMN), releases the store/env/cursor
+ * references so the underlying mork objects can go away.
+ */
+void nsMsgPropertyEnumerator::PrefetchNext(void) {
+  if (!mNextPrefetched && m_mdbEnv && mRowCellCursor) {
+    mNextPrefetched = true;
+    nsCOMPtr<nsIMdbCell> cell;
+    mRowCellCursor->NextCell(m_mdbEnv, getter_AddRefs(cell), &mNextColumn,
+                             nullptr);
+    if (mNextColumn == NULL_MORK_COLUMN) {
+      // free up references
+      m_mdbStore = nullptr;
+      m_mdbEnv = nullptr;
+      mRowCellCursor = nullptr;
+    }
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Collect the names of all properties stored on this header's row into
+ * headers (appended, existing elements untouched).
+ */
+NS_IMETHODIMP nsMsgHdr::GetProperties(nsTArray<nsCString>& headers) {
+  nsCOMPtr<nsIUTF8StringEnumerator> propertyEnumerator =
+      new nsMsgPropertyEnumerator(this);
+  bool hasMore;
+  while (NS_SUCCEEDED(propertyEnumerator->HasMore(&hasMore)) && hasMore) {
+    nsAutoCString property;
+    propertyEnumerator->GetNext(property);
+    headers.AppendElement(property);
+  }
+  return NS_OK;
+}
diff --git a/comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.cpp b/comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.cpp
new file mode 100644
index 0000000000..96410654d8
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.cpp
@@ -0,0 +1,385 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "msgCore.h"
+#include "nsMsgOfflineImapOperation.h"
+#include "nsMsgUtils.h"
+#include "mozilla/Logging.h"
+
+using namespace mozilla;
+
+LazyLogModule IMAPOffline("IMAPOffline");
+
+/* Implementation file */
+NS_IMPL_ISUPPORTS(nsMsgOfflineImapOperation, nsIMsgOfflineImapOperation)
+
+// property names for offine imap operation fields.
+#define PROP_OPERATION "op"
+#define PROP_OPERATION_FLAGS "opFlags"
+#define PROP_NEW_FLAGS "newFlags"
+#define PROP_MESSAGE_KEY "msgKey"
+#define PROP_SRC_MESSAGE_KEY "srcMsgKey"
+#define PROP_SRC_FOLDER_URI "srcFolderURI"
+#define PROP_MOVE_DEST_FOLDER_URI "moveDest"
+#define PROP_NUM_COPY_DESTS "numCopyDests"
+#define PROP_COPY_DESTS \
+ "copyDests" // how to delimit these? Or should we do the "dest1","dest2" etc
+ // trick? But then we'd need to shuffle them around since we
+ // delete off the front first.
+#define PROP_KEYWORD_ADD "addedKeywords"
+#define PROP_KEYWORD_REMOVE "removedKeywords"
+#define PROP_MSG_SIZE "msgSize"
+#define PROP_PLAYINGBACK "inPlayback"
+
+/**
+ * Wrap an existing offline-op row from db, loading the cached operation
+ * mask, message key, operation flags and new flags from the row's
+ * properties (each defaulting to 0 when absent).
+ * Note: m_mdb is a manually AddRef'd raw pointer, released in the dtor.
+ */
+nsMsgOfflineImapOperation::nsMsgOfflineImapOperation(nsMsgDatabase* db,
+                                                     nsIMdbRow* row) {
+  NS_ASSERTION(db, "can't have null db");
+  NS_ASSERTION(row, "can't have null row");
+  m_operation = 0;
+  m_operationFlags = 0;
+  m_messageKey = nsMsgKey_None;
+  m_sourceMessageKey = nsMsgKey_None;
+  m_mdb = db;
+  NS_ADDREF(m_mdb);
+  m_mdbRow = row;
+  m_newFlags = 0;
+  m_mdb->GetUint32Property(m_mdbRow, PROP_OPERATION, (uint32_t*)&m_operation,
+                           0);
+  m_mdb->GetUint32Property(m_mdbRow, PROP_MESSAGE_KEY, &m_messageKey, 0);
+  m_mdb->GetUint32Property(m_mdbRow, PROP_OPERATION_FLAGS, &m_operationFlags,
+                           0);
+  m_mdb->GetUint32Property(m_mdbRow, PROP_NEW_FLAGS, (uint32_t*)&m_newFlags, 0);
+}
+
+nsMsgOfflineImapOperation::~nsMsgOfflineImapOperation() {
+  // clear the row first, in case we're holding the last reference
+  // to the db.
+  m_mdbRow = nullptr;
+  NS_IF_RELEASE(m_mdb);
+}
+
+/* attribute nsOfflineImapOperationType operation; */
+/**
+ * Return the cached operation bit-mask for this offline op.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetOperation(
+    nsOfflineImapOperationType* aOperation) {
+  NS_ENSURE_ARG(aOperation);
+  *aOperation = m_operation;
+  return NS_OK;
+}
+
+/**
+ * OR the given operation bit(s) into the mask (despite the "Set" name, this
+ * accumulates rather than replaces) and persist the mask to the row.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetOperation(
+    nsOfflineImapOperationType aOperation) {
+  if (MOZ_LOG_TEST(IMAPOffline, LogLevel::Info))
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x setOperation was %x add %x", m_messageKey, m_operation,
+             aOperation));
+
+  m_operation |= aOperation;
+  return m_mdb->SetUint32Property(m_mdbRow, PROP_OPERATION, m_operation);
+}
+
+/* void clearOperation (in nsOfflineImapOperationType operation); */
+/**
+ * Clear the given operation bit(s) from the mask, dropping the associated
+ * cached payload (move/append ops clear the move destination; a copy op
+ * removes the oldest copy destination), then persist the mask.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::ClearOperation(
+    nsOfflineImapOperationType aOperation) {
+  if (MOZ_LOG_TEST(IMAPOffline, LogLevel::Info))
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x clearOperation was %x clear %x", m_messageKey,
+             m_operation, aOperation));
+  m_operation &= ~aOperation;
+  switch (aOperation) {
+    case kMsgMoved:
+    case kAppendTemplate:
+    case kAppendDraft:
+      m_moveDestination.Truncate();
+      break;
+    case kMsgCopy:
+      // copies are played back oldest-first, so delete off the front.
+      // NOTE(review): assumes m_copyDestinations is non-empty here — verify
+      // callers always clear kMsgCopy only while a destination is queued.
+      m_copyDestinations.RemoveElementAt(0);
+      break;
+  }
+  return m_mdb->SetUint32Property(m_mdbRow, PROP_OPERATION, m_operation);
+}
+
+/* attribute nsMsgKey messageKey; */
+/**
+ * Return the cached key of the message this operation applies to.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetMessageKey(nsMsgKey* aMessageKey) {
+  NS_ENSURE_ARG(aMessageKey);
+  *aMessageKey = m_messageKey;
+  return NS_OK;
+}
+
+/**
+ * Set and persist the key of the message this operation applies to.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetMessageKey(nsMsgKey aMessageKey) {
+  m_messageKey = aMessageKey;
+  return m_mdb->SetUint32Property(m_mdbRow, PROP_MESSAGE_KEY, m_messageKey);
+}
+
+/* attribute nsMsgKey srcMessageKey; */
+/**
+ * Read the source-message key straight from the row (not the cached
+ * member), yielding nsMsgKey_None when the property is absent.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetSrcMessageKey(
+    nsMsgKey* aMessageKey) {
+  NS_ENSURE_ARG(aMessageKey);
+  return m_mdb->GetUint32Property(m_mdbRow, PROP_SRC_MESSAGE_KEY, aMessageKey,
+                                  nsMsgKey_None);
+}
+
+/**
+ * Set and persist the key of the *source* message for a move/copy result.
+ *
+ * Fix: the previous code assigned to m_messageKey (this operation's own
+ * message key, the one GetMessageKey() returns) instead of
+ * m_sourceMessageKey, silently corrupting the cached message key whenever
+ * a source key was recorded. The persisted property (PROP_SRC_MESSAGE_KEY)
+ * was already correct; only the cached member was wrong.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetSrcMessageKey(
+    nsMsgKey aMessageKey) {
+  m_sourceMessageKey = aMessageKey;
+  return m_mdb->SetUint32Property(m_mdbRow, PROP_SRC_MESSAGE_KEY,
+                                  m_sourceMessageKey);
+}
+
+/* attribute imapMessageFlagsType flagOperation; */
+/**
+ * Return the cached accumulated IMAP flag-change mask.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetFlagOperation(
+    imapMessageFlagsType* aFlagOperation) {
+  NS_ENSURE_ARG(aFlagOperation);
+  *aFlagOperation = m_operationFlags;
+  return NS_OK;
+}
+
+/**
+ * Record a flag change: marks the op as kFlagsChanged, stores the new flag
+ * value, ORs the change into the accumulated flag-operation mask, and
+ * persists the mask.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetFlagOperation(
+    imapMessageFlagsType aFlagOperation) {
+  if (MOZ_LOG_TEST(IMAPOffline, LogLevel::Info))
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x setFlagOperation was %x add %x", m_messageKey,
+             m_operationFlags, aFlagOperation));
+  SetOperation(kFlagsChanged);
+  nsresult rv = SetNewFlags(aFlagOperation);
+  NS_ENSURE_SUCCESS(rv, rv);
+  m_operationFlags |= aFlagOperation;
+  return m_mdb->SetUint32Property(m_mdbRow, PROP_OPERATION_FLAGS,
+                                  m_operationFlags);
+}
+
+/* attribute imapMessageFlagsType newFlags; */
+/**
+ * Read the target flag value back from the row, refreshing the cached
+ * member as a side effect.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetNewFlags(
+    imapMessageFlagsType* aNewFlags) {
+  NS_ENSURE_ARG(aNewFlags);
+  uint32_t flags;
+  nsresult rv = m_mdb->GetUint32Property(m_mdbRow, PROP_NEW_FLAGS, &flags, 0);
+  *aNewFlags = m_newFlags = (imapMessageFlagsType)flags;
+  return rv;
+}
+
+/**
+ * Set and persist the target flag value for a kFlagsChanged playback.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetNewFlags(
+    imapMessageFlagsType aNewFlags) {
+  if (MOZ_LOG_TEST(IMAPOffline, LogLevel::Info) && m_newFlags != aNewFlags)
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x SetNewFlags was %x to %x", m_messageKey, m_newFlags,
+             aNewFlags));
+  m_newFlags = aNewFlags;
+  return m_mdb->SetUint32Property(m_mdbRow, PROP_NEW_FLAGS, m_newFlags);
+}
+
+/* attribute string destinationFolderURI; */
+/**
+ * Return the move/append destination folder URI, refreshing the cached
+ * member from the row first (row read errors are deliberately ignored).
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetDestinationFolderURI(
+    nsACString& aDestinationFolderURI) {
+  (void)m_mdb->GetProperty(m_mdbRow, PROP_MOVE_DEST_FOLDER_URI,
+                           getter_Copies(m_moveDestination));
+  aDestinationFolderURI = m_moveDestination;
+  return NS_OK;
+}
+
+/**
+ * Set and persist the move/append destination folder URI.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetDestinationFolderURI(
+    const nsACString& aDestinationFolderURI) {
+  if (MOZ_LOG_TEST(IMAPOffline, LogLevel::Info))
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x SetDestinationFolderURI to %s", m_messageKey,
+             PromiseFlatCString(aDestinationFolderURI).get()));
+  m_moveDestination = aDestinationFolderURI;
+  return m_mdb->SetProperty(m_mdbRow, PROP_MOVE_DEST_FOLDER_URI,
+                            PromiseFlatCString(aDestinationFolderURI).get());
+}
+
+/* attribute string sourceFolderURI; */
+/**
+ * Return the source folder URI for a move result, refreshing the cached
+ * member from the row.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetSourceFolderURI(
+    nsACString& aSourceFolderURI) {
+  nsresult rv = m_mdb->GetProperty(m_mdbRow, PROP_SRC_FOLDER_URI,
+                                   getter_Copies(m_sourceFolder));
+  aSourceFolderURI = m_sourceFolder;
+  return rv;
+}
+
+/**
+ * Set and persist the source folder URI; also marks this op as a
+ * kMoveResult, since a source folder only makes sense for moves.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetSourceFolderURI(
+    const nsACString& aSourceFolderURI) {
+  m_sourceFolder = aSourceFolderURI;
+  SetOperation(kMoveResult);
+
+  return m_mdb->SetProperty(m_mdbRow, PROP_SRC_FOLDER_URI,
+                            PromiseFlatCString(aSourceFolderURI).get());
+}
+
+/* attribute string keyword; */
+/**
+ * Return a newly-allocated copy of the space-separated keywords queued for
+ * addition, refreshing the cached member from the row. Caller frees.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetKeywordsToAdd(char** aKeywords) {
+  NS_ENSURE_ARG(aKeywords);
+  nsresult rv = m_mdb->GetProperty(m_mdbRow, PROP_KEYWORD_ADD,
+                                   getter_Copies(m_keywordsToAdd));
+  *aKeywords = ToNewCString(m_keywordsToAdd);
+  return rv;
+}
+
+/**
+ * Queue aKeyword for addition (and remove it from the to-remove list if
+ * present there), marking the op as kAddKeywords.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::AddKeywordToAdd(const char* aKeyword) {
+  SetOperation(kAddKeywords);
+  return AddKeyword(aKeyword, m_keywordsToAdd, PROP_KEYWORD_ADD,
+                    m_keywordsToRemove, PROP_KEYWORD_REMOVE);
+}
+
+/**
+ * Return a newly-allocated copy of the space-separated keywords queued for
+ * removal, refreshing the cached member from the row. Caller frees.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetKeywordsToRemove(char** aKeywords) {
+  NS_ENSURE_ARG(aKeywords);
+  nsresult rv = m_mdb->GetProperty(m_mdbRow, PROP_KEYWORD_REMOVE,
+                                   getter_Copies(m_keywordsToRemove));
+  *aKeywords = ToNewCString(m_keywordsToRemove);
+  return rv;
+}
+
+/**
+ * Shared helper for AddKeywordToAdd/AddKeywordToRemove: append aKeyword to
+ * addList (if not already present) and delete it from removeList (the
+ * opposite list) if present, persisting whichever lists changed.
+ * Lists are space-separated; membership is tested with MsgFindKeyword.
+ */
+nsresult nsMsgOfflineImapOperation::AddKeyword(const char* aKeyword,
+                                               nsCString& addList,
+                                               const char* addProp,
+                                               nsCString& removeList,
+                                               const char* removeProp) {
+  int32_t startOffset, keywordLength;
+  if (!MsgFindKeyword(nsDependentCString(aKeyword), addList, &startOffset,
+                      &keywordLength)) {
+    if (!addList.IsEmpty()) addList.Append(' ');
+    addList.Append(aKeyword);
+  }
+  // if the keyword was queued in the opposite list, cut it from that list
+  // so the two queues never contradict each other.
+  if (MsgFindKeyword(nsDependentCString(aKeyword), removeList, &startOffset,
+                     &keywordLength)) {
+    removeList.Cut(startOffset, keywordLength);
+    m_mdb->SetProperty(m_mdbRow, removeProp, removeList.get());
+  }
+  return m_mdb->SetProperty(m_mdbRow, addProp, addList.get());
+}
+
+/**
+ * Queue aKeyword for removal (and drop it from the to-add list if present
+ * there), marking the op as kRemoveKeywords.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::AddKeywordToRemove(
+    const char* aKeyword) {
+  SetOperation(kRemoveKeywords);
+  return AddKeyword(aKeyword, m_keywordsToRemove, PROP_KEYWORD_REMOVE,
+                    m_keywordsToAdd, PROP_KEYWORD_ADD);
+}
+
+/**
+ * Queue a copy to destinationBox: marks the op as kMsgCopy, reloads the
+ * destination list from the db, appends the new destination, and writes
+ * the list back.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::AddMessageCopyOperation(
+    const nsACString& destinationBox) {
+  SetOperation(kMsgCopy);
+  nsresult rv = GetCopiesFromDB();
+  NS_ENSURE_SUCCESS(rv, rv);
+  m_copyDestinations.AppendElement(destinationBox);
+  return SetCopiesToDB();
+}
+
+// we write out the folders as one string, separated by 0x1.
+#define FOLDER_SEP_CHAR '\001'
+
+/**
+ * Reload m_copyDestinations from the row's copy-dests property, splitting
+ * the stored string on FOLDER_SEP_CHAR. Clears the in-memory list first.
+ */
+nsresult nsMsgOfflineImapOperation::GetCopiesFromDB() {
+  nsCString copyDests;
+  m_copyDestinations.Clear();
+  nsresult rv =
+      m_mdb->GetProperty(m_mdbRow, PROP_COPY_DESTS, getter_Copies(copyDests));
+  // use 0x1 as the delimiter between folder names since it's not a legal
+  // character
+  if (NS_SUCCEEDED(rv) && !copyDests.IsEmpty()) {
+    int32_t curCopyDestStart = 0;
+    int32_t nextCopyDestPos = 0;
+
+    while (nextCopyDestPos != -1) {
+      nsCString curDest;
+      nextCopyDestPos = copyDests.FindChar(FOLDER_SEP_CHAR, curCopyDestStart);
+      // NOTE(review): the `> 0` test means a separator found at position 0
+      // (empty leading destination) falls into the take-rest branch; verify
+      // SetCopiesToDB can never produce a leading separator.
+      if (nextCopyDestPos > 0)
+        curDest = Substring(copyDests, curCopyDestStart,
+                            nextCopyDestPos - curCopyDestStart);
+      else
+        curDest = Substring(copyDests, curCopyDestStart,
+                            copyDests.Length() - curCopyDestStart);
+      curCopyDestStart = nextCopyDestPos + 1;
+      m_copyDestinations.AppendElement(curDest);
+    }
+  }
+  return rv;
+}
+
+/**
+ * Serialize m_copyDestinations into a single FOLDER_SEP_CHAR-delimited
+ * string and persist it in the row's copy-dests property.
+ */
+nsresult nsMsgOfflineImapOperation::SetCopiesToDB() {
+  nsAutoCString serialized;
+
+  // join the destinations with the 0x1 separator (no leading/trailing sep).
+  bool first = true;
+  for (const nsCString& dest : m_copyDestinations) {
+    if (!first) serialized.Append(FOLDER_SEP_CHAR);
+    first = false;
+    serialized.Append(dest);
+  }
+  return m_mdb->SetProperty(m_mdbRow, PROP_COPY_DESTS, serialized.get());
+}
+
+/* attribute long numberOfCopies; */
+/**
+ * Return the number of queued copy destinations, reloading the list from
+ * the database first.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetNumberOfCopies(
+    int32_t* aNumberOfCopies) {
+  NS_ENSURE_ARG(aNumberOfCopies);
+  nsresult rv = GetCopiesFromDB();
+  NS_ENSURE_SUCCESS(rv, rv);
+  *aNumberOfCopies = m_copyDestinations.Length();
+  return NS_OK;
+}
+
+/* string getCopyDestination (in long copyIndex); */
+/**
+ * Return a newly-allocated copy of the copy-destination URI at copyIndex,
+ * reloading the destination list from the database first. Caller frees.
+ *
+ * @param copyIndex zero-based index into the copy-destination list.
+ * @param retval    receives the heap-allocated destination string.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetCopyDestination(int32_t copyIndex,
+                                                            char** retval) {
+  NS_ENSURE_ARG(retval);
+  nsresult rv = GetCopiesFromDB();
+  NS_ENSURE_SUCCESS(rv, rv);
+  // Fix: also reject negative indices — the old check only caught
+  // too-large values, so a negative copyIndex would index out of bounds.
+  if (copyIndex < 0 || copyIndex >= (int32_t)m_copyDestinations.Length())
+    return NS_ERROR_ILLEGAL_VALUE;
+  *retval = ToNewCString(m_copyDestinations.ElementAt(copyIndex));
+  return (*retval) ? NS_OK : NS_ERROR_OUT_OF_MEMORY;
+}
+
+/* attribute unsigned long msgSize; */
+/**
+ * Read the message size from the row (0 when absent).
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetMsgSize(uint32_t* aMsgSize) {
+  NS_ENSURE_ARG(aMsgSize);
+  return m_mdb->GetUint32Property(m_mdbRow, PROP_MSG_SIZE, aMsgSize, 0);
+}
+
+/**
+ * Persist the message size to the row.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetMsgSize(uint32_t aMsgSize) {
+  return m_mdb->SetUint32Property(m_mdbRow, PROP_MSG_SIZE, aMsgSize);
+}
+
+/**
+ * Persist whether this op is currently being played back to the server.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::SetPlayingBack(bool aPlayingBack) {
+  return m_mdb->SetBooleanProperty(m_mdbRow, PROP_PLAYINGBACK, aPlayingBack);
+}
+
+/**
+ * Read whether this op is currently being played back to the server.
+ */
+NS_IMETHODIMP nsMsgOfflineImapOperation::GetPlayingBack(bool* aPlayingBack) {
+  NS_ENSURE_ARG(aPlayingBack);
+  return m_mdb->GetBooleanProperty(m_mdbRow, PROP_PLAYINGBACK, aPlayingBack);
+}
+
+/**
+ * Dump this operation's state to the IMAPOffline log at Info level; no-op
+ * when that log is disabled.
+ */
+void nsMsgOfflineImapOperation::Log() {
+  if (!MOZ_LOG_TEST(IMAPOffline, LogLevel::Info)) return;
+  // const long kMoveResult = 0x8;
+  // const long kAppendDraft = 0x10;
+  // const long kAddedHeader = 0x20;
+  // const long kDeletedMsg = 0x40;
+  // const long kMsgMarkedDeleted = 0x80;
+  // const long kAppendTemplate = 0x100;
+  // const long kDeleteAllMsgs = 0x200;
+  if (m_operation & nsIMsgOfflineImapOperation::kFlagsChanged)
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x changeFlag:%x", m_messageKey, m_newFlags));
+  if (m_operation & nsIMsgOfflineImapOperation::kMsgMoved) {
+    nsCString moveDestFolder;
+    GetDestinationFolderURI(moveDestFolder);
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x moveTo:%s", m_messageKey, moveDestFolder.get()));
+  }
+  if (m_operation & nsIMsgOfflineImapOperation::kMsgCopy) {
+    nsCString copyDests;
+    m_mdb->GetProperty(m_mdbRow, PROP_COPY_DESTS, getter_Copies(copyDests));
+    // Fix: this branch previously logged "moveTo", making copy operations
+    // indistinguishable from moves in the log.
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x copyTo:%s", m_messageKey, copyDests.get()));
+  }
+  if (m_operation & nsIMsgOfflineImapOperation::kAppendDraft)
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x append draft", m_messageKey));
+  if (m_operation & nsIMsgOfflineImapOperation::kAddKeywords)
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x add keyword:%s", m_messageKey, m_keywordsToAdd.get()));
+  if (m_operation & nsIMsgOfflineImapOperation::kRemoveKeywords)
+    MOZ_LOG(IMAPOffline, LogLevel::Info,
+            ("msg id %x remove keyword:%s", m_messageKey,
+             m_keywordsToRemove.get()));
+}
diff --git a/comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.h b/comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.h
new file mode 100644
index 0000000000..ba6ad3f079
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMsgOfflineImapOperation.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef _nsMsgOfflineImapOperation_H_
+// Fix: the guard macro was tested but never defined, so the include guard
+// did nothing and a double include would redefine the class.
+#define _nsMsgOfflineImapOperation_H_
+
+# include "nsIMsgOfflineImapOperation.h"
+# include "mdb.h"
+# include "nsMsgDatabase.h"
+# include "prlog.h"
+
+/**
+ * Wrapper around one offline-operation row in the message database's mdb
+ * store; caches the row's fields and writes every mutation back through
+ * the owning nsMsgDatabase.
+ */
+class nsMsgOfflineImapOperation : public nsIMsgOfflineImapOperation {
+ public:
+  /** Instance Methods **/
+  nsMsgOfflineImapOperation(nsMsgDatabase* db, nsIMdbRow* row);
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSIMSGOFFLINEIMAPOPERATION
+
+  nsIMdbRow* GetMDBRow() { return m_mdbRow; }
+  nsresult GetCopiesFromDB();
+  nsresult SetCopiesToDB();
+  void Log();
+
+ protected:
+  virtual ~nsMsgOfflineImapOperation();
+  nsresult AddKeyword(const char* aKeyword, nsCString& addList,
+                      const char* addProp, nsCString& removeList,
+                      const char* removeProp);
+
+  nsOfflineImapOperationType m_operation;  // accumulated operation bit-mask
+  nsMsgKey m_messageKey;
+  nsMsgKey m_sourceMessageKey;
+  uint32_t m_operationFlags;        // what to do on sync
+  imapMessageFlagsType m_newFlags;  // used for kFlagsChanged
+
+  // these are URI's, and are escaped. Thus, we can use a delimiter like ' '
+  // because the real spaces should be escaped.
+  nsCString m_sourceFolder;
+  nsCString m_moveDestination;
+  nsTArray<nsCString> m_copyDestinations;
+
+  nsCString m_keywordsToAdd;
+  nsCString m_keywordsToRemove;
+
+  // Offline operations are really just wrappers around their row in the
+  // mdb, so they need to know which db and row they belong to.
+  nsMsgDatabase* m_mdb;  // manually AddRef'd in the ctor, released in dtor
+  nsCOMPtr<nsIMdbRow> m_mdbRow;
+};
+
+#endif /* _nsMsgOfflineImapOperation_H_ */
diff --git a/comm/mailnews/db/msgdb/src/nsMsgThread.cpp b/comm/mailnews/db/msgdb/src/nsMsgThread.cpp
new file mode 100644
index 0000000000..15398cf321
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsMsgThread.cpp
@@ -0,0 +1,1050 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "msgCore.h"
+#include "nsMsgDatabase.h"
+#include "nsCOMPtr.h"
+#include "nsMsgThread.h"
+#include "nsMsgMessageFlags.h"
+#include "nsMsgEnumerator.h"
+#include "MailNewsTypes2.h"
+#include "mozilla/DebugOnly.h"
+
+NS_IMPL_ISUPPORTS(nsMsgThread, nsIMsgThread)
+
+// Default constructor: an unattached thread; no db/table until set by caller.
+nsMsgThread::nsMsgThread() { Init(); }
+
+// Construct a thread bound to its database and the mdb table holding the
+// thread's message rows. The table's meta row caches per-thread values
+// (flags, child counts, root key, newest date), loaded by InitCachedValues().
+// The db keeps a (non-owning) list of live threads in m_threads.
+nsMsgThread::nsMsgThread(nsMsgDatabase* db, nsIMdbTable* table) {
+ Init();
+ m_mdbTable = table;
+ m_mdbDB = db;
+ if (db)
+ db->m_threads.AppendElement(this);
+ else
+ NS_ERROR("no db for thread");
+#ifdef DEBUG_David_Bienvenu
+ if (m_mdbDB->m_threads.Length() > 5)
+ printf("more than five outstanding threads\n");
+#endif
+ if (table && db) {
+ table->GetMetaRow(db->GetEnv(), nullptr, nullptr,
+ getter_AddRefs(m_metaRow));
+ InitCachedValues();
+ }
+}
+
+// Reset all cached members to their "empty thread" defaults.
+void nsMsgThread::Init() {
+ m_threadKey = nsMsgKey_None;
+ m_threadRootKey = nsMsgKey_None;
+ m_numChildren = 0;
+ m_numUnreadChildren = 0;
+ m_flags = 0;
+ m_newestMsgDate = 0;
+ m_cachedValuesInitialized = false;
+}
+
+// Unregister from the db's live-thread list (if the db is still open) and
+// drop the mdb references.
+nsMsgThread::~nsMsgThread() {
+ if (m_mdbDB) {
+ mozilla::DebugOnly<bool> found = m_mdbDB->m_threads.RemoveElement(this);
+ NS_ASSERTION(found, "removing thread not in threads array");
+ } else // This can happen if db is forced closed
+ NS_WARNING("null db in thread");
+ Clear();
+}
+
+// Release references to the table, meta row and database.
+void nsMsgThread::Clear() {
+ m_mdbTable = nullptr;
+ m_metaRow = nullptr;
+ m_mdbDB = nullptr;
+}
+
+// Load the cached per-thread values (flags, child counts, thread/root keys,
+// newest date) from the meta row, once. Also self-heals an inflated child
+// count and a negative unread count.
+// NOTE(review): each RowCellColumnToUInt32 call overwrites `err`, so only
+// the status of the LAST read decides m_cachedValuesInitialized — earlier
+// failures are silently dropped.
+nsresult nsMsgThread::InitCachedValues() {
+ nsresult err = NS_OK;
+
+ NS_ENSURE_TRUE(m_mdbDB && m_metaRow, NS_ERROR_INVALID_POINTER);
+
+ if (!m_cachedValuesInitialized) {
+ err = m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadFlagsColumnToken, &m_flags);
+ err = m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadChildrenColumnToken, &m_numChildren);
+ err = m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadIdColumnToken, &m_threadKey, nsMsgKey_None);
+ err = m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadUnreadChildrenColumnToken,
+ &m_numUnreadChildren);
+ err = m_mdbDB->RowCellColumnToUInt32(m_metaRow,
+ m_mdbDB->m_threadRootKeyColumnToken,
+ &m_threadRootKey, nsMsgKey_None);
+ err = m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadNewestMsgDateColumnToken, &m_newestMsgDate,
+ 0);
+ // fix num children if it's wrong. this doesn't work - some DB's have a
+ // bogus thread table that is full of bogus headers - don't know why.
+ uint32_t rowCount = 0;
+ m_mdbTable->GetCount(m_mdbDB->GetEnv(), &rowCount);
+ // NS_ASSERTION(m_numChildren <= rowCount, "num children wrong -
+ // fixing");
+ if (m_numChildren > rowCount)
+ ChangeChildCount((int32_t)rowCount - (int32_t)m_numChildren);
+ if ((int32_t)m_numUnreadChildren < 0)
+ ChangeUnreadChildCount(-(int32_t)m_numUnreadChildren);
+ if (NS_SUCCEEDED(err)) m_cachedValuesInitialized = true;
+ }
+ return err;
+}
+
+// Simple accessors. Getters that shadow a meta-row column refresh the cached
+// member from the row before returning it; setters update both the cached
+// member and the persisted meta-row column.
+
+// Set the thread's key; by construction this also becomes the root key.
+NS_IMETHODIMP nsMsgThread::SetThreadKey(nsMsgKey threadKey) {
+ NS_ASSERTION(m_threadKey == nsMsgKey_None || m_threadKey == threadKey,
+ "shouldn't be changing thread key");
+ m_threadKey = threadKey;
+ // by definition, the initial thread key is also the thread root key.
+ SetThreadRootKey(threadKey);
+ // gotta set column in meta row here.
+ return m_mdbDB->UInt32ToRowCellColumn(
+ m_metaRow, m_mdbDB->m_threadIdColumnToken, threadKey);
+}
+
+NS_IMETHODIMP nsMsgThread::GetThreadKey(nsMsgKey* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsresult res = m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadIdColumnToken, &m_threadKey);
+ *result = m_threadKey;
+ return res;
+}
+
+NS_IMETHODIMP nsMsgThread::GetFlags(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ nsresult res = m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadFlagsColumnToken, &m_flags);
+ *result = m_flags;
+ return res;
+}
+
+NS_IMETHODIMP nsMsgThread::SetFlags(uint32_t flags) {
+ m_flags = flags;
+ return m_mdbDB->UInt32ToRowCellColumn(
+ m_metaRow, m_mdbDB->m_threadFlagsColumnToken, m_flags);
+}
+
+// Thread subject is stored only in the meta row (no cached member).
+NS_IMETHODIMP nsMsgThread::SetSubject(const nsACString& aSubject) {
+ return m_mdbDB->CharPtrToRowCellColumn(m_metaRow,
+ m_mdbDB->m_threadSubjectColumnToken,
+ PromiseFlatCString(aSubject).get());
+}
+
+NS_IMETHODIMP nsMsgThread::GetSubject(nsACString& aSubject) {
+ nsCString subjectStr;
+ nsresult rv = m_mdbDB->RowCellColumnToCharPtr(
+ m_metaRow, m_mdbDB->m_threadSubjectColumnToken,
+ getter_Copies(subjectStr));
+
+ aSubject.Assign(subjectStr);
+ return rv;
+}
+
+// Child counts are served from the cached members (kept current by
+// ChangeChildCount / ChangeUnreadChildCount).
+NS_IMETHODIMP nsMsgThread::GetNumChildren(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ *result = m_numChildren;
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsMsgThread::GetNumUnreadChildren(uint32_t* result) {
+ NS_ENSURE_ARG_POINTER(result);
+ *result = m_numUnreadChildren;
+ return NS_OK;
+}
+
+// Make an ancestor of newParentOfOldRoot the new root of this thread:
+// walks up the ancestor chain to find the oldest ancestor, records it as the
+// thread root key, reparents the old root's non-reference children, moves the
+// new root's row to position 0 in the table, and clears its thread parent.
+nsresult nsMsgThread::RerootThread(nsIMsgDBHdr* newParentOfOldRoot,
+ nsIMsgDBHdr* oldRoot,
+ nsIDBChangeAnnouncer* announcer) {
+ nsresult rv = NS_OK;
+ mdb_pos outPos;
+ nsMsgKey newHdrAncestor;
+ nsCOMPtr<nsIMsgDBHdr> ancestorHdr = newParentOfOldRoot;
+ nsMsgKey newRoot;
+
+ ancestorHdr->GetMessageKey(&newRoot);
+ // loop trying to find the oldest ancestor of this msg
+ // that is a parent of the root. The oldest ancestor will
+ // become the root of the thread.
+ do {
+ ancestorHdr->GetThreadParent(&newHdrAncestor);
+ if (newHdrAncestor != nsMsgKey_None && newHdrAncestor != m_threadRootKey &&
+ newHdrAncestor != newRoot) {
+ newRoot = newHdrAncestor;
+ rv = m_mdbDB->GetMsgHdrForKey(newRoot, getter_AddRefs(ancestorHdr));
+ }
+ } while (NS_SUCCEEDED(rv) && ancestorHdr && newHdrAncestor != nsMsgKey_None &&
+ newHdrAncestor != m_threadRootKey && newHdrAncestor != newRoot);
+ SetThreadRootKey(newRoot);
+ ReparentNonReferenceChildrenOf(oldRoot, newRoot, announcer);
+ if (ancestorHdr) {
+ nsIMsgDBHdr* msgHdr = ancestorHdr;
+ nsMsgHdr* rootMsgHdr =
+ static_cast<nsMsgHdr*>(msgHdr); // closed system, cast ok
+ nsIMdbRow* newRootHdrRow = rootMsgHdr->GetMDBRow();
+ // move the root hdr to pos 0.
+ m_mdbTable->MoveRow(m_mdbDB->GetEnv(), newRootHdrRow, -1, 0, &outPos);
+ ancestorHdr->SetThreadParent(nsMsgKey_None);
+ }
+ return rv;
+}
+
+// Add a message to this thread: appends its row to the thread table, updates
+// child/unread counts and the newest-message date, then decides the child's
+// thread parent. Handles the out-of-order case where the new message turns
+// out to be an ancestor of existing children (reroot/reparent), and keeps
+// the table roughly date-sorted. Statement order here is load-bearing; do
+// not reorder without understanding the reroot paths.
+NS_IMETHODIMP nsMsgThread::AddChild(nsIMsgDBHdr* child, nsIMsgDBHdr* inReplyTo,
+ bool threadInThread,
+ nsIDBChangeAnnouncer* announcer) {
+ nsresult rv = NS_OK;
+ nsMsgHdr* hdr = static_cast<nsMsgHdr*>(child); // closed system, cast ok
+ uint32_t newHdrFlags = 0;
+ uint32_t msgDate;
+ nsMsgKey newHdrKey = 0;
+ bool parentKeyNeedsSetting = true;
+
+ nsIMdbRow* hdrRow = hdr->GetMDBRow();
+ NS_ENSURE_STATE(hdrRow);
+ hdr->GetRawFlags(&newHdrFlags);
+ hdr->GetMessageKey(&newHdrKey);
+ hdr->GetDateInSeconds(&msgDate);
+ if (msgDate > m_newestMsgDate) SetNewestMsgDate(msgDate);
+
+ // Watched is a thread-level flag: promote it from the child to the thread.
+ if (newHdrFlags & nsMsgMessageFlags::Watched)
+ SetFlags(m_flags | nsMsgMessageFlags::Watched);
+
+ child->AndFlags(~(nsMsgMessageFlags::Watched), &newHdrFlags);
+
+ // These are threading flags that the child may have set before being added
+ // to the database.
+ // NOTE(review): protoThreadFlags is not pre-initialized; this assumes
+ // GetUint32Property always sets it — TODO confirm.
+ uint32_t protoThreadFlags;
+ child->GetUint32Property("ProtoThreadFlags", &protoThreadFlags);
+ SetFlags(m_flags | protoThreadFlags);
+ // Clear the flag so that it doesn't fudge anywhere else
+ child->SetUint32Property("ProtoThreadFlags", 0);
+
+ uint32_t numChildren = 0;
+ // get the num children before we add the new header.
+ GetNumChildren(&numChildren);
+
+ // if this is an empty thread, set the root key to this header's key
+ if (numChildren == 0) SetThreadRootKey(newHdrKey);
+
+ if (m_mdbTable) {
+ m_mdbTable->AddRow(m_mdbDB->GetEnv(), hdrRow);
+ ChangeChildCount(1);
+ if (!(newHdrFlags & nsMsgMessageFlags::Read)) ChangeUnreadChildCount(1);
+ }
+ // A genuine In-Reply-To reference wins: parent is known immediately.
+ if (inReplyTo) {
+ nsMsgKey parentKey;
+ inReplyTo->GetMessageKey(&parentKey);
+ child->SetThreadParent(parentKey);
+ parentKeyNeedsSetting = false;
+ }
+
+ // check if this header is a parent of one of the messages in this thread
+ bool hdrMoved = false;
+ nsCOMPtr<nsIMsgDBHdr> curHdr;
+ uint32_t moveIndex = 0;
+
+ PRTime newHdrDate;
+ child->GetDate(&newHdrDate);
+
+ // This is an ugly but simple fix for a difficult problem. Basically, when we
+ // add a message to a thread, we have to run through the thread to see if the
+ // new message is a parent of an existing message in the thread, and adjust
+ // things accordingly. If you thread by subject, and you have a large folder
+ // with messages w/ all the same subject, this code can take a really long
+ // time. So the pragmatic thing is to say that for threads with more than 1000
+ // messages, it's simply not worth dealing with the case where the parent
+ // comes in after the child. Threads with more than 1000 messages are pretty
+ // unwieldy anyway. See Bug 90452
+
+ if (numChildren < 1000) {
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ nsMsgKey msgKey = nsMsgKey_None;
+
+ rv = GetChildHdrAt(childIndex, getter_AddRefs(curHdr));
+ if (NS_SUCCEEDED(rv) && curHdr) {
+ if (hdr->IsParentOf(curHdr)) {
+ // NOTE(review): oldThreadParent and msgKey are only assigned inside
+ // the !hdrMoved branch; later children of the same pass reuse stale
+ // values (see the TODO below).
+ nsMsgKey oldThreadParent;
+ mdb_pos outPos;
+ // move this hdr before the current header.
+ if (!hdrMoved) {
+ m_mdbTable->MoveRow(m_mdbDB->GetEnv(), hdrRow, -1, childIndex,
+ &outPos);
+ hdrMoved = true;
+ curHdr->GetThreadParent(&oldThreadParent);
+ curHdr->GetMessageKey(&msgKey);
+ nsCOMPtr<nsIMsgDBHdr> curParent;
+ m_mdbDB->GetMsgHdrForKey(oldThreadParent,
+ getter_AddRefs(curParent));
+ if (curParent && hdr->IsAncestorOf(curParent)) {
+ nsMsgKey curParentKey;
+ curParent->GetMessageKey(&curParentKey);
+ if (curParentKey == m_threadRootKey) {
+ m_mdbTable->MoveRow(m_mdbDB->GetEnv(), hdrRow, -1, 0, &outPos);
+ RerootThread(child, curParent, announcer);
+ parentKeyNeedsSetting = false;
+ }
+ } else if (msgKey == m_threadRootKey) {
+ RerootThread(child, curHdr, announcer);
+ parentKeyNeedsSetting = false;
+ }
+ }
+ curHdr->SetThreadParent(newHdrKey);
+ // TODO: what should be msgKey if hdrMoved was true above?
+ if (msgKey == newHdrKey) parentKeyNeedsSetting = false;
+
+ // OK, this is a reparenting - need to send notification
+ if (announcer)
+ announcer->NotifyParentChangedAll(msgKey, oldThreadParent,
+ newHdrKey, nullptr);
+#ifdef DEBUG_bienvenu1
+ if (newHdrKey != m_threadKey) printf("adding second level child\n");
+#endif
+ }
+ // Calculate a position for this child in date order
+ else if (!hdrMoved && childIndex > 0 && moveIndex == 0) {
+ PRTime curHdrDate;
+
+ curHdr->GetDate(&curHdrDate);
+ if (newHdrDate < curHdrDate) moveIndex = childIndex;
+ }
+ }
+ }
+ }
+ // If this header is not a reply to a header in the thread, and isn't a parent
+ // check to see if it starts with Re: - if not, and the first header does
+ // start with re, should we make this header the top level header? If it's
+ // date is less (or it's ID?), then yes.
+ if (numChildren > 0 && !(newHdrFlags & nsMsgMessageFlags::HasRe) &&
+ !inReplyTo) {
+ PRTime topLevelHdrDate;
+
+ nsCOMPtr<nsIMsgDBHdr> topLevelHdr;
+ rv = GetRootHdr(getter_AddRefs(topLevelHdr));
+ if (NS_SUCCEEDED(rv) && topLevelHdr) {
+ topLevelHdr->GetDate(&topLevelHdrDate);
+ if (newHdrDate < topLevelHdrDate) {
+ RerootThread(child, topLevelHdr, announcer);
+ mdb_pos outPos;
+ m_mdbTable->MoveRow(m_mdbDB->GetEnv(), hdrRow, -1, 0, &outPos);
+ hdrMoved = true;
+ topLevelHdr->SetThreadParent(newHdrKey);
+ parentKeyNeedsSetting = false;
+ // ### need to get ancestor of new hdr here too.
+ SetThreadRootKey(newHdrKey);
+ child->SetThreadParent(nsMsgKey_None);
+ // argh, here we'd need to adjust all the headers that listed
+ // the demoted header as their thread parent, but only because
+ // of subject threading. Adjust them to point to the new parent,
+ // that is.
+ ReparentNonReferenceChildrenOf(topLevelHdr, newHdrKey, announcer);
+ }
+ }
+ }
+ // OK, check to see if we added this header, and didn't parent it.
+
+ if (numChildren > 0 && parentKeyNeedsSetting)
+ child->SetThreadParent(m_threadRootKey);
+
+ // Move child to keep thread sorted in ascending date order
+ if (!hdrMoved && moveIndex > 0) {
+ mdb_pos outPos;
+ m_mdbTable->MoveRow(m_mdbDB->GetEnv(), hdrRow, -1, moveIndex, &outPos);
+ }
+
+ // do this after we've put the new hdr in the thread
+ bool isKilled;
+ child->GetIsKilled(&isKilled);
+ if ((m_flags & nsMsgMessageFlags::Ignored || isKilled) && m_mdbDB)
+ m_mdbDB->MarkHdrRead(child, true, nullptr);
+#ifdef DEBUG_David_Bienvenu
+ nsMsgKey msgHdrThreadKey;
+ child->GetThreadId(&msgHdrThreadKey);
+ NS_ASSERTION(msgHdrThreadKey == m_threadKey,
+ "adding msg to thread it doesn't belong to");
+#endif
+ return rv;
+}
+
+// Re-point children that list oldTopLevelHdr as their thread parent (but are
+// not its true reference-based children) at newParentKey, notifying the
+// announcer for each reparented child. Used when a thread is rerooted under
+// subject-based threading.
+nsresult nsMsgThread::ReparentNonReferenceChildrenOf(
+ nsIMsgDBHdr* oldTopLevelHdr, nsMsgKey newParentKey,
+ nsIDBChangeAnnouncer* announcer) {
+ nsCOMPtr<nsIMsgDBHdr> curHdr;
+ uint32_t numChildren = 0;
+
+ GetNumChildren(&numChildren);
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ nsMsgKey oldTopLevelHdrKey;
+
+ oldTopLevelHdr->GetMessageKey(&oldTopLevelHdrKey);
+ nsresult rv = GetChildHdrAt(childIndex, getter_AddRefs(curHdr));
+ if (NS_SUCCEEDED(rv) && curHdr) {
+ nsMsgKey oldThreadParent, curHdrKey;
+ nsMsgHdr* oldTopLevelMsgHdr =
+ static_cast<nsMsgHdr*>(oldTopLevelHdr); // closed system, cast ok
+ curHdr->GetThreadParent(&oldThreadParent);
+ curHdr->GetMessageKey(&curHdrKey);
+ if (oldThreadParent == oldTopLevelHdrKey && curHdrKey != newParentKey &&
+ !oldTopLevelMsgHdr->IsParentOf(curHdr)) {
+ curHdr->GetThreadParent(&oldThreadParent);
+ curHdr->SetThreadParent(newParentKey);
+ // OK, this is a reparenting - need to send notification
+ if (announcer)
+ announcer->NotifyParentChangedAll(curHdrKey, oldThreadParent,
+ newParentKey, nullptr);
+ }
+ }
+ }
+ return NS_OK;
+}
+
+// Key of the child at table position aIndex (the row's mdb oid IS the
+// message key). Out-of-range yields nsMsgKey_None / NS_ERROR_ILLEGAL_VALUE.
+NS_IMETHODIMP nsMsgThread::GetChildKeyAt(uint32_t aIndex, nsMsgKey* aResult) {
+ NS_ENSURE_ARG_POINTER(aResult);
+ nsresult rv;
+
+ if (aIndex >= m_numChildren) {
+ *aResult = nsMsgKey_None;
+ return NS_ERROR_ILLEGAL_VALUE;
+ }
+ mdbOid oid;
+ rv = m_mdbTable->PosToOid(m_mdbDB->GetEnv(), aIndex, &oid);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ *aResult = oid.mOid_Id;
+ return NS_OK;
+}
+
+// Header of the child at table position aIndex, or NS_MSG_MESSAGE_NOT_FOUND.
+NS_IMETHODIMP nsMsgThread::GetChildHdrAt(uint32_t aIndex,
+ nsIMsgDBHdr** result) {
+ // mork doesn't seem to handle this correctly, so deal with going off
+ // the end here.
+ if (aIndex >= m_numChildren) return NS_MSG_MESSAGE_NOT_FOUND;
+ mdbOid oid;
+ nsresult rv = m_mdbTable->PosToOid(m_mdbDB->GetEnv(), aIndex, &oid);
+ NS_ENSURE_SUCCESS(rv, NS_MSG_MESSAGE_NOT_FOUND);
+ nsIMdbRow* hdrRow = nullptr;
+ rv = m_mdbTable->PosToRow(m_mdbDB->GetEnv(), aIndex, &hdrRow);
+ NS_ENSURE_TRUE(NS_SUCCEEDED(rv) && hdrRow, NS_ERROR_FAILURE);
+ // CreateMsgHdr takes ownership of the hdrRow reference.
+ rv = m_mdbDB->CreateMsgHdr(hdrRow, oid.mOid_Id, result);
+ return (NS_SUCCEEDED(rv)) ? NS_OK : NS_MSG_MESSAGE_NOT_FOUND;
+}
+
+// Look up a child by message key. On success *result is the header; if the
+// key is not in this thread's table, *result stays null (rv still NS_OK).
+NS_IMETHODIMP nsMsgThread::GetChild(nsMsgKey msgKey, nsIMsgDBHdr** result) {
+ nsresult rv;
+
+ mdb_bool hasOid;
+ mdbOid rowObjectId;
+
+ NS_ENSURE_ARG_POINTER(result);
+ NS_ENSURE_TRUE(m_mdbTable, NS_ERROR_INVALID_POINTER);
+
+ *result = NULL;
+ rowObjectId.mOid_Id = msgKey;
+ rowObjectId.mOid_Scope = m_mdbDB->m_hdrRowScopeToken;
+ rv = m_mdbTable->HasOid(m_mdbDB->GetEnv(), &rowObjectId, &hasOid);
+
+ if (NS_SUCCEEDED(rv) && hasOid && m_mdbDB && m_mdbDB->m_mdbStore) {
+ nsIMdbRow* hdrRow = nullptr;
+ rv = m_mdbDB->m_mdbStore->GetRow(m_mdbDB->GetEnv(), &rowObjectId, &hdrRow);
+ NS_ENSURE_TRUE(NS_SUCCEEDED(rv) && hdrRow, NS_ERROR_FAILURE);
+ rv = m_mdbDB->CreateMsgHdr(hdrRow, msgKey, result);
+ }
+
+ return rv;
+}
+
+// Intentionally a no-op: removal is done by key (RemoveChild/RemoveChildHdr).
+NS_IMETHODIMP nsMsgThread::RemoveChildAt(uint32_t aIndex) { return NS_OK; }
+
+// Cut the row for msgKey out of the thread table; if that leaves the thread
+// empty, also drop the thread from the db's all-threads table.
+// Does NOT adjust counts — callers (RemoveChildHdr) do that first.
+nsresult nsMsgThread::RemoveChild(nsMsgKey msgKey) {
+ nsresult rv;
+
+ mdbOid rowObjectId;
+ rowObjectId.mOid_Id = msgKey;
+ rowObjectId.mOid_Scope = m_mdbDB->m_hdrRowScopeToken;
+ rv = m_mdbTable->CutOid(m_mdbDB->GetEnv(), &rowObjectId);
+ // if this thread is empty, remove it from the all threads table.
+ if (m_numChildren == 0 && m_mdbDB->m_mdbAllThreadsTable) {
+ mdbOid rowID;
+ rowID.mOid_Id = m_threadKey;
+ rowID.mOid_Scope = m_mdbDB->m_threadRowScopeToken;
+
+ m_mdbDB->m_mdbAllThreadsTable->CutOid(m_mdbDB->GetEnv(), &rowID);
+ }
+#if 0 // this seems to cause problems
+ if (m_numChildren == 0 && m_metaRow && m_mdbDB)
+ m_metaRow->CutAllColumns(m_mdbDB->GetEnv());
+#endif
+
+ return rv;
+}
+
+// Public removal path: reparents the child's children to its parent, fixes
+// the newest-date cache and the child/unread counts, then cuts the row.
+NS_IMETHODIMP nsMsgThread::RemoveChildHdr(nsIMsgDBHdr* child,
+ nsIDBChangeAnnouncer* announcer) {
+ uint32_t flags;
+ nsMsgKey key;
+ nsMsgKey threadParent;
+
+ NS_ENSURE_ARG_POINTER(child);
+
+ child->GetFlags(&flags);
+ child->GetMessageKey(&key);
+
+ child->GetThreadParent(&threadParent);
+ ReparentChildrenOf(key, threadParent, announcer);
+
+ // if this was the newest msg, clear the newest msg date so we'll recalc.
+ uint32_t date;
+ child->GetDateInSeconds(&date);
+ if (date == m_newestMsgDate) SetNewestMsgDate(0);
+
+ if (!(flags & nsMsgMessageFlags::Read)) ChangeUnreadChildCount(-1);
+ ChangeChildCount(-1);
+ return RemoveChild(key);
+}
+
+// Move every child whose thread parent is oldParent under newParent,
+// notifying the announcer per child. Special case: when newParent is
+// nsMsgKey_None (the old parent was the root), the first such child is
+// promoted to root and the remaining ones become its children.
+nsresult nsMsgThread::ReparentChildrenOf(nsMsgKey oldParent, nsMsgKey newParent,
+ nsIDBChangeAnnouncer* announcer) {
+ nsresult rv = NS_OK;
+
+ uint32_t numChildren = 0;
+ GetNumChildren(&numChildren);
+
+ nsCOMPtr<nsIMsgDBHdr> curHdr;
+ if (numChildren > 0) {
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ rv = GetChildHdrAt(childIndex, getter_AddRefs(curHdr));
+ if (NS_SUCCEEDED(rv) && curHdr) {
+ nsMsgKey threadParent;
+
+ curHdr->GetThreadParent(&threadParent);
+ if (threadParent == oldParent) {
+ nsMsgKey curKey;
+
+ curHdr->SetThreadParent(newParent);
+ curHdr->GetMessageKey(&curKey);
+ if (announcer)
+ announcer->NotifyParentChangedAll(curKey, oldParent, newParent,
+ nullptr);
+ // if the old parent was the root of the thread, then only the first
+ // child gets promoted to root, and other children become children of
+ // the new root.
+ if (newParent == nsMsgKey_None) {
+ SetThreadRootKey(curKey);
+ newParent = curKey;
+ }
+ }
+ }
+ }
+ }
+ return rv;
+}
+
+// Adjust only the cached/persisted unread count when a child's read state
+// flips; the caller already changed the header's Read flag.
+NS_IMETHODIMP nsMsgThread::MarkChildRead(bool bRead) {
+ ChangeUnreadChildCount(bRead ? -1 : 1);
+ return NS_OK;
+}
+
+/**
+ * Helper class for enumerating through the messages in a thread.
+ *
+ * Enumeration starts either at the thread root (startKey == nsMsgKey_None)
+ * or at the children of a given message (startKey set). An optional filter
+ * callback can veto individual headers. Prefetch() pulls the next matching
+ * header lazily from the thread table.
+ */
+class nsMsgThreadEnumerator : public nsBaseMsgEnumerator {
+ public:
+ // nsIMsgEnumerator support.
+ NS_IMETHOD GetNext(nsIMsgDBHdr** aItem) override;
+ NS_IMETHOD HasMoreElements(bool* aResult) override;
+
+ // nsMsgThreadEnumerator methods:
+ typedef nsresult (*nsMsgThreadEnumeratorFilter)(nsIMsgDBHdr* hdr,
+ void* closure);
+
+ nsMsgThreadEnumerator(nsMsgThread* thread, nsMsgKey startKey,
+ nsMsgThreadEnumeratorFilter filter, void* closure);
+ int32_t MsgKeyFirstChildIndex(nsMsgKey inMsgKey);
+
+ protected:
+ ~nsMsgThreadEnumerator() override = default;
+ nsresult Prefetch();
+
+ nsIMdbTableRowCursor* mRowCursor;
+ nsCOMPtr<nsIMsgDBHdr> mResultHdr; // next header to hand out
+ RefPtr<nsMsgThread> mThread;
+ nsMsgKey mThreadParentKey; // key whose children we enumerate
+ nsMsgKey mFirstMsgKey; // key of the thread's root header
+ int32_t mChildIndex; // next table index Prefetch() will look at
+ bool mDone;
+ bool mNeedToPrefetch;
+ nsMsgThreadEnumeratorFilter mFilter;
+ void* mClosure; // opaque state passed to mFilter
+ bool mFoundChildren;
+};
+
+// Record the root key, then (if starting from a specific message) locate the
+// index of that message's first child in the table; mDone is set when it has
+// no children.
+nsMsgThreadEnumerator::nsMsgThreadEnumerator(nsMsgThread* thread,
+ nsMsgKey startKey,
+ nsMsgThreadEnumeratorFilter filter,
+ void* closure)
+ : mRowCursor(nullptr),
+ mDone(false),
+ mFilter(filter),
+ mClosure(closure),
+ mFoundChildren(false) {
+ mThreadParentKey = startKey;
+ mChildIndex = 0;
+ mThread = thread;
+ mNeedToPrefetch = true;
+ mFirstMsgKey = nsMsgKey_None;
+
+ nsresult rv = mThread->GetRootHdr(getter_AddRefs(mResultHdr));
+
+ if (NS_SUCCEEDED(rv) && mResultHdr) mResultHdr->GetMessageKey(&mFirstMsgKey);
+
+ uint32_t numChildren = 0;
+ mThread->GetNumChildren(&numChildren);
+
+ if (mThreadParentKey != nsMsgKey_None) {
+ nsMsgKey msgKey = nsMsgKey_None;
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ rv = mThread->GetChildHdrAt(childIndex, getter_AddRefs(mResultHdr));
+ if (NS_SUCCEEDED(rv) && mResultHdr) {
+ mResultHdr->GetMessageKey(&msgKey);
+
+ if (msgKey == startKey) {
+ mChildIndex = MsgKeyFirstChildIndex(msgKey);
+ mDone = (mChildIndex < 0);
+ break;
+ }
+
+ if (mDone) break;
+ } else
+ NS_ASSERTION(false, "couldn't get child from thread");
+ }
+ }
+
+#ifdef DEBUG_bienvenu1
+ nsCOMPtr<nsIMsgDBHdr> child;
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ rv = mThread->GetChildHdrAt(childIndex, getter_AddRefs(child));
+ if (NS_SUCCEEDED(rv) && child) {
+ nsMsgKey threadParent;
+ nsMsgKey msgKey;
+ // we're only doing one level of threading, so check if caller is
+ // asking for children of the first message in the thread or not.
+ // if not, we will tell him there are no children.
+ child->GetMessageKey(&msgKey);
+ child->GetThreadParent(&threadParent);
+
+ printf("index = %ld key = %ld parent = %lx\n", childIndex, msgKey,
+ threadParent);
+ }
+ }
+#endif
+}
+
+// Linear scan for the table index of the first header whose thread parent is
+// inMsgKey; -1 if that message has no children in the table.
+int32_t nsMsgThreadEnumerator::MsgKeyFirstChildIndex(nsMsgKey inMsgKey) {
+ // if (msgKey != mThreadParentKey)
+ // mDone = true;
+ // look through rest of thread looking for a child of this message.
+ // If the inMsgKey is the first message in the thread, then all children
+ // without parents are considered to be children of inMsgKey.
+ // Otherwise, only true children qualify.
+
+ int32_t firstChildIndex = -1;
+ uint32_t numChildren = 0;
+ mThread->GetNumChildren(&numChildren);
+
+ // if this is the first message in the thread, just check if there's more than
+ // one message in the thread.
+ // if (inMsgKey == mThread->m_threadRootKey)
+ // return (numChildren > 1) ? 1 : -1;
+
+ for (uint32_t curChildIndex = 0; curChildIndex < numChildren;
+ curChildIndex++) {
+ nsCOMPtr<nsIMsgDBHdr> curHdr;
+ nsresult rv = mThread->GetChildHdrAt(curChildIndex, getter_AddRefs(curHdr));
+ if (NS_SUCCEEDED(rv) && curHdr) {
+ nsMsgKey parentKey;
+
+ curHdr->GetThreadParent(&parentKey);
+ if (parentKey == inMsgKey) {
+ firstChildIndex = curChildIndex;
+ break;
+ }
+ }
+ }
+#ifdef DEBUG_bienvenu1
+ printf("first child index of %ld = %ld\n", inMsgKey, firstChildIndex);
+#endif
+ return firstChildIndex;
+}
+
+// Hand out the prefetched header (addref'ed) and arm the next prefetch.
+// NOTE(review): if mResultHdr is null without a pending prefetch, *aItem is
+// left untouched while NS_OK is returned — callers appear to rely on
+// HasMoreElements() being consulted first; confirm before changing.
+NS_IMETHODIMP nsMsgThreadEnumerator::GetNext(nsIMsgDBHdr** aItem) {
+ NS_ENSURE_ARG_POINTER(aItem);
+ nsresult rv;
+
+ if (mNeedToPrefetch) {
+ rv = Prefetch();
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ if (mResultHdr) {
+ NS_ADDREF(*aItem = mResultHdr);
+ mNeedToPrefetch = true;
+ }
+ return NS_OK;
+}
+
+// Advance to the next header that belongs to the enumeration: either the
+// thread root (when no start key was given) or the next table entry whose
+// parent matches mThreadParentKey and that passes the optional filter.
+// Sets mDone and returns an error when exhausted. As a side effect, triggers
+// ReparentMsgsWithInvalidParent() when a root with children yields none.
+nsresult nsMsgThreadEnumerator::Prefetch() {
+ nsresult rv = NS_OK; // XXX or should this default to an error?
+ mResultHdr = nullptr;
+ if (mThreadParentKey == nsMsgKey_None) {
+ rv = mThread->GetRootHdr(getter_AddRefs(mResultHdr));
+ NS_ASSERTION(NS_SUCCEEDED(rv) && mResultHdr,
+ "better be able to get root hdr");
+ mChildIndex = 0; // since root can be anywhere, set mChildIndex to 0.
+ } else if (!mDone) {
+ uint32_t numChildren = 0;
+ mThread->GetNumChildren(&numChildren);
+
+ while (mChildIndex < (int32_t)numChildren) {
+ rv = mThread->GetChildHdrAt(mChildIndex++, getter_AddRefs(mResultHdr));
+ if (NS_SUCCEEDED(rv) && mResultHdr) {
+ nsMsgKey parentKey;
+ nsMsgKey curKey;
+
+ if (mFilter && NS_FAILED(mFilter(mResultHdr, mClosure))) {
+ mResultHdr = nullptr;
+ continue;
+ }
+
+ mResultHdr->GetThreadParent(&parentKey);
+ mResultHdr->GetMessageKey(&curKey);
+ // if the parent is the same as the msg we're enumerating over,
+ // or the parentKey isn't set, and we're iterating over the top
+ // level message in the thread, then leave mResultHdr set to cur msg.
+ if (parentKey == mThreadParentKey ||
+ (parentKey == nsMsgKey_None && mThreadParentKey == mFirstMsgKey &&
+ curKey != mThreadParentKey))
+ break;
+ mResultHdr = nullptr;
+ } else
+ NS_ASSERTION(false, "better be able to get child");
+ }
+ // Root has children in the table but none claimed it as parent: the
+ // parent links are invalid, so repair them.
+ if (!mResultHdr && mThreadParentKey == mFirstMsgKey && !mFoundChildren &&
+ numChildren > 1)
+ mThread->ReparentMsgsWithInvalidParent(numChildren, mThreadParentKey);
+ }
+ if (!mResultHdr) {
+ mDone = true;
+ return NS_ERROR_FAILURE;
+ }
+ if (NS_FAILED(rv)) {
+ mDone = true;
+ return rv;
+ } else
+ mNeedToPrefetch = false;
+ mFoundChildren = true;
+
+#ifdef DEBUG_bienvenu1
+ nsMsgKey debugMsgKey;
+ mResultHdr->GetMessageKey(&debugMsgKey);
+ printf("next for %ld = %ld\n", mThreadParentKey, debugMsgKey);
+#endif
+
+ return rv;
+}
+
+// Prefetch (ignoring its status; failure sets mDone) and report whether a
+// header is pending.
+NS_IMETHODIMP nsMsgThreadEnumerator::HasMoreElements(bool* aResult) {
+ NS_ENSURE_ARG_POINTER(aResult);
+ if (mNeedToPrefetch) Prefetch();
+ *aResult = !mDone;
+ return NS_OK;
+}
+
+// Factory: enumerate the (direct) children of parentKey, unfiltered.
+NS_IMETHODIMP nsMsgThread::EnumerateMessages(nsMsgKey parentKey,
+ nsIMsgEnumerator** result) {
+ NS_ADDREF(*result =
+ new nsMsgThreadEnumerator(this, parentKey, nullptr, nullptr));
+ return NS_OK;
+}
+
+// Repair pass: any child whose recorded parent key is not actually present
+// in this thread is reparented to threadParentKey; a child that lists itself
+// as its own parent is pointed at the root (or detached if it IS the root).
+nsresult nsMsgThread::ReparentMsgsWithInvalidParent(uint32_t numChildren,
+ nsMsgKey threadParentKey) {
+ nsresult rv = NS_OK;
+ // run through looking for messages that don't have a correct parent,
+ // i.e., a parent that's in the thread!
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ nsCOMPtr<nsIMsgDBHdr> curChild;
+ rv = GetChildHdrAt(childIndex, getter_AddRefs(curChild));
+ if (NS_SUCCEEDED(rv) && curChild) {
+ nsMsgKey parentKey;
+ nsCOMPtr<nsIMsgDBHdr> parent;
+
+ curChild->GetThreadParent(&parentKey);
+
+ if (parentKey != nsMsgKey_None) {
+ GetChild(parentKey, getter_AddRefs(parent));
+ if (!parent)
+ curChild->SetThreadParent(threadParentKey);
+ else {
+ nsMsgKey childKey;
+ curChild->GetMessageKey(&childKey);
+ // can't be your own parent; set parent to thread parent,
+ // or make ourselves the root if we are the root.
+ if (childKey == parentKey)
+ curChild->SetThreadParent(
+ m_threadRootKey == childKey ? nsMsgKey_None : m_threadRootKey);
+ }
+ }
+ }
+ }
+ return rv;
+}
+
+// Return the thread's root header (addref'ed). Fast path: look up
+// m_threadRootKey and verify the header really has no parent. If that fails,
+// rescan all children for a parentless header, reset the cached root key,
+// and repair invalid parent links; as a last resort fall back to index 0.
+// Also re-stamps the header's thread id if it disagrees with this thread.
+NS_IMETHODIMP nsMsgThread::GetRootHdr(nsIMsgDBHdr** result) {
+ NS_ENSURE_ARG_POINTER(result);
+
+ *result = nullptr;
+ int32_t resultIndex = -1;
+ nsresult rv = NS_OK;
+
+ if (m_threadRootKey != nsMsgKey_None) {
+ rv = GetChildHdrForKey(m_threadRootKey, result, &resultIndex);
+ if (NS_SUCCEEDED(rv) && *result) {
+ // check that we're really the root key.
+ nsMsgKey parentKey;
+ (*result)->GetThreadParent(&parentKey);
+ if (parentKey == nsMsgKey_None) return rv;
+ // XXX Hack: since GetChildHdrForKey() addref'ed result, we need to
+ // release any unwanted result before continuing.
+ NS_RELEASE(*result);
+ }
+#ifdef DEBUG_David_Bienvenu
+ printf("need to reset thread root key\n");
+#endif
+ nsMsgKey threadParentKey = nsMsgKey_None;
+ uint32_t numChildren = 0;
+ GetNumChildren(&numChildren);
+
+ // Recovery scan: first parentless child becomes the new root.
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ nsCOMPtr<nsIMsgDBHdr> curChild;
+ rv = GetChildHdrAt(childIndex, getter_AddRefs(curChild));
+ if (NS_SUCCEEDED(rv) && curChild) {
+ nsMsgKey parentKey;
+
+ curChild->GetThreadParent(&parentKey);
+ if (parentKey == nsMsgKey_None) {
+ curChild->GetMessageKey(&threadParentKey);
+ if (*result) {
+ NS_WARNING("two top level msgs, not good");
+ continue;
+ }
+ SetThreadRootKey(threadParentKey);
+ curChild.forget(result);
+ ReparentMsgsWithInvalidParent(numChildren, threadParentKey);
+ }
+ }
+ }
+ }
+ if (!*result) {
+ // if we can't get the thread root key, we'll just get the first hdr.
+ // there's a bug where sometimes we weren't resetting the thread root key
+ // when removing the thread root key.
+ rv = GetChildHdrAt(0, result);
+ }
+ if (!*result) return rv;
+ // Check that the thread id of the message is this thread.
+ nsMsgKey threadId = nsMsgKey_None;
+ (void)(*result)->GetThreadId(&threadId);
+ if (threadId != m_threadKey) (*result)->SetThreadId(m_threadKey);
+ return rv;
+}
+
+// Apply a signed delta to the persisted total child count, clamping at zero,
+// and refresh the cached m_numChildren from the result.
+nsresult nsMsgThread::ChangeChildCount(int32_t delta) {
+ nsresult rv;
+
+ uint32_t childCount = 0;
+ m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadChildrenColumnToken, childCount);
+
+ NS_WARNING_ASSERTION(childCount != 0 || delta > 0,
+ "child count gone negative");
+ childCount += delta;
+
+ NS_WARNING_ASSERTION((int32_t)childCount >= 0,
+ "child count gone to 0 or below");
+ if ((int32_t)childCount < 0) // force child count to >= 0
+ childCount = 0;
+
+ rv = m_mdbDB->UInt32ToRowCellColumn(
+ m_metaRow, m_mdbDB->m_threadChildrenColumnToken, childCount);
+ m_numChildren = childCount;
+ return rv;
+}
+
+// Same as ChangeChildCount, but for the unread-child count column.
+nsresult nsMsgThread::ChangeUnreadChildCount(int32_t delta) {
+ nsresult rv;
+
+ uint32_t childCount = 0;
+ m_mdbDB->RowCellColumnToUInt32(
+ m_metaRow, m_mdbDB->m_threadUnreadChildrenColumnToken, childCount);
+ childCount += delta;
+ if ((int32_t)childCount < 0) {
+#ifdef DEBUG_bienvenu1
+ NS_ASSERTION(false, "negative unread child count");
+#endif
+ childCount = 0;
+ }
+ rv = m_mdbDB->UInt32ToRowCellColumn(
+ m_metaRow, m_mdbDB->m_threadUnreadChildrenColumnToken, childCount);
+ m_numUnreadChildren = childCount;
+ return rv;
+}
+
+// Update the cached root key and persist it to the meta row.
+nsresult nsMsgThread::SetThreadRootKey(nsMsgKey threadRootKey) {
+ m_threadRootKey = threadRootKey;
+ return m_mdbDB->UInt32ToRowCellColumn(
+ m_metaRow, m_mdbDB->m_threadRootKeyColumnToken, threadRootKey);
+}
+
+// Linear search for the header with desiredKey in this thread's table.
+// On a hit, cross-checks the header's thread id: a zero-size "phantom" in
+// the wrong thread is removed; otherwise the header is either rethreaded
+// into its real thread or re-stamped with this thread's id.
+// *resultIndex receives the table index scanned to (== numChildren on miss).
+nsresult nsMsgThread::GetChildHdrForKey(nsMsgKey desiredKey,
+ nsIMsgDBHdr** result,
+ int32_t* resultIndex) {
+ NS_ENSURE_ARG_POINTER(result);
+
+ nsresult rv = NS_OK; // XXX or should this default to an error?
+ uint32_t numChildren = 0;
+ GetNumChildren(&numChildren);
+ uint32_t childIndex;
+ for (childIndex = 0; childIndex < numChildren; childIndex++) {
+ rv = GetChildHdrAt(childIndex, result);
+ if (NS_SUCCEEDED(rv) && *result) {
+ nsMsgKey msgKey;
+ // we're only doing one level of threading, so check if caller is
+ // asking for children of the first message in the thread or not.
+ // if not, we will tell him there are no children.
+ (*result)->GetMessageKey(&msgKey);
+
+ if (msgKey == desiredKey) {
+ nsMsgKey threadKey;
+ (*result)->GetThreadId(&threadKey);
+ if (threadKey != m_threadKey) // this msg isn't in this thread
+ {
+ NS_WARNING("msg in wrong thread - this shouldn't happen");
+ uint32_t msgSize;
+ (*result)->GetMessageSize(&msgSize);
+ if (msgSize == 0) // this is a phantom message - let's get rid of it.
+ {
+ RemoveChild(msgKey);
+ rv = NS_ERROR_UNEXPECTED;
+ } else {
+ // otherwise, let's try to figure out which thread
+ // this message really belongs to.
+ nsCOMPtr<nsIMsgThread> threadKeyThread =
+ dont_AddRef(m_mdbDB->GetThreadForThreadId(threadKey));
+ if (threadKeyThread) {
+ nsCOMPtr<nsIMsgDBHdr> otherThreadHdr;
+ threadKeyThread->GetChild(msgKey, getter_AddRefs(otherThreadHdr));
+ if (otherThreadHdr) {
+ // Message is in one thread but has a different thread id.
+ // Remove it from the thread and then rethread it.
+ RemoveChild(msgKey);
+ threadKeyThread->RemoveChildHdr(otherThreadHdr, nullptr);
+ bool newThread;
+ nsMsgHdr* msgHdr = static_cast<nsMsgHdr*>(otherThreadHdr.get());
+ m_mdbDB->ThreadNewHdr(msgHdr, newThread);
+ } else {
+ (*result)->SetThreadId(m_threadKey);
+ }
+ }
+ }
+ }
+ break;
+ }
+ // XXX Hack: since GetChildHdrAt() addref'ed result, we need to
+ // release any unwanted result before continuing in the loop.
+ NS_RELEASE(*result);
+ }
+ }
+ if (resultIndex) *resultIndex = (int32_t)childIndex;
+
+ return rv;
+}
+
+// Return the unread child closest to the root: the root itself if unread,
+// otherwise the unread child with the fewest ancestors (lowest level).
+// Returns NS_ERROR_NULL_POINTER when the thread has no unread children.
+NS_IMETHODIMP nsMsgThread::GetFirstUnreadChild(nsIMsgDBHdr** result) {
+ NS_ENSURE_ARG_POINTER(result);
+
+ uint8_t minLevel = 0xff;
+
+ uint32_t numChildren = 0;
+ GetNumChildren(&numChildren);
+
+ nsCOMPtr<nsIMsgDBHdr> retHdr;
+
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ nsCOMPtr<nsIMsgDBHdr> child;
+ nsresult rv = GetChildHdrAt(childIndex, getter_AddRefs(child));
+ if (NS_SUCCEEDED(rv) && child) {
+ nsMsgKey msgKey;
+ child->GetMessageKey(&msgKey);
+
+ bool isRead;
+ rv = m_mdbDB->IsRead(msgKey, &isRead);
+ if (NS_SUCCEEDED(rv) && !isRead) {
+ // this is the root, so it's the best we're going to do.
+ if (msgKey == m_threadRootKey) {
+ retHdr = child;
+ break;
+ }
+ uint8_t level = 0;
+ nsMsgKey parentId;
+ child->GetThreadParent(&parentId);
+ nsCOMPtr<nsIMsgDBHdr> parent;
+ // count number of ancestors - that's our level
+ while (parentId != nsMsgKey_None) {
+ rv = m_mdbDB->GetMsgHdrForKey(parentId, getter_AddRefs(parent));
+ if (parent) {
+ parent->GetThreadParent(&parentId);
+ level++;
+ } else {
+ // Fix: a dangling parent key used to leave parentId unchanged
+ // here, spinning this loop forever. Treat a missing ancestor as
+ // the end of the chain instead.
+ break;
+ }
+ }
+ if (level < minLevel) {
+ minLevel = level;
+ retHdr = child;
+ }
+ }
+ }
+ }
+
+ retHdr.forget(result);
+ return (*result) ? NS_OK : NS_ERROR_NULL_POINTER;
+}
+
+// Newest (largest) date-in-seconds across the thread; lazily recomputed by
+// scanning all children when the cached value was cleared (set to 0).
+NS_IMETHODIMP nsMsgThread::GetNewestMsgDate(uint32_t* aResult) {
+ // if this hasn't been set, figure it out by enumerating the msgs in the
+ // thread.
+ if (!m_newestMsgDate) {
+ nsresult rv;
+ uint32_t numChildren;
+ GetNumChildren(&numChildren);
+ for (uint32_t childIndex = 0; childIndex < numChildren; childIndex++) {
+ nsCOMPtr<nsIMsgDBHdr> child;
+ rv = GetChildHdrAt(childIndex, getter_AddRefs(child));
+ if (NS_SUCCEEDED(rv)) {
+ uint32_t msgDate;
+ child->GetDateInSeconds(&msgDate);
+ if (msgDate > m_newestMsgDate) m_newestMsgDate = msgDate;
+ }
+ }
+ }
+ *aResult = m_newestMsgDate;
+ return NS_OK;
+}
+
+// Update the cached newest date and persist it to the meta row.
+NS_IMETHODIMP nsMsgThread::SetNewestMsgDate(uint32_t aNewestMsgDate) {
+ m_newestMsgDate = aNewestMsgDate;
+ return m_mdbDB->UInt32ToRowCellColumn(
+ m_metaRow, m_mdbDB->m_threadNewestMsgDateColumnToken, aNewestMsgDate);
+}
diff --git a/comm/mailnews/db/msgdb/src/nsNewsDatabase.cpp b/comm/mailnews/db/msgdb/src/nsNewsDatabase.cpp
new file mode 100644
index 0000000000..5a5ba19d5e
--- /dev/null
+++ b/comm/mailnews/db/msgdb/src/nsNewsDatabase.cpp
@@ -0,0 +1,307 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "msgCore.h"
+#include "nsIMsgDBView.h"
+#include "nsIMsgThread.h"
+#include "nsNewsDatabase.h"
+#include "nsMsgKeySet.h"
+#include "nsMsgMessageFlags.h"
+#include "nsCOMPtr.h"
+#include "prlog.h"
+
+#if defined(DEBUG_sspitzer_) || defined(DEBUG_seth_)
+# define DEBUG_NEWS_DATABASE 1
+#endif
+
+// Start with no newsrc read set attached; SetReadSet() supplies it later.
+nsNewsDatabase::nsNewsDatabase() : m_readSet(nullptr) {}
+
+nsNewsDatabase::~nsNewsDatabase() {}
+
+NS_IMPL_ADDREF_INHERITED(nsNewsDatabase, nsMsgDatabase)
+NS_IMPL_RELEASE_INHERITED(nsNewsDatabase, nsMsgDatabase)
+
+// Hand-rolled QueryInterface: answer nsINewsDatabase ourselves and defer
+// every other interface to the nsMsgDatabase base implementation.
+NS_IMETHODIMP nsNewsDatabase::QueryInterface(REFNSIID aIID,
+                                             void** aInstancePtr) {
+  if (!aInstancePtr) return NS_ERROR_NULL_POINTER;
+  *aInstancePtr = nullptr;
+
+  if (aIID.Equals(NS_GET_IID(nsINewsDatabase))) {
+    *aInstancePtr = static_cast<nsINewsDatabase*>(this);
+  }
+
+  // AddRef on behalf of the caller when we satisfied the request above.
+  if (*aInstancePtr) {
+    AddRef();
+    return NS_OK;
+  }
+
+  return nsMsgDatabase::QueryInterface(aIID, aInstancePtr);
+}
+
+// Both shutdown paths simply delegate to the base class; there is no
+// news-specific teardown to do here.
+nsresult nsNewsDatabase::Close(bool forceCommit) {
+  return nsMsgDatabase::Close(forceCommit);
+}
+
+nsresult nsNewsDatabase::ForceClosed() { return nsMsgDatabase::ForceClosed(); }
+
+// Snapshot the in-memory read set into the folder info before committing,
+// then let the base class perform the actual commit.
+nsresult nsNewsDatabase::Commit(nsMsgDBCommit commitType) {
+  if (m_dbFolderInfo && m_readSet) {
+    // let's write out our idea of the read set so we can compare it with that
+    // of the .rc file next time we start up.
+    nsCString readSet;
+    m_readSet->Output(getter_Copies(readSet));
+    m_dbFolderInfo->SetCharProperty("readSet", readSet);
+  }
+  return nsMsgDatabase::Commit(commitType);
+}
+
+// News databases use the common message-db schema version.
+uint32_t nsNewsDatabase::GetCurVersion() { return kMsgDBVersion; }
+
+NS_IMETHODIMP nsNewsDatabase::IsRead(nsMsgKey key, bool* pRead) {
+  NS_ASSERTION(pRead, "null out param in IsRead");
+  if (!pRead) return NS_ERROR_NULL_POINTER;
+
+  // No read set attached yet -> we can't answer the question.
+  if (!m_readSet) return NS_ERROR_FAILURE;
+
+  // For news, read state is membership of the key in the newsrc read set,
+  // not a per-header flag.
+  *pRead = m_readSet->IsMember(key);
+  return NS_OK;
+}
+
+// Read state for a news header is looked up by its message key.
+nsresult nsNewsDatabase::IsHeaderRead(nsIMsgDBHdr* msgHdr, bool* pRead) {
+  if (!msgHdr || !pRead) return NS_ERROR_NULL_POINTER;
+
+  nsMsgKey messageKey;
+  nsresult rv = msgHdr->GetMessageKey(&messageKey);
+  if (NS_FAILED(rv)) return rv;
+
+  return IsRead(messageKey, pRead);
+}
+
+// return highest article number we've seen.
+NS_IMETHODIMP nsNewsDatabase::GetHighWaterArticleNum(nsMsgKey* key) {
+  // The high water mark is tracked by the folder info, not computed here.
+  NS_ASSERTION(m_dbFolderInfo, "null db folder info");
+  if (!m_dbFolderInfo) return NS_ERROR_FAILURE;
+  return m_dbFolderInfo->GetHighWater(key);
+}
+
+// return the key of the first article number we know about.
+// Since the iterator iterates in id order, we can just grab the
+// messagekey of the first header it returns.
+// ### dmb
+// This will not deal with the situation where we get holes in
+// the headers we know about. Need to figure out how and when
+// to solve that. This could happen if a transfer is interrupted.
+// Do we need to keep track of known arts permanently?
+NS_IMETHODIMP nsNewsDatabase::GetLowWaterArticleNum(nsMsgKey* key) {
+  nsresult rv;
+
+  // Headers enumerate in key (article number) order, so the first header's
+  // key is the low water mark.
+  nsCOMPtr<nsIMsgEnumerator> hdrs;
+  rv = EnumerateMessages(getter_AddRefs(hdrs));
+  if (NS_FAILED(rv)) return rv;
+
+  nsCOMPtr<nsIMsgDBHdr> first;
+  rv = hdrs->GetNext(getter_AddRefs(first));
+  NS_ASSERTION(NS_SUCCEEDED(rv), "nsMsgDBEnumerator broken");
+  if (NS_FAILED(rv)) return rv;
+
+  return first->GetMessageKey(key);
+}
+
+// Article expiration is not implemented at the database level.
+nsresult nsNewsDatabase::ExpireUpTo(nsMsgKey expireKey) {
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+nsresult nsNewsDatabase::ExpireRange(nsMsgKey startRange, nsMsgKey endRange) {
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP nsNewsDatabase::GetReadSet(nsMsgKeySet** pSet) {
+  if (!pSet) return NS_ERROR_NULL_POINTER;
+  // Note: the raw pointer is handed out as-is; no AddRef happens here.
+  *pSet = m_readSet;
+  return NS_OK;
+}
+
+// Attach the newsrc read set. If the copy of the read set stored in the db
+// folder info differs from the one just supplied, resync the db's flags.
+NS_IMETHODIMP nsNewsDatabase::SetReadSet(nsMsgKeySet* pSet) {
+  m_readSet = pSet;
+
+  if (m_readSet) {
+    // compare this read set with the one in the db folder info.
+    // If not equivalent, sync with this one.
+    nsCString dbReadSet;
+    if (m_dbFolderInfo) m_dbFolderInfo->GetCharProperty("readSet", dbReadSet);
+    nsCString newsrcReadSet;
+    m_readSet->Output(getter_Copies(newsrcReadSet));
+    if (!dbReadSet.Equals(newsrcReadSet)) SyncWithReadSet();
+  }
+  return NS_OK;
+}
+
+// Update the read state for msgHdr in both the base-class flags and the
+// newsrc read set. Returns true if the state actually changed.
+bool nsNewsDatabase::SetHdrReadFlag(nsIMsgDBHdr* msgHdr, bool bRead) {
+  // Initialized so a failing IsHeaderRead() (e.g. no read set attached yet)
+  // can't leave us branching on an uninitialized value.
+  bool isRead = false;
+  nsresult rv = IsHeaderRead(msgHdr, &isRead);
+
+  if (isRead == bRead) {
+    // give the base class a chance to update m_flags.
+    nsMsgDatabase::SetHdrReadFlag(msgHdr, bRead);
+    return false;
+  } else {
+    nsMsgKey messageKey;
+
+    // give the base class a chance to update m_flags.
+    nsMsgDatabase::SetHdrReadFlag(msgHdr, bRead);
+    rv = msgHdr->GetMessageKey(&messageKey);
+    if (NS_FAILED(rv)) return false;
+
+    NS_ASSERTION(m_readSet, "m_readSet is null");
+    if (!m_readSet) return false;
+
+    if (!bRead) {
+#ifdef DEBUG_NEWS_DATABASE
+      printf("remove %d from the set\n", messageKey);
+#endif
+
+      m_readSet->Remove(messageKey);
+
+      rv = NotifyReadChanged(nullptr);
+      if (NS_FAILED(rv)) return false;
+    } else {
+#ifdef DEBUG_NEWS_DATABASE
+      printf("add %d to the set\n", messageKey);
+#endif
+
+      if (m_readSet->Add(messageKey) < 0) return false;
+
+      rv = NotifyReadChanged(nullptr);
+      if (NS_FAILED(rv)) return false;
+    }
+  }
+  return true;
+}
+
+NS_IMETHODIMP nsNewsDatabase::MarkAllRead(nsTArray<nsMsgKey>& aThoseMarked) {
+  // Without an attached read set the AddRange() calls below would crash;
+  // fail the same way IsRead() does when m_readSet is missing.
+  if (!m_readSet) return NS_ERROR_FAILURE;
+
+  nsMsgKey lowWater = nsMsgKey_None, highWater;
+  nsCString knownArts;
+  if (m_dbFolderInfo) {
+    m_dbFolderInfo->GetKnownArtsSet(getter_Copies(knownArts));
+    RefPtr<nsMsgKeySet> knownKeys = nsMsgKeySet::Create(knownArts.get());
+    if (knownKeys) lowWater = knownKeys->GetFirstMember();
+  }
+  if (lowWater == nsMsgKey_None) GetLowWaterArticleNum(&lowWater);
+  GetHighWaterArticleNum(&highWater);
+  // Mark everything below the low water mark read in the newsrc set.
+  if (lowWater > 2) m_readSet->AddRange(1, lowWater - 1);
+  nsresult err = nsMsgDatabase::MarkAllRead(aThoseMarked);
+  if (NS_SUCCEEDED(err) && 1 <= highWater)
+    m_readSet->AddRange(1, highWater); // mark everything read in newsrc.
+
+  return err;
+}
+
+nsresult nsNewsDatabase::SyncWithReadSet() {
+  // The code below attempts to update the underlying nsMsgDatabase's idea
+  // of read/unread flags to match the read set in the .newsrc file. It should
+  // only be called when they don't match, e.g., we crashed after committing the
+  // db but before writing out the .newsrc
+  nsCOMPtr<nsIMsgEnumerator> hdrs;
+  nsresult rv = EnumerateMessages(getter_AddRefs(hdrs));
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  bool hasMore = false, readInNewsrc, isReadInDB, changed = false;
+  int32_t numMessages = 0, numUnreadMessages = 0;
+
+  // Scan all messages in DB
+  while (NS_SUCCEEDED(rv = hdrs->HasMoreElements(&hasMore)) && hasMore) {
+    nsCOMPtr<nsIMsgDBHdr> header;
+    rv = hdrs->GetNext(getter_AddRefs(header));
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    // Use the base-class notion of read state; this class's override
+    // consults the newsrc set instead.
+    rv = nsMsgDatabase::IsHeaderRead(header, &isReadInDB);
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    nsMsgKey messageKey;
+    header->GetMessageKey(&messageKey);
+    IsRead(messageKey, &readInNewsrc);
+
+    numMessages++;
+    if (!readInNewsrc) numUnreadMessages++;
+
+    // If DB and readSet disagree on Read/Unread, fix DB
+    if (readInNewsrc != isReadInDB) {
+      MarkHdrRead(header, readInNewsrc, nullptr);
+      changed = true;
+    }
+  }
+
+  // Update FolderInfo Counters
+  // (the do/while(false) lets us bail out on the first getter failure)
+  if (m_dbFolderInfo) {
+    do {
+      int32_t oldMessages, oldUnreadMessages;
+      rv = m_dbFolderInfo->GetNumMessages(&oldMessages);
+      if (NS_FAILED(rv)) break;
+      if (oldMessages != numMessages) {
+        changed = true;
+        m_dbFolderInfo->ChangeNumMessages(numMessages - oldMessages);
+      }
+      rv = m_dbFolderInfo->GetNumUnreadMessages(&oldUnreadMessages);
+      if (NS_FAILED(rv)) break;
+      if (oldUnreadMessages != numUnreadMessages) {
+        changed = true;
+        m_dbFolderInfo->ChangeNumUnreadMessages(numUnreadMessages -
+                                                oldUnreadMessages);
+      }
+    } while (false);
+  }
+
+  // Persist any corrections we made.
+  if (changed) Commit(nsMsgDBCommitType::kLargeCommit);
+
+  return rv;
+}
+
+// When a message with an offline copy is deleted, account for its offline
+// size in the folder's expunged-bytes total.
+nsresult nsNewsDatabase::AdjustExpungedBytesOnDelete(nsIMsgDBHdr* msgHdr) {
+  uint32_t msgFlags;
+  msgHdr->GetFlags(&msgFlags);
+  if (msgFlags & nsMsgMessageFlags::Offline && m_dbFolderInfo) {
+    uint32_t size = 0;
+    // Best effort: a failed size lookup just leaves size at 0.
+    (void)msgHdr->GetOfflineMessageSize(&size);
+    return m_dbFolderInfo->ChangeExpungedBytes(size);
+  }
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsNewsDatabase::GetDefaultViewFlags(
+    nsMsgViewFlagsTypeValue* aDefaultViewFlags) {
+  NS_ENSURE_ARG_POINTER(aDefaultViewFlags);
+  GetIntPref("mailnews.default_news_view_flags", aDefaultViewFlags);
+  // Fall back to threaded display if the pref holds an out-of-range value.
+  if (*aDefaultViewFlags < nsMsgViewFlagsType::kNone ||
+      *aDefaultViewFlags >
+          (nsMsgViewFlagsType::kThreadedDisplay |
+           nsMsgViewFlagsType::kShowIgnored | nsMsgViewFlagsType::kUnreadOnly |
+           nsMsgViewFlagsType::kExpandAll | nsMsgViewFlagsType::kGroupBySort))
+    *aDefaultViewFlags = nsMsgViewFlagsType::kThreadedDisplay;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsNewsDatabase::GetDefaultSortType(nsMsgViewSortTypeValue* aDefaultSortType) {
+  NS_ENSURE_ARG_POINTER(aDefaultSortType);
+  GetIntPref("mailnews.default_news_sort_type", aDefaultSortType);
+  // Fall back to sorting by thread if the pref holds an out-of-range value.
+  if (*aDefaultSortType < nsMsgViewSortType::byDate ||
+      *aDefaultSortType > nsMsgViewSortType::byAccount)
+    *aDefaultSortType = nsMsgViewSortType::byThread;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsNewsDatabase::GetDefaultSortOrder(
+    nsMsgViewSortOrderValue* aDefaultSortOrder) {
+  NS_ENSURE_ARG_POINTER(aDefaultSortOrder);
+  GetIntPref("mailnews.default_news_sort_order", aDefaultSortOrder);
+  // Anything other than descending is normalized to ascending.
+  if (*aDefaultSortOrder != nsMsgViewSortOrder::descending)
+    *aDefaultSortOrder = nsMsgViewSortOrder::ascending;
+  return NS_OK;
+}
diff --git a/comm/mailnews/db/msgdb/test/moz.build b/comm/mailnews/db/msgdb/test/moz.build
new file mode 100644
index 0000000000..6b37fdbe09
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/moz.build
@@ -0,0 +1,6 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+XPCSHELL_TESTS_MANIFESTS += ["unit/xpcshell.ini"]
diff --git a/comm/mailnews/db/msgdb/test/unit/head_maildb.js b/comm/mailnews/db/msgdb/test/unit/head_maildb.js
new file mode 100644
index 0000000000..5b52dbb304
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/head_maildb.js
@@ -0,0 +1,21 @@
+// Shared setup for the msgdb xpcshell tests: import the helper modules the
+// tests in this directory rely on. `var` is used on purpose so the bindings
+// land on the shared global the individual test files run in.
+var { MailServices } = ChromeUtils.import(
+  "resource:///modules/MailServices.jsm"
+);
+var { XPCOMUtils } = ChromeUtils.importESModule(
+  "resource://gre/modules/XPCOMUtils.sys.mjs"
+);
+var { mailTestUtils } = ChromeUtils.import(
+  "resource://testing-common/mailnews/MailTestUtils.jsm"
+);
+var { localAccountUtils } = ChromeUtils.import(
+  "resource://testing-common/mailnews/LocalAccountUtils.jsm"
+);
+
+var CC = Components.Constructor;
+
+// Ensure the profile directory is set up
+do_get_profile();
+
+// Give the mailnews shutdown code a chance to run after each test.
+registerCleanupFunction(function () {
+  load("../../../../../mailnews/resources/mailShutdown.js");
+});
diff --git a/comm/mailnews/db/msgdb/test/unit/test_enumerator_cleanup.js b/comm/mailnews/db/msgdb/test/unit/test_enumerator_cleanup.js
new file mode 100644
index 0000000000..dfb5aa5285
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/test_enumerator_cleanup.js
@@ -0,0 +1,56 @@
+/*
+ * Test nsMsgDatabase's cleanup of nsMsgDBEnumerators
+ */
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+var anyOldMessage = do_get_file("../../../../data/bugmail1");
+
+/**
+ * Test closing a db with an outstanding enumerator.
+ */
+function test_enumerator_cleanup() {
+  let db = localAccountUtils.inboxFolder.msgDatabase;
+  let enumerator = db.enumerateMessages();
+  // Force the database closed while `enumerator` is still alive.
+  Cc["@mozilla.org/msgDatabase/msgDBService;1"]
+    .getService(Ci.nsIMsgDBService)
+    .forceFolderDBClosed(localAccountUtils.inboxFolder);
+  localAccountUtils.inboxFolder.msgDatabase = null;
+  db = null;
+  gc();
+  // Draining the enumerator after the db went away must not crash.
+  [...enumerator];
+  do_test_finished();
+}
+
+/*
+ * This infrastructure down here exists just to get
+ * test_references_header_parsing its message header.
+ */
+
+function run_test() {
+  localAccountUtils.loadLocalMailAccount();
+  do_test_pending();
+  // Copy a message into the inbox; the listener kicks off the real test
+  // once the copy completes.
+  MailServices.copy.copyFileMessage(
+    anyOldMessage,
+    localAccountUtils.inboxFolder,
+    null,
+    false,
+    0,
+    "",
+    messageHeaderGetterListener,
+    null
+  );
+  return true;
+}
+
+// Copy listener that defers to test_enumerator_cleanup once the copy is done.
+var messageHeaderGetterListener = {
+  OnStartCopy() {},
+  OnProgress(aProgress, aProgressMax) {},
+  GetMessageId(aMessageId) {},
+  SetMessageKey(aKey) {},
+  OnStopCopy(aStatus) {
+    do_timeout(0, test_enumerator_cleanup);
+  },
+};
diff --git a/comm/mailnews/db/msgdb/test/unit/test_filter_enumerator.js b/comm/mailnews/db/msgdb/test/unit/test_filter_enumerator.js
new file mode 100644
index 0000000000..914b5afd29
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/test_filter_enumerator.js
@@ -0,0 +1,100 @@
+/* import-globals-from ../../../../test/resources/MessageGenerator.jsm */
+load("../../../../resources/MessageGenerator.jsm");
+
+var gMessages = [];
+
+const kSetCount = 13;
+const kNumExpectedMatches = 10;
+
+function setupGlobals() {
+ localAccountUtils.loadLocalMailAccount();
+ // Create a message generator
+ let messageGenerator = new MessageGenerator();
+ let localInbox = localAccountUtils.inboxFolder.QueryInterface(
+ Ci.nsIMsgLocalMailFolder
+ );
+
+ for (let i = 0; i < kSetCount; i++) {
+ let message = messageGenerator.makeMessage();
+ gMessages.push(message);
+ localInbox.addMessage(message.toMboxString());
+ }
+}
+
+function run_test() {
+  setupGlobals();
+  do_test_pending();
+  let inboxDB = localAccountUtils.inboxFolder.msgDatabase;
+
+  // give messages 1,3,5 gloda-ids. These won't end up in our search hits.
+  let msgHdr1 = inboxDB.getMsgHdrForMessageID(gMessages[0].messageId);
+  msgHdr1.setUint32Property("gloda-id", 11111);
+  let msgHdr3 = inboxDB.getMsgHdrForMessageID(gMessages[2].messageId);
+  msgHdr3.setUint32Property("gloda-id", 33333);
+  let msgHdr5 = inboxDB.getMsgHdrForMessageID(gMessages[4].messageId);
+  msgHdr5.setUint32Property("gloda-id", 5555);
+  // set up a search term array that will give us the array of messages
+  // that gloda should index, as defined by this function:
+  let searchSession = Cc[
+    "@mozilla.org/messenger/searchSession;1"
+  ].createInstance(Ci.nsIMsgSearchSession);
+  let searchTerms = [];
+
+  searchSession.addScopeTerm(
+    Ci.nsMsgSearchScope.offlineMail,
+    localAccountUtils.inboxFolder
+  );
+  let searchTerm = searchSession.createTerm();
+
+  // Create the following search term:
+  // (folderFlag & Mail && folderFlag != ImapBox) &&
+  // msg property.gloda-id isEmpty
+
+  searchTerm.beginsGrouping = true;
+  searchTerm.booleanAnd = true;
+  searchTerm.attrib = Ci.nsMsgSearchAttrib.FolderFlag;
+  searchTerm.op = Ci.nsMsgSearchOp.Is;
+  let value = searchTerm.value;
+  value.status = Ci.nsMsgFolderFlags.Mail;
+  value.attrib = Ci.nsMsgSearchAttrib.FolderFlag;
+  searchTerm.value = value;
+  searchTerms.push(searchTerm);
+
+  searchTerm = searchSession.createTerm();
+  searchTerm.booleanAnd = true;
+  searchTerm.attrib = Ci.nsMsgSearchAttrib.FolderFlag;
+  searchTerm.op = Ci.nsMsgSearchOp.Isnt;
+  value = searchTerm.value;
+  value.status = Ci.nsMsgFolderFlags.ImapBox;
+  value.attrib = Ci.nsMsgSearchAttrib.FolderFlag;
+  searchTerm.value = value;
+  searchTerm.endsGrouping = true;
+  searchTerms.push(searchTerm);
+
+  searchTerm = searchSession.createTerm();
+  searchTerm.booleanAnd = true;
+  searchTerm.attrib = Ci.nsMsgSearchAttrib.HdrProperty;
+  searchTerm.hdrProperty = "gloda-id";
+  searchTerm.op = Ci.nsMsgSearchOp.IsEmpty;
+  value = searchTerm.value;
+  value.str = "gloda-id";
+  value.attrib = Ci.nsMsgSearchAttrib.HdrProperty;
+  searchTerm.value = value;
+  searchTerms.push(searchTerm);
+
+  // The three gloda-tagged messages above must be excluded from the matches.
+  let msgEnumerator = inboxDB.getFilterEnumerator(searchTerms);
+  let matchingHdrs = [...msgEnumerator];
+  Assert.equal(kNumExpectedMatches, matchingHdrs.length);
+  Assert.equal(matchingHdrs[0].messageId, gMessages[1].messageId);
+  Assert.equal(matchingHdrs[1].messageId, gMessages[3].messageId);
+
+  // try it backwards, with roller skates:
+  msgEnumerator = inboxDB.getFilterEnumerator(searchTerms, true);
+  matchingHdrs = [...msgEnumerator];
+  Assert.equal(kNumExpectedMatches, matchingHdrs.length);
+  Assert.equal(matchingHdrs[0].messageId, gMessages[12].messageId);
+  Assert.equal(matchingHdrs[1].messageId, gMessages[11].messageId);
+  Assert.equal(matchingHdrs[9].messageId, gMessages[1].messageId);
+
+  do_test_finished();
+}
diff --git a/comm/mailnews/db/msgdb/test/unit/test_mailTelemetry.js b/comm/mailnews/db/msgdb/test/unit/test_mailTelemetry.js
new file mode 100644
index 0000000000..c0bc034bad
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/test_mailTelemetry.js
@@ -0,0 +1,38 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/**
+ * Test telemetry related to mails read.
+ */
+
+let { TelemetryTestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/TelemetryTestUtils.sys.mjs"
+);
+
+/**
+ * Check that we're counting mails read.
+ */
+add_task(async function test_mails_read() {
+  Services.telemetry.clearScalars();
+
+  localAccountUtils.loadLocalMailAccount();
+
+  // Seed the inbox with NUM_MAILS messages, mark them all read, then check
+  // the tb.mails.read scalar counted every one of them.
+  const NUM_MAILS = 5;
+  let headers =
+    "from: alice@t1.example.com\r\n" +
+    "to: bob@t2.example.net\r\n" +
+    "return-path: alice@t1.example.com\r\n" +
+    "Disposition-Notification-To: alice@t1.example.com\r\n";
+  for (let i = 0; i < NUM_MAILS; i++) {
+    localAccountUtils.inboxFolder.addMessage(
+      "From \r\n" + headers + "\r\nhello\r\n"
+    );
+  }
+  localAccountUtils.inboxFolder.markAllMessagesRead(null);
+  const scalars = TelemetryTestUtils.getProcessScalars("parent");
+  Assert.equal(
+    scalars["tb.mails.read"],
+    NUM_MAILS,
+    "Count of mails read must be correct."
+  );
+});
diff --git a/comm/mailnews/db/msgdb/test/unit/test_maildb.js b/comm/mailnews/db/msgdb/test/unit/test_maildb.js
new file mode 100644
index 0000000000..9b6bca9303
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/test_maildb.js
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * Test suite for msg database functions.
+ */
+
+/* import-globals-from ../../../../test/resources/MessageGenerator.jsm */
+load("../../../../resources/MessageGenerator.jsm");
+
+var dbService;
+var gTestFolder;
+var gCurTestNum = 0;
+var kNumTestMessages = 10;
+
+var gTestArray = [
+  // Open a fresh folder DB, persist a high water mark, reopen and verify it,
+  // then check that onKeyAdded() advances the mark.
+  function test_db_open() {
+    dbService = Cc["@mozilla.org/msgDatabase/msgDBService;1"].getService(
+      Ci.nsIMsgDBService
+    );
+    // Get the root folder
+    let root = localAccountUtils.incomingServer.rootFolder;
+    root.createSubfolder("dbTest", null);
+    gTestFolder = root.getChildNamed("dbTest");
+    let db = dbService.openFolderDB(gTestFolder, true);
+    Assert.notEqual(db, null);
+    db.dBFolderInfo.highWater = 10;
+    db.close(true);
+    db = dbService.openFolderDB(gTestFolder, true);
+    Assert.notEqual(db, null);
+    Assert.equal(db.dBFolderInfo.highWater, 10);
+    db.dBFolderInfo.onKeyAdded(15);
+    Assert.equal(db.dBFolderInfo.highWater, 15);
+    db.close(true);
+    db.forceClosed();
+    db = null;
+    // Advance to the next test (or finish).
+    doTest(++gCurTestNum);
+  },
+];
+
+function doTest(test) {
+ if (test <= gTestArray.length) {
+ dump("Doing test " + test + "\n");
+ gCurTestNum = test;
+
+ var testFn = gTestArray[test - 1];
+ // Set a limit of 10 seconds; if the notifications haven't arrived by then there's a problem.
+ do_timeout(10000, function () {
+ if (gCurTestNum == test) {
+ do_throw(
+ "Notifications not received in 10000 ms for operation " + testFn.name
+ );
+ }
+ });
+ try {
+ testFn();
+ } catch (ex) {
+ do_throw(ex);
+ }
+ } else {
+ do_test_finished(); // for the one in run_test()
+ }
+}
+
+function run_test() {
+  localAccountUtils.loadLocalMailAccount();
+  // Matched by the do_test_finished() call in doTest() after the last test.
+  do_test_pending();
+  doTest(1);
+}
diff --git a/comm/mailnews/db/msgdb/test/unit/test_propertyEnumerator.js b/comm/mailnews/db/msgdb/test/unit/test_propertyEnumerator.js
new file mode 100644
index 0000000000..57fb2605bd
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/test_propertyEnumerator.js
@@ -0,0 +1,66 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// tests properties in nsIMsgDBHdr;
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+var gHdr;
+
+function run_test() {
+  localAccountUtils.loadLocalMailAccount();
+  // Get a message into the local filestore.
+  // Function continue_test() continues the testing after the copy.
+  var bugmail1 = do_get_file("../../../../data/bugmail1");
+  // Matched by the do_test_finished() call at the end of continue_test().
+  do_test_pending();
+  MailServices.copy.copyFileMessage(
+    bugmail1,
+    localAccountUtils.inboxFolder,
+    null,
+    false,
+    0,
+    "",
+    copyListener,
+    null
+  );
+}
+
+// Copy listener: remembers the new message's header, then continues the test.
+var copyListener = {
+  OnStartCopy() {},
+  OnProgress(aProgress, aProgressMax) {},
+  SetMessageKey(aKey) {
+    gHdr = localAccountUtils.inboxFolder.GetMessageHeader(aKey);
+  },
+  SetMessageId(aMessageId) {},
+  OnStopCopy(aStatus) {
+    continue_test();
+  },
+};
+
+function continue_test() {
+ // test some of the default properties
+ let properties = gHdr.properties;
+ Assert.ok(properties.includes("flags"));
+ Assert.ok(properties.includes("size"));
+ // this will be added in the next section, but does not exist yet
+ Assert.ok(!properties.includes("iamnew"));
+
+ // add a new property, and make sure that it appears
+ gHdr.setStringProperty("iamnew", "somevalue");
+
+ properties = [];
+ for (let property of gHdr.properties) {
+ // dump("\nProperty 2 is " + property);
+ properties.push(property);
+ }
+ Assert.ok(properties.includes("flags"));
+ Assert.ok(properties.includes("size"));
+ Assert.ok(properties.includes("iamnew"));
+ Assert.ok(!properties.includes("idonotexist"));
+
+ gHdr = null;
+ do_test_finished();
+}
diff --git a/comm/mailnews/db/msgdb/test/unit/test_references_parsing.js b/comm/mailnews/db/msgdb/test/unit/test_references_parsing.js
new file mode 100644
index 0000000000..fdfa76dd6d
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/test_references_parsing.js
@@ -0,0 +1,124 @@
+/*
+ * Test nsMsgHdr's In-Reply-To/References parsing logic.
+ */
+
+var { MailServices } = ChromeUtils.import(
+ "resource:///modules/MailServices.jsm"
+);
+
+var anyOldMessage = do_get_file("../../../../data/bugmail1");
+
+/**
+ * Table of [referencesHeaderValue, expectedParsedReferences] pairs fed to
+ * nsIMsgDBHdr.setReferences() by test_references_header_parsing() below.
+ */
+var refsAndResults = [
+  // an empty string is not a reference.
+  ["", []],
+  // super valid things
+  ["<abc@def>", ["abc@def"]],
+  [
+    "<up@down> <left@right> <ying@yang>",
+    ["up@down", "left@right", "ying@yang"],
+  ],
+  // whitespace type things
+  [" ", []],
+  [" <left@space>", ["left@space"]],
+  ["<space@right> ", ["space@right"]],
+  [" <space@space> ", ["space@space"]],
+  ["\t<tab@tab>\t", ["tab@tab"]],
+  ["<a@b>\n\t<tab@newline.n>", ["a@b", "tab@newline.n"]],
+  ["<a@b>\r\t<tab@newline.r>", ["a@b", "tab@newline.r"]],
+  ["<a@b>\n\t<tab@newline.nr>", ["a@b", "tab@newline.nr"]],
+  [
+    "<a@1>\n<a@2> <a@3>\t <a@4>\n <a@5>\r\t<a@6>\r\n <a@7>\r\n\t ",
+    ["a@1", "a@2", "a@3", "a@4", "a@5", "a@6", "a@7"],
+  ],
+  // be backwards compatible with old-school things that make some sense
+  ["i am a stupid message-id", ["i am a stupid message-id"]],
+  [" those were spaces!", ["those were spaces!"]],
+  // be backwards compatible with things that make no sense
+  [" seriously\n who does this?", ["seriously\n who does this?"]],
+  // handle things we used to be stupid about
+  ["<z@1a> was an awesome message!", ["z@1a"]],
+  [" <z@1b> was an awesomer message!", ["z@1b"]],
+  ["I can't get enough of <z@2a>", ["z@2a"]],
+  [" nor of I can enough get <z@2b> ", ["z@2b"]],
+  ["let's talk about <z@3a> shall we", ["z@3a"]],
+  ["and then let us speak of <z@3b> and its\n many points", ["z@3b"]],
+  // be backwards compatible with things that just seem malicious
+  [" 4 < 5", ["4 < 5"]],
+  [" 6 > 3", ["6 > 3"]],
+  [" look ma!\n newlines!", ["look ma!\n newlines!"]],
+];
+
+/**
+ * Parse the references in refsAndResults and ensure their references match
+ * the corresponding results.
+ *
+ * @param {nsIMsgDBHdr} aMsgHdr - A message header that you don't mind if we
+ * mess with.
+ */
+function test_references_header_parsing(aMsgHdr) {
+ var iCase, iResult, refString, results;
+ for (iCase = 0; iCase < refsAndResults.length; iCase++) {
+ refString = refsAndResults[iCase][0];
+ results = refsAndResults[iCase][1];
+
+ dump("Setting references to: '" + refString + "'\n");
+ aMsgHdr.setReferences(refString);
+ if (aMsgHdr.numReferences != results.length) {
+ dump("Length mismatch! Was expecting:\n");
+ for (iResult = 0; iResult < results.length; iResult++) {
+ dump("'" + results[iResult] + "'\n");
+ }
+
+ dump("Got:\n");
+
+ for (iResult = 0; iResult < aMsgHdr.numReferences; iResult++) {
+ dump("'" + aMsgHdr.getStringReference(iResult) + "'\n");
+ }
+
+ Assert.equal(aMsgHdr.numReferences, results.length);
+ }
+
+ for (iResult = 0; iResult < results.length; iResult++) {
+ Assert.equal(aMsgHdr.getStringReference(iResult), results[iResult]);
+ }
+ }
+
+ do_test_finished();
+}
+
+/*
+ * This infrastructure down here exists just to get
+ * test_references_header_parsing its message header.
+ */
+
+function run_test() {
+  localAccountUtils.loadLocalMailAccount();
+  do_test_pending();
+  // Copy any message into the inbox; the listener below runs the actual
+  // parsing tests against its header once the copy completes.
+  MailServices.copy.copyFileMessage(
+    anyOldMessage,
+    localAccountUtils.inboxFolder,
+    null,
+    false,
+    0,
+    "",
+    messageHeaderGetterListener,
+    null
+  );
+  return true;
+}
+
+// Copy listener which records the new message's key and hands its header to
+// test_references_header_parsing once the copy finishes.
+var messageHeaderGetterListener = {
+  msgKey: null,
+
+  OnStartCopy() {},
+  OnProgress(aProgress, aProgressMax) {},
+  GetMessageId(aMessageId) {},
+  SetMessageKey(aKey) {
+    this.msgKey = aKey;
+  },
+  OnStopCopy(aStatus) {
+    test_references_header_parsing(
+      localAccountUtils.inboxFolder.GetMessageHeader(this.msgKey)
+    );
+  },
+};
diff --git a/comm/mailnews/db/msgdb/test/unit/xpcshell.ini b/comm/mailnews/db/msgdb/test/unit/xpcshell.ini
new file mode 100644
index 0000000000..d32c984408
--- /dev/null
+++ b/comm/mailnews/db/msgdb/test/unit/xpcshell.ini
@@ -0,0 +1,10 @@
+[DEFAULT]
+head = head_maildb.js
+tail =
+
+[test_enumerator_cleanup.js]
+[test_filter_enumerator.js]
+[test_maildb.js]
+[test_mailTelemetry.js]
+[test_propertyEnumerator.js]
+[test_references_parsing.js]