Diffstat (limited to 'dom/quota')
-rw-r--r--  dom/quota/ActorsParent.cpp | 48
-rw-r--r--  dom/quota/QuotaManager.h | 6
-rw-r--r--  dom/quota/StorageOriginAttributes.cpp | 39
-rwxr-xr-x  dom/quota/scripts/analyze_qm_failures.py | 137
-rwxr-xr-x  dom/quota/scripts/fetch_qm_failures.py | 142
-rw-r--r--  dom/quota/scripts/qm-try-analysis/.containerignore | 5
-rw-r--r--  dom/quota/scripts/qm-try-analysis/Containerfile | 24
-rw-r--r--  dom/quota/scripts/qm-try-analysis/README.md | 104
-rw-r--r--  dom/quota/scripts/qm-try-analysis/poetry.lock | 283
-rw-r--r--  dom/quota/scripts/qm-try-analysis/pyproject.toml | 25
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/__init__.py (renamed from dom/quota/test/marionette/dummy.py) | 0
-rwxr-xr-x  dom/quota/scripts/qm-try-analysis/qm_try_analysis/analyze.py | 150
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/cli.py | 22
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/fetch.py | 137
-rwxr-xr-x  dom/quota/scripts/qm-try-analysis/qm_try_analysis/fetch_fn_names.sh (renamed from dom/quota/scripts/fetch_fn_names.sh) | 4
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/fn_anchors.py (renamed from dom/quota/scripts/fn_anchors.py) | 14
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/logging.py | 21
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/report.py | 266
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/stackanalysis.py (renamed from dom/quota/scripts/stackanalysis.py) | 0
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/telemetry.py (renamed from dom/quota/scripts/telemetry.py) | 30
-rw-r--r--  dom/quota/scripts/qm-try-analysis/qm_try_analysis/utils.py (renamed from dom/quota/scripts/utils.py) | 8
-rw-r--r--  dom/quota/scripts/qm-try-analysis/tests/test_reporting.py | 96
-rw-r--r--  dom/quota/test/marionette/manifest.toml | 3
-rw-r--r--  dom/quota/test/marionette/quota_test_case.py | 180
-rw-r--r--  dom/quota/test/marionette/test_private_repository_cleanup.py | 94
25 files changed, 1466 insertions, 372 deletions
diff --git a/dom/quota/ActorsParent.cpp b/dom/quota/ActorsParent.cpp
index 451d55a450..dc6b49055f 100644
--- a/dom/quota/ActorsParent.cpp
+++ b/dom/quota/ActorsParent.cpp
@@ -1555,6 +1555,14 @@ QuotaManager::Observer::Observe(nsISupports* aSubject, const char* aTopic,
return rv;
}
+#ifdef XP_WIN
+ // Annotate if our profile lives on a network resource.
+ bool isNetworkPath = PathIsNetworkPathW(gBasePath->get());
+ CrashReporter::RecordAnnotationBool(
+ CrashReporter::Annotation::QuotaManagerStorageIsNetworkResource,
+ isNetworkPath);
+#endif
+
gStorageName = new nsString();
rv = Preferences::GetString("dom.quotaManager.storageName", *gStorageName);
@@ -2380,13 +2388,25 @@ void QuotaManager::Shutdown() {
ScopedLogExtraInfo scope{ScopedLogExtraInfo::kTagContextTainted,
"dom::quota::QuotaManager::Shutdown"_ns};
+  // We must always ensure that Firefox does not shut down with a private
+  // repository still on disk. Private repositories are ideally cleaned up at
+  // the end of a PBM session but, in some cases such as PBM autostart (i.e.
+  // browser.privatebrowsing.autostart), they can only be cleaned up on
+  // shutdown. ClearPrivateRepository below runs an async op, and it is better
+  // to run it before ShutdownStorageOp, since the latter expects all cleanup
+  // operations to be done by that point. We don't need to use the returned
+  // promise here because `ClearPrivateRepository` registers the underlying
+  // `ClearPrivateRepositoryOp` in `gNormalOriginOps`.
+ ClearPrivateRepository();
+
// This must be called before `flagShutdownStarted`, it would fail otherwise.
// `ShutdownStorageOp` needs to acquire an exclusive directory lock over
// entire <profile>/storage which will abort any existing operations and wait
// for all existing directory locks to be released. So the shutdown operation
// will effectively run after all existing operations.
- // We don't need to use the returned promise here because `ShutdownStorage`
- // registers `ShudownStorageOp` in `gNormalOriginOps`.
+  // Similar to the ClearPrivateRepository operation above, ShutdownStorageOp
+  // also registers its operation in `gNormalOriginOps`, so we don't need to
+  // use the returned promise.
ShutdownStorage();
flagShutdownStarted();
@@ -3206,6 +3226,15 @@ Result<nsCOMPtr<nsIFile>, nsresult> QuotaManager::GetOriginDirectory(
return directory;
}
+Result<bool, nsresult> QuotaManager::DoesOriginDirectoryExist(
+ const OriginMetadata& aOriginMetadata) const {
+ AssertIsOnIOThread();
+
+ QM_TRY_INSPECT(const auto& directory, GetOriginDirectory(aOriginMetadata));
+
+ QM_TRY_RETURN(MOZ_TO_RESULT_INVOKE_MEMBER(directory, Exists));
+}
+
// static
nsresult QuotaManager::CreateDirectoryMetadata(
nsIFile& aDirectory, int64_t aTimestamp,
@@ -3516,6 +3545,18 @@ Result<Ok, nsresult> QuotaManager::RemoveOriginDirectory(nsIFile& aDirectory) {
toBeRemovedStorageDir, NSID_TrimBracketsUTF16(nsID::GenerateUUID()))));
}
+Result<bool, nsresult> QuotaManager::DoesClientDirectoryExist(
+ const ClientMetadata& aClientMetadata) const {
+ AssertIsOnIOThread();
+
+ QM_TRY_INSPECT(const auto& directory, GetOriginDirectory(aClientMetadata));
+
+ QM_TRY(MOZ_TO_RESULT(
+ directory->Append(Client::TypeToString(aClientMetadata.mClientType))));
+
+ QM_TRY_RETURN(MOZ_TO_RESULT_INVOKE_MEMBER(directory, Exists));
+}
+
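+// For illustration only (this sketch is not part of any caller in this
+// patch): code running on the IO thread could consume these helpers like
+//
+//   QM_TRY_INSPECT(const bool& exists,
+//                  quotaManager->DoesOriginDirectoryExist(originMetadata));
+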
template <typename OriginFunc>
nsresult QuotaManager::InitializeRepository(PersistenceType aPersistenceType,
OriginFunc&& aOriginFunc) {
@@ -7638,11 +7679,10 @@ Result<bool, nsresult> UpgradeStorageFrom1_0To2_0Helper::MaybeRemoveAppsData(
if (!URLParams::Parse(
Substring(originalSuffix, 1, originalSuffix.Length() - 1), true,
- [](const nsAString& aName, const nsAString& aValue) {
+ [](const nsACString& aName, const nsACString& aValue) {
if (aName.EqualsLiteral("appId")) {
return false;
}
-
return true;
})) {
QM_TRY(MOZ_TO_RESULT(RemoveObsoleteOrigin(aOriginProps)));
diff --git a/dom/quota/QuotaManager.h b/dom/quota/QuotaManager.h
index 354977166a..25c069810d 100644
--- a/dom/quota/QuotaManager.h
+++ b/dom/quota/QuotaManager.h
@@ -243,6 +243,9 @@ class QuotaManager final : public BackgroundThreadObject {
Result<nsCOMPtr<nsIFile>, nsresult> GetOriginDirectory(
const OriginMetadata& aOriginMetadata) const;
+ Result<bool, nsresult> DoesOriginDirectoryExist(
+ const OriginMetadata& aOriginMetadata) const;
+
static nsresult CreateDirectoryMetadata(
nsIFile& aDirectory, int64_t aTimestamp,
const OriginMetadata& aOriginMetadata);
@@ -265,6 +268,9 @@ class QuotaManager final : public BackgroundThreadObject {
Result<Ok, nsresult> RemoveOriginDirectory(nsIFile& aDirectory);
+ Result<bool, nsresult> DoesClientDirectoryExist(
+ const ClientMetadata& aClientMetadata) const;
+
RefPtr<UniversalDirectoryLockPromise> OpenStorageDirectory(
const Nullable<PersistenceType>& aPersistenceType,
const OriginScope& aOriginScope,
diff --git a/dom/quota/StorageOriginAttributes.cpp b/dom/quota/StorageOriginAttributes.cpp
index bcdf47bce8..b5fd68407a 100644
--- a/dom/quota/StorageOriginAttributes.cpp
+++ b/dom/quota/StorageOriginAttributes.cpp
@@ -17,18 +17,17 @@ void StorageOriginAttributes::CreateSuffix(nsACString& aStr) const {
nsCString str1;
URLParams params;
- nsAutoString value;
+ nsAutoCString value;
if (mInIsolatedMozBrowser) {
- params.Set(u"inBrowser"_ns, u"1"_ns);
+ params.Set("inBrowser"_ns, "1"_ns);
}
str1.Truncate();
-
params.Serialize(value, true);
if (!value.IsEmpty()) {
str1.AppendLiteral("^");
- str1.Append(NS_ConvertUTF16toUTF8(value));
+ str1.Append(value);
}
// Make sure that the string don't contain characters that would get replaced
@@ -67,22 +66,22 @@ bool StorageOriginAttributes::PopulateFromSuffix(const nsACString& aStr) {
return false;
}
- bool ok =
- URLParams::Parse(Substring(aStr, 1, aStr.Length() - 1), true,
- [this](const nsAString& aName, const nsAString& aValue) {
- if (aName.EqualsLiteral("inBrowser")) {
- if (!aValue.EqualsLiteral("1")) {
- return false;
- }
-
- mInIsolatedMozBrowser = true;
- return true;
- }
-
- // Let OriginAttributes::PopulateFromSuffix parse other
- // origin attributes.
- return true;
- });
+ bool ok = URLParams::Parse(
+ Substring(aStr, 1, aStr.Length() - 1), true,
+ [this](const nsACString& aName, const nsACString& aValue) {
+ if (aName.EqualsLiteral("inBrowser")) {
+ if (!aValue.EqualsLiteral("1")) {
+ return false;
+ }
+
+ mInIsolatedMozBrowser = true;
+ return true;
+ }
+
+ // Let OriginAttributes::PopulateFromSuffix parse other
+ // origin attributes.
+ return true;
+ });
if (!ok) {
return false;
}
diff --git a/dom/quota/scripts/analyze_qm_failures.py b/dom/quota/scripts/analyze_qm_failures.py
deleted file mode 100755
index f4afd64ab4..0000000000
--- a/dom/quota/scripts/analyze_qm_failures.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python3
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import getopt
-import sys
-
-import fn_anchors
-import stackanalysis
-import utils
-
-"""
-The analysis is based on stack frames of the following form:
-
-[
- {
- "event_timeabs": 1617121013137,
- "session_startabs": 1617120840000,
- "build_id": "20210329095128",
- "client_id": "0013a68f-9893-461a-93d4-2d7a2f85583f",
- "session_id": "8cd37159-bd5c-481c-99ad-9eace9ea726a",
- "seq": 1,
- "context": "Initialization::TemporaryStorage",
- "source_file": "dom/localstorage/ActorsParent.cpp",
- "source_line": "1018",
- "severity": "ERROR",
- "result": "NS_ERROR_FILE_NOT_FOUND"
- },
-...
-]
-
-The location of the input file is expected to be found in the
-last item of the list inside qmexecutions.json.
-"""
-
-
-def usage():
- print("analyze_qm_faiures.py -w <workdir=.>")
- print("")
- print("Analyzes the results from fetch_qm_failures.py's JSON file.")
- print(
- "Writes out several JSON results as files and a bugzilla markup table on stdout."
- )
- print("-w <workdir>: Working directory, default is '.'")
- sys.exit(2)
-
-
-days = 1
-workdir = "."
-
-try:
- opts, args = getopt.getopt(sys.argv[1:], "w:", ["workdir="])
- for opt, arg in opts:
- if opt == "-w":
- workdir = arg
-except getopt.GetoptError:
- usage()
-
-run = utils.getLastRunFromExecutionFile(workdir)
-if "numrows" not in run:
- print("No previous execution from fetch_qm_failures.py found.")
- usage()
-if run["numrows"] == 0:
- print("The last execution yielded no result.")
-
-infile = run["rawfile"]
-
-
-def getFname(prefix):
- return "{}/{}_until_{}.json".format(workdir, prefix, run["lasteventtime"])
-
-
-# read rows from JSON
-rows = utils.readJSONFile(getFname("qmrows"))
-print("Found {} rows of data.".format(len(rows)))
-rows = stackanalysis.sanitize(rows)
-
-# enrich rows with hg locations
-buildids = stackanalysis.extractBuildIDs(rows)
-utils.fetchBuildRevisions(buildids)
-stackanalysis.constructHGLinks(buildids, rows)
-
-# transform rows to unique stacks
-raw_stacks = stackanalysis.collectRawStacks(rows)
-all_stacks = stackanalysis.mergeEqualStacks(raw_stacks)
-
-# enrich with function anchors
-for stack in all_stacks:
- for frame in stack["frames"]:
- frame["anchor"] = "{}:{}".format(
- frame["source_file"], fn_anchors.getFunctionName(frame["location"])
- )
-
-# separate stacks for relevance
-error_stacks = []
-warn_stacks = []
-info_stacks = []
-abort_stacks = []
-stackanalysis.filterStacksForPropagation(
- all_stacks, error_stacks, warn_stacks, info_stacks, abort_stacks
-)
-run["errorfile"] = getFname("qmerrors")
-utils.writeJSONFile(run["errorfile"], error_stacks)
-run["warnfile"] = getFname("qmwarnings")
-utils.writeJSONFile(run["warnfile"], warn_stacks)
-run["infofile"] = getFname("qminfo")
-utils.writeJSONFile(run["infofile"], info_stacks)
-run["abortfile"] = getFname("qmabort")
-utils.writeJSONFile(run["abortfile"], abort_stacks)
-utils.updateLastRunToExecutionFile(workdir, run)
-
-
-# print results to stdout
-print("Found {} error stacks.".format(len(error_stacks)))
-print("Found {} warning stacks.".format(len(warn_stacks)))
-print("Found {} info stacks.".format(len(info_stacks)))
-print("Found {} aborted stacks.".format(len(abort_stacks)))
-print("")
-print("Error stacks:")
-print(stackanalysis.printStacks(error_stacks))
-print("")
-print("Error stacks grouped by anchors:")
-anchors = stackanalysis.groupStacksForAnchors(error_stacks)
-anchornames = list(anchors.keys())
-for a in anchornames:
- print(stackanalysis.printStacks(anchors[a]["stacks"]))
- print("")
-print("")
-print("Warning stacks:")
-print(stackanalysis.printStacks(warn_stacks))
-print("")
-print("Info stacks:")
-print(stackanalysis.printStacks(info_stacks))
-print("")
-print("Aborted stacks:")
-print(stackanalysis.printStacks(abort_stacks))
diff --git a/dom/quota/scripts/fetch_qm_failures.py b/dom/quota/scripts/fetch_qm_failures.py
deleted file mode 100755
index 546b213582..0000000000
--- a/dom/quota/scripts/fetch_qm_failures.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env python3
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import getopt
-import sys
-
-import telemetry
-import utils
-
-"""
-The analysis is based on the following query:
-https://sql.telemetry.mozilla.org/queries/78691/source?p_day=28&p_month=03&p_year=2021
-
-SELECT UNIX_MILLIS(timestamp) AS submit_timeabs,
- session_start_time,
- submission_date,
- build_id,
- client_id,
- session_id,
- event_timestamp,
- CAST(mozfun.map.get_key(event_map_values, "seq") AS INT64) AS seq,
- mozfun.map.get_key(event_map_values, "context") AS context,
- mozfun.map.get_key(event_map_values, "source_file") AS source_file,
- mozfun.map.get_key(event_map_values, "source_line") AS source_line,
- mozfun.map.get_key(event_map_values, "severity") AS severity,
- mozfun.map.get_key(event_map_values, "result") AS result,
-FROM telemetry.events
-WHERE submission_date >= CAST('{{ year }}-{{ month }}-{{ day }}' AS DATE)
- AND event_category='dom.quota.try'
- AND build_id >= '{{ build }}'
- AND UNIX_MILLIS(timestamp) > {{ last }}
-ORDER BY submit_timeabs
-LIMIT 600000
-
-We fetch events in chronological order, as we want to keep track of where we already
-arrived with our analysis. To accomplish this we write our runs into qmexecutions.json.
-
-[
- {
- "workdir": ".",
- "daysback": 1,
- "numrows": 17377,
- "lasteventtime": 1617303855145,
- "rawfile": "./qmrows_until_1617303855145.json"
- }
-]
-
-lasteventtime is the highest value of event_timeabs we found in our data.
-
-analyze_qm_failures instead needs the rows to be ordered by
-client_id, session_id, thread_id, submit_timeabs, seq
-Thus we sort the rows accordingly before writing them.
-"""
-
-
-def usage():
- print(
- "fetch_qm_faiures.py -k <apikey> -b <minimum build=20210329000000>"
- "-d <days back=1> -l <last event time> -w <workdir=.>"
- )
- print("")
- print("Invokes the query 78691 and stores the result in a JSON file.")
- print("-k <apikey>: Your personal telemetry API key (not the query key!).")
- print("-d <daysback>: Number of days to go back. Default is 1.")
- print("-b <minimum build>: The lowest build id we will fetch data for.")
- print("-l <last event time>: Fetch only events after this. Default is 0.")
- print("-w <workdir>: Working directory, default is '.'")
- sys.exit(2)
-
-
-days = 1
-lasteventtime = 0
-key = "undefined"
-workdir = "."
-minbuild = "20210329000000"
-
-try:
- opts, args = getopt.getopt(
- sys.argv[1:],
- "k:b:d:l:w:",
- ["key=", "build=", "days=", "lasteventtime=", "workdir="],
- )
- for opt, arg in opts:
- if opt == "-k":
- key = arg
- elif opt == "-d":
- days = int(arg)
- elif opt == "-l":
- lasteventtime = int(arg)
- elif opt == "-b":
- minbuild = arg
- elif opt == "-w":
- workdir = arg
-except getopt.GetoptError:
- usage()
-
-if key == "undefined":
- usage()
-
-start = utils.dateback(days)
-year = start.year
-month = start.month
-day = start.day
-
-run = {}
-lastrun = utils.getLastRunFromExecutionFile(workdir)
-if "lasteventtime" in lastrun:
- lasteventtime = lastrun["lasteventtime"]
-run["workdir"] = workdir
-run["daysback"] = days
-run["minbuild"] = minbuild
-
-p_params = "p_year={:04d}&p_month={:02d}&p_day={:02d}&p_build={}" "&p_last={}".format(
- year, month, day, minbuild, lasteventtime
-)
-print(p_params)
-result = telemetry.query(key, 78691, p_params)
-rows = result["query_result"]["data"]["rows"]
-run["numrows"] = len(rows)
-if run["numrows"] > 0:
- lasteventtime = telemetry.getLastEventTimeAbs(rows)
- run["lasteventtime"] = lasteventtime
- rows.sort(
- key=lambda row: "{}.{}.{}.{}.{:06d}".format(
- row["client_id"],
- row["session_id"],
- row["seq"] >> 32, # thread_id
- row["submit_timeabs"],
- row["seq"] & 0x00000000FFFFFFFF, # seq,
- ),
- reverse=False,
- )
- outfile = "{}/qmrows_until_{}.json".format(workdir, lasteventtime)
- utils.writeJSONFile(outfile, rows)
- run["rawfile"] = outfile
-else:
- print("No results found, maybe next time.")
- run["lasteventtime"] = lasteventtime
-
-utils.addNewRunToExecutionFile(workdir, run)
diff --git a/dom/quota/scripts/qm-try-analysis/.containerignore b/dom/quota/scripts/qm-try-analysis/.containerignore
new file mode 100644
index 0000000000..39b984a045
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/.containerignore
@@ -0,0 +1,5 @@
+.venv
+.gitignore
+.vscode
+output
+Containerfile
diff --git a/dom/quota/scripts/qm-try-analysis/Containerfile b/dom/quota/scripts/qm-try-analysis/Containerfile
new file mode 100644
index 0000000000..a0999b7557
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/Containerfile
@@ -0,0 +1,24 @@
+FROM rust:1.74 as build
+
+RUN git clone https://github.com/mozilla/rust-code-analysis && \
+ cd rust-code-analysis && \
+ git checkout 56f182ac570 && \
+ cargo build -p rust-code-analysis-cli --release -v
+
+FROM python:3.9.18-slim
+
+COPY --from=build /rust-code-analysis/target/release/rust-code-analysis-cli /usr/local/bin/
+
+RUN pip install --no-cache-dir poetry==1.7
+
+RUN useradd -ms /bin/bash scripts
+USER scripts
+
+WORKDIR /home/scripts/qm-try-analysis
+
+COPY poetry.lock pyproject.toml ./
+RUN poetry install --no-dev
+
+COPY ./ .
+
+ENTRYPOINT /bin/bash -c "poetry shell && exec /bin/bash -i"
diff --git a/dom/quota/scripts/qm-try-analysis/README.md b/dom/quota/scripts/qm-try-analysis/README.md
new file mode 100644
index 0000000000..cef1a6717c
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/README.md
@@ -0,0 +1,104 @@
+# QM_TRY Analysis Guide
+
+Welcome to the QM_TRY Analysis Guide!
+This document walks you through the process of setting up the environment for semi-automatic monitoring of QM_TRY failures within Mozilla.
+Follow these step-by-step instructions to ensure you have the necessary requirements before initiating the analysis.
+
+## Setup Instructions
+
+### 1. Clone mozilla-central
+
+Ensure you have a local clone of mozilla-central.
+If you don't have it yet, refer to [this link](https://firefox-source-docs.mozilla.org/contributing/contribution_quickref.html#bootstrap-a-copy-of-the-firefox-source-code).
+Building the project is not necessary for this setup.
+
+### 2. Install rust-code-analysis
+
+If not done already, set up Rust by visiting [rustup.rs](https://rustup.rs/).
+Once Rust is installed, install rust-code-analysis using the following command:
+
+```bash
+cargo install --git https://github.com/mozilla/rust-code-analysis --rev 56f182ac570
+```
+
+### 3. Obtain Telemetry API Key
+
+Obtain a Telemetry API Key by visiting [Telemetry API Key](https://sql.telemetry.mozilla.org/users/me).
+Save this key for later use in the analysis scripts.
+
+### 4. Obtain Bugzilla API Key
+
+Obtain your Bugzilla API Key from [Bugzilla User Preferences](https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey).
+
+### 5. Install Python
+
+Install Python if not already set up.
+
+### 6. Install Poetry and set up dependencies
+
+If you haven't installed Poetry, use the following commands to install it and set up the project:
+
+```bash
+pip install poetry
+cd mozilla-unified/dom/quota/scripts/qm-try-analysis
+poetry install
+```
+
+### Containerized setup
+
+To streamline the setup process, use [`Podman`](https://github.com/containers/podman?tab=readme-ov-file#podman-a-tool-for-managing-oci-containers-and-pods) with the provided `Containerfile`. Navigate to the relevant directory:
+
+```bash
+cd mozilla-unified/dom/quota/scripts/qm-try-analysis
+```
+
+Build the container image and run the container:
+
+```bash
+podman run -it -v <path on your system>:/home/scripts/qm-try-analysis/output $(podman build -q .)
+```
+
+## Effort
+
+- Each run takes approximately 5–15 minutes overall; the scripts themselves run in less than 5 minutes.
+- Analysis is performed once a week (as of November 2023), on Mondays.
+
+## Generate Output
+
+Navigate to the analysis directory:
+
+```bash
+cd mozilla-unified/dom/quota/scripts/qm-try-analysis
+```
+
+The process involves fetching data, analyzing, and reporting. Here's a quick overview:
+
+```bash
+# Jump into a poetry shell session
+poetry shell
+
+# Fetch data
+qm-try-analysis fetch [OPTIONS]
+
+# Analyze data
+qm-try-analysis analyze [OPTIONS]
+
+# Report failures to Bugzilla
+qm-try-analysis report [OPTIONS]
+
+# To exit the shell session
+exit
+```
+
+Refer to the detailed usage instructions provided by adding the `--help` option to one of the commands above.
+
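+A typical weekly run might look like this (illustrative values; substitute your
+own API keys and working directory):
+
+```bash
+qm-try-analysis fetch -k <telemetry API key> -d 7 -w output
+qm-try-analysis analyze -w output
+qm-try-analysis report -k <Bugzilla API key> -w output
+```
+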
+## Analysis
+
+- Look for noticeable problems such as new errors, unusual stacks, or issues not encountered for a long time.
+
+## Additional Hints
+
+- Treat QM_TRY bugs as meta bugs: do not attach patches there; instead, create a new bug and cross-link it using blocks/depends on.
+- Interesting bugs to cross-link:
+ - [Bug 1705304](https://bugzilla.mozilla.org/show_bug.cgi?id=1705304) (FATAL errors): "error conditions we cannot meaningfully recover from."
+ - [Bug 1712582](https://bugzilla.mozilla.org/show_bug.cgi?id=1712582) (Replace generic NS_ERROR_FAILURE errors with more specific codes).
diff --git a/dom/quota/scripts/qm-try-analysis/poetry.lock b/dom/quota/scripts/qm-try-analysis/poetry.lock
new file mode 100644
index 0000000000..079172f133
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/poetry.lock
@@ -0,0 +1,283 @@
+# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand.
+
+[[package]]
+name = "bugzilla"
+version = "1.0.0"
+description = "A client library for Bugzilla"
+optional = false
+python-versions = "*"
+files = [
+ {file = "bugzilla-1.0.0.tar.gz", hash = "sha256:6e864ddafc4e46c821c1f3735d7c9686522a4eece056be0cadf51221e22dfa11"},
+]
+
+[[package]]
+name = "certifi"
+version = "2023.11.17"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"},
+ {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
+ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.7"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.2.0"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"},
+ {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "idna"
+version = "3.4"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "packaging"
+version = "23.2"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
+]
+
+[[package]]
+name = "pluggy"
+version = "1.3.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
+ {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "pytest"
+version = "7.4.3"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"},
+ {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "urllib3"
+version = "2.1.0"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"},
+ {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.9"
+content-hash = "aff36e769a32652ef044eecff451fa01657c0ed660af9a372a7ca8b874361d47"
diff --git a/dom/quota/scripts/qm-try-analysis/pyproject.toml b/dom/quota/scripts/qm-try-analysis/pyproject.toml
new file mode 100644
index 0000000000..f567928dd8
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/pyproject.toml
@@ -0,0 +1,25 @@
+[tool.poetry]
+name = "qm-try-analysis"
+version = "0.1.0"
+description = ""
+authors = [
+ "Jens Stutte <jstutte@mozilla.com>",
+ "Michael van Straten <mvanstraten@mozilla.com>",
+]
+readme = "README.md"
+
+[tool.poetry.dependencies]
+python = "^3.9"
+click = "^8.1.7"
+requests = "^2.31.0"
+bugzilla = "^1.0.0"
+
+[tool.poetry.scripts]
+qm-try-analysis = "qm_try_analysis.cli:cli"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4.3"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/dom/quota/test/marionette/dummy.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/dom/quota/test/marionette/dummy.py
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/__init__.py
diff --git a/dom/quota/scripts/qm-try-analysis/qm_try_analysis/analyze.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/analyze.py
new file mode 100755
index 0000000000..1173555e08
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/analyze.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+from os import path
+
+import click
+
+from qm_try_analysis import fn_anchors, stackanalysis, utils
+from qm_try_analysis.logging import error, info
+
+"""
+The analysis is based on stack frames of the following form:
+
+[
+ {
+ "event_timeabs": 1617121013137,
+ "session_startabs": 1617120840000,
+ "build_id": "20210329095128",
+ "client_id": "0013a68f-9893-461a-93d4-2d7a2f85583f",
+ "session_id": "8cd37159-bd5c-481c-99ad-9eace9ea726a",
+ "seq": 1,
+ "context": "Initialization::TemporaryStorage",
+ "source_file": "dom/localstorage/ActorsParent.cpp",
+ "source_line": "1018",
+ "severity": "ERROR",
+ "result": "NS_ERROR_FILE_NOT_FOUND"
+ },
+...
+]
+
+The location of the input file is expected to be found in the
+last item of the list inside qmexecutions.json.
+"""
+
+
+@click.command()
+@click.option(
+ "--output-to",
+ type=click.Path(dir_okay=False, writable=True),
+ default="qmstacks_until_<lasteventtime>.txt",
+ help="Specify the output file for the analyzed data.",
+)
+@click.option(
+ "-w",
+ "--workdir",
+ type=click.Path(file_okay=False, exists=True, writable=True),
+ default="output",
+ help="Working directory",
+)
+def analyze_qm_failures(output_to, workdir):
+ """
+    Analyzes the results from fetch's JSON file.
+    Writes out several JSON result files and a Bugzilla markup table to the specified output file.
+ """
+ run = utils.getLastRunFromExecutionFile(workdir)
+ if "numrows" not in run or run["numrows"] == 0:
+ error(
+ "No previous execution from fetch_qm_failures.py found or the last execution yielded no result."
+ )
+ sys.exit(2)
+
+ if output_to == "qmstacks_until_<lasteventtime>.txt":
+ output_to = path.join(workdir, f'qmstacks_until_{run["lasteventtime"]}.txt')
+    elif path.exists(output_to):
+ error(
+ f'The output file "{output_to}" already exists. This script would override it.'
+ )
+ sys.exit(2)
+ run["stacksfile"] = output_to
+
+ def getFname(prefix):
+ return "{}/{}_until_{}.json".format(workdir, prefix, run["lasteventtime"])
+
+ # read rows from JSON
+ rows = utils.readJSONFile(getFname("qmrows"))
+ info(f"Found {len(rows)} rows of data")
+ rows = stackanalysis.sanitize(rows)
+
+ # enrich rows with hg locations
+ buildids = stackanalysis.extractBuildIDs(rows)
+ utils.fetchBuildRevisions(buildids)
+ stackanalysis.constructHGLinks(buildids, rows)
+
+ # transform rows to unique stacks
+ raw_stacks = stackanalysis.collectRawStacks(rows)
+ all_stacks = stackanalysis.mergeEqualStacks(raw_stacks)
+
+ # enrich with function anchors
+ for stack in all_stacks:
+ for frame in stack["frames"]:
+ frame["anchor"] = "{}:{}".format(
+ frame["source_file"], fn_anchors.getFunctionName(frame["location"])
+ )
+
+ # separate stacks for relevance
+ error_stacks = []
+ warn_stacks = []
+ info_stacks = []
+ abort_stacks = []
+ stackanalysis.filterStacksForPropagation(
+ all_stacks, error_stacks, warn_stacks, info_stacks, abort_stacks
+ )
+ run["errorfile"] = getFname("qmerrors")
+ utils.writeJSONFile(run["errorfile"], error_stacks)
+ run["warnfile"] = getFname("qmwarnings")
+ utils.writeJSONFile(run["warnfile"], warn_stacks)
+ run["infofile"] = getFname("qminfo")
+ utils.writeJSONFile(run["infofile"], info_stacks)
+ run["abortfile"] = getFname("qmabort")
+ utils.writeJSONFile(run["abortfile"], abort_stacks)
+ utils.updateLastRunToExecutionFile(workdir, run)
+
+ info(f"Found {len(error_stacks)} error stacks")
+ info(f"Found {len(warn_stacks)} warning stacks")
+ info(f"Found {len(info_stacks)} info stacks")
+ info(f"Found {len(abort_stacks)} aborted stacks")
+
+ # Write results to the specified output file
+ with open(output_to, "w") as output:
+
+ def print_to_output(message):
+ print(message, file=output)
+
+ print_to_output("Error stacks:")
+ print_to_output(stackanalysis.printStacks(error_stacks))
+ print_to_output("")
+ print_to_output("Error stacks grouped by anchors:")
+ anchors = stackanalysis.groupStacksForAnchors(error_stacks)
+ anchornames = list(anchors.keys())
+ for a in anchornames:
+ print_to_output(stackanalysis.printStacks(anchors[a]["stacks"]))
+ print_to_output("")
+ print_to_output("")
+ print_to_output("Warning stacks:")
+ print_to_output(stackanalysis.printStacks(warn_stacks))
+ print_to_output("")
+ print_to_output("Info stacks:")
+ print_to_output(stackanalysis.printStacks(info_stacks))
+ print_to_output("")
+ print_to_output("Aborted stacks:")
+ print_to_output(stackanalysis.printStacks(abort_stacks))
+
+ info(f"Wrote results to specified output file {output_to}")
+
+
+if __name__ == "__main__":
+ analyze_qm_failures()
diff --git a/dom/quota/scripts/qm-try-analysis/qm_try_analysis/cli.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/cli.py
new file mode 100644
index 0000000000..509a8e33e1
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/cli.py
@@ -0,0 +1,22 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+import click
+
+from qm_try_analysis.analyze import analyze_qm_failures
+from qm_try_analysis.fetch import fetch_qm_failures
+from qm_try_analysis.report import report_qm_failures
+
+
+@click.group(context_settings={"show_default": True})
+def cli():
+ pass
+
+
+cli.add_command(fetch_qm_failures, "fetch")
+cli.add_command(analyze_qm_failures, "analyze")
+cli.add_command(report_qm_failures, "report")
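+
+# The `qm-try-analysis` console script declared in pyproject.toml points at
+# this group, so, for example, `qm-try-analysis fetch --help` prints the
+# options of the `fetch` subcommand.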
+
+if __name__ == "__main__":
+ cli()
diff --git a/dom/quota/scripts/qm-try-analysis/qm_try_analysis/fetch.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/fetch.py
new file mode 100644
index 0000000000..2512293c29
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/fetch.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+import pathlib
+
+import click
+
+from qm_try_analysis import telemetry, utils
+from qm_try_analysis.logging import info
+
+"""
+The analysis is based on the following query:
+https://sql.telemetry.mozilla.org/queries/78691/source?p_day=28&p_month=03&p_year=2021
+
+SELECT UNIX_MILLIS(timestamp) AS submit_timeabs,
+ session_start_time,
+ submission_date,
+ build_id,
+ client_id,
+ session_id,
+ event_timestamp,
+ CAST(mozfun.map.get_key(event_map_values, "seq") AS INT64) AS seq,
+ mozfun.map.get_key(event_map_values, "context") AS context,
+ mozfun.map.get_key(event_map_values, "source_file") AS source_file,
+ mozfun.map.get_key(event_map_values, "source_line") AS source_line,
+ mozfun.map.get_key(event_map_values, "severity") AS severity,
+ mozfun.map.get_key(event_map_values, "result") AS result,
+FROM telemetry.events
+WHERE submission_date >= CAST('{{ year }}-{{ month }}-{{ day }}' AS DATE)
+ AND event_category='dom.quota.try'
+ AND build_id >= '{{ build }}'
+ AND UNIX_MILLIS(timestamp) > {{ last }}
+ORDER BY submit_timeabs
+LIMIT 600000
+
+We fetch events in chronological order because we want to keep track of how far
+our analysis has already progressed. To accomplish this, we write our runs into qmexecutions.json.
+
+[
+ {
+ "workdir": ".",
+ "daysback": 1,
+ "numrows": 17377,
+ "lasteventtime": 1617303855145,
+ "rawfile": "./qmrows_until_1617303855145.json"
+ }
+]
+
+lasteventtime is the highest value of event_timeabs we found in our data.
+
+analyze_qm_failures instead needs the rows to be ordered by
+client_id, session_id, thread_id, submit_timeabs, seq
+Thus we sort the rows accordingly before writing them.
+"""
+
+
+@click.command()
+@click.option(
+ "-k",
+ "--key",
+ required=True,
+ help="Your personal telemetry API key.",
+)
+@click.option(
+ "-b",
+ "--minbuild",
+ default="20210329000000",
+ help="The lowest build id we will fetch data for. This should have the following format: `yyyymmddhhmmss`.",
+)
+@click.option("-d", "--days", type=int, default=7, help="Number of days to go back.")
+@click.option(
+ "-l",
+ "--lasteventtime",
+ type=int,
+ default=0,
+ help="Fetch only events after this number of Unix milliseconds.",
+)
+@click.option(
+ "-w",
+ "--workdir",
+ type=click.Path(file_okay=False, writable=True, path_type=pathlib.Path),
+ default="output",
+ help="Working directory",
+)
+def fetch_qm_failures(key, minbuild, days, lasteventtime, workdir):
+ """
+ Invokes the query 78691 and stores the result in a JSON file.
+ """
+    # Create the output dir if it does not exist
+ workdir.mkdir(exist_ok=True)
+
+ start = utils.dateback(days)
+ year, month, day = start.year, start.month, start.day
+
+ run = {}
+ lastrun = utils.getLastRunFromExecutionFile(workdir)
+ if "lasteventtime" in lastrun:
+ lasteventtime = lastrun["lasteventtime"]
+
+ run["workdir"] = workdir.as_posix()
+ run["daysback"] = days
+ run["minbuild"] = minbuild
+
+ p_params = f"p_year={year:04d}&p_month={month:02d}&p_day={day:02d}&p_build={minbuild}&p_last={lasteventtime}"
+
+    # See the docstring at the top of this file for more information on query 78691
+ result = telemetry.query(key, 78691, p_params)
+ rows = result["query_result"]["data"]["rows"]
+ run["numrows"] = len(rows)
+
+ if run["numrows"] > 0:
+ lasteventtime = telemetry.getLastEventTimeAbs(rows)
+ run["lasteventtime"] = lasteventtime
+ rows.sort(
+ key=lambda row: "{}.{}.{}.{}.{:06d}".format(
+ row["client_id"],
+ row["session_id"],
+ row["seq"] >> 32, # thread_id
+ row["submit_timeabs"],
+ row["seq"] & 0x00000000FFFFFFFF, # seq,
+ ),
+ reverse=False,
+ )
+ outfile = f"{workdir}/qmrows_until_{lasteventtime}.json"
+ utils.writeJSONFile(outfile, rows)
+ run["rawfile"] = outfile
+ else:
+ info("No results found, maybe next time.")
+ run["lasteventtime"] = lasteventtime
+
+ utils.addNewRunToExecutionFile(workdir, run)
+
+
+if __name__ == "__main__":
+ fetch_qm_failures()
diff --git a/dom/quota/scripts/fetch_fn_names.sh b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/fetch_fn_names.sh
index 6d3a3c4d23..bd619186cd 100755
--- a/dom/quota/scripts/fetch_fn_names.sh
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/fetch_fn_names.sh
@@ -5,13 +5,13 @@
# This script assumes to have rust-code-analysis-cli in the path.
HG_URL=$1
-TEMPDIR=/tmp/fetch_fn_names_$BASHPID
+TEMPDIR=/tmp/fetch_fn_names_$$
TEMPSRC=$TEMPDIR/src
mkdir $TEMPDIR
echo "" > $TEMPDIR/empty.json
HG_URL=`echo $HG_URL | sed 's/annotate/raw-file/g'`
wget -q -O "$TEMPSRC" $HG_URL
-rust-code-analysis-cli -m -O json -o "$TEMPDIR" -p "$TEMPSRC"
+rust-code-analysis-cli -m -O json -p "$TEMPSRC"
CONTENT=`cat $TEMPDIR/*.json`
rm -rf $TEMPDIR
echo $CONTENT
diff --git a/dom/quota/scripts/fn_anchors.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/fn_anchors.py
index eeaf43764c..13e3802399 100644
--- a/dom/quota/scripts/fn_anchors.py
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/fn_anchors.py
@@ -4,16 +4,24 @@
import json
import subprocess
+from os import path
+
+from qm_try_analysis.logging import info, warning
cached_functions = {}
def getMetricsJson(src_url):
if src_url.startswith("http"):
- print("Fetching source for function extraction: {}".format(src_url))
- metrics = subprocess.check_output(["./fetch_fn_names.sh", src_url])
+ info(f"Fetching source for function extraction: {src_url}")
+ metrics = subprocess.check_output(
+ [
+ path.join(path.dirname(path.realpath(__file__)), "fetch_fn_names.sh"),
+ src_url,
+ ]
+ )
else:
- print("Skip fetching source: {}".format(src_url))
+ warning(f"Skip fetching source: {src_url}")
metrics = ""
try:
diff --git a/dom/quota/scripts/qm-try-analysis/qm_try_analysis/logging.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/logging.py
new file mode 100644
index 0000000000..c96679f96c
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/logging.py
@@ -0,0 +1,21 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+import click
+
+
+def debug(message):
+ click.echo(click.style("Debug", fg="cyan") + f": {message}")
+
+
+def info(message):
+ click.echo(click.style("Info", fg="white") + f": {message}")
+
+
+def warning(message):
+ click.echo(click.style("Warning", fg="yellow") + f": {message}")
+
+
+def error(message):
+ click.echo(click.style("Error", fg="red") + f": {message}")
diff --git a/dom/quota/scripts/qm-try-analysis/qm_try_analysis/report.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/report.py
new file mode 100644
index 0000000000..0ec5428679
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/report.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+import hashlib
+import json
+import re
+import sys
+import webbrowser
+from typing import Union
+
+import bugzilla
+import click
+from click.utils import echo
+
+from qm_try_analysis import stackanalysis, utils
+from qm_try_analysis.logging import error, info, warning
+
+# Flag for toggling development mode
+DEV = True
+
+# Constants for Bugzilla URLs
+if DEV:
+ BUGZILLA_BASE_URL = "https://bugzilla-dev.allizom.org/"
+else:
+ BUGZILLA_BASE_URL = "https://bugzilla.mozilla.org/"
+
+BUGZILLA_API_URL = BUGZILLA_BASE_URL + "rest/"
+BUGZILLA_ATTACHMENT_URL = BUGZILLA_BASE_URL + "attachment.cgi?id="
+BUGZILLA_BUG_URL = BUGZILLA_BASE_URL + "show_bug.cgi?id="
+
+# Constants for static bugs
+QM_TRY_FAILURES_BUG = 1702411
+WARNING_STACKS_BUG = 1711703
+
+# Regex pattern for parsing anchor strings
+ANCHOR_REGEX_PATTERN = re.compile(r"([^:]+):([^:]+)?:*([^:]+)?")
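+#
+# Illustrative (hypothetical) anchors and how the pattern splits them:
+#   "InitOrigin:source"        -> ("InitOrigin", "source", None)
+#   "InitOrigin:source:detail" -> ("InitOrigin", "source", "detail")
+# Anchors whose second field contains "Unknown" are skipped below.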
+
+
+@click.command()
+@click.option(
+ "-k",
+ "--key",
+ help="Your personal Bugzilla API key",
+ required=True,
+)
+@click.option(
+ "--stacksfile",
+ type=click.File("rb"),
+ help="The output file of the previous analysis run. You only have to specify this, if the previous run does not include this info.",
+)
+@click.option(
+ "--open-modified/--no-open-modified",
+ default=True,
+ help="Whether to open modified bugs in your default browser after updating them.",
+)
+@click.option(
+ "-w",
+ "--workdir",
+ type=click.Path(file_okay=False, exists=True, writable=True),
+ default="output",
+ help="Working directory",
+)
+def report_qm_failures(key, stacksfile, open_modified, workdir):
+ """
+ Report QM failures to Bugzilla based on stack analysis.
+ """
+ run = utils.getLastRunFromExecutionFile(workdir)
+
+ # Check for valid execution file from the previous run
+ if not {"errorfile", "warnfile"} <= run.keys():
+ error("No analyzable execution from the previous run of analyze found.")
+ echo("Did you remember to run `poetry run qm-try-analysis analyze`?")
+ sys.exit(2)
+
+ # Handle missing stacksfile
+ if not stacksfile:
+ if "stacksfile" not in run:
+ error(
+ "The previous analyze run did not contain the location of the stacksfile."
+ )
+ echo('Please provide the file location using the "--stacksfile" option.')
+ sys.exit(2)
+ stacksfile = open(run["stacksfile"], "rb")
+
+ # Create Bugzilla client
+ bugzilla_client = bugzilla.Bugzilla(url=BUGZILLA_API_URL, api_key=key)
+
+ # Initialize report data
+ report = run.get("report", {})
+ run["report"] = report
+ attachment_id = report.get("stacksfile_attachment", None)
+ reported = report.get("reported", [])
+ report["reported"] = reported
+
+ def post_comment(bug_id, comment):
+ """
+ Post a comment to a Bugzilla bug.
+ """
+ data = {"id": bug_id, "comment": comment, "is_markdown": True}
+ res = bugzilla_client._post(f"bug/{bug_id}/comment", json.dumps(data))
+ return res["id"]
+
+ # Handle missing attachment ID
+ if not attachment_id:
+ attachment = bugzilla.DotDict()
+ attachment.file_name = f"qmstacks_until_{run['lasteventtime']}.txt"
+ attachment.summary = attachment.file_name
+ attachment.content_type = "text/plain"
+ attachment.data = stacksfile.read().decode()
+ res = bugzilla_client.post_attachment(QM_TRY_FAILURES_BUG, attachment)
+ attachment_id = next(iter(res["attachments"].values()))["id"]
+ report["stacksfile_attachment"] = attachment_id
+ utils.updateLastRunToExecutionFile(workdir, run)
+
+ info(
+ f'Created attachment for "{attachment.file_name}": {BUGZILLA_ATTACHMENT_URL + str(attachment_id)}.'
+ )
+
+ def generate_comment(stacks):
+ """
+ Generate a comment for Bugzilla based on error stacks.
+ """
+ comment = f"Taken from Attachment {attachment_id}\n\n"
+ comment += stackanalysis.printStacks(stacks)
+ return comment
+
+ # Handle missing warnings comment
+ if "warnings_comment" not in report:
+ warning_stacks = utils.readJSONFile(run["warnfile"])
+ warning_stacks = filter(lambda stack: stack["hit_count"] >= 100, warning_stacks)
+
+ comment = generate_comment(warning_stacks)
+ comment_id = post_comment(WARNING_STACKS_BUG, comment)
+
+ report["warnings_comment"] = comment_id
+ utils.updateLastRunToExecutionFile(workdir, run)
+
+ info("Created comment for warning stacks.")
+
+ error_stacks = utils.readJSONFile(run["errorfile"])
+
+ def reduce(search_results, by: str) -> Union[int, None]:
+ """
+ Reduce bug search results automatically or based on user input.
+ """
+ anchor = by
+
+ search_results = remove_duplicates(search_results, bugzilla_client)
+
+ if not search_results:
+ return
+
+ if len(search_results) == 1:
+ return search_results[0]["id"]
+
+ echo(f'Multiple bugs found for anchor "{anchor}":')
+
+ for i, result in enumerate(search_results, start=1):
+ echo(
+ f"{i}.{' [closed]' if result['resolution'] != '' else ''} {BUGZILLA_BUG_URL + str(result['id'])} - {result['summary']}"
+ )
+
+ choice = click.prompt(
+ "Enter the number of the bug you want to use",
+ type=click.Choice(
+ [str(i) for i in range(1, len(search_results) + 1)] + ["skip"]
+ ),
+ default="skip",
+ show_default=True,
+ confirmation_prompt="Please confirm the selected choice",
+ )
+
+ if choice == "skip":
+ return
+
+ return search_results[int(choice) - 1]["id"]
+
+ anchors = stackanalysis.groupStacksForAnchors(error_stacks)
+
+ for anchor in anchors:
+ if hash_str(anchor) in reported:
+ info(f'Skipping anchor "{anchor}" since it has already been reported.')
+ continue
+
+ if not (match := ANCHOR_REGEX_PATTERN.match(anchor)):
+ warning(f'"{anchor}" did not match the regex pattern.')
+
+ if "Unknown" in match.group(2):
+ warning(f'Skipping "{anchor}" since it is not a valid anchor.')
+ continue
+
+ search_string = " ".join(filter(None, match.groups()))
+ search_results = bugzilla_client.search_bugs(
+ [{"product": "Core", "summary": search_string}]
+ )["bugs"]
+
+ if bug_id := reduce(search_results, by=anchor):
+ info(f'Found bug {BUGZILLA_BUG_URL + str(bug_id)} for anchor "{anchor}".')
+ else:
+ warning(f'No bug found for anchor "{anchor}".')
+
+ if not click.confirm("Would you like to create one?"):
+ continue
+
+ bug = bugzilla.DotDict()
+ bug.product = "Core"
+ bug.component = "Storage: Quota Manager"
+ bug.summary = f"[QM_TRY] Failures in {anchor}"
+ bug.description = f"This bug keeps track of the semi-automatic monitoring of QM_TRY failures in `{anchor}`"
+ bug["type"] = "defect"
+ bug.blocks = QM_TRY_FAILURES_BUG
+ bug.version = "unspecified"
+
+ bug_id = bugzilla_client.post_bug(bug)["id"]
+
+ info(f'Created bug {BUGZILLA_BUG_URL + str(bug_id)} for anchor "{anchor}".')
+
+ comment = generate_comment(anchors[anchor]["stacks"])
+ comment_id = post_comment(bug_id, comment)
+
+ reported.append(hash_str(anchor))
+ utils.updateLastRunToExecutionFile(workdir, run)
+
+ if open_modified:
+ comment_seq_number = bugzilla_client.get_comment(comment_id)["comments"][
+ str(comment_id)
+ ]["count"]
+ webbrowser.open(
+ BUGZILLA_BUG_URL + str(bug_id) + "#c" + str(comment_seq_number)
+ )
+
+
+def hash_str(s):
+ """
+ Hash a string using MD5.
+ """
+ encoded_str = s.encode("utf-8")
+ return int(hashlib.md5(encoded_str).hexdigest(), 16)
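+
+# Example: hash_str("InitOrigin:source") yields a stable 128-bit integer;
+# these hashes are stored in the execution file so anchors that were already
+# reported can be skipped on later runs.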
+
+
+def remove_duplicates(search_results, bugzilla_client):
+ """
+ Remove duplicate bugs in search results.
+ """
+ resolved_bugs = set(bug["id"] for bug in search_results if not bug.get("dupe_of"))
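+    # Bugs that are not duplicates resolve to themselves, so their ids seed
+    # the set; a duplicate chain that ends at one of these ids is dropped
+    # rather than reported twice.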
+
+ def resolve_if_dupe(bug):
+ if not (dupe_of := bug.get("dupe_of")):
+ return bug
+
+ if dupe_of in resolved_bugs:
+ return None
+
+ remote = resolve_if_dupe(bugzilla_client.get_bug(dupe_of))
+ if remote:
+ resolved_bugs.add(remote["id"])
+
+ return remote
+
+ return [non_dupe for bug in search_results if (non_dupe := resolve_if_dupe(bug))]
+
+
+if __name__ == "__main__":
+ report_qm_failures()
diff --git a/dom/quota/scripts/stackanalysis.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/stackanalysis.py
index f0363c5e1f..f0363c5e1f 100644
--- a/dom/quota/scripts/stackanalysis.py
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/stackanalysis.py
diff --git a/dom/quota/scripts/telemetry.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/telemetry.py
index a62abd62b1..26debd0546 100644
--- a/dom/quota/scripts/telemetry.py
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/telemetry.py
@@ -6,20 +6,22 @@ import time
import requests
+from qm_try_analysis.logging import info
+
+TELEMETRY_BASE_URL = "https://sql.telemetry.mozilla.org/api/"
+
def query(key, query, p_params):
headers = {"Authorization": "Key {}".format(key)}
- start_url = "https://sql.telemetry.mozilla.org/api/" "queries/{}/refresh?{}".format(
- query, p_params
- )
- print(start_url)
+ start_url = TELEMETRY_BASE_URL + f"queries/{query}/refresh?{p_params}"
+ info(f"Starting job using url {start_url}")
resp = requests.post(url=start_url, headers=headers)
job = resp.json()["job"]
- jid = job["id"]
- print("Started job {}".format(jid))
+ job_id = job["id"]
+ info(f"Started job {job_id}")
- poll_url = "https://sql.telemetry.mozilla.org/api/" "jobs/{}".format(jid)
- print(poll_url)
+ poll_url = TELEMETRY_BASE_URL + f"jobs/{job_id}"
+ info(f"Polling query status from {poll_url}")
poll = True
status = 0
qresultid = 0
@@ -34,15 +36,13 @@ def query(key, query, p_params):
else:
time.sleep(0.2)
print(".")
- print("Finished with status {}".format(status))
+ info(f"Finished with status {status}")
if status == 3:
- fetch_url = (
- "https://sql.telemetry.mozilla.org/api/"
- "queries/78691/results/{}.json".format(qresultid)
- )
- print(fetch_url)
- resp = requests.get(url=fetch_url, headers=headers)
+ results_url = TELEMETRY_BASE_URL + f"queries/78691/results/{qresultid}.json"
+
+ info(f"Querying result from {results_url}")
+ resp = requests.get(url=results_url, headers=headers)
return resp.json()
return {"query_result": {"data": {"rows": {}}}}
diff --git a/dom/quota/scripts/utils.py b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/utils.py
index d5322728a5..485dbf66f5 100644
--- a/dom/quota/scripts/utils.py
+++ b/dom/quota/scripts/qm-try-analysis/qm_try_analysis/utils.py
@@ -7,6 +7,8 @@ import json
import requests
+from qm_try_analysis.logging import error, info, warning
+
def readJSONFile(FileName):
f = open(FileName, "r")
@@ -37,7 +39,7 @@ def fetchBuildRevisions(buildids):
buildhub_url = "https://buildhub.moz.tools/api/search"
delids = {}
for bid in buildids:
- print("Fetching revision for build {}.".format(bid))
+ info(f"Fetching revision for build {bid}.")
body = {"size": 1, "query": {"term": {"build.id": bid}}}
resp = requests.post(url=buildhub_url, json=body)
hits = resp.json()["hits"]["hits"]
@@ -48,7 +50,7 @@ def fetchBuildRevisions(buildids):
+ hits[0]["_source"]["source"]["revision"]
)
else:
- print("No revision for build.id {}".format(bid))
+ warning(f"No revision for build.id {bid}")
delids[bid] = "x"
for bid in delids:
buildids.pop(bid)
@@ -67,7 +69,7 @@ def writeExecutionFile(workdir, executions):
try:
writeJSONFile(exefile, executions)
except OSError:
- print("Error writing execution record.")
+ error("Error writing execution record.")
def getLastRunFromExecutionFile(workdir):
diff --git a/dom/quota/scripts/qm-try-analysis/tests/test_reporting.py b/dom/quota/scripts/qm-try-analysis/tests/test_reporting.py
new file mode 100644
index 0000000000..f3be6d9cf0
--- /dev/null
+++ b/dom/quota/scripts/qm-try-analysis/tests/test_reporting.py
@@ -0,0 +1,96 @@
+import copy
+
+from qm_try_analysis.report import remove_duplicates
+
+
+class MockClient(object):
+ def __init__(self, search_results, remote_bugs):
+ self.search_results = copy.deepcopy(search_results)
+ self.remote_bugs = copy.deepcopy(remote_bugs)
+
+ def get_bug(self, bug_id):
+ for item in self.search_results:
+ if bug_id == item["id"]:
+ return copy.deepcopy(item)
+ for item in self.remote_bugs:
+ if bug_id == item["id"]:
+ return copy.deepcopy(item)
+ return {"id": bug_id, "dupe_of": None}
+
+
+def test_duplicate_bug_removal():
+ test_cases = [
+ {
+ "search_results": [
+ {"id": "k1", "dupe_of": "k4"},
+ {"id": "k3", "dupe_of": "k5"},
+ {"id": "k2", "dupe_of": "k6"},
+ ],
+ "remote_bugs": [
+ {"id": "k4", "dupe_of": "k2"},
+ {"id": "k5", "dupe_of": None},
+ {"id": "k6", "dupe_of": "k3"},
+ ],
+ "expected": [{"id": "k5", "dupe_of": None}],
+ },
+ {
+ "search_results": [
+ {"id": "k2", "dupe_of": "k6"},
+ {"id": "k3", "dupe_of": "k5"},
+ {"id": "k1", "dupe_of": "k4"},
+ ],
+ "remote_bugs": [
+ {"id": "k4", "dupe_of": "k2"},
+ {"id": "k5", "dupe_of": None},
+ {"id": "k6", "dupe_of": "k3"},
+ ],
+ "expected": [{"id": "k5", "dupe_of": None}],
+ },
+ {
+ "search_results": [
+ {"id": "k1", "dupe_of": "k3"},
+ {"id": "k2", "dupe_of": "k4"},
+ ],
+ "remote_bugs": [
+ {"id": "k3", "dupe_of": "k2"},
+ {"id": "k4", "dupe_of": None},
+ ],
+ "expected": [{"id": "k4", "dupe_of": None}],
+ },
+ {
+ "search_results": [
+ {"id": "k1", "dupe_of": "k3"},
+ {"id": "k2", "dupe_of": None},
+ ],
+ "remote_bugs": [
+ {"id": "k3", "dupe_of": "k4"},
+ {"id": "k4", "dupe_of": "k2"},
+ ],
+ "expected": [{"id": "k2", "dupe_of": None}],
+ },
+ {
+ "search_results": [
+ {"id": "k1", "dupe_of": "k3"},
+ {"id": "k2", "dupe_of": None},
+ ],
+ "remote_bugs": [{"id": "k3", "dupe_of": "k2"}],
+ "expected": [{"id": "k2", "dupe_of": None}],
+ },
+ {
+ "search_results": [
+ {"id": "k1", "dupe_of": None},
+ {"id": "k2", "dupe_of": "k3"},
+ ],
+ "remote_bugs": [{"id": "k3", "dupe_of": "k1"}],
+ "expected": [{"id": "k1", "dupe_of": None}],
+ },
+ ]
+
+ for test_case in test_cases:
+ search_results = test_case["search_results"]
+ remote_bugs = test_case["remote_bugs"]
+ expected = test_case["expected"]
+
+ mock_client = MockClient(search_results, remote_bugs)
+
+ assert remove_duplicates(search_results, mock_client) == expected
diff --git a/dom/quota/test/marionette/manifest.toml b/dom/quota/test/marionette/manifest.toml
index de02cdd541..f80095c590 100644
--- a/dom/quota/test/marionette/manifest.toml
+++ b/dom/quota/test/marionette/manifest.toml
@@ -1,5 +1,4 @@
[DEFAULT]
support-files = ["quota_test_case.py"]
-["dummy.py"]
-skip-if = ["true"]
+["test_private_repository_cleanup.py"]
diff --git a/dom/quota/test/marionette/quota_test_case.py b/dom/quota/test/marionette/quota_test_case.py
index c31edcaaf7..7d161cf4ff 100644
--- a/dom/quota/test/marionette/quota_test_case.py
+++ b/dom/quota/test/marionette/quota_test_case.py
@@ -10,6 +10,51 @@ from marionette_harness import MarionetteTestCase
class QuotaTestCase(MarionetteTestCase):
+ def executeAsyncScript(self, script, script_args):
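+        """
+        Runs `script` via execute_async_script, prepending a `requestFinished`
+        helper that awaits a QuotaManager request and throws a RequestError
+        on failure. `script` must define an async function `main()`; its
+        resolved value is returned, and any rejection resolves to None,
+        which trips the assert below.
+        """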
+ res = self.marionette.execute_async_script(
+ """
+ const resolve = arguments[arguments.length - 1];
+
+ class RequestError extends Error {
+ constructor(resultCode, resultName) {
+ super(`Request failed (code: ${resultCode}, name: ${resultName})`);
+ this.name = "RequestError";
+ this.resultCode = resultCode;
+ this.resultName = resultName;
+ }
+ }
+
+ async function requestFinished(request) {
+ await new Promise(function (resolve) {
+ request.callback = function () {
+ resolve();
+ };
+ });
+
+ if (request.resultCode !== Cr.NS_OK) {
+ throw new RequestError(request.resultCode, request.resultName);
+ }
+
+ return request.result;
+ }
+ """
+ + script
+ + """
+ main()
+ .then(function(result) {
+ resolve(result);
+ })
+ .catch(function() {
+ resolve(null);
+            });
+ """,
+ script_args=script_args,
+ new_sandbox=False,
+ )
+
+ assert res is not None
+ return res
+
def ensureInvariantHolds(self, op):
maxWaitTime = 60
Wait(self.marionette, timeout=maxWaitTime).until(
@@ -24,27 +69,23 @@ class QuotaTestCase(MarionetteTestCase):
return None
def getFullOriginMetadata(self, persistenceType, origin):
- with self.marionette.using_context("chrome"):
- res = self.marionette.execute_async_script(
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ res = self.executeAsyncScript(
"""
- const [persistenceType, origin, resolve] = arguments;
+ const [persistenceType, origin] = arguments;
- const principal = Services.scriptSecurityManager.
- createContentPrincipalFromOrigin(origin);
-
- const request = Services.qms.getFullOriginMetadata(
- persistenceType, principal);
-
- request.callback = function() {
- if (request.resultCode != Cr.NS_OK) {
- resolve(null);
- } else {
- resolve(request.result);
- }
- }
+ async function main() {
+ const principal = Services.scriptSecurityManager.
+ createContentPrincipalFromOrigin(origin);
+
+ const request = Services.qms.getFullOriginMetadata(
+ persistenceType, principal);
+ const metadata = await requestFinished(request);
+
+ return metadata;
+ }
""",
script_args=(persistenceType, origin),
- new_sandbox=False,
)
assert res is not None
@@ -57,41 +98,91 @@ class QuotaTestCase(MarionetteTestCase):
sanitizedStorageOrigin = storageOrigin.replace(":", "+").replace("/", "+")
return os.path.join(
- profilePath, "storage", persistenceType, sanitizedStorageOrigin, client
+ self.getRepositoryPath(persistenceType), sanitizedStorageOrigin, client
)
+ def getRepositoryPath(self, persistenceType):
+ profilePath = self.marionette.instance.profile.profile
+ assert profilePath is not None
+
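+        # e.g. <profile>/storage/private when persistenceType is "private"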
+ return os.path.join(profilePath, "storage", persistenceType)
+
+ def initStorage(self):
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ return self.executeAsyncScript(
+ """
+ async function main() {
+ let req = Services.qms.init();
+ await requestFinished(req);
+
+ return true;
+ }
+ """,
+ script_args=(),
+ )
+
+ def initTemporaryOrigin(self, persistenceType, origin):
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ return self.executeAsyncScript(
+ """
+ const [persistenceType, origin] = arguments;
+ async function main() {
+ const principal = Services.scriptSecurityManager.
+ createContentPrincipalFromOrigin(origin);
+
+ let req = Services.qms.initializeTemporaryOrigin(persistenceType, principal);
+                await requestFinished(req);
+
+ return true;
+ }
+ """,
+ script_args=(
+ persistenceType,
+ origin,
+ ),
+ )
+
+ def initTemporaryStorage(self):
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ return self.executeAsyncScript(
+ """
+ async function main() {
+ const req = Services.qms.initTemporaryStorage();
+ await requestFinished(req);
+
+ return true;
+ }
+ """,
+ script_args=(),
+ )
+
def resetStoragesForPrincipal(self, origin, persistenceType, client):
# This method is used to force sqlite to write journal file contents to
# main sqlite database file
- script = """
- const [resolve] = arguments
-
- let origin = '%s';
- let persistenceType = '%s';
- let client = '%s';
- let principal = Services.scriptSecurityManager.
- createContentPrincipalFromOrigin(origin);
-
- let req = Services.qms.resetStoragesForPrincipal(principal, persistenceType, client);
- req.callback = () => {
- if (req.resultCode == Cr.NS_OK) {
- resolve(true);
- } else {
- resolve(false);
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ res = self.executeAsyncScript(
+ """
+ const [origin, persistenceType, client] = arguments;
+
+ async function main() {
+ const principal = Services.scriptSecurityManager.
+ createContentPrincipalFromOrigin(origin);
+
+ const request = Services.qms.resetStoragesForPrincipal(principal, persistenceType, client);
+ await requestFinished(request);
+
+ return true;
}
- }
- """ % (
- origin,
- persistenceType,
- client,
- )
+ """,
+ script_args=(origin, persistenceType, client),
+ )
- with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
- return self.marionette.execute_async_script(script)
+ assert res is not None
+ return res
@contextmanager
- def using_new_window(self, path, private=False):
+ def using_new_window(self, path, private=False, skipCleanup=False):
"""
This helper method is created to ensure that a temporary
window required inside the test scope is lifetime'd properly
@@ -109,5 +200,6 @@ class QuotaTestCase(MarionetteTestCase):
yield (origin, "private" if private else "default")
finally:
- self.marionette.close()
- self.marionette.switch_to_window(oldWindow)
+ if not skipCleanup:
+ self.marionette.close()
+ self.marionette.switch_to_window(oldWindow)
diff --git a/dom/quota/test/marionette/test_private_repository_cleanup.py b/dom/quota/test/marionette/test_private_repository_cleanup.py
new file mode 100644
index 0000000000..0d4c488757
--- /dev/null
+++ b/dom/quota/test/marionette/test_private_repository_cleanup.py
@@ -0,0 +1,94 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+from pathlib import Path
+
+sys.path.append(os.fspath(Path(__file__).parents[0]))
+from quota_test_case import QuotaTestCase
+
+QM_TESTING_PREF = "dom.quotaManager.testing"
+AUTOSTART_PBM_PREF = "browser.privatebrowsing.autostart"
+
+"""
+    This test ensures that the private repository gets properly purged in all of the scenarios below:
+    1. at PBM session end
+    2. on firefox startup, when the last session was terminated abnormally (e.g. by a crash)
+    3. on firefox shutdown, when PBM autostart is enabled (i.e. browser.privatebrowsing.autostart is set to true)
+"""
+
+
+class PrivateRepositoryCleanup(QuotaTestCase):
+ def setUp(self, autostartPBM=False):
+ super(PrivateRepositoryCleanup, self).setUp()
+
+ self.marionette.set_pref(AUTOSTART_PBM_PREF, autostartPBM)
+ self.marionette.set_pref(QM_TESTING_PREF, True)
+
+ self.marionette.restart(in_app=True)
+
+ assert self.initStorage()
+ assert self.initTemporaryStorage()
+
+ def tearDown(self):
+ self.marionette.clear_pref(AUTOSTART_PBM_PREF)
+ self.marionette.clear_pref(QM_TESTING_PREF)
+
+ self.marionette.restart(in_app=True)
+ super(PrivateRepositoryCleanup, self).tearDown()
+
+ def doStorageWork(self):
+ origin = self.marionette.absolute_url("")[:-1] + "^privateBrowsingId=1"
+ assert self.initTemporaryOrigin("private", origin)
+
+ self.ensureInvariantHolds(lambda _: os.path.exists(self.getPrivateRepository()))
+
+ def verifyCleanup(self):
+ self.ensureInvariantHolds(
+ lambda _: not os.path.exists(self.getPrivateRepository())
+ )
+
+ def getPrivateRepository(self):
+ return self.getRepositoryPath("private")
+
+
+class PBM(PrivateRepositoryCleanup):
+ def test_ensure_cleanup(self):
+ with self.using_new_window("", private=True):
+ self.doStorageWork()
+
+        # the private window must have been closed by now
+ self.verifyCleanup()
+
+ def test_ensure_cleanup_after_crash(self):
+ with self.using_new_window("", private=True, skipCleanup=True):
+ self.doStorageWork()
+
+        # verify cleanup was performed after simulating a crash by
+        # restarting firefox with in_app=False
+ self.marionette.restart(in_app=False)
+ self.verifyCleanup()
+
+
+class PBMAutoStart(PrivateRepositoryCleanup):
+ def setUp(self):
+ super(PBMAutoStart, self).setUp(True)
+
+ def test_ensure_cleanup(self):
+ self.doStorageWork()
+
+        # verify cleanup was performed at shutdown
+ self.marionette.quit(in_app=True)
+ self.verifyCleanup()
+
+ self.marionette.start_session()
+
+ def test_ensure_cleanup_after_crash(self):
+ self.doStorageWork()
+
+        # verify cleanup was performed after simulating a crash by
+        # restarting firefox with in_app=False
+ self.marionette.restart(in_app=False)
+ self.verifyCleanup()