author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483  /browser/components/textrecognition/tests
parent     Initial commit. (diff)
Adding upstream version 124.0.1. (tag: upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'browser/components/textrecognition/tests')
-rw-r--r--  browser/components/textrecognition/tests/browser/browser.toml                          |  12
-rw-r--r--  browser/components/textrecognition/tests/browser/browser_textrecognition.js            | 140
-rw-r--r--  browser/components/textrecognition/tests/browser/browser_textrecognition_no_result.js  |  99
-rw-r--r--  browser/components/textrecognition/tests/browser/head.js                               |  59
-rw-r--r--  browser/components/textrecognition/tests/browser/image.png                             | bin 0 -> 7061 bytes
5 files changed, 310 insertions, 0 deletions
diff --git a/browser/components/textrecognition/tests/browser/browser.toml b/browser/components/textrecognition/tests/browser/browser.toml
new file mode 100644
index 0000000000..12c79add6c
--- /dev/null
+++ b/browser/components/textrecognition/tests/browser/browser.toml
@@ -0,0 +1,12 @@
+[DEFAULT]
+support-files = [
+ "head.js",
+ "image.png",
+ "!/toolkit/content/tests/browser/doggy.png",
+]
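+# The "!"-prefixed path reuses doggy.png from toolkit's browser tests rather
+# than requiring a local copy.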
+
+["browser_textrecognition.js"]
+run-if = ["os == 'mac'"] # Mac-only feature.
+
+["browser_textrecognition_no_result.js"]
+run-if = ["os == 'mac'"] # Mac-only feature.
diff --git a/browser/components/textrecognition/tests/browser/browser_textrecognition.js b/browser/components/textrecognition/tests/browser/browser_textrecognition.js
new file mode 100644
index 0000000000..c949902e67
--- /dev/null
+++ b/browser/components/textrecognition/tests/browser/browser_textrecognition.js
@@ -0,0 +1,140 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+"use strict";
+
+add_task(async function () {
+ const URL_IMG =
+ "http://mochi.test:8888/browser/browser/components/textrecognition/tests/browser/image.png";
+
+ await SpecialPowers.pushPrefEnv({
+ set: [["dom.text-recognition.enabled", true]],
+ });
+
+ clearTelemetry();
+
+ await BrowserTestUtils.withNewTab(URL_IMG, async function (browser) {
+ setClipboardText("");
+ is(getTextFromClipboard(), "", "The copied text is empty.");
+ ok(
+ !getTelemetryScalars()["browser.ui.interaction.content_context"],
+ "No telemetry has been recorded yet."
+ );
+ is(
+ Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_API_PERFORMANCE")
+ .snapshot().sum,
+ 0,
+ "No histogram timing was recorded."
+ );
+
+ info("Right click image to show context menu.");
+ let popupShownPromise = BrowserTestUtils.waitForEvent(
+ document,
+ "popupshown"
+ );
+ await BrowserTestUtils.synthesizeMouseAtCenter(
+ "img",
+ { type: "contextmenu", button: 2 },
+ browser
+ );
+ await popupShownPromise;
+
+ info("Click context menu to copy the image text.");
+ document.getElementById("context-imagetext").doCommand();
+
+ info("Close the context menu.");
+ let contextMenu = document.getElementById("contentAreaContextMenu");
+ let popupHiddenPromise = BrowserTestUtils.waitForEvent(
+ contextMenu,
+ "popuphidden"
+ );
+ contextMenu.hidePopup();
+ await popupHiddenPromise;
+
+ info("Waiting for the dialog browser to be shown.");
+ const { contentDocument } = await BrowserTestUtils.waitForCondition(() =>
+ document.querySelector(".textRecognitionDialogFrame")
+ );
+
+ {
+ info("Check the scalar telemetry.");
+ const scalars = await BrowserTestUtils.waitForCondition(() =>
+ getTelemetryScalars()
+ );
+ const contentContext = scalars["browser.ui.interaction.content_context"];
+ ok(contentContext, "Opening the context menu was recorded.");
+
+ is(contentContext["context-imagetext"], 1, "Telemetry has been recorded");
+ }
+
+ info("Waiting for text results.");
+ const resultsHeader = contentDocument.querySelector(
+ "#text-recognition-header-results"
+ );
+ await BrowserTestUtils.waitForCondition(() => {
+ return resultsHeader.style.display !== "none";
+ });
+
+ const expectedResultText = "Mozilla\n\nFirefox";
+
+ {
+ info("Check the text results.");
+ const text = contentDocument.querySelector(".textRecognitionText");
+    is(text.children.length, 2, "Two pieces of text were found.");
+ const [p1, p2] = text.children;
+ is(p1.tagName, "P", "The children are paragraph tags.");
+ is(p2.tagName, "P", "The children are paragraph tags.");
+ is(p1.innerText, "Mozilla", "The first piece of text matches.");
+ is(p2.innerText, "Firefox", "The second piece of text matches.");
+
+ const clipboardText = getTextFromClipboard();
+ is(clipboardText, expectedResultText, "The copied text matches.");
+
+ is(
+ clipboardText,
+ text.innerText,
+ "The copied text and the text elements innerText match."
+ );
+ }
+
+ Assert.greater(
+ Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_API_PERFORMANCE")
+ .snapshot().sum,
+ 0,
+ "Text recognition API performance was recorded."
+ );
+
+ info("Close the dialog box.");
+ const close = contentDocument.querySelector("#text-recognition-close");
+ close.click();
+
+ is(
+ Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_TEXT_LENGTH")
+ .snapshot().sum,
+ expectedResultText.length,
+ "The length of the text was recorded."
+ );
+
+ info("Waiting for the dialog frame to close.");
+ await BrowserTestUtils.waitForCondition(
+ () => !document.querySelector(".textRecognitionDialogFrame")
+ );
+
+ info("Check for interaction telemetry.");
+ const timing = await BrowserTestUtils.waitForCondition(() => {
+ const { sum } = Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_INTERACTION_TIMING")
+ .snapshot();
+ if (sum > 0) {
+ return sum;
+ }
+ return false;
+ });
+ Assert.greater(timing, 0, "Interaction timing was measured.");
+
+ setClipboardText("");
+ clearTelemetry();
+ });
+});
diff --git a/browser/components/textrecognition/tests/browser/browser_textrecognition_no_result.js b/browser/components/textrecognition/tests/browser/browser_textrecognition_no_result.js
new file mode 100644
index 0000000000..2e7f1a5d49
--- /dev/null
+++ b/browser/components/textrecognition/tests/browser/browser_textrecognition_no_result.js
@@ -0,0 +1,99 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+"use strict";
+
+add_task(async function () {
+ const url =
+ "http://mochi.test:8888/browser/toolkit/content/tests/browser/doggy.png";
+
+ await SpecialPowers.pushPrefEnv({
+ set: [["dom.text-recognition.enabled", true]],
+ });
+
+ clearTelemetry();
+
+ await BrowserTestUtils.withNewTab(url, async function (browser) {
+ setClipboardText("");
+ is(getTextFromClipboard(), "", "The copied text is empty.");
+
+ info("Right click image to show context menu.");
+ let popupShownPromise = BrowserTestUtils.waitForEvent(
+ document,
+ "popupshown"
+ );
+ await BrowserTestUtils.synthesizeMouseAtCenter(
+ "img",
+ { type: "contextmenu", button: 2 },
+ browser
+ );
+ await popupShownPromise;
+
+ info("Click context menu to copy the image text.");
+ document.getElementById("context-imagetext").doCommand();
+
+ info("Close the context menu.");
+ let contextMenu = document.getElementById("contentAreaContextMenu");
+ let popupHiddenPromise = BrowserTestUtils.waitForEvent(
+ contextMenu,
+ "popuphidden"
+ );
+ contextMenu.hidePopup();
+ await popupHiddenPromise;
+
+ info("Waiting for the dialog browser to be shown.");
+ const { contentDocument } = await BrowserTestUtils.waitForCondition(() =>
+ document.querySelector(".textRecognitionDialogFrame")
+ );
+
+ info("Waiting for no results message.");
+ const noResultsHeader = contentDocument.querySelector(
+ "#text-recognition-header-no-results"
+ );
+ await BrowserTestUtils.waitForCondition(() => {
+ return noResultsHeader.style.display !== "none";
+ });
+
+ {
+ info("Check the scalar telemetry.");
+ const scalars = await BrowserTestUtils.waitForCondition(() =>
+ getTelemetryScalars()
+ );
+ const contentContext = scalars["browser.ui.interaction.content_context"];
+ ok(contentContext, "Opening the context menu was recorded.");
+
+ is(contentContext["context-imagetext"], 1, "Telemetry has been recorded");
+ }
+
+ const text = contentDocument.querySelector(".textRecognitionText");
+ is(text.children.length, 0, "No results are listed.");
+
+ Assert.greater(
+ Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_API_PERFORMANCE")
+ .snapshot().sum,
+ 0,
+ "Histogram timing was recorded even though there were no results."
+ );
+
+ is(
+ Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_INTERACTION_TIMING")
+ .snapshot().sum,
+ 0,
+ "No interaction timing has been measured yet."
+ );
+
+ info("Close the dialog box.");
+ const close = contentDocument.querySelector("#text-recognition-close");
+ close.click();
+
+ info("Waiting for the dialog frame to close.");
+ await BrowserTestUtils.waitForCondition(
+ () => !document.querySelector(".textRecognitionDialogFrame")
+ );
+
+ is(getTextFromClipboard(), "", "The copied text is still empty.");
+ });
+
+ clearTelemetry();
+});
diff --git a/browser/components/textrecognition/tests/browser/head.js b/browser/components/textrecognition/tests/browser/head.js
new file mode 100644
index 0000000000..a765d86501
--- /dev/null
+++ b/browser/components/textrecognition/tests/browser/head.js
@@ -0,0 +1,59 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/**
+ * @param {string} text
+ */
+function setClipboardText(text) {
+ const ClipboardHelper = Cc[
+ "@mozilla.org/widget/clipboardhelper;1"
+ ].getService(Ci.nsIClipboardHelper);
+ ClipboardHelper.copyString(text);
+}
+
+/**
+ * @returns {string}
+ */
+function getTextFromClipboard() {
+ const transferable = Cc["@mozilla.org/widget/transferable;1"].createInstance(
+ Ci.nsITransferable
+ );
+ transferable.init(window.docShell.QueryInterface(Ci.nsILoadContext));
+ transferable.addDataFlavor("text/plain");
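+  // Read the plain-text flavor from the global (system) clipboard into the
+  // transferable; getTransferData below unwraps the nsISupportsString result.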
+ Services.clipboard.getData(
+ transferable,
+ Services.clipboard.kGlobalClipboard,
+ SpecialPowers.wrap(window).browsingContext.currentWindowContext
+ );
+
+ const results = {};
+ transferable.getTransferData("text/plain", results);
+ return results.value.QueryInterface(Ci.nsISupportsString)?.data ?? "";
+}
+
+/**
+ * Returns the keyed scalars recorded in the parent process, clearing them.
+ */
+function getTelemetryScalars() {
+ const snapshot = Services.telemetry.getSnapshotForKeyedScalars(
+ "main",
+    true /* clear the scalars */
+ );
+
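+  // The snapshot is keyed by process; the scalars these tests check are
+  // recorded in the parent process.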
+ if (!snapshot.parent) {
+ return {};
+ }
+
+ return snapshot.parent;
+}
+
+function clearTelemetry() {
+ Services.telemetry.clearScalars();
+ Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_API_PERFORMANCE")
+ .clear();
+ Services.telemetry
+ .getHistogramById("TEXT_RECOGNITION_INTERACTION_TIMING")
+ .clear();
+ Services.telemetry.getHistogramById("TEXT_RECOGNITION_TEXT_LENGTH").clear();
+}
diff --git a/browser/components/textrecognition/tests/browser/image.png b/browser/components/textrecognition/tests/browser/image.png
new file mode 100644
index 0000000000..3faa11b221
--- /dev/null
+++ b/browser/components/textrecognition/tests/browser/image.png
Binary files differ