commit 2aa4a82499d4becd2284cdb482213d541b8804dd
tree   b80bf8bf13c3766139fbacc530efd0dd9d54394c /layout/tools
parent Initial commit.
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'layout/tools')
72 files changed, 13300 insertions, 0 deletions
diff --git a/layout/tools/layout-debug/LayoutDebugChild.jsm b/layout/tools/layout-debug/LayoutDebugChild.jsm new file mode 100644 index 0000000000..ba815a8cc7 --- /dev/null +++ b/layout/tools/layout-debug/LayoutDebugChild.jsm @@ -0,0 +1,34 @@ +/* vim: set ts=2 sw=2 sts=2 et tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict"; + +var EXPORTED_SYMBOLS = ["LayoutDebugChild"]; + +const NS_LAYOUT_DEBUGGINGTOOLS_CONTRACTID = + "@mozilla.org/layout-debug/layout-debuggingtools;1"; + +const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm"); + +class LayoutDebugChild extends JSWindowActorChild { + receiveMessage(msg) { + if (!this._debuggingTools) { + this._debuggingTools = Cc[ + NS_LAYOUT_DEBUGGINGTOOLS_CONTRACTID + ].createInstance(Ci.nsILayoutDebuggingTools); + this._debuggingTools.init(this.contentWindow); + } + switch (msg.name) { + case "LayoutDebug:Call": + let pid = Services.appinfo.processID; + dump(`[${pid} ${this.contentWindow.location}]\n`); + this._debuggingTools[msg.data.name](msg.data.arg); + dump("\n"); + break; + default: + throw `unknown message ${msg.name} sent to LayoutDebugChild`; + } + return Promise.resolve(true); + } +} diff --git a/layout/tools/layout-debug/moz.build b/layout/tools/layout-debug/moz.build new file mode 100644 index 0000000000..9f8cca5b38 --- /dev/null +++ b/layout/tools/layout-debug/moz.build @@ -0,0 +1,13 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +DIRS += ["src", "ui"] + +XPCSHELL_TESTS_MANIFESTS += ["tests/unit/xpcshell.ini"] + +BROWSER_CHROME_MANIFESTS += ["tests/browser/browser.ini"] + +FINAL_TARGET_FILES.actors += ["LayoutDebugChild.jsm"] diff --git a/layout/tools/layout-debug/src/components.conf b/layout/tools/layout-debug/src/components.conf new file mode 100644 index 0000000000..07ad3d155d --- /dev/null +++ b/layout/tools/layout-debug/src/components.conf @@ -0,0 +1,21 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +Classes = [ + { + 'cid': '{3f4c3b63-e640-4712-abbf-fff1301ceb60}', + 'contract_ids': ['@mozilla.org/layout-debug/layout-debuggingtools;1'], + 'type': 'nsLayoutDebuggingTools', + 'headers': ['/layout/tools/layout-debug/src/nsLayoutDebuggingTools.h'], + }, + { + 'cid': '{a8f52633-5ecf-424a-a147-47c322f7bc2e}', + 'contract_ids': ['@mozilla.org/commandlinehandler/general-startup;1?type=layoutdebug'], + 'type': 'nsLayoutDebugCLH', + 'headers': ['/layout/tools/layout-debug/src/nsLayoutDebugCLH.h'], + 'categories': {'command-line-handler': 'm-layoutdebug'}, + }, +] diff --git a/layout/tools/layout-debug/src/moz.build b/layout/tools/layout-debug/src/moz.build new file mode 100644 index 0000000000..37f9389fa3 --- /dev/null +++ b/layout/tools/layout-debug/src/moz.build @@ -0,0 +1,24 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +XPIDL_SOURCES += [ + "nsILayoutDebuggingTools.idl", +] + +XPIDL_MODULE = "layout_debug" + +UNIFIED_SOURCES += [ + "nsLayoutDebugCLH.cpp", + "nsLayoutDebuggingTools.cpp", +] + +XPCOM_MANIFESTS += [ + "components.conf", +] + +include("/ipc/chromium/chromium-config.mozbuild") + +FINAL_LIBRARY = "xul" diff --git a/layout/tools/layout-debug/src/nsILayoutDebuggingTools.idl b/layout/tools/layout-debug/src/nsILayoutDebuggingTools.idl new file mode 100644 index 0000000000..5ee0cc91b5 --- /dev/null +++ b/layout/tools/layout-debug/src/nsILayoutDebuggingTools.idl @@ -0,0 +1,46 @@ +/* -*- Mode: IDL; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +// vim:cindent:tabstop=4:expandtab:shiftwidth=4: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "nsISupports.idl" + +interface mozIDOMWindow; + +/** + * A series of hooks into non-IDL-ized layout code to allow all the + * layout debugging functions to be used from chrome. + */ + +[scriptable, uuid(f336d8d3-9721-4ad3-85d0-a7018c0a3383)] +interface nsILayoutDebuggingTools : nsISupports +{ + + /* + * Initialize debugger object to act on a docshell. + */ + void init(in mozIDOMWindow win); + + // Repaint the window. + void forceRefresh(); + + /* Toggle various debugging states */ + void setVisualDebugging(in boolean enabled); + void setVisualEventDebugging(in boolean enabled); + void setReflowCounts(in boolean enabled); + void setPagedMode(in boolean enabled); + + /* Run various tests. */ + void dumpContent(); + void dumpFrames(); + void dumpFramesInCSSPixels(); + void dumpTextRuns(); + void dumpViews(); + + void dumpStyleSheets(); + void dumpMatchedRules(); + void dumpComputedStyles(); + + void dumpReflowStats(); +}; diff --git a/layout/tools/layout-debug/src/nsLayoutDebugCLH.cpp b/layout/tools/layout-debug/src/nsLayoutDebugCLH.cpp new file mode 100644 index 0000000000..e6a0097c3b --- /dev/null +++ b/layout/tools/layout-debug/src/nsLayoutDebugCLH.cpp @@ -0,0 +1,188 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +// vim:cindent:tabstop=4:expandtab:shiftwidth=4: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "nsLayoutDebugCLH.h" +#include "mozIDOMWindow.h" +#include "nsArray.h" +#include "nsString.h" +#include "plstr.h" +#include "nsComponentManagerUtils.h" +#include "nsCOMPtr.h" +#include "nsIWindowWatcher.h" +#include "nsISupportsPrimitives.h" +#include "nsICommandLine.h" +#include "nsIURI.h" +#include "nsServiceManagerUtils.h" + +nsLayoutDebugCLH::nsLayoutDebugCLH() = default; + +nsLayoutDebugCLH::~nsLayoutDebugCLH() = default; + +NS_IMPL_ISUPPORTS(nsLayoutDebugCLH, ICOMMANDLINEHANDLER) + +static nsresult HandleFlagWithOptionalArgument(nsICommandLine* aCmdLine, + const nsAString& aName, + const nsAString& aDefaultValue, + nsAString& aValue, + bool& aFlagPresent) { + aValue.Truncate(); + aFlagPresent = false; + + nsresult rv; + int32_t idx; + + rv = aCmdLine->FindFlag(aName, false, &idx); + NS_ENSURE_SUCCESS(rv, rv); + if (idx < 0) return NS_OK; + + aFlagPresent = true; + + int32_t length; + aCmdLine->GetLength(&length); + + bool argPresent = false; + + if (idx + 1 < length) { + rv = aCmdLine->GetArgument(idx + 1, aValue); + NS_ENSURE_SUCCESS(rv, rv); + + if (!aValue.IsEmpty() && aValue.CharAt(0) == '-') { + aValue.Truncate(); + } else { + argPresent = true; + } + } + + if (!argPresent) { + aValue = aDefaultValue; + } + + return aCmdLine->RemoveArguments(idx, idx + argPresent); +} + +static nsresult HandleFlagWithOptionalArgument(nsICommandLine* aCmdLine, + const nsAString& aName, + double aDefaultValue, + double& aValue, + bool& aFlagPresent) { + nsresult rv; + nsString s; + + rv = + HandleFlagWithOptionalArgument(aCmdLine, aName, u"0"_ns, s, aFlagPresent); + NS_ENSURE_SUCCESS(rv, rv); + + if (!aFlagPresent) { + aValue = 0.0; + return NS_OK; + } + + aValue = s.ToDouble(&rv); + return rv; +} + +static nsresult AppendArg(nsIMutableArray* aArray, const nsAString& aString) { + nsCOMPtr<nsISupportsString> s = + do_CreateInstance(NS_SUPPORTS_STRING_CONTRACTID); + NS_ENSURE_TRUE(s, NS_ERROR_FAILURE); + s->SetData(aString); + return aArray->AppendElement(s); +} + +NS_IMETHODIMP +nsLayoutDebugCLH::Handle(nsICommandLine* aCmdLine) { + nsresult rv; + bool flagPresent; + + nsString url; + bool autoclose = false; + double delay = 0.0; + bool captureProfile = false; + nsString profileFilename; + bool paged = false; + + rv = HandleFlagWithOptionalArgument(aCmdLine, u"layoutdebug"_ns, + u"about:blank"_ns, url, flagPresent); + NS_ENSURE_SUCCESS(rv, rv); + + if (!flagPresent) { + return NS_OK; + } + + rv = HandleFlagWithOptionalArgument(aCmdLine, u"autoclose"_ns, 0.0, delay, + autoclose); + NS_ENSURE_SUCCESS(rv, rv); + + rv = HandleFlagWithOptionalArgument(aCmdLine, u"capture-profile"_ns, + u"profile.json"_ns, profileFilename, + captureProfile); + NS_ENSURE_SUCCESS(rv, rv); + + rv = aCmdLine->HandleFlag(u"paged"_ns, false, &paged); + NS_ENSURE_SUCCESS(rv, rv); + + nsCOMPtr<nsIMutableArray> argsArray = nsArray::Create(); + + nsCOMPtr<nsIURI> uri; + nsAutoCString resolvedSpec; + + rv = aCmdLine->ResolveURI(url, getter_AddRefs(uri)); + NS_ENSURE_SUCCESS(rv, rv); + + rv = uri->GetSpec(resolvedSpec); + NS_ENSURE_SUCCESS(rv, rv); + + rv = AppendArg(argsArray, NS_ConvertUTF8toUTF16(resolvedSpec)); + NS_ENSURE_SUCCESS(rv, rv); + + if (autoclose) { + nsString arg; + arg.AppendPrintf("autoclose=%f", delay); + + rv = AppendArg(argsArray, arg); + NS_ENSURE_SUCCESS(rv, rv); + } + + if (captureProfile) { + nsString arg; + arg.AppendLiteral("profile="); + arg.Append(profileFilename); + + rv = AppendArg(argsArray, arg); + NS_ENSURE_SUCCESS(rv, rv); + } + + if (paged) { + rv = AppendArg(argsArray, 
u"paged"_ns); + NS_ENSURE_SUCCESS(rv, rv); + } + + nsCOMPtr<nsIWindowWatcher> wwatch = + do_GetService(NS_WINDOWWATCHER_CONTRACTID); + NS_ENSURE_TRUE(wwatch, NS_ERROR_FAILURE); + + nsCOMPtr<mozIDOMWindowProxy> opened; + wwatch->OpenWindow( + nullptr, "chrome://layoutdebug/content/layoutdebug.xhtml"_ns, "_blank"_ns, + "chrome,dialog=no,all"_ns, argsArray, getter_AddRefs(opened)); + aCmdLine->SetPreventDefault(true); + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebugCLH::GetHelpInfo(nsACString& aResult) { + aResult.AssignLiteral( + " --layoutdebug [<url>] Start with Layout Debugger\n" + " --autoclose [<seconds>] Automatically close the Layout Debugger once\n" + " the page has loaded, after delaying the specified\n" + " number of seconds (which defaults to 0).\n" + " --capture-profile [<filename>] Capture a profile of the Layout\n" + " Debugger using the Gecko Profiler, and save the\n" + " profile to the specified file (which defaults to\n" + " profile.json).\n" + " --paged Layout the page in paginated mode.\n"); + return NS_OK; +} diff --git a/layout/tools/layout-debug/src/nsLayoutDebugCLH.h b/layout/tools/layout-debug/src/nsLayoutDebugCLH.h new file mode 100644 index 0000000000..aa99bf9eff --- /dev/null +++ b/layout/tools/layout-debug/src/nsLayoutDebugCLH.h @@ -0,0 +1,24 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +// vim:cindent:tabstop=4:expandtab:shiftwidth=4: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef nsLayoutDebugCLH_h_ +#define nsLayoutDebugCLH_h_ + +#include "nsICommandLineHandler.h" +#define ICOMMANDLINEHANDLER nsICommandLineHandler + +class nsLayoutDebugCLH : public ICOMMANDLINEHANDLER { + public: + nsLayoutDebugCLH(); + + NS_DECL_ISUPPORTS + NS_DECL_NSICOMMANDLINEHANDLER + + protected: + virtual ~nsLayoutDebugCLH(); +}; + +#endif /* !defined(nsLayoutDebugCLH_h_) */ diff --git a/layout/tools/layout-debug/src/nsLayoutDebuggingTools.cpp b/layout/tools/layout-debug/src/nsLayoutDebuggingTools.cpp new file mode 100644 index 0000000000..7251c704ba --- /dev/null +++ b/layout/tools/layout-debug/src/nsLayoutDebuggingTools.cpp @@ -0,0 +1,334 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +// vim:cindent:tabstop=4:expandtab:shiftwidth=4: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "nsLayoutDebuggingTools.h" + +#include "nsIDocShell.h" +#include "nsPIDOMWindow.h" +#include "nsIContentViewer.h" +#include "nsIPrintSettings.h" +#include "nsIPrintSettingsService.h" + +#include "nsAtom.h" +#include "nsQuickSort.h" + +#include "nsIContent.h" + +#include "nsViewManager.h" +#include "nsIFrame.h" + +#include "nsLayoutCID.h" + +#include "mozilla/dom/Document.h" +#include "mozilla/dom/Element.h" +#include "mozilla/Preferences.h" +#include "mozilla/PresShell.h" + +using namespace mozilla; +using mozilla::dom::Document; + +static already_AddRefed<nsIContentViewer> doc_viewer(nsIDocShell* aDocShell) { + if (!aDocShell) return nullptr; + nsCOMPtr<nsIContentViewer> result; + aDocShell->GetContentViewer(getter_AddRefs(result)); + return result.forget(); +} + +static PresShell* GetPresShell(nsIDocShell* aDocShell) { + nsCOMPtr<nsIContentViewer> cv = doc_viewer(aDocShell); + if (!cv) return nullptr; + return cv->GetPresShell(); +} + +static nsViewManager* view_manager(nsIDocShell* aDocShell) { + PresShell* presShell = GetPresShell(aDocShell); + if (!presShell) { + return nullptr; + } + return presShell->GetViewManager(); +} + +#ifdef DEBUG +static already_AddRefed<Document> document(nsIDocShell* aDocShell) { + nsCOMPtr<nsIContentViewer> cv(doc_viewer(aDocShell)); + if (!cv) return nullptr; + RefPtr<Document> result = cv->GetDocument(); + return result.forget(); +} +#endif + +nsLayoutDebuggingTools::nsLayoutDebuggingTools() { ForceRefresh(); } + +nsLayoutDebuggingTools::~nsLayoutDebuggingTools() = default; + +NS_IMPL_ISUPPORTS(nsLayoutDebuggingTools, nsILayoutDebuggingTools) + +NS_IMETHODIMP +nsLayoutDebuggingTools::Init(mozIDOMWindow* aWin) { + if (!Preferences::GetService()) { + return NS_ERROR_UNEXPECTED; + } + + { + if (!aWin) return NS_ERROR_UNEXPECTED; + auto* window = nsPIDOMWindowInner::From(aWin); + mDocShell = window->GetDocShell(); + } + NS_ENSURE_TRUE(mDocShell, NS_ERROR_UNEXPECTED); + + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::SetVisualDebugging(bool aVisualDebugging) { +#ifdef DEBUG + nsIFrame::ShowFrameBorders(aVisualDebugging); + ForceRefresh(); +#endif + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::SetVisualEventDebugging(bool aVisualEventDebugging) { +#ifdef DEBUG + nsIFrame::ShowEventTargetFrameBorder(aVisualEventDebugging); + ForceRefresh(); +#endif + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::SetReflowCounts(bool aShow) { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + if (PresShell* presShell = GetPresShell(mDocShell)) { +#ifdef MOZ_REFLOW_PERF + presShell->SetPaintFrameCount(aShow); +#else + printf("************************************************\n"); + printf("Sorry, you have not built with MOZ_REFLOW_PERF=1\n"); + printf("************************************************\n"); +#endif + } + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::SetPagedMode(bool aPagedMode) { + nsCOMPtr<nsIPrintSettingsService> printSettingsService = + do_GetService("@mozilla.org/gfx/printsettings-service;1"); + nsCOMPtr<nsIPrintSettings> printSettings; + + printSettingsService->GetNewPrintSettings(getter_AddRefs(printSettings)); + + // Use the same setup as setupPrintMode() in reftest-content.js. 
+ printSettings->SetPaperWidth(5); + printSettings->SetPaperHeight(3); + + nsIntMargin unwriteableMargin(0, 0, 0, 0); + printSettings->SetUnwriteableMarginInTwips(unwriteableMargin); + + printSettings->SetHeaderStrLeft(u""_ns); + printSettings->SetHeaderStrCenter(u""_ns); + printSettings->SetHeaderStrRight(u""_ns); + + printSettings->SetFooterStrLeft(u""_ns); + printSettings->SetFooterStrCenter(u""_ns); + printSettings->SetFooterStrRight(u""_ns); + + printSettings->SetPrintBGColors(true); + printSettings->SetPrintBGImages(true); + + nsCOMPtr<nsIContentViewer> contentViewer(doc_viewer(mDocShell)); + contentViewer->SetPageModeForTesting(aPagedMode, printSettings); + + ForceRefresh(); + return NS_OK; +} + +static void DumpContentRecur(nsIDocShell* aDocShell, FILE* out) { +#ifdef DEBUG + if (nullptr != aDocShell) { + fprintf(out, "docshell=%p \n", static_cast<void*>(aDocShell)); + RefPtr<Document> doc(document(aDocShell)); + if (doc) { + dom::Element* root = doc->GetRootElement(); + if (root) { + root->List(out); + } + } else { + fputs("no document\n", out); + } + } +#endif +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpContent() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + DumpContentRecur(mDocShell, stdout); + return NS_OK; +} + +static void DumpFramesRecur( + nsIDocShell* aDocShell, FILE* out, + nsIFrame::ListFlags aFlags = nsIFrame::ListFlags()) { + if (aFlags.contains(nsIFrame::ListFlag::DisplayInCSSPixels)) { + fprintf(out, "Frame tree in CSS pixels:\n"); + } else { + fprintf(out, "Frame tree in app units:\n"); + } + + fprintf(out, "docshell=%p \n", aDocShell); + if (PresShell* presShell = GetPresShell(aDocShell)) { + nsIFrame* root = presShell->GetRootFrame(); + if (root) { + root->List(out, "", aFlags); + } + } else { + fputs("null pres shell\n", out); + } +} + +static void DumpTextRunsRecur(nsIDocShell* aDocShell, FILE* out) { + fprintf(out, "Text runs:\n"); + + fprintf(out, "docshell=%p \n", aDocShell); + if (PresShell* presShell = GetPresShell(aDocShell)) { + nsIFrame* root = presShell->GetRootFrame(); + if (root) { + root->ListTextRuns(out); + } + } else { + fputs("null pres shell\n", out); + } +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpFrames() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + DumpFramesRecur(mDocShell, stdout); + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpFramesInCSSPixels() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + DumpFramesRecur(mDocShell, stdout, nsIFrame::ListFlag::DisplayInCSSPixels); + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpTextRuns() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + DumpTextRunsRecur(mDocShell, stdout); + return NS_OK; +} + +static void DumpViewsRecur(nsIDocShell* aDocShell, FILE* out) { +#ifdef DEBUG + fprintf(out, "docshell=%p \n", static_cast<void*>(aDocShell)); + RefPtr<nsViewManager> vm(view_manager(aDocShell)); + if (vm) { + nsView* root = vm->GetRootView(); + if (root) { + root->List(out); + } + } else { + fputs("null view manager\n", out); + } +#endif // DEBUG +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpViews() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + DumpViewsRecur(mDocShell, stdout); + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpStyleSheets() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); +#if defined(DEBUG) || defined(MOZ_LAYOUT_DEBUGGER) + FILE* out = stdout; + if (PresShell* presShell = GetPresShell(mDocShell)) { + presShell->ListStyleSheets(out); + } else { + fputs("null 
pres shell\n", out); + } +#endif + return NS_OK; +} + +NS_IMETHODIMP nsLayoutDebuggingTools::DumpMatchedRules() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + FILE* out = stdout; + if (PresShell* presShell = GetPresShell(mDocShell)) { + nsIFrame* root = presShell->GetRootFrame(); + if (root) { + root->ListWithMatchedRules(out); + } + } else { + fputs("null pres shell\n", out); + } + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpComputedStyles() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); +#ifdef DEBUG + FILE* out = stdout; + if (PresShell* presShell = GetPresShell(mDocShell)) { + presShell->ListComputedStyles(out); + } else { + fputs("null pres shell\n", out); + } +#endif + return NS_OK; +} + +NS_IMETHODIMP +nsLayoutDebuggingTools::DumpReflowStats() { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); +#ifdef DEBUG + if (RefPtr<PresShell> presShell = GetPresShell(mDocShell)) { +# ifdef MOZ_REFLOW_PERF + presShell->DumpReflows(); +# else + printf("************************************************\n"); + printf("Sorry, you have not built with MOZ_REFLOW_PERF=1\n"); + printf("************************************************\n"); +# endif + } +#endif + return NS_OK; +} + +nsresult nsLayoutDebuggingTools::ForceRefresh() { + RefPtr<nsViewManager> vm(view_manager(mDocShell)); + if (!vm) return NS_OK; + nsView* root = vm->GetRootView(); + if (root) { + vm->InvalidateView(root); + } + return NS_OK; +} + +nsresult nsLayoutDebuggingTools::SetBoolPrefAndRefresh(const char* aPrefName, + bool aNewVal) { + NS_ENSURE_TRUE(mDocShell, NS_ERROR_NOT_INITIALIZED); + + nsIPrefService* prefService = Preferences::GetService(); + NS_ENSURE_TRUE(prefService && aPrefName, NS_OK); + + Preferences::SetBool(aPrefName, aNewVal); + prefService->SavePrefFile(nullptr); + + ForceRefresh(); + + return NS_OK; +} diff --git a/layout/tools/layout-debug/src/nsLayoutDebuggingTools.h b/layout/tools/layout-debug/src/nsLayoutDebuggingTools.h new file mode 100644 index 0000000000..f6b37fecfb --- /dev/null +++ b/layout/tools/layout-debug/src/nsLayoutDebuggingTools.h @@ -0,0 +1,30 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +// vim:cindent:tabstop=4:expandtab:shiftwidth=4: +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef nsLayoutDebuggingTools_h +#define nsLayoutDebuggingTools_h + +#include "nsILayoutDebuggingTools.h" +#include "nsIDocShell.h" +#include "nsCOMPtr.h" + +class nsLayoutDebuggingTools : public nsILayoutDebuggingTools { + public: + nsLayoutDebuggingTools(); + + NS_DECL_ISUPPORTS + + NS_DECL_NSILAYOUTDEBUGGINGTOOLS + + protected: + virtual ~nsLayoutDebuggingTools(); + + nsresult SetBoolPrefAndRefresh(const char* aPrefName, bool aNewValue); + + nsCOMPtr<nsIDocShell> mDocShell; +}; + +#endif diff --git a/layout/tools/layout-debug/tests/browser/browser.ini b/layout/tools/layout-debug/tests/browser/browser.ini new file mode 100644 index 0000000000..b732158dc3 --- /dev/null +++ b/layout/tools/layout-debug/tests/browser/browser.ini @@ -0,0 +1,2 @@ +[browser_openLayoutDebug.js] +run-if = debug diff --git a/layout/tools/layout-debug/tests/browser/browser_openLayoutDebug.js b/layout/tools/layout-debug/tests/browser/browser_openLayoutDebug.js new file mode 100644 index 0000000000..4402f36a7f --- /dev/null +++ b/layout/tools/layout-debug/tests/browser/browser_openLayoutDebug.js @@ -0,0 +1,41 @@ +"use strict"; + +/* +When run locally this won't test whether the files are packaged and available +in a distributed build unless `./mach mochitest --appname dist` is used +(after `./mach package`) +*/ + +function test() { + waitForExplicitFinish(); + + const windowListener = { + onOpenWindow(win) { + info("Observed window open"); + + const domWindow = win.docShell.domWindow; + waitForFocus(() => { + is( + domWindow.location.href, + "chrome://layoutdebug/content/layoutdebug.xhtml", + "Window location is correct" + ); + domWindow.close(); + }, domWindow); + }, + + onCloseWindow() { + info("Observed window closed"); + Services.wm.removeListener(this); + finish(); + }, + }; + Services.wm.addListener(windowListener); + + const menuitem = document.getElementById("menu_layout_debugger"); + ok(menuitem, "Menuitem present"); + if (menuitem) { + // open the debugger window + menuitem.click(); + } +} diff --git a/layout/tools/layout-debug/tests/unit/test_componentsRegistered.js b/layout/tools/layout-debug/tests/unit/test_componentsRegistered.js new file mode 100644 index 0000000000..eaf1783cb7 --- /dev/null +++ b/layout/tools/layout-debug/tests/unit/test_componentsRegistered.js @@ -0,0 +1,6 @@ +function run_test() { + Assert.ok("@mozilla.org/layout-debug/layout-debuggingtools;1" in Cc); + Assert.ok( + "@mozilla.org/commandlinehandler/general-startup;1?type=layoutdebug" in Cc + ); +} diff --git a/layout/tools/layout-debug/tests/unit/xpcshell.ini b/layout/tools/layout-debug/tests/unit/xpcshell.ini new file mode 100644 index 0000000000..46035d5f13 --- /dev/null +++ b/layout/tools/layout-debug/tests/unit/xpcshell.ini @@ -0,0 +1,4 @@ +[DEFAULT] +head = + +[test_componentsRegistered.js] diff --git a/layout/tools/layout-debug/ui/content/layoutdebug.js b/layout/tools/layout-debug/ui/content/layoutdebug.js new file mode 100644 index 0000000000..214e97f5d8 --- /dev/null +++ b/layout/tools/layout-debug/ui/content/layoutdebug.js @@ -0,0 +1,522 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +var gArgs; +var gBrowser; +var gURLBar; +var gDebugger; +var gMultiProcessBrowser = window.docShell.QueryInterface(Ci.nsILoadContext) + .useRemoteTabs; +var gFissionBrowser = window.docShell.QueryInterface(Ci.nsILoadContext) + .useRemoteSubframes; +var gWritingProfile = false; +var gWrittenProfile = false; + +const { E10SUtils } = ChromeUtils.import( + "resource://gre/modules/E10SUtils.jsm" +); +const { OS } = ChromeUtils.import("resource://gre/modules/osfile.jsm"); +const { Preferences } = ChromeUtils.import( + "resource://gre/modules/Preferences.jsm" +); +const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm"); + +const FEATURES = { + paintFlashing: "nglayout.debug.paint_flashing", + paintDumping: "nglayout.debug.paint_dumping", + invalidateDumping: "nglayout.debug.invalidate_dumping", + eventDumping: "nglayout.debug.event_dumping", + motionEventDumping: "nglayout.debug.motion_event_dumping", + crossingEventDumping: "nglayout.debug.crossing_event_dumping", + reflowCounts: "layout.reflow.showframecounts", +}; + +const COMMANDS = [ + "dumpContent", + "dumpFrames", + "dumpFramesInCSSPixels", + "dumpTextRuns", + "dumpViews", + "dumpStyleSheets", + "dumpMatchedRules", + "dumpComputedStyles", + "dumpReflowStats", +]; + +class Debugger { + constructor() { + this._flags = new Map(); + this._visualDebugging = false; + this._visualEventDebugging = false; + this._pagedMode = false; + this._attached = false; + + for (let [name, pref] of Object.entries(FEATURES)) { + this._flags.set(name, !!Preferences.get(pref, false)); + } + + this.attachBrowser(); + } + + detachBrowser() { + if (!this._attached) { + return; + } + gBrowser.removeProgressListener(this._progressListener); + this._progressListener = null; + this._attached = false; + } + + attachBrowser() { + if (this._attached) { + throw "already attached"; + } + this._progressListener = new nsLDBBrowserContentListener(); + gBrowser.addProgressListener(this._progressListener); + this._attached = true; + } + + dumpProcessIDs() { + let parentPid = Services.appinfo.processID; + let [contentPid, ...framePids] = E10SUtils.getBrowserPids( + gBrowser, + gFissionBrowser + ); + + dump(`Parent pid: ${parentPid}\n`); + dump(`Content pid: ${contentPid || "-"}\n`); + if (gFissionBrowser) { + dump(`Subframe pids: ${framePids.length ? 
framePids.join(", ") : "-"}\n`); + } + } + + get visualDebugging() { + return this._visualDebugging; + } + + set visualDebugging(v) { + v = !!v; + this._visualDebugging = v; + this._sendMessage("setVisualDebugging", v); + } + + get visualEventDebugging() { + return this._visualEventDebugging; + } + + set visualEventDebugging(v) { + v = !!v; + this._visualEventDebugging = v; + this._sendMessage("setVisualEventDebugging", v); + } + + get pagedMode() { + return this._pagedMode; + } + + set pagedMode(v) { + v = !!v; + this._pagedMode = v; + this.setPagedMode(this._pagedMode); + } + + setPagedMode(v) { + this._sendMessage("setPagedMode", v); + } + + async _sendMessage(name, arg) { + await this._sendMessageTo(gBrowser.browsingContext, name, arg); + } + + async _sendMessageTo(context, name, arg) { + let global = context.currentWindowGlobal; + if (global) { + await global + .getActor("LayoutDebug") + .sendQuery("LayoutDebug:Call", { name, arg }); + } + + for (let c of context.children) { + await this._sendMessageTo(c, name, arg); + } + } +} + +for (let [name, pref] of Object.entries(FEATURES)) { + Object.defineProperty(Debugger.prototype, name, { + get: function() { + return this._flags.get(name); + }, + set: function(v) { + v = !!v; + Preferences.set(pref, v); + this._flags.set(name, v); + // XXX PresShell should watch for this pref change itself. + if (name == "reflowCounts") { + this._sendMessage("setReflowCounts", v); + } + this._sendMessage("forceRefresh"); + }, + }); +} + +for (let name of COMMANDS) { + Debugger.prototype[name] = function() { + this._sendMessage(name); + }; +} + +function autoCloseIfNeeded(aCrash) { + if (!gArgs.autoclose) { + return; + } + setTimeout(function() { + if (aCrash) { + let browser = document.createXULElement("browser"); + // FIXME(emilio): we could use gBrowser if we bothered get the process switches right. + // + // Doesn't seem worth for this particular case. + document.documentElement.appendChild(browser); + browser.loadURI("about:crashparent", { + triggeringPrincipal: Services.scriptSecurityManager.getSystemPrincipal(), + }); + return; + } + if (gArgs.profile && Services.profiler) { + dumpProfile(); + } else { + Services.startup.quit(Ci.nsIAppStartup.eAttemptQuit); + } + }, gArgs.delay * 1000); +} + +function nsLDBBrowserContentListener() { + this.init(); +} + +nsLDBBrowserContentListener.prototype = { + init: function() { + this.mStatusText = document.getElementById("status-text"); + this.mForwardButton = document.getElementById("forward-button"); + this.mBackButton = document.getElementById("back-button"); + this.mStopButton = document.getElementById("stop-button"); + }, + + QueryInterface: ChromeUtils.generateQI([ + "nsIWebProgressListener", + "nsISupportsWeakReference", + ]), + + // nsIWebProgressListener implementation + onStateChange: function(aWebProgress, aRequest, aStateFlags, aStatus) { + if (!(aStateFlags & Ci.nsIWebProgressListener.STATE_IS_NETWORK)) { + return; + } + + if (aStateFlags & Ci.nsIWebProgressListener.STATE_START) { + this.setButtonEnabled(this.mStopButton, true); + this.setButtonEnabled(this.mForwardButton, gBrowser.canGoForward); + this.setButtonEnabled(this.mBackButton, gBrowser.canGoBack); + this.mStatusText.value = "loading..."; + this.mLoading = true; + } else if (aStateFlags & Ci.nsIWebProgressListener.STATE_STOP) { + this.setButtonEnabled(this.mStopButton, false); + this.mStatusText.value = gURLBar.value + " loaded"; + this.mLoading = false; + + if (gDebugger.pagedMode) { + // Change to paged mode after the page is loaded. 
+ gDebugger.setPagedMode(true); + } + + if (gBrowser.currentURI.spec != "about:blank") { + // We check for about:blank just to avoid one or two STATE_STOP + // notifications that occur before the loadURI() call completes. + // This does mean that --autoclose doesn't work when the URL on + // the command line is about:blank (or not specified), but that's + // not a big deal. + autoCloseIfNeeded(false); + } + } + }, + + onProgressChange: function( + aWebProgress, + aRequest, + aCurSelfProgress, + aMaxSelfProgress, + aCurTotalProgress, + aMaxTotalProgress + ) {}, + + onLocationChange: function(aWebProgress, aRequest, aLocation, aFlags) { + gURLBar.value = aLocation.spec; + this.setButtonEnabled(this.mForwardButton, gBrowser.canGoForward); + this.setButtonEnabled(this.mBackButton, gBrowser.canGoBack); + }, + + onStatusChange: function(aWebProgress, aRequest, aStatus, aMessage) { + this.mStatusText.value = aMessage; + }, + + onSecurityChange: function(aWebProgress, aRequest, aState) {}, + + onContentBlockingEvent: function(aWebProgress, aRequest, aEvent) {}, + + // non-interface methods + setButtonEnabled: function(aButtonElement, aEnabled) { + if (aEnabled) { + aButtonElement.removeAttribute("disabled"); + } else { + aButtonElement.setAttribute("disabled", "true"); + } + }, + + mStatusText: null, + mForwardButton: null, + mBackButton: null, + mStopButton: null, + + mLoading: false, +}; + +function parseArguments() { + let args = { + url: null, + autoclose: false, + delay: 0, + paged: false, + }; + if (window.arguments) { + args.url = window.arguments[0]; + for (let i = 1; i < window.arguments.length; ++i) { + let arg = window.arguments[i]; + if (/^autoclose=(.*)$/.test(arg)) { + args.autoclose = true; + args.delay = +RegExp.$1; + } else if (/^profile=(.*)$/.test(arg)) { + args.profile = true; + args.profileFilename = RegExp.$1; + } else if (/^paged$/.test(arg)) { + args.paged = true; + } else { + throw `Unknown option ${arg}`; + } + } + } + return args; +} + +const TabCrashedObserver = { + observe(subject, topic, data) { + switch (topic) { + case "ipc:content-shutdown": + subject.QueryInterface(Ci.nsIPropertyBag2); + if (!subject.get("abnormal")) { + return; + } + break; + case "oop-frameloader-crashed": + break; + } + autoCloseIfNeeded(true); + }, +}; + +function OnLDBLoad() { + gBrowser = document.getElementById("browser"); + gURLBar = document.getElementById("urlbar"); + + try { + ChromeUtils.registerWindowActor("LayoutDebug", { + child: { + moduleURI: "resource://gre/actors/LayoutDebugChild.jsm", + }, + allFrames: true, + }); + } catch (ex) { + // Only register the actor once. + } + + gDebugger = new Debugger(); + + Services.obs.addObserver(TabCrashedObserver, "ipc:content-shutdown"); + Services.obs.addObserver(TabCrashedObserver, "oop-frameloader-crashed"); + + // Pretend slightly to be like a normal browser, so that SessionStore.jsm + // doesn't get too confused. The effect is that we'll never switch process + // type when navigating, and for layout debugging purposes we don't bother + // about getting that right. 
+ gBrowser.getTabForBrowser = function() { + return null; + }; + + gArgs = parseArguments(); + + if (gArgs.profile) { + if (Services.profiler) { + let env = Cc["@mozilla.org/process/environment;1"].getService( + Ci.nsIEnvironment + ); + if (!env.exists("MOZ_PROFILER_SYMBOLICATE")) { + dump( + "Warning: MOZ_PROFILER_SYMBOLICATE environment variable not set; " + + "profile will not be symbolicated.\n" + ); + } + Services.profiler.StartProfiler( + 1 << 20, + 1, + ["default"], + ["GeckoMain", "Compositor", "Renderer", "RenderBackend", "StyleThread"] + ); + if (gArgs.url) { + // Switch to the right kind of content process, and wait a bit so that + // the profiler has had a chance to attach to it. + updateBrowserRemotenessByURL(gArgs.url); + setTimeout(() => loadURI(gArgs.url), 3000); + return; + } + } else { + dump("Cannot profile Layout Debugger; profiler was not compiled in.\n"); + } + } + + // The URI is not loaded yet. Just set the internal variable. + gDebugger._pagedMode = gArgs.paged; + + if (gArgs.url) { + loadURI(gArgs.url); + } + + // Some command line arguments may toggle menu items. Call this after + // processing all the arguments. + checkPersistentMenus(); +} + +function checkPersistentMenu(item) { + var menuitem = document.getElementById("menu_" + item); + menuitem.setAttribute("checked", gDebugger[item]); +} + +function checkPersistentMenus() { + // Restore the toggles that are stored in prefs. + checkPersistentMenu("paintFlashing"); + checkPersistentMenu("paintDumping"); + checkPersistentMenu("invalidateDumping"); + checkPersistentMenu("eventDumping"); + checkPersistentMenu("motionEventDumping"); + checkPersistentMenu("crossingEventDumping"); + checkPersistentMenu("reflowCounts"); + checkPersistentMenu("pagedMode"); +} + +function dumpProfile() { + gWritingProfile = true; + + let cwd = Services.dirsvc.get("CurWorkD", Ci.nsIFile).path; + let filename = OS.Path.join(cwd, gArgs.profileFilename); + + dump(`Writing profile to ${filename}...\n`); + + Services.profiler.dumpProfileToFileAsync(filename).then(function() { + gWritingProfile = false; + gWrittenProfile = true; + dump(`done\n`); + Services.startup.quit(Ci.nsIAppStartup.eAttemptQuit); + }); +} + +function OnLDBBeforeUnload(event) { + if (gArgs.profile && Services.profiler) { + if (gWrittenProfile) { + // We've finished writing the profile. Allow the window to close. + return; + } + + event.preventDefault(); + + if (gWritingProfile) { + // Wait for the profile to finish being written out. + return; + } + + // The dumpProfileToFileAsync call can block for a while, so run it off a + // timeout to avoid annoying the window manager if we're doing this in + // response to clicking the window's close button. 
+ setTimeout(dumpProfile, 0); + } +} + +function OnLDBUnload() { + gDebugger.detachBrowser(); + Services.obs.removeObserver(TabCrashedObserver, "ipc:content-shutdown"); + Services.obs.removeObserver(TabCrashedObserver, "oop-frameloader-crashed"); +} + +function toggle(menuitem) { + // trim the initial "menu_" + var feature = menuitem.id.substring(5); + gDebugger[feature] = menuitem.getAttribute("checked") == "true"; +} + +function openFile() { + var fp = Cc["@mozilla.org/filepicker;1"].createInstance(Ci.nsIFilePicker); + fp.init(window, "Select a File", Ci.nsIFilePicker.modeOpen); + fp.appendFilters(Ci.nsIFilePicker.filterHTML | Ci.nsIFilePicker.filterAll); + fp.open(rv => { + if ( + rv == Ci.nsIFilePicker.returnOK && + fp.fileURL.spec && + fp.fileURL.spec.length > 0 + ) { + loadURI(fp.fileURL.spec); + } + }); +} + +// A simplified version of the function with the same name in tabbrowser.js. +function updateBrowserRemotenessByURL(aURL) { + let oa = E10SUtils.predictOriginAttributes({ browser: gBrowser }); + let remoteType = E10SUtils.getRemoteTypeForURI( + aURL, + gMultiProcessBrowser, + gFissionBrowser, + gBrowser.remoteType, + gBrowser.currentURI, + oa + ); + if (gBrowser.remoteType != remoteType) { + gDebugger.detachBrowser(); + if (remoteType == E10SUtils.NOT_REMOTE) { + gBrowser.removeAttribute("remote"); + gBrowser.removeAttribute("remoteType"); + } else { + gBrowser.setAttribute("remote", "true"); + gBrowser.setAttribute("remoteType", remoteType); + } + gBrowser.changeRemoteness({ remoteType }); + gBrowser.construct(); + gDebugger.attachBrowser(); + } +} + +function loadURI(aURL) { + // We don't bother trying to handle navigations within the browser to new URLs + // that should be loaded in a different process. + updateBrowserRemotenessByURL(aURL); + gBrowser.loadURI(aURL, { + triggeringPrincipal: Services.scriptSecurityManager.getSystemPrincipal(), + }); +} + +function focusURLBar() { + gURLBar.focus(); + gURLBar.select(); +} + +function go() { + loadURI(gURLBar.value); + gBrowser.focus(); +} diff --git a/layout/tools/layout-debug/ui/content/layoutdebug.xhtml b/layout/tools/layout-debug/ui/content/layoutdebug.xhtml new file mode 100644 index 0000000000..4908f6237e --- /dev/null +++ b/layout/tools/layout-debug/ui/content/layoutdebug.xhtml @@ -0,0 +1,137 @@ +<?xml version="1.0"?> +<!-- vim: set shiftwidth=2 tabstop=8 expandtab : + - + - + - This Source Code Form is subject to the terms of the Mozilla Public + - License, v. 2.0. If a copy of the MPL was not distributed with this + - file, You can obtain one at http://mozilla.org/MPL/2.0/. --> + +<!DOCTYPE window SYSTEM "chrome://layoutdebug/locale/layoutdebug.dtd"> + +<?xml-stylesheet href="chrome://global/skin/global.css" type="text/css" ?> + +<!-- + + NOTE: Because this window is used for layout regression tests, the + persist attribute should never be used on anything. Otherwise there + is a risk of running baseline and verify runs under different + conditions. 
+ +--> + +<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul" + xmlns:html="http://www.w3.org/1999/xhtml" + id="main-window" + align="stretch" + title="&ldb.MainWindow.title;" + titlemodifier="&ldb.MainWindow.title;" + titlemenuseparator=" — " + windowtype="mozapp:layoutdebug" + onload="OnLDBLoad();" + onclose="OnLDBBeforeUnload(event);" + onunload="OnLDBUnload();" + width="1024" height="768" + screenX="4" screenY="4" + > + + <script src="chrome://layoutdebug/content/layoutdebug.js"/> + + <commandset id="tasksCommands"> + <command id="cmd_open" oncommand="openFile();"/> + <command id="cmd_close" oncommand="window.close();"/> + <command id="cmd_focusURLBar" oncommand="focusURLBar();"/> + <command id="cmd_reload" oncommand="gBrowser.reload();"/> + <command id="cmd_dumpContent" oncommand="gDebugger.dumpContent();"/> + <command id="cmd_dumpFrames" oncommand="gDebugger.dumpFrames();"/> + <command id="cmd_dumpFramesInCSSPixels" oncommand="gDebugger.dumpFramesInCSSPixels();"/> + <command id="cmd_dumpTextRuns" oncommand="gDebugger.dumpTextRuns();"/> + </commandset> + + <keyset id="tasksKeys"> + <key id="key_open" key="O" modifiers="accel" command="cmd_open"/> + <key id="key_close" key="W" modifiers="accel" command="cmd_close"/> + <key id="key_focusURLBar" key="L" modifiers="accel" command="cmd_focusURLBar"/> + <key id="key_reload" key="R" modifiers="accel" command="cmd_reload"/> + <key id="key_dumpContent" key="D" modifiers="accel" command="cmd_dumpContent"/> <!-- "D" means DOM tree --> + <key id="key_dumpFrames" key="F" modifiers="accel" command="cmd_dumpFrames"/> + <key id="key_dumpFramesInCSSPixels" key="P" modifiers="accel" command="cmd_dumpFramesInCSSPixels"/> + <key id="key_dumpTextRuns" key="T" modifiers="accel" command="cmd_dumpTextRuns"/> + </keyset> + + <vbox flex="1"> + + <toolbox> + <toolbar type="menubar"> + <menubar id="main-menubar"> + <menu id="menu_file" label="File" accesskey="F"> + <menupopup id="menu_FilePopup"> + <menuitem id="menu_open" label="Open File…" accesskey="O" key="key_open" command="cmd_open"/> + <menuitem id="menu_close" label="Close" accesskey="C" key="key_close" command="cmd_close"/> + </menupopup> + </menu> + <menu label="&ldb.ToggleMenu.label;" + accesskey="&ldb.ToggleMenu.accesskey;"> + <menupopup> + <menuitem type="checkbox" id="menu_visualDebugging" label="&ldb.visualDebugging.label;" accesskey="&ldb.visualDebugging.accesskey;" oncommand="toggle(this);" /> + <menuitem type="checkbox" id="menu_visualEventDebugging" label="&ldb.visualEventDebugging.label;" accesskey="&ldb.visualEventDebugging.accesskey;" oncommand="toggle(this);" /> + <menuseparator /> + <menuitem type="checkbox" id="menu_paintFlashing" label="&ldb.paintFlashing.label;" accesskey="&ldb.paintFlashing.accesskey;" oncommand="toggle(this);" /> + <menuitem type="checkbox" id="menu_paintDumping" label="&ldb.paintDumping.label;" accesskey="&ldb.paintDumping.accesskey;" oncommand="toggle(this);" /> + <menuitem type="checkbox" id="menu_invalidateDumping" label="&ldb.invalidateDumping.label;" accesskey="&ldb.invalidateDumping.accesskey;" oncommand="toggle(this);" /> + <menuseparator /> + <menuitem type="checkbox" id="menu_eventDumping" label="&ldb.eventDumping.label;" accesskey="&ldb.eventDumping.accesskey;" oncommand="toggle(this);" /> + <menuitem type="checkbox" id="menu_motionEventDumping" label="&ldb.motionEventDumping.label;" accesskey="&ldb.motionEventDumping.accesskey;" oncommand="toggle(this);" /> + <menuitem type="checkbox" id="menu_crossingEventDumping" 
label="&ldb.crossingEventDumping.label;" accesskey="&ldb.crossingEventDumping.accesskey;" oncommand="toggle(this);" /> + <menuseparator /> + <menuitem type="checkbox" id="menu_reflowCounts" label="&ldb.reflowCounts.label;" accesskey="&ldb.reflowCounts.accesskey;" oncommand="toggle(this);" /> + <menuitem type="checkbox" id="menu_pagedMode" label="&ldb.pagedMode.label;" accesskey="&ldb.pagedMode.accesskey;" oncommand="toggle(this);" /> + </menupopup> + </menu> + <menu label="&ldb.DumpMenu.label;" + accesskey="&ldb.DumpMenu.accesskey;"> + <menupopup> + <menuitem id="menu_processIDs" label="Process IDs" accesskey="P" oncommand="gDebugger.dumpProcessIDs();" /> + <menuitem id="menu_dumpContent" label="&ldb.dumpContent.label;" accesskey="&ldb.dumpContent.accesskey;" oncommand="gDebugger.dumpContent();" /> + <menuitem id="menu_dumpFrames" label="&ldb.dumpFrames.label;" accesskey="&ldb.dumpFrames.accesskey;" oncommand="gDebugger.dumpFrames();" /> + <menuitem id="menu_dumpFramesInCSSPixels" label="&ldb.dumpFramesInCSSPixels.label;" accesskey="&ldb.dumpFramesInCSSPixels.accesskey;" oncommand="gDebugger.dumpFramesInCSSPixels();" /> + <menuitem id="menu_dumpTextRuns" label="&ldb.dumpTextRuns.label;" accesskey="&ldb.dumpTextRuns.accesskey;" oncommand="gDebugger.dumpTextRuns();" /> + <menuitem id="menu_dumpViews" label="&ldb.dumpViews.label;" accesskey="&ldb.dumpViews.accesskey;" oncommand="gDebugger.dumpViews();" /> + <menuseparator /> + <menuitem id="menu_dumpStyleSheets" label="&ldb.dumpStyleSheets.label;" accesskey="&ldb.dumpStyleSheets.accesskey;" oncommand="gDebugger.dumpStyleSheets();" /> + <menuitem id="menu_dumpMatchedRules" label="&ldb.dumpMatchedRules.label;" accesskey="&ldb.dumpMatchedRules.accesskey;" oncommand="gDebugger.dumpMatchedRules();" /> + <menuitem id="menu_dumpComputedStyles" label="&ldb.dumpComputedStyles.label;" accesskey="&ldb.dumpComputedStyles.accesskey;" oncommand="gDebugger.dumpComputedStyles();" /> + <menuseparator /> + <menuitem id="menu_dumpReflowStats" label="&ldb.dumpReflowStats.label;" accesskey="&ldb.dumpReflowStats.accesskey;" oncommand="gDebugger.dumpReflowStats();" /> + </menupopup> + </menu> + <menu id="tasksMenu"/> + <menu id="windowMenu"/> + <menu id="menu_Help"/> + </menubar> + </toolbar> + + <toolbar> + <toolbarbutton id="back-button" class="toolbarbutton-1" + label="&ldb.BackButton.label;" + oncommand="gBrowser.goBack();" /> + <toolbarbutton id="forward-button" class="toolbarbutton-1" + label="&ldb.ForwardButton.label;" + oncommand="gBrowser.goForward();" /> + <toolbarbutton id="reload-button" class="toolbarbutton-1" + label="&ldb.ReloadButton.label;" + command="cmd_reload" /> + <toolbarbutton id="stop-button" class="toolbarbutton-1" + label="&ldb.StopButton.label;" + oncommand="gBrowser.stop();" /> + + <html:input id="urlbar" style="-moz-box-flex: 1;" onkeypress="if (event.keyCode == 13) go();"/> + </toolbar> + </toolbox> + + <browser flex="1" id="browser" type="content" primary="true" remote="true" remoteType="web"/> + + <hbox> + <description id="status-text" value="" /> + </hbox> + </vbox> +</window> diff --git a/layout/tools/layout-debug/ui/jar.mn b/layout/tools/layout-debug/ui/jar.mn new file mode 100644 index 0000000000..f18ce29291 --- /dev/null +++ b/layout/tools/layout-debug/ui/jar.mn @@ -0,0 +1,10 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +layoutdebug.jar: +% content layoutdebug %content/layoutdebug/ +% locale layoutdebug en-US %locale/en-US/layoutdebug/ + content/layoutdebug/layoutdebug.xhtml (content/layoutdebug.xhtml) + content/layoutdebug/layoutdebug.js (content/layoutdebug.js) + locale/en-US/layoutdebug/layoutdebug.dtd (locale/en-US/layoutdebug.dtd) diff --git a/layout/tools/layout-debug/ui/locale/en-US/layoutdebug.dtd b/layout/tools/layout-debug/ui/locale/en-US/layoutdebug.dtd new file mode 100644 index 0000000000..62864a389d --- /dev/null +++ b/layout/tools/layout-debug/ui/locale/en-US/layoutdebug.dtd @@ -0,0 +1,59 @@ +<!-- + - + - This Source Code Form is subject to the terms of the Mozilla Public + - License, v. 2.0. If a copy of the MPL was not distributed with this + - file, You can obtain one at http://mozilla.org/MPL/2.0/. --> + +<!ENTITY ldb.MainWindow.title "Layout Debugger"> + +<!ENTITY ldb.BackButton.label "Back"> +<!ENTITY ldb.ForwardButton.label "Forward"> +<!ENTITY ldb.ReloadButton.label "Reload"> +<!ENTITY ldb.StopButton.label "Stop"> + + +<!ENTITY ldb.ToggleMenu.label "Toggle"> +<!ENTITY ldb.ToggleMenu.accesskey "T"> + +<!ENTITY ldb.visualDebugging.label "Visual Debugging"> +<!ENTITY ldb.visualDebugging.accesskey "V"> +<!ENTITY ldb.visualEventDebugging.label "Visual Event Debugging"> +<!ENTITY ldb.visualEventDebugging.accesskey "E"> +<!ENTITY ldb.paintFlashing.label "Paint Flashing"> +<!ENTITY ldb.paintFlashing.accesskey "F"> +<!ENTITY ldb.paintDumping.label "Paint Dumping"> +<!ENTITY ldb.paintDumping.accesskey "P"> +<!ENTITY ldb.invalidateDumping.label "Invalidate Dumping"> +<!ENTITY ldb.invalidateDumping.accesskey "I"> +<!ENTITY ldb.eventDumping.label "Event Dumping"> +<!ENTITY ldb.eventDumping.accesskey "E"> +<!ENTITY ldb.motionEventDumping.label "Motion Event Dumping"> +<!ENTITY ldb.motionEventDumping.accesskey "M"> +<!ENTITY ldb.crossingEventDumping.label "Crossing Event Dumping"> +<!ENTITY ldb.crossingEventDumping.accesskey "C"> +<!ENTITY ldb.reflowCounts.label "Reflow Counts"> +<!ENTITY ldb.reflowCounts.accesskey "R"> +<!ENTITY ldb.pagedMode.label "Paged Mode"> +<!ENTITY ldb.pagedMode.accesskey "G"> + +<!ENTITY ldb.DumpMenu.label "Dump"> +<!ENTITY ldb.DumpMenu.accesskey "D"> + +<!ENTITY ldb.dumpContent.label "Content"> +<!ENTITY ldb.dumpContent.accesskey "C"> +<!ENTITY ldb.dumpFrames.label "Frames (app units)"> +<!ENTITY ldb.dumpFrames.accesskey "F"> +<!ENTITY ldb.dumpFramesInCSSPixels.label "Frames (CSS pixels)"> +<!ENTITY ldb.dumpFramesInCSSPixels.accesskey "P"> +<!ENTITY ldb.dumpTextRuns.label "Text Runs"> +<!ENTITY ldb.dumpTextRuns.accesskey "T"> +<!ENTITY ldb.dumpViews.label "Views and Widgets"> +<!ENTITY ldb.dumpViews.accesskey "V"> +<!ENTITY ldb.dumpStyleSheets.label "Style Sheets"> +<!ENTITY ldb.dumpStyleSheets.accesskey "S"> +<!ENTITY ldb.dumpMatchedRules.label "Matched CSS Rules"> +<!ENTITY ldb.dumpMatchedRules.accesskey "M"> +<!ENTITY ldb.dumpComputedStyles.label "Style Contexts"> +<!ENTITY ldb.dumpComputedStyles.accesskey "x"> +<!ENTITY ldb.dumpReflowStats.label "Reflow Statistics"> +<!ENTITY ldb.dumpReflowStats.accesskey "R"> diff --git a/layout/tools/layout-debug/ui/moz.build b/layout/tools/layout-debug/ui/moz.build new file mode 100644 index 0000000000..d988c0ff9b --- /dev/null +++ b/layout/tools/layout-debug/ui/moz.build @@ -0,0 +1,7 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +JAR_MANIFESTS += ["jar.mn"] diff --git a/layout/tools/recording/RecordingCmdLine.jsm b/layout/tools/recording/RecordingCmdLine.jsm new file mode 100644 index 0000000000..c22e6504e9 --- /dev/null +++ b/layout/tools/recording/RecordingCmdLine.jsm @@ -0,0 +1,69 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +function RecordingCmdLineHandler() {} +RecordingCmdLineHandler.prototype = { + /* nsISupports */ + QueryInterface: ChromeUtils.generateQI(["nsICommandLineHandler"]), + + /* nsICommandLineHandler */ + handle: function handler_handle(cmdLine) { + var args = {}; + args.wrappedJSObject = args; + try { + var uristr = cmdLine.handleFlagWithParam("recording", false); + if (uristr == null) { + return; + } + try { + args.uri = cmdLine.resolveURI(uristr).spec; + } catch (e) { + return; + } + } catch (e) { + cmdLine.handleFlag("recording", true); + } + + /** + * Manipulate preferences by adding to the *default* branch. Adding + * to the default branch means the changes we make won't get written + * back to user preferences. + * + * We want to do this here rather than in reftest.js because it's + * important to set the recording pref before the platform Init gets + * called. + */ + var prefs = Cc["@mozilla.org/preferences-service;1"].getService( + Ci.nsIPrefService + ); + var branch = prefs.getDefaultBranch(""); + + try { + var outputstr = cmdLine.handleFlagWithParam("recording-output", false); + if (outputstr != null) { + branch.setCharPref("gfx.2d.recordingfile", outputstr); + } + } catch (e) {} + + branch.setBoolPref("gfx.2d.recording", true); + + var wwatch = Cc["@mozilla.org/embedcomp/window-watcher;1"].getService( + Ci.nsIWindowWatcher + ); + wwatch.openWindow( + null, + "chrome://recording/content/recording.xhtml", + "_blank", + "chrome,dialog=no,all", + args + ); + cmdLine.preventDefault = true; + }, + + helpInfo: + " --recording <file> Record drawing for a given URL.\n" + + " --recording-output <file> Specify destination file for a drawing recording.\n", +}; + +var EXPORTED_SYMBOLS = ["RecordingCmdLineHandler"]; diff --git a/layout/tools/recording/components.conf b/layout/tools/recording/components.conf new file mode 100644 index 0000000000..31f191623a --- /dev/null +++ b/layout/tools/recording/components.conf @@ -0,0 +1,15 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +Classes = [ + { + 'cid': '{86FB70EC-90FF-45AD-A1C1-F77D3C1184E9}', + 'contract_ids': ['@mozilla.org/commandlinehandler/general-startup;1?type=recording'], + 'jsm': 'resource://gre/modules/RecordingCmdLine.jsm', + 'constructor': 'RecordingCmdLineHandler', + 'categories': {'command-line-handler': 'm-recording'}, + }, +] diff --git a/layout/tools/recording/jar.mn b/layout/tools/recording/jar.mn new file mode 100644 index 0000000000..bc66f5ceb6 --- /dev/null +++ b/layout/tools/recording/jar.mn @@ -0,0 +1,4 @@ +recording.jar: +% content recording %content/ + content/recording.xhtml (recording.xhtml) + content/recording.js (recording.js) diff --git a/layout/tools/recording/moz.build b/layout/tools/recording/moz.build new file mode 100644 index 0000000000..5a278090f0 --- /dev/null +++ b/layout/tools/recording/moz.build @@ -0,0 +1,15 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +EXTRA_JS_MODULES += [ + "RecordingCmdLine.jsm", +] + +XPCOM_MANIFESTS += [ + "components.conf", +] + +JAR_MANIFESTS += ["jar.mn"] diff --git a/layout/tools/recording/recording.js b/layout/tools/recording/recording.js new file mode 100644 index 0000000000..5c663a6479 --- /dev/null +++ b/layout/tools/recording/recording.js @@ -0,0 +1,52 @@ +/* -*- indent-tabs-mode: nil; js-indent-level: 4 -*- / +/* vim: set shiftwidth=4 tabstop=8 autoindent cindent expandtab: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +const NS_GFXINFO_CONTRACTID = "@mozilla.org/gfx/info;1"; + +var gContainingWindow = null; + +var gBrowser; + +function OnDocumentLoad(evt) { + if ( + evt.target != gBrowser.contentDocument || + evt.target.location == "about:blank" + ) { + return; + } + gBrowser.removeEventListener("load", OnDocumentLoad, true); + gContainingWindow.close(); +} + +this.OnRecordingLoad = function OnRecordingLoad(win) { + if (win === undefined || win == null) { + win = window; + } + if (gContainingWindow == null && win != null) { + gContainingWindow = win; + } + + gBrowser = gContainingWindow.document.getElementById("browser"); + + var gfxInfo = + NS_GFXINFO_CONTRACTID in Cc && + Cc[NS_GFXINFO_CONTRACTID].getService(Ci.nsIGfxInfo); + var info = gfxInfo.getInfo(); + dump(info.AzureContentBackend + "\n"); + if (info.AzureContentBackend == "none") { + alert("Page recordings may only be made with Azure content enabled."); + gContainingWindow.close(); + return; + } + + gBrowser.addEventListener("load", OnDocumentLoad, true); + + var args = window.arguments[0].wrappedJSObject; + + gBrowser.loadURI(args.uri, { + triggeringPrincipal: Services.scriptSecurityManager.createNullPrincipal({}), + }); +}; diff --git a/layout/tools/recording/recording.xhtml b/layout/tools/recording/recording.xhtml new file mode 100644 index 0000000000..040f5a212d --- /dev/null +++ b/layout/tools/recording/recording.xhtml @@ -0,0 +1,13 @@ +<!-- vim: set shiftwidth=4 tabstop=8 autoindent expandtab: --> +<!-- This Source Code Form is subject to the terms of the Mozilla Public + - License, v. 2.0. If a copy of the MPL was not distributed with this + - file, You can obtain one at http://mozilla.org/MPL/2.0/. 
--> +<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul" + id="recording-window" + hidechrome="true" + onload="OnRecordingLoad();" + style="background:white; overflow:hidden; width:800px; height:600px;" + > + <script src="recording.js" /> + <browser id="browser" type="content" primary="true" style="min-width: 1024px; min-height: 768px; max-width: 1024px; max-height: 768px"/> +</window> diff --git a/layout/tools/reftest/README.txt b/layout/tools/reftest/README.txt new file mode 100644 index 0000000000..aac9587085 --- /dev/null +++ b/layout/tools/reftest/README.txt @@ -0,0 +1,717 @@ +Layout Engine Visual Tests (reftest) +L. David Baron <dbaron@dbaron.org>, Mozilla Corporation +July 19, 2006 + +This code is designed to run tests of Mozilla's layout engine. These +tests consist of an HTML (or other format) file along with a reference +in the same format. The tests are run based on a manifest file, and for +each test, PASS or FAIL is reported, and UNEXPECTED is reported if the +result (PASS or FAIL) was not the expected result noted in the manifest. + +Images of the display of both tests are captured, and most test types +involve comparing these images (e.g., test types == or !=) to determine +whether the test passed. The captures of the tests are taken in a +viewport that is 800 pixels wide and 1000 pixels tall, so any content +outside that area will be ignored (except for any scrollbars that are +displayed). Ideally, however, tests should be written so that they fit +within 600x600, since we may in the future want to switch to 600x600 to +match http://lists.w3.org/Archives/Public/www-style/2012Sep/0562.html . + +Why this way? +============= + +Writing HTML tests where the reference rendering is also in HTML is +harder than simply writing bits of HTML that can be regression-tested by +comparing the rendering of an older build to that of a newer build +(perhaps using stored reference images from the older build). However, +comparing across time has major disadvantages: + + * Comparisons across time either require two runs for every test, or + they require stored reference images appropriate for the platform and + configuration (often limiting testing to a very specific + configuration). + + * Comparisons across time may fail due to expected changes, for + example, changes in the default style sheet for HTML, changes in the + appearance of form controls, or changes in default preferences like + default font size or default colors. + +Using tests for which the pass criteria were explicitly chosen allows +running tests at any time to see whether they still pass. + +Manifest Format +=============== + +The test manifest format is a plain text file. A line starting with a +"#" is a comment. Lines may be commented using whitespace followed by +a "#" and the comment. Each non-blank line (after removal of comments) +must be one of the following: + +1. Inclusion of another manifest + + <skip-type>* include <relative_path> + + <skip-type> is one of the skip or skip-if items (see their definitions + in <failure-type> below). If any of the skip types evaluate to true (i.e. + they are a plain "skip" or they are a "skip-if" with a condition that + evaluates to true), then the include statement is skipped. Otherwise, + reftests in the specified manifest are included in the set of reftests + that are run. + +2. A test item + + [ <failure-type> | <preference> ]* [<http>] <type> <url> <url_ref> + + where + + a. 
<failure-type> (optional) is one of the following: + + fails The test passes if the images of the two renderings DO NOT + meet the conditions specified in the <type>. + + fails-if(condition) If the condition is met, the test passes if the + images of the two renderings DO NOT meet the + conditions of <type>. If the condition is not met, + the test passes if the conditions of <type> are met. + + needs-focus The test fails or times out if the reftest window is not + focused. + + random The results of the test are random and therefore not to be + considered in the output. + + random-if(condition) The results of the test are random if a given + condition is met. + + silentfail This test may fail silently, and if that happens it should + count as if the test passed. This is useful for cases where + silent failure is the intended behavior (for example, in + an out of memory situation in JavaScript, we stop running + the script silently and immediately, in hopes of reclaiming + enough memory to keep the browser functioning). + + silentfail-if(condition) This test may fail silently if the condition + is met. + + skip This test should not be run. This is useful when a test fails in a + catastrophic way, such as crashing or hanging the browser. Using + 'skip' is preferred to simply commenting out the test because we + want to report the test failure at the end of the test run. + + skip-if(condition) If the condition is met, the test is not run. This is + useful if, for example, the test crashes only on a + particular platform (i.e. it allows us to get test + coverage on the other platforms). + + slow The test may take a long time to run, so run it if slow tests are + either enabled or not disabled (test manifest interpreters may + choose whether or not to run such tests by default). + + slow-if(condition) If the condition is met, the test is treated as if + 'slow' had been specified. This is useful for tests + which are slow only on particular platforms (e.g. a + test which exercised out-of-memory behavior might be + fast on a 32-bit system but inordinately slow on a + 64-bit system). + + fuzzy(minDiff-maxDiff,minPixelCount-maxPixelCount) + This allows a test to pass if the pixel value differences are between + minDiff and maxDiff, inclusive, and the total number of different + pixels is between minPixelCount and maxPixelCount, inclusive. + It can also be used with '!=' to ensure that the difference is + outside the specified interval. Note that with '!=' tests the + minimum bounds of the ranges must be zero. + + Fuzzy tends to be used for two different sorts of cases. The main + case is tests that are expected to be equal, but actually fail in a + minor way (e.g., an antialiasing difference), and we want to ensure + that the test doesn't regress further so we don't want to mark the + test as failing. For these cases, test annotations should be the + tightest bounds possible: if the behavior is entirely deterministic + this means a range like fuzzy(1-1,8-8), and if at all possible, the + ranges should not include 0. In cases where the test only sometimes + fails, this unfortunately requires using 0 in both ranges, which + means that we won't get reports of an unexpected pass if the problem + is fixed (allowing us to remove the fuzzy() annotation and expect + the test to pass from then on). 
+ + The second case where fuzzy is used is tests that are supposed + to allow some amount of variability (i.e., tests where the + specification allows variability such that we can't assert + that all pixels are the same). Such tests should generally be + avoided (for example, by covering up the pixels that can vary + with another element), but when they are needed, the ranges in + the fuzzy() annotation should generally include 0. + + fuzzy-if(condition,minDiff-maxDiff,minPixelCount-maxPixelCount) + If the condition is met, the test is treated as if 'fuzzy' had been + specified. This is useful if there are differences on particular + platforms. See fuzzy() above. + + require-or(cond1&&cond2&&...,fallback) + Require some particular setup be performed or environmental + condition(s) made true (eg setting debug mode) before the test + is run. If any condition is unknown, unimplemented, or fails, + revert to the fallback failure-type. + Example: require-or(debugMode,skip) + + asserts(count) + Loading the test and reference is known to assert exactly + count times. + NOTE: An asserts() notation with a non-zero count or maxCount + suppresses use of a cached canvas for the test with the + annotation. However, if later occurrences of the same test + are not annotated, they will use the cached canvas + (potentially from the load that asserted). This allows + repeated use of the same test or reference to be annotated + correctly (which may be particularly useful when the uses are + in different subdirectories that can be tested independently), + but does not force them to be, nor does it force suppression + of caching for a common reference when it is the test that + asserts. + + asserts(minCount-maxCount) + Loading the test and reference is known to assert between + minCount and maxCount times, inclusive. + NOTE: See above regarding canvas caching. + + asserts-if(condition,count) + asserts-if(condition,minCount-maxCount) + Same as above, but only if condition is true. + + noautofuzz + Disables the autofuzzing behaviour hard-coded in the reftest harness + for specific platform configurations. The autofuzzing is intended to + compensate for inherent nondeterminism that results in intermittently + fuzzy results (with small amounts of fuzz) across many/all tests on + a given platform. Specifying 'noautofuzz' on the test will disable + the autofuzzing for that test and require an exact match. + + Conditions are JavaScript expressions *without spaces* in them. + They are evaluated in a sandbox in which a limited set of + variables are defined. See the BuildConditionSandbox function in + layout/tools/reftest.js for details. + + Examples of using conditions: + fails-if(winWidget) == test reference + asserts-if(cocoaWidget,2) load crashtest + + b. <preference> (optional) is a string of the form + + pref(<name>,<value>) + test-pref(<name>,<value>) + ref-pref(<name>,<value>) + + where <name> is the name of a preference setting, as seen in + about:config, and <value> is the value to which this preference should + be set. <value> may be a boolean (true/false), an integer, or a + quoted string *without spaces*, according to the type of the preference. + + The preference will be set to the specified value prior to + rendering the test and/or reference canvases (pref() applies to + both, test-pref() only to the test, and ref-pref() only to the + reference), and will be restored afterwards so that following + tests are not affected. 
Note that this feature is only useful for + "live" preferences that take effect immediately, without requiring + a browser restart. + + c. <http>, if present, is one of the strings (sans quotes) "HTTP" or + "HTTP(..)" or "HTTP(../..)" or "HTTP(../../..)", etc. , indicating that + the test should be run over an HTTP server because it requires certain + HTTP headers or a particular HTTP status. (Don't use this if your test + doesn't require this functionality, because it unnecessarily slows down + the test.) + + With "HTTP", HTTP tests have the restriction that any resource an HTTP + test accesses must be accessed using a relative URL, and the test and + the resource must be within the directory containing the reftest + manifest that describes the test (or within a descendant directory). + The variants "HTTP(..)", etc., can be used to relax this restriction by + allowing resources in the parent directory, etc. + + To modify the HTTP status or headers of a resource named FOO, create a + sibling file named FOO^headers^ with the following contents: + + [<http-status>] + <http-header>* + + <http-status> A line of the form "HTTP ###[ <description>]", where + ### indicates the desired HTTP status and <description> + indicates a desired HTTP status description, if any. + If this line is omitted, the default is "HTTP 200 OK". + <http-header> A line in standard HTTP header line format, i.e. + "Field-Name: field-value". You may not repeat the use + of a Field-Name and must coalesce such headers together, + and each header must be specified on a single line, but + otherwise the format exactly matches that from HTTP + itself. + + HTTP tests may also incorporate SJS files. SJS files provide similar + functionality to CGI scripts, in that the response they produce can be + dependent on properties of the incoming request. Currently these + properties are restricted to method type and headers, but eventually + it should be possible to examine data in the body of the request as + well when computing the generated response. An SJS file is a JavaScript + file with a .sjs extension which defines a global |handleRequest| + function (called every time that file is loaded during reftests) in this + format: + + function handleRequest(request, response) + { + response.setStatusLine(request.httpVersion, 200, "OK"); + + // You *probably* want this, or else you'll get bitten if you run + // reftest multiple times with the same profile. + response.setHeader("Cache-Control", "no-cache"); + + response.write("any ASCII data you want"); + + var outputStream = response.bodyOutputStream; + // ...anything else you want to do, synchronously... + } + + For more details on exactly which functions and properties are available + on request/response in handleRequest, see the nsIHttpRe(quest|sponse) + definitions in <netwerk/test/httpserver/nsIHttpServer.idl>. + + d. <type> is one of the following: + + == The test passes if the images of the two renderings are the + SAME. + != The test passes if the images of the two renderings are + DIFFERENT. + load The test passes unconditionally if the page loads. url_ref + must be omitted, and the test cannot be marked as fails or + random. (Used to test for crashes, hangs, assertions, and + leaks.) + script The loaded page records the test's pass or failure status + in a JavaScript data structure accessible through the following + API. + + getTestCases() returns an array of test result objects + representing the results of the tests performed by the page. 
+ + Each test result object has two methods: + + testPassed() returns true if the test result object passed, + otherwise it returns false. + + testDescription() returns a string describing the test + result. + + url_ref must be omitted. The test may be marked as fails or + random. (Used to test the JavaScript Engine.) + print The test passes if the printouts (as PDF) of the two renderings + are the SAME by applying the following comparisons: + + - The number of pages generated for both printouts must match. + - The text content of both printouts must match (rasterized text + does not match real text). + + You can specify a print range by setting the reftest-print-range + attribute on the document element. Example: + + <html reftest-print-range="2-3"> + ... + + The following example would lead to a single page print: + + <html reftest-print-range="2-2"> + ... + + You can also print selected elements only: + + <html reftest-print-range="selection"> + ... + + Make sure to include code in your test that actually selects something. + + Future additions to the set of comparisons might include: + + - Matching the paper size + - Validating printed headers and footers + - Testing (fuzzy) position of elements + - Testing specific print related CSS properties + - ... + + The main difference between 'print' and '=='/'!=' reftests is that + 'print' makes us compare the structure of print results (by parsing + the output PDF) rather than taking screenshots and comparing pixel + values. This allows us to test for common printing related issues + like text being rasterized when it shouldn't. This difference in + behavior is also why this is its own reftest operator, rather than + a flavor of ==/!=. It would be somewhat misleading to list these + print reftests as ==/!=, because they don't actually check for + pixel matching. + + See the chapter about Pagination Tests if you are looking for testing + layout in pagination mode. + + e. <url> is either a relative file path or an absolute URL for the + test page + + f. <url_ref> is either a relative file path or an absolute URL for + the reference page + + The only difference between <url> and <url_ref> is that results of + the test are reported using <url> only. + +3. Specification of a url prefix + + url-prefix <string> + + <string> will be prepended to relative <url> and <url_ref> for all following + test items in the manifest. + + <string> will not be prepended to the relative path when including another + manifest, e.g. include <relative_path>. + + <string> will not be prepended to any <url> or <url_ref> matching the pattern + /^\w+:/. This will prevent the prefix from being applied to any absolute url + containing a protocol such as data:, about:, or http:. + + While the typical use of url-prefix is expected to be as the first line of + a manifest, it is legal to use it anywhere in a manifest. Subsequent uses + of url-prefix overwrite any existing values. + +4. Specification of defaults + + defaults [<failure-type> | <preference> | <http>] + + where <failure-type>, <preference> and <http> are defined above. + + The default settings will be used for all following test items in the manifest. + Any test specific settings will override the defaults, just as later items + within a line override earlier ones. + + A defaults line with no settings will reset the defaults to be empty. 
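   For illustration, a hypothetical manifest fragment combining url-prefix and
   defaults might look like this (the file names and the preference shown are
   made up for this sketch; a real manifest would use its own):

     url-prefix bugs/
     defaults pref(layout.example.feature.enabled,true)   # hypothetical pref
     == 100000-1.html 100000-1-ref.html
     fuzzy-if(winWidget,0-2,0-40) == 100000-2.html 100000-2-ref.html
     defaults                        # reset: later items get no default pref
     skip-if(winWidget) load 100000-3.html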
+ + As with url-prefix, defaults will often be used at the start of a manifest file + so that it applies to all test items, but it is legal for defaults to appear + anywhere in the manifest. A subsequent defaults will reset any previous default + settings and overwrite them with the new settings. + + It is invalid to set non-skip defaults before an include line, just as it is + invalid to specify non-skip settings directly on the include line itself. If a + manifest needs to use both defaults and include, the include should appear + before the defaults. If it's important to specify the include later on in the + manifest, a blank defaults line directly preceding the include can be used to + reset the defaults. + +This test manifest format could be used by other harnesses, such as ones +that do not depend on XUL, or even ones testing other layout engines. + +Running Tests +============= + +(If you're not using a DEBUG build, first set browser.dom.window.dump.enabled, +devtools.console.stdout.chrome and devtools.console.stdout.content to true (in +about:config, in the profile you'll be using to run the tests). +Create the option as a new boolean if it doesn't exist already. If you skip +this step you won't get any output in the terminal.) + +At some point in the future there will hopefully be a cleaner way to do +this. For now, go to your object directory, and run (perhaps using +MOZ_NO_REMOTE=1 or the -profile <directory> option) + +./firefox -reftest /path/to/srcdir/mozilla/layout/reftests/reftest.list > reftest.out + +and then search/grep reftest.out for "UNEXPECTED". + +There are two scripts provided to convert the reftest.out to HTML. +clean-reftest-output.pl converts reftest.out into simple HTML, stripping +lines from the log that aren't relevant. reftest-to-html.pl converts +the output into html that makes it easier to visually check for +failures. + +Testable Areas +============== + +This framework is capable of testing many areas of the layout engine. +It is particularly well-suited to testing dynamic change handling (by +comparison to the static end-result as a reference) and incremental +layout (comparison of a script-interrupted layout to one that was not). +However, it is also possible to write tests for many other things that +can be described in terms of equivalence, for example: + + * CSS cascading could be tested by comparing the result of a + complicated set of style rules that makes a word green to <span + style="color:green">word</span>. + + * <canvas> compositing operators could be tested by comparing the + result of drawing using canvas to a block-level element with the + desired color as a CSS background-color. + + * CSS counters could be tested by comparing the text output by counters + with a page containing the text written out + + * complex margin collapsing could be tested by comparing the complex + case to a case where the margin is written out, or where the margin + space is created by an element with 'height' and transparent + background + +When it is not possible to test by equivalence, it may be possible to +test by non-equivalence. For example, testing justification in cases +with more than two words, or more than three different words, is +difficult. However, it is simple to test that justified text is at +least displayed differently from left-, center-, or right-aligned text. + +Writing Tests +============= + +When writing tests for this framework, it is important for the test to +depend only on behaviors that are known to be correct and permanent. 
+For example, tests should not depend on default font sizes, default +margins of the body element, the default style sheet used for HTML, the +default appearance of form controls, or anything else that can be +avoided. + +In general, the best way to achieve this is to make the test and the +reference identical in as many aspects as possible. For example: + + Good test markup: + <div style="color:green"><table><tr><td><span>green + </span></td></tr></table></div> + + Good reference markup: + <div><table><tr><td><span style="color:green">green + </span></td></tr></table></div> + + BAD reference markup: + <!-- 3px matches the default cellspacing and cellpadding --> + <div style="color:green; padding: 3px">green + </div> + + BAD test markup: + <!-- span doesn't change the positioning, so skip it --> + <div style="color:green"><table><tr><td>green + </td></tr></table></div> + +Asynchronous Tests: class="reftest-wait" +======================================== + +Normally reftest takes a snapshot of the given markup's rendering right +after the load event fires for content. If your test needs to postpone +the moment the snapshot is taken, it should make sure a class +'reftest-wait' is on the root element by the moment the load event +fires. The easiest way to do this is to put it in the markup, e.g.: + <html class="reftest-wait"> + +When your test is ready, you should remove this class from the root +element, for example using this code: + document.documentElement.className = ""; + + +Note that in layout tests it is often enough to trigger layout using + document.body.offsetWidth // HTML example + +When possible, you should use this technique instead of making your +test async. + +Invalidation Tests: MozReftestInvalidate Event +============================================== + +When a test (or reference) uses reftest-wait, reftest tracks invalidation +via MozAfterPaint and updates the test image in the same way that +a regular window would be repainted. Therefore it is possible to test +invalidation-related bugs by setting up initial content and then +dynamically modifying it before removing reftest-wait. However, it is +important to get the timing of these dynamic modifications right so that +the test doesn't accidentally pass because a full repaint of the window +was already pending. To help with this, reftest fires one MozReftestInvalidate +event at the document root element for a reftest-wait test when it is safe to +make changes that should test invalidation. The event bubbles up to the +document and window so you can set listeners there too. For example, + +function doTest() { + document.body.style.border = ""; + document.documentElement.removeAttribute('class'); +} +document.addEventListener("MozReftestInvalidate", doTest, false); + +Painting Tests: class="reftest-no-paint" +======================================== + +If an element shouldn't be painted, set the class "reftest-no-paint" on it +when doing an invalidation test. Causing a repaint in your +MozReftestInvalidate handler (for example, by changing the body's background +colour) will accurately test whether the element is painted. + +Display List Tests: class="reftest-[no-]display-list" +===================================================== + +These classes work similarly to reftest-no-paint, but check if the element has +display items created or not. 
These classes are useful for checking the behaviour +of retained display lists, where the display list is incrementally updated by +changes, rather than thrown out and rebuilt from scratch. + +Opaque Layer Tests: class="reftest-opaque-layer" +================================================ + +If an element should be assigned to a PaintedLayer that's opaque, set the class +"reftest-opaque-layer" on it. This checks whether the layer is opaque during +the last paint of the test, and it works whether your test is an invalidation +test or not. In order to pass the test, the element has to have a primary +frame, and that frame's display items must all be assigned to a single painted +layer and no other layers, so it can't be used on elements that create stacking +contexts (active or inactive). + +Layerization Tests: reftest-assigned-layer="layer-name" +======================================================= + +If two elements should be assigned to the same PaintedLayer, choose any string +value as the layer name and set the attribute reftest-assigned-layer="yourname" +on both elements. Reftest will check whether all elements with the same +reftest-assigned-layer value share the same layer. It will also test whether +elements with different reftest-assigned-layer values are assigned to different +layers. +The same restrictions as with class="reftest-opaque-layer" apply: All elements +must have a primary frame, and that frame's display items must all be assigned +to the same PaintedLayer and no other layers. If these requirements are not +met, the test will fail. + +Snapshot The Whole Window: class="reftest-snapshot-all" +======================================================= + +In a reftest-wait test, to disable testing of invalidation and force the final +snapshot to be taken of the whole window, set the "reftest-snapshot-all" +class on the root element. + +Avoid triggering flushes: class="reftest-no-flush" +================================================== + +The reftest harness normally triggers flushes by calling +getBoundingClientRect on the root element. If the root element of the +test has class="reftest-no-flush", it doesn't do this. + +This is useful for testing animations on the compositor thread, since +the flushing will cause a main thread style update. + +Zoom Tests: reftest-zoom="<float>" +================================== + +When the root element of a test has a "reftest-zoom" attribute, that zoom +factor is applied when rendering the test. The corresponds to the desktop "full +zoom" style zoom. The reftest document will be 800 device pixels wide by 1000 +device pixels high. The reftest harness assumes that the CSS pixel dimensions +are 800/zoom and 1000/zoom. For best results therefore, choose zoom factors +that do not require rounding when we calculate the number of appunits per +device pixel; i.e. the zoom factor should divide 60, so 60/zoom is an integer. + +Setting Scrollport Size: reftest-scrollport-w/h="<int>" +======================================================= + +If either of the "reftest-scrollport-w" and "reftest-scrollport-h" attributes on +the root element are non-zero, sets the scroll-position-clamping scroll-port +size to the given size in CSS pixels. This does not affect the size of the +snapshot that is taken. 
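To make the preceding attribute descriptions concrete, a minimal hypothetical
test file might look like the following (the values are illustrative only; per
the zoom notes above, pick a zoom factor that divides 60):

  <!DOCTYPE html>
  <html reftest-zoom="2.0" reftest-scrollport-w="400" reftest-scrollport-h="500">
    <body style="background: lime">zoomed, clamped content</body>
  </html>

With reftest-zoom="2.0" the snapshot is still 800x1000 device pixels, but the
harness assumes CSS pixel dimensions of 400x500 (800/zoom by 1000/zoom).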
+ +Setting Resolution: reftest-resolution="<float>" +================================================ + +If the root element of a test has a "reftest-resolution" attribute, the page +is rendered with the specified resolution (as if the user pinch-zoomed in +to that scale). Note that the difference between reftest-async-zoom and +reftest-resolution is that reftest-async-zoom only applies the scale in +the compositor, while reftest-resolution causes the page to be painted at that +resolution. This attribute can be used together with initial-scale in the meta +viewport tag; in such cases initial-scale is applied first, then +reftest-resolution changes the scale. + +This attribute requires the pref apz.allow_zooming=true to have an effect. + +Setting Async Scroll Mode: reftest-async-scroll attribute +========================================================= + +If the "reftest-async-scroll" attribute is set on the root element, we try to +enable async scrolling and zooming for the document. This is unsupported in many +configurations. + +Setting Displayport Dimensions: reftest-displayport-x/y/w/h="<int>" +=================================================================== + +If any of the "reftest-displayport-x", "reftest-displayport-y", +"reftest-displayport-w" and "reftest-displayport-h" attributes on the root +element are nonzero, sets the displayport dimensions to the given bounds in +CSS pixels. This does not affect the size of the snapshot that is taken. + +When the "reftest-async-scroll" attribute is set on the root element, *all* +elements in the document are checked for "reftest-displayport-x/y/w/h" and have +displayports set on them when those attributes are present. + +Testing Async Scrolling: reftest-async-scroll-x/y="<int>" +========================================================= + +When the "reftest-async-scroll" attribute is set on the root element, for any +element where either the "reftest-async-scroll-x" or "reftest-async-scroll-y" +attributes are nonzero, at the end of the test take the snapshot with the given +offset (in CSS pixels) added to the async scroll offset. + +Testing Async Zooming: reftest-async-zoom="<float>" +========================================================= + +When the "reftest-async-zoom" attribute is present on the root element, then at +the end of the test take the snapshot with the given async zoom on top of any +existing zoom. Content is not re-rendered at the new zoom level. This +corresponds to the mobile "pinch zoom" style of zoom. This is unsupported +in many configurations, and any tests using this will probably want to have +pref(apz.allow_zooming,true) on them. + +Pagination Tests: class="reftest-paged" +======================================= + +Now that the patch for bug 374050 has landed +(https://bugzilla.mozilla.org/show_bug.cgi?id=374050), it is possible to +create reftests that run in a paginated context. + +The page size used is 5in wide and 3in tall (with the default half-inch +margins). This is to allow tests to have less text and to make the +entire test fit on the screen. + +There is a layout/reftests/printing directory for pagination reftests; however, +there is nothing special about this directory. You can put pagination reftests +anywhere that is appropriate. + +The suggested first lines for any pagination test are +<!DOCTYPE html><html class="reftest-paged"> +<style>html{font-size:12pt}</style> + +The reftest-paged class on the root element triggers the reftest to +switch into page mode.
Fixing the font size is suggested, although not +required, because the pages are a fixed size in inches. The switch to page mode +happens on load if the reftest-wait class is not present; otherwise it happens +immediately after firing the MozReftestInvalidate event. + +The underlying layout support for this mode isn't really complete; it +doesn't use exactly the same codepath as real print preview/print. In +particular, scripting and frames are likely to cause problems; it is untested, +though. That said, it should be sufficient for testing layout issues related +to pagination. + +Plugin and IPC Process Crash Tests: class="reftest-expect-process-crash" +======================================================================== + +If you are running a test that causes an out-of-process plugin or IPC process +under Electrolysis to crash as part of a reftest, this will cause process +crash minidump files to be left in the profile directory. The test +infrastructure that runs the reftests will notice these minidump files and +dump out information from them, and these additional error messages in the logs +can end up erroneously being associated with other errors from the reftest run. +They are also confusing, since the appearance of "PROCESS-CRASH" messages in +the test run output can seem like a real problem, when in fact it is the +expected behavior. + +To indicate to the reftest framework that a test is expecting a plugin or +IPC process crash, have the test include "reftest-expect-process-crash" as +one of the root element's classes by the time the test has finished. This will +cause any minidump files that are generated while running the test to be removed +and they won't cause any error messages in the test run output. + +Skip Forcing A Content Process Layer-Tree Update: reftest-no-sync-layers attribute +================================================================================== + +Normally when a multi-process reftest test ends, we force the content process +to push a layer-tree update to the compositor before taking the snapshot. +Setting the "reftest-no-sync-layers" attribute on the root element skips this +step, enabling testing that layer-tree updates are being correctly generated. +However, the test must manually wait for a MozAfterPaint event before ending. diff --git a/layout/tools/reftest/ReftestFissionChild.jsm b/layout/tools/reftest/ReftestFissionChild.jsm new file mode 100644 index 0000000000..46892feb53 --- /dev/null +++ b/layout/tools/reftest/ReftestFissionChild.jsm @@ -0,0 +1,289 @@ +var EXPORTED_SYMBOLS = ["ReftestFissionChild"]; + +class ReftestFissionChild extends JSWindowActorChild { + + forwardAfterPaintEventToParent(rects, originalTargetUri, dispatchToSelfAsWell) { + if (dispatchToSelfAsWell) { + let event = new this.contentWindow.CustomEvent("Reftest:MozAfterPaintFromChild", + {bubbles: true, detail: {rects, originalTargetUri}}); + this.contentWindow.dispatchEvent(event); + } + + let parentContext = this.browsingContext.parent; + if (parentContext) { + try { + this.sendAsyncMessage("ForwardAfterPaintEvent", + {toBrowsingContext: parentContext, fromBrowsingContext: this.browsingContext, + rects, originalTargetUri}); + } catch (e) { + // |this| can be destroyed here and unable to send messages, which is + // not a problem; the reftest harness has probably torn down the page and + // moved on to the next test.
+ Cu.reportError(e); + } + } + } + + handleEvent(evt) { + switch (evt.type) { + case "MozAfterPaint": + // We want to forward any after paint events to our parent document so that + // that it reaches the root content document where the main reftest harness + // code (reftest-content.js) will process it and update the canvas. + var rects = []; + for (let r of evt.clientRects) { + rects.push({ left: r.left, top: r.top, right: r.right, bottom: r.bottom }); + } + this.forwardAfterPaintEventToParent(rects, this.document.documentURI, /* dispatchToSelfAsWell */ false); + break; + } + } + + transformRect(transform, rect) { + let p1 = transform.transformPoint({x: rect.left, y: rect.top}); + let p2 = transform.transformPoint({x: rect.right, y: rect.top}); + let p3 = transform.transformPoint({x: rect.left, y: rect.bottom}); + let p4 = transform.transformPoint({x: rect.right, y: rect.bottom}); + let quad = new DOMQuad(p1, p2, p3, p4); + return quad.getBounds(); + } + + SetupDisplayportRoot() { + let returnStrings = {infoStrings: [], errorStrings: []}; + + let contentRootElement = this.contentWindow.document.documentElement; + if (!contentRootElement) { + return Promise.resolve(returnStrings); + } + + // If we don't have the reftest-async-scroll attribute we only look at + // the root element for potential display ports to set. + if (!contentRootElement.hasAttribute("reftest-async-scroll")) { + let winUtils = this.contentWindow.windowUtils; + this.setupDisplayportForElement(contentRootElement, winUtils, returnStrings); + return Promise.resolve(returnStrings); + } + + // Send a msg to the parent side to get the parent side to tell all + // process roots to do the displayport setting. + let browsingContext = this.browsingContext; + let promise = this.sendQuery("TellChildrenToSetupDisplayport", {browsingContext}); + return promise.then(function(result) { + for (let errorString of result.errorStrings) { + returnStrings.errorStrings.push(errorString); + } + for (let infoString of result.infoStrings) { + returnStrings.infoStrings.push(infoString); + } + return returnStrings; + }, + function(reason) { + returnStrings.errorStrings.push("SetupDisplayport SendQuery to parent promise rejected: " + reason); + return returnStrings; + }); + } + + attrOrDefault(element, attr, def) { + return element.hasAttribute(attr) ? 
Number(element.getAttribute(attr)) : def; + } + + setupDisplayportForElement(element, winUtils, returnStrings) { + var dpw = this.attrOrDefault(element, "reftest-displayport-w", 0); + var dph = this.attrOrDefault(element, "reftest-displayport-h", 0); + var dpx = this.attrOrDefault(element, "reftest-displayport-x", 0); + var dpy = this.attrOrDefault(element, "reftest-displayport-y", 0); + if (dpw !== 0 || dph !== 0 || dpx != 0 || dpy != 0) { + returnStrings.infoStrings.push("Setting displayport to <x="+ dpx +", y="+ dpy +", w="+ dpw +", h="+ dph +">"); + winUtils.setDisplayPortForElement(dpx, dpy, dpw, dph, element, 1); + } + } + + setupDisplayportForElementSubtree(element, winUtils, returnStrings) { + this.setupDisplayportForElement(element, winUtils, returnStrings); + for (let c = element.firstElementChild; c; c = c.nextElementSibling) { + this.setupDisplayportForElementSubtree(c, winUtils, returnStrings); + } + if (typeof element.contentDocument !== "undefined" && + element.contentDocument) { + returnStrings.infoStrings.push("setupDisplayportForElementSubtree descending into subdocument"); + this.setupDisplayportForElementSubtree(element.contentDocument.documentElement, + element.contentWindow.windowUtils, returnStrings); + } + } + + setupAsyncScrollOffsetsForElement(element, winUtils, allowFailure, returnStrings) { + let sx = this.attrOrDefault(element, "reftest-async-scroll-x", 0); + let sy = this.attrOrDefault(element, "reftest-async-scroll-y", 0); + if (sx != 0 || sy != 0) { + try { + // This might fail when called from RecordResult since layers + // may not have been constructed yet + winUtils.setAsyncScrollOffset(element, sx, sy); + return true; + } catch (e) { + if (allowFailure) { + returnStrings.infoStrings.push("setupAsyncScrollOffsetsForElement error calling setAsyncScrollOffset: " + e); + } else { + returnStrings.errorStrings.push("setupAsyncScrollOffsetsForElement error calling setAsyncScrollOffset: " + e); + } + } + } + return false; + } + + setupAsyncScrollOffsetsForElementSubtree(element, winUtils, allowFailure, returnStrings) { + let updatedAny = this.setupAsyncScrollOffsetsForElement(element, winUtils, returnStrings); + for (let c = element.firstElementChild; c; c = c.nextElementSibling) { + if (this.setupAsyncScrollOffsetsForElementSubtree(c, winUtils, allowFailure, returnStrings)) { + updatedAny = true; + } + } + if (typeof element.contentDocument !== "undefined" && + element.contentDocument) { + returnStrings.infoStrings.push("setupAsyncScrollOffsetsForElementSubtree Descending into subdocument"); + if (this.setupAsyncScrollOffsetsForElementSubtree(element.contentDocument.documentElement, + element.contentWindow.windowUtils, allowFailure, returnStrings)) { + updatedAny = true; + } + } + return updatedAny; + } + + receiveMessage(msg) { + switch (msg.name) { + case "ForwardAfterPaintEventToSelfAndParent": + { + // The embedderElement can be null if the child we got this from was removed. + // Not much we can do to transform the rects, but it doesn't matter, the rects + // won't reach reftest-content.js. + if (msg.data.fromBrowsingContext.embedderElement == null) { + this.forwardAfterPaintEventToParent(msg.data.rects, msg.data.originalTargetUri, + /* dispatchToSelfAsWell */ true); + return; + } + + // Transform the rects from fromBrowsingContext to us. + // We first translate from the content rect to the border rect of the iframe. 
+ let style = this.contentWindow.getComputedStyle(msg.data.fromBrowsingContext.embedderElement); + let translate = new DOMMatrixReadOnly().translate( + parseFloat(style.paddingLeft) + parseFloat(style.borderLeftWidth), + parseFloat(style.paddingTop) + parseFloat(style.borderTopWidth)); + + // Then we transform from the iframe to our root frame. + // We are guaranteed to be the process with the embedderElement for fromBrowsingContext. + let transform = msg.data.fromBrowsingContext.embedderElement.getTransformToViewport(); + let combined = translate.multiply(transform); + + let newrects = msg.data.rects.map(r => this.transformRect(combined, r)) + + this.forwardAfterPaintEventToParent(newrects, msg.data.originalTargetUri, /* dispatchToSelfAsWell */ true); + break; + } + + case "EmptyMessage": + return undefined; + case "UpdateLayerTree": + { + let errorStrings = []; + try { + if (this.manager.isProcessRoot) { + this.contentWindow.windowUtils.updateLayerTree(); + } + } catch (e) { + errorStrings.push("updateLayerTree failed: " + e); + } + return Promise.resolve({errorStrings}); + } + case "FlushRendering": + { + let errorStrings = []; + let warningStrings = []; + let infoStrings = []; + + try { + let ignoreThrottledAnimations = msg.data.ignoreThrottledAnimations; + + if (this.manager.isProcessRoot) { + var anyPendingPaintsGeneratedInDescendants = false; + + function flushWindow(win) { + var utils = win.windowUtils; + var afterPaintWasPending = utils.isMozAfterPaintPending; + + var root = win.document.documentElement; + if (root && !root.classList.contains("reftest-no-flush")) { + try { + if (ignoreThrottledAnimations) { + utils.flushLayoutWithoutThrottledAnimations(); + } else { + root.getBoundingClientRect(); + } + } catch (e) { + warningStrings.push("flushWindow failed: " + e + "\n"); + } + } + + if (!afterPaintWasPending && utils.isMozAfterPaintPending) { + infoStrings.push("FlushRendering generated paint for window " + win.location.href); + anyPendingPaintsGeneratedInDescendants = true; + } + + for (let i = 0; i < win.frames.length; ++i) { + try { + if (!Cu.isRemoteProxy(win.frames[i])) { + flushWindow(win.frames[i]); + } + } catch (e) { + Cu.reportError(e); + } + } + } + + // `contentWindow` will be null if the inner window for this actor + // has been navigated away from. 
+ if (this.contentWindow) { + flushWindow(this.contentWindow); + } + + if (anyPendingPaintsGeneratedInDescendants && + !this.contentWindow.windowUtils.isMozAfterPaintPending) { + warningStrings.push("Internal error: descendant frame generated a MozAfterPaint event, but the root document doesn't have one!"); + } + + } + } catch (e) { + errorStrings.push("flushWindow failed: " + e); + } + return Promise.resolve({errorStrings, warningStrings, infoStrings}); + } + + case "SetupDisplayport": + { + let contentRootElement = this.document.documentElement; + let winUtils = this.contentWindow.windowUtils; + let returnStrings = {infoStrings: [], errorStrings: []}; + if (!contentRootElement) { + return Promise.resolve(returnStrings); + } + this.setupDisplayportForElementSubtree(contentRootElement, winUtils, returnStrings); + return Promise.resolve(returnStrings); + } + + case "SetupAsyncScrollOffsets": + { + let returns = {infoStrings: [], errorStrings: [], updatedAny: false}; + let contentRootElement = this.document.documentElement; + + if (!contentRootElement) { + return returns; + } + + let winUtils = this.contentWindow.windowUtils; + + returns.updatedAny = this.setupAsyncScrollOffsetsForElementSubtree(contentRootElement, winUtils, msg.data.allowFailure, returns); + return returns; + } + + } + } +} diff --git a/layout/tools/reftest/ReftestFissionParent.jsm b/layout/tools/reftest/ReftestFissionParent.jsm new file mode 100644 index 0000000000..703e3b8193 --- /dev/null +++ b/layout/tools/reftest/ReftestFissionParent.jsm @@ -0,0 +1,238 @@ +var EXPORTED_SYMBOLS = ["ReftestFissionParent"]; + +class ReftestFissionParent extends JSWindowActorParent { + + tellChildrenToFlushRendering(browsingContext, ignoreThrottledAnimations) { + let promises = []; + this.tellChildrenToFlushRenderingRecursive(browsingContext, ignoreThrottledAnimations, promises); + return Promise.allSettled(promises); + } + + tellChildrenToFlushRenderingRecursive(browsingContext, ignoreThrottledAnimations, promises) { + let cwg = browsingContext.currentWindowGlobal; + if (cwg && cwg.isProcessRoot) { + let a = cwg.getActor("ReftestFission"); + if (a) { + let responsePromise = a.sendQuery("FlushRendering", {ignoreThrottledAnimations}); + promises.push(responsePromise); + } + } + + for (let context of browsingContext.children) { + this.tellChildrenToFlushRenderingRecursive(context, ignoreThrottledAnimations, promises); + } + } + + // not including browsingContext + getNearestProcessRootProperDescendants(browsingContext) { + let result = []; + for (let context of browsingContext.children) { + this.getNearestProcessRootProperDescendantsRecursive(context, result); + } + return result; + } + + getNearestProcessRootProperDescendantsRecursive(browsingContext, result) { + let cwg = browsingContext.currentWindowGlobal; + if (cwg && cwg.isProcessRoot) { + result.push(browsingContext); + return; + } + for (let context of browsingContext.children) { + this.getNearestProcessRootProperDescendantsRecursive(context, result); + } + } + + // tell children and itself + async tellChildrenToUpdateLayerTree(browsingContext) { + let errorStrings = []; + let infoStrings = []; + + let cwg = browsingContext.currentWindowGlobal; + if (!cwg || !cwg.isProcessRoot) { + if (cwg) { + errorStrings.push("tellChildrenToUpdateLayerTree called on a non process root?"); + } + return {errorStrings, infoStrings}; + } + + let actor = cwg.getActor("ReftestFission"); + if (!actor) { + return {errorStrings, infoStrings}; + } + + // When we paint a document we also update the 
EffectsInfo visible rect in + // nsSubDocumentFrame for any remote subdocuments. This visible rect is + // used to limit painting for the subdocument in the subdocument's process. + // So we want to ensure that the IPC message that updates the visible rect + // to the subdocument's process arrives before we paint the subdocument + // (otherwise our painting might not be up to date). We do this by sending, + // and waiting for reply, an "EmptyMessage" to every direct descendant that + // is in another process. Since we send the "EmptyMessage" after the + // visible rect update message we know that the visible rect will be + // updated by the time we hear back from the "EmptyMessage". Then we can + // ask the subdocument process to paint. + + try { + let result = await actor.sendQuery("UpdateLayerTree"); + errorStrings.push(...result.errorStrings); + } catch (e) { + infoStrings.push("tellChildrenToUpdateLayerTree UpdateLayerTree msg to child rejected: " + e); + } + + let descendants = actor.getNearestProcessRootProperDescendants(browsingContext); + for (let context of descendants) { + let cwg2 = context.currentWindowGlobal; + if (cwg2) { + if (!cwg2.isProcessRoot) { + errorStrings.push("getNearestProcessRootProperDescendants returned a non process root?"); + } + let actor2 = cwg2.getActor("ReftestFission"); + if (actor2) { + try { + await actor2.sendQuery("EmptyMessage"); + } catch(e) { + infoStrings.push("tellChildrenToUpdateLayerTree EmptyMessage msg to child rejected: " + e); + } + + try { + let result2 = await actor2.tellChildrenToUpdateLayerTree(context); + errorStrings.push(...result2.errorStrings); + infoStrings.push(...result2.infoStrings); + } catch (e) { + errorStrings.push("tellChildrenToUpdateLayerTree recursive tellChildrenToUpdateLayerTree call rejected: " + e); + } + + } + } + } + + return {errorStrings, infoStrings}; + } + + tellChildrenToSetupDisplayport(browsingContext, promises) { + let cwg = browsingContext.currentWindowGlobal; + if (cwg && cwg.isProcessRoot) { + let a = cwg.getActor("ReftestFission"); + if (a) { + let responsePromise = a.sendQuery("SetupDisplayport"); + promises.push(responsePromise); + } + } + + for (let context of browsingContext.children) { + this.tellChildrenToSetupDisplayport(context, promises); + } + } + + tellChildrenToSetupAsyncScrollOffsets(browsingContext, allowFailure, promises) { + let cwg = browsingContext.currentWindowGlobal; + if (cwg && cwg.isProcessRoot) { + let a = cwg.getActor("ReftestFission"); + if (a) { + let responsePromise = a.sendQuery("SetupAsyncScrollOffsets", {allowFailure}); + promises.push(responsePromise); + } + } + + for (let context of browsingContext.children) { + this.tellChildrenToSetupAsyncScrollOffsets(context, allowFailure, promises); + } + } + + + receiveMessage(msg) { + switch (msg.name) { + case "ForwardAfterPaintEvent": + { + let cwg = msg.data.toBrowsingContext.currentWindowGlobal; + if (cwg) { + let a = cwg.getActor("ReftestFission"); + if (a) { + a.sendAsyncMessage("ForwardAfterPaintEventToSelfAndParent", msg.data); + } + } + break; + } + case "FlushRendering": + { + let promise = this.tellChildrenToFlushRendering(msg.data.browsingContext, msg.data.ignoreThrottledAnimations); + return promise.then(function (results) { + let errorStrings = []; + let warningStrings = []; + let infoStrings = []; + for (let r of results) { + if (r.status != "fulfilled") { + if (r.status == "pending") { + errorStrings.push("FlushRendering sendQuery to child promise still pending?"); + } else { + // We expect actors to go away 
causing sendQuery's to fail, so + // just note it. + infoStrings.push("FlushRendering sendQuery to child promise rejected: " + r.reason); + } + continue; + } + + errorStrings.push(...r.value.errorStrings); + warningStrings.push(...r.value.warningStrings); + infoStrings.push(...r.value.infoStrings); + } + return {errorStrings, warningStrings, infoStrings}; + }); + } + case "UpdateLayerTree": + { + return this.tellChildrenToUpdateLayerTree(msg.data.browsingContext); + } + case "TellChildrenToSetupDisplayport": + { + let promises = []; + this.tellChildrenToSetupDisplayport(msg.data.browsingContext, promises); + return Promise.allSettled(promises).then(function (results) { + let errorStrings = []; + let infoStrings = []; + for (let r of results) { + if (r.status != "fulfilled") { + // We expect actors to go away causing sendQuery's to fail, so + // just note it. + infoStrings.push("SetupDisplayport sendQuery to child promise rejected: " + r.reason); + continue; + } + + errorStrings.push(...r.value.errorStrings); + infoStrings.push(...r.value.infoStrings); + } + return {errorStrings, infoStrings} + }); + } + + case "SetupAsyncScrollOffsets": + { + let promises = []; + this.tellChildrenToSetupAsyncScrollOffsets(this.manager.browsingContext, msg.data.allowFailure, promises); + return Promise.allSettled(promises).then(function (results) { + let errorStrings = []; + let infoStrings = []; + let updatedAny = false; + for (let r of results) { + if (r.status != "fulfilled") { + // We expect actors to go away causing sendQuery's to fail, so + // just note it. + infoStrings.push("SetupAsyncScrollOffsets sendQuery to child promise rejected: " + r.reason); + continue; + } + + errorStrings.push(...r.value.errorStrings); + infoStrings.push(...r.value.infoStrings); + if (r.value.updatedAny) { + updatedAny = true; + } + } + return {errorStrings, infoStrings, updatedAny}; + }); + } + + } + } + +} diff --git a/layout/tools/reftest/api.js b/layout/tools/reftest/api.js new file mode 100644 index 0000000000..bfb04e7b3f --- /dev/null +++ b/layout/tools/reftest/api.js @@ -0,0 +1,155 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +const Cm = Components.manager; + +const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm"); +const { XPCOMUtils } = ChromeUtils.import( + "resource://gre/modules/XPCOMUtils.jsm" +); +var OnRefTestLoad, OnRefTestUnload; + +XPCOMUtils.defineLazyServiceGetter( + this, + "resProto", + "@mozilla.org/network/protocol;1?name=resource", + "nsISubstitutingProtocolHandler" +); + +XPCOMUtils.defineLazyServiceGetter( + this, + "aomStartup", + "@mozilla.org/addons/addon-manager-startup;1", + "amIAddonManagerStartup" +); + +function processTerminated() { + return new Promise(resolve => { + Services.obs.addObserver(function observe(subject, topic) { + if (topic == "ipc:content-shutdown") { + Services.obs.removeObserver(observe, topic); + resolve(); + } + }, "ipc:content-shutdown"); + }); +} + +function startAndroid(win) { + // Add setTimeout here because windows.innerWidth/Height are not set yet. + win.setTimeout(function() { + OnRefTestLoad(win); + }, 0); +} + +function GetMainWindow() { + let win = Services.wm.getMostRecentWindow("navigator:browser"); + if (!win) { + // There is no navigator:browser in the geckoview TestRunnerActivity; + // try navigator.geckoview instead. 
+ win = Services.wm.getMostRecentWindow("navigator:geckoview"); + } + return win; +} + +this.reftest = class extends ExtensionAPI { + onStartup() { + let uri = Services.io.newURI( + "chrome/reftest/res/", + null, + this.extension.rootURI + ); + resProto.setSubstitutionWithFlags( + "reftest", + uri, + resProto.ALLOW_CONTENT_ACCESS + ); + + const manifestURI = Services.io.newURI( + "manifest.json", + null, + this.extension.rootURI + ); + this.chromeHandle = aomStartup.registerChrome(manifestURI, [ + [ + "content", + "reftest", + "chrome/reftest/content/", + "contentaccessible=yes", + ], + ]); + + // Starting tests is handled quite differently on android and desktop. + // On Android, OnRefTestLoad() takes over the main browser window so + // we just need to call it as soon as the browser window is available. + // On desktop, a separate window (dummy) is created and explicitly given + // focus (see bug 859339 for details), then tests are launched in a new + // top-level window. + let win = GetMainWindow(); + if (Services.appinfo.OS == "Android") { + ({ OnRefTestLoad, OnRefTestUnload } = ChromeUtils.import( + "resource://reftest/reftest.jsm" + )); + if (win) { + startAndroid(win); + } else { + // The window type parameter is only available once the window's document + // element has been created. The main window has already been created + // however and it is in an in-between state which means that you can't + // find it by its type nor will domwindowcreated be fired. + // So we listen to either initial-document-element-inserted which + // indicates when it's okay to search for the main window by type again. + Services.obs.addObserver(function observer(aSubject, aTopic, aData) { + Services.obs.removeObserver(observer, aTopic); + startAndroid(GetMainWindow()); + }, "initial-document-element-inserted"); + } + return; + } + + Services.io.manageOfflineStatus = false; + Services.io.offline = false; + + let dummy = Services.ww.openWindow( + null, + "about:blank", + "dummy", + "chrome,dialog=no,left=800,height=200,width=200,all", + null + ); + dummy.onload = async function() { + // Close pre-existing window + win.close(); + + const { PerTestCoverageUtils } = ChromeUtils.import( + "resource://reftest/PerTestCoverageUtils.jsm" + ); + if (PerTestCoverageUtils.enabled) { + // In PerTestCoverage mode, wait for the process belonging to the window we just closed + // to be terminated, to avoid its shutdown interfering when we reset the counters. 
+ await processTerminated(); + } + + dummy.focus(); + Services.ww.openWindow( + null, + "chrome://reftest/content/reftest.xhtml", + "_blank", + "chrome,dialog=no,all", + {} + ); + }; + } + + onShutdown() { + resProto.setSubstitution("reftest", null); + + this.chromeHandle.destruct(); + this.chromeHandle = null; + + if (Services.appinfo.OS == "Android") { + OnRefTestUnload(); + Cu.unload("resource://reftest/reftest.jsm"); + } + } +}; diff --git a/layout/tools/reftest/chrome/binding.xml b/layout/tools/reftest/chrome/binding.xml new file mode 100644 index 0000000000..b68d64bd12 --- /dev/null +++ b/layout/tools/reftest/chrome/binding.xml @@ -0,0 +1,9 @@ +<?xml version="1.0"?> +<bindings xmlns="http://www.mozilla.org/xbl"> +<binding id="reftest-userxbl" bindToUntrustedContent="true"> +<implementation><constructor><![CDATA[ +this.style.backgroundColor = "lime"; +document.documentElement.removeAttribute("class"); +]]></constructor></implementation> +</binding> +</bindings> diff --git a/layout/tools/reftest/chrome/userContent-import.css b/layout/tools/reftest/chrome/userContent-import.css new file mode 100644 index 0000000000..e1936a02bc --- /dev/null +++ b/layout/tools/reftest/chrome/userContent-import.css @@ -0,0 +1,3 @@ +.reftest-usercss-import { + background-color: lime !important; +} diff --git a/layout/tools/reftest/chrome/userContent.css b/layout/tools/reftest/chrome/userContent.css new file mode 100644 index 0000000000..894cc24245 --- /dev/null +++ b/layout/tools/reftest/chrome/userContent.css @@ -0,0 +1,26 @@ +@import "invalid.css"; +@import "userContent-import.css"; + +.reftest-usercss { + background: lime !important; +} +.reftest-userxbl { + -moz-binding: url("binding.xml#reftest-userxbl") !important; +} +.RefTest-upperCase { + background: lime !important; +} +/* + * file: URLs have an empty domain. + * Android uses a special loopback-to-host address. + */ +@-moz-document domain(), domain(10.0.2.2) { + .reftest-domain { + background: lime !important; + } +} +@-moz-document domain(example.invalid) { + .reftest-xdomain { + background: red !important; + } +} diff --git a/layout/tools/reftest/clean-reftest-output.pl b/layout/tools/reftest/clean-reftest-output.pl new file mode 100755 index 0000000000..b1959281d5 --- /dev/null +++ b/layout/tools/reftest/clean-reftest-output.pl @@ -0,0 +1,38 @@ +#!/usr/bin/perl +# vim: set shiftwidth=4 tabstop=8 autoindent expandtab: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# This script is intended to be run over the standard output of a +# reftest run. It will extract the parts of the output run relevant to +# reftest and HTML-ize the URLs. + +use strict; + +print <<EOM +<html> +<head> +<title>reftest output</title> +</head> +<body> +<pre> +EOM +; + +while (<>) { + next unless /REFTEST/; + chomp; + chop if /\r$/; + s,(TEST-)([^\|]*) \| ([^\|]*) \|(.*),\1\2: <a href="\3">\3</a>\4,; + s,(IMAGE[^:]*): (data:.*),<a href="\2">\1</a>,; + print; + print "\n"; +} + +print <<EOM +</pre> +</body> +</html> +EOM +; diff --git a/layout/tools/reftest/globals.jsm b/layout/tools/reftest/globals.jsm new file mode 100644 index 0000000000..6ffe73b06c --- /dev/null +++ b/layout/tools/reftest/globals.jsm @@ -0,0 +1,167 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict"; + +var EXPORTED_SYMBOLS = []; + +for (let [key, val] of Object.entries({ + /* Constants */ + XHTML_NS: "http://www.w3.org/1999/xhtml", + XUL_NS: "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul", + + NS_LOCAL_FILE_CONTRACTID: "@mozilla.org/file/local;1", + NS_GFXINFO_CONTRACTID: "@mozilla.org/gfx/info;1", + IO_SERVICE_CONTRACTID: "@mozilla.org/network/io-service;1", + DEBUG_CONTRACTID: "@mozilla.org/xpcom/debug;1", + NS_DIRECTORY_SERVICE_CONTRACTID: "@mozilla.org/file/directory_service;1", + NS_OBSERVER_SERVICE_CONTRACTID: "@mozilla.org/observer-service;1", + + TYPE_REFTEST_EQUAL: '==', + TYPE_REFTEST_NOTEQUAL: '!=', + TYPE_LOAD: 'load', // test without a reference (just test that it does + // not assert, crash, hang, or leak) + TYPE_SCRIPT: 'script', // test contains individual test results + TYPE_PRINT: 'print', // test and reference will be printed to PDF's and + // compared structurally + + // keep this in sync with reftest-content.js + URL_TARGET_TYPE_TEST: 0, // first url + URL_TARGET_TYPE_REFERENCE: 1, // second url, if any + + // The order of these constants matters, since when we have a status + // listed for a *manifest*, we combine the status with the status for + // the test by using the *larger*. + // FIXME: In the future, we may also want to use this rule for combining + // statuses that are on the same line (rather than making the last one + // win). + EXPECTED_PASS: 0, + EXPECTED_FAIL: 1, + EXPECTED_RANDOM: 2, + EXPECTED_FUZZY: 3, + + // types of preference value we might want to set for a specific test + PREF_BOOLEAN: 0, + PREF_STRING: 1, + PREF_INTEGER: 2, + + FOCUS_FILTER_ALL_TESTS: "all", + FOCUS_FILTER_NEEDS_FOCUS_TESTS: "needs-focus", + FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS: "non-needs-focus", + + // "<!--CLEAR-->" + BLANK_URL_FOR_CLEARING: "data:text/html;charset=UTF-8,%3C%21%2D%2DCLEAR%2D%2D%3E", + + /* Globals */ + g: { + loadTimeout: 0, + timeoutHook: null, + remote: false, + ignoreWindowSize: false, + shuffle: false, + repeat: null, + runUntilFailure: false, + cleanupPendingCrashes: false, + totalChunks: 0, + thisChunk: 0, + containingWindow: null, + urlFilterRegex: {}, + contentGfxInfo: null, + focusFilterMode: "all", + compareRetainedDisplayLists: false, + isCoverageBuild: false, + + browser: undefined, + // Are we testing web content loaded in a separate process? + browserIsRemote: undefined, // bool + // Are we using <iframe mozbrowser>? + browserIsIframe: undefined, // bool + browserMessageManager: undefined, // bool + canvas1: undefined, + canvas2: undefined, + // gCurrentCanvas is non-null between InitCurrentCanvasWithSnapshot and the next + // RecordResult. + currentCanvas: null, + urls: undefined, + // Map from URI spec to the number of times it remains to be used + uriUseCounts: undefined, + // Map from URI spec to the canvas rendered for that URI + uriCanvases: undefined, + testResults: { + // Successful... + Pass: 0, + LoadOnly: 0, + // Unexpected... + Exception: 0, + FailedLoad: 0, + UnexpectedFail: 0, + UnexpectedPass: 0, + AssertionUnexpected: 0, + AssertionUnexpectedFixed: 0, + // Known problems... 
+ KnownFail : 0, + AssertionKnown: 0, + Random : 0, + Skip: 0, + Slow: 0, + }, + totalTests: 0, + currentURL: undefined, + currentURLTargetType: undefined, + testLog: [], + logLevel: undefined, + logFile: null, + logger: undefined, + server: undefined, + count: 0, + assertionCount: 0, + + ioService: undefined, + debug: undefined, + windowUtils: undefined, + + slowestTestTime: 0, + slowestTestURL: undefined, + failedUseWidgetLayers: false, + + drawWindowFlags: undefined, + + expectingProcessCrash: false, + expectedCrashDumpFiles: [], + unexpectedCrashDumpFiles: {}, + crashDumpDir: undefined, + pendingCrashDumpDir: undefined, + failedNoPaint: false, + failedNoDisplayList: false, + failedDisplayList: false, + failedOpaqueLayer: false, + failedOpaqueLayerMessages: [], + failedAssignedLayer: false, + failedAssignedLayerMessages: [], + + startAfter: undefined, + suiteStarted: false, + manageSuite: false, + + // The enabled-state of the test-plugins, stored so they can be reset later + testPluginEnabledStates: null, + prefsToRestore: [], + httpServerPort: -1, + + // whether to run slow tests or not + runSlowTests: true, + + // whether we should skip caching canvases + noCanvasCache: false, + recycledCanvases: new Array(), + testPrintOutput: null, + + manifestsLoaded: {}, + // Only dump the sandbox once, because it doesn't depend on the + // manifest URL (yet!). + dumpedConditionSandbox: false, + } +})) { + this[key] = val; + EXPORTED_SYMBOLS.push(key); +} diff --git a/layout/tools/reftest/jar.mn b/layout/tools/reftest/jar.mn new file mode 100644 index 0000000000..b1803a0638 --- /dev/null +++ b/layout/tools/reftest/jar.mn @@ -0,0 +1,60 @@ +reftest.jar: +# Ref tests + content/moz-bool-pref.css (../../../layout/reftests/css-parsing/moz-bool-pref.css) + content/editor/reftests/xul (../../../editor/reftests/xul/*) + content/bidi (../../reftests/bidi/*) + content/box-ordinal (../../reftests/box-ordinal/*) + content/box-shadow (../../reftests/box-shadow/*) + content/bugs (../../reftests/bugs/*) + content/css-display (../../reftests/css-display/*) + content/forms/input/color (../../reftests/forms/input/color/*) + content/forms/input/file (../../reftests/forms/input/file/*) + content/forms/input/text (../../reftests/forms/input/text/*) + content/forms/placeholder (../../reftests/forms/placeholder/*) + content/forms/textbox (../../reftests/forms/textbox/*) + content/image-region (../../reftests/image-region/*) + content/invalidation (../../reftests/invalidation/*) + content/native-theme (../../reftests/native-theme/*) + content/reftest-sanity (../../reftests/reftest-sanity/*) + content/text-shadow (../../reftests/text-shadow/*) + content/writing-mode (../../reftests/writing-mode/*) + content/xul-document-load (../../reftests/xul-document-load/*) + content/xul (../../reftests/xul/*) + content/xul/reftest (../../xul/reftest/*) + content/fonts/fira (../../reftests/fonts/fira/*) + content/fonts/sil (../../reftests/fonts/sil/*) + content/toolkit/reftests (../../../toolkit/content/tests/reftests/*) + content/osx-theme (../../../toolkit/themes/osx/reftests/*) + content/reftest.xhtml (reftest.xhtml) + +# Crash tests + content/crashtests/dom/svg/crashtests (../../../dom/svg/crashtests/*) + content/crashtests/dom/html/crashtests (../../../dom/html/crashtests/*) + content/crashtests/dom/base/crashtests (../../../dom/base/crashtests/*) + content/crashtests/dom/xul/crashtests (../../../dom/xul/crashtests/*) + content/crashtests/dom/xml/crashtests (../../../dom/xml/crashtests/*) + 
content/crashtests/layout/forms/crashtests (../../../layout/forms/crashtests/*) + content/crashtests/layout/svg/crashtests (../../../layout/svg/crashtests/*) + content/crashtests/layout/tables/crashtests (../../../layout/tables/crashtests/*) + content/crashtests/layout/base/crashtests (../../../layout/base/crashtests/*) + content/crashtests/layout/xul/tree/crashtests (../../../layout/xul/tree/crashtests/*) + content/crashtests/layout/xul/crashtests (../../../layout/xul/crashtests/*) + content/crashtests/layout/generic/crashtests (../../../layout/generic/crashtests/*) + content/crashtests/layout/style/crashtests (../../../layout/style/crashtests/*) + content/crashtests/gfx/tests/crashtests (../../../gfx/tests/crashtests/*) + content/crashtests/accessible/tests/crashtests (../../../accessible/tests/crashtests/*) + content/crashtests/view/crashtests (../../../view/crashtests/*) + content/crashtests/widget/cocoa/crashtests (../../../widget/cocoa/crashtests/*) + + res/globals.jsm (globals.jsm) + res/reftest-content.js (reftest-content.js) + res/ReftestFissionParent.jsm (ReftestFissionParent.jsm) + res/ReftestFissionChild.jsm (ReftestFissionChild.jsm) + res/AsyncSpellCheckTestHelper.jsm (../../../editor/AsyncSpellCheckTestHelper.jsm) + res/httpd.jsm (../../../netwerk/test/httpserver/httpd.js) + res/StructuredLog.jsm (../../../testing/modules/StructuredLog.jsm) + res/PerTestCoverageUtils.jsm (../../../tools/code-coverage/PerTestCoverageUtils.jsm) + res/input.css (../../../editor/reftests/xul/input.css) + res/progress.css (../../../layout/reftests/forms/progress/style.css) +* res/manifest.jsm (manifest.jsm) +* res/reftest.jsm (reftest.jsm) diff --git a/layout/tools/reftest/mach_commands.py b/layout/tools/reftest/mach_commands.py new file mode 100644 index 0000000000..7c6baa2846 --- /dev/null +++ b/layout/tools/reftest/mach_commands.py @@ -0,0 +1,305 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, unicode_literals, print_function + +import os +import re +import sys +from argparse import Namespace + +from mozbuild.base import ( + MachCommandBase, + MachCommandConditions as conditions, + MozbuildObject, +) + +from mach.decorators import ( + CommandProvider, + Command, +) + + +parser = None + + +class ReftestRunner(MozbuildObject): + """Easily run reftests. + + This currently contains just the basics for running reftests. We may want + to hook up result parsing, etc. + """ + + def __init__(self, *args, **kwargs): + MozbuildObject.__init__(self, *args, **kwargs) + + # TODO Bug 794506 remove once mach integrates with virtualenv. + build_path = os.path.join(self.topobjdir, "build") + if build_path not in sys.path: + sys.path.append(build_path) + + self.tests_dir = os.path.join(self.topobjdir, "_tests") + self.reftest_dir = os.path.join(self.tests_dir, "reftest") + + def _make_shell_string(self, s): + return "'%s'" % re.sub("'", r"'\''", s) + + def _setup_objdir(self, args): + # reftest imports will happen from the objdir + sys.path.insert(0, self.reftest_dir) + + tests = os.path.join(self.reftest_dir, "tests") + if not os.path.isdir(tests) and not os.path.islink(tests): + # This symbolic link is used by the desktop tests to + # locate the actual test files when running using file:. 
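+ # For example (illustrative paths): <objdir>/_tests/reftest/tests ends up
+ # pointing at <topsrcdir>, so manifests such as layout/reftests/reftest.list
+ # can be reached under tests/ when loaded via file: URLs.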
+ os.symlink(self.topsrcdir, tests) + + def run_desktop_test(self, **kwargs): + """Runs a reftest, in desktop Firefox.""" + import runreftest + + args = Namespace(**kwargs) + if args.suite not in ("reftest", "crashtest", "jstestbrowser"): + raise Exception("None or unrecognized reftest suite type.") + + default_manifest = { + "reftest": (self.topsrcdir, "layout", "reftests", "reftest.list"), + "crashtest": (self.topsrcdir, "testing", "crashtest", "crashtests.list"), + "jstestbrowser": ( + self.topobjdir, + "dist", + "test-stage", + "jsreftest", + "tests", + "js", + "src", + "tests", + "jstests.list", + ), + } + + args.extraProfileFiles.append(os.path.join(self.topobjdir, "dist", "plugins")) + args.symbolsPath = os.path.join(self.topobjdir, "dist", "crashreporter-symbols") + args.sandboxReadWhitelist.extend([self.topsrcdir, self.topobjdir]) + + if not args.tests: + args.tests = [os.path.join(*default_manifest[args.suite])] + + if args.suite == "jstestbrowser": + args.extraProfileFiles.append( + os.path.join( + self.topobjdir, + "dist", + "test-stage", + "jsreftest", + "tests", + "js", + "src", + "tests", + "user.js", + ) + ) + + self.log_manager.enable_unstructured() + try: + rv = runreftest.run_test_harness(parser, args) + finally: + self.log_manager.disable_unstructured() + + return rv + + def run_android_test(self, **kwargs): + """Runs a reftest, in an Android application.""" + + args = Namespace(**kwargs) + if args.suite not in ("reftest", "crashtest", "jstestbrowser"): + raise Exception("None or unrecognized reftest suite type.") + + self._setup_objdir(args) + import remotereftest + + default_manifest = { + "reftest": (self.topsrcdir, "layout", "reftests", "reftest.list"), + "crashtest": (self.topsrcdir, "testing", "crashtest", "crashtests.list"), + "jstestbrowser": ( + self.topobjdir, + "dist", + "test-stage", + "jsreftest", + "tests", + "js", + "src", + "tests", + "jstests.list", + ), + } + + if not args.tests: + args.tests = [os.path.join(*default_manifest[args.suite])] + + args.extraProfileFiles.append( + os.path.join(self.topsrcdir, "mobile", "android", "fonts") + ) + + hyphenation_path = os.path.join(self.topsrcdir, "intl", "locales") + + for (dirpath, dirnames, filenames) in os.walk(hyphenation_path): + for filename in filenames: + if filename.endswith(".dic"): + args.extraProfileFiles.append(os.path.join(dirpath, filename)) + + if not args.httpdPath: + args.httpdPath = os.path.join(self.tests_dir, "modules") + if not args.symbolsPath: + args.symbolsPath = os.path.join(self.topobjdir, "crashreporter-symbols") + if not args.xrePath: + args.xrePath = os.environ.get("MOZ_HOST_BIN") + if not args.app: + args.app = "org.mozilla.geckoview.test" + if not args.utilityPath: + args.utilityPath = args.xrePath + args.ignoreWindowSize = True + args.printDeviceInfo = False + + from mozrunner.devices.android_device import get_adb_path + + if not args.adb_path: + args.adb_path = get_adb_path(self) + + if "geckoview" not in args.app: + args.e10s = False + print("using e10s=False for non-geckoview app") + + # A symlink and some path manipulations are required so that test + # manifests can be found both locally and remotely (via a url) + # using the same relative path. 
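+ # For example, a test given as <topsrcdir>/layout/reftests/foo.html is
+ # rewritten below to tests/layout/reftests/foo.html, which resolves both
+ # through the local symlink and in the remote (url) form.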
+ if args.suite == "jstestbrowser": + staged_js_dir = os.path.join( + self.topobjdir, "dist", "test-stage", "jsreftest" + ) + tests = os.path.join(self.reftest_dir, "jsreftest") + if not os.path.isdir(tests) and not os.path.islink(tests): + os.symlink(staged_js_dir, tests) + args.extraProfileFiles.append( + os.path.join(staged_js_dir, "tests", "js", "src", "tests", "user.js") + ) + else: + tests = os.path.join(self.reftest_dir, "tests") + if not os.path.isdir(tests) and not os.path.islink(tests): + os.symlink(self.topsrcdir, tests) + for i, path in enumerate(args.tests): + # Non-absolute paths are relative to the packaged directory, which + # has an extra tests/ at the start + if os.path.exists(os.path.abspath(path)): + path = os.path.relpath(path, os.path.join(self.topsrcdir)) + args.tests[i] = os.path.join("tests", path) + + self.log_manager.enable_unstructured() + try: + rv = remotereftest.run_test_harness(parser, args) + finally: + self.log_manager.disable_unstructured() + + return rv + + +def process_test_objects(kwargs): + """|mach test| works by providing a test_objects argument, from + which the test path must be extracted and converted into a normal + reftest tests argument.""" + + if "test_objects" in kwargs: + if kwargs["tests"] is None: + kwargs["tests"] = [] + kwargs["tests"].extend(item["path"] for item in kwargs["test_objects"]) + del kwargs["test_objects"] + + +def get_parser(): + import reftestcommandline + + global parser + here = os.path.abspath(os.path.dirname(__file__)) + build_obj = MozbuildObject.from_environment(cwd=here) + if conditions.is_android(build_obj): + parser = reftestcommandline.RemoteArgumentsParser() + else: + parser = reftestcommandline.DesktopArgumentsParser() + return parser + + +@CommandProvider +class MachCommands(MachCommandBase): + @Command( + "reftest", + category="testing", + description="Run reftests (layout and graphics correctness).", + parser=get_parser, + ) + def run_reftest(self, **kwargs): + kwargs["suite"] = "reftest" + return self._run_reftest(**kwargs) + + @Command( + "jstestbrowser", + category="testing", + description="Run js/src/tests in the browser.", + parser=get_parser, + ) + def run_jstestbrowser(self, **kwargs): + if "--enable-js-shell" not in self.mozconfig["configure_args"]: + raise Exception( + "jstestbrowser requires --enable-js-shell be specified in mozconfig." + ) + self._mach_context.commands.dispatch( + "build", self._mach_context, what=["stage-jstests"] + ) + kwargs["suite"] = "jstestbrowser" + return self._run_reftest(**kwargs) + + @Command( + "crashtest", + category="testing", + description="Run crashtests (Check if crashes on a page).", + parser=get_parser, + ) + def run_crashtest(self, **kwargs): + kwargs["suite"] = "crashtest" + return self._run_reftest(**kwargs) + + def _run_reftest(self, **kwargs): + kwargs["topsrcdir"] = self.topsrcdir + process_test_objects(kwargs) + reftest = self._spawn(ReftestRunner) + # Unstructured logging must be enabled prior to calling + # adb which uses an unstructured logger in its constructor. 
+ reftest.log_manager.enable_unstructured() + if conditions.is_android(self): + from mozrunner.devices.android_device import ( + verify_android_device, + InstallIntent, + ) + + install = ( + InstallIntent.NO if kwargs.get("no_install") else InstallIntent.YES + ) + verbose = False + if ( + kwargs.get("log_mach_verbose") + or kwargs.get("log_tbpl_level") == "debug" + or kwargs.get("log_mach_level") == "debug" + or kwargs.get("log_raw_level") == "debug" + ): + verbose = True + verify_android_device( + self, + install=install, + xre=True, + network=True, + app=kwargs["app"], + device_serial=kwargs["deviceSerial"], + verbose=verbose, + ) + return reftest.run_android_test(**kwargs) + return reftest.run_desktop_test(**kwargs) diff --git a/layout/tools/reftest/mach_test_package_commands.py b/layout/tools/reftest/mach_test_package_commands.py new file mode 100644 index 0000000000..220d0f8b46 --- /dev/null +++ b/layout/tools/reftest/mach_test_package_commands.py @@ -0,0 +1,122 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, unicode_literals, print_function + +import os +import sys +from argparse import Namespace +from functools import partial + +from mach.decorators import ( + CommandProvider, + Command, +) +from mozbuild.base import MachCommandBase + +here = os.path.abspath(os.path.dirname(__file__)) +logger = None + + +def run_reftest(context, **kwargs): + import mozinfo + from mozlog.commandline import setup_logging + + if not kwargs.get("log"): + kwargs["log"] = setup_logging("reftest", kwargs, {"mach": sys.stdout}) + global logger + logger = kwargs["log"] + + args = Namespace(**kwargs) + args.e10s = context.mozharness_config.get("e10s", args.e10s) + + if not args.tests: + args.tests = [os.path.join("layout", "reftests", "reftest.list")] + + test_root = os.path.join(context.package_root, "reftest", "tests") + normalize = partial(context.normalize_test_path, test_root) + args.tests = map(normalize, args.tests) + + if kwargs.get("allow_software_gl_layers"): + os.environ["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1" + + if mozinfo.info.get("buildapp") == "mobile/android": + return run_reftest_android(context, args) + return run_reftest_desktop(context, args) + + +def run_reftest_desktop(context, args): + from runreftest import run_test_harness + + args.app = args.app or context.firefox_bin + args.extraProfileFiles.append(os.path.join(context.bin_dir, "plugins")) + args.utilityPath = context.bin_dir + args.sandboxReadWhitelist.append(context.mozharness_workdir) + args.extraPrefs.append("layers.acceleration.force-enabled=true") + + logger.info("mach calling runreftest with args: " + str(args)) + + return run_test_harness(parser, args) + + +def run_reftest_android(context, args): + from remotereftest import run_test_harness + + args.app = args.app or "org.mozilla.geckoview.test" + args.utilityPath = context.hostutils + args.xrePath = context.hostutils + args.httpdPath = context.module_dir + args.ignoreWindowSize = True + args.printDeviceInfo = False + + config = context.mozharness_config + if config: + host = os.environ.get("HOST_IP", "10.0.2.2") + args.remoteWebServer = config.get("remote_webserver", host) + args.httpPort = config.get("http_port", 8854) + args.sslPort = config.get("ssl_port", 4454) + args.adb_path = config["exes"]["adb"] % { + "abs_work_dir": context.mozharness_workdir + } + args.deviceSerial = 
os.environ.get("DEVICE_SERIAL", "emulator-5554") + + logger.info("mach calling remotereftest with args: " + str(args)) + + return run_test_harness(parser, args) + + +def add_global_arguments(parser): + parser.add_argument("--test-suite") + parser.add_argument("--reftest-suite") + parser.add_argument("--download-symbols") + parser.add_argument("--allow-software-gl-layers", action="store_true") + parser.add_argument("--no-run-tests", action="store_true") + + +def setup_argument_parser(): + import mozinfo + import reftestcommandline + + global parser + mozinfo.find_and_update_from_json(here) + if mozinfo.info.get("buildapp") == "mobile/android": + parser = reftestcommandline.RemoteArgumentsParser() + else: + parser = reftestcommandline.DesktopArgumentsParser() + add_global_arguments(parser) + return parser + + +@CommandProvider +class ReftestCommands(MachCommandBase): + @Command( + "reftest", + category="testing", + description="Run the reftest harness.", + parser=setup_argument_parser, + ) + def reftest(self, **kwargs): + self._mach_context.activate_mozharness_venv() + kwargs["suite"] = "reftest" + return run_reftest(self._mach_context, **kwargs) diff --git a/layout/tools/reftest/manifest.jsm b/layout/tools/reftest/manifest.jsm new file mode 100644 index 0000000000..7f7c8d31ea --- /dev/null +++ b/layout/tools/reftest/manifest.jsm @@ -0,0 +1,782 @@ +/* -*- indent-tabs-mode: nil; js-indent-level: 4 -*- / +/* vim: set shiftwidth=4 tabstop=8 autoindent cindent expandtab: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict"; + +var EXPORTED_SYMBOLS = ["ReadTopManifest", "CreateUrls"]; + +Cu.import("resource://reftest/globals.jsm", this); +Cu.import("resource://reftest/reftest.jsm", this); +Cu.import("resource://gre/modules/Services.jsm"); +Cu.import("resource://gre/modules/NetUtil.jsm"); + +const NS_SCRIPTSECURITYMANAGER_CONTRACTID = "@mozilla.org/scriptsecuritymanager;1"; +const NS_NETWORK_PROTOCOL_CONTRACTID_PREFIX = "@mozilla.org/network/protocol;1?name="; +const NS_XREAPPINFO_CONTRACTID = "@mozilla.org/xre/app-info;1"; + +const RE_PROTOCOL = /^\w+:/; +const RE_PREF_ITEM = /^(|test-|ref-)pref\((.+?),(.*)\)$/; + + +function ReadTopManifest(aFileURL, aFilter, aManifestID) +{ + var url = g.ioService.newURI(aFileURL); + if (!url) + throw "Expected a file or http URL for the manifest."; + + g.manifestsLoaded = {}; + ReadManifest(url, aFilter, aManifestID); +} + +// Note: If you materially change the reftest manifest parsing, +// please keep the parser in layout/tools/reftest/__init__.py in sync. +function ReadManifest(aURL, aFilter, aManifestID) +{ + // Ensure each manifest is only read once. 
This assumes that manifests that + // are included with filters will be read via their include before they are + // read directly in the case of a duplicate + if (g.manifestsLoaded.hasOwnProperty(aURL.spec)) { + if (g.manifestsLoaded[aURL.spec] === null) + return; + else + aFilter = [aFilter[0], aFilter[1], true]; + } + g.manifestsLoaded[aURL.spec] = aFilter[1]; + + var secMan = Cc[NS_SCRIPTSECURITYMANAGER_CONTRACTID] + .getService(Ci.nsIScriptSecurityManager); + + var listURL = aURL; + var channel = NetUtil.newChannel({uri: aURL, + loadUsingSystemPrincipal: true}); + var inputStream = channel.open(); + if (channel instanceof Ci.nsIHttpChannel + && channel.responseStatus != 200) { + g.logger.error("HTTP ERROR : " + channel.responseStatus); + } + var streamBuf = getStreamContent(inputStream); + inputStream.close(); + var lines = streamBuf.split(/\n|\r|\r\n/); + + // The sandbox for fails-if(), etc., condition evaluation. This is not + // always required and so is created on demand. + var sandbox; + function GetOrCreateSandbox() { + if (!sandbox) { + sandbox = BuildConditionSandbox(aURL); + } + return sandbox; + } + + var lineNo = 0; + var urlprefix = ""; + var defaults = []; + var defaultTestPrefSettings = [], defaultRefPrefSettings = []; + if (g.compareRetainedDisplayLists) { + AddRetainedDisplayListTestPrefs(GetOrCreateSandbox(), defaultTestPrefSettings, + defaultRefPrefSettings); + } + for (var str of lines) { + ++lineNo; + if (str.charAt(0) == "#") + continue; // entire line was a comment + var i = str.search(/\s+#/); + if (i >= 0) + str = str.substring(0, i); + // strip leading and trailing whitespace + str = str.replace(/^\s*/, '').replace(/\s*$/, ''); + if (!str || str == "") + continue; + var items = str.split(/\s+/); // split on whitespace + + if (items[0] == "url-prefix") { + if (items.length != 2) + throw "url-prefix requires one url in manifest file " + aURL.spec + " line " + lineNo; + urlprefix = items[1]; + continue; + } + + if (items[0] == "defaults") { + items.shift(); + defaults = items; + continue; + } + + var expected_status = EXPECTED_PASS; + var allow_silent_fail = false; + var minAsserts = 0; + var maxAsserts = 0; + var needs_focus = false; + var slow = false; + var skip = false; + var testPrefSettings = defaultTestPrefSettings.concat(); + var refPrefSettings = defaultRefPrefSettings.concat(); + var fuzzy_delta = { min: 0, max: 2 }; + var fuzzy_pixels = { min: 0, max: 1 }; + var chaosMode = false; + var wrCapture = { test: false, ref: false }; + var nonSkipUsed = false; + var noAutoFuzz = false; + + var origLength = items.length; + items = defaults.concat(items); + while (items[0].match(/^(fails|needs-focus|random|skip|asserts|slow|require-or|silentfail|pref|test-pref|ref-pref|fuzzy|chaos-mode|wr-capture|wr-capture-ref|noautofuzz)/)) { + var item = items.shift(); + var stat; + var cond; + var m = item.match(/^(fails|random|skip|silentfail)-if(\(.*\))$/); + if (m) { + stat = m[1]; + // Note: m[2] contains the parentheses, and we want them. + cond = Cu.evalInSandbox(m[2], GetOrCreateSandbox()); + } else if (item.match(/^(fails|random|skip)$/)) { + stat = item; + cond = true; + } else if (item == "needs-focus") { + needs_focus = true; + cond = false; + } else if ((m = item.match(/^asserts\((\d+)(-\d+)?\)$/))) { + cond = false; + minAsserts = Number(m[1]); + maxAsserts = (m[2] == undefined) ? 
minAsserts + : Number(m[2].substring(1)); + } else if ((m = item.match(/^asserts-if\((.*?),(\d+)(-\d+)?\)$/))) { + cond = false; + if (Cu.evalInSandbox("(" + m[1] + ")", GetOrCreateSandbox())) { + minAsserts = Number(m[2]); + maxAsserts = + (m[3] == undefined) ? minAsserts + : Number(m[3].substring(1)); + } + } else if (item == "slow") { + cond = false; + slow = true; + } else if ((m = item.match(/^require-or\((.*?)\)$/))) { + var args = m[1].split(/,/); + if (args.length != 2) { + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": wrong number of args to require-or"; + } + var [precondition_str, fallback_action] = args; + var preconditions = precondition_str.split(/&&/); + cond = false; + for (var precondition of preconditions) { + if (precondition === "debugMode") { + // Currently unimplemented. Requires asynchronous + // JSD call + getting an event while no JS is running + stat = fallback_action; + cond = true; + break; + } else if (precondition === "true") { + // For testing + } else { + // Unknown precondition. Assume it is unimplemented. + stat = fallback_action; + cond = true; + break; + } + } + } else if ((m = item.match(/^slow-if\((.*?)\)$/))) { + cond = false; + if (Cu.evalInSandbox("(" + m[1] + ")", GetOrCreateSandbox())) + slow = true; + } else if (item == "silentfail") { + cond = false; + allow_silent_fail = true; + } else if ((m = item.match(RE_PREF_ITEM))) { + cond = false; + if (!AddPrefSettings(m[1], m[2], m[3], GetOrCreateSandbox(), + testPrefSettings, refPrefSettings)) { + throw "Error in pref value in manifest file " + aURL.spec + " line " + lineNo; + } + } else if ((m = item.match(/^fuzzy\((\d+)-(\d+),(\d+)-(\d+)\)$/))) { + cond = false; + expected_status = EXPECTED_FUZZY; + fuzzy_delta = ExtractRange(m, 1); + fuzzy_pixels = ExtractRange(m, 3); + } else if ((m = item.match(/^fuzzy-if\((.*?),(\d+)-(\d+),(\d+)-(\d+)\)$/))) { + cond = false; + if (Cu.evalInSandbox("(" + m[1] + ")", GetOrCreateSandbox())) { + expected_status = EXPECTED_FUZZY; + fuzzy_delta = ExtractRange(m, 2); + fuzzy_pixels = ExtractRange(m, 4); + } + } else if (item == "chaos-mode") { + cond = false; + chaosMode = true; + } else if (item == "wr-capture") { + cond = false; + wrCapture.test = true; + } else if (item == "wr-capture-ref") { + cond = false; + wrCapture.ref = true; + } else if (item == "noautofuzz") { + cond = false; + noAutoFuzz = true; + } else { + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": unexpected item " + item; + } + + if (stat != "skip") { + nonSkipUsed = true; + } + + if (cond) { + if (stat == "fails") { + expected_status = EXPECTED_FAIL; + } else if (stat == "random") { + expected_status = EXPECTED_RANDOM; + } else if (stat == "skip") { + skip = true; + } else if (stat == "silentfail") { + allow_silent_fail = true; + } + } + } + + if (items.length > origLength) { + // Implies we broke out of the loop before we finished processing + // defaults. This means defaults contained an invalid token. + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": invalid defaults token '" + items[0] + "'"; + } + + if (minAsserts > maxAsserts) { + throw "Bad range in manifest file " + aURL.spec + " line " + lineNo; + } + + var runHttp = false; + var httpDepth; + if (items[0] == "HTTP") { + runHttp = (aURL.scheme == "file"); // We can't yet run the local HTTP server + // for non-local reftests. + httpDepth = 0; + items.shift(); + } else if (items[0].match(/HTTP\(\.\.(\/\.\.)*\)/)) { + // Accept HTTP(..), HTTP(../..), HTTP(../../..), etc. 
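+ // For example "HTTP(..)" (length 8) gives httpDepth 1 and "HTTP(../..)"
+ // (length 11) gives httpDepth 2; ServeTestBase later serves a tree rooted
+ // that many directories above the manifest's directory.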
+ runHttp = (aURL.scheme == "file"); // We can't yet run the local HTTP server + // for non-local reftests. + httpDepth = (items[0].length - 5) / 3; + items.shift(); + } + + // do not prefix the url for include commands or urls specifying + // a protocol + if (urlprefix && items[0] != "include") { + if (items.length > 1 && !items[1].match(RE_PROTOCOL)) { + items[1] = urlprefix + items[1]; + } + if (items.length > 2 && !items[2].match(RE_PROTOCOL)) { + items[2] = urlprefix + items[2]; + } + } + + var principal = secMan.createContentPrincipal(aURL, {}); + + if (items[0] == "include") { + if (items.length != 2) + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to include"; + if (runHttp) + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": use of include with http"; + + // If the expected_status is EXPECTED_PASS (the default) then allow + // the include. If 'skip' is true, that means there was a skip + // or skip-if annotation (with a true condition) on this include + // statement, so we should skip the include. Any other expected_status + // is disallowed since it's nonintuitive as to what the intended + // effect is. + if (nonSkipUsed) { + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": include statement with annotation other than 'skip' or 'skip-if'"; + } else if (skip) { + g.logger.info("Skipping included manifest at " + aURL.spec + " line " + lineNo + " due to matching skip condition"); + } else { + // poor man's assertion + if (expected_status != EXPECTED_PASS) { + throw "Error in manifest file parsing code: we should never get expected_status=" + expected_status + " when nonSkipUsed=false (from " + aURL.spec + " line " + lineNo + ")"; + } + + var incURI = g.ioService.newURI(items[1], null, listURL); + secMan.checkLoadURIWithPrincipal(principal, incURI, + Ci.nsIScriptSecurityManager.DISALLOW_SCRIPT); + + // Cannot use nsIFile or similar to manipulate the manifest ID; although it appears + // path-like, it does not refer to an actual path in the filesystem. + var newManifestID = aManifestID; + var included = items[1]; + // Remove included manifest file name. + // eg. dir1/dir2/reftest.list -> dir1/dir2 + var pos = included.lastIndexOf("/"); + if (pos <= 0) { + included = ""; + } else { + included = included.substring(0, pos); + } + // Simplify references to parent directories. + // eg. dir1/dir2/../dir3 -> dir1/dir3 + while (included.startsWith("../")) { + pos = newManifestID.lastIndexOf("/"); + if (pos < 0) { + pos = 0; + } + newManifestID = newManifestID.substring(0, pos); + included = included.substring(3); + } + // Use a new manifest ID if the included manifest is in a different directory. 
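+ // For example, with aManifestID "dir1/dir2", including "dir3/reftest.list"
+ // yields "dir1/dir2/dir3", while including "../dir3/reftest.list" yields
+ // "dir1/dir3".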
+ if (included.length > 0) { + if (newManifestID.length > 0) { + newManifestID = newManifestID + "/" + included; + } else { + // parent directory includes may refer to the topsrcdir + newManifestID = included; + } + } + ReadManifest(incURI, aFilter, newManifestID); + } + } else if (items[0] == TYPE_LOAD || items[0] == TYPE_SCRIPT) { + var type = items[0]; + if (items.length != 2) + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to " + type; + if (type == TYPE_LOAD && expected_status != EXPECTED_PASS) + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect known failure type for load test"; + AddTestItem({ type: type, + expected: expected_status, + manifest: aURL.spec, + manifestID: TestIdentifier(aURL.spec, aManifestID), + allowSilentFail: allow_silent_fail, + minAsserts: minAsserts, + maxAsserts: maxAsserts, + needsFocus: needs_focus, + slow: slow, + skip: skip, + prefSettings1: testPrefSettings, + prefSettings2: refPrefSettings, + fuzzyMinDelta: fuzzy_delta.min, + fuzzyMaxDelta: fuzzy_delta.max, + fuzzyMinPixels: fuzzy_pixels.min, + fuzzyMaxPixels: fuzzy_pixels.max, + runHttp: runHttp, + httpDepth: httpDepth, + url1: items[1], + url2: null, + chaosMode: chaosMode, + wrCapture: wrCapture, + noAutoFuzz: noAutoFuzz }, aFilter, aManifestID); + } else if (items[0] == TYPE_REFTEST_EQUAL || items[0] == TYPE_REFTEST_NOTEQUAL || items[0] == TYPE_PRINT) { + if (items.length != 3) + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to " + items[0]; + + if (items[0] == TYPE_REFTEST_NOTEQUAL && + expected_status == EXPECTED_FUZZY && + (fuzzy_delta.min > 0 || fuzzy_pixels.min > 0)) { + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": minimum fuzz must be zero for tests of type " + items[0]; + } + + var type = items[0]; + if (g.compareRetainedDisplayLists) { + type = TYPE_REFTEST_EQUAL; + + // We expect twice as many assertion failures when comparing + // tests because we run each test twice. + minAsserts *= 2; + maxAsserts *= 2; + + // Skip the test if it is expected to fail in both modes. + // It would unexpectedly "pass" in comparison mode mode when + // comparing the two failures, which is not a useful result. + if (expected_status === EXPECTED_FAIL || + expected_status === EXPECTED_RANDOM) { + skip = true; + } + } + + AddTestItem({ type: type, + expected: expected_status, + manifest: aURL.spec, + manifestID: TestIdentifier(aURL.spec, aManifestID), + allowSilentFail: allow_silent_fail, + minAsserts: minAsserts, + maxAsserts: maxAsserts, + needsFocus: needs_focus, + slow: slow, + skip: skip, + prefSettings1: testPrefSettings, + prefSettings2: refPrefSettings, + fuzzyMinDelta: fuzzy_delta.min, + fuzzyMaxDelta: fuzzy_delta.max, + fuzzyMinPixels: fuzzy_pixels.min, + fuzzyMaxPixels: fuzzy_pixels.max, + runHttp: runHttp, + httpDepth: httpDepth, + url1: items[1], + url2: items[2], + chaosMode: chaosMode, + wrCapture: wrCapture, + noAutoFuzz: noAutoFuzz }, aFilter, aManifestID); + } else { + throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": unknown test type " + items[0]; + } + } +} + +// Read all available data from an input stream and return it +// as a string. +function getStreamContent(inputStream) +{ + var streamBuf = ""; + var sis = Cc["@mozilla.org/scriptableinputstream;1"]. 
+ createInstance(Ci.nsIScriptableInputStream); + sis.init(inputStream); + + var available; + while ((available = sis.available()) != 0) { + streamBuf += sis.read(available); + } + + return streamBuf; +} + +// Build the sandbox for fails-if(), etc., condition evaluation. +function BuildConditionSandbox(aURL) { + var sandbox = new Cu.Sandbox(aURL.spec); + var xr = Cc[NS_XREAPPINFO_CONTRACTID].getService(Ci.nsIXULRuntime); + var appInfo = Cc[NS_XREAPPINFO_CONTRACTID].getService(Ci.nsIXULAppInfo); + sandbox.isDebugBuild = g.debug.isDebugBuild; + sandbox.isCoverageBuild = g.isCoverageBuild; + var prefs = Cc["@mozilla.org/preferences-service;1"]. + getService(Ci.nsIPrefBranch); + var env = Cc["@mozilla.org/process/environment;1"]. + getService(Ci.nsIEnvironment); + + sandbox.xulRuntime = Cu.cloneInto({widgetToolkit: xr.widgetToolkit, OS: xr.OS, XPCOMABI: xr.XPCOMABI}, sandbox); + + var testRect = g.browser.getBoundingClientRect(); + sandbox.smallScreen = false; + if (g.containingWindow.innerWidth < 800 || g.containingWindow.innerHeight < 1000) { + sandbox.smallScreen = true; + } + + var gfxInfo = (NS_GFXINFO_CONTRACTID in Cc) && Cc[NS_GFXINFO_CONTRACTID].getService(Ci.nsIGfxInfo); + let readGfxInfo = function (obj, key) { + if (g.contentGfxInfo && (key in g.contentGfxInfo)) { + return g.contentGfxInfo[key]; + } + return obj[key]; + } + + try { + sandbox.d2d = readGfxInfo(gfxInfo, "D2DEnabled"); + sandbox.dwrite = readGfxInfo(gfxInfo, "DWriteEnabled"); + sandbox.embeddedInFirefoxReality = readGfxInfo(gfxInfo, "EmbeddedInFirefoxReality"); + } catch (e) { + sandbox.d2d = false; + sandbox.dwrite = false; + sandbox.embeddedInFirefoxReality = false; + } + + var info = gfxInfo.getInfo(); + var canvasBackend = readGfxInfo(info, "AzureCanvasBackend"); + var contentBackend = readGfxInfo(info, "AzureContentBackend"); + + sandbox.gpuProcess = gfxInfo.usingGPUProcess; + sandbox.azureCairo = canvasBackend == "cairo"; + sandbox.azureSkia = canvasBackend == "skia"; + sandbox.skiaContent = contentBackend == "skia"; + sandbox.azureSkiaGL = false; + // true if we are using the same Azure backend for rendering canvas and content + sandbox.contentSameGfxBackendAsCanvas = contentBackend == canvasBackend + || (contentBackend == "none" && canvasBackend == "cairo"); + + sandbox.remoteCanvas = prefs.getBoolPref("gfx.canvas.remote") && sandbox.d2d && sandbox.gpuProcess; + + sandbox.layersGPUAccelerated = + g.windowUtils.layerManagerType != "Basic"; + sandbox.d3d11 = + g.windowUtils.layerManagerType == "Direct3D 11"; + sandbox.d3d9 = + g.windowUtils.layerManagerType == "Direct3D 9"; + sandbox.layersOpenGL = + g.windowUtils.layerManagerType == "OpenGL"; + sandbox.swgl = + g.windowUtils.layerManagerType == "WebRender (Software)" + || g.windowUtils.layerManagerType == "WebRender (Software D3D11)"; + sandbox.webrender = + g.windowUtils.layerManagerType == "WebRender" || sandbox.swgl; + sandbox.layersOMTC = + g.windowUtils.layerManagerRemote == true; + sandbox.advancedLayers = + g.windowUtils.usingAdvancedLayers == true; + sandbox.layerChecksEnabled = !sandbox.webrender; + + sandbox.retainedDisplayList = + prefs.getBoolPref("layout.display-list.retain"); + + sandbox.usesOverlayScrollbars = g.windowUtils.usesOverlayScrollbars; + + // Shortcuts for widget toolkits. 
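+ // These keys can be used directly in manifest conditions, e.g.
+ // fails-if(Android) or skip-if(winWidget&&!is64Bit).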
+ sandbox.Android = xr.OS == "Android"; + sandbox.cocoaWidget = xr.widgetToolkit == "cocoa"; + sandbox.gtkWidget = xr.widgetToolkit == "gtk"; + sandbox.qtWidget = xr.widgetToolkit == "qt"; + sandbox.winWidget = xr.widgetToolkit == "windows"; + + sandbox.is64Bit = xr.is64Bit; + + // GeckoView is currently uniquely identified by "android + e10s" but + // we might want to make this condition more precise in the future. + sandbox.geckoview = (sandbox.Android && g.browserIsRemote); + + // Scrollbars that are semi-transparent. See bug 1169666. + sandbox.transparentScrollbars = xr.widgetToolkit == "gtk"; + + var sysInfo = Cc["@mozilla.org/system-info;1"].getService(Ci.nsIPropertyBag2); + if (sandbox.Android) { + // This is currently used to distinguish Android 4.0.3 (SDK version 15) + // and later from Android 2.x + sandbox.AndroidVersion = sysInfo.getPropertyAsInt32("version"); + + sandbox.emulator = readGfxInfo(gfxInfo, "adapterDeviceID").includes("Android Emulator"); + sandbox.device = !sandbox.emulator; + } + + sandbox.MinGW = sandbox.winWidget && sysInfo.getPropertyAsBool("isMinGW"); + +#if MOZ_ASAN + sandbox.AddressSanitizer = true; +#else + sandbox.AddressSanitizer = false; +#endif + +#if MOZ_WEBRTC + sandbox.webrtc = true; +#else + sandbox.webrtc = false; +#endif + + let retainedDisplayListsEnabled = prefs.getBoolPref("layout.display-list.retain", false); + sandbox.retainedDisplayLists = retainedDisplayListsEnabled && !g.compareRetainedDisplayLists; + sandbox.compareRetainedDisplayLists = g.compareRetainedDisplayLists; + +#ifdef RELEASE_OR_BETA + sandbox.release_or_beta = true; +#else + sandbox.release_or_beta = false; +#endif + + var hh = Cc[NS_NETWORK_PROTOCOL_CONTRACTID_PREFIX + "http"]. + getService(Ci.nsIHttpProtocolHandler); + var httpProps = ["userAgent", "appName", "appVersion", "vendor", + "vendorSub", "product", "productSub", "platform", + "oscpu", "language", "misc"]; + sandbox.http = new sandbox.Object(); + httpProps.forEach((x) => sandbox.http[x] = hh[x]); + + // Set OSX to be the Mac OS X version, as an integer, or undefined + // for other platforms. The integer is formed by 100 times the + // major version plus the minor version, so 1006 for 10.6, 1010 for + // 10.10, etc. + var osxmatch = /Mac OS X (\d+).(\d+)$/.exec(hh.oscpu); + sandbox.OSX = osxmatch ? parseInt(osxmatch[1]) * 100 + parseInt(osxmatch[2]) : undefined; + + // Plugins are no longer supported. Don't try to use TestPlugin. + sandbox.haveTestPlugin = false; + + // Set a flag on sandbox if the windows default theme is active + sandbox.windowsDefaultTheme = g.containingWindow.matchMedia("(-moz-windows-default-theme)").matches; + + try { + sandbox.nativeThemePref = !prefs.getBoolPref("widget.disable-native-theme-for-content"); + } catch (e) { + sandbox.nativeThemePref = true; + } + sandbox.gpuProcessForceEnabled = prefs.getBoolPref("layers.gpu-process.force-enabled", false); + + sandbox.prefs = Cu.cloneInto({ + getBoolPref: function(p) { return prefs.getBoolPref(p); }, + getIntPref: function(p) { return prefs.getIntPref(p); } + }, sandbox, { cloneFunctions: true }); + + // Tests shouldn't care about this except for when they need to + // crash the content process + sandbox.browserIsRemote = g.browserIsRemote; + sandbox.browserIsFission = g.browserIsFission; + + try { + sandbox.asyncPan = g.containingWindow.docShell.asyncPanZoomEnabled; + } catch (e) { + sandbox.asyncPan = false; + } + + // Graphics features + sandbox.usesRepeatResampling = sandbox.d2d; + + // Running in a test-verify session? 
+ sandbox.verify = prefs.getBoolPref("reftest.verify", false); + + // Running with a variant enabled? + sandbox.fission = Services.appinfo.fissionAutostart; + sandbox.serviceWorkerE10s = prefs.getBoolPref("dom.serviceWorkers.parent_intercept", false); + + if (!g.dumpedConditionSandbox) { + g.logger.info("Dumping JSON representation of sandbox"); + g.logger.info(JSON.stringify(Cu.waiveXrays(sandbox))); + g.dumpedConditionSandbox = true; + } + + return sandbox; +} + +function AddRetainedDisplayListTestPrefs(aSandbox, aTestPrefSettings, + aRefPrefSettings) { + AddPrefSettings("test-", "layout.display-list.retain", "true", aSandbox, + aTestPrefSettings, aRefPrefSettings); + AddPrefSettings("ref-", "layout.display-list.retain", "false", aSandbox, + aTestPrefSettings, aRefPrefSettings); +} + +function AddPrefSettings(aWhere, aPrefName, aPrefValExpression, aSandbox, aTestPrefSettings, aRefPrefSettings) { + var prefVal = Cu.evalInSandbox("(" + aPrefValExpression + ")", aSandbox); + var prefType; + var valType = typeof(prefVal); + if (valType == "boolean") { + prefType = PREF_BOOLEAN; + } else if (valType == "string") { + prefType = PREF_STRING; + } else if (valType == "number" && (parseInt(prefVal) == prefVal)) { + prefType = PREF_INTEGER; + } else { + return false; + } + var setting = { name: aPrefName, + type: prefType, + value: prefVal }; + + if (g.compareRetainedDisplayLists && aPrefName != "layout.display-list.retain") { + // ref-pref() is ignored, test-pref() and pref() are added to both + if (aWhere != "ref-") { + aTestPrefSettings.push(setting); + aRefPrefSettings.push(setting); + } + } else { + if (aWhere != "ref-") { + aTestPrefSettings.push(setting); + } + if (aWhere != "test-") { + aRefPrefSettings.push(setting); + } + } + return true; +} + +function ExtractRange(matches, startIndex) { + return { + min: Number(matches[startIndex]), + max: Number(matches[startIndex + 1]) + }; +} + +function ServeTestBase(aURL, depth) { + var listURL = aURL.QueryInterface(Ci.nsIFileURL); + var directory = listURL.file.parent; + + // Allow serving a tree that's an ancestor of the directory containing + // the files so that they can use resources in ../ (etc.). + var dirPath = "/"; + while (depth > 0) { + dirPath = "/" + directory.leafName + dirPath; + directory = directory.parent; + --depth; + } + + g.count++; + var path = "/" + Date.now() + "/" + g.count; + g.server.registerDirectory(path + "/", directory); + + var secMan = Cc[NS_SCRIPTSECURITYMANAGER_CONTRACTID] + .getService(Ci.nsIScriptSecurityManager); + + var testbase = g.ioService.newURI("http://localhost:" + g.httpServerPort + + path + dirPath); + var testBasePrincipal = secMan.createContentPrincipal(testbase, {}); + + // Give the testbase URI access to XUL and XBL + Services.perms.addFromPrincipal(testBasePrincipal, "allowXULXBL", Services.perms.ALLOW_ACTION); + return testbase; +} + +function CreateUrls(test) { + let secMan = Cc[NS_SCRIPTSECURITYMANAGER_CONTRACTID] + .getService(Ci.nsIScriptSecurityManager); + + let manifestURL = g.ioService.newURI(test.manifest); + + let testbase = manifestURL; + if (test.runHttp) + testbase = ServeTestBase(manifestURL, test.httpDepth) + + function FileToURI(file) + { + if (file === null) + return file; + + var testURI = g.ioService.newURI(file, null, testbase); + let isChromeOrViewSource = testURI.scheme == "chrome" || testURI.scheme == "view-source"; + let principal = isChromeOrViewSource ? 
secMan.getSystemPrincipal() : + secMan.createContentPrincipal(manifestURL, {}); + secMan.checkLoadURIWithPrincipal(principal, testURI, + Ci.nsIScriptSecurityManager.DISALLOW_SCRIPT); + return testURI; + } + + let files = [test.url1, test.url2]; + [test.url1, test.url2] = files.map(FileToURI); + + return test; +} + +function TestIdentifier(aUrl, aManifestID) { + // Construct a platform-independent and location-independent test identifier for + // a url; normally the identifier looks like a posix-compliant relative file + // path. + // Test urls may be simple file names, chrome: urls with full paths, about:blank, etc. + if (aUrl.startsWith("about:") || aUrl.startsWith("data:")) { + return aUrl; + } + var pos = aUrl.lastIndexOf("/"); + var url = (pos < 0) ? aUrl : aUrl.substring(pos + 1); + return (aManifestID + "/" + url); +} + +function AddTestItem(aTest, aFilter, aManifestID) { + if (!aFilter) + aFilter = [null, [], false]; + + var identifier = TestIdentifier(aTest.url1, aManifestID); + if (aTest.url2 !== null) { + identifier = [identifier, aTest.type, TestIdentifier(aTest.url2, aManifestID)]; + } + + var {url1, url2} = CreateUrls(Object.assign({}, aTest)); + + var globalFilter = aFilter[0]; + var manifestFilter = aFilter[1]; + var invertManifest = aFilter[2]; + if (globalFilter && !globalFilter.test(url1.spec)) + return; + if (manifestFilter && !(invertManifest ^ manifestFilter.test(url1.spec))) + return; + if (g.focusFilterMode == FOCUS_FILTER_NEEDS_FOCUS_TESTS && + !aTest.needsFocus) + return; + if (g.focusFilterMode == FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS && + aTest.needsFocus) + return; + + aTest.identifier = identifier; + g.urls.push(aTest); + // Periodically log progress to avoid no-output timeout on slow platforms. + // No-output timeouts during manifest parsing have been a problem for + // jsreftests on Android/debug. Any logging resets the no-output timer, + // even debug logging which is normally not displayed. + if ((g.urls.length % 5000) == 0) + g.logger.debug(g.urls.length + " tests found..."); +} diff --git a/layout/tools/reftest/manifest.json b/layout/tools/reftest/manifest.json new file mode 100644 index 0000000000..b7648de87c --- /dev/null +++ b/layout/tools/reftest/manifest.json @@ -0,0 +1,22 @@ +{ + "manifest_version": 2, + "name": "Reftest", + "version": "1.0", + + "applications": { + "gecko": { + "id": "reftest@mozilla.org" + } + }, + + "experiment_apis": { + "reftest": { + "schema": "schema.json", + "parent": { + "scopes": ["addon_parent"], + "script": "api.js", + "events": ["startup"] + } + } + } +} diff --git a/layout/tools/reftest/moz.build b/layout/tools/reftest/moz.build new file mode 100644 index 0000000000..21053755de --- /dev/null +++ b/layout/tools/reftest/moz.build @@ -0,0 +1,36 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +with Files("**"): + BUG_COMPONENT = ("Testing", "Reftest") + SCHEDULES.exclusive = ["reftest", "crashtest"] + +XPI_NAME = "reftest" +USE_EXTENSION_MANIFEST = True +JAR_MANIFESTS += ["jar.mn"] +FINAL_TARGET_FILES += [ + "api.js", + "manifest.json", + "schema.json", +] + +TEST_HARNESS_FILES.reftest += [ + "/build/pgo/server-locations.txt", + "/testing/mochitest/server.js", + "mach_test_package_commands.py", + "output.py", + "reftestcommandline.py", + "remotereftest.py", + "runreftest.py", +] + +TEST_HARNESS_FILES.reftest.chrome += [ + "chrome/binding.xml", + "chrome/userContent-import.css", + "chrome/userContent.css", +] + +TEST_HARNESS_FILES.reftest.manifest += ["reftest/__init__.py"] diff --git a/layout/tools/reftest/output.py b/layout/tools/reftest/output.py new file mode 100644 index 0000000000..b181b3fb54 --- /dev/null +++ b/layout/tools/reftest/output.py @@ -0,0 +1,192 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function + +import json +import threading +from collections import defaultdict + +from mozlog.formatters import TbplFormatter +from mozrunner.utils import get_stack_fixer_function + + +class ReftestFormatter(TbplFormatter): + """ + Formatter designed to preserve the legacy "tbpl" format in reftest. + + This is needed for both the reftest-analyzer and mozharness log parsing. + We can change this format when both reftest-analyzer and mozharness have + been changed to read structured logs. + """ + + def __call__(self, data): + if "component" in data and data["component"] == "mozleak": + # Output from mozleak requires that no prefix be added + # so that mozharness will pick up these failures. 
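+ # (other formatted actions, apart from process_output, get a
+ # "REFTEST " prefix below)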
+ return "%s\n" % data["message"] + + formatted = TbplFormatter.__call__(self, data) + + if formatted is None: + return + if data["action"] == "process_output": + return formatted + return "REFTEST %s" % formatted + + def log(self, data): + prefix = "%s |" % data["level"].upper() + return "%s %s\n" % (prefix, data["message"]) + + def _format_status(self, data): + extra = data.get("extra", {}) + status = data["status"] + + status_msg = "TEST-" + if "expected" in data: + status_msg += "UNEXPECTED-%s" % status + else: + if status not in ("PASS", "SKIP"): + status_msg += "KNOWN-" + status_msg += status + if extra.get("status_msg") == "Random": + status_msg += "(EXPECTED RANDOM)" + return status_msg + + def test_status(self, data): + extra = data.get("extra", {}) + test = data["test"] + + status_msg = self._format_status(data) + output_text = "%s | %s | %s" % ( + status_msg, + test, + data.get("subtest", "unknown test"), + ) + if data.get("message"): + output_text += " | %s" % data["message"] + + if "reftest_screenshots" in extra: + screenshots = extra["reftest_screenshots"] + image_1 = screenshots[0]["screenshot"] + + if len(screenshots) == 3: + image_2 = screenshots[2]["screenshot"] + output_text += ( + "\nREFTEST IMAGE 1 (TEST): data:image/png;base64,%s\n" + "REFTEST IMAGE 2 (REFERENCE): data:image/png;base64,%s" + ) % (image_1, image_2) + elif len(screenshots) == 1: + output_text += "\nREFTEST IMAGE: data:image/png;base64,%s" % image_1 + + return output_text + "\n" + + def test_end(self, data): + status = data["status"] + test = data["test"] + + output_text = "" + if status != "OK": + status_msg = self._format_status(data) + output_text = "%s | %s | %s" % (status_msg, test, data.get("message", "")) + + if output_text: + output_text += "\nREFTEST " + output_text += "TEST-END | %s" % test + return "%s\n" % output_text + + def process_output(self, data): + return "%s\n" % data["data"] + + def suite_end(self, data): + lines = [] + summary = data["extra"]["results"] + summary["success"] = summary["Pass"] + summary["LoadOnly"] + lines.append( + "Successful: %(success)s (%(Pass)s pass, %(LoadOnly)s load only)" % summary + ) + summary["unexpected"] = ( + summary["Exception"] + + summary["FailedLoad"] + + summary["UnexpectedFail"] + + summary["UnexpectedPass"] + + summary["AssertionUnexpected"] + + summary["AssertionUnexpectedFixed"] + ) + lines.append( + ( + "Unexpected: %(unexpected)s (%(UnexpectedFail)s unexpected fail, " + "%(UnexpectedPass)s unexpected pass, " + "%(AssertionUnexpected)s unexpected asserts, " + "%(FailedLoad)s failed load, " + "%(Exception)s exception)" + ) + % summary + ) + summary["known"] = ( + summary["KnownFail"] + + summary["AssertionKnown"] + + summary["Random"] + + summary["Skip"] + + summary["Slow"] + ) + lines.append( + ( + "Known problems: %(known)s (" + + "%(KnownFail)s known fail, " + + "%(AssertionKnown)s known asserts, " + + "%(Random)s random, " + + "%(Skip)s skipped, " + + "%(Slow)s slow)" + ) + % summary + ) + lines = ["REFTEST INFO | %s" % s for s in lines] + lines.append("REFTEST SUITE-END | Shutdown") + return "INFO | Result summary:\n{}\n".format("\n".join(lines)) + + +class OutputHandler(object): + """Process the output of a process during a test run and translate + raw data logged from reftest.js to an appropriate structured log action, + where applicable. 
+ """ + + def __init__(self, log, utilityPath, symbolsPath=None): + self.stack_fixer_function = get_stack_fixer_function(utilityPath, symbolsPath) + self.log = log + self.proc_name = None + self.results = defaultdict(int) + + def __call__(self, line): + # need to return processed messages to appease remoteautomation.py + if not line.strip(): + return [] + line = line.decode("utf-8", errors="replace") + + try: + data = json.loads(line) + except ValueError: + self.verbatim(line) + return [line] + + if isinstance(data, dict) and "action" in data: + if data["action"] == "results": + for k, v in data["results"].items(): + self.results[k] += v + else: + self.log.log_raw(data) + else: + self.verbatim(json.dumps(data)) + + return [data] + + def write(self, data): + return self.__call__(data) + + def verbatim(self, line): + if self.stack_fixer_function: + line = self.stack_fixer_function(line) + name = self.proc_name or threading.current_thread().name + self.log.process_output(name, line) diff --git a/layout/tools/reftest/reftest-analyzer-structured.xhtml b/layout/tools/reftest/reftest-analyzer-structured.xhtml new file mode 100644 index 0000000000..871b1bc08b --- /dev/null +++ b/layout/tools/reftest/reftest-analyzer-structured.xhtml @@ -0,0 +1,649 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- -*- Mode: HTML; tab-width: 2; indent-tabs-mode: nil; -*- --> +<!-- vim: set shiftwidth=2 tabstop=2 autoindent expandtab: --> +<!-- This Source Code Form is subject to the terms of the Mozilla Public + - License, v. 2.0. If a copy of the MPL was not distributed with this + - file, You can obtain one at http://mozilla.org/MPL/2.0/. --> +<!-- + +Features to add: +* make the left and right parts of the viewer independently scrollable +* make the test list filterable +** default to only showing unexpecteds +* add other ways to highlight differences other than circling? +* add zoom/pan to images +* Add ability to load log via XMLHttpRequest (also triggered via URL param) +* color the test list based on pass/fail and expected/unexpected/random/skip +* ability to load multiple logs ? 
+** rename them by clicking on the name and editing +** turn the test list into a collapsing tree view +** move log loading into popup from viewer UI + +--> +<!DOCTYPE html> +<html lang="en-US" xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml"> +<head> + <title>Reftest analyzer</title> + <style type="text/css"><![CDATA[ + + html, body { margin: 0; } + html { padding: 0; } + body { padding: 4px; } + + #pixelarea, #itemlist, #images { position: absolute; } + #itemlist, #images { overflow: auto; } + #pixelarea { top: 0; left: 0; width: 320px; height: 84px; overflow: visible } + #itemlist { top: 84px; left: 0; width: 320px; bottom: 0; } + #images { top: 0; bottom: 0; left: 320px; right: 0; } + + #leftpane { width: 320px; } + #images { position: fixed; top: 10px; left: 340px; } + + form#imgcontrols { margin: 0; display: block; } + + #itemlist > table { border-collapse: collapse; } + #itemlist > table > tbody > tr > td { border: 1px solid; padding: 1px; } + #itemlist td.activeitem { background-color: yellow; } + + /* + #itemlist > table > tbody > tr.pass > td.url { background: lime; } + #itemlist > table > tbody > tr.fail > td.url { background: red; } + */ + + #magnification > svg { display: block; width: 84px; height: 84px; } + + #pixelinfo { font: small sans-serif; position: absolute; width: 200px; left: 84px; } + #pixelinfo table { border-collapse: collapse; } + #pixelinfo table th { white-space: nowrap; text-align: left; padding: 0; } + #pixelinfo table td { font-family: monospace; padding: 0 0 0 0.25em; } + + #pixelhint { display: inline; color: #88f; cursor: help; } + #pixelhint > * { display: none; position: absolute; margin: 8px 0 0 8px; padding: 4px; width: 400px; background: #ffa; color: black; box-shadow: 3px 3px 2px #888; z-index: 1; } + #pixelhint:hover { color: #000; } + #pixelhint:hover > * { display: block; } + #pixelhint p { margin: 0; } + #pixelhint p + p { margin-top: 1em; } + + ]]></style> + <script type="text/javascript"><![CDATA[ + +var XLINK_NS = "http://www.w3.org/1999/xlink"; +var SVG_NS = "http://www.w3.org/2000/svg"; +var IMAGE_NOT_AVAILABLE = 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKAAAAASCAYAAADczdVTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAHy0lEQVRoge2aX2hb5xnGf2dYabROgqQkpMuKnWUJLmxHMFaa/SscteQiF5EvUgqLctEVrDJKK1+MolzkQr4IctgW+SLIheJc1BpFpswJw92FbaZsTCGTL0465AtntUekJdJ8lByVHbnnwLsLKbKdSJbiZBVjeuAYn+/P+z3fc97vfd9zbEVEhB566BK+1m0CPfx/o+eAPXQVbR3QqVapOl8FlR46h0O1Wu02iacCZfsasMKEz8vbx1JYE6fY/dXx6mEbFObPcvDVDBlznpc9G+2r8xNcvLqK2w39r4UI+fs7tFjmytgFFu718865EIebPGincI3zFz7Bcrtx97/GL0P+p+IPbSOgRwXtW3vpewqL/a/g5rgf39hit2m0hGUAHOHrrq3trmef4/lDB7Ay57n01zuPZXPX7jUunv+Yf9ktR7D/0CHca7/n3KXPsHbAuynkCWCZptgiImKLaVqP9NuW1bT9ceybpr3j+WJbYrVa3rbEatGZi2uixvWdrysilmWKae2M+5PqlktoosayLfubcrN10dAk24aynUsIxMVsadwUs+EX7dEyAlaXLqMoCj6fj5HkUqO9MD+Govjx+xXcXi+uoRAhvwuv182Z8Ws4AJUlxoZ8uNxuvF43ii/EtdXNNUuV68lR/IqC4gsxPj7KkE/BF5qmClRXrzFSt+/1ulDOjLNU6eQ4OcyPDqH4hhg5O4LicuN2K4xcvk6jjHUKJM8O1fvcKMoZkouFOq1VPp1OcuXGAvrvfsv0lWmSySTzN0sdH+jyYhK/ouB2e/G6XfjPJikBVG8SUhT8fl99nwVGfQp+vx+f4iO5VO1AtwJjfgXF58M/kqSVJP9ef0xuAI6NlwWmL41xxqeg+PyMXr72yBqW3cI4JaZHh1DcXrxeLy5liORiB7q1PiZFyeV0mQqz9TRZeUmFVUGLSjqdkgCIFp2RTCosEJOiiIihSyKWkDl9WYrFnCQCCNF0w0QmHhBQJTEzJ+nZSQmAoEYks2KIGBkJgASiM5I3LbGMnCSCCEQl38GJMvMZiag1e+nlFcmmIgKaZEwREaPGhWGZ1VfEMFZkNj4sgCSyhoihSzwSlqCGoAUlEo1IJByW+Oxyh+dZJJ+eklhiRnIrRcnrM6KCxLOmiNiipyICSGR2pTY2O1m7T2XEsNrrJmJLfjkn6amwoMbFaMEhG28eAVtzExErW3sOBCWVzkpmNiEqCOEZ2RyLTT3eJAKaMhVEUMOSXjHEtg3JTIUFkNTK9rGwbQrWm2xGb6QoWxIqEtdtEWO28aDtoi6JSFCAjUtL1AUzJA4SSW/IZ2VjjU0V0zEBJBiJSzwWk1g8IZEAAmrdidrBkoSKxB4IW08tGVNEzIxoIJM5a8v4SQ1RY5lGSy6x8xScz6QkHFBre1Zre49nH+y1KDEQLV7TcyU1LBCtHVppp9smxk2dYAMtHXA7blZWNJDZ4sZ4MxPbdHjrbc3WNuvOq4YlkYhLLBaXeKx2sLcrBUS2ScFtUbUBh3WgajvgOYgGuKjw4Rsqb1uvkssbWLbJXFQFqL/I9IEKa2WzYcqy16E2BNteB1R+cuwoRwcHGRx4nlfenWMuPclRDx3goSraqd+7Gj/Y5d76SrXLu3VKLYW1rMZbo/QpB4+9zt6fT1I0Law/LRMBaLzC7ePNuSgL7/2GpcotLr7+AZG5t9gH0Fa3zuFq1tiWG4DKs5tebV1NDDW1XYd26iWO9A8wODjAUfUN5ubm+Ch4ZFuuLRzQoVwqUCqXyN9fg3tFSuUShVIZhyr5O2vo94o42DwD/PP23fq8Bf5urLO+BoHBwxzc20c++wcmz+lAkWLFATwcf3+YDwIDhMYmuDw+wt5j5+C5ZwDYP/gSoLP6xX5+fOIkJ47/lIP8g49/Nc3tDj59OZUiRR3uFYsAVO/eZoE1yvkyeA6gAaff+zU3SxUcp8LilQucnoFTP3hhix19/garlQqFW9eZOBti9Mqt9mubXwBw+NALeDC4cfVDzgP3i3keUN/nf4uo+hEver/DRaK84/9mY/72uoFTKVMolVn5/HPgPvlSmVKhRL2bSrlEqVyidH8N/d7t2u/lakfcKneLgM4rvxhncbXA6tI8kTffB+0NjnrAqZYplcrk83ceXdtzgB+psHD7S/pfPs7JkydQB1x8dnWS2SVje9GaxkVLl+DmNNC4NJn/S6JxH5nJyNRwrW7Qi7oMgxBMyd9molvmRKO1cExgshG6l9NTEhkOynAkLlOJoKBuhPV8ZlK0h9aNTqVbv3ltEK/VIiAQEN0yZVLbuM+aImLoEgts3VdsJrfFil1M1/ZSv9RAROaWO8n/hkyF1Q3bgeFGygvPrDRG5Wcf1IJbq9rlNrrNbra96aqlUVMSWrNnNiw5uw23T/4o4Xq7FtA29h2My3K9WtETgRZr13UxdIk+pGswkpCcsX0N2OZD9BOgWqFsgWePp20KWb0ywkDgEIa8y55Gq0O5XKHP7cGz++l/haxWylgOuD17aG7eoVpxwL27RX8b27jZ42n1qdahXKrg2bfnUW0eQ7edoD232l+/LPp2pHvNfh8eT2f8/3sO2AZLyRAvns6gqToLOgxP6Uz87HvdoNJDF9E1B6ysLrLw5yW+3PUNvv3dH/L9wX3doNFDl9E1B+yhB+j9O1YPXcZ/AAl9BWJNvZE7AAAAAElFTkSuQmCC"; + +var gPhases = null; + +var gIDCache = {}; + +var gMagPixPaths = []; // 2D array of array-of-two <path> objects used in the pixel magnifier +var gMagWidth = 5; // number of zoomed in pixels to show horizontally +var gMagHeight = 5; // number of zoomed in pixels to show vertically +var gMagZoom = 16; // size of the zoomed in pixels +var gImage1Data; // ImageData object for the reference image +var gImage2Data; // ImageData object for the test output image +var gFlashingPixels = []; // array of <path> objects that should be flashed due to pixel color mismatch +var gParams; + +function ID(id) { + if (!(id in gIDCache)) + gIDCache[id] = document.getElementById(id); + return gIDCache[id]; +} + +function hash_parameters() { + var result = { }; + var params = 
window.location.hash.substr(1).split(/[&;]/); + for (var i = 0; i < params.length; i++) { + var parts = params[i].split("="); + result[parts[0]] = unescape(unescape(parts[1])); + } + return result; +} + +function load() { + gPhases = [ ID("entry"), ID("loading"), ID("viewer") ]; + build_mag(); + gParams = hash_parameters(); + if (gParams.log) { + show_phase("loading"); + process_log(gParams.log); + } else if (gParams.logurl) { + show_phase("loading"); + var req = new XMLHttpRequest(); + req.onreadystatechange = function() { + if (req.readyState === 4) { + process_log(req.responseText); + } + }; + req.open('GET', gParams.logurl, true); + req.send(); + } + window.addEventListener('keypress', handle_keyboard_shortcut); + ID("image1").addEventListener('error', image_load_error); + ID("image2").addEventListener('error', image_load_error); +} + +function image_load_error(e) { + e.target.setAttributeNS(XLINK_NS, "xlink:href", IMAGE_NOT_AVAILABLE); +} + +function build_mag() { + var mag = ID("mag"); + + var r = document.createElementNS(SVG_NS, "rect"); + r.setAttribute("x", gMagZoom * -gMagWidth / 2); + r.setAttribute("y", gMagZoom * -gMagHeight / 2); + r.setAttribute("width", gMagZoom * gMagWidth); + r.setAttribute("height", gMagZoom * gMagHeight); + mag.appendChild(r); + + mag.setAttribute("transform", "translate(" + (gMagZoom * (gMagWidth / 2) + 1) + "," + (gMagZoom * (gMagHeight / 2) + 1) + ")"); + + for (var x = 0; x < gMagWidth; x++) { + gMagPixPaths[x] = []; + for (var y = 0; y < gMagHeight; y++) { + var p1 = document.createElementNS(SVG_NS, "path"); + p1.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "h" + -gMagZoom + "v" + gMagZoom); + p1.setAttribute("stroke", "black"); + p1.setAttribute("stroke-width", "1px"); + p1.setAttribute("fill", "#aaa"); + + var p2 = document.createElementNS(SVG_NS, "path"); + p2.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "v" + gMagZoom + "h" + -gMagZoom); + p2.setAttribute("stroke", "black"); + p2.setAttribute("stroke-width", "1px"); + p2.setAttribute("fill", "#888"); + + mag.appendChild(p1); + mag.appendChild(p2); + gMagPixPaths[x][y] = [p1, p2]; + } + } + + var flashedOn = false; + setInterval(function() { + flashedOn = !flashedOn; + flash_pixels(flashedOn); + }, 500); +} + +function show_phase(phaseid) { + for (var i in gPhases) { + var phase = gPhases[i]; + phase.style.display = (phase.id == phaseid) ? "" : "none"; + } + + if (phase == "viewer") + ID("images").style.display = "none"; +} + +function fileentry_changed() { + show_phase("loading"); + var input = ID("fileentry"); + var files = input.files; + if (files.length > 0) { + // Only handle the first file; don't handle multiple selection. + // The parts of the log we care about are ASCII-only. Since we + // can ignore lines we don't care about, best to read in as + // iso-8859-1, which guarantees we don't get decoding errors. + var fileReader = new FileReader(); + fileReader.onload = function(e) { + var log = null; + + log = e.target.result; + + if (log) + process_log(log); + else + show_phase("entry"); + } + fileReader.readAsText(files[0], "iso-8859-1"); + } + // So the user can process the same filename again (after + // overwriting the log), clear the value on the form input so we + // will always get an onchange event. 
+ input.value = ""; +} + +function log_pasted() { + show_phase("loading"); + var entry = ID("logentry"); + var log = entry.value; + entry.value = ""; + process_log(log); +} + +var gTestItems; + +function process_log(contents) { + var lines = contents.split(/[\r\n]+/); + gTestItems = []; + for (var j in lines) { + var line = lines[j]; + try { + var data = JSON.parse(line); + } catch(e) { + continue; + } + // Ignore duplicated output in logcat. + if (!data.action == "test_end" && data.status != "FAIL") + continue; + + if (!data.hasOwnProperty("extra") || + !data.extra.hasOwnProperty("reftest_screenshots")) { + continue; + } + + var url = data.test; + var screenshots = data.extra.reftest_screenshots; + gTestItems.push( + { + pass: data.status === "PASS", + // only one of the following three should ever be true + unexpected: data.hasOwnProperty("expected"), + random: false, + skip: data.status == "SKIP", + url: url, + images: [], + imageLabels: [] + }); + + var item = gTestItems[gTestItems.length - 1]; + item.images.push("data:image/png;base64," + screenshots[0].screenshot); + item.imageLabels.push(screenshots[0].url); + if (screenshots.length > 1) { + item.images.push("data:image/png;base64," + screenshots[2].screenshot); + item.imageLabels.push(screenshots[2].url); + } + } + build_viewer(); +} + +function build_viewer() { + if (gTestItems.length == 0) { + show_phase("entry"); + return; + } + + var cell = ID("itemlist"); + while (cell.childNodes.length > 0) + cell.removeChild(cell.childNodes[cell.childNodes.length - 1]); + + var table = document.createElement("table"); + var tbody = document.createElement("tbody"); + table.appendChild(tbody); + + for (var i in gTestItems) { + var item = gTestItems[i]; + + // optional url filter for only showing unexpected results + if (parseInt(gParams.only_show_unexpected) && !item.unexpected) + continue; + + // XXX regardless skip expected pass items until we have filtering UI + if (item.pass && !item.unexpected) + continue; + + var tr = document.createElement("tr"); + var rowclass = item.pass ? "pass" : "fail"; + var td; + var text; + + td = document.createElement("td"); + text = ""; + if (item.unexpected) { text += "!"; rowclass += " unexpected"; } + if (item.random) { text += "R"; rowclass += " random"; } + if (item.skip) { text += "S"; rowclass += " skip"; } + td.appendChild(document.createTextNode(text)); + tr.appendChild(td); + + td = document.createElement("td"); + td.id = "item" + i; + td.className = "url"; + // Only display part of URL after "/mozilla/". + var match = item.url.match(/\/mozilla\/(.*)/); + text = document.createTextNode(match ? 
match[1] : item.url); + if (item.images.length > 0) { + var a = document.createElement("a"); + a.href = "javascript:show_images(" + i + ")"; + a.appendChild(text); + td.appendChild(a); + } else { + td.appendChild(text); + } + tr.appendChild(td); + + tbody.appendChild(tr); + } + + cell.appendChild(table); + + show_phase("viewer"); +} + +function get_image_data(src, whenReady) { + var img = new Image(); + img.onload = function() { + var canvas = document.createElement("canvas"); + canvas.width = img.naturalWidth; + canvas.height = img.naturalHeight; + + var ctx = canvas.getContext("2d"); + ctx.drawImage(img, 0, 0); + + whenReady(ctx.getImageData(0, 0, img.naturalWidth, img.naturalHeight)); + }; + img.src = src; +} + +function sync_svg_size(imageData) { + // We need the size of the 'svg' and its 'image' elements to match the size + // of the ImageData objects that we're going to read pixels from or else our + // magnify() function will be very broken. + ID("svg").setAttribute("width", imageData.width); + ID("svg").setAttribute("height", imageData.height); +} + +function show_images(i) { + var item = gTestItems[i]; + var cell = ID("images"); + + // Remove activeitem class from any existing elements + var activeItems = document.querySelectorAll(".activeitem"); + for (var activeItemIdx = activeItems.length; activeItemIdx-- != 0;) { + activeItems[activeItemIdx].classList.remove("activeitem"); + } + + ID("item" + i).classList.add("activeitem"); + ID("image1").style.display = ""; + ID("image2").style.display = "none"; + ID("diffrect").style.display = "none"; + ID("imgcontrols").reset(); + + ID("image1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]); + // Making the href be #image1 doesn't seem to work + ID("feimage1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]); + if (item.images.length == 1) { + ID("imgcontrols").style.display = "none"; + } else { + ID("imgcontrols").style.display = ""; + + ID("image2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]); + // Making the href be #image2 doesn't seem to work + ID("feimage2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]); + + ID("label1").textContent = 'Image ' + item.imageLabels[0]; + ID("label2").textContent = 'Image ' + item.imageLabels[1]; + } + + cell.style.display = ""; + + get_image_data(item.images[0], function(data) { gImage1Data = data; sync_svg_size(gImage1Data); }); + get_image_data(item.images[1], function(data) { gImage2Data = data }); +} + +function show_image(i) { + if (i == 1) { + ID("image1").style.display = ""; + ID("image2").style.display = "none"; + } else { + ID("image1").style.display = "none"; + ID("image2").style.display = ""; + } +} + +function handle_keyboard_shortcut(event) { + switch (event.charCode) { + case 49: // "1" key + document.getElementById("radio1").checked = true; + show_image(1); + break; + case 50: // "2" key + document.getElementById("radio2").checked = true; + show_image(2); + break; + case 100: // "d" key + document.getElementById("differences").click(); + break; + case 112: // "p" key + shift_images(-1); + break; + case 110: // "n" key + shift_images(1); + break; + } +} + +function shift_images(dir) { + var activeItem = document.querySelector(".activeitem"); + if (!activeItem) { + return; + } + for (var elm = activeItem; elm; elm = elm.parentElement) { + if (elm.tagName != "tr") { + continue; + } + elm = dir > 0 ? 
elm.nextElementSibling : elm.previousElementSibling; + if (elm) { + elm.getElementsByTagName("a")[0].click(); + } + return; + } +} + +function show_differences(cb) { + ID("diffrect").style.display = cb.checked ? "" : "none"; +} + +function flash_pixels(on) { + var stroke = on ? "red" : "black"; + var strokeWidth = on ? "2px" : "1px"; + for (var i = 0; i < gFlashingPixels.length; i++) { + gFlashingPixels[i].setAttribute("stroke", stroke); + gFlashingPixels[i].setAttribute("stroke-width", strokeWidth); + } +} + +function cursor_point(evt) { + var m = evt.target.getScreenCTM().inverse(); + var p = ID("svg").createSVGPoint(); + p.x = evt.clientX; + p.y = evt.clientY; + p = p.matrixTransform(m); + return { x: Math.floor(p.x), y: Math.floor(p.y) }; +} + +function hex2(i) { + return (i < 16 ? "0" : "") + i.toString(16); +} + +function canvas_pixel_as_hex(data, x, y) { + var offset = (y * data.width + x) * 4; + var r = data.data[offset]; + var g = data.data[offset + 1]; + var b = data.data[offset + 2]; + return "#" + hex2(r) + hex2(g) + hex2(b); +} + +function hex_as_rgb(hex) { + return "rgb(" + [parseInt(hex.substring(1, 3), 16), parseInt(hex.substring(3, 5), 16), parseInt(hex.substring(5, 7), 16)] + ")"; +} + +function magnify(evt) { + var { x: x, y: y } = cursor_point(evt); + var centerPixelColor1, centerPixelColor2; + + var dx_lo = -Math.floor(gMagWidth / 2); + var dx_hi = Math.floor(gMagWidth / 2); + var dy_lo = -Math.floor(gMagHeight / 2); + var dy_hi = Math.floor(gMagHeight / 2); + + flash_pixels(false); + gFlashingPixels = []; + for (var j = dy_lo; j <= dy_hi; j++) { + for (var i = dx_lo; i <= dx_hi; i++) { + var px = x + i; + var py = y + j; + var p1 = gMagPixPaths[i + dx_hi][j + dy_hi][0]; + var p2 = gMagPixPaths[i + dx_hi][j + dy_hi][1]; + // Here we just use the dimensions of gImage1Data since we expect test + // and reference to have the same dimensions. + if (px < 0 || py < 0 || px >= gImage1Data.width || py >= gImage1Data.height) { + p1.setAttribute("fill", "#aaa"); + p2.setAttribute("fill", "#888"); + } else { + var color1 = canvas_pixel_as_hex(gImage1Data, x + i, y + j); + var color2 = canvas_pixel_as_hex(gImage2Data, x + i, y + j); + p1.setAttribute("fill", color1); + p2.setAttribute("fill", color2); + if (color1 != color2) { + gFlashingPixels.push(p1, p2); + p1.parentNode.appendChild(p1); + p2.parentNode.appendChild(p2); + } + if (i == 0 && j == 0) { + centerPixelColor1 = color1; + centerPixelColor2 = color2; + } + } + } + } + flash_pixels(true); + show_pixelinfo(x, y, centerPixelColor1, hex_as_rgb(centerPixelColor1), centerPixelColor2, hex_as_rgb(centerPixelColor2)); +} + +function show_pixelinfo(x, y, pix1rgb, pix1hex, pix2rgb, pix2hex) { + var pixelinfo = ID("pixelinfo"); + ID("coords").textContent = [x, y]; + ID("pix1hex").textContent = pix1hex; + ID("pix1rgb").textContent = pix1rgb; + ID("pix2hex").textContent = pix2hex; + ID("pix2rgb").textContent = pix2rgb; +} + + ]]></script> + +</head> +<body onload="load()"> + +<div id="entry"> + +<h1>Reftest analyzer: load raw structured log</h1> + +<p>Either paste your log into this textarea:<br /> +<textarea cols="80" rows="10" id="logentry"/><br/> +<input type="button" value="Process pasted log" onclick="log_pasted()" /></p> + +<p>... 
or load it from a file:<br/> +<input type="file" id="fileentry" onchange="fileentry_changed()" /> +</p> +</div> + +<div id="loading" style="display:none">Loading log...</div> + +<div id="viewer" style="display:none"> + <div id="pixelarea"> + <div id="pixelinfo"> + <table> + <tbody> + <tr><th>Pixel at:</th><td colspan="2" id="coords"/></tr> + <tr><th>Image 1:</th><td id="pix1rgb"></td><td id="pix1hex"></td></tr> + <tr><th>Image 2:</th><td id="pix2rgb"></td><td id="pix2hex"></td></tr> + </tbody> + </table> + <div> + <div id="pixelhint">★ + <div> + <p>Move the mouse over the reftest image on the right to show + magnified pixels on the left. The color information above is for + the pixel centered in the magnified view.</p> + <p>Image 1 is shown in the upper triangle of each pixel and Image 2 + is shown in the lower triangle.</p> + </div> + </div> + </div> + </div> + <div id="magnification"> + <svg xmlns="http://www.w3.org/2000/svg" width="84" height="84" shape-rendering="optimizeSpeed"> + <g id="mag"/> + </svg> + </div> + </div> + <div id="itemlist"></div> + <div id="images" style="display:none"> + <form id="imgcontrols"> + <input id="radio1" type="radio" name="which" value="0" onchange="show_image(1)" checked="checked" /><label id="label1" title="1" for="radio1">Image 1</label> + <input id="radio2" type="radio" name="which" value="1" onchange="show_image(2)" /><label id="label2" title="2" for="radio2">Image 2</label> + <label><input id="differences" type="checkbox" onchange="show_differences(this)" />Circle differences</label> + </form> + <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="800" height="1000" id="svg"> + <defs> + <!-- use sRGB to avoid loss of data --> + <filter id="showDifferences" x="0%" y="0%" width="100%" height="100%" + style="color-interpolation-filters: sRGB"> + <feImage id="feimage1" result="img1" xlink:href="#image1" /> + <feImage id="feimage2" result="img2" xlink:href="#image2" /> + <!-- inv1 and inv2 are the images with RGB inverted --> + <feComponentTransfer result="inv1" in="img1"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <feComponentTransfer result="inv2" in="img2"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <!-- w1 will have non-white pixels anywhere that img2 + is brighter than img1, and w2 for the reverse. + It would be nice not to have to go through these + intermediate states, but feComposite + type="arithmetic" can't transform the RGB channels + and leave the alpha channel untouched. 
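+             For reference, feComposite type="arithmetic" computes
+             k1*i1*i2 + k2*i1 + k3*i2 + k4 per channel; with k2="1" k3="1"
+             as used below, w1 = img1 + (1 - img2), which clamps to white
+             wherever img1 is at least as bright as img2 (and likewise w2
+             for the reverse comparison).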
--> + <feComposite result="w1" in="img1" in2="inv2" operator="arithmetic" k2="1" k3="1" /> + <feComposite result="w2" in="img2" in2="inv1" operator="arithmetic" k2="1" k3="1" /> + <!-- c1 will have non-black pixels anywhere that img2 + is brighter than img1, and c2 for the reverse --> + <feComponentTransfer result="c1" in="w1"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <feComponentTransfer result="c2" in="w2"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <!-- c will be nonblack (and fully on) for every pixel+component where there are differences --> + <feComposite result="c" in="c1" in2="c2" operator="arithmetic" k2="255" k3="255" /> + <!-- a will be opaque for every pixel with differences and transparent for all others --> + <feColorMatrix result="a" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0" /> + + <!-- a, dilated by 1 pixel --> + <feMorphology result="dila1" in="a" operator="dilate" radius="1" /> + <!-- a, dilated by 2 pixels --> + <feMorphology result="dila2" in="dila1" operator="dilate" radius="1" /> + + <!-- all the pixels in the 2-pixel dilation of a but not in the 1-pixel dilation, to highlight the diffs --> + <feComposite result="highlight" in="dila2" in2="dila1" operator="out" /> + + <feFlood result="red" flood-color="red" /> + <feComposite result="redhighlight" in="red" in2="highlight" operator="in" /> + <feFlood result="black" flood-color="black" flood-opacity="0.5" /> + <feMerge> + <feMergeNode in="black" /> + <feMergeNode in="redhighlight" /> + </feMerge> + </filter> + </defs> + <g onmousemove="magnify(evt)"> + <image x="0" y="0" width="100%" height="100%" id="image1" /> + <image x="0" y="0" width="100%" height="100%" id="image2" /> + </g> + <rect id="diffrect" filter="url(#showDifferences)" pointer-events="none" x="0" y="0" width="100%" height="100%" /> + </svg> + </div> +</div> + +</body> +</html> diff --git a/layout/tools/reftest/reftest-analyzer.xhtml b/layout/tools/reftest/reftest-analyzer.xhtml new file mode 100644 index 0000000000..4c7b26511a --- /dev/null +++ b/layout/tools/reftest/reftest-analyzer.xhtml @@ -0,0 +1,934 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- -*- Mode: HTML; tab-width: 2; indent-tabs-mode: nil; -*- --> +<!-- vim: set shiftwidth=2 tabstop=2 autoindent expandtab: --> +<!-- This Source Code Form is subject to the terms of the Mozilla Public + - License, v. 2.0. If a copy of the MPL was not distributed with this + - file, You can obtain one at http://mozilla.org/MPL/2.0/. --> +<!-- + +Features to add: +* make the left and right parts of the viewer independently scrollable +* make the test list filterable +** default to only showing unexpecteds +* add other ways to highlight differences other than circling? +* add zoom/pan to images +* Add ability to load log via XMLHttpRequest (also triggered via URL param) +* color the test list based on pass/fail and expected/unexpected/random/skip +* ability to load multiple logs ? 
+** rename them by clicking on the name and editing +** turn the test list into a collapsing tree view +** move log loading into popup from viewer UI + +--> +<!DOCTYPE html> +<html lang="en-US" xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml"> +<head> + <title>Reftest analyzer</title> + <style type="text/css"><![CDATA[ + + html, body { margin: 0; } + html { padding: 0; } + body { padding: 4px; } + + #pixelarea, #itemlist, #images { position: absolute; } + #itemlist, #images { overflow: auto; } + #pixelarea { top: 0; left: 0; width: 320px; height: 84px; overflow: visible } + #itemlist { top: 84px; left: 0; width: 320px; bottom: 0; } + #images { top: 0; bottom: 0; left: 320px; right: 0; } + + #leftpane { width: 320px; } + #images { position: fixed; top: 10px; left: 340px; } + + form#imgcontrols { margin: 0; display: block; } + + #itemlist > table { border-collapse: collapse; } + #itemlist > table > tbody > tr > td { border: 1px solid; padding: 1px; } + #itemlist td.activeitem { background-color: yellow; } + + /* + #itemlist > table > tbody > tr.pass > td.url { background: lime; } + #itemlist > table > tbody > tr.fail > td.url { background: red; } + */ + + #magnification > svg { display: block; width: 84px; height: 84px; } + + #pixelinfo { font: small sans-serif; position: absolute; width: 200px; left: 84px; } + #pixelinfo table { border-collapse: collapse; } + #pixelinfo table th { white-space: nowrap; text-align: left; padding: 0; } + #pixelinfo table td { font-family: monospace; padding: 0 0 0 0.25em; } + + #pixelhint { display: inline; color: #88f; cursor: help; } + #pixelhint > * { display: none; position: absolute; margin: 8px 0 0 8px; padding: 4px; width: 400px; background: #ffa; color: black; box-shadow: 3px 3px 2px #888; z-index: 1; } + #pixelhint:hover { color: #000; } + #pixelhint:hover > * { display: block; } + #pixelhint p { margin: 0; } + #pixelhint p + p { margin-top: 1em; } + + ]]></style> + <script type="text/javascript"><![CDATA[ + +var XLINK_NS = "http://www.w3.org/1999/xlink"; +var SVG_NS = "http://www.w3.org/2000/svg"; +var IMAGE_NOT_AVAILABLE = 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKAAAAASCAYAAADczdVTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAHy0lEQVRoge2aX2hb5xnGf2dYabROgqQkpMuKnWUJLmxHMFaa/SscteQiF5EvUgqLctEVrDJKK1+MolzkQr4IctgW+SLIheJc1BpFpswJw92FbaZsTCGTL0465AtntUekJdJ8lByVHbnnwLsLKbKdSJbiZBVjeuAYn+/P+z3fc97vfd9zbEVEhB566BK+1m0CPfx/o+eAPXQVbR3QqVapOl8FlR46h0O1Wu02iacCZfsasMKEz8vbx1JYE6fY/dXx6mEbFObPcvDVDBlznpc9G+2r8xNcvLqK2w39r4UI+fs7tFjmytgFFu718865EIebPGincI3zFz7Bcrtx97/GL0P+p+IPbSOgRwXtW3vpewqL/a/g5rgf39hit2m0hGUAHOHrrq3trmef4/lDB7Ay57n01zuPZXPX7jUunv+Yf9ktR7D/0CHca7/n3KXPsHbAuynkCWCZptgiImKLaVqP9NuW1bT9ceybpr3j+WJbYrVa3rbEatGZi2uixvWdrysilmWKae2M+5PqlktoosayLfubcrN10dAk24aynUsIxMVsadwUs+EX7dEyAlaXLqMoCj6fj5HkUqO9MD+Govjx+xXcXi+uoRAhvwuv182Z8Ws4AJUlxoZ8uNxuvF43ii/EtdXNNUuV68lR/IqC4gsxPj7KkE/BF5qmClRXrzFSt+/1ulDOjLNU6eQ4OcyPDqH4hhg5O4LicuN2K4xcvk6jjHUKJM8O1fvcKMoZkouFOq1VPp1OcuXGAvrvfsv0lWmSySTzN0sdH+jyYhK/ouB2e/G6XfjPJikBVG8SUhT8fl99nwVGfQp+vx+f4iO5VO1AtwJjfgXF58M/kqSVJP9ef0xuAI6NlwWmL41xxqeg+PyMXr72yBqW3cI4JaZHh1DcXrxeLy5liORiB7q1PiZFyeV0mQqz9TRZeUmFVUGLSjqdkgCIFp2RTCosEJOiiIihSyKWkDl9WYrFnCQCCNF0w0QmHhBQJTEzJ+nZSQmAoEYks2KIGBkJgASiM5I3LbGMnCSCCEQl38GJMvMZiag1e+nlFcmmIgKaZEwREaPGhWGZ1VfEMFZkNj4sgCSyhoihSzwSlqCGoAUlEo1IJByW+Oxyh+dZJJ+eklhiRnIrRcnrM6KCxLOmiNiipyICSGR2pTY2O1m7T2XEsNrrJmJLfjkn6amwoMbFaMEhG28eAVtzExErW3sOBCWVzkpmNiEqCOEZ2RyLTT3eJAKaMhVEUMOSXjHEtg3JTIUFkNTK9rGwbQrWm2xGb6QoWxIqEtdtEWO28aDtoi6JSFCAjUtL1AUzJA4SSW/IZ2VjjU0V0zEBJBiJSzwWk1g8IZEAAmrdidrBkoSKxB4IW08tGVNEzIxoIJM5a8v4SQ1RY5lGSy6x8xScz6QkHFBre1Zre49nH+y1KDEQLV7TcyU1LBCtHVppp9smxk2dYAMtHXA7blZWNJDZ4sZ4MxPbdHjrbc3WNuvOq4YlkYhLLBaXeKx2sLcrBUS2ScFtUbUBh3WgajvgOYgGuKjw4Rsqb1uvkssbWLbJXFQFqL/I9IEKa2WzYcqy16E2BNteB1R+cuwoRwcHGRx4nlfenWMuPclRDx3goSraqd+7Gj/Y5d76SrXLu3VKLYW1rMZbo/QpB4+9zt6fT1I0Law/LRMBaLzC7ePNuSgL7/2GpcotLr7+AZG5t9gH0Fa3zuFq1tiWG4DKs5tebV1NDDW1XYd26iWO9A8wODjAUfUN5ubm+Ch4ZFuuLRzQoVwqUCqXyN9fg3tFSuUShVIZhyr5O2vo94o42DwD/PP23fq8Bf5urLO+BoHBwxzc20c++wcmz+lAkWLFATwcf3+YDwIDhMYmuDw+wt5j5+C5ZwDYP/gSoLP6xX5+fOIkJ47/lIP8g49/Nc3tDj59OZUiRR3uFYsAVO/eZoE1yvkyeA6gAaff+zU3SxUcp8LilQucnoFTP3hhix19/garlQqFW9eZOBti9Mqt9mubXwBw+NALeDC4cfVDzgP3i3keUN/nf4uo+hEver/DRaK84/9mY/72uoFTKVMolVn5/HPgPvlSmVKhRL2bSrlEqVyidH8N/d7t2u/lakfcKneLgM4rvxhncbXA6tI8kTffB+0NjnrAqZYplcrk83ceXdtzgB+psHD7S/pfPs7JkydQB1x8dnWS2SVje9GaxkVLl+DmNNC4NJn/S6JxH5nJyNRwrW7Qi7oMgxBMyd9molvmRKO1cExgshG6l9NTEhkOynAkLlOJoKBuhPV8ZlK0h9aNTqVbv3ltEK/VIiAQEN0yZVLbuM+aImLoEgts3VdsJrfFil1M1/ZSv9RAROaWO8n/hkyF1Q3bgeFGygvPrDRG5Wcf1IJbq9rlNrrNbra96aqlUVMSWrNnNiw5uw23T/4o4Xq7FtA29h2My3K9WtETgRZr13UxdIk+pGswkpCcsX0N2OZD9BOgWqFsgWePp20KWb0ywkDgEIa8y55Gq0O5XKHP7cGz++l/haxWylgOuD17aG7eoVpxwL27RX8b27jZ42n1qdahXKrg2bfnUW0eQ7edoD232l+/LPp2pHvNfh8eT2f8/3sO2AZLyRAvns6gqToLOgxP6Uz87HvdoNJDF9E1B6ysLrLw5yW+3PUNvv3dH/L9wX3doNFDl9E1B+yhB+j9O1YPXcZ/AAl9BWJNvZE7AAAAAElFTkSuQmCC"; + +var gPhases = null; + +var gIDCache = {}; + +var gMagPixPaths = []; // 2D array of array-of-two <path> objects used in the pixel magnifier +var gMagWidth = 5; // number of zoomed in pixels to show horizontally +var gMagHeight = 5; // number of zoomed in pixels to show vertically +var gMagZoom = 16; // size of the zoomed in pixels +var gImage1Data; // ImageData object for the reference image +var gImage2Data; // ImageData object for the test output image +var gFlashingPixels = []; // array of <path> objects that should be flashed due to pixel color mismatch +var gParams; + +function ID(id) { + if (!(id in gIDCache)) + gIDCache[id] = document.getElementById(id); + return gIDCache[id]; +} + +function hash_parameters() { + var result = { }; + var params = 
window.location.hash.substr(1).split(/[&;]/); + for (var i = 0; i < params.length; i++) { + var parts = params[i].split("="); + result[parts[0]] = unescape(unescape(parts[1])); + } + return result; +} + +function load() { + gPhases = [ ID("entry"), ID("loading"), ID("viewer") ]; + build_mag(); + gParams = hash_parameters(); + if (gParams.log) { + show_phase("loading"); + process_log(gParams.log); + } else if (gParams.logurl) { + show_phase("loading"); + var req = new XMLHttpRequest(); + req.onreadystatechange = function() { + if (req.readyState === 4) { + process_log(req.responseText); + } + }; + req.open('GET', gParams.logurl, true); + req.send(); + } + window.addEventListener('keypress', handle_keyboard_shortcut); + window.addEventListener('keydown', handle_keydown); + ID("image1").addEventListener('error', image_load_error); + ID("image2").addEventListener('error', image_load_error); +} + +function image_load_error(e) { + e.target.setAttributeNS(XLINK_NS, "xlink:href", IMAGE_NOT_AVAILABLE); +} + +function build_mag() { + var mag = ID("mag"); + + var r = document.createElementNS(SVG_NS, "rect"); + r.setAttribute("x", gMagZoom * -gMagWidth / 2); + r.setAttribute("y", gMagZoom * -gMagHeight / 2); + r.setAttribute("width", gMagZoom * gMagWidth); + r.setAttribute("height", gMagZoom * gMagHeight); + mag.appendChild(r); + + mag.setAttribute("transform", "translate(" + (gMagZoom * (gMagWidth / 2) + 1) + "," + (gMagZoom * (gMagHeight / 2) + 1) + ")"); + + for (var x = 0; x < gMagWidth; x++) { + gMagPixPaths[x] = []; + for (var y = 0; y < gMagHeight; y++) { + var p1 = document.createElementNS(SVG_NS, "path"); + p1.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "h" + -gMagZoom + "v" + gMagZoom); + p1.setAttribute("stroke", "black"); + p1.setAttribute("stroke-width", "1px"); + p1.setAttribute("fill", "#aaa"); + + var p2 = document.createElementNS(SVG_NS, "path"); + p2.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "v" + gMagZoom + "h" + -gMagZoom); + p2.setAttribute("stroke", "black"); + p2.setAttribute("stroke-width", "1px"); + p2.setAttribute("fill", "#888"); + + mag.appendChild(p1); + mag.appendChild(p2); + gMagPixPaths[x][y] = [p1, p2]; + } + } + + var flashedOn = false; + setInterval(function() { + flashedOn = !flashedOn; + flash_pixels(flashedOn); + }, 500); +} + +function show_phase(phaseid) { + for (var i in gPhases) { + var phase = gPhases[i]; + phase.style.display = (phase.id == phaseid) ? "" : "none"; + } + + if (phase == "viewer") + ID("images").style.display = "none"; +} + +function fileentry_changed() { + show_phase("loading"); + var input = ID("fileentry"); + var files = input.files; + if (files.length > 0) { + // Only handle the first file; don't handle multiple selection. + // The parts of the log we care about are ASCII-only. Since we + // can ignore lines we don't care about, best to read in as + // iso-8859-1, which guarantees we don't get decoding errors. + var fileReader = new FileReader(); + fileReader.onload = function(e) { + var log = null; + + log = e.target.result; + + if (log) + process_log(log); + else + show_phase("entry"); + } + fileReader.readAsText(files[0], "iso-8859-1"); + } + // So the user can process the same filename again (after + // overwriting the log), clear the value on the form input so we + // will always get an onchange event. 
+ input.value = ""; +} + +function log_pasted() { + show_phase("loading"); + var entry = ID("logentry"); + var log = entry.value; + entry.value = ""; + process_log(log); +} + +var gTestItems; + +// This function is not used in production code, but can be invoked manually +// from the devtools console in order to test changes to the parsing regexes +// in process_log. +function test_parsing() { + // Note that the logs in these testcases have been manually edited to strip + // out stuff for brevity. + var testcases = [ + { "name": "empty log", + "log": "", + "expected": { "pass": 0, "unexpected": 0, "random": 0, "skip": 0 }, + "expected_images": 0, + }, + { "name": "android log", + "log": `[task 2018-12-28T10:36:45.718Z] 10:36:45 INFO - REFTEST TEST-START | a == b +[task 2018-12-28T10:36:45.719Z] 10:36:45 INFO - REFTEST TEST-LOAD | a | 78 / 275 (28%) +[task 2018-12-28T10:36:56.138Z] 10:36:56 INFO - REFTEST TEST-LOAD | b | 78 / 275 (28%) +[task 2018-12-28T10:37:06.559Z] 10:37:06 INFO - REFTEST TEST-UNEXPECTED-FAIL | a == b | image comparison, max difference: 255, number of differing pixels: 5950 +[task 2018-12-28T10:37:06.568Z] 10:37:06 INFO - REFTEST IMAGE 1 (TEST): data:image/png;base64, +[task 2018-12-28T10:37:06.577Z] 10:37:06 INFO - REFTEST IMAGE 2 (REFERENCE): data:image/png;base64, +[task 2018-12-28T10:37:06.577Z] 10:37:06 INFO - REFTEST INFO | Saved log: stuff trimmed here +[task 2018-12-28T10:37:06.582Z] 10:37:06 INFO - REFTEST TEST-END | a == b +[task 2018-12-28T10:37:06.583Z] 10:37:06 INFO - REFTEST TEST-START | a2 == b2 +[task 2018-12-28T10:37:06.583Z] 10:37:06 INFO - REFTEST TEST-LOAD | a2 | 79 / 275 (28%) +[task 2018-12-28T10:37:06.584Z] 10:37:06 INFO - REFTEST TEST-LOAD | b2 | 79 / 275 (28%) +[task 2018-12-28T10:37:16.982Z] 10:37:16 INFO - REFTEST TEST-PASS | a2 == b2 | image comparison, max difference: 0, number of differing pixels: 0 +[task 2018-12-28T10:37:16.982Z] 10:37:16 INFO - REFTEST TEST-END | a2 == b2`, + "expected": { "pass": 1, "unexpected": 1, "random": 0, "skip": 0 }, + "expected_images": 2, + }, + { "name": "local reftest run (Linux)", + "log": `REFTEST TEST-START | file:///a == file:///b +REFTEST TEST-LOAD | file:///a | 73 / 86 (84%) +REFTEST TEST-LOAD | file:///b | 73 / 86 (84%) +REFTEST TEST-PASS | file:///a == file:///b | image comparison, max difference: 0, number of differing pixels: 0 +REFTEST TEST-END | file:///a == file:///b`, + "expected": { "pass": 1, "unexpected": 0, "random": 0, "skip": 0 }, + "expected_images": 0, + }, + { "name": "wpt reftests (Linux automation)", + "log": `16:50:43 INFO - TEST-START | /a +16:50:43 INFO - PID 4276 | 1548694243694 Marionette INFO Testing http://web-platform.test:8000/a == http://web-platform.test:8000/b +16:50:43 INFO - PID 4276 | 1548694243963 Marionette INFO No differences allowed +16:50:44 INFO - TEST-PASS | /a | took 370ms +16:50:44 INFO - TEST-START | /a2 +16:50:44 INFO - PID 4276 | 1548694244066 Marionette INFO Testing http://web-platform.test:8000/a2 == http://web-platform.test:8000/b2 +16:50:44 INFO - PID 4276 | 1548694244792 Marionette INFO No differences allowed +16:50:44 INFO - PID 4276 | 1548694244792 Marionette INFO Found 28 pixels different, maximum difference per channel 14 +16:50:44 INFO - TEST-UNEXPECTED-FAIL | /a2 | Testing http://web-platform.test:8000/a2 == http://web-platform.test:8000/b2 +16:50:44 INFO - REFTEST IMAGE 1 (TEST): data:image/png;base64, +16:50:44 INFO - REFTEST IMAGE 2 (REFERENCE): data:image/png;base64, +16:50:44 INFO - TEST-INFO took 840ms`, + "expected": { "pass": 1, 
"unexpected": 1, "random": 0, "skip": 0 }, + "expected_images": 2, + }, + { "name": "windows log", + "log": `12:17:14 INFO - REFTEST TEST-START | a == b +12:17:14 INFO - REFTEST TEST-LOAD | a | 1603 / 2053 (78%) +12:17:14 INFO - REFTEST TEST-LOAD | b | 1603 / 2053 (78%) +12:17:14 INFO - REFTEST TEST-PASS(EXPECTED RANDOM) | a == b | image comparison, max difference: 0, number of differing pixels: 0 +12:17:14 INFO - REFTEST TEST-END | a == b +12:17:14 INFO - REFTEST TEST-START | a2 == b2 +12:17:14 INFO - REFTEST TEST-LOAD | a2 | 1604 / 2053 (78%) +12:17:14 INFO - REFTEST TEST-LOAD | b2 | 1604 / 2053 (78%) +12:17:14 INFO - REFTEST TEST-UNEXPECTED-FAIL | a2 == b2 | image comparison, max difference: 255, number of differing pixels: 9976 +12:17:14 INFO - REFTEST IMAGE 1 (TEST): data:image/png;base64, +12:17:14 INFO - REFTEST IMAGE 2 (REFERENCE): data:image/png;base64, +12:17:14 INFO - REFTEST INFO | Saved log: stuff trimmed here +12:17:14 INFO - REFTEST TEST-END | a2 == b2 +12:01:09 INFO - REFTEST TEST-START | a3 == b3 +12:01:09 INFO - REFTEST TEST-LOAD | a3 | 66 / 189 (34%) +12:01:09 INFO - REFTEST TEST-LOAD | b3 | 66 / 189 (34%) +12:01:09 INFO - REFTEST TEST-KNOWN-FAIL | a3 == b3 | image comparison, max difference: 255, number of differing pixels: 9654 +12:01:09 INFO - REFTEST TEST-END | a3 == b3`, + "expected": { "pass": 1, "unexpected": 1, "random": 1, "skip": 0 }, + "expected_images": 2, + }, + { "name": "webrender wrench log (windows)", + "log": `[task 2018-12-29T04:29:48.800Z] REFTEST a == b +[task 2018-12-29T04:29:48.984Z] REFTEST a2 == b2 +[task 2018-12-29T04:29:49.053Z] REFTEST TEST-UNEXPECTED-FAIL | a2 == b2 | image comparison, max difference: 255, number of differing pixels: 3128 +[task 2018-12-29T04:29:49.053Z] REFTEST IMAGE 1 (TEST): data:image/png; +[task 2018-12-29T04:29:49.053Z] REFTEST IMAGE 2 (REFERENCE): data:image/png; +[task 2018-12-29T04:29:49.053Z] REFTEST TEST-END | a2 == b2`, + "expected": { "pass": 0, "unexpected": 1, "random": 0, "skip": 0 }, + "expected_images": 2, + }, + { "name": "wpt reftests (Linux local; Bug 1530008)", + "log": `SUITE-START | Running 1 tests +TEST-START | /css/css-backgrounds/border-image-6.html +TEST-UNEXPECTED-FAIL | /css/css-backgrounds/border-image-6.html | Testing http://web-platform.test:8000/css/css-backgrounds/border-image-6.html == http://web-platform.test:8000/css/css-backgrounds/border-image-6-ref.html +REFTEST IMAGE 1 (TEST): data:image/png;base64, +REFTEST IMAGE 2 (REFERENCE): data:image/png;base64, +TEST-INFO took 425ms +SUITE-END | took 2s`, + "expected": { "pass": 0, "unexpected": 1, "random": 0, "skip": 0 }, + "expected_images": 2, + }, + { "name": "wpt reftests (taskcluster log from macOS CI)", + "log": `[task 2020-06-26T01:35:29.065Z] 01:35:29 INFO - TEST-START | /html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values.html +[task 2020-06-26T01:35:29.065Z] 01:35:29 INFO - PID 1353 | 1593135329040 Marionette INFO Testing http://web-platform.test:8000/html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values.html == http://web-platform.test:8000/html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values-ref.html +[task 2020-06-26T01:35:29.673Z] 01:35:29 INFO - PID 1353 | 1593135329633 Marionette INFO No differences allowed +[task 2020-06-26T01:35:29.726Z] 01:35:29 INFO - TEST-KNOWN-INTERMITTENT-FAIL | /html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values.html | took 649ms +[task 2020-06-26T01:35:29.726Z] 01:35:29 INFO - REFTEST IMAGE 1 
(TEST): data:image/png; +[task 2020-06-26T01:35:29.726Z] 01:35:29 INFO - REFTEST IMAGE 2 (REFERENCE): data:image/png;`, + "expected": { "pass": 0, "unexpected": 0, "random": 1, "skip": 0 }, + "expected_images": 2, + }, + { "name": "wpt reftests (taskcluster log from Windows CI)", + "log": `[task 2020-06-26T01:41:19.205Z] 01:41:19 INFO - TEST-START | /html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values.html +[task 2020-06-26T01:41:19.214Z] 01:41:19 INFO - PID 5920 | 1593135679202 Marionette WARN [24] http://web-platform.test:8000/css/WOFF2/metadatadisplay-schema-license-022-ref.xht overflows viewport (width: 783, height: 731) +[task 2020-06-26T01:41:19.214Z] 01:41:19 INFO - PID 9692 | 1593135679208 Marionette INFO Testing http://web-platform.test:8000/html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values.html == http://web-platform.test:8000/html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values-ref.html +[task 2020-06-26T01:41:19.638Z] 01:41:19 INFO - PID 9692 | 1593135679627 Marionette INFO No differences allowed +[task 2020-06-26T01:41:19.688Z] 01:41:19 INFO - TEST-KNOWN-INTERMITTENT-PASS | /html/rendering/non-replaced-elements/the-page/iframe-scrolling-attribute-values.html | took 474ms +[task 2020-06-26T01:41:19.688Z] 01:41:19 INFO - REFTEST IMAGE 1 (TEST): data:image/png; +[task 2020-06-26T01:41:19.689Z] 01:41:19 INFO - REFTEST IMAGE 2 (REFERENCE): data:image/png;`, + "expected": { "pass": 1, "unexpected": 0, "random": 1, "skip": 0 }, + "expected_images": 2, + }, + { "name": "local reftest run with timestamps (Linux; Bug 1167712)", + "log": ` 0:05.21 REFTEST TEST-START | a + 0:05.21 REFTEST REFTEST TEST-LOAD | a | 0 / 1 (0%) + 0:05.27 REFTEST REFTEST TEST-LOAD | b | 0 / 1 (0%) + 0:05.66 REFTEST TEST-UNEXPECTED-FAIL | a | image comparison (==), max difference: 106, number of differing pixels: 800 + 0:05.67 REFTEST REFTEST IMAGE 1 (TEST): data:image/png;base64, + 0:05.67 REFTEST REFTEST IMAGE 2 (REFERENCE): data:image/png;base64, + 0:05.73 REFTEST REFTEST TEST-END | a`, + "expected": { "pass": 0, "unexpected": 1, "random": 0, "skip": 0 }, + "expected_images": 2, + }, + { "name": "reftest run with whitespace compressed (Treeherder; Bug 1084322)", + "log": ` REFTEST TEST-START | a +REFTEST TEST-LOAD | a | 0 / 1 (0%) +REFTEST TEST-LOAD | b | 0 / 1 (0%) +REFTEST TEST-UNEXPECTED-FAIL | a | image comparison (==), max difference: 106, number of differing pixels: 800 +REFTEST REFTEST IMAGE 1 (TEST): data:image/png;base64, +REFTEST REFTEST IMAGE 2 (REFERENCE): data:image/png;base64, +REFTEST REFTEST TEST-END | a`, + "expected": { "pass": 0, "unexpected": 1, "random": 0, "skip": 0 }, + "expected_images": 2, + }, + ]; + + var current_test = 0; + + // Override the build_viewer function invoked at the end of process_log to + // actually just check the results of parsing. + build_viewer = function() { + var expected = testcases[current_test].expected; + var expected_images = testcases[current_test].expected_images; + for (var result of gTestItems) { + for (let type in expected) { // type is "pass", "unexpected" etc. 
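+          // Decrement the matching expectation bucket for each parsed
+          // result; any bucket that is not exactly zero afterwards is
+          // reported as a mismatch below.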
+ if (result[type]) { + expected[type]--; + } + } + } + var failed = false; + for (let type in expected) { + if (expected[type] != 0) { + console.log(`Failure: for testcase ${testcases[current_test].name} got ${expected[type]} fewer ${type} results than expected!`); + failed = true; + } + } + + let total_images = 0; + for (var result of gTestItems) { + total_images += result.images.length; + } + if (total_images !== expected_images) { + console.log(`Failure: for testcase ${testcases[current_test].name} got ${total_images} images, expected ${expected_images}`); + failed = true; + } + + if (!failed) { + console.log(`Success for testcase ${testcases[current_test].name}`); + } + }; + + while (current_test < testcases.length) { + process_log(testcases[current_test].log); + current_test++; + } +} + +function process_log(contents) { + var lines = contents.split(/[\r\n]+/); + gTestItems = []; + for (var j in lines) { + + // !!!!!! + // When making any changes to this code, please add a test to the + // test_parsing function above, and ensure all existing tests pass. + // !!!!!! + + var line = lines[j]; + // Ignore duplicated output in logcat. + if (line.match(/I\/Gecko.*?REFTEST/)) + continue; + var match = line.match(/^.*?(?:REFTEST\s+)+(.*)$/); + if (!match) { + // WPT reftests don't always have the "REFTEST" prefix but do have + // mozharness prefixing. Trying to match both prefixes optionally with a + // single regex either makes an unreadable mess or matches everything so + // we do them separately. + match = line.match(/^(?:.*? (?:INFO|ERROR) -\s+)(.*)$/); + } + if (match) + line = match[1]; + match = line.match(/^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-FAIL|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO|TEST-KNOWN-INTERMITTENT-FAIL|TEST-KNOWN-INTERMITTENT-PASS)(\(EXPECTED RANDOM\)|) \| ([^\|]+)(?: \|(.*)|$)/); + if (match) { + var state = match[1]; + var random = match[2]; + var url = match[3]; + var extra = match[4]; + gTestItems.push( + { + pass: !state.match(/DEBUG-INFO$|FAIL$/), + // only one of the following three should ever be true + unexpected: !!state.match(/^TEST-UNEXPECTED/), + random: (random == "(EXPECTED RANDOM)" || state == "TEST-KNOWN-INTERMITTENT-FAIL" || state == "TEST-KNOWN-INTERMITTENT-PASS"), + skip: (extra == " (SKIP)"), + url: url, + images: [], + imageLabels: [] + }); + continue; + } + match = line.match(/^IMAGE([^:]*): (data:.*)$/); + if (match) { + var item = gTestItems[gTestItems.length - 1]; + item.images.push(match[2]); + item.imageLabels.push(match[1]); + } + } + + build_viewer(); +} + +function build_viewer() { + if (gTestItems.length == 0) { + show_phase("entry"); + return; + } + + var cell = ID("itemlist"); + while (cell.childNodes.length > 0) + cell.removeChild(cell.childNodes[cell.childNodes.length - 1]); + + var table = document.createElement("table"); + var tbody = document.createElement("tbody"); + table.appendChild(tbody); + + for (var i in gTestItems) { + var item = gTestItems[i]; + + // optional url filter for only showing unexpected results + if (parseInt(gParams.only_show_unexpected) && !item.unexpected) + continue; + + // XXX regardless skip expected pass items until we have filtering UI + if (item.pass && !item.unexpected) + continue; + + var tr = document.createElement("tr"); + var rowclass = item.pass ? 
"pass" : "fail"; + var td; + var text; + + td = document.createElement("td"); + text = ""; + if (item.unexpected) { text += "!"; rowclass += " unexpected"; } + if (item.random) { text += "R"; rowclass += " random"; } + if (item.skip) { text += "S"; rowclass += " skip"; } + td.appendChild(document.createTextNode(text)); + tr.appendChild(td); + + td = document.createElement("td"); + td.id = "item" + i; + td.className = "url"; + // Only display part of URL after "/mozilla/". + var match = item.url.match(/\/mozilla\/(.*)/); + text = document.createTextNode(match ? match[1] : item.url); + if (item.images.length > 0) { + var a = document.createElement("a"); + a.href = "javascript:show_images(" + i + ")"; + a.appendChild(text); + td.appendChild(a); + } else { + td.appendChild(text); + } + tr.appendChild(td); + + tbody.appendChild(tr); + } + + cell.appendChild(table); + + show_phase("viewer"); +} + +function get_image_data(src, whenReady) { + var img = new Image(); + img.onload = function() { + var canvas = document.createElement("canvas"); + canvas.width = img.naturalWidth; + canvas.height = img.naturalHeight; + + var ctx = canvas.getContext("2d"); + ctx.drawImage(img, 0, 0); + + whenReady(ctx.getImageData(0, 0, img.naturalWidth, img.naturalHeight)); + }; + img.src = src; +} + +function sync_svg_size(imageData) { + // We need the size of the 'svg' and its 'image' elements to match the size + // of the ImageData objects that we're going to read pixels from or else our + // magnify() function will be very broken. + ID("svg").setAttribute("width", imageData.width); + ID("svg").setAttribute("height", imageData.height); +} + +function show_images(i) { + var item = gTestItems[i]; + var cell = ID("images"); + + // Remove activeitem class from any existing elements + var activeItems = document.querySelectorAll(".activeitem"); + for (var activeItemIdx = activeItems.length; activeItemIdx-- != 0;) { + activeItems[activeItemIdx].classList.remove("activeitem"); + } + + ID("item" + i).classList.add("activeitem"); + ID("image1").style.display = ""; + ID("image2").style.display = "none"; + ID("diffrect").style.display = "none"; + ID("imgcontrols").reset(); + ID("pixel-differences").textContent = ""; + + ID("image1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]); + // Making the href be #image1 doesn't seem to work + ID("feimage1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]); + if (item.images.length == 1) { + ID("imgcontrols").style.display = "none"; + } else { + ID("imgcontrols").style.display = ""; + + ID("image2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]); + // Making the href be #image2 doesn't seem to work + ID("feimage2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]); + + ID("label1").textContent = 'Image ' + item.imageLabels[0]; + ID("label2").textContent = 'Image ' + item.imageLabels[1]; + } + + cell.style.display = ""; + + let loaded = [false, false]; + + function images_loaded(id) { + loaded[id] = true; + if (loaded.every(x => x)) { + update_pixel_difference_text() + } + } + + get_image_data(item.images[0], function(data) { gImage1Data = data; sync_svg_size(gImage1Data); images_loaded(0)}); + get_image_data(item.images[1], function(data) { gImage2Data = data; images_loaded(1)}); + +} + +function update_pixel_difference_text() { + let differenceText; + if (gImage1Data.height !== gImage2Data.height || + gImage1Data.width !== gImage2Data.width) { + differenceText = "Images are different sizes" + } else { + let [numPixels, maxPerChannel] = get_pixel_differences(); 
+ if (!numPixels) { + differenceText = "Images are identical"; + } else { + differenceText = `Maximum difference per channel ${maxPerChannel}, ${numPixels} pixels differ`; + } + } + // Disable this for now, because per bug 1633504, the numbers may be + // inaccurate and dependent on the browser's configuration. + // ID("pixel-differences").textContent = differenceText; +} + +function get_pixel_differences() { + let numPixels = 0; + let maxPerChannel = 0; + for (var i=0; i<gImage1Data.data.length; i+=4) { + let r1 = gImage1Data.data[i]; + let r2 = gImage2Data.data[i]; + let g1 = gImage1Data.data[i+1]; + let g2 = gImage2Data.data[i+1]; + let b1 = gImage1Data.data[i+2]; + let b2 = gImage2Data.data[i+2]; + // Ignore alpha. + if (r1 == r2 && g1 == g2 && b1 == b2) { + continue; + } + numPixels += 1; + let maxDiff = Math.max(Math.abs(r1-r2), + Math.abs(g1-g2), + Math.abs(b1-b2)); + if (maxDiff > maxPerChannel) { + maxPerChannel = maxDiff + } + } + return [numPixels, maxPerChannel]; +} + +function show_image(i) { + if (i == 1) { + ID("image1").style.display = ""; + ID("image2").style.display = "none"; + } else { + ID("image1").style.display = "none"; + ID("image2").style.display = ""; + } +} + +function handle_keyboard_shortcut(event) { + switch (event.charCode) { + case 49: // "1" key + document.getElementById("radio1").checked = true; + show_image(1); + break; + case 50: // "2" key + document.getElementById("radio2").checked = true; + show_image(2); + break; + case 100: // "d" key + document.getElementById("differences").click(); + break; + case 112: // "p" key + shift_images(-1); + break; + case 110: // "n" key + shift_images(1); + break; + } +} + +function handle_keydown(event) { + switch (event.keyCode) { + case 37: // left arrow + move_pixel(-1, 0); + break; + case 38: // up arrow + move_pixel(0,-1); + break; + case 39: // right arrow + move_pixel(1, 0); + break; + case 40: // down arrow + move_pixel(0, 1); + break; + } +} + +function shift_images(dir) { + var activeItem = document.querySelector(".activeitem"); + if (!activeItem) { + return; + } + for (var elm = activeItem; elm; elm = elm.parentElement) { + if (elm.tagName != "tr") { + continue; + } + elm = dir > 0 ? elm.nextElementSibling : elm.previousElementSibling; + if (elm) { + elm.getElementsByTagName("a")[0].click(); + } + return; + } +} + +function show_differences(cb) { + ID("diffrect").style.display = cb.checked ? "" : "none"; +} + +function flash_pixels(on) { + var stroke = on ? "red" : "black"; + var strokeWidth = on ? "2px" : "1px"; + for (var i = 0; i < gFlashingPixels.length; i++) { + gFlashingPixels[i].setAttribute("stroke", stroke); + gFlashingPixels[i].setAttribute("stroke-width", strokeWidth); + } +} + +function cursor_point(evt) { + var m = evt.target.getScreenCTM().inverse(); + var p = ID("svg").createSVGPoint(); + p.x = evt.clientX; + p.y = evt.clientY; + p = p.matrixTransform(m); + return { x: Math.floor(p.x), y: Math.floor(p.y) }; +} + +function hex2(i) { + return (i < 16 ? 
"0" : "") + i.toString(16); +} + +function canvas_pixel_as_hex(data, x, y) { + var offset = (y * data.width + x) * 4; + var r = data.data[offset]; + var g = data.data[offset + 1]; + var b = data.data[offset + 2]; + return "#" + hex2(r) + hex2(g) + hex2(b); +} + +function hex_as_rgb(hex) { + return "rgb(" + [parseInt(hex.substring(1, 3), 16), parseInt(hex.substring(3, 5), 16), parseInt(hex.substring(5, 7), 16)] + ")"; +} + +function magnify(evt) { + var { x: x, y: y } = cursor_point(evt); + do_magnify(x, y); +} + +function do_magnify(x, y) { + var centerPixelColor1, centerPixelColor2; + + var dx_lo = -Math.floor(gMagWidth / 2); + var dx_hi = Math.floor(gMagWidth / 2); + var dy_lo = -Math.floor(gMagHeight / 2); + var dy_hi = Math.floor(gMagHeight / 2); + + flash_pixels(false); + gFlashingPixels = []; + for (var j = dy_lo; j <= dy_hi; j++) { + for (var i = dx_lo; i <= dx_hi; i++) { + var px = x + i; + var py = y + j; + var p1 = gMagPixPaths[i + dx_hi][j + dy_hi][0]; + var p2 = gMagPixPaths[i + dx_hi][j + dy_hi][1]; + // Here we just use the dimensions of gImage1Data since we expect test + // and reference to have the same dimensions. + if (px < 0 || py < 0 || px >= gImage1Data.width || py >= gImage1Data.height) { + p1.setAttribute("fill", "#aaa"); + p2.setAttribute("fill", "#888"); + } else { + var color1 = canvas_pixel_as_hex(gImage1Data, x + i, y + j); + var color2 = canvas_pixel_as_hex(gImage2Data, x + i, y + j); + p1.setAttribute("fill", color1); + p2.setAttribute("fill", color2); + if (color1 != color2) { + gFlashingPixels.push(p1, p2); + p1.parentNode.appendChild(p1); + p2.parentNode.appendChild(p2); + } + if (i == 0 && j == 0) { + centerPixelColor1 = color1; + centerPixelColor2 = color2; + } + } + } + } + flash_pixels(true); + show_pixelinfo(x, y, centerPixelColor1, hex_as_rgb(centerPixelColor1), centerPixelColor2, hex_as_rgb(centerPixelColor2)); +} + +function show_pixelinfo(x, y, pix1rgb, pix1hex, pix2rgb, pix2hex) { + var pixelinfo = ID("pixelinfo"); + ID("coords").textContent = [x, y]; + ID("pix1hex").textContent = pix1hex; + ID("pix1rgb").textContent = pix1rgb; + ID("pix2hex").textContent = pix2hex; + ID("pix2rgb").textContent = pix2rgb; +} + +function move_pixel(deltax, deltay) { + coords = ID("coords").textContent.split(','); + x = parseInt(coords[0]); + y = parseInt(coords[1]); + if (isNaN(x) || isNaN(y)) { + return; + } + x = x + deltax; + y = y + deltay; + if (x >= 0 && y >= 0 && x < gImage1Data.width && y < gImage1Data.height) { + do_magnify(x, y); + } +} + + ]]></script> + +</head> +<body onload="load()"> + +<div id="entry"> + +<h1>Reftest analyzer: load reftest log</h1> + +<p>Either paste your log into this textarea:<br /> +<textarea cols="80" rows="10" id="logentry"/><br/> +<input type="button" value="Process pasted log" onclick="log_pasted()" /></p> + +<p>... or load it from a file:<br/> +<input type="file" id="fileentry" onchange="fileentry_changed()" /> +</p> +</div> + +<div id="loading" style="display:none">Loading log...</div> + +<div id="viewer" style="display:none"> + <div id="pixelarea"> + <div id="pixelinfo"> + <table> + <tbody> + <tr><th>Pixel at:</th><td colspan="2" id="coords"/></tr> + <tr><th>Image 1:</th><td id="pix1rgb"></td><td id="pix1hex"></td></tr> + <tr><th>Image 2:</th><td id="pix2rgb"></td><td id="pix2hex"></td></tr> + </tbody> + </table> + <div> + <div id="pixelhint">★ + <div> + <p>Move the mouse over the reftest image on the right to show + magnified pixels on the left. 
The color information above is for + the pixel centered in the magnified view.</p> + <p>Image 1 is shown in the upper triangle of each pixel and Image 2 + is shown in the lower triangle.</p> + </div> + </div> + </div> + </div> + <div id="magnification"> + <svg xmlns="http://www.w3.org/2000/svg" width="84" height="84" shape-rendering="optimizeSpeed"> + <g id="mag"/> + </svg> + </div> + </div> + <div id="itemlist"></div> + <div id="images" style="display:none"> + <form id="imgcontrols"> + <input id="radio1" type="radio" name="which" value="0" onchange="show_image(1)" checked="checked" /><label id="label1" title="1" for="radio1">Image 1</label> + <input id="radio2" type="radio" name="which" value="1" onchange="show_image(2)" /><label id="label2" title="2" for="radio2">Image 2</label> + <label><input id="differences" type="checkbox" onchange="show_differences(this)" />Circle differences</label> + </form> + <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="800" height="1000" id="svg"> + <defs> + <!-- use sRGB to avoid loss of data --> + <filter id="showDifferences" x="0%" y="0%" width="100%" height="100%" + style="color-interpolation-filters: sRGB"> + <feImage id="feimage1" result="img1" xlink:href="#image1" /> + <feImage id="feimage2" result="img2" xlink:href="#image2" /> + <!-- inv1 and inv2 are the images with RGB inverted --> + <feComponentTransfer result="inv1" in="img1"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <feComponentTransfer result="inv2" in="img2"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <!-- w1 will have non-white pixels anywhere that img2 + is brighter than img1, and w2 for the reverse. + It would be nice not to have to go through these + intermediate states, but feComposite + type="arithmetic" can't transform the RGB channels + and leave the alpha channel untouched. 
--> + <feComposite result="w1" in="img1" in2="inv2" operator="arithmetic" k2="1" k3="1" /> + <feComposite result="w2" in="img2" in2="inv1" operator="arithmetic" k2="1" k3="1" /> + <!-- c1 will have non-black pixels anywhere that img2 + is brighter than img1, and c2 for the reverse --> + <feComponentTransfer result="c1" in="w1"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <feComponentTransfer result="c2" in="w2"> + <feFuncR type="linear" slope="-1" intercept="1" /> + <feFuncG type="linear" slope="-1" intercept="1" /> + <feFuncB type="linear" slope="-1" intercept="1" /> + </feComponentTransfer> + <!-- c will be nonblack (and fully on) for every pixel+component where there are differences --> + <feComposite result="c" in="c1" in2="c2" operator="arithmetic" k2="255" k3="255" /> + <!-- a will be opaque for every pixel with differences and transparent for all others --> + <feColorMatrix result="a" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0" /> + + <!-- a, dilated by 1 pixel --> + <feMorphology result="dila1" in="a" operator="dilate" radius="1" /> + <!-- a, dilated by 2 pixels --> + <feMorphology result="dila2" in="dila1" operator="dilate" radius="1" /> + + <!-- all the pixels in the 2-pixel dilation of a but not in the 1-pixel dilation, to highlight the diffs --> + <feComposite result="highlight" in="dila2" in2="dila1" operator="out" /> + + <feFlood result="red" flood-color="red" /> + <feComposite result="redhighlight" in="red" in2="highlight" operator="in" /> + <feFlood result="black" flood-color="black" flood-opacity="0.5" /> + <feMerge> + <feMergeNode in="black" /> + <feMergeNode in="redhighlight" /> + </feMerge> + </filter> + </defs> + <g onmousemove="magnify(evt)"> + <image x="0" y="0" width="100%" height="100%" id="image1" /> + <image x="0" y="0" width="100%" height="100%" id="image2" /> + </g> + <rect id="diffrect" filter="url(#showDifferences)" pointer-events="none" x="0" y="0" width="100%" height="100%" /> + </svg> + <div id="pixel-differences"></div> + </div> +</div> + +</body> +</html> diff --git a/layout/tools/reftest/reftest-content.js b/layout/tools/reftest/reftest-content.js new file mode 100644 index 0000000000..94e502d178 --- /dev/null +++ b/layout/tools/reftest/reftest-content.js @@ -0,0 +1,1668 @@ +/* -*- indent-tabs-mode: nil; js-indent-level: 4 -*- / +/* vim: set shiftwidth=4 tabstop=8 autoindent cindent expandtab: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +const XHTML_NS = "http://www.w3.org/1999/xhtml"; + +const DEBUG_CONTRACTID = "@mozilla.org/xpcom/debug;1"; +const PRINTSETTINGS_CONTRACTID = "@mozilla.org/gfx/printsettings-service;1"; +const ENVIRONMENT_CONTRACTID = "@mozilla.org/process/environment;1"; +const NS_OBSERVER_SERVICE_CONTRACTID = "@mozilla.org/observer-service;1"; +const NS_GFXINFO_CONTRACTID = "@mozilla.org/gfx/info;1"; +const IO_SERVICE_CONTRACTID = "@mozilla.org/network/io-service;1" + +// "<!--CLEAR-->" +const BLANK_URL_FOR_CLEARING = "data:text/html;charset=UTF-8,%3C%21%2D%2DCLEAR%2D%2D%3E"; + +Cu.import("resource://gre/modules/Timer.jsm"); +Cu.import("resource://reftest/AsyncSpellCheckTestHelper.jsm"); +Cu.import("resource://gre/modules/Services.jsm"); + +// This will load chrome Custom Elements inside chrome documents: +ChromeUtils.import("resource://gre/modules/CustomElementsListener.jsm", null); + +var gBrowserIsRemote; +var gIsWebRenderEnabled; +var gHaveCanvasSnapshot = false; +// Plugin layers can be updated asynchronously, so to make sure that all +// layer surfaces have the right content, we need to listen for explicit +// "MozPaintWait" and "MozPaintWaitFinished" events that signal when it's OK +// to take snapshots. We cannot take a snapshot while the number of +// "MozPaintWait" events fired exceeds the number of "MozPaintWaitFinished" +// events fired. We count the number of such excess events here. When +// the counter reaches zero we call gExplicitPendingPaintsCompleteHook. +var gExplicitPendingPaintCount = 0; +var gExplicitPendingPaintsCompleteHook; +var gCurrentURL; +var gCurrentURLRecordResults; +var gCurrentURLTargetType; +var gCurrentTestType; +var gTimeoutHook = null; +var gFailureTimeout = null; +var gFailureReason; +var gAssertionCount = 0; + +var gDebug; +var gVerbose = false; + +var gCurrentTestStartTime; +var gClearingForAssertionCheck = false; + +const TYPE_LOAD = 'load'; // test without a reference (just test that it does + // not assert, crash, hang, or leak) +const TYPE_SCRIPT = 'script'; // test contains individual test results +const TYPE_PRINT = 'print'; // test and reference will be printed to PDF's and + // compared structurally + +// keep this in sync with globals.jsm +const URL_TARGET_TYPE_TEST = 0; // first url +const URL_TARGET_TYPE_REFERENCE = 1; // second url, if any + +function webNavigation() { + return docShell.QueryInterface(Ci.nsIWebNavigation); +} + +function webProgress() { + return docShell.QueryInterface(Ci.nsIInterfaceRequestor).getInterface(Ci.nsIWebProgress); +} + +function windowUtilsForWindow(w) { + return w.windowUtils; +} + +function windowUtils() { + return windowUtilsForWindow(content); +} + +function IDForEventTarget(event) +{ + try { + return "'" + event.target.getAttribute('id') + "'"; + } catch (ex) { + return "<unknown>"; + } +} + +function PaintWaitListener(event) +{ + LogInfo("MozPaintWait received for ID " + IDForEventTarget(event)); + gExplicitPendingPaintCount++; +} + +function PaintWaitFinishedListener(event) +{ + LogInfo("MozPaintWaitFinished received for ID " + IDForEventTarget(event)); + gExplicitPendingPaintCount--; + if (gExplicitPendingPaintCount < 0) { + LogWarning("Underrun in gExplicitPendingPaintCount\n"); + gExplicitPendingPaintCount = 0; + } + if (gExplicitPendingPaintCount == 0 && + gExplicitPendingPaintsCompleteHook) { + gExplicitPendingPaintsCompleteHook(); + } +} + +var progressListener = { + onStateChange(webprogress, request, flags, status) { + let uri; + try { + request.QueryInterface(Ci.nsIChannel); + uri = 
request.originalURI.spec; + } catch (ex) { + return; + } + const WPL = Ci.nsIWebProgressListener; + const endFlags = WPL.STATE_STOP | WPL.STATE_IS_WINDOW | WPL.STATE_IS_NETWORK; + if ((flags & endFlags) == endFlags) { + OnDocumentLoad(uri); + } + }, + QueryInterface: ChromeUtils.generateQI([ + "nsIWebProgressListener", + "nsISupportsWeakReference", + ]), +}; + +function OnInitialLoad() +{ + removeEventListener("load", OnInitialLoad, true); + + gDebug = Cc[DEBUG_CONTRACTID].getService(Ci.nsIDebug2); + if (gDebug.isDebugBuild) { + gAssertionCount = gDebug.assertionCount; + } + var env = Cc[ENVIRONMENT_CONTRACTID].getService(Ci.nsIEnvironment); + gVerbose = !!env.get("MOZ_REFTEST_VERBOSE"); + + RegisterMessageListeners(); + + var initInfo = SendContentReady(); + gBrowserIsRemote = initInfo.remote; + + webProgress().addProgressListener(progressListener, Ci.nsIWebProgress.NOTIFY_STATE_WINDOW); + + addEventListener("MozPaintWait", PaintWaitListener, true); + addEventListener("MozPaintWaitFinished", PaintWaitFinishedListener, true); + + LogInfo("Using browser remote="+ gBrowserIsRemote +"\n"); +} + +function SetFailureTimeout(cb, timeout, uri) +{ + var targetTime = Date.now() + timeout; + + var wrapper = function() { + // Timeouts can fire prematurely in some cases (e.g. in chaos mode). If this + // happens, set another timeout for the remaining time. + let remainingMs = targetTime - Date.now(); + if (remainingMs > 0) { + SetFailureTimeout(cb, remainingMs); + } else { + cb(); + } + } + + // Once OnDocumentLoad is called to handle the 'load' event it will update + // this error message to reflect what stage of the processing it has reached + // as it advances to each stage in turn. + gFailureReason = "timed out after " + timeout + + " ms waiting for 'load' event for " + uri; + gFailureTimeout = setTimeout(wrapper, timeout); +} + +function StartTestURI(type, uri, uriTargetType, timeout) +{ + // The GC is only able to clean up compartments after the CC runs. Since + // the JS ref tests disable the normal browser chrome and do not otherwise + // create substatial DOM garbage, the CC tends not to run enough normally. 
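+    // So explicitly nudge the next collector timer here, so that garbage from
+    // the previous test is reclaimed before the new test starts loading.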
+ windowUtils().runNextCollectorTimer(); + + // Reset gExplicitPendingPaintCount in case there was a timeout or + // the count is out of sync for some other reason + if (gExplicitPendingPaintCount != 0) { + LogWarning("Resetting gExplicitPendingPaintCount to zero (currently " + + gExplicitPendingPaintCount + "\n"); + gExplicitPendingPaintCount = 0; + } + + gCurrentTestType = type; + gCurrentURL = uri; + gCurrentURLTargetType = uriTargetType; + gCurrentURLRecordResults = 0; + + gCurrentTestStartTime = Date.now(); + if (gFailureTimeout != null) { + SendException("program error managing timeouts\n"); + } + SetFailureTimeout(LoadFailed, timeout, uri); + + LoadURI(gCurrentURL); +} + +function setupTextZoom(contentRootElement) { + if (!contentRootElement || !contentRootElement.hasAttribute('reftest-text-zoom')) + return; + docShell.browsingContext.textZoom = + contentRootElement.getAttribute('reftest-text-zoom'); +} + +function setupFullZoom(contentRootElement) { + if (!contentRootElement || !contentRootElement.hasAttribute('reftest-zoom')) + return; + docShell.browsingContext.fullZoom = + contentRootElement.getAttribute('reftest-zoom'); +} + +function resetZoomAndTextZoom() { + docShell.browsingContext.fullZoom = 1.0; + docShell.browsingContext.textZoom = 1.0; +} + +function doPrintMode(contentRootElement) { + // use getAttribute because className works differently in HTML and SVG + if (contentRootElement && + contentRootElement.hasAttribute('class')) { + var classList = contentRootElement.getAttribute('class').split(/\s+/); + if (classList.includes("reftest-print")) { + SendException("reftest-print is obsolete, use reftest-paged instead"); + return; + } + return classList.includes("reftest-paged"); + } +} + +function setupPrintMode() { + var PSSVC = + Cc[PRINTSETTINGS_CONTRACTID].getService(Ci.nsIPrintSettingsService); + var ps = PSSVC.newPrintSettings; + ps.paperWidth = 5; + ps.paperHeight = 3; + + // Override any os-specific unwriteable margins + ps.unwriteableMarginTop = 0; + ps.unwriteableMarginLeft = 0; + ps.unwriteableMarginBottom = 0; + ps.unwriteableMarginRight = 0; + + ps.headerStrLeft = ""; + ps.headerStrCenter = ""; + ps.headerStrRight = ""; + ps.footerStrLeft = ""; + ps.footerStrCenter = ""; + ps.footerStrRight = ""; + + ps.printBGColors = true; + ps.printBGImages = true; + + docShell.contentViewer.setPageModeForTesting(/* aPageMode */ true, ps); +} + +// Message the parent process to ask it to print the current page to a PDF file. +function printToPdf() { + let currentDoc = content.document; + let isPrintSelection = false; + let printRange = ''; + + if (currentDoc) { + let contentRootElement = currentDoc.documentElement; + printRange = contentRootElement.getAttribute("reftest-print-range") || ''; + } + + if (printRange) { + if (printRange === 'selection') { + isPrintSelection = true; + } else if (!printRange.split(',').every(range => /^[1-9]\d*-[1-9]\d*$/.test(range))) { + SendException("invalid value for reftest-print-range"); + return; + } + } + + SendStartPrint(isPrintSelection, printRange); +} + +function attrOrDefault(element, attr, def) { + return element.hasAttribute(attr) ? 
Number(element.getAttribute(attr)) : def; +} + +function setupViewport(contentRootElement) { + if (!contentRootElement) { + return; + } + + var sw = attrOrDefault(contentRootElement, "reftest-scrollport-w", 0); + var sh = attrOrDefault(contentRootElement, "reftest-scrollport-h", 0); + if (sw !== 0 || sh !== 0) { + LogInfo("Setting viewport to <w=" + sw + ", h=" + sh + ">"); + windowUtils().setVisualViewportSize(sw, sh); + } + + var res = attrOrDefault(contentRootElement, "reftest-resolution", 1); + if (res !== 1) { + LogInfo("Setting resolution to " + res); + windowUtils().setResolutionAndScaleTo(res); + } + + // XXX support viewconfig when needed +} + + +function setupDisplayport(contentRootElement) { + let promise = content.windowGlobalChild.getActor("ReftestFission").SetupDisplayportRoot(); + return promise.then(function(result) { + for (let errorString of result.errorStrings) { + LogError(errorString); + } + for (let infoString of result.infoStrings) { + LogInfo(infoString); + } + }, + function(reason) { + LogError("SetupDisplayportRoot returned promise rejected: " + reason); + }); +} + +// Returns whether any offsets were updated +function setupAsyncScrollOffsets(options) { + let currentDoc = content.document; + let contentRootElement = currentDoc ? currentDoc.documentElement : null; + + if (!contentRootElement || !contentRootElement.hasAttribute("reftest-async-scroll")) { + return Promise.resolve(false); + } + + let allowFailure = options.allowFailure; + let promise = content.windowGlobalChild.getActor("ReftestFission").sendQuery("SetupAsyncScrollOffsets", {allowFailure}); + return promise.then(function(result) { + for (let errorString of result.errorStrings) { + LogError(errorString); + } + for (let infoString of result.infoStrings) { + LogInfo(infoString); + } + return result.updatedAny; + }, + function(reason) { + LogError("SetupAsyncScrollOffsets SendQuery to parent promise rejected: " + reason); + return false; + }); +} + +function setupAsyncZoom(options) { + var currentDoc = content.document; + var contentRootElement = currentDoc ? currentDoc.documentElement : null; + + if (!contentRootElement || !contentRootElement.hasAttribute('reftest-async-zoom')) + return false; + + var zoom = attrOrDefault(contentRootElement, "reftest-async-zoom", 1); + if (zoom != 1) { + try { + windowUtils().setAsyncZoom(contentRootElement, zoom); + return true; + } catch (e) { + if (!options.allowFailure) { + throw e; + } + } + } + return false; +} + + +function resetDisplayportAndViewport() { + // XXX currently the displayport configuration lives on the + // presshell and so is "reset" on nav when we get a new presshell. +} + +function shouldWaitForExplicitPaintWaiters() { + return gExplicitPendingPaintCount > 0; +} + +function shouldWaitForPendingPaints() { + // if gHaveCanvasSnapshot is false, we're not taking snapshots so + // there is no need to wait for pending paints to be flushed. 
+ return gHaveCanvasSnapshot && windowUtils().isMozAfterPaintPending; +} + +function shouldWaitForReftestWaitRemoval(contentRootElement) { + // use getAttribute because className works differently in HTML and SVG + return contentRootElement && + contentRootElement.hasAttribute('class') && + contentRootElement.getAttribute('class').split(/\s+/) + .includes("reftest-wait"); +} + +function shouldSnapshotWholePage(contentRootElement) { + // use getAttribute because className works differently in HTML and SVG + return contentRootElement && + contentRootElement.hasAttribute('class') && + contentRootElement.getAttribute('class').split(/\s+/) + .includes("reftest-snapshot-all"); +} + +function shouldNotFlush(contentRootElement) { + // use getAttribute because className works differently in HTML and SVG + return contentRootElement && + contentRootElement.hasAttribute('class') && + contentRootElement.getAttribute('class').split(/\s+/) + .includes("reftest-no-flush"); +} + +function getNoPaintElements(contentRootElement) { + return contentRootElement.getElementsByClassName('reftest-no-paint'); +} +function getNoDisplayListElements(contentRootElement) { + return contentRootElement.getElementsByClassName('reftest-no-display-list'); +} +function getDisplayListElements(contentRootElement) { + return contentRootElement.getElementsByClassName('reftest-display-list'); +} + +function getOpaqueLayerElements(contentRootElement) { + return contentRootElement.getElementsByClassName('reftest-opaque-layer'); +} + +function getAssignedLayerMap(contentRootElement) { + var layerNameToElementsMap = {}; + var elements = contentRootElement.querySelectorAll('[reftest-assigned-layer]'); + for (var i = 0; i < elements.length; ++i) { + var element = elements[i]; + var layerName = element.getAttribute('reftest-assigned-layer'); + if (!(layerName in layerNameToElementsMap)) { + layerNameToElementsMap[layerName] = []; + } + layerNameToElementsMap[layerName].push(element); + } + return layerNameToElementsMap; +} + +const FlushMode = { + ALL: 0, + IGNORE_THROTTLED_ANIMATIONS: 1 +}; + +// Initial state. When the document has loaded and all MozAfterPaint events and +// all explicit paint waits are flushed, we can fire the MozReftestInvalidate +// event and move to the next state. +const STATE_WAITING_TO_FIRE_INVALIDATE_EVENT = 0; +// When reftest-wait has been removed from the root element, we can move to the +// next state. +const STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL = 1; +// When spell checking is done on all spell-checked elements, we can move to the +// next state. +const STATE_WAITING_FOR_SPELL_CHECKS = 2; +// When any pending compositor-side repaint requests have been flushed, we can +// move to the next state. +const STATE_WAITING_FOR_APZ_FLUSH = 3; +// When all MozAfterPaint events and all explicit paint waits are flushed, we're +// done and can move to the COMPLETED state. 
+const STATE_WAITING_TO_FINISH = 4; +const STATE_COMPLETED = 5; + +function FlushRendering(aFlushMode) { + let browsingContext = content.docShell.browsingContext; + let ignoreThrottledAnimations = (aFlushMode === FlushMode.IGNORE_THROTTLED_ANIMATIONS); + let promise = content.windowGlobalChild.getActor("ReftestFission").sendQuery("FlushRendering", {browsingContext, ignoreThrottledAnimations}); + return promise.then(function(result) { + for (let errorString of result.errorStrings) { + LogError(errorString); + } + for (let warningString of result.warningStrings) { + LogWarning(warningString); + } + for (let infoString of result.infoStrings) { + LogInfo(infoString); + } + }, function(reason) { + // We expect actors to go away causing sendQuery's to fail, so + // just note it. + LogInfo("FlushRendering sendQuery to parent rejected: " + reason); + }); +} + +function WaitForTestEnd(contentRootElement, inPrintMode, spellCheckedElements, forURL) { + // WaitForTestEnd works via the MakeProgress function below. It is responsible for + // moving through the states listed above and calling FlushRendering. We also listen + // for a number of events, the most important of which is the AfterPaintListener, + // which is responsible for updating the canvas after paints. In a fission world + // FlushRendering and updating the canvas must necessarily be async operations. + // During these async operations we want to wait for them to finish and we don't + // want to try to do anything else (what would we even want to do while only some of + // the processes involved have flushed layout or updated their layer trees?). So + // we call OperationInProgress whenever we are about to go back to the event loop + // during one of these calls, and OperationCompleted when it finishes. This prevents + // anything else from running while we wait and getting us into a confused state. We + // then record anything that happens while we are waiting to make sure that the + // right actions are triggered. The possible actions are basically calling + // MakeProgress from a setTimeout, and updating the canvas for an after paint event. + // The after paint listener just stashes the rects and we update them after a + // completed MakeProgress call. This is handled by + // HandlePendingTasksAfterMakeProgress, which also waits for any pending after paint + // events. 
The general sequence of events is: + // - MakeProgress + // - HandlePendingTasksAfterMakeProgress + // - wait for after paint event if one is pending + // - update canvas for after paint events we have received + // - MakeProgress + // etc + + function CheckForLivenessOfContentRootElement() { + if (contentRootElement && Cu.isDeadWrapper(contentRootElement)) { + contentRootElement = null; + } + } + + var setTimeoutCallMakeProgressWhenComplete = false; + + var operationInProgress = false; + function OperationInProgress() { + if (operationInProgress != false) { + LogWarning("Nesting atomic operations?"); + } + operationInProgress = true; + } + function OperationCompleted() { + if (operationInProgress != true) { + LogWarning("Mismatched OperationInProgress/OperationCompleted calls?"); + } + operationInProgress = false; + if (setTimeoutCallMakeProgressWhenComplete) { + setTimeoutCallMakeProgressWhenComplete = false; + setTimeout(CallMakeProgress, 0); + } + } + function AssertNoOperationInProgress() { + if (operationInProgress) { + LogWarning("AssertNoOperationInProgress but operationInProgress"); + } + } + + var updateCanvasPending = false; + var updateCanvasRects = []; + + var stopAfterPaintReceived = false; + var currentDoc = content.document; + var state = STATE_WAITING_TO_FIRE_INVALIDATE_EVENT; + + var setTimeoutMakeProgressPending = false; + + function CallSetTimeoutMakeProgress() { + if (setTimeoutMakeProgressPending) { + return; + } + setTimeoutMakeProgressPending = true; + setTimeout(CallMakeProgress, 0); + } + + // This should only ever be called from a timeout. + function CallMakeProgress() { + if (operationInProgress) { + setTimeoutCallMakeProgressWhenComplete = true; + return; + } + setTimeoutMakeProgressPending = false; + MakeProgress(); + } + + var waitingForAnAfterPaint = false; + + // Updates the canvas if there are pending updates for it. Checks if we + // need to call MakeProgress. + function HandlePendingTasksAfterMakeProgress() { + AssertNoOperationInProgress(); + + if ((state == STATE_WAITING_TO_FIRE_INVALIDATE_EVENT || state == STATE_WAITING_TO_FINISH) && + shouldWaitForPendingPaints()) { + LogInfo("HandlePendingTasksAfterMakeProgress waiting for a MozAfterPaint"); + // We are in a state where we wait for MozAfterPaint to clear and a + // MozAfterPaint event is pending, give it a chance to fire, but don't + // let anything else run. + waitingForAnAfterPaint = true; + OperationInProgress(); + return; + } + + if (updateCanvasPending) { + LogInfo("HandlePendingTasksAfterMakeProgress updating canvas"); + updateCanvasPending = false; + let rects = updateCanvasRects; + updateCanvasRects = []; + OperationInProgress(); + CheckForLivenessOfContentRootElement(); + let promise = SendUpdateCanvasForEvent(forURL, rects, contentRootElement); + promise.then(function () { + OperationCompleted(); + // After paint events are fired immediately after a paint (one + // of the things that can call us). Don't confuse ourselves by + // firing synchronously if we triggered the paint ourselves. 
+ CallSetTimeoutMakeProgress(); + }); + } + } + + // true if rectA contains rectB + function Contains(rectA, rectB) { + return (rectA.left <= rectB.left && rectB.right <= rectA.right && rectA.top <= rectB.top && rectB.bottom <= rectA.bottom); + } + // true if some rect in rectList contains rect + function ContainedIn(rectList, rect) { + for (let i = 0; i < rectList.length; ++i) { + if (Contains(rectList[i], rect)) { + return true; + } + } + return false; + } + + function AfterPaintListener(event) { + LogInfo("AfterPaintListener in " + event.target.document.location.href); + if (event.target.document != currentDoc) { + // ignore paint events for subframes or old documents in the window. + // Invalidation in subframes will cause invalidation in the toplevel document anyway. + return; + } + + updateCanvasPending = true; + for (let r of event.clientRects) { + if (ContainedIn(updateCanvasRects, r)) { + continue; + } + + // Copy the rect; it's content and we are chrome, which means if the + // document goes away (and it can in some crashtests) our reference + // to it will be turned into a dead wrapper that we can't acccess. + updateCanvasRects.push({ left: r.left, top: r.top, right: r.right, bottom: r.bottom }); + } + + if (waitingForAnAfterPaint) { + waitingForAnAfterPaint = false; + OperationCompleted(); + } + + if (!operationInProgress) { + HandlePendingTasksAfterMakeProgress(); + } + // Otherwise we know that eventually after the operation finishes we + // will get a MakeProgress and/or HandlePendingTasksAfterMakeProgress + // call, so we don't need to do anything. + } + + function FromChildAfterPaintListener(event) { + LogInfo("FromChildAfterPaintListener from " + event.detail.originalTargetUri); + + updateCanvasPending = true; + for (let r of event.detail.rects) { + if (ContainedIn(updateCanvasRects, r)) { + continue; + } + + // Copy the rect; it's content and we are chrome, which means if the + // document goes away (and it can in some crashtests) our reference + // to it will be turned into a dead wrapper that we can't acccess. + updateCanvasRects.push({ left: r.left, top: r.top, right: r.right, bottom: r.bottom }); + } + + if (!operationInProgress) { + HandlePendingTasksAfterMakeProgress(); + } + // Otherwise we know that eventually after the operation finishes we + // will get a MakeProgress and/or HandlePendingTasksAfterMakeProgress + // call, so we don't need to do anything. + } + + function AttrModifiedListener() { + LogInfo("AttrModifiedListener fired"); + // Wait for the next return-to-event-loop before continuing --- for + // example, the attribute may have been modified in an subdocument's + // load event handler, in which case we need load event processing + // to complete and unsuppress painting before we check isMozAfterPaintPending. + CallSetTimeoutMakeProgress(); + } + + function ExplicitPaintsCompleteListener() { + LogInfo("ExplicitPaintsCompleteListener fired"); + // Since this can fire while painting, don't confuse ourselves by + // firing synchronously. It's fine to do this asynchronously. + CallSetTimeoutMakeProgress(); + } + + function RemoveListeners() { + // OK, we can end the test now. 
+ removeEventListener("MozAfterPaint", AfterPaintListener, false); + removeEventListener("Reftest:MozAfterPaintFromChild", FromChildAfterPaintListener, false); + CheckForLivenessOfContentRootElement(); + if (contentRootElement) { + contentRootElement.removeEventListener("DOMAttrModified", AttrModifiedListener); + } + gExplicitPendingPaintsCompleteHook = null; + gTimeoutHook = null; + // Make sure we're in the COMPLETED state just in case + // (this may be called via the test-timeout hook) + state = STATE_COMPLETED; + } + + // Everything that could cause shouldWaitForXXX() to + // change from returning true to returning false is monitored via some kind + // of event listener which eventually calls this function. + function MakeProgress() { + if (state >= STATE_COMPLETED) { + LogInfo("MakeProgress: STATE_COMPLETED"); + return; + } + + LogInfo("MakeProgress"); + + // We don't need to flush styles any more when we are in the state + // after reftest-wait has removed. + OperationInProgress(); + let promise = Promise.resolve(undefined); + if (state != STATE_WAITING_TO_FINISH) { + // If we are waiting for the MozReftestInvalidate event we don't want + // to flush throttled animations. Flushing throttled animations can + // continue to cause new MozAfterPaint events even when all the + // rendering we're concerned about should have ceased. Since + // MozReftestInvalidate won't be sent until we finish waiting for all + // MozAfterPaint events, we should avoid flushing throttled animations + // here or else we'll never leave this state. + flushMode = (state === STATE_WAITING_TO_FIRE_INVALIDATE_EVENT) + ? FlushMode.IGNORE_THROTTLED_ANIMATIONS + : FlushMode.ALL; + promise = FlushRendering(flushMode); + } + promise.then(function () { + OperationCompleted(); + MakeProgress2(); + // If there is an operation in progress then we know there will be + // a MakeProgress call is will happen after it finishes. 
+ if (!operationInProgress) { + HandlePendingTasksAfterMakeProgress(); + } + }); + } + + function MakeProgress2() { + switch (state) { + case STATE_WAITING_TO_FIRE_INVALIDATE_EVENT: { + LogInfo("MakeProgress: STATE_WAITING_TO_FIRE_INVALIDATE_EVENT"); + if (shouldWaitForExplicitPaintWaiters() || shouldWaitForPendingPaints() || + updateCanvasPending) { + gFailureReason = "timed out waiting for pending paint count to reach zero"; + if (shouldWaitForExplicitPaintWaiters()) { + gFailureReason += " (waiting for MozPaintWaitFinished)"; + LogInfo("MakeProgress: waiting for MozPaintWaitFinished"); + } + if (shouldWaitForPendingPaints()) { + gFailureReason += " (waiting for MozAfterPaint)"; + LogInfo("MakeProgress: waiting for MozAfterPaint"); + } + if (updateCanvasPending) { + gFailureReason += " (waiting for updateCanvasPending)"; + LogInfo("MakeProgress: waiting for updateCanvasPending"); + } + return; + } + + state = STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL; + CheckForLivenessOfContentRootElement(); + var hasReftestWait = shouldWaitForReftestWaitRemoval(contentRootElement); + // Notify the test document that now is a good time to test some invalidation + LogInfo("MakeProgress: dispatching MozReftestInvalidate"); + if (contentRootElement) { + var elements = getNoPaintElements(contentRootElement); + for (var i = 0; i < elements.length; ++i) { + windowUtils().checkAndClearPaintedState(elements[i]); + } + elements = getNoDisplayListElements(contentRootElement); + for (var i = 0; i < elements.length; ++i) { + windowUtils().checkAndClearDisplayListState(elements[i]); + } + elements = getDisplayListElements(contentRootElement); + for (var i = 0; i < elements.length; ++i) { + windowUtils().checkAndClearDisplayListState(elements[i]); + } + var notification = content.document.createEvent("Events"); + notification.initEvent("MozReftestInvalidate", true, false); + contentRootElement.dispatchEvent(notification); + } + + if (!inPrintMode && doPrintMode(contentRootElement)) { + LogInfo("MakeProgress: setting up print mode"); + setupPrintMode(); + } + + if (hasReftestWait && !shouldWaitForReftestWaitRemoval(contentRootElement)) { + // MozReftestInvalidate handler removed reftest-wait. + // We expect something to have been invalidated... + OperationInProgress(); + let promise = FlushRendering(FlushMode.ALL); + promise.then(function () { + OperationCompleted(); + if (!updateCanvasPending && !shouldWaitForPendingPaints() && + !shouldWaitForExplicitPaintWaiters()) { + LogWarning("MozInvalidateEvent didn't invalidate"); + } + MakeProgress(); + }); + return; + } + // Try next state + MakeProgress(); + return; + } + + case STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL: + LogInfo("MakeProgress: STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL"); + CheckForLivenessOfContentRootElement(); + if (shouldWaitForReftestWaitRemoval(contentRootElement)) { + gFailureReason = "timed out waiting for reftest-wait to be removed"; + LogInfo("MakeProgress: waiting for reftest-wait to be removed"); + return; + } + + if (shouldNotFlush(contentRootElement)) { + // If reftest-no-flush is specified, we need to set + // updateCanvasPending explicitly to take the latest snapshot + // since animation changes on the compositor thread don't invoke + // any MozAfterPaint events at all. + // NOTE: We don't add any rects to updateCanvasRects here since + // SendUpdateCanvasForEvent() will handle this case properly + // without any rects. 
+ updateCanvasPending = true; + } + // Try next state + state = STATE_WAITING_FOR_SPELL_CHECKS; + MakeProgress(); + return; + + case STATE_WAITING_FOR_SPELL_CHECKS: + LogInfo("MakeProgress: STATE_WAITING_FOR_SPELL_CHECKS"); + if (numPendingSpellChecks) { + gFailureReason = "timed out waiting for spell checks to end"; + LogInfo("MakeProgress: waiting for spell checks to end"); + return; + } + + state = STATE_WAITING_FOR_APZ_FLUSH; + LogInfo("MakeProgress: STATE_WAITING_FOR_APZ_FLUSH"); + gFailureReason = "timed out waiting for APZ flush to complete"; + + var os = Cc[NS_OBSERVER_SERVICE_CONTRACTID].getService(Ci.nsIObserverService); + var flushWaiter = function(aSubject, aTopic, aData) { + if (aTopic) LogInfo("MakeProgress: apz-repaints-flushed fired"); + os.removeObserver(flushWaiter, "apz-repaints-flushed"); + state = STATE_WAITING_TO_FINISH; + if (operationInProgress) { + CallSetTimeoutMakeProgress(); + } else { + MakeProgress(); + } + }; + os.addObserver(flushWaiter, "apz-repaints-flushed"); + + var willSnapshot = IsSnapshottableTestType(); + CheckForLivenessOfContentRootElement(); + var noFlush = !shouldNotFlush(contentRootElement); + if (noFlush && willSnapshot && windowUtils().flushApzRepaints()) { + LogInfo("MakeProgress: done requesting APZ flush"); + } else { + LogInfo("MakeProgress: APZ flush not required"); + flushWaiter(null, null, null); + } + return; + + case STATE_WAITING_FOR_APZ_FLUSH: + LogInfo("MakeProgress: STATE_WAITING_FOR_APZ_FLUSH"); + // Nothing to do here; once we get the apz-repaints-flushed event + // we will go to STATE_WAITING_TO_FINISH + return; + + case STATE_WAITING_TO_FINISH: + LogInfo("MakeProgress: STATE_WAITING_TO_FINISH"); + if (shouldWaitForExplicitPaintWaiters() || shouldWaitForPendingPaints() || + updateCanvasPending) { + gFailureReason = "timed out waiting for pending paint count to " + + "reach zero (after reftest-wait removed and switch to print mode)"; + if (shouldWaitForExplicitPaintWaiters()) { + gFailureReason += " (waiting for MozPaintWaitFinished)"; + LogInfo("MakeProgress: waiting for MozPaintWaitFinished"); + } + if (shouldWaitForPendingPaints()) { + gFailureReason += " (waiting for MozAfterPaint)"; + LogInfo("MakeProgress: waiting for MozAfterPaint"); + } + if (updateCanvasPending) { + gFailureReason += " (waiting for updateCanvasPending)"; + LogInfo("MakeProgress: waiting for updateCanvasPending"); + } + return; + } + CheckForLivenessOfContentRootElement(); + if (contentRootElement) { + var elements = getNoPaintElements(contentRootElement); + for (var i = 0; i < elements.length; ++i) { + if (windowUtils().checkAndClearPaintedState(elements[i])) { + SendFailedNoPaint(); + } + } + // We only support retained display lists in the content process + // right now, so don't fail reftest-no-display-list tests when + // we don't have e10s. + if (gBrowserIsRemote) { + elements = getNoDisplayListElements(contentRootElement); + for (var i = 0; i < elements.length; ++i) { + if (windowUtils().checkAndClearDisplayListState(elements[i])) { + SendFailedNoDisplayList(); + } + } + elements = getDisplayListElements(contentRootElement); + for (var i = 0; i < elements.length; ++i) { + if (!windowUtils().checkAndClearDisplayListState(elements[i])) { + SendFailedDisplayList(); + } + } + } + CheckLayerAssertions(contentRootElement); + } + + if (!IsSnapshottableTestType()) { + // If we're not snapshotting the test, at least do a sync round-trip + // to the compositor to ensure that all the rendering messages + // related to this test get processed. 
Otherwise problems triggered + // by this test may only manifest as failures in a later test. + LogInfo("MakeProgress: Doing sync flush to compositor"); + gFailureReason = "timed out while waiting for sync compositor flush" + windowUtils().syncFlushCompositor(); + } + + LogInfo("MakeProgress: Completed"); + state = STATE_COMPLETED; + gFailureReason = "timed out while taking snapshot (bug in harness?)"; + RemoveListeners(); + CheckForLivenessOfContentRootElement(); + CheckForProcessCrashExpectation(contentRootElement); + setTimeout(RecordResult, 0, forURL); + return; + } + } + + LogInfo("WaitForTestEnd: Adding listeners"); + addEventListener("MozAfterPaint", AfterPaintListener, false); + addEventListener("Reftest:MozAfterPaintFromChild", FromChildAfterPaintListener, false); + + // If contentRootElement is null then shouldWaitForReftestWaitRemoval will + // always return false so we don't need a listener anyway + CheckForLivenessOfContentRootElement(); + if (contentRootElement) { + contentRootElement.addEventListener("DOMAttrModified", AttrModifiedListener); + } + gExplicitPendingPaintsCompleteHook = ExplicitPaintsCompleteListener; + gTimeoutHook = RemoveListeners; + + // Listen for spell checks on spell-checked elements. + var numPendingSpellChecks = spellCheckedElements.length; + function decNumPendingSpellChecks() { + --numPendingSpellChecks; + if (operationInProgress) { + CallSetTimeoutMakeProgress(); + } else { + MakeProgress(); + } + } + for (let editable of spellCheckedElements) { + try { + onSpellCheck(editable, decNumPendingSpellChecks); + } catch (err) { + // The element may not have an editor, so ignore it. + setTimeout(decNumPendingSpellChecks, 0); + } + } + + // Take a full snapshot now that all our listeners are set up. This + // ensures it's impossible for us to miss updates between taking the snapshot + // and adding our listeners. + OperationInProgress(); + let promise = SendInitCanvasWithSnapshot(forURL); + promise.then(function () { + OperationCompleted(); + MakeProgress(); + }); +} + +async function OnDocumentLoad(uri) +{ + if (gClearingForAssertionCheck) { + if (uri == BLANK_URL_FOR_CLEARING) { + DoAssertionCheck(); + return; + } + + // It's likely the previous test document reloads itself and causes the + // attempt of loading blank page fails. In this case we should retry + // loading the blank page. + LogInfo("Retry loading a blank page"); + setTimeout(LoadURI, 0, BLANK_URL_FOR_CLEARING); + return; + } + + if (uri != gCurrentURL) { + LogInfo("OnDocumentLoad fired for previous document"); + // Ignore load events for previous documents. + return; + } + + var currentDoc = content && content.document; + + // Collect all editable, spell-checked elements. It may be the case that + // not all the elements that match this selector will be spell checked: for + // example, a textarea without a spellcheck attribute may have a parent with + // spellcheck=false, or script may set spellcheck=false on an element whose + // markup sets it to true. But that's OK since onSpellCheck detects the + // absence of spell checking, too. + var querySelector = + '*[class~="spell-checked"],' + + 'textarea:not([spellcheck="false"]),' + + 'input[spellcheck]:-moz-any([spellcheck=""],[spellcheck="true"]),' + + '*[contenteditable]:-moz-any([contenteditable=""],[contenteditable="true"])'; + var spellCheckedElements = currentDoc ? currentDoc.querySelectorAll(querySelector) : []; + + var contentRootElement = currentDoc ? 
currentDoc.documentElement : null; + currentDoc = null; + setupFullZoom(contentRootElement); + setupTextZoom(contentRootElement); + setupViewport(contentRootElement); + await setupDisplayport(contentRootElement); + var inPrintMode = false; + + async function AfterOnLoadScripts() { + // Regrab the root element, because the document may have changed. + var contentRootElement = + content.document ? content.document.documentElement : null; + + // "MozPaintWait" events are dispatched using a scriptrunner, so we + // receive then after painting has finished but before the main thread + // returns from the paint call. Then a "MozPaintWaitFinished" is + // dispatched to the main thread event loop. + // Before Fission both the FlushRendering and SendInitCanvasWithSnapshot + // calls were sync, but with Fission they must be async. So before Fission + // we got the MozPaintWait event but not the MozPaintWaitFinished event + // here (yet), which made us enter WaitForTestEnd. After Fission we get + // both MozPaintWait and MozPaintWaitFinished here. So to make this work + // the same way as before we just track if we got either event and go + // into reftest-wait mode. + var paintWaiterFinished = false; + + gExplicitPendingPaintsCompleteHook = function () { + LogInfo("PaintWaiters finished while we were sending initial snapshop in AfterOnLoadScripts"); + paintWaiterFinished = true; + } + + // Flush the document in case it got modified in a load event handler. + await FlushRendering(FlushMode.ALL); + + // Take a snapshot now. We need to do this before we check whether + // we should wait, since this might trigger dispatching of + // MozPaintWait events and make shouldWaitForExplicitPaintWaiters() true + // below. + let painted = await SendInitCanvasWithSnapshot(uri); + + gExplicitPendingPaintsCompleteHook = null; + + if (contentRootElement && Cu.isDeadWrapper(contentRootElement)) { + contentRootElement = null; + } + + if (paintWaiterFinished || shouldWaitForExplicitPaintWaiters() || + (!inPrintMode && doPrintMode(contentRootElement)) || + // If we didn't force a paint above, in + // InitCurrentCanvasWithSnapshot, so we should wait for a + // paint before we consider them done. + !painted) { + LogInfo("AfterOnLoadScripts belatedly entering WaitForTestEnd"); + // Go into reftest-wait mode belatedly. + WaitForTestEnd(contentRootElement, inPrintMode, [], uri); + } else { + CheckLayerAssertions(contentRootElement); + CheckForProcessCrashExpectation(contentRootElement); + RecordResult(uri); + } + } + + if (shouldWaitForReftestWaitRemoval(contentRootElement) || + shouldWaitForExplicitPaintWaiters() || + spellCheckedElements.length) { + // Go into reftest-wait mode immediately after painting has been + // unsuppressed, after the onload event has finished dispatching. + gFailureReason = "timed out waiting for test to complete (trying to get into WaitForTestEnd)"; + LogInfo("OnDocumentLoad triggering WaitForTestEnd"); + setTimeout(function () { WaitForTestEnd(contentRootElement, inPrintMode, spellCheckedElements, uri); }, 0); + } else { + if (doPrintMode(contentRootElement)) { + LogInfo("OnDocumentLoad setting up print mode"); + setupPrintMode(); + inPrintMode = true; + } + + // Since we can't use a bubbling-phase load listener from chrome, + // this is a capturing phase listener. So do setTimeout twice, the + // first to get us after the onload has fired in the content, and + // the second to get us after any setTimeout(foo, 0) in the content. 
+ gFailureReason = "timed out waiting for test to complete (waiting for onload scripts to complete)"; + LogInfo("OnDocumentLoad triggering AfterOnLoadScripts"); + setTimeout(function () { setTimeout(AfterOnLoadScripts, 0); }, 0); + } +} + +function CheckLayerAssertions(contentRootElement) +{ + if (!contentRootElement) { + return; + } + if (gIsWebRenderEnabled) { + // WebRender doesn't use layers, so let's not try checking layers + // assertions. + return; + } + + var opaqueLayerElements = getOpaqueLayerElements(contentRootElement); + for (var i = 0; i < opaqueLayerElements.length; ++i) { + var elem = opaqueLayerElements[i]; + try { + if (!windowUtils().isPartOfOpaqueLayer(elem)) { + SendFailedOpaqueLayer(elementDescription(elem) + ' is not part of an opaque layer'); + } + } catch (e) { + SendFailedOpaqueLayer('got an exception while checking whether ' + elementDescription(elem) + ' is part of an opaque layer'); + } + } + var layerNameToElementsMap = getAssignedLayerMap(contentRootElement); + var oneOfEach = []; + // Check that elements with the same reftest-assigned-layer share the same PaintedLayer. + for (var layerName in layerNameToElementsMap) { + try { + var elements = layerNameToElementsMap[layerName]; + oneOfEach.push(elements[0]); + var numberOfLayers = windowUtils().numberOfAssignedPaintedLayers(elements); + if (numberOfLayers !== 1) { + SendFailedAssignedLayer('these elements are assigned to ' + numberOfLayers + + ' different layers, instead of sharing just one layer: ' + + elements.map(elementDescription).join(', ')); + } + } catch (e) { + SendFailedAssignedLayer('got an exception while checking whether these elements share a layer: ' + + elements.map(elementDescription).join(', ')); + } + } + // Check that elements with different reftest-assigned-layer are assigned to different PaintedLayers. 
+ if (oneOfEach.length > 0) { + try { + var numberOfLayers = windowUtils().numberOfAssignedPaintedLayers(oneOfEach); + if (numberOfLayers !== oneOfEach.length) { + SendFailedAssignedLayer('these elements are assigned to ' + numberOfLayers + + ' different layers, instead of having none in common (expected ' + + oneOfEach.length + ' different layers): ' + + oneOfEach.map(elementDescription).join(', ')); + } + } catch (e) { + SendFailedAssignedLayer('got an exception while checking whether these elements are assigned to different layers: ' + + oneOfEach.map(elementDescription).join(', ')); + } + } +} + +function CheckForProcessCrashExpectation(contentRootElement) +{ + if (contentRootElement && + contentRootElement.hasAttribute('class') && + contentRootElement.getAttribute('class').split(/\s+/) + .includes("reftest-expect-process-crash")) { + SendExpectProcessCrash(); + } +} + +async function RecordResult(forURL) +{ + if (forURL != gCurrentURL) { + LogInfo("RecordResult fired for previous document"); + return; + } + + if (gCurrentURLRecordResults > 0) { + LogInfo("RecordResult fired extra times"); + FinishTestItem(); + return; + } + gCurrentURLRecordResults++; + + LogInfo("RecordResult fired"); + + var currentTestRunTime = Date.now() - gCurrentTestStartTime; + + clearTimeout(gFailureTimeout); + gFailureReason = null; + gFailureTimeout = null; + gCurrentURL = null; + gCurrentURLTargetType = undefined; + + if (gCurrentTestType == TYPE_PRINT) { + printToPdf(); + return; + } + if (gCurrentTestType == TYPE_SCRIPT) { + var error = ''; + var testwindow = content; + + if (testwindow.wrappedJSObject) + testwindow = testwindow.wrappedJSObject; + + var testcases; + if (!testwindow.getTestCases || typeof testwindow.getTestCases != "function") { + // Force an unexpected failure to alert the test author to fix the test. + error = "test must provide a function getTestCases(). (SCRIPT)\n"; + } + else if (!(testcases = testwindow.getTestCases())) { + // Force an unexpected failure to alert the test author to fix the test. + error = "test's getTestCases() must return an Array-like Object. (SCRIPT)\n"; + } + else if (testcases.length == 0) { + // This failure may be due to a JavaScript Engine bug causing + // early termination of the test. If we do not allow silent + // failure, the driver will report an error. + } + + var results = [ ]; + if (!error) { + // FIXME/bug 618176: temporary workaround + for (var i = 0; i < testcases.length; ++i) { + var test = testcases[i]; + results.push({ passed: test.testPassed(), + description: test.testDescription() }); + } + //results = testcases.map(function(test) { + // return { passed: test.testPassed(), + // description: test.testDescription() }; + } + + SendScriptResults(currentTestRunTime, error, results); + FinishTestItem(); + return; + } + + // Setup async scroll offsets now in case SynchronizeForSnapshot is not + // called (due to reftest-no-sync-layers being supplied, or in the single + // process case). 
+ let changedAsyncScrollZoom = await setupAsyncScrollOffsets({allowFailure:true}); + if (setupAsyncZoom({allowFailure:true})) { + changedAsyncScrollZoom = true; + } + if (changedAsyncScrollZoom && !gBrowserIsRemote) { + sendAsyncMessage("reftest:UpdateWholeCanvasForInvalidation"); + } + + SendTestDone(currentTestRunTime); + FinishTestItem(); +} + +function LoadFailed() +{ + if (gTimeoutHook) { + gTimeoutHook(); + } + gFailureTimeout = null; + SendFailedLoad(gFailureReason); +} + +function FinishTestItem() +{ + gHaveCanvasSnapshot = false; +} + +function DoAssertionCheck() +{ + gClearingForAssertionCheck = false; + + var numAsserts = 0; + if (gDebug.isDebugBuild) { + var newAssertionCount = gDebug.assertionCount; + numAsserts = newAssertionCount - gAssertionCount; + gAssertionCount = newAssertionCount; + } + SendAssertionCount(numAsserts); +} + +function LoadURI(uri) +{ + let loadURIOptions = { + triggeringPrincipal: Services.scriptSecurityManager.getSystemPrincipal(), + }; + webNavigation().loadURI(uri, loadURIOptions); +} + +function LogError(str) +{ + if (gVerbose) { + sendSyncMessage("reftest:Log", { type: "error", msg: str }); + } else { + sendAsyncMessage("reftest:Log", { type: "error", msg: str }); + } +} + +function LogWarning(str) +{ + if (gVerbose) { + sendSyncMessage("reftest:Log", { type: "warning", msg: str }); + } else { + sendAsyncMessage("reftest:Log", { type: "warning", msg: str }); + } +} + +function LogInfo(str) +{ + if (gVerbose) { + sendSyncMessage("reftest:Log", { type: "info", msg: str }); + } else { + sendAsyncMessage("reftest:Log", { type: "info", msg: str }); + } +} + +function IsSnapshottableTestType() +{ + // Script, load-only, and PDF-print tests do not need any snapshotting. + return !(gCurrentTestType == TYPE_SCRIPT || + gCurrentTestType == TYPE_LOAD || + gCurrentTestType == TYPE_PRINT); +} + +const SYNC_DEFAULT = 0x0; +const SYNC_ALLOW_DISABLE = 0x1; +// Returns a promise that resolve when the snapshot is done. +function SynchronizeForSnapshot(flags) +{ + if (!IsSnapshottableTestType()) { + return Promise.resolve(undefined); + } + + if (flags & SYNC_ALLOW_DISABLE) { + var docElt = content.document.documentElement; + if (docElt && + (docElt.hasAttribute("reftest-no-sync-layers") || + shouldNotFlush(docElt))) { + LogInfo("Test file chose to skip SynchronizeForSnapshot"); + return Promise.resolve(undefined); + } + } + + let browsingContext = content.docShell.browsingContext; + let promise = content.windowGlobalChild.getActor("ReftestFission").sendQuery("UpdateLayerTree", {browsingContext}); + return promise.then(function (result) { + for (let errorString of result.errorStrings) { + LogError(errorString); + } + for (let infoString of result.infoStrings) { + LogInfo(infoString); + } + + // Setup async scroll offsets now, because any scrollable layers should + // have had their AsyncPanZoomControllers created. + return setupAsyncScrollOffsets({allowFailure:false}).then(function(result) { + setupAsyncZoom({allowFailure:false}); + }); + }, function(reason) { + // We expect actors to go away causing sendQuery's to fail, so + // just note it. + LogInfo("UpdateLayerTree sendQuery to parent rejected: " + reason); + + // Setup async scroll offsets now, because any scrollable layers should + // have had their AsyncPanZoomControllers created. 
+ return setupAsyncScrollOffsets({allowFailure:false}).then(function(result) { + setupAsyncZoom({allowFailure:false}); + }); + }); +} + +function RegisterMessageListeners() +{ + addMessageListener( + "reftest:Clear", + function (m) { RecvClear() } + ); + addMessageListener( + "reftest:LoadScriptTest", + function (m) { RecvLoadScriptTest(m.json.uri, m.json.timeout); } + ); + addMessageListener( + "reftest:LoadPrintTest", + function (m) { RecvLoadPrintTest(m.json.uri, m.json.timeout); } + ); + addMessageListener( + "reftest:LoadTest", + function (m) { RecvLoadTest(m.json.type, m.json.uri, + m.json.uriTargetType, + m.json.timeout); } + ); + addMessageListener( + "reftest:ResetRenderingState", + function (m) { RecvResetRenderingState(); } + ); + addMessageListener( + "reftest:PrintDone", + function (m) { RecvPrintDone(m.json.status, m.json.fileName); } + ); +} + +function RecvClear() +{ + gClearingForAssertionCheck = true; + LoadURI(BLANK_URL_FOR_CLEARING); +} + +function RecvLoadTest(type, uri, uriTargetType, timeout) +{ + StartTestURI(type, uri, uriTargetType, timeout); +} + +function RecvLoadScriptTest(uri, timeout) +{ + StartTestURI(TYPE_SCRIPT, uri, URL_TARGET_TYPE_TEST, timeout); +} + +function RecvLoadPrintTest(uri, timeout) +{ + StartTestURI(TYPE_PRINT, uri, URL_TARGET_TYPE_TEST, timeout); +} + +function RecvResetRenderingState() +{ + resetZoomAndTextZoom(); + resetDisplayportAndViewport(); +} + +function RecvPrintDone(status, fileName) +{ + const currentTestRunTime = Date.now() - gCurrentTestStartTime; + SendPrintResult(currentTestRunTime, status, fileName); + FinishTestItem(); +} + +function SendAssertionCount(numAssertions) +{ + sendAsyncMessage("reftest:AssertionCount", { count: numAssertions }); +} + +function SendContentReady() +{ + let gfxInfo = (NS_GFXINFO_CONTRACTID in Cc) && Cc[NS_GFXINFO_CONTRACTID].getService(Ci.nsIGfxInfo); + let info = gfxInfo.getInfo(); + + // The webrender check has to be separate from the d2d checks + // since the d2d checks will throw an exception on non-windows platforms. + try { + gIsWebRenderEnabled = gfxInfo.WebRenderEnabled; + } catch (e) { + gIsWebRenderEnabled = false; + } + + try { + info.D2DEnabled = gfxInfo.D2DEnabled; + info.DWriteEnabled = gfxInfo.DWriteEnabled; + info.EmbeddedInFirefoxReality = gfxInfo.EmbeddedInFirefoxReality; + } catch (e) { + info.D2DEnabled = false; + info.DWriteEnabled = false; + info.EmbeddedInFirefoxReality = false; + } + + return sendSyncMessage("reftest:ContentReady", { 'gfx': info })[0]; +} + +function SendException(what) +{ + sendAsyncMessage("reftest:Exception", { what: what }); +} + +function SendFailedLoad(why) +{ + sendAsyncMessage("reftest:FailedLoad", { why: why }); +} + +function SendFailedNoPaint() +{ + sendAsyncMessage("reftest:FailedNoPaint"); +} + +function SendFailedNoDisplayList() +{ + sendAsyncMessage("reftest:FailedNoDisplayList"); +} + +function SendFailedDisplayList() +{ + sendAsyncMessage("reftest:FailedDisplayList"); +} + +function SendFailedOpaqueLayer(why) +{ + sendAsyncMessage("reftest:FailedOpaqueLayer", { why: why }); +} + +function SendFailedAssignedLayer(why) +{ + sendAsyncMessage("reftest:FailedAssignedLayer", { why: why }); +} + +// Returns a promise that resolves to a bool that indicates if a snapshot was taken. +function SendInitCanvasWithSnapshot(forURL) +{ + if (forURL != gCurrentURL) { + LogInfo("SendInitCanvasWithSnapshot called for previous document"); + // Lie and say we painted because it doesn't matter, this is a test we + // are already done with that is clearing out. 
Then AfterOnLoadScripts + // should finish quicker if that is who is calling us. + return Promise.resolve(true); + } + + // If we're in the same process as the top-level XUL window, then + // drawing that window will also update our layers, so no + // synchronization is needed. + // + // NB: this is a test-harness optimization only, it must not + // affect the validity of the tests. + if (gBrowserIsRemote) { + let promise = SynchronizeForSnapshot(SYNC_DEFAULT); + return promise.then(function () { + let ret = sendSyncMessage("reftest:InitCanvasWithSnapshot")[0]; + + gHaveCanvasSnapshot = ret.painted; + return ret.painted; + }); + } + + // For in-process browser, we have to make a synchronous request + // here to make the above optimization valid, so that MozWaitPaint + // events dispatched (synchronously) during painting are received + // before we check the paint-wait counter. For out-of-process + // browser though, it doesn't wrt correctness whether this request + // is sync or async. + let ret = sendSyncMessage("reftest:InitCanvasWithSnapshot")[0]; + + gHaveCanvasSnapshot = ret.painted; + return Promise.resolve(ret.painted); +} + +function SendScriptResults(runtimeMs, error, results) +{ + sendAsyncMessage("reftest:ScriptResults", + { runtimeMs: runtimeMs, error: error, results: results }); +} + +function SendStartPrint(isPrintSelection, printRange) +{ + sendAsyncMessage("reftest:StartPrint", { isPrintSelection, printRange }); +} + +function SendPrintResult(runtimeMs, status, fileName) +{ + sendAsyncMessage("reftest:PrintResult", + { runtimeMs: runtimeMs, status: status, fileName: fileName }); +} + +function SendExpectProcessCrash(runtimeMs) +{ + sendAsyncMessage("reftest:ExpectProcessCrash"); +} + +function SendTestDone(runtimeMs) +{ + sendAsyncMessage("reftest:TestDone", { runtimeMs: runtimeMs }); +} + +function roundTo(x, fraction) +{ + return Math.round(x/fraction)*fraction; +} + +function elementDescription(element) +{ + return '<' + element.localName + + [].slice.call(element.attributes).map((attr) => + ` ${attr.nodeName}="${attr.value}"`).join('') + + '>'; +} + +function SendUpdateCanvasForEvent(forURL, rectList, contentRootElement) +{ + if (forURL != gCurrentURL) { + LogInfo("SendUpdateCanvasForEvent called for previous document"); + // This is a test we are already done with that is clearing out. + // Don't do anything. + return Promise.resolve(undefined); + } + + var win = content; + var scale = docShell.browsingContext.fullZoom; + + var rects = [ ]; + if (shouldSnapshotWholePage(contentRootElement)) { + // See comments in SendInitCanvasWithSnapshot() re: the split + // logic here. + if (!gBrowserIsRemote) { + sendSyncMessage("reftest:UpdateWholeCanvasForInvalidation"); + } else { + let promise = SynchronizeForSnapshot(SYNC_ALLOW_DISABLE); + return promise.then(function () { + sendAsyncMessage("reftest:UpdateWholeCanvasForInvalidation"); + }); + } + return Promise.resolve(undefined); + } + + var message; + + if ((gIsWebRenderEnabled || shouldNotFlush(contentRootElement)) && + !windowUtils().isMozAfterPaintPending) { + // Webrender doesn't have invalidation, and animations on the compositor + // don't invoke any MozAfterEvent which means we have no invalidated + // rect so we just invalidate the whole screen once we don't have + // anymore paints pending. This will force the snapshot. 
+ + LogInfo("Webrender enabled, sending update whole canvas for invalidation"); + message = "reftest:UpdateWholeCanvasForInvalidation"; + } else { + LogInfo("SendUpdateCanvasForEvent with " + rectList.length + " rects"); + for (var i = 0; i < rectList.length; ++i) { + var r = rectList[i]; + // Set left/top/right/bottom to "device pixel" boundaries + var left = Math.floor(roundTo(r.left * scale, 0.001)); + var top = Math.floor(roundTo(r.top * scale, 0.001)); + var right = Math.ceil(roundTo(r.right * scale, 0.001)); + var bottom = Math.ceil(roundTo(r.bottom * scale, 0.001)); + LogInfo("Rect: " + left + " " + top + " " + right + " " + bottom); + + rects.push({ left: left, top: top, right: right, bottom: bottom }); + } + + message = "reftest:UpdateCanvasForInvalidation"; + } + + // See comments in SendInitCanvasWithSnapshot() re: the split + // logic here. + if (!gBrowserIsRemote) { + sendSyncMessage(message, { rects: rects }); + } else { + let promise = SynchronizeForSnapshot(SYNC_ALLOW_DISABLE); + return promise.then(function () { + sendAsyncMessage(message, { rects: rects }); + }); + } + + return Promise.resolve(undefined); +} + +if (content.document.readyState == "complete") { + // load event has already fired for content, get started + OnInitialLoad(); +} else { + addEventListener("load", OnInitialLoad, true); +} diff --git a/layout/tools/reftest/reftest-to-html.pl b/layout/tools/reftest/reftest-to-html.pl new file mode 100755 index 0000000000..3fc2380e9e --- /dev/null +++ b/layout/tools/reftest/reftest-to-html.pl @@ -0,0 +1,118 @@ +#!/usr/bin/perl + +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +print <<EOD +<html> +<head> +<title>reftest output</title> +<style type="text/css"> +/* must be in this order */ +.PASS { background-color: green; } +.FAIL { background-color: red; } +.XFAIL { background-color: #999300; } +.WEIRDPASS { background-color: #00FFED; } +.PASSRANDOM { background-color: #598930; } +.FAILRANDOM, td.XFAILRANDOM { background-color: #99402A; } + +.FAILIMAGES { } +img { margin: 5px; width: 80px; height: 100px; } +img.testresult { border: 2px solid red; } +img.testref { border: 2px solid green; } +a { color: inherit; } +.always { display: inline ! 
important; } +</style> +</head> +<body> +<p> +<span class="PASS always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[0].style; if (s.display == 'none') s.display = null; else s.display = 'none';">PASS</span> +<span class="FAIL always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[1].style; if (s.display == 'none') s.display = null; else s.display = 'none';">UNEXPECTED FAIL</span> +<span class="XFAIL always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[2].style; if (s.display == 'none') s.display = null; else s.display = 'none';">KNOWN FAIL</span> +<span class="WEIRDPASS always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[3].style; if (s.display == 'none') s.display = null; else s.display = 'none';">UNEXPECTED PASS</span> +<span class="PASSRANDOM always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[4].style; if (s.display == 'none') s.display = null; else s.display = 'none';">PASS (Random)</span> +<span class="FAILRANDOM always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[5].style; if (s.display == 'none') s.display = null; else s.display = 'none';">FAIL (Random)</span> +</p> +<table> +EOD +; + +sub readcleanline { + my $l = <>; + chomp $l; + chop $l if ($l =~ /\r$/); + return $l; +} + +sub do_html { + my ($l) = @_; + + $l =~ s,(file:[^ ]*),<a href="\1">\1</a>,g; + $l =~ s,(data:[^ ]*),<a href="\1">\1</a>,g; + + return $l; +} + +$l = 0; + +while (<>) { + $l++; + next unless /^REFTEST/; + + chomp; + chop if /\r$/; + + s/^REFTEST *//; + + my $randomresult = 0; + if (/EXPECTED RANDOM/) { + s/\(EXPECTED RANDOM\)//; + $randomresult = 1; + } + + if (/^TEST-PASS \| (.*)$/) { + my $class = $randomresult ? "PASSRANDOM" : "PASS"; + print '<tr><td class="' . $class . '">' . do_html($1) . "</td></tr>\n"; + } elsif (/^TEST-UNEXPECTED-(....) \| (.*)$/) { + if ($randomresult) { + die "Error on line $l: UNEXPECTED with test marked random?!"; + } + my $class = ($1 eq "PASS") ? "WEIRDPASS" : "FAIL"; + print '<tr><td class="' . $class . '">' . do_html($2) . "</td></tr>\n"; + + # UNEXPECTED results can be followed by one or two images + $testline = &readcleanline; + + print '<tr><td class="FAILIMAGES">'; + + if ($testline =~ /REFTEST IMAGE: (data:.*)$/) { + print '<a href="' . $1 . '"><img class="testresult" src="' . $1 . '"></a>'; + } elsif ($testline =~ /REFTEST IMAGE 1 \(TEST\): (data:.*)$/) { + $refline = &readcleanline; + print '<a href="' . $1 . '"><img class="testresult" src="' . $1 . '"></a>'; + { + die "Error on line $l" unless $refline =~ /REFTEST IMAGE 2 \(REFERENCE\): (data:.*)$/; + print '<a href="' . $1 . '"><img class="testref" src="' . $1 . '"></a>'; + } + + } else { + die "Error on line $l"; + } + + print "</td></tr>\n"; + } elsif (/^TEST-KNOWN-FAIL \| (.*$)/) { + my $class = $randomresult ? "XFAILRANDOM" : "XFAIL"; + print '<tr><td class="' . $class . '">' . do_html($1) . "</td></tr>\n"; + } else { + print STDERR "Unknown Line: " . $_ . "\n"; + print "<tr><td><pre>" . $_ . 
"</pre></td></tr>\n"; + } +} + +print <<EOD +</table> +</body> +</html> +EOD +; diff --git a/layout/tools/reftest/reftest.jsm b/layout/tools/reftest/reftest.jsm new file mode 100644 index 0000000000..6d9eb85b38 --- /dev/null +++ b/layout/tools/reftest/reftest.jsm @@ -0,0 +1,1937 @@ +/* -*- indent-tabs-mode: nil; js-indent-level: 4 -*- / +/* vim: set shiftwidth=4 tabstop=8 autoindent cindent expandtab: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict"; + +var EXPORTED_SYMBOLS = [ + "OnRefTestLoad", + "OnRefTestUnload", + "getTestPlugin" +]; + +Cu.import("resource://gre/modules/FileUtils.jsm"); +Cu.import("resource://reftest/globals.jsm", this); +Cu.import("resource://reftest/httpd.jsm", this); +Cu.import("resource://reftest/manifest.jsm", this); +Cu.import("resource://reftest/StructuredLog.jsm", this); +Cu.import("resource://reftest/PerTestCoverageUtils.jsm", this); +Cu.import("resource://gre/modules/Services.jsm"); +Cu.import('resource://gre/modules/XPCOMUtils.jsm'); + +const { E10SUtils } = ChromeUtils.import( + "resource://gre/modules/E10SUtils.jsm" +); + +XPCOMUtils.defineLazyGetter(this, "OS", function() { + const { OS } = Cu.import("resource://gre/modules/osfile.jsm"); + return OS; +}); + +function HasUnexpectedResult() +{ + return g.testResults.Exception > 0 || + g.testResults.FailedLoad > 0 || + g.testResults.UnexpectedFail > 0 || + g.testResults.UnexpectedPass > 0 || + g.testResults.AssertionUnexpected > 0 || + g.testResults.AssertionUnexpectedFixed > 0; +} + +// By default we just log to stdout +var gDumpFn = function(line) { + dump(line); + if (g.logFile) { + g.logFile.writeString(line); + } +} +var gDumpRawLog = function(record) { + // Dump JSON representation of data on a single line + var line = "\n" + JSON.stringify(record) + "\n"; + dump(line); + + if (g.logFile) { + g.logFile.writeString(line); + } +} +g.logger = new StructuredLogger('reftest', gDumpRawLog); +var logger = g.logger; + +function TestBuffer(str) +{ + logger.debug(str); + g.testLog.push(str); +} + +function isWebRenderOnAndroidDevice() { + var xr = Cc["@mozilla.org/xre/app-info;1"].getService(Ci.nsIXULRuntime); + // This is the best we can do for now; maybe in the future we'll have + // more correct detection of this case. + return xr.OS == "Android" && + g.browserIsRemote && + g.windowUtils.layerManagerType == "WebRender"; +} + +function FlushTestBuffer() +{ + // In debug mode, we've dumped all these messages already. + if (g.logLevel !== 'debug') { + for (var i = 0; i < g.testLog.length; ++i) { + logger.info("Saved log: " + g.testLog[i]); + } + } + g.testLog = []; +} + +function LogWidgetLayersFailure() +{ + logger.error( + "Screen resolution is too low - USE_WIDGET_LAYERS was disabled. " + + (g.browserIsRemote ? + "Since E10s is enabled, there is no fallback rendering path!" : + "The fallback rendering path is not reliably consistent with on-screen rendering.")); + + logger.error( + "If you cannot increase your screen resolution you can try reducing " + + "gecko's pixel scaling by adding something like '--setpref " + + "layout.css.devPixelsPerPx=1.0' to your './mach reftest' command " + + "(possibly as an alias in ~/.mozbuild/machrc). 
Note that this is " + + "inconsistent with CI testing, and may interfere with HighDPI/" + + "reftest-zoom tests."); +} + +function AllocateCanvas() +{ + if (g.recycledCanvases.length > 0) { + return g.recycledCanvases.shift(); + } + + var canvas = g.containingWindow.document.createElementNS(XHTML_NS, "canvas"); + var r = g.browser.getBoundingClientRect(); + canvas.setAttribute("width", Math.ceil(r.width)); + canvas.setAttribute("height", Math.ceil(r.height)); + + return canvas; +} + +function ReleaseCanvas(canvas) +{ + // store a maximum of 2 canvases, if we're not caching + if (!g.noCanvasCache || g.recycledCanvases.length < 2) { + g.recycledCanvases.push(canvas); + } +} + +function IDForEventTarget(event) +{ + try { + return "'" + event.target.getAttribute('id') + "'"; + } catch (ex) { + return "<unknown>"; + } +} + +function getTestPlugin(aName) { + var ph = Cc["@mozilla.org/plugin/host;1"].getService(Ci.nsIPluginHost); + var tags = ph.getPluginTags(); + + // Find the test plugin + for (var i = 0; i < tags.length; i++) { + if (tags[i].name == aName) + return tags[i]; + } + + logger.warning("Failed to find the test-plugin."); + return null; +} + +function OnRefTestLoad(win) +{ + g.crashDumpDir = Cc[NS_DIRECTORY_SERVICE_CONTRACTID] + .getService(Ci.nsIProperties) + .get("ProfD", Ci.nsIFile); + g.crashDumpDir.append("minidumps"); + + g.pendingCrashDumpDir = Cc[NS_DIRECTORY_SERVICE_CONTRACTID] + .getService(Ci.nsIProperties) + .get("UAppData", Ci.nsIFile); + g.pendingCrashDumpDir.append("Crash Reports"); + g.pendingCrashDumpDir.append("pending"); + + var env = Cc["@mozilla.org/process/environment;1"]. + getService(Ci.nsIEnvironment); + + g.browserIsRemote = Services.appinfo.browserTabsRemoteAutostart; + g.browserIsFission = Services.appinfo.fissionAutostart; + + var prefs = Cc["@mozilla.org/preferences-service;1"]. + getService(Ci.nsIPrefBranch); + g.browserIsIframe = prefs.getBoolPref("reftest.browser.iframe.enabled", false); + + g.logLevel = prefs.getStringPref("reftest.logLevel", "info"); + + if (win === undefined || win == null) { + win = window; + } + if (g.containingWindow == null && win != null) { + g.containingWindow = win; + } + + if (g.browserIsIframe) { + g.browser = g.containingWindow.document.createElementNS(XHTML_NS, "iframe"); + g.browser.setAttribute("mozbrowser", ""); + } else { + g.browser = g.containingWindow.document.createElementNS(XUL_NS, "xul:browser"); + } + g.browser.setAttribute("id", "browser"); + g.browser.setAttribute("type", "content"); + g.browser.setAttribute("primary", "true"); + g.browser.setAttribute("remote", g.browserIsRemote ? 
"true" : "false"); + // Make sure the browser element is exactly 800x1000, no matter + // what size our window is + g.browser.setAttribute("style", "padding: 0px; margin: 0px; border:none; min-width: 800px; min-height: 1000px; max-width: 800px; max-height: 1000px"); + + if (Services.appinfo.OS == "Android") { + let doc = g.containingWindow.document.getElementById('main-window'); + while (doc.hasChildNodes()) { + doc.firstChild.remove(); + } + doc.appendChild(g.browser); + // TODO Bug 1156817: reftests don't have most of GeckoView infra so we + // can't register this actor + ChromeUtils.unregisterWindowActor("LoadURIDelegate"); + ChromeUtils.unregisterWindowActor("WebBrowserChrome"); + } else { + document.getElementById("reftest-window").appendChild(g.browser); + } + + // reftests should have the test plugins enabled, not click-to-play + let plugin1 = getTestPlugin("Test Plug-in"); + let plugin2 = getTestPlugin("Second Test Plug-in"); + if (plugin1 && plugin2) { + g.testPluginEnabledStates = [plugin1.enabledState, plugin2.enabledState]; + plugin1.enabledState = Ci.nsIPluginTag.STATE_ENABLED; + plugin2.enabledState = Ci.nsIPluginTag.STATE_ENABLED; + } else { + logger.warning("Could not get test plugin tags."); + } + + g.browserMessageManager = g.browser.frameLoader.messageManager; + // The content script waits for the initial onload, then notifies + // us. + RegisterMessageListenersAndLoadContentScript(false); +} + +function InitAndStartRefTests() +{ + /* These prefs are optional, so we don't need to spit an error to the log */ + try { + var prefs = Cc["@mozilla.org/preferences-service;1"]. + getService(Ci.nsIPrefBranch); + } catch(e) { + logger.error("EXCEPTION: " + e); + } + + try { + prefs.setBoolPref("android.widget_paints_background", false); + } catch (e) {} + + // If fission is enabled, then also put data: URIs in the default web process, + // since most reftests run in the file process, and this will make data: + // <iframe>s OOP. + if (g.browserIsFission) { + prefs.setBoolPref("browser.tabs.remote.dataUriInDefaultWebProcess", true); + } + + /* set the g.loadTimeout */ + try { + g.loadTimeout = prefs.getIntPref("reftest.timeout"); + } catch(e) { + g.loadTimeout = 5 * 60 * 1000; //5 minutes as per bug 479518 + } + + /* Get the logfile for android tests */ + try { + var logFile = prefs.getStringPref("reftest.logFile"); + if (logFile) { + var f = FileUtils.File(logFile); + var out = FileUtils.openFileOutputStream(f, FileUtils.MODE_WRONLY | FileUtils.MODE_CREATE); + g.logFile = Cc["@mozilla.org/intl/converter-output-stream;1"] + .createInstance(Ci.nsIConverterOutputStream); + g.logFile.init(out, null); + } + } catch(e) {} + + g.remote = prefs.getBoolPref("reftest.remote", false); + + g.ignoreWindowSize = prefs.getBoolPref("reftest.ignoreWindowSize", false); + + /* Support for running a chunk (subset) of tests. In separate try as this is optional */ + try { + g.totalChunks = prefs.getIntPref("reftest.totalChunks"); + g.thisChunk = prefs.getIntPref("reftest.thisChunk"); + } + catch(e) { + g.totalChunks = 0; + g.thisChunk = 0; + } + + try { + g.focusFilterMode = prefs.getStringPref("reftest.focusFilterMode"); + } catch(e) {} + + try { + g.isCoverageBuild = prefs.getBoolPref("reftest.isCoverageBuild"); + } catch(e) {} + + try { + g.compareRetainedDisplayLists = prefs.getBoolPref("reftest.compareRetainedDisplayLists"); + } catch (e) {} + + try { + // We have to set print.always_print_silent or a print dialog would + // appear for each print operation, which would interrupt the test run. 
+ prefs.setBoolPref("print.always_print_silent", true); + } catch (e) { + /* uh oh, print reftests may not work... */ + logger.warning("Failed to set silent printing pref, EXCEPTION: " + e); + } + + g.windowUtils = g.containingWindow.windowUtils; + if (!g.windowUtils || !g.windowUtils.compareCanvases) + throw "nsIDOMWindowUtils inteface missing"; + + g.ioService = Cc[IO_SERVICE_CONTRACTID].getService(Ci.nsIIOService); + g.debug = Cc[DEBUG_CONTRACTID].getService(Ci.nsIDebug2); + + RegisterProcessCrashObservers(); + + if (g.remote) { + g.server = null; + } else { + g.server = new HttpServer(); + } + try { + if (g.server) + StartHTTPServer(); + } catch (ex) { + //g.browser.loadURI('data:text/plain,' + ex); + ++g.testResults.Exception; + logger.error("EXCEPTION: " + ex); + DoneTests(); + } + + // Focus the content browser. + if (g.focusFilterMode != FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS) { + var fm = Cc["@mozilla.org/focus-manager;1"].getService(Ci.nsIFocusManager); + if (fm.activeWindow != g.containingWindow) { + Focus(); + } + g.browser.addEventListener("focus", ReadTests, true); + g.browser.focus(); + } else { + ReadTests(); + } +} + +function StartHTTPServer() +{ + g.server.registerContentType("sjs", "sjs"); + g.server.start(-1); + g.httpServerPort = g.server.identity.primaryPort; +} + +// Perform a Fisher-Yates shuffle of the array. +function Shuffle(array) +{ + for (var i = array.length - 1; i > 0; i--) { + var j = Math.floor(Math.random() * (i + 1)); + var temp = array[i]; + array[i] = array[j]; + array[j] = temp; + } +} + +function ReadTests() { + try { + if (g.focusFilterMode != FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS) { + g.browser.removeEventListener("focus", ReadTests, true); + } + + g.urls = []; + var prefs = Cc["@mozilla.org/preferences-service;1"]. + getService(Ci.nsIPrefBranch); + + /* There are three modes implemented here: + * 1) reftest.manifests + * 2) reftest.manifests and reftest.manifests.dumpTests + * 3) reftest.tests + * + * The first will parse the specified manifests, then immediately + * run the tests. The second will parse the manifests, save the test + * objects to a file and exit. The third will load a file of test + * objects and run them. + * + * The latter two modes are used to pass test data back and forth + * with python harness. + */ + let manifests = prefs.getStringPref("reftest.manifests", null); + let dumpTests = prefs.getStringPref("reftest.manifests.dumpTests", null); + let testList = prefs.getStringPref("reftest.tests", null); + + if ((testList && manifests) || !(testList || manifests)) { + logger.error("Exactly one of reftest.manifests or reftest.tests must be specified."); + logger.debug("reftest.manifests is: " + manifests); + logger.error("reftest.tests is: " + testList); + DoneTests(); + } + + if (testList) { + logger.debug("Reading test objects from: " + testList); + let promise = OS.File.read(testList).then(function onSuccess(array) { + let decoder = new TextDecoder(); + g.urls = JSON.parse(decoder.decode(array)).map(CreateUrls); + StartTests(); + }).catch(function onFailure(e) { + logger.error("Failed to load test objects: " + e); + DoneTests(); + }); + } else if (manifests) { + // Parse reftest manifests + logger.debug("Reading " + manifests.length + " manifests"); + manifests = JSON.parse(manifests); + g.urlsFilterRegex = manifests[null]; + + var globalFilter = manifests.hasOwnProperty("") ? 
new RegExp(manifests[""]) : null; + delete manifests[""]; + var manifestURLs = Object.keys(manifests); + + // Ensure we read manifests from higher up the directory tree first so that we + // process includes before reading the included manifest again + manifestURLs.sort(function(a,b) {return a.length - b.length}) + manifestURLs.forEach(function(manifestURL) { + logger.info("Reading manifest " + manifestURL); + var manifestInfo = manifests[manifestURL]; + var filter = manifestInfo[0] ? new RegExp(manifestInfo[0]) : null; + var manifestID = manifestInfo[1]; + ReadTopManifest(manifestURL, [globalFilter, filter, false], manifestID); + }); + + if (dumpTests) { + logger.debug("Dumping test objects to file: " + dumpTests); + let encoder = new TextEncoder(); + let tests = encoder.encode(JSON.stringify(g.urls)); + OS.File.writeAtomic(dumpTests, tests, {flush: true}).then( + function onSuccess() { + DoneTests(); + }, + function onFailure(reason) { + logger.error("failed to write test data: " + reason); + DoneTests(); + } + ) + } else { + logger.debug("Running " + g.urls.length + " test objects"); + g.manageSuite = true; + g.urls = g.urls.map(CreateUrls); + StartTests(); + } + } + } catch(e) { + ++g.testResults.Exception; + logger.error("EXCEPTION: " + e); + DoneTests(); + } +} + +function StartTests() +{ + /* These prefs are optional, so we don't need to spit an error to the log */ + try { + var prefs = Cc["@mozilla.org/preferences-service;1"]. + getService(Ci.nsIPrefBranch); + } catch(e) { + logger.error("EXCEPTION: " + e); + } + + g.noCanvasCache = prefs.getIntPref("reftest.nocache", false); + + g.shuffle = prefs.getBoolPref("reftest.shuffle", false); + + g.runUntilFailure = prefs.getBoolPref("reftest.runUntilFailure", false); + + g.verify = prefs.getBoolPref("reftest.verify", false); + + g.cleanupPendingCrashes = prefs.getBoolPref("reftest.cleanupPendingCrashes", false); + + // Check if there are any crash dump files from the startup procedure, before + // we start running the first test. Otherwise the first test might get + // blamed for producing a crash dump file when that was not the case. + CleanUpCrashDumpFiles(); + + // When we repeat this function is called again, so really only want to set + // g.repeat once. + if (g.repeat == null) { + g.repeat = prefs.getIntPref("reftest.repeat", 0); + } + + g.runSlowTests = prefs.getIntPref("reftest.skipslowtests", false); + + if (g.shuffle) { + g.noCanvasCache = true; + } + + try { + BuildUseCounts(); + + // Filter tests which will be skipped to get a more even distribution when chunking + // tURLs is a temporary array containing all active tests + var tURLs = new Array(); + for (var i = 0; i < g.urls.length; ++i) { + if (g.urls[i].skip) + continue; + + if (g.urls[i].needsFocus && !Focus()) + continue; + + if (g.urls[i].slow && !g.runSlowTests) + continue; + + tURLs.push(g.urls[i]); + } + + var numActiveTests = tURLs.length; + + if (g.totalChunks > 0 && g.thisChunk > 0) { + // Calculate start and end indices of this chunk if tURLs array were + // divided evenly + var testsPerChunk = tURLs.length / g.totalChunks; + var start = Math.round((g.thisChunk-1) * testsPerChunk); + var end = Math.round(g.thisChunk * testsPerChunk); + numActiveTests = end - start; + + // Map these indices onto the g.urls array. This avoids modifying the + // g.urls array which prevents skipped tests from showing up in the log + start = g.thisChunk == 1 ? 0 : g.urls.indexOf(tURLs[start]); + end = g.thisChunk == g.totalChunks ? 
g.urls.length : g.urls.indexOf(tURLs[end + 1]) - 1; + + logger.info("Running chunk " + g.thisChunk + " out of " + g.totalChunks + " chunks. " + + "tests " + (start+1) + "-" + end + "/" + g.urls.length); + + g.urls = g.urls.slice(start, end); + } + + if (g.manageSuite && !g.suiteStarted) { + var ids = {}; + g.urls.forEach(function(test) { + if (!(test.manifestID in ids)) { + ids[test.manifestID] = []; + } + ids[test.manifestID].push(test.identifier); + }); + var suite = prefs.getStringPref('reftest.suite', 'reftest'); + logger.suiteStart(ids, suite, {"skipped": g.urls.length - numActiveTests}); + g.suiteStarted = true + } + + if (g.shuffle) { + Shuffle(g.urls); + } + + g.totalTests = g.urls.length; + if (!g.totalTests && !g.verify && !g.repeat) + throw "No tests to run"; + + g.uriCanvases = {}; + + PerTestCoverageUtils.beforeTest() + .then(StartCurrentTest) + .catch(e => { + logger.error("EXCEPTION: " + e); + DoneTests(); + }); + } catch (ex) { + //g.browser.loadURI('data:text/plain,' + ex); + ++g.testResults.Exception; + logger.error("EXCEPTION: " + ex); + DoneTests(); + } +} + +function OnRefTestUnload() +{ + let plugin1 = getTestPlugin("Test Plug-in"); + let plugin2 = getTestPlugin("Second Test Plug-in"); + if (plugin1 && plugin2) { + plugin1.enabledState = g.testPluginEnabledStates[0]; + plugin2.enabledState = g.testPluginEnabledStates[1]; + } else { + logger.warning("Failed to get test plugin tags."); + } +} + +function AddURIUseCount(uri) +{ + if (uri == null) + return; + + var spec = uri.spec; + if (spec in g.uriUseCounts) { + g.uriUseCounts[spec]++; + } else { + g.uriUseCounts[spec] = 1; + } +} + +function BuildUseCounts() +{ + if (g.noCanvasCache) { + return; + } + + g.uriUseCounts = {}; + for (var i = 0; i < g.urls.length; ++i) { + var url = g.urls[i]; + if (!url.skip && + (url.type == TYPE_REFTEST_EQUAL || + url.type == TYPE_REFTEST_NOTEQUAL)) { + if (url.prefSettings1.length == 0) { + AddURIUseCount(g.urls[i].url1); + } + if (url.prefSettings2.length == 0) { + AddURIUseCount(g.urls[i].url2); + } + } + } +} + +// Return true iff this window is focused when this function returns. +function Focus() +{ + var fm = Cc["@mozilla.org/focus-manager;1"].getService(Ci.nsIFocusManager); + fm.focusedWindow = g.containingWindow; +#ifdef XP_MACOSX + try { + var dock = Cc["@mozilla.org/widget/macdocksupport;1"].getService(Ci.nsIMacDockSupport); + dock.activateApplication(true); + } catch(ex) { + } +#endif // XP_MACOSX + return true; +} + +function Blur() +{ + // On non-remote reftests, this will transfer focus to the dummy window + // we created to hold focus for non-needs-focus tests. Buggy tests + // (ones which require focus but don't request needs-focus) will then + // fail. + g.containingWindow.blur(); +} + +function StartCurrentTest() +{ + g.testLog = []; + + // make sure we don't run tests that are expected to kill the browser + while (g.urls.length > 0) { + var test = g.urls[0]; + logger.testStart(test.identifier); + if (test.skip) { + ++g.testResults.Skip; + logger.testEnd(test.identifier, "SKIP"); + g.urls.shift(); + } else if (test.needsFocus && !Focus()) { + // FIXME: Marking this as a known fail is dangerous! What + // if it starts failing all the time? 
+ ++g.testResults.Skip; + logger.testEnd(test.identifier, "SKIP", null, "(COULDN'T GET FOCUS)"); + g.urls.shift(); + } else if (test.slow && !g.runSlowTests) { + ++g.testResults.Slow; + logger.testEnd(test.identifier, "SKIP", null, "(SLOW)"); + g.urls.shift(); + } else { + break; + } + } + + if ((g.urls.length == 0 && g.repeat == 0) || + (g.runUntilFailure && HasUnexpectedResult())) { + RestoreChangedPreferences(); + DoneTests(); + } else if (g.urls.length == 0 && g.repeat > 0) { + // Repeat + g.repeat--; + ReadTests(); + } else { + if (g.urls[0].chaosMode) { + g.windowUtils.enterChaosMode(); + } + if (!g.urls[0].needsFocus) { + Blur(); + } + var currentTest = g.totalTests - g.urls.length; + g.containingWindow.document.title = "reftest: " + currentTest + " / " + g.totalTests + + " (" + Math.floor(100 * (currentTest / g.totalTests)) + "%)"; + StartCurrentURI(URL_TARGET_TYPE_TEST); + } +} + +// A simplified version of the function with the same name in tabbrowser.js. +function updateBrowserRemotenessByURL(aBrowser, aURL) { + var oa = E10SUtils.predictOriginAttributes({ browser: aBrowser }); + let remoteType = E10SUtils.getRemoteTypeForURI( + aURL, + aBrowser.ownerGlobal.docShell.nsILoadContext.useRemoteTabs, + aBrowser.ownerGlobal.docShell.nsILoadContext.useRemoteSubframes, + aBrowser.remoteType, + aBrowser.currentURI, + oa + ); + // Things get confused if we switch to not-remote + // for chrome:// URIs, so lets not for now. + if (remoteType == E10SUtils.NOT_REMOTE && + g.browserIsRemote) { + remoteType = aBrowser.remoteType; + } + if (aBrowser.remoteType != remoteType) { + if (remoteType == E10SUtils.NOT_REMOTE) { + aBrowser.removeAttribute("remote"); + aBrowser.removeAttribute("remoteType"); + } else { + aBrowser.setAttribute("remote", "true"); + aBrowser.setAttribute("remoteType", remoteType); + } + aBrowser.changeRemoteness({ remoteType }); + aBrowser.construct(); + + g.browserMessageManager = aBrowser.frameLoader.messageManager; + RegisterMessageListenersAndLoadContentScript(true); + return new Promise(resolve => { g.resolveContentReady = resolve; }); + } + + return Promise.resolve(); +} + +async function StartCurrentURI(aURLTargetType) +{ + const isStartingRef = (aURLTargetType == URL_TARGET_TYPE_REFERENCE); + + g.currentURL = g.urls[0][isStartingRef ? "url2" : "url1"].spec; + g.currentURLTargetType = aURLTargetType; + + RestoreChangedPreferences(); + + var prefs = Cc["@mozilla.org/preferences-service;1"]. + getService(Ci.nsIPrefBranch); + + const prefSettings = + g.urls[0][isStartingRef ? 
"prefSettings2" : "prefSettings1"]; + + if (prefSettings.length > 0) { + var badPref = undefined; + try { + prefSettings.forEach(function(ps) { + let prefExists = false; + try { + let prefType = prefs.getPrefType(ps.name); + prefExists = (prefType != prefs.PREF_INVALID); + } catch (e) { + } + if (!prefExists) { + logger.info("Pref " + ps.name + " not found, will be added"); + } + + let oldVal = undefined; + if (prefExists) { + if (ps.type == PREF_BOOLEAN) { + try { + oldVal = prefs.getBoolPref(ps.name); + } catch (e) { + badPref = "boolean preference '" + ps.name + "'"; + throw "bad pref"; + } + } else if (ps.type == PREF_STRING) { + try { + oldVal = prefs.getStringPref(ps.name); + } catch (e) { + badPref = "string preference '" + ps.name + "'"; + throw "bad pref"; + } + } else if (ps.type == PREF_INTEGER) { + try { + oldVal = prefs.getIntPref(ps.name); + } catch (e) { + badPref = "integer preference '" + ps.name + "'"; + throw "bad pref"; + } + } else { + throw "internal error - unknown preference type"; + } + } + if (!prefExists || oldVal != ps.value) { + g.prefsToRestore.push( { name: ps.name, + type: ps.type, + value: oldVal, + prefExisted: prefExists } ); + var value = ps.value; + if (ps.type == PREF_BOOLEAN) { + prefs.setBoolPref(ps.name, value); + } else if (ps.type == PREF_STRING) { + prefs.setStringPref(ps.name, value); + value = '"' + value + '"'; + } else if (ps.type == PREF_INTEGER) { + prefs.setIntPref(ps.name, value); + } + logger.info("SET PREFERENCE pref(" + ps.name + "," + value + ")"); + } + }); + } catch (e) { + if (e == "bad pref") { + var test = g.urls[0]; + if (test.expected == EXPECTED_FAIL) { + logger.testEnd(test.identifier, "FAIL", "FAIL", + "(SKIPPED; " + badPref + " not known or wrong type)"); + ++g.testResults.Skip; + } else { + logger.testEnd(test.identifier, "FAIL", "PASS", + badPref + " not known or wrong type"); + ++g.testResults.UnexpectedFail; + } + + // skip the test that had a bad preference + g.urls.shift(); + StartCurrentTest(); + return; + } else { + throw e; + } + } + } + + if (prefSettings.length == 0 && + g.uriCanvases[g.currentURL] && + (g.urls[0].type == TYPE_REFTEST_EQUAL || + g.urls[0].type == TYPE_REFTEST_NOTEQUAL) && + g.urls[0].maxAsserts == 0) { + // Pretend the document loaded --- RecordResult will notice + // there's already a canvas for this URL + g.containingWindow.setTimeout(RecordResult, 0); + } else { + var currentTest = g.totalTests - g.urls.length; + // Log this to preserve the same overall log format, + // should be removed if the format is updated + gDumpFn("REFTEST TEST-LOAD | " + g.currentURL + " | " + currentTest + " / " + g.totalTests + + " (" + Math.floor(100 * (currentTest / g.totalTests)) + "%)\n"); + TestBuffer("START " + g.currentURL); + await updateBrowserRemotenessByURL(g.browser, g.currentURL); + + var type = g.urls[0].type + if (TYPE_SCRIPT == type) { + SendLoadScriptTest(g.currentURL, g.loadTimeout); + } else if (TYPE_PRINT == type) { + SendLoadPrintTest(g.currentURL, g.loadTimeout); + } else { + SendLoadTest(type, g.currentURL, g.currentURLTargetType, g.loadTimeout); + } + } +} + +function DoneTests() +{ + PerTestCoverageUtils.afterTest() + .catch(e => logger.error("EXCEPTION: " + e)) + .then(() => { + if (g.manageSuite) { + g.suiteStarted = false + logger.suiteEnd({'results': g.testResults}); + } else { + logger._logData('results', {results: g.testResults}); + } + logger.info("Slowest test took " + g.slowestTestTime + "ms (" + g.slowestTestURL + ")"); + logger.info("Total canvas count = " + 
g.recycledCanvases.length); + if (g.failedUseWidgetLayers) { + LogWidgetLayersFailure(); + } + + function onStopped() { + if (g.logFile) { + g.logFile.close(); + g.logFile = null; + } + let appStartup = Cc["@mozilla.org/toolkit/app-startup;1"].getService(Ci.nsIAppStartup); + appStartup.quit(Ci.nsIAppStartup.eForceQuit); + } + if (g.server) { + g.server.stop(onStopped); + } + else { + onStopped(); + } + }); +} + +function UpdateCanvasCache(url, canvas) +{ + var spec = url.spec; + + --g.uriUseCounts[spec]; + + if (g.uriUseCounts[spec] == 0) { + ReleaseCanvas(canvas); + delete g.uriCanvases[spec]; + } else if (g.uriUseCounts[spec] > 0) { + g.uriCanvases[spec] = canvas; + } else { + throw "Use counts were computed incorrectly"; + } +} + +// Recompute drawWindow flags for every drawWindow operation. +// We have to do this every time since our window can be +// asynchronously resized (e.g. by the window manager, to make +// it fit on screen) at unpredictable times. +// Fortunately this is pretty cheap. +function DoDrawWindow(ctx, x, y, w, h) +{ + var flags = ctx.DRAWWINDOW_DRAW_CARET | ctx.DRAWWINDOW_DRAW_VIEW; + var testRect = g.browser.getBoundingClientRect(); + if (g.ignoreWindowSize || + (0 <= testRect.left && + 0 <= testRect.top && + g.containingWindow.innerWidth >= testRect.right && + g.containingWindow.innerHeight >= testRect.bottom)) { + // We can use the window's retained layer manager + // because the window is big enough to display the entire + // browser element + flags |= ctx.DRAWWINDOW_USE_WIDGET_LAYERS; + } else if (g.browserIsRemote) { + logger.error(g.currentURL + " | can't drawWindow remote content"); + ++g.testResults.Exception; + } + + if (g.drawWindowFlags != flags) { + // Every time the flags change, dump the new state. + g.drawWindowFlags = flags; + var flagsStr = "DRAWWINDOW_DRAW_CARET | DRAWWINDOW_DRAW_VIEW"; + if (flags & ctx.DRAWWINDOW_USE_WIDGET_LAYERS) { + flagsStr += " | DRAWWINDOW_USE_WIDGET_LAYERS"; + } else { + // Output a special warning because we need to be able to detect + // this whenever it happens. 
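+            // (The warning text itself lives in LogWidgetLayersFailure(); the
+            // flag set below also makes DoneTests() repeat that warning at the
+            // end of the run so it is hard to miss in the log.)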
+ LogWidgetLayersFailure(); + g.failedUseWidgetLayers = true; + } + logger.info("drawWindow flags = " + flagsStr + + "; window size = " + g.containingWindow.innerWidth + "," + g.containingWindow.innerHeight + + "; test browser size = " + testRect.width + "," + testRect.height); + } + + TestBuffer("DoDrawWindow " + x + "," + y + "," + w + "," + h); + ctx.drawWindow(g.containingWindow, x, y, w, h, "rgb(255,255,255)", + g.drawWindowFlags); +} + +function InitCurrentCanvasWithSnapshot() +{ + TestBuffer("Initializing canvas snapshot"); + + if (g.urls[0].type == TYPE_LOAD || g.urls[0].type == TYPE_SCRIPT || g.urls[0].type == TYPE_PRINT) { + // We don't want to snapshot this kind of test + return false; + } + + if (!g.currentCanvas) { + g.currentCanvas = AllocateCanvas(); + } + + var ctx = g.currentCanvas.getContext("2d"); + DoDrawWindow(ctx, 0, 0, g.currentCanvas.width, g.currentCanvas.height); + return true; +} + +function UpdateCurrentCanvasForInvalidation(rects) +{ + TestBuffer("Updating canvas for invalidation"); + + if (!g.currentCanvas) { + return; + } + + var ctx = g.currentCanvas.getContext("2d"); + for (var i = 0; i < rects.length; ++i) { + var r = rects[i]; + // Set left/top/right/bottom to pixel boundaries + var left = Math.floor(r.left); + var top = Math.floor(r.top); + var right = Math.ceil(r.right); + var bottom = Math.ceil(r.bottom); + + // Clamp the values to the canvas size + left = Math.max(0, Math.min(left, g.currentCanvas.width)); + top = Math.max(0, Math.min(top, g.currentCanvas.height)); + right = Math.max(0, Math.min(right, g.currentCanvas.width)); + bottom = Math.max(0, Math.min(bottom, g.currentCanvas.height)); + + ctx.save(); + ctx.translate(left, top); + DoDrawWindow(ctx, left, top, right - left, bottom - top); + ctx.restore(); + } +} + +function UpdateWholeCurrentCanvasForInvalidation() +{ + TestBuffer("Updating entire canvas for invalidation"); + + if (!g.currentCanvas) { + return; + } + + var ctx = g.currentCanvas.getContext("2d"); + DoDrawWindow(ctx, 0, 0, g.currentCanvas.width, g.currentCanvas.height); +} + +function RecordResult(testRunTime, errorMsg, typeSpecificResults) +{ + TestBuffer("RecordResult fired"); + + // Keep track of which test was slowest, and how long it took. + if (testRunTime > g.slowestTestTime) { + g.slowestTestTime = testRunTime; + g.slowestTestURL = g.currentURL; + } + + // Not 'const ...' because of 'EXPECTED_*' value dependency. 
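+    // The table below is keyed as outputs[expected][testPassed] and yields
+    // { s: [actual status, expected status], n: name of the counter in
+    // g.testResults to bump for this outcome }.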
+ var outputs = {}; + outputs[EXPECTED_PASS] = { + true: {s: ["PASS", "PASS"], n: "Pass"}, + false: {s: ["FAIL", "PASS"], n: "UnexpectedFail"} + }; + outputs[EXPECTED_FAIL] = { + true: {s: ["PASS", "FAIL"], n: "UnexpectedPass"}, + false: {s: ["FAIL", "FAIL"], n: "KnownFail"} + }; + outputs[EXPECTED_RANDOM] = { + true: {s: ["PASS", "PASS"], n: "Random"}, + false: {s: ["FAIL", "FAIL"], n: "Random"} + }; + // for EXPECTED_FUZZY we need special handling because we can have + // Pass, UnexpectedPass, or UnexpectedFail + + if ((g.currentURLTargetType == URL_TARGET_TYPE_TEST && g.urls[0].wrCapture.test) || + (g.currentURLTargetType == URL_TARGET_TYPE_REFERENCE && g.urls[0].wrCapture.ref)) { + logger.info("Running webrender capture"); + g.windowUtils.wrCapture(); + } + + var output; + var extra; + + if (g.urls[0].type == TYPE_LOAD) { + ++g.testResults.LoadOnly; + logger.testStatus(g.urls[0].identifier, "(LOAD ONLY)", "PASS", "PASS"); + g.currentCanvas = null; + FinishTestItem(); + return; + } + if (g.urls[0].type == TYPE_PRINT) { + switch (g.currentURLTargetType) { + case URL_TARGET_TYPE_TEST: + // First document has been loaded. + g.testPrintOutput = typeSpecificResults; + // Proceed to load the second document. + CleanUpCrashDumpFiles(); + StartCurrentURI(URL_TARGET_TYPE_REFERENCE); + break; + case URL_TARGET_TYPE_REFERENCE: + let pathToTestPdf = g.testPrintOutput; + let pathToRefPdf = typeSpecificResults; + comparePdfs(pathToTestPdf, pathToRefPdf, function(error, results) { + let expected = g.urls[0].expected; + // TODO: We should complain here if results is empty! + // (If it's empty, we'll spuriously succeed, regardless of + // our expectations) + if (error) { + output = outputs[expected][false]; + extra = { status_msg: output.n }; + ++g.testResults[output.n]; + logger.testEnd(g.urls[0].identifier, output.s[0], output.s[1], + error.message, null, extra); + } else { + let outputPair = outputs[expected]; + if (expected === EXPECTED_FAIL) { + let failureResults = results.filter(function (result) { return !result.passed }); + if (failureResults.length > 0) { + // We got an expected failure. Let's get rid of the + // passes from the results so we don't trigger + // TEST_UNEXPECTED_PASS logging for those. + results = failureResults; + } + // (else, we expected a failure but got none! + // Leave results untouched so we can log them.) + } + results.forEach(function(result) { + output = outputPair[result.passed]; + let extra = { status_msg: output.n }; + ++g.testResults[output.n]; + logger.testEnd(g.urls[0].identifier, output.s[0], output.s[1], + result.description, null, extra); + }); + } + FinishTestItem(); + }); + break; + default: + throw "Unexpected state."; + } + return; + } + if (g.urls[0].type == TYPE_SCRIPT) { + var expected = g.urls[0].expected; + + if (errorMsg) { + // Force an unexpected failure to alert the test author to fix the test. + expected = EXPECTED_PASS; + } else if (typeSpecificResults.length == 0) { + // This failure may be due to a JavaScript Engine bug causing + // early termination of the test. If we do not allow silent + // failure, report an error. + if (!g.urls[0].allowSilentFail) + errorMsg = "No test results reported. 
(SCRIPT)\n"; + else + logger.info("An expected silent failure occurred"); + } + + if (errorMsg) { + output = outputs[expected][false]; + extra = { status_msg: output.n }; + ++g.testResults[output.n]; + logger.testStatus(g.urls[0].identifier, errorMsg, output.s[0], output.s[1], null, null, extra); + FinishTestItem(); + return; + } + + var anyFailed = typeSpecificResults.some(function(result) { return !result.passed; }); + var outputPair; + if (anyFailed && expected == EXPECTED_FAIL) { + // If we're marked as expected to fail, and some (but not all) tests + // passed, treat those tests as though they were marked random + // (since we can't tell whether they were really intended to be + // marked failing or not). + outputPair = { true: outputs[EXPECTED_RANDOM][true], + false: outputs[expected][false] }; + } else { + outputPair = outputs[expected]; + } + var index = 0; + typeSpecificResults.forEach(function(result) { + var output = outputPair[result.passed]; + var extra = { status_msg: output.n }; + + ++g.testResults[output.n]; + logger.testStatus(g.urls[0].identifier, result.description + " item " + (++index), + output.s[0], output.s[1], null, null, extra); + }); + + if (anyFailed && expected == EXPECTED_PASS) { + FlushTestBuffer(); + } + + FinishTestItem(); + return; + } + + const isRecordingRef = + (g.currentURLTargetType == URL_TARGET_TYPE_REFERENCE); + const prefSettings = + g.urls[0][isRecordingRef ? "prefSettings2" : "prefSettings1"]; + + if (prefSettings.length == 0 && g.uriCanvases[g.currentURL]) { + g.currentCanvas = g.uriCanvases[g.currentURL]; + } + if (g.currentCanvas == null) { + logger.error(g.currentURL, "program error managing snapshots"); + ++g.testResults.Exception; + } + g[isRecordingRef ? "canvas2" : "canvas1"] = g.currentCanvas; + g.currentCanvas = null; + + ResetRenderingState(); + + switch (g.currentURLTargetType) { + case URL_TARGET_TYPE_TEST: + // First document has been loaded. + // Proceed to load the second document. + + CleanUpCrashDumpFiles(); + StartCurrentURI(URL_TARGET_TYPE_REFERENCE); + break; + case URL_TARGET_TYPE_REFERENCE: + // Both documents have been loaded. Compare the renderings and see + // if the comparison result matches the expected result specified + // in the manifest. + + // number of different pixels + var differences; + // whether the two renderings match: + var equal; + var maxDifference = {}; + // whether the allowed fuzziness from the annotations is exceeded + // by the actual comparison results + var fuzz_exceeded = false; + + // what is expected on this platform (PASS, FAIL, RANDOM, or FUZZY) + var expected = g.urls[0].expected; + + differences = g.windowUtils.compareCanvases(g.canvas1, g.canvas2, maxDifference); + + if (g.urls[0].noAutoFuzz) { + // Autofuzzing is disabled + } else if (isWebRenderOnAndroidDevice() && maxDifference.value <= 2 && differences > 0) { + // Autofuzz for WR on Android physical devices: Reduce any + // maxDifference of 2 to 0, because we get a lot of off-by-ones + // and off-by-twos that are very random and hard to annotate. + // In cases where the difference on any pixel component is more + // than 2 we require manual annotation. Note that this applies + // to both == tests and != tests, so != tests don't + // inadvertently pass due to a random off-by-one pixel + // difference. 
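+            // For example, a comparison result of (maxDifference=2, differences=137)
+            // is treated as (0, 0) below, while (3, 1) does not match this branch
+            // and still needs an explicit fuzzy/fuzzy-if annotation in the manifest.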
+ logger.info(`REFTEST wr-on-android dropping fuzz of (${maxDifference.value}, ${differences}) to (0, 0)`); + maxDifference.value = 0; + differences = 0; + } + + equal = (differences == 0); + + if (maxDifference.value > 0 && equal) { + throw "Inconsistent result from compareCanvases."; + } + + if (expected == EXPECTED_FUZZY) { + logger.info(`REFTEST fuzzy test ` + + `(${g.urls[0].fuzzyMinDelta}, ${g.urls[0].fuzzyMinPixels}) <= ` + + `(${maxDifference.value}, ${differences}) <= ` + + `(${g.urls[0].fuzzyMaxDelta}, ${g.urls[0].fuzzyMaxPixels})`); + fuzz_exceeded = maxDifference.value > g.urls[0].fuzzyMaxDelta || + differences > g.urls[0].fuzzyMaxPixels; + equal = !fuzz_exceeded && + maxDifference.value >= g.urls[0].fuzzyMinDelta && + differences >= g.urls[0].fuzzyMinPixels; + } + + var failedExtraCheck = g.failedNoPaint || g.failedNoDisplayList || g.failedDisplayList || g.failedOpaqueLayer || g.failedAssignedLayer; + + // whether the comparison result matches what is in the manifest + var test_passed = (equal == (g.urls[0].type == TYPE_REFTEST_EQUAL)) && !failedExtraCheck; + + if (expected != EXPECTED_FUZZY) { + output = outputs[expected][test_passed]; + } else if (test_passed) { + output = {s: ["PASS", "PASS"], n: "Pass"}; + } else if (g.urls[0].type == TYPE_REFTEST_EQUAL && + !failedExtraCheck && + !fuzz_exceeded) { + // If we get here, that means we had an '==' type test where + // at least one of the actual difference values was below the + // allowed range, but nothing else was wrong. So let's produce + // UNEXPECTED-PASS in this scenario. Also, if we enter this + // branch, 'equal' must be false so let's assert that to guard + // against logic errors. + if (equal) { + throw "Logic error in reftest.jsm fuzzy test handling!"; + } + output = {s: ["PASS", "FAIL"], n: "UnexpectedPass"}; + } else { + // In all other cases we fail the test + output = {s: ["FAIL", "PASS"], n: "UnexpectedFail"}; + } + extra = { status_msg: output.n }; + + ++g.testResults[output.n]; + + // It's possible that we failed both an "extra check" and the normal comparison, but we don't + // have a way to annotate these separately, so just print an error for the extra check failures. + if (failedExtraCheck) { + var failures = []; + if (g.failedNoPaint) { + failures.push("failed reftest-no-paint"); + } + if (g.failedNoDisplayList) { + failures.push("failed reftest-no-display-list"); + } + if (g.failedDisplayList) { + failures.push("failed reftest-display-list"); + } + // The g.failed*Messages arrays will contain messages from both the test and the reference. 
+ if (g.failedOpaqueLayer) { + failures.push("failed reftest-opaque-layer: " + g.failedOpaqueLayerMessages.join(", ")); + } + if (g.failedAssignedLayer) { + failures.push("failed reftest-assigned-layer: " + g.failedAssignedLayerMessages.join(", ")); + } + var failureString = failures.join(", "); + logger.testStatus(g.urls[0].identifier, failureString, output.s[0], output.s[1], null, null, extra); + } else { + var message = "image comparison, max difference: " + maxDifference.value + + ", number of differing pixels: " + differences; + if (!test_passed && expected == EXPECTED_PASS || + !test_passed && expected == EXPECTED_FUZZY || + test_passed && expected == EXPECTED_FAIL) { + if (!equal) { + extra.max_difference = maxDifference.value; + extra.differences = differences; + var image1 = g.canvas1.toDataURL(); + var image2 = g.canvas2.toDataURL(); + extra.reftest_screenshots = [ + {url:g.urls[0].identifier[0], + screenshot: image1.slice(image1.indexOf(",") + 1)}, + g.urls[0].identifier[1], + {url:g.urls[0].identifier[2], + screenshot: image2.slice(image2.indexOf(",") + 1)} + ]; + extra.image1 = image1; + extra.image2 = image2; + } else { + var image1 = g.canvas1.toDataURL(); + extra.reftest_screenshots = [ + {url:g.urls[0].identifier[0], + screenshot: image1.slice(image1.indexOf(",") + 1)} + ]; + extra.image1 = image1; + } + } + logger.testStatus(g.urls[0].identifier, message, output.s[0], output.s[1], null, null, extra); + + if (g.noCanvasCache) { + ReleaseCanvas(g.canvas1); + ReleaseCanvas(g.canvas2); + } else { + if (g.urls[0].prefSettings1.length == 0) { + UpdateCanvasCache(g.urls[0].url1, g.canvas1); + } + if (g.urls[0].prefSettings2.length == 0) { + UpdateCanvasCache(g.urls[0].url2, g.canvas2); + } + } + } + + if ((!test_passed && expected == EXPECTED_PASS) || (test_passed && expected == EXPECTED_FAIL)) { + FlushTestBuffer(); + } + + CleanUpCrashDumpFiles(); + FinishTestItem(); + break; + default: + throw "Unexpected state."; + } +} + +function LoadFailed(why) +{ + ++g.testResults.FailedLoad; + if (!why) { + // reftest-content.js sets an initial reason before it sets the + // timeout that will call us with the currently set reason, so we + // should never get here. If we do then there's a logic error + // somewhere. Perhaps tests are somehow running overlapped and the + // timeout for one test is not being cleared before the timeout for + // another is set? Maybe there's some sort of race? 
+ logger.error("load failed with unknown reason (we should always have a reason!)"); + } + logger.testStatus(g.urls[0].identifier, "load failed: " + why, "FAIL", "PASS"); + FlushTestBuffer(); + FinishTestItem(); +} + +function RemoveExpectedCrashDumpFiles() +{ + if (g.expectingProcessCrash) { + for (let crashFilename of g.expectedCrashDumpFiles) { + let file = g.crashDumpDir.clone(); + file.append(crashFilename); + if (file.exists()) { + file.remove(false); + } + } + } + g.expectedCrashDumpFiles.length = 0; +} + +function FindUnexpectedCrashDumpFiles() +{ + if (!g.crashDumpDir.exists()) { + return; + } + + let entries = g.crashDumpDir.directoryEntries; + if (!entries) { + return; + } + + let foundCrashDumpFile = false; + while (entries.hasMoreElements()) { + let file = entries.nextFile; + let path = String(file.path); + if (path.match(/\.(dmp|extra)$/) && !g.unexpectedCrashDumpFiles[path]) { + if (!foundCrashDumpFile) { + ++g.testResults.UnexpectedFail; + foundCrashDumpFile = true; + if (g.currentURL) { + logger.testStatus(g.urls[0].identifier, "crash-check", "FAIL", "PASS", "This test left crash dumps behind, but we weren't expecting it to!"); + } else { + logger.error("Harness startup left crash dumps behind, but we weren't expecting it to!"); + } + } + logger.info("Found unexpected crash dump file " + path); + g.unexpectedCrashDumpFiles[path] = true; + } + } +} + +function RemovePendingCrashDumpFiles() +{ + if (!g.pendingCrashDumpDir.exists()) { + return; + } + + let entries = g.pendingCrashDumpDir.directoryEntries; + while (entries.hasMoreElements()) { + let file = entries.nextFile; + if (file.isFile()) { + file.remove(false); + logger.info("This test left pending crash dumps; deleted "+file.path); + } + } +} + +function CleanUpCrashDumpFiles() +{ + RemoveExpectedCrashDumpFiles(); + FindUnexpectedCrashDumpFiles(); + if (g.cleanupPendingCrashes) { + RemovePendingCrashDumpFiles(); + } + g.expectingProcessCrash = false; +} + +function FinishTestItem() +{ + logger.testEnd(g.urls[0].identifier, "OK"); + + // Replace document with BLANK_URL_FOR_CLEARING in case there are + // assertions when unloading. + logger.debug("Loading a blank page"); + // After clearing, content will notify us of the assertion count + // and tests will continue. + SendClear(); + g.failedNoPaint = false; + g.failedNoDisplayList = false; + g.failedDisplayList = false; + g.failedOpaqueLayer = false; + g.failedOpaqueLayerMessages = []; + g.failedAssignedLayer = false; + g.failedAssignedLayerMessages = []; +} + +function DoAssertionCheck(numAsserts) +{ + if (g.debug.isDebugBuild) { + if (g.browserIsRemote) { + // Count chrome-process asserts too when content is out of + // process. + var newAssertionCount = g.debug.assertionCount; + var numLocalAsserts = newAssertionCount - g.assertionCount; + g.assertionCount = newAssertionCount; + + numAsserts += numLocalAsserts; + } + + var minAsserts = g.urls[0].minAsserts; + var maxAsserts = g.urls[0].maxAsserts; + + if (numAsserts < minAsserts) { + ++g.testResults.AssertionUnexpectedFixed; + } else if (numAsserts > maxAsserts) { + ++g.testResults.AssertionUnexpected; + } else if (numAsserts != 0) { + ++g.testResults.AssertionKnown; + } + logger.assertionCount(g.urls[0].identifier, numAsserts, minAsserts, maxAsserts); + } + + if (g.urls[0].chaosMode) { + g.windowUtils.leaveChaosMode(); + } + + // And start the next test. 
+ g.urls.shift(); + StartCurrentTest(); +} + +function ResetRenderingState() +{ + SendResetRenderingState(); + // We would want to clear any viewconfig here, if we add support for it +} + +function RestoreChangedPreferences() +{ + if (g.prefsToRestore.length > 0) { + var prefs = Cc["@mozilla.org/preferences-service;1"]. + getService(Ci.nsIPrefBranch); + g.prefsToRestore.reverse(); + g.prefsToRestore.forEach(function(ps) { + if (ps.prefExisted) { + var value = ps.value; + if (ps.type == PREF_BOOLEAN) { + prefs.setBoolPref(ps.name, value); + } else if (ps.type == PREF_STRING) { + prefs.setStringPref(ps.name, value); + value = '"' + value + '"'; + } else if (ps.type == PREF_INTEGER) { + prefs.setIntPref(ps.name, value); + } + logger.info("RESTORE PREFERENCE pref(" + ps.name + "," + value + ")"); + } else { + prefs.clearUserPref(ps.name); + logger.info("RESTORE PREFERENCE pref(" + ps.name + ", <no value set>) (clearing user pref)"); + } + }); + g.prefsToRestore = []; + } +} + +function RegisterMessageListenersAndLoadContentScript(aReload) +{ + g.browserMessageManager.addMessageListener( + "reftest:AssertionCount", + function (m) { RecvAssertionCount(m.json.count); } + ); + g.browserMessageManager.addMessageListener( + "reftest:ContentReady", + function (m) { return RecvContentReady(m.data); } + ); + g.browserMessageManager.addMessageListener( + "reftest:Exception", + function (m) { RecvException(m.json.what) } + ); + g.browserMessageManager.addMessageListener( + "reftest:FailedLoad", + function (m) { RecvFailedLoad(m.json.why); } + ); + g.browserMessageManager.addMessageListener( + "reftest:FailedNoPaint", + function (m) { RecvFailedNoPaint(); } + ); + g.browserMessageManager.addMessageListener( + "reftest:FailedNoDisplayList", + function (m) { RecvFailedNoDisplayList(); } + ); + g.browserMessageManager.addMessageListener( + "reftest:FailedDisplayList", + function (m) { RecvFailedDisplayList(); } + ); + g.browserMessageManager.addMessageListener( + "reftest:FailedOpaqueLayer", + function (m) { RecvFailedOpaqueLayer(m.json.why); } + ); + g.browserMessageManager.addMessageListener( + "reftest:FailedAssignedLayer", + function (m) { RecvFailedAssignedLayer(m.json.why); } + ); + g.browserMessageManager.addMessageListener( + "reftest:InitCanvasWithSnapshot", + function (m) { return RecvInitCanvasWithSnapshot(); } + ); + g.browserMessageManager.addMessageListener( + "reftest:Log", + function (m) { RecvLog(m.json.type, m.json.msg); } + ); + g.browserMessageManager.addMessageListener( + "reftest:ScriptResults", + function (m) { RecvScriptResults(m.json.runtimeMs, m.json.error, m.json.results); } + ); + g.browserMessageManager.addMessageListener( + "reftest:StartPrint", + function (m) { RecvStartPrint(m.json.isPrintSelection, m.json.printRange); } + ); + g.browserMessageManager.addMessageListener( + "reftest:PrintResult", + function (m) { RecvPrintResult(m.json.runtimeMs, m.json.status, m.json.fileName); } + ); + g.browserMessageManager.addMessageListener( + "reftest:TestDone", + function (m) { RecvTestDone(m.json.runtimeMs); } + ); + g.browserMessageManager.addMessageListener( + "reftest:UpdateCanvasForInvalidation", + function (m) { RecvUpdateCanvasForInvalidation(m.json.rects); } + ); + g.browserMessageManager.addMessageListener( + "reftest:UpdateWholeCanvasForInvalidation", + function (m) { RecvUpdateWholeCanvasForInvalidation(); } + ); + g.browserMessageManager.addMessageListener( + "reftest:ExpectProcessCrash", + function (m) { RecvExpectProcessCrash(); } + ); + + 
g.browserMessageManager.loadFrameScript("resource://reftest/reftest-content.js", true, true); + + if (aReload) { + return; + } + + ChromeUtils.registerWindowActor("ReftestFission", { + parent: { + moduleURI: "resource://reftest/ReftestFissionParent.jsm", + }, + child: { + moduleURI: "resource://reftest/ReftestFissionChild.jsm", + events: { + MozAfterPaint: {}, + }, + }, + allFrames: true, + includeChrome: true, + }); +} + +function RecvAssertionCount(count) +{ + DoAssertionCheck(count); +} + +function RecvContentReady(info) +{ + if (g.resolveContentReady) { + g.resolveContentReady(); + g.resolveContentReady = null; + } else { + g.contentGfxInfo = info.gfx; + InitAndStartRefTests(); + } + return { remote: g.browserIsRemote }; +} + +function RecvException(what) +{ + logger.error(g.currentURL + " | " + what); + ++g.testResults.Exception; +} + +function RecvFailedLoad(why) +{ + LoadFailed(why); +} + +function RecvFailedNoPaint() +{ + g.failedNoPaint = true; +} + +function RecvFailedNoDisplayList() +{ + g.failedNoDisplayList = true; +} + +function RecvFailedDisplayList() +{ + g.failedDisplayList = true; +} + +function RecvFailedOpaqueLayer(why) { + g.failedOpaqueLayer = true; + g.failedOpaqueLayerMessages.push(why); +} + +function RecvFailedAssignedLayer(why) { + g.failedAssignedLayer = true; + g.failedAssignedLayerMessages.push(why); +} + +function RecvInitCanvasWithSnapshot() +{ + var painted = InitCurrentCanvasWithSnapshot(); + return { painted: painted }; +} + +function RecvLog(type, msg) +{ + msg = "[CONTENT] " + msg; + if (type == "info") { + TestBuffer(msg); + } else if (type == "warning") { + logger.warning(msg); + } else if (type == "error") { + logger.error("REFTEST TEST-UNEXPECTED-FAIL | " + g.currentURL + " | " + msg + "\n"); + ++g.testResults.Exception; + } else { + logger.error("REFTEST TEST-UNEXPECTED-FAIL | " + g.currentURL + " | unknown log type " + type + "\n"); + ++g.testResults.Exception; + } +} + +function RecvScriptResults(runtimeMs, error, results) +{ + RecordResult(runtimeMs, error, results); +} + +function RecvStartPrint(isPrintSelection, printRange) +{ + let fileName =`reftest-print-${Date.now()}-`; + crypto.getRandomValues(new Uint8Array(4)).forEach(x => fileName += x.toString(16)); + fileName += ".pdf" + let file = Services.dirsvc.get("TmpD", Ci.nsIFile); + file.append(fileName); + + let PSSVC = Cc["@mozilla.org/gfx/printsettings-service;1"].getService(Ci.nsIPrintSettingsService); + let ps = PSSVC.newPrintSettings; + ps.printSilent = true; + ps.showPrintProgress = false; + ps.printBGImages = true; + ps.printBGColors = true; + ps.unwriteableMarginTop = 0; + ps.unwriteableMarginRight = 0; + ps.unwriteableMarginLeft = 0; + ps.unwriteableMarginBottom = 0; + ps.printToFile = true; + ps.toFileName = file.path; + ps.outputFormat = Ci.nsIPrintSettings.kOutputFormatPDF; + ps.printSelectionOnly = isPrintSelection; + if (printRange) { + ps.pageRanges = printRange.split(',').map(function(r) { + let range = r.split('-'); + return [+range[0] || 1, +range[1] || 1] + }).flat(); + } + + var prefs = Cc["@mozilla.org/preferences-service;1"]. 
+ getService(Ci.nsIPrefBranch); + ps.printInColor = prefs.getBoolPref("print.print_in_color", true); + + g.browser.print(g.browser.outerWindowID, ps) + .then(() => SendPrintDone(Cr.NS_OK, file.path)) + .catch(exception => SendPrintDone(exception.code, file.path)); +} + +function RecvPrintResult(runtimeMs, status, fileName) +{ + if (!Components.isSuccessCode(status)) { + logger.error("REFTEST TEST-UNEXPECTED-FAIL | " + g.currentURL + " | error during printing\n"); + ++g.testResults.Exception; + } + RecordResult(runtimeMs, '', fileName); +} + +function RecvTestDone(runtimeMs) +{ + RecordResult(runtimeMs, '', [ ]); +} + +function RecvUpdateCanvasForInvalidation(rects) +{ + UpdateCurrentCanvasForInvalidation(rects); +} + +function RecvUpdateWholeCanvasForInvalidation() +{ + UpdateWholeCurrentCanvasForInvalidation(); +} + +function OnProcessCrashed(subject, topic, data) +{ + let id; + let additionalDumps; + let propbag = subject.QueryInterface(Ci.nsIPropertyBag2); + + if (topic == "plugin-crashed") { + id = propbag.get("pluginDumpID"); + additionalDumps = propbag.getPropertyAsACString("additionalMinidumps"); + } else if (topic == "ipc:content-shutdown") { + id = propbag.get("dumpID"); + } + + if (id) { + g.expectedCrashDumpFiles.push(id + ".dmp"); + g.expectedCrashDumpFiles.push(id + ".extra"); + } + + if (additionalDumps && additionalDumps.length != 0) { + for (const name of additionalDumps.split(',')) { + g.expectedCrashDumpFiles.push(id + "-" + name + ".dmp"); + } + } +} + +function RegisterProcessCrashObservers() +{ + var os = Cc[NS_OBSERVER_SERVICE_CONTRACTID] + .getService(Ci.nsIObserverService); + os.addObserver(OnProcessCrashed, "plugin-crashed"); + os.addObserver(OnProcessCrashed, "ipc:content-shutdown"); +} + +function RecvExpectProcessCrash() +{ + g.expectingProcessCrash = true; +} + +function SendClear() +{ + g.browserMessageManager.sendAsyncMessage("reftest:Clear"); +} + +function SendLoadScriptTest(uri, timeout) +{ + g.browserMessageManager.sendAsyncMessage("reftest:LoadScriptTest", + { uri: uri, timeout: timeout }); +} + +function SendLoadPrintTest(uri, timeout) +{ + g.browserMessageManager.sendAsyncMessage("reftest:LoadPrintTest", + { uri: uri, timeout: timeout }); +} + +function SendLoadTest(type, uri, uriTargetType, timeout) +{ + g.browserMessageManager.sendAsyncMessage("reftest:LoadTest", + { type: type, uri: uri, + uriTargetType: uriTargetType, + timeout: timeout } + ); +} + +function SendResetRenderingState() +{ + g.browserMessageManager.sendAsyncMessage("reftest:ResetRenderingState"); +} + +function SendPrintDone(status, fileName) +{ + g.browserMessageManager.sendAsyncMessage("reftest:PrintDone", { status, fileName }); +} + +var pdfjsHasLoaded; + +function pdfjsHasLoadedPromise() { + if (pdfjsHasLoaded === undefined) { + pdfjsHasLoaded = new Promise((resolve, reject) => { + let doc = g.containingWindow.document; + const script = doc.createElement("script"); + script.src = "resource://pdf.js/build/pdf.js"; + script.onload = resolve; + script.onerror = () => reject(new Error("PDF.js script load failed.")); + doc.documentElement.appendChild(script); + }); + } + + return pdfjsHasLoaded; +} + +function readPdf(path, callback) { + OS.File.open(path, { read: true }).then(function (file) { + file.read().then(function (data) { + pdfjsLib.GlobalWorkerOptions.workerSrc = "resource://pdf.js/build/pdf.worker.js"; + pdfjsLib.getDocument({ + data: data + }).promise.then(function (pdf) { + callback(null, pdf); + }, function (e) { + callback(new Error(`Couldn't parse ${path}, exception: 
${e}`)); + }); + return; + }, function (e) { + callback(new Error(`Couldn't read PDF ${path}, exception: ${e}`)); + }); + }); +} + +function comparePdfs(pathToTestPdf, pathToRefPdf, callback) { + pdfjsHasLoadedPromise().then(() => + Promise.all([pathToTestPdf, pathToRefPdf].map(function(path) { + return new Promise(function(resolve, reject) { + readPdf(path, function(error, pdf) { + // Resolve or reject outer promise. reject and resolve are + // passed to the callback function given as first arguments + // to the Promise constructor. + if (error) { + reject(error); + } else { + resolve(pdf); + } + }); + }); + }))).then(function(pdfs) { + let numberOfPages = pdfs[1].numPages; + let sameNumberOfPages = numberOfPages === pdfs[0].numPages; + + let resultPromises = [Promise.resolve({ + passed: sameNumberOfPages, + description: "Expected number of pages: " + numberOfPages + + ", got " + pdfs[0].numPages + })]; + + if (sameNumberOfPages) { + for (let i = 0; i < numberOfPages; i++) { + let pageNum = i + 1; + let testPagePromise = pdfs[0].getPage(pageNum); + let refPagePromise = pdfs[1].getPage(pageNum); + resultPromises.push(new Promise(function(resolve, reject) { + Promise.all([testPagePromise, refPagePromise]).then(function(pages) { + let testTextPromise = pages[0].getTextContent(); + let refTextPromise = pages[1].getTextContent(); + Promise.all([testTextPromise, refTextPromise]).then(function(texts) { + let testTextItems = texts[0].items; + let refTextItems = texts[1].items; + let testText; + let refText; + let passed = refTextItems.every(function(o, i) { + refText = o.str; + if (!testTextItems[i]) { + return false; + } + testText = testTextItems[i].str; + return testText === refText; + }); + let description; + if (passed) { + if (testTextItems.length > refTextItems.length) { + passed = false; + description = "Page " + pages[0].pageNumber + + " contains unexpected text like '" + + testTextItems[refTextItems.length].str + "'"; + } else { + description = "Page " + pages[0].pageNumber + + " contains same text" + } + } else { + description = "Expected page " + pages[0].pageNumber + + " to contain text '" + refText; + if (testText) { + description += "' but found '" + testText + + "' instead"; + } + } + resolve({ + passed: passed, + description: description + }); + }, reject); + }, reject); + })); + } + } + + Promise.all(resultPromises).then(function (results) { + callback(null, results); + }); + }, function(error) { + callback(error); + }); +} diff --git a/layout/tools/reftest/reftest.xhtml b/layout/tools/reftest/reftest.xhtml new file mode 100644 index 0000000000..924d9e73d2 --- /dev/null +++ b/layout/tools/reftest/reftest.xhtml @@ -0,0 +1,13 @@ +<!-- vim: set shiftwidth=4 tabstop=8 autoindent expandtab: --> +<!-- This Source Code Form is subject to the terms of the Mozilla Public + - License, v. 2.0. If a copy of the MPL was not distributed with this + - file, You can obtain one at http://mozilla.org/MPL/2.0/. 
--> +<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul" + id="reftest-window" + hidechrome="true" + onload="OnRefTestLoad();" + onunload="OnRefTestUnload();" + style="background:white; overflow:hidden"> + <script type="application/ecmascript" src="resource://reftest/reftest.jsm" /> + <!-- The reftest browser element is dynamically created, here --> +</window> diff --git a/layout/tools/reftest/reftest/__init__.py b/layout/tools/reftest/reftest/__init__.py new file mode 100644 index 0000000000..601014a6ca --- /dev/null +++ b/layout/tools/reftest/reftest/__init__.py @@ -0,0 +1,165 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import unicode_literals, absolute_import, print_function + +import io +import os +import re +import six + +RE_COMMENT = re.compile(r"\s+#") +RE_HTTP = re.compile(r"HTTP\((\.\.(\/\.\.)*)\)") +RE_PROTOCOL = re.compile(r"^\w+:") +FAILURE_TYPES = ( + "fails", + "fails-if", + "needs-focus", + "random", + "random-if", + "silentfail", + "silentfail-if", + "skip", + "skip-if", + "slow", + "slow-if", + "fuzzy", + "fuzzy-if", + "require-or", + "asserts", + "asserts-if", +) +PREF_ITEMS = ( + "pref", + "test-pref", + "ref-pref", +) +RE_ANNOTATION = re.compile(r"(.*)\((.*)\)") + + +class ReftestManifest(object): + """Represents a parsed reftest manifest.""" + + def __init__(self, finder=None): + self.path = None + self.dirs = set() + self.files = set() + self.manifests = set() + self.tests = [] + self.finder = finder + + def load(self, path): + """Parse a reftest manifest file.""" + + def add_test(file, annotations, referenced_test=None): + # We can't package about:, data:, or chrome: URIs. + # Discarding data isn't correct for a parser. But retaining + # all data isn't currently a requirement. + if RE_PROTOCOL.match(file): + return + test = os.path.normpath(os.path.join(mdir, urlprefix + file)) + if test in self.files: + # if test path has already been added, make no changes, to + # avoid duplicate paths in self.tests + return + self.files.add(test) + self.dirs.add(os.path.dirname(test)) + test_dict = { + "path": test, + "here": os.path.dirname(test), + "manifest": normalized_path, + "name": os.path.basename(test), + "head": "", + "support-files": "", + "subsuite": "", + } + if referenced_test: + test_dict["referenced-test"] = referenced_test + for annotation in annotations: + m = RE_ANNOTATION.match(annotation) + if m: + if m.group(1) not in test_dict: + test_dict[m.group(1)] = m.group(2) + else: + test_dict[m.group(1)] += ";" + m.group(2) + else: + test_dict[annotation] = None + self.tests.append(test_dict) + + normalized_path = os.path.normpath(os.path.abspath(path)) + self.manifests.add(normalized_path) + if not self.path: + self.path = normalized_path + + mdir = os.path.dirname(normalized_path) + self.dirs.add(mdir) + + if self.finder: + lines = self.finder.get(path).read().splitlines() + else: + with io.open(path, "r", encoding="utf-8") as fh: + lines = fh.read().splitlines() + + urlprefix = "" + defaults = [] + for i, line in enumerate(lines): + lineno = i + 1 + line = six.ensure_text(line) + + # Entire line is a comment. + if line.startswith("#"): + continue + + # Comments can begin mid line. Strip them. 
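For illustration, a self-contained sketch of how one manifest line is tokenized by the parser above and below; the sample line is hypothetical, and RE_COMMENT / RE_ANNOTATION mirror the module-level patterns:

    import re

    # Same patterns as the module above.
    RE_COMMENT = re.compile(r"\s+#")
    RE_ANNOTATION = re.compile(r"(.*)\((.*)\)")

    line = "fuzzy-if(winWidget,0-2,0-30) == test.html ref.html  # bug 123456"

    # Strip the mid-line comment, exactly as done with m.start() below.
    m = RE_COMMENT.search(line)
    if m:
        line = line[: m.start()]

    items = line.split()
    # The leading token is an annotation; RE_ANNOTATION splits it into a key
    # and an argument string, which add_test() stores on the test dict.
    key, arg = RE_ANNOTATION.match(items[0]).groups()
    print(key, arg, items[1:])
    # -> fuzzy-if winWidget,0-2,0-30 ['==', 'test.html', 'ref.html']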
+ m = RE_COMMENT.search(line) + if m: + line = line[: m.start()] + line = line.strip() + if not line: + continue + + items = line.split() + if items[0] == "defaults": + defaults = items[1:] + continue + + items = defaults + items + annotations = [] + for i in range(len(items)): + item = items[i] + + if item.startswith(FAILURE_TYPES) or item.startswith(PREF_ITEMS): + annotations += [item] + continue + if item == "HTTP": + continue + + m = RE_HTTP.match(item) + if m: + # Need to package the referenced directory. + self.dirs.add(os.path.normpath(os.path.join(mdir, m.group(1)))) + continue + + if i < len(defaults): + raise ValueError( + "Error parsing manifest {}, line {}: " + "Invalid defaults token '{}'".format(path, lineno, item) + ) + + if item == "url-prefix": + urlprefix = items[i + 1] + break + + if item == "include": + self.load(os.path.join(mdir, items[i + 1])) + break + + if item == "load" or item == "script": + add_test(items[i + 1], annotations) + break + + if item == "==" or item == "!=" or item == "print": + add_test(items[i + 1], annotations) + add_test(items[i + 2], annotations, items[i + 1]) + break diff --git a/layout/tools/reftest/reftestcommandline.py b/layout/tools/reftest/reftestcommandline.py new file mode 100644 index 0000000000..b6fe7fa3d3 --- /dev/null +++ b/layout/tools/reftest/reftestcommandline.py @@ -0,0 +1,655 @@ +from __future__ import absolute_import, print_function + +import argparse +import os +import sys +from collections import OrderedDict +import mozinfo +import mozlog + +from six.moves.urllib.parse import urlparse + +here = os.path.abspath(os.path.dirname(__file__)) + + +class ReftestArgumentsParser(argparse.ArgumentParser): + def __init__(self, **kwargs): + super(ReftestArgumentsParser, self).__init__(**kwargs) + + # Try to import a MozbuildObject. Success indicates that we are + # running from a source tree. This allows some defaults to be set + # from the source tree. 
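A brief usage sketch of the ReftestManifest class defined above; the manifest path is hypothetical, and the import assumes the package directory layout/tools/reftest/reftest is on sys.path:

    from reftest import ReftestManifest

    mf = ReftestManifest()
    mf.load("layout/reftests/bugs/reftest.list")   # hypothetical manifest path

    # After load(), the parsed data is available as plain collections:
    #   mf.tests      - one dict per test (path, name, manifest, annotations, ...)
    #   mf.manifests  - every manifest reached through `include` directives
    #   mf.dirs       - directories that must be packaged for the tests to run
    for test in mf.tests[:3]:
        print(test["name"], test.get("referenced-test", ""))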
+ try: + from mozbuild.base import MozbuildObject + + self.build_obj = MozbuildObject.from_environment(cwd=here) + except ImportError: + self.build_obj = None + + self.add_argument( + "--xre-path", + action="store", + type=str, + dest="xrePath", + # individual scripts will set a sane default + default=None, + help="absolute path to directory containing XRE (probably xulrunner)", + ) + + self.add_argument( + "--symbols-path", + action="store", + type=str, + dest="symbolsPath", + default=None, + help="absolute path to directory containing breakpad symbols, " + "or the URL of a zip file containing symbols", + ) + + self.add_argument( + "--debugger", + action="store", + dest="debugger", + help="use the given debugger to launch the application", + ) + + self.add_argument( + "--debugger-args", + action="store", + dest="debuggerArgs", + help="pass the given args to the debugger _before_ " + "the application on the command line", + ) + + self.add_argument( + "--debugger-interactive", + action="store_true", + dest="debuggerInteractive", + help="prevents the test harness from redirecting " + "stdout and stderr for interactive debuggers", + ) + + self.add_argument( + "--appname", + action="store", + type=str, + dest="app", + default=None, + help="absolute path to application, overriding default", + ) + + self.add_argument( + "--extra-profile-file", + action="append", + dest="extraProfileFiles", + default=[], + help="copy specified files/dirs to testing profile", + ) + + self.add_argument( + "--timeout", + action="store", + dest="timeout", + type=int, + default=300, # 5 minutes per bug 479518 + help="reftest will timeout in specified number of seconds. " + "[default %(default)s].", + ) + + self.add_argument( + "--leak-threshold", + action="store", + type=int, + dest="defaultLeakThreshold", + default=0, + help="fail if the number of bytes leaked in default " + "processes through refcounted objects (or bytes " + "in classes with MOZ_COUNT_CTOR and MOZ_COUNT_DTOR) " + "is greater than the given number", + ) + + self.add_argument( + "--utility-path", + action="store", + type=str, + dest="utilityPath", + default=self.build_obj.bindir if self.build_obj else None, + help="absolute path to directory containing utility " + "programs (xpcshell, ssltunnel, certutil)", + ) + + self.add_argument( + "--total-chunks", + type=int, + dest="totalChunks", + help="how many chunks to split the tests up into", + ) + + self.add_argument( + "--this-chunk", + type=int, + dest="thisChunk", + help="which chunk to run between 1 and --total-chunks", + ) + + self.add_argument( + "--log-file", + action="store", + type=str, + dest="logFile", + default=None, + help="file to log output to in addition to stdout", + ) + + self.add_argument( + "--skip-slow-tests", + dest="skipSlowTests", + action="store_true", + default=False, + help="skip tests marked as slow when running", + ) + + self.add_argument( + "--ignore-window-size", + dest="ignoreWindowSize", + action="store_true", + default=False, + help="ignore the window size, which may cause spurious " + "failures and passes", + ) + + self.add_argument( + "--install-extension", + action="append", + dest="extensionsToInstall", + default=[], + help="install the specified extension in the testing profile. " + "The extension file's name should be <id>.xpi where <id> is " + "the extension's id as indicated in its install.rdf. 
" + "An optional path can be specified too.", + ) + + self.add_argument( + "--marionette", + default=None, + help="host:port to use when connecting to Marionette", + ) + + self.add_argument( + "--marionette-socket-timeout", default=None, help=argparse.SUPPRESS + ) + + self.add_argument( + "--marionette-startup-timeout", default=None, help=argparse.SUPPRESS + ) + + self.add_argument( + "--setenv", + action="append", + type=str, + default=[], + dest="environment", + metavar="NAME=VALUE", + help="sets the given variable in the application's " "environment", + ) + + self.add_argument( + "--filter", + action="store", + type=str, + dest="filter", + help="specifies a regular expression (as could be passed to the JS " + "RegExp constructor) to test against URLs in the reftest manifest; " + "only test items that have a matching test URL will be run.", + ) + + self.add_argument( + "--shuffle", + action="store_true", + default=False, + dest="shuffle", + help="run reftests in random order", + ) + + self.add_argument( + "--run-until-failure", + action="store_true", + default=False, + dest="runUntilFailure", + help="stop running on the first failure. Useful for RR recordings.", + ) + + self.add_argument( + "--repeat", + action="store", + type=int, + default=0, + dest="repeat", + help="number of times the select test(s) will be executed. Useful for " + "finding intermittent failures.", + ) + + self.add_argument( + "--focus-filter-mode", + action="store", + type=str, + dest="focusFilterMode", + default="all", + help="filters tests to run by whether they require focus. " + "Valid values are `all', `needs-focus', or `non-needs-focus'. " + "Defaults to `all'.", + ) + + self.add_argument( + "--disable-e10s", + action="store_false", + default=True, + dest="e10s", + help="disables content processes", + ) + + self.add_argument( + "--enable-fission", + action="store_true", + default=False, + dest="fission", + help="Run tests with fission (site isolation) enabled.", + ) + + self.add_argument( + "--setpref", + action="append", + type=str, + default=[], + dest="extraPrefs", + metavar="PREF=VALUE", + help="defines an extra user preference", + ) + + self.add_argument( + "--reftest-extension-path", + action="store", + dest="reftestExtensionPath", + help="Path to the reftest extension", + ) + + self.add_argument( + "--special-powers-extension-path", + action="store", + dest="specialPowersExtensionPath", + help="Path to the special powers extension", + ) + + self.add_argument( + "--suite", + choices=["reftest", "crashtest", "jstestbrowser"], + default=None, + help=argparse.SUPPRESS, + ) + + self.add_argument( + "--cleanup-crashes", + action="store_true", + dest="cleanupCrashes", + default=False, + help="Delete pending crash reports before running tests.", + ) + + self.add_argument( + "--max-retries", + type=int, + dest="maxRetries", + default=4, + help="The maximum number of attempts to try and recover from a " + "crash before aborting the test run [default 4].", + ) + + self.add_argument( + "tests", + metavar="TEST_PATH", + nargs="*", + help="Path to test file, manifest file, or directory containing tests", + ) + + self.add_argument( + "--sandbox-read-whitelist", + action="append", + dest="sandboxReadWhitelist", + default=[], + help="Path to add to the sandbox whitelist.", + ) + + self.add_argument( + "--verify", + action="store_true", + default=False, + help="Run tests in verification mode: Run many times in different " + "ways, to see if there are intermittent failures.", + ) + + self.add_argument( + 
"--verify-max-time", + type=int, + default=3600, + help="Maximum time, in seconds, to run in --verify mode..", + ) + + self.add_argument( + "--enable-webrender", + action="store_true", + dest="enable_webrender", + default=False, + help="Enable the WebRender compositor in Gecko.", + ) + + self.add_argument( + "--headless", + action="store_true", + dest="headless", + default=False, + help="Run tests in headless mode.", + ) + + self.add_argument( + "--topsrcdir", + action="store", + type=str, + dest="topsrcdir", + default=None, + help="Path to source directory", + ) + + mozlog.commandline.add_logging_group(self) + + def get_ip(self): + import moznetwork + + if os.name != "nt": + return moznetwork.get_ip() + else: + self.error("ERROR: you must specify a --remote-webserver=<ip address>\n") + + def set_default_suite(self, options): + manifests = OrderedDict( + [ + ("reftest.list", "reftest"), + ("crashtests.list", "crashtest"), + ("jstests.list", "jstestbrowser"), + ] + ) + + for test_path in options.tests: + file_name = os.path.basename(test_path) + if file_name in manifests: + options.suite = manifests[file_name] + return + + for test_path in options.tests: + for manifest_file, suite in manifests.iteritems(): + if os.path.exists(os.path.join(test_path, manifest_file)): + options.suite = suite + return + + self.error( + "Failed to determine test suite; supply --suite to set this explicitly" + ) + + def validate(self, options, reftest): + if not options.tests: + # Can't just set this in the argument parser because mach will set a default + self.error( + "Must supply at least one path to a manifest file, " + "test directory, or test file to run." + ) + + if options.suite is None: + self.set_default_suite(options) + + if options.totalChunks is not None and options.thisChunk is None: + self.error("thisChunk must be specified when totalChunks is specified") + + if options.totalChunks: + if not 1 <= options.thisChunk <= options.totalChunks: + self.error("thisChunk must be between 1 and totalChunks") + + if options.fission and not options.e10s: + self.error("Fission is not supported without e10s.") + + if options.logFile: + options.logFile = reftest.getFullPath(options.logFile) + + if options.xrePath is not None: + if not os.access(options.xrePath, os.F_OK): + self.error("--xre-path '%s' not found" % options.xrePath) + if not os.path.isdir(options.xrePath): + self.error("--xre-path '%s' is not a directory" % options.xrePath) + options.xrePath = reftest.getFullPath(options.xrePath) + + if options.reftestExtensionPath is None: + if self.build_obj is not None: + reftestExtensionPath = os.path.join( + self.build_obj.distdir, "xpi-stage", "reftest" + ) + else: + reftestExtensionPath = os.path.join(here, "reftest") + options.reftestExtensionPath = os.path.normpath(reftestExtensionPath) + + if options.specialPowersExtensionPath is None and options.suite in [ + "crashtest", + "jstestbrowser", + ]: + if self.build_obj is not None: + specialPowersExtensionPath = os.path.join( + self.build_obj.distdir, "xpi-stage", "specialpowers" + ) + else: + specialPowersExtensionPath = os.path.join(here, "specialpowers") + options.specialPowersExtensionPath = os.path.normpath( + specialPowersExtensionPath + ) + + options.leakThresholds = { + "default": options.defaultLeakThreshold, + "tab": options.defaultLeakThreshold, + } + + if mozinfo.isWin: + if mozinfo.info["bits"] == 32: + # See bug 1408554. + options.leakThresholds["tab"] = 3000 + else: + # See bug 1404482. 
+ options.leakThresholds["tab"] = 100 + + if options.topsrcdir is None: + if self.build_obj: + options.topsrcdir = self.build_obj.topsrcdir + else: + options.topsrcdir = os.getcwd() + + +class DesktopArgumentsParser(ReftestArgumentsParser): + def __init__(self, **kwargs): + super(DesktopArgumentsParser, self).__init__(**kwargs) + + self.add_argument( + "--run-tests-in-parallel", + action="store_true", + default=False, + dest="runTestsInParallel", + help="run tests in parallel if possible", + ) + + def _prefs_gpu(self): + if mozinfo.os != "win": + return ["layers.acceleration.force-enabled=true"] + return [] + + def validate(self, options, reftest): + super(DesktopArgumentsParser, self).validate(options, reftest) + + if options.runTestsInParallel: + if options.logFile is not None: + self.error("cannot specify logfile with parallel tests") + if options.totalChunks is not None or options.thisChunk is not None: + self.error( + "cannot specify thisChunk or totalChunks with parallel tests" + ) + if options.focusFilterMode != "all": + self.error("cannot specify focusFilterMode with parallel tests") + if options.debugger is not None: + self.error("cannot specify a debugger with parallel tests") + + if options.debugger: + # valgrind and some debuggers may cause Gecko to start slowly. Make sure + # marionette waits long enough to connect. + options.marionette_startup_timeout = 900 + options.marionette_socket_timeout = 540 + + if not options.tests: + self.error("No test files specified.") + + if options.app is None: + if ( + self.build_obj + and self.build_obj.substs["MOZ_BUILD_APP"] != "mobile/android" + ): + from mozbuild.base import BinaryNotFoundException + + try: + bin_dir = self.build_obj.get_binary_path() + except BinaryNotFoundException as e: + print("{}\n\n{}\n".format(e, e.help()), file=sys.stderr) + sys.exit(1) + else: + bin_dir = None + + if bin_dir: + options.app = bin_dir + + if options.symbolsPath and len(urlparse(options.symbolsPath).scheme) < 2: + options.symbolsPath = reftest.getFullPath(options.symbolsPath) + + options.utilityPath = reftest.getFullPath(options.utilityPath) + + +class RemoteArgumentsParser(ReftestArgumentsParser): + def __init__(self, **kwargs): + super(RemoteArgumentsParser, self).__init__() + + # app, xrePath and utilityPath variables are set in main function + self.set_defaults( + logFile="reftest.log", app="", xrePath="", utilityPath="", localLogName=None + ) + + self.add_argument( + "--adbpath", + action="store", + type=str, + dest="adb_path", + default=None, + help="Path to adb binary.", + ) + + self.add_argument( + "--deviceSerial", + action="store", + type=str, + dest="deviceSerial", + help="adb serial number of remote device. This is required " + "when more than one device is connected to the host. " + "Use 'adb devices' to see connected devices.", + ) + + self.add_argument( + "--remote-webserver", + action="store", + type=str, + dest="remoteWebServer", + help="IP address of the remote web server.", + ) + + self.add_argument( + "--http-port", + action="store", + type=str, + dest="httpPort", + help="http port of the remote web server.", + ) + + self.add_argument( + "--ssl-port", + action="store", + type=str, + dest="sslPort", + help="ssl port of the remote web server.", + ) + + self.add_argument( + "--remoteTestRoot", + action="store", + type=str, + dest="remoteTestRoot", + help="Remote directory to use as test root " + "(eg. 
/data/local/tmp/test_root).", + ) + + self.add_argument( + "--httpd-path", + action="store", + type=str, + dest="httpdPath", + help="Path to the httpd.js file.", + ) + + self.add_argument( + "--no-device-info", + action="store_false", + dest="printDeviceInfo", + default=True, + help="Do not display verbose diagnostics about the remote device.", + ) + + self.add_argument( + "--no-install", + action="store_true", + default=False, + help="Skip the installation of the APK.", + ) + + def validate_remote(self, options): + DEFAULT_HTTP_PORT = 8888 + DEFAULT_SSL_PORT = 4443 + + if options.remoteWebServer is None: + options.remoteWebServer = self.get_ip() + + if options.remoteWebServer == "127.0.0.1": + self.error( + "ERROR: Either you specified the loopback for the remote webserver or ", + "your local IP cannot be detected. " + "Please provide the local ip in --remote-webserver", + ) + + if not options.httpPort: + options.httpPort = DEFAULT_HTTP_PORT + + if not options.sslPort: + options.sslPort = DEFAULT_SSL_PORT + + if options.xrePath is None: + self.error( + "ERROR: You must specify the path to the controller xre directory" + ) + else: + # Ensure xrepath is a full path + options.xrePath = os.path.abspath(options.xrePath) + + # httpd-path is specified by standard makefile targets and may be specified + # on the command line to select a particular version of httpd.js. If not + # specified, try to select the one from hostutils.zip, as required in + # bug 882932. + if not options.httpdPath: + options.httpdPath = os.path.join(options.utilityPath, "components") + + return options diff --git a/layout/tools/reftest/remotereftest.py b/layout/tools/reftest/remotereftest.py new file mode 100644 index 0000000000..ac2f0bbc2c --- /dev/null +++ b/layout/tools/reftest/remotereftest.py @@ -0,0 +1,581 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function + +import datetime +import os +import posixpath +import shutil +import signal +import subprocess +import sys +import tempfile +import time +import traceback +from contextlib import closing + +from six.moves.urllib_request import urlopen + +from mozdevice import ADBDeviceFactory, ADBTimeoutError, RemoteProcessMonitor +import mozcrash + +from output import OutputHandler +from runreftest import RefTest, ReftestResolver, build_obj +import reftestcommandline + +# We need to know our current directory so that we can serve our test files from it. +SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__))) + + +class RemoteReftestResolver(ReftestResolver): + def absManifestPath(self, path): + script_abs_path = os.path.join(SCRIPT_DIRECTORY, path) + if os.path.exists(script_abs_path): + rv = script_abs_path + elif os.path.exists(os.path.abspath(path)): + rv = os.path.abspath(path) + else: + print("Could not find manifest %s" % script_abs_path, file=sys.stderr) + sys.exit(1) + return os.path.normpath(rv) + + def manifestURL(self, options, path): + # Dynamically build the reftest URL if possible, beware that + # args[0] should exist 'inside' webroot. It's possible for + # this url to have a leading "..", but reftest.js will fix + # that. Use the httpdPath to determine if we are running in + # production or locally. If we are running the jsreftests + # locally, strip text up to jsreftest. 
We want the docroot of + # the server to include a link jsreftest that points to the + # test-stage location of the test files. The desktop oriented + # setup has already created a link for tests which points + # directly into the source tree. For the remote tests we need + # a separate symbolic link to point to the staged test files. + if "jsreftest" not in path or os.environ.get("MOZ_AUTOMATION"): + relPath = os.path.relpath(path, SCRIPT_DIRECTORY) + else: + relPath = "jsreftest/" + path.split("jsreftest/")[-1] + return "http://%s:%s/%s" % (options.remoteWebServer, options.httpPort, relPath) + + +class ReftestServer: + """Web server used to serve Reftests, for closer fidelity to the real web. + It is virtually identical to the server used in mochitest and will only + be used for running reftests remotely. + Bug 581257 has been filed to refactor this wrapper around httpd.js into + it's own class and use it in both remote and non-remote testing.""" + + def __init__(self, options, scriptDir, log): + self.log = log + self.utilityPath = options.utilityPath + self.xrePath = options.xrePath + self.profileDir = options.serverProfilePath + self.webServer = options.remoteWebServer + self.httpPort = options.httpPort + self.scriptDir = scriptDir + self.httpdPath = os.path.abspath(options.httpdPath) + if options.remoteWebServer == "10.0.2.2": + # probably running an Android emulator and 10.0.2.2 will + # not be visible from host + shutdownServer = "127.0.0.1" + else: + shutdownServer = self.webServer + self.shutdownURL = "http://%(server)s:%(port)s/server/shutdown" % { + "server": shutdownServer, + "port": self.httpPort, + } + + def start(self): + "Run the Refest server, returning the process ID of the server." + + env = dict(os.environ) + env["XPCOM_DEBUG_BREAK"] = "warn" + bin_suffix = "" + if sys.platform in ("win32", "msys", "cygwin"): + env["PATH"] = env["PATH"] + ";" + self.xrePath + bin_suffix = ".exe" + else: + if "LD_LIBRARY_PATH" not in env or env["LD_LIBRARY_PATH"] is None: + env["LD_LIBRARY_PATH"] = self.xrePath + else: + env["LD_LIBRARY_PATH"] = ":".join( + [self.xrePath, env["LD_LIBRARY_PATH"]] + ) + + args = [ + "-g", + self.xrePath, + "-f", + os.path.join(self.httpdPath, "httpd.js"), + "-e", + "const _PROFILE_PATH = '%(profile)s';const _SERVER_PORT = " + "'%(port)s'; const _SERVER_ADDR ='%(server)s';" + % { + "profile": self.profileDir.replace("\\", "\\\\"), + "port": self.httpPort, + "server": self.webServer, + }, + "-f", + os.path.join(self.scriptDir, "server.js"), + ] + + xpcshell = os.path.join(self.utilityPath, "xpcshell" + bin_suffix) + + if not os.access(xpcshell, os.F_OK): + raise Exception("xpcshell not found at %s" % xpcshell) + if RemoteProcessMonitor.elf_arm(xpcshell): + raise Exception( + "xpcshell at %s is an ARM binary; please use " + "the --utility-path argument to specify the path " + "to a desktop version." % xpcshell + ) + + self._process = subprocess.Popen([xpcshell] + args, env=env) + pid = self._process.pid + if pid < 0: + self.log.error( + "TEST-UNEXPECTED-FAIL | remotereftests.py | Error starting server." + ) + return 2 + self.log.info("INFO | remotereftests.py | Server pid: %d" % pid) + + def ensureReady(self, timeout): + assert timeout >= 0 + + aliveFile = os.path.join(self.profileDir, "server_alive.txt") + i = 0 + while i < timeout: + if os.path.exists(aliveFile): + break + time.sleep(1) + i += 1 + else: + self.log.error( + "TEST-UNEXPECTED-FAIL | remotereftests.py | " + "Timed out while waiting for server startup." 
+ ) + self.stop() + return 1 + + def stop(self): + if hasattr(self, "_process"): + try: + with closing(urlopen(self.shutdownURL)) as c: + c.read() + + rtncode = self._process.poll() + if rtncode is None: + self._process.terminate() + except Exception: + self.log.info("Failed to shutdown server at %s" % self.shutdownURL) + traceback.print_exc() + self._process.kill() + + +class RemoteReftest(RefTest): + use_marionette = False + resolver_cls = RemoteReftestResolver + + def __init__(self, options, scriptDir): + RefTest.__init__(self, options.suite) + self.run_by_manifest = False + self.scriptDir = scriptDir + self.localLogName = options.localLogName + + verbose = False + if ( + options.log_mach_verbose + or options.log_tbpl_level == "debug" + or options.log_mach_level == "debug" + or options.log_raw_level == "debug" + ): + verbose = True + print("set verbose!") + expected = options.app.split("/")[-1] + self.device = ADBDeviceFactory( + adb=options.adb_path or "adb", + device=options.deviceSerial, + test_root=options.remoteTestRoot, + verbose=verbose, + run_as_package=expected, + ) + if options.remoteTestRoot is None: + options.remoteTestRoot = posixpath.join(self.device.test_root, "reftest") + options.remoteProfile = posixpath.join(options.remoteTestRoot, "profile") + options.remoteLogFile = posixpath.join(options.remoteTestRoot, "reftest.log") + options.logFile = options.remoteLogFile + self.remoteProfile = options.remoteProfile + self.remoteTestRoot = options.remoteTestRoot + + if not options.ignoreWindowSize: + parts = self.device.get_info("screen")["screen"][0].split() + width = int(parts[0].split(":")[1]) + height = int(parts[1].split(":")[1]) + if width < 1366 or height < 1050: + self.error( + "ERROR: Invalid screen resolution %sx%s, " + "please adjust to 1366x1050 or higher" % (width, height) + ) + + self._populate_logger(options) + self.outputHandler = OutputHandler( + self.log, options.utilityPath, options.symbolsPath + ) + + self.SERVER_STARTUP_TIMEOUT = 90 + + self.remoteCache = os.path.join(options.remoteTestRoot, "cache/") + + # Check that Firefox is installed + expected = options.app.split("/")[-1] + if not self.device.is_app_installed(expected): + raise Exception("%s is not installed on this device" % expected) + self.device.run_as_package = expected + self.device.clear_logcat() + + self.device.rm(self.remoteCache, force=True, recursive=True) + + procName = options.app.split("/")[-1] + self.device.stop_application(procName) + if self.device.process_exist(procName): + self.log.error("unable to kill %s before starting tests!" 
% procName) + + def findPath(self, paths, filename=None): + for path in paths: + p = path + if filename: + p = os.path.join(p, filename) + if os.path.exists(self.getFullPath(p)): + return path + return None + + def startWebServer(self, options): + """ Create the webserver on the host and start it up """ + remoteXrePath = options.xrePath + remoteUtilityPath = options.utilityPath + + paths = [options.xrePath] + if build_obj: + paths.append(os.path.join(build_obj.topobjdir, "dist", "bin")) + options.xrePath = self.findPath(paths) + if options.xrePath is None: + print( + "ERROR: unable to find xulrunner path for %s, " + "please specify with --xre-path" % (os.name) + ) + return 1 + paths.append("bin") + paths.append(os.path.join("..", "bin")) + + xpcshell = "xpcshell" + if os.name == "nt": + xpcshell += ".exe" + + if options.utilityPath: + paths.insert(0, options.utilityPath) + options.utilityPath = self.findPath(paths, xpcshell) + if options.utilityPath is None: + print( + "ERROR: unable to find utility path for %s, " + "please specify with --utility-path" % (os.name) + ) + return 1 + + options.serverProfilePath = tempfile.mkdtemp() + self.server = ReftestServer(options, self.scriptDir, self.log) + retVal = self.server.start() + if retVal: + return retVal + retVal = self.server.ensureReady(self.SERVER_STARTUP_TIMEOUT) + if retVal: + return retVal + + options.xrePath = remoteXrePath + options.utilityPath = remoteUtilityPath + return 0 + + def stopWebServer(self, options): + self.server.stop() + + def killNamedProc(self, pname, orphans=True): + """ Kill processes matching the given command name """ + try: + import psutil + except ImportError as e: + self.log.warning("Unable to import psutil: %s" % str(e)) + self.log.warning("Unable to verify that %s is not already running." % pname) + return + + self.log.info("Checking for %s processes..." % pname) + + for proc in psutil.process_iter(): + try: + if proc.name() == pname: + procd = proc.as_dict(attrs=["pid", "ppid", "name", "username"]) + if proc.ppid() == 1 or not orphans: + self.log.info("killing %s" % procd) + try: + os.kill( + proc.pid, getattr(signal, "SIGKILL", signal.SIGTERM) + ) + except Exception as e: + self.log.info( + "Failed to kill process %d: %s" % (proc.pid, str(e)) + ) + else: + self.log.info("NOT killing %s (not an orphan?)" % procd) + except Exception: + # may not be able to access process info for all processes + continue + + def createReftestProfile(self, options, **kwargs): + profile = RefTest.createReftestProfile( + self, + options, + server=options.remoteWebServer, + port=options.httpPort, + **kwargs + ) + profileDir = profile.profile + prefs = {} + prefs["app.update.url.android"] = "" + prefs["reftest.remote"] = True + prefs["datareporting.policy.dataSubmissionPolicyBypassAcceptance"] = True + # move necko cache to a location that can be cleaned up + prefs["browser.cache.disk.parent_directory"] = self.remoteCache + + prefs["layout.css.devPixelsPerPx"] = "1.0" + # Because Fennec is a little wacky (see bug 1156817) we need to load the + # reftest pages at 1.0 zoom, rather than zooming to fit the CSS viewport. + prefs["apz.allow_zooming"] = False + + # Set the extra prefs. + profile.set_preferences(prefs) + + try: + self.device.push(profileDir, options.remoteProfile) + # make sure the parent directories of the profile which + # may have been created by the push, also have their + # permissions set to allow access. 
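For illustration, a condensed sketch of the killNamedProc() helper defined above, which clears out orphaned server processes left behind by a previous run; kill_orphaned is a hypothetical name, and psutil is assumed to be installed:

    import os
    import signal
    import psutil

    def kill_orphaned(pname):
        """Kill leftover processes named `pname` whose parent has died
        (ppid == 1); an old ssltunnel or xpcshell keeps the test ports busy."""
        for proc in psutil.process_iter():
            try:
                if proc.name() == pname and proc.ppid() == 1:
                    os.kill(proc.pid, getattr(signal, "SIGKILL", signal.SIGTERM))
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue

    kill_orphaned("ssltunnel")
    kill_orphaned("xpcshell")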
+ self.device.chmod(options.remoteTestRoot, recursive=True) + except Exception: + print("Automation Error: Failed to copy profiledir to device") + raise + + return profile + + def printDeviceInfo(self, printLogcat=False): + try: + if printLogcat: + logcat = self.device.get_logcat() + for l in logcat: + ul = l.decode("utf-8", errors="replace") + sl = ul.encode("iso8859-1", errors="replace") + print("%s\n" % sl) + print("Device info:") + devinfo = self.device.get_info() + for category in devinfo: + if type(devinfo[category]) is list: + print(" %s:" % category) + for item in devinfo[category]: + print(" %s" % item) + else: + print(" %s: %s" % (category, devinfo[category])) + print("Test root: %s" % self.device.test_root) + except ADBTimeoutError: + raise + except Exception as e: + print("WARNING: Error getting device information: %s" % str(e)) + + def environment(self, env=None, crashreporter=True, **kwargs): + # Since running remote, do not mimic the local env: do not copy os.environ + if env is None: + env = {} + + if crashreporter: + env["MOZ_CRASHREPORTER_NO_REPORT"] = "1" + env["MOZ_CRASHREPORTER"] = "1" + env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1" + else: + env["MOZ_CRASHREPORTER_DISABLE"] = "1" + + # Crash on non-local network connections by default. + # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily + # enable non-local connections for the purposes of local testing. + # Don't override the user's choice here. See bug 1049688. + env.setdefault("MOZ_DISABLE_NONLOCAL_CONNECTIONS", "1") + + # Send an env var noting that we are in automation. Passing any + # value except the empty string will declare the value to exist. + # + # This may be used to disabled network connections during testing, e.g. + # Switchboard & telemetry uploads. + env.setdefault("MOZ_IN_AUTOMATION", "1") + + # Set WebRTC logging in case it is not set yet. 
+ env.setdefault("R_LOG_LEVEL", "6") + env.setdefault("R_LOG_DESTINATION", "stderr") + env.setdefault("R_LOG_VERBOSE", "1") + + return env + + def buildBrowserEnv(self, options, profileDir): + browserEnv = RefTest.buildBrowserEnv(self, options, profileDir) + # remove desktop environment not used on device + if "XPCOM_MEM_BLOAT_LOG" in browserEnv: + del browserEnv["XPCOM_MEM_BLOAT_LOG"] + return browserEnv + + def runApp( + self, + options, + cmdargs=None, + timeout=None, + debuggerInfo=None, + symbolsPath=None, + valgrindPath=None, + valgrindArgs=None, + valgrindSuppFiles=None, + **profileArgs + ): + if cmdargs is None: + cmdargs = [] + + if self.use_marionette: + cmdargs.append("-marionette") + + binary = options.app + profile = self.createReftestProfile(options, **profileArgs) + + # browser environment + env = self.buildBrowserEnv(options, profile.profile) + + self.log.info("Running with e10s: {}".format(options.e10s)) + self.log.info("Running with fission: {}".format(options.fission)) + + rpm = RemoteProcessMonitor( + binary, + self.device, + self.log, + self.outputHandler, + options.remoteLogFile, + self.remoteProfile, + ) + startTime = datetime.datetime.now() + status = 0 + profileDirectory = self.remoteProfile + "/" + cmdargs.extend(("-no-remote", "-profile", profileDirectory)) + + pid = rpm.launch( + binary, + debuggerInfo, + None, + cmdargs, + env=env, + e10s=options.e10s, + ) + self.log.info("remotereftest.py | Application pid: %d" % pid) + if not rpm.wait(timeout): + status = 1 + self.log.info( + "remotereftest.py | Application ran for: %s" + % str(datetime.datetime.now() - startTime) + ) + crashed = self.check_for_crashes(symbolsPath, rpm.last_test_seen) + if crashed: + status = 1 + + self.cleanup(profile.profile) + return status + + def check_for_crashes(self, symbols_path, last_test_seen): + """ + Pull any minidumps from remote profile and log any associated crashes. + """ + try: + dump_dir = tempfile.mkdtemp() + remote_crash_dir = posixpath.join(self.remoteProfile, "minidumps") + if not self.device.is_dir(remote_crash_dir): + return False + self.device.pull(remote_crash_dir, dump_dir) + crashed = mozcrash.log_crashes( + self.log, dump_dir, symbols_path, test=last_test_seen + ) + finally: + try: + shutil.rmtree(dump_dir) + except Exception as e: + self.log.warning( + "unable to remove directory %s: %s" % (dump_dir, str(e)) + ) + return crashed + + def cleanup(self, profileDir): + self.device.rm(self.remoteTestRoot, force=True, recursive=True) + self.device.rm(self.remoteProfile, force=True, recursive=True) + self.device.rm(self.remoteCache, force=True, recursive=True) + RefTest.cleanup(self, profileDir) + + +def run_test_harness(parser, options): + reftest = RemoteReftest(options, SCRIPT_DIRECTORY) + parser.validate_remote(options) + parser.validate(options, reftest) + + # Hack in a symbolic link for jsreftest in the SCRIPT_DIRECTORY + # which is the document root for the reftest web server. This + # allows a separate redirection for the jsreftests which must + # run through the web server using the staged tests files and + # the desktop which will use the tests symbolic link to find + # the JavaScript tests. 
+ jsreftest_target = str(os.path.join(SCRIPT_DIRECTORY, "jsreftest")) + if os.environ.get("MOZ_AUTOMATION"): + os.system("ln -s ../jsreftest " + jsreftest_target) + else: + jsreftest_source = os.path.join( + build_obj.topobjdir, "dist", "test-stage", "jsreftest" + ) + if not os.path.islink(jsreftest_target): + os.symlink(jsreftest_source, jsreftest_target) + + # Despite our efforts to clean up servers started by this script, in practice + # we still see infrequent cases where a process is orphaned and interferes + # with future tests, typically because the old server is keeping the port in use. + # Try to avoid those failures by checking for and killing servers before + # trying to start new ones. + reftest.killNamedProc("ssltunnel") + reftest.killNamedProc("xpcshell") + + # Start the webserver + retVal = reftest.startWebServer(options) + if retVal: + return retVal + + if options.printDeviceInfo and not options.verify: + reftest.printDeviceInfo() + + retVal = 0 + try: + if options.verify: + retVal = reftest.verifyTests(options.tests, options) + else: + retVal = reftest.runTests(options.tests, options) + except Exception: + print("Automation Error: Exception caught while running tests") + traceback.print_exc() + retVal = 1 + + reftest.stopWebServer(options) + + if options.printDeviceInfo and not options.verify: + reftest.printDeviceInfo(printLogcat=(retVal != 0)) + + return retVal + + +if __name__ == "__main__": + parser = reftestcommandline.RemoteArgumentsParser() + options = parser.parse_args() + sys.exit(run_test_harness(parser, options)) diff --git a/layout/tools/reftest/runreftest.py b/layout/tools/reftest/runreftest.py new file mode 100644 index 0000000000..5c97cffecb --- /dev/null +++ b/layout/tools/reftest/runreftest.py @@ -0,0 +1,1158 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +""" +Runs the reftest test harness. +""" +from __future__ import print_function + +from __future__ import absolute_import, print_function + +import copy +import json +import multiprocessing +import os +import platform +import posixpath +import re +import shutil +import signal +import subprocess +import sys +import tempfile +import threading +from collections import defaultdict +from datetime import datetime, timedelta + +SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__))) +if SCRIPT_DIRECTORY not in sys.path: + sys.path.insert(0, SCRIPT_DIRECTORY) + +import mozcrash +import mozdebug +import mozfile +import mozinfo +import mozleak +import mozlog +import mozprocess +import mozprofile +import mozrunner +from manifestparser import TestManifest, filters as mpf +from mozrunner.utils import get_stack_fixer_function, test_environment +from mozscreenshot import printstatus, dump_screen +from six import reraise, string_types +from six.moves import range + +try: + from marionette_driver.addons import Addons + from marionette_harness import Marionette +except ImportError as e: # noqa + # Defer ImportError until attempt to use Marionette. + # Python 3 deletes the exception once the except block + # is exited. Save a version to raise later. 
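For illustration, a minimal, runnable sketch of the deferred-import pattern used here for Marionette; widgetlib and Widget are hypothetical stand-ins for the optional dependency:

    try:
        from widgetlib import Widget        # hypothetical optional dependency
    except ImportError as e:
        _saved = ImportError(str(e))        # Python 3 deletes `e` after this block

        def Widget(*args, **kwargs):        # placeholder that fails only on use
            raise _saved

    # Modules that never construct Widget() import cleanly; the first real use
    # raises the original ImportError, mirroring the Marionette handling here.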
+ e_save = ImportError(str(e)) + + def reraise_(*args, **kwargs): + raise (e_save) # noqa + + Marionette = reraise_ + +from output import OutputHandler, ReftestFormatter +import reftestcommandline + +here = os.path.abspath(os.path.dirname(__file__)) + +try: + from mozbuild.base import MozbuildObject + + build_obj = MozbuildObject.from_environment(cwd=here) +except ImportError: + build_obj = None + + +def categoriesToRegex(categoryList): + return "\\(" + ", ".join(["(?P<%s>\\d+) %s" % c for c in categoryList]) + "\\)" + + +summaryLines = [ + ("Successful", [("pass", "pass"), ("loadOnly", "load only")]), + ( + "Unexpected", + [ + ("fail", "unexpected fail"), + ("pass", "unexpected pass"), + ("asserts", "unexpected asserts"), + ("fixedAsserts", "unexpected fixed asserts"), + ("failedLoad", "failed load"), + ("exception", "exception"), + ], + ), + ( + "Known problems", + [ + ("knownFail", "known fail"), + ("knownAsserts", "known asserts"), + ("random", "random"), + ("skipped", "skipped"), + ("slow", "slow"), + ], + ), +] + + +if sys.version_info[0] == 3: + + def reraise_(tp_, value_, tb_=None): + if value_ is None: + value_ = tp_() + if value_.__traceback__ is not tb_: + raise value_.with_traceback(tb_) + raise value_ + + +else: + exec("def reraise_(tp_, value_, tb_=None):\n raise tp_, value_, tb_\n") + + +def update_mozinfo(): + """walk up directories to find mozinfo.json update the info""" + # TODO: This should go in a more generic place, e.g. mozinfo + + path = SCRIPT_DIRECTORY + dirs = set() + while path != os.path.expanduser("~"): + if path in dirs: + break + dirs.add(path) + path = os.path.split(path)[0] + mozinfo.find_and_update_from_json(*dirs) + + +# Python's print is not threadsafe. +printLock = threading.Lock() + + +class ReftestThread(threading.Thread): + def __init__(self, cmdargs): + threading.Thread.__init__(self) + self.cmdargs = cmdargs + self.summaryMatches = {} + self.retcode = -1 + for text, _ in summaryLines: + self.summaryMatches[text] = None + + def run(self): + with printLock: + print("Starting thread with", self.cmdargs) + sys.stdout.flush() + process = subprocess.Popen(self.cmdargs, stdout=subprocess.PIPE) + for chunk in self.chunkForMergedOutput(process.stdout): + with printLock: + print(chunk, end=" ") + sys.stdout.flush() + self.retcode = process.wait() + + def chunkForMergedOutput(self, logsource): + """Gather lines together that should be printed as one atomic unit. + Individual test results--anything between 'REFTEST TEST-START' and + 'REFTEST TEST-END' lines--are an atomic unit. Lines with data from + summaries are parsed and the data stored for later aggregation. 
+ Other lines are considered their own atomic units and are permitted + to intermix freely.""" + testStartRegex = re.compile("^REFTEST TEST-START") + testEndRegex = re.compile("^REFTEST TEST-END") + summaryHeadRegex = re.compile("^REFTEST INFO \\| Result summary:") + summaryRegexFormatString = ( + "^REFTEST INFO \\| (?P<message>{text}): (?P<total>\\d+) {regex}" + ) + summaryRegexStrings = [ + summaryRegexFormatString.format( + text=text, regex=categoriesToRegex(categories) + ) + for (text, categories) in summaryLines + ] + summaryRegexes = [re.compile(regex) for regex in summaryRegexStrings] + + for line in logsource: + if testStartRegex.search(line) is not None: + chunkedLines = [line] + for lineToBeChunked in logsource: + chunkedLines.append(lineToBeChunked) + if testEndRegex.search(lineToBeChunked) is not None: + break + yield "".join(chunkedLines) + continue + + haveSuppressedSummaryLine = False + for regex in summaryRegexes: + match = regex.search(line) + if match is not None: + self.summaryMatches[match.group("message")] = match + haveSuppressedSummaryLine = True + break + if haveSuppressedSummaryLine: + continue + + if summaryHeadRegex.search(line) is None: + yield line + + +class ReftestResolver(object): + def defaultManifest(self, suite): + return { + "reftest": "reftest.list", + "crashtest": "crashtests.list", + "jstestbrowser": "jstests.list", + }[suite] + + def directoryManifest(self, suite, path): + return os.path.join(path, self.defaultManifest(suite)) + + def findManifest(self, suite, test_file, subdirs=True): + """Return a tuple of (manifest-path, filter-string) for running test_file. + + test_file is a path to a test or a manifest file + """ + rv = [] + default_manifest = self.defaultManifest(suite) + if not os.path.isabs(test_file): + test_file = self.absManifestPath(test_file) + + if os.path.isdir(test_file): + for dirpath, dirnames, filenames in os.walk(test_file): + if default_manifest in filenames: + rv.append((os.path.join(dirpath, default_manifest), None)) + # We keep recursing into subdirectories which means that in the case + # of include directives we get the same manifest multiple times. 
+ # However reftest.js will only read each manifest once + + elif test_file.endswith(".list"): + if os.path.exists(test_file): + rv = [(test_file, None)] + else: + dirname, pathname = os.path.split(test_file) + found = True + while not os.path.exists(os.path.join(dirname, default_manifest)): + dirname, suffix = os.path.split(dirname) + pathname = posixpath.join(suffix, pathname) + if os.path.dirname(dirname) == dirname: + found = False + break + if found: + rv = [ + ( + os.path.join(dirname, default_manifest), + r".*(?:/|\\)%s(?:[#?].*)?$" % pathname.replace("?", "\?"), + ) + ] + + return rv + + def absManifestPath(self, path): + return os.path.normpath(os.path.abspath(path)) + + def manifestURL(self, options, path): + return "file://%s" % path + + def resolveManifests(self, options, tests): + suite = options.suite + manifests = {} + for testPath in tests: + for manifest, filter_str in self.findManifest(suite, testPath): + if manifest not in manifests: + manifests[manifest] = set() + manifests[manifest].add(filter_str) + manifests_by_url = {} + for key in manifests.keys(): + id = os.path.relpath( + os.path.abspath(os.path.dirname(key)), options.topsrcdir + ) + id = id.replace(os.sep, posixpath.sep) + if None in manifests[key]: + manifests[key] = (None, id) + else: + manifests[key] = ("|".join(list(manifests[key])), id) + url = self.manifestURL(options, key) + manifests_by_url[url] = manifests[key] + return manifests_by_url + + +class RefTest(object): + oldcwd = os.getcwd() + resolver_cls = ReftestResolver + use_marionette = True + + def __init__(self, suite): + update_mozinfo() + self.lastTestSeen = None + self.lastTest = None + self.haveDumpedScreen = False + self.resolver = self.resolver_cls() + self.log = None + self.outputHandler = None + self.testDumpFile = os.path.join(tempfile.gettempdir(), "reftests.json") + + self.run_by_manifest = True + if suite in ("crashtest", "jstestbrowser"): + self.run_by_manifest = False + + def _populate_logger(self, options): + if self.log: + return + + self.log = getattr(options, "log", None) + if self.log: + return + + mozlog.commandline.log_formatters["tbpl"] = ( + ReftestFormatter, + "Reftest specific formatter for the" + "benefit of legacy log parsers and" + "tools such as the reftest analyzer", + ) + fmt_options = {} + if not options.log_tbpl_level and os.environ.get("MOZ_REFTEST_VERBOSE"): + options.log_tbpl_level = fmt_options["level"] = "debug" + self.log = mozlog.commandline.setup_logging( + "reftest harness", options, {"tbpl": sys.stdout}, fmt_options + ) + + def getFullPath(self, path): + "Get an absolute path relative to self.oldcwd." + return os.path.normpath(os.path.join(self.oldcwd, os.path.expanduser(path))) + + def createReftestProfile( + self, + options, + tests=None, + manifests=None, + server="localhost", + port=0, + profile_to_clone=None, + prefs=None, + ): + """Sets up a profile for reftest. 
+ + :param options: Object containing command line options + :param tests: List of test objects to run + :param manifests: List of manifest files to parse (only takes effect + if tests were not passed in) + :param server: Server name to use for http tests + :param profile_to_clone: Path to a profile to use as the basis for the + test profile + :param prefs: Extra preferences to set in the profile + """ + locations = mozprofile.permissions.ServerLocations() + locations.add_host(server, scheme="http", port=port) + locations.add_host(server, scheme="https", port=port) + + sandbox_whitelist_paths = options.sandboxReadWhitelist + if platform.system() == "Linux" or platform.system() in ( + "Windows", + "Microsoft", + ): + # Trailing slashes are needed to indicate directories on Linux and Windows + sandbox_whitelist_paths = map( + lambda p: os.path.join(p, ""), sandbox_whitelist_paths + ) + + addons = [] + if not self.use_marionette: + addons.append(options.reftestExtensionPath) + + if options.specialPowersExtensionPath is not None: + if not self.use_marionette: + addons.append(options.specialPowersExtensionPath) + + # Install distributed extensions, if application has any. + distExtDir = os.path.join( + options.app[: options.app.rfind(os.sep)], "distribution", "extensions" + ) + if os.path.isdir(distExtDir): + for f in os.listdir(distExtDir): + addons.append(os.path.join(distExtDir, f)) + + # Install custom extensions. + for f in options.extensionsToInstall: + addons.append(self.getFullPath(f)) + + kwargs = { + "addons": addons, + "locations": locations, + "whitelistpaths": sandbox_whitelist_paths, + } + if profile_to_clone: + profile = mozprofile.Profile.clone(profile_to_clone, **kwargs) + else: + profile = mozprofile.Profile(**kwargs) + + # First set prefs from the base profiles under testing/profiles. + + # In test packages used in CI, the profile_data directory is installed + # in the SCRIPT_DIRECTORY. + profile_data_dir = os.path.join(SCRIPT_DIRECTORY, "profile_data") + # If possible, read profile data from topsrcdir. This prevents us from + # requiring a re-build to pick up newly added extensions in the + # <profile>/extensions directory. + if build_obj: + path = os.path.join(build_obj.topsrcdir, "testing", "profiles") + if os.path.isdir(path): + profile_data_dir = path + # Still not found? Look for testing/profiles relative to layout/tools/reftest. + if not os.path.isdir(profile_data_dir): + path = os.path.abspath( + os.path.join(SCRIPT_DIRECTORY, "..", "..", "..", "testing", "profiles") + ) + if os.path.isdir(path): + profile_data_dir = path + + with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh: + base_profiles = json.load(fh)["reftest"] + + for name in base_profiles: + path = os.path.join(profile_data_dir, name) + profile.merge(path) + + # Second set preferences for communication between our command line + # arguments and the reftest harness. Preferences that are required for + # reftest to work should instead be set under srcdir/testing/profiles. 
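For illustration, a condensed sketch of the base-profile layering performed above; build_base_profile is a hypothetical helper name, and profile_data_dir is resolved as described in the surrounding code:

    import json
    import os
    import mozprofile

    def build_base_profile(profile_data_dir):
        """Merge every base profile listed for the reftest suite in
        profiles.json onto a fresh mozprofile.Profile."""
        profile = mozprofile.Profile()
        with open(os.path.join(profile_data_dir, "profiles.json")) as fh:
            names = json.load(fh)["reftest"]
        for name in names:
            profile.merge(os.path.join(profile_data_dir, name))
        return profile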
+ prefs = prefs or {} + prefs["reftest.timeout"] = options.timeout * 1000 + if options.logFile: + prefs["reftest.logFile"] = options.logFile + if options.ignoreWindowSize: + prefs["reftest.ignoreWindowSize"] = True + if options.shuffle: + prefs["reftest.shuffle"] = True + if options.repeat: + prefs["reftest.repeat"] = options.repeat + if options.runUntilFailure: + prefs["reftest.runUntilFailure"] = True + if not options.repeat: + prefs["reftest.repeat"] = 30 + if options.verify: + prefs["reftest.verify"] = True + if options.cleanupCrashes: + prefs["reftest.cleanupPendingCrashes"] = True + prefs["reftest.focusFilterMode"] = options.focusFilterMode + prefs["reftest.logLevel"] = options.log_tbpl_level or "info" + prefs["reftest.suite"] = options.suite + prefs["gfx.font_rendering.ahem_antialias_none"] = True + # Run the "deferred" font-loader immediately, because if it finishes + # mid-test, the extra reflow that is triggered can disrupt the test. + prefs["gfx.font_loader.delay"] = 0 + prefs["gfx.font_loader.interval"] = 0 + # Disable dark scrollbars because it's semi-transparent. + prefs["widget.disable-dark-scrollbar"] = True + prefs["reftest.isCoverageBuild"] = mozinfo.info.get("ccov", False) + + # Set tests to run or manifests to parse. + if tests: + testlist = os.path.join(profile.profile, "reftests.json") + with open(testlist, "w") as fh: + json.dump(tests, fh) + prefs["reftest.tests"] = testlist + elif manifests: + prefs["reftest.manifests"] = json.dumps(manifests) + + # Unconditionally update the e10s pref. + if options.e10s: + prefs["browser.tabs.remote.autostart"] = True + else: + prefs["browser.tabs.remote.autostart"] = False + + if options.fission: + prefs["fission.autostart"] = True + else: + prefs["fission.autostart"] = False + + if not self.run_by_manifest: + if options.totalChunks: + prefs["reftest.totalChunks"] = options.totalChunks + if options.thisChunk: + prefs["reftest.thisChunk"] = options.thisChunk + + # Bug 1262954: For winXP + e10s disable acceleration + if ( + platform.system() in ("Windows", "Microsoft") + and "5.1" in platform.version() + and options.e10s + ): + prefs["layers.acceleration.disabled"] = True + + # Bug 1300355: Disable canvas cache for win7 as it uses + # too much memory and causes OOMs. + if ( + platform.system() in ("Windows", "Microsoft") + and "6.1" in platform.version() + ): + prefs["reftest.nocache"] = True + + if options.marionette: + # options.marionette can specify host:port + port = options.marionette.split(":")[1] + prefs["marionette.port"] = int(port) + + # Enable tracing output for detailed failures in case of + # failing connection attempts, and hangs (bug 1397201) + prefs["marionette.log.level"] = "Trace" + + # Third, set preferences passed in via the command line. 
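For illustration, a condensed sketch of the --setpref handling that follows; parse_setprefs is a hypothetical helper name, and the sample values show how mozprofile casts strings to typed preferences:

    import mozprofile

    def parse_setprefs(extra_prefs):
        """Split NAME=VALUE pairs and cast "true"/"false"/digits to typed
        pref values, as the loop below does for options.extraPrefs."""
        prefs = {}
        for v in extra_prefs:
            name, sep, value = v.partition("=")
            if not sep:
                raise ValueError("syntax error in --setpref=" + v)
            prefs[name] = mozprofile.Preferences.cast(value.strip())
        return prefs

    print(parse_setprefs(["fission.autostart=true", "reftest.repeat=5"]))
    # -> {'fission.autostart': True, 'reftest.repeat': 5}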
+ for v in options.extraPrefs: + thispref = v.split("=") + if len(thispref) < 2: + print("Error: syntax error in --setpref=" + v) + sys.exit(1) + prefs[thispref[0]] = thispref[1].strip() + + for pref in prefs: + prefs[pref] = mozprofile.Preferences.cast(prefs[pref]) + profile.set_preferences(prefs) + + if os.path.join(here, "chrome") not in options.extraProfileFiles: + options.extraProfileFiles.append(os.path.join(here, "chrome")) + + self.copyExtraFilesToProfile(options, profile) + return profile + + def environment(self, **kwargs): + kwargs["log"] = self.log + return test_environment(**kwargs) + + def buildBrowserEnv(self, options, profileDir): + browserEnv = self.environment( + xrePath=options.xrePath, debugger=options.debugger + ) + browserEnv["XPCOM_DEBUG_BREAK"] = "stack" + if options.topsrcdir: + browserEnv["MOZ_DEVELOPER_REPO_DIR"] = options.topsrcdir + if hasattr(options, "topobjdir"): + browserEnv["MOZ_DEVELOPER_OBJ_DIR"] = options.topobjdir + + if mozinfo.info["asan"]: + # Disable leak checking for reftests for now + if "ASAN_OPTIONS" in browserEnv: + browserEnv["ASAN_OPTIONS"] += ":detect_leaks=0" + else: + browserEnv["ASAN_OPTIONS"] = "detect_leaks=0" + + # Set environment defaults for jstestbrowser. Keep in sync with the + # defaults used in js/src/tests/lib/tests.py. + if options.suite == "jstestbrowser": + browserEnv["TZ"] = "PST8PDT" + browserEnv["LC_ALL"] = "en_US.UTF-8" + + for v in options.environment: + ix = v.find("=") + if ix <= 0: + print("Error: syntax error in --setenv=" + v) + return None + browserEnv[v[:ix]] = v[ix + 1 :] + + # Enable leaks detection to its own log file. + self.leakLogFile = os.path.join(profileDir, "runreftest_leaks.log") + browserEnv["XPCOM_MEM_BLOAT_LOG"] = self.leakLogFile + + if options.enable_webrender: + browserEnv["MOZ_WEBRENDER"] = "1" + browserEnv["MOZ_ACCELERATED"] = "1" + else: + browserEnv["MOZ_WEBRENDER"] = "0" + + if options.headless: + browserEnv["MOZ_HEADLESS"] = "1" + + return browserEnv + + def cleanup(self, profileDir): + if profileDir: + shutil.rmtree(profileDir, True) + + def verifyTests(self, tests, options): + """ + Support --verify mode: Run test(s) many times in a variety of + configurations/environments in an effort to find intermittent + failures. + """ + + self._populate_logger(options) + + # options.log has done its work, in _populate_logger; remove it so that + # options can be deepcopied. 
An alternative would be to modify + # mozlog.structuredlog.StructuredLogger to support copy.deepcopy, + # https://docs.python.org/2.7/library/copy.html + if hasattr(options, "log"): + delattr(options, "log") + + # Number of times to repeat test(s) when running with --repeat + VERIFY_REPEAT = 10 + # Number of times to repeat test(s) when running test in separate browser + VERIFY_REPEAT_SINGLE_BROWSER = 5 + + def step1(): + stepOptions = copy.deepcopy(options) + stepOptions.repeat = VERIFY_REPEAT + stepOptions.runUntilFailure = True + result = self.runTests(tests, stepOptions) + return result + + def step2(): + stepOptions = copy.deepcopy(options) + for i in range(VERIFY_REPEAT_SINGLE_BROWSER): + result = self.runTests(tests, stepOptions) + if result != 0: + break + return result + + def step3(): + stepOptions = copy.deepcopy(options) + stepOptions.repeat = VERIFY_REPEAT + stepOptions.runUntilFailure = True + stepOptions.environment.append("MOZ_CHAOSMODE=0xfb") + result = self.runTests(tests, stepOptions) + return result + + def step4(): + stepOptions = copy.deepcopy(options) + stepOptions.environment.append("MOZ_CHAOSMODE=0xfb") + for i in range(VERIFY_REPEAT_SINGLE_BROWSER): + result = self.runTests(tests, stepOptions) + if result != 0: + break + return result + + steps = [ + ("1. Run each test %d times in one browser." % VERIFY_REPEAT, step1), + ( + "2. Run each test %d times in a new browser each time." + % VERIFY_REPEAT_SINGLE_BROWSER, + step2, + ), + ( + "3. Run each test %d times in one browser, in chaos mode." + % VERIFY_REPEAT, + step3, + ), + ( + "4. Run each test %d times in a new browser each time, in chaos mode." + % VERIFY_REPEAT_SINGLE_BROWSER, + step4, + ), + ] + + stepResults = {} + for (descr, step) in steps: + stepResults[descr] = "not run / incomplete" + + startTime = datetime.now() + maxTime = timedelta(seconds=options.verify_max_time) + finalResult = "PASSED" + for (descr, step) in steps: + if (datetime.now() - startTime) > maxTime: + self.log.info("::: Test verification is taking too long: Giving up!") + self.log.info( + "::: So far, all checks passed, but not all checks were run." + ) + break + self.log.info(":::") + self.log.info('::: Running test verification step "%s"...' % descr) + self.log.info(":::") + result = step() + if result != 0: + stepResults[descr] = "FAIL" + finalResult = "FAILED!" + break + stepResults[descr] = "Pass" + + self.log.info(":::") + self.log.info("::: Test verification summary for:") + self.log.info(":::") + for test in tests: + self.log.info("::: " + test) + self.log.info(":::") + for descr in sorted(stepResults.keys()): + self.log.info("::: %s : %s" % (descr, stepResults[descr])) + self.log.info(":::") + self.log.info("::: Test verification %s" % finalResult) + self.log.info(":::") + + return result + + def runTests(self, tests, options, cmdargs=None): + cmdargs = cmdargs or [] + self._populate_logger(options) + self.outputHandler = OutputHandler( + self.log, options.utilityPath, options.symbolsPath + ) + + if options.cleanupCrashes: + mozcrash.cleanup_pending_crash_reports() + + manifests = self.resolver.resolveManifests(options, tests) + if options.filter: + manifests[""] = (options.filter, None) + + if not getattr(options, "runTestsInParallel", False): + return self.runSerialTests(manifests, options, cmdargs) + + cpuCount = multiprocessing.cpu_count() + + # We have the directive, technology, and machine to run multiple test instances. 
+ # Experimentation says that reftests are not overly CPU-intensive, so we can run + # multiple jobs per CPU core. + # + # Our Windows machines in automation seem to get upset when we run a lot of + # simultaneous tests on them, so tone things down there. + if sys.platform == "win32": + jobsWithoutFocus = cpuCount + else: + jobsWithoutFocus = 2 * cpuCount + + totalJobs = jobsWithoutFocus + 1 + perProcessArgs = [sys.argv[:] for i in range(0, totalJobs)] + + host = "localhost" + port = 2828 + if options.marionette: + host, port = options.marionette.split(":") + + # First job is only needs-focus tests. Remaining jobs are + # non-needs-focus and chunked. + perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus") + for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1): + jobArgs[-1:-1] = [ + "--focus-filter-mode=non-needs-focus", + "--total-chunks=%d" % jobsWithoutFocus, + "--this-chunk=%d" % chunkNumber, + "--marionette=%s:%d" % (host, port), + ] + port += 1 + + for jobArgs in perProcessArgs: + try: + jobArgs.remove("--run-tests-in-parallel") + except Exception: + pass + jobArgs[0:0] = [sys.executable, "-u"] + + threads = [ReftestThread(args) for args in perProcessArgs[1:]] + for t in threads: + t.start() + + while True: + # The test harness in each individual thread will be doing timeout + # handling on its own, so we shouldn't need to worry about any of + # the threads hanging for arbitrarily long. + for t in threads: + t.join(10) + if not any(t.is_alive() for t in threads): + break + + # Run the needs-focus tests serially after the other ones, so we don't + # have to worry about races between the needs-focus tests *actually* + # needing focus and the dummy windows in the non-needs-focus tests + # trying to focus themselves. + focusThread = ReftestThread(perProcessArgs[0]) + focusThread.start() + focusThread.join() + + # Output the summaries that the ReftestThread filters suppressed. 
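The job layout computed above is easier to see in isolation: one extra job at index 0 that runs only the needs-focus tests (serially, afterwards), plus jobsWithoutFocus chunked jobs, each handed its own Marionette port. A self-contained sketch with invented argument lists (build_job_args is not a harness function):

    def build_job_args(base_args, jobs_without_focus, host="localhost", base_port=2828):
        # Index 0: needs-focus only. Indices 1..N: one chunk each, one port each.
        per_process = [base_args[:] for _ in range(jobs_without_focus + 1)]
        per_process[0].insert(-1, "--focus-filter-mode=needs-focus")
        port = base_port
        for chunk, job_args in enumerate(per_process[1:], start=1):
            job_args[-1:-1] = [
                "--focus-filter-mode=non-needs-focus",
                "--total-chunks=%d" % jobs_without_focus,
                "--this-chunk=%d" % chunk,
                "--marionette=%s:%d" % (host, port),
            ]
            port += 1
        return per_process

    for args in build_job_args(["runreftest.py", "reftest.list"], 2):
        print(args)

After all jobs finish, the per-thread summary matches are aggregated as shown below.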
+ summaryObjects = [defaultdict(int) for s in summaryLines] + for t in threads: + for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines): + threadMatches = t.summaryMatches[text] + for (attribute, description) in categories: + amount = int(threadMatches.group(attribute) if threadMatches else 0) + summaryObj[attribute] += amount + amount = int(threadMatches.group("total") if threadMatches else 0) + summaryObj["total"] += amount + + print("REFTEST INFO | Result summary:") + for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines): + details = ", ".join( + [ + "%d %s" % (summaryObj[attribute], description) + for (attribute, description) in categories + ] + ) + print( + "REFTEST INFO | " + + text + + ": " + + str(summaryObj["total"]) + + " (" + + details + + ")" + ) + + return int(any(t.retcode != 0 for t in threads)) + + def handleTimeout(self, timeout, proc, utilityPath, debuggerInfo): + """handle process output timeout""" + # TODO: bug 913975 : _processOutput should call self.processOutputLine + # one more time on timeout (I think) + self.log.error( + "%s | application timed out after %d seconds with no output" + % (self.lastTestSeen, int(timeout)) + ) + self.log.error("Force-terminating active process(es).") + self.killAndGetStack( + proc, utilityPath, debuggerInfo, dump_screen=not debuggerInfo + ) + + def dumpScreen(self, utilityPath): + if self.haveDumpedScreen: + self.log.info( + "Not taking screenshot here: see the one that was previously logged" + ) + return + self.haveDumpedScreen = True + dump_screen(utilityPath, self.log) + + def killAndGetStack(self, process, utilityPath, debuggerInfo, dump_screen=False): + """ + Kill the process, preferably in a way that gets us a stack trace. + Also attempts to obtain a screenshot before killing the process + if specified.
+ """ + + if dump_screen: + self.dumpScreen(utilityPath) + + if mozinfo.info.get("crashreporter", True) and not debuggerInfo: + if mozinfo.isWin: + # We should have a "crashinject" program in our utility path + crashinject = os.path.normpath( + os.path.join(utilityPath, "crashinject.exe") + ) + if os.path.exists(crashinject): + status = subprocess.Popen([crashinject, str(process.pid)]).wait() + printstatus("crashinject", status) + if status == 0: + return + else: + try: + process.kill(sig=signal.SIGABRT) + except OSError: + # https://bugzilla.mozilla.org/show_bug.cgi?id=921509 + self.log.info("Can't trigger Breakpad, process no longer exists") + return + self.log.info("Can't trigger Breakpad, just killing process") + process.kill() + + def runApp( + self, + options, + cmdargs=None, + timeout=None, + debuggerInfo=None, + symbolsPath=None, + valgrindPath=None, + valgrindArgs=None, + valgrindSuppFiles=None, + **profileArgs + ): + + if cmdargs is None: + cmdargs = [] + cmdargs = cmdargs[:] + + if self.use_marionette: + cmdargs.append("-marionette") + + binary = options.app + profile = self.createReftestProfile(options, **profileArgs) + + # browser environment + env = self.buildBrowserEnv(options, profile.profile) + + self.log.info("Running with e10s: {}".format(options.e10s)) + self.log.info("Running with fission: {}".format(options.fission)) + + def timeoutHandler(): + self.handleTimeout(timeout, proc, options.utilityPath, debuggerInfo) + + interactive = False + debug_args = None + if debuggerInfo: + interactive = debuggerInfo.interactive + debug_args = [debuggerInfo.path] + debuggerInfo.args + + def record_last_test(message): + """Records the last test seen by this harness for the benefit of crash logging.""" + + def testid(test): + if " " in test: + return test.split(" ")[0] + return test + + if message["action"] == "test_start": + self.lastTestSeen = testid(message["test"]) + elif message["action"] == "test_end": + if self.lastTest and message["test"] == self.lastTest: + self.lastTestSeen = "Last test finished" + else: + self.lastTestSeen = "{} (finished)".format(testid(message["test"])) + + self.log.add_handler(record_last_test) + + kp_kwargs = { + "kill_on_timeout": False, + "cwd": SCRIPT_DIRECTORY, + "onTimeout": [timeoutHandler], + "processOutputLine": [self.outputHandler], + } + + if mozinfo.isWin or mozinfo.isMac: + # Prevents log interleaving on Windows at the expense of losing + # true log order. See bug 798300 and bug 1324961 for more details. + kp_kwargs["processStderrLine"] = [self.outputHandler] + + if interactive: + # If an interactive debugger is attached, + # don't use timeouts, and don't capture ctrl-c. 
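The kp_kwargs assembled above are ultimately consumed by mozprocess: output lines, stderr lines and timeouts are delivered to the registered callbacks instead of being read inline. A rough sketch of that wiring using mozprocess.ProcessHandler directly (the harness goes through mozrunner with ProcessHandlerMixin, and the command below is a stand-in child process):

    import sys
    from mozprocess import ProcessHandler

    def on_line(line):
        # Plays the role of self.outputHandler above.
        print("CHILD |", line)

    def on_timeout():
        # Plays the role of timeoutHandler above.
        print("no output from the child within the allowed time", file=sys.stderr)

    proc = ProcessHandler(
        [sys.executable, "-c", "print('hello from the child process')"],
        processOutputLine=[on_line],
        onTimeout=[on_timeout],
        kill_on_timeout=False,
    )
    proc.run(outputTimeout=30)
    proc.wait()

The interactive-debugger branch mentioned just above then disables timeouts and the ctrl-c handler before the runner is started.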
+ timeout = None + signal.signal(signal.SIGINT, lambda sigid, frame: None) + + runner_cls = mozrunner.runners.get( + mozinfo.info.get("appname", "firefox"), mozrunner.Runner + ) + runner = runner_cls( + profile=profile, + binary=binary, + process_class=mozprocess.ProcessHandlerMixin, + cmdargs=cmdargs, + env=env, + process_args=kp_kwargs, + ) + runner.start( + debug_args=debug_args, interactive=interactive, outputTimeout=timeout + ) + proc = runner.process_handler + self.outputHandler.proc_name = "GECKO({})".format(proc.pid) + + # Used to defer a possible IOError exception from Marionette + marionette_exception = None + + if self.use_marionette: + marionette_args = { + "socket_timeout": options.marionette_socket_timeout, + "startup_timeout": options.marionette_startup_timeout, + "symbols_path": options.symbolsPath, + } + if options.marionette: + host, port = options.marionette.split(":") + marionette_args["host"] = host + marionette_args["port"] = int(port) + + try: + marionette = Marionette(**marionette_args) + marionette.start_session() + + addons = Addons(marionette) + if options.specialPowersExtensionPath: + addons.install(options.specialPowersExtensionPath, temp=True) + + addons.install(options.reftestExtensionPath, temp=True) + + marionette.delete_session() + except IOError: + # Any IOError as thrown by Marionette means that something is + # wrong with the process, like a crash or the socket is no + # longer open. We defer raising this specific error so that + # post-test checks for leaks and crashes are performed and + # reported first. + marionette_exception = sys.exc_info() + + status = runner.wait() + runner.process_handler = None + self.outputHandler.proc_name = None + + if status: + msg = ( + "TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" + % (self.lastTestSeen, status) + ) + # use process_output so message is logged verbatim + self.log.process_output(None, msg) + + crashed = mozcrash.log_crashes( + self.log, + os.path.join(profile.profile, "minidumps"), + options.symbolsPath, + test=self.lastTestSeen, + ) + if not status and crashed: + status = 1 + + runner.cleanup() + self.cleanup(profile.profile) + + if marionette_exception is not None: + exc, value, tb = marionette_exception + raise reraise(exc, value, tb) + + self.log.info("Process mode: {}".format("e10s" if options.e10s else "non-e10s")) + return status + + def getActiveTests(self, manifests, options, testDumpFile=None): + # These prefs will cause reftest.jsm to parse the manifests, + # dump the resulting tests to a file, and exit. + prefs = { + "reftest.manifests": json.dumps(manifests), + "reftest.manifests.dumpTests": testDumpFile or self.testDumpFile, + } + cmdargs = [] + self.runApp(options, cmdargs=cmdargs, prefs=prefs) + + if not os.path.isfile(self.testDumpFile): + print("Error: parsing manifests failed!") + sys.exit(1) + + with open(self.testDumpFile, "r") as fh: + tests = json.load(fh) + + if os.path.isfile(self.testDumpFile): + mozfile.remove(self.testDumpFile) + + for test in tests: + # Name and path are expected by manifestparser, but not used in reftest. 
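Those dumped test objects are then fed through manifestparser so the standard filters (chunking in particular) can be applied, as the TestManifest code that follows shows. A sketch with a few hand-written stand-ins for the dumped objects — real entries carry more keys, and the exact chunk assignment depends on the filter's balancing:

    from manifestparser import TestManifest
    from manifestparser import filters as mpf

    # Hand-written stand-ins for the objects reftest dumps to reftests.json.
    tests = [
        {"name": "green.html", "path": "green.html", "manifest": "reftest-pass.list"},
        {"name": "red.html", "path": "red.html", "manifest": "reftest-pass.list"},
        {"name": "crash.html", "path": "crash.html", "manifest": "reftest-crash.list"},
    ]

    mp = TestManifest(strict=False)
    mp.tests = tests

    # Keep only chunk 1 of 2, keeping tests from the same manifest together.
    active = mp.active_tests(exists=False, filters=[mpf.chunk_by_manifest(1, 2)])
    print([t["name"] for t in active])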
+ test["name"] = test["path"] = test["url1"] + + mp = TestManifest(strict=False) + mp.tests = tests + + filters = [] + if options.totalChunks: + filters.append( + mpf.chunk_by_manifest(options.thisChunk, options.totalChunks) + ) + + tests = mp.active_tests(exists=False, filters=filters) + return tests + + def runSerialTests(self, manifests, options, cmdargs=None): + debuggerInfo = None + if options.debugger: + debuggerInfo = mozdebug.get_debugger_info( + options.debugger, options.debuggerArgs, options.debuggerInteractive + ) + + def run(**kwargs): + if kwargs.get("tests"): + self.lastTest = kwargs["tests"][-1]["identifier"] + if not isinstance(self.lastTest, string_types): + self.lastTest = " ".join(self.lastTest) + + status = self.runApp( + options, + manifests=manifests, + cmdargs=cmdargs, + # We generally want the JS harness or marionette + # to handle timeouts if they can. + # The default JS harness timeout is currently + # 300 seconds (default options.timeout). + # The default Marionette socket timeout is + # currently 360 seconds. + # Give the JS harness extra time to deal with + # its own timeouts and try to usually exceed + # the 360 second marionette socket timeout. + # See bug 479518 and bug 1414063. + timeout=options.timeout + 70.0, + debuggerInfo=debuggerInfo, + symbolsPath=options.symbolsPath, + **kwargs + ) + + mozleak.process_leak_log( + self.leakLogFile, + leak_thresholds=options.leakThresholds, + stack_fixer=get_stack_fixer_function( + options.utilityPath, options.symbolsPath + ), + ) + return status + + if not self.run_by_manifest: + return run() + + tests = self.getActiveTests(manifests, options) + tests_by_manifest = defaultdict(list) + ids_by_manifest = defaultdict(list) + for t in tests: + tests_by_manifest[t["manifest"]].append(t) + test_id = t["identifier"] + if not isinstance(test_id, string_types): + test_id = " ".join(test_id) + ids_by_manifest[t["manifestID"]].append(test_id) + + self.log.suite_start(ids_by_manifest, name=options.suite) + + overall = 0 + for manifest, tests in tests_by_manifest.items(): + self.log.info("Running tests in {}".format(manifest)) + status = run(tests=tests) + overall = overall or status + + self.log.suite_end(extra={"results": self.outputHandler.results}) + return overall + + def copyExtraFilesToProfile(self, options, profile): + "Copy extra files or dirs specified on the command line to the testing profile." + profileDir = profile.profile + for f in options.extraProfileFiles: + abspath = self.getFullPath(f) + if os.path.isfile(abspath): + if os.path.basename(abspath) == "user.js": + extra_prefs = mozprofile.Preferences.read_prefs(abspath) + profile.set_preferences(extra_prefs) + elif os.path.basename(abspath).endswith(".dic"): + hyphDir = os.path.join(profileDir, "hyphenation") + if not os.path.exists(hyphDir): + os.makedirs(hyphDir) + shutil.copy2(abspath, hyphDir) + else: + shutil.copy2(abspath, profileDir) + elif os.path.isdir(abspath): + dest = os.path.join(profileDir, os.path.basename(abspath)) + shutil.copytree(abspath, dest) + else: + self.log.warning( + "runreftest.py | Failed to copy %s to profile" % abspath + ) + continue + + +def run_test_harness(parser, options): + reftest = RefTest(options.suite) + parser.validate(options, reftest) + + # We have to validate options.app here for the case when the mach + # command is able to find it after argument parsing. This can happen + # when running from a tests archive. 
+ if not options.app: + parser.error("could not find the application path, --appname must be specified") + + options.app = reftest.getFullPath(options.app) + if not os.path.exists(options.app): + parser.error( + "Error: Path %(app)s doesn't exist. Are you executing " + "$objdir/_tests/reftest/runreftest.py?" % {"app": options.app} + ) + + if options.xrePath is None: + options.xrePath = os.path.dirname(options.app) + + if options.verify: + result = reftest.verifyTests(options.tests, options) + else: + result = reftest.runTests(options.tests, options) + + return result + + +if __name__ == "__main__": + parser = reftestcommandline.DesktopArgumentsParser() + options = parser.parse_args() + sys.exit(run_test_harness(parser, options)) diff --git a/layout/tools/reftest/schema.json b/layout/tools/reftest/schema.json new file mode 100644 index 0000000000..fe51488c70 --- /dev/null +++ b/layout/tools/reftest/schema.json @@ -0,0 +1 @@ +[] diff --git a/layout/tools/reftest/selftest/conftest.py b/layout/tools/reftest/selftest/conftest.py new file mode 100644 index 0000000000..74067e9787 --- /dev/null +++ b/layout/tools/reftest/selftest/conftest.py @@ -0,0 +1,149 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function + +import json +import os +from argparse import Namespace + +try: + # Python2 + from cStringIO import StringIO +except ImportError: + # Python3 + from io import StringIO + +import mozinfo +import pytest +from manifestparser import expression +from moztest.selftest.fixtures import binary, setup_test_harness # noqa + +here = os.path.abspath(os.path.dirname(__file__)) +setup_args = [False, "reftest", "reftest"] + + +@pytest.fixture(scope="module") +def normalize(): + """A function that can take a relative path and append it to the 'files' + directory which contains the data necessary to run these tests. 
+ """ + + def inner(path): + if os.path.isabs(path): + return path + return os.path.join(here, "files", path) + + return inner + + +@pytest.fixture +def parser(setup_test_harness): + setup_test_harness(*setup_args) + cmdline = pytest.importorskip("reftestcommandline") + return cmdline.DesktopArgumentsParser() + + +@pytest.fixture +def get_reftest(setup_test_harness, binary, parser): + setup_test_harness(*setup_args) + runreftest = pytest.importorskip("runreftest") + harness_root = runreftest.SCRIPT_DIRECTORY + + build = parser.build_obj + options = vars(parser.parse_args([])) + options.update( + { + "app": binary, + "focusFilterMode": "non-needs-focus", + "suite": "reftest", + } + ) + + if not os.path.isdir(build.bindir): + package_root = os.path.dirname(harness_root) + options.update( + { + "extraProfileFiles": [os.path.join(package_root, "bin", "plugins")], + "reftestExtensionPath": os.path.join(harness_root, "reftest"), + "sandboxReadWhitelist": [here, os.environ["PYTHON_TEST_TMP"]], + "utilityPath": os.path.join(package_root, "bin"), + "specialPowersExtensionPath": os.path.join( + harness_root, "specialpowers" + ), + } + ) + + if "MOZ_FETCHES_DIR" in os.environ: + options["sandboxReadWhitelist"].append(os.environ["MOZ_FETCHES_DIR"]) + else: + options.update( + { + "extraProfileFiles": [os.path.join(build.topobjdir, "dist", "plugins")], + "sandboxReadWhitelist": [build.topobjdir, build.topsrcdir], + "specialPowersExtensionPath": os.path.join( + build.distdir, "xpi-stage", "specialpowers" + ), + } + ) + + def inner(**opts): + options.update(opts) + config = Namespace(**options) + + # This is pulled from `runreftest.run_test_harness` minus some error + # checking that isn't necessary in this context. It should stay roughly + # in sync. + reftest = runreftest.RefTest(config.suite) + parser.validate(config, reftest) + + config.app = reftest.getFullPath(config.app) + assert os.path.exists(config.app) + + if config.xrePath is None: + config.xrePath = os.path.dirname(config.app) + + return reftest, config + + return inner + + +@pytest.fixture # noqa: F811 +def runtests(get_reftest, normalize): + def inner(*tests, **opts): + assert len(tests) > 0 + opts["tests"] = map(normalize, tests) + + buf = StringIO() + opts["log_raw"] = [buf] + + reftest, options = get_reftest(**opts) + result = reftest.runTests(options.tests, options) + + out = json.loads("[" + ",".join(buf.getvalue().splitlines()) + "]") + buf.close() + return result, out + + return inner + + +@pytest.fixture(autouse=True) # noqa: F811 +def skip_using_mozinfo(request, setup_test_harness): + """Gives tests the ability to skip based on values from mozinfo. 
+ + Example: + @pytest.mark.skip_mozinfo("!e10s || os == 'linux'") + def test_foo(): + pass + """ + + setup_test_harness(*setup_args) + runreftest = pytest.importorskip("runreftest") + runreftest.update_mozinfo() + + skip_mozinfo = request.node.get_marker("skip_mozinfo") + if skip_mozinfo: + value = skip_mozinfo.args[0] + if expression.parse(value, **mozinfo.info): + pytest.skip("skipped due to mozinfo match: \n{}".format(value)) diff --git a/layout/tools/reftest/selftest/files/assert.html b/layout/tools/reftest/selftest/files/assert.html new file mode 100644 index 0000000000..c9aedcf116 --- /dev/null +++ b/layout/tools/reftest/selftest/files/assert.html @@ -0,0 +1,7 @@ +<script> +const Cc = SpecialPowers.Cc; +const Ci = SpecialPowers.Ci; + +let debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2); +debug.assertion('failed assertion check', 'false', 'assert.html', 6); +</script> diff --git a/layout/tools/reftest/selftest/files/crash.html b/layout/tools/reftest/selftest/files/crash.html new file mode 100644 index 0000000000..897863024b --- /dev/null +++ b/layout/tools/reftest/selftest/files/crash.html @@ -0,0 +1,7 @@ +<script> +const Cc = SpecialPowers.Cc; +const Ci = SpecialPowers.Ci; + +let debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2); +debug.abort('crash.html', 6); +</script> diff --git a/layout/tools/reftest/selftest/files/defaults.list b/layout/tools/reftest/selftest/files/defaults.list new file mode 100644 index 0000000000..d947eb8d1d --- /dev/null +++ b/layout/tools/reftest/selftest/files/defaults.list @@ -0,0 +1,7 @@ +# test defaults +defaults pref(foo.bar,true) +== foo.html foo-ref.html + +# reset defaults +defaults +== bar.html bar-ref.html diff --git a/layout/tools/reftest/selftest/files/failure-type-interactions.list b/layout/tools/reftest/selftest/files/failure-type-interactions.list new file mode 100644 index 0000000000..c820e8f13f --- /dev/null +++ b/layout/tools/reftest/selftest/files/failure-type-interactions.list @@ -0,0 +1,11 @@ +# interactions between skip and fail +skip-if(true) fails == skip-if_fails.html ref.html +skip-if(true) fails-if(true) == skip-if_fails-if.html ref.html +skip fails == skip_fails.html ref.html +skip-if(false) fails == fails.html ref.html +fails skip-if(true) == fails_skip-if.html ref.html +fails-if(true) skip-if(true) == fails-if_skip-if.html ref.html +fails skip == fails_skip.html ref.html +fails-if(false) skip == skip.html ref.html +skip-if(true) fails skip-if(false) == skip-if-true_fails_skip-if-false ref.html +skip-if(false) fails skip-if(true) == skip-if-false_fails_skip-if-true ref.html diff --git a/layout/tools/reftest/selftest/files/green.html b/layout/tools/reftest/selftest/files/green.html new file mode 100644 index 0000000000..d1695cb8b8 --- /dev/null +++ b/layout/tools/reftest/selftest/files/green.html @@ -0,0 +1,6 @@ +<!DOCTYPE html> +<html> +<body> +<div style="color: green">Text</div> +</body> +</html> diff --git a/layout/tools/reftest/selftest/files/invalid-defaults-include.list b/layout/tools/reftest/selftest/files/invalid-defaults-include.list new file mode 100644 index 0000000000..408d3214f6 --- /dev/null +++ b/layout/tools/reftest/selftest/files/invalid-defaults-include.list @@ -0,0 +1,4 @@ +# can't use defaults prior to include +defaults pref(foo.bar,1) +== foo.html bar.html +include defaults.list diff --git a/layout/tools/reftest/selftest/files/invalid-defaults.list b/layout/tools/reftest/selftest/files/invalid-defaults.list new file mode 100644 index 0000000000..7bb8d060da --- /dev/null +++ 
b/layout/tools/reftest/selftest/files/invalid-defaults.list @@ -0,0 +1,3 @@ +# invalid tokens in defaults +defaults skip-if(true) == foo.html bar.html +== foo.html bar.html diff --git a/layout/tools/reftest/selftest/files/invalid-include.list b/layout/tools/reftest/selftest/files/invalid-include.list new file mode 100644 index 0000000000..cd1c1f0939 --- /dev/null +++ b/layout/tools/reftest/selftest/files/invalid-include.list @@ -0,0 +1,2 @@ +# non-skip items are not allowed with include +pref(foo.bar,1) include defaults.list diff --git a/layout/tools/reftest/selftest/files/leaks.log b/layout/tools/reftest/selftest/files/leaks.log new file mode 100644 index 0000000000..af832f149d --- /dev/null +++ b/layout/tools/reftest/selftest/files/leaks.log @@ -0,0 +1,73 @@ +== BloatView: ALL (cumulative) LEAK AND BLOAT STATISTICS, default process 1148 + |<----------------Class--------------->|<-----Bytes------>|<----Objects---->| + | | Per-Inst Leaked| Total Rem| + 0 |TOTAL | 19 19915|16007885 378| + 68 |CacheEntry | 208 208| 521 1| + 71 |CacheEntryHandle | 16 16| 7692 1| + 72 |CacheFile | 264 264| 521 1| + 80 |CacheFileMetadata | 176 176| 521 1| + 81 |CacheFileOutputStream | 64 64| 463 1| + 90 |CacheStorageService | 256 256| 1 1| + 97 |CancelableRunnable | 24 24| 45614 1| + 103 |ChannelEventQueue | 96 96| 996 1| + 131 |CondVar | 36 360| 822 10| + 148 |ConsoleReportCollector | 60 120| 1380 2| + 150 |ContentParent | 1712 1712| 2 1| + 188 |DataStorage | 284 852| 3 3| + 320 |HttpBaseChannel | 1368 1368| 1143 1| + 321 |HttpChannelParent | 176 176| 996 1| + 322 |HttpChannelParentListener | 48 48| 961 1| + 342 |IdlePeriod | 12 36| 166 3| + 369 |InterceptedChannelBase | 240 240| 18 1| + 389 |LoadContext | 72 144| 1023 2| + 391 |LoadInfo | 144 144| 3903 1| + 427 |Mutex | 44 1012| 14660 23| + 439 |NullPrincipalURI | 80 80| 421 1| + 468 |PBrowserParent | 312 312| 21 1| + 479 |PContentParent | 1428 1428| 2 1| + 486 |PHttpChannelParent | 24 24| 996 1| + 527 |PollableEvent | 12 12| 1 1| + 576 |ReentrantMonitor | 24 72| 6922 3| + 577 |RefCountedMonitor | 84 84| 154 1| + 583 |RequestContextService | 60 60| 1 1| + 592 |Runnable | 20 40| 178102 2| + 627 |Service | 128 128| 1 1| + 646 |SharedMemory | 16 16| 1636 1| + 675 |StringAdopt | 1 3| 17087 3| + 688 |TabParent | 976 976| 21 1| + 699 |ThirdPartyUtil | 16 16| 1 1| + 875 |ipc::MessageChannel | 208 208| 166 1| + 920 |nsAuthURLParser | 12 12| 2 1| + 974 |nsCategoryObserver | 72 72| 8 1| + 976 |nsChannelClassifier | 28 28| 918 1| +1004 |CookiePermission | 40 40| 1 1| +1005 |CookieService | 80 80| 1 1| +1010 |nsDNSService | 140 140| 1 1| +1066 |nsEffectiveTLDService | 20 20| 1 1| +1134 |nsHttpAuthCache::OriginClearObserver | 16 32| 2 2| +1135 |nsHttpChannel | 1816 1816| 1143 1| +1136 |nsHttpChannelAuthProvider | 148 148| 1012 1| +1138 |nsHttpConnectionInfo | 128 128| 1021 1| +1139 |nsHttpConnectionMgr | 304 304| 1 1| +1141 |nsHttpHandler | 544 544| 1 1| +1142 |nsHttpRequestHead | 92 92| 1190 1| +1145 |nsIDNService | 56 56| 1 1| +1146 |nsIOService | 176 176| 1 1| +1176 |nsJSPrincipals | 16 64| 12583 4| +1186 |nsLocalFile | 88 264| 13423 3| +1192 |nsMainThreadPtrHolder<T> | 20 80| 2253 4| +1222 |nsNodeWeakReference | 16 16| 919 1| +1223 |nsNotifyAddrListener | 112 112| 1 1| +1241 |PermissionManager | 136 136| 1 1| +1248 |nsPrefBranch | 76 76| 63 1| +1257 |nsProxyInfo | 72 72| 1098 1| +1265 |nsRedirectHistoryEntry | 32 32| 69 1| +1307 |nsSiteSecurityService | 56 56| 1 1| +1311 |nsSocketTransportService | 208 208| 1 1| +1313 |nsStandardURL | 196 1372| 59651 7| +1319 
|nsStreamConverterService | 48 48| 1 1| +1324 |nsStringBuffer | 8 1688| 722245 211| +1371 |nsTArray_base | 4 136| 3419841 34| +1380 |nsThread | 304 912| 165 3| +1416 |nsWeakReference | 20 180| 1388 9| +nsTraceRefcnt::DumpStatistics: 1489 entries diff --git a/layout/tools/reftest/selftest/files/red.html b/layout/tools/reftest/selftest/files/red.html new file mode 100644 index 0000000000..a9db5be4df --- /dev/null +++ b/layout/tools/reftest/selftest/files/red.html @@ -0,0 +1,6 @@ +<!DOCTYPE html> +<html> +<body> +<div style="color: red">Text</div> +</body> +</html> diff --git a/layout/tools/reftest/selftest/files/reftest-assert.list b/layout/tools/reftest/selftest/files/reftest-assert.list new file mode 100644 index 0000000000..38fd3f57aa --- /dev/null +++ b/layout/tools/reftest/selftest/files/reftest-assert.list @@ -0,0 +1 @@ +load assert.html diff --git a/layout/tools/reftest/selftest/files/reftest-crash.list b/layout/tools/reftest/selftest/files/reftest-crash.list new file mode 100644 index 0000000000..3e27bcba75 --- /dev/null +++ b/layout/tools/reftest/selftest/files/reftest-crash.list @@ -0,0 +1 @@ +load crash.html diff --git a/layout/tools/reftest/selftest/files/reftest-fail.list b/layout/tools/reftest/selftest/files/reftest-fail.list new file mode 100644 index 0000000000..c5259b6601 --- /dev/null +++ b/layout/tools/reftest/selftest/files/reftest-fail.list @@ -0,0 +1,3 @@ +== green.html red.html +!= green.html green.html +!= red.html red.html diff --git a/layout/tools/reftest/selftest/files/reftest-pass.list b/layout/tools/reftest/selftest/files/reftest-pass.list new file mode 100644 index 0000000000..51512c1202 --- /dev/null +++ b/layout/tools/reftest/selftest/files/reftest-pass.list @@ -0,0 +1,3 @@ +== green.html green.html +== red.html red.html +!= green.html red.html diff --git a/layout/tools/reftest/selftest/files/scripttest-pass.html b/layout/tools/reftest/selftest/files/scripttest-pass.html new file mode 100644 index 0000000000..e0371b3518 --- /dev/null +++ b/layout/tools/reftest/selftest/files/scripttest-pass.html @@ -0,0 +1,17 @@ +<!DOCTYPE html> +<html> +<head> +<title>scripttest-pass</title> +<script type="text/javascript"> +function getTestCases() +{ + return [ + { testPassed: (function () { return true; }), testDescription: (function () { return "passed"; }) } + ]; +} +</script> +</head> +<body> +<h1>scripttest-pass</h1> +</body> +</html> diff --git a/layout/tools/reftest/selftest/files/types.list b/layout/tools/reftest/selftest/files/types.list new file mode 100644 index 0000000000..7622564527 --- /dev/null +++ b/layout/tools/reftest/selftest/files/types.list @@ -0,0 +1,5 @@ +== green.html green.html +!= green.html red.html +load green.html +script scripttest-pass.html +print green.html green.html diff --git a/layout/tools/reftest/selftest/python.ini b/layout/tools/reftest/selftest/python.ini new file mode 100644 index 0000000000..eb4bbc7e89 --- /dev/null +++ b/layout/tools/reftest/selftest/python.ini @@ -0,0 +1,8 @@ +[DEFAULT] +subsuite=reftest +sequential=true +skip-if = python == 3 + +[test_python_manifest_parser.py] +[test_reftest_manifest_parser.py] +[test_reftest_output.py] diff --git a/layout/tools/reftest/selftest/test_python_manifest_parser.py b/layout/tools/reftest/selftest/test_python_manifest_parser.py new file mode 100644 index 0000000000..339aadfeb3 --- /dev/null +++ b/layout/tools/reftest/selftest/test_python_manifest_parser.py @@ -0,0 +1,39 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function + +import mozunit +import pytest + + +@pytest.fixture +def parse(normalize): + reftest = pytest.importorskip("reftest") + + def inner(path): + mp = reftest.ReftestManifest() + mp.load(normalize(path)) + return mp + + return inner + + +def test_parse_defaults(parse): + mp = parse("defaults.list") + assert len(mp.tests) == 4 + + for test in mp.tests: + if test["name"].startswith("foo"): + assert test["pref"] == "foo.bar,true" + else: + assert "pref" not in test + + # invalid defaults + with pytest.raises(ValueError): + parse("invalid-defaults.list") + + +if __name__ == "__main__": + mozunit.main() diff --git a/layout/tools/reftest/selftest/test_reftest_manifest_parser.py b/layout/tools/reftest/selftest/test_reftest_manifest_parser.py new file mode 100644 index 0000000000..c1b715cb28 --- /dev/null +++ b/layout/tools/reftest/selftest/test_reftest_manifest_parser.py @@ -0,0 +1,74 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function + +import mozunit +import pytest + + +@pytest.fixture +def parse(get_reftest, normalize): + output = pytest.importorskip("output") + + reftest, options = get_reftest(tests=["dummy"]) + reftest._populate_logger(options) + reftest.outputHandler = output.OutputHandler( + reftest.log, options.utilityPath, options.symbolsPath + ) + + def resolve(path): + path = normalize(path) + return "file://{}".format(path) + + def inner(*manifests): + assert len(manifests) > 0 + manifests = {m: (None, "id") for m in map(resolve, manifests)} + return reftest.getActiveTests(manifests, options) + + return inner + + +def test_parse_test_types(parse): + tests = parse("types.list") + assert tests[0]["type"] == "==" + assert tests[1]["type"] == "!=" + assert tests[2]["type"] == "load" + assert tests[3]["type"] == "script" + assert tests[4]["type"] == "print" + + +def test_parse_failure_type_interactions(parse): + """Tests interactions between skip and fails.""" + tests = parse("failure-type-interactions.list") + for t in tests: + if "skip" in t["name"]: + assert t["skip"] + else: + assert not t["skip"] + + # 0 => EXPECTED_PASS, 1 => EXPECTED_FAIL + if "fails" in t["name"]: + assert t["expected"] == 1 + else: + assert t["expected"] == 0 + + +def test_parse_invalid_manifests(parse): + # XXX We should assert that the output contains the appropriate error + # message, but we seem to be hitting an issue in pytest that is preventing + # us from capturing the Gecko output with the capfd fixture. See: + # https://github.com/pytest-dev/pytest/issues/5997 + with pytest.raises(SystemExit): + parse("invalid-defaults.list") + + with pytest.raises(SystemExit): + parse("invalid-defaults-include.list") + + with pytest.raises(SystemExit): + parse("invalid-include.list") + + +if __name__ == "__main__": + mozunit.main() diff --git a/layout/tools/reftest/selftest/test_reftest_output.py b/layout/tools/reftest/selftest/test_reftest_output.py new file mode 100644 index 0000000000..3f3d8966e0 --- /dev/null +++ b/layout/tools/reftest/selftest/test_reftest_output.py @@ -0,0 +1,164 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function + +import os + +try: + # Python2 + from cStringIO import StringIO +except ImportError: + # Python3 + from io import StringIO +from functools import partial + +import mozunit +import pytest +from moztest.selftest.output import get_mozharness_status, filter_action + +from mozharness.base.log import INFO, WARNING, ERROR +from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WARNING, TBPL_FAILURE + +here = os.path.abspath(os.path.dirname(__file__)) +get_mozharness_status = partial(get_mozharness_status, "reftest") + + +def test_output_pass(runtests): + status, lines = runtests("reftest-pass.list") + assert status == 0 + + tbpl_status, log_level, summary = get_mozharness_status(lines, status) + assert tbpl_status == TBPL_SUCCESS + assert log_level in (INFO, WARNING) + + test_status = filter_action("test_status", lines) + assert len(test_status) == 3 + assert all(t["status"] == "PASS" for t in test_status) + + test_end = filter_action("test_end", lines) + assert len(test_end) == 3 + assert all(t["status"] == "OK" for t in test_end) + + +def test_output_fail(runtests): + formatter = pytest.importorskip("output").ReftestFormatter() + + status, lines = runtests("reftest-fail.list") + assert status == 0 + + buf = StringIO() + tbpl_status, log_level, summary = get_mozharness_status( + lines, status, formatter=formatter, buf=buf + ) + + assert tbpl_status == TBPL_WARNING + assert log_level == WARNING + + test_status = filter_action("test_status", lines) + assert len(test_status) == 3 + assert all(t["status"] == "FAIL" for t in test_status) + assert all("reftest_screenshots" in t["extra"] for t in test_status) + + test_end = filter_action("test_end", lines) + assert len(test_end) == 3 + assert all(t["status"] == "OK" for t in test_end) + + # ensure screenshots were printed + formatted = buf.getvalue() + assert "REFTEST IMAGE 1" in formatted + assert "REFTEST IMAGE 2" in formatted + + +@pytest.mark.skip_mozinfo("!crashreporter") +def test_output_crash(runtests): + status, lines = runtests( + "reftest-crash.list", environment=["MOZ_CRASHREPORTER_SHUTDOWN=1"] + ) + assert status == 1 + + tbpl_status, log_level, summary = get_mozharness_status(lines, status) + assert tbpl_status == TBPL_FAILURE + assert log_level == ERROR + + crash = filter_action("crash", lines) + assert len(crash) == 1 + assert crash[0]["action"] == "crash" + assert crash[0]["signature"] + assert crash[0]["minidump_path"] + + lines = filter_action("test_end", lines) + assert len(lines) == 0 + + +@pytest.mark.skip_mozinfo("!asan") +def test_output_asan(runtests): + status, lines = runtests( + "reftest-crash.list", environment=["MOZ_CRASHREPORTER_SHUTDOWN=1"] + ) + assert status == 0 + + tbpl_status, log_level, summary = get_mozharness_status(lines, status) + assert tbpl_status == TBPL_FAILURE + assert log_level == ERROR + + crash = filter_action("crash", lines) + assert len(crash) == 0 + + process_output = filter_action("process_output", lines) + assert any("ERROR: AddressSanitizer" in l["data"] for l in process_output) + + +@pytest.mark.skip_mozinfo("!debug") +def test_output_assertion(runtests): + status, lines = runtests("reftest-assert.list") + assert status == 0 + + tbpl_status, log_level, summary = get_mozharness_status(lines, status) + assert tbpl_status == TBPL_WARNING + assert log_level == WARNING + + test_status = filter_action("test_status", lines) + 
assert len(test_status) == 1 + assert test_status[0]["status"] == "PASS" + + test_end = filter_action("test_end", lines) + assert len(test_end) == 1 + assert test_end[0]["status"] == "OK" + + assertions = filter_action("assertion_count", lines) + assert len(assertions) == 1 + assert assertions[0]["count"] == 1 + + +@pytest.mark.skip_mozinfo("!debug") +def test_output_leak(monkeypatch, runtests): + # Monkeypatch mozleak so we always process a failing leak log + # instead of the actual one. + import mozleak + + old_process_leak_log = mozleak.process_leak_log + + def process_leak_log(*args, **kwargs): + return old_process_leak_log( + os.path.join(here, "files", "leaks.log"), *args[1:], **kwargs + ) + + monkeypatch.setattr("mozleak.process_leak_log", process_leak_log) + + status, lines = runtests("reftest-pass.list") + assert status == 0 + + tbpl_status, log_level, summary = get_mozharness_status(lines, status) + assert tbpl_status == TBPL_WARNING + assert log_level == WARNING + + leaks = filter_action("mozleak_total", lines) + assert len(leaks) == 1 + assert leaks[0]["process"] == "default" + assert leaks[0]["bytes"] == 19915 + + +if __name__ == "__main__": + mozunit.main() |
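The selftests above drive the harness through the runtests fixture from conftest.py, which collects the raw structured log into a buffer, re-parses it as a list of JSON objects, and then filters by action. A self-contained sketch of that post-processing with fabricated log lines (real mozlog records carry many more fields, and filter_action here is a minimal stand-in for moztest.selftest.output.filter_action):

    import json

    raw_log = "\n".join([
        '{"action": "test_start", "test": "green.html"}',
        '{"action": "test_status", "test": "green.html", "status": "PASS"}',
        '{"action": "test_end", "test": "green.html", "status": "OK"}',
    ])

    # Same trick the runtests fixture uses: one JSON object per line -> one list.
    lines = json.loads("[" + ",".join(raw_log.splitlines()) + "]")

    def filter_action(action, lines):
        return [line for line in lines if line["action"] == action]

    test_status = filter_action("test_status", lines)
    assert all(t["status"] == "PASS" for t in test_status)
    print(test_status)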