Diffstat
46 files changed, 9172 insertions, 0 deletions
diff --git a/tools/profiler/tests/browser/browser.ini b/tools/profiler/tests/browser/browser.ini new file mode 100644 index 0000000000..afbccd1b00 --- /dev/null +++ b/tools/profiler/tests/browser/browser.ini @@ -0,0 +1,18 @@ +[DEFAULT] +skip-if = tsan # TSan times out on pretty much all of these tests +support-files = + ../shared-head.js + head.js + do_work_500ms.html + fixed_height.html + multi_frame.html + simple.html + single_frame.html + +[browser_test_feature_ipcmessages.js] +[browser_test_feature_jsallocations.js] +[browser_test_feature_nostacksampling.js] +[browser_test_feature_preferencereads.js] +[browser_test_markers_parent_process.js] +[browser_test_profile_single_frame_page_info.js] +[browser_test_profile_multi_frame_page_info.js] diff --git a/tools/profiler/tests/browser/browser_test_feature_ipcmessages.js b/tools/profiler/tests/browser/browser_test_feature_ipcmessages.js new file mode 100644 index 0000000000..b199f74198 --- /dev/null +++ b/tools/profiler/tests/browser/browser_test_feature_ipcmessages.js @@ -0,0 +1,110 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +requestLongerTimeout(10); + +async function waitForLoad() { + return SpecialPowers.spawn(gBrowser.selectedBrowser, [], () => { + return new Promise(function(resolve) { + if (content.document.readyState !== "complete") { + content.document.addEventListener("readystatechange", () => { + if (content.document.readyState === "complete") { + resolve(); + } + }); + } else { + resolve(); + } + }); + }); +} + +/** + * Test the IPCMessages feature. + */ +add_task(async function test_profile_feature_ipcmessges() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + Assert.ok( + !Services.profiler.IsActive(), + "The profiler is not currently active" + ); + + const url = BASE_URL + "simple.html"; + + info("Open a tab while profiling IPC messages."); + startProfiler({ features: ["threads", "leaf", "ipcmessages"] }); + info("Started the profiler sucessfully! Now, let's open a tab."); + + await BrowserTestUtils.withNewTab(url, async contentBrowser => { + info("We opened a tab!"); + const contentPid = await SpecialPowers.spawn( + contentBrowser, + [], + () => Services.appinfo.processID + ); + info("Now let's wait until it's fully loaded."); + await waitForLoad(); + + info( + "Check that some IPC profile markers were generated when " + + "the feature is enabled." + ); + { + const { parentThread, contentThread } = await stopProfilerAndGetThreads( + contentPid + ); + + Assert.greater( + getPayloadsOfType(parentThread, "IPC").length, + 0, + "IPC profile markers were recorded for the parent process' main " + + "thread when the IPCMessages feature was turned on." + ); + + Assert.greater( + getPayloadsOfType(contentThread, "IPC").length, + 0, + "IPC profile markers were recorded for the content process' main " + + "thread when the IPCMessages feature was turned on." + ); + } + }); + + info("Now open a tab without profiling IPC messages."); + startProfiler({ features: ["threads", "leaf"] }); + + await BrowserTestUtils.withNewTab(url, async contentBrowser => { + const contentPid = await SpecialPowers.spawn( + contentBrowser, + [], + () => Services.appinfo.processID + ); + await waitForLoad(); + + info( + "Check that no IPC profile markers were recorded when the " + + "feature is turned off." 
+ ); + { + const { parentThread, contentThread } = await stopProfilerAndGetThreads( + contentPid + ); + Assert.equal( + getPayloadsOfType(parentThread, "IPC").length, + 0, + "No IPC profile markers were recorded for the parent process' main " + + "thread when the IPCMessages feature was turned off." + ); + + Assert.equal( + getPayloadsOfType(contentThread, "IPC").length, + 0, + "No IPC profile markers were recorded for the content process' main " + + "thread when the IPCMessages feature was turned off." + ); + } + }); +}); diff --git a/tools/profiler/tests/browser/browser_test_feature_jsallocations.js b/tools/profiler/tests/browser/browser_test_feature_jsallocations.js new file mode 100644 index 0000000000..ae925e1fcd --- /dev/null +++ b/tools/profiler/tests/browser/browser_test_feature_jsallocations.js @@ -0,0 +1,81 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +requestLongerTimeout(10); + +/** + * Test the JS Allocations feature. This is done as a browser test to ensure that + * we realistically try out how the JS allocations are running. This ensures that + * we are collecting allocations for the content process and the parent process. + */ +add_task(async function test_profile_feature_jsallocations() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + Assert.ok( + !Services.profiler.IsActive(), + "The profiler is not currently active" + ); + + startProfiler({ features: ["threads", "js", "jsallocations"] }); + + const url = BASE_URL + "do_work_500ms.html"; + await BrowserTestUtils.withNewTab(url, async contentBrowser => { + const contentPid = await SpecialPowers.spawn( + contentBrowser, + [], + () => Services.appinfo.processID + ); + + // Wait 500ms so that the tab finishes executing. + await wait(500); + + // Check that we can get some allocations when the feature is turned on. + { + const { + parentThread, + contentThread, + } = await stopProfilerNowAndGetThreads(contentPid); + Assert.greater( + getPayloadsOfType(parentThread, "JS allocation").length, + 0, + "Allocations were recorded for the parent process' main thread when the " + + "JS Allocation feature was turned on." + ); + Assert.greater( + getPayloadsOfType(contentThread, "JS allocation").length, + 0, + "Allocations were recorded for the content process' main thread when the " + + "JS Allocation feature was turned on." + ); + } + + startProfiler({ features: ["threads", "js"] }); + // Now reload the tab with a clean run. + gBrowser.reload(); + await wait(500); + + // Check that no allocations were recorded, and allocation tracking was correctly + // turned off. + { + const { + parentThread, + contentThread, + } = await stopProfilerNowAndGetThreads(contentPid); + Assert.equal( + getPayloadsOfType(parentThread, "JS allocation").length, + 0, + "No allocations were recorded for the parent processes' main thread when " + + "JS allocation was not turned on." + ); + + Assert.equal( + getPayloadsOfType(contentThread, "JS allocation").length, + 0, + "No allocations were recorded for the content processes' main thread when " + + "JS allocation was not turned on." 
+ ); + } + }); +}); diff --git a/tools/profiler/tests/browser/browser_test_feature_nostacksampling.js b/tools/profiler/tests/browser/browser_test_feature_nostacksampling.js new file mode 100644 index 0000000000..0fdae614ee --- /dev/null +++ b/tools/profiler/tests/browser/browser_test_feature_nostacksampling.js @@ -0,0 +1,78 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Test the No Stack Sampling feature. + */ +add_task(async function test_profile_feature_nostacksampling() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + Assert.ok( + !Services.profiler.IsActive(), + "The profiler is not currently active" + ); + + startProfiler({ features: ["threads", "js", "nostacksampling"] }); + + const url = BASE_URL + "do_work_500ms.html"; + await BrowserTestUtils.withNewTab(url, async contentBrowser => { + const contentPid = await SpecialPowers.spawn( + contentBrowser, + [], + () => Services.appinfo.processID + ); + + // Wait 500ms so that the tab finishes executing. + await wait(500); + + // Check that we can get no stacks when the feature is turned on. + { + const { + parentThread, + contentThread, + } = await stopProfilerNowAndGetThreads(contentPid); + Assert.equal( + parentThread.samples.data.length, + 0, + "Stack samples were recorded from the parent process' main thread" + + "when the No Stack Sampling feature was turned on." + ); + Assert.equal( + contentThread.samples.data.length, + 0, + "Stack samples were recorded from the content process' main thread" + + "when the No Stack Sampling feature was turned on." + ); + } + + // Flush out any straggling allocation markers that may have not been collected + // yet by starting and stopping the profiler once. + startProfiler({ features: ["threads", "js"] }); + + // Now reload the tab with a clean run. + gBrowser.reload(); + await wait(500); + + // Check that stack samples were recorded. + { + const { parentThread, contentThread } = await stopProfilerAndGetThreads( + contentPid + ); + Assert.greater( + parentThread.samples.data.length, + 0, + "No Stack samples were recorded from the parent process' main thread" + + "when the No Stack Sampling feature was not turned on." + ); + + Assert.greater( + contentThread.samples.data.length, + 0, + "No Stack samples were recorded from the content process' main thread" + + "when the No Stack Sampling feature was not turned on." + ); + } + }); +}); diff --git a/tools/profiler/tests/browser/browser_test_feature_preferencereads.js b/tools/profiler/tests/browser/browser_test_feature_preferencereads.js new file mode 100644 index 0000000000..ff1b869a47 --- /dev/null +++ b/tools/profiler/tests/browser/browser_test_feature_preferencereads.js @@ -0,0 +1,109 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +requestLongerTimeout(10); + +function countDpiPrefReadsInThread(thread) { + let count = 0; + for (let payload of getPayloadsOfType(thread, "PreferenceRead")) { + if (payload.prefName === "layout.css.dpi") { + count++; + } + } + return count; +} + +async function waitForPaintAfterLoad() { + return SpecialPowers.spawn(gBrowser.selectedBrowser, [], () => { + return new Promise(function(resolve) { + function listener() { + if (content.document.readyState == "complete") { + content.requestAnimationFrame(() => content.setTimeout(resolve, 0)); + } + } + if (content.document.readyState != "complete") { + content.document.addEventListener("readystatechange", listener); + } else { + listener(); + } + }); + }); +} + +/** + * Test the PreferenceRead feature. + */ +add_task(async function test_profile_feature_preferencereads() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + Assert.ok( + !Services.profiler.IsActive(), + "The profiler is not currently active" + ); + + startProfiler({ features: ["threads", "leaf", "preferencereads"] }); + + const url = BASE_URL + "fixed_height.html"; + await BrowserTestUtils.withNewTab(url, async contentBrowser => { + const contentPid = await SpecialPowers.spawn( + contentBrowser, + [], + () => Services.appinfo.processID + ); + + await waitForPaintAfterLoad(); + + // Check that some PreferenceRead profile markers were generated when the + // feature is enabled. + { + const { contentThread } = await stopProfilerNowAndGetThreads(contentPid); + + const timesReadDpiInContent = countDpiPrefReadsInThread(contentThread); + + Assert.greater( + timesReadDpiInContent, + 0, + "PreferenceRead profile markers for layout.css.dpi were recorded " + + "when the PreferenceRead feature was turned on." + ); + } + + startProfiler({ features: ["threads", "leaf"] }); + // Now reload the tab with a clean run. + await ContentTask.spawn(contentBrowser, null, () => { + return new Promise(resolve => { + addEventListener("pageshow", () => resolve(), { + capturing: true, + once: true, + }); + content.location.reload(); + }); + }); + + await waitForPaintAfterLoad(); + + // Check that no PreferenceRead markers were recorded when the feature + // is turned off. + { + const { + parentThread, + contentThread, + } = await stopProfilerNowAndGetThreads(contentPid); + Assert.equal( + getPayloadsOfType(parentThread, "PreferenceRead").length, + 0, + "No PreferenceRead profile markers for layout.css.dpi were recorded " + + "when the PreferenceRead feature was turned on." + ); + + Assert.equal( + getPayloadsOfType(contentThread, "PreferenceRead").length, + 0, + "No PreferenceRead profile markers for layout.css.dpi were recorded " + + "when the PreferenceRead feature was turned on." + ); + } + }); +}); diff --git a/tools/profiler/tests/browser/browser_test_markers_parent_process.js b/tools/profiler/tests/browser/browser_test_markers_parent_process.js new file mode 100644 index 0000000000..177222c3a9 --- /dev/null +++ b/tools/profiler/tests/browser/browser_test_markers_parent_process.js @@ -0,0 +1,40 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +add_task(async function test_markers_parent_process() { + info("Test markers that are generated by the browser's parent process."); + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + info("Start the profiler in nostacksampling mode."); + startProfiler({ features: ["nostacksampling"] }); + + info("Dispatch a DOMEvent"); + window.dispatchEvent(new Event("synthetic")); + + info("Stop the profiler and get the profile."); + const profile = await stopAndGetProfile(); + + const markers = getInflatedMarkerData(profile.threads[0]); + { + const domEventStart = markers.find( + ({ phase, data }) => + phase === INTERVAL_START && data?.eventType === "synthetic" + ); + const domEventEnd = markers.find( + ({ phase, data }) => + phase === INTERVAL_END && data?.eventType === "synthetic" + ); + ok(domEventStart, "A start DOMEvent was generated"); + ok(domEventEnd, "An end DOMEvent was generated"); + ok( + domEventEnd.data.latency > 0, + "DOMEvent had a a latency value generated." + ); + ok(domEventEnd.data.type === "DOMEvent"); + ok(domEventEnd.name === "DOMEvent"); + } + // Add more marker tests. +}); diff --git a/tools/profiler/tests/browser/browser_test_profile_multi_frame_page_info.js b/tools/profiler/tests/browser/browser_test_profile_multi_frame_page_info.js new file mode 100644 index 0000000000..2549c83e39 --- /dev/null +++ b/tools/profiler/tests/browser/browser_test_profile_multi_frame_page_info.js @@ -0,0 +1,88 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +if (SpecialPowers.useRemoteSubframes) { + // Bug 1586105: these tests could time out in some extremely slow conditions, + // when fission is enabled. + // Requesting a longer timeout should make it pass. + requestLongerTimeout(2); +} + +add_task(async function test_profile_multi_frame_page_info() { + // Requesting the complete log to be able to debug Bug 1586105. + SimpleTest.requestCompleteLog(); + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + Assert.ok(!Services.profiler.IsActive()); + info("Clear the previous pages just in case we still some open tabs."); + await Services.profiler.ClearAllPages(); + + info( + "Start the profiler to test the page information with multi frame page." + ); + startProfiler(); + + info("Open a tab with multi_frame.html in it."); + // multi_frame.html embeds single_frame.html inside an iframe. + const url = BASE_URL + "multi_frame.html"; + await BrowserTestUtils.withNewTab(url, async function(contentBrowser) { + const contentPid = await SpecialPowers.spawn(contentBrowser, [], () => { + return Services.appinfo.processID; + }); + + info("Capture the profile data."); + const profile = await Services.profiler.getProfileDataAsync(); + Services.profiler.StopProfiler(); + + let foundPage = 0; + // We need to find the correct content process for that tab. + let contentProcess = profile.processes.find( + p => p.threads[0].pid == contentPid + ); + + if (!contentProcess) { + throw new Error( + `Could not find the content process with given pid: ${contentPid}` + ); + } + + info( + "Check if the captured pages are the ones with correct values we created." 
+ ); + + let parentPage; + for (const page of contentProcess.pages) { + // Parent page + if (page.url == url) { + Assert.equal(page.url, url); + Assert.equal(typeof page.browsingContextID, "number"); + Assert.equal(typeof page.innerWindowID, "number"); + // Top level document will have no embedder. + Assert.equal(page.embedderInnerWindowID, 0); + parentPage = page; + foundPage++; + break; + } + } + + Assert.notEqual(typeof parentPage, "undefined"); + + for (const page of contentProcess.pages) { + // Child page (iframe) + if (page.url == BASE_URL + "single_frame.html") { + Assert.equal(page.url, BASE_URL + "single_frame.html"); + Assert.equal(typeof page.browsingContextID, "number"); + Assert.equal(typeof page.innerWindowID, "number"); + Assert.equal(typeof page.embedderInnerWindowID, "number"); + Assert.notEqual(typeof parentPage, "undefined"); + Assert.equal(page.embedderInnerWindowID, parentPage.innerWindowID); + foundPage++; + break; + } + } + + Assert.equal(foundPage, 2); + }); +}); diff --git a/tools/profiler/tests/browser/browser_test_profile_single_frame_page_info.js b/tools/profiler/tests/browser/browser_test_profile_single_frame_page_info.js new file mode 100644 index 0000000000..4acd6df51d --- /dev/null +++ b/tools/profiler/tests/browser/browser_test_profile_single_frame_page_info.js @@ -0,0 +1,67 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +if (SpecialPowers.useRemoteSubframes) { + // Bug 1586105: these tests could time out in some extremely slow conditions, + // when fission is enabled. + // Requesting a longer timeout should make it pass. + requestLongerTimeout(2); +} + +add_task(async function test_profile_single_frame_page_info() { + // Requesting the complete log to be able to debug Bug 1586105. + SimpleTest.requestCompleteLog(); + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + Assert.ok(!Services.profiler.IsActive()); + info("Clear the previous pages just in case we still some open tabs."); + await Services.profiler.ClearAllPages(); + + info( + "Start the profiler to test the page information with single frame page." + ); + startProfiler(); + + info("Open a tab with single_frame.html in it."); + const url = BASE_URL + "single_frame.html"; + await BrowserTestUtils.withNewTab(url, async function(contentBrowser) { + const contentPid = await SpecialPowers.spawn(contentBrowser, [], () => { + return Services.appinfo.processID; + }); + + info("Capture the profile data."); + const profile = await Services.profiler.getProfileDataAsync(); + Services.profiler.StopProfiler(); + + let pageFound = false; + // We need to find the correct content process for that tab. + let contentProcess = profile.processes.find( + p => p.threads[0].pid == contentPid + ); + + if (!contentProcess) { + throw new Error( + `Could not find the content process with given pid: ${contentPid}` + ); + } + + info( + "Check if the captured page is the one with correct values we created." + ); + + for (const page of contentProcess.pages) { + if (page.url == url) { + Assert.equal(page.url, url); + Assert.equal(typeof page.browsingContextID, "number"); + Assert.equal(typeof page.innerWindowID, "number"); + // Top level document will have no embedder. 
+ Assert.equal(page.embedderInnerWindowID, 0); + pageFound = true; + break; + } + } + Assert.equal(pageFound, true); + }); +}); diff --git a/tools/profiler/tests/browser/do_work_500ms.html b/tools/profiler/tests/browser/do_work_500ms.html new file mode 100644 index 0000000000..9713a80671 --- /dev/null +++ b/tools/profiler/tests/browser/do_work_500ms.html @@ -0,0 +1,41 @@ +<!DOCTYPE html> +<html> +<head> + <meta charset="utf-8"> + <title>Do some work for 500ms</title> + <script> + const milliseconds = 500; + const millisecondsPerBatch = 10; + const end = Date.now() + milliseconds; + window.total = 0; + let i = 0; + + /** + * Do work for a set number of milliseconds, but only do the work in batches + * so the browser does not get unresponsive. + */ + function doWork() { + const batchEnd = Date.now() + millisecondsPerBatch; + // Do some work for a set amount of time. + while (Date.now() < end) { + // Do some kind of work that is non-deterministic to guard against optimizations. + window.total += Math.random(); + i++; + + // Check if a batch is done yet. + if (Date.now() > batchEnd) { + // Defer the rest of the work into a micro task. Keep on doing this until + // the total milliseconds have elapsed. + setTimeout(doWork, 0); + return; + } + } + } + + doWork(); + </script> +</head> +<body> + Do some work for 500ms. +</body> +</html> diff --git a/tools/profiler/tests/browser/fixed_height.html b/tools/profiler/tests/browser/fixed_height.html new file mode 100644 index 0000000000..7d21f3b746 --- /dev/null +++ b/tools/profiler/tests/browser/fixed_height.html @@ -0,0 +1,18 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta charset="utf-8"/> + <!-- + Using an absolute value here should invoke Firefox to get + the layout.css.dpi preference. + --> + <style type="text/css"> + div.fixed_height { + height: 15in; + } + </style> + </head> + <body> + <div class="fixed_height">Testing</div> + </body> +</html> diff --git a/tools/profiler/tests/browser/head.js b/tools/profiler/tests/browser/head.js new file mode 100644 index 0000000000..eebcf4d383 --- /dev/null +++ b/tools/profiler/tests/browser/head.js @@ -0,0 +1,70 @@ +/* import-globals-from ../shared-head.js */ + +Services.scriptloader.loadSubScript( + "chrome://mochitests/content/browser/tools/profiler/tests/browser/shared-head.js", + this +); + +const { BrowserTestUtils } = ChromeUtils.import( + "resource://testing-common/BrowserTestUtils.jsm" +); + +const BASE_URL = "http://example.com/browser/tools/profiler/tests/browser/"; + +registerCleanupFunction(() => { + if (Services.profiler.IsActive()) { + info( + "The profiler was found to still be running at the end of the test, which means that some error likely occured. Let's stop it to prevent issues with following tests!" + ); + Services.profiler.StopProfiler(); + } +}); + +/** + * This is a helper function that will stop the profiler of the browser running + * with PID contentPid. + * This happens immediately, without waiting for any sampling to happen or + * finish. Use stopProfilerAndGetThreads (without "Now") below instead to wait + * for samples before stopping. 
+ * + * @param {number} contentPid + * @returns {Promise} + */ +async function stopProfilerNowAndGetThreads(contentPid) { + const profile = await Services.profiler.getProfileDataAsync(); + Services.profiler.StopProfiler(); + + const parentThread = profile.threads[0]; + const contentProcess = profile.processes.find( + p => p.threads[0].pid == contentPid + ); + if (!contentProcess) { + throw new Error("Could not find the content process."); + } + const contentThread = contentProcess.threads[0]; + + if (!parentThread) { + throw new Error("The parent thread was not found in the profile."); + } + + if (!contentThread) { + throw new Error("The content thread was not found in the profile."); + } + + return { parentThread, contentThread }; +} + +/** + * This is a helper function that will stop the profiler of the browser running + * with PID contentPid. + * As opposed to stopProfilerNowAndGetThreads (with "Now") above, the profiler + * in that PID will not stop until there is at least one periodic sample taken. + * + * @param {number} contentPid + * @returns {Promise} + */ +async function stopProfilerAndGetThreads(contentPid) { + await Services.profiler.waitOnePeriodicSampling(); + + return stopProfilerNowAndGetThreads(contentPid); +} diff --git a/tools/profiler/tests/browser/multi_frame.html b/tools/profiler/tests/browser/multi_frame.html new file mode 100644 index 0000000000..b2efcedd50 --- /dev/null +++ b/tools/profiler/tests/browser/multi_frame.html @@ -0,0 +1,11 @@ +<!DOCTYPE html> +<html> +<head> + <meta charset="utf-8"> + <title>Multi Frame</title> +</head> +<body> + Multi Frame + <iframe src="single_frame.html"></iframe> +</body> +</html> diff --git a/tools/profiler/tests/browser/simple.html b/tools/profiler/tests/browser/simple.html new file mode 100644 index 0000000000..f7c32d02c3 --- /dev/null +++ b/tools/profiler/tests/browser/simple.html @@ -0,0 +1,9 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta charset="utf-8"/> + </head> + <body> + Testing + </body> +</html> diff --git a/tools/profiler/tests/browser/single_frame.html b/tools/profiler/tests/browser/single_frame.html new file mode 100644 index 0000000000..ebdfc41da2 --- /dev/null +++ b/tools/profiler/tests/browser/single_frame.html @@ -0,0 +1,10 @@ +<!DOCTYPE html> +<html> +<head> + <meta charset="utf-8"> + <title>Single Frame</title> +</head> +<body> + Single Frame +</body> +</html> diff --git a/tools/profiler/tests/chrome/chrome.ini b/tools/profiler/tests/chrome/chrome.ini new file mode 100644 index 0000000000..76c0b2eaf1 --- /dev/null +++ b/tools/profiler/tests/chrome/chrome.ini @@ -0,0 +1,7 @@ +[DEFAULT] +support-files=profiler_test_utils.js + +[test_profile_worker_bug_1428076.html] +skip-if = os == 'android' && processor == 'arm' # Bug 1541291 +[test_profile_worker.html] +skip-if = os == 'android' && processor == 'arm' # Bug 1541291 diff --git a/tools/profiler/tests/chrome/profiler_test_utils.js b/tools/profiler/tests/chrome/profiler_test_utils.js new file mode 100644 index 0000000000..b89a25995a --- /dev/null +++ b/tools/profiler/tests/chrome/profiler_test_utils.js @@ -0,0 +1,64 @@ +"use strict"; + +(function() { + const { Services } = ChromeUtils.import( + "resource://gre/modules/Services.jsm" + ); + + function startProfiler(settings) { + Services.profiler.StartProfiler( + settings.entries, + settings.interval, + settings.features, + settings.threads, + 0, + settings.duration + ); + + info("Profiler has started"); + } + + function getProfile() { + const profile = Services.profiler.getProfileData(); + info( + "We got a 
profile, run the mochitest with `--keep-open true` to see the logged profile in the Web Console." + ); + + // Run the mochitest with `--keep-open true` to see the logged profile in the + // Web console. + console.log(profile); + + return profile; + } + + function stopProfiler() { + Services.profiler.StopProfiler(); + info("Profiler has stopped"); + } + + function end(error) { + if (error) { + ok(false, `We got an error: ${error}`); + } else { + ok(true, "We ran the whole process"); + } + SimpleTest.finish(); + } + + async function runTest(settings, workload) { + SimpleTest.waitForExplicitFinish(); + try { + await startProfiler(settings); + await workload(); + await getProfile(); + await stopProfiler(); + await end(); + } catch (e) { + // By catching and handling the error, we're being nice to mochitest + // runners: instead of waiting for the timeout, we fail right away. + await end(e); + } + } + + window.runTest = runTest; +})(); diff --git a/tools/profiler/tests/chrome/test_profile_worker.html b/tools/profiler/tests/chrome/test_profile_worker.html new file mode 100644 index 0000000000..d32d36bb41 --- /dev/null +++ b/tools/profiler/tests/chrome/test_profile_worker.html @@ -0,0 +1,66 @@ +<!DOCTYPE HTML> +<html> +<!-- +https://bugzilla.mozilla.org/show_bug.cgi?id=1428076 +--> +<head> + <meta charset="utf-8"> + <title>Test for Bug 1428076</title> + <link rel="stylesheet" type="text/css" href="chrome://global/skin"/> + <link rel="stylesheet" type="text/css" href="chrome://mochikit/content/tests/SimpleTest/test.css"/> +</head> +<body> +<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1428076">Mozilla Bug 1428076</a> + +<script src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"></script> +<script type="application/javascript" src="profiler_test_utils.js"></script> +<script type="application/javascript"> +/* globals runTest */ + +"use strict"; + +const settings = { + entries: 1000000, // 9MB + interval: 1, // ms + features: ["js", "threads", "leaf", "stackwalk"], + threads: ["GeckoMain", "Compositor", "Worker"], // most common combination +}; + +const workerCode = ` + console.log('hello world'); + setTimeout(() => postMessage('message from worker'), 50); +`; + +function startWorker() { + // We use a Blob for the worker content to avoid an external JS file, and data + // URLs seem to be blocked in a chrome environment. + const workerContent = new Blob( + [ workerCode ], + { type: "application/javascript" } + ); + const blobURL = URL.createObjectURL(workerContent); + + // We start a worker and then terminate it right away to trigger our bug. 
+ info("Starting the worker..."); + const myWorker = new Worker(blobURL); + return { worker: myWorker, url: blobURL }; +} + +function workload() { + const { worker, url } = startWorker(); + + return new Promise(resolve => { + worker.onmessage = () => { + info("Got a message, terminating the worker."); + worker.terminate(); + URL.revokeObjectURL(url); + resolve(); + }; + }); +} + +runTest(settings, workload); + +</script> +</body> +</html> diff --git a/tools/profiler/tests/chrome/test_profile_worker_bug_1428076.html b/tools/profiler/tests/chrome/test_profile_worker_bug_1428076.html new file mode 100644 index 0000000000..30245b5b46 --- /dev/null +++ b/tools/profiler/tests/chrome/test_profile_worker_bug_1428076.html @@ -0,0 +1,58 @@ +<!DOCTYPE HTML> +<html> +<!-- +https://bugzilla.mozilla.org/show_bug.cgi?id=1428076 +--> +<head> + <meta charset="utf-8"> + <title>Test for Bug 1428076</title> + <link rel="stylesheet" type="text/css" href="chrome://global/skin"/> + <link rel="stylesheet" type="text/css" href="chrome://mochikit/content/tests/SimpleTest/test.css"/> +</head> +<body> +<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1428076">Mozilla Bug 1428076</a> + +<script src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"></script> +<script type="application/javascript" src="profiler_test_utils.js"></script> +<script type="application/javascript"> +/** Test for Bug 1428076 **/ + +/* globals runTest */ + +"use strict"; + +const settings = { + entries: 1000000, // 9MB + interval: 1, // ms + features: ["js", "threads", "leaf", "stackwalk"], + threads: ["GeckoMain", "Compositor", "Worker"], // most common combination +}; + +function workload() { + // We use a Blob for the worker content to avoid an external JS file, and data + // URLs seem to be blocked in a chrome environment. + const workerContent = new Blob( + [ "console.log('hello world!')" ], + { type: "application/javascript" } + ); + const blobURL = URL.createObjectURL(workerContent); + + // We start a worker and then terminate it right away to trigger our bug. + info("Starting the worker, and terminate it right away."); + const myWorker = new Worker(blobURL); + myWorker.terminate(); + + URL.revokeObjectURL(blobURL); + + // We're deferring some little time so that the worker has the time to be + // properly cleaned up and the profiler actually saves the worker data. + return new Promise(resolve => { + setTimeout(resolve, 50); + }); +} + +runTest(settings, workload); + +</script> +</body> +</html> diff --git a/tools/profiler/tests/gtest/GeckoProfiler.cpp b/tools/profiler/tests/gtest/GeckoProfiler.cpp new file mode 100644 index 0000000000..bacf34e3d5 --- /dev/null +++ b/tools/profiler/tests/gtest/GeckoProfiler.cpp @@ -0,0 +1,2284 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This file tests a lot of the profiler_*() functions in GeckoProfiler.h. +// Most of the tests just check that nothing untoward (e.g. crashes, deadlocks) +// happens when calling these functions. They don't do much inspection of +// profiler internals. 
+ +#include "GeckoProfiler.h" +#include "mozilla/ProfilerMarkerTypes.h" +#include "mozilla/ProfilerMarkers.h" +#include "platform.h" +#include "ProfileBuffer.h" + +#include "js/Initialization.h" +#include "js/Printf.h" +#include "jsapi.h" +#include "json/json.h" +#include "mozilla/Atomics.h" +#include "mozilla/BlocksRingBuffer.h" +#include "mozilla/ProfileBufferEntrySerializationGeckoExtensions.h" +#include "mozilla/ProfileJSONWriter.h" +#include "mozilla/UniquePtrExtensions.h" +#include "mozilla/net/HttpBaseChannel.h" +#include "nsIThread.h" +#include "nsThreadUtils.h" + +#include "gtest/gtest.h" + +#include <cstring> +#include <set> +#include <thread> + +// Note: profiler_init() has already been called in XRE_main(), so we can't +// test it here. Likewise for profiler_shutdown(), and AutoProfilerInit +// (which is just an RAII wrapper for profiler_init() and profiler_shutdown()). + +using namespace mozilla; + +TEST(BaseProfiler, BlocksRingBuffer) +{ + constexpr uint32_t MBSize = 256; + uint8_t buffer[MBSize * 3]; + for (size_t i = 0; i < MBSize * 3; ++i) { + buffer[i] = uint8_t('A' + i); + } + BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex, + &buffer[MBSize], MakePowerOfTwo32<MBSize>()); + + { + nsCString cs("nsCString"_ns); + nsString s(u"nsString"_ns); + nsAutoCString acs("nsAutoCString"_ns); + nsAutoString as(u"nsAutoString"_ns); + nsAutoCStringN<8> acs8("nsAutoCStringN"_ns); + nsAutoStringN<8> as8(u"nsAutoStringN"_ns); + JS::UniqueChars jsuc = JS_smprintf("%s", "JS::UniqueChars"); + + rb.PutObjects(cs, s, acs, as, acs8, as8, jsuc); + } + + rb.ReadEach([](ProfileBufferEntryReader& aER) { + ASSERT_EQ(aER.ReadObject<nsCString>(), "nsCString"_ns); + ASSERT_EQ(aER.ReadObject<nsString>(), u"nsString"_ns); + ASSERT_EQ(aER.ReadObject<nsAutoCString>(), "nsAutoCString"_ns); + ASSERT_EQ(aER.ReadObject<nsAutoString>(), u"nsAutoString"_ns); + ASSERT_EQ(aER.ReadObject<nsAutoCStringN<8>>(), "nsAutoCStringN"_ns); + ASSERT_EQ(aER.ReadObject<nsAutoStringN<8>>(), u"nsAutoStringN"_ns); + auto jsuc2 = aER.ReadObject<JS::UniqueChars>(); + ASSERT_TRUE(!!jsuc2); + ASSERT_TRUE(strcmp(jsuc2.get(), "JS::UniqueChars") == 0); + }); + + // Everything around the sub-buffer should be unchanged. + for (size_t i = 0; i < MBSize; ++i) { + ASSERT_EQ(buffer[i], uint8_t('A' + i)); + } + for (size_t i = MBSize * 2; i < MBSize * 3; ++i) { + ASSERT_EQ(buffer[i], uint8_t('A' + i)); + } +} + +// Common JSON checks. + +// Does the GETTER return a non-null TYPE? (Non-critical) +#define EXPECT_HAS_JSON(GETTER, TYPE) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).is##TYPE()) { \ + EXPECT_TRUE((GETTER).is##TYPE()) \ + << #GETTER " didn't return type " #TYPE; \ + } \ + } while (false) + +// Does the GETTER return a non-null TYPE? (Critical) +#define ASSERT_HAS_JSON(GETTER, TYPE) \ + do { \ + ASSERT_FALSE((GETTER).isNull()); \ + ASSERT_TRUE((GETTER).is##TYPE()); \ + } while (false) + +// Does the GETTER return a non-null TYPE? (Critical) +// If yes, store the value into VARIABLE. +#define GET_JSON(VARIABLE, GETTER, TYPE) \ + ASSERT_HAS_JSON(GETTER, TYPE); \ + const Json::Value& VARIABLE = (GETTER) + +// Checks that the GETTER's value is present, is of the expected TYPE, and has +// the expected VALUE. 
(Non-critical) +#define EXPECT_EQ_JSON(GETTER, TYPE, VALUE) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).is##TYPE()) { \ + EXPECT_TRUE((GETTER).is##TYPE()) \ + << #GETTER " didn't return type " #TYPE; \ + } else { \ + EXPECT_EQ((GETTER).as##TYPE(), (VALUE)); \ + } \ + } while (false) + +// Checks that the GETTER's value is present, and is a valid index into the +// STRINGTABLE array, pointing at the expected STRING. +#define EXPECT_EQ_STRINGTABLE(GETTER, STRINGTABLE, STRING) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).isUInt()) { \ + EXPECT_TRUE((GETTER).isUInt()) << #GETTER " didn't return an index"; \ + } else { \ + EXPECT_LT((GETTER).asUInt(), (STRINGTABLE).size()); \ + EXPECT_EQ_JSON((STRINGTABLE)[(GETTER).asUInt()], String, (STRING)); \ + } \ + } while (false) + +#define EXPECT_JSON_ARRAY_CONTAINS(GETTER, TYPE, VALUE) \ + do { \ + if ((GETTER).isNull()) { \ + EXPECT_FALSE((GETTER).isNull()) << #GETTER " doesn't exist or is null"; \ + } else if (!(GETTER).isArray()) { \ + EXPECT_TRUE((GETTER).is##TYPE()) << #GETTER " is not an array"; \ + } else if (const Json::ArrayIndex size = (GETTER).size(); size == 0u) { \ + EXPECT_NE(size, 0u) << #GETTER " is an empty array"; \ + } else { \ + bool found = false; \ + for (Json::ArrayIndex i = 0; i < size; ++i) { \ + if (!(GETTER)[i].is##TYPE()) { \ + EXPECT_TRUE((GETTER)[i].is##TYPE()) \ + << #GETTER "[" << i << "] is not " #TYPE; \ + break; \ + } \ + if ((GETTER)[i].as##TYPE() == (VALUE)) { \ + found = true; \ + break; \ + } \ + } \ + EXPECT_TRUE(found) << #GETTER " doesn't contain " #VALUE; \ + } \ + } while (false) + +// Check that the given process root contains all the expected properties. 
+static void JSONRootCheck(const Json::Value& aRoot, + bool aWithMainThread = true) { + ASSERT_TRUE(aRoot.isObject()); + + EXPECT_HAS_JSON(aRoot["libs"], Array); + + GET_JSON(meta, aRoot["meta"], Object); + EXPECT_HAS_JSON(meta["version"], UInt); + EXPECT_HAS_JSON(meta["startTime"], Double); + + EXPECT_HAS_JSON(aRoot["pages"], Array); + + EXPECT_HAS_JSON(aRoot["profilerOverhead"], Object); + + GET_JSON(threads, aRoot["threads"], Array); + const Json::ArrayIndex threadCount = threads.size(); + for (Json::ArrayIndex i = 0; i < threadCount; ++i) { + GET_JSON(thread, threads[i], Object); + EXPECT_HAS_JSON(thread["processType"], String); + EXPECT_HAS_JSON(thread["name"], String); + EXPECT_HAS_JSON(thread["registerTime"], Double); + EXPECT_HAS_JSON(thread["samples"], Object); + EXPECT_HAS_JSON(thread["markers"], Object); + EXPECT_HAS_JSON(thread["pid"], UInt); + EXPECT_HAS_JSON(thread["tid"], UInt); + EXPECT_HAS_JSON(thread["stackTable"], Object); + EXPECT_HAS_JSON(thread["frameTable"], Object); + EXPECT_HAS_JSON(thread["stringTable"], Array); + } + + if (aWithMainThread) { + ASSERT_GT(threadCount, 0u); + GET_JSON(thread0, threads[0], Object); + EXPECT_EQ_JSON(thread0["name"], String, "GeckoMain"); + } + + EXPECT_HAS_JSON(aRoot["pausedRanges"], Array); + + const Json::Value& processes = aRoot["processes"]; + if (!processes.isNull()) { + ASSERT_TRUE(processes.isArray()); + const Json::ArrayIndex processCount = processes.size(); + for (Json::ArrayIndex i = 0; i < processCount; ++i) { + GET_JSON(process, processes[i], Object); + JSONRootCheck(process, aWithMainThread); + } + } +} + +// Check that various expected top properties are in the JSON, and then call the +// provided `aJSONCheckFunction` with the JSON root object. +template <typename JSONCheckFunction> +void JSONOutputCheck(const char* aOutput, + JSONCheckFunction&& aJSONCheckFunction) { + ASSERT_NE(aOutput, nullptr); + + // Extract JSON. 
+ Json::Value parsedRoot; + Json::CharReaderBuilder builder; + const std::unique_ptr<Json::CharReader> reader(builder.newCharReader()); + ASSERT_TRUE( + reader->parse(aOutput, strchr(aOutput, '\0'), &parsedRoot, nullptr)); + + JSONRootCheck(parsedRoot); + + std::forward<JSONCheckFunction>(aJSONCheckFunction)(parsedRoot); +} + +typedef Vector<const char*> StrVec; + +static void InactiveFeaturesAndParamsCheck() { + int entries; + Maybe<double> duration; + double interval; + uint32_t features; + StrVec filters; + uint64_t activeBrowsingContextID; + + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::NativeAllocations)); + + profiler_get_start_params(&entries, &duration, &interval, &features, &filters, + &activeBrowsingContextID); + + ASSERT_TRUE(entries == 0); + ASSERT_TRUE(duration == Nothing()); + ASSERT_TRUE(interval == 0); + ASSERT_TRUE(features == 0); + ASSERT_TRUE(filters.empty()); + ASSERT_TRUE(activeBrowsingContextID == 0); +} + +static void ActiveParamsCheck(int aEntries, double aInterval, + uint32_t aFeatures, const char** aFilters, + size_t aFiltersLen, + uint64_t aActiveBrowsingContextID, + const Maybe<double>& aDuration = Nothing()) { + int entries; + Maybe<double> duration; + double interval; + uint32_t features; + StrVec filters; + uint64_t activeBrowsingContextID; + + profiler_get_start_params(&entries, &duration, &interval, &features, &filters, + &activeBrowsingContextID); + + ASSERT_TRUE(entries == aEntries); + ASSERT_TRUE(duration == aDuration); + ASSERT_TRUE(interval == aInterval); + ASSERT_TRUE(features == aFeatures); + ASSERT_TRUE(filters.length() == aFiltersLen); + ASSERT_TRUE(activeBrowsingContextID == aActiveBrowsingContextID); + for (size_t i = 0; i < aFiltersLen; i++) { + ASSERT_TRUE(strcmp(filters[i], aFilters[i]) == 0); + } +} + +TEST(GeckoProfiler, Utilities) +{ + // We'll assume that this test runs in the main thread (which should be true + // when called from the `main` function). + const int mainThreadId = profiler_current_thread_id(); + + MOZ_RELEASE_ASSERT(profiler_main_thread_id() == mainThreadId); + MOZ_RELEASE_ASSERT(profiler_is_main_thread()); + + std::thread testThread([&]() { + const int testThreadId = profiler_current_thread_id(); + MOZ_RELEASE_ASSERT(testThreadId != mainThreadId); + + MOZ_RELEASE_ASSERT(profiler_main_thread_id() != testThreadId); + MOZ_RELEASE_ASSERT(!profiler_is_main_thread()); + }); + testThread.join(); +} + +TEST(GeckoProfiler, FeaturesAndParams) +{ + InactiveFeaturesAndParamsCheck(); + + // Try a couple of features and filters. + { + uint32_t features = ProfilerFeature::JS | ProfilerFeature::Threads; + const char* filters[] = {"GeckoMain", "Compositor"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 100, + Some(PROFILER_DEFAULT_DURATION)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 100, + Some(PROFILER_DEFAULT_DURATION)); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } + + // Try some different features and filters. 
+ { + uint32_t features = + ProfilerFeature::MainThreadIO | ProfilerFeature::IPCMessages; + const char* filters[] = {"GeckoMain", "Foo", "Bar"}; + + // Testing with some arbitrary buffer size (as could be provided by + // external code), which we convert to the appropriate power of 2. + profiler_start(PowerOfTwo32(999999), 3, features, filters, + MOZ_ARRAY_LENGTH(filters), 123, Some(25.0)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::IPCMessages)); + + // Profiler::Threads is added because filters has multiple entries. + ActiveParamsCheck(PowerOfTwo32(999999).Value(), 3, + features | ProfilerFeature::Threads, filters, + MOZ_ARRAY_LENGTH(filters), 123, Some(25.0)); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } + + // Try with no duration + { + uint32_t features = + ProfilerFeature::MainThreadIO | ProfilerFeature::IPCMessages; + const char* filters[] = {"GeckoMain", "Foo", "Bar"}; + + profiler_start(PowerOfTwo32(999999), 3, features, filters, + MOZ_ARRAY_LENGTH(filters), 0, Nothing()); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::IPCMessages)); + + // Profiler::Threads is added because filters has multiple entries. + ActiveParamsCheck(PowerOfTwo32(999999).Value(), 3, + features | ProfilerFeature::Threads, filters, + MOZ_ARRAY_LENGTH(filters), 0, Nothing()); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } + + // Try all supported features, and filters that match all threads. + { + uint32_t availableFeatures = profiler_get_available_features(); + const char* filters[] = {""}; + + profiler_start(PowerOfTwo32(88888), 10, availableFeatures, filters, + MOZ_ARRAY_LENGTH(filters), 0, Some(15.0)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PowerOfTwo32(88888).Value(), 10, availableFeatures, + filters, MOZ_ARRAY_LENGTH(filters), 0, Some(15.0)); + + // Don't call profiler_stop() here. + } + + // Try no features, and filters that match no threads. + { + uint32_t features = 0; + const char* filters[] = {"NoThreadWillMatchThis"}; + + // Second profiler_start() call in a row without an intervening + // profiler_stop(); this will do an implicit profiler_stop() and restart. + profiler_start(PowerOfTwo32(0), 0, features, filters, + MOZ_ARRAY_LENGTH(filters), 0, Some(0.0)); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::IPCMessages)); + + // Entries and intervals go to defaults if 0 is specified. + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, + features | ProfilerFeature::Threads, filters, + MOZ_ARRAY_LENGTH(filters), 0, Nothing()); + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + + // These calls are no-ops. 
+ profiler_stop(); + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } +} + +TEST(GeckoProfiler, EnsureStarted) +{ + InactiveFeaturesAndParamsCheck(); + + uint32_t features = ProfilerFeature::JS | ProfilerFeature::Threads; + const char* filters[] = {"GeckoMain", "Compositor"}; + { + // Inactive -> Active + profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0, + Some(PROFILER_DEFAULT_DURATION)); + + ActiveParamsCheck( + PROFILER_DEFAULT_ENTRIES.Value(), PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0, Some(PROFILER_DEFAULT_DURATION)); + } + + { + // Active -> Active with same settings + + Maybe<ProfilerBufferInfo> info0 = profiler_get_buffer_info(); + ASSERT_TRUE(info0->mRangeEnd > 0); + + // First, write some samples into the buffer. + PR_Sleep(PR_MillisecondsToInterval(500)); + + Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info(); + ASSERT_TRUE(info1->mRangeEnd > info0->mRangeEnd); + + // Call profiler_ensure_started with the same settings as before. + // This operation must not clear our buffer! + profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0, + Some(PROFILER_DEFAULT_DURATION)); + + ActiveParamsCheck( + PROFILER_DEFAULT_ENTRIES.Value(), PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0, Some(PROFILER_DEFAULT_DURATION)); + + // Check that our position in the buffer stayed the same or advanced, but + // not by much, and the range-start after profiler_ensure_started shouldn't + // have passed the range-end before. + Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info2->mRangeEnd >= info1->mRangeEnd); + ASSERT_TRUE(info2->mRangeEnd - info1->mRangeEnd < + info1->mRangeEnd - info0->mRangeEnd); + ASSERT_TRUE(info2->mRangeStart < info1->mRangeEnd); + } + + { + // Active -> Active with *different* settings + + Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info(); + + // Call profiler_ensure_started with a different feature set than the one + // it's currently running with. This is supposed to stop and restart the + // profiler, thereby discarding the buffer contents. + uint32_t differentFeatures = features | ProfilerFeature::Leaf; + profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + differentFeatures, filters, + MOZ_ARRAY_LENGTH(filters), 0); + + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, differentFeatures, filters, + MOZ_ARRAY_LENGTH(filters), 0); + + // Check the the buffer was cleared, so its range-start should be at/after + // its range-end before. + Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info2->mRangeStart >= info1->mRangeEnd); + } + + { + // Active -> Inactive + + profiler_stop(); + + InactiveFeaturesAndParamsCheck(); + } +} + +TEST(GeckoProfiler, MultiRegistration) +{ + // This whole test only checks that function calls don't crash, they don't + // actually verify that threads get profiled or not. 
+ char top; + profiler_register_thread("Main thread again", &top); + + { + std::thread thread([]() { + char top; + profiler_register_thread("thread, no unreg", &top); + }); + thread.join(); + } + + { + std::thread thread([]() { profiler_unregister_thread(); }); + thread.join(); + } + + { + std::thread thread([]() { + char top; + profiler_register_thread("thread 1st", &top); + profiler_unregister_thread(); + profiler_register_thread("thread 2nd", &top); + profiler_unregister_thread(); + }); + thread.join(); + } + + { + std::thread thread([]() { + char top; + profiler_register_thread("thread once", &top); + profiler_register_thread("thread again", &top); + profiler_unregister_thread(); + }); + thread.join(); + } + + { + std::thread thread([]() { + char top; + profiler_register_thread("thread to unreg twice", &top); + profiler_unregister_thread(); + profiler_unregister_thread(); + }); + thread.join(); + } +} + +TEST(GeckoProfiler, DifferentThreads) +{ + InactiveFeaturesAndParamsCheck(); + + nsCOMPtr<nsIThread> thread; + nsresult rv = NS_NewNamedThread("GeckoProfGTest", getter_AddRefs(thread)); + ASSERT_TRUE(NS_SUCCEEDED(rv)); + + // Control the profiler on a background thread and verify flags on the + // main thread. + { + uint32_t features = ProfilerFeature::JS | ProfilerFeature::Threads; + const char* filters[] = {"GeckoMain", "Compositor"}; + + thread->Dispatch( + NS_NewRunnableFunction("GeckoProfiler_DifferentThreads_Test::TestBody", + [&]() { + profiler_start(PROFILER_DEFAULT_ENTRIES, + PROFILER_DEFAULT_INTERVAL, + features, filters, + MOZ_ARRAY_LENGTH(filters), 0); + }), + NS_DISPATCH_SYNC); + + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE(!profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 0); + + thread->Dispatch( + NS_NewRunnableFunction("GeckoProfiler_DifferentThreads_Test::TestBody", + [&]() { profiler_stop(); }), + NS_DISPATCH_SYNC); + + InactiveFeaturesAndParamsCheck(); + } + + // Control the profiler on the main thread and verify flags on a + // background thread. + { + uint32_t features = ProfilerFeature::JS | ProfilerFeature::Threads; + const char* filters[] = {"GeckoMain", "Compositor"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + thread->Dispatch( + NS_NewRunnableFunction( + "GeckoProfiler_DifferentThreads_Test::TestBody", + [&]() { + ASSERT_TRUE(profiler_is_active()); + ASSERT_TRUE( + !profiler_feature_active(ProfilerFeature::MainThreadIO)); + ASSERT_TRUE( + !profiler_feature_active(ProfilerFeature::IPCMessages)); + + ActiveParamsCheck(PROFILER_DEFAULT_ENTRIES.Value(), + PROFILER_DEFAULT_INTERVAL, features, filters, + MOZ_ARRAY_LENGTH(filters), 0); + }), + NS_DISPATCH_SYNC); + + profiler_stop(); + + thread->Dispatch( + NS_NewRunnableFunction("GeckoProfiler_DifferentThreads_Test::TestBody", + [&]() { InactiveFeaturesAndParamsCheck(); }), + NS_DISPATCH_SYNC); + } + + thread->Shutdown(); +} + +TEST(GeckoProfiler, GetBacktrace) +{ + ASSERT_TRUE(!profiler_get_backtrace()); + + { + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + // These will be destroyed while the profiler is active. 
+ static const int N = 100; + { + UniqueProfilerBacktrace u[N]; + for (int i = 0; i < N; i++) { + u[i] = profiler_get_backtrace(); + ASSERT_TRUE(u[i]); + } + } + + // These will be destroyed after the profiler stops. + UniqueProfilerBacktrace u[N]; + for (int i = 0; i < N; i++) { + u[i] = profiler_get_backtrace(); + ASSERT_TRUE(u[i]); + } + + profiler_stop(); + } + + ASSERT_TRUE(!profiler_get_backtrace()); +} + +TEST(GeckoProfiler, Pause) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!profiler_is_paused()); + ASSERT_TRUE(!profiler_can_accept_markers()); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_TRUE(!profiler_is_paused()); + ASSERT_TRUE(profiler_can_accept_markers()); + + // Check that we are writing samples while not paused. + Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info(); + PR_Sleep(PR_MillisecondsToInterval(500)); + Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info1->mRangeEnd != info2->mRangeEnd); + + // Check that we are writing markers while not paused. + info1 = profiler_get_buffer_info(); + PROFILER_MARKER_UNTYPED("Not paused", OTHER, {}); + info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info1->mRangeEnd != info2->mRangeEnd); + + profiler_pause(); + + ASSERT_TRUE(profiler_is_paused()); + ASSERT_TRUE(!profiler_can_accept_markers()); + + // Check that we are not writing samples while paused. + info1 = profiler_get_buffer_info(); + PR_Sleep(PR_MillisecondsToInterval(500)); + info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info1->mRangeEnd == info2->mRangeEnd); + + // Check that we are now writing markers while paused. + info1 = profiler_get_buffer_info(); + PROFILER_MARKER_UNTYPED("Paused", OTHER, {}); + info2 = profiler_get_buffer_info(); + ASSERT_TRUE(info1->mRangeEnd == info2->mRangeEnd); + PROFILER_MARKER_UNTYPED("Paused v2", OTHER, {}); + Maybe<ProfilerBufferInfo> info3 = profiler_get_buffer_info(); + ASSERT_TRUE(info2->mRangeEnd == info3->mRangeEnd); + + profiler_resume(); + + ASSERT_TRUE(!profiler_is_paused()); + ASSERT_TRUE(profiler_can_accept_markers()); + + profiler_stop(); + + ASSERT_TRUE(!profiler_is_paused()); + ASSERT_TRUE(!profiler_can_accept_markers()); +} + +TEST(GeckoProfiler, Markers) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + PROFILER_MARKER("tracing event", OTHER, {}, Tracing, "A"); + PROFILER_MARKER("tracing start", OTHER, MarkerTiming::IntervalStart(), + Tracing, "A"); + PROFILER_MARKER("tracing end", OTHER, MarkerTiming::IntervalEnd(), Tracing, + "A"); + + auto bt = profiler_capture_backtrace(); + PROFILER_MARKER("tracing event with stack", OTHER, + MarkerStack::TakeBacktrace(std::move(bt)), Tracing, "B"); + + { AUTO_PROFILER_TRACING_MARKER("C", "auto tracing", OTHER); } + + PROFILER_MARKER_UNTYPED("M1", OTHER, {}); + PROFILER_MARKER_UNTYPED("M3", OTHER, {}); + + // Create three strings: two that are the maximum allowed length, and one that + // is one char longer. 
+ static const size_t kMax = ProfileBuffer::kMaxFrameKeyLength; + UniquePtr<char[]> okstr1 = MakeUnique<char[]>(kMax); + UniquePtr<char[]> okstr2 = MakeUnique<char[]>(kMax); + UniquePtr<char[]> longstr = MakeUnique<char[]>(kMax + 1); + UniquePtr<char[]> longstrCut = MakeUnique<char[]>(kMax + 1); + for (size_t i = 0; i < kMax; i++) { + okstr1[i] = 'a'; + okstr2[i] = 'b'; + longstr[i] = 'c'; + longstrCut[i] = 'c'; + } + okstr1[kMax - 1] = '\0'; + okstr2[kMax - 1] = '\0'; + longstr[kMax] = '\0'; + longstrCut[kMax] = '\0'; + // Should be output as-is. + AUTO_PROFILER_LABEL_DYNAMIC_CSTR("", LAYOUT, ""); + AUTO_PROFILER_LABEL_DYNAMIC_CSTR("", LAYOUT, okstr1.get()); + // Should be output as label + space + okstr2. + AUTO_PROFILER_LABEL_DYNAMIC_CSTR("okstr2", LAYOUT, okstr2.get()); + // Should be output with kMax length, ending with "...\0". + AUTO_PROFILER_LABEL_DYNAMIC_CSTR("", LAYOUT, longstr.get()); + ASSERT_EQ(longstrCut[kMax - 4], 'c'); + longstrCut[kMax - 4] = '.'; + ASSERT_EQ(longstrCut[kMax - 3], 'c'); + longstrCut[kMax - 3] = '.'; + ASSERT_EQ(longstrCut[kMax - 2], 'c'); + longstrCut[kMax - 2] = '.'; + ASSERT_EQ(longstrCut[kMax - 1], 'c'); + longstrCut[kMax - 1] = '\0'; + + // Test basic markers 2.0. + MOZ_RELEASE_ASSERT( + profiler_add_marker("default-templated markers 2.0 with empty options", + geckoprofiler::category::OTHER, {})); + + PROFILER_MARKER_UNTYPED( + "default-templated markers 2.0 with option", OTHER, + MarkerStack::TakeBacktrace(profiler_capture_backtrace())); + + PROFILER_MARKER("explicitly-default-templated markers 2.0 with empty options", + OTHER, {}, NoPayload); + + MOZ_RELEASE_ASSERT(profiler_add_marker( + "explicitly-default-templated markers 2.0 with option", + geckoprofiler::category::OTHER, {}, + ::geckoprofiler::markers::NoPayload{})); + + // Used in markers below. + TimeStamp ts1 = TimeStamp::NowUnfuzzed(); + + // Sleep briefly to ensure a sample is taken and the pending markers are + // processed. + PR_Sleep(PR_MillisecondsToInterval(500)); + + // Used in markers below. + TimeStamp ts2 = TimeStamp::NowUnfuzzed(); + // ts1 and ts2 should be different thanks to the sleep. + EXPECT_NE(ts1, ts2); + + // Test most marker payloads. + + // Keep this one first! (It's used to record `ts1` and `ts2`, to compare + // to serialized numbers in other markers.) + MOZ_RELEASE_ASSERT(profiler_add_marker( + "FirstMarker", geckoprofiler::category::OTHER, + MarkerTiming::Interval(ts1, ts2), geckoprofiler::markers::TextMarker{}, + "First Marker")); + + // User-defined marker type with different properties, and fake schema. + struct GtestMarker { + static constexpr Span<const char> MarkerTypeName() { + return MakeStringSpan("markers-gtest"); + } + static void StreamJSONMarkerData( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter, int aInt, + double aDouble, const mozilla::ProfilerString8View& aText, + const mozilla::ProfilerString8View& aUniqueText, + const mozilla::TimeStamp& aTime) { + aWriter.NullProperty("null"); + aWriter.BoolProperty("bool-false", false); + aWriter.BoolProperty("bool-true", true); + aWriter.IntProperty("int", aInt); + aWriter.DoubleProperty("double", aDouble); + aWriter.StringProperty("text", aText); + aWriter.UniqueStringProperty("unique text", aUniqueText); + aWriter.UniqueStringProperty("unique text again", aUniqueText); + aWriter.TimeProperty("time", aTime); + } + static mozilla::MarkerSchema MarkerTypeDisplay() { + // Note: This is an test function that is not intended to actually output + // that correctly matches StreamJSONMarkerData data above! 
Instead we only + // test that it outputs the expected JSON at the end. + using MS = mozilla::MarkerSchema; + MS schema{MS::Location::markerChart, MS::Location::markerTable, + MS::Location::timelineOverview, MS::Location::timelineMemory, + MS::Location::timelineIPC, MS::Location::timelineFileIO, + MS::Location::stackChart}; + // All label functions. + schema.SetChartLabel("chart label"); + schema.SetTooltipLabel("tooltip label"); + schema.SetTableLabel("table label"); + // All data functions, all formats, all "searchable" values. + schema.AddKeyFormat("key with url", MS::Format::url); + schema.AddKeyLabelFormat("key with label filePath", "label filePath", + MS::Format::filePath); + schema.AddKeyFormatSearchable("key with string not-searchable", + MS::Format::string, + MS::Searchable::notSearchable); + schema.AddKeyLabelFormatSearchable("key with label duration searchable", + "label duration", MS::Format::duration, + MS::Searchable::searchable); + schema.AddKeyFormat("key with time", MS::Format::time); + schema.AddKeyFormat("key with seconds", MS::Format::seconds); + schema.AddKeyFormat("key with milliseconds", MS::Format::milliseconds); + schema.AddKeyFormat("key with microseconds", MS::Format::microseconds); + schema.AddKeyFormat("key with nanoseconds", MS::Format::nanoseconds); + schema.AddKeyFormat("key with bytes", MS::Format::bytes); + schema.AddKeyFormat("key with percentage", MS::Format::percentage); + schema.AddKeyFormat("key with integer", MS::Format::integer); + schema.AddKeyFormat("key with decimal", MS::Format::decimal); + schema.AddStaticLabelValue("static label", "static value"); + return schema; + } + }; + MOZ_RELEASE_ASSERT( + profiler_add_marker("Gtest custom marker", geckoprofiler::category::OTHER, + MarkerTiming::Interval(ts1, ts2), GtestMarker{}, 42, + 43.0, "gtest text", "gtest unique text", ts1)); + + // User-defined marker type with no data, special frontend schema. + struct GtestSpecialMarker { + static constexpr Span<const char> MarkerTypeName() { + return MakeStringSpan("markers-gtest-special"); + } + static void StreamJSONMarkerData( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter) {} + static mozilla::MarkerSchema MarkerTypeDisplay() { + return mozilla::MarkerSchema::SpecialFrontendLocation{}; + } + }; + MOZ_RELEASE_ASSERT(profiler_add_marker("Gtest special marker", + geckoprofiler::category::OTHER, {}, + GtestSpecialMarker{})); + + // User-defined marker type that is never used, so it shouldn't appear in the + // output. + struct GtestUnusedMarker { + static constexpr Span<const char> MarkerTypeName() { + return MakeStringSpan("markers-gtest-unused"); + } + static void StreamJSONMarkerData( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter) {} + static mozilla::MarkerSchema MarkerTypeDisplay() { + return mozilla::MarkerSchema::SpecialFrontendLocation{}; + } + }; + + // Make sure the compiler doesn't complain about this unused struct. + mozilla::Unused << GtestUnusedMarker{}; + + // Other markers in alphabetical order of payload class names. + + nsCOMPtr<nsIURI> uri; + ASSERT_TRUE( + NS_SUCCEEDED(NS_NewURI(getter_AddRefs(uri), "http://mozilla.org/"_ns))); + // The marker name will be "Load <aChannelId>: <aURI>". 
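+  // The three calls below use channel ids 1, 12 and 123, so the JSON checks
+  // further down look these markers up under the names
+  // "Load 1: http://mozilla.org/", "Load 12: http://mozilla.org/" and
+  // "Load 123: http://mozilla.org/".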
+ profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 1, + /* NetworkLoadType aType */ NetworkLoadType::LOAD_START, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheHit, + /* uint64_t aInnerWindowID */ 78 + /* const mozilla::net::TimingStruct* aTimings = nullptr */ + /* nsIURI* aRedirectURI = nullptr */ + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */); + + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 12, + /* NetworkLoadType aType */ NetworkLoadType::LOAD_STOP, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* const mozilla::net::TimingStruct* aTimings = nullptr */ nullptr, + /* nsIURI* aRedirectURI = nullptr */ nullptr, + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + nullptr, + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */ + Some(nsDependentCString("text/html"))); + + nsCOMPtr<nsIURI> redirectURI; + ASSERT_TRUE(NS_SUCCEEDED( + NS_NewURI(getter_AddRefs(redirectURI), "http://example.com/"_ns))); + profiler_add_network_marker( + /* nsIURI* aURI */ uri, + /* const nsACString& aRequestMethod */ "GET"_ns, + /* int32_t aPriority */ 34, + /* uint64_t aChannelId */ 123, + /* NetworkLoadType aType */ NetworkLoadType::LOAD_REDIRECT, + /* mozilla::TimeStamp aStart */ ts1, + /* mozilla::TimeStamp aEnd */ ts2, + /* int64_t aCount */ 56, + /* mozilla::net::CacheDisposition aCacheDisposition */ + net::kCacheUnresolved, + /* uint64_t aInnerWindowID */ 78, + /* const mozilla::net::TimingStruct* aTimings = nullptr */ nullptr, + /* nsIURI* aRedirectURI = nullptr */ redirectURI + /* mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> aSource = + nullptr */ + /* const mozilla::Maybe<nsDependentCString>& aContentType = + mozilla::Nothing() */); + + MOZ_RELEASE_ASSERT(profiler_add_marker( + "Text in main thread with stack", geckoprofiler::category::OTHER, + {MarkerStack::Capture(), MarkerTiming::Interval(ts1, ts2)}, + geckoprofiler::markers::TextMarker{}, "")); + MOZ_RELEASE_ASSERT(profiler_add_marker( + "Text from main thread with stack", geckoprofiler::category::OTHER, + MarkerOptions(MarkerThreadId::MainThread(), MarkerStack::Capture()), + geckoprofiler::markers::TextMarker{}, "")); + + std::thread registeredThread([]() { + AUTO_PROFILER_REGISTER_THREAD("Marker test sub-thread"); + // Marker in non-profiled thread won't be stored. + MOZ_RELEASE_ASSERT(profiler_add_marker( + "Text in registered thread with stack", geckoprofiler::category::OTHER, + MarkerStack::Capture(), geckoprofiler::markers::TextMarker{}, "")); + // Marker will be stored in main thread, with stack from registered thread. 
+ MOZ_RELEASE_ASSERT(profiler_add_marker( + "Text from registered thread with stack", + geckoprofiler::category::OTHER, + MarkerOptions(MarkerThreadId::MainThread(), MarkerStack::Capture()), + geckoprofiler::markers::TextMarker{}, "")); + }); + registeredThread.join(); + + std::thread unregisteredThread([]() { + // Marker in unregistered thread won't be stored. + MOZ_RELEASE_ASSERT(profiler_add_marker( + "Text in unregistered thread with stack", + geckoprofiler::category::OTHER, MarkerStack::Capture(), + geckoprofiler::markers::TextMarker{}, "")); + // Marker will be stored in main thread, but stack cannot be captured in an + // unregistered thread. + MOZ_RELEASE_ASSERT(profiler_add_marker( + "Text from unregistered thread with stack", + geckoprofiler::category::OTHER, + MarkerOptions(MarkerThreadId::MainThread(), MarkerStack::Capture()), + geckoprofiler::markers::TextMarker{}, "")); + }); + unregisteredThread.join(); + + MOZ_RELEASE_ASSERT( + profiler_add_marker("Tracing", geckoprofiler::category::OTHER, {}, + geckoprofiler::markers::Tracing{}, "category")); + + MOZ_RELEASE_ASSERT( + profiler_add_marker("Text", geckoprofiler::category::OTHER, {}, + geckoprofiler::markers::TextMarker{}, "Text text")); + + MOZ_RELEASE_ASSERT(profiler_add_marker( + "MediaSample", geckoprofiler::category::OTHER, {}, + geckoprofiler::markers::MediaSampleMarker{}, 123, 456)); + + SpliceableChunkedJSONWriter w; + w.Start(); + EXPECT_TRUE(::profiler_stream_json_for_this_process(w)); + w.End(); + + UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData(); + ASSERT_TRUE(!!profile.get()); + + // Expected markers, in order. + enum State { + S_tracing_event, + S_tracing_start, + S_tracing_end, + S_tracing_event_with_stack, + S_tracing_auto_tracing_start, + S_tracing_auto_tracing_end, + S_M1, + S_M3, + S_Markers2DefaultEmptyOptions, + S_Markers2DefaultWithOptions, + S_Markers2ExplicitDefaultEmptyOptions, + S_Markers2ExplicitDefaultWithOptions, + S_FirstMarker, + S_CustomMarker, + S_SpecialMarker, + S_NetworkMarkerPayload_start, + S_NetworkMarkerPayload_stop, + S_NetworkMarkerPayload_redirect, + S_TextWithStack, + S_TextToMTWithStack, + S_RegThread_TextToMTWithStack, + S_UnregThread_TextToMTWithStack, + + S_LAST, + } state = State(0); + + // These will be set when first read from S_FirstMarker, then + // compared in following markers. + // TODO: Compute these values from the timestamps. + double ts1Double = 0.0; + double ts2Double = 0.0; + + JSONOutputCheck(profile.get(), [&](const Json::Value& root) { + { + GET_JSON(threads, root["threads"], Array); + ASSERT_EQ(threads.size(), 1u); + + { + GET_JSON(thread0, threads[0], Object); + + // Keep a reference to the string table in this block, it will be used + // below. + GET_JSON(stringTable, thread0["stringTable"], Array); + ASSERT_TRUE(stringTable.isArray()); + + // Test the expected labels in the string table. 
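+        // okstr1 should appear as-is, okstr2 prefixed with its label, and the
+        // overlong string only in its truncated form (longstrCut); the full
+        // longstr must not appear anywhere.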
+ bool foundEmpty = false; + bool foundOkstr1 = false; + bool foundOkstr2 = false; + const std::string okstr2Label = std::string("okstr2 ") + okstr2.get(); + bool foundTooLong = false; + for (const auto& s : stringTable) { + ASSERT_TRUE(s.isString()); + std::string sString = s.asString(); + if (sString.empty()) { + EXPECT_FALSE(foundEmpty); + foundEmpty = true; + } else if (sString == okstr1.get()) { + EXPECT_FALSE(foundOkstr1); + foundOkstr1 = true; + } else if (sString == okstr2Label) { + EXPECT_FALSE(foundOkstr2); + foundOkstr2 = true; + } else if (sString == longstrCut.get()) { + EXPECT_FALSE(foundTooLong); + foundTooLong = true; + } else { + EXPECT_NE(sString, longstr.get()); + } + } + EXPECT_TRUE(foundEmpty); + EXPECT_TRUE(foundOkstr1); + EXPECT_TRUE(foundOkstr2); + EXPECT_TRUE(foundTooLong); + + { + GET_JSON(markers, thread0["markers"], Object); + + { + GET_JSON(data, markers["data"], Array); + + for (const Json::Value& marker : data) { + // Name the indexes into the marker tuple: + // [name, startTime, endTime, phase, category, payload] + const unsigned int NAME = 0u; + const unsigned int START_TIME = 1u; + const unsigned int END_TIME = 2u; + const unsigned int PHASE = 3u; + const unsigned int CATEGORY = 4u; + const unsigned int PAYLOAD = 5u; + + const unsigned int PHASE_INSTANT = 0; + const unsigned int PHASE_INTERVAL = 1; + const unsigned int PHASE_START = 2; + const unsigned int PHASE_END = 3; + + const unsigned int SIZE_WITHOUT_PAYLOAD = 5u; + const unsigned int SIZE_WITH_PAYLOAD = 6u; + + ASSERT_TRUE(marker.isArray()); + // The payload is optional. + ASSERT_GE(marker.size(), SIZE_WITHOUT_PAYLOAD); + ASSERT_LE(marker.size(), SIZE_WITH_PAYLOAD); + + // root.threads[0].markers.data[i] is an array with 5 or 6 + // elements. + + ASSERT_TRUE(marker[NAME].isUInt()); // name id + GET_JSON(name, stringTable[marker[NAME].asUInt()], String); + std::string nameString = name.asString(); + + EXPECT_TRUE(marker[START_TIME].isNumeric()); + EXPECT_TRUE(marker[END_TIME].isNumeric()); + EXPECT_TRUE(marker[PHASE].isUInt()); + EXPECT_TRUE(marker[PHASE].asUInt() < 4); + EXPECT_TRUE(marker[CATEGORY].isUInt()); + +#define EXPECT_TIMING_INSTANT \ + EXPECT_NE(marker[START_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INSTANT); +#define EXPECT_TIMING_INTERVAL \ + EXPECT_NE(marker[START_TIME].asDouble(), 0); \ + EXPECT_NE(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INTERVAL); +#define EXPECT_TIMING_START \ + EXPECT_NE(marker[START_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_START); +#define EXPECT_TIMING_END \ + EXPECT_EQ(marker[START_TIME].asDouble(), 0); \ + EXPECT_NE(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_END); + +#define EXPECT_TIMING_INSTANT_AT(t) \ + EXPECT_EQ(marker[START_TIME].asDouble(), t); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INSTANT); +#define EXPECT_TIMING_INTERVAL_AT(start, end) \ + EXPECT_EQ(marker[START_TIME].asDouble(), start); \ + EXPECT_EQ(marker[END_TIME].asDouble(), end); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_INTERVAL); +#define EXPECT_TIMING_START_AT(start) \ + EXPECT_EQ(marker[START_TIME].asDouble(), start); \ + EXPECT_EQ(marker[END_TIME].asDouble(), 0); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_START); +#define EXPECT_TIMING_END_AT(end) \ + EXPECT_EQ(marker[START_TIME].asDouble(), 0); \ + 
EXPECT_EQ(marker[END_TIME].asDouble(), end); \ + EXPECT_EQ(marker[PHASE].asUInt(), PHASE_END); + + if (marker.size() == SIZE_WITHOUT_PAYLOAD) { + // root.threads[0].markers.data[i] is an array with 5 elements, + // so there is no payload. + if (nameString == "M1") { + ASSERT_EQ(state, S_M1); + state = State(state + 1); + } else if (nameString == "M3") { + ASSERT_EQ(state, S_M3); + state = State(state + 1); + } else if (nameString == + "default-templated markers 2.0 with empty options") { + EXPECT_EQ(state, S_Markers2DefaultEmptyOptions); + state = State(S_Markers2DefaultEmptyOptions + 1); +// TODO: Re-enable this when bug 1646714 lands, and check for stack. +#if 0 + } else if (nameString == + "default-templated markers 2.0 with option") { + EXPECT_EQ(state, S_Markers2DefaultWithOptions); + state = State(S_Markers2DefaultWithOptions + 1); +#endif + } else if (nameString == + "explicitly-default-templated markers 2.0 with " + "empty " + "options") { + EXPECT_EQ(state, S_Markers2ExplicitDefaultEmptyOptions); + state = State(S_Markers2ExplicitDefaultEmptyOptions + 1); + } else if (nameString == + "explicitly-default-templated markers 2.0 with " + "option") { + EXPECT_EQ(state, S_Markers2ExplicitDefaultWithOptions); + state = State(S_Markers2ExplicitDefaultWithOptions + 1); + } + } else { + // root.threads[0].markers.data[i] is an array with 6 elements, + // so there is a payload. + GET_JSON(payload, marker[PAYLOAD], Object); + + // root.threads[0].markers.data[i][PAYLOAD] is an object + // (payload). + + // It should at least have a "type" string. + GET_JSON(type, payload["type"], String); + std::string typeString = type.asString(); + + if (nameString == "tracing event") { + EXPECT_EQ(state, S_tracing_event); + state = State(S_tracing_event + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_INSTANT; + EXPECT_EQ_JSON(payload["category"], String, "A"); + EXPECT_TRUE(payload["stack"].isNull()); + + } else if (nameString == "tracing start") { + EXPECT_EQ(state, S_tracing_start); + state = State(S_tracing_start + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_START; + EXPECT_EQ_JSON(payload["category"], String, "A"); + EXPECT_TRUE(payload["stack"].isNull()); + + } else if (nameString == "tracing end") { + EXPECT_EQ(state, S_tracing_end); + state = State(S_tracing_end + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_END; + EXPECT_EQ_JSON(payload["category"], String, "A"); + EXPECT_TRUE(payload["stack"].isNull()); + + } else if (nameString == "tracing event with stack") { + EXPECT_EQ(state, S_tracing_event_with_stack); + state = State(S_tracing_event_with_stack + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_INSTANT; + EXPECT_EQ_JSON(payload["category"], String, "B"); + EXPECT_TRUE(payload["stack"].isObject()); + + } else if (nameString == "auto tracing") { + switch (state) { + case S_tracing_auto_tracing_start: + state = State(S_tracing_auto_tracing_start + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_START; + EXPECT_EQ_JSON(payload["category"], String, "C"); + EXPECT_TRUE(payload["stack"].isNull()); + break; + case S_tracing_auto_tracing_end: + state = State(S_tracing_auto_tracing_end + 1); + EXPECT_EQ(typeString, "tracing"); + EXPECT_TIMING_END; + EXPECT_EQ_JSON(payload["category"], String, "C"); + ASSERT_TRUE(payload["stack"].isNull()); + break; + default: + EXPECT_TRUE(state == S_tracing_auto_tracing_start || + state == S_tracing_auto_tracing_end); + break; + } + + } else if (nameString == + "default-templated markers 2.0 with option") { + // TODO: 
Remove this when bug 1646714 lands. + EXPECT_EQ(state, S_Markers2DefaultWithOptions); + state = State(S_Markers2DefaultWithOptions + 1); + EXPECT_EQ(typeString, "NoPayloadUserData"); + EXPECT_FALSE(payload["stack"].isNull()); + + } else if (nameString == "FirstMarker") { + // Record start and end times, to compare with timestamps in + // following markers. + EXPECT_EQ(state, S_FirstMarker); + ts1Double = marker[START_TIME].asDouble(); + ts2Double = marker[END_TIME].asDouble(); + state = State(S_FirstMarker + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_EQ_JSON(payload["name"], String, "First Marker"); + + } else if (nameString == "Gtest custom marker") { + EXPECT_EQ(state, S_CustomMarker); + state = State(S_CustomMarker + 1); + EXPECT_EQ(typeString, "markers-gtest"); + EXPECT_EQ(payload.size(), 1u + 9u); + EXPECT_TRUE(payload["null"].isNull()); + EXPECT_EQ_JSON(payload["bool-false"], Bool, false); + EXPECT_EQ_JSON(payload["bool-true"], Bool, true); + EXPECT_EQ_JSON(payload["int"], Int64, 42); + EXPECT_EQ_JSON(payload["double"], Double, 43.0); + EXPECT_EQ_JSON(payload["text"], String, "gtest text"); + // Unique strings can be fetched from the string table. + ASSERT_TRUE(payload["unique text"].isUInt()); + auto textIndex = payload["unique text"].asUInt(); + GET_JSON(uniqueText, stringTable[textIndex], String); + ASSERT_TRUE(uniqueText.isString()); + ASSERT_EQ(uniqueText.asString(), "gtest unique text"); + // The duplicate unique text should have the exact same index. + EXPECT_EQ_JSON(payload["unique text again"], UInt, textIndex); + EXPECT_EQ_JSON(payload["time"], Double, ts1Double); + + } else if (nameString == "Gtest special marker") { + EXPECT_EQ(state, S_SpecialMarker); + state = State(S_SpecialMarker + 1); + EXPECT_EQ(typeString, "markers-gtest-special"); + EXPECT_EQ(payload.size(), 1u) << "Only 'type' in the payload"; + + } else if (nameString == "Load 1: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_start); + state = State(S_NetworkMarkerPayload_start + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 1); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Hit"); + EXPECT_TRUE(payload["RedirectURI"].isNull()); + EXPECT_TRUE(payload["contentType"].isNull()); + + } else if (nameString == "Load 12: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_stop); + state = State(S_NetworkMarkerPayload_stop + 1); + EXPECT_EQ(typeString, "Network"); + EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 12); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_TRUE(payload["RedirectURI"].isNull()); + EXPECT_EQ_JSON(payload["contentType"], String, "text/html"); + + } else if (nameString == "Load 123: http://mozilla.org/") { + EXPECT_EQ(state, S_NetworkMarkerPayload_redirect); + state = State(S_NetworkMarkerPayload_redirect + 1); + EXPECT_EQ(typeString, "Network"); + 
EXPECT_EQ_JSON(payload["startTime"], Double, ts1Double); + EXPECT_EQ_JSON(payload["endTime"], Double, ts2Double); + EXPECT_EQ_JSON(payload["id"], Int64, 123); + EXPECT_EQ_JSON(payload["URI"], String, "http://mozilla.org/"); + EXPECT_EQ_JSON(payload["requestMethod"], String, "GET"); + EXPECT_EQ_JSON(payload["pri"], Int64, 34); + EXPECT_EQ_JSON(payload["count"], Int64, 56); + EXPECT_EQ_JSON(payload["cache"], String, "Unresolved"); + EXPECT_EQ_JSON(payload["RedirectURI"], String, + "http://example.com/"); + EXPECT_TRUE(payload["contentType"].isNull()); + + } else if (nameString == "Text in main thread with stack") { + EXPECT_EQ(state, S_TextWithStack); + state = State(S_TextWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_FALSE(payload["stack"].isNull()); + EXPECT_TIMING_INTERVAL_AT(ts1Double, ts2Double); + EXPECT_EQ_JSON(payload["name"], String, ""); + + } else if (nameString == "Text from main thread with stack") { + EXPECT_EQ(state, S_TextToMTWithStack); + state = State(S_TextToMTWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_FALSE(payload["stack"].isNull()); + EXPECT_EQ_JSON(payload["name"], String, ""); + + } else if (nameString == + "Text in registered thread with stack") { + ADD_FAILURE() + << "Unexpected 'Text in registered thread with stack'"; + + } else if (nameString == + "Text from registered thread with stack") { + EXPECT_EQ(state, S_RegThread_TextToMTWithStack); + state = State(S_RegThread_TextToMTWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_FALSE(payload["stack"].isNull()); + EXPECT_EQ_JSON(payload["name"], String, ""); + + } else if (nameString == + "Text in unregistered thread with stack") { + ADD_FAILURE() + << "Unexpected 'Text in unregistered thread with stack'"; + + } else if (nameString == + "Text from unregistered thread with stack") { + EXPECT_EQ(state, S_UnregThread_TextToMTWithStack); + state = State(S_UnregThread_TextToMTWithStack + 1); + EXPECT_EQ(typeString, "Text"); + EXPECT_TRUE(payload["stack"].isNull()); + EXPECT_EQ_JSON(payload["name"], String, ""); + } + } // marker with payload + } // for (marker : data) + } // markers.data + } // markers + } // thread0 + } // threads + // We should have read all expected markers. + EXPECT_EQ(state, S_LAST); + + { + GET_JSON(meta, root["meta"], Object); + + { + GET_JSON(markerSchema, meta["markerSchema"], Array); + + std::set<std::string> testedSchemaNames; + + for (const Json::Value& schema : markerSchema) { + GET_JSON(name, schema["name"], String); + const std::string nameString = name.asString(); + + GET_JSON(display, schema["display"], Array); + + GET_JSON(data, schema["data"], Array); + + EXPECT_TRUE( + testedSchemaNames + .insert(std::string(nameString.data(), nameString.size())) + .second) + << "Each schema name should be unique (inserted once in the set)"; + + if (nameString == "Text") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 1u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "name"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Details"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + } else if (nameString == "NoPayloadUserData") { + // TODO: Remove this when bug 1646714 lands. 
+ EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "FileIO") { + // These are defined in ProfilerIOInterposeObserver.cpp + + } else if (nameString == "tracing") { + EXPECT_EQ(display.size(), 3u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + EXPECT_EQ(display[2u].asString(), "timeline-overview"); + + ASSERT_EQ(data.size(), 1u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "category"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Type"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + } else if (nameString == "BHR-detected hang") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "MainThreadLongTask") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 1u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "category"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Type"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + } else if (nameString == "Log") { + EXPECT_EQ(display.size(), 1u); + EXPECT_EQ(display[0u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 2u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "module"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Module"); + EXPECT_EQ_JSON(data[0u]["format"], String, "string"); + + ASSERT_TRUE(data[1u].isObject()); + EXPECT_EQ_JSON(data[1u]["key"], String, "name"); + EXPECT_EQ_JSON(data[1u]["label"], String, "Name"); + EXPECT_EQ_JSON(data[1u]["format"], String, "string"); + + } else if (nameString == "MediaSample") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 2u); + + ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "sampleStartTimeUs"); + EXPECT_EQ_JSON(data[0u]["label"], String, "Sample start time"); + EXPECT_EQ_JSON(data[0u]["format"], String, "microseconds"); + + ASSERT_TRUE(data[1u].isObject()); + EXPECT_EQ_JSON(data[1u]["key"], String, "sampleEndTimeUs"); + EXPECT_EQ_JSON(data[1u]["label"], String, "Sample end time"); + EXPECT_EQ_JSON(data[1u]["format"], String, "microseconds"); + + } else if (nameString == "Budget") { + EXPECT_EQ(display.size(), 2u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "markers-gtest") { + EXPECT_EQ(display.size(), 7u); + EXPECT_EQ(display[0u].asString(), "marker-chart"); + EXPECT_EQ(display[1u].asString(), "marker-table"); + EXPECT_EQ(display[2u].asString(), "timeline-overview"); + EXPECT_EQ(display[3u].asString(), "timeline-memory"); + EXPECT_EQ(display[4u].asString(), "timeline-ipc"); + EXPECT_EQ(display[5u].asString(), "timeline-fileio"); + EXPECT_EQ(display[6u].asString(), "stack-chart"); + + EXPECT_EQ_JSON(schema["chartLabel"], String, "chart label"); + EXPECT_EQ_JSON(schema["tooltipLabel"], String, "tooltip label"); + EXPECT_EQ_JSON(schema["tableLabel"], String, "table label"); + + ASSERT_EQ(data.size(), 14u); + + 
ASSERT_TRUE(data[0u].isObject()); + EXPECT_EQ_JSON(data[0u]["key"], String, "key with url"); + EXPECT_TRUE(data[0u]["label"].isNull()); + EXPECT_EQ_JSON(data[0u]["format"], String, "url"); + EXPECT_TRUE(data[0u]["searchable"].isNull()); + + ASSERT_TRUE(data[1u].isObject()); + EXPECT_EQ_JSON(data[1u]["key"], String, "key with label filePath"); + EXPECT_EQ_JSON(data[1u]["label"], String, "label filePath"); + EXPECT_EQ_JSON(data[1u]["format"], String, "file-path"); + EXPECT_TRUE(data[1u]["searchable"].isNull()); + + ASSERT_TRUE(data[2u].isObject()); + EXPECT_EQ_JSON(data[2u]["key"], String, + "key with string not-searchable"); + EXPECT_TRUE(data[2u]["label"].isNull()); + EXPECT_EQ_JSON(data[2u]["format"], String, "string"); + EXPECT_EQ_JSON(data[2u]["searchable"], Bool, false); + + ASSERT_TRUE(data[3u].isObject()); + EXPECT_EQ_JSON(data[3u]["key"], String, + "key with label duration searchable"); + EXPECT_TRUE(data[3u]["label duration"].isNull()); + EXPECT_EQ_JSON(data[3u]["format"], String, "duration"); + EXPECT_EQ_JSON(data[3u]["searchable"], Bool, true); + + ASSERT_TRUE(data[4u].isObject()); + EXPECT_EQ_JSON(data[4u]["key"], String, "key with time"); + EXPECT_TRUE(data[4u]["label"].isNull()); + EXPECT_EQ_JSON(data[4u]["format"], String, "time"); + EXPECT_TRUE(data[4u]["searchable"].isNull()); + + ASSERT_TRUE(data[5u].isObject()); + EXPECT_EQ_JSON(data[5u]["key"], String, "key with seconds"); + EXPECT_TRUE(data[5u]["label"].isNull()); + EXPECT_EQ_JSON(data[5u]["format"], String, "seconds"); + EXPECT_TRUE(data[5u]["searchable"].isNull()); + + ASSERT_TRUE(data[6u].isObject()); + EXPECT_EQ_JSON(data[6u]["key"], String, "key with milliseconds"); + EXPECT_TRUE(data[6u]["label"].isNull()); + EXPECT_EQ_JSON(data[6u]["format"], String, "milliseconds"); + EXPECT_TRUE(data[6u]["searchable"].isNull()); + + ASSERT_TRUE(data[7u].isObject()); + EXPECT_EQ_JSON(data[7u]["key"], String, "key with microseconds"); + EXPECT_TRUE(data[7u]["label"].isNull()); + EXPECT_EQ_JSON(data[7u]["format"], String, "microseconds"); + EXPECT_TRUE(data[7u]["searchable"].isNull()); + + ASSERT_TRUE(data[8u].isObject()); + EXPECT_EQ_JSON(data[8u]["key"], String, "key with nanoseconds"); + EXPECT_TRUE(data[8u]["label"].isNull()); + EXPECT_EQ_JSON(data[8u]["format"], String, "nanoseconds"); + EXPECT_TRUE(data[8u]["searchable"].isNull()); + + ASSERT_TRUE(data[9u].isObject()); + EXPECT_EQ_JSON(data[9u]["key"], String, "key with bytes"); + EXPECT_TRUE(data[9u]["label"].isNull()); + EXPECT_EQ_JSON(data[9u]["format"], String, "bytes"); + EXPECT_TRUE(data[9u]["searchable"].isNull()); + + ASSERT_TRUE(data[10u].isObject()); + EXPECT_EQ_JSON(data[10u]["key"], String, "key with percentage"); + EXPECT_TRUE(data[10u]["label"].isNull()); + EXPECT_EQ_JSON(data[10u]["format"], String, "percentage"); + EXPECT_TRUE(data[10u]["searchable"].isNull()); + + ASSERT_TRUE(data[11u].isObject()); + EXPECT_EQ_JSON(data[11u]["key"], String, "key with integer"); + EXPECT_TRUE(data[11u]["label"].isNull()); + EXPECT_EQ_JSON(data[11u]["format"], String, "integer"); + EXPECT_TRUE(data[11u]["searchable"].isNull()); + + ASSERT_TRUE(data[12u].isObject()); + EXPECT_EQ_JSON(data[12u]["key"], String, "key with decimal"); + EXPECT_TRUE(data[12u]["label"].isNull()); + EXPECT_EQ_JSON(data[12u]["format"], String, "decimal"); + EXPECT_TRUE(data[12u]["searchable"].isNull()); + + ASSERT_TRUE(data[13u].isObject()); + EXPECT_EQ_JSON(data[13u]["label"], String, "static label"); + EXPECT_EQ_JSON(data[13u]["value"], String, "static value"); + + } else if (nameString == 
"markers-gtest-special") { + EXPECT_EQ(display.size(), 0u); + ASSERT_EQ(data.size(), 0u); + + } else if (nameString == "markers-gtest-unused") { + ADD_FAILURE() << "Schema for GtestUnusedMarker should not be here"; + + } else { + printf("FYI: Unknown marker schema '%s'\n", nameString.c_str()); + } + } + + // Check that we've got all expected schema. + EXPECT_TRUE(testedSchemaNames.find("Text") != testedSchemaNames.end()); + EXPECT_TRUE(testedSchemaNames.find("tracing") != + testedSchemaNames.end()); + EXPECT_TRUE(testedSchemaNames.find("MediaSample") != + testedSchemaNames.end()); + } // markerSchema + } // meta + }); + + Maybe<ProfilerBufferInfo> info = profiler_get_buffer_info(); + MOZ_RELEASE_ASSERT(info.isSome()); + printf("Profiler buffer range: %llu .. %llu (%llu bytes)\n", + static_cast<unsigned long long>(info->mRangeStart), + static_cast<unsigned long long>(info->mRangeEnd), + // sizeof(ProfileBufferEntry) == 9 + (static_cast<unsigned long long>(info->mRangeEnd) - + static_cast<unsigned long long>(info->mRangeStart)) * + 9); + printf("Stats: min(us) .. mean(us) .. max(us) [count]\n"); + printf("- Intervals: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mIntervalsUs.min, info->mIntervalsUs.sum / info->mIntervalsUs.n, + info->mIntervalsUs.max, info->mIntervalsUs.n); + printf("- Overheads: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mOverheadsUs.min, info->mOverheadsUs.sum / info->mOverheadsUs.n, + info->mOverheadsUs.max, info->mOverheadsUs.n); + printf(" - Locking: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mLockingsUs.min, info->mLockingsUs.sum / info->mLockingsUs.n, + info->mLockingsUs.max, info->mLockingsUs.n); + printf(" - Clearning: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mCleaningsUs.min, info->mCleaningsUs.sum / info->mCleaningsUs.n, + info->mCleaningsUs.max, info->mCleaningsUs.n); + printf(" - Counters: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mCountersUs.min, info->mCountersUs.sum / info->mCountersUs.n, + info->mCountersUs.max, info->mCountersUs.n); + printf(" - Threads: %7.1f .. %7.1f .. %7.1f [%u]\n", + info->mThreadsUs.min, info->mThreadsUs.sum / info->mThreadsUs.n, + info->mThreadsUs.max, info->mThreadsUs.n); + + profiler_stop(); + + // Try to add markers while the profiler is stopped. + PROFILER_MARKER_UNTYPED("marker after profiler_stop", OTHER); + + // Warning: this could be racy + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + // This last marker shouldn't get streamed. 
+  SpliceableChunkedJSONWriter w2;
+  w2.Start();
+  EXPECT_TRUE(::profiler_stream_json_for_this_process(w2));
+  w2.End();
+  UniquePtr<char[]> profile2 = w2.ChunkedWriteFunc().CopyData();
+  ASSERT_TRUE(!!profile2.get());
+  EXPECT_TRUE(
+      std::string_view(profile2.get()).find("marker after profiler_stop") ==
+      std::string_view::npos);
+
+  profiler_stop();
+}
+
+#define COUNTER_NAME "TestCounter"
+#define COUNTER_DESCRIPTION "Test of counters in profiles"
+#define COUNTER_NAME2 "Counter2"
+#define COUNTER_DESCRIPTION2 "Second Test of counters in profiles"
+
+PROFILER_DEFINE_COUNT_TOTAL(TestCounter, COUNTER_NAME, COUNTER_DESCRIPTION);
+PROFILER_DEFINE_COUNT_TOTAL(TestCounter2, COUNTER_NAME2, COUNTER_DESCRIPTION2);
+
+TEST(GeckoProfiler, Counters)
+{
+  uint32_t features = ProfilerFeature::Threads;
+  const char* filters[] = {"GeckoMain", "Compositor"};
+
+  // Inactive -> Active
+  profiler_ensure_started(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL,
+                          features, filters, MOZ_ARRAY_LENGTH(filters), 0);
+
+  AUTO_PROFILER_COUNT_TOTAL(TestCounter, 10);
+  PR_Sleep(PR_MillisecondsToInterval(200));
+  AUTO_PROFILER_COUNT_TOTAL(TestCounter, 7);
+  PR_Sleep(PR_MillisecondsToInterval(200));
+  AUTO_PROFILER_COUNT_TOTAL(TestCounter, -17);
+  PR_Sleep(PR_MillisecondsToInterval(200));
+
+  // Verify we got counters in the output.
+  SpliceableChunkedJSONWriter w;
+  ASSERT_TRUE(::profiler_stream_json_for_this_process(w));
+
+  UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData();
+
+  // Counter name and description should appear as-is.
+  ASSERT_TRUE(strstr(profile.get(), COUNTER_NAME));
+  ASSERT_TRUE(strstr(profile.get(), COUNTER_DESCRIPTION));
+  ASSERT_FALSE(strstr(profile.get(), COUNTER_NAME2));
+  ASSERT_FALSE(strstr(profile.get(), COUNTER_DESCRIPTION2));
+
+  AUTO_PROFILER_COUNT_TOTAL(TestCounter2, 10);
+  PR_Sleep(PR_MillisecondsToInterval(200));
+
+  ASSERT_TRUE(::profiler_stream_json_for_this_process(w));
+
+  profile = w.ChunkedWriteFunc().CopyData();
+  ASSERT_TRUE(strstr(profile.get(), COUNTER_NAME));
+  ASSERT_TRUE(strstr(profile.get(), COUNTER_DESCRIPTION));
+  ASSERT_TRUE(strstr(profile.get(), COUNTER_NAME2));
+  ASSERT_TRUE(strstr(profile.get(), COUNTER_DESCRIPTION2));
+
+  profiler_stop();
+}
+
+TEST(GeckoProfiler, Time)
+{
+  uint32_t features = ProfilerFeature::StackWalk;
+  const char* filters[] = {"GeckoMain"};
+
+  double t1 = profiler_time();
+  double t2 = profiler_time();
+  ASSERT_TRUE(t1 <= t2);
+
+  // profiler_start() restarts the timer used by profiler_time().
+  profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features,
+                 filters, MOZ_ARRAY_LENGTH(filters), 0);
+
+  double t3 = profiler_time();
+  double t4 = profiler_time();
+  ASSERT_TRUE(t3 <= t4);
+
+  profiler_stop();
+
+  double t5 = profiler_time();
+  double t6 = profiler_time();
+  ASSERT_TRUE(t4 <= t5 && t1 <= t6);
+}
+
+TEST(GeckoProfiler, GetProfile)
+{
+  uint32_t features = ProfilerFeature::StackWalk;
+  const char* filters[] = {"GeckoMain"};
+
+  ASSERT_TRUE(!profiler_get_profile());
+
+  profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features,
+                 filters, MOZ_ARRAY_LENGTH(filters), 0);
+
+  mozilla::Maybe<uint32_t> activeFeatures = profiler_features_if_active();
+  ASSERT_TRUE(activeFeatures.isSome());
+  // Not all platforms support stack-walking.
+  const bool hasStackWalk = ProfilerFeature::HasStackWalk(*activeFeatures);
+  // "threads" may automatically be added when `filters` is not empty.
+ const bool hasThreads = ProfilerFeature::HasThreads(*activeFeatures); + + UniquePtr<char[]> profile = profiler_get_profile(); + JSONOutputCheck(profile.get(), [&](const Json::Value& aRoot) { + GET_JSON(meta, aRoot["meta"], Object); + { + GET_JSON(configuration, meta["configuration"], Object); + { + GET_JSON(features, configuration["features"], Array); + { + EXPECT_EQ(features.size(), + (hasStackWalk ? 1u : 0u) + (hasThreads ? 1u : 0u)); + if (hasStackWalk) { + EXPECT_JSON_ARRAY_CONTAINS(features, String, "stackwalk"); + } + if (hasThreads) { + EXPECT_JSON_ARRAY_CONTAINS(features, String, "threads"); + } + } + GET_JSON(threads, configuration["threads"], Array); + { + EXPECT_EQ(threads.size(), 1u); + EXPECT_JSON_ARRAY_CONTAINS(threads, String, "GeckoMain"); + } + } + } + }); + + profiler_stop(); + + ASSERT_TRUE(!profiler_get_profile()); +} + +TEST(GeckoProfiler, StreamJSONForThisProcess) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + SpliceableChunkedJSONWriter w; + ASSERT_TRUE(!::profiler_stream_json_for_this_process(w)); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + w.Start(); + ASSERT_TRUE(::profiler_stream_json_for_this_process(w)); + w.End(); + + UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData(); + + JSONOutputCheck(profile.get(), [](const Json::Value&) {}); + + profiler_stop(); + + ASSERT_TRUE(!::profiler_stream_json_for_this_process(w)); +} + +TEST(GeckoProfiler, StreamJSONForThisProcessThreaded) +{ + // Same as the previous test, but calling some things on background threads. + nsCOMPtr<nsIThread> thread; + nsresult rv = NS_NewNamedThread("GeckoProfGTest", getter_AddRefs(thread)); + ASSERT_TRUE(NS_SUCCEEDED(rv)); + + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + SpliceableChunkedJSONWriter w; + ASSERT_TRUE(!::profiler_stream_json_for_this_process(w)); + + // Start the profiler on the main thread. + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + // Call profiler_stream_json_for_this_process on a background thread. + thread->Dispatch( + NS_NewRunnableFunction( + "GeckoProfiler_StreamJSONForThisProcessThreaded_Test::TestBody", + [&]() { + w.Start(); + ASSERT_TRUE(::profiler_stream_json_for_this_process(w)); + w.End(); + }), + NS_DISPATCH_SYNC); + + UniquePtr<char[]> profile = w.ChunkedWriteFunc().CopyData(); + + JSONOutputCheck(profile.get(), [](const Json::Value&) {}); + + // Stop the profiler and call profiler_stream_json_for_this_process on a + // background thread. + thread->Dispatch( + NS_NewRunnableFunction( + "GeckoProfiler_StreamJSONForThisProcessThreaded_Test::TestBody", + [&]() { + profiler_stop(); + ASSERT_TRUE(!::profiler_stream_json_for_this_process(w)); + }), + NS_DISPATCH_SYNC); + thread->Shutdown(); + + // Call profiler_stream_json_for_this_process on the main thread. 
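+  // The profiler was stopped on the background thread above, so this call is
+  // expected to fail.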
+ ASSERT_TRUE(!::profiler_stream_json_for_this_process(w)); +} + +TEST(GeckoProfiler, ProfilingStack) +{ + uint32_t features = ProfilerFeature::StackWalk; + const char* filters[] = {"GeckoMain"}; + + AUTO_PROFILER_LABEL("A::B", OTHER); + + UniqueFreePtr<char> dynamic(strdup("dynamic")); + { + AUTO_PROFILER_LABEL_DYNAMIC_CSTR("A::C", JS, dynamic.get()); + AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING("A::C2", JS, + nsDependentCString(dynamic.get())); + AUTO_PROFILER_LABEL_DYNAMIC_LOSSY_NSSTRING( + "A::C3", JS, NS_ConvertUTF8toUTF16(dynamic.get())); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + features, filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_TRUE(profiler_get_backtrace()); + } + + AutoProfilerLabel label1("A", nullptr, JS::ProfilingCategoryPair::DOM); + AutoProfilerLabel label2("A", dynamic.get(), + JS::ProfilingCategoryPair::NETWORK); + ASSERT_TRUE(profiler_get_backtrace()); + + profiler_stop(); + + ASSERT_TRUE(!profiler_get_profile()); +} + +TEST(GeckoProfiler, Bug1355807) +{ + uint32_t features = ProfilerFeature::JS; + const char* manyThreadsFilter[] = {""}; + const char* fewThreadsFilter[] = {"GeckoMain"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + manyThreadsFilter, MOZ_ARRAY_LENGTH(manyThreadsFilter), 0); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + fewThreadsFilter, MOZ_ARRAY_LENGTH(fewThreadsFilter), 0); + + // In bug 1355807 this caused an assertion failure in StopJSSampling(). + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + fewThreadsFilter, MOZ_ARRAY_LENGTH(fewThreadsFilter), 0); + + profiler_stop(); +} + +class GTestStackCollector final : public ProfilerStackCollector { + public: + GTestStackCollector() : mSetIsMainThread(0), mFrames(0) {} + + virtual void SetIsMainThread() { mSetIsMainThread++; } + + virtual void CollectNativeLeafAddr(void* aAddr) { mFrames++; } + virtual void CollectJitReturnAddr(void* aAddr) { mFrames++; } + virtual void CollectWasmFrame(const char* aLabel) { mFrames++; } + virtual void CollectProfilingStackFrame( + const js::ProfilingStackFrame& aFrame) { + mFrames++; + } + + int mSetIsMainThread; + int mFrames; +}; + +void DoSuspendAndSample(int aTid, nsIThread* aThread) { + aThread->Dispatch( + NS_NewRunnableFunction("GeckoProfiler_SuspendAndSample_Test::TestBody", + [&]() { + uint32_t features = ProfilerFeature::Leaf; + GTestStackCollector collector; + profiler_suspend_and_sample_thread( + aTid, features, collector, + /* sampleNative = */ true); + + ASSERT_TRUE(collector.mSetIsMainThread == 1); + ASSERT_TRUE(collector.mFrames > 0); + }), + NS_DISPATCH_SYNC); +} + +TEST(GeckoProfiler, SuspendAndSample) +{ + nsCOMPtr<nsIThread> thread; + nsresult rv = NS_NewNamedThread("GeckoProfGTest", getter_AddRefs(thread)); + ASSERT_TRUE(NS_SUCCEEDED(rv)); + + int tid = profiler_current_thread_id(); + + ASSERT_TRUE(!profiler_is_active()); + + // Suspend and sample while the profiler is inactive. + DoSuspendAndSample(tid, thread); + + uint32_t features = ProfilerFeature::JS | ProfilerFeature::Threads; + const char* filters[] = {"GeckoMain", "Compositor"}; + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features, + filters, MOZ_ARRAY_LENGTH(filters), 0); + + ASSERT_TRUE(profiler_is_active()); + + // Suspend and sample while the profiler is active. 
+ DoSuspendAndSample(tid, thread); + + profiler_stop(); + + ASSERT_TRUE(!profiler_is_active()); +} + +// Returns `static_cast<SamplingState>(-1)` if callback could not be installed. +static SamplingState WaitForSamplingState() { + Atomic<int> samplingState{-1}; + + if (!profiler_callback_after_sampling([&](SamplingState aSamplingState) { + samplingState = static_cast<int>(aSamplingState); + })) { + return static_cast<SamplingState>(-1); + } + + while (samplingState == -1) { + } + + return static_cast<SamplingState>(static_cast<int>(samplingState)); +} + +TEST(GeckoProfiler, PostSamplingCallback) +{ + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk, filters, MOZ_ARRAY_LENGTH(filters), + 0); + { + // Stack sampling -> This label should appear at least once. + AUTO_PROFILER_LABEL("PostSamplingCallback completed", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + UniquePtr<char[]> profileCompleted = profiler_get_profile(); + JSONOutputCheck(profileCompleted.get(), [](const Json::Value& aRoot) { + GET_JSON(threads, aRoot["threads"], Array); + { + GET_JSON(thread0, threads[0], Object); + { + EXPECT_JSON_ARRAY_CONTAINS(thread0["stringTable"], String, + "PostSamplingCallback completed"); + } + } + }); + + profiler_pause(); + { + // Paused -> This label should not appear. + AUTO_PROFILER_LABEL("PostSamplingCallback paused", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingPaused); + } + UniquePtr<char[]> profilePaused = profiler_get_profile(); + JSONOutputCheck(profilePaused.get(), [](const Json::Value& aRoot) {}); + // This string shouldn't appear *anywhere* in the profile. + ASSERT_FALSE(strstr(profilePaused.get(), "PostSamplingCallback paused")); + + profiler_resume(); + { + // Stack sampling -> This label should appear at least once. + AUTO_PROFILER_LABEL("PostSamplingCallback resumed", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + UniquePtr<char[]> profileResumed = profiler_get_profile(); + JSONOutputCheck(profileResumed.get(), [](const Json::Value& aRoot) { + GET_JSON(threads, aRoot["threads"], Array); + { + GET_JSON(thread0, threads[0], Object); + { + EXPECT_JSON_ARRAY_CONTAINS(thread0["stringTable"], String, + "PostSamplingCallback resumed"); + } + } + }); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk | ProfilerFeature::NoStackSampling, + filters, MOZ_ARRAY_LENGTH(filters), 0); + { + // No stack sampling -> This label should not appear. + AUTO_PROFILER_LABEL("PostSamplingCallback completed (no stacks)", OTHER); + ASSERT_EQ(WaitForSamplingState(), SamplingState::NoStackSamplingCompleted); + } + UniquePtr<char[]> profileNoStacks = profiler_get_profile(); + JSONOutputCheck(profileNoStacks.get(), [](const Json::Value& aRoot) {}); + // This string shouldn't appear *anywhere* in the profile. + ASSERT_FALSE(strstr(profileNoStacks.get(), + "PostSamplingCallback completed (no stacks)")); + + // Note: There is no non-racy way to test for SamplingState::JustStopped, as + // it would require coordination between `profiler_stop()` and another thread + // doing `profiler_callback_after_sampling()` at just the right moment. 
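+  // For illustration, such a test would have to look something like the
+  // following, with no way to guarantee that the callback registration lands
+  // in the narrow window while the sampler is shutting down:
+  //
+  //   std::thread stopper([]() { profiler_stop(); });
+  //   // Depending on scheduling, the callback may observe SamplingCompleted,
+  //   // JustStopped, or fail to be installed at all.
+  //   profiler_callback_after_sampling([](SamplingState aState) {});
+  //   stopper.join();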
+ + profiler_stop(); + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); +} + +TEST(GeckoProfiler, BaseProfilerHandOff) +{ + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!baseprofiler::profiler_is_active()); + ASSERT_TRUE(!profiler_is_active()); + + // Start the Base Profiler. + baseprofiler::profiler_start( + PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk, filters, MOZ_ARRAY_LENGTH(filters)); + + ASSERT_TRUE(baseprofiler::profiler_is_active()); + ASSERT_TRUE(!profiler_is_active()); + + // Add at least a marker, which should go straight into the buffer. + Maybe<baseprofiler::ProfilerBufferInfo> info0 = + baseprofiler::profiler_get_buffer_info(); + BASE_PROFILER_MARKER_UNTYPED("Marker from base profiler", OTHER, {}); + Maybe<baseprofiler::ProfilerBufferInfo> info1 = + baseprofiler::profiler_get_buffer_info(); + ASSERT_GT(info1->mRangeEnd, info0->mRangeEnd); + + // Start the Gecko Profiler, which should grab the Base Profiler profile and + // stop it. + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk, filters, MOZ_ARRAY_LENGTH(filters), + 0); + + ASSERT_TRUE(!baseprofiler::profiler_is_active()); + ASSERT_TRUE(profiler_is_active()); + + // Write some Gecko Profiler samples. + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + + // Check that the Gecko Profiler profile contains at least the Base Profiler + // main thread samples. + UniquePtr<char[]> profile = profiler_get_profile(); + JSONOutputCheck(profile.get(), [](const Json::Value& aRoot) { + GET_JSON(threads, aRoot["threads"], Array); + { + bool found = false; + for (const Json::Value& thread : threads) { + ASSERT_TRUE(thread.isObject()); + GET_JSON(name, thread["name"], String); + if (name.asString() == "GeckoMain (pre-xul)") { + found = true; + EXPECT_JSON_ARRAY_CONTAINS(thread["stringTable"], String, + "Marker from base profiler"); + break; + } + } + EXPECT_TRUE(found); + } + }); + + profiler_stop(); + ASSERT_TRUE(!profiler_is_active()); +} + +TEST(GeckoProfiler, CPUUsage) +{ + const char* filters[] = {"GeckoMain"}; + + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); + + profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, + ProfilerFeature::StackWalk | ProfilerFeature::CPUUtilization, + filters, MOZ_ARRAY_LENGTH(filters), 0); + // Grab a few samples. + static constexpr unsigned MinSamplings = 10; + for (unsigned i = MinSamplings; i != 0; --i) { + ASSERT_EQ(WaitForSamplingState(), SamplingState::SamplingCompleted); + } + UniquePtr<char[]> profile = profiler_get_profile(); + JSONOutputCheck(profile.get(), [](const Json::Value& aRoot) { + // Check that the "cpu" feature is present. + GET_JSON(meta, aRoot["meta"], Object); + { + GET_JSON(configuration, meta["configuration"], Object); + { + GET_JSON(features, configuration["features"], Array); + { EXPECT_JSON_ARRAY_CONTAINS(features, String, "cpu"); } + } + } + + { + GET_JSON(sampleUnits, meta["sampleUnits"], Object); + { + EXPECT_EQ_JSON(sampleUnits["time"], String, "ms"); + EXPECT_EQ_JSON(sampleUnits["eventDelay"], String, "ms"); +#if defined(GP_OS_windows) || defined(GP_OS_darwin) || defined(GP_OS_linux) || \ + defined(GP_OS_android) || defined(GP_OS_freebsd) + // Note: The exact string is not important here. 
+ EXPECT_TRUE(sampleUnits["threadCPUDelta"].isString()) + << "There should be a sampleUnits.threadCPUDelta on this platform"; +#else + EXPECT_FALSE(sampleUnits.isMember("threadCPUDelta")) + << "Unexpected sampleUnits.threadCPUDelta on this platform";; +#endif + } + } + + // Check that the sample schema contains "threadCPUDelta". + GET_JSON(threads, aRoot["threads"], Array); + { + GET_JSON(thread0, threads[0], Object); + { + GET_JSON(samples, thread0["samples"], Object); + { + Json::ArrayIndex threadCPUDeltaIndex = 0; + GET_JSON(schema, samples["schema"], Object); + { + GET_JSON(index, schema["threadCPUDelta"], UInt); + threadCPUDeltaIndex = index.asUInt(); + } + + unsigned threadCPUDeltaCount = 0; + GET_JSON(data, samples["data"], Array); + EXPECT_GE(data.size(), MinSamplings); + for (const Json::Value& sample : data) { + ASSERT_TRUE(sample.isArray()); + if (sample.isValidIndex(threadCPUDeltaIndex)) { + if (!sample[threadCPUDeltaIndex].isNull()) { + EXPECT_TRUE(sample[threadCPUDeltaIndex].isUInt64()); + ++threadCPUDeltaCount; + } + } + } + +#if defined(GP_OS_windows) || defined(GP_OS_darwin) || defined(GP_OS_linux) || \ + defined(GP_OS_android) || defined(GP_OS_freebsd) + EXPECT_GE(threadCPUDeltaCount, data.size() - 1u) + << "There should be 'threadCPUDelta' values in all but 1 samples"; +#else + // All "threadCPUDelta" data should be absent or null on unsupported + // platforms. + EXPECT_EQ(threadCPUDeltaCount, 0u); +#endif + } + } + } + }); + + // Note: There is no non-racy way to test for SamplingState::JustStopped, as + // it would require coordination between `profiler_stop()` and another thread + // doing `profiler_callback_after_sampling()` at just the right moment. + + profiler_stop(); + ASSERT_TRUE(!profiler_is_active()); + ASSERT_TRUE(!profiler_callback_after_sampling( + [&](SamplingState) { ASSERT_TRUE(false); })); +} diff --git a/tools/profiler/tests/gtest/LulTest.cpp b/tools/profiler/tests/gtest/LulTest.cpp new file mode 100644 index 0000000000..94a0b2fd29 --- /dev/null +++ b/tools/profiler/tests/gtest/LulTest.cpp @@ -0,0 +1,51 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "gtest/gtest.h" +#include "mozilla/Atomics.h" +#include "LulMain.h" +#include "GeckoProfiler.h" // for TracingKind +#include "platform-linux-lul.h" // for read_procmaps + +// Set this to 0 to make LUL be completely silent during tests. +// Set it to 1 to get logging output from LUL, presumably for +// the purpose of debugging it. +#define DEBUG_LUL_TEST 0 + +// LUL needs a callback for its logging sink. +static void gtest_logging_sink_for_LulIntegration(const char* str) { + if (DEBUG_LUL_TEST == 0) { + return; + } + // Ignore any trailing \n, since LOG will add one anyway. + size_t n = strlen(str); + if (n > 0 && str[n - 1] == '\n') { + char* tmp = strdup(str); + tmp[n - 1] = 0; + fprintf(stderr, "LUL-in-gtest: %s\n", tmp); + free(tmp); + } else { + fprintf(stderr, "LUL-in-gtest: %s\n", str); + } +} + +TEST(LulIntegration, unwind_consistency) +{ + // Set up LUL and get it to read unwind info for libxul.so, which is + // all we care about here, plus (incidentally) practically every + // other object in the process too. 
+ lul::LUL* lul = new lul::LUL(gtest_logging_sink_for_LulIntegration); + read_procmaps(lul); + + // Run unwind tests and receive information about how many there + // were and how many were successful. + lul->EnableUnwinding(); + int nTests = 0, nTestsPassed = 0; + RunLulUnitTests(&nTests, &nTestsPassed, lul); + EXPECT_TRUE(nTests == 6) << "Unexpected number of tests"; + EXPECT_TRUE(nTestsPassed == nTests) << "Not all tests passed"; + + delete lul; +} diff --git a/tools/profiler/tests/gtest/LulTestDwarf.cpp b/tools/profiler/tests/gtest/LulTestDwarf.cpp new file mode 100644 index 0000000000..b76f524483 --- /dev/null +++ b/tools/profiler/tests/gtest/LulTestDwarf.cpp @@ -0,0 +1,2724 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "gtest/gtest.h" +#include "gmock/gmock.h" +#include "LulCommonExt.h" +#include "LulDwarfExt.h" +#include "LulDwarfInt.h" +#include "LulTestInfrastructure.h" + +using lul_test::CFISection; +using lul_test::test_assembler::kBigEndian; +using lul_test::test_assembler::kLittleEndian; +using lul_test::test_assembler::Label; +using testing::_; +using testing::InSequence; +using testing::Return; +using testing::Sequence; +using testing::Test; + +#define PERHAPS_WRITE_DEBUG_FRAME_FILE(name, section) /**/ +#define PERHAPS_WRITE_EH_FRAME_FILE(name, section) /**/ + +// Set this to 0 to make LUL be completely silent during tests. +// Set it to 1 to get logging output from LUL, presumably for +// the purpose of debugging it. +#define DEBUG_LUL_TEST_DWARF 0 + +// LUL needs a callback for its logging sink. +static void gtest_logging_sink_for_LulTestDwarf(const char* str) { + if (DEBUG_LUL_TEST_DWARF == 0) { + return; + } + // Ignore any trailing \n, since LOG will add one anyway. 
+ size_t n = strlen(str); + if (n > 0 && str[n - 1] == '\n') { + char* tmp = strdup(str); + tmp[n - 1] = 0; + fprintf(stderr, "LUL-in-gtest: %s\n", tmp); + free(tmp); + } else { + fprintf(stderr, "LUL-in-gtest: %s\n", str); + } +} + +namespace lul { + +class MockCallFrameInfoHandler : public CallFrameInfo::Handler { + public: + MOCK_METHOD6(Entry, + bool(size_t offset, uint64 address, uint64 length, uint8 version, + const std::string& augmentation, unsigned return_address)); + MOCK_METHOD2(UndefinedRule, bool(uint64 address, int reg)); + MOCK_METHOD2(SameValueRule, bool(uint64 address, int reg)); + MOCK_METHOD4(OffsetRule, + bool(uint64 address, int reg, int base_register, long offset)); + MOCK_METHOD4(ValOffsetRule, + bool(uint64 address, int reg, int base_register, long offset)); + MOCK_METHOD3(RegisterRule, bool(uint64 address, int reg, int base_register)); + MOCK_METHOD3(ExpressionRule, + bool(uint64 address, int reg, const std::string& expression)); + MOCK_METHOD3(ValExpressionRule, + bool(uint64 address, int reg, const std::string& expression)); + MOCK_METHOD0(End, bool()); + MOCK_METHOD2(PersonalityRoutine, bool(uint64 address, bool indirect)); + MOCK_METHOD2(LanguageSpecificDataArea, bool(uint64 address, bool indirect)); + MOCK_METHOD0(SignalHandler, bool()); +}; + +class MockCallFrameErrorReporter : public CallFrameInfo::Reporter { + public: + MockCallFrameErrorReporter() + : Reporter(gtest_logging_sink_for_LulTestDwarf, "mock filename", + "mock section") {} + MOCK_METHOD2(Incomplete, void(uint64, CallFrameInfo::EntryKind)); + MOCK_METHOD1(EarlyEHTerminator, void(uint64)); + MOCK_METHOD2(CIEPointerOutOfRange, void(uint64, uint64)); + MOCK_METHOD2(BadCIEId, void(uint64, uint64)); + MOCK_METHOD2(UnrecognizedVersion, void(uint64, int version)); + MOCK_METHOD2(UnrecognizedAugmentation, void(uint64, const string&)); + MOCK_METHOD2(InvalidPointerEncoding, void(uint64, uint8)); + MOCK_METHOD2(UnusablePointerEncoding, void(uint64, uint8)); + MOCK_METHOD2(RestoreInCIE, void(uint64, uint64)); + MOCK_METHOD3(BadInstruction, void(uint64, CallFrameInfo::EntryKind, uint64)); + MOCK_METHOD3(NoCFARule, void(uint64, CallFrameInfo::EntryKind, uint64)); + MOCK_METHOD3(EmptyStateStack, void(uint64, CallFrameInfo::EntryKind, uint64)); + MOCK_METHOD3(ClearingCFARule, void(uint64, CallFrameInfo::EntryKind, uint64)); +}; + +struct CFIFixture { + enum { kCFARegister = CallFrameInfo::Handler::kCFARegister }; + + CFIFixture() { + // Default expectations for the data handler. + // + // - Leave Entry and End without expectations, as it's probably a + // good idea to set those explicitly in each test. + // + // - Expect the *Rule functions to not be called, + // so that each test can simply list the calls they expect. + // + // I gather I could use StrictMock for this, but the manual seems + // to suggest using that only as a last resort, and this isn't so + // bad. + EXPECT_CALL(handler, UndefinedRule(_, _)).Times(0); + EXPECT_CALL(handler, SameValueRule(_, _)).Times(0); + EXPECT_CALL(handler, OffsetRule(_, _, _, _)).Times(0); + EXPECT_CALL(handler, ValOffsetRule(_, _, _, _)).Times(0); + EXPECT_CALL(handler, RegisterRule(_, _, _)).Times(0); + EXPECT_CALL(handler, ExpressionRule(_, _, _)).Times(0); + EXPECT_CALL(handler, ValExpressionRule(_, _, _)).Times(0); + EXPECT_CALL(handler, PersonalityRoutine(_, _)).Times(0); + EXPECT_CALL(handler, LanguageSpecificDataArea(_, _)).Times(0); + EXPECT_CALL(handler, SignalHandler()).Times(0); + + // Default expectations for the error/warning reporer. 
+ EXPECT_CALL(reporter, Incomplete(_, _)).Times(0); + EXPECT_CALL(reporter, EarlyEHTerminator(_)).Times(0); + EXPECT_CALL(reporter, CIEPointerOutOfRange(_, _)).Times(0); + EXPECT_CALL(reporter, BadCIEId(_, _)).Times(0); + EXPECT_CALL(reporter, UnrecognizedVersion(_, _)).Times(0); + EXPECT_CALL(reporter, UnrecognizedAugmentation(_, _)).Times(0); + EXPECT_CALL(reporter, InvalidPointerEncoding(_, _)).Times(0); + EXPECT_CALL(reporter, UnusablePointerEncoding(_, _)).Times(0); + EXPECT_CALL(reporter, RestoreInCIE(_, _)).Times(0); + EXPECT_CALL(reporter, BadInstruction(_, _, _)).Times(0); + EXPECT_CALL(reporter, NoCFARule(_, _, _)).Times(0); + EXPECT_CALL(reporter, EmptyStateStack(_, _, _)).Times(0); + EXPECT_CALL(reporter, ClearingCFARule(_, _, _)).Times(0); + } + + MockCallFrameInfoHandler handler; + MockCallFrameErrorReporter reporter; +}; + +class LulDwarfCFI : public CFIFixture, public Test {}; + +TEST_F(LulDwarfCFI, EmptyRegion) { + EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(handler, End()).Times(0); + static const char data[1] = {42}; + + ByteReader reader(ENDIANNESS_BIG); + CallFrameInfo parser(data, 0, &reader, &handler, &reporter); + EXPECT_TRUE(parser.Start()); +} + +TEST_F(LulDwarfCFI, IncompleteLength32) { + CFISection section(kBigEndian, 8); + section + // Not even long enough for an initial length. + .D16(0xa0f) + // Padding to keep valgrind happy. We subtract these off when we + // construct the parser. + .D16(0); + + EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(handler, End()).Times(0); + + EXPECT_CALL(reporter, Incomplete(_, CallFrameInfo::kUnknown)) + .WillOnce(Return()); + + string contents; + ASSERT_TRUE(section.GetContents(&contents)); + + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(8); + CallFrameInfo parser(contents.data(), contents.size() - 2, &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +TEST_F(LulDwarfCFI, IncompleteLength64) { + CFISection section(kLittleEndian, 4); + section + // An incomplete 64-bit DWARF initial length. + .D32(0xffffffff) + .D32(0x71fbaec2) + // Padding to keep valgrind happy. We subtract these off when we + // construct the parser. 
+ .D32(0); + + EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(handler, End()).Times(0); + + EXPECT_CALL(reporter, Incomplete(_, CallFrameInfo::kUnknown)) + .WillOnce(Return()); + + string contents; + ASSERT_TRUE(section.GetContents(&contents)); + + ByteReader reader(ENDIANNESS_LITTLE); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size() - 4, &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +TEST_F(LulDwarfCFI, IncompleteId32) { + CFISection section(kBigEndian, 8); + section + .D32(3) // Initial length, not long enough for id + .D8(0xd7) + .D8(0xe5) + .D8(0xf1) // incomplete id + .CIEHeader(8727, 3983, 8889, 3, "") + .FinishEntry(); + + EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(handler, End()).Times(0); + + EXPECT_CALL(reporter, Incomplete(_, CallFrameInfo::kUnknown)) + .WillOnce(Return()); + + string contents; + ASSERT_TRUE(section.GetContents(&contents)); + + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(8); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +TEST_F(LulDwarfCFI, BadId32) { + CFISection section(kBigEndian, 8); + section + .D32(0x100) // Initial length + .D32(0xe802fade) // bogus ID + .Append(0x100 - 4, 0x42); // make the length true + section.CIEHeader(1672, 9872, 8529, 3, "").FinishEntry(); + + EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(handler, End()).Times(0); + + EXPECT_CALL(reporter, CIEPointerOutOfRange(_, 0xe802fade)).WillOnce(Return()); + + string contents; + ASSERT_TRUE(section.GetContents(&contents)); + + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(8); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +// A lone CIE shouldn't cause any handler calls. +TEST_F(LulDwarfCFI, SingleCIE) { + CFISection section(kLittleEndian, 4); + section.CIEHeader(0xffe799a8, 0x3398dcdd, 0x6e9683de, 3, ""); + section.Append(10, lul::DW_CFA_nop); + section.FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("SingleCIE", section); + + EXPECT_CALL(handler, Entry(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(handler, End()).Times(0); + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_LITTLE); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// One FDE, one CIE. +TEST_F(LulDwarfCFI, OneFDE) { + CFISection section(kBigEndian, 4); + Label cie; + section.Mark(&cie) + .CIEHeader(0x4be22f75, 0x2492236e, 0x6b6efb87, 3, "") + .FinishEntry() + .FDEHeader(cie, 0x7714740d, 0x3d5a10cd) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("OneFDE", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0x7714740d, 0x3d5a10cd, 3, "", 0x6b6efb87)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// Two FDEs share a CIE. +TEST_F(LulDwarfCFI, TwoFDEsOneCIE) { + CFISection section(kBigEndian, 4); + Label cie; + section + // First FDE. readelf complains about this one because it makes + // a forward reference to its CIE. 
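+      // (A forward reference like this is still well-formed .debug_frame:
+      // the FDE's CIE pointer is an offset into the section, so a CIE may
+      // appear before or after the FDEs that cite it.)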
+ .FDEHeader(cie, 0xa42744df, 0xa3b42121) + .FinishEntry() + // CIE. + .Mark(&cie) + .CIEHeader(0x04f7dc7b, 0x3d00c05f, 0xbd43cb59, 3, "") + .FinishEntry() + // Second FDE. + .FDEHeader(cie, 0x6057d391, 0x700f608d) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("TwoFDEsOneCIE", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0xa42744df, 0xa3b42121, 3, "", 0xbd43cb59)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0x6057d391, 0x700f608d, 3, "", 0xbd43cb59)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// Two FDEs, two CIEs. +TEST_F(LulDwarfCFI, TwoFDEsTwoCIEs) { + CFISection section(kLittleEndian, 8); + Label cie1, cie2; + section + // First CIE. + .Mark(&cie1) + .CIEHeader(0x694d5d45, 0x4233221b, 0xbf45e65a, 3, "") + .FinishEntry() + // First FDE which cites second CIE. readelf complains about + // this one because it makes a forward reference to its CIE. + .FDEHeader(cie2, 0x778b27dfe5871f05ULL, 0x324ace3448070926ULL) + .FinishEntry() + // Second FDE, which cites first CIE. + .FDEHeader(cie1, 0xf6054ca18b10bf5fULL, 0x45fdb970d8bca342ULL) + .FinishEntry() + // Second CIE. + .Mark(&cie2) + .CIEHeader(0xfba3fad7, 0x6287e1fd, 0x61d2c581, 2, "") + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("TwoFDEsTwoCIEs", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0x778b27dfe5871f05ULL, 0x324ace3448070926ULL, + 2, "", 0x61d2c581)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0xf6054ca18b10bf5fULL, 0x45fdb970d8bca342ULL, + 3, "", 0xbf45e65a)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_LITTLE); + reader.SetAddressSize(8); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// An FDE whose CIE specifies a version we don't recognize. +TEST_F(LulDwarfCFI, BadVersion) { + CFISection section(kBigEndian, 4); + Label cie1, cie2; + section.Mark(&cie1) + .CIEHeader(0xca878cf0, 0x7698ec04, 0x7b616f54, 0x52, "") + .FinishEntry() + // We should skip this entry, as its CIE specifies a version we + // don't recognize. + .FDEHeader(cie1, 0x08852292, 0x2204004a) + .FinishEntry() + // Despite the above, we should visit this entry. + .Mark(&cie2) + .CIEHeader(0x7c3ae7c9, 0xb9b9a512, 0x96cb3264, 3, "") + .FinishEntry() + .FDEHeader(cie2, 0x2094735a, 0x6e875501) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("BadVersion", section); + + EXPECT_CALL(reporter, UnrecognizedVersion(_, 0x52)).WillOnce(Return()); + + { + InSequence s; + // We should see no mention of the first FDE, but we should get + // a call to Entry for the second. 
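+    // (Even so, the UnrecognizedVersion report makes the final
+    // parser.Start() call in this test return false.)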
+ EXPECT_CALL(handler, Entry(_, 0x2094735a, 0x6e875501, 3, "", 0x96cb3264)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +// An FDE whose CIE specifies an augmentation we don't recognize. +TEST_F(LulDwarfCFI, BadAugmentation) { + CFISection section(kBigEndian, 4); + Label cie1, cie2; + section.Mark(&cie1) + .CIEHeader(0x4be22f75, 0x2492236e, 0x6b6efb87, 3, "spaniels!") + .FinishEntry() + // We should skip this entry, as its CIE specifies an + // augmentation we don't recognize. + .FDEHeader(cie1, 0x7714740d, 0x3d5a10cd) + .FinishEntry() + // Despite the above, we should visit this entry. + .Mark(&cie2) + .CIEHeader(0xf8bc4399, 0x8cf09931, 0xf2f519b2, 3, "") + .FinishEntry() + .FDEHeader(cie2, 0x7bf0fda0, 0xcbcd28d8) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("BadAugmentation", section); + + EXPECT_CALL(reporter, UnrecognizedAugmentation(_, "spaniels!")) + .WillOnce(Return()); + + { + InSequence s; + // We should see no mention of the first FDE, but we should get + // a call to Entry for the second. + EXPECT_CALL(handler, Entry(_, 0x7bf0fda0, 0xcbcd28d8, 3, "", 0xf2f519b2)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_FALSE(parser.Start()); +} + +// The return address column field is a byte in CFI version 1 +// (DWARF2), but a ULEB128 value in version 3 (DWARF3). +TEST_F(LulDwarfCFI, CIEVersion1ReturnColumn) { + CFISection section(kBigEndian, 4); + Label cie; + section + // CIE, using the version 1 format: return column is a ubyte. + .Mark(&cie) + // Use a value for the return column that is parsed differently + // as a ubyte and as a ULEB128. + .CIEHeader(0xbcdea24f, 0x5be28286, 0x9f, 1, "") + .FinishEntry() + // FDE, citing that CIE. + .FDEHeader(cie, 0xb8d347b5, 0x825e55dc) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("CIEVersion1ReturnColumn", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0xb8d347b5, 0x825e55dc, 1, "", 0x9f)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +// The return address column field is a byte in CFI version 1 +// (DWARF2), but a ULEB128 value in version 3 (DWARF3). +TEST_F(LulDwarfCFI, CIEVersion3ReturnColumn) { + CFISection section(kBigEndian, 4); + Label cie; + section + // CIE, using the version 3 format: return column is a ULEB128. + .Mark(&cie) + // Use a value for the return column that is parsed differently + // as a ubyte and as a ULEB128. + .CIEHeader(0x0ab4758d, 0xc010fdf7, 0x89, 3, "") + .FinishEntry() + // FDE, citing that CIE. 
+ .FDEHeader(cie, 0x86763f2b, 0x2a66dc23) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("CIEVersion3ReturnColumn", section); + + { + InSequence s; + EXPECT_CALL(handler, Entry(_, 0x86763f2b, 0x2a66dc23, 3, "", 0x89)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + string contents; + EXPECT_TRUE(section.GetContents(&contents)); + ByteReader reader(ENDIANNESS_BIG); + reader.SetAddressSize(4); + CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler, + &reporter); + EXPECT_TRUE(parser.Start()); +} + +struct CFIInsnFixture : public CFIFixture { + CFIInsnFixture() : CFIFixture() { + data_factor = 0xb6f; + return_register = 0x9be1ed9f; + version = 3; + cfa_base_register = 0x383a3aa; + cfa_offset = 0xf748; + } + + // Prepare SECTION to receive FDE instructions. + // + // - Append a stock CIE header that establishes the fixture's + // code_factor, data_factor, return_register, version, and + // augmentation values. + // - Have the CIE set up a CFA rule using cfa_base_register and + // cfa_offset. + // - Append a stock FDE header, referring to the above CIE, for the + // fde_size bytes at fde_start. Choose fde_start and fde_size + // appropriately for the section's address size. + // - Set appropriate expectations on handler in sequence s for the + // frame description entry and the CIE's CFA rule. + // + // On return, SECTION is ready to have FDE instructions appended to + // it, and its FinishEntry member called. + void StockCIEAndFDE(CFISection* section) { + // Choose appropriate constants for our address size. + if (section->AddressSize() == 4) { + fde_start = 0xc628ecfbU; + fde_size = 0x5dee04a2; + code_factor = 0x60b; + } else { + assert(section->AddressSize() == 8); + fde_start = 0x0005c57ce7806bd3ULL; + fde_size = 0x2699521b5e333100ULL; + code_factor = 0x01008e32855274a8ULL; + } + + // Create the CIE. + (*section) + .Mark(&cie_label) + .CIEHeader(code_factor, data_factor, return_register, version, "") + .D8(lul::DW_CFA_def_cfa) + .ULEB128(cfa_base_register) + .ULEB128(cfa_offset) + .FinishEntry(); + + // Create the FDE. + section->FDEHeader(cie_label, fde_start, fde_size); + + // Expect an Entry call for the FDE and a ValOffsetRule call for the + // CIE's CFA rule. + EXPECT_CALL(handler, + Entry(_, fde_start, fde_size, version, "", return_register)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, + cfa_base_register, cfa_offset)) + .InSequence(s) + .WillOnce(Return(true)); + } + + // Run the contents of SECTION through a CallFrameInfo parser, + // expecting parser.Start to return SUCCEEDS. Caller may optionally + // supply, via READER, its own ByteReader. If that's absent, a + // local one is used. + void ParseSection(CFISection* section, bool succeeds = true, + ByteReader* reader = nullptr) { + string contents; + EXPECT_TRUE(section->GetContents(&contents)); + lul::Endianness endianness; + if (section->endianness() == kBigEndian) + endianness = ENDIANNESS_BIG; + else { + assert(section->endianness() == kLittleEndian); + endianness = ENDIANNESS_LITTLE; + } + ByteReader local_reader(endianness); + ByteReader* reader_to_use = reader ? 
reader : &local_reader; + reader_to_use->SetAddressSize(section->AddressSize()); + CallFrameInfo parser(contents.data(), contents.size(), reader_to_use, + &handler, &reporter); + if (succeeds) + EXPECT_TRUE(parser.Start()); + else + EXPECT_FALSE(parser.Start()); + } + + Label cie_label; + Sequence s; + uint64 code_factor; + int data_factor; + unsigned return_register; + unsigned version; + unsigned cfa_base_register; + int cfa_offset; + uint64 fde_start, fde_size; +}; + +class LulDwarfCFIInsn : public CFIInsnFixture, public Test {}; + +TEST_F(LulDwarfCFIInsn, DW_CFA_set_loc) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_set_loc) + .D32(0xb1ee3e7a) + // Use DW_CFA_def_cfa to force a handler call that we can use to + // check the effect of the DW_CFA_set_loc. + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x4defb431) + .ULEB128(0x6d17b0ee) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_set_loc", section); + + EXPECT_CALL(handler, + ValOffsetRule(0xb1ee3e7a, kCFARegister, 0x4defb431, 0x6d17b0ee)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc) { + CFISection section(kBigEndian, 8); + StockCIEAndFDE(§ion); + section + .D8(lul::DW_CFA_advance_loc | 0x2a) + // Use DW_CFA_def_cfa to force a handler call that we can use to + // check the effect of the DW_CFA_advance_loc. + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x5bbb3715) + .ULEB128(0x0186c7bf) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc", section); + + EXPECT_CALL(handler, ValOffsetRule(fde_start + 0x2a * code_factor, + kCFARegister, 0x5bbb3715, 0x0186c7bf)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc1) { + CFISection section(kLittleEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_advance_loc1) + .D8(0xd8) + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x69d5696a) + .ULEB128(0x1eb7fc93) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc1", section); + + EXPECT_CALL(handler, ValOffsetRule((fde_start + 0xd8 * code_factor), + kCFARegister, 0x69d5696a, 0x1eb7fc93)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc2) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_advance_loc2) + .D16(0x3adb) + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x3a368bed) + .ULEB128(0x3194ee37) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc2", section); + + EXPECT_CALL(handler, ValOffsetRule((fde_start + 0x3adb * code_factor), + kCFARegister, 0x3a368bed, 0x3194ee37)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_advance_loc4) { + CFISection section(kBigEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_advance_loc4) + .D32(0x15813c88) + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x135270c5) + .ULEB128(0x24bad7cb) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc4", section); + + EXPECT_CALL(handler, ValOffsetRule((fde_start + 0x15813c88ULL * code_factor), + kCFARegister, 0x135270c5, 0x24bad7cb)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, 
End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_MIPS_advance_loc8) { + code_factor = 0x2d; + CFISection section(kBigEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_MIPS_advance_loc8) + .D64(0x3c4f3945b92c14ULL) + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0xe17ed602) + .ULEB128(0x3d162e7f) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_advance_loc8", section); + + EXPECT_CALL(handler, + ValOffsetRule((fde_start + 0x3c4f3945b92c14ULL * code_factor), + kCFARegister, 0xe17ed602, 0x3d162e7f)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa) + .ULEB128(0x4e363a85) + .ULEB128(0x815f9aa7) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("DW_CFA_def_cfa", section); + + EXPECT_CALL(handler, + ValOffsetRule(fde_start, kCFARegister, 0x4e363a85, 0x815f9aa7)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_sf) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa_sf) + .ULEB128(0x8ccb32b7) + .LEB128(0x9ea) + .D8(lul::DW_CFA_def_cfa_sf) + .ULEB128(0x9b40f5da) + .LEB128(-0x40a2) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, 0x8ccb32b7, + 0x9ea * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, 0x9b40f5da, + -0x40a2 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_register) { + CFISection section(kLittleEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa_register).ULEB128(0x3e7e9363).FinishEntry(); + + EXPECT_CALL(handler, + ValOffsetRule(fde_start, kCFARegister, 0x3e7e9363, cfa_offset)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +// DW_CFA_def_cfa_register should have no effect when applied to a +// non-base/offset rule. 
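+// (The DWARF spec defines DW_CFA_def_cfa_register only when the current CFA
+// rule is of the register-plus-offset form, so an expression-based CFA rule
+// is expected to be left untouched here.)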
+TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_registerBadRule) { + ByteReader reader(ENDIANNESS_BIG); + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa_expression) + .Block("needle in a haystack") + .D8(lul::DW_CFA_def_cfa_register) + .ULEB128(0xf1b49e49) + .FinishEntry(); + + EXPECT_CALL(handler, ValExpressionRule(fde_start, kCFARegister, + "needle in a haystack")) + .WillRepeatedly(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_offset) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa_offset).ULEB128(0x1e8e3b9b).FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, cfa_base_register, + 0x1e8e3b9b)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_offset_sf) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa_offset_sf) + .LEB128(0x970) + .D8(lul::DW_CFA_def_cfa_offset_sf) + .LEB128(-0x2cd) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, cfa_base_register, + 0x970 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start, kCFARegister, cfa_base_register, + -0x2cd * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +// DW_CFA_def_cfa_offset should have no effect when applied to a +// non-base/offset rule. +TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_offsetBadRule) { + ByteReader reader(ENDIANNESS_BIG); + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa_expression) + .Block("six ways to Sunday") + .D8(lul::DW_CFA_def_cfa_offset) + .ULEB128(0x1e8e3b9b) + .FinishEntry(); + + EXPECT_CALL(handler, + ValExpressionRule(fde_start, kCFARegister, "six ways to Sunday")) + .WillRepeatedly(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_def_cfa_expression) { + ByteReader reader(ENDIANNESS_LITTLE); + CFISection section(kLittleEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_def_cfa_expression).Block("eating crow").FinishEntry(); + + EXPECT_CALL(handler, + ValExpressionRule(fde_start, kCFARegister, "eating crow")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_undefined) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_undefined).ULEB128(0x300ce45d).FinishEntry(); + + EXPECT_CALL(handler, UndefinedRule(fde_start, 0x300ce45d)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_same_value) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_same_value).ULEB128(0x3865a760).FinishEntry(); + + EXPECT_CALL(handler, SameValueRule(fde_start, 0x3865a760)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_offset) 
{ + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_offset | 0x2c).ULEB128(0x9f6).FinishEntry(); + + EXPECT_CALL(handler, + OffsetRule(fde_start, 0x2c, kCFARegister, 0x9f6 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_offset_extended) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_offset_extended) + .ULEB128(0x402b) + .ULEB128(0xb48) + .FinishEntry(); + + EXPECT_CALL(handler, + OffsetRule(fde_start, 0x402b, kCFARegister, 0xb48 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_offset_extended_sf) { + CFISection section(kBigEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_offset_extended_sf) + .ULEB128(0x997c23ee) + .LEB128(0x2d00) + .D8(lul::DW_CFA_offset_extended_sf) + .ULEB128(0x9519eb82) + .LEB128(-0xa77) + .FinishEntry(); + + EXPECT_CALL(handler, OffsetRule(fde_start, 0x997c23ee, kCFARegister, + 0x2d00 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(fde_start, 0x9519eb82, kCFARegister, + -0xa77 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_val_offset) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_offset) + .ULEB128(0x623562fe) + .ULEB128(0x673) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x623562fe, kCFARegister, + 0x673 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_val_offset_sf) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_offset_sf) + .ULEB128(0x6f4f) + .LEB128(0xaab) + .D8(lul::DW_CFA_val_offset_sf) + .ULEB128(0x2483) + .LEB128(-0x8a2) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x6f4f, kCFARegister, + 0xaab * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x2483, kCFARegister, + -0x8a2 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_register) { + CFISection section(kLittleEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_register) + .ULEB128(0x278d18f9) + .ULEB128(0x1a684414) + .FinishEntry(); + + EXPECT_CALL(handler, RegisterRule(fde_start, 0x278d18f9, 0x1a684414)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_expression) { + ByteReader reader(ENDIANNESS_BIG); + CFISection section(kBigEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_expression) + .ULEB128(0xa1619fb2) + .Block("plus ça change, plus c'est la même chose") + .FinishEntry(); + + EXPECT_CALL(handler, + ExpressionRule(fde_start, 0xa1619fb2, + "plus ça change, plus c'est la même chose")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + 
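+// DW_CFA_expression (above) has the DWARF expression compute the address at
+// which the register is saved; DW_CFA_val_expression has it compute the
+// register's value itself. That is why the handler distinguishes
+// ExpressionRule from ValExpressionRule.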
+TEST_F(LulDwarfCFIInsn, DW_CFA_val_expression) { + ByteReader reader(ENDIANNESS_BIG); + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_expression) + .ULEB128(0xc5e4a9e3) + .Block("he who has the gold makes the rules") + .FinishEntry(); + + EXPECT_CALL(handler, ValExpressionRule(fde_start, 0xc5e4a9e3, + "he who has the gold makes the rules")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_restore) { + CFISection section(kLittleEndian, 8); + code_factor = 0x01bd188a9b1fa083ULL; + data_factor = -0x1ac8; + return_register = 0x8c35b049; + version = 2; + fde_start = 0x2d70fe998298bbb1ULL; + fde_size = 0x46ccc2e63cf0b108ULL; + Label cie; + section.Mark(&cie) + .CIEHeader(code_factor, data_factor, return_register, version, "") + // Provide a CFA rule, because register rules require them. + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x6ca1d50e) + .ULEB128(0x372e38e8) + // Provide an offset(N) rule for register 0x3c. + .D8(lul::DW_CFA_offset | 0x3c) + .ULEB128(0xb348) + .FinishEntry() + // In the FDE... + .FDEHeader(cie, fde_start, fde_size) + // At a second address, provide a new offset(N) rule for register 0x3c. + .D8(lul::DW_CFA_advance_loc | 0x13) + .D8(lul::DW_CFA_offset | 0x3c) + .ULEB128(0x9a50) + // At a third address, restore the original rule for register 0x3c. + .D8(lul::DW_CFA_advance_loc | 0x01) + .D8(lul::DW_CFA_restore | 0x3c) + .FinishEntry(); + + { + InSequence s; + EXPECT_CALL(handler, + Entry(_, fde_start, fde_size, version, "", return_register)) + .WillOnce(Return(true)); + // CIE's CFA rule. + EXPECT_CALL(handler, + ValOffsetRule(fde_start, kCFARegister, 0x6ca1d50e, 0x372e38e8)) + .WillOnce(Return(true)); + // CIE's rule for register 0x3c. + EXPECT_CALL(handler, + OffsetRule(fde_start, 0x3c, kCFARegister, 0xb348 * data_factor)) + .WillOnce(Return(true)); + // FDE's rule for register 0x3c. + EXPECT_CALL(handler, OffsetRule(fde_start + 0x13 * code_factor, 0x3c, + kCFARegister, 0x9a50 * data_factor)) + .WillOnce(Return(true)); + // Restore CIE's rule for register 0x3c. + EXPECT_CALL(handler, OffsetRule(fde_start + (0x13 + 0x01) * code_factor, + 0x3c, kCFARegister, 0xb348 * data_factor)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_restoreNoRule) { + CFISection section(kBigEndian, 4); + code_factor = 0x005f78143c1c3b82ULL; + data_factor = 0x25d0; + return_register = 0xe8; + version = 1; + fde_start = 0x4062e30f; + fde_size = 0x5302a389; + Label cie; + section.Mark(&cie) + .CIEHeader(code_factor, data_factor, return_register, version, "") + // Provide a CFA rule, because register rules require them. + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x470aa334) + .ULEB128(0x099ef127) + .FinishEntry() + // In the FDE... + .FDEHeader(cie, fde_start, fde_size) + // At a second address, provide an offset(N) rule for register 0x2c. + .D8(lul::DW_CFA_advance_loc | 0x7) + .D8(lul::DW_CFA_offset | 0x2c) + .ULEB128(0x1f47) + // At a third address, restore the (missing) CIE rule for register 0x2c. + .D8(lul::DW_CFA_advance_loc | 0xb) + .D8(lul::DW_CFA_restore | 0x2c) + .FinishEntry(); + + { + InSequence s; + EXPECT_CALL(handler, + Entry(_, fde_start, fde_size, version, "", return_register)) + .WillOnce(Return(true)); + // CIE's CFA rule. 
+ EXPECT_CALL(handler, + ValOffsetRule(fde_start, kCFARegister, 0x470aa334, 0x099ef127)) + .WillOnce(Return(true)); + // FDE's rule for register 0x2c. + EXPECT_CALL(handler, OffsetRule(fde_start + 0x7 * code_factor, 0x2c, + kCFARegister, 0x1f47 * data_factor)) + .WillOnce(Return(true)); + // Restore CIE's (missing) rule for register 0x2c. + EXPECT_CALL(handler, + SameValueRule(fde_start + (0x7 + 0xb) * code_factor, 0x2c)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_restore_extended) { + CFISection section(kBigEndian, 4); + code_factor = 0x126e; + data_factor = -0xd8b; + return_register = 0x77711787; + version = 3; + fde_start = 0x01f55a45; + fde_size = 0x452adb80; + Label cie; + section.Mark(&cie) + .CIEHeader(code_factor, data_factor, return_register, version, "", + true /* dwarf64 */) + // Provide a CFA rule, because register rules require them. + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x56fa0edd) + .ULEB128(0x097f78a5) + // Provide an offset(N) rule for register 0x0f9b8a1c. + .D8(lul::DW_CFA_offset_extended) + .ULEB128(0x0f9b8a1c) + .ULEB128(0xc979) + .FinishEntry() + // In the FDE... + .FDEHeader(cie, fde_start, fde_size) + // At a second address, provide a new offset(N) rule for reg 0x0f9b8a1c. + .D8(lul::DW_CFA_advance_loc | 0x3) + .D8(lul::DW_CFA_offset_extended) + .ULEB128(0x0f9b8a1c) + .ULEB128(0x3b7b) + // At a third address, restore the original rule for register 0x0f9b8a1c. + .D8(lul::DW_CFA_advance_loc | 0x04) + .D8(lul::DW_CFA_restore_extended) + .ULEB128(0x0f9b8a1c) + .FinishEntry(); + + { + InSequence s; + EXPECT_CALL(handler, + Entry(_, fde_start, fde_size, version, "", return_register)) + .WillOnce(Return(true)); + // CIE's CFA rule. + EXPECT_CALL(handler, + ValOffsetRule(fde_start, kCFARegister, 0x56fa0edd, 0x097f78a5)) + .WillOnce(Return(true)); + // CIE's rule for register 0x0f9b8a1c. + EXPECT_CALL(handler, OffsetRule(fde_start, 0x0f9b8a1c, kCFARegister, + 0xc979 * data_factor)) + .WillOnce(Return(true)); + // FDE's rule for register 0x0f9b8a1c. + EXPECT_CALL(handler, OffsetRule(fde_start + 0x3 * code_factor, 0x0f9b8a1c, + kCFARegister, 0x3b7b * data_factor)) + .WillOnce(Return(true)); + // Restore CIE's rule for register 0x0f9b8a1c. + EXPECT_CALL(handler, + OffsetRule(fde_start + (0x3 + 0x4) * code_factor, 0x0f9b8a1c, + kCFARegister, 0xc979 * data_factor)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_remember_and_restore_state) { + CFISection section(kLittleEndian, 8); + StockCIEAndFDE(§ion); + + // We create a state, save it, modify it, and then restore. We + // refer to the state that is overridden the restore as the + // "outgoing" state, and the restored state the "incoming" state. + // + // Register outgoing incoming expect + // 1 offset(N) no rule new "same value" rule + // 2 register(R) offset(N) report changed rule + // 3 offset(N) offset(M) report changed offset + // 4 offset(N) offset(N) no report + // 5 offset(N) no rule new "same value" rule + section + // Create the "incoming" state, which we will save and later restore. + .D8(lul::DW_CFA_offset | 2) + .ULEB128(0x9806) + .D8(lul::DW_CFA_offset | 3) + .ULEB128(0x995d) + .D8(lul::DW_CFA_offset | 4) + .ULEB128(0x7055) + .D8(lul::DW_CFA_remember_state) + // Advance to a new instruction; an implementation could legitimately + // ignore all but the final rule for a given register at a given address. 
+ .D8(lul::DW_CFA_advance_loc | 1) + // Create the "outgoing" state, which we will discard. + .D8(lul::DW_CFA_offset | 1) + .ULEB128(0xea1a) + .D8(lul::DW_CFA_register) + .ULEB128(2) + .ULEB128(0x1d2a3767) + .D8(lul::DW_CFA_offset | 3) + .ULEB128(0xdd29) + .D8(lul::DW_CFA_offset | 5) + .ULEB128(0xf1ce) + // At a third address, restore the incoming state. + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + uint64 addr = fde_start; + + // Expect the incoming rules to be reported. + EXPECT_CALL(handler, OffsetRule(addr, 2, kCFARegister, 0x9806 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(addr, 3, kCFARegister, 0x995d * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(addr, 4, kCFARegister, 0x7055 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + + addr += code_factor; + + // After the save, we establish the outgoing rule set. + EXPECT_CALL(handler, OffsetRule(addr, 1, kCFARegister, 0xea1a * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, RegisterRule(addr, 2, 0x1d2a3767)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(addr, 3, kCFARegister, 0xdd29 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(addr, 5, kCFARegister, 0xf1ce * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + + addr += code_factor; + + // Finally, after the restore, expect to see the differences from + // the outgoing to the incoming rules reported. + EXPECT_CALL(handler, SameValueRule(addr, 1)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(addr, 2, kCFARegister, 0x9806 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(addr, 3, kCFARegister, 0x995d * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, SameValueRule(addr, 5)) + .InSequence(s) + .WillOnce(Return(true)); + + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +// Check that restoring a rule set reports changes to the CFA rule. 
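+// (The remembered state in this parser includes the CFA rule itself, so
+// DW_CFA_restore_state can change the CFA as well as the register rules.)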
+TEST_F(LulDwarfCFIInsn, DW_CFA_remember_and_restore_stateCFA) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + + section.D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_def_cfa_offset) + .ULEB128(0x90481102) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start + code_factor, kCFARegister, + cfa_base_register, 0x90481102)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start + code_factor * 2, kCFARegister, + cfa_base_register, cfa_offset)) + .InSequence(s) + .WillOnce(Return(true)); + + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_nop) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_nop) + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x3fb8d4f1) + .ULEB128(0x078dc67b) + .D8(lul::DW_CFA_nop) + .FinishEntry(); + + EXPECT_CALL(handler, + ValOffsetRule(fde_start, kCFARegister, 0x3fb8d4f1, 0x078dc67b)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_GNU_window_save) { + CFISection section(kBigEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_GNU_window_save).FinishEntry(); + + // Don't include all the rules in any particular sequence. + + // The caller's %o0-%o7 have become the callee's %i0-%i7. This is + // the GCC register numbering. + for (int i = 8; i < 16; i++) + EXPECT_CALL(handler, RegisterRule(fde_start, i, i + 16)) + .WillOnce(Return(true)); + // The caller's %l0-%l7 and %i0-%i7 have been saved at the top of + // its frame. + for (int i = 16; i < 32; i++) + EXPECT_CALL(handler, OffsetRule(fde_start, i, kCFARegister, (i - 16) * 4)) + .WillOnce(Return(true)); + + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_GNU_args_size) { + CFISection section(kLittleEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_GNU_args_size) + .ULEB128(0xeddfa520) + // Verify that we see this, meaning we parsed the above properly. + .D8(lul::DW_CFA_offset | 0x23) + .ULEB128(0x269) + .FinishEntry(); + + EXPECT_CALL(handler, + OffsetRule(fde_start, 0x23, kCFARegister, 0x269 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIInsn, DW_CFA_GNU_negative_offset_extended) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_GNU_negative_offset_extended) + .ULEB128(0x430cc87a) + .ULEB128(0x613) + .FinishEntry(); + + EXPECT_CALL(handler, OffsetRule(fde_start, 0x430cc87a, kCFARegister, + -0x613 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion); +} + +// Three FDEs: skip the second +TEST_F(LulDwarfCFIInsn, SkipFDE) { + CFISection section(kBigEndian, 4); + Label cie; + section + // CIE, used by all FDEs. + .Mark(&cie) + .CIEHeader(0x010269f2, 0x9177, 0xedca5849, 2, "") + .D8(lul::DW_CFA_def_cfa) + .ULEB128(0x42ed390b) + .ULEB128(0x98f43aad) + .FinishEntry() + // First FDE. + .FDEHeader(cie, 0xa870ebdd, 0x60f6aa4) + .D8(lul::DW_CFA_register) + .ULEB128(0x3a860351) + .ULEB128(0x6c9a6bcf) + .FinishEntry() + // Second FDE. 
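+      // (The expectation below returns false from Entry for this FDE, which
+      // tells the parser to skip the rest of the entry's instructions.)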
+ .FDEHeader(cie, 0xc534f7c0, 0xf6552e9, true /* dwarf64 */) + .D8(lul::DW_CFA_register) + .ULEB128(0x1b62c234) + .ULEB128(0x26586b18) + .FinishEntry() + // Third FDE. + .FDEHeader(cie, 0xf681cfc8, 0x7e4594e) + .D8(lul::DW_CFA_register) + .ULEB128(0x26c53934) + .ULEB128(0x18eeb8a4) + .FinishEntry(); + + { + InSequence s; + + // Process the first FDE. + EXPECT_CALL(handler, Entry(_, 0xa870ebdd, 0x60f6aa4, 2, "", 0xedca5849)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, + ValOffsetRule(0xa870ebdd, kCFARegister, 0x42ed390b, 0x98f43aad)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, RegisterRule(0xa870ebdd, 0x3a860351, 0x6c9a6bcf)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + // Skip the second FDE. + EXPECT_CALL(handler, Entry(_, 0xc534f7c0, 0xf6552e9, 2, "", 0xedca5849)) + .WillOnce(Return(false)); + + // Process the third FDE. + EXPECT_CALL(handler, Entry(_, 0xf681cfc8, 0x7e4594e, 2, "", 0xedca5849)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, + ValOffsetRule(0xf681cfc8, kCFARegister, 0x42ed390b, 0x98f43aad)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, RegisterRule(0xf681cfc8, 0x26c53934, 0x18eeb8a4)) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + } + + ParseSection(§ion); +} + +// Quit processing in the middle of an entry's instructions. +TEST_F(LulDwarfCFIInsn, QuitMidentry) { + CFISection section(kLittleEndian, 8); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_register) + .ULEB128(0xe0cf850d) + .ULEB128(0x15aab431) + .D8(lul::DW_CFA_expression) + .ULEB128(0x46750aa5) + .Block("meat") + .FinishEntry(); + + EXPECT_CALL(handler, RegisterRule(fde_start, 0xe0cf850d, 0x15aab431)) + .InSequence(s) + .WillOnce(Return(false)); + EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true)); + + ParseSection(§ion, false); +} + +class LulDwarfCFIRestore : public CFIInsnFixture, public Test {}; + +TEST_F(LulDwarfCFIRestore, RestoreUndefinedRuleUnchanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_undefined) + .ULEB128(0x0bac878e) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, UndefinedRule(fde_start, 0x0bac878e)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreUndefinedRuleChanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_undefined) + .ULEB128(0x7dedff5f) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_same_value) + .ULEB128(0x7dedff5f) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, UndefinedRule(fde_start, 0x7dedff5f)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, SameValueRule(fde_start + code_factor, 0x7dedff5f)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, UndefinedRule(fde_start + 2 * code_factor, 0x7dedff5f)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreSameValueRuleUnchanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_same_value) + .ULEB128(0xadbc9b3a) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + 
EXPECT_CALL(handler, SameValueRule(fde_start, 0xadbc9b3a)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreSameValueRuleChanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_same_value) + .ULEB128(0x3d90dcb5) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_undefined) + .ULEB128(0x3d90dcb5) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, SameValueRule(fde_start, 0x3d90dcb5)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0x3d90dcb5)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, SameValueRule(fde_start + 2 * code_factor, 0x3d90dcb5)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreOffsetRuleUnchanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_offset | 0x14) + .ULEB128(0xb6f) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, + OffsetRule(fde_start, 0x14, kCFARegister, 0xb6f * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreOffsetRuleChanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_offset | 0x21) + .ULEB128(0xeb7) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_undefined) + .ULEB128(0x21) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, + OffsetRule(fde_start, 0x21, kCFARegister, 0xeb7 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0x21)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(fde_start + 2 * code_factor, 0x21, + kCFARegister, 0xeb7 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreOffsetRuleChangedOffset) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_offset | 0x21) + .ULEB128(0x134) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_offset | 0x21) + .ULEB128(0xf4f) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, + OffsetRule(fde_start, 0x21, kCFARegister, 0x134 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(fde_start + code_factor, 0x21, kCFARegister, + 0xf4f * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, OffsetRule(fde_start + 2 * code_factor, 0x21, + kCFARegister, 0x134 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreValOffsetRuleUnchanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_offset) + .ULEB128(0x829caee6) + .ULEB128(0xe4c) + .D8(lul::DW_CFA_remember_state) + 
.D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x829caee6, kCFARegister, + 0xe4c * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreValOffsetRuleChanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_offset) + .ULEB128(0xf17c36d6) + .ULEB128(0xeb7) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_undefined) + .ULEB128(0xf17c36d6) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, 0xf17c36d6, kCFARegister, + 0xeb7 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xf17c36d6)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start + 2 * code_factor, 0xf17c36d6, + kCFARegister, 0xeb7 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreValOffsetRuleChangedValOffset) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_offset) + .ULEB128(0x2cf0ab1b) + .ULEB128(0x562) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_val_offset) + .ULEB128(0x2cf0ab1b) + .ULEB128(0xe88) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ValOffsetRule(fde_start, 0x2cf0ab1b, kCFARegister, + 0x562 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start + code_factor, 0x2cf0ab1b, + kCFARegister, 0xe88 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValOffsetRule(fde_start + 2 * code_factor, 0x2cf0ab1b, + kCFARegister, 0x562 * data_factor)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreRegisterRuleUnchanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_register) + .ULEB128(0x77514acc) + .ULEB128(0x464de4ce) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, RegisterRule(fde_start, 0x77514acc, 0x464de4ce)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreRegisterRuleChanged) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_register) + .ULEB128(0xe39acce5) + .ULEB128(0x095f1559) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_undefined) + .ULEB128(0xe39acce5) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, RegisterRule(fde_start, 0xe39acce5, 0x095f1559)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xe39acce5)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, + RegisterRule(fde_start + 2 * code_factor, 0xe39acce5, 0x095f1559)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, 
End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreRegisterRuleChangedRegister) { + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_register) + .ULEB128(0xd40e21b1) + .ULEB128(0x16607d6a) + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_register) + .ULEB128(0xd40e21b1) + .ULEB128(0xbabb4742) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, RegisterRule(fde_start, 0xd40e21b1, 0x16607d6a)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, + RegisterRule(fde_start + code_factor, 0xd40e21b1, 0xbabb4742)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, + RegisterRule(fde_start + 2 * code_factor, 0xd40e21b1, 0x16607d6a)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion); +} + +TEST_F(LulDwarfCFIRestore, RestoreExpressionRuleUnchanged) { + ByteReader reader(ENDIANNESS_LITTLE); + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_expression) + .ULEB128(0x666ae152) + .Block("dwarf") + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ExpressionRule(fde_start, 0x666ae152, "dwarf")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIRestore, RestoreExpressionRuleChanged) { + ByteReader reader(ENDIANNESS_LITTLE); + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_expression) + .ULEB128(0xb5ca5c46) + .Block("elf") + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_undefined) + .ULEB128(0xb5ca5c46) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ExpressionRule(fde_start, 0xb5ca5c46, "elf")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xb5ca5c46)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, + ExpressionRule(fde_start + 2 * code_factor, 0xb5ca5c46, "elf")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIRestore, RestoreExpressionRuleChangedExpression) { + ByteReader reader(ENDIANNESS_LITTLE); + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_expression) + .ULEB128(0x500f5739) + .Block("smurf") + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_expression) + .ULEB128(0x500f5739) + .Block("orc") + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ExpressionRule(fde_start, 0x500f5739, "smurf")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, + ExpressionRule(fde_start + code_factor, 0x500f5739, "orc")) + .InSequence(s) + .WillOnce(Return(true)); + // Expectations are not wishes. 
+ EXPECT_CALL(handler, + ExpressionRule(fde_start + 2 * code_factor, 0x500f5739, "smurf")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIRestore, RestoreValExpressionRuleUnchanged) { + ByteReader reader(ENDIANNESS_LITTLE); + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_expression) + .ULEB128(0x666ae152) + .Block("hideous") + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + EXPECT_CALL(handler, ValExpressionRule(fde_start, 0x666ae152, "hideous")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIRestore, RestoreValExpressionRuleChanged) { + ByteReader reader(ENDIANNESS_LITTLE); + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_expression) + .ULEB128(0xb5ca5c46) + .Block("revolting") + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_undefined) + .ULEB128(0xb5ca5c46) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("RestoreValExpressionRuleChanged", section); + + EXPECT_CALL(handler, ValExpressionRule(fde_start, 0xb5ca5c46, "revolting")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, UndefinedRule(fde_start + code_factor, 0xb5ca5c46)) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValExpressionRule(fde_start + 2 * code_factor, + 0xb5ca5c46, "revolting")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +TEST_F(LulDwarfCFIRestore, RestoreValExpressionRuleChangedValExpression) { + ByteReader reader(ENDIANNESS_LITTLE); + CFISection section(kLittleEndian, 4); + StockCIEAndFDE(§ion); + section.D8(lul::DW_CFA_val_expression) + .ULEB128(0x500f5739) + .Block("repulsive") + .D8(lul::DW_CFA_remember_state) + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_val_expression) + .ULEB128(0x500f5739) + .Block("nauseous") + .D8(lul::DW_CFA_advance_loc | 1) + .D8(lul::DW_CFA_restore_state) + .FinishEntry(); + + PERHAPS_WRITE_DEBUG_FRAME_FILE("RestoreValExpressionRuleChangedValExpression", + section); + + EXPECT_CALL(handler, ValExpressionRule(fde_start, 0x500f5739, "repulsive")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, ValExpressionRule(fde_start + code_factor, 0x500f5739, + "nauseous")) + .InSequence(s) + .WillOnce(Return(true)); + // Expectations are not wishes. + EXPECT_CALL(handler, ValExpressionRule(fde_start + 2 * code_factor, + 0x500f5739, "repulsive")) + .InSequence(s) + .WillOnce(Return(true)); + EXPECT_CALL(handler, End()).WillOnce(Return(true)); + + ParseSection(§ion, true, &reader); +} + +struct EHFrameFixture : public CFIInsnFixture { + EHFrameFixture() : CFIInsnFixture(), section(kBigEndian, 4, true) { + encoded_pointer_bases.cfi = 0x7f496cb2; + encoded_pointer_bases.text = 0x540f67b6; + encoded_pointer_bases.data = 0xe3eab768; + section.SetEncodedPointerBases(encoded_pointer_bases); + } + CFISection section; + CFISection::EncodedPointerBases encoded_pointer_bases; + + // Parse CFIInsnFixture::ParseSection, but parse the section as + // .eh_frame data, supplying stock base addresses. 
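+  // The reader is given the stock cfi/text/data bases so that DW_EH_PE_*
+  // encoded pointers can be decoded, and the parser's trailing boolean
+  // argument selects .eh_frame rather than .debug_frame interpretation.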
+  void ParseEHFrameSection(CFISection* section, bool succeeds = true) {
+    EXPECT_TRUE(section->ContainsEHFrame());
+    string contents;
+    EXPECT_TRUE(section->GetContents(&contents));
+    lul::Endianness endianness;
+    if (section->endianness() == kBigEndian)
+      endianness = ENDIANNESS_BIG;
+    else {
+      assert(section->endianness() == kLittleEndian);
+      endianness = ENDIANNESS_LITTLE;
+    }
+    ByteReader reader(endianness);
+    reader.SetAddressSize(section->AddressSize());
+    reader.SetCFIDataBase(encoded_pointer_bases.cfi, contents.data());
+    reader.SetTextBase(encoded_pointer_bases.text);
+    reader.SetDataBase(encoded_pointer_bases.data);
+    CallFrameInfo parser(contents.data(), contents.size(), &reader, &handler,
+                         &reporter, true);
+    if (succeeds)
+      EXPECT_TRUE(parser.Start());
+    else
+      EXPECT_FALSE(parser.Start());
+  }
+};
+
+class LulDwarfEHFrame : public EHFrameFixture, public Test {};
+
+// A simple CIE, an FDE, and a terminator.
+TEST_F(LulDwarfEHFrame, Terminator) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(9968, 2466, 67, 1, "")
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(3772)
+      .ULEB128(1372)
+      .FinishEntry()
+      .FDEHeader(cie, 0x848037a1, 0x7b30475e)
+      .D8(lul::DW_CFA_set_loc)
+      .D32(0x17713850)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(5721)
+      .FinishEntry()
+      .D32(0)  // Terminate the sequence.
+      // This FDE should be ignored.
+      .FDEHeader(cie, 0xf19629fe, 0x439fb09b)
+      .FinishEntry();
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.Terminator", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x848037a1, 0x7b30475e, 1, "", 67))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(0x848037a1, kCFARegister, 3772, 1372))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(0x17713850, 5721))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+  EXPECT_CALL(reporter, EarlyEHTerminator(_)).InSequence(s).WillOnce(Return());
+
+  ParseEHFrameSection(&section);
+}
+
+// The parser should recognize the Linux Standards Base 'z' augmentations.
+TEST_F(LulDwarfEHFrame, SimpleFDE) {
+  lul::DwarfPointerEncoding lsda_encoding = lul::DwarfPointerEncoding(
+      lul::DW_EH_PE_indirect | lul::DW_EH_PE_datarel | lul::DW_EH_PE_sdata2);
+  lul::DwarfPointerEncoding fde_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_textrel | lul::DW_EH_PE_udata2);
+
+  section.SetPointerEncoding(fde_encoding);
+  section.SetEncodedPointerBases(encoded_pointer_bases);
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(4873, 7012, 100, 1, "zSLPR")
+      .ULEB128(7)  // Augmentation data length
+      .D8(lsda_encoding)  // LSDA pointer format
+      .D8(lul::DW_EH_PE_pcrel)  // personality pointer format
+      .EncodedPointer(0x97baa00, lul::DW_EH_PE_pcrel)  // and value
+      .D8(fde_encoding)  // FDE pointer format
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(6706)
+      .ULEB128(31)
+      .FinishEntry()
+      .FDEHeader(cie, 0x540f6b56, 0xf686)
+      .ULEB128(2)  // Augmentation data length
+      .EncodedPointer(0xe3eab475, lsda_encoding)  // LSDA pointer, signed
+      .D8(lul::DW_CFA_set_loc)
+      .EncodedPointer(0x540fa4ce, fde_encoding)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(0x675e)
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.SimpleFDE", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x540f6b56, 0xf686, 1, "zSLPR", 100))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, PersonalityRoutine(0x97baa00, false))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, LanguageSpecificDataArea(0xe3eab475, true))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, SignalHandler()).InSequence(s).WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(0x540f6b56, kCFARegister, 6706, 31))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(0x540fa4ce, 0x675e))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+// Check that we can handle an empty 'z' augmentation.
+TEST_F(LulDwarfEHFrame, EmptyZ) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(5955, 5805, 228, 1, "z")
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(3629)
+      .ULEB128(247)
+      .FinishEntry()
+      .FDEHeader(cie, 0xda007738, 0xfb55c641)
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_advance_loc1)
+      .D8(11)
+      .D8(lul::DW_CFA_undefined)
+      .ULEB128(3769)
+      .FinishEntry();
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.EmptyZ", section);
+
+  EXPECT_CALL(handler, Entry(_, 0xda007738, 0xfb55c641, 1, "z", 228))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, ValOffsetRule(0xda007738, kCFARegister, 3629, 247))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, UndefinedRule(0xda007738 + 11 * 5955, 3769))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+// Check that we recognize bad 'z' augmentation characters.
+TEST_F(LulDwarfEHFrame, BadZ) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(6937, 1045, 142, 1, "zQ")
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_def_cfa)
+      .ULEB128(9006)
+      .ULEB128(7725)
+      .FinishEntry()
+      .FDEHeader(cie, 0x1293efa8, 0x236f53f2)
+      .ULEB128(0)  // Augmentation data length
+      .D8(lul::DW_CFA_advance_loc | 12)
+      .D8(lul::DW_CFA_register)
+      .ULEB128(5667)
+      .ULEB128(3462)
+      .FinishEntry();
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.BadZ", section);
+
+  EXPECT_CALL(reporter, UnrecognizedAugmentation(_, "zQ")).WillOnce(Return());
+
+  ParseEHFrameSection(&section, false);
+}
+
+TEST_F(LulDwarfEHFrame, zL) {
+  Label cie;
+  lul::DwarfPointerEncoding lsda_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_funcrel | lul::DW_EH_PE_udata2);
+  section.Mark(&cie)
+      .CIEHeader(9285, 9959, 54, 1, "zL")
+      .ULEB128(1)  // Augmentation data length
+      .D8(lsda_encoding)  // encoding for LSDA pointer in FDE
+
+      .FinishEntry()
+      .FDEHeader(cie, 0xd40091aa, 0x9aa6e746)
+      .ULEB128(2)  // Augmentation data length
+      .EncodedPointer(0xd40099cd, lsda_encoding)  // LSDA pointer
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zL", section);
+
+  EXPECT_CALL(handler, Entry(_, 0xd40091aa, 0x9aa6e746, 1, "zL", 54))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, LanguageSpecificDataArea(0xd40099cd, false))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+TEST_F(LulDwarfEHFrame, zP) {
+  Label cie;
+  lul::DwarfPointerEncoding personality_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_datarel | lul::DW_EH_PE_udata2);
+  section.Mark(&cie)
+      .CIEHeader(1097, 6313, 17, 1, "zP")
+      .ULEB128(3)  // Augmentation data length
+      .D8(personality_encoding)  // encoding for personality routine
+      .EncodedPointer(0xe3eaccac, personality_encoding)  // value
+      .FinishEntry()
+      .FDEHeader(cie, 0x0c8350c9, 0xbef11087)
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zP", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x0c8350c9, 0xbef11087, 1, "zP", 17))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, PersonalityRoutine(0xe3eaccac, false))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+TEST_F(LulDwarfEHFrame, zR) {
+  Label cie;
+  lul::DwarfPointerEncoding pointer_encoding =
+      lul::DwarfPointerEncoding(lul::DW_EH_PE_textrel | lul::DW_EH_PE_sdata2);
+  section.SetPointerEncoding(pointer_encoding);
+  section.Mark(&cie)
+      .CIEHeader(8011, 5496, 75, 1, "zR")
+      .ULEB128(1)  // Augmentation data length
+      .D8(pointer_encoding)  // encoding for FDE addresses
+      .FinishEntry()
+      .FDEHeader(cie, 0x540f9431, 0xbd0)
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zR", section);
+
+  EXPECT_CALL(handler, Entry(_, 0x540f9431, 0xbd0, 1, "zR", 75))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+TEST_F(LulDwarfEHFrame, zS) {
+  Label cie;
+  section.Mark(&cie)
+      .CIEHeader(9217, 7694, 57, 1, "zS")
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .FDEHeader(cie, 0xd40091aa, 0x9aa6e746)
+      .ULEB128(0)  // Augmentation data length
+      .FinishEntry()
+      .D32(0);  // terminator
+
+  PERHAPS_WRITE_EH_FRAME_FILE("EHFrame.zS", section);
+
+  EXPECT_CALL(handler, Entry(_, 0xd40091aa, 0x9aa6e746, 1, "zS", 57))
+      .InSequence(s)
+      .WillOnce(Return(true));
+  EXPECT_CALL(handler, SignalHandler()).InSequence(s).WillOnce(Return(true));
+  EXPECT_CALL(handler, End()).InSequence(s).WillOnce(Return(true));
+
+  ParseEHFrameSection(&section);
+}
+
+// These tests require manual inspection of the test output.
+struct CFIReporterFixture {
+  CFIReporterFixture()
+      : reporter(gtest_logging_sink_for_LulTestDwarf, "test file name",
+                 "test section name") {}
+  CallFrameInfo::Reporter reporter;
+};
+
+class LulDwarfCFIReporter : public CFIReporterFixture, public Test {};
+
+TEST_F(LulDwarfCFIReporter, Incomplete) {
+  reporter.Incomplete(0x0102030405060708ULL, CallFrameInfo::kUnknown);
+}
+
+TEST_F(LulDwarfCFIReporter, EarlyEHTerminator) {
+  reporter.EarlyEHTerminator(0x0102030405060708ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, CIEPointerOutOfRange) {
+  reporter.CIEPointerOutOfRange(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, BadCIEId) {
+  reporter.BadCIEId(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, UnrecognizedVersion) {
+  reporter.UnrecognizedVersion(0x0123456789abcdefULL, 43);
+}
+
+TEST_F(LulDwarfCFIReporter, UnrecognizedAugmentation) {
+  reporter.UnrecognizedAugmentation(0x0123456789abcdefULL, "poodles");
+}
+
+TEST_F(LulDwarfCFIReporter, InvalidPointerEncoding) {
+  reporter.InvalidPointerEncoding(0x0123456789abcdefULL, 0x42);
+}
+
+TEST_F(LulDwarfCFIReporter, UnusablePointerEncoding) {
+  reporter.UnusablePointerEncoding(0x0123456789abcdefULL, 0x42);
+}
+
+TEST_F(LulDwarfCFIReporter, RestoreInCIE) {
+  reporter.RestoreInCIE(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, BadInstruction) {
+  reporter.BadInstruction(0x0123456789abcdefULL, CallFrameInfo::kFDE,
+                          0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, NoCFARule) {
+  reporter.NoCFARule(0x0123456789abcdefULL, CallFrameInfo::kCIE,
+                     0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, EmptyStateStack) {
+  reporter.EmptyStateStack(0x0123456789abcdefULL, CallFrameInfo::kTerminator,
+                           0xfedcba9876543210ULL);
+}
+
+TEST_F(LulDwarfCFIReporter, ClearingCFARule) {
+  reporter.ClearingCFARule(0x0123456789abcdefULL, CallFrameInfo::kFDE,
+                           0xfedcba9876543210ULL);
+}
+class LulDwarfExpr : public Test {};
+
+class MockSummariser : public Summariser {
+ public:
+  MockSummariser() : Summariser(nullptr, 0, nullptr) {}
+  MOCK_METHOD2(Entry, void(uintptr_t, uintptr_t));
+  MOCK_METHOD0(End, void());
+  MOCK_METHOD5(Rule, void(uintptr_t, int, LExprHow, int16_t, int64_t));
+  MOCK_METHOD1(AddPfxInstr, uint32_t(PfxInstr));
+};
+
+TEST_F(LulDwarfExpr, SimpleTransliteration) {
+  MockSummariser summ;
+  ByteReader reader(ENDIANNESS_LITTLE);
+
+  CFISection section(kLittleEndian, 8);
+  section.D8(DW_OP_lit0)
+      .D8(DW_OP_lit31)
+      .D8(DW_OP_breg0 + 17)
+      .LEB128(-1234)
+      .D8(DW_OP_const4s)
+      .D32(0xFEDC9876)
+      .D8(DW_OP_deref)
+      .D8(DW_OP_and)
+      .D8(DW_OP_plus)
+      .D8(DW_OP_minus)
+      .D8(DW_OP_shl)
+      .D8(DW_OP_ge);
+  string expr;
+  bool ok = section.GetContents(&expr);
+  EXPECT_TRUE(ok);
+
+  {
+    InSequence s;
+    // required start marker
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Start, 0)));
+    // DW_OP_lit0
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_SImm32, 0)));
+    // DW_OP_lit31
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_SImm32, 31)));
+    // DW_OP_breg17 -1234
+    EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_DwReg, 17)));
+    EXPECT_CALL(summ,
AddPfxInstr(PfxInstr(PX_SImm32, -1234))); + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Add))); + // DW_OP_const4s 0xFEDC9876 + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_SImm32, 0xFEDC9876))); + // DW_OP_deref + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Deref))); + // DW_OP_and + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_And))); + // DW_OP_plus + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Add))); + // DW_OP_minus + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Sub))); + // DW_OP_shl + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Shl))); + // DW_OP_ge + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_CmpGES))); + // required end marker + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_End))); + } + + int32_t ix = parseDwarfExpr(&summ, &reader, expr, false, false, false); + EXPECT_TRUE(ix >= 0); +} + +TEST_F(LulDwarfExpr, UnknownOpcode) { + MockSummariser summ; + ByteReader reader(ENDIANNESS_LITTLE); + + CFISection section(kLittleEndian, 8); + section.D8(DW_OP_lo_user - 1); + string expr; + bool ok = section.GetContents(&expr); + EXPECT_TRUE(ok); + + { + InSequence s; + // required start marker + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Start, 0))); + } + + int32_t ix = parseDwarfExpr(&summ, &reader, expr, false, false, false); + EXPECT_TRUE(ix == -1); +} + +TEST_F(LulDwarfExpr, ExpressionOverrun) { + MockSummariser summ; + ByteReader reader(ENDIANNESS_LITTLE); + + CFISection section(kLittleEndian, 8); + section.D8(DW_OP_const4s).D8(0x12).D8(0x34).D8(0x56); + string expr; + bool ok = section.GetContents(&expr); + EXPECT_TRUE(ok); + + { + InSequence s; + // required start marker + EXPECT_CALL(summ, AddPfxInstr(PfxInstr(PX_Start, 0))); + // DW_OP_const4s followed by 3 (a.k.a. not enough) bytes + // We expect PfxInstr(PX_Simm32, not-known-for-sure-32-bit-immediate) + // Hence must use _ as the argument. + EXPECT_CALL(summ, AddPfxInstr(_)); + } + + int32_t ix = parseDwarfExpr(&summ, &reader, expr, false, false, false); + EXPECT_TRUE(ix == -1); +} + +// We'll need to mention specific Dwarf registers in the EvaluatePfxExpr tests, +// and those names are arch-specific, so a bit of macro magic is helpful. +#if defined(GP_ARCH_arm) +# define TESTED_REG_STRUCT_NAME r11 +# define TESTED_REG_DWARF_NAME DW_REG_ARM_R11 +#elif defined(GP_ARCH_arm64) +# define TESTED_REG_STRUCT_NAME x29 +# define TESTED_REG_DWARF_NAME DW_REG_AARCH64_X29 +#elif defined(GP_ARCH_amd64) || defined(GP_ARCH_x86) +# define TESTED_REG_STRUCT_NAME xbp +# define TESTED_REG_DWARF_NAME DW_REG_INTEL_XBP +#else +# error "Unknown plat" +#endif + +struct EvaluatePfxExprFixture { + // Creates: + // initial stack, AVMA 0x12345678, at offset 4 bytes = 0xdeadbeef + // initial regs, with XBP = 0x14141356 + // initial CFA = 0x5432ABCD + EvaluatePfxExprFixture() { + // The test stack. + si.mStartAvma = 0x12345678; + si.mLen = 0; +#define XX(_byte) \ + do { \ + si.mContents[si.mLen++] = (_byte); \ + } while (0) + XX(0x55); + XX(0x55); + XX(0x55); + XX(0x55); + if (sizeof(void*) == 8) { + // le64 + XX(0xEF); + XX(0xBE); + XX(0xAD); + XX(0xDE); + XX(0); + XX(0); + XX(0); + XX(0); + } else { + // le32 + XX(0xEF); + XX(0xBE); + XX(0xAD); + XX(0xDE); + } + XX(0xAA); + XX(0xAA); + XX(0xAA); + XX(0xAA); +#undef XX + // The initial CFA. + initialCFA = TaggedUWord(0x5432ABCD); + // The initial register state. 
+    memset(&regs, 0, sizeof(regs));
+    regs.TESTED_REG_STRUCT_NAME = TaggedUWord(0x14141356);
+  }
+
+  StackImage si;
+  TaggedUWord initialCFA;
+  UnwindRegs regs;
+};
+
+class LulDwarfEvaluatePfxExpr : public EvaluatePfxExprFixture, public Test {};
+
+TEST_F(LulDwarfEvaluatePfxExpr, NormalEvaluation) {
+  vector<PfxInstr> instrs;
+  // Put some junk at the start of the insn sequence.
+  instrs.push_back(PfxInstr(PX_End));
+  instrs.push_back(PfxInstr(PX_End));
+
+  // Now the real sequence
+  // stack is empty
+  instrs.push_back(PfxInstr(PX_Start, 1));
+  // 0x5432ABCD
+  instrs.push_back(PfxInstr(PX_SImm32, 0x31415927));
+  // 0x5432ABCD 0x31415927
+  instrs.push_back(PfxInstr(PX_DwReg, TESTED_REG_DWARF_NAME));
+  // 0x5432ABCD 0x31415927 0x14141356
+  instrs.push_back(PfxInstr(PX_SImm32, 42));
+  // 0x5432ABCD 0x31415927 0x14141356 42
+  instrs.push_back(PfxInstr(PX_Sub));
+  // 0x5432ABCD 0x31415927 0x1414132c
+  instrs.push_back(PfxInstr(PX_Add));
+  // 0x5432ABCD 0x45556c53
+  instrs.push_back(PfxInstr(PX_SImm32, si.mStartAvma + 4));
+  // 0x5432ABCD 0x45556c53 0x1234567c
+  instrs.push_back(PfxInstr(PX_Deref));
+  // 0x5432ABCD 0x45556c53 0xdeadbeef
+  instrs.push_back(PfxInstr(PX_SImm32, 0xFE01DC23));
+  // 0x5432ABCD 0x45556c53 0xdeadbeef 0xFE01DC23
+  instrs.push_back(PfxInstr(PX_And));
+  // 0x5432ABCD 0x45556c53 0xde019c23
+  instrs.push_back(PfxInstr(PX_SImm32, 7));
+  // 0x5432ABCD 0x45556c53 0xde019c23 7
+  instrs.push_back(PfxInstr(PX_Shl));
+  // 0x5432ABCD 0x45556c53 0x6f00ce1180
+  instrs.push_back(PfxInstr(PX_SImm32, 0x7fffffff));
+  // 0x5432ABCD 0x45556c53 0x6f00ce1180 7fffffff
+  instrs.push_back(PfxInstr(PX_And));
+  // 0x5432ABCD 0x45556c53 0x00ce1180
+  instrs.push_back(PfxInstr(PX_Add));
+  // 0x5432ABCD 0x46237dd3
+  instrs.push_back(PfxInstr(PX_Sub));
+  // 0xe0f2dfa
+
+  instrs.push_back(PfxInstr(PX_End));
+
+  TaggedUWord res = EvaluatePfxExpr(2 /*offset of start insn*/, &regs,
+                                    initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res.Value() == 0xe0f2dfa);
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, EmptySequence) {
+  vector<PfxInstr> instrs;
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, BogusStartPoint) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_SImm32, 42));
+  instrs.push_back(PfxInstr(PX_SImm32, 24));
+  instrs.push_back(PfxInstr(PX_SImm32, 4224));
+  TaggedUWord res = EvaluatePfxExpr(1, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, MissingEndMarker) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  instrs.push_back(PfxInstr(PX_SImm32, 24));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackUnderflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackNoUnderflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 1 /*push the initial CFA*/));
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res == initialCFA);
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackOverflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  for (int i = 0; i < 10 + 1; i++) {
+    instrs.push_back(PfxInstr(PX_SImm32, i + 100));
+  }
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_FALSE(res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, StackNoOverflow) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  for (int i = 0; i < 10 + 0; i++) {
+    instrs.push_back(PfxInstr(PX_SImm32, i + 100));
+  }
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res == TaggedUWord(109));
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, OutOfRangeShl) {
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  instrs.push_back(PfxInstr(PX_SImm32, 1234));
+  instrs.push_back(PfxInstr(PX_SImm32, 5678));
+  instrs.push_back(PfxInstr(PX_Shl));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(!res.Valid());
+}
+
+TEST_F(LulDwarfEvaluatePfxExpr, TestCmpGES) {
+  const int32_t argsL[6] = {0, 0, 1, -2, -1, -2};
+  const int32_t argsR[6] = {0, 1, 0, -2, -2, -1};
+  // expecting: t f t t t f = 101110 = 0x2E
+  vector<PfxInstr> instrs;
+  instrs.push_back(PfxInstr(PX_Start, 0));
+  // The "running total"
+  instrs.push_back(PfxInstr(PX_SImm32, 0));
+  for (unsigned int i = 0; i < sizeof(argsL) / sizeof(argsL[0]); i++) {
+    // Shift the "running total" at the bottom of the stack left by one bit
+    instrs.push_back(PfxInstr(PX_SImm32, 1));
+    instrs.push_back(PfxInstr(PX_Shl));
+    // Push both test args and do the comparison
+    instrs.push_back(PfxInstr(PX_SImm32, argsL[i]));
+    instrs.push_back(PfxInstr(PX_SImm32, argsR[i]));
+    instrs.push_back(PfxInstr(PX_CmpGES));
+    // Or the result into the running total
+    instrs.push_back(PfxInstr(PX_Or));
+  }
+  instrs.push_back(PfxInstr(PX_End));
+  TaggedUWord res = EvaluatePfxExpr(0, &regs, initialCFA, &si, instrs);
+  EXPECT_TRUE(res.Valid());
+  EXPECT_TRUE(res == TaggedUWord(0x2E));
+}
+
+}  // namespace lul
diff --git a/tools/profiler/tests/gtest/LulTestInfrastructure.cpp b/tools/profiler/tests/gtest/LulTestInfrastructure.cpp
new file mode 100644
index 0000000000..43154b06ad
--- /dev/null
+++ b/tools/profiler/tests/gtest/LulTestInfrastructure.cpp
@@ -0,0 +1,494 @@
+// Copyright (c) 2010, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com> + +// Derived from: +// test_assembler.cc: Implementation of google_breakpad::TestAssembler. +// See test_assembler.h for details. + +// Derived from: +// cfi_assembler.cc: Implementation of google_breakpad::CFISection class. +// See cfi_assembler.h for details. + +#include "LulTestInfrastructure.h" + +namespace lul_test { +namespace test_assembler { + +using std::back_insert_iterator; + +Label::Label() : value_(new Binding()) {} +Label::Label(uint64_t value) : value_(new Binding(value)) {} +Label::Label(const Label& label) { + value_ = label.value_; + value_->Acquire(); +} +Label::~Label() { + if (value_->Release()) delete value_; +} + +Label& Label::operator=(uint64_t value) { + value_->Set(NULL, value); + return *this; +} + +Label& Label::operator=(const Label& label) { + value_->Set(label.value_, 0); + return *this; +} + +Label Label::operator+(uint64_t addend) const { + Label l; + l.value_->Set(this->value_, addend); + return l; +} + +Label Label::operator-(uint64_t subtrahend) const { + Label l; + l.value_->Set(this->value_, -subtrahend); + return l; +} + +// When NDEBUG is #defined, assert doesn't evaluate its argument. This +// means you can't simply use assert to check the return value of a +// function with necessary side effects. +// +// ALWAYS_EVALUATE_AND_ASSERT(x) evaluates x regardless of whether +// NDEBUG is #defined; when NDEBUG is not #defined, it further asserts +// that x is true. +#ifdef NDEBUG +# define ALWAYS_EVALUATE_AND_ASSERT(x) x +#else +# define ALWAYS_EVALUATE_AND_ASSERT(x) assert(x) +#endif + +uint64_t Label::operator-(const Label& label) const { + uint64_t offset; + ALWAYS_EVALUATE_AND_ASSERT(IsKnownOffsetFrom(label, &offset)); + return offset; +} + +bool Label::IsKnownConstant(uint64_t* value_p) const { + Binding* base; + uint64_t addend; + value_->Get(&base, &addend); + if (base != NULL) return false; + if (value_p) *value_p = addend; + return true; +} + +bool Label::IsKnownOffsetFrom(const Label& label, uint64_t* offset_p) const { + Binding *label_base, *this_base; + uint64_t label_addend, this_addend; + label.value_->Get(&label_base, &label_addend); + value_->Get(&this_base, &this_addend); + // If this and label are related, Get will find their final + // common ancestor, regardless of how indirect the relation is. This + // comparison also handles the constant vs. constant case. + if (this_base != label_base) return false; + if (offset_p) *offset_p = this_addend - label_addend; + return true; +} + +Label::Binding::Binding() : base_(this), addend_(), reference_count_(1) {} + +Label::Binding::Binding(uint64_t addend) + : base_(NULL), addend_(addend), reference_count_(1) {} + +Label::Binding::~Binding() { + assert(reference_count_ == 0); + if (base_ && base_ != this && base_->Release()) delete base_; +} + +void Label::Binding::Set(Binding* binding, uint64_t addend) { + if (!base_ && !binding) { + // We're equating two constants. This could be okay. 
+ assert(addend_ == addend); + } else if (!base_) { + // We are a known constant, but BINDING may not be, so turn the + // tables and try to set BINDING's value instead. + binding->Set(NULL, addend_ - addend); + } else { + if (binding) { + // Find binding's final value. Since the final value is always either + // completely unconstrained or a constant, never a reference to + // another variable (otherwise, it wouldn't be final), this + // guarantees we won't create cycles here, even for code like this: + // l = m, m = n, n = l; + uint64_t binding_addend; + binding->Get(&binding, &binding_addend); + addend += binding_addend; + } + + // It seems likely that setting a binding to itself is a bug + // (although I can imagine this might turn out to be helpful to + // permit). + assert(binding != this); + + if (base_ != this) { + // Set the other bindings on our chain as well. Note that this + // is sufficient even though binding relationships form trees: + // All binding operations traverse their chains to the end, and + // all bindings related to us share some tail of our chain, so + // they will see the changes we make here. + base_->Set(binding, addend - addend_); + // We're not going to use base_ any more. + if (base_->Release()) delete base_; + } + + // Adopt BINDING as our base. Note that it should be correct to + // acquire here, after the release above, even though the usual + // reference-counting rules call for acquiring first, and then + // releasing: the self-reference assertion above should have + // complained if BINDING were 'this' or anywhere along our chain, + // so we didn't release BINDING. + if (binding) binding->Acquire(); + base_ = binding; + addend_ = addend; + } +} + +void Label::Binding::Get(Binding** base, uint64_t* addend) { + if (base_ && base_ != this) { + // Recurse to find the end of our reference chain (the root of our + // tree), and then rewrite every binding along the chain to refer + // to it directly, adjusting addends appropriately. (This is why + // this member function isn't this-const.) + Binding* final_base; + uint64_t final_addend; + base_->Get(&final_base, &final_addend); + if (final_base) final_base->Acquire(); + if (base_->Release()) delete base_; + base_ = final_base; + addend_ += final_addend; + } + *base = base_; + *addend = addend_; +} + +template <typename Inserter> +static inline void InsertEndian(test_assembler::Endianness endianness, + size_t size, uint64_t number, Inserter dest) { + assert(size > 0); + if (endianness == kLittleEndian) { + for (size_t i = 0; i < size; i++) { + *dest++ = (char)(number & 0xff); + number >>= 8; + } + } else { + assert(endianness == kBigEndian); + // The loop condition is odd, but it's correct for size_t. + for (size_t i = size - 1; i < size; i--) + *dest++ = (char)((number >> (i * 8)) & 0xff); + } +} + +Section& Section::Append(Endianness endianness, size_t size, uint64_t number) { + InsertEndian(endianness, size, number, + back_insert_iterator<string>(contents_)); + return *this; +} + +Section& Section::Append(Endianness endianness, size_t size, + const Label& label) { + // If this label's value is known, there's no reason to waste an + // entry in references_ on it. + uint64_t value; + if (label.IsKnownConstant(&value)) return Append(endianness, size, value); + + // This will get caught when the references are resolved, but it's + // nicer to find out earlier. 
+ assert(endianness != kUnsetEndian); + + references_.push_back(Reference(contents_.size(), endianness, size, label)); + contents_.append(size, 0); + return *this; +} + +#define ENDIANNESS_L kLittleEndian +#define ENDIANNESS_B kBigEndian +#define ENDIANNESS(e) ENDIANNESS_##e + +#define DEFINE_SHORT_APPEND_NUMBER_ENDIAN(e, bits) \ + Section& Section::e##bits(uint##bits##_t v) { \ + InsertEndian(ENDIANNESS(e), bits / 8, v, \ + back_insert_iterator<string>(contents_)); \ + return *this; \ + } + +#define DEFINE_SHORT_APPEND_LABEL_ENDIAN(e, bits) \ + Section& Section::e##bits(const Label& v) { \ + return Append(ENDIANNESS(e), bits / 8, v); \ + } + +// Define L16, B32, and friends. +#define DEFINE_SHORT_APPEND_ENDIAN(e, bits) \ + DEFINE_SHORT_APPEND_NUMBER_ENDIAN(e, bits) \ + DEFINE_SHORT_APPEND_LABEL_ENDIAN(e, bits) + +DEFINE_SHORT_APPEND_LABEL_ENDIAN(L, 8); +DEFINE_SHORT_APPEND_LABEL_ENDIAN(B, 8); +DEFINE_SHORT_APPEND_ENDIAN(L, 16); +DEFINE_SHORT_APPEND_ENDIAN(L, 32); +DEFINE_SHORT_APPEND_ENDIAN(L, 64); +DEFINE_SHORT_APPEND_ENDIAN(B, 16); +DEFINE_SHORT_APPEND_ENDIAN(B, 32); +DEFINE_SHORT_APPEND_ENDIAN(B, 64); + +#define DEFINE_SHORT_APPEND_NUMBER_DEFAULT(bits) \ + Section& Section::D##bits(uint##bits##_t v) { \ + InsertEndian(endianness_, bits / 8, v, \ + back_insert_iterator<string>(contents_)); \ + return *this; \ + } +#define DEFINE_SHORT_APPEND_LABEL_DEFAULT(bits) \ + Section& Section::D##bits(const Label& v) { \ + return Append(endianness_, bits / 8, v); \ + } +#define DEFINE_SHORT_APPEND_DEFAULT(bits) \ + DEFINE_SHORT_APPEND_NUMBER_DEFAULT(bits) \ + DEFINE_SHORT_APPEND_LABEL_DEFAULT(bits) + +DEFINE_SHORT_APPEND_LABEL_DEFAULT(8) +DEFINE_SHORT_APPEND_DEFAULT(16); +DEFINE_SHORT_APPEND_DEFAULT(32); +DEFINE_SHORT_APPEND_DEFAULT(64); + +Section& Section::LEB128(long long value) { + while (value < -0x40 || 0x3f < value) { + contents_ += (value & 0x7f) | 0x80; + if (value < 0) + value = (value >> 7) | ~(((unsigned long long)-1) >> 7); + else + value = (value >> 7); + } + contents_ += value & 0x7f; + return *this; +} + +Section& Section::ULEB128(uint64_t value) { + while (value > 0x7f) { + contents_ += (value & 0x7f) | 0x80; + value = (value >> 7); + } + contents_ += value; + return *this; +} + +Section& Section::Align(size_t alignment, uint8_t pad_byte) { + // ALIGNMENT must be a power of two. + assert(((alignment - 1) & alignment) == 0); + size_t new_size = (contents_.size() + alignment - 1) & ~(alignment - 1); + contents_.append(new_size - contents_.size(), pad_byte); + assert((contents_.size() & (alignment - 1)) == 0); + return *this; +} + +bool Section::GetContents(string* contents) { + // For each label reference, find the label's value, and patch it into + // the section's contents. 
+ for (size_t i = 0; i < references_.size(); i++) { + Reference& r = references_[i]; + uint64_t value; + if (!r.label.IsKnownConstant(&value)) { + fprintf(stderr, "Undefined label #%zu at offset 0x%zx\n", i, r.offset); + return false; + } + assert(r.offset < contents_.size()); + assert(contents_.size() - r.offset >= r.size); + InsertEndian(r.endianness, r.size, value, contents_.begin() + r.offset); + } + contents->clear(); + std::swap(contents_, *contents); + references_.clear(); + return true; +} + +} // namespace test_assembler +} // namespace lul_test + +namespace lul_test { + +CFISection& CFISection::CIEHeader(uint64_t code_alignment_factor, + int data_alignment_factor, + unsigned return_address_register, + uint8_t version, const string& augmentation, + bool dwarf64) { + assert(!entry_length_); + entry_length_ = new PendingLength(); + in_fde_ = false; + + if (dwarf64) { + D32(kDwarf64InitialLengthMarker); + D64(entry_length_->length); + entry_length_->start = Here(); + D64(eh_frame_ ? kEHFrame64CIEIdentifier : kDwarf64CIEIdentifier); + } else { + D32(entry_length_->length); + entry_length_->start = Here(); + D32(eh_frame_ ? kEHFrame32CIEIdentifier : kDwarf32CIEIdentifier); + } + D8(version); + AppendCString(augmentation); + ULEB128(code_alignment_factor); + LEB128(data_alignment_factor); + if (version == 1) + D8(return_address_register); + else + ULEB128(return_address_register); + return *this; +} + +CFISection& CFISection::FDEHeader(Label cie_pointer, uint64_t initial_location, + uint64_t address_range, bool dwarf64) { + assert(!entry_length_); + entry_length_ = new PendingLength(); + in_fde_ = true; + fde_start_address_ = initial_location; + + if (dwarf64) { + D32(0xffffffff); + D64(entry_length_->length); + entry_length_->start = Here(); + if (eh_frame_) + D64(Here() - cie_pointer); + else + D64(cie_pointer); + } else { + D32(entry_length_->length); + entry_length_->start = Here(); + if (eh_frame_) + D32(Here() - cie_pointer); + else + D32(cie_pointer); + } + EncodedPointer(initial_location); + // The FDE length in an .eh_frame section uses the same encoding as the + // initial location, but ignores the base address (selected by the upper + // nybble of the encoding), as it's a length, not an address that can be + // made relative. + EncodedPointer(address_range, DwarfPointerEncoding(pointer_encoding_ & 0x0f)); + return *this; +} + +CFISection& CFISection::FinishEntry() { + assert(entry_length_); + Align(address_size_, lul::DW_CFA_nop); + entry_length_->length = Here() - entry_length_->start; + delete entry_length_; + entry_length_ = NULL; + in_fde_ = false; + return *this; +} + +CFISection& CFISection::EncodedPointer(uint64_t address, + DwarfPointerEncoding encoding, + const EncodedPointerBases& bases) { + // Omitted data is extremely easy to emit. + if (encoding == lul::DW_EH_PE_omit) return *this; + + // If (encoding & lul::DW_EH_PE_indirect) != 0, then we assume + // that ADDRESS is the address at which the pointer is stored --- in + // other words, that bit has no effect on how we write the pointer. + encoding = DwarfPointerEncoding(encoding & ~lul::DW_EH_PE_indirect); + + // Find the base address to which this pointer is relative. The upper + // nybble of the encoding specifies this. 
+ uint64_t base; + switch (encoding & 0xf0) { + case lul::DW_EH_PE_absptr: + base = 0; + break; + case lul::DW_EH_PE_pcrel: + base = bases.cfi + Size(); + break; + case lul::DW_EH_PE_textrel: + base = bases.text; + break; + case lul::DW_EH_PE_datarel: + base = bases.data; + break; + case lul::DW_EH_PE_funcrel: + base = fde_start_address_; + break; + case lul::DW_EH_PE_aligned: + base = 0; + break; + default: + abort(); + }; + + // Make ADDRESS relative. Yes, this is appropriate even for "absptr" + // values; see gcc/unwind-pe.h. + address -= base; + + // Align the pointer, if required. + if ((encoding & 0xf0) == lul::DW_EH_PE_aligned) Align(AddressSize()); + + // Append ADDRESS to this section in the appropriate form. For the + // fixed-width forms, we don't need to differentiate between signed and + // unsigned encodings, because ADDRESS has already been extended to 64 + // bits before it was passed to us. + switch (encoding & 0x0f) { + case lul::DW_EH_PE_absptr: + Address(address); + break; + + case lul::DW_EH_PE_uleb128: + ULEB128(address); + break; + + case lul::DW_EH_PE_sleb128: + LEB128(address); + break; + + case lul::DW_EH_PE_udata2: + case lul::DW_EH_PE_sdata2: + D16(address); + break; + + case lul::DW_EH_PE_udata4: + case lul::DW_EH_PE_sdata4: + D32(address); + break; + + case lul::DW_EH_PE_udata8: + case lul::DW_EH_PE_sdata8: + D64(address); + break; + + default: + abort(); + } + + return *this; +}; + +} // namespace lul_test diff --git a/tools/profiler/tests/gtest/LulTestInfrastructure.h b/tools/profiler/tests/gtest/LulTestInfrastructure.h new file mode 100644 index 0000000000..772579d538 --- /dev/null +++ b/tools/profiler/tests/gtest/LulTestInfrastructure.h @@ -0,0 +1,706 @@ +// -*- mode: C++ -*- + +// Copyright (c) 2010, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Original author: Jim Blandy <jimb@mozilla.com> <jimb@red-bean.com> + +// Derived from: +// cfi_assembler.h: Define CFISection, a class for creating properly +// (and improperly) formatted DWARF CFI data for unit tests. 
+ +// Derived from: +// test-assembler.h: interface to class for building complex binary streams. + +// To test the Breakpad symbol dumper and processor thoroughly, for +// all combinations of host system and minidump processor +// architecture, we need to be able to easily generate complex test +// data like debugging information and minidump files. +// +// For example, if we want our unit tests to provide full code +// coverage for stack walking, it may be difficult to persuade the +// compiler to generate every possible sort of stack walking +// information that we want to support; there are probably DWARF CFI +// opcodes that GCC never emits. Similarly, if we want to test our +// error handling, we will need to generate damaged minidumps or +// debugging information that (we hope) the client or compiler will +// never produce on its own. +// +// google_breakpad::TestAssembler provides a predictable and +// (relatively) simple way to generate complex formatted data streams +// like minidumps and CFI. Furthermore, because TestAssembler is +// portable, developers without access to (say) Visual Studio or a +// SPARC assembler can still work on test data for those targets. + +#ifndef LUL_TEST_INFRASTRUCTURE_H +#define LUL_TEST_INFRASTRUCTURE_H + +#include <string> +#include <vector> + +using std::string; +using std::vector; + +namespace lul_test { +namespace test_assembler { + +// A Label represents a value not yet known that we need to store in a +// section. As long as all the labels a section refers to are defined +// by the time we retrieve its contents as bytes, we can use undefined +// labels freely in that section's construction. +// +// A label can be in one of three states: +// - undefined, +// - defined as the sum of some other label and a constant, or +// - a constant. +// +// A label's value never changes, but it can accumulate constraints. +// Adding labels and integers is permitted, and yields a label. +// Subtracting a constant from a label is permitted, and also yields a +// label. Subtracting two labels that have some relationship to each +// other is permitted, and yields a constant. +// +// For example: +// +// Label a; // a's value is undefined +// Label b; // b's value is undefined +// { +// Label c = a + 4; // okay, even though a's value is unknown +// b = c + 4; // also okay; b is now a+8 +// } +// Label d = b - 2; // okay; d == a+6, even though c is gone +// d.Value(); // error: d's value is not yet known +// d - a; // is 6, even though their values are not known +// a = 12; // now b == 20, and d == 18 +// d.Value(); // 18: no longer an error +// b.Value(); // 20 +// d = 10; // error: d is already defined. +// +// Label objects' lifetimes are unconstrained: notice that, in the +// above example, even though a and b are only related through c, and +// c goes out of scope, the assignment to a sets b's value as well. In +// particular, it's not necessary to ensure that a Label lives beyond +// Sections that refer to it. +class Label { + public: + Label(); // An undefined label. + explicit Label(uint64_t value); // A label with a fixed value + Label(const Label& value); // A label equal to another. + ~Label(); + + Label& operator=(uint64_t value); + Label& operator=(const Label& value); + Label operator+(uint64_t addend) const; + Label operator-(uint64_t subtrahend) const; + uint64_t operator-(const Label& subtrahend) const; + + // We could also provide == and != that work on undefined, but + // related, labels. + + // Return true if this label's value is known. 
If VALUE_P is given, + // set *VALUE_P to the known value if returning true. + bool IsKnownConstant(uint64_t* value_p = NULL) const; + + // Return true if the offset from LABEL to this label is known. If + // OFFSET_P is given, set *OFFSET_P to the offset when returning true. + // + // You can think of l.KnownOffsetFrom(m, &d) as being like 'd = l-m', + // except that it also returns a value indicating whether the + // subtraction is possible given what we currently know of l and m. + // It can be possible even if we don't know l and m's values. For + // example: + // + // Label l, m; + // m = l + 10; + // l.IsKnownConstant(); // false + // m.IsKnownConstant(); // false + // uint64_t d; + // l.IsKnownOffsetFrom(m, &d); // true, and sets d to -10. + // l-m // -10 + // m-l // 10 + // m.Value() // error: m's value is not known + bool IsKnownOffsetFrom(const Label& label, uint64_t* offset_p = NULL) const; + + private: + // A label's value, or if that is not yet known, how the value is + // related to other labels' values. A binding may be: + // - a known constant, + // - constrained to be equal to some other binding plus a constant, or + // - unconstrained, and free to take on any value. + // + // Many labels may point to a single binding, and each binding may + // refer to another, so bindings and labels form trees whose leaves + // are labels, whose interior nodes (and roots) are bindings, and + // where links point from children to parents. Bindings are + // reference counted, allowing labels to be lightweight, copyable, + // assignable, placed in containers, and so on. + class Binding { + public: + Binding(); + explicit Binding(uint64_t addend); + ~Binding(); + + // Increment our reference count. + void Acquire() { reference_count_++; }; + // Decrement our reference count, and return true if it is zero. + bool Release() { return --reference_count_ == 0; } + + // Set this binding to be equal to BINDING + ADDEND. If BINDING is + // NULL, then set this binding to the known constant ADDEND. + // Update every binding on this binding's chain to point directly + // to BINDING, or to be a constant, with addends adjusted + // appropriately. + void Set(Binding* binding, uint64_t value); + + // Return what we know about the value of this binding. + // - If this binding's value is a known constant, set BASE to + // NULL, and set ADDEND to its value. + // - If this binding is not a known constant but related to other + // bindings, set BASE to the binding at the end of the relation + // chain (which will always be unconstrained), and set ADDEND to the + // value to add to that binding's value to get this binding's + // value. + // - If this binding is unconstrained, set BASE to this, and leave + // ADDEND unchanged. + void Get(Binding** base, uint64_t* addend); + + private: + // There are three cases: + // + // - A binding representing a known constant value has base_ NULL, + // and addend_ equal to the value. + // + // - A binding representing a completely unconstrained value has + // base_ pointing to this; addend_ is unused. + // + // - A binding whose value is related to some other binding's + // value has base_ pointing to that other binding, and addend_ + // set to the amount to add to that binding's value to get this + // binding's value. We only represent relationships of the form + // x = y+c. + // + // Thus, the bind_ links form a chain terminating in either a + // known constant value or a completely unconstrained value. 
Most + // operations on bindings do path compression: they change every + // binding on the chain to point directly to the final value, + // adjusting addends as appropriate. + Binding* base_; + uint64_t addend_; + + // The number of Labels and Bindings pointing to this binding. + // (When a binding points to itself, indicating a completely + // unconstrained binding, that doesn't count as a reference.) + int reference_count_; + }; + + // This label's value. + Binding* value_; +}; + +// Conventions for representing larger numbers as sequences of bytes. +enum Endianness { + kBigEndian, // Big-endian: the most significant byte comes first. + kLittleEndian, // Little-endian: the least significant byte comes first. + kUnsetEndian, // used internally +}; + +// A section is a sequence of bytes, constructed by appending bytes +// to the end. Sections have a convenient and flexible set of member +// functions for appending data in various formats: big-endian and +// little-endian signed and unsigned values of different sizes; +// LEB128 and ULEB128 values (see below), and raw blocks of bytes. +// +// If you need to append a value to a section that is not convenient +// to compute immediately, you can create a label, append the +// label's value to the section, and then set the label's value +// later, when it's convenient to do so. Once a label's value is +// known, the section class takes care of updating all previously +// appended references to it. +// +// Once all the labels to which a section refers have had their +// values determined, you can get a copy of the section's contents +// as a string. +// +// Note that there is no specified "start of section" label. This is +// because there are typically several different meanings for "the +// start of a section": the offset of the section within an object +// file, the address in memory at which the section's content appear, +// and so on. It's up to the code that uses the Section class to +// keep track of these explicitly, as they depend on the application. +class Section { + public: + explicit Section(Endianness endianness = kUnsetEndian) + : endianness_(endianness){}; + + // A base class destructor should be either public and virtual, + // or protected and nonvirtual. + virtual ~Section(){}; + + // Return the default endianness of this section. + Endianness endianness() const { return endianness_; } + + // Append the SIZE bytes at DATA to the end of this section. Return + // a reference to this section. + Section& Append(const string& data) { + contents_.append(data); + return *this; + }; + + // Append SIZE copies of BYTE to the end of this section. Return a + // reference to this section. + Section& Append(size_t size, uint8_t byte) { + contents_.append(size, (char)byte); + return *this; + } + + // Append NUMBER to this section. ENDIANNESS is the endianness to + // use to write the number. SIZE is the length of the number in + // bytes. Return a reference to this section. + Section& Append(Endianness endianness, size_t size, uint64_t number); + Section& Append(Endianness endianness, size_t size, const Label& label); + + // Append SECTION to the end of this section. The labels SECTION + // refers to need not be defined yet. + // + // Note that this has no effect on any Labels' values, or on + // SECTION. If placing SECTION within 'this' provides new + // constraints on existing labels' values, then it's up to the + // caller to fiddle with those labels as needed. 
+ Section& Append(const Section& section); + + // Append the contents of DATA as a series of bytes terminated by + // a NULL character. + Section& AppendCString(const string& data) { + Append(data); + contents_ += '\0'; + return *this; + } + + // Append VALUE or LABEL to this section, with the given bit width and + // endianness. Return a reference to this section. + // + // The names of these functions have the form <ENDIANNESS><BITWIDTH>: + // <ENDIANNESS> is either 'L' (little-endian, least significant byte first), + // 'B' (big-endian, most significant byte first), or + // 'D' (default, the section's default endianness) + // <BITWIDTH> is 8, 16, 32, or 64. + // + // Since endianness doesn't matter for a single byte, all the + // <BITWIDTH>=8 functions are equivalent. + // + // These can be used to write both signed and unsigned values, as + // the compiler will properly sign-extend a signed value before + // passing it to the function, at which point the function's + // behavior is the same either way. + Section& L8(uint8_t value) { + contents_ += value; + return *this; + } + Section& B8(uint8_t value) { + contents_ += value; + return *this; + } + Section& D8(uint8_t value) { + contents_ += value; + return *this; + } + Section &L16(uint16_t), &L32(uint32_t), &L64(uint64_t), &B16(uint16_t), + &B32(uint32_t), &B64(uint64_t), &D16(uint16_t), &D32(uint32_t), + &D64(uint64_t); + Section &L8(const Label&label), &L16(const Label&label), + &L32(const Label&label), &L64(const Label&label), &B8(const Label&label), + &B16(const Label&label), &B32(const Label&label), &B64(const Label&label), + &D8(const Label&label), &D16(const Label&label), &D32(const Label&label), + &D64(const Label&label); + + // Append VALUE in a signed LEB128 (Little-Endian Base 128) form. + // + // The signed LEB128 representation of an integer N is a variable + // number of bytes: + // + // - If N is between -0x40 and 0x3f, then its signed LEB128 + // representation is a single byte whose value is N. + // + // - Otherwise, its signed LEB128 representation is (N & 0x7f) | + // 0x80, followed by the signed LEB128 representation of N / 128, + // rounded towards negative infinity. + // + // In other words, we break VALUE into groups of seven bits, put + // them in little-endian order, and then write them as eight-bit + // bytes with the high bit on all but the last. + // + // Note that VALUE cannot be a Label (we would have to implement + // relaxation). + Section& LEB128(long long value); + + // Append VALUE in unsigned LEB128 (Little-Endian Base 128) form. + // + // The unsigned LEB128 representation of an integer N is a variable + // number of bytes: + // + // - If N is between 0 and 0x7f, then its unsigned LEB128 + // representation is a single byte whose value is N. + // + // - Otherwise, its unsigned LEB128 representation is (N & 0x7f) | + // 0x80, followed by the unsigned LEB128 representation of N / + // 128, rounded towards negative infinity. + // + // Note that VALUE cannot be a Label (we would have to implement + // relaxation). + Section& ULEB128(uint64_t value); + + // Jump to the next location aligned on an ALIGNMENT-byte boundary, + // relative to the start of the section. Fill the gap with PAD_BYTE. + // ALIGNMENT must be a power of two. Return a reference to this + // section. + Section& Align(size_t alignment, uint8_t pad_byte = 0); + + // Return the current size of the section. + size_t Size() const { return contents_.size(); } + + // Return a label representing the start of the section. 
+ // + // It is up to the user whether this label represents the section's + // position in an object file, the section's address in memory, or + // what have you; some applications may need both, in which case + // this simple-minded interface won't be enough. This class only + // provides a single start label, for use with the Here and Mark + // member functions. + // + // Ideally, we'd provide this in a subclass that actually knows more + // about the application at hand and can provide an appropriate + // collection of start labels. But then the appending member + // functions like Append and D32 would return a reference to the + // base class, not the derived class, and the chaining won't work. + // Since the only value here is in pretty notation, that's a fatal + // flaw. + Label start() const { return start_; } + + // Return a label representing the point at which the next Appended + // item will appear in the section, relative to start(). + Label Here() const { return start_ + Size(); } + + // Set *LABEL to Here, and return a reference to this section. + Section& Mark(Label* label) { + *label = Here(); + return *this; + } + + // If there are no undefined label references left in this + // section, set CONTENTS to the contents of this section, as a + // string, and clear this section. Return true on success, or false + // if there were still undefined labels. + bool GetContents(string* contents); + + private: + // Used internally. A reference to a label's value. + struct Reference { + Reference(size_t set_offset, Endianness set_endianness, size_t set_size, + const Label& set_label) + : offset(set_offset), + endianness(set_endianness), + size(set_size), + label(set_label) {} + + // The offset of the reference within the section. + size_t offset; + + // The endianness of the reference. + Endianness endianness; + + // The size of the reference. + size_t size; + + // The label to which this is a reference. + Label label; + }; + + // The default endianness of this section. + Endianness endianness_; + + // The contents of the section. + string contents_; + + // References to labels within those contents. + vector<Reference> references_; + + // A label referring to the beginning of the section. + Label start_; +}; + +} // namespace test_assembler +} // namespace lul_test + +namespace lul_test { + +using lul::DwarfPointerEncoding; +using lul_test::test_assembler::Endianness; +using lul_test::test_assembler::Label; +using lul_test::test_assembler::Section; + +class CFISection : public Section { + public: + // CFI augmentation strings beginning with 'z', defined by the + // Linux/IA-64 C++ ABI, can specify interesting encodings for + // addresses appearing in FDE headers and call frame instructions (and + // for additional fields whose presence the augmentation string + // specifies). In particular, pointers can be specified to be relative + // to various base address: the start of the .text section, the + // location holding the address itself, and so on. These allow the + // frame data to be position-independent even when they live in + // write-protected pages. 
These variants are specified at the + // following two URLs: + // + // http://refspecs.linux-foundation.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/dwarfext.html + // http://refspecs.linux-foundation.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html + // + // CFISection leaves the production of well-formed 'z'-augmented CIEs and + // FDEs to the user, but does provide EncodedPointer, to emit + // properly-encoded addresses for a given pointer encoding. + // EncodedPointer uses an instance of this structure to find the base + // addresses it should use; you can establish a default for all encoded + // pointers appended to this section with SetEncodedPointerBases. + struct EncodedPointerBases { + EncodedPointerBases() : cfi(), text(), data() {} + + // The starting address of this CFI section in memory, for + // DW_EH_PE_pcrel. DW_EH_PE_pcrel pointers may only be used in data + // that has is loaded into the program's address space. + uint64_t cfi; + + // The starting address of this file's .text section, for DW_EH_PE_textrel. + uint64_t text; + + // The starting address of this file's .got or .eh_frame_hdr section, + // for DW_EH_PE_datarel. + uint64_t data; + }; + + // Create a CFISection whose endianness is ENDIANNESS, and where + // machine addresses are ADDRESS_SIZE bytes long. If EH_FRAME is + // true, use the .eh_frame format, as described by the Linux + // Standards Base Core Specification, instead of the DWARF CFI + // format. + CFISection(Endianness endianness, size_t address_size, bool eh_frame = false) + : Section(endianness), + address_size_(address_size), + eh_frame_(eh_frame), + pointer_encoding_(lul::DW_EH_PE_absptr), + encoded_pointer_bases_(), + entry_length_(NULL), + in_fde_(false) { + // The 'start', 'Here', and 'Mark' members of a CFISection all refer + // to section offsets. + start() = 0; + } + + // Return this CFISection's address size. + size_t AddressSize() const { return address_size_; } + + // Return true if this CFISection uses the .eh_frame format, or + // false if it contains ordinary DWARF CFI data. + bool ContainsEHFrame() const { return eh_frame_; } + + // Use ENCODING for pointers in calls to FDEHeader and EncodedPointer. + void SetPointerEncoding(DwarfPointerEncoding encoding) { + pointer_encoding_ = encoding; + } + + // Use the addresses in BASES as the base addresses for encoded + // pointers in subsequent calls to FDEHeader or EncodedPointer. + // This function makes a copy of BASES. + void SetEncodedPointerBases(const EncodedPointerBases& bases) { + encoded_pointer_bases_ = bases; + } + + // Append a Common Information Entry header to this section with the + // given values. If dwarf64 is true, use the 64-bit DWARF initial + // length format for the CIE's initial length. Return a reference to + // this section. You should call FinishEntry after writing the last + // instruction for the CIE. + // + // Before calling this function, you will typically want to use Mark + // or Here to make a label to pass to FDEHeader that refers to this + // CIE's position in the section. + CFISection& CIEHeader(uint64_t code_alignment_factor, + int data_alignment_factor, + unsigned return_address_register, uint8_t version = 3, + const string& augmentation = "", bool dwarf64 = false); + + // Append a Frame Description Entry header to this section with the + // given values. If dwarf64 is true, use the 64-bit DWARF initial + // length format for the CIE's initial length. Return a reference to + // this section. 
You should call FinishEntry after writing the last + // instruction for the CIE. + // + // This function doesn't support entries that are longer than + // 0xffffff00 bytes. (The "initial length" is always a 32-bit + // value.) Nor does it support .debug_frame sections longer than + // 0xffffff00 bytes. + CFISection& FDEHeader(Label cie_pointer, uint64_t initial_location, + uint64_t address_range, bool dwarf64 = false); + + // Note the current position as the end of the last CIE or FDE we + // started, after padding with DW_CFA_nops for alignment. This + // defines the label representing the entry's length, cited in the + // entry's header. Return a reference to this section. + CFISection& FinishEntry(); + + // Append the contents of BLOCK as a DW_FORM_block value: an + // unsigned LEB128 length, followed by that many bytes of data. + CFISection& Block(const string& block) { + ULEB128(block.size()); + Append(block); + return *this; + } + + // Append ADDRESS to this section, in the appropriate size and + // endianness. Return a reference to this section. + CFISection& Address(uint64_t address) { + Section::Append(endianness(), address_size_, address); + return *this; + } + + // Append ADDRESS to this section, using ENCODING and BASES. ENCODING + // defaults to this section's default encoding, established by + // SetPointerEncoding. BASES defaults to this section's bases, set by + // SetEncodedPointerBases. If the DW_EH_PE_indirect bit is set in the + // encoding, assume that ADDRESS is where the true address is stored. + // Return a reference to this section. + // + // (C++ doesn't let me use default arguments here, because I want to + // refer to members of *this in the default argument expression.) + CFISection& EncodedPointer(uint64_t address) { + return EncodedPointer(address, pointer_encoding_, encoded_pointer_bases_); + } + CFISection& EncodedPointer(uint64_t address, DwarfPointerEncoding encoding) { + return EncodedPointer(address, encoding, encoded_pointer_bases_); + } + CFISection& EncodedPointer(uint64_t address, DwarfPointerEncoding encoding, + const EncodedPointerBases& bases); + + // Restate some member functions, to keep chaining working nicely. + CFISection& Mark(Label* label) { + Section::Mark(label); + return *this; + } + CFISection& D8(uint8_t v) { + Section::D8(v); + return *this; + } + CFISection& D16(uint16_t v) { + Section::D16(v); + return *this; + } + CFISection& D16(Label v) { + Section::D16(v); + return *this; + } + CFISection& D32(uint32_t v) { + Section::D32(v); + return *this; + } + CFISection& D32(const Label& v) { + Section::D32(v); + return *this; + } + CFISection& D64(uint64_t v) { + Section::D64(v); + return *this; + } + CFISection& D64(const Label& v) { + Section::D64(v); + return *this; + } + CFISection& LEB128(long long v) { + Section::LEB128(v); + return *this; + } + CFISection& ULEB128(uint64_t v) { + Section::ULEB128(v); + return *this; + } + + private: + // A length value that we've appended to the section, but is not yet + // known. LENGTH is the appended value; START is a label referring + // to the start of the data whose length was cited. + struct PendingLength { + Label length; + Label start; + }; + + // Constants used in CFI/.eh_frame data: + + // If the first four bytes of an "initial length" are this constant, then + // the data uses the 64-bit DWARF format, and the length itself is the + // subsequent eight bytes. 
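+  //
+  // For example, a 64-bit DWARF entry whose length is 0x18 begins with the
+  // four bytes 0xff 0xff 0xff 0xff followed by the eight-byte value 0x18,
+  // whereas a 32-bit DWARF entry simply begins with its four-byte length.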
+ static const uint32_t kDwarf64InitialLengthMarker = 0xffffffffU; + + // The CIE identifier for 32- and 64-bit DWARF CFI and .eh_frame data. + static const uint32_t kDwarf32CIEIdentifier = ~(uint32_t)0; + static const uint64_t kDwarf64CIEIdentifier = ~(uint64_t)0; + static const uint32_t kEHFrame32CIEIdentifier = 0; + static const uint64_t kEHFrame64CIEIdentifier = 0; + + // The size of a machine address for the data in this section. + size_t address_size_; + + // If true, we are generating a Linux .eh_frame section, instead of + // a standard DWARF .debug_frame section. + bool eh_frame_; + + // The encoding to use for FDE pointers. + DwarfPointerEncoding pointer_encoding_; + + // The base addresses to use when emitting encoded pointers. + EncodedPointerBases encoded_pointer_bases_; + + // The length value for the current entry. + // + // Oddly, this must be dynamically allocated. Labels never get new + // values; they only acquire constraints on the value they already + // have, or assert if you assign them something incompatible. So + // each header needs truly fresh Label objects to cite in their + // headers and track their positions. The alternative is explicit + // destructor invocation and a placement new. Ick. + PendingLength* entry_length_; + + // True if we are currently emitting an FDE --- that is, we have + // called FDEHeader but have not yet called FinishEntry. + bool in_fde_; + + // If in_fde_ is true, this is its starting address. We use this for + // emitting DW_EH_PE_funcrel pointers. + uint64_t fde_start_address_; +}; + +} // namespace lul_test + +#endif // LUL_TEST_INFRASTRUCTURE_H diff --git a/tools/profiler/tests/gtest/ThreadProfileTest.cpp b/tools/profiler/tests/gtest/ThreadProfileTest.cpp new file mode 100644 index 0000000000..ef54de4940 --- /dev/null +++ b/tools/profiler/tests/gtest/ThreadProfileTest.cpp @@ -0,0 +1,57 @@ + +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "ProfileBuffer.h" +#include "ThreadInfo.h" + +#include "mozilla/PowerOfTwo.h" +#include "mozilla/ProfileBufferChunkManagerWithLocalLimit.h" +#include "mozilla/ProfileChunkedBuffer.h" + +#include "gtest/gtest.h" + +// Make sure we can record one entry and read it +TEST(ThreadProfile, InsertOneEntry) +{ + mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager( + 2 * (1 + uint32_t(sizeof(ProfileBufferEntry))) * 4, + 2 * (1 + uint32_t(sizeof(ProfileBufferEntry)))); + mozilla::ProfileChunkedBuffer profileChunkedBuffer( + mozilla::ProfileChunkedBuffer::ThreadSafety::WithMutex, chunkManager); + auto pb = mozilla::MakeUnique<ProfileBuffer>(profileChunkedBuffer); + pb->AddEntry(ProfileBufferEntry::Time(123.1)); + ProfileBufferEntry entry = pb->GetEntry(pb->BufferRangeStart()); + ASSERT_TRUE(entry.IsTime()); + ASSERT_EQ(123.1, entry.GetDouble()); +} + +// See if we can insert some entries +TEST(ThreadProfile, InsertEntriesNoWrap) +{ + mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager( + 100 * (1 + uint32_t(sizeof(ProfileBufferEntry))), + 100 * (1 + uint32_t(sizeof(ProfileBufferEntry))) / 4); + mozilla::ProfileChunkedBuffer profileChunkedBuffer( + mozilla::ProfileChunkedBuffer::ThreadSafety::WithMutex, chunkManager); + auto pb = mozilla::MakeUnique<ProfileBuffer>(profileChunkedBuffer); + const int test_size = 50; + for (int i = 0; i < test_size; i++) { + pb->AddEntry(ProfileBufferEntry::Time(i)); + } + int times = 0; + uint64_t readPos = pb->BufferRangeStart(); + while (readPos != pb->BufferRangeEnd()) { + ProfileBufferEntry entry = pb->GetEntry(readPos); + readPos++; + if (entry.GetKind() == ProfileBufferEntry::Kind::INVALID) { + continue; + } + ASSERT_TRUE(entry.IsTime()); + ASSERT_EQ(times, entry.GetDouble()); + times++; + } + ASSERT_EQ(test_size, times); +} diff --git a/tools/profiler/tests/gtest/moz.build b/tools/profiler/tests/gtest/moz.build new file mode 100644 index 0000000000..b8ea860514 --- /dev/null +++ b/tools/profiler/tests/gtest/moz.build @@ -0,0 +1,43 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, you can obtain one at http://mozilla.org/MPL/2.0/. + +if CONFIG["OS_TARGET"] in ("Android", "Linux") and CONFIG["CPU_ARCH"] in ( + "arm", + "aarch64", + "x86", + "x86_64", +): + UNIFIED_SOURCES += [ + "LulTest.cpp", + "LulTestDwarf.cpp", + "LulTestInfrastructure.cpp", + ] + +LOCAL_INCLUDES += [ + "/netwerk/base", + "/netwerk/protocol/http", + "/toolkit/components/jsoncpp/include", + "/tools/profiler/core", + "/tools/profiler/gecko", + "/tools/profiler/lul", +] + +if CONFIG["OS_TARGET"] != "Android": + UNIFIED_SOURCES += [ + "GeckoProfiler.cpp", + "ThreadProfileTest.cpp", + ] + +USE_LIBS += [ + "jsoncpp", +] + +include("/ipc/chromium/chromium-config.mozbuild") + +FINAL_LIBRARY = "xul-gtest" + +if CONFIG["CC_TYPE"] in ("clang", "gcc"): + CXXFLAGS += ["-Wno-error=shadow"] diff --git a/tools/profiler/tests/shared-head.js b/tools/profiler/tests/shared-head.js new file mode 100644 index 0000000000..ffcdc5d361 --- /dev/null +++ b/tools/profiler/tests/shared-head.js @@ -0,0 +1,216 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/
+
+/**
+ * This file contains utilities that can be shared between xpcshell tests and mochitests.
+ */
+
+// The marker phases.
+const INSTANT = 0;
+const INTERVAL = 1;
+const INTERVAL_START = 2;
+const INTERVAL_END = 3;
+
+// This Services declaration may shadow another from head.js, so define it as
+// a var rather than a const.
+var { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+
+const defaultSettings = {
+  entries: 8 * 1024 * 1024, // 8M entries = 64MB
+  interval: 1, // ms
+  features: ["threads"],
+  threads: ["GeckoMain"],
+};
+
+function startProfiler(callersSettings) {
+  if (Services.profiler.IsActive()) {
+    throw new Error(
+      "The profiler must not be active before starting it in a test."
+    );
+  }
+  const settings = Object.assign({}, defaultSettings, callersSettings);
+  Services.profiler.StartProfiler(
+    settings.entries,
+    settings.interval,
+    settings.features,
+    settings.threads,
+    0,
+    settings.duration
+  );
+}
+
+/**
+ * This is a helper function to be able to run `await wait(500)`. Unfortunately
+ * this is needed as the act of collecting samples relies on the periodic
+ * sampling of the threads. See:
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=1529053
+ *
+ * @param {number} time
+ * @returns {Promise}
+ */
+function wait(time) {
+  return new Promise(resolve => {
+    // eslint-disable-next-line mozilla/no-arbitrary-setTimeout
+    setTimeout(resolve, time);
+  });
+}
+
+/**
+ * Get the payloads of a type recursively, including from all subprocesses.
+ *
+ * @param {Object} profile The gecko profile.
+ * @param {string} type The marker payload type, e.g. "DiskIO".
+ * @param {Array} payloadTarget The recursive list of payloads.
+ * @return {Array} The final payloads.
+ */
+function getPayloadsOfTypeFromAllThreads(profile, type, payloadTarget = []) {
+  for (const { markers } of profile.threads) {
+    for (const markerTuple of markers.data) {
+      const payload = markerTuple[markers.schema.data];
+      if (payload && payload.type === type) {
+        payloadTarget.push(payload);
+      }
+    }
+  }
+
+  for (const subProcess of profile.processes) {
+    getPayloadsOfTypeFromAllThreads(subProcess, type, payloadTarget);
+  }
+
+  return payloadTarget;
+}
+
+/**
+ * Get the payloads of a type from a single thread.
+ *
+ * @param {Object} thread The thread from a profile.
+ * @param {string} type The marker payload type, e.g. "DiskIO".
+ * @return {Array} The payloads.
+ */
+function getPayloadsOfType(thread, type) {
+  const { markers } = thread;
+  const results = [];
+  for (const markerTuple of markers.data) {
+    const payload = markerTuple[markers.schema.data];
+    if (payload && payload.type === type) {
+      results.push(payload);
+    }
+  }
+  return results;
+}
+
+/**
+ * Applies the marker schema to create individual objects for each marker.
+ *
+ * @param {Object} thread The thread from a profile.
+ * @return {InflatedMarker[]} The markers.
+ */
+function getInflatedMarkerData(thread) {
+  const { markers, stringTable } = thread;
+  return markers.data.map(markerTuple => {
+    const marker = {};
+    for (const [key, tupleIndex] of Object.entries(markers.schema)) {
+      marker[key] = markerTuple[tupleIndex];
+      if (key === "name") {
+        // Use the string from the string table.
+        marker[key] = stringTable[marker[key]];
+      }
+    }
+    return marker;
+  });
+}
+
+/**
+ * It can be helpful to force the profiler to collect a JavaScript sample. This
+ * function spins on a while loop until at least one more sample is collected.
+ *
+ * @return {number} The index of the collected sample.
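+ *
+ * Note: the sampler runs on its own thread, so busy-looping here simply gives
+ * it JS frames to sample; the loop exits as soon as the sample count grows.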
+ */
+function captureAtLeastOneJsSample() {
+  function getProfileSampleCount() {
+    const profile = Services.profiler.getProfileData();
+    return profile.threads[0].samples.data.length;
+  }
+
+  const sampleCount = getProfileSampleCount();
+  // Create an infinite loop until a sample has been collected.
+  while (true) {
+    if (sampleCount < getProfileSampleCount()) {
+      return sampleCount;
+    }
+  }
+}
+
+/**
+ * This function pauses the profiler before getting the profile. Then after
+ * getting the data, the profiler is stopped, and all profiler data is removed.
+ * @returns {Promise<Profile>}
+ */
+async function stopAndGetProfile() {
+  Services.profiler.Pause();
+  const profile = await Services.profiler.getProfileDataAsync();
+  Services.profiler.StopProfiler();
+  return profile;
+}
+
+/**
+ * Verifies that a marker is an interval marker.
+ *
+ * @param {InflatedMarker} inflatedMarker
+ * @returns {boolean}
+ */
+function isIntervalMarker(inflatedMarker) {
+  return (
+    inflatedMarker.phase === INTERVAL &&
+    typeof inflatedMarker.startTime === "number" &&
+    typeof inflatedMarker.endTime === "number"
+  );
+}
+
+/**
+ * @param {Profile} profile
+ * @returns {Thread[]}
+ */
+function getThreads(profile) {
+  const threads = [];
+
+  function getThreadsRecursive(process) {
+    for (const thread of process.threads) {
+      threads.push(thread);
+    }
+    for (const subprocess of process.processes) {
+      getThreadsRecursive(subprocess);
+    }
+  }
+
+  getThreadsRecursive(profile);
+  return threads;
+}
+
+/**
+ * Find a specific marker schema from any process of a profile.
+ *
+ * @param {Profile} profile
+ * @param {string} name
+ * @returns {MarkerSchema}
+ */
+function getSchema(profile, name) {
+  {
+    const schema = profile.meta.markerSchema.find(s => s.name === name);
+    if (schema) {
+      return schema;
+    }
+  }
+  for (const subprocess of profile.processes) {
+    const schema = subprocess.meta.markerSchema.find(s => s.name === name);
+    if (schema) {
+      return schema;
+    }
+  }
+  console.error("Parent process schema", profile.meta.markerSchema);
+  for (const subprocess of profile.processes) {
+    console.error("Child process schema", subprocess.meta.markerSchema);
+  }
+  throw new Error(`Could not find a schema for "${name}".`);
+}
diff --git a/tools/profiler/tests/xpcshell/head.js b/tools/profiler/tests/xpcshell/head.js
new file mode 100644
index 0000000000..fd133d1607
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/head.js
@@ -0,0 +1,242 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* import-globals-from ../shared-head.js */
+
+// This Services declaration may shadow another from head.js, so define it as
+// a var rather than a const.
+var { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+
+const { AppConstants } = ChromeUtils.import(
+  "resource://gre/modules/AppConstants.jsm"
+);
+const { setTimeout } = ChromeUtils.import("resource://gre/modules/Timer.jsm");
+
+// Load the shared head
+const sharedHead = do_get_file("shared-head.js", false);
+if (!sharedHead) {
+  throw new Error("Could not load the shared head.");
+}
+Services.scriptloader.loadSubScript(
+  Services.io.newFileURI(sharedHead).spec,
+  this
+);
+
+/**
+ * This function takes a thread, and a sample tuple from the "data" array, and
+ * inflates the stack into an array of strings.
+ *
+ * @param {Object} thread - The thread from the profile.
+ * @param {Array} sample - The tuple from the thread.samples.data array. + * @returns {Array<string>} An array of function names. + */ +function getInflatedStackLocations(thread, sample) { + let stackTable = thread.stackTable; + let frameTable = thread.frameTable; + let stringTable = thread.stringTable; + let SAMPLE_STACK_SLOT = thread.samples.schema.stack; + let STACK_PREFIX_SLOT = stackTable.schema.prefix; + let STACK_FRAME_SLOT = stackTable.schema.frame; + let FRAME_LOCATION_SLOT = frameTable.schema.location; + + // Build the stack from the raw data and accumulate the locations in + // an array. + let stackIndex = sample[SAMPLE_STACK_SLOT]; + let locations = []; + while (stackIndex !== null) { + let stackEntry = stackTable.data[stackIndex]; + let frame = frameTable.data[stackEntry[STACK_FRAME_SLOT]]; + locations.push(stringTable[frame[FRAME_LOCATION_SLOT]]); + stackIndex = stackEntry[STACK_PREFIX_SLOT]; + } + + // The profiler tree is inverted, so reverse the array. + return locations.reverse(); +} + +/** + * This utility matches up stacks to see if they contain a certain sequence of + * stack frames. A correctly functioning profiler will have a certain sequence + * of stacks, but we can't always determine exactly which stacks will show up + * due to implementation changes, as well as memory addresses being arbitrary to + * that particular build. + * + * This function triggers a test failure with a nice debug message when it + * fails. + * + * @param {Array<string>} actualStackFrames - As generated by + * inflatedStackFrames. + * @param {Array<string | RegExp>} expectedStackFrames - Matches a subset of + * actualStackFrames + */ +function expectStackToContain( + actualStackFrames, + expectedStackFrames, + message = "The actual stack and expected stack do not match." +) { + // Log the stacks that are being passed to this assertion, as it could be + // useful for when these tests fail. + console.log("Actual stack: ", actualStackFrames); + console.log( + "Expected to contain: ", + expectedStackFrames.map(s => s.toString()) + ); + + let actualIndex = 0; + + // Start walking the expected stack and look for matches. + for ( + let expectedIndex = 0; + expectedIndex < expectedStackFrames.length; + expectedIndex++ + ) { + const expectedStackFrame = expectedStackFrames[expectedIndex]; + + while (true) { + // Make sure that we haven't run out of actual stack frames. + if (actualIndex >= actualStackFrames.length) { + info(`Could not find a match for: "${expectedStackFrame.toString()}"`); + Assert.ok(false, message); + } + + const actualStackFrame = actualStackFrames[actualIndex]; + actualIndex++; + + const itMatches = + typeof expectedStackFrame === "string" + ? expectedStackFrame === actualStackFrame + : actualStackFrame.match(expectedStackFrame); + + if (itMatches) { + // We found a match, break out of this loop. + break; + } + // Keep on looping looking for a match. + } + } + + Assert.ok(true, message); +} + +/** + * @param {Thread} thread + * @param {string} filename - The filename used to trigger FileIO. + * @returns {InflatedMarkers[]} + */ +function getInflatedFileIOMarkers(thread, filename) { + const markers = getInflatedMarkerData(thread); + return markers.filter( + marker => + marker.data?.type === "FileIO" && marker.data?.filename.endsWith(filename) + ); +} + +/** + * Checks properties common to all FileIO markers. 
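+ * Each marker is expected to be an interval FileIO marker with a known
+ * operation and source, a filename ending with the expected name, and a
+ * captured stack.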
+ * + * @param {InflatedMarkers[]} markers + * @param {string} filename + */ +function checkInflatedFileIOMarkers(markers, filename) { + greater(markers.length, 0, "Found some markers"); + + // See IOInterposeObserver::Observation::ObservedOperationString + const validOperations = new Set([ + "write", + "fsync", + "close", + "stat", + "create/open", + "read", + ]); + const validSources = new Set(["PoisonIOInterposer", "NSPRIOInterposer"]); + + for (const marker of markers) { + try { + ok( + marker.name.startsWith("FileIO"), + "Has a marker.name that starts with FileIO" + ); + equal(marker.data.type, "FileIO", "Has a marker.data.type"); + ok(isIntervalMarker(marker), "All FileIO markers are interval markers"); + ok( + validOperations.has(marker.data.operation), + `The markers have a known operation - "${marker.data.operation}"` + ); + ok( + validSources.has(marker.data.source), + `The FileIO marker has a known source "${marker.data.source}"` + ); + ok(marker.data.filename.endsWith(filename)); + ok(Boolean(marker.data.stack), "A stack was collected"); + } catch (error) { + console.error("Failing inflated FileIO marker:", marker); + throw error; + } + } +} + +/** + * Do deep equality checks for schema, but then surface nice errors for a user to know + * what to do if the check fails. + */ +function checkSchema(actual, expected) { + const schemaName = expected.name; + info(`Checking marker schema for "${schemaName}"`); + + try { + ok( + actual, + `Schema was found for "${schemaName}". See the test output for more information.` + ); + // Check individual properties to surface easier to debug errors. + deepEqual( + expected.display, + actual.display, + `The "display" property for ${schemaName} schema matches. See the test output for more information.` + ); + if (expected.data) { + ok(actual.data, `Schema was found for "${schemaName}"`); + for (const expectedDatum of expected.data) { + const actualDatum = actual.data.find(d => d.key === expectedDatum.key); + deepEqual( + expectedDatum, + actualDatum, + `The "${schemaName}" field "${expectedDatum.key}" matches expectations. See the test output for more information.` + ); + } + equal( + expected.data.length, + actual.data.length, + "The expected and actual data have the same number of items" + ); + } + + // Finally do a true deep equal. + deepEqual(expected, actual, "The entire schema is deepEqual"); + } catch (error) { + // The test results are not very human readable. This is a bit of a hacky + // solution to make it more readable. + dump("-----------------------------------------------------\n"); + dump("The expected marker schema:\n"); + dump("-----------------------------------------------------\n"); + dump(JSON.stringify(expected, null, 2)); + dump("\n"); + dump("-----------------------------------------------------\n"); + dump("The actual marker schema:\n"); + dump("-----------------------------------------------------\n"); + dump(JSON.stringify(actual, null, 2)); + dump("\n"); + dump("-----------------------------------------------------\n"); + dump("A marker schema was not equal to expectations. 
If you\n");
+    dump("are modifying the schema, then please copy and paste\n");
+    dump("the new schema into this test.\n");
+    dump("-----------------------------------------------------\n");
+    dump("Copy this: " + JSON.stringify(actual));
+    dump("\n");
+    dump("-----------------------------------------------------\n");
+
+    throw error;
+  }
+}
diff --git a/tools/profiler/tests/xpcshell/test_active_configuration.js b/tools/profiler/tests/xpcshell/test_active_configuration.js
new file mode 100644
index 0000000000..10c3ff6a0f
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_active_configuration.js
@@ -0,0 +1,118 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+  info(
+    "Checking that the profiler can fetch the information about the active " +
+      "configuration that is being used to power the profiler."
+  );
+
+  equal(
+    Services.profiler.activeConfiguration,
+    null,
+    "When the profiler is off, there is no active configuration."
+  );
+
+  {
+    info("Start the profiler.");
+    const entries = 10000;
+    const interval = 1;
+    const threads = ["GeckoMain"];
+    const features = ["js", "leaf", "threads"];
+    const activeBrowsingContextID = 123;
+    Services.profiler.StartProfiler(
+      entries,
+      interval,
+      features,
+      threads,
+      activeBrowsingContextID
+    );
+
+    info("Generate the activeConfiguration.");
+    const { activeConfiguration } = Services.profiler;
+    const expectedConfiguration = {
+      interval,
+      threads,
+      features,
+      activeBrowsingContextID,
+      // The buffer is created as a power of two that can fit all of the entries
+      // into it. If the ratio of entries to buffer size ever changes, this setting
+      // will need to be updated. Here, 10000 entries round up to 2 ** 14 = 16384.
+      capacity: Math.pow(2, 14),
+    };
+
+    deepEqual(
+      activeConfiguration,
+      expectedConfiguration,
+      "The active configuration matches the configuration given."
+    );
+
+    info("Get the profile.");
+    const profile = Services.profiler.getProfileData();
+    deepEqual(
+      profile.meta.configuration,
+      expectedConfiguration,
+      "The configuration also matches on the profile meta object."
+    );
+  }
+
+  {
+    const entries = 20000;
+    const interval = 0.5;
+    const threads = ["GeckoMain", "DOM Worker"];
+    const features = ["threads"];
+    const activeBrowsingContextID = 111;
+    const duration = 20;
+
+    info("Restart the profiler with a new configuration.");
+    Services.profiler.StartProfiler(
+      entries,
+      interval,
+      features,
+      threads,
+      activeBrowsingContextID,
+      // Also start it with duration, this property is optional.
+      duration
+    );
+
+    info("Generate the activeConfiguration.");
+    const { activeConfiguration } = Services.profiler;
+    const expectedConfiguration = {
+      interval,
+      threads,
+      features,
+      activeBrowsingContextID,
+      duration,
+      // The buffer is created as a power of two that can fit all of the entries
+      // into it. If the ratio of entries to buffer size ever changes, this setting
+      // will need to be updated. Here, 20000 entries round up to 2 ** 15 = 32768.
+      capacity: Math.pow(2, 15),
+    };
+
+    deepEqual(
+      activeConfiguration,
+      expectedConfiguration,
+      "The active configuration matches the new configuration."
+    );
+
+    info("Get the profile.");
+    const profile = Services.profiler.getProfileData();
+    deepEqual(
+      profile.meta.configuration,
+      expectedConfiguration,
+      "The configuration also matches on the profile meta object."
+    );
+  }
+
+  Services.profiler.StopProfiler();
+
+  equal(
+    Services.profiler.activeConfiguration,
+    null,
+    "When the profiler is off, there is no active configuration."
+  );
+}
diff --git a/tools/profiler/tests/xpcshell/test_addProfilerMarker.js b/tools/profiler/tests/xpcshell/test_addProfilerMarker.js
new file mode 100644
index 0000000000..f3cba5843d
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_addProfilerMarker.js
@@ -0,0 +1,184 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test that ChromeUtils.addProfilerMarker is working correctly.
+ */
+
+const markerNamePrefix = "test_addProfilerMarker";
+const markerText = "Text payload";
+// The same startTime will be used for all markers with a duration,
+// and we store this value globally so that expectDuration and
+// expectNoDuration can access it. The value isn't set here as we
+// want a start time after the profiler has started.
+var startTime;
+
+function expectNoDuration(marker) {
+  Assert.equal(
+    typeof marker.startTime,
+    "number",
+    "startTime should be a number"
+  );
+  Assert.greater(
+    marker.startTime,
+    startTime,
+    "startTime should be after the beginning of the test"
+  );
+  Assert.equal(typeof marker.endTime, "number", "endTime should be a number");
+  Assert.equal(marker.endTime, 0, "endTime should be 0");
+}
+
+function expectDuration(marker) {
+  Assert.equal(
+    typeof marker.startTime,
+    "number",
+    "startTime should be a number"
+  );
+  Assert.equal(
+    Math.round(marker.startTime * 10 ** 6) / 10 ** 6,
+    startTime,
+    "startTime should be the expected time"
+  );
+  Assert.equal(typeof marker.endTime, "number", "endTime should be a number");
+  Assert.greater(
+    marker.endTime,
+    startTime,
+    "endTime should be after startTime"
+  );
+}
+
+function expectNoData(marker) {
+  Assert.equal(
+    typeof marker.data,
+    "undefined",
+    "The data property should be undefined"
+  );
+}
+
+function expectText(marker) {
+  Assert.equal(
+    typeof marker.data,
+    "object",
+    "The data property should be an object"
+  );
+  Assert.equal(marker.data.type, "Text", "Should be a Text marker");
+  Assert.equal(
+    marker.data.name,
+    markerText,
+    "The payload should contain the expected text"
+  );
+}
+
+function expectNoStack(marker) {
+  Assert.ok(!marker.data || !marker.data.stack, "There should be no stack");
+}
+
+function expectStack(marker) {
+  Assert.ok(marker.data.stack, "There should be a stack");
+}
+
+add_task(async () => {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+
+  startProfiler();
+  startTime = Math.round(Cu.now() * 10 ** 6) / 10 ** 6;
+  info("startTime used for markers with durations: " + startTime);
+
+  /* Each call to testMarker will record a marker with a unique name.
+   * The testFunctions and testCases objects contain respectively test
+   * functions to verify that the marker found in the captured profile
+   * matches expectations, and a string that can be printed to describe
+   * in which way ChromeUtils.addProfilerMarker was called.
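+   * For example, testMarker([{ startTime }, markerText], m => { ... }) records
+   * a marker that is expected to have both a duration and a Text payload.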
*/ + let testFunctions = {}; + let testCases = {}; + let markerId = 0; + function testMarker(args, checks) { + let name = markerNamePrefix + markerId++; + ChromeUtils.addProfilerMarker(name, ...args); + testFunctions[name] = checks; + testCases[name] = `ChromeUtils.addProfilerMarker(${[name, ...args] + .toSource() + .slice(1, -1)})`; + } + + info("Record markers without options object."); + testMarker([], m => { + expectNoDuration(m); + expectNoData(m); + }); + testMarker([startTime], m => { + expectDuration(m); + expectNoData(m); + }); + testMarker([undefined, markerText], m => { + expectNoDuration(m); + expectText(m); + }); + testMarker([startTime, markerText], m => { + expectDuration(m); + expectText(m); + }); + + info("Record markers providing the duration as the startTime property."); + testMarker([{ startTime }], m => { + expectDuration(m); + expectNoData(m); + }); + testMarker([{}, markerText], m => { + expectNoDuration(m); + expectText(m); + }); + testMarker([{ startTime }, markerText], m => { + expectDuration(m); + expectText(m); + }); + + info("Record markers to test the captureStack property."); + const captureStack = true; + testMarker([], expectNoStack); + testMarker([startTime, markerText], expectNoStack); + testMarker([{ captureStack: false }], expectNoStack); + testMarker([{ captureStack }], expectStack); + testMarker([{ startTime, captureStack }], expectStack); + testMarker([{ captureStack }, markerText], expectStack); + testMarker([{ startTime, captureStack }, markerText], expectStack); + + info("Record markers to test the category property"); + function testCategory(args, expectedCategory) { + testMarker(args, marker => { + Assert.equal(marker.category, expectedCategory); + }); + } + testCategory([], "JavaScript"); + testCategory([{ category: "Test" }], "Test"); + testCategory([{ category: "Test" }, markerText], "Test"); + testCategory([{ category: "JavaScript" }], "JavaScript"); + testCategory([{ category: "Other" }], "Other"); + testCategory([{ category: "DOM" }], "DOM"); + testCategory([{ category: "does not exist" }], "Other"); + + info("Capture the profile"); + const profile = await stopAndGetProfile(); + const mainThread = profile.threads.find(({ name }) => name === "GeckoMain"); + const markers = getInflatedMarkerData(mainThread).filter(m => + m.name.startsWith(markerNamePrefix) + ); + Assert.equal( + markers.length, + Object.keys(testFunctions).length, + `Found ${markers.length} test markers in the captured profile` + ); + + for (let marker of markers) { + marker.category = profile.meta.categories[marker.category].name; + info(`${testCases[marker.name]} -> ${marker.toSource()}`); + + testFunctions[marker.name](marker); + delete testFunctions[marker.name]; + } + + Assert.equal(0, Object.keys(testFunctions).length, "all markers were found"); +}); diff --git a/tools/profiler/tests/xpcshell/test_asm.js b/tools/profiler/tests/xpcshell/test_asm.js new file mode 100644 index 0000000000..fe04782c0d --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_asm.js @@ -0,0 +1,81 @@ +// Check that asm.js code shows up on the stack. +function run_test() { + // Just skip the test if the profiler component isn't present. + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + // This test assumes that it's starting on an empty profiler stack. + // (Note that the other profiler tests also assume the profiler + // isn't already started.) 
+ Assert.ok(!Services.profiler.IsActive()); + + let jsFuns = Cu.getJSTestingFunctions(); + if (!jsFuns.isAsmJSCompilationAvailable()) { + return; + } + + const ms = 10; + Services.profiler.StartProfiler(10000, ms, ["js"]); + + let stack = null; + function ffi_function() { + var delayMS = 5; + while (1) { + let then = Date.now(); + do { + // do nothing + } while (Date.now() - then < delayMS); + + var thread0 = Services.profiler.getProfileData().threads[0]; + + if (delayMS > 30000) { + return; + } + + delayMS *= 2; + + if (thread0.samples.data.length == 0) { + continue; + } + + var lastSample = thread0.samples.data[thread0.samples.data.length - 1]; + stack = String(getInflatedStackLocations(thread0, lastSample)); + if (stack.includes("trampoline")) { + return; + } + } + } + + function asmjs_module(global, ffis) { + "use asm"; + var ffi = ffis.ffi; + function asmjs_function() { + ffi(); + } + return asmjs_function; + } + + Assert.ok(jsFuns.isAsmJSModule(asmjs_module)); + + var asmjs_function = asmjs_module(null, { ffi: ffi_function }); + Assert.ok(jsFuns.isAsmJSFunction(asmjs_function)); + + asmjs_function(); + + Assert.notEqual(stack, null); + + var i1 = stack.indexOf("entry trampoline"); + Assert.ok(i1 !== -1); + var i2 = stack.indexOf("asmjs_function"); + Assert.ok(i2 !== -1); + var i3 = stack.indexOf("exit trampoline"); + Assert.ok(i3 !== -1); + var i4 = stack.indexOf("ffi_function"); + Assert.ok(i4 !== -1); + Assert.ok(i1 < i2); + Assert.ok(i2 < i3); + Assert.ok(i3 < i4); + + Services.profiler.StopProfiler(); +} diff --git a/tools/profiler/tests/xpcshell/test_enterjit_osr.js b/tools/profiler/tests/xpcshell/test_enterjit_osr.js new file mode 100644 index 0000000000..87fc711138 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_enterjit_osr.js @@ -0,0 +1,56 @@ +// Check that the EnterJIT frame, added by the JIT trampoline and +// usable by a native unwinder to resume unwinding after encountering +// JIT code, is pushed as expected. +function run_test() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + // This test assumes that it's starting on an empty profiler stack. + // (Note that the other profiler tests also assume the profiler + // isn't already started.) + Assert.ok(!Services.profiler.IsActive()); + + const ms = 5; + Services.profiler.StartProfiler(10000, ms, ["js"]); + + function has_arbitrary_name_in_stack() { + // A frame for |arbitrary_name| has been pushed. Do a sequence of + // increasingly long spins until we get a sample. + var delayMS = 5; + while (1) { + info("loop: ms = " + delayMS); + const then = Date.now(); + do { + let n = 10000; + // eslint-disable-next-line no-empty + while (--n) {} // OSR happens here + // Spin in the hope of getting a sample. + } while (Date.now() - then < delayMS); + let profile = Services.profiler.getProfileData().threads[0]; + + // Go through all of the stacks, and search for this function name. + for (const sample of profile.samples.data) { + const stack = getInflatedStackLocations(profile, sample); + info(`The following stack was found: ${stack}`); + for (var i = 0; i < stack.length; i++) { + if (stack[i].match(/arbitrary_name/)) { + // This JS sample was correctly found. + return true; + } + } + } + + // Continue running this function with an increasingly long delay. + delayMS *= 2; + if (delayMS > 30000) { + return false; + } + } + } + Assert.ok( + has_arbitrary_name_in_stack(), + "A JS frame was found before the test timeout." 
+ ); + Services.profiler.StopProfiler(); +} diff --git a/tools/profiler/tests/xpcshell/test_enterjit_osr_disabling.js b/tools/profiler/tests/xpcshell/test_enterjit_osr_disabling.js new file mode 100644 index 0000000000..2d4ffe5a10 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_enterjit_osr_disabling.js @@ -0,0 +1,19 @@ +function run_test() { + // Just skip the test if the profiler component isn't present. + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + Assert.ok(!Services.profiler.IsActive()); + + Services.profiler.StartProfiler(100, 10, ["js"]); + // The function is entered with the profiler enabled + (function() { + Services.profiler.StopProfiler(); + let n = 10000; + // eslint-disable-next-line no-empty + while (--n) {} // OSR happens here with the profiler disabled. + // An assertion will fail when this function returns, if the + // profiler stack was misbalanced. + })(); +} diff --git a/tools/profiler/tests/xpcshell/test_enterjit_osr_enabling.js b/tools/profiler/tests/xpcshell/test_enterjit_osr_enabling.js new file mode 100644 index 0000000000..3084a00018 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_enterjit_osr_enabling.js @@ -0,0 +1,18 @@ +function run_test() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + Assert.ok(!Services.profiler.IsActive()); + + // The function is entered with the profiler disabled. + (function() { + Services.profiler.StartProfiler(100, 10, ["js"]); + let n = 10000; + // eslint-disable-next-line no-empty + while (--n) {} // OSR happens here with the profiler enabled. + // An assertion will fail when this function returns, if the + // profiler stack was misbalanced. + })(); + Services.profiler.StopProfiler(); +} diff --git a/tools/profiler/tests/xpcshell/test_feature_fileioall.js b/tools/profiler/tests/xpcshell/test_feature_fileioall.js new file mode 100644 index 0000000000..97d55988d6 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_feature_fileioall.js @@ -0,0 +1,171 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This is only needed for getting a temp file location, which IOUtils +// cannot currently do. +const { OS } = ChromeUtils.import("resource://gre/modules/osfile.jsm"); + +add_task(async () => { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + info( + "Test that off-main thread fileio is captured for a profiled thread, " + + "and that it will be sent to the main thread." + ); + const filename = "test_marker_fileio"; + const profile = await startProfilerAndTriggerFileIO({ + features: ["fileioall"], + threadsFilter: ["GeckoMain", "BackgroundThreadPool"], + filename, + }); + + const threads = getThreads(profile); + const mainThread = threads.find(thread => thread.name === "GeckoMain"); + const mainThreadFileIO = getInflatedFileIOMarkers(mainThread, filename); + let backgroundThread; + let backgroundThreadFileIO; + for (const thread of threads) { + // Check for FileIO in any of the background threads. 
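+    // The pool may contain several threads and we don't know which one
+    // serviced the write, so scan them all and keep the first one that has
+    // matching markers.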
+ if (thread.name.startsWith("BackgroundThreadPool")) { + const markers = getInflatedFileIOMarkers(thread, filename); + if (markers.length > 0) { + backgroundThread = thread; + backgroundThreadFileIO = markers; + break; + } + } + } + + info("Check all of the main thread FileIO markers."); + checkInflatedFileIOMarkers(mainThreadFileIO, filename); + for (const { data, name } of mainThreadFileIO) { + equal( + name, + "FileIO (non-main thread)", + "The markers from off main thread are labeled as such." + ); + equal( + data.threadId, + backgroundThread.tid, + "The main thread FileIO markers were all sent from the background thread." + ); + } + + info("Check all of the background thread FileIO markers."); + checkInflatedFileIOMarkers(backgroundThreadFileIO, filename); + for (const { data, name } of backgroundThreadFileIO) { + equal( + name, + "FileIO", + "The markers on the thread where they were generated just say FileIO" + ); + equal( + data.threadId, + undefined, + "The background thread FileIO correctly excludes the threadId." + ); + } +}); + +add_task(async () => { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + info( + "Test that off-main thread fileio is captured for a thread that is not profiled, " + + "and that it will be sent to the main thread." + ); + const filename = "test_marker_fileio"; + const profile = await startProfilerAndTriggerFileIO({ + features: ["fileioall"], + threadsFilter: ["GeckoMain"], + filename, + }); + + const threads = getThreads(profile); + const mainThread = threads.find(thread => thread.name === "GeckoMain"); + const mainThreadFileIO = getInflatedFileIOMarkers(mainThread, filename); + + info("Check all of the main thread FileIO markers."); + checkInflatedFileIOMarkers(mainThreadFileIO, filename); + for (const { data, name } of mainThreadFileIO) { + equal( + name, + "FileIO (non-profiled thread)", + "The markers from off main thread are labeled as such." + ); + equal(typeof data.threadId, "number", "A thread ID is captured."); + } +}); + +/** + * @typedef {Object} TestConfig + * @prop {Array} features The list of profiler features + * @prop {string[]} threadsFilter The list of threads to profile + * @prop {string} filename A filename to trigger a write operation + */ + +/** + * Start the profiler and get FileIO markers. + * @param {TestConfig} + * @returns {Profile} + */ +async function startProfilerAndTriggerFileIO({ + features, + threadsFilter, + filename, +}) { + const entries = 10000; + const interval = 10; + Services.profiler.StartProfiler(entries, interval, features, threadsFilter); + + const tmpDir = OS.Constants.Path.tmpDir; + const path = OS.Path.join(tmpDir, filename); + + info(`Using a temporary file to test FileIO: ${path}`); + + if (fileExists(path)) { + console.warn( + "This test is triggering FileIO by writing to a file. However, the test found an " + + "existing file at the location it was trying to write to. This could happen " + + "because a previous run of the test failed to clean up after itself. This test " + + " will now clean up that file before running the test again." + ); + await removeFile(path); + } + + info("Write to the file, but do so using a background thread."); + + // IOUtils handles file operations using a background thread. 
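+  // Because the write happens off the main thread, the FileIO markers should
+  // show up on a BackgroundThreadPool thread, with copies forwarded to the
+  // main thread as "FileIO (non-main thread)" or "FileIO (non-profiled
+  // thread)" markers, which is what the tasks above assert.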
+ await IOUtils.write(path, new TextEncoder().encode("Test data.")); + const exists = await fileExists(path); + ok(exists, `Created temporary file at: ${path}`); + + info("Remove the file"); + await removeFile(path); + + // Pause the profiler as we don't need to collect more samples as we retrieve + // and serialize the profile. + Services.profiler.Pause(); + + const profile = await Services.profiler.getProfileDataAsync(); + Services.profiler.StopProfiler(); + return profile; +} + +async function fileExists(file) { + try { + let { type } = await IOUtils.stat(file); + return type === "regular"; + } catch (_error) { + return false; + } +} + +async function removeFile(file) { + await IOUtils.remove(file); + const exists = await fileExists(file); + ok(!exists, `Removed temporary file: ${file}`); +} diff --git a/tools/profiler/tests/xpcshell/test_feature_js.js b/tools/profiler/tests/xpcshell/test_feature_js.js new file mode 100644 index 0000000000..a2c328c051 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_feature_js.js @@ -0,0 +1,63 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Test that JS capturing works as expected. + */ +add_task(async () => { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + const entries = 10000; + const interval = 1; + const threads = []; + const features = ["js"]; + + Services.profiler.StartProfiler(entries, interval, features, threads); + + // Call the following to get a nice stack in the profiler: + // functionA -> functionB -> functionC -> captureAtLeastOneJsSample + const sampleIndex = await functionA(); + + const profile = await Services.profiler.getProfileDataAsync(); + const [thread] = profile.threads; + const { samples } = thread; + + const inflatedStackFrames = getInflatedStackLocations( + thread, + samples.data[sampleIndex] + ); + + expectStackToContain( + inflatedStackFrames, + [ + "(root)", + "js::RunScript", + // The following regexes match a string similar to: + // + // "functionA (/gecko/obj/_tests/xpcshell/tools/profiler/tests/xpcshell/test_feature_js.js:47:0)" + // + // this matches the script location + // | match the line number + // | | match the column number + // v v v + /^functionA \(.+test_feature_js\.js:\d+:\d+\)$/, + /^functionB \(.+test_feature_js\.js:\d+:\d+\)$/, + /^functionC \(.+test_feature_js\.js:\d+:\d+\)$/, + ], + "The stack contains a few frame labels, as well as the JS functions that we called." + ); +}); + +function functionA() { + return functionB(); +} + +function functionB() { + return functionC(); +} + +async function functionC() { + return captureAtLeastOneJsSample(); +} diff --git a/tools/profiler/tests/xpcshell/test_feature_mainthreadio.js b/tools/profiler/tests/xpcshell/test_feature_mainthreadio.js new file mode 100644 index 0000000000..d3fed91f83 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_feature_mainthreadio.js @@ -0,0 +1,125 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +const { FileUtils } = ChromeUtils.import( + "resource://gre/modules/FileUtils.jsm" +); + +/** + * Test that the IOInterposer is working correctly to capture main thread IO. + * + * This test should not run on release or beta, as the IOInterposer is wrapped in + * an ifdef. 
+ */ +add_task(async () => { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + { + const filename = "profiler-mainthreadio-test-firstrun"; + const { markers, schema } = await runProfilerWithFileIO( + ["mainthreadio"], + filename + ); + info("Check the FileIO markers when using the mainthreadio feature"); + checkInflatedFileIOMarkers(markers, filename); + + checkSchema(schema, { + name: "FileIO", + display: ["marker-chart", "marker-table", "timeline-fileio"], + data: [ + { + key: "operation", + label: "Operation", + format: "string", + searchable: true, + }, + { key: "source", label: "Source", format: "string", searchable: true }, + { + key: "filename", + label: "Filename", + format: "file-path", + searchable: true, + }, + ], + }); + } + + { + const filename = "profiler-mainthreadio-test-no-instrumentation"; + const { markers } = await runProfilerWithFileIO([], filename); + equal( + markers.length, + 0, + "No FileIO markers are found when the mainthreadio feature is not turned on " + + "in the profiler." + ); + } + + { + const filename = "profiler-mainthreadio-test-secondrun"; + const { markers } = await runProfilerWithFileIO(["mainthreadio"], filename); + info("Check the FileIO markers when re-starting the mainthreadio feature"); + checkInflatedFileIOMarkers(markers, filename); + } +}); + +/** + * Start the profiler and get FileIO markers and schema. + * + * @param {Array} features The list of profiler features + * @param {string} filename A filename to trigger a write operation + * @returns {{ + * markers: InflatedMarkers[]; + * schema: MarkerSchema; + * }} + */ +async function runProfilerWithFileIO(features, filename) { + const entries = 10000; + const interval = 10; + const threads = []; + Services.profiler.StartProfiler(entries, interval, features, threads); + + info("Get the file"); + const file = FileUtils.getFile("TmpD", [filename]); + if (file.exists()) { + console.warn( + "This test is triggering FileIO by writing to a file. However, the test found an " + + "existing file at the location it was trying to write to. This could happen " + + "because a previous run of the test failed to clean up after itself. This test " + + " will now clean up that file before running the test again." + ); + file.remove(false); + } + + info( + "Generate file IO on the main thread using FileUtils.openSafeFileOutputStream." + ); + const outputStream = FileUtils.openSafeFileOutputStream(file); + + const data = "Test data."; + info("Write to the file"); + outputStream.write(data, data.length); + + info("Close the file"); + FileUtils.closeSafeFileOutputStream(outputStream); + + info("Remove the file"); + file.remove(false); + + // Pause the profiler as we don't need to collect more samples as we retrieve + // and serialize the profile. + Services.profiler.Pause(); + + const profile = await Services.profiler.getProfileDataAsync(); + Services.profiler.StopProfiler(); + const mainThread = profile.threads.find(({ name }) => name === "GeckoMain"); + + const schema = getSchema(profile, "FileIO"); + + const markers = getInflatedFileIOMarkers(mainThread, filename); + + return { schema, markers }; +} diff --git a/tools/profiler/tests/xpcshell/test_feature_nativeallocations.js b/tools/profiler/tests/xpcshell/test_feature_nativeallocations.js new file mode 100644 index 0000000000..9893315417 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_feature_nativeallocations.js @@ -0,0 +1,164 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+add_task(async () => {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+
+  if (!Services.profiler.GetFeatures().includes("nativeallocations")) {
+    Assert.ok(
+      true,
+      "Native allocations are not supported by this build, " +
+        "skipping the rest of the test."
+    );
+    return;
+  }
+
+  Assert.ok(
+    !Services.profiler.IsActive(),
+    "The profiler is not currently active"
+  );
+
+  info(
+    "Test that the profiler can install memory hooks and collect native allocation " +
+      "information in the marker payloads."
+  );
+  {
+    info("Start the profiler.");
+    startProfiler({
+      // Only instrument the main thread.
+      threads: ["GeckoMain"],
+      features: ["threads", "leaf", "nativeallocations"],
+    });
+
+    info(
+      "Do some JS work for a little bit. This will increase the amount of allocations " +
+        "that take place."
+    );
+    doWork();
+
+    info("Get the profile data and analyze it.");
+    const profile = await stopAndGetProfile();
+
+    const {
+      allocationPayloads,
+      unmatchedAllocations,
+      logAllocationsAndDeallocations,
+    } = getAllocationInformation(profile);
+
+    Assert.greater(
+      allocationPayloads.length,
+      0,
+      "Native allocation payloads were recorded for the parent process' main thread when " +
+        "the Native Allocation feature was turned on."
+    );
+
+    if (unmatchedAllocations.length !== 0) {
+      info(
+        "There were unmatched allocations. Log all of the allocations and " +
+          "deallocations in order to aid debugging."
+      );
+      logAllocationsAndDeallocations();
+      ok(
+        false,
+        "Found a deallocation that did not have a matching allocation site. " +
+          "This could happen if balanced allocations is broken, or if the " +
+          "buffer size of this test was too small, and some markers ended up " +
+          "rolling off."
+      );
+    }
+
+    ok(true, "All deallocation sites had matching allocations.");
+  }
+
+  info("Restart the profiler, to ensure that we get no more allocations.");
+  {
+    startProfiler({ features: ["threads", "leaf"] });
+    info("Do some work again.");
+    doWork();
+    info("Wait for the periodic sampling.");
+    await Services.profiler.waitOnePeriodicSampling();
+
+    const profile = await stopAndGetProfile();
+    const allocationPayloads = getPayloadsOfType(
+      profile.threads[0],
+      "Native allocation"
+    );
+
+    Assert.equal(
+      allocationPayloads.length,
+      0,
+      "No native allocations were collected when the feature was disabled."
+    );
+  }
+});
+
+function doWork() {
+  this.n = 0;
+  for (let i = 0; i < 1e5; i++) {
+    this.n += Math.random();
+  }
+}
+
+/**
+ * Extract the allocation payloads, and find the unmatched allocations.
+ */
+function getAllocationInformation(profile) {
+  // Get all of the allocation payloads.
+  const allocationPayloads = getPayloadsOfType(
+    profile.threads[0],
+    "Native allocation"
+  );
+
+  // Decide what is an allocation and deallocation.
+  const allocations = allocationPayloads.filter(
+    payload => ensureIsNumber(payload.size) >= 0
+  );
+  const deallocations = allocationPayloads.filter(
+    payload => ensureIsNumber(payload.size) < 0
+  );
+
+  // Now determine the unmatched allocations by building a set.
+  const allocationSites = new Set(
+    allocations.map(({ memoryAddress }) => memoryAddress)
+  );
+
+  const unmatchedAllocations = deallocations.filter(
+    ({ memoryAddress }) => !allocationSites.has(memoryAddress)
+  );
+
+  // Provide a helper to log out the allocations and deallocations on failure.
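+  // It is only invoked when unmatched allocations were found, so passing runs
+  // stay quiet.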
+  function logAllocationsAndDeallocations() {
+    for (const { memoryAddress } of allocations) {
+      console.log("Allocations", formatHex(memoryAddress));
+      allocationSites.add(memoryAddress);
+    }
+
+    for (const { memoryAddress } of deallocations) {
+      console.log("Deallocations", formatHex(memoryAddress));
+    }
+
+    for (const { memoryAddress } of unmatchedAllocations) {
+      console.log("Deallocation with no allocation", formatHex(memoryAddress));
+    }
+  }
+
+  return {
+    allocationPayloads,
+    unmatchedAllocations,
+    logAllocationsAndDeallocations,
+  };
+}
+
+function ensureIsNumber(value) {
+  if (typeof value !== "number") {
+    throw new Error(`Expected a number: ${value}`);
+  }
+  return value;
+}
+
+function formatHex(number) {
+  return `0x${number.toString(16)}`;
+}
diff --git a/tools/profiler/tests/xpcshell/test_feature_stackwalking.js b/tools/profiler/tests/xpcshell/test_feature_stackwalking.js
new file mode 100644
index 0000000000..628f46f417
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_feature_stackwalking.js
@@ -0,0 +1,51 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Do a basic test to see if native frames are being collected for stackwalking. This
+ * test is fairly naive, as it does not attempt to check that these are valid symbols,
+ * only that some kind of stack walking is happening. It does this by making sure at
+ * least two native frames are collected.
+ */
+add_task(async () => {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+  const entries = 10000;
+  const interval = 1;
+  const threads = [];
+  const features = ["stackwalk"];
+
+  Services.profiler.StartProfiler(entries, interval, features, threads);
+  const sampleIndex = await captureAtLeastOneJsSample();
+
+  const profile = await Services.profiler.getProfileDataAsync();
+  const [thread] = profile.threads;
+  const { samples } = thread;
+
+  const inflatedStackFrames = getInflatedStackLocations(
+    thread,
+    samples.data[sampleIndex]
+  );
+  const nativeStack = /^0x[0-9a-f]+$/;
+
+  expectStackToContain(
+    inflatedStackFrames,
+    [
+      "(root)",
+      // There are probably more native stacks here.
+      nativeStack,
+      nativeStack,
+      // Since this is an xpcshell test we know that JavaScript will run:
+      "js::RunScript",
+      // There are probably more native stacks here.
+      nativeStack,
+      nativeStack,
+    ],
+    "Expected native stacks to be interleaved between some frame labels. There should " +
+      "be more than one native stack if stack walking is working correctly. There " +
+      "is no attempt here to determine if the memory addresses point to the correct " +
+      "symbols."
+  );
+});
diff --git a/tools/profiler/tests/xpcshell/test_get_features.js b/tools/profiler/tests/xpcshell/test_get_features.js
new file mode 100644
index 0000000000..23aee089a3
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_get_features.js
@@ -0,0 +1,12 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +function run_test() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + var profilerFeatures = Services.profiler.GetFeatures(); + Assert.ok(profilerFeatures != null); +} diff --git a/tools/profiler/tests/xpcshell/test_merged_stacks.js b/tools/profiler/tests/xpcshell/test_merged_stacks.js new file mode 100644 index 0000000000..526807d80b --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_merged_stacks.js @@ -0,0 +1,75 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Test that we correctly merge the three stack types, JS, native, and frame labels. + */ +add_task(async () => { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + const entries = 10000; + const interval = 1; + const threads = []; + const features = ["js", "stackwalk"]; + + Services.profiler.StartProfiler(entries, interval, features, threads); + + // Call the following to get a nice stack in the profiler: + // functionA -> functionB -> functionC + const sampleIndex = await functionA(); + + const profile = await Services.profiler.getProfileDataAsync(); + const [thread] = profile.threads; + const { samples } = thread; + + const inflatedStackFrames = getInflatedStackLocations( + thread, + samples.data[sampleIndex] + ); + + const nativeStack = /^0x[0-9a-f]+$/; + + expectStackToContain( + inflatedStackFrames, + [ + "(root)", + nativeStack, + nativeStack, + // There are more native stacks and frame labels here, but we know some execute + // and then the "js::RunScript" frame label runs. + "js::RunScript", + nativeStack, + nativeStack, + // The following regexes match a string similar to: + // + // "functionA (/gecko/obj/_tests/xpcshell/tools/profiler/tests/xpcshell/test_merged_stacks.js:47:0)" + // + // this matches the script location + // | match the line number + // | | match the column number + // v v v + /^functionA \(.+test_merged_stacks\.js:\d+:\d+\)$/, + /^functionB \(.+test_merged_stacks\.js:\d+:\d+\)$/, + /^functionC \(.+test_merged_stacks\.js:\d+:\d+\)$/, + // After the JS frames, then there are a bunch of arbitrary native stack frames + // that run. + nativeStack, + nativeStack, + ], + "The stack contains a few frame labels, as well as the JS functions that we called." + ); +}); + +function functionA() { + return functionB(); +} + +function functionB() { + return functionC(); +} + +async function functionC() { + return captureAtLeastOneJsSample(); +} diff --git a/tools/profiler/tests/xpcshell/test_pause.js b/tools/profiler/tests/xpcshell/test_pause.js new file mode 100644 index 0000000000..394a805715 --- /dev/null +++ b/tools/profiler/tests/xpcshell/test_pause.js @@ -0,0 +1,82 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +function run_test() { + if (!AppConstants.MOZ_GECKO_PROFILER) { + return; + } + + Assert.ok(!Services.profiler.IsActive()); + Assert.ok(!Services.profiler.IsPaused()); + + Services.profiler.StartProfiler(1000, 10, []); + + // Default: Active and not paused. + Assert.ok(Services.profiler.IsActive()); + Assert.ok(!Services.profiler.IsPaused()); + Assert.ok(!Services.profiler.IsSamplingPaused()); + + // Pause everything, implicitly pauses sampling. 
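+  // (PauseSampling(), exercised further down, only pauses the periodic
+  // sampler, whereas Pause() pauses the profiler as a whole.)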
+  Services.profiler.Pause();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(Services.profiler.IsPaused());
+  Assert.ok(Services.profiler.IsSamplingPaused());
+
+  // While fully paused, pause and resume sampling only, no expected changes.
+  Services.profiler.PauseSampling();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(Services.profiler.IsPaused());
+  Assert.ok(Services.profiler.IsSamplingPaused());
+
+  Services.profiler.ResumeSampling();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(Services.profiler.IsPaused());
+  Assert.ok(Services.profiler.IsSamplingPaused());
+
+  // Resume everything.
+  Services.profiler.Resume();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(!Services.profiler.IsPaused());
+  Assert.ok(!Services.profiler.IsSamplingPaused());
+
+  // Pause sampling only.
+  Services.profiler.PauseSampling();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(!Services.profiler.IsPaused());
+  Assert.ok(Services.profiler.IsSamplingPaused());
+
+  // While sampling is paused, pause everything.
+  Services.profiler.Pause();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(Services.profiler.IsPaused());
+  Assert.ok(Services.profiler.IsSamplingPaused());
+
+  // Resume, but sampling is still paused separately.
+  Services.profiler.Resume();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(!Services.profiler.IsPaused());
+  Assert.ok(Services.profiler.IsSamplingPaused());
+
+  // Resume sampling only.
+  Services.profiler.ResumeSampling();
+
+  Assert.ok(Services.profiler.IsActive());
+  Assert.ok(!Services.profiler.IsPaused());
+  Assert.ok(!Services.profiler.IsSamplingPaused());
+
+  Services.profiler.StopProfiler();
+  Assert.ok(!Services.profiler.IsActive());
+  // Stopping is not pausing.
+  Assert.ok(!Services.profiler.IsPaused());
+  Assert.ok(!Services.profiler.IsSamplingPaused());
+
+  do_test_finished();
+}
diff --git a/tools/profiler/tests/xpcshell/test_responsiveness.js b/tools/profiler/tests/xpcshell/test_responsiveness.js
new file mode 100644
index 0000000000..48d2eccff9
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_responsiveness.js
@@ -0,0 +1,53 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Test that we can measure non-zero event delays
+ */
+
+add_task(async () => {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+  const entries = 10000;
+  const interval = 1;
+  const threads = [];
+  const features = [];
+
+  Services.profiler.StartProfiler(entries, interval, features, threads);
+
+  await functionA();
+
+  const profile = await Services.profiler.getProfileDataAsync();
+  const [thread] = profile.threads;
+  const { samples } = thread;
+  const message = "eventDelay > 0 not found.";
+  const EVENT_DELAY_SLOT = thread.samples.schema.eventDelay;
+
+  for (let i = 0; i < samples.data.length; i++) {
+    if (samples.data[i][EVENT_DELAY_SLOT] > 0) {
+      Assert.ok(true, message);
+      return;
+    }
+  }
+  Assert.ok(false, message);
+});
+
+function doSyncWork(milliseconds) {
+  const start = Date.now();
+  while (true) {
+    this.n = 0;
+    for (let i = 0; i < 1e5; i++) {
+      this.n += Math.random();
+    }
+    if (Date.now() - start > milliseconds) {
+      return;
+    }
+  }
+}
+
+function functionA() {
+  doSyncWork(100);
+  return captureAtLeastOneJsSample();
+}
diff --git a/tools/profiler/tests/xpcshell/test_run.js b/tools/profiler/tests/xpcshell/test_run.js
new file mode 100644
index 0000000000..607c828677
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_run.js
@@ -0,0 +1,41 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+
+  Assert.ok(!Services.profiler.IsActive());
+
+  Services.profiler.StartProfiler(1000, 10, []);
+
+  Assert.ok(Services.profiler.IsActive());
+
+  do_test_pending();
+
+  do_timeout(1000, function wait() {
+    // Check text profile format
+    var profileStr = Services.profiler.GetProfile();
+    Assert.ok(profileStr.length > 10);
+
+    // check json profile format
+    var profileObj = Services.profiler.getProfileData();
+    Assert.notEqual(profileObj, null);
+    Assert.notEqual(profileObj.threads, null);
+    // We capture memory counters by default only when jemalloc is turned
+    // on (and it isn't for ASAN), so unless we can conditionalize for ASAN
+    // here we can't check that we're capturing memory counter data.
+    // Assert.notEqual(profileObj.counters, null);
+    // Assert.notEqual(profileObj.memory, null);
+    Assert.ok(profileObj.threads.length >= 1);
+    Assert.notEqual(profileObj.threads[0].samples, null);
+    // NOTE: The samples will be empty since we
+    // don't have any labels in the xpcshell code
+
+    Services.profiler.StopProfiler();
+    Assert.ok(!Services.profiler.IsActive());
+    do_test_finished();
+  });
+}
diff --git a/tools/profiler/tests/xpcshell/test_shared_library.js b/tools/profiler/tests/xpcshell/test_shared_library.js
new file mode 100644
index 0000000000..805047c363
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_shared_library.js
@@ -0,0 +1,25 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+
+  var libs = Services.profiler.sharedLibraries;
+
+  Assert.equal(typeof libs, "object");
+  Assert.ok(Array.isArray(libs));
+  Assert.equal(typeof libs, "object");
+  Assert.ok(libs.length >= 1);
+  Assert.equal(typeof libs[0], "object");
+  Assert.equal(typeof libs[0].name, "string");
+  Assert.equal(typeof libs[0].path, "string");
+  Assert.equal(typeof libs[0].debugName, "string");
+  Assert.equal(typeof libs[0].debugPath, "string");
+  Assert.equal(typeof libs[0].arch, "string");
+  Assert.equal(typeof libs[0].start, "number");
+  Assert.equal(typeof libs[0].end, "number");
+  Assert.ok(libs[0].start <= libs[0].end);
+}
diff --git a/tools/profiler/tests/xpcshell/test_start.js b/tools/profiler/tests/xpcshell/test_start.js
new file mode 100644
index 0000000000..c220ba22eb
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/test_start.js
@@ -0,0 +1,19 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+  if (!AppConstants.MOZ_GECKO_PROFILER) {
+    return;
+  }
+
+  Assert.ok(!Services.profiler.IsActive());
+
+  Services.profiler.StartProfiler(10, 100, []);
+
+  Assert.ok(Services.profiler.IsActive());
+
+  Services.profiler.StopProfiler();
+
+  Assert.ok(!Services.profiler.IsActive());
+}
diff --git a/tools/profiler/tests/xpcshell/xpcshell.ini b/tools/profiler/tests/xpcshell/xpcshell.ini
new file mode 100644
index 0000000000..02b3f57640
--- /dev/null
+++ b/tools/profiler/tests/xpcshell/xpcshell.ini
@@ -0,0 +1,53 @@
+[DEFAULT]
+head = head.js
+support-files =
+  ../shared-head.js
+skip-if = toolkit == 'android'
+
+[test_active_configuration.js]
+[test_addProfilerMarker.js]
+[test_start.js]
+skip-if = true
+[test_get_features.js]
+[test_responsiveness.js]
+skip-if = tsan # Times out on TSan, bug 1612707
+[test_shared_library.js]
+[test_run.js]
+skip-if = true
+[test_pause.js]
+[test_enterjit_osr.js]
+skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1536652
+[test_enterjit_osr_disabling.js]
+skip-if = !debug
+[test_enterjit_osr_enabling.js]
+skip-if = !debug
+[test_asm.js]
+[test_feature_mainthreadio.js]
+skip-if = release_or_beta || (os == "win" && processor == "aarch64") # The IOInterposer is in an ifdef, aarch64 due to 1536657
+[test_feature_fileioall.js]
+skip-if = release_or_beta || (os == "win" && processor == "aarch64") # The IOInterposer is in an ifdef, aarch64 due to 1536657
+
+# The sanitizer checks appear to overwrite our own memory hooks in xpcshell tests,
+# and no allocation markers are gathered. Skip this test in that configuration.
+[test_feature_nativeallocations.js]
+skip-if = asan || tsan
+
+# Native stackwalking is somewhat unreliable depending on the platform.
+#
+# We don't have frame pointers on macOS release and beta, so stack walking does not
+# work. See Bug 1571216 for more details.
+#
+# Linux can be very unreliable when native stackwalking through JavaScript code.
+# See Bug 1434402 for more details.
+#
+# For sanitizer builds, there were many intermittents, and we're not getting much
+# additional coverage there, so it's better to be a bit more reliable.
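+# Note: test_feature_stackwalking.js and test_merged_stacks.js only assert that
+# frames matching /^0x[0-9a-f]+$/ show up in the captured stacks; they make no
+# attempt to symbolicate or validate the addresses.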
+[test_feature_stackwalking.js]
+skip-if = (os == "mac" && release_or_beta) || (os == "linux" && release_or_beta && !debug) || asan || tsan
+
+[test_feature_js.js]
+skip-if = tsan # Times out on TSan, bug 1612707
+
+# See the comment on test_feature_stackwalking.js
+[test_merged_stacks.js]
+skip-if = (os == "mac" && release_or_beta) || (os == "linux" && release_or_beta && !debug) || asan || tsan