Diffstat (limited to 'testing/web-platform/tests/user-timing/measure.html')
-rw-r--r-- testing/web-platform/tests/user-timing/measure.html | 362
1 file changed, 362 insertions(+), 0 deletions(-)
diff --git a/testing/web-platform/tests/user-timing/measure.html b/testing/web-platform/tests/user-timing/measure.html
new file mode 100644
index 0000000000..40f71a3362
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure.html
@@ -0,0 +1,362 @@
+
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+ <title>window.performance User Timing measure() method is working properly</title>
+ <link rel="author" title="Microsoft" href="http://www.microsoft.com/" />
+ <link rel="help" href="https://w3c.github.io/user-timing/#dom-performance-measure"/>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/common/performance-timeline-utils.js"></script>
+ <script src="resources/webperftestharness.js"></script>
+
+ <script>
+ // test data
+ var startMarkName = "mark_start";
+ var startMarkValue;
+ var endMarkName = "mark_end";
+ var endMarkValue;
+ var measures;
+ var testThreshold = 20; // maximum allowed difference, in milliseconds, between actual and expected times
+
+ // test measures
+ var measureTestDelay = 200;
+ var TEST_MEASURES =
+ [
+ {
+ name: "measure_no_start_no_end",
+ startMark: undefined,
+ endMark: undefined,
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ {
+ name: "measure_start_no_end",
+ startMark: "mark_start",
+ endMark: undefined,
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ {
+ name: "measure_start_end",
+ startMark: "mark_start",
+ endMark: "mark_end",
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ {
+ name: "measure_no_start_end",
+ startMark: undefined,
+ endMark: "mark_end",
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ // intentional duplicate of the first measure, used to confirm names can be re-used
+ {
+ name: "measure_no_start_no_end",
+ startMark: undefined,
+ endMark: undefined,
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ }
+ ];
+ // the index of the duplicate "measure_no_start_no_end"
+ const duplicate_index = TEST_MEASURES.map(m=>m.name).lastIndexOf('measure_no_start_no_end');
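+ // (with the scenario list above, duplicate_index evaluates to 4, i.e. the second
+ // "measure_no_start_no_end" entry)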
+
+ setup({explicit_done: true});
+
+ test_namespace();
+
+ function onload_test()
+ {
+ // test for existence of User Timing and Performance Timeline interface
+ if (!has_required_interfaces())
+ {
+ test_true(false,
+ "The User Timing and Performance Timeline interfaces, which are required for this test, " +
+ "are defined.");
+
+ done();
+ }
+ else
+ {
+ // create the start mark for the test measures
+ window.performance.mark(startMarkName);
+
+ // get the start mark's value
+ startMarkValue = window.performance.getEntriesByName(startMarkName)[0].startTime;
+
+ // schedule creation of the end mark after measureTestDelay, so that the two marks
+ // differ by a significant amount that should be reflected in the duration of
+ // measures using them
+ step_timeout(measure_test_cb, measureTestDelay);
+ }
+ }
+
+ function measure_test_cb()
+ {
+ // create the end mark for the test measures
+ window.performance.mark(endMarkName);
+
+ // get the end mark's value
+ endMarkValue = window.performance.getEntriesByName(endMarkName)[0].startTime;
+
+ // loop through all measure scenarios and create the corresponding measures
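+ // a sketch of the expected resolution for each call shape, per the User Timing spec
+ // (mirrored by the branches below):
+ //   measure(name)                 -> startTime 0,            duration ~ now
+ //   measure(name, start)          -> startTime start value,  duration ~ now - start
+ //   measure(name, start, end)     -> startTime start value,  duration end - start
+ //   measure(name, undefined, end) -> startTime 0,            duration end value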
+ for (var i in TEST_MEASURES)
+ {
+ var scenario = TEST_MEASURES[i];
+
+ if (scenario.startMark == undefined && scenario.endMark == undefined)
+ {
+ // both startMark and endMark are undefined, don't provide either parameter
+ window.performance.measure(scenario.name);
+
+ // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
+ // to the navigationStart attribute with a timebase of the same attribute is used; this is
+ // equivalent to 0
+ scenario.startTime = 0;
+
+ // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
+ // the current time with a timebase of the navigationStart attribute is used
+ scenario.duration = window.performance.now();
+ }
+ else if (scenario.startMark != undefined && scenario.endMark == undefined)
+ {
+ // only startMark is defined, provide startMark and don't provide endMark
+ window.performance.measure(scenario.name, scenario.startMark);
+
+ // when startMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the startMark
+ scenario.startTime = startMarkValue;
+
+ // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
+ // the current time with a timebase of the navigationStart attribute is used
+ scenario.duration = window.performance.now() - startMarkValue;
+ }
+ else if (scenario.startMark != undefined && scenario.endMark != undefined)
+ {
+ // both startMark and endMark are defined, provide both parameters
+ window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);
+
+ // when startMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the startMark
+ scenario.startTime = startMarkValue;
+
+ // when endMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the endMark
+ scenario.duration = endMarkValue - startMarkValue;
+ }
+ else if (scenario.startMark == undefined && scenario.endMark != undefined)
+ {
+ // endMark is defined but startMark is undefined, provide both parameters
+ window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);
+
+ // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
+ // to the navigationStart attribute with a timebase of the same attribute is used; this is
+ // equivalent to 0
+ scenario.startTime = 0;
+
+ // when endMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the endMark
+ scenario.duration = endMarkValue;
+ }
+ else
+ {
+ test_true(false, 'Test measure scenario unhandled');
+ }
+ }
+
+ // test that expected measures are returned by getEntriesByName
+ for (var i in TEST_MEASURES)
+ {
+ var entries = window.performance.getEntriesByName(TEST_MEASURES[i].name);
+ // for each test measure, validate against the first entry returned by getEntriesByName();
+ // the last measure is a duplicate, so it is validated against the second entry returned
+ // by getEntriesByName()
+ test_measure(entries[(i == duplicate_index ? 1 : 0)],
+ "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\")[" +
+ (i == duplicate_index ? 1 : 0) + "]",
+ TEST_MEASURES[i].name,
+ TEST_MEASURES[i].startTime,
+ TEST_MEASURES[i].duration);
+ TEST_MEASURES[i].entryMatch = entries[(i == duplicate_index ? 1 : 0)];
+ }
+
+ // test that expected measures are returned by getEntriesByName with the entryType parameter provided
+ for (var i in TEST_MEASURES)
+ {
+ entries = window.performance.getEntriesByName(TEST_MEASURES[i].name, "measure");
+
+ test_true(match_entries(entries[(i == duplicate_index ? 1 : 0)], TEST_MEASURES[i].entryMatch),
+ "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\", \"measure\")[" +
+ (i == duplicate_index ? 1 : 0) + "] returns an object containing the \"" + TEST_MEASURES[i].name +
+ "\" measure in the correct order, and its value matches the \"" + TEST_MEASURES[i].name +
+ "\" measure returned by window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name +
+ "\")");
+ }
+
+ // test that expected measures are returned by getEntries
+ entries = get_test_entries(window.performance.getEntries(), "measure");
+
+ test_measure_list(entries, "window.performance.getEntries()", TEST_MEASURES);
+
+ // test that expected measures are returned by getEntriesByType
+ entries = window.performance.getEntriesByType("measure");
+
+ test_measure_list(entries, "window.performance.getEntriesByType(\"measure\")", TEST_MEASURES);
+
+ done();
+ }
+
+ function match_entries(entry1, entry2, threshold)
+ {
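+ // descriptive note: compares two entries field by field; name and entryType must match
+ // exactly, while startTime and duration may differ by up to `threshold` milliseconds
+ // (testThreshold when not specified)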
+ if (threshold == undefined)
+ {
+ threshold = testThreshold;
+ }
+
+ var pass = true;
+
+ // match name
+ pass = pass && (entry1.name == entry2.name);
+
+ // match startTime
+ pass = pass && (Math.abs(entry1.startTime - entry2.startTime) <= threshold);
+
+ // match entryType
+ pass = pass && (entry1.entryType == entry2.entryType);
+
+ // match duration
+ pass = pass && (Math.abs(entry1.duration - entry2.duration) <= threshold);
+
+ return pass;
+ }
+
+ function test_measure(measureEntry, measureEntryCommand, expectedName, expectedStartTime, expectedDuration)
+ {
+ // test name
+ test_true(measureEntry.name == expectedName, measureEntryCommand + ".name == \"" + expectedName + "\"");
+
+ // test startTime; since a measure's startTime is always equal to a mark's value or to
+ // zero (the navigationStart timebase), the actual startTime should match the expected
+ // value exactly
+ test_true(measureEntry.startTime == expectedStartTime,
+ measureEntryCommand + ".startTime is correct");
+
+ // test entryType
+ test_true(measureEntry.entryType == "measure", measureEntryCommand + ".entryType == \"measure\"");
+
+ // test duration, allow for an acceptable threshold in the difference between the actual duration and the
+ // expected value for the duration
+ test_true(Math.abs(measureEntry.duration - expectedDuration) <= testThreshold, measureEntryCommand +
+ ".duration is approximately correct (up to " + testThreshold + "ms difference allowed)");
+ }
+
+ function test_measure_list(measureEntryList, measureEntryListCommand, measureScenarios)
+ {
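+ // descriptive note: matching is greedy; each scenario claims the first not-yet-claimed
+ // entry it matches, so the duplicate scenario pairs with a distinct entry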
+ // give each entry a "found" flag so that no entry is matched more than once
+ for (var i in measureEntryList)
+ {
+ measureEntryList[i].found = false;
+ }
+
+ for (var i in measureScenarios)
+ {
+ measureScenarios[i].found = false;
+
+ for (var j in measureEntryList)
+ {
+ if (match_entries(measureEntryList[j], measureScenarios[i]) && !measureEntryList[j].found)
+ {
+ test_true(match_entries(measureEntryList[j], measureScenarios[i].entryMatch),
+ measureEntryListCommand + " returns an object containing the \"" +
+ measureScenarios[i].name + "\" measure, and its value matches the measure " +
+ "returned by window.performance.getEntriesByName(\"" + measureScenarios[i].name +
+ "\")[" + (i == duplicate_index ? 1 : 0) + "].");
+
+ measureEntryList[j].found = true;
+ measureScenarios[i].found = true;
+ break;
+ }
+ }
+
+ if (!measureScenarios[i].found)
+ {
+ test_true(false,
+ measureEntryListCommand + " returns an object containing the \"" +
+ measureScenarios[i].name + "\" measure.");
+ }
+ }
+
+ // verify that the entries in the list are sorted by startTime
+ var startTimeCurr = 0;
+ var pass = true;
+ for (var i in measureEntryList)
+ {
+ if (measureEntryList[i].startTime < startTimeCurr)
+ {
+ pass = false;
+ }
+ startTimeCurr = measureEntryList[i].startTime;
+ }
+ test_true(pass,
+ measureEntryListCommand + " returns an object containing all test " +
+ "measures in order.");
+ }
+
+ function get_test_entries(entryList, entryType)
+ {
+ var testEntries = [];
+
+ // filter entryList
+ for (var i in entryList)
+ {
+ if (entryList[i].entryType == entryType)
+ {
+ testEntries.push(entryList[i]);
+ }
+ }
+
+ return testEntries;
+ }
+ </script>
+ </head>
+ <body onload="onload_test();">
+ <h1>Description</h1>
+ <p>This test validates that the performance.measure() method is working properly. It creates the
+ following measures to exercise the method:
+ <ul>
+ <li>"measure_no_start_no_end": created using a measure() call without a startMark or endMark
+ provided</li>
+ <li>"measure_start_no_end": created using a measure() call with only the startMark provided</li>
+ <li>"measure_start_end": created using a measure() call with both a startMark or endMark provided</li>
+ <li>"measure_no_start_end": created using a measure() call with only the endMark provided</li>
+ <li>"measure_no_start_no_end": duplicate of the first measure, used to confirm names can be re-used</li>
+ </ul>
+ After these measures are created, their existence is validated by calling
+ performance.getEntriesByName() (both with and without the entryType parameter provided),
+ performance.getEntriesByType(), and performance.getEntries().
+ </p>
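+ <!--
+ For reference, a sketch of the measure() calls this test issues for the scenarios
+ listed above (mark names match those created by the harness):
+   performance.measure("measure_no_start_no_end");
+   performance.measure("measure_start_no_end", "mark_start");
+   performance.measure("measure_start_end", "mark_start", "mark_end");
+   performance.measure("measure_no_start_end", undefined, "mark_end");
+ -->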
+
+ <div id="log"></div>
+ </body>
+</html>