Diffstat (limited to 'testing/web-platform/tests/user-timing')
-rw-r--r-- testing/web-platform/tests/user-timing/META.yml | 4
-rw-r--r-- testing/web-platform/tests/user-timing/buffered-flag.any.js | 27
-rw-r--r-- testing/web-platform/tests/user-timing/case-sensitivity.any.js | 25
-rw-r--r-- testing/web-platform/tests/user-timing/clearMarks.html | 84
-rw-r--r-- testing/web-platform/tests/user-timing/clearMeasures.html | 77
-rw-r--r-- testing/web-platform/tests/user-timing/clear_all_marks.any.js | 17
-rw-r--r-- testing/web-platform/tests/user-timing/clear_all_measures.any.js | 21
-rw-r--r-- testing/web-platform/tests/user-timing/clear_non_existent_mark.any.js | 26
-rw-r--r-- testing/web-platform/tests/user-timing/clear_non_existent_measure.any.js | 29
-rw-r--r-- testing/web-platform/tests/user-timing/clear_one_mark.any.js | 26
-rw-r--r-- testing/web-platform/tests/user-timing/clear_one_measure.any.js | 29
-rw-r--r-- testing/web-platform/tests/user-timing/entry_type.any.js | 13
-rw-r--r-- testing/web-platform/tests/user-timing/idlharness-shadowrealm.window.js | 2
-rw-r--r-- testing/web-platform/tests/user-timing/idlharness.any.js | 33
-rw-r--r-- testing/web-platform/tests/user-timing/invoke_with_timing_attributes.html | 35
-rw-r--r-- testing/web-platform/tests/user-timing/invoke_with_timing_attributes.worker.js | 25
-rw-r--r-- testing/web-platform/tests/user-timing/invoke_without_parameter.html | 26
-rw-r--r-- testing/web-platform/tests/user-timing/mark-entry-constructor.any.js | 40
-rw-r--r-- testing/web-platform/tests/user-timing/mark-errors.any.js | 50
-rw-r--r-- testing/web-platform/tests/user-timing/mark-l3.any.js | 39
-rw-r--r-- testing/web-platform/tests/user-timing/mark-measure-feature-detection.html | 36
-rw-r--r-- testing/web-platform/tests/user-timing/mark-measure-return-objects.any.js | 37
-rw-r--r-- testing/web-platform/tests/user-timing/mark.any.js | 118
-rw-r--r-- testing/web-platform/tests/user-timing/mark.html | 58
-rw-r--r-- testing/web-platform/tests/user-timing/mark_exceptions.html | 41
-rw-r--r-- testing/web-platform/tests/user-timing/measure-exceptions.html | 49
-rw-r--r-- testing/web-platform/tests/user-timing/measure-l3.any.js | 35
-rw-r--r-- testing/web-platform/tests/user-timing/measure-with-dict.any.js | 112
-rw-r--r-- testing/web-platform/tests/user-timing/measure.html | 362
-rw-r--r-- testing/web-platform/tests/user-timing/measure_associated_with_navigation_timing.html | 66
-rw-r--r-- testing/web-platform/tests/user-timing/measure_exception.html | 34
-rw-r--r-- testing/web-platform/tests/user-timing/measure_exceptions_navigation_timing.html | 70
-rw-r--r-- testing/web-platform/tests/user-timing/measure_navigation_timing.html | 205
-rw-r--r-- testing/web-platform/tests/user-timing/measure_syntax_err.any.js | 33
-rw-r--r-- testing/web-platform/tests/user-timing/measures.html | 66
-rw-r--r-- testing/web-platform/tests/user-timing/performance-measure-invalid.worker.js | 16
-rw-r--r-- testing/web-platform/tests/user-timing/resources/user-timing-helper.js | 30
-rw-r--r-- testing/web-platform/tests/user-timing/resources/webperftestharness.js | 124
-rw-r--r-- testing/web-platform/tests/user-timing/resources/webperftestharnessextension.js | 202
-rw-r--r-- testing/web-platform/tests/user-timing/structured-serialize-detail.any.js | 66
-rw-r--r-- testing/web-platform/tests/user-timing/supported-usertiming-types.any.js | 37
-rw-r--r-- testing/web-platform/tests/user-timing/user-timing-tojson.html | 44
-rw-r--r-- testing/web-platform/tests/user-timing/user_timing_exists.any.js | 12
43 files changed, 2481 insertions, 0 deletions
diff --git a/testing/web-platform/tests/user-timing/META.yml b/testing/web-platform/tests/user-timing/META.yml
new file mode 100644
index 0000000000..5cb2a789c0
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/META.yml
@@ -0,0 +1,4 @@
+spec: https://w3c.github.io/user-timing/
+suggested_reviewers:
+ - plehegar
+ - igrigorik
diff --git a/testing/web-platform/tests/user-timing/buffered-flag.any.js b/testing/web-platform/tests/user-timing/buffered-flag.any.js
new file mode 100644
index 0000000000..f938c8522d
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/buffered-flag.any.js
@@ -0,0 +1,27 @@
+async_test(t => {
+ // First observer creates second in callback to ensure the entry has been dispatched by the time
+ // the second observer begins observing.
+ new PerformanceObserver(() => {
+ // Second observer requires 'buffered: true' to see an entry.
+ new PerformanceObserver(t.step_func_done(list => {
+ const entries = list.getEntries();
+ assert_equals(entries.length, 1, 'There should be 1 mark entry.');
+ assert_equals(entries[0].entryType, 'mark');
+ })).observe({type: 'mark', buffered: true});
+ }).observe({entryTypes: ['mark']});
+ performance.mark('foo');
+}, 'PerformanceObserver with buffered flag sees previous marks');
+
+async_test(t => {
+ // First observer creates second in callback to ensure the entry has been dispatched by the time
+ // the second observer begins observing.
+ new PerformanceObserver(() => {
+ // Second observer requires 'buffered: true' to see an entry.
+ new PerformanceObserver(t.step_func_done(list => {
+ const entries = list.getEntries();
+ assert_equals(entries.length, 1, 'There should be 1 measure entry.');
+ assert_equals(entries[0].entryType, 'measure');
+ })).observe({type: 'measure', buffered: true});
+ }).observe({entryTypes: ['measure']});
+ performance.measure('bar');
+}, 'PerformanceObserver with buffered flag sees previous measures');
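The two tests above exercise the Performance Timeline buffered flag: an observer registered after an entry has already been dispatched only receives that entry when observe() is called with buffered: true. A minimal sketch of the same pattern outside the test harness (the mark name 'late-mark' is illustrative):

    // Record a mark before any observer exists.
    performance.mark('late-mark');

    // Without `buffered: true` this callback would never see 'late-mark'.
    new PerformanceObserver((list, observer) => {
      for (const entry of list.getEntries()) {
        console.log(entry.name, entry.startTime);
      }
      observer.disconnect();
    }).observe({ type: 'mark', buffered: true });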
diff --git a/testing/web-platform/tests/user-timing/case-sensitivity.any.js b/testing/web-platform/tests/user-timing/case-sensitivity.any.js
new file mode 100644
index 0000000000..1c0b0dcac3
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/case-sensitivity.any.js
@@ -0,0 +1,25 @@
+ test(function () {
+ assert_equals(typeof self.performance, "object");
+ assert_equals(typeof self.performance.getEntriesByType, "function");
+
+ self.performance.mark("mark1");
+ self.performance.measure("measure1");
+
+ const type = [
+ 'mark',
+ 'measure',
+ ];
+ type.forEach(function(entryType) {
+ if (PerformanceObserver.supportedEntryTypes.includes(entryType)) {
+ const entryTypeUpperCased = entryType.toUpperCase();
+ const entryTypeCapitalized = entryType[0].toUpperCase() + entryType.substring(1);
+ const lowerList = self.performance.getEntriesByType(entryType);
+ const upperList = self.performance.getEntriesByType(entryTypeUpperCased);
+ const mixedList = self.performance.getEntriesByType(entryTypeCapitalized);
+
+ assert_greater_than(lowerList.length, 0, "Entries exist");
+ assert_equals(upperList.length, 0, "getEntriesByType('" + entryTypeUpperCased + "').length");
+ assert_equals(mixedList.length, 0, "getEntriesByType('" + entryTypeCapitalized + "').length");
+ }
+ });
+ }, "getEntriesByType values are case sensitive");
diff --git a/testing/web-platform/tests/user-timing/clearMarks.html b/testing/web-platform/tests/user-timing/clearMarks.html
new file mode 100644
index 0000000000..92c60a3bbb
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clearMarks.html
@@ -0,0 +1,84 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<title>functionality test of window.performance.clearMarks</title>
+<link rel="author" title="Intel" href="http://www.intel.com/" />
+<link rel="help" href="http://www.w3.org/TR/user-timing/#extensions-performance-interface"/>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/performance-timeline-utils.js"></script>
+<script src="resources/webperftestharness.js"></script>
+<script src="resources/webperftestharnessextension.js"></script>
+<script>
+setup({ explicit_done: true });
+
+function onload_test()
+{
+ const entrylist_checker = new performance_entrylist_checker('mark');
+ const string_mark_names = mark_names.map(function (x) { return String(x)});
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+
+ for (let i = 0; i < mark_names.length; ++i)
+ {
+ performance.clearMarks(mark_names[i]);
+ const retained_entries = performance.getEntriesByType('mark');
+ const non_retained_entries = performance.getEntriesByName(mark_names[i], 'mark');
+ entrylist_checker.entrylist_check(retained_entries, mark_names.length - i - 1, string_mark_names,
+ 'First loop: checking entries after removing "' + mark_names[i] + '". ');
+ test_equals(non_retained_entries.length, 0,
+ 'First loop: marks that we cleared for "' + mark_names[i] + '" should not exist anymore.');
+ }
+
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+ performance.clearMarks();
+ test_equals(performance.getEntriesByType('mark').length, 0, 'No marks should exist after we clear all.');
+
+ // The following cases test clearing a mark name that has been marked twice.
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+
+ for (let i = 0; i < mark_names.length; ++i)
+ {
+ performance.clearMarks(mark_names[i]);
+ const retained_entries = performance.getEntriesByType('mark');
+ const non_retained_entries = performance.getEntriesByName(mark_names[i], 'mark');
+ entrylist_checker.entrylist_check(retained_entries, (mark_names.length - i - 1) * 2, string_mark_names,
+ 'Second loop: checking entries after removing "' + mark_names[i] + '". ');
+ test_equals(non_retained_entries.length, 0,
+ 'Second loop: marks that we cleared for "' + mark_names[i] + '" should not exist anymore.');
+ }
+
+ // The following cases test clearing when each mark name has been marked twice.
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+ var entry_number_before_useless_clear = performance.getEntriesByType('mark').length;
+ performance.clearMarks('NonExist');
+ var entry_number_after_useless_clear = performance.getEntriesByType('mark').length;
+ test_equals(entry_number_before_useless_clear, entry_number_after_useless_clear, 'Nothing should happen if we clear a non-existent mark.');
+
+ performance.clearMarks();
+ test_equals(performance.getEntriesByType('mark').length, 0, 'No marks should exist when we clear all.');
+
+ done();
+}
+</script>
+</head>
+<body onload=onload_test()>
+ <h1>Description</h1>
+ <p>This test validates functionality of the interface window.performance.clearMarks.</p>
+ <div id="log"></div>
+</body>
+</html>
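The test above covers the three behaviours of clearMarks(): clearing by name removes every mark with that name, clearing an unknown name is a no-op, and clearing with no argument empties the mark timeline. A hedged sketch with illustrative names:

    performance.mark('a');
    performance.mark('a');
    performance.mark('b');

    performance.clearMarks('a');              // removes both 'a' entries
    performance.getEntriesByName('a').length; // 0
    performance.getEntriesByName('b').length; // 1

    performance.clearMarks('no-such-mark');   // no-op
    performance.clearMarks();                 // no marks remain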
diff --git a/testing/web-platform/tests/user-timing/clearMeasures.html b/testing/web-platform/tests/user-timing/clearMeasures.html
new file mode 100644
index 0000000000..54d4100569
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clearMeasures.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<title>functionality test of window.performance.clearMeasures</title>
+<link rel="author" title="Intel" href="http://www.intel.com/" />
+<link rel="help" href="http://www.w3.org/TR/user-timing/#extensions-performance-interface"/>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/performance-timeline-utils.js"></script>
+<script src="resources/webperftestharness.js"></script>
+<script src="resources/webperftestharnessextension.js"></script>
+<script>
+setup({ explicit_done: true });
+
+function onload_test()
+{
+ const context = new PerformanceContext(window.performance);
+ const entrylist_checker = new performance_entrylist_checker('measure');
+ const measure_names = measures.map(function(x) {return x[0];});
+
+ mark_names.forEach(function(name) {
+ context.mark(name);
+ });
+ measures.forEach(context.initialMeasures, context);
+ for (let i = 0; i < measures.length; ++i)
+ {
+ context.clearMeasures(measures[i][0]);
+ const retained_entries = context.getEntriesByType('measure');
+ const non_retained_entries = context.getEntriesByName(measures[i][0], 'measure');
+ entrylist_checker.entrylist_check(retained_entries, measures.length - i - 1, measure_names,
+ 'First loop: checking entries after removing "' + measures[i][0] + '". ');
+ test_equals(non_retained_entries.length, 0,
+ 'First loop: measure "' + measures[i][0] + '" should not exist anymore after we cleared it.');
+ }
+
+ measures.forEach(context.initialMeasures, context);
+ context.clearMeasures();
+ test_equals(context.getEntriesByType('measure').length, 0, 'No measures should exist after we clear all (after first loop).');
+
+ // The following cases test clearing a measure name that has been measured twice.
+ measures.forEach(context.initialMeasures, context);
+ mark_names.forEach(function(name) {
+ context.mark(name);
+ });
+ measures.forEach(context.initialMeasures, context);
+ for (let i = 0; i < measures.length; ++i)
+ {
+ context.clearMeasures(measures[i][0]);
+ const retained_entries = context.getEntriesByType('measure');
+ const non_retained_entries = context.getEntriesByName(measures[i][0], 'measure');
+ entrylist_checker.entrylist_check(retained_entries, (measures.length - i - 1) * 2, measure_names,
+ 'Second loop: checking entries after removing "' + measures[i][0] + '". ');
+ test_equals(non_retained_entries.length, 0,
+ 'Second loop: measure "' + measures[i][0] +'" should not exist anymore after we cleared it.');
+ }
+
+ // The following cases test clearing when each measure name has been measured twice.
+ measures.forEach(context.initialMeasures, context);
+ measures.forEach(context.initialMeasures, context);
+ const entry_number_before_useless_clear = context.getEntriesByType('measure').length;
+ context.clearMeasures('NonExist');
+ const entry_number_after_useless_clear = context.getEntriesByType('measure').length;
+ test_equals(entry_number_before_useless_clear, entry_number_after_useless_clear, 'Nothing should happen if we clear a non-existent measure.');
+ context.clearMeasures();
+ test_equals(context.getEntriesByType('measure').length, 0, 'No measures should exist when we clear all (after second loop).');
+
+ done();
+}
+</script>
+</head>
+<body onload=onload_test()>
+ <h1>Description</h1>
+ <p>This test validates functionality of the interface window.performance.clearMeasures.</p>
+ <div id="log"></div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/clear_all_marks.any.js b/testing/web-platform/tests/user-timing/clear_all_marks.any.js
new file mode 100644
index 0000000000..35cd2a04f6
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clear_all_marks.any.js
@@ -0,0 +1,17 @@
+test(function() {
+ self.performance.mark("mark1");
+ self.performance.mark("mark2");
+
+ // test that two marks have been created
+ var entries = self.performance.getEntriesByType("mark");
+ assert_equals(entries.length, 2, "Two marks have been created for this test.");
+
+ // clear all marks
+ self.performance.clearMarks();
+
+ // test that all marks were cleared
+ entries = self.performance.getEntriesByType("mark");
+
+ assert_equals(entries.length, 0, "All marks have been cleared.");
+
+}, "Clearing all marks remove all of them.");
diff --git a/testing/web-platform/tests/user-timing/clear_all_measures.any.js b/testing/web-platform/tests/user-timing/clear_all_measures.any.js
new file mode 100644
index 0000000000..32c993f282
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clear_all_measures.any.js
@@ -0,0 +1,21 @@
+test(function()
+{
+ self.performance.mark("mark1");
+ self.performance.measure("measure1", "mark1");
+ self.performance.mark("mark2");
+ self.performance.measure("measure2", "mark2");
+
+ // test that two measures have been created
+ var entries = self.performance.getEntriesByType("measure");
+ assert_equals(entries.length, 2, "Two measures have been created for this test.");
+
+ // clear all measures
+ self.performance.clearMeasures();
+
+ // test that all measures were cleared
+ entries = self.performance.getEntriesByType("measure");
+ assert_equals(entries.length, 0,
+ "After a call to self.performance.clearMeasures(), " +
+ "self.performance.getEntriesByType(\"measure\") returns an empty object.");
+
+}, "Clearing all marks remove all of them.");
diff --git a/testing/web-platform/tests/user-timing/clear_non_existent_mark.any.js b/testing/web-platform/tests/user-timing/clear_non_existent_mark.any.js
new file mode 100644
index 0000000000..c7d8b47861
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clear_non_existent_mark.any.js
@@ -0,0 +1,26 @@
+test(function() {
+ self.performance.mark("mark1");
+ self.performance.mark("mark2");
+
+ // test that two marks have been created
+ var entries = self.performance.getEntriesByType("mark");
+ assert_equals(entries.length, 2, "Two marks have been created for this test.");
+
+ // clear non-existent mark
+ self.performance.clearMarks("mark3");
+
+ // test that "mark1" still exists
+ entries = self.performance.getEntriesByName("mark1");
+ assert_equals(entries[0].name, "mark1",
+ "After a call to self.performance.clearMarks(\"mark3\"), where \"mark3" +
+ "\" is a non-existent mark, self.performance.getEntriesByName(\"mark1\") " +
+ "returns an object containing the \"mark1\" mark.");
+
+ // test that "mark2" still exists
+ entries = self.performance.getEntriesByName("mark2");
+ assert_equals(entries[0].name, "mark2",
+ "After a call to self.performance.clearMarks(\"mark3\"), where \"mark3" +
+ "\" is a non-existent mark, self.performance.getEntriesByName(\"mark2\") " +
+ "returns an object containing the \"mark2\" mark.");
+
+}, "Clearing a non-existent mark doesn't affect existing marks");
diff --git a/testing/web-platform/tests/user-timing/clear_non_existent_measure.any.js b/testing/web-platform/tests/user-timing/clear_non_existent_measure.any.js
new file mode 100644
index 0000000000..9de0b5f266
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clear_non_existent_measure.any.js
@@ -0,0 +1,29 @@
+test(function()
+{
+ self.performance.mark("mark1");
+ self.performance.measure("measure1", "mark1");
+ self.performance.mark("mark2");
+ self.performance.measure("measure2", "mark2");
+
+ // test that two measures have been created
+ var entries = self.performance.getEntriesByType("measure");
+ assert_equals(entries.length, 2, "Two measures have been created for this test.");
+
+ // clear non-existent measure
+ self.performance.clearMeasures("measure3");
+
+ // test that "measure1" still exists
+ entries = self.performance.getEntriesByName("measure1");
+ assert_equals(entries[0].name, "measure1",
+ "After a call to self.performance.clearMeasures(\"measure3\"), where \"measure3" +
+ "\" is a non-existent measure, self.performance.getEntriesByName(\"measure1\") " +
+ "returns an object containing the \"measure1\" measure.");
+
+ // test that "measure2" still exists
+ entries = self.performance.getEntriesByName("measure2");
+ assert_equals(entries[0].name, "measure2",
+ "After a call to self.performance.clearMeasures(\"measure3\"), where \"measure3" +
+ "\" is a non-existent measure, self.performance.getEntriesByName(\"measure2\") " +
+ "returns an object containing the \"measure2\" measure.");
+
+}, "Clearing a non-existent measure doesn't affect existing measures");
diff --git a/testing/web-platform/tests/user-timing/clear_one_mark.any.js b/testing/web-platform/tests/user-timing/clear_one_mark.any.js
new file mode 100644
index 0000000000..c180199d8c
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clear_one_mark.any.js
@@ -0,0 +1,26 @@
+test(function() {
+ self.performance.mark("mark1");
+ self.performance.mark("mark2");
+
+ // test that two marks have been created
+ var entries = self.performance.getEntriesByType("mark");
+ assert_equals(entries.length, 2, "Two marks have been created for this test.");
+
+ // clear existent mark
+ self.performance.clearMarks("mark1");
+
+ // test that "mark1" was cleared
+ entries = self.performance.getEntriesByName("mark1");
+
+ assert_equals(entries.length, 0,
+ "After a call to self.performance.clearMarks(\"mark1\"), " +
+ "window.performance.getEntriesByName(\"mark1\") returns an empty object.");
+
+ // test that "mark2" still exists
+ entries = self.performance.getEntriesByName("mark2");
+ assert_equals(entries[0].name, "mark2",
+ "After a call to self.performance.clearMarks(\"mark1\"), " +
+ "window.performance.getEntriesByName(\"mark2\") returns an object containing the " +
+ "\"mark2\" mark.");
+
+}, "Clearing an existent mark doesn't affect other existing marks");
diff --git a/testing/web-platform/tests/user-timing/clear_one_measure.any.js b/testing/web-platform/tests/user-timing/clear_one_measure.any.js
new file mode 100644
index 0000000000..a5e663772c
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/clear_one_measure.any.js
@@ -0,0 +1,29 @@
+test(function()
+{
+ self.performance.mark("mark1");
+ self.performance.measure("measure1", "mark1");
+ self.performance.mark("mark2");
+ self.performance.measure("measure2", "mark2");
+
+ // test that two measures have been created
+ var entries = self.performance.getEntriesByType("measure");
+ assert_equals(entries.length, 2, "Two measures have been created for this test.");
+
+ // clear existent measure
+ self.performance.clearMeasures("measure1");
+
+ // test that "measure1" was cleared
+ entries = self.performance.getEntriesByName("measure1");
+
+ assert_equals(entries.length, 0,
+ "After a call to self.performance.clearMeasures(\"measure1\"), " +
+ "self.performance.getEntriesByName(\"measure1\") returns an empty object.");
+
+ // test that "measure2" still exists
+ entries = self.performance.getEntriesByName("measure2");
+ assert_equals(entries[0].name, "measure2",
+ "After a call to self.performance.clearMeasures(\"measure1\"), " +
+ "self.performance.getEntriesByName(\"measure2\") returns an object containing the " +
+ "\"measure2\" measure.");
+
+}, "Clearing an existent measure doesn't affect other existing measures");
diff --git a/testing/web-platform/tests/user-timing/entry_type.any.js b/testing/web-platform/tests/user-timing/entry_type.any.js
new file mode 100644
index 0000000000..1e37453d09
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/entry_type.any.js
@@ -0,0 +1,13 @@
+test(function () {
+ self.performance.mark('mark');
+ var mark_entry = self.performance.getEntriesByName('mark')[0];
+
+ assert_equals(Object.prototype.toString.call(mark_entry), '[object PerformanceMark]', 'Class name of mark entry should be PerformanceMark.');
+}, "Validate the user timing entry type PerformanceMark");
+
+test(function () {
+ self.performance.measure('measure');
+ var measure_entry = self.performance.getEntriesByName('measure')[0];
+
+ assert_equals(Object.prototype.toString.call(measure_entry), '[object PerformanceMeasure]', 'Class name of measure entry should be PerformanceMeasure.');
+}, "Validate the user timing entry type PerformanceMeasure");
diff --git a/testing/web-platform/tests/user-timing/idlharness-shadowrealm.window.js b/testing/web-platform/tests/user-timing/idlharness-shadowrealm.window.js
new file mode 100644
index 0000000000..340da96f74
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/idlharness-shadowrealm.window.js
@@ -0,0 +1,2 @@
+// META: script=/resources/idlharness-shadowrealm.js
+idl_test_shadowrealm(["user-timing"], ["hr-time", "performance-timeline", "dom"]);
diff --git a/testing/web-platform/tests/user-timing/idlharness.any.js b/testing/web-platform/tests/user-timing/idlharness.any.js
new file mode 100644
index 0000000000..511f2d0455
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/idlharness.any.js
@@ -0,0 +1,33 @@
+// META: global=window,worker
+// META: script=/resources/WebIDLParser.js
+// META: script=/resources/idlharness.js
+// META: timeout=long
+
+// https://w3c.github.io/user-timing/
+
+'use strict';
+
+idl_test(
+ ['user-timing'],
+ ['hr-time', 'performance-timeline', 'dom'],
+ idl_array => {
+ try {
+ performance.mark('test');
+ performance.measure('test');
+ for (const m of performance.getEntriesByType('mark')) {
+ self.mark = m;
+ }
+ for (const m of performance.getEntriesByType('measure')) {
+ self.measure = m;
+ }
+ } catch (e) {
+ // Will be surfaced when mark is undefined below.
+ }
+
+ idl_array.add_objects({
+ Performance: ['performance'],
+ PerformanceMark: ['mark'],
+ PerformanceMeasure: ['measure'],
+ });
+ }
+);
diff --git a/testing/web-platform/tests/user-timing/invoke_with_timing_attributes.html b/testing/web-platform/tests/user-timing/invoke_with_timing_attributes.html
new file mode 100644
index 0000000000..1df94a3006
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/invoke_with_timing_attributes.html
@@ -0,0 +1,35 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <title>exception test of performance.mark and performance.measure</title>
+ <link rel="help" href="https://w3c.github.io/user-timing/#extensions-performance-interface"/>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="resources/webperftestharness.js"></script>
+ </head>
+ <body>
+ <h1>Description</h1>
+ <p>This test validates exception scenarios of invoking mark() and measure() with timing attributes as value.</p>
+ <div id="log"></div>
+ <script>
+function emit_test(attrName) {
+ test(function() {
+ assert_throws_dom("SyntaxError", function() { window.performance.mark(attrName); });
+ }, "performance.mark should throw if used with timing attribute " + attrName);
+}
+for (var i in timingAttributes) {
+ emit_test(timingAttributes[i]);
+}
+
+function emit_test2(attrName) {
+ test(function() {
+ window.performance.measure(attrName);
+ }, "performance.measure should not throw if used with timing attribute " + attrName);
+}
+for (var i in timingAttributes) {
+ emit_test2(timingAttributes[i]);
+}
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/invoke_with_timing_attributes.worker.js b/testing/web-platform/tests/user-timing/invoke_with_timing_attributes.worker.js
new file mode 100644
index 0000000000..32677c64d3
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/invoke_with_timing_attributes.worker.js
@@ -0,0 +1,25 @@
+importScripts("/resources/testharness.js");
+importScripts("resources/webperftestharness.js");
+
+function emit_test(attrName) {
+ test(function() {
+ performance.mark(attrName);
+ performance.clearMarks(attrName);
+ }, "performance.mark should not throw if used with timing attribute " + attrName
+ + " in workers");
+}
+for (var i in timingAttributes) {
+ emit_test(timingAttributes[i]);
+}
+
+function emit_test2(attrName) {
+ test(function() {
+ performance.measure(attrName);
+ performance.clearMeasures(attrName);
+ }, "performance.measure should not throw if used with timing attribute " + attrName
+ + " in workers");
+}
+for (var i in timingAttributes) {
+ emit_test2(timingAttributes[i]);
+}
+done();
diff --git a/testing/web-platform/tests/user-timing/invoke_without_parameter.html b/testing/web-platform/tests/user-timing/invoke_without_parameter.html
new file mode 100644
index 0000000000..114435e59b
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/invoke_without_parameter.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <title>exception test of performance.mark and performance.measure</title>
+ <link rel="author" title="Intel" href="http://www.intel.com/" />
+ <link rel="help" href="https://w3c.github.io/user-timing/#extensions-performance-interface"/>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="resources/webperftestharness.js"></script>
+ </head>
+ <body>
+ <h1>Description</h1>
+ <p>This test validates exception scenarios of invoking mark() and measure() without parameter.</p>
+ <div id="log"></div>
+ <script>
+test(function () {
+ assert_throws_js(TypeError, function () { window.performance.mark() });
+}, "window.performance.mark() throws a TypeError exception when invoke without a parameter.");
+
+test(function () {
+ assert_throws_js(TypeError, function () { window.performance.measure(); });
+}, "window.performance.measure() throws a TypeError exception when invoke without a parameter.");
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/mark-entry-constructor.any.js b/testing/web-platform/tests/user-timing/mark-entry-constructor.any.js
new file mode 100644
index 0000000000..ef9c403dda
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark-entry-constructor.any.js
@@ -0,0 +1,40 @@
+// META: script=resources/user-timing-helper.js
+
+test(()=>{
+ const entry = new PerformanceMark("name");
+ assert_true(entry instanceof PerformanceMark);
+ checkEntry(entry, {name: "name", entryType: "mark"});
+}, "Mark entry can be created by 'new PerformanceMark(string)'.");
+
+test(()=>{
+ const entry = new PerformanceMark("name", {});
+ assert_true(entry instanceof PerformanceMark);
+ checkEntry(entry, {name: "name", entryType: "mark"});
+}, "Mark entry can be created by 'new PerformanceMark(string, {})'.");
+
+test(()=>{
+ const entry = new PerformanceMark("name", {startTime: 1});
+ assert_true(entry instanceof PerformanceMark);
+ checkEntry(entry, {name: "name", entryType: "mark", startTime: 1});
+}, "Mark entry can be created by 'new PerformanceMark(string, {startTime})'.");
+
+test(()=>{
+ const entry = new PerformanceMark("name", {detail: {info: "abc"}});
+ assert_true(entry instanceof PerformanceMark);
+ checkEntry(entry, {name: "name", entryType: "mark", detail: {info: "abc"}});
+}, "Mark entry can be created by 'new PerformanceMark(string, {detail})'.");
+
+test(()=>{
+ const entry =
+ new PerformanceMark("name", {startTime: 1, detail: {info: "abc"}});
+ assert_true(entry instanceof PerformanceMark);
+ checkEntry(entry, {name: "name", entryType: "mark", startTime: 1, detail: {info: "abc"}});
+}, "Mark entry can be created by " +
+ "'new PerformanceMark(string, {startTime, detail})'.");
+
+test(()=>{
+ const entry = new PerformanceMark("name");
+ assert_true(entry instanceof PerformanceMark);
+ checkEntry(entry, {name: "name", entryType: "mark"});
+ assert_equals(performance.getEntriesByName("name").length, 0);
+}, "Using new PerformanceMark() shouldn't add the entry to performance timeline.");
diff --git a/testing/web-platform/tests/user-timing/mark-errors.any.js b/testing/web-platform/tests/user-timing/mark-errors.any.js
new file mode 100644
index 0000000000..39bafc045c
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark-errors.any.js
@@ -0,0 +1,50 @@
+// If you're testing an API that constructs a PerformanceMark, add your test here.
+// See the for loop below for details.
+const markConstructionTests = [
+ {
+ testName: "Number should be rejected as the mark-options.",
+ testFunction: function(newMarkFunction) {
+ assert_throws_js(TypeError, function() { newMarkFunction("mark1", 123); }, "Number passed as a dict argument should cause type-error.");
+ },
+ },
+
+ {
+ testName: "NaN should be rejected as the mark-options.",
+ testFunction: function(newMarkFunction) {
+ assert_throws_js(TypeError, function() { newMarkFunction("mark1", NaN); }, "NaN passed as a dict argument should cause type-error.");
+ },
+ },
+
+ {
+ testName: "Infinity should be rejected as the mark-options.",
+ testFunction: function(newMarkFunction) {
+ assert_throws_js(TypeError, function() { newMarkFunction("mark1", Infinity); }, "Infinity passed as a dict argument should cause type-error.");
+ },
+ },
+
+ {
+ testName: "String should be rejected as the mark-options.",
+ testFunction: function(newMarkFunction) {
+ assert_throws_js(TypeError, function() { newMarkFunction("mark1", "string"); }, "String passed as a dict argument should cause type-error.")
+ },
+ },
+
+ {
+ testName: "Negative startTime in mark-options should be rejected",
+ testFunction: function(newMarkFunction) {
+ assert_throws_js(TypeError, function() { newMarkFunction("mark1", {startTime: -1}); }, "Negative startTime should cause type-error.")
+ },
+ },
+];
+
+// There are multiple function calls that can construct a mark using the same arguments so we run
+// each test on each construction method here, avoiding duplication.
+for (let testInfo of markConstructionTests) {
+ test(function() {
+ testInfo.testFunction(self.performance.mark);
+ }, `[performance.mark]: ${testInfo.testName}`);
+
+ test(function() {
+ testInfo.testFunction((markName, obj) => new PerformanceMark(markName, obj));
+ }, `[new PerformanceMark]: ${testInfo.testName}`);
+}
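Both construction paths are expected to reject the same invalid mark-options, so a bad options value fails identically whether it reaches performance.mark() or the PerformanceMark constructor. Illustrative sketch:

    // Both calls are expected to throw a TypeError.
    try { performance.mark('m', 123); } catch (e) { console.log(e.name); }                  // TypeError
    try { new PerformanceMark('m', { startTime: -1 }); } catch (e) { console.log(e.name); } // TypeError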
diff --git a/testing/web-platform/tests/user-timing/mark-l3.any.js b/testing/web-platform/tests/user-timing/mark-l3.any.js
new file mode 100644
index 0000000000..407a5c8bba
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark-l3.any.js
@@ -0,0 +1,39 @@
+// META: script=resources/user-timing-helper.js
+
+async_test(function (t) {
+ let mark_entries = [];
+ const expected_entries =
+ [{ entryType: "mark", name: "mark1", detail: null},
+ { entryType: "mark", name: "mark2", detail: null},
+ { entryType: "mark", name: "mark3", detail: null},
+ { entryType: "mark", name: "mark4", detail: null},
+ { entryType: "mark", name: "mark5", detail: null},
+ { entryType: "mark", name: "mark6", detail: {}},
+ { entryType: "mark", name: "mark7", detail: {info: 'abc'}},
+ { entryType: "mark", name: "mark8", detail: null, startTime: 234.56},
+ { entryType: "mark", name: "mark9", detail: {count: 3}, startTime: 345.67}];
+ const observer = new PerformanceObserver(
+ t.step_func(function (entryList, obs) {
+ mark_entries =
+ mark_entries.concat(entryList.getEntries());
+ if (mark_entries.length >= expected_entries.length) {
+ checkEntries(mark_entries, expected_entries);
+ observer.disconnect();
+ t.done();
+ }
+ })
+ );
+ self.performance.clearMarks();
+ observer.observe({entryTypes: ["mark"]});
+ const returned_entries = [];
+ returned_entries.push(self.performance.mark("mark1"));
+ returned_entries.push(self.performance.mark("mark2", undefined));
+ returned_entries.push(self.performance.mark("mark3", null));
+ returned_entries.push(self.performance.mark("mark4", {}));
+ returned_entries.push(self.performance.mark("mark5", {detail: null}));
+ returned_entries.push(self.performance.mark("mark6", {detail: {}}));
+ returned_entries.push(self.performance.mark("mark7", {detail: {info: 'abc'}}));
+ returned_entries.push(self.performance.mark("mark8", {startTime: 234.56}));
+ returned_entries.push(self.performance.mark("mark9", {detail: {count: 3}, startTime: 345.67}));
+ checkEntries(returned_entries, expected_entries);
+}, "mark entries' detail and startTime are customizable.");
diff --git a/testing/web-platform/tests/user-timing/mark-measure-feature-detection.html b/testing/web-platform/tests/user-timing/mark-measure-feature-detection.html
new file mode 100644
index 0000000000..6f1ad489e9
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark-measure-feature-detection.html
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML>
+<meta charset=utf-8>
+<title>User Timing: L2 vs L3 feature detection</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+ test(() => {
+ // Feature detection for PerformanceMark.
+ assert_equals(typeof(PerformanceMark.prototype), "object");
+ // Test for UserTiming L3.
+ if (PerformanceMark.prototype.hasOwnProperty('detail')) {
+ assert_equals(typeof(performance.mark("mark")), "object",
+ "performance.mark should return an object in UserTiming L3.");
+ }
+ // Test for UserTiming L2.
+ else {
+ assert_equals(typeof(performance.mark("mark")), "undefined",
+ "performance.mark should be void in UserTiming L2.");
+ }
+ }, "Test PerformanceMark existence and feature detection");
+
+ test(() => {
+ // Feature detection for PerformanceMeasure.
+ assert_equals(typeof(PerformanceMeasure.prototype), "object");
+ // Test for UserTiming L3.
+ if (PerformanceMeasure.prototype.hasOwnProperty('detail')) {
+ assert_equals(typeof(performance.measure("measure")), "object",
+ "performance.measure should return an object in UserTiming L3.");
+ }
+ // Test for UserTiming L2.
+ else {
+ assert_equals(typeof(performance.measure("measure")), "undefined",
+ "performance.measure should be void in UserTiming L2.");
+ }
+ }, "Test PerformanceMeasure existence and feature detection");
+</script>
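The detection used above keys off the Level 3 additions: the detail accessor on the prototype and the fact that mark()/measure() return the created entry. A compact feature check along the same lines (a sketch, not the only possible approach):

    const supportsUserTimingL3 =
        typeof PerformanceMark !== 'undefined' &&
        'detail' in PerformanceMark.prototype;

    if (supportsUserTimingL3) {
      const entry = performance.mark('probe'); // returns a PerformanceMark in L3
      console.log(entry.detail);               // null unless provided
    } else {
      performance.mark('probe');               // returns undefined in L2
    }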
diff --git a/testing/web-platform/tests/user-timing/mark-measure-return-objects.any.js b/testing/web-platform/tests/user-timing/mark-measure-return-objects.any.js
new file mode 100644
index 0000000000..bb15c58398
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark-measure-return-objects.any.js
@@ -0,0 +1,37 @@
+async_test(function (t) {
+ self.performance.clearMeasures();
+ const measure = self.performance.measure("measure1");
+ assert_true(measure instanceof PerformanceMeasure);
+ t.done();
+}, "L3: performance.measure(name) should return an entry.");
+
+async_test(function (t) {
+ self.performance.clearMeasures();
+ const measure = self.performance.measure("measure2",
+ { start: 12, end: 23 });
+ assert_true(measure instanceof PerformanceMeasure);
+ t.done();
+}, "L3: performance.measure(name, param1) should return an entry.");
+
+async_test(function (t) {
+ self.performance.clearMeasures();
+ self.performance.mark("1");
+ self.performance.mark("2");
+ const measure = self.performance.measure("measure3", "1", "2");
+ assert_true(measure instanceof PerformanceMeasure);
+ t.done();
+}, "L3: performance.measure(name, param1, param2) should return an entry.");
+
+async_test(function (t) {
+ self.performance.clearMarks();
+ const mark = self.performance.mark("mark1");
+ assert_true(mark instanceof PerformanceMark);
+ t.done();
+}, "L3: performance.mark(name) should return an entry.");
+
+async_test(function (t) {
+ self.performance.clearMarks();
+ const mark = self.performance.mark("mark2", { startTime: 34 });
+ assert_true(mark instanceof PerformanceMark);
+ t.done();
+}, "L3: performance.mark(name, param) should return an entry.");
diff --git a/testing/web-platform/tests/user-timing/mark.any.js b/testing/web-platform/tests/user-timing/mark.any.js
new file mode 100644
index 0000000000..7e814d2074
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark.any.js
@@ -0,0 +1,118 @@
+// test data
+var testThreshold = 20;
+
+var expectedTimes = new Array();
+
+function match_entries(entries, index)
+{
+ var entry = entries[index];
+ var match = self.performance.getEntriesByName("mark")[index];
+ assert_equals(entry.name, match.name, "entry.name");
+ assert_equals(entry.startTime, match.startTime, "entry.startTime");
+ assert_equals(entry.entryType, match.entryType, "entry.entryType");
+ assert_equals(entry.duration, match.duration, "entry.duration");
+}
+
+function filter_entries_by_type(entryList, entryType)
+{
+ var testEntries = new Array();
+
+ // filter entryList
+ for (var i in entryList)
+ {
+ if (entryList[i].entryType == entryType)
+ {
+ testEntries.push(entryList[i]);
+ }
+ }
+
+ return testEntries;
+}
+
+test(function () {
+ // create first mark
+ self.performance.mark("mark");
+
+ expectedTimes[0] = self.performance.now();
+
+ const entries = self.performance.getEntriesByName("mark");
+ assert_equals(entries.length, 1);
+}, "Entry 0 is properly created");
+
+test(function () {
+ // create second, duplicate mark
+ self.performance.mark("mark");
+
+ expectedTimes[1] = self.performance.now();
+
+ const entries = self.performance.getEntriesByName("mark");
+ assert_equals(entries.length, 2);
+
+}, "Entry 1 is properly created");
+
+function test_mark(index) {
+ test(function () {
+ const entries = self.performance.getEntriesByName("mark");
+ assert_equals(entries[index].name, "mark", "Entry has the proper name");
+ }, "Entry " + index + " has the proper name");
+
+ test(function () {
+ const entries = self.performance.getEntriesByName("mark");
+ assert_approx_equals(entries[index].startTime, expectedTimes[index], testThreshold);
+ }, "Entry " + index + " startTime is approximately correct (up to " + testThreshold +
+ "ms difference allowed)");
+
+ test(function () {
+ const entries = self.performance.getEntriesByName("mark");
+ assert_equals(entries[index].entryType, "mark");
+ }, "Entry " + index + " has the proper entryType");
+
+ test(function () {
+ const entries = self.performance.getEntriesByName("mark");
+ assert_equals(entries[index].duration, 0);
+ }, "Entry " + index + " duration == 0");
+
+ test(function () {
+ const entries = self.performance.getEntriesByName("mark", "mark");
+ assert_equals(entries[index].name, "mark");
+ }, "getEntriesByName(\"mark\", \"mark\")[" + index + "] returns an " +
+ "object containing a \"mark\" mark");
+
+ test(function () {
+ const entries = self.performance.getEntriesByName("mark", "mark");
+ match_entries(entries, index);
+ }, "The mark returned by getEntriesByName(\"mark\", \"mark\")[" + index
+ + "] matches the mark returned by " +
+ "getEntriesByName(\"mark\")[" + index + "]");
+
+ test(function () {
+ const entries = filter_entries_by_type(self.performance.getEntries(), "mark");
+ assert_equals(entries[index].name, "mark");
+ }, "getEntries()[" + index + "] returns an " +
+ "object containing a \"mark\" mark");
+
+ test(function () {
+ const entries = filter_entries_by_type(self.performance.getEntries(), "mark");
+ match_entries(entries, index);
+ }, "The mark returned by getEntries()[" + index
+ + "] matches the mark returned by " +
+ "getEntriesByName(\"mark\")[" + index + "]");
+
+ test(function () {
+ const entries = self.performance.getEntriesByType("mark");
+ assert_equals(entries[index].name, "mark");
+ }, "getEntriesByType(\"mark\")[" + index + "] returns an " +
+ "object containing a \"mark\" mark");
+
+ test(function () {
+ const entries = self.performance.getEntriesByType("mark");
+ match_entries(entries, index);
+ }, "The mark returned by getEntriesByType(\"mark\")[" + index
+ + "] matches the mark returned by " +
+ "getEntriesByName(\"mark\")[" + index + "]");
+
+}
+
+for (var i = 0; i < expectedTimes.length; i++) {
+ test_mark(i);
+}
diff --git a/testing/web-platform/tests/user-timing/mark.html b/testing/web-platform/tests/user-timing/mark.html
new file mode 100644
index 0000000000..e03e9e6247
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark.html
@@ -0,0 +1,58 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<title>functionality test of window.performance.mark</title>
+<link rel="author" title="Intel" href="http://www.intel.com/" />
+<link rel="help" href="http://www.w3.org/TR/user-timing/#extensions-performance-interface"/>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/performance-timeline-utils.js"></script>
+<script src="resources/webperftestharness.js"></script>
+<script src="resources/webperftestharnessextension.js"></script>
+<script>
+setup({ explicit_done: true });
+
+function onload_test()
+{
+ const entrylist_checker = new performance_entrylist_checker('mark');
+ const string_mark_names = mark_names.map(function (x) { return String(x)});
+
+ test_equals(performance.getEntriesByType("mark").length, 0, 'There should be ' + 0 + ' marks');
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+ let mark_entrylist = performance.getEntriesByType('mark');
+
+ entrylist_checker.entrylist_check(mark_entrylist, mark_names.length, string_mark_names, 'Checking all entries.');
+
+ for (let i = 0; i < mark_entrylist.length; ++i)
+ {
+ const mark_entrylist_by_name = performance.getEntriesByName(mark_entrylist[i].name, 'mark');
+ entrylist_checker.entrylist_check(mark_entrylist_by_name, 1, string_mark_names,
+ 'First loop: checking entry of name "' + mark_entrylist[i].name + '".');
+ }
+
+ mark_names.forEach(function(name) {
+ performance.mark(name);
+ });
+ mark_entrylist = performance.getEntriesByType('mark');
+ entrylist_checker.entrylist_check(mark_entrylist, mark_names.length * 2, string_mark_names, 'Checking all doubly marked entries.');
+
+ for (let i = 0; i < mark_entrylist.length; ++i)
+ {
+ const mark_entrylist_by_name = performance.getEntriesByName(mark_entrylist[i].name, 'mark');
+ entrylist_checker.entrylist_check(mark_entrylist_by_name, 2, string_mark_names,
+ 'Second loop step ' + i + ': checking entries of name "' + mark_entrylist[i].name + '".');
+ }
+
+ done();
+}
+</script>
+</head>
+<body onload=onload_test()>
+ <h1>Description</h1>
+ <p>This test validates functionality of the interface window.performance.mark.</p>
+ <div id="log"></div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/mark_exceptions.html b/testing/web-platform/tests/user-timing/mark_exceptions.html
new file mode 100644
index 0000000000..b445c6b877
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/mark_exceptions.html
@@ -0,0 +1,41 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+ <title>window.performance User Timing mark() method is throwing the proper exceptions</title>
+ <link rel="author" title="Microsoft" href="http://www.microsoft.com/" />
+ <link rel="help" href="http://w3c.github.io/user-timing/#dom-performance-mark"/>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="resources/webperftestharness.js"></script>
+
+ <script>
+function test_exception(attrName) {
+ test(function () {
+ assert_throws_dom("SyntaxError", function () {
+ window.performance.mark(attrName);
+ })
+ }, "window.performance.mark(\"" + attrName + "\") throws a SyntaxError exception.");
+}
+
+test(() => {
+ assert_throws_js(TypeError, function() {
+ window.performance.mark();
+ });
+}, 'window.performance.mark() throws a TypeError exception.')
+
+// loop through mark scenarios
+for (var i in timingAttributes) {
+ test_exception(timingAttributes[i]);
+}
+ </script>
+ </head>
+ <body>
+ <h1>Description</h1>
+ <p>This test validates that the performance.mark() method throws a SYNTAX_ERR exception whenever a navigation
+ timing attribute is provided for the name parameter.
+ </p>
+
+ <div id="log"></div>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/measure-exceptions.html b/testing/web-platform/tests/user-timing/measure-exceptions.html
new file mode 100644
index 0000000000..2836eaee2a
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure-exceptions.html
@@ -0,0 +1,49 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>This tests that 'performance.measure' throws exceptions with reasonable messages.</title>
+</head>
+<body>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+ window.performance.clearMarks();
+ window.performance.clearMeasures();
+
+ window.performance.mark('mark');
+
+ const eventMarks = [
+ 'unloadEventStart',
+ 'unloadEventEnd',
+ 'redirectStart',
+ 'redirectEnd',
+ 'secureConnectionStart',
+ 'domInteractive',
+ 'domContentLoadedEventStart',
+ 'domContentLoadedEventEnd',
+ 'domComplete',
+ 'loadEventStart',
+ 'loadEventEnd',
+ ];
+ eventMarks.forEach(function(name) {
+ test(()=>{
+ assert_throws_dom("InvalidAccessError", ()=>{
+ window.performance.measure("measuring", name, "mark");
+ }, "Should throw");
+ }, `Passing '${name}' as a mark to the measure API should cause an error when that timing attribute's value is zero.`);
+ });
+
+ const args = [
+ 51.15, // A number is converted to a string and treated as a (non-existent) mark name.
+ "DoesNotExist", // Non-existent mark name should cause an error.
+ ];
+ args.forEach(each => {
+ test(()=>{
+ assert_throws_dom("SyntaxError", ()=>{
+ window.performance.measure("measuring", each, "mark");
+ }, "Should throw");
+ }, `Passing ${each} as a mark to the measure API should cause an error.`);
+ });
+</script>
+</body>
+</html>
\ No newline at end of file
diff --git a/testing/web-platform/tests/user-timing/measure-l3.any.js b/testing/web-platform/tests/user-timing/measure-l3.any.js
new file mode 100644
index 0000000000..642b55ab63
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure-l3.any.js
@@ -0,0 +1,35 @@
+// META: script=resources/user-timing-helper.js
+
+function endTime(entry) {
+ return entry.startTime + entry.duration;
+}
+
+test(function() {
+ performance.clearMarks();
+ performance.clearMeasures();
+ const markEntry = performance.mark("mark", {startTime: 123});
+ const measureEntry = performance.measure("A", undefined, "mark");
+ assert_equals(measureEntry.startTime, 0);
+ assert_equals(endTime(measureEntry), markEntry.startTime);
+}, "When the end mark is given and the start is unprovided, the end time of the measure entry should be the end mark's time, the start time should be 0.");
+
+test(function() {
+ performance.clearMarks();
+ performance.clearMeasures();
+ const markEntry = performance.mark("mark", {startTime: 123});
+ const endMin = Number(performance.now().toFixed(2));
+ const measureEntry = performance.measure("A", "mark", undefined);
+ const endMax = Number(performance.now().toFixed(2));
+ assert_equals(measureEntry.startTime, markEntry.startTime);
+ assert_greater_than_equal(Number(endTime(measureEntry).toFixed(2)), endMin);
+ assert_greater_than_equal(endMax, Number(endTime(measureEntry).toFixed(2)));
+}, "When the start mark is given and the end is unprovided, the start time of the measure entry should be the start mark's time, the end should be now.");
+
+test(function() {
+ performance.clearMarks();
+ performance.clearMeasures();
+ const markEntry = performance.mark("mark", {startTime: 123});
+ const measureEntry = performance.measure("A", "mark", "mark");
+ assert_equals(endTime(measureEntry), markEntry.startTime);
+ assert_equals(measureEntry.startTime, markEntry.startTime);
+}, "When start and end mark are both given, the start time and end time of the measure entry should be the the marks' time, repectively");
diff --git a/testing/web-platform/tests/user-timing/measure-with-dict.any.js b/testing/web-platform/tests/user-timing/measure-with-dict.any.js
new file mode 100644
index 0000000000..b452feb0de
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure-with-dict.any.js
@@ -0,0 +1,112 @@
+// META: script=resources/user-timing-helper.js
+
+function cleanupPerformanceTimeline() {
+ performance.clearMarks();
+ performance.clearMeasures();
+}
+
+async_test(function (t) {
+ this.add_cleanup(cleanupPerformanceTimeline);
+ let measureEntries = [];
+ const timeStamp1 = 784.4;
+ const timeStamp2 = 1234.5;
+ const timeStamp3 = 66.6;
+ const timeStamp4 = 5566;
+ const expectedEntries =
+ [{ entryType: "measure", name: "measure1", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure2", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure3", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure4", detail: null },
+ { entryType: "measure", name: "measure5", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure6", detail: null, startTime: timeStamp1 },
+ { entryType: "measure", name: "measure7", detail: null, startTime: timeStamp1, duration: timeStamp2 - timeStamp1 },
+ { entryType: "measure", name: "measure8", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure9", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure10", detail: null, startTime: timeStamp1 },
+ { entryType: "measure", name: "measure11", detail: null, startTime: timeStamp3 },
+ { entryType: "measure", name: "measure12", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure13", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure14", detail: null, startTime: timeStamp3, duration: timeStamp1 - timeStamp3 },
+ { entryType: "measure", name: "measure15", detail: null, startTime: timeStamp1, duration: timeStamp2 - timeStamp1 },
+ { entryType: "measure", name: "measure16", detail: null, startTime: timeStamp1 },
+ { entryType: "measure", name: "measure17", detail: { customInfo: 159 }, startTime: timeStamp3, duration: timeStamp2 - timeStamp3 },
+ { entryType: "measure", name: "measure18", detail: null, startTime: timeStamp1, duration: timeStamp2 - timeStamp1 },
+ { entryType: "measure", name: "measure19", detail: null, startTime: timeStamp1, duration: timeStamp2 - timeStamp1 },
+ { entryType: "measure", name: "measure20", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure21", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure22", detail: null, startTime: 0 },
+ { entryType: "measure", name: "measure23", detail: null, startTime: 0 }];
+ const observer = new PerformanceObserver(
+ t.step_func(function (entryList, obs) {
+ measureEntries =
+ measureEntries.concat(entryList.getEntries());
+ if (measureEntries.length >= expectedEntries.length) {
+ checkEntries(measureEntries, expectedEntries);
+ observer.disconnect();
+ t.done();
+ }
+ })
+ );
+ observer.observe({ entryTypes: ["measure"] });
+ self.performance.mark("mark1", { detail: { randomInfo: 3 }, startTime: timeStamp1 });
+ self.performance.mark("mark2", { startTime: timeStamp2 });
+
+ const returnedEntries = [];
+ returnedEntries.push(self.performance.measure("measure1"));
+ returnedEntries.push(self.performance.measure("measure2", undefined));
+ returnedEntries.push(self.performance.measure("measure3", null));
+ returnedEntries.push(self.performance.measure("measure4", 'mark1'));
+ returnedEntries.push(
+ self.performance.measure("measure5", null, 'mark1'));
+ returnedEntries.push(
+ self.performance.measure("measure6", 'mark1', undefined));
+ returnedEntries.push(
+ self.performance.measure("measure7", 'mark1', 'mark2'));
+ returnedEntries.push(
+ self.performance.measure("measure8", {}));
+ returnedEntries.push(
+ self.performance.measure("measure9", { start: undefined }));
+ returnedEntries.push(
+ self.performance.measure("measure10", { start: 'mark1' }));
+ returnedEntries.push(
+ self.performance.measure("measure11", { start: timeStamp3 }));
+ returnedEntries.push(
+ self.performance.measure("measure12", { end: undefined }));
+ returnedEntries.push(
+ self.performance.measure("measure13", { end: 'mark1' }));
+ returnedEntries.push(
+ self.performance.measure("measure14", { start: timeStamp3, end: 'mark1' }));
+ returnedEntries.push(
+ self.performance.measure("measure15", { start: timeStamp1, end: timeStamp2, detail: undefined }));
+ returnedEntries.push(
+ self.performance.measure("measure16", { start: 'mark1', end: undefined, detail: null }));
+ returnedEntries.push(
+ self.performance.measure("measure17", { start: timeStamp3, end: 'mark2', detail: { customInfo: 159 }}));
+ returnedEntries.push(
+ self.performance.measure("measure18", { start: timeStamp1, duration: timeStamp2 - timeStamp1 }));
+ returnedEntries.push(
+ self.performance.measure("measure19", { duration: timeStamp2 - timeStamp1, end: timeStamp2 }));
+ // {}, null, undefined, invalid-dict passed to startOrOptions are interpreted as start time being 0.
+ returnedEntries.push(self.performance.measure("measure20", {}, 'mark1'));
+ returnedEntries.push(self.performance.measure("measure21", null, 'mark1'));
+ returnedEntries.push(self.performance.measure("measure22", undefined, 'mark1'));
+ returnedEntries.push(self.performance.measure("measure23", { invalidDict:1 }, 'mark1'));
+ checkEntries(returnedEntries, expectedEntries);
+}, "measure entries' detail and start/end are customizable");
+
+test(function() {
+ this.add_cleanup(cleanupPerformanceTimeline);
+ assert_throws_js(TypeError, function() {
+ self.performance.measure("optionsAndNumberEnd", {'start': 2}, 12);
+ }, "measure should throw a TypeError when passed an options object and an end time");
+ assert_throws_js(TypeError, function() {
+ self.performance.measure("optionsAndMarkEnd", {'start': 2}, 'mark1');
+ }, "measure should throw a TypeError when passed an options object and an end mark");
+ assert_throws_js(TypeError, function() {
+ self.performance.measure("negativeStartInOptions", {'start': -1});
+ }, "measure cannot have a negative time stamp.");
+ assert_throws_js(TypeError, function() {
+ self.performance.measure("negativeEndInOptions", {'end': -1});
+ }, "measure cannot have a negative time stamp for end.");
+}, "measure should throw a TypeError when passed an invalid argument combination");
+
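The options form of measure() accepts a mark name or timestamp for start and end, or either of them together with duration, plus an optional detail; combining an options object with a positional end argument, or using a negative timestamp, is rejected with a TypeError. A short usage sketch of accepted combinations (timestamps illustrative):

    performance.mark('begin', { startTime: 100 });

    performance.measure('span1', { start: 'begin', end: 250 });
    performance.measure('span2', { start: 100, duration: 150 });
    performance.measure('span3', { duration: 150, end: 250 });
    performance.measure('span4', { start: 'begin', detail: { tag: 'demo' } });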
diff --git a/testing/web-platform/tests/user-timing/measure.html b/testing/web-platform/tests/user-timing/measure.html
new file mode 100644
index 0000000000..40f71a3362
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure.html
@@ -0,0 +1,362 @@
+
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+ <title>window.performance User Timing measure() method is working properly</title>
+ <link rel="author" title="Microsoft" href="http://www.microsoft.com/" />
+ <link rel="help" href="https://w3c.github.io/user-timing/#dom-performance-measure"/>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/common/performance-timeline-utils.js"></script>
+ <script src="resources/webperftestharness.js"></script>
+
+ <script>
+ // test data
+ var startMarkName = "mark_start";
+ var startMarkValue;
+ var endMarkName = "mark_end";
+ var endMarkValue;
+ var measures;
+ var testThreshold = 20;
+
+ // test measures
+ var measureTestDelay = 200;
+ var TEST_MEASURES =
+ [
+ {
+ name: "measure_no_start_no_end",
+ startMark: undefined,
+ endMark: undefined,
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ {
+ name: "measure_start_no_end",
+ startMark: "mark_start",
+ endMark: undefined,
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ {
+ name: "measure_start_end",
+ startMark: "mark_start",
+ endMark: "mark_end",
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ {
+ name: "measure_no_start_end",
+ startMark: undefined,
+ endMark: "mark_end",
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ },
+ // intentional duplicate of the first measure, used to confirm names can be re-used
+ {
+ name: "measure_no_start_no_end",
+ startMark: undefined,
+ endMark: undefined,
+ startTime: undefined,
+ duration: undefined,
+ entryType: "measure",
+ entryMatch: undefined,
+ order: undefined,
+ found: false
+ }
+ ];
+ // the index of the duplicate "measure_no_start_no_end"
+ const duplicate_index = TEST_MEASURES.map(m=>m.name).lastIndexOf('measure_no_start_no_end');
+
+ setup({explicit_done: true});
+
+ test_namespace();
+
+ function onload_test()
+ {
+ // test for existence of User Timing and Performance Timeline interface
+ if (!has_required_interfaces())
+ {
+ test_true(false,
+ "The User Timing and Performance Timeline interfaces, which are required for this test, " +
+ "are defined.");
+
+ done();
+ }
+ else
+ {
+ // create the start mark for the test measures
+ window.performance.mark(startMarkName);
+
+ // get the start mark's value
+ startMarkValue = window.performance.getEntriesByName(startMarkName)[0].startTime;
+
+ // create the test end mark using the test delay; this will allow for a significant difference between
+ // the mark values that should be represented in the duration of measures using these marks
+ step_timeout(measure_test_cb, measureTestDelay);
+ }
+ }
+
+ function measure_test_cb()
+ {
+ // create the end mark for the test measures
+ window.performance.mark(endMarkName);
+
+ // get the end mark's value
+ endMarkValue = window.performance.getEntriesByName(endMarkName)[0].startTime;
+
+ // loop through all measure scenarios and create the corresponding measures
+ for (var i in TEST_MEASURES)
+ {
+ var scenario = TEST_MEASURES[i];
+
+ if (scenario.startMark == undefined && scenario.endMark == undefined)
+ {
+ // both startMark and endMark are undefined, don't provide either parameters
+ window.performance.measure(scenario.name);
+
+ // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
+ // to the navigationStart attribute with a timebase of the same attribute is used; this is
+ // equivalent to 0
+ scenario.startTime = 0;
+
+ // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
+ // the current time with a timebase of the navigationStart attribute is used
+ scenario.duration = (new Date()) - window.performance.timing.navigationStart;
+ }
+ else if (scenario.startMark != undefined && scenario.endMark == undefined)
+ {
+ // only startMark is defined, provide startMark and don't provide endMark
+ window.performance.measure(scenario.name, scenario.startMark);
+
+ // when startMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the startMark
+ scenario.startTime = startMarkValue;
+
+ // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
+ // the current time with a timebase of the navigationStart attribute is used
+ scenario.duration = window.performance.now() -
+ startMarkValue;
+ }
+ else if (scenario.startMark != undefined && scenario.endMark != undefined)
+ {
+ // both startMark and endMark are defined, provide both parameters
+ window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);
+
+ // when startMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the startMark
+ scenario.startTime = startMarkValue;
+
+ // when endMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the endMark
+ scenario.duration = endMarkValue - startMarkValue;
+ }
+ else if (scenario.startMark == undefined && scenario.endMark != undefined)
+ {
+ // endMark is defined but startMark is undefined, provide both parameters
+ window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);
+
+ // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
+ // to the navigationStart attribute with a timebase of the same attribute is used; this is
+ // equivalent to 0
+ scenario.startTime = 0;
+
+ // when endMark is provided to the measure() call, the value of the mark whose name is
+ // provided is used for the endMark
+ scenario.duration = endMarkValue;
+            }
+            else
+ {
+ test_true(false, 'Test measure scenario unhandled');
+ }
+ }
+
+ // test that expected measures are returned by getEntriesByName
+ for (var i in TEST_MEASURES)
+ {
+ entries = window.performance.getEntriesByName(TEST_MEASURES[i].name);
+            // for all test measures, validate the test measure against the first entry returned by
+            // getEntriesByName(); for the last measure, which is a duplicate, validate it against the
+            // second entry returned by getEntriesByName()
+ test_measure(entries[(i == duplicate_index ? 1 : 0)],
+ "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\")[" +
+ (i == duplicate_index ? 1 : 0) + "]",
+ TEST_MEASURES[i].name,
+ TEST_MEASURES[i].startTime,
+ TEST_MEASURES[i].duration);
+ TEST_MEASURES[i].entryMatch = entries[(i == duplicate_index ? 1 : 0)];
+ }
+
+ // test that expected measures are returned by getEntriesByName with the entryType parameter provided
+ for (var i in TEST_MEASURES)
+ {
+ entries = window.performance.getEntriesByName(TEST_MEASURES[i].name, "measure");
+
+ test_true(match_entries(entries[(i == duplicate_index ? 1 : 0)], TEST_MEASURES[i].entryMatch),
+ "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\", \"measure\")[" +
+ (i == duplicate_index ? 1 : 0) + "] returns an object containing the \"" + TEST_MEASURES[i].name +
+ "\" measure in the correct order, and its value matches the \"" + TEST_MEASURES[i].name +
+ "\" measure returned by window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name +
+ "\")");
+ }
+
+ // test that expected measures are returned by getEntries
+ entries = get_test_entries(window.performance.getEntries(), "measure");
+
+ test_measure_list(entries, "window.performance.getEntries()", TEST_MEASURES);
+
+ // test that expected measures are returned by getEntriesByType
+ entries = window.performance.getEntriesByType("measure");
+
+ test_measure_list(entries, "window.performance.getEntriesByType(\"measure\")", TEST_MEASURES);
+
+ done();
+ }
+
+ function match_entries(entry1, entry2, threshold)
+ {
+        // when no explicit threshold is provided, fall back to the shared test threshold
+        if (threshold == undefined)
+        {
+            threshold = testThreshold;
+        }
+
+        var pass = true;
+
+        // match name
+        pass = pass && (entry1.name == entry2.name);
+
+        // match startTime
+        pass = pass && (Math.abs(entry1.startTime - entry2.startTime) <= threshold);
+
+        // match entryType
+        pass = pass && (entry1.entryType == entry2.entryType);
+
+        // match duration
+        pass = pass && (Math.abs(entry1.duration - entry2.duration) <= threshold);
+
+ return pass;
+ }
+
+ function test_measure(measureEntry, measureEntryCommand, expectedName, expectedStartTime, expectedDuration)
+ {
+ // test name
+ test_true(measureEntry.name == expectedName, measureEntryCommand + ".name == \"" + expectedName + "\"");
+
+        // test startTime; since a measure's startTime is always equal to its start mark's value or the value of a
+        // navigation timing attribute, the actual startTime should match the expected value exactly
+ test_true(Math.abs(measureEntry.startTime - expectedStartTime) == 0,
+ measureEntryCommand + ".startTime is correct");
+
+ // test entryType
+ test_true(measureEntry.entryType == "measure", measureEntryCommand + ".entryType == \"measure\"");
+
+ // test duration, allow for an acceptable threshold in the difference between the actual duration and the
+ // expected value for the duration
+ test_true(Math.abs(measureEntry.duration - expectedDuration) <= testThreshold, measureEntryCommand +
+ ".duration is approximately correct (up to " + testThreshold + "ms difference allowed)");
+ }
+
+ function test_measure_list(measureEntryList, measureEntryListCommand, measureScenarios)
+ {
+ // give all entries a "found" property that can be set to ensure it isn't tested twice
+ for (var i in measureEntryList)
+ {
+ measureEntryList[i].found = false;
+ }
+
+ for (var i in measureScenarios)
+ {
+ measureScenarios[i].found = false;
+
+ for (var j in measureEntryList)
+ {
+ if (match_entries(measureEntryList[j], measureScenarios[i]) && !measureEntryList[j].found)
+ {
+ test_true(match_entries(measureEntryList[j], measureScenarios[i].entryMatch),
+ measureEntryListCommand + " returns an object containing the \"" +
+                        measureScenarios[i].name + "\" measure, and its value matches the measure " +
+ "returned by window.performance.getEntriesByName(\"" + measureScenarios[i].name +
+ "\")[" + (i == duplicate_index ? 1 : 0) + "].");
+
+ measureEntryList[j].found = true;
+ measureScenarios[i].found = true;
+ break;
+ }
+ }
+
+ if (!measureScenarios[i].found)
+ {
+ test_true(false,
+ measureEntryListCommand + " returns an object containing the \"" +
+ measureScenarios[i].name + "\" measure.");
+ }
+ }
+
+ // verify order of output of getEntriesByType
+ var startTimeCurr = 0;
+ var pass = true;
+ for (var i in measureEntryList)
+ {
+ if (measureEntryList[i].startTime < startTimeCurr)
+ {
+ pass = false;
+ }
+ startTimeCurr = measureEntryList[i].startTime;
+ }
+ test_true(pass,
+ measureEntryListCommand + " returns an object containing all test " +
+ "measures in order.");
+ }
+
+ function get_test_entries(entryList, entryType)
+ {
+ var testEntries = new Array();
+
+ // filter entryList
+ for (var i in entryList)
+ {
+ if (entryList[i].entryType == entryType)
+ {
+ testEntries.push(entryList[i]);
+ }
+ }
+
+ return testEntries;
+ }
+ </script>
+ </head>
+ <body onload="onload_test();">
+ <h1>Description</h1>
+ <p>This test validates that the performance.measure() method is working properly. This test creates the
+ following measures to test this method:
+ <ul>
+ <li>"measure_no_start_no_end": created using a measure() call without a startMark or endMark
+ provided</li>
+ <li>"measure_start_no_end": created using a measure() call with only the startMark provided</li>
+        <li>"measure_start_end": created using a measure() call with both a startMark and endMark provided</li>
+ <li>"measure_no_start_end": created using a measure() call with only the endMark provided</li>
+ <li>"measure_no_start_no_end": duplicate of the first measure, used to confirm names can be re-used</li>
+ </ul>
+ After creating each measure, the existence of these measures is validated by calling
+ performance.getEntriesByName() (both with and without the entryType parameter provided),
+ performance.getEntriesByType(), and performance.getEntries()
+ </p>
+
+ <div id="log"></div>
+ </body>
+</html>
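
A minimal sketch of the measure() defaults that the expectations in measure_test_cb() above rely on, assuming ordinary page code (the mark name is made up):

    performance.mark('work-start');
    // ... do some work ...
    performance.measure('since-navigation');           // no marks given: start = 0, end = now
    performance.measure('since-work', 'work-start');   // start = the mark's value, end = now
    const m = performance.getEntriesByName('since-work')[0];
    // m.startTime equals the 'work-start' mark's startTime, and m.duration is roughly
    // performance.now() - m.startTime.
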
diff --git a/testing/web-platform/tests/user-timing/measure_associated_with_navigation_timing.html b/testing/web-platform/tests/user-timing/measure_associated_with_navigation_timing.html
new file mode 100644
index 0000000000..a874ad9172
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure_associated_with_navigation_timing.html
@@ -0,0 +1,66 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<title>functionality test of window.performance.measure</title>
+<link rel="author" title="Intel" href="http://www.intel.com/" />
+<link rel="help" href="http://www.w3.org/TR/user-timing/#extensions-performance-interface"/>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/performance-timeline-utils.js"></script>
+<script src="resources/webperftestharness.js"></script>
+<script src="resources/webperftestharnessextension.js"></script>
+<script>
+setup({ explicit_done: true });
+
+function onload_test()
+{
+ const measures_for_timing_order = [
+ ['nav2now', 'navigationStart'],
+ ['loadTime', 'navigationStart', 'loadEventEnd'],
+ ['loadEventEnd2a', 'loadEventEnd', 'abc'],
+ ['nav2a', 'navigationStart', 'abc'],
+ ['domComplete2a', 'domComplete', 'abc'],
+ ['negativeValue', 1, 'navigationStart'],
+ ];
+ const context = new PerformanceContext(window.performance);
+
+ mark_names.forEach(function(name) {
+ context.mark(name);
+ });
+ measures_for_timing_order.forEach(context.initialMeasures, context);
+    test_greater_than(context.getEntriesByName('nav2now', 'measure')[0].duration, 0, 'Measure of navigationStart to now should be a positive value.');
+    test_greater_than(context.getEntriesByName('loadTime', 'measure')[0].duration, 0, 'Measure of navigationStart to loadEventEnd should be a positive value.');
+    test_greater_than(0, context.getEntriesByName('negativeValue', 'measure')[0].duration, 'Measure of the current mark to navigationStart should be a negative value.');
+    test_equals(context.getEntriesByName('loadTime', 'measure')[0].duration + context.getEntriesByName('loadEventEnd2a', 'measure')[0].duration, context.getEntriesByName('nav2a', 'measure')[0].duration, 'loadTime plus loadEventEnd to mark "abc" should equal navigationStart to mark "abc".');
+
+ // We later assert that time has passed between setting one set of marks and another set.
+ // However, this assertion will fail if the test executes fast enough such that the marks occur
+ // at the same clock time. This is more likely in browsers such as Firefox that reduce the
+ // precision of the clock exposed through this API to mitigate timing attacks. To mitigate the
+ // test failure, we sleep. Firefox may round timestamps to the nearest millisecond in either
+ // direction - e.g. 10ms & 11.999ms may both round to 11ms - so we need to sleep at least 2ms to
+ // avoid test failures. To be safe, we sleep 3ms.
+ sleep_milliseconds(3);
+
+    // The following cases test scenarios where measure names are used twice.
+ mark_names.forEach(function(name) {
+ context.mark(name);
+ });
+ measures_for_timing_order.forEach(context.initialMeasures, context);
+
+    test_greater_than(context.getEntriesByName('nav2now', 'measure')[1].duration, context.getEntriesByName('nav2now', 'measure')[0].duration, 'Second measure of navigationStart to now should have a longer duration than the first.');
+    test_equals(context.getEntriesByName('loadTime', 'measure')[0].duration, context.getEntriesByName('loadTime', 'measure')[1].duration, 'Measures of loadTime should have the same duration.');
+    test_greater_than(context.getEntriesByName('domComplete2a', 'measure')[1].duration, context.getEntriesByName('domComplete2a', 'measure')[0].duration, 'Second measure from domComplete to the most recent mark "abc" should have a longer duration.');
+    test_greater_than(context.getEntriesByName('negativeValue', 'measure')[0].duration, context.getEntriesByName('negativeValue', 'measure')[1].duration, 'Second measure from the most recent mark to navigationStart should have a smaller (more negative) duration.');
+
+ done();
+}
+</script>
+</head>
+<body onload="setTimeout(onload_test,0)">
+ <h1>Description</h1>
+ <p>This test validates functionality of the interface window.performance.measure using keywords from the Navigation Timing spec.</p>
+ <div id="log"></div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/measure_exception.html b/testing/web-platform/tests/user-timing/measure_exception.html
new file mode 100644
index 0000000000..5c1aa086c0
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure_exception.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<title>exception test of window.performance.measure</title>
+<link rel="author" title="Intel" href="http://www.intel.com/" />
+<link rel="help" href="http://www.w3.org/TR/user-timing/#extensions-performance-interface"/>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/performance-timeline-utils.js"></script>
+<script src="resources/webperftestharness.js"></script>
+<script src="resources/webperftestharnessextension.js"></script>
+</head>
+
+<body>
+<h1>Description</h1>
+<p>This test validates all exception scenarios of method window.performance.measure in User Timing API</p>
+
+<div id="log"></div>
+<script>
+performance.mark('ExistMark');
+test_method_throw_exception('performance.measure()', TypeError);
+test_method_throw_exception('performance.measure("Exception1", "NonExistMark1")', 'SYNTAX_ERR');
+test_method_throw_exception('performance.measure("Exception2", "NonExistMark1", "navigationStart")', 'SYNTAX_ERR');
+test_method_throw_exception('performance.measure("Exception3", "navigationStart", "NonExistMark1")', 'SYNTAX_ERR');
+test_method_throw_exception('performance.measure("Exception4", "NonExistMark1", "ExistMark")', 'SYNTAX_ERR');
+test_method_throw_exception('performance.measure("Exception5", "ExistMark", "NonExistMark1")', 'SYNTAX_ERR');
+test_method_throw_exception('performance.measure("Exception6", "NonExistMark1", "NonExistMark2")', 'SYNTAX_ERR');
+test_method_throw_exception('performance.measure("Exception7", "redirectStart")', 'INVALID_ACCESS_ERR');
+test_method_throw_exception('performance.measure("Exception8", {"detail": "non-empty"})', TypeError);
+test_method_throw_exception('performance.measure("Exception9", {"start": 1, "duration": 2, "end": 3})', TypeError);
+</script>
+</body>
+</html>
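
A short illustrative sketch of the error classes exercised above, assuming a page that was not redirected (so redirectStart is 0); the measure names here are made up:

    try { performance.measure('m1', 'no-such-mark'); }                    // undefined mark name
    catch (e) { console.assert(e.name === 'SyntaxError'); }
    try { performance.measure('m2', 'redirectStart'); }                   // navigation timing attribute with value 0
    catch (e) { console.assert(e.name === 'InvalidAccessError'); }
    try { performance.measure('m3', { start: 1, duration: 2, end: 3 }); } // start, duration and end all provided
    catch (e) { console.assert(e instanceof TypeError); }
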
diff --git a/testing/web-platform/tests/user-timing/measure_exceptions_navigation_timing.html b/testing/web-platform/tests/user-timing/measure_exceptions_navigation_timing.html
new file mode 100644
index 0000000000..b1868b2cb6
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure_exceptions_navigation_timing.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+ <title>window.performance User Timing measure() method is throwing the proper exceptions</title>
+ <link rel="author" title="Microsoft" href="http://www.microsoft.com/" />
+ <link rel="help" href="https://w3c.github.io/user-timing/#dom-performance-measure"/>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="resources/webperftestharness.js"></script>
+
+ <script>
+// test data
+var zeroedNavTimingAtt = undefined;
+
+setup(function () {
+ // for testing the INVALID_ACCESS_ERR exception, find a navigation timing attribute with a value of zero
+ for (var i in timingAttributes) {
+ if (window.performance.timing[timingAttributes[i]] == 0) {
+ zeroedNavTimingAtt = timingAttributes[i];
+ }
+ }
+ if (zeroedNavTimingAtt == undefined) {
+ throw new Error("A navigation timing attribute with a value of 0 was not found to test for the " +
+ "INVALID_ACCESS_ERR exception thrown by window.performance.measure().")
+ }
+});
+
+test(function () {
+ assert_throws_dom("InvalidAccessError", function () {
+ window.performance.measure("measure", zeroedNavTimingAtt);
+ });
+}, "window.performance.measure(\"measure\", \"" + zeroedNavTimingAtt + "\"), where \"" +
+    zeroedNavTimingAtt + "\" is a navigation timing attribute with a value of 0, throws an " +
+ "InvalidAccessError exception.");
+
+test(function () {
+ assert_throws_dom("InvalidAccessError", function () {
+ window.performance.measure("measure", zeroedNavTimingAtt, "responseEnd");
+ });
+}, "window.performance.measure(\"measure\", \"" + zeroedNavTimingAtt + "\", " +
+ "\"responseEnd\"), where \"" + zeroedNavTimingAtt + "\" is a navigation timing " +
+    "attribute with a value of 0, throws an InvalidAccessError exception.");
+
+test(function () {
+ assert_throws_dom("InvalidAccessError", function () {
+ window.performance.measure("measure", "navigationStart", zeroedNavTimingAtt);
+ });
+}, "window.performance.measure(\"measure\", \"navigationStart\", \"" + zeroedNavTimingAtt +
+ "\"), where \"" + zeroedNavTimingAtt + "\" is a navigation timing attribute with a " +
+    "value of 0, throws an InvalidAccessError exception.");
+
+test(function () {
+ assert_throws_dom("InvalidAccessError", function () {
+ window.performance.measure("measure", zeroedNavTimingAtt, zeroedNavTimingAtt);
+ });
+}, "window.performance.measure(\"measure\", \"" + zeroedNavTimingAtt + "\", \"" +
+ zeroedNavTimingAtt + "\"), where \"" + zeroedNavTimingAtt + "\" is a navigation timing " +
+    "attribute with a value of 0, throws an InvalidAccessError exception.");
+ </script>
+ </head>
+ <body>
+ <h1>Description</h1>
+    <p>The <code>window.performance.measure()</code> method throws an InvalidAccessError
+ whenever a navigation timing attribute with a value of zero is provided as the startMark or endMark.
+ </p>
+
+ <div id="log"></div>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/measure_navigation_timing.html b/testing/web-platform/tests/user-timing/measure_navigation_timing.html
new file mode 100644
index 0000000000..d6480d27a2
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure_navigation_timing.html
@@ -0,0 +1,205 @@
+
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+        <title>window.performance User Timing measure() method is working properly with navigation timing
+ attributes</title>
+ <link rel="author" title="Microsoft" href="http://www.microsoft.com/" />
+ <link rel="help" href="https://w3c.github.io/user-timing/#dom-performance-measure"/>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/common/performance-timeline-utils.js"></script>
+ <script src="resources/webperftestharness.js"></script>
+
+ <script>
+ // test data
+ var startMarkName = "mark_start";
+ var startMarkValue;
+ var endMarkName = "mark_end";
+ var endMarkValue;
+ var measures;
+ var testThreshold = 20;
+
+ // test measures
+            var measureTestDelay = 200;
+ var TEST_MEASURES =
+ [
+ {
+ name: "measure_nav_start_no_end",
+ startMark: "navigationStart",
+ endMark: undefined,
+ exceptionTestMessage: "window.performance.measure(\"measure_nav_start_no_end\", " +
+ "\"navigationStart\") ran without throwing any exceptions.",
+ expectedStartTime: undefined,
+ expectedDuration: undefined,
+ entryMatch: undefined
+ },
+ {
+ name: "measure_nav_start_mark_end",
+ startMark: "navigationStart",
+ endMark: "mark_end",
+                    exceptionTestMessage: "window.performance.measure(\"measure_nav_start_mark_end\", \"navigationStart\", " +
+ "\"mark_end\") ran without throwing any exceptions.",
+ expectedStartTime: undefined,
+ expectedDuration: undefined,
+ entryMatch: undefined
+ },
+ {
+ name: "measure_mark_start_nav_end",
+ startMark: "mark_start",
+ endMark: "responseEnd",
+                    exceptionTestMessage: "window.performance.measure(\"measure_mark_start_nav_end\", \"mark_start\", " +
+ "\"responseEnd\") ran without throwing any exceptions.",
+ expectedStartTime: undefined,
+ expectedDuration: undefined,
+ entryMatch: undefined
+ },
+ {
+ name: "measure_nav_start_nav_end",
+ startMark: "navigationStart",
+ endMark: "responseEnd",
+ exceptionTestMessage: "window.performance.measure(\"measure_nav_start_nav_end\", " +
+ "\"navigationStart\", \"responseEnd\") ran without throwing any exceptions.",
+ expectedStartTime: undefined,
+ expectedDuration: undefined,
+ entryMatch: undefined
+ }
+ ];
+
+ setup({explicit_done: true});
+
+ test_namespace();
+
+ function onload_test()
+ {
+ // test for existence of User Timing and Performance Timeline interface
+ if (!has_required_interfaces())
+ {
+ test_true(false,
+ "The User Timing and Performance Timeline interfaces, which are required for this test, " +
+ "are defined.");
+
+ done();
+ }
+ else
+ {
+ // create the start mark for the test measures
+ window.performance.mark(startMarkName);
+
+ // get the start mark's value
+ startMarkValue = window.performance.getEntriesByName(startMarkName)[0].startTime;
+
+ // create the test end mark using the test delay; this will allow for a significant difference between
+ // the mark values that should be represented in the duration of measures using these marks
+ step_timeout(measure_test_cb, measureTestDelay);
+ }
+ }
+
+ function measure_test_cb()
+ {
+ // create the end mark for the test measures
+ window.performance.mark(endMarkName);
+
+ // get the end mark's value
+ endMarkValue = window.performance.getEntriesByName(endMarkName)[0].startTime;
+
+ // loop through measure scenarios
+ for (var i in TEST_MEASURES)
+ {
+ var scenario = TEST_MEASURES[i];
+
+ if (scenario.startMark != undefined && scenario.endMark == undefined)
+ {
+ // only startMark is defined, provide startMark and don't provide endMark
+ window.performance.measure(scenario.name, scenario.startMark);
+
+ // when startMark is provided to the measure() call, the value of the mark or navigation
+ // timing attribute whose name is provided is used for the startMark
+ scenario.expectedStartTime = (timingAttributes.indexOf(scenario.startMark) != -1 ?
+ window.performance.timing[scenario.startMark] -
+ window.performance.timing.navigationStart :
+ startMarkValue);
+
+ // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
+ // the current time with a timebase of the navigationStart attribute is used
+ scenario.expectedDuration = ((new Date()) - window.performance.timing.navigationStart) -
+ scenario.expectedStartTime;
+ }
+ else if (scenario.startMark != undefined && scenario.endMark != undefined)
+ {
+ // both startMark and endMark are defined, provide both parameters
+ window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);
+
+ // when startMark is provided to the measure() call, the value of the mark or navigation
+ // timing attribute whose name is provided is used for the startMark
+ scenario.expectedStartTime = (timingAttributes.indexOf(scenario.startMark) != -1 ?
+ window.performance.timing[scenario.startMark] -
+ window.performance.timing.navigationStart :
+ startMarkValue);
+
+                    // when endMark is provided to the measure() call, the value of the mark or navigation
+                    // timing attribute whose name is provided is used for the endMark
+ scenario.expectedDuration = (timingAttributes.indexOf(scenario.endMark) != -1 ?
+ window.performance.timing[scenario.endMark] -
+ window.performance.timing.navigationStart :
+ endMarkValue) - scenario.expectedStartTime;
+ }
+ }
+
+ // test the test measures are returned by getEntriesByName
+ for (var i in TEST_MEASURES)
+ {
+ entries = window.performance.getEntriesByName(TEST_MEASURES[i].name);
+ test_measure(entries[0],
+ "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\")[0]",
+ TEST_MEASURES[i].name,
+ TEST_MEASURES[i].expectedStartTime,
+ TEST_MEASURES[i].expectedDuration);
+ TEST_MEASURES[i].entryMatch = entries[0];
+ }
+
+ done();
+ }
+
+ function test_measure(measureEntry, measureEntryCommand, expectedName, expectedStartTime, expectedDuration)
+ {
+ // test name
+ test_true(measureEntry.name == expectedName, measureEntryCommand + ".name == \"" + expectedName + "\"");
+
+            // test startTime; since a measure's startTime is always equal to its start mark's value or the value
+            // of a navigation timing attribute, the actual startTime should match the expected value exactly
+ test_true(Math.abs(measureEntry.startTime - expectedStartTime) == 0,
+ measureEntryCommand + ".startTime is correct");
+
+ // test entryType
+ test_true(measureEntry.entryType == "measure", measureEntryCommand + ".entryType == \"measure\"");
+
+ // test duration, allow for an acceptable threshold in the difference between the actual duration and the
+ // expected value for the duration
+ test_true(Math.abs(measureEntry.duration - expectedDuration) <= testThreshold, measureEntryCommand +
+ ".duration is approximately correct (up to " + testThreshold + "ms difference allowed)");
+ }
+ </script>
+ </head>
+ <body onload="onload_test();">
+ <h1>Description</h1>
+ <p>This test validates that the performance.measure() method is working properly when navigation timing
+ attributes are used in place of mark names. This test creates the following measures to test this method:
+ <ul>
+ <li>"measure_nav_start_no_end": created using a measure() call with a navigation timing attribute
+ provided as the startMark and nothing provided as the endMark</li>
+ <li>"measure_nav_start_mark_end": created using a measure() call with a navigation timing attribute
+ provided as the startMark and a mark name provided as the endMark</li>
+ <li>"measure_mark_start_nav_end": created using a measure() call with a mark name provided as the
+ startMark and a navigation timing attribute provided as the endMark</li>
+        <li>"measure_nav_start_nav_end": created using a measure() call with a navigation timing attribute
+ provided as both the startMark and endMark</li>
+ </ul>
+ After creating each measure, the existence of these measures is validated by calling
+ performance.getEntriesByName() with each measure name
+ </p>
+
+ <div id="log"></div>
+ </body>
+</html>
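
A minimal sketch of the expectation computed in measure_test_cb() above when navigation timing attributes are used as mark names, assuming ordinary page code:

    performance.measure('ttfb', 'navigationStart', 'responseStart');
    const entry = performance.getEntriesByName('ttfb')[0];
    // entry.startTime is navigationStart relative to itself, i.e. 0, and entry.duration is roughly
    // performance.timing.responseStart - performance.timing.navigationStart.
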
diff --git a/testing/web-platform/tests/user-timing/measure_syntax_err.any.js b/testing/web-platform/tests/user-timing/measure_syntax_err.any.js
new file mode 100644
index 0000000000..9b762a4090
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measure_syntax_err.any.js
@@ -0,0 +1,33 @@
+test(function () {
+ self.performance.mark("existing_mark");
+ var entries = self.performance.getEntriesByName("existing_mark");
+ assert_equals(entries.length, 1);
+ self.performance.measure("measure", "existing_mark");
+}, "Create a mark \"existing_mark\"");
+test(function () {
+ assert_throws_dom("SyntaxError", function () {
+ self.performance.measure("measure", "mark");
+ });
+}, "self.performance.measure(\"measure\", \"mark\"), where \"mark\" is a non-existent mark, " +
+ "throws a SyntaxError exception.");
+
+test(function () {
+ assert_throws_dom("SyntaxError", function () {
+ self.performance.measure("measure", "mark", "existing_mark");
+ });
+}, "self.performance.measure(\"measure\", \"mark\", \"existing_mark\"), where \"mark\" is a " +
+ "non-existent mark, throws a SyntaxError exception.");
+
+test(function () {
+ assert_throws_dom("SyntaxError", function () {
+ self.performance.measure("measure", "existing_mark", "mark");
+ });
+}, "self.performance.measure(\"measure\", \"existing_mark\", \"mark\"), where \"mark\" " +
+ "is a non-existent mark, throws a SyntaxError exception.");
+
+test(function () {
+ assert_throws_dom("SyntaxError", function () {
+ self.performance.measure("measure", "mark", "mark");
+ });
+}, "self.performance.measure(\"measure\", \"mark\", \"mark\"), where \"mark\" is a " +
+ "non-existent mark, throws a SyntaxError exception.");
diff --git a/testing/web-platform/tests/user-timing/measures.html b/testing/web-platform/tests/user-timing/measures.html
new file mode 100644
index 0000000000..0de68965dd
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/measures.html
@@ -0,0 +1,66 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<title>functionality test of window.performance.measure</title>
+<link rel="author" title="Intel" href="http://www.intel.com/" />
+<link rel="help" href="http://www.w3.org/TR/user-timing/#extensions-performance-interface"/>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/performance-timeline-utils.js"></script>
+<script src="resources/webperftestharness.js"></script>
+<script src="resources/webperftestharnessextension.js"></script>
+<script>
+setup({ explicit_done: true });
+
+function onload_test()
+{
+ const context = new PerformanceContext(window.performance);
+ const entrylist_checker = new performance_entrylist_checker('measure');
+ const measure_names = measures.map(function(x) {return x[0];});
+
+    test_equals(context.getEntriesByType('measure').length, 0, 'There should be 0 entries returned.');
+
+ mark_names.forEach(function(name) {
+ context.mark(name);
+ });
+ measures.forEach(context.initialMeasures, context);
+
+ let measure_entrylist = context.getEntriesByType('measure');
+ entrylist_checker.entrylist_check(measure_entrylist, measures.length, measure_names,
+ 'Checking all entries.');
+
+ for (let i = 0; i < measure_entrylist.length; ++i)
+ {
+ const measure_entrylist_by_name = context.getEntriesByName(measure_entrylist[i].name, 'measure');
+ entrylist_checker.entrylist_check(measure_entrylist_by_name, 1, measure_names,
+ 'First loop: checking entry of name "' + measure_entrylist[i].name + '".');
+ }
+
+    // The following cases test scenarios where measure names are used twice.
+ mark_names.forEach(function(name) {
+ context.mark(name);
+ });
+ measures.forEach(context.initialMeasures, context);
+
+ measure_entrylist = context.getEntriesByType('measure');
+ entrylist_checker.entrylist_check(measure_entrylist, measures.length * 2, measure_names,
+ 'Checking all doubly measured entries.');
+
+ for (let i = 0; i < measure_entrylist.length; ++i)
+ {
+ const measure_entrylist_by_name = context.getEntriesByName(measure_entrylist[i].name, 'measure');
+ entrylist_checker.entrylist_check(measure_entrylist_by_name, 2, measure_names,
+ 'Second loop step ' + i + ': checking entry of name "' + measure_entrylist[i].name + '".');
+ }
+
+ done();
+}
+</script>
+</head>
+<body onload=onload_test()>
+ <h1>Description</h1>
+ <p>This test validates functionality of the interface window.performance.measure.</p>
+ <div id="log"></div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/performance-measure-invalid.worker.js b/testing/web-platform/tests/user-timing/performance-measure-invalid.worker.js
new file mode 100644
index 0000000000..bab3c35dcb
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/performance-measure-invalid.worker.js
@@ -0,0 +1,16 @@
+importScripts("/resources/testharness.js");
+
+test(() => {
+ assert_throws_js(TypeError, () => {
+ performance.measure('name', 'navigationStart', 'navigationStart');
+ });
+}, "When converting 'navigationStart' to a timestamp, the global object has to be a Window object.");
+
+test(() => {
+ assert_throws_js(TypeError, () => {
+ performance.mark('navigationStart');
+ performance.measure('name', 'navigationStart', 'navigationStart');
+ });
+}, "When converting 'navigationStart' to a timestamp and a mark named 'navigationStart' exists, the global object has to be a Window object.");
+
+done();
diff --git a/testing/web-platform/tests/user-timing/resources/user-timing-helper.js b/testing/web-platform/tests/user-timing/resources/user-timing-helper.js
new file mode 100644
index 0000000000..8d43768ec2
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/resources/user-timing-helper.js
@@ -0,0 +1,30 @@
+// Compares a list of performance entries to a predefined one.
+// actualEntries is an array of performance entries from the user agent,
+// and expectedEntries is an array of performance entries minted by the test.
+// The comparison doesn't assert the order of the entries.
+function checkEntries(actualEntries, expectedEntries) {
+ assert_equals(actualEntries.length, expectedEntries.length,
+ `The length of actual and expected entries should match.
+ actual: ${JSON.stringify(actualEntries)},
+ expected: ${JSON.stringify(expectedEntries)}`);
+ const actualEntrySet = new Set(actualEntries.map(ae=>ae.name));
+ assert_equals(actualEntrySet.size, actualEntries.length, `Actual entry names are not unique: ${JSON.stringify(actualEntries)}`);
+ const expectedEntrySet = new Set(expectedEntries.map(ee=>ee.name));
+ assert_equals(expectedEntrySet.size, expectedEntries.length, `Expected entry names are not unique: ${JSON.stringify(expectedEntries)}`);
+ actualEntries.forEach(ae=>{
+ const expectedEntry = expectedEntries.find(e=>e.name === ae.name);
+ assert_true(!!expectedEntry, `Entry name '${ae.name}' was not found.`);
+ checkEntry(ae, expectedEntry);
+ });
+}
+
+function checkEntry(entry, {name, entryType, startTime, detail, duration}) {
+ assert_equals(entry.name, name);
+ assert_equals(entry.entryType, entryType);
+ if (startTime !== undefined)
+ assert_equals(entry.startTime, startTime);
+ if (detail !== undefined)
+ assert_equals(JSON.stringify(entry.detail), JSON.stringify(detail));
+ if (duration !== undefined)
+ assert_equals(entry.duration, duration);
+}
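
A hypothetical use of the helpers above from a test body (the entry names and detail payload are illustrative):

    const expectedEntries = [
      { entryType: 'measure', name: 'm1', detail: null },
      { entryType: 'measure', name: 'm2', detail: { info: 1 } },
    ];
    const actualEntries = [
      performance.measure('m1'),
      performance.measure('m2', { start: 0, detail: { info: 1 } }),
    ];
    checkEntries(actualEntries, expectedEntries);
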
diff --git a/testing/web-platform/tests/user-timing/resources/webperftestharness.js b/testing/web-platform/tests/user-timing/resources/webperftestharness.js
new file mode 100644
index 0000000000..9627e18a03
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/resources/webperftestharness.js
@@ -0,0 +1,124 @@
+//
+// Helper functions for User Timing tests
+//
+
+var timingAttributes = [
+ "navigationStart",
+ "unloadEventStart",
+ "unloadEventEnd",
+ "redirectStart",
+ "redirectEnd",
+ "fetchStart",
+ "domainLookupStart",
+ "domainLookupEnd",
+ "connectStart",
+ "connectEnd",
+ "secureConnectionStart",
+ "requestStart",
+ "responseStart",
+ "responseEnd",
+ "domLoading",
+ "domInteractive",
+ "domContentLoadedEventStart",
+ "domContentLoadedEventEnd",
+ "domComplete",
+ "loadEventStart",
+ "loadEventEnd"
+];
+
+function has_required_interfaces()
+{
+ if (window.performance.mark == undefined ||
+ window.performance.clearMarks == undefined ||
+ window.performance.measure == undefined ||
+ window.performance.clearMeasures == undefined ||
+ window.performance.getEntriesByName == undefined ||
+ window.performance.getEntriesByType == undefined ||
+ window.performance.getEntries == undefined) {
+ return false;
+ }
+
+ return true;
+}
+
+function test_namespace(child_name, skip_root)
+{
+ if (skip_root === undefined) {
+ var msg = 'window.performance is defined';
+ wp_test(function () { assert_not_equals(performanceNamespace, undefined, msg); }, msg);
+ }
+
+ if (child_name !== undefined) {
+ var msg2 = 'window.performance.' + child_name + ' is defined';
+ wp_test(function() { assert_not_equals(performanceNamespace[child_name], undefined, msg2); }, msg2);
+ }
+}
+
+function test_attribute_exists(parent_name, attribute_name, properties)
+{
+ var msg = 'window.performance.' + parent_name + '.' + attribute_name + ' is defined.';
+ wp_test(function() { assert_not_equals(performanceNamespace[parent_name][attribute_name], undefined, msg); }, msg, properties);
+}
+
+function test_enum(parent_name, enum_name, value, properties)
+{
+ var msg = 'window.performance.' + parent_name + '.' + enum_name + ' is defined.';
+ wp_test(function() { assert_not_equals(performanceNamespace[parent_name][enum_name], undefined, msg); }, msg, properties);
+
+ msg = 'window.performance.' + parent_name + '.' + enum_name + ' = ' + value;
+ wp_test(function() { assert_equals(performanceNamespace[parent_name][enum_name], value, msg); }, msg, properties);
+}
+
+function test_timing_order(attribute_name, greater_than_attribute, properties)
+{
+ // ensure it's not 0 first
+ var msg = "window.performance.timing." + attribute_name + " > 0";
+ wp_test(function() { assert_true(performanceNamespace.timing[attribute_name] > 0, msg); }, msg, properties);
+
+ // ensure it's in the right order
+ msg = "window.performance.timing." + attribute_name + " >= window.performance.timing." + greater_than_attribute;
+ wp_test(function() { assert_true(performanceNamespace.timing[attribute_name] >= performanceNamespace.timing[greater_than_attribute], msg); }, msg, properties);
+}
+
+function test_timing_greater_than(attribute_name, greater_than, properties)
+{
+ var msg = "window.performance.timing." + attribute_name + " > " + greater_than;
+ test_greater_than(performanceNamespace.timing[attribute_name], greater_than, msg, properties);
+}
+
+function test_timing_equals(attribute_name, equals, msg, properties)
+{
+ var test_msg = msg || "window.performance.timing." + attribute_name + " == " + equals;
+ test_equals(performanceNamespace.timing[attribute_name], equals, test_msg, properties);
+}
+
+//
+// Non-test related helper functions
+//
+
+function sleep_milliseconds(n)
+{
+ var start = new Date().getTime();
+ while (true) {
+ if ((new Date().getTime() - start) >= n) break;
+ }
+}
+
+//
+// Common helper functions
+//
+
+function test_greater_than(value, greater_than, msg, properties)
+{
+ wp_test(function () { assert_greater_than(value, greater_than, msg); }, msg, properties);
+}
+
+function test_greater_or_equals(value, greater_than, msg, properties)
+{
+ wp_test(function () { assert_greater_than_equal(value, greater_than, msg); }, msg, properties);
+}
+
+function test_not_equals(value, notequals, msg, properties)
+{
+ wp_test(function() { assert_not_equals(value, notequals, msg); }, msg, properties);
+}
diff --git a/testing/web-platform/tests/user-timing/resources/webperftestharnessextension.js b/testing/web-platform/tests/user-timing/resources/webperftestharnessextension.js
new file mode 100644
index 0000000000..8640918d4f
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/resources/webperftestharnessextension.js
@@ -0,0 +1,202 @@
+//
+// Helper functions for User Timing tests
+//
+
+var mark_names = [
+ '',
+ '1',
+ 'abc',
+];
+
+var measures = [
+ [''],
+ ['2', 1],
+ ['aaa', 'navigationStart', ''],
+];
+
+function test_method_exists(method, method_name, properties)
+{
+ var msg;
+ if (typeof method === 'function')
+ msg = 'performance.' + method.name + ' is supported!';
+ else
+ msg = 'performance.' + method_name + ' is supported!';
+ wp_test(function() { assert_equals(typeof method, 'function', msg); }, msg, properties);
+}
+
+function test_method_throw_exception(func_str, exception, msg)
+{
+ let exception_name;
+ let test_func;
+ if (typeof exception == "function") {
+ exception_name = exception.name;
+ test_func = assert_throws_js;
+ } else {
+ exception_name = exception;
+ test_func = assert_throws_dom;
+ }
+    if (msg === undefined)
+        msg = 'Invocation of ' + func_str + ' should throw ' + exception_name + ' Exception.';
+ wp_test(function() { test_func(exception, function() {eval(func_str)}, msg); }, msg);
+}
+
+function test_noless_than(value, greater_than, msg, properties)
+{
+ wp_test(function () { assert_true(value >= greater_than, msg); }, msg, properties);
+}
+
+function test_fail(msg, properties)
+{
+ wp_test(function() { assert_unreached(); }, msg, properties);
+}
+
+function test_resource_entries(entries, expected_entries)
+{
+ // This is slightly convoluted so that we can sort the output.
+ var actual_entries = {};
+ var origin = window.location.protocol + "//" + window.location.host;
+
+ for (var i = 0; i < entries.length; ++i) {
+ var entry = entries[i];
+ var found = false;
+ for (var expected_entry in expected_entries) {
+ if (entry.name == origin + expected_entry) {
+ found = true;
+ if (expected_entry in actual_entries) {
+ test_fail(expected_entry + ' is not expected to have duplicate entries');
+ }
+ actual_entries[expected_entry] = entry;
+ break;
+ }
+ }
+ if (!found) {
+ test_fail(entries[i].name + ' is not expected to be in the Resource Timing buffer');
+ }
+ }
+
+    var sorted_urls = [];
+ for (var i in actual_entries) {
+ sorted_urls.push(i);
+ }
+ sorted_urls.sort();
+ for (var i in sorted_urls) {
+ var url = sorted_urls[i];
+ test_equals(actual_entries[url].initiatorType,
+ expected_entries[url],
+ origin + url + ' is expected to have initiatorType ' + expected_entries[url]);
+ }
+ for (var j in expected_entries) {
+ if (!(j in actual_entries)) {
+ test_fail(origin + j + ' is expected to be in the Resource Timing buffer');
+ }
+ }
+}
+
+function performance_entrylist_checker(type)
+{
+ const entryType = type;
+
+ function entry_check(entry, expectedNames, testDescription = '')
+ {
+ const msg = testDescription + 'Entry \"' + entry.name + '\" should be one that we have set.';
+ wp_test(function() { assert_in_array(entry.name, expectedNames, msg); }, msg);
+ test_equals(entry.entryType, entryType, testDescription + 'entryType should be \"' + entryType + '\".');
+ if (type === "measure") {
+ test_true(isFinite(entry.startTime), testDescription + 'startTime should be a number.');
+ test_true(isFinite(entry.duration), testDescription + 'duration should be a number.');
+ } else if (type === "mark") {
+            test_greater_than(entry.startTime, 0, testDescription + 'startTime should be greater than 0.');
+ test_equals(entry.duration, 0, testDescription + 'duration of mark should be 0.');
+ }
+ }
+
+ function entrylist_order_check(entryList)
+ {
+ let inOrder = true;
+ for (let i = 0; i < entryList.length - 1; ++i)
+ {
+ if (entryList[i + 1].startTime < entryList[i].startTime) {
+ inOrder = false;
+ break;
+ }
+ }
+ return inOrder;
+ }
+
+ function entrylist_check(entryList, expectedLength, expectedNames, testDescription = '')
+ {
+ test_equals(entryList.length, expectedLength, testDescription + 'There should be ' + expectedLength + ' entries.');
+ test_true(entrylist_order_check(entryList), testDescription + 'Entries in entrylist should be in order.');
+ for (let i = 0; i < entryList.length; ++i)
+ {
+ entry_check(entryList[i], expectedNames, testDescription + 'Entry_list ' + i + '. ');
+ }
+ }
+
+    return {"entrylist_check": entrylist_check};
+}
+
+function PerformanceContext(context)
+{
+ this.performanceContext = context;
+}
+
+PerformanceContext.prototype =
+{
+
+ initialMeasures: function(item, index, array)
+ {
+ this.performanceContext.measure.apply(this.performanceContext, item);
+ },
+
+ mark: function()
+ {
+ this.performanceContext.mark.apply(this.performanceContext, arguments);
+ },
+
+ measure: function()
+ {
+ this.performanceContext.measure.apply(this.performanceContext, arguments);
+ },
+
+ clearMarks: function()
+ {
+ this.performanceContext.clearMarks.apply(this.performanceContext, arguments);
+ },
+
+ clearMeasures: function()
+ {
+ this.performanceContext.clearMeasures.apply(this.performanceContext, arguments);
+
+ },
+
+ getEntries: function()
+ {
+ return this.performanceContext.getEntries.apply(this.performanceContext, arguments);
+ },
+
+ getEntriesByType: function()
+ {
+ return this.performanceContext.getEntriesByType.apply(this.performanceContext, arguments);
+ },
+
+ getEntriesByName: function()
+ {
+ return this.performanceContext.getEntriesByName.apply(this.performanceContext, arguments);
+ },
+
+ setResourceTimingBufferSize: function()
+ {
+ return this.performanceContext.setResourceTimingBufferSize.apply(this.performanceContext, arguments);
+ },
+
+ registerResourceTimingBufferFullCallback: function(func)
+ {
+ this.performanceContext.onresourcetimingbufferfull = func;
+ },
+
+ clearResourceTimings: function()
+ {
+ this.performanceContext.clearResourceTimings.apply(this.performanceContext, arguments);
+ }
+
+};
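
A short sketch of how the wrapper and the data above are combined by the measure tests (mirroring measures.html):

    const context = new PerformanceContext(window.performance);
    mark_names.forEach(function(name) { context.mark(name); });
    measures.forEach(context.initialMeasures, context);  // each item is an argument list for measure()
    const entries = context.getEntriesByType('measure'); // one entry per item in `measures`
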
diff --git a/testing/web-platform/tests/user-timing/structured-serialize-detail.any.js b/testing/web-platform/tests/user-timing/structured-serialize-detail.any.js
new file mode 100644
index 0000000000..dcceffde27
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/structured-serialize-detail.any.js
@@ -0,0 +1,66 @@
+test(function() {
+ performance.clearMarks();
+ const detail = { randomInfo: 123 }
+ const markEntry = new PerformanceMark("A", { detail });
+ assert_equals(markEntry.detail.randomInfo, detail.randomInfo);
+ assert_not_equals(markEntry.detail, detail);
+}, "The detail property in the mark constructor should be structured-cloned.");
+
+test(function() {
+ performance.clearMarks();
+ const detail = { randomInfo: 123 }
+ const markEntry = performance.mark("A", { detail });
+ assert_equals(markEntry.detail.randomInfo, detail.randomInfo);
+ assert_not_equals(markEntry.detail, detail);
+}, "The detail property in the mark method should be structured-cloned.");
+
+test(function() {
+ performance.clearMarks();
+ const markEntry = performance.mark("A");
+ assert_equals(markEntry.detail, null);
+}, "When accessing detail from a mark entry and the detail is not provided, just return a null value.");
+
+test(function() {
+ performance.clearMarks();
+ const detail = { unserializable: Symbol() };
+ assert_throws_dom("DataCloneError", ()=>{
+ new PerformanceMark("A", { detail });
+ }, "Trying to structured-serialize a Symbol.");
+}, "Mark: Throw an exception when the detail property cannot be structured-serialized.");
+
+test(function() {
+ performance.clearMeasures();
+ const detail = { randomInfo: 123 }
+ const measureEntry = performance.measure("A", { start: 0, detail });
+ assert_equals(measureEntry.detail.randomInfo, detail.randomInfo);
+ assert_not_equals(measureEntry.detail, detail);
+}, "The detail property in the measure method should be structured-cloned.");
+
+test(function() {
+ performance.clearMeasures();
+ const detail = { randomInfo: 123 }
+ const measureEntry = performance.measure("A", { start: 0, detail });
+ assert_equals(measureEntry.detail, measureEntry.detail);
+}, "The detail property in the measure method should be the same reference.");
+
+test(function() {
+ performance.clearMeasures();
+ const measureEntry = performance.measure("A");
+ assert_equals(measureEntry.detail, null);
+}, "When accessing detail from a measure entry and the detail is not provided, just return a null value.");
+
+test(function() {
+ performance.clearMeasures();
+ const detail = { unserializable: Symbol() };
+ assert_throws_dom("DataCloneError", ()=>{
+ performance.measure("A", { start: 0, detail });
+ }, "Trying to structured-serialize a Symbol.");
+}, "Measure: Throw an exception when the detail property cannot be structured-serialized.");
+
+test(function() {
+ const bar = { 1: 2 };
+ const detail = { foo: 1, bar };
+ const mark = performance.mark("m", { detail });
+ detail.foo = 2;
+ assert_equals(mark.detail.foo, 1);
+}, "The detail object is cloned when passed to mark API.");
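
A short sketch of the cloning behaviour asserted above, assuming ordinary page code (the mark name and payload are made up):

    const detail = { payload: [1, 2, 3] };
    const entry = performance.mark('cloned', { detail });
    detail.payload.push(4);
    // entry.detail.payload is still [1, 2, 3]: the dictionary was structured-cloned when the mark was created.
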
diff --git a/testing/web-platform/tests/user-timing/supported-usertiming-types.any.js b/testing/web-platform/tests/user-timing/supported-usertiming-types.any.js
new file mode 100644
index 0000000000..ea3b2fe9dc
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/supported-usertiming-types.any.js
@@ -0,0 +1,37 @@
+test(() => {
+ if (typeof PerformanceObserver.supportedEntryTypes === "undefined")
+ assert_unreached("supportedEntryTypes is not supported.");
+ const types = PerformanceObserver.supportedEntryTypes;
+ assert_true(types.includes("mark"),
+ "There should be 'mark' in PerformanceObserver.supportedEntryTypes");
+ assert_true(types.includes("measure"),
+ "There should be 'measure' in PerformanceObserver.supportedEntryTypes");
+ assert_greater_than(types.indexOf("measure"), types.indexOf('mark'),
+ "The 'measure' entry should appear after the 'mark' entry");
+}, "supportedEntryTypes contains 'mark' and 'measure'.");
+
+if (typeof PerformanceObserver.supportedEntryTypes !== "undefined") {
+ const entryTypes = {
+ "mark": () => {
+ performance.mark('foo');
+ },
+ "measure": () => {
+ performance.measure('bar');
+ }
+ }
+ for (let entryType in entryTypes) {
+ if (PerformanceObserver.supportedEntryTypes.includes(entryType)) {
+ promise_test(async() => {
+ await new Promise((resolve) => {
+ new PerformanceObserver(function (list, observer) {
+ observer.disconnect();
+ resolve();
+ }).observe({entryTypes: [entryType]});
+
+ // Force the PerformanceEntry.
+ entryTypes[entryType]();
+ })
+ }, `'${entryType}' entries should be observable.`)
+ }
+ }
+}
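
A minimal sketch of observing user timing entries the way the promise tests above do, assuming ordinary page code:

    new PerformanceObserver(function(list, observer) {
      for (const entry of list.getEntries())
        console.log(entry.entryType, entry.name, entry.duration);
      observer.disconnect();
    }).observe({ entryTypes: ['mark', 'measure'] });
    performance.mark('foo');
    performance.measure('bar');
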
diff --git a/testing/web-platform/tests/user-timing/user-timing-tojson.html b/testing/web-platform/tests/user-timing/user-timing-tojson.html
new file mode 100644
index 0000000000..6aef7fa904
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/user-timing-tojson.html
@@ -0,0 +1,44 @@
+<!doctype html>
+<html>
+<head>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</head>
+<body>
+<script>
+const keys = [
+ 'name',
+ 'entryType',
+ 'startTime',
+ 'duration',
+];
+test(() => {
+ performance.mark('a');
+ const markEntries = performance.getEntriesByType('mark');
+ assert_equals(1, markEntries.length);
+ const markEntry = markEntries[0];
+ assert_equals(markEntry.entryType, 'mark');
+ assert_equals(typeof(markEntry.toJSON), 'function');
+ const markJSON = markEntry.toJSON();
+ assert_equals(typeof(markJSON), 'object');
+ for (const key of keys) {
+ assert_equals(markJSON[key], markEntry[key], `PerformanceMark ${key} entry does not match its toJSON value`);
+ }
+}, 'Test toJSON() in PerformanceMark');
+
+test(() => {
+ performance.measure('m');
+ const measureEntries = performance.getEntriesByType('measure');
+ assert_equals(1, measureEntries.length);
+ const measureEntry = measureEntries[0];
+ assert_equals(measureEntry.entryType, 'measure');
+ assert_equals(typeof(measureEntry.toJSON), 'function');
+ const measureJSON = measureEntry.toJSON();
+ assert_equals(typeof(measureJSON), 'object');
+ for (const key of keys) {
+ assert_equals(measureJSON[key], measureEntry[key], `PerformanceMeasure ${key} entry does not match its toJSON value`);
+ }
+}, 'Test toJSON() in PerformanceMeasure');
+</script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/user-timing/user_timing_exists.any.js b/testing/web-platform/tests/user-timing/user_timing_exists.any.js
new file mode 100644
index 0000000000..adf9052ebd
--- /dev/null
+++ b/testing/web-platform/tests/user-timing/user_timing_exists.any.js
@@ -0,0 +1,12 @@
+test(function() {
+ assert_not_equals(self.performance.mark, undefined);
+}, "self.performance.mark is defined.");
+test(function() {
+ assert_not_equals(self.performance.clearMarks, undefined);
+}, "self.performance.clearMarks is defined.");
+test(function() {
+ assert_not_equals(self.performance.measure, undefined);
+}, "self.performance.measure is defined.");
+test(function() {
+ assert_not_equals(self.performance.clearMeasures, undefined);
+}, "self.performance.clearMeasures is defined.");