Diffstat (limited to 'testing/web-platform/tests/speech-api')
17 files changed, 880 insertions, 0 deletions
diff --git a/testing/web-platform/tests/speech-api/META.yml b/testing/web-platform/tests/speech-api/META.yml
new file mode 100644
index 0000000000..2b7a03e245
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/META.yml
@@ -0,0 +1,5 @@
+spec: https://wicg.github.io/speech-api/
+suggested_reviewers:
+  - andrenatal
+  - foolip
+  - marcoscaceres
diff --git a/testing/web-platform/tests/speech-api/SpeechRecognition-abort-manual.https.html b/testing/web-platform/tests/speech-api/SpeechRecognition-abort-manual.https.html
new file mode 100644
index 0000000000..3c9e1ab251
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechRecognition-abort-manual.https.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>SpeechRecognition.abort</title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="webspeech.js"></script>
+  </head>
+  <body>
+    <p><b>Instructions:</b> Do NOT speak. Run test in silence.
+    This test may fail if there is too much noise.</p>
+    <div id="log"></div>
+    <div id="notes"></div>
+    <script>
+var audioTest = new CycleTest('onaudio');
+reco.onaudiostart = audioTest.startEvent();
+reco.onaudioend = audioTest.endEvent();
+
+reco.onsoundstart = neverFireEvent('onsoundstart');
+reco.onsoundend = neverFireEvent('onsoundend');
+reco.onspeechstart = neverFireEvent('onspeechstart');
+reco.onspeechend = neverFireEvent('onspeechend');
+reco.onresult = neverFireEvent('onresult');
+reco.onnomatch = neverFireEvent('onnomatch');
+
+var errorTest = new CountTest('onerror aborted', 1, 1);
+reco.onerror = errorTest.test().step_func(function(event) {
+  errorTest.count(1);
+  assert_equals(typeof(event.message), 'string', 'typeof(event.message)');
+  notes.innerHTML += 'onerror message is "' + event.message + '"' + '<br>';
+  assert_equals(event.error, 'aborted', 'onerror event.error');
+});
+
+reco.start();
+
+function beginTest() {
+  audioTest.test.step_timeout(function() { reco.abort(); }, DELAY);
+}
+    </script>
+  </body>
+</html>
+
diff --git a/testing/web-platform/tests/speech-api/SpeechRecognition-basics.https.html b/testing/web-platform/tests/speech-api/SpeechRecognition-basics.https.html
new file mode 100644
index 0000000000..dc5d3f5c1d
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechRecognition-basics.https.html
@@ -0,0 +1,14 @@
+<!DOCTYPE html>
+<title>SpeechRecognition basics</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function() {
+  const reco = new SpeechRecognition();
+  assert_equals(reco.grammars.length, 0, 'SpeechRecognition.grammars.length');
+  assert_equals(reco.lang, '', 'SpeechRecognition.lang');
+  assert_false(reco.continuous, 'SpeechRecognition.continuous');
+  assert_false(reco.interimResults, 'SpeechRecognition.interimResults');
+  assert_equals(reco.maxAlternatives, 1, 'SpeechRecognition.maxAlternatives');
+});
+</script>
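The basics test above only pins down constructor defaults. As a point of reference, here is a minimal sketch of a one-shot recognition session built on those same defaults (not part of the patch; it assumes a browser that exposes an unprefixed SpeechRecognition and a page with microphone permission):

// Hypothetical stand-alone usage, outside the test harness.
const recognition = new SpeechRecognition();
recognition.lang = 'en-US';          // default is '' (user agent default)
recognition.continuous = false;      // defaults checked by the test above
recognition.interimResults = false;
recognition.maxAlternatives = 1;

recognition.onresult = (event) => {
  const result = event.results[event.resultIndex];
  console.log('transcript:', result[0].transcript, 'isFinal:', result.isFinal);
};
recognition.onerror = (event) => console.log('error:', event.error);

recognition.start();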
src="/resources/testharnessreport.js"></script> + <script> + var TIMEOUT_OVERRIDE = 10000; // In milliseconds. + </script> + <script src='webspeech.js'></script> + </head> + <body> + <p><b>Instructions:</b> Do NOT speak. Run test in silence. + This test may fail if too much noise.</p> + <div id="log"></div> + <div id="notes"></div> + <script> +var audioTest = new CycleTest('onaudio'); +reco.onaudiostart = audioTest.startEvent(); +reco.onaudioend = audioTest.endEvent(); + +reco.onsoundstart = neverFireEvent('onsoundstart'); +reco.onsoundend = neverFireEvent('onsoundend'); +reco.onspeechstart = neverFireEvent('onspeechstart'); +reco.onspeechend = neverFireEvent('onsspeechend'); +reco.onresult = neverFireEvent('onresult'); +reco.onnomatch = neverFireEvent('onnomatch'); + +var errorTest = new CountTest('onerror no-speech', 1, 1); +reco.onerror = errorTest.test().step_func(function(event) { + errorTest.count(1); + assert_equals(typeof(event.message), 'string', 'typeof(event.message)'); + notes.innerHTML += 'onerror message is "' + event.message + '"' + '<br>'; + assert_equals(event.error, 'no-speech', 'onerror event.error'); +}); + +reco.start(); + </script> + </body> +</html> + diff --git a/testing/web-platform/tests/speech-api/SpeechRecognition-onresult-manual.https.html b/testing/web-platform/tests/speech-api/SpeechRecognition-onresult-manual.https.html new file mode 100644 index 0000000000..6a0877bbe1 --- /dev/null +++ b/testing/web-platform/tests/speech-api/SpeechRecognition-onresult-manual.https.html @@ -0,0 +1,157 @@ +<!DOCTYPE html> +<html> + <head> + <meta charset="utf-8"> + <meta name="timeout" content="long"> + <title>SpeechRecognition.onresult</title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script> + var TIMEOUT_OVERRIDE = 60000; // In milliseconds. + </script> + <script src="webspeech.js"></script> + </head> + <body> + <b>Instructions:</b> + <p>Reload and re-run this test at least 4 times to cover all 4 combinations + of these checkboxes: + <input type="checkbox" id="continuous">continuous + <input type="checkbox" id="interim">interimResults + <button id="button" onclick="startButton()">Click and Speak</button> + <br> + You may also wish to test with various combinations of these: + maxAlternatives: + <input type="text" value="3" size="2" id="maxAlternatives">, + language: + <input type="text" value="en-us" size="7" id="language"> + </p> + <div id="results"></div> + <div id="log"></div> + <div id="notes"></div> + <script> +var audioTest = new CycleTest('onaudio'); +reco.onaudiostart = audioTest.startEvent(); +reco.onaudioend = audioTest.endEvent(); + +var soundTest = new CycleTest('onsound'); +reco.onsoundstart = soundTest.startEvent(); +reco.onsoundend = soundTest.endEvent(); + +var speechTest = new CycleTest('onspeech'); +reco.onspeechstart = speechTest.startEvent(); +reco.onspeechend = speechTest.endEvent(); + +reco.onerror = neverFireEvent('onerror'); +reco.onnomatch = neverFireEvent('onnomatch'); + +var lastIsFinal = -1; // Highest results index that has been marked isFinal. +var lastInterimCount = 0; // Number of results that are not marked isFinal. 
diff --git a/testing/web-platform/tests/speech-api/SpeechRecognition-onresult-manual.https.html b/testing/web-platform/tests/speech-api/SpeechRecognition-onresult-manual.https.html
new file mode 100644
index 0000000000..6a0877bbe1
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechRecognition-onresult-manual.https.html
@@ -0,0 +1,157 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <meta name="timeout" content="long">
+    <title>SpeechRecognition.onresult</title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script>
+      var TIMEOUT_OVERRIDE = 60000; // In milliseconds.
+    </script>
+    <script src="webspeech.js"></script>
+  </head>
+  <body>
+    <b>Instructions:</b>
+    <p>Reload and re-run this test at least 4 times to cover all 4 combinations
+    of these checkboxes:
+    <input type="checkbox" id="continuous">continuous
+    <input type="checkbox" id="interim">interimResults
+    <button id="button" onclick="startButton()">Click and Speak</button>
+    <br>
+    You may also wish to test with various combinations of these:
+    maxAlternatives:
+    <input type="text" value="3" size="2" id="maxAlternatives">,
+    language:
+    <input type="text" value="en-us" size="7" id="language">
+    </p>
+    <div id="results"></div>
+    <div id="log"></div>
+    <div id="notes"></div>
+    <script>
+var audioTest = new CycleTest('onaudio');
+reco.onaudiostart = audioTest.startEvent();
+reco.onaudioend = audioTest.endEvent();
+
+var soundTest = new CycleTest('onsound');
+reco.onsoundstart = soundTest.startEvent();
+reco.onsoundend = soundTest.endEvent();
+
+var speechTest = new CycleTest('onspeech');
+reco.onspeechstart = speechTest.startEvent();
+reco.onspeechend = speechTest.endEvent();
+
+reco.onerror = neverFireEvent('onerror');
+reco.onnomatch = neverFireEvent('onnomatch');
+
+var lastIsFinal = -1; // Highest results index that has been marked isFinal.
+var lastInterimCount = 0; // Number of results that are not marked isFinal.
+var resultTest = new CountTest('onresult', 1, 9999);
+
+resultTest.whenDone = function() {
+  assert_equals(lastInterimCount, 0, 'Number of interim results pending');
+};
+
+function appendAlternatives(array, results) {
+  for (var i = 0; i < reco.maxAlternatives; i++) {
+    if (i < results.length) {
+      array[i] += results[i].transcript;
+    } else {
+      array[i] += '<no alternative>';
+      assert_true(i > 0, 'Must return at least one alternative.');
+    }
+  }
+}
+
+reco.onresult = resultTest.test().step_func(function(event) {
+  resultTest.count(1);
+  var final = new Array();
+  var interim = new Array();
+  for (var i = 0; i < reco.maxAlternatives; i++) {
+    final[i] = '';
+    interim[i] = '';
+  }
+  assert_true(event.resultIndex > lastIsFinal, 'resultIndex must not ' +
+      'indicate a change in a result that was previously marked isFinal.');
+  assert_true(event.resultIndex <= event.results.length,
+      'resultIndex must not be greater than results.length.');
+  for (var i = 0; i < event.results.length; ++i) {
+    assert_true(event.results[i].length <= reco.maxAlternatives,
+        'Number of alternatives must not exceed maxAlternatives.');
+    if (event.results[i].isFinal) {
+      appendAlternatives(final, event.results[i]);
+      assert_true(reco.continuous || i < 1,
+          'When SpeechRecognition.continuous is false, no more than one ' +
+          'SpeechRecognitionResult.isFinal true should be returned.');
+      if (i > lastIsFinal) {
+        lastIsFinal = i;
+      }
+    } else {
+      appendAlternatives(interim, event.results[i]);
+      assert_true(i > lastIsFinal, 'A SpeechRecognitionResult was previously ' +
+          'marked isFinal, but now is not marked isFinal.');
+    }
+    lastInterimCount = event.results.length - lastIsFinal - 1;
+    assert_true(reco.interimResults || lastInterimCount == 0,
+        'Should not return interim results when reco.interimResults is false.');
+  }
+  for (var i = 0; i < reco.maxAlternatives; i++) {
+    document.getElementById('final_span_' + i).innerHTML = final[i];
+    document.getElementById('interim_span_' + i).innerHTML = interim[i];
+  }
+});
+
+function configureRecognition() {
+  var continuousBox = document.getElementById('continuous');
+  var interimBox = document.getElementById('interim');
+  var maxAlternativesInput = document.getElementById('maxAlternatives');
+  var langInput = document.getElementById('language');
+  reco.continuous = continuousBox.checked;
+  reco.interimResults = interimBox.checked;
+  reco.maxAlternatives = maxAlternativesInput.value;
+  reco.lang = langInput.value;
+  continuousBox.disabled = true;
+  interimBox.disabled = true;
+  maxAlternativesInput.disabled = true;
+  langInput.disabled = true;
+  test(function() {
+    assert_equals(reco.continuous, continuousBox.checked,
+        'SpeechRecognition.continuous');
+    assert_equals(reco.interimResults, interimBox.checked,
+        'SpeechRecognition.interimResults');
+    assert_equals(reco.maxAlternatives, parseInt(maxAlternativesInput.value),
+        'SpeechRecognition.maxAlternatives');
+    assert_equals(reco.lang, langInput.value,
+        'SpeechRecognition.lang');
+  }, 'SpeechRecognition settings');
+}
+
+var clicks = 0;
+function startButton() {
+  var button = document.getElementById('button');
+  if (++clicks == 1) {
+    configureRecognition();
+    if (reco.continuous) {
+      button.innerHTML = 'Click when done speaking';
+    } else {
+      button.hidden = true;
+    }
+    var results_html = '';
+    for (var i = 0; i < reco.maxAlternatives; i++) {
+      results_html += '<div style="border:1px dotted gray; padding:10px; ' +
+          'font-weight:bold">' +
+          '<span id="final_span_' + i + '"></span>' +
+          '<span id="interim_span_' + i + '" style="color:blue"></span>' +
+          '</div>';
+    }
+    results.innerHTML = results_html;
+    reco.start();
+  } else {
+    button.hidden = true;
+    reco.stop();
+  }
+}
+    </script>
+  </body>
+</html>
+
diff --git a/testing/web-platform/tests/speech-api/SpeechRecognition-stop-manual.https.html b/testing/web-platform/tests/speech-api/SpeechRecognition-stop-manual.https.html
new file mode 100644
index 0000000000..e4741b7fc6
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechRecognition-stop-manual.https.html
@@ -0,0 +1,40 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>SpeechRecognition.stop</title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="webspeech.js"></script>
+  </head>
+  <body>
+    <p><b>Instructions:</b> Do NOT speak. Run test in silence.
+    This test may fail if there is too much noise.</p>
+    <div id="log"></div>
+    <div id="notes"></div>
+    <script>
+var audioTest = new CycleTest('onaudio');
+reco.onaudiostart = audioTest.startEvent();
+reco.onaudioend = audioTest.endEvent();
+
+reco.onsoundstart = neverFireEvent('onsoundstart');
+reco.onsoundend = neverFireEvent('onsoundend');
+reco.onspeechstart = neverFireEvent('onspeechstart');
+reco.onspeechend = neverFireEvent('onspeechend');
+reco.onresult = neverFireEvent('onresult');
+reco.onerror = neverFireEvent('onerror');
+
+var nomatchTest = new CountTest('onnomatch', 0, 1);
+reco.onnomatch = nomatchTest.test().step_func(function(event) {
+  nomatchTest.count(1);
+});
+
+reco.start();
+
+function beginTest() {
+  setTimeout(function() { reco.stop(); }, DELAY);
+}
+    </script>
+  </body>
+</html>
+
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesis-pause-resume.tentative.html b/testing/web-platform/tests/speech-api/SpeechSynthesis-pause-resume.tentative.html
new file mode 100644
index 0000000000..a7aa2bbf6f
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesis-pause-resume.tentative.html
@@ -0,0 +1,50 @@
+<!doctype html>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<body>
+<script>
+// This test is tentative because timing isn't defined:
+// https://github.com/w3c/speech-api/issues/39
+// https://github.com/w3c/speech-api/issues/40
+async_test(t => {
+  assert_false(speechSynthesis.paused, 'initial paused state');
+
+  test_driver.bless('speechSynthesis.speak', t.step_func(() => {
+    const sentence = `long sentence which will take at least a few seconds to
+        utter so that it's possible to pause and resume before the end`;
+    const utter = new SpeechSynthesisUtterance(sentence);
+    t.add_cleanup(() => speechSynthesis.pause());
+    utter.onerror = t.unreached_func('error event');
+
+    speechSynthesis.speak(utter);
+
+    assert_false(speechSynthesis.paused, 'paused state after speak()');
+
+    utter.onstart = t.step_func(() => {
+      utter.onstart = null;
+      assert_false(speechSynthesis.paused, 'paused state at start event');
+
+      speechSynthesis.pause();
+
+      // paused state changes async, right before the pause event
+      assert_false(speechSynthesis.paused, 'paused state after pause()');
+
+      utter.onpause = t.step_func(() => {
+        utter.onpause = null;
+        assert_true(speechSynthesis.paused, 'paused state at pause event');
+
+        speechSynthesis.resume();
+
+        // paused state changes async, right before the resume event
+        assert_true(speechSynthesis.paused, 'paused state after resume()');
+
+        utter.onresume = t.step_func_done(() => {
+          assert_false(speechSynthesis.paused, 'paused state at resume event');
+        });
+      });
+    });
+  }));
+}, 'speechSynthesis.pause() and resume() state and events');
+</script>
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-events.html b/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-events.html
new file mode 100644
index 0000000000..c559da1f92
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-events.html
@@ -0,0 +1,22 @@
+<!doctype html>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<body>
+<script>
+async function runStartEndTest(t, utterance) {
+  const eventWatcher = new EventWatcher(t, utterance, ['start', 'end', 'error']);
+  await test_driver.bless('speechSynthesis.speak',
+                          () => speechSynthesis.speak(utterance));
+  await eventWatcher.wait_for(['start', 'end']);
+}
+promise_test(async (t) => {
+  const utterance = new SpeechSynthesisUtterance();
+  await runStartEndTest(t, utterance);
+}, 'speechSynthesis.speak() fires start and end events with empty utterance');
+promise_test(async (t) => {
+  const utterance = new SpeechSynthesisUtterance('test');
+  await runStartEndTest(t, utterance);
+}, 'speechSynthesis.speak() fires start and end events');
+</script>
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-twice.html b/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-twice.html
new file mode 100644
index 0000000000..3e0388b9cf
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-twice.html
@@ -0,0 +1,22 @@
+<!doctype html>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<body>
+<script>
+// using an utterance twice on the same SpeechSynthesis instance should work
+// https://github.com/w3c/speech-api/issues/7
+async_test(t => {
+  test_driver.bless('speechSynthesis.speak', t.step_func(() => {
+    // the utterance is short to make the test faster
+    const utter = new SpeechSynthesisUtterance('1');
+    speechSynthesis.speak(utter);
+    utter.onend = t.step_func(() => {
+      speechSynthesis.speak(utter);
+      // pass if the utterance finishes a second time
+      utter.onend = t.step_func_done();
+    });
+  }));
+}, 'Using the same SpeechSynthesisUtterance twice for speechSynthesis.speak()');
+</script>
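The three synthesis tests above all observe completion through events on the utterance. The same pattern can be wrapped in a promise when sequencing utterances outside the harness; a sketch (not part of the patch; speakAndWait is a hypothetical helper, and browsers that gate speak() on user activation need a preceding user gesture, as the next test checks):

function speakAndWait(text) {
  return new Promise((resolve, reject) => {
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.onend = resolve;
    utterance.onerror = (event) => reject(event.error);
    speechSynthesis.speak(utterance);
  });
}

// e.g. inside a click handler:
// await speakAndWait('first');
// await speakAndWait('second');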
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-without-activation-fails.tentative.html b/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-without-activation-fails.tentative.html
new file mode 100644
index 0000000000..1b86552a1c
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesis-speak-without-activation-fails.tentative.html
@@ -0,0 +1,16 @@
+<!doctype html>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<body>
+<script>
+// TODO(csharrison): Make this test not tentative once
+// https://github.com/w3c/speech-api/issues/35 is resolved.
+async_test(t => {
+  const utter = new SpeechSynthesisUtterance('1');
+  utter.onerror = t.step_func_done((e) => {
+    assert_equals(e.error, "not-allowed");
+  });
+  utter.onend = t.step_func_done(() => assert_unreached());
+  speechSynthesis.speak(utter);
+}, 'speechSynthesis.speak requires user activation');
+</script>
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesisErrorEvent-constructor.html b/testing/web-platform/tests/speech-api/SpeechSynthesisErrorEvent-constructor.html
new file mode 100644
index 0000000000..abefbf9a51
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesisErrorEvent-constructor.html
@@ -0,0 +1,88 @@
+<!DOCTYPE html>
+<meta charset="utf-8">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+/*
+[Exposed=Window,
+ Constructor(DOMString type, SpeechSynthesisErrorEventInit eventInitDict)]
+interface SpeechSynthesisErrorEvent : SpeechSynthesisEvent {
+  readonly attribute SpeechSynthesisErrorCode error;
+};
+*/
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisErrorEvent();
+  });
+}, "SpeechSynthesisErrorEvent with no arguments throws TypeError");
+
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisErrorEvent("type");
+  });
+}, "SpeechSynthesisErrorEvent with no eventInitDict throws TypeError");
+
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisErrorEvent("type", {});
+  });
+}, `SpeechSynthesisErrorEvent with empty eventInitDict throws TypeError (requires
+    utterance and error)`);
+
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisErrorEvent("type", {error:"not-allowed"});
+  });
+}, `SpeechSynthesisErrorEvent with eventInitDict without utterance throws
+    TypeError`);
+
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisErrorEvent("type", {utterance: new SpeechSynthesisUtterance()});
+  });
+}, `SpeechSynthesisErrorEvent with eventInitDict without error throws
+    TypeError`);
+
+test(() => {
+  const utterance = new SpeechSynthesisUtterance("foo");
+  const event = new SpeechSynthesisErrorEvent("type", {utterance: utterance, error:"not-allowed"});
+  assert_equals(event.utterance, utterance);
+  assert_equals(event.error, "not-allowed");
+  assert_equals(event.charIndex, 0);
+  assert_equals(event.elapsedTime, 0);
+  assert_equals(event.name, "");
+}, "SpeechSynthesisErrorEvent with eventInitDict having utterance and error");
+
+test(() => {
+  const utterance = new SpeechSynthesisUtterance("foo");
+  const event = new SpeechSynthesisErrorEvent("type", {
+    utterance: utterance,
+    charIndex: 5,
+    elapsedTime: 100,
+    name: "foo",
+    error: "synthesis-failed"
+  });
+  assert_equals(event.bubbles, false);
+  assert_equals(event.cancelable, false);
+  assert_equals(event.type, "type");
+  assert_equals(event.utterance, utterance);
+  assert_equals(event.charIndex, 5);
+  assert_equals(event.elapsedTime, 100);
+  assert_equals(event.name, "foo");
+  assert_equals(event.error, "synthesis-failed");
+}, "SpeechSynthesisErrorEvent with custom eventInitDict");
+
+test(() => {
+  function createEventFunc(error) {
+    return () => {
+      new SpeechSynthesisErrorEvent("type", {
+        utterance: new SpeechSynthesisUtterance(),
+        error: error
+      });
+    };
+  };
+  assert_throws_js(TypeError, createEventFunc(""));
+  assert_throws_js(TypeError, createEventFunc("foo"));
+  assert_throws_js(TypeError, createEventFunc("bar"));
+}, "SpeechSynthesisErrorEvent with wrong error enum");
+</script>
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesisEvent-constructor.html b/testing/web-platform/tests/speech-api/SpeechSynthesisEvent-constructor.html
new file mode 100644
index 0000000000..e6b29683f7
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesisEvent-constructor.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html>
+<meta charset="utf-8">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+/*
+[Exposed=Window,
+ Constructor(DOMString type, SpeechSynthesisEventInit eventInitDict)]
+interface SpeechSynthesisEvent : Event {
+  readonly attribute SpeechSynthesisUtterance utterance;
+  readonly attribute unsigned long charIndex;
+  readonly attribute float elapsedTime;
+  readonly attribute DOMString name;
+};
+*/
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisEvent();
+  });
+}, "SpeechSynthesisEvent with no arguments throws TypeError");
+
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisEvent("type");
+  });
+}, "SpeechSynthesisEvent with no eventInitDict throws TypeError");
+
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisEvent("type", {});
+  });
+}, `SpeechSynthesisEvent with empty eventInitDict throws TypeError (requires
+    utterance)`);
+
+test(() => {
+  assert_throws_js(TypeError, () => {
+    new SpeechSynthesisEvent("type", {charIndex: 10, elapsedTime: 50, name:"foo"});
+  });
+}, `SpeechSynthesisEvent with eventInitDict not having utterance throws
+    TypeError`);
+
+test(() => {
+  const utterance = new SpeechSynthesisUtterance("foo");
+  const event = new SpeechSynthesisEvent("type", {utterance: utterance});
+  assert_equals(event.utterance, utterance);
+  assert_equals(event.charIndex, 0);
+  assert_equals(event.charLength, 0);
+  assert_equals(event.elapsedTime, 0);
+  assert_equals(event.name, "");
+}, "SpeechSynthesisEvent with eventInitDict having an utterance");
+
+test(() => {
+  const utterance = new SpeechSynthesisUtterance("foo");
+  const event = new SpeechSynthesisEvent("type", {
+    utterance: utterance,
+    charIndex: 5,
+    charLength: 3,
+    elapsedTime: 100,
+    name: "foo"
+  });
+  assert_equals(event.bubbles, false);
+  assert_equals(event.cancelable, false);
+  assert_equals(event.type, "type");
+  assert_equals(event.utterance, utterance);
+  assert_equals(event.charIndex, 5);
+  assert_equals(event.charLength, 3);
+  assert_equals(event.elapsedTime, 100);
+  assert_equals(event.name, "foo");
+}, "SpeechSynthesisEvent with custom eventInitDict");
+</script>
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesisUtterance-basics.https.html b/testing/web-platform/tests/speech-api/SpeechSynthesisUtterance-basics.https.html
new file mode 100644
index 0000000000..2fd394150e
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesisUtterance-basics.https.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<title>SpeechSynthesisUtterance basics</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+const DEFAULTS = {
+  text: '',
+  lang: '',
+  voice: null,
+  volume: 1,
+  rate: 1,
+  pitch: 1,
+};
+
+for (const prop in DEFAULTS) {
+  test(function() {
+    const utt = new SpeechSynthesisUtterance();
+    assert_equals(utt[prop], DEFAULTS[prop], prop);
+  }, `new SpeechSynthesisUtterance() default ${prop}`);
+}
+
+test(function() {
+  const utt = new SpeechSynthesisUtterance("hello");
+  assert_equals(utt.text, 'hello', 'text');
+  // check that the other properties retain their defaults
+  for (const prop in DEFAULTS) {
+    if (prop != 'text') {
+      assert_equals(utt[prop], DEFAULTS[prop], prop);
+    }
+  }
+}, 'new SpeechSynthesisUtterance("hello") text and defaults');
+
+test(function() {
+  const utt = new SpeechSynthesisUtterance(null);
+  assert_equals(utt.text, 'null');
+}, 'new SpeechSynthesisUtterance(null)');
+
+test(function() {
+  const utt = new SpeechSynthesisUtterance(undefined);
+  // See https://github.com/w3c/speech-api/pull/48.
+  assert_equals(utt.text, '');
+}, 'new SpeechSynthesisUtterance(undefined)');
+
+test(function() {
+  const utt = new SpeechSynthesisUtterance();
+  utt.text = 'word';
+  assert_equals(utt.text, 'word');
+}, 'SpeechSynthesisUtterance text setter');
+
+// TODO: setters https://github.com/w3c/speech-api/issues/29
+</script>
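A small sketch of overriding the defaults that SpeechSynthesisUtterance-basics pins down (not part of the patch; the 0 to 1 volume range comes from the spec text quoted in the volume test below, the other values are merely illustrative):

const utterance = new SpeechSynthesisUtterance('hello');
utterance.lang = 'en-US';  // default ''
utterance.volume = 0.5;    // default 1, range 0..1
utterance.rate = 1.25;     // default 1
utterance.pitch = 0.8;     // default 1
speechSynthesis.speak(utterance);  // may require user activation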
diff --git a/testing/web-platform/tests/speech-api/SpeechSynthesisUtterance-volume-manual.html b/testing/web-platform/tests/speech-api/SpeechSynthesisUtterance-volume-manual.html
new file mode 100644
index 0000000000..6031c1dad6
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/SpeechSynthesisUtterance-volume-manual.html
@@ -0,0 +1,64 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>5.2.3 SpeechSynthesisUtterance volume attribute test - Manual</title>
+  <style>
+    div,
+    #test {
+      display: block;
+      width: 640px;
+      word-break: break-all;
+      padding: 4px;
+    }
+    #test,
+    #volume {
+      background: skyblue;
+      font-weight: bold;
+    }
+  </style>
+  <script>
+    const text = "hello universe";
+    const volumes = [0, 0.2, 0.4, 0.6, 1];
+
+    handleVoicesChanged = async _ => {
+      for (const volume of volumes) {
+        await new Promise(resolve => {
+          document.getElementById("volume").value = volume;
+          const utterance = new SpeechSynthesisUtterance();
+          utterance.text = text;
+          utterance.volume = volume;
+          utterance.onend = resolve;
+          window.speechSynthesis.speak(utterance);
+        });
+      };
+    };
+    onload = e => {
+      document.getElementById("test").onclick = _ => {
+        if (window.speechSynthesis.getVoices().length === 0) {
+          window.speechSynthesis.onvoiceschanged = handleVoicesChanged;
+        } else {
+          handleVoicesChanged();
+        }
+      };
+    };
+  </script>
+</head>
+<body>
+  <div>
+    <h3>Specification:</h3>
+    <a href="https://w3c.github.io/speech-api/speechapi.html#utterance-attributes"><b><code><i><u>volume</u></i></code> attribute</b></a>
+    <blockquote>
+      This attribute specifies the speaking volume for the utterance. It ranges between 0 and 1 inclusive, with 0 being the lowest volume and 1 the highest volume, with a default of 1. If SSML is used, this value will be overridden by prosody tags in the markup.
+    </blockquote>
+  </div>
+  <div id="test">
+    Click to execute <code>window.speechSynthesis.speak()</code> with <code>volume attribute</code> set to <code>0, 0.2, 0.4, 0.6, 1.</code>
+  </div>
+  <br>
+  <div>
+    <label for="volume">Current volume: </label>
+    <input id="volume" readonly>
+    <h3>Manual Test:</h3>Does the volume of audio output change?
+  </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/speech-api/historical.html b/testing/web-platform/tests/speech-api/historical.html
new file mode 100644
index 0000000000..99d2fab5f5
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/historical.html
@@ -0,0 +1,34 @@
+<!doctype html>
+<title>Historical Speech API features</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<div id="log"></div>
+<script>
+[
+  "SpeechRecognitionError",
+  "webkitSpeechGrammar",
+  "webkitSpeechGrammarList",
+  "webkitSpeechRecognition",
+  "webkitSpeechRecognitionError",
+  "webkitSpeechRecognitionEvent",
+].forEach(name => {
+  test(() => {
+    assert_false(name in window);
+  }, `${name} interface should not exist`);
+});
+
+test(() => {
+  assert_implements('SpeechRecognition' in window, 'SpeechRecognition exposed');
+  assert_false("serviceURI" in SpeechRecognition.prototype);
+}, "SpeechRecognition's serviceURI attribute should not exist");
+
+[
+  "interpretation",
+  "emma",
+].forEach(name => {
+  test(() => {
+    assert_implements('SpeechRecognitionEvent' in window, 'SpeechRecognitionEvent exposed');
+    assert_false(name in SpeechRecognitionEvent.prototype);
+  }, `SpeechRecognitionEvent's ${name} attribute should not exist`);
+});
+</script>
diff --git a/testing/web-platform/tests/speech-api/idlharness.window.js b/testing/web-platform/tests/speech-api/idlharness.window.js
new file mode 100644
index 0000000000..77eb6a1be9
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/idlharness.window.js
@@ -0,0 +1,51 @@
+// META: script=/resources/WebIDLParser.js
+// META: script=/resources/idlharness.js
+// META: timeout=long
+
+'use strict';
+
+// https://w3c.github.io/speech-api/#dom-speechsynthesis-getvoices can
+// return an empty list and a voiceschanged event is fired if the list of
+// voices is determined asynchronously.
+function getVoices() {
+  return new Promise(resolve => {
+    const voices = speechSynthesis.getVoices();
+    if (voices.length) {
+      resolve(voices);
+    } else {
+      // wait for voiceschanged event
+      speechSynthesis.addEventListener('voiceschanged', () => {
+        resolve(speechSynthesis.getVoices());
+      }, { once: true });
+    }
+  });
+}
+
+idl_test(
+  ['speech-api'],
+  ['dom', 'html'],
+  (idl_array, t) => {
+    idl_array.add_objects({
+      SpeechGrammar: ['new SpeechGrammar()'],
+      SpeechGrammarList: ['new SpeechGrammarList()'],
+      SpeechRecognition: ['new SpeechRecognition()'],
+      // TODO: SpeechRecognitionAlternative
+      // TODO: SpeechRecognitionErrorEvent
+      // TODO: SpeechRecognitionEvent
+      // TODO: SpeechRecognitionResult
+      // TODO: SpeechRecognitionResultList
+      SpeechSynthesis: ['speechSynthesis'],
+      // TODO: SpeechSynthesisErrorEvent
+      // TODO: SpeechSynthesisEvent
+      SpeechSynthesisUtterance: ['new SpeechSynthesisUtterance()'],
+      SpeechSynthesisVoice: ['voice'],
+      Window: ['self'],
+    });
+
+    const awaitVoice = getVoices().then(voices => self.voice = voices[0]);
+    const timeout = new Promise((_, reject) => {
+      t.step_timeout(() => reject('Timed out waiting for voice'), 3000);
+    });
+    return Promise.race([awaitVoice, timeout]);
+  }
+);
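The getVoices() helper in idlharness.window.js exists because getVoices() may legitimately return an empty list until a voiceschanged event fires. The same pattern can be reused outside the harness; a sketch (not part of the patch; voicesReady is a hypothetical name):

function voicesReady() {
  return new Promise(resolve => {
    const voices = speechSynthesis.getVoices();
    if (voices.length) {
      resolve(voices);
    } else {
      // wait for the voice list to be populated asynchronously
      speechSynthesis.addEventListener('voiceschanged', () => {
        resolve(speechSynthesis.getVoices());
      }, { once: true });
    }
  });
}

// voicesReady().then(voices => {
//   const utterance = new SpeechSynthesisUtterance('hello');
//   utterance.voice = voices[0];
//   speechSynthesis.speak(utterance);
// });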
diff --git a/testing/web-platform/tests/speech-api/webspeech.js b/testing/web-platform/tests/speech-api/webspeech.js
new file mode 100644
index 0000000000..f2f51b694c
--- /dev/null
+++ b/testing/web-platform/tests/speech-api/webspeech.js
@@ -0,0 +1,111 @@
+var DELAY = 500; // In milliseconds.
+var TIMEOUT = 2000; // In milliseconds. Used for most tests.
+if (typeof(TIMEOUT_OVERRIDE) != 'undefined') {
+  TIMEOUT = TIMEOUT_OVERRIDE;
+}
+GLOBAL_TIMEOUT = TIMEOUT + 2000; // In milliseconds.
+setup({timeout: GLOBAL_TIMEOUT});
+var onstarted = false;
+var neverFireTest = async_test('Events that should not fire');
+var onstartTest = async_test('onstart');
+var reco = new SpeechRecognition();
+
+reco.onstart = onstartTest.step_func(function(event) {
+  assert_false(onstarted, 'onstart should only fire once.');
+  onstarted = true;
+  onstartTest.done();
+  beginTest();
+});
+
+reco.onend = function() {
+  neverFireTest.done();
+  for (var i = 0; i < doneOnEndTestList.length; i++) {
+    doneOnEndTestList[i].done();
+  }
+};
+
+function neverFireEvent(name) {
+  return neverFireTest.step_func(function(event) {
+    assert_unreached(name + ' should not fire.');
+  });
+}
+
+var doneOnEndTestList = []; // List of all test objects to call done at onend.
+
+// Tally calls to count() and test against min/max when test ends.
+// A max value of 0 indicates no maximum.
+function CountTest(name, min, max) {
+  doneOnEndTestList.push(this);
+  this.name = name;
+  this.min = min;
+  this.max = max;
+  this.sum = 0;
+  this.asyncTest = async_test(name);
+
+  this.count = function(increment) { this.sum += increment; };
+
+  this.test = function() { return this.asyncTest; };
+
+  this.done = function() {
+    var cTest = this;
+    this.asyncTest.step(function() {
+      notes.innerHTML += cTest.name + ' occurred ' + cTest.sum + ' times.<br>';
+      if (cTest.min == cTest.max) {
+        assert_equals(cTest.sum, cTest.min, cTest.name + ' occurred ' +
+            cTest.sum + ' times and should have occurred ' +
+            cTest.min + ' times.');
+      } else {
+        assert_true(cTest.sum >= cTest.min, cTest.name + ' occurred ' +
+            cTest.sum + ' times and should have occurred at least ' +
+            cTest.min + ' times.');
+        assert_true(cTest.max == 0 || cTest.sum <= cTest.max, cTest.name +
+            ' occurred ' + cTest.sum +
+            ' times and should have occurred at most ' + cTest.max + ' times.');
+      }
+      if (cTest.whenDone) {
+        cTest.whenDone();
+      }
+    });
+    this.asyncTest.done();
+  };
+}
+
+// Test for proper cycling of startEvent followed by endEvent.
+function CycleTest(name) {
+  doneOnEndTestList.push(this);
+  this.name = name;
+  this.count = 0; // Counts number of start / end cycles.
+  this.started = false; // Tracks whether last event was a start or end event.
+  this.test = async_test(name + ' start/stop');
+
+  this.startEvent = function() {
+    var cycle = this;
+    return this.test.step_func(function(event) {
+      assert_true(onstarted, cycle.name + 'start fired before onstart.');
+      assert_false(cycle.started, cycle.name + 'start fired twice without ' +
+          cycle.name + 'stop.');
+      cycle.started = true;
+    });
+  };
+
+  this.endEvent = function() {
+    var cycle = this;
+    return this.test.step_func(function(event) {
+      assert_true(cycle.started, cycle.name + 'end fired before ' +
+          cycle.name + 'start.');
+      cycle.started = false;
+      cycle.count += 1;
+    });
+  };
+
+  this.done = function() {
+    var cycle = this;
+    this.test.step(function() {
+      assert_false(cycle.started, cycle.name + 'start fired but not ' +
+          cycle.name + 'end.');
+      assert_true(cycle.count > 0, cycle.name + 'start never fired.');
+      notes.innerHTML += cycle.name + ' cycled ' + cycle.count + ' times.<br>';
+    });
+    this.test.done();
+  };
+}
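webspeech.js is the shared scaffolding for the SpeechRecognition manual tests in this patch: it creates the shared reco object, wires up the onstart/onend bookkeeping, and calls beginTest() from onstart. A sketch of the script a consuming page supplies, modelled on the abort/stop tests above (not part of the patch; testharness.js and webspeech.js must already be loaded):

// Pair up onaudiostart/onaudioend.
var audioTest = new CycleTest('onaudio');
reco.onaudiostart = audioTest.startEvent();
reco.onaudioend = audioTest.endEvent();

// Events that must not fire during a silent run.
reco.onresult = neverFireEvent('onresult');
reco.onnomatch = neverFireEvent('onnomatch');

// An event that must fire exactly once.
var errorTest = new CountTest('onerror', 1, 1);
reco.onerror = errorTest.test().step_func(function(event) {
  errorTest.count(1);
});

reco.start();

// webspeech.js invokes beginTest() from reco.onstart, so the page can end the
// session after DELAY milliseconds.
function beginTest() {
  setTimeout(function() { reco.abort(); }, DELAY);
}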