summaryrefslogtreecommitdiffstats
path: root/dom/media
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-15 03:34:50 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-15 03:34:50 +0000
commitdef92d1b8e9d373e2f6f27c366d578d97d8960c6 (patch)
tree2ef34b9ad8bb9a9220e05d60352558b15f513894 /dom/media
parentAdding debian version 125.0.3-1. (diff)
downloadfirefox-def92d1b8e9d373e2f6f27c366d578d97d8960c6.tar.xz
firefox-def92d1b8e9d373e2f6f27c366d578d97d8960c6.zip
Merging upstream version 126.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/media')
-rw-r--r--dom/media/AudioConverter.h1
-rw-r--r--dom/media/AudioPacketizer.h9
-rw-r--r--dom/media/CubebUtils.cpp56
-rw-r--r--dom/media/DOMMediaStream.cpp9
-rw-r--r--dom/media/DOMMediaStream.h10
-rw-r--r--dom/media/DecoderTraits.cpp5
-rw-r--r--dom/media/DeviceInputTrack.cpp24
-rw-r--r--dom/media/DeviceInputTrack.h19
-rw-r--r--dom/media/EncoderTraits.h5
-rw-r--r--dom/media/ExternalEngineStateMachine.cpp153
-rw-r--r--dom/media/ExternalEngineStateMachine.h20
-rw-r--r--dom/media/GraphDriver.cpp12
-rw-r--r--dom/media/MediaData.cpp9
-rw-r--r--dom/media/MediaData.h8
-rw-r--r--dom/media/MediaDecoder.cpp41
-rw-r--r--dom/media/MediaDecoder.h2
-rw-r--r--dom/media/MediaDecoderStateMachine.cpp33
-rw-r--r--dom/media/MediaDecoderStateMachine.h6
-rw-r--r--dom/media/MediaDecoderStateMachineBase.h3
-rw-r--r--dom/media/MediaFormatReader.cpp46
-rw-r--r--dom/media/MediaFormatReader.h19
-rw-r--r--dom/media/MediaInfo.h8
-rw-r--r--dom/media/MediaManager.cpp18
-rw-r--r--dom/media/MediaRecorder.cpp12
-rw-r--r--dom/media/MediaStreamWindowCapturer.cpp16
-rw-r--r--dom/media/MediaStreamWindowCapturer.h5
-rw-r--r--dom/media/MediaTrackGraph.cpp3
-rw-r--r--dom/media/MediaTrackGraph.h4
-rw-r--r--dom/media/MediaTrackGraphImpl.h9
-rw-r--r--dom/media/PeerConnection.sys.mjs59
-rw-r--r--dom/media/TimedPacketizer.h73
-rw-r--r--dom/media/VideoUtils.cpp7
-rw-r--r--dom/media/VideoUtils.h28
-rw-r--r--dom/media/autoplay/test/mochitest/mochitest.toml2
-rw-r--r--dom/media/autoplay/test/mochitest/test_autoplay_policy_play_before_loadedmetadata.html4
-rw-r--r--dom/media/eme/EMEUtils.cpp79
-rw-r--r--dom/media/eme/EMEUtils.h12
-rw-r--r--dom/media/eme/KeySystemConfig.cpp363
-rw-r--r--dom/media/eme/KeySystemConfig.h36
-rw-r--r--dom/media/eme/MediaKeySession.cpp67
-rw-r--r--dom/media/eme/MediaKeySession.h6
-rw-r--r--dom/media/eme/MediaKeySystemAccess.cpp196
-rw-r--r--dom/media/eme/MediaKeySystemAccess.h16
-rw-r--r--dom/media/eme/MediaKeySystemAccessManager.cpp67
-rw-r--r--dom/media/eme/MediaKeySystemAccessManager.h2
-rw-r--r--dom/media/eme/mediafoundation/WMFCDMImpl.cpp155
-rw-r--r--dom/media/eme/mediafoundation/WMFCDMImpl.h24
-rw-r--r--dom/media/eme/mediafoundation/WMFCDMProxy.cpp2
-rw-r--r--dom/media/eme/metrics.yaml42
-rw-r--r--dom/media/gmp/ChromiumCDMChild.cpp2
-rw-r--r--dom/media/gmp/ChromiumCDMProxy.cpp9
-rw-r--r--dom/media/gmp/GMPChild.cpp11
-rw-r--r--dom/media/gmp/moz.build2
-rw-r--r--dom/media/gmp/mozIGeckoMediaPluginChromeService.idl4
-rw-r--r--dom/media/gmp/widevine-adapter/content_decryption_module.h1
-rw-r--r--dom/media/gtest/TestAudioInputProcessing.cpp127
-rw-r--r--dom/media/gtest/TestAudioPacketizer.cpp51
-rw-r--r--dom/media/gtest/TestAudioTrackGraph.cpp311
-rw-r--r--dom/media/gtest/TestDeviceInputTrack.cpp8
-rw-r--r--dom/media/gtest/TestMediaDataEncoder.cpp125
-rw-r--r--dom/media/hls/HLSDecoder.cpp46
-rw-r--r--dom/media/hls/HLSDecoder.h10
-rw-r--r--dom/media/hls/metrics.yaml70
-rw-r--r--dom/media/ipc/MFCDMChild.cpp34
-rw-r--r--dom/media/ipc/MFCDMChild.h3
-rw-r--r--dom/media/ipc/MFCDMParent.cpp229
-rw-r--r--dom/media/ipc/MFCDMParent.h12
-rw-r--r--dom/media/ipc/MFMediaEngineChild.cpp33
-rw-r--r--dom/media/ipc/MFMediaEngineChild.h9
-rw-r--r--dom/media/ipc/MFMediaEngineParent.cpp15
-rw-r--r--dom/media/ipc/MFMediaEngineParent.h2
-rw-r--r--dom/media/ipc/MediaIPCUtils.h3
-rw-r--r--dom/media/ipc/PMFCDM.ipdl11
-rw-r--r--dom/media/ipc/PMFMediaEngine.ipdl2
-rw-r--r--dom/media/ipc/RemoteMediaDataDecoder.cpp17
-rw-r--r--dom/media/ipc/RemoteMediaDataDecoder.h16
-rw-r--r--dom/media/mediacontrol/ContentMediaController.cpp6
-rw-r--r--dom/media/mediacontrol/ContentPlaybackController.cpp4
-rw-r--r--dom/media/mediacontrol/MediaControlKeyManager.cpp2
-rw-r--r--dom/media/mediacontrol/MediaControlUtils.h56
-rw-r--r--dom/media/mediacontrol/MediaStatusManager.cpp10
-rw-r--r--dom/media/mediacontrol/tests/browser/browser.toml1
-rw-r--r--dom/media/mediacontrol/tests/browser/file_error_media.html2
-rw-r--r--dom/media/metrics.yaml28
-rw-r--r--dom/media/moz.build1
-rw-r--r--dom/media/ogg/OggDecoder.cpp10
-rw-r--r--dom/media/platforms/EncoderConfig.cpp27
-rw-r--r--dom/media/platforms/EncoderConfig.h190
-rw-r--r--dom/media/platforms/PlatformEncoderModule.cpp33
-rw-r--r--dom/media/platforms/PlatformEncoderModule.h193
-rw-r--r--dom/media/platforms/agnostic/AgnosticDecoderModule.cpp12
-rw-r--r--dom/media/platforms/agnostic/bytestreams/H264.cpp3
-rw-r--r--dom/media/platforms/agnostic/bytestreams/H264.h36
-rw-r--r--dom/media/platforms/apple/AppleDecoderModule.cpp49
-rw-r--r--dom/media/platforms/apple/AppleDecoderModule.h1
-rw-r--r--dom/media/platforms/apple/AppleVTDecoder.cpp32
-rw-r--r--dom/media/platforms/apple/AppleVTDecoder.h2
-rw-r--r--dom/media/platforms/apple/AppleVTEncoder.cpp56
-rw-r--r--dom/media/platforms/apple/AppleVTEncoder.h5
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp458
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegAudioEncoder.h70
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp20
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegDataEncoder.cpp495
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegDataEncoder.h107
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp18
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegEncoderModule.h4
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp3
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegLibWrapper.h5
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegLog.h11
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegUtils.cpp23
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegUtils.h56
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp8
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp571
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h73
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/moz.build2
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg58/moz.build2
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg59/moz.build2
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg60/moz.build2
-rw-r--r--dom/media/platforms/ffmpeg/ffvpx/moz.build3
-rw-r--r--dom/media/platforms/ffmpeg/libav53/moz.build2
-rw-r--r--dom/media/platforms/ffmpeg/libav54/moz.build2
-rw-r--r--dom/media/platforms/ffmpeg/libav55/moz.build2
-rw-r--r--dom/media/platforms/ffmpeg/moz.build4
-rw-r--r--dom/media/platforms/moz.build2
-rw-r--r--dom/media/platforms/wmf/DXVA2Manager.cpp73
-rw-r--r--dom/media/platforms/wmf/MFCDMSession.cpp3
-rw-r--r--dom/media/platforms/wmf/MFMediaEngineStream.cpp9
-rw-r--r--dom/media/platforms/wmf/WMFDataEncoderUtils.h1
-rw-r--r--dom/media/platforms/wmf/WMFEncoderModule.cpp7
-rw-r--r--dom/media/platforms/wmf/WMFUtils.cpp4
-rw-r--r--dom/media/platforms/wrappers/MediaChangeMonitor.cpp7
-rw-r--r--dom/media/platforms/wrappers/MediaChangeMonitor.h30
-rw-r--r--dom/media/test/320x240.ogvbin28942 -> 0 bytes
-rw-r--r--dom/media/test/320x240.webmbin0 -> 50163 bytes
-rw-r--r--dom/media/test/320x240.webm^headers^ (renamed from dom/media/test/320x240.ogv^headers^)0
-rw-r--r--dom/media/test/448636.ogvbin7799 -> 0 bytes
-rw-r--r--dom/media/test/bogus.ogv45
-rw-r--r--dom/media/test/browser/browser_glean_first_frame_loaded_time.js12
-rw-r--r--dom/media/test/browser/head.js118
-rw-r--r--dom/media/test/bug482461-theora.ogvbin280904 -> 0 bytes
-rw-r--r--dom/media/test/bug482461.ogvbin305785 -> 0 bytes
-rw-r--r--dom/media/test/bug495129.ogvbin122207 -> 0 bytes
-rw-r--r--dom/media/test/bug498380.ogvbin65535 -> 0 bytes
-rw-r--r--dom/media/test/bug498855-1.ogvbin20480 -> 0 bytes
-rw-r--r--dom/media/test/bug498855-2.ogvbin20480 -> 0 bytes
-rw-r--r--dom/media/test/bug498855-3.ogvbin20480 -> 0 bytes
-rw-r--r--dom/media/test/bug499519.ogvbin20480 -> 0 bytes
-rw-r--r--dom/media/test/bug500311.ogvbin55834 -> 0 bytes
-rw-r--r--dom/media/test/bug504613.ogvbin35000 -> 0 bytes
-rw-r--r--dom/media/test/bug504644.ogvbin131114 -> 0 bytes
-rw-r--r--dom/media/test/bug504843.ogvbin65536 -> 0 bytes
-rw-r--r--dom/media/test/bug506094.ogvbin8195 -> 0 bytes
-rw-r--r--dom/media/test/bug516323.indexed.ogvbin162193 -> 0 bytes
-rw-r--r--dom/media/test/bug516323.ogvbin161789 -> 0 bytes
-rw-r--r--dom/media/test/bug523816.ogvbin40585 -> 0 bytes
-rw-r--r--dom/media/test/bug556821.ogvbin196608 -> 0 bytes
-rw-r--r--dom/media/test/bug557094.ogvbin76966 -> 0 bytes
-rw-r--r--dom/media/test/can_play_type_ogg.js21
-rw-r--r--dom/media/test/chained-video.ogvbin57906 -> 0 bytes
-rw-r--r--dom/media/test/crashtests/576612-1.html15
-rw-r--r--dom/media/test/make-headers.sh4
-rw-r--r--dom/media/test/manifest.js283
-rw-r--r--dom/media/test/mochitest.toml67
-rw-r--r--dom/media/test/mochitest_background_video.toml68
-rw-r--r--dom/media/test/mochitest_bugs.toml68
-rw-r--r--dom/media/test/mochitest_compat.toml72
-rw-r--r--dom/media/test/mochitest_eme.toml68
-rw-r--r--dom/media/test/mochitest_eme_compat.toml68
-rw-r--r--dom/media/test/mochitest_media_recorder.toml68
-rw-r--r--dom/media/test/mochitest_seek.toml68
-rw-r--r--dom/media/test/mochitest_stream.toml76
-rw-r--r--dom/media/test/multiple-bos.oggbin33045 -> 0 bytes
-rw-r--r--dom/media/test/reftest/color_quads/reftest.list2
-rw-r--r--dom/media/test/reftest/reftest.list6
-rw-r--r--dom/media/test/sample-fisbone-skeleton4.ogvbin8747 -> 0 bytes
-rw-r--r--dom/media/test/sample-fisbone-wrong-header.ogvbin8703 -> 0 bytes
-rw-r--r--dom/media/test/seek-short.ogvbin79921 -> 0 bytes
-rw-r--r--dom/media/test/seek.ogvbin285310 -> 0 bytes
-rw-r--r--dom/media/test/seekLies.sjs4
-rw-r--r--dom/media/test/short-video.ogvbin16049 -> 0 bytes
-rw-r--r--dom/media/test/test_bug1248229.html2
-rw-r--r--dom/media/test/test_closing_connections.html2
-rw-r--r--dom/media/test/test_decoder_disable.html4
-rw-r--r--dom/media/test/test_error_in_video_document.html9
-rw-r--r--dom/media/test/test_load_same_resource.html2
-rw-r--r--dom/media/test/test_media_selection.html2
-rw-r--r--dom/media/test/test_preload_suspend.html2
-rw-r--r--dom/media/test/test_standalone.html63
-rw-r--r--dom/media/test/test_streams_element_capture.html5
-rw-r--r--dom/media/test/test_streams_element_capture_twice.html2
-rw-r--r--dom/media/test/test_videoDocumentTitle.html4
-rw-r--r--dom/media/test/test_video_stats_resistfingerprinting.html1
-rw-r--r--dom/media/utils/PerformanceRecorder.cpp2
-rw-r--r--dom/media/utils/PerformanceRecorder.h1
-rw-r--r--dom/media/utils/TelemetryProbesReporter.cpp71
-rw-r--r--dom/media/utils/TelemetryProbesReporter.h22
-rw-r--r--dom/media/webaudio/MediaStreamAudioSourceNode.cpp18
-rw-r--r--dom/media/webaudio/MediaStreamAudioSourceNode.h27
-rw-r--r--dom/media/webaudio/test/mochitest_audio.toml1
-rw-r--r--dom/media/webaudio/test/test_mediaDecoding.html12
-rw-r--r--dom/media/webcodecs/AudioData.cpp84
-rw-r--r--dom/media/webcodecs/AudioData.h7
-rw-r--r--dom/media/webcodecs/AudioDecoder.cpp69
-rw-r--r--dom/media/webcodecs/AudioEncoder.cpp488
-rw-r--r--dom/media/webcodecs/AudioEncoder.h76
-rw-r--r--dom/media/webcodecs/DecoderTemplate.cpp4
-rw-r--r--dom/media/webcodecs/DecoderTypes.h44
-rw-r--r--dom/media/webcodecs/EncoderTemplate.cpp522
-rw-r--r--dom/media/webcodecs/EncoderTemplate.h56
-rw-r--r--dom/media/webcodecs/EncoderTypes.h74
-rw-r--r--dom/media/webcodecs/VideoDecoder.cpp34
-rw-r--r--dom/media/webcodecs/VideoEncoder.cpp42
-rw-r--r--dom/media/webcodecs/VideoEncoder.h2
-rw-r--r--dom/media/webcodecs/WebCodecsUtils.cpp71
-rw-r--r--dom/media/webcodecs/WebCodecsUtils.h17
-rw-r--r--dom/media/webcodecs/crashtests/1881079.html35
-rw-r--r--dom/media/webcodecs/crashtests/crashtests.list4
-rw-r--r--dom/media/webcodecs/moz.build2
-rw-r--r--dom/media/webrtc/MediaEnginePrefs.h2
-rw-r--r--dom/media/webrtc/MediaEngineWebRTCAudio.cpp279
-rw-r--r--dom/media/webrtc/MediaEngineWebRTCAudio.h57
-rw-r--r--dom/media/webrtc/jsapi/PeerConnectionCtx.cpp5
-rw-r--r--dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp16
-rw-r--r--dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp2
-rw-r--r--dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h2
-rw-r--r--dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp2
-rw-r--r--dom/media/webrtc/libwebrtcglue/AudioConduit.cpp2
-rw-r--r--dom/media/webrtc/libwebrtcglue/VideoConduit.cpp2
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp17
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h2
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp20
-rw-r--r--dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h2
-rw-r--r--dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html9
-rw-r--r--dom/media/webrtc/third_party_build/default_config_env20
-rw-r--r--dom/media/webrtc/third_party_build/elm_rebase.sh15
-rw-r--r--dom/media/webrtc/third_party_build/fetch_github_repo.py4
-rw-r--r--dom/media/webrtc/third_party_build/vendor-libwebrtc.py1
-rw-r--r--dom/media/webrtc/transport/test/ice_unittest.cpp19
-rw-r--r--dom/media/webspeech/recognition/SpeechRecognition.cpp25
-rw-r--r--dom/media/webspeech/recognition/SpeechRecognition.h22
-rw-r--r--dom/media/webspeech/synth/nsISynthVoiceRegistry.idl4
-rw-r--r--dom/media/webvtt/TextTrack.cpp36
242 files changed, 5599 insertions, 3788 deletions
diff --git a/dom/media/AudioConverter.h b/dom/media/AudioConverter.h
index 0ace580b26..c8cbbb7949 100644
--- a/dom/media/AudioConverter.h
+++ b/dom/media/AudioConverter.h
@@ -137,7 +137,6 @@ class AudioConverter {
AlignedBuffer<Value> temp = buffer.Forget();
Process(temp, temp.Data(), SamplesInToFrames(temp.Length()));
return AudioDataBuffer<Format, Value>(std::move(temp));
- ;
}
return Process(buffer);
}
diff --git a/dom/media/AudioPacketizer.h b/dom/media/AudioPacketizer.h
index 8df04c0c5c..17579618ea 100644
--- a/dom/media/AudioPacketizer.h
+++ b/dom/media/AudioPacketizer.h
@@ -100,11 +100,15 @@ class AudioPacketizer {
return out;
}
- void Output(OutputType* aOutputBuffer) {
+ // Return the number of actual frames dequeued -- this can be lower than the
+ // packet size when underruning or draining.
+ size_t Output(OutputType* aOutputBuffer) {
uint32_t samplesNeeded = mPacketSize * mChannels;
+ size_t rv = 0;
// Under-run. Pad the end of the buffer with silence.
if (AvailableSamples() < samplesNeeded) {
+ rv = AvailableSamples() / mChannels;
#ifdef LOG_PACKETIZER_UNDERRUN
char buf[256];
snprintf(buf, 256,
@@ -115,6 +119,8 @@ class AudioPacketizer {
uint32_t zeros = samplesNeeded - AvailableSamples();
PodZero(aOutputBuffer + AvailableSamples(), zeros);
samplesNeeded -= zeros;
+ } else {
+ rv = mPacketSize;
}
if (ReadIndex() + samplesNeeded <= mLength) {
ConvertAudioSamples<InputType, OutputType>(mStorage.get() + ReadIndex(),
@@ -128,6 +134,7 @@ class AudioPacketizer {
mStorage.get(), aOutputBuffer + firstPartLength, secondPartLength);
}
mReadIndex += samplesNeeded;
+ return rv;
}
void Clear() {
diff --git a/dom/media/CubebUtils.cpp b/dom/media/CubebUtils.cpp
index bad1ab649d..dbdab3a56a 100644
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -300,12 +300,10 @@ RefPtr<CubebHandle> GetCubeb() {
// This is only exported when running tests.
void ForceSetCubebContext(cubeb* aCubebContext) {
+ RefPtr<CubebHandle> oldHandle; // For release without sMutex
StaticMutexAutoLock lock(sMutex);
- if (aCubebContext) {
- sCubebHandle = new CubebHandle(aCubebContext);
- } else {
- sCubebHandle = nullptr;
- }
+ oldHandle = sCubebHandle.forget();
+ sCubebHandle = aCubebContext ? new CubebHandle(aCubebContext) : nullptr;
sCubebState = CubebState::Initialized;
}
@@ -384,10 +382,33 @@ int CubebStreamInit(cubeb* context, cubeb_stream** stream,
if (ms) {
std::this_thread::sleep_for(std::chrono::milliseconds(ms));
}
- return cubeb_stream_init(context, stream, stream_name, input_device,
- input_stream_params, output_device,
- output_stream_params, latency_frames, data_callback,
- state_callback, user_ptr);
+ cubeb_stream_params inputParamData;
+ cubeb_stream_params outputParamData;
+ cubeb_stream_params* inputParamPtr = input_stream_params;
+ cubeb_stream_params* outputParamPtr = output_stream_params;
+ if (input_stream_params && !output_stream_params) {
+ inputParamData = *input_stream_params;
+ inputParamData.rate = llround(
+ static_cast<double>(StaticPrefs::media_cubeb_input_drift_factor()) *
+ inputParamData.rate);
+ MOZ_LOG(
+ gCubebLog, LogLevel::Info,
+ ("CubebStreamInit input stream rate %" PRIu32, inputParamData.rate));
+ inputParamPtr = &inputParamData;
+ } else if (output_stream_params && !input_stream_params) {
+ outputParamData = *output_stream_params;
+ outputParamData.rate = llround(
+ static_cast<double>(StaticPrefs::media_cubeb_output_drift_factor()) *
+ outputParamData.rate);
+ MOZ_LOG(
+ gCubebLog, LogLevel::Info,
+ ("CubebStreamInit output stream rate %" PRIu32, outputParamData.rate));
+ outputParamPtr = &outputParamData;
+ }
+
+ return cubeb_stream_init(
+ context, stream, stream_name, input_device, inputParamPtr, output_device,
+ outputParamPtr, latency_frames, data_callback, state_callback, user_ptr);
}
void InitBrandName() {
@@ -655,15 +676,20 @@ uint32_t GetCubebMTGLatencyInFrames(cubeb_stream_params* params) {
}
static const char* gInitCallbackPrefs[] = {
- PREF_VOLUME_SCALE, PREF_CUBEB_OUTPUT_DEVICE,
- PREF_CUBEB_LATENCY_PLAYBACK, PREF_CUBEB_LATENCY_MTG,
- PREF_CUBEB_BACKEND, PREF_CUBEB_FORCE_NULL_CONTEXT,
- PREF_CUBEB_SANDBOX, PREF_AUDIOIPC_STACK_SIZE,
- PREF_AUDIOIPC_SHM_AREA_SIZE, nullptr,
+ PREF_VOLUME_SCALE,
+ PREF_CUBEB_OUTPUT_DEVICE,
+ PREF_CUBEB_LATENCY_PLAYBACK,
+ PREF_CUBEB_LATENCY_MTG,
+ PREF_CUBEB_BACKEND,
+ PREF_CUBEB_FORCE_SAMPLE_RATE,
+ PREF_CUBEB_FORCE_NULL_CONTEXT,
+ PREF_CUBEB_SANDBOX,
+ PREF_AUDIOIPC_STACK_SIZE,
+ PREF_AUDIOIPC_SHM_AREA_SIZE,
+ nullptr,
};
static const char* gCallbackPrefs[] = {
- PREF_CUBEB_FORCE_SAMPLE_RATE,
// We don't want to call the callback on startup, because the pref is the
// empty string by default ("", which means "logging disabled"). Because the
// logging can be enabled via environment variables (MOZ_LOG="module:5"),
diff --git a/dom/media/DOMMediaStream.cpp b/dom/media/DOMMediaStream.cpp
index 5031882c19..2c1e2ea514 100644
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -99,6 +99,7 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(DOMMediaStream,
tmp->Destroy();
NS_IMPL_CYCLE_COLLECTION_UNLINK(mTracks)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mConsumersToKeepAlive)
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(mTrackListeners)
NS_IMPL_CYCLE_COLLECTION_UNLINK_WEAK_PTR
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
@@ -106,6 +107,7 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(DOMMediaStream,
DOMEventTargetHelper)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mTracks)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mConsumersToKeepAlive)
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mTrackListeners)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_ADDREF_INHERITED(DOMMediaStream, DOMEventTargetHelper)
@@ -115,6 +117,13 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DOMMediaStream)
NS_INTERFACE_MAP_ENTRY_CONCRETE(DOMMediaStream)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
+NS_IMPL_CYCLE_COLLECTION(DOMMediaStream::TrackListener)
+NS_IMPL_CYCLE_COLLECTING_ADDREF(DOMMediaStream::TrackListener)
+NS_IMPL_CYCLE_COLLECTING_RELEASE(DOMMediaStream::TrackListener)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DOMMediaStream::TrackListener)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
DOMMediaStream::DOMMediaStream(nsPIDOMWindowInner* aWindow)
: DOMEventTargetHelper(aWindow),
mPlaybackTrackListener(MakeAndAddRef<PlaybackTrackListener>(this)) {
diff --git a/dom/media/DOMMediaStream.h b/dom/media/DOMMediaStream.h
index b0a9f895bb..bfce7b65f0 100644
--- a/dom/media/DOMMediaStream.h
+++ b/dom/media/DOMMediaStream.h
@@ -59,9 +59,10 @@ class DOMMediaStream : public DOMEventTargetHelper,
public:
typedef dom::MediaTrackConstraints MediaTrackConstraints;
- class TrackListener {
+ class TrackListener : public nsISupports {
public:
- virtual ~TrackListener() = default;
+ NS_DECL_CYCLE_COLLECTING_ISUPPORTS
+ NS_DECL_CYCLE_COLLECTION_CLASS(TrackListener)
/**
* Called when the DOMMediaStream has a live track added, either by
@@ -94,6 +95,9 @@ class DOMMediaStream : public DOMEventTargetHelper,
* Called when the DOMMediaStream has become inaudible.
*/
virtual void NotifyInaudible(){};
+
+ protected:
+ virtual ~TrackListener() = default;
};
explicit DOMMediaStream(nsPIDOMWindowInner* aWindow);
@@ -236,7 +240,7 @@ class DOMMediaStream : public DOMEventTargetHelper,
nsTArray<nsCOMPtr<nsISupports>> mConsumersToKeepAlive;
// The track listeners subscribe to changes in this stream's track set.
- nsTArray<TrackListener*> mTrackListeners;
+ nsTArray<RefPtr<TrackListener>> mTrackListeners;
// True if this stream has live tracks.
bool mActive = false;
diff --git a/dom/media/DecoderTraits.cpp b/dom/media/DecoderTraits.cpp
index af4d08ae4b..2ad76587ae 100644
--- a/dom/media/DecoderTraits.cpp
+++ b/dom/media/DecoderTraits.cpp
@@ -6,6 +6,7 @@
#include "DecoderTraits.h"
#include "MediaContainerType.h"
+#include "mozilla/glean/GleanMetrics.h"
#include "mozilla/Preferences.h"
#include "OggDecoder.h"
@@ -127,11 +128,11 @@ static CanPlayStatus CanHandleCodecsType(
static CanPlayStatus CanHandleMediaType(
const MediaContainerType& aType, DecoderDoctorDiagnostics* aDiagnostics) {
if (DecoderTraits::IsHttpLiveStreamingType(aType)) {
- Telemetry::Accumulate(Telemetry::MEDIA_HLS_CANPLAY_REQUESTED, true);
+ glean::hls::canplay_requested.Add();
}
#ifdef MOZ_ANDROID_HLS_SUPPORT
if (HLSDecoder::IsSupportedType(aType)) {
- Telemetry::Accumulate(Telemetry::MEDIA_HLS_CANPLAY_SUPPORTED, true);
+ glean::hls::canplay_supported.Add();
return CANPLAY_MAYBE;
}
#endif
diff --git a/dom/media/DeviceInputTrack.cpp b/dom/media/DeviceInputTrack.cpp
index 87d1ae73ab..5d69f7107a 100644
--- a/dom/media/DeviceInputTrack.cpp
+++ b/dom/media/DeviceInputTrack.cpp
@@ -127,28 +127,42 @@ NotNull<AudioDataListener*> DeviceInputConsumerTrack::GetAudioDataListener()
return WrapNotNull(mListener.get());
}
-bool DeviceInputConsumerTrack::ConnectToNativeDevice() const {
+bool DeviceInputConsumerTrack::ConnectedToNativeDevice() const {
MOZ_ASSERT(NS_IsMainThread());
return mDeviceInputTrack && mDeviceInputTrack->AsNativeInputTrack();
}
-bool DeviceInputConsumerTrack::ConnectToNonNativeDevice() const {
+bool DeviceInputConsumerTrack::ConnectedToNonNativeDevice() const {
MOZ_ASSERT(NS_IsMainThread());
return mDeviceInputTrack && mDeviceInputTrack->AsNonNativeInputTrack();
}
+DeviceInputTrack* DeviceInputConsumerTrack::GetDeviceInputTrackGraphThread()
+ const {
+ AssertOnGraphThread();
+
+ if (mInputs.IsEmpty()) {
+ return nullptr;
+ }
+ MOZ_ASSERT(mInputs.Length() == 1);
+ MediaTrack* track = mInputs[0]->GetSource();
+ MOZ_ASSERT(track->AsDeviceInputTrack());
+ return static_cast<DeviceInputTrack*>(track);
+}
+
void DeviceInputConsumerTrack::GetInputSourceData(AudioSegment& aOutput,
- const MediaInputPort* aPort,
GraphTime aFrom,
GraphTime aTo) const {
AssertOnGraphThread();
MOZ_ASSERT(aOutput.IsEmpty());
+ MOZ_ASSERT(mInputs.Length() == 1);
- MediaTrack* source = aPort->GetSource();
+ MediaInputPort* port = mInputs[0];
+ MediaTrack* source = port->GetSource();
GraphTime next;
for (GraphTime t = aFrom; t < aTo; t = next) {
MediaInputPort::InputInterval interval =
- MediaInputPort::GetNextInputInterval(aPort, t);
+ MediaInputPort::GetNextInputInterval(port, t);
interval.mEnd = std::min(interval.mEnd, aTo);
const bool inputEnded =
diff --git a/dom/media/DeviceInputTrack.h b/dom/media/DeviceInputTrack.h
index 6206dc0dfc..0a92ded13c 100644
--- a/dom/media/DeviceInputTrack.h
+++ b/dom/media/DeviceInputTrack.h
@@ -44,8 +44,7 @@ class NonNativeInputTrack;
// } else {
// MOZ_ASSERT(mInputs.Length() == 1);
// AudioSegment data;
-// DeviceInputConsumerTrack::GetInputSourceData(data, mInputs[0], aFrom,
-// aTo);
+// DeviceInputConsumerTrack::GetInputSourceData(data, aFrom, aTo);
// // You can do audio data processing before appending to mSegment here.
// GetData<AudioSegment>()->AppendFrom(&data);
// }
@@ -77,20 +76,22 @@ class DeviceInputConsumerTrack : public ProcessedMediaTrack {
void DisconnectDeviceInput();
Maybe<CubebUtils::AudioDeviceID> DeviceId() const;
NotNull<AudioDataListener*> GetAudioDataListener() const;
- bool ConnectToNativeDevice() const;
- bool ConnectToNonNativeDevice() const;
+ bool ConnectedToNativeDevice() const;
+ bool ConnectedToNonNativeDevice() const;
// Any thread:
DeviceInputConsumerTrack* AsDeviceInputConsumerTrack() override {
return this;
}
- protected:
// Graph thread API:
- // Get the data in [aFrom, aTo) from aPort->GetSource() to aOutput. aOutput
- // needs to be empty.
- void GetInputSourceData(AudioSegment& aOutput, const MediaInputPort* aPort,
- GraphTime aFrom, GraphTime aTo) const;
+ DeviceInputTrack* GetDeviceInputTrackGraphThread() const;
+
+ protected:
+ // Get the data in [aFrom, aTo) from the device input to aOutput. aOutput
+ // needs to be empty. A device input must be connected. Graph thread.
+ void GetInputSourceData(AudioSegment& aOutput, GraphTime aFrom,
+ GraphTime aTo) const;
// Main Thread variables:
RefPtr<MediaInputPort> mPort;
diff --git a/dom/media/EncoderTraits.h b/dom/media/EncoderTraits.h
index d96bc37e4a..ead78a8c40 100644
--- a/dom/media/EncoderTraits.h
+++ b/dom/media/EncoderTraits.h
@@ -12,12 +12,13 @@
namespace mozilla::EncoderSupport {
-bool Supports(const RefPtr<dom::VideoEncoderConfigInternal>& aEncoderConfigInternal) {
+template <typename T>
+bool Supports(const RefPtr<T>& aEncoderConfigInternal) {
RefPtr<PEMFactory> factory = new PEMFactory();
EncoderConfig config = aEncoderConfigInternal->ToEncoderConfig();
return factory->Supports(config);
}
-} // namespace mozilla
+} // namespace mozilla::EncoderSupport
#endif
diff --git a/dom/media/ExternalEngineStateMachine.cpp b/dom/media/ExternalEngineStateMachine.cpp
index acfc1f5fa2..1728728dc6 100644
--- a/dom/media/ExternalEngineStateMachine.cpp
+++ b/dom/media/ExternalEngineStateMachine.cpp
@@ -164,11 +164,11 @@ void ExternalEngineStateMachine::ChangeStateTo(State aNextState) {
LOG("Change state : '%s' -> '%s' (play-state=%d)", StateToStr(mState.mName),
StateToStr(aNextState), mPlayState.Ref());
// Assert the possible state transitions.
- MOZ_ASSERT_IF(mState.IsInitEngine(), aNextState == State::ReadingMetadata ||
+ MOZ_ASSERT_IF(
+ mState.IsReadingMetadata(),
+ aNextState == State::InitEngine || aNextState == State::ShutdownEngine);
+ MOZ_ASSERT_IF(mState.IsInitEngine(), aNextState == State::RunningEngine ||
aNextState == State::ShutdownEngine);
- MOZ_ASSERT_IF(mState.IsReadingMetadata(),
- aNextState == State::RunningEngine ||
- aNextState == State::ShutdownEngine);
MOZ_ASSERT_IF(mState.IsRunningEngine(),
aNextState == State::SeekingData ||
aNextState == State::ShutdownEngine ||
@@ -183,8 +183,8 @@ void ExternalEngineStateMachine::ChangeStateTo(State aNextState) {
aNextState == State::SeekingData || aNextState == State::ShutdownEngine);
if (aNextState == State::SeekingData) {
mState = StateObject({StateObject::SeekingData()});
- } else if (aNextState == State::ReadingMetadata) {
- mState = StateObject({StateObject::ReadingMetadata()});
+ } else if (aNextState == State::InitEngine) {
+ mState = StateObject({StateObject::InitEngine()});
} else if (aNextState == State::RunningEngine) {
mState = StateObject({StateObject::RunningEngine()});
} else if (aNextState == State::ShutdownEngine) {
@@ -200,8 +200,8 @@ ExternalEngineStateMachine::ExternalEngineStateMachine(
MediaDecoder* aDecoder, MediaFormatReader* aReader)
: MediaDecoderStateMachineBase(aDecoder, aReader) {
LOG("Created ExternalEngineStateMachine");
- MOZ_ASSERT(mState.IsInitEngine());
- InitEngine();
+ MOZ_ASSERT(mState.IsReadingMetadata());
+ ReadMetadata();
}
ExternalEngineStateMachine::~ExternalEngineStateMachine() {
@@ -214,8 +214,9 @@ void ExternalEngineStateMachine::InitEngine() {
mEngine.reset(new MFMediaEngineWrapper(this, mFrameStats));
#endif
if (mEngine) {
+ MOZ_ASSERT(mInfo);
auto* state = mState.AsInitEngine();
- state->mInitPromise = mEngine->Init(!mMinimizePreroll);
+ state->mInitPromise = mEngine->Init(*mInfo, !mMinimizePreroll);
state->mInitPromise
->Then(OwnerThread(), __func__, this,
&ExternalEngineStateMachine::OnEngineInitSuccess,
@@ -235,14 +236,10 @@ void ExternalEngineStateMachine::OnEngineInitSuccess() {
mReader->UpdateMediaEngineId(mEngine->Id());
state->mInitPromise = nullptr;
if (mState.IsInitEngine()) {
- ChangeStateTo(State::ReadingMetadata);
- ReadMetadata();
+ StartRunningEngine();
return;
}
- // We just recovered from CDM process crash, so we need to update the media
- // info to the new CDM process.
- MOZ_ASSERT(mInfo);
- mEngine->SetMediaInfo(*mInfo);
+ // We just recovered from CDM process crash, seek to previous position.
SeekTarget target(mCurrentPosition.Ref(), SeekTarget::Type::Accurate);
Seek(target);
}
@@ -260,13 +257,17 @@ void ExternalEngineStateMachine::OnEngineInitFailure() {
}
void ExternalEngineStateMachine::ReadMetadata() {
- AssertOnTaskQueue();
+ MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mState.IsReadingMetadata());
- mReader->ReadMetadata()
- ->Then(OwnerThread(), __func__, this,
- &ExternalEngineStateMachine::OnMetadataRead,
- &ExternalEngineStateMachine::OnMetadataNotRead)
- ->Track(mState.AsReadingMetadata()->mMetadataRequest);
+ Unused << OwnerThread()->Dispatch(NS_NewRunnableFunction(
+ "ExternalEngineStateMachine::ReadMetadata",
+ [self = RefPtr<ExternalEngineStateMachine>{this}, this] {
+ mReader->ReadMetadata()
+ ->Then(OwnerThread(), __func__, this,
+ &ExternalEngineStateMachine::OnMetadataRead,
+ &ExternalEngineStateMachine::OnMetadataNotRead)
+ ->Track(mState.AsReadingMetadata()->mMetadataRequest);
+ }));
}
void ExternalEngineStateMachine::OnMetadataRead(MetadataHolder&& aMetadata) {
@@ -303,8 +304,6 @@ void ExternalEngineStateMachine::OnMetadataRead(MetadataHolder&& aMetadata) {
}
#endif
- mEngine->SetMediaInfo(*mInfo);
-
if (Info().mMetadataDuration.isSome()) {
mDuration = Info().mMetadataDuration;
} else if (Info().mUnadjustedMetadataEndTime.isSome()) {
@@ -333,7 +332,8 @@ void ExternalEngineStateMachine::OnMetadataRead(MetadataHolder&& aMetadata) {
mMetadataLoadedEvent.Notify(std::move(aMetadata.mInfo),
std::move(aMetadata.mTags),
MediaDecoderEventVisibility::Observable);
- StartRunningEngine();
+ ChangeStateTo(State::InitEngine);
+ InitEngine();
}
void ExternalEngineStateMachine::OnMetadataNotRead(const MediaResult& aError) {
@@ -365,6 +365,58 @@ bool ExternalEngineStateMachine::IsFormatSupportedByExternalEngine(
#endif
}
+RefPtr<MediaDecoder::SeekPromise> ExternalEngineStateMachine::InvokeSeek(
+ const SeekTarget& aTarget) {
+ return InvokeAsync(
+ OwnerThread(), __func__,
+ [self = RefPtr<ExternalEngineStateMachine>(this), this,
+ target = aTarget]() -> RefPtr<MediaDecoder::SeekPromise> {
+ AssertOnTaskQueue();
+ if (!mEngine || !mEngine->IsInited()) {
+ LOG("Can't perform seek (%" PRId64 ") now, add a pending seek task",
+ target.GetTime().ToMicroseconds());
+ // We haven't added any pending seek before
+ if (mPendingSeek.mPromise.IsEmpty()) {
+ mPendingTasks.AppendElement(NS_NewRunnableFunction(
+ "ExternalEngineStateMachine::InvokeSeek",
+ [self = RefPtr{this}, this] {
+ if (!mPendingSeek.Exists()) {
+ return;
+ }
+ Seek(*mPendingSeek.mTarget)
+ ->Then(OwnerThread(), __func__,
+ [self = RefPtr{this},
+ this](const MediaDecoder::SeekPromise::
+ ResolveOrRejectValue& aVal) {
+ mPendingSeekRequest.Complete();
+ if (aVal.IsResolve()) {
+ mPendingSeek.Resolve(__func__);
+ } else {
+ mPendingSeek.RejectIfExists(__func__);
+ }
+ mPendingSeek = SeekJob();
+ })
+ ->Track(mPendingSeekRequest);
+ }));
+ } else {
+ // Reject previous pending promise, as we will create a new one
+ LOG("Replace previous pending seek with a new one");
+ mPendingSeek.RejectIfExists(__func__);
+ mPendingSeekRequest.DisconnectIfExists();
+ }
+ mPendingSeek.mTarget = Some(target);
+ return mPendingSeek.mPromise.Ensure(__func__);
+ }
+ if (mPendingSeek.Exists()) {
+ LOG("Discard pending seek because another new seek happens");
+ mPendingSeek.RejectIfExists(__func__);
+ mPendingSeek = SeekJob();
+ mPendingSeekRequest.DisconnectIfExists();
+ }
+ return self->Seek(target);
+ });
+}
+
RefPtr<MediaDecoder::SeekPromise> ExternalEngineStateMachine::Seek(
const SeekTarget& aTarget) {
AssertOnTaskQueue();
@@ -570,11 +622,15 @@ RefPtr<ShutdownPromise> ExternalEngineStateMachine::Shutdown() {
mSetCDMProxyPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_ABORT_ERR, __func__);
mSetCDMProxyRequest.DisconnectIfExists();
- mInitEngineForCDMRequest.DisconnectIfExists();
+
+ mPendingSeek.RejectIfExists(__func__);
+ mPendingSeekRequest.DisconnectIfExists();
mPendingTasks.Clear();
- mEngine->Shutdown();
+ if (mEngine) {
+ mEngine->Shutdown();
+ }
auto* state = mState.AsShutdownEngine();
state->mShutdown = mReader->Shutdown()->Then(
@@ -622,8 +678,7 @@ void ExternalEngineStateMachine::BufferedRangeUpdated() {
return; \
} \
/* Initialzation is not done yet, postpone the operation */ \
- if ((mState.IsInitEngine() || mState.IsRecoverEngine()) && \
- mState.AsInitEngine()->mInitPromise) { \
+ if (!mEngine || !mEngine->IsInited()) { \
LOG("%s is called before init", __func__); \
mPendingTasks.AppendElement(NewRunnableMethod( \
__func__, this, &ExternalEngineStateMachine::Func)); \
@@ -1213,30 +1268,25 @@ RefPtr<SetCDMPromise> ExternalEngineStateMachine::SetCDMProxy(
return SetCDMPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
}
- if (mState.IsInitEngine() && mState.AsInitEngine()->mInitPromise) {
+ if (!mEngine || !mEngine->IsInited()) {
LOG("SetCDMProxy is called before init");
- mState.AsInitEngine()
- ->mInitPromise
- ->Then(
- OwnerThread(), __func__,
- [self = RefPtr{this}, proxy = RefPtr{aProxy}, this](
- const GenericNonExclusivePromise::ResolveOrRejectValue& aVal) {
- mInitEngineForCDMRequest.Complete();
- SetCDMProxy(proxy)
- ->Then(OwnerThread(), __func__,
- [self = RefPtr{this}, this](
- const SetCDMPromise::ResolveOrRejectValue& aVal) {
- mSetCDMProxyRequest.Complete();
- if (aVal.IsResolve()) {
- mSetCDMProxyPromise.Resolve(true, __func__);
- } else {
- mSetCDMProxyPromise.Reject(
- NS_ERROR_DOM_MEDIA_CDM_ERR, __func__);
- }
- })
- ->Track(mSetCDMProxyRequest);
- })
- ->Track(mInitEngineForCDMRequest);
+ mPendingTasks.AppendElement(NS_NewRunnableFunction(
+ "ExternalEngineStateMachine::SetCDMProxy",
+ [self = RefPtr{this}, proxy = RefPtr{aProxy}, this] {
+ SetCDMProxy(proxy)
+ ->Then(OwnerThread(), __func__,
+ [self = RefPtr{this},
+ this](const SetCDMPromise::ResolveOrRejectValue& aVal) {
+ mSetCDMProxyRequest.Complete();
+ if (aVal.IsResolve()) {
+ mSetCDMProxyPromise.Resolve(true, __func__);
+ } else {
+ mSetCDMProxyPromise.Reject(NS_ERROR_DOM_MEDIA_CDM_ERR,
+ __func__);
+ }
+ })
+ ->Track(mSetCDMProxyRequest);
+ }));
return mSetCDMProxyPromise.Ensure(__func__);
}
@@ -1244,6 +1294,7 @@ RefPtr<SetCDMPromise> ExternalEngineStateMachine::SetCDMProxy(
mKeySystem = NS_ConvertUTF16toUTF8(aProxy->KeySystem());
LOG("SetCDMProxy=%p (key-system=%s)", aProxy, mKeySystem.get());
MOZ_DIAGNOSTIC_ASSERT(mEngine);
+ // TODO : we should check the result of setting CDM proxy in the MFCDM process
if (!mEngine->SetCDMProxy(aProxy)) {
LOG("Failed to set CDM proxy on the engine");
return SetCDMPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CDM_ERR, __func__);
diff --git a/dom/media/ExternalEngineStateMachine.h b/dom/media/ExternalEngineStateMachine.h
index 79183f894d..83250b0f3c 100644
--- a/dom/media/ExternalEngineStateMachine.h
+++ b/dom/media/ExternalEngineStateMachine.h
@@ -56,6 +56,9 @@ class ExternalEngineStateMachine final
ExternalEngineStateMachine(MediaDecoder* aDecoder,
MediaFormatReader* aReader);
+ RefPtr<MediaDecoder::SeekPromise> InvokeSeek(
+ const SeekTarget& aTarget) override;
+
RefPtr<GenericPromise> InvokeSetSink(
const RefPtr<AudioDeviceInfo>& aSink) override;
@@ -178,9 +181,9 @@ class ExternalEngineStateMachine final
// crashes.
struct RecoverEngine : public InitEngine {};
- StateObject() : mData(InitEngine()), mName(State::InitEngine){};
- explicit StateObject(ReadingMetadata&& aArg)
- : mData(std::move(aArg)), mName(State::ReadingMetadata){};
+ StateObject() : mData(ReadingMetadata()), mName(State::ReadingMetadata){};
+ explicit StateObject(InitEngine&& aArg)
+ : mData(std::move(aArg)), mName(State::InitEngine){};
explicit StateObject(RunningEngine&& aArg)
: mData(std::move(aArg)), mName(State::RunningEngine){};
explicit StateObject(SeekingData&& aArg)
@@ -308,7 +311,11 @@ class ExternalEngineStateMachine final
// Only used if setting CDM happens before the engine finishes initialization.
MozPromiseHolder<SetCDMPromise> mSetCDMProxyPromise;
MozPromiseRequestHolder<SetCDMPromise> mSetCDMProxyRequest;
- MozPromiseRequestHolder<GenericNonExclusivePromise> mInitEngineForCDMRequest;
+
+ // If a seek happens while the engine is still initializing, then we would
+ // postpone the seek until the engine is ready.
+ SeekJob mPendingSeek;
+ MozPromiseRequestHolder<MediaDecoder::SeekPromise> mPendingSeekRequest;
// It would be zero for audio-only playback.
gfx::IntSize mVideoDisplay;
@@ -332,9 +339,11 @@ class ExternalPlaybackEngine {
virtual ~ExternalPlaybackEngine() = default;
// Init the engine and specify the preload request.
- virtual RefPtr<GenericNonExclusivePromise> Init(bool aShouldPreload) = 0;
+ virtual RefPtr<GenericNonExclusivePromise> Init(const MediaInfo& aInfo,
+ bool aShouldPreload) = 0;
virtual void Shutdown() = 0;
virtual uint64_t Id() const = 0;
+ virtual bool IsInited() const = 0;
// Following methods should only be called after successfully initialize the
// external engine.
@@ -347,7 +356,6 @@ class ExternalPlaybackEngine {
virtual void SetPreservesPitch(bool aPreservesPitch) = 0;
virtual media::TimeUnit GetCurrentPosition() = 0;
virtual void NotifyEndOfStream(TrackInfo::TrackType aType) = 0;
- virtual void SetMediaInfo(const MediaInfo& aInfo) = 0;
virtual bool SetCDMProxy(CDMProxy* aProxy) = 0;
virtual void NotifyResizing(uint32_t aWidth, uint32_t aHeight) = 0;
diff --git a/dom/media/GraphDriver.cpp b/dom/media/GraphDriver.cpp
index 36c5b58864..744de30bb5 100644
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -482,15 +482,9 @@ AudioCallbackDriver::AudioCallbackDriver(
"Invalid output channel count");
MOZ_ASSERT(mOutputChannelCount <= 8);
- bool allowVoice = StaticPrefs::
- media_getusermedia_microphone_prefer_voice_stream_with_processing_enabled();
-#ifdef MOZ_WIDGET_COCOA
- // Using the VoiceProcessingIO audio unit on MacOS 12 causes crashes in
- // OS code.
- allowVoice = allowVoice && nsCocoaFeatures::macOSVersionMajor() != 12;
-#endif
-
- if (aAudioInputType == AudioInputType::Voice && allowVoice) {
+ if (aAudioInputType == AudioInputType::Voice &&
+ StaticPrefs::
+ media_getusermedia_microphone_prefer_voice_stream_with_processing_enabled()) {
LOG(LogLevel::Debug, ("VOICE."));
mInputDevicePreference = CUBEB_DEVICE_PREF_VOICE;
CubebUtils::SetInCommunication(true);
diff --git a/dom/media/MediaData.cpp b/dom/media/MediaData.cpp
index fa545604e6..15774ec533 100644
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -62,6 +62,15 @@ Span<AudioDataValue> AudioData::Data() const {
return Span{GetAdjustedData(), mFrames * mChannels};
}
+nsCString AudioData::ToString() const {
+ nsCString rv;
+ rv.AppendPrintf("AudioData: %s %s %" PRIu32 " frames %" PRIu32 "Hz, %" PRIu32
+ "ch",
+ mTime.ToString().get(), mDuration.ToString().get(), mFrames,
+ mRate, mChannels);
+ return rv;
+}
+
void AudioData::SetOriginalStartTime(const media::TimeUnit& aStartTime) {
MOZ_ASSERT(mTime == mOriginalTime,
"Do not call this if data has been trimmed!");
diff --git a/dom/media/MediaData.h b/dom/media/MediaData.h
index 3ae8c1dbc2..f9f1aad2f1 100644
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -23,6 +23,7 @@
# include "mozilla/gfx/Rect.h"
# include "nsString.h"
# include "nsTArray.h"
+# include "EncoderConfig.h"
namespace mozilla {
@@ -379,7 +380,7 @@ class NullData : public MediaData {
static const Type sType = Type::NULL_DATA;
};
-// Holds chunk a decoded audio frames.
+// Holds a chunk of decoded interleaved audio frames.
class AudioData : public MediaData {
public:
AudioData(int64_t aOffset, const media::TimeUnit& aTime,
@@ -389,6 +390,8 @@ class AudioData : public MediaData {
static const Type sType = Type::AUDIO_DATA;
static const char* sTypeName;
+ nsCString ToString() const;
+
// Access the buffer as a Span.
Span<AudioDataValue> Data() const;
@@ -721,6 +724,9 @@ class MediaRawData final : public MediaData {
// Currently this is only used for the media engine DRM playback.
bool mShouldCopyCryptoToRemoteRawData = false;
+ // Config used to encode this packet.
+ UniquePtr<const EncoderConfig> mConfig;
+
// It's only used when the remote decoder reconstructs the media raw data.
CryptoSample& GetWritableCrypto() { return mCryptoInternal; }
diff --git a/dom/media/MediaDecoder.cpp b/dom/media/MediaDecoder.cpp
index 23c30eed2a..159c6a6121 100644
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -893,9 +893,44 @@ void MediaDecoder::FirstFrameLoaded(
// We only care about video first frame.
if (mInfo->HasVideo() && mMDSMCreationTime) {
- mTelemetryProbesReporter->OntFirstFrameLoaded(
- TimeStamp::Now() - *mMDSMCreationTime, IsMSE(),
- mDecoderStateMachine->IsExternalEngineStateMachine());
+ auto info = MakeUnique<dom::MediaDecoderDebugInfo>();
+ RequestDebugInfo(*info)->Then(
+ GetMainThreadSerialEventTarget(), __func__,
+ [self = RefPtr<MediaDecoder>{this}, this, now = TimeStamp::Now(),
+ creationTime = *mMDSMCreationTime, result = std::move(info)](
+ GenericPromise::ResolveOrRejectValue&& aValue) mutable {
+ if (IsShutdown()) {
+ return;
+ }
+ if (aValue.IsReject()) {
+ NS_WARNING("Failed to get debug info for the first frame probe!");
+ return;
+ }
+ auto firstFrameLoadedTime = (now - creationTime).ToMilliseconds();
+ MOZ_ASSERT(result->mReader.mTotalReadMetadataTimeMs >= 0.0);
+ MOZ_ASSERT(result->mReader.mTotalWaitingForVideoDataTimeMs >= 0.0);
+ MOZ_ASSERT(result->mStateMachine.mTotalBufferingTimeMs >= 0.0);
+
+ using FirstFrameLoadedFlag =
+ TelemetryProbesReporter::FirstFrameLoadedFlag;
+ TelemetryProbesReporter::FirstFrameLoadedFlagSet flags;
+ if (IsMSE()) {
+ flags += FirstFrameLoadedFlag::IsMSE;
+ }
+ if (mDecoderStateMachine->IsExternalEngineStateMachine()) {
+ flags += FirstFrameLoadedFlag::IsExternalEngineStateMachine;
+ }
+ if (IsHLSDecoder()) {
+ flags += FirstFrameLoadedFlag::IsHLS;
+ }
+ if (result->mReader.mVideoHardwareAccelerated) {
+ flags += FirstFrameLoadedFlag::IsHardwareDecoding;
+ }
+ mTelemetryProbesReporter->OntFirstFrameLoaded(
+ firstFrameLoadedTime, result->mReader.mTotalReadMetadataTimeMs,
+ result->mReader.mTotalWaitingForVideoDataTimeMs,
+ result->mStateMachine.mTotalBufferingTimeMs, flags, *mInfo);
+ });
mMDSMCreationTime.reset();
}
diff --git a/dom/media/MediaDecoder.h b/dom/media/MediaDecoder.h
index a5494e9a84..033e751533 100644
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -452,6 +452,8 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
void GetDebugInfo(dom::MediaDecoderDebugInfo& aInfo);
+ virtual bool IsHLSDecoder() const { return false; }
+
protected:
virtual ~MediaDecoder();
diff --git a/dom/media/MediaDecoderStateMachine.cpp b/dom/media/MediaDecoderStateMachine.cpp
index f3cd79047b..7c84690832 100644
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -2331,16 +2331,16 @@ class MediaDecoderStateMachine::NextFrameSeekingState
}
// Otherwise, we need to do the seek operation asynchronously for a special
- // case (bug504613.ogv) which has no data at all, the 1st seekToNextFrame()
- // operation reaches the end of the media. If we did the seek operation
- // synchronously, we immediately resolve the SeekPromise in mSeekJob and
- // then switch to the CompletedState which dispatches an "ended" event.
- // However, the ThenValue of the SeekPromise has not yet been set, so the
- // promise resolving is postponed and then the JS developer receives the
- // "ended" event before the seek promise is resolved.
- // An asynchronous seek operation helps to solve this issue since while the
- // seek is actually performed, the ThenValue of SeekPromise has already
- // been set so that it won't be postponed.
+ // case (a video with no data at all) in which the 1st
+ // seekToNextFrame() operation reaches the end of the media. If we did the
+ // seek operation synchronously, we immediately resolve the SeekPromise in
+ // mSeekJob and then switch to the CompletedState which dispatches an
+ // "ended" event. However, the ThenValue of the SeekPromise has not yet been
+ // set, so the promise resolving is postponed and then the JS developer
+ // receives the "ended" event before the seek promise is resolved. An
+ // asynchronous seek operation helps to solve this issue since while the
+ // seek is actually performed, the ThenValue of SeekPromise has already been
+ // set so that it won't be postponed.
RefPtr<Runnable> r = mAsyncSeekTask = new AysncNextFrameSeekTask(this);
nsresult rv = OwnerThread()->Dispatch(r.forget());
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
@@ -3377,6 +3377,7 @@ void MediaDecoderStateMachine::BufferingState::Step() {
}
SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
+ mMaster->mTotalBufferingDuration += (now - mBufferingStart);
SetDecodingState();
}
@@ -3471,13 +3472,15 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
mIsMSE(aDecoder->IsMSE()),
mShouldResistFingerprinting(aDecoder->ShouldResistFingerprinting()),
mSeamlessLoopingAllowed(false),
+ mTotalBufferingDuration(TimeDuration::Zero()),
INIT_MIRROR(mStreamName, nsAutoString()),
INIT_MIRROR(mSinkDevice, nullptr),
INIT_MIRROR(mOutputCaptureState, MediaDecoder::OutputCaptureState::None),
INIT_MIRROR(mOutputDummyTrack, nullptr),
INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
- INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE) {
+ INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
+ mShuttingDown(false) {
MOZ_COUNT_CTOR(MediaDecoderStateMachine);
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
@@ -3812,6 +3815,7 @@ void MediaDecoderStateMachine::VolumeChanged() {
RefPtr<ShutdownPromise> MediaDecoderStateMachine::Shutdown() {
AUTO_PROFILER_LABEL("MediaDecoderStateMachine::Shutdown", MEDIA_PLAYBACK);
MOZ_ASSERT(OnTaskQueue());
+ mShuttingDown = true;
return mStateObj->HandleShutdown();
}
@@ -4694,10 +4698,15 @@ void MediaDecoderStateMachine::GetDebugInfo(
aInfo.mVideoCompleted = mVideoCompleted;
mStateObj->GetDebugInfo(aInfo.mStateObj);
mMediaSink->GetDebugInfo(aInfo.mMediaSink);
+ aInfo.mTotalBufferingTimeMs = mTotalBufferingDuration.ToMilliseconds();
}
RefPtr<GenericPromise> MediaDecoderStateMachine::RequestDebugInfo(
dom::MediaDecoderStateMachineDebugInfo& aInfo) {
+ if (mShuttingDown) {
+ return GenericPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
+ }
+
RefPtr<GenericPromise::Private> p = new GenericPromise::Private(__func__);
RefPtr<MediaDecoderStateMachine> self = this;
nsresult rv = OwnerThread()->Dispatch(
@@ -4707,7 +4716,7 @@ RefPtr<GenericPromise> MediaDecoderStateMachine::RequestDebugInfo(
p->Resolve(true, __func__);
}),
AbstractThread::TailDispatch);
- MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
return p;
}
diff --git a/dom/media/MediaDecoderStateMachine.h b/dom/media/MediaDecoderStateMachine.h
index bcedf1790a..aa7919aa23 100644
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -17,6 +17,7 @@
# include "MediaStatistics.h"
# include "MediaTimer.h"
# include "SeekJob.h"
+# include "mozilla/Atomics.h"
# include "mozilla/Attributes.h"
# include "mozilla/ReentrantMonitor.h"
# include "mozilla/StateMirroring.h"
@@ -530,6 +531,9 @@ class MediaDecoderStateMachine
// logic until the media loops back.
bool mBypassingSkipToNextKeyFrameCheck = false;
+ // The total amount of time we've spent in the buffering state.
+ TimeDuration mTotalBufferingDuration;
+
private:
// Audio stream name
Mirror<nsAutoString> mStreamName;
@@ -559,6 +563,8 @@ class MediaDecoderStateMachine
// after Initialization. TaskQueue thread only.
bool mIsMediaSinkSuspended = false;
+ Atomic<bool> mShuttingDown;
+
public:
AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
return &mCanonicalOutputPrincipal;
diff --git a/dom/media/MediaDecoderStateMachineBase.h b/dom/media/MediaDecoderStateMachineBase.h
index 5872151015..fb172b2d22 100644
--- a/dom/media/MediaDecoderStateMachineBase.h
+++ b/dom/media/MediaDecoderStateMachineBase.h
@@ -85,7 +85,8 @@ class MediaDecoderStateMachineBase {
RefPtr<ShutdownPromise> BeginShutdown();
// Seeks to the decoder to aTarget asynchronously.
- RefPtr<MediaDecoder::SeekPromise> InvokeSeek(const SeekTarget& aTarget);
+ virtual RefPtr<MediaDecoder::SeekPromise> InvokeSeek(
+ const SeekTarget& aTarget);
virtual size_t SizeOfVideoQueue() const = 0;
virtual size_t SizeOfAudioQueue() const = 0;
diff --git a/dom/media/MediaFormatReader.cpp b/dom/media/MediaFormatReader.cpp
index 9553e67b00..7eb8e4e5e2 100644
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -891,7 +891,10 @@ MediaFormatReader::MediaFormatReader(MediaFormatReaderInit& aInit,
"MediaFormatReader::mBuffered (Canonical)"),
mFrameStats(aInit.mFrameStats),
mMediaDecoderOwnerID(aInit.mMediaDecoderOwnerID),
- mTrackingId(std::move(aInit.mTrackingId)) {
+ mTrackingId(std::move(aInit.mTrackingId)),
+ mReadMetadataStartTime(Nothing()),
+ mReadMetaDataTime(TimeDuration::Zero()),
+ mTotalWaitingForVideoDataTime(TimeDuration::Zero()) {
MOZ_ASSERT(aDemuxer);
MOZ_COUNT_CTOR(MediaFormatReader);
DDLINKCHILD("audio decoder data", "MediaFormatReader::DecoderDataWithPromise",
@@ -1162,6 +1165,10 @@ MediaFormatReader::AsyncReadMetadata() {
return MetadataPromise::CreateAndResolve(std::move(metadata), __func__);
}
+ if (!mReadMetadataStartTime) {
+ mReadMetadataStartTime = Some(TimeStamp::Now());
+ }
+
RefPtr<MetadataPromise> p = mMetadataPromise.Ensure(__func__);
mDemuxer->Init()
@@ -1342,6 +1349,11 @@ void MediaFormatReader::MaybeResolveMetadataPromise() {
&MediaFormatReader::NotifyTrackInfoUpdated);
mIsWatchingWorkingInfo = true;
+ if (mReadMetadataStartTime) {
+ mReadMetaDataTime = TimeStamp::Now() - *mReadMetadataStartTime;
+ mReadMetadataStartTime.reset();
+ }
+
mMetadataPromise.Resolve(std::move(metadata), __func__);
}
@@ -1490,7 +1502,7 @@ void MediaFormatReader::OnDemuxFailed(TrackType aTrack,
aTrack == TrackType::kVideoTrack ? "video_demux_interruption"
: "audio_demux_interruption",
aError);
- if (!decoder.mWaitingForData) {
+ if (!decoder.mWaitingForDataStartTime) {
decoder.RequestDrain();
}
NotifyEndOfStream(aTrack);
@@ -1500,7 +1512,7 @@ void MediaFormatReader::OnDemuxFailed(TrackType aTrack,
aTrack == TrackType::kVideoTrack ? "video_demux_interruption"
: "audio_demux_interruption",
aError);
- if (!decoder.mWaitingForData) {
+ if (!decoder.mWaitingForDataStartTime) {
decoder.RequestDrain();
}
NotifyWaitingForData(aTrack);
@@ -1783,7 +1795,7 @@ void MediaFormatReader::NotifyError(TrackType aTrack,
void MediaFormatReader::NotifyWaitingForData(TrackType aTrack) {
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
- decoder.mWaitingForData = true;
+ decoder.mWaitingForDataStartTime = Some(TimeStamp::Now());
if (decoder.mTimeThreshold) {
decoder.mTimeThreshold.ref().mWaiting = true;
}
@@ -1848,7 +1860,7 @@ bool MediaFormatReader::UpdateReceivedNewData(TrackType aTrack) {
return false;
}
- // We do not want to clear mWaitingForData while there are pending
+ // We do not want to clear mWaitingForDataStartTime while there are pending
// demuxing or seeking operations that could affect the value of this flag.
// This is in order to ensure that we will retry once they complete as we may
// now have new data that could potentially allow those operations to
@@ -1870,7 +1882,7 @@ bool MediaFormatReader::UpdateReceivedNewData(TrackType aTrack) {
}
if (decoder.HasPendingDrain()) {
- // We do not want to clear mWaitingForData or mDemuxEOS while
+ // We do not want to clear mWaitingForDataStartTime or mDemuxEOS while
// a drain is in progress in order to properly complete the operation.
return false;
}
@@ -1879,7 +1891,11 @@ bool MediaFormatReader::UpdateReceivedNewData(TrackType aTrack) {
if (decoder.mTimeThreshold) {
decoder.mTimeThreshold.ref().mWaiting = false;
}
- decoder.mWaitingForData = false;
+ if (aTrack == TrackType::kVideoTrack && decoder.mWaitingForDataStartTime) {
+ mTotalWaitingForVideoDataTime +=
+ TimeStamp::Now() - *decoder.mWaitingForDataStartTime;
+ }
+ decoder.mWaitingForDataStartTime.reset();
if (decoder.HasFatalError()) {
return false;
@@ -2390,7 +2406,7 @@ void MediaFormatReader::Update(TrackType aTrack) {
NotifyDecoderBenchmarkStore();
}
decoder.RejectPromise(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
- } else if (decoder.mWaitingForData) {
+ } else if (decoder.mWaitingForDataStartTime) {
if (decoder.mDrainState == DrainState::DrainCompleted &&
decoder.mLastDecodedSampleTime && !decoder.mNextStreamSourceID) {
// We have completed draining the decoder following WaitingForData.
@@ -2562,8 +2578,9 @@ void MediaFormatReader::Update(TrackType aTrack) {
decoder.mNumSamplesOutput, uint32_t(size_t(decoder.mSizeOfQueue)),
decoder.mDecodeRequest.Exists(), decoder.mFlushing,
decoder.mDescription.get(), uint32_t(decoder.mOutput.Length()),
- decoder.mWaitingForData, decoder.mDemuxEOS, int32_t(decoder.mDrainState),
- decoder.mLastStreamSourceID, IsDecoderWaitingForCDM(aTrack));
+ !!decoder.mWaitingForDataStartTime, decoder.mDemuxEOS,
+ int32_t(decoder.mDrainState), decoder.mLastStreamSourceID,
+ IsDecoderWaitingForCDM(aTrack));
if (IsWaitingOnCDMResource() || !ResolveSetCDMPromiseIfDone(aTrack)) {
// If the content is encrypted, MFR won't start to create decoder until
@@ -2576,7 +2593,7 @@ void MediaFormatReader::Update(TrackType aTrack) {
(decoder.IsWaitingForKey())) {
// Nothing more we can do at present.
LOGV("Still waiting for data or key. data(%d)/key(%d)",
- decoder.mWaitingForData, decoder.mWaitingForKey);
+ !!decoder.mWaitingForDataStartTime, decoder.mWaitingForKey);
return;
}
@@ -3319,7 +3336,7 @@ void MediaFormatReader::GetDebugInfo(dom::MediaFormatReaderDebugInfo& aInfo) {
aInfo.mAudioState.mQueueSize =
AssertedCast<int32_t>(size_t(mAudio.mSizeOfQueue));
aInfo.mAudioState.mPending = AssertedCast<int>(mAudio.mOutput.Length());
- aInfo.mAudioState.mWaitingForData = mAudio.mWaitingForData;
+ aInfo.mAudioState.mWaitingForData = !!mAudio.mWaitingForDataStartTime;
aInfo.mAudioState.mDemuxEOS = mAudio.mDemuxEOS;
aInfo.mAudioState.mDrainState = int32_t(mAudio.mDrainState);
aInfo.mAudioState.mWaitingForKey = mAudio.mWaitingForKey;
@@ -3359,12 +3376,15 @@ void MediaFormatReader::GetDebugInfo(dom::MediaFormatReaderDebugInfo& aInfo) {
aInfo.mVideoState.mQueueSize =
AssertedCast<int32_t>(size_t(mVideo.mSizeOfQueue));
aInfo.mVideoState.mPending = AssertedCast<int32_t>(mVideo.mOutput.Length());
- aInfo.mVideoState.mWaitingForData = mVideo.mWaitingForData;
+ aInfo.mVideoState.mWaitingForData = !!mVideo.mWaitingForDataStartTime;
aInfo.mVideoState.mDemuxEOS = mVideo.mDemuxEOS;
aInfo.mVideoState.mDrainState = int32_t(mVideo.mDrainState);
aInfo.mVideoState.mWaitingForKey = mVideo.mWaitingForKey;
aInfo.mVideoState.mLastStreamSourceID =
AssertedCast<int64_t>(mVideo.mLastStreamSourceID);
+ aInfo.mTotalReadMetadataTimeMs = mReadMetaDataTime.ToMilliseconds();
+ aInfo.mTotalWaitingForVideoDataTimeMs =
+ mTotalWaitingForVideoDataTime.ToMilliseconds();
}
CopyUTF8toUTF16(videoDecoderName, aInfo.mVideoDecoderName);
diff --git a/dom/media/MediaFormatReader.h b/dom/media/MediaFormatReader.h
index fcc3f20036..5c4e04172d 100644
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -21,6 +21,7 @@
# include "mozilla/StateMirroring.h"
# include "mozilla/StaticPrefs_media.h"
# include "mozilla/TaskQueue.h"
+# include "mozilla/TimeStamp.h"
# include "mozilla/ThreadSafeWeakPtr.h"
# include "mozilla/dom/MediaDebugInfoBinding.h"
@@ -370,7 +371,7 @@ class MediaFormatReader final
mCodecName(""),
mUpdateScheduled(false),
mDemuxEOS(false),
- mWaitingForData(false),
+ mWaitingForDataStartTime(Nothing()),
mWaitingForKey(false),
mReceivedNewData(false),
mFlushing(false),
@@ -426,7 +427,7 @@ class MediaFormatReader final
// Only accessed from reader's task queue.
bool mUpdateScheduled;
bool mDemuxEOS;
- bool mWaitingForData;
+ Maybe<TimeStamp> mWaitingForDataStartTime;
bool mWaitingForKey;
bool mReceivedNewData;
@@ -446,7 +447,7 @@ class MediaFormatReader final
bool IsWaitingForData() const {
MOZ_ASSERT(mOwner->OnTaskQueue());
- return mWaitingForData;
+ return !!mWaitingForDataStartTime;
}
bool IsWaitingForKey() const {
@@ -583,7 +584,7 @@ class MediaFormatReader final
void ResetState() {
MOZ_ASSERT(mOwner->OnTaskQueue());
mDemuxEOS = false;
- mWaitingForData = false;
+ mWaitingForDataStartTime.reset();
mQueuedSamples.Clear();
mDecodeRequest.DisconnectIfExists();
mDrainRequest.DisconnectIfExists();
@@ -885,6 +886,16 @@ class MediaFormatReader final
Maybe<uint64_t> mMediaEngineId;
const Maybe<TrackingId> mTrackingId;
+
+ // The start time of reading the metadata and how long it takes. This
+ // measurement includes the time of downloading media resource over the
+ // internet.
+ Maybe<TimeStamp> mReadMetadataStartTime;
+ TimeDuration mReadMetaDataTime;
+
+ // The total amount of time we have been waiting for the video data due to
+ // lack of data.
+ TimeDuration mTotalWaitingForVideoDataTime;
};
} // namespace mozilla
diff --git a/dom/media/MediaInfo.h b/dom/media/MediaInfo.h
index 73704d1593..7ab5df4e0a 100644
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -109,12 +109,16 @@ struct FlacCodecSpecificData {
RefPtr<MediaByteBuffer> mStreamInfoBinaryBlob{new MediaByteBuffer};
};
-struct Mp3CodecSpecificData {
+struct Mp3CodecSpecificData final {
bool operator==(const Mp3CodecSpecificData& rhs) const {
return mEncoderDelayFrames == rhs.mEncoderDelayFrames &&
mEncoderPaddingFrames == rhs.mEncoderPaddingFrames;
}
+ auto MutTiedFields() {
+ return std::tie(mEncoderDelayFrames, mEncoderPaddingFrames);
+ }
+
// The number of frames that should be skipped from the beginning of the
// decoded stream.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=1566389 for more info.
@@ -558,7 +562,7 @@ class AudioInfo : public TrackInfo {
bool operator==(const AudioInfo& rhs) const;
- static const uint32_t MAX_RATE = 640000;
+ static const uint32_t MAX_RATE = 768000;
static const uint32_t MAX_CHANNEL_COUNT = 256;
bool IsValid() const override {
diff --git a/dom/media/MediaManager.cpp b/dom/media/MediaManager.cpp
index 422769587a..fb4384c826 100644
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -2227,6 +2227,7 @@ MediaManager::MediaManager(already_AddRefed<TaskQueue> aMediaThread)
mPrefs.mNoiseOn = false;
mPrefs.mTransientOn = false;
mPrefs.mAgc2Forced = false;
+ mPrefs.mExpectDrift = -1; // auto
#ifdef MOZ_WEBRTC
mPrefs.mAgc =
webrtc::AudioProcessing::Config::GainController1::Mode::kAdaptiveDigital;
@@ -3482,17 +3483,26 @@ void MediaManager::GetPrefs(nsIPrefBranch* aBranch, const char* aData) {
&mPrefs.mTransientOn);
GetPrefBool(aBranch, "media.getusermedia.agc2_forced", aData,
&mPrefs.mAgc2Forced);
+ // Use 0 or 1 to force to false or true
+ // EchoCanceller3Config::echo_removal_control.has_clock_drift.
+ // -1 is the default, which means automatically set has_clock_drift as
+ // deemed appropriate.
+ GetPref(aBranch, "media.getusermedia.audio.processing.aec.expect_drift",
+ aData, &mPrefs.mExpectDrift);
GetPref(aBranch, "media.getusermedia.agc", aData, &mPrefs.mAgc);
GetPref(aBranch, "media.getusermedia.noise", aData, &mPrefs.mNoise);
GetPref(aBranch, "media.getusermedia.channels", aData, &mPrefs.mChannels);
#endif
LOG("%s: default prefs: %dx%d @%dfps, %dHz test tones, aec: %s, "
- "agc: %s, hpf: %s, noise: %s, agc level: %d, agc version: %s, noise "
- "level: %d, transient: %s, channels %d",
+ "agc: %s, hpf: %s, noise: %s, drift: %s, agc level: %d, agc version: %s, "
+ "noise level: %d, transient: %s, channels %d",
__FUNCTION__, mPrefs.mWidth, mPrefs.mHeight, mPrefs.mFPS, mPrefs.mFreq,
mPrefs.mAecOn ? "on" : "off", mPrefs.mAgcOn ? "on" : "off",
- mPrefs.mHPFOn ? "on" : "off", mPrefs.mNoiseOn ? "on" : "off", mPrefs.mAgc,
- mPrefs.mAgc2Forced ? "2" : "1", mPrefs.mNoise,
+ mPrefs.mHPFOn ? "on" : "off", mPrefs.mNoiseOn ? "on" : "off",
+ mPrefs.mExpectDrift < 0 ? "auto"
+ : mPrefs.mExpectDrift ? "on"
+ : "off",
+ mPrefs.mAgc, mPrefs.mAgc2Forced ? "2" : "1", mPrefs.mNoise,
mPrefs.mTransientOn ? "on" : "off", mPrefs.mChannels);
}
diff --git a/dom/media/MediaRecorder.cpp b/dom/media/MediaRecorder.cpp
index 42cd655093..be3b97cc99 100644
--- a/dom/media/MediaRecorder.cpp
+++ b/dom/media/MediaRecorder.cpp
@@ -569,7 +569,9 @@ void SelectBitrates(uint32_t aBitsPerSecond, uint8_t aNumVideoTracks,
*/
class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
public DOMMediaStream::TrackListener {
- NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Session)
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(Session,
+ DOMMediaStream::TrackListener)
struct TrackTypeComparator {
enum Type {
@@ -1170,6 +1172,14 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
RefPtr<ShutdownBlocker> mShutdownBlocker;
};
+NS_IMPL_CYCLE_COLLECTION_INHERITED(MediaRecorder::Session,
+ DOMMediaStream::TrackListener, mMediaStream,
+ mMediaStreamTracks)
+NS_IMPL_ADDREF_INHERITED(MediaRecorder::Session, DOMMediaStream::TrackListener)
+NS_IMPL_RELEASE_INHERITED(MediaRecorder::Session, DOMMediaStream::TrackListener)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaRecorder::Session)
+NS_INTERFACE_MAP_END_INHERITING(DOMMediaStream::TrackListener)
+
MediaRecorder::~MediaRecorder() {
LOG(LogLevel::Debug, ("~MediaRecorder (%p)", this));
UnRegisterActivityObserver();
diff --git a/dom/media/MediaStreamWindowCapturer.cpp b/dom/media/MediaStreamWindowCapturer.cpp
index 142242eff0..0e3eca801b 100644
--- a/dom/media/MediaStreamWindowCapturer.cpp
+++ b/dom/media/MediaStreamWindowCapturer.cpp
@@ -13,6 +13,15 @@ namespace mozilla {
using dom::AudioStreamTrack;
using dom::MediaStreamTrack;
+NS_IMPL_CYCLE_COLLECTION_INHERITED(MediaStreamWindowCapturer,
+ DOMMediaStream::TrackListener)
+NS_IMPL_ADDREF_INHERITED(MediaStreamWindowCapturer,
+ DOMMediaStream::TrackListener)
+NS_IMPL_RELEASE_INHERITED(MediaStreamWindowCapturer,
+ DOMMediaStream::TrackListener)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaStreamWindowCapturer)
+NS_INTERFACE_MAP_END_INHERITING(DOMMediaStream::TrackListener)
+
MediaStreamWindowCapturer::CapturedTrack::CapturedTrack(
MediaStreamTrack* aTrack, uint64_t aWindowID)
: mTrack(aTrack),
@@ -28,7 +37,6 @@ MediaStreamWindowCapturer::CapturedTrack::~CapturedTrack() {
MediaStreamWindowCapturer::MediaStreamWindowCapturer(DOMMediaStream* aStream,
uint64_t aWindowId)
: mStream(aStream), mWindowId(aWindowId) {
- mStream->RegisterTrackListener(this);
nsTArray<RefPtr<AudioStreamTrack>> tracks;
mStream->GetAudioTracks(tracks);
for (const auto& t : tracks) {
@@ -39,11 +47,7 @@ MediaStreamWindowCapturer::MediaStreamWindowCapturer(DOMMediaStream* aStream,
}
}
-MediaStreamWindowCapturer::~MediaStreamWindowCapturer() {
- if (mStream) {
- mStream->UnregisterTrackListener(this);
- }
-}
+MediaStreamWindowCapturer::~MediaStreamWindowCapturer() = default;
void MediaStreamWindowCapturer::NotifyTrackAdded(
const RefPtr<MediaStreamTrack>& aTrack) {
diff --git a/dom/media/MediaStreamWindowCapturer.h b/dom/media/MediaStreamWindowCapturer.h
index 8a6695ed43..dfd6062c3c 100644
--- a/dom/media/MediaStreamWindowCapturer.h
+++ b/dom/media/MediaStreamWindowCapturer.h
@@ -24,7 +24,9 @@ class MediaInputPort;
class MediaStreamWindowCapturer : public DOMMediaStream::TrackListener {
public:
MediaStreamWindowCapturer(DOMMediaStream* aStream, uint64_t aWindowId);
- ~MediaStreamWindowCapturer();
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaStreamWindowCapturer,
+ DOMMediaStream::TrackListener)
void NotifyTrackAdded(const RefPtr<dom::MediaStreamTrack>& aTrack) override;
void NotifyTrackRemoved(const RefPtr<dom::MediaStreamTrack>& aTrack) override;
@@ -41,6 +43,7 @@ class MediaStreamWindowCapturer : public DOMMediaStream::TrackListener {
const uint64_t mWindowId;
protected:
+ ~MediaStreamWindowCapturer();
void AddTrack(dom::AudioStreamTrack* aTrack);
void RemoveTrack(dom::AudioStreamTrack* aTrack);
diff --git a/dom/media/MediaTrackGraph.cpp b/dom/media/MediaTrackGraph.cpp
index 2af7aacb1f..a4f81d0071 100644
--- a/dom/media/MediaTrackGraph.cpp
+++ b/dom/media/MediaTrackGraph.cpp
@@ -4070,6 +4070,9 @@ double MediaTrackGraphImpl::AudioOutputLatency() {
return mAudioOutputLatency;
}
+bool MediaTrackGraph::OutputForAECMightDrift() {
+ return static_cast<MediaTrackGraphImpl*>(this)->OutputForAECMightDrift();
+}
bool MediaTrackGraph::IsNonRealtime() const {
return !static_cast<const MediaTrackGraphImpl*>(this)->mRealtime;
}
diff --git a/dom/media/MediaTrackGraph.h b/dom/media/MediaTrackGraph.h
index 8afeb1dd0d..a754b158eb 100644
--- a/dom/media/MediaTrackGraph.h
+++ b/dom/media/MediaTrackGraph.h
@@ -1200,6 +1200,10 @@ class MediaTrackGraph {
}
double AudioOutputLatency();
+ /* Return whether the clock for the audio output device used for the AEC
+ * reverse stream might drift from the clock for this MediaTrackGraph.
+ * Graph thread only. */
+ bool OutputForAECMightDrift();
void RegisterCaptureTrackForWindow(uint64_t aWindowId,
ProcessedMediaTrack* aCaptureTrack);
diff --git a/dom/media/MediaTrackGraphImpl.h b/dom/media/MediaTrackGraphImpl.h
index e733b961ff..5daed83ef3 100644
--- a/dom/media/MediaTrackGraphImpl.h
+++ b/dom/media/MediaTrackGraphImpl.h
@@ -571,7 +571,12 @@ class MediaTrackGraphImpl : public MediaTrackGraph,
void SetMaxOutputChannelCount(uint32_t aMaxChannelCount);
double AudioOutputLatency();
-
+ /* Return whether the clock for the audio output device used for the AEC
+ * reverse stream might drift from the clock for this MediaTrackGraph. */
+ bool OutputForAECMightDrift() {
+ AssertOnGraphThread();
+ return mOutputDeviceForAEC != PrimaryOutputDeviceID();
+ }
/**
* The audio input channel count for a MediaTrackGraph is the max of all the
* channel counts requested by the listeners. The max channel count is
@@ -1115,12 +1120,14 @@ class MediaTrackGraphImpl : public MediaTrackGraph,
const float mGlobalVolume;
#ifdef DEBUG
+ protected:
/**
* Used to assert when AppendMessage() runs control messages synchronously.
*/
bool mCanRunMessagesSynchronously;
#endif
+ private:
/**
* The graph's main-thread observable graph time.
* Updated by the stable state runnable after each iteration.
diff --git a/dom/media/PeerConnection.sys.mjs b/dom/media/PeerConnection.sys.mjs
index 00b4023c2f..a775a92d99 100644
--- a/dom/media/PeerConnection.sys.mjs
+++ b/dom/media/PeerConnection.sys.mjs
@@ -217,6 +217,51 @@ setupPrototype(GlobalPCList, {
var _globalPCList = new GlobalPCList();
+// Parses grammar in RFC5245 section 15 and ICE TCP from RFC6544 section 4.5.
+function parseCandidate(line) {
+ const match = line.match(
+ /^(a=)?candidate:([A-Za-z0-9+\/]{1,32}) (\d+) (UDP|TCP) (\d+) ([A-Za-z0-9.:-]+) (\d+) typ (host|srflx|prflx|relay)(?: raddr ([A-Za-z0-9.:-]+) rport (\d+))?(.*)$/i
+ );
+ if (!match) {
+ return null;
+ }
+ const candidate = {
+ foundation: match[2],
+ componentId: parseInt(match[3], 10),
+ transport: match[4],
+ priority: parseInt(match[5], 10),
+ address: match[6],
+ port: parseInt(match[7], 10),
+ type: match[8],
+ relatedAddress: match[9],
+ relatedPort: match[10],
+ };
+ if (candidate.componentId < 1 || candidate.componentId > 256) {
+ return null;
+ }
+ if (candidate.priority < 0 || candidate.priority > 4294967295) {
+ return null;
+ }
+ if (candidate.port < 0 || candidate.port > 65535) {
+ return null;
+ }
+ candidate.component = { 1: "rtp", 2: "rtcp" }[candidate.componentId] || null;
+ candidate.protocol =
+ { udp: "udp", tcp: "tcp" }[candidate.transport.toLowerCase()] || null;
+
+ const tcpTypeMatch = match[11].match(/tcptype (\S+)/i);
+ if (tcpTypeMatch) {
+ candidate.tcpType = tcpTypeMatch[1];
+ if (
+ candidate.protocol != "tcp" ||
+ !["active", "passive", "so"].includes(candidate.tcpType)
+ ) {
+ return null;
+ }
+ }
+ return candidate;
+}
+
export class RTCIceCandidate {
init(win) {
this._win = win;
@@ -229,6 +274,20 @@ export class RTCIceCandidate {
);
}
Object.assign(this, dict);
+ const candidate = parseCandidate(this.candidate);
+ if (!candidate) {
+ return;
+ }
+ Object.assign(this, candidate);
+ }
+
+ toJSON() {
+ return {
+ candidate: this.candidate,
+ sdpMid: this.sdpMid,
+ sdpMLineIndex: this.sdpMLineIndex,
+ usernameFragment: this.usernameFragment,
+ };
}
}
diff --git a/dom/media/TimedPacketizer.h b/dom/media/TimedPacketizer.h
new file mode 100644
index 0000000000..dbccc77e61
--- /dev/null
+++ b/dom/media/TimedPacketizer.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_TIMEPACKETIZER_H_
+#define DOM_MEDIA_TIMEPACKETIZER_H_
+
+#include "AudioPacketizer.h"
+#include "TimeUnits.h"
+
+namespace mozilla {
+/**
+ * This class wraps an AudioPacketizer and provides packets of audio with
+ * timestamps.
+ */
+template <typename InputType, typename OutputType>
+class TimedPacketizer {
+ public:
+ TimedPacketizer(uint32_t aPacketSize, uint32_t aChannels,
+ int64_t aInitialTick, int64_t aBase)
+ : mPacketizer(aPacketSize, aChannels),
+ mTimeStamp(media::TimeUnit(aInitialTick, aBase)),
+ mBase(aBase) {}
+
+ void Input(const InputType* aFrames, uint32_t aFrameCount) {
+ mPacketizer.Input(aFrames, aFrameCount);
+ }
+
+ media::TimeUnit Output(OutputType* aOutputBuffer) {
+ MOZ_ASSERT(mPacketizer.PacketsAvailable());
+ media::TimeUnit pts = mTimeStamp;
+ mPacketizer.Output(aOutputBuffer);
+ mTimeStamp += media::TimeUnit(mPacketizer.mPacketSize, mBase);
+ return pts;
+ }
+
+ media::TimeUnit Drain(OutputType* aOutputBuffer, uint32_t& aWritten) {
+ MOZ_ASSERT(!mPacketizer.PacketsAvailable(),
+ "Consume all packets before calling drain");
+ media::TimeUnit pts = mTimeStamp;
+ aWritten = mPacketizer.Output(aOutputBuffer);
+ mTimeStamp += media::TimeUnit(mPacketizer.mPacketSize, mBase);
+ return pts;
+ }
+
+ // Call this when a discontinuity in input has been detected, e.g. based on
+ // the pts of input packets.
+ void Discontinuity(int64_t aNewTick) {
+ MOZ_ASSERT(!mPacketizer.FramesAvailable(),
+ "Drain before enqueueing discontinuous audio");
+ mTimeStamp = media::TimeUnit(aNewTick, mBase);
+ }
+
+ void Clear() { mPacketizer.Clear(); }
+
+ uint32_t PacketSize() const { return mPacketizer.mPacketSize; }
+
+ uint32_t ChannelCount() const { return mPacketizer.mChannels; }
+
+ uint32_t PacketsAvailable() const { return mPacketizer.PacketsAvailable(); }
+
+ uint32_t FramesAvailable() const { return mPacketizer.FramesAvailable(); }
+
+ private:
+ AudioPacketizer<InputType, OutputType> mPacketizer;
+ media::TimeUnit mTimeStamp;
+ uint64_t mBase;
+};
+
+} // namespace mozilla
+
+#endif // DOM_MEDIA_TIMEPACKETIZER_H_
diff --git a/dom/media/VideoUtils.cpp b/dom/media/VideoUtils.cpp
index 90057f7c83..24b1f0dd59 100644
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -188,12 +188,13 @@ uint32_t DecideAudioPlaybackSampleRate(const AudioInfo& aInfo,
rate = 48000;
} else if (aInfo.mRate >= 44100) {
// The original rate is of good quality and we want to minimize unecessary
- // resampling, so we let cubeb decide how to resample (if needed).
- rate = aInfo.mRate;
+ // resampling, so we let cubeb decide how to resample (if needed). Cap to
+ // 384kHz for good measure.
+ rate = std::min<unsigned>(aInfo.mRate, 384000u);
} else {
// We will resample all data to match cubeb's preferred sampling rate.
rate = CubebUtils::PreferredSampleRate(aShouldResistFingerprinting);
- if (rate > 384000) {
+ if (rate > 768000) {
// bogus rate, fall back to something else;
rate = 48000;
}
diff --git a/dom/media/VideoUtils.h b/dom/media/VideoUtils.h
index a2fa7208f5..b1dbb0cf2b 100644
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -194,34 +194,6 @@ enum class MediaThreadType {
// for decoding streams.
already_AddRefed<SharedThreadPool> GetMediaThreadPool(MediaThreadType aType);
-enum H264_PROFILE {
- H264_PROFILE_UNKNOWN = 0,
- H264_PROFILE_BASE = 0x42,
- H264_PROFILE_MAIN = 0x4D,
- H264_PROFILE_EXTENDED = 0x58,
- H264_PROFILE_HIGH = 0x64,
-};
-
-enum H264_LEVEL {
- H264_LEVEL_1 = 10,
- H264_LEVEL_1_b = 11,
- H264_LEVEL_1_1 = 11,
- H264_LEVEL_1_2 = 12,
- H264_LEVEL_1_3 = 13,
- H264_LEVEL_2 = 20,
- H264_LEVEL_2_1 = 21,
- H264_LEVEL_2_2 = 22,
- H264_LEVEL_3 = 30,
- H264_LEVEL_3_1 = 31,
- H264_LEVEL_3_2 = 32,
- H264_LEVEL_4 = 40,
- H264_LEVEL_4_1 = 41,
- H264_LEVEL_4_2 = 42,
- H264_LEVEL_5 = 50,
- H264_LEVEL_5_1 = 51,
- H264_LEVEL_5_2 = 52
-};
-
// Extracts the H.264/AVC profile and level from an H.264 codecs string.
// H.264 codecs parameters have a type defined as avc1.PPCCLL, where
// PP = profile_idc, CC = constraint_set flags, LL = level_idc.
diff --git a/dom/media/autoplay/test/mochitest/mochitest.toml b/dom/media/autoplay/test/mochitest/mochitest.toml
index 0b6f0a169f..76cb353ffe 100644
--- a/dom/media/autoplay/test/mochitest/mochitest.toml
+++ b/dom/media/autoplay/test/mochitest/mochitest.toml
@@ -3,7 +3,6 @@ subsuite = "media"
tags = "autoplay"
support-files = [
"../../../test/manifest.js",
- "../../../test/320x240.ogv",
"../../../test/bogus.duh",
"../../../test/detodos-short.opus",
"../../../test/flac-s24.flac",
@@ -23,6 +22,7 @@ support-files = [
"../../../test/small-shot-mp3.mp4",
"../../../test/small-shot.ogg",
"../../../test/vp9-short.webm",
+ "../../../test/vp9.webm",
"AutoplayTestUtils.js",
"file_autoplay_gv_play_request_frame.html",
"file_autoplay_gv_play_request_window.html",
diff --git a/dom/media/autoplay/test/mochitest/test_autoplay_policy_play_before_loadedmetadata.html b/dom/media/autoplay/test/mochitest/test_autoplay_policy_play_before_loadedmetadata.html
index b5f70be227..d76aa96348 100644
--- a/dom/media/autoplay/test/mochitest/test_autoplay_policy_play_before_loadedmetadata.html
+++ b/dom/media/autoplay/test/mochitest/test_autoplay_policy_play_before_loadedmetadata.html
@@ -28,12 +28,12 @@
let testCases = [
{
- resource: "320x240.ogv", // Only video track.
+ resource: "vp9.webm", // Only video track.
shouldPlay: false,
muted: false,
},
{
- resource: "320x240.ogv", // Only video track.
+ resource: "vp9.webm", // Only video track.
shouldPlay: true,
muted: true,
},
diff --git a/dom/media/eme/EMEUtils.cpp b/dom/media/eme/EMEUtils.cpp
index 5a6b645df2..19639388da 100644
--- a/dom/media/eme/EMEUtils.cpp
+++ b/dom/media/eme/EMEUtils.cpp
@@ -10,8 +10,10 @@
#include "MediaData.h"
#include "KeySystemConfig.h"
#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/dom/Document.h"
#include "mozilla/dom/KeySystemNames.h"
#include "mozilla/dom/UnionTypes.h"
+#include "nsContentUtils.h"
#ifdef MOZ_WMF_CDM
# include "mozilla/PMFCDM.h"
@@ -49,37 +51,33 @@ bool IsWidevineKeySystem(const nsAString& aKeySystem) {
}
#ifdef MOZ_WMF_CDM
-bool IsPlayReadyKeySystemAndSupported(const nsAString& aKeySystem) {
- if (!StaticPrefs::media_eme_playready_enabled()) {
- return false;
- }
+bool IsPlayReadyEnabled() {
// 1=enabled encrypted and clear, 2=enabled encrytped.
- if (StaticPrefs::media_wmf_media_engine_enabled() != 1 &&
- StaticPrefs::media_wmf_media_engine_enabled() != 2) {
- return false;
- }
- return IsPlayReadyKeySystem(aKeySystem);
+ return StaticPrefs::media_eme_playready_enabled() &&
+ (StaticPrefs::media_wmf_media_engine_enabled() == 1 ||
+ StaticPrefs::media_wmf_media_engine_enabled() == 2);
}
-bool IsPlayReadyKeySystem(const nsAString& aKeySystem) {
+bool IsPlayReadyKeySystemAndSupported(const nsAString& aKeySystem) {
+ if (!IsPlayReadyEnabled()) {
+ return false;
+ }
return aKeySystem.EqualsLiteral(kPlayReadyKeySystemName) ||
aKeySystem.EqualsLiteral(kPlayReadyKeySystemHardware) ||
aKeySystem.EqualsLiteral(kPlayReadyHardwareClearLeadKeySystemName);
}
-bool IsWidevineExperimentKeySystemAndSupported(const nsAString& aKeySystem) {
- if (!StaticPrefs::media_eme_widevine_experiment_enabled()) {
- return false;
- }
+bool IsWidevineHardwareDecryptionEnabled() {
// 1=enabled encrypted and clear, 2=enabled encrytped.
- if (StaticPrefs::media_wmf_media_engine_enabled() != 1 &&
- StaticPrefs::media_wmf_media_engine_enabled() != 2) {
- return false;
- }
- return IsWidevineExperimentKeySystem(aKeySystem);
+ return StaticPrefs::media_eme_widevine_experiment_enabled() &&
+ (StaticPrefs::media_wmf_media_engine_enabled() == 1 ||
+ StaticPrefs::media_wmf_media_engine_enabled() == 2);
}
-bool IsWidevineExperimentKeySystem(const nsAString& aKeySystem) {
+bool IsWidevineExperimentKeySystemAndSupported(const nsAString& aKeySystem) {
+ if (!IsWidevineHardwareDecryptionEnabled()) {
+ return false;
+ }
return aKeySystem.EqualsLiteral(kWidevineExperimentKeySystemName) ||
aKeySystem.EqualsLiteral(kWidevineExperiment2KeySystemName);
}
@@ -121,26 +119,6 @@ nsString KeySystemToProxyName(const nsAString& aKeySystem) {
return u""_ns;
}
-#define ENUM_TO_STR(enumVal) \
- case enumVal: \
- return #enumVal
-
-const char* ToMediaKeyStatusStr(dom::MediaKeyStatus aStatus) {
- switch (aStatus) {
- ENUM_TO_STR(dom::MediaKeyStatus::Usable);
- ENUM_TO_STR(dom::MediaKeyStatus::Expired);
- ENUM_TO_STR(dom::MediaKeyStatus::Released);
- ENUM_TO_STR(dom::MediaKeyStatus::Output_restricted);
- ENUM_TO_STR(dom::MediaKeyStatus::Output_downscaled);
- ENUM_TO_STR(dom::MediaKeyStatus::Status_pending);
- ENUM_TO_STR(dom::MediaKeyStatus::Internal_error);
- default:
- return "Undefined MediaKeyStatus!";
- }
-}
-
-#undef ENUM_TO_STR
-
bool IsHardwareDecryptionSupported(
const dom::MediaKeySystemConfiguration& aConfig) {
for (const auto& capabilities : aConfig.mAudioCapabilities) {
@@ -223,7 +201,9 @@ void MFCDMCapabilitiesIPDLToKeySystemConfig(
aKeySystemConfig.mEncryptionSchemes.AppendElement(
NS_ConvertUTF8toUTF16(EncryptionSchemeStr(scheme)));
}
- aKeySystemConfig.mIsHDCP22Compatible = aCDMConfig.isHDCP22Compatible();
+ aKeySystemConfig.mIsHDCP22Compatible = aCDMConfig.isHDCP22Compatible()
+ ? *aCDMConfig.isHDCP22Compatible()
+ : false;
EME_LOG("New Capabilities=%s",
NS_ConvertUTF16toUTF8(aKeySystemConfig.GetDebugInfo()).get());
}
@@ -270,4 +250,21 @@ bool DoesKeySystemSupportHardwareDecryption(const nsAString& aKeySystem) {
return false;
}
+void DeprecationWarningLog(const dom::Document* aDocument,
+ const char* aMsgName) {
+ if (!aDocument || !aMsgName) {
+ return;
+ }
+ EME_LOG("DeprecationWarning Logging deprecation warning '%s' to WebConsole.",
+ aMsgName);
+ nsTHashMap<nsCharPtrHashKey, bool> warnings;
+ warnings.InsertOrUpdate(aMsgName, true);
+ AutoTArray<nsString, 1> params;
+ nsString& uri = *params.AppendElement();
+ Unused << aDocument->GetDocumentURI(uri);
+ nsContentUtils::ReportToConsole(nsIScriptError::warningFlag, "Media"_ns,
+ aDocument, nsContentUtils::eDOM_PROPERTIES,
+ aMsgName, params);
+}
+
} // namespace mozilla
diff --git a/dom/media/eme/EMEUtils.h b/dom/media/eme/EMEUtils.h
index 3fbf22f359..424346645c 100644
--- a/dom/media/eme/EMEUtils.h
+++ b/dom/media/eme/EMEUtils.h
@@ -23,7 +23,8 @@ struct KeySystemConfig;
namespace dom {
class ArrayBufferViewOrArrayBuffer;
-}
+class Document;
+} // namespace dom
#ifndef EME_LOG
LogModule* GetEMELog();
@@ -61,14 +62,14 @@ bool IsClearkeyKeySystem(const nsAString& aKeySystem);
bool IsWidevineKeySystem(const nsAString& aKeySystem);
#ifdef MOZ_WMF_CDM
+bool IsPlayReadyEnabled();
+
bool IsPlayReadyKeySystemAndSupported(const nsAString& aKeySystem);
-bool IsPlayReadyKeySystem(const nsAString& aKeySystem);
+bool IsWidevineHardwareDecryptionEnabled();
bool IsWidevineExperimentKeySystemAndSupported(const nsAString& aKeySystem);
-bool IsWidevineExperimentKeySystem(const nsAString& aKeySystem);
-
bool IsWMFClearKeySystemAndSupported(const nsAString& aKeySystem);
#endif
@@ -107,6 +108,9 @@ bool CheckIfHarewareDRMConfigExists(
bool DoesKeySystemSupportHardwareDecryption(const nsAString& aKeySystem);
+void DeprecationWarningLog(const dom::Document* aDocument,
+ const char* aMsgName);
+
} // namespace mozilla
#endif // EME_LOG_H_
diff --git a/dom/media/eme/KeySystemConfig.cpp b/dom/media/eme/KeySystemConfig.cpp
index 0cb5da1a56..8f3227ecf6 100644
--- a/dom/media/eme/KeySystemConfig.cpp
+++ b/dom/media/eme/KeySystemConfig.cpp
@@ -68,221 +68,262 @@ bool KeySystemConfig::Supports(const nsAString& aKeySystem) {
return false;
}
-/* static */
-bool KeySystemConfig::CreateKeySystemConfigs(
- const nsAString& aKeySystem, const DecryptionInfo aDecryption,
+/* static */ void KeySystemConfig::CreateClearKeyKeySystemConfigs(
+ const KeySystemConfigRequest& aRequest,
nsTArray<KeySystemConfig>& aOutConfigs) {
- if (!Supports(aKeySystem)) {
- return false;
+ KeySystemConfig* config = aOutConfigs.AppendElement();
+ config->mKeySystem = aRequest.mKeySystem;
+ config->mInitDataTypes.AppendElement(u"cenc"_ns);
+ config->mInitDataTypes.AppendElement(u"keyids"_ns);
+ config->mInitDataTypes.AppendElement(u"webm"_ns);
+ config->mPersistentState = Requirement::Optional;
+ config->mDistinctiveIdentifier = Requirement::NotAllowed;
+ config->mSessionTypes.AppendElement(SessionType::Temporary);
+ config->mEncryptionSchemes.AppendElement(u"cenc"_ns);
+ config->mEncryptionSchemes.AppendElement(u"cbcs"_ns);
+ config->mEncryptionSchemes.AppendElement(u"cbcs-1-9"_ns);
+ if (StaticPrefs::media_clearkey_persistent_license_enabled()) {
+ config->mSessionTypes.AppendElement(SessionType::PersistentLicense);
}
-
- if (IsClearkeyKeySystem(aKeySystem)) {
- KeySystemConfig* config = aOutConfigs.AppendElement();
- config->mKeySystem = aKeySystem;
- config->mInitDataTypes.AppendElement(u"cenc"_ns);
- config->mInitDataTypes.AppendElement(u"keyids"_ns);
- config->mInitDataTypes.AppendElement(u"webm"_ns);
- config->mPersistentState = Requirement::Optional;
- config->mDistinctiveIdentifier = Requirement::NotAllowed;
- config->mSessionTypes.AppendElement(SessionType::Temporary);
- config->mEncryptionSchemes.AppendElement(u"cenc"_ns);
- config->mEncryptionSchemes.AppendElement(u"cbcs"_ns);
- config->mEncryptionSchemes.AppendElement(u"cbcs-1-9"_ns);
- if (StaticPrefs::media_clearkey_persistent_license_enabled()) {
- config->mSessionTypes.AppendElement(SessionType::PersistentLicense);
- }
#if defined(XP_WIN)
- // Clearkey CDM uses WMF's H.264 decoder on Windows.
- if (WMFDecoderModule::CanCreateMFTDecoder(WMFStreamType::H264)) {
- config->mMP4.SetCanDecryptAndDecode(EME_CODEC_H264);
- } else {
- config->mMP4.SetCanDecrypt(EME_CODEC_H264);
- }
-#else
+ // Clearkey CDM uses WMF's H.264 decoder on Windows.
+ if (WMFDecoderModule::CanCreateMFTDecoder(WMFStreamType::H264)) {
+ config->mMP4.SetCanDecryptAndDecode(EME_CODEC_H264);
+ } else {
config->mMP4.SetCanDecrypt(EME_CODEC_H264);
+ }
+#else
+ config->mMP4.SetCanDecrypt(EME_CODEC_H264);
#endif
- config->mMP4.SetCanDecrypt(EME_CODEC_AAC);
- config->mMP4.SetCanDecrypt(EME_CODEC_FLAC);
- config->mMP4.SetCanDecrypt(EME_CODEC_OPUS);
- config->mMP4.SetCanDecrypt(EME_CODEC_VP9);
+ config->mMP4.SetCanDecrypt(EME_CODEC_AAC);
+ config->mMP4.SetCanDecrypt(EME_CODEC_FLAC);
+ config->mMP4.SetCanDecrypt(EME_CODEC_OPUS);
+ config->mMP4.SetCanDecrypt(EME_CODEC_VP9);
#ifdef MOZ_AV1
- config->mMP4.SetCanDecrypt(EME_CODEC_AV1);
+ config->mMP4.SetCanDecrypt(EME_CODEC_AV1);
#endif
- config->mWebM.SetCanDecrypt(EME_CODEC_VORBIS);
- config->mWebM.SetCanDecrypt(EME_CODEC_OPUS);
- config->mWebM.SetCanDecrypt(EME_CODEC_VP8);
- config->mWebM.SetCanDecrypt(EME_CODEC_VP9);
+ config->mWebM.SetCanDecrypt(EME_CODEC_VORBIS);
+ config->mWebM.SetCanDecrypt(EME_CODEC_OPUS);
+ config->mWebM.SetCanDecrypt(EME_CODEC_VP8);
+ config->mWebM.SetCanDecrypt(EME_CODEC_VP9);
#ifdef MOZ_AV1
- config->mWebM.SetCanDecrypt(EME_CODEC_AV1);
+ config->mWebM.SetCanDecrypt(EME_CODEC_AV1);
#endif
- if (StaticPrefs::media_clearkey_test_key_systems_enabled()) {
- // Add testing key systems. These offer the same capabilities as the
- // base clearkey system, so just clone clearkey and change the name.
- KeySystemConfig clearkeyWithProtectionQuery{*config};
- clearkeyWithProtectionQuery.mKeySystem.AssignLiteral(
- kClearKeyWithProtectionQueryKeySystemName);
- aOutConfigs.AppendElement(std::move(clearkeyWithProtectionQuery));
- }
- return true;
+ if (StaticPrefs::media_clearkey_test_key_systems_enabled()) {
+ // Add testing key systems. These offer the same capabilities as the
+ // base clearkey system, so just clone clearkey and change the name.
+ KeySystemConfig clearkeyWithProtectionQuery{*config};
+ clearkeyWithProtectionQuery.mKeySystem.AssignLiteral(
+ kClearKeyWithProtectionQueryKeySystemName);
+ aOutConfigs.AppendElement(std::move(clearkeyWithProtectionQuery));
}
+}
- if (IsWidevineKeySystem(aKeySystem)) {
- KeySystemConfig* config = aOutConfigs.AppendElement();
- config->mKeySystem = aKeySystem;
- config->mInitDataTypes.AppendElement(u"cenc"_ns);
- config->mInitDataTypes.AppendElement(u"keyids"_ns);
- config->mInitDataTypes.AppendElement(u"webm"_ns);
- config->mPersistentState = Requirement::Optional;
- config->mDistinctiveIdentifier = Requirement::NotAllowed;
- config->mSessionTypes.AppendElement(SessionType::Temporary);
+/* static */ void KeySystemConfig::CreateWivineL3KeySystemConfigs(
+ const KeySystemConfigRequest& aRequest,
+ nsTArray<KeySystemConfig>& aOutConfigs) {
+ KeySystemConfig* config = aOutConfigs.AppendElement();
+ config->mKeySystem = aRequest.mKeySystem;
+ config->mInitDataTypes.AppendElement(u"cenc"_ns);
+ config->mInitDataTypes.AppendElement(u"keyids"_ns);
+ config->mInitDataTypes.AppendElement(u"webm"_ns);
+ config->mPersistentState = Requirement::Optional;
+ config->mDistinctiveIdentifier = Requirement::NotAllowed;
+ config->mSessionTypes.AppendElement(SessionType::Temporary);
#ifdef MOZ_WIDGET_ANDROID
- config->mSessionTypes.AppendElement(SessionType::PersistentLicense);
+ config->mSessionTypes.AppendElement(SessionType::PersistentLicense);
#endif
- config->mAudioRobustness.AppendElement(u"SW_SECURE_CRYPTO"_ns);
- config->mVideoRobustness.AppendElement(u"SW_SECURE_CRYPTO"_ns);
- config->mVideoRobustness.AppendElement(u"SW_SECURE_DECODE"_ns);
- config->mEncryptionSchemes.AppendElement(u"cenc"_ns);
- config->mEncryptionSchemes.AppendElement(u"cbcs"_ns);
- config->mEncryptionSchemes.AppendElement(u"cbcs-1-9"_ns);
+ config->mAudioRobustness.AppendElement(u"SW_SECURE_CRYPTO"_ns);
+ config->mVideoRobustness.AppendElement(u"SW_SECURE_CRYPTO"_ns);
+ config->mVideoRobustness.AppendElement(u"SW_SECURE_DECODE"_ns);
+ config->mEncryptionSchemes.AppendElement(u"cenc"_ns);
+ config->mEncryptionSchemes.AppendElement(u"cbcs"_ns);
+ config->mEncryptionSchemes.AppendElement(u"cbcs-1-9"_ns);
#if defined(MOZ_WIDGET_ANDROID)
- // MediaDrm.isCryptoSchemeSupported only allows passing
- // "video/mp4" or "video/webm" for mimetype string.
- // See
- // https://developer.android.com/reference/android/media/MediaDrm.html#isCryptoSchemeSupported(java.util.UUID,
- // java.lang.String) for more detail.
- typedef struct {
- const nsCString& mMimeType;
- const nsCString& mEMECodecType;
- const char16_t* mCodecType;
- KeySystemConfig::ContainerSupport* mSupportType;
- } DataForValidation;
+ // MediaDrm.isCryptoSchemeSupported only allows passing
+ // "video/mp4" or "video/webm" for mimetype string.
+ // See
+ // https://developer.android.com/reference/android/media/MediaDrm.html#isCryptoSchemeSupported(java.util.UUID,
+ // java.lang.String) for more detail.
+ typedef struct {
+ const nsCString& mMimeType;
+ const nsCString& mEMECodecType;
+ const char16_t* mCodecType;
+ KeySystemConfig::ContainerSupport* mSupportType;
+ } DataForValidation;
- DataForValidation validationList[] = {
- {nsCString(VIDEO_MP4), EME_CODEC_H264, java::MediaDrmProxy::AVC,
- &config->mMP4},
- {nsCString(VIDEO_MP4), EME_CODEC_VP9, java::MediaDrmProxy::AVC,
- &config->mMP4},
+ DataForValidation validationList[] = {
+ {nsCString(VIDEO_MP4), EME_CODEC_H264, java::MediaDrmProxy::AVC,
+ &config->mMP4},
+ {nsCString(VIDEO_MP4), EME_CODEC_VP9, java::MediaDrmProxy::AVC,
+ &config->mMP4},
# ifdef MOZ_AV1
- {nsCString(VIDEO_MP4), EME_CODEC_AV1, java::MediaDrmProxy::AV1,
- &config->mMP4},
+ {nsCString(VIDEO_MP4), EME_CODEC_AV1, java::MediaDrmProxy::AV1,
+ &config->mMP4},
# endif
- {nsCString(AUDIO_MP4), EME_CODEC_AAC, java::MediaDrmProxy::AAC,
- &config->mMP4},
- {nsCString(AUDIO_MP4), EME_CODEC_FLAC, java::MediaDrmProxy::FLAC,
- &config->mMP4},
- {nsCString(AUDIO_MP4), EME_CODEC_OPUS, java::MediaDrmProxy::OPUS,
- &config->mMP4},
- {nsCString(VIDEO_WEBM), EME_CODEC_VP8, java::MediaDrmProxy::VP8,
- &config->mWebM},
- {nsCString(VIDEO_WEBM), EME_CODEC_VP9, java::MediaDrmProxy::VP9,
- &config->mWebM},
+ {nsCString(AUDIO_MP4), EME_CODEC_AAC, java::MediaDrmProxy::AAC,
+ &config->mMP4},
+ {nsCString(AUDIO_MP4), EME_CODEC_FLAC, java::MediaDrmProxy::FLAC,
+ &config->mMP4},
+ {nsCString(AUDIO_MP4), EME_CODEC_OPUS, java::MediaDrmProxy::OPUS,
+ &config->mMP4},
+ {nsCString(VIDEO_WEBM), EME_CODEC_VP8, java::MediaDrmProxy::VP8,
+ &config->mWebM},
+ {nsCString(VIDEO_WEBM), EME_CODEC_VP9, java::MediaDrmProxy::VP9,
+ &config->mWebM},
# ifdef MOZ_AV1
- {nsCString(VIDEO_WEBM), EME_CODEC_AV1, java::MediaDrmProxy::AV1,
- &config->mWebM},
+ {nsCString(VIDEO_WEBM), EME_CODEC_AV1, java::MediaDrmProxy::AV1,
+ &config->mWebM},
# endif
- {nsCString(AUDIO_WEBM), EME_CODEC_VORBIS, java::MediaDrmProxy::VORBIS,
- &config->mWebM},
- {nsCString(AUDIO_WEBM), EME_CODEC_OPUS, java::MediaDrmProxy::OPUS,
- &config->mWebM},
- };
+ {nsCString(AUDIO_WEBM), EME_CODEC_VORBIS, java::MediaDrmProxy::VORBIS,
+ &config->mWebM},
+ {nsCString(AUDIO_WEBM), EME_CODEC_OPUS, java::MediaDrmProxy::OPUS,
+ &config->mWebM},
+ };
- for (const auto& data : validationList) {
- if (java::MediaDrmProxy::IsCryptoSchemeSupported(kWidevineKeySystemName,
- data.mMimeType)) {
- if (!AndroidDecoderModule::SupportsMimeType(data.mMimeType).isEmpty()) {
- data.mSupportType->SetCanDecryptAndDecode(data.mEMECodecType);
- } else {
- data.mSupportType->SetCanDecrypt(data.mEMECodecType);
- }
+ for (const auto& data : validationList) {
+ if (java::MediaDrmProxy::IsCryptoSchemeSupported(kWidevineKeySystemName,
+ data.mMimeType)) {
+ if (!AndroidDecoderModule::SupportsMimeType(data.mMimeType).isEmpty()) {
+ data.mSupportType->SetCanDecryptAndDecode(data.mEMECodecType);
+ } else {
+ data.mSupportType->SetCanDecrypt(data.mEMECodecType);
}
}
+ }
#else
# if defined(XP_WIN)
- // Widevine CDM doesn't include an AAC decoder. So if WMF can't
- // decode AAC, and a codec wasn't specified, be conservative
- // and reject the MediaKeys request, since we assume Widevine
- // will be used with AAC.
- if (WMFDecoderModule::CanCreateMFTDecoder(WMFStreamType::AAC)) {
- config->mMP4.SetCanDecrypt(EME_CODEC_AAC);
- }
-# else
+ // Widevine CDM doesn't include an AAC decoder. So if WMF can't
+ // decode AAC, and a codec wasn't specified, be conservative
+ // and reject the MediaKeys request, since we assume Widevine
+ // will be used with AAC.
+ if (WMFDecoderModule::CanCreateMFTDecoder(WMFStreamType::AAC)) {
config->mMP4.SetCanDecrypt(EME_CODEC_AAC);
+ }
+# else
+ config->mMP4.SetCanDecrypt(EME_CODEC_AAC);
# endif
- config->mMP4.SetCanDecrypt(EME_CODEC_FLAC);
- config->mMP4.SetCanDecrypt(EME_CODEC_OPUS);
- config->mMP4.SetCanDecryptAndDecode(EME_CODEC_H264);
- config->mMP4.SetCanDecryptAndDecode(EME_CODEC_VP9);
+ config->mMP4.SetCanDecrypt(EME_CODEC_FLAC);
+ config->mMP4.SetCanDecrypt(EME_CODEC_OPUS);
+ config->mMP4.SetCanDecryptAndDecode(EME_CODEC_H264);
+ config->mMP4.SetCanDecryptAndDecode(EME_CODEC_VP9);
# ifdef MOZ_AV1
- config->mMP4.SetCanDecryptAndDecode(EME_CODEC_AV1);
+ config->mMP4.SetCanDecryptAndDecode(EME_CODEC_AV1);
# endif
- config->mWebM.SetCanDecrypt(EME_CODEC_VORBIS);
- config->mWebM.SetCanDecrypt(EME_CODEC_OPUS);
- config->mWebM.SetCanDecryptAndDecode(EME_CODEC_VP8);
- config->mWebM.SetCanDecryptAndDecode(EME_CODEC_VP9);
+ config->mWebM.SetCanDecrypt(EME_CODEC_VORBIS);
+ config->mWebM.SetCanDecrypt(EME_CODEC_OPUS);
+ config->mWebM.SetCanDecryptAndDecode(EME_CODEC_VP8);
+ config->mWebM.SetCanDecryptAndDecode(EME_CODEC_VP9);
# ifdef MOZ_AV1
- config->mWebM.SetCanDecryptAndDecode(EME_CODEC_AV1);
+ config->mWebM.SetCanDecryptAndDecode(EME_CODEC_AV1);
# endif
#endif
- return true;
- }
+}
+
+/* static */
+RefPtr<KeySystemConfig::SupportedConfigsPromise>
+KeySystemConfig::CreateKeySystemConfigs(
+ const nsTArray<KeySystemConfigRequest>& aRequests) {
+ // Create available configs for all supported key systems in the request, but
+ // some of them might not be created immediately.
+
+ nsTArray<KeySystemConfig> outConfigs;
+ nsTArray<KeySystemConfigRequest> asyncRequests;
+
+ for (const auto& request : aRequests) {
+ const nsAString& keySystem = request.mKeySystem;
+ if (!Supports(keySystem)) {
+ continue;
+ }
+
+ if (IsClearkeyKeySystem(keySystem)) {
+ CreateClearKeyKeySystemConfigs(request, outConfigs);
+ } else if (IsWidevineKeySystem(keySystem)) {
+ CreateWivineL3KeySystemConfigs(request, outConfigs);
+ }
#ifdef MOZ_WMF_CDM
- if (IsPlayReadyKeySystemAndSupported(aKeySystem) ||
- IsWidevineExperimentKeySystemAndSupported(aKeySystem)) {
- RefPtr<WMFCDMImpl> cdm = MakeRefPtr<WMFCDMImpl>(aKeySystem);
- return cdm->GetCapabilities(aDecryption == DecryptionInfo::Hardware,
- aOutConfigs);
- }
+ else if (IsPlayReadyKeySystemAndSupported(keySystem) ||
+ IsWidevineExperimentKeySystemAndSupported(keySystem)) {
+ asyncRequests.AppendElement(request);
+ }
#endif
- return false;
-}
+ }
-bool KeySystemConfig::IsSameKeySystem(const nsAString& aKeySystem) const {
#ifdef MOZ_WMF_CDM
- // We want to map Widevine experiment key system to normal Widevine key system
- // as well.
- if (IsWidevineExperimentKeySystemAndSupported(mKeySystem)) {
- return mKeySystem.Equals(aKeySystem) ||
- aKeySystem.EqualsLiteral(kWidevineKeySystemName);
+ if (!asyncRequests.IsEmpty()) {
+ RefPtr<SupportedConfigsPromise::Private> promise =
+ new SupportedConfigsPromise::Private(__func__);
+ RefPtr<WMFCDMCapabilites> cdm = new WMFCDMCapabilites();
+ cdm->GetCapabilities(asyncRequests)
+ ->Then(GetMainThreadSerialEventTarget(), __func__,
+ [syncConfigs = std::move(outConfigs),
+ promise](SupportedConfigsPromise::ResolveOrRejectValue&&
+ aResult) mutable {
+ // Return the capabilities we already know
+ if (aResult.IsReject()) {
+ promise->Resolve(std::move(syncConfigs), __func__);
+ return;
+ }
+ // Merge sync results with async results
+ auto& asyncConfigs = aResult.ResolveValue();
+ asyncConfigs.AppendElements(std::move(syncConfigs));
+ promise->Resolve(std::move(asyncConfigs), __func__);
+ });
+ return promise;
}
#endif
- return mKeySystem.Equals(aKeySystem);
+ return SupportedConfigsPromise::CreateAndResolve(std::move(outConfigs),
+ __func__);
}
/* static */
void KeySystemConfig::GetGMPKeySystemConfigs(dom::Promise* aPromise) {
MOZ_ASSERT(aPromise);
- nsTArray<KeySystemConfig> keySystemConfigs;
+
+ // Generate config requests
const nsTArray<nsString> keySystemNames{
NS_ConvertUTF8toUTF16(kClearKeyKeySystemName),
NS_ConvertUTF8toUTF16(kWidevineKeySystemName),
};
- FallibleTArray<dom::CDMInformation> cdmInfo;
- for (const auto& name : keySystemNames) {
+ nsTArray<KeySystemConfigRequest> requests;
+ for (const auto& keySystem : keySystemNames) {
#ifdef MOZ_WMF_CDM
- if (IsWMFClearKeySystemAndSupported(name)) {
+ if (IsWMFClearKeySystemAndSupported(keySystem)) {
// Using wmf clearkey, not gmp clearkey.
continue;
}
#endif
- if (KeySystemConfig::CreateKeySystemConfigs(
- name, KeySystemConfig::DecryptionInfo::Software,
- keySystemConfigs)) {
- auto* info = cdmInfo.AppendElement(fallible);
- if (!info) {
- aPromise->MaybeReject(NS_ERROR_OUT_OF_MEMORY);
- return;
- }
- MOZ_ASSERT(keySystemConfigs.Length() == cdmInfo.Length());
- info->mKeySystemName = name;
- info->mCapabilities = keySystemConfigs.LastElement().GetDebugInfo();
- info->mClearlead = DoesKeySystemSupportClearLead(name);
- // TODO : ask real CDM
- info->mIsHDCP22Compatible = false;
- }
+ requests.AppendElement(
+ KeySystemConfigRequest{keySystem, DecryptionInfo::Software});
}
- aPromise->MaybeResolve(cdmInfo);
+
+ // Get supported configs
+ KeySystemConfig::CreateKeySystemConfigs(requests)->Then(
+ GetMainThreadSerialEventTarget(), __func__,
+ [promise = RefPtr<dom::Promise>{aPromise}](
+ const SupportedConfigsPromise::ResolveOrRejectValue& aResult) {
+ if (aResult.IsResolve()) {
+ // Generate CDMInformation from configs
+ FallibleTArray<dom::CDMInformation> cdmInfo;
+ for (const auto& config : aResult.ResolveValue()) {
+ auto* info = cdmInfo.AppendElement(fallible);
+ if (!info) {
+ promise->MaybeReject(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+ info->mKeySystemName = config.mKeySystem;
+ info->mCapabilities = config.GetDebugInfo();
+ info->mClearlead = DoesKeySystemSupportClearLead(config.mKeySystem);
+ // TODO : ask real CDM
+ info->mIsHDCP22Compatible = false;
+ }
+ promise->MaybeResolve(cdmInfo);
+ } else {
+ promise->MaybeReject(NS_ERROR_DOM_MEDIA_CDM_ERR);
+ }
+ });
}
nsString KeySystemConfig::GetDebugInfo() const {
diff --git a/dom/media/eme/KeySystemConfig.h b/dom/media/eme/KeySystemConfig.h
index cc35ba76de..39027d4401 100644
--- a/dom/media/eme/KeySystemConfig.h
+++ b/dom/media/eme/KeySystemConfig.h
@@ -9,12 +9,23 @@
#include "nsString.h"
#include "nsTArray.h"
+#include "mozilla/MozPromise.h"
#include "mozilla/dom/MediaKeysBinding.h"
+#include "mozilla/dom/MediaKeySystemAccessBinding.h"
namespace mozilla {
+struct KeySystemConfigRequest;
+
struct KeySystemConfig {
public:
+ using SupportedConfigsPromise =
+ MozPromise<nsTArray<KeySystemConfig>, bool /* aIgnored */,
+ /* IsExclusive = */ true>;
+ using KeySystemConfigPromise =
+ MozPromise<dom::MediaKeySystemConfiguration, bool /* aIgnored */,
+ /* IsExclusive = */ true>;
+
// EME MediaKeysRequirement:
// https://www.w3.org/TR/encrypted-media/#dom-mediakeysrequirement
enum class Requirement {
@@ -129,9 +140,8 @@ struct KeySystemConfig {
Software,
Hardware,
};
- static bool CreateKeySystemConfigs(const nsAString& aKeySystem,
- const DecryptionInfo aDecryption,
- nsTArray<KeySystemConfig>& aOutConfigs);
+ static RefPtr<SupportedConfigsPromise> CreateKeySystemConfigs(
+ const nsTArray<KeySystemConfigRequest>& aRequests);
static void GetGMPKeySystemConfigs(dom::Promise* aPromise);
KeySystemConfig() = default;
@@ -169,10 +179,6 @@ struct KeySystemConfig {
nsString GetDebugInfo() const;
- // Return true if the given key system is equal to `mKeySystem`, or it can be
- // mapped to the same key system
- bool IsSameKeySystem(const nsAString& aKeySystem) const;
-
nsString mKeySystem;
nsTArray<nsString> mInitDataTypes;
Requirement mPersistentState = Requirement::NotAllowed;
@@ -184,6 +190,22 @@ struct KeySystemConfig {
ContainerSupport mMP4;
ContainerSupport mWebM;
bool mIsHDCP22Compatible = false;
+
+ private:
+ static void CreateClearKeyKeySystemConfigs(
+ const KeySystemConfigRequest& aRequest,
+ nsTArray<KeySystemConfig>& aOutConfigs);
+ static void CreateWivineL3KeySystemConfigs(
+ const KeySystemConfigRequest& aRequest,
+ nsTArray<KeySystemConfig>& aOutConfigs);
+};
+
+struct KeySystemConfigRequest final {
+ KeySystemConfigRequest(const nsAString& aKeySystem,
+ KeySystemConfig::DecryptionInfo aDecryption)
+ : mKeySystem(aKeySystem), mDecryption(aDecryption) {}
+ const nsString mKeySystem;
+ const KeySystemConfig::DecryptionInfo mDecryption;
};
KeySystemConfig::SessionType ConvertToKeySystemConfigSessionType(
diff --git a/dom/media/eme/MediaKeySession.cpp b/dom/media/eme/MediaKeySession.cpp
index 8a3a01dd5c..66ee77a2f5 100644
--- a/dom/media/eme/MediaKeySession.cpp
+++ b/dom/media/eme/MediaKeySession.cpp
@@ -250,17 +250,33 @@ already_AddRefed<Promise> MediaKeySession::GenerateRequest(
// cdm implementation value does not support initDataType as an
// Initialization Data Type, return a promise rejected with a
// NotSupportedError. String comparison is case-sensitive.
- if (!MediaKeySystemAccess::KeySystemSupportsInitDataType(
- mKeySystem, aInitDataType, mHardwareDecryption)) {
- promise->MaybeRejectWithNotSupportedError(
- "Unsupported initDataType passed to MediaKeySession.generateRequest()");
- EME_LOG(
- "MediaKeySession[%p,'%s'] GenerateRequest() failed, unsupported "
- "initDataType",
- this, NS_ConvertUTF16toUTF8(mSessionId).get());
- return promise.forget();
- }
+ MediaKeySystemAccess::KeySystemSupportsInitDataType(mKeySystem, aInitDataType,
+ mHardwareDecryption)
+ ->Then(GetMainThreadSerialEventTarget(), __func__,
+ [self = RefPtr<MediaKeySession>{this}, this,
+ initDataType = nsString{aInitDataType},
+ initData = std::move(data), promise](
+ const GenericPromise::ResolveOrRejectValue& aResult) mutable {
+ if (aResult.IsReject()) {
+ promise->MaybeRejectWithNotSupportedError(
+ "Unsupported initDataType passed to "
+ "MediaKeySession.generateRequest()");
+ EME_LOG(
+ "MediaKeySession[%p,'%s'] GenerateRequest() failed, "
+ "unsupported "
+ "initDataType",
+ this, NS_ConvertUTF16toUTF8(mSessionId).get());
+ return;
+ }
+ // Run rest of steps in the spec, starting from 6.6.2.7
+ CompleteGenerateRequest(initDataType, initData, promise);
+ });
+ return promise.forget();
+}
+void MediaKeySession::CompleteGenerateRequest(const nsString& aInitDataType,
+ nsTArray<uint8_t>& aData,
+ DetailedPromise* aPromise) {
// Let init data be a copy of the contents of the initData parameter.
// Note: Handled by the CopyArrayBufferViewOrArrayBufferData call above.
@@ -270,42 +286,41 @@ already_AddRefed<Promise> MediaKeySession::GenerateRequest(
// Run the following steps in parallel:
- // If the init data is not valid for initDataType, reject promise with
- // a newly created TypeError.
- if (!ValidateInitData(data, aInitDataType)) {
+ // If the init data is not valid for initDataType, reject promise with a newly
+ // created TypeError.
+ if (!ValidateInitData(aData, aInitDataType)) {
// If the preceding step failed, reject promise with a newly created
// TypeError.
- promise->MaybeRejectWithTypeError(
- "initData sanitization failed in MediaKeySession.generateRequest()");
+ aPromise->MaybeRejectWithTypeError(
+ "initData sanitization failed in "
+ "MediaKeySession.generateRequest()");
EME_LOG(
- "MediaKeySession[%p,'%s'] GenerateRequest() initData sanitization "
+ "MediaKeySession[%p,'%s'] GenerateRequest() initData "
+ "sanitization "
"failed",
this, NS_ConvertUTF16toUTF8(mSessionId).get());
- return promise.forget();
+ return;
}
// Let sanitized init data be a validated and sanitized version of init data.
// If sanitized init data is empty, reject promise with a NotSupportedError.
- // Note: Remaining steps of generateRequest method continue in CDM.
+ // Note: Remaining steps of generateRequest method continue in CDM.
// Convert initData to hex for easier logging.
- // Note: CreateSession() std::move()s the data out of the array, so we have
- // to copy it here.
- nsAutoCString hexInitData(ToHexString(data));
- PromiseId pid = mKeys->StorePromise(promise);
+ // Note: CreateSession() std::move()s the data out of the array, so we have to
+ // copy it here.
+ nsAutoCString hexInitData(ToHexString(aData));
+ PromiseId pid = mKeys->StorePromise(aPromise);
mKeys->ConnectPendingPromiseIdWithToken(pid, Token());
mKeys->GetCDMProxy()->CreateSession(Token(), mSessionType, pid, aInitDataType,
- data);
-
+ aData);
EME_LOG(
"MediaKeySession[%p,'%s'] GenerateRequest() sent, "
"promiseId=%d initData='%s' initDataType='%s'",
this, NS_ConvertUTF16toUTF8(mSessionId).get(), pid, hexInitData.get(),
NS_ConvertUTF16toUTF8(aInitDataType).get());
-
- return promise.forget();
}
already_AddRefed<Promise> MediaKeySession::Load(const nsAString& aSessionId,
diff --git a/dom/media/eme/MediaKeySession.h b/dom/media/eme/MediaKeySession.h
index 7204f99eef..b0edb16cf1 100644
--- a/dom/media/eme/MediaKeySession.h
+++ b/dom/media/eme/MediaKeySession.h
@@ -120,6 +120,12 @@ class MediaKeySession final : public DOMEventTargetHelper,
already_AddRefed<DetailedPromise> MakePromise(ErrorResult& aRv,
const nsACString& aName);
+ // EME spec, starting from 6.6.2.7
+ // https://w3c.github.io/encrypted-media/
+ void CompleteGenerateRequest(const nsString& aInitDataType,
+ nsTArray<uint8_t>& aData,
+ DetailedPromise* aPromise);
+
RefPtr<DetailedPromise> mClosed;
RefPtr<MediaKeyError> mMediaKeyError;
diff --git a/dom/media/eme/MediaKeySystemAccess.cpp b/dom/media/eme/MediaKeySystemAccess.cpp
index d498c2a773..af9038d309 100644
--- a/dom/media/eme/MediaKeySystemAccess.cpp
+++ b/dom/media/eme/MediaKeySystemAccess.cpp
@@ -10,7 +10,6 @@
#include "DecoderDoctorDiagnostics.h"
#include "DecoderTraits.h"
-#include "KeySystemConfig.h"
#include "MP4Decoder.h"
#include "MediaContainerType.h"
#include "WebMDecoder.h"
@@ -19,6 +18,7 @@
#include "mozilla/Preferences.h"
#include "mozilla/Services.h"
#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/dom/Document.h"
#include "mozilla/dom/KeySystemNames.h"
#include "mozilla/dom/MediaKeySession.h"
#include "mozilla/dom/MediaKeySystemAccessBinding.h"
@@ -231,75 +231,83 @@ static KeySystemConfig::EMECodecString ToEMEAPICodecString(
return ""_ns;
}
-static nsTArray<KeySystemConfig> GetSupportedKeySystems(
- const nsAString& aKeySystem, bool aIsHardwareDecryption) {
+static RefPtr<KeySystemConfig::SupportedConfigsPromise>
+GetSupportedKeySystemConfigs(const nsAString& aKeySystem,
+ bool aIsHardwareDecryption) {
using DecryptionInfo = KeySystemConfig::DecryptionInfo;
- nsTArray<KeySystemConfig> keySystemConfigs;
+ nsTArray<KeySystemConfigRequest> requests;
+
+ // Software Widevine and Clearkey
if (IsWidevineKeySystem(aKeySystem) || IsClearkeyKeySystem(aKeySystem)) {
- Unused << KeySystemConfig::CreateKeySystemConfigs(
- aKeySystem, DecryptionInfo::Software, keySystemConfigs);
+ requests.AppendElement(
+ KeySystemConfigRequest{aKeySystem, DecryptionInfo::Software});
}
#ifdef MOZ_WMF_CDM
- if (IsPlayReadyKeySystem(aKeySystem)) {
- Unused << KeySystemConfig::CreateKeySystemConfigs(
- NS_ConvertUTF8toUTF16(kPlayReadyKeySystemName),
- DecryptionInfo::Software, keySystemConfigs);
- if (aIsHardwareDecryption) {
- Unused << KeySystemConfig::CreateKeySystemConfigs(
- NS_ConvertUTF8toUTF16(kPlayReadyKeySystemName),
- DecryptionInfo::Hardware, keySystemConfigs);
- Unused << KeySystemConfig::CreateKeySystemConfigs(
- NS_ConvertUTF8toUTF16(kPlayReadyKeySystemHardware),
- DecryptionInfo::Hardware, keySystemConfigs);
- Unused << KeySystemConfig::CreateKeySystemConfigs(
+ if (IsPlayReadyEnabled()) {
+ // PlayReady software and hardware
+ if (aKeySystem.EqualsLiteral(kPlayReadyKeySystemName) ||
+ aKeySystem.EqualsLiteral(kPlayReadyKeySystemHardware)) {
+ requests.AppendElement(
+ KeySystemConfigRequest{NS_ConvertUTF8toUTF16(kPlayReadyKeySystemName),
+ DecryptionInfo::Software});
+ if (aIsHardwareDecryption) {
+ requests.AppendElement(KeySystemConfigRequest{
+ NS_ConvertUTF8toUTF16(kPlayReadyKeySystemName),
+ DecryptionInfo::Hardware});
+ requests.AppendElement(KeySystemConfigRequest{
+ NS_ConvertUTF8toUTF16(kPlayReadyKeySystemHardware),
+ DecryptionInfo::Hardware});
+ }
+ }
+ // PlayReady clearlead
+ if (aKeySystem.EqualsLiteral(kPlayReadyHardwareClearLeadKeySystemName)) {
+ requests.AppendElement(KeySystemConfigRequest{
NS_ConvertUTF8toUTF16(kPlayReadyHardwareClearLeadKeySystemName),
- DecryptionInfo::Hardware, keySystemConfigs);
+ DecryptionInfo::Hardware});
}
}
- // If key system is kWidevineKeySystemName but with hardware decryption
- // requirement, then we need to check those experiement key systems which are
- // used for hardware decryption.
- if (IsWidevineExperimentKeySystem(aKeySystem) ||
- (IsWidevineKeySystem(aKeySystem) && aIsHardwareDecryption)) {
- Unused << KeySystemConfig::CreateKeySystemConfigs(
- NS_ConvertUTF8toUTF16(kWidevineExperimentKeySystemName),
- DecryptionInfo::Hardware, keySystemConfigs);
- Unused << KeySystemConfig::CreateKeySystemConfigs(
- NS_ConvertUTF8toUTF16(kWidevineExperiment2KeySystemName),
- DecryptionInfo::Hardware, keySystemConfigs);
- }
-#endif
- return keySystemConfigs;
-}
-static bool GetKeySystemConfigs(
- const nsAString& aKeySystem, bool aIsHardwareDecryption,
- nsTArray<KeySystemConfig>& aOutKeySystemConfig) {
- bool foundConfigs = false;
- for (auto& config :
- GetSupportedKeySystems(aKeySystem, aIsHardwareDecryption)) {
- if (config.IsSameKeySystem(aKeySystem)) {
- aOutKeySystemConfig.AppendElement(std::move(config));
- foundConfigs = true;
+ if (IsWidevineHardwareDecryptionEnabled()) {
+ // Widevine hardware
+ if (aKeySystem.EqualsLiteral(kWidevineExperimentKeySystemName) ||
+ (IsWidevineKeySystem(aKeySystem) && aIsHardwareDecryption)) {
+ requests.AppendElement(KeySystemConfigRequest{
+ NS_ConvertUTF8toUTF16(kWidevineExperimentKeySystemName),
+ DecryptionInfo::Hardware});
+ }
+ // Widevine clearlead
+ if (aKeySystem.EqualsLiteral(kWidevineExperiment2KeySystemName)) {
+ requests.AppendElement(KeySystemConfigRequest{
+ NS_ConvertUTF8toUTF16(kWidevineExperiment2KeySystemName),
+ DecryptionInfo::Hardware});
}
}
- return foundConfigs;
+#endif
+ return KeySystemConfig::CreateKeySystemConfigs(requests);
}
/* static */
-bool MediaKeySystemAccess::KeySystemSupportsInitDataType(
+RefPtr<GenericPromise> MediaKeySystemAccess::KeySystemSupportsInitDataType(
const nsAString& aKeySystem, const nsAString& aInitDataType,
bool aIsHardwareDecryption) {
- nsTArray<KeySystemConfig> implementations;
- GetKeySystemConfigs(aKeySystem, aIsHardwareDecryption, implementations);
- bool containInitType = false;
- for (const auto& config : implementations) {
- if (config.mInitDataTypes.Contains(aInitDataType)) {
- containInitType = true;
- break;
- }
- }
- return containInitType;
+ RefPtr<GenericPromise::Private> promise =
+ new GenericPromise::Private(__func__);
+ GetSupportedKeySystemConfigs(aKeySystem, aIsHardwareDecryption)
+ ->Then(GetMainThreadSerialEventTarget(), __func__,
+ [promise, initDataType = nsString{std::move(aInitDataType)}](
+ const KeySystemConfig::SupportedConfigsPromise::
+ ResolveOrRejectValue& aResult) {
+ if (aResult.IsResolve()) {
+ for (const auto& config : aResult.ResolveValue()) {
+ if (config.mInitDataTypes.Contains(initDataType)) {
+ promise->Resolve(true, __func__);
+ return;
+ }
+ }
+ }
+ promise->Reject(NS_ERROR_DOM_MEDIA_CDM_ERR, __func__);
+ });
+ return promise.forget();
}
enum CodecType { Audio, Video, Invalid };
@@ -474,7 +482,7 @@ static Sequence<MediaKeySystemMediaCapability> GetSupportedCapabilities(
const nsTArray<MediaKeySystemMediaCapability>& aRequestedCapabilities,
const MediaKeySystemConfiguration& aPartialConfig,
const KeySystemConfig& aKeySystem, DecoderDoctorDiagnostics* aDiagnostics,
- const std::function<void(const char*)>& aDeprecationLogFn) {
+ const Document* aDocument) {
// Let local accumulated configuration be a local copy of partial
// configuration. (Note: It's not necessary for us to maintain a local copy,
// as we don't need to test whether capabilites from previous calls to this
@@ -609,7 +617,7 @@ static Sequence<MediaKeySystemMediaCapability> GetSupportedCapabilities(
// If media types is empty:
if (codecs.IsEmpty()) {
// Log deprecation warning to encourage authors to not do this!
- aDeprecationLogFn("MediaEMENoCodecsDeprecatedWarning");
+ DeprecationWarningLog(aDocument, "MediaEMENoCodecsDeprecatedWarning");
// TODO: Remove this once we're sure it doesn't break the web.
// If container normatively implies a specific set of codecs and codec
// constraints: Let parameters be that set.
@@ -808,12 +816,12 @@ static Sequence<nsString> UnboxSessionTypes(
}
// 3.1.1.2 Get Supported Configuration and Consent
-static bool GetSupportedConfig(
- const KeySystemConfig& aKeySystem,
- const MediaKeySystemConfiguration& aCandidate,
- MediaKeySystemConfiguration& aOutConfig,
- DecoderDoctorDiagnostics* aDiagnostics, bool aInPrivateBrowsing,
- const std::function<void(const char*)>& aDeprecationLogFn) {
+static bool GetSupportedConfig(const KeySystemConfig& aKeySystem,
+ const MediaKeySystemConfiguration& aCandidate,
+ MediaKeySystemConfiguration& aOutConfig,
+ DecoderDoctorDiagnostics* aDiagnostics,
+ bool aInPrivateBrowsing,
+ const Document* aDocument) {
EME_LOG("Compare implementation '%s'\n with request '%s'",
NS_ConvertUTF16toUTF8(aKeySystem.GetDebugInfo()).get(),
ToCString(aCandidate).get());
@@ -941,7 +949,7 @@ static bool GetSupportedConfig(
// TODO: Most sites using EME still don't pass capabilities, so we
// can't reject on it yet without breaking them. So add this later.
// Log deprecation warning to encourage authors to not do this!
- aDeprecationLogFn("MediaEMENoCapabilitiesDeprecatedWarning");
+ DeprecationWarningLog(aDocument, "MediaEMENoCapabilitiesDeprecatedWarning");
}
// If the videoCapabilities member in candidate configuration is non-empty:
@@ -952,7 +960,7 @@ static bool GetSupportedConfig(
// and restrictions.
Sequence<MediaKeySystemMediaCapability> caps =
GetSupportedCapabilities(Video, aCandidate.mVideoCapabilities, config,
- aKeySystem, aDiagnostics, aDeprecationLogFn);
+ aKeySystem, aDiagnostics, aDocument);
// If video capabilities is null, return NotSupported.
if (caps.IsEmpty()) {
EME_LOG(
@@ -978,7 +986,7 @@ static bool GetSupportedConfig(
// restrictions.
Sequence<MediaKeySystemMediaCapability> caps =
GetSupportedCapabilities(Audio, aCandidate.mAudioCapabilities, config,
- aKeySystem, aDiagnostics, aDeprecationLogFn);
+ aKeySystem, aDiagnostics, aDocument);
// If audio capabilities is null, return NotSupported.
if (caps.IsEmpty()) {
EME_LOG(
@@ -1058,30 +1066,42 @@ static bool GetSupportedConfig(
}
/* static */
-bool MediaKeySystemAccess::GetSupportedConfig(
- const nsAString& aKeySystem,
- const Sequence<MediaKeySystemConfiguration>& aConfigs,
- MediaKeySystemConfiguration& aOutConfig,
- DecoderDoctorDiagnostics* aDiagnostics, bool aIsPrivateBrowsing,
- const std::function<void(const char*)>& aDeprecationLogFn) {
+RefPtr<KeySystemConfig::KeySystemConfigPromise>
+MediaKeySystemAccess::GetSupportedConfig(MediaKeySystemAccessRequest* aRequest,
+ bool aIsPrivateBrowsing,
+ const Document* aDocument) {
nsTArray<KeySystemConfig> implementations;
const bool isHardwareDecryptionRequest =
- CheckIfHarewareDRMConfigExists(aConfigs) ||
- DoesKeySystemSupportHardwareDecryption(aKeySystem);
- if (!GetKeySystemConfigs(aKeySystem, isHardwareDecryptionRequest,
- implementations)) {
- return false;
- }
- for (const auto& implementation : implementations) {
- for (const MediaKeySystemConfiguration& candidate : aConfigs) {
- if (mozilla::dom::GetSupportedConfig(
- implementation, candidate, aOutConfig, aDiagnostics,
- aIsPrivateBrowsing, aDeprecationLogFn)) {
- return true;
- }
- }
- }
- return false;
+ CheckIfHarewareDRMConfigExists(aRequest->mConfigs) ||
+ DoesKeySystemSupportHardwareDecryption(aRequest->mKeySystem);
+
+ RefPtr<KeySystemConfig::KeySystemConfigPromise::Private> promise =
+ new KeySystemConfig::KeySystemConfigPromise::Private(__func__);
+ GetSupportedKeySystemConfigs(aRequest->mKeySystem,
+ isHardwareDecryptionRequest)
+ ->Then(GetMainThreadSerialEventTarget(), __func__,
+ [promise, aRequest, aIsPrivateBrowsing,
+ document = RefPtr<const Document>{aDocument}](
+ const KeySystemConfig::SupportedConfigsPromise::
+ ResolveOrRejectValue& aResult) {
+ if (aResult.IsResolve()) {
+ MediaKeySystemConfiguration outConfig;
+ for (const auto& implementation : aResult.ResolveValue()) {
+ for (const MediaKeySystemConfiguration& candidate :
+ aRequest->mConfigs) {
+ if (mozilla::dom::GetSupportedConfig(
+ implementation, candidate, outConfig,
+ &aRequest->mDiagnostics, aIsPrivateBrowsing,
+ document)) {
+ promise->Resolve(std::move(outConfig), __func__);
+ return;
+ }
+ }
+ }
+ }
+ promise->Reject(false, __func__);
+ });
+ return promise.forget();
}
/* static */
diff --git a/dom/media/eme/MediaKeySystemAccess.h b/dom/media/eme/MediaKeySystemAccess.h
index 18eec47008..5f3309766d 100644
--- a/dom/media/eme/MediaKeySystemAccess.h
+++ b/dom/media/eme/MediaKeySystemAccess.h
@@ -14,6 +14,7 @@
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/MediaKeySystemAccessBinding.h"
#include "mozilla/dom/MediaKeysRequestStatusBinding.h"
+#include "mozilla/KeySystemConfig.h"
#include "js/TypeDecls.h"
@@ -59,16 +60,13 @@ class MediaKeySystemAccess final : public nsISupports, public nsWrapperCache {
const nsAString& aKeySystem,
MediaKeySystemStatus aStatus);
- static bool GetSupportedConfig(
- const nsAString& aKeySystem,
- const Sequence<MediaKeySystemConfiguration>& aConfigs,
- MediaKeySystemConfiguration& aOutConfig,
- DecoderDoctorDiagnostics* aDiagnostics, bool aIsPrivateBrowsing,
- const std::function<void(const char*)>& aDeprecationLogFn);
+ static RefPtr<KeySystemConfig::KeySystemConfigPromise> GetSupportedConfig(
+ MediaKeySystemAccessRequest* aRequest, bool aIsPrivateBrowsing,
+ const Document* aDocument);
- static bool KeySystemSupportsInitDataType(const nsAString& aKeySystem,
- const nsAString& aInitDataType,
- bool aIsHardwareDecryption);
+ static RefPtr<GenericPromise> KeySystemSupportsInitDataType(
+ const nsAString& aKeySystem, const nsAString& aInitDataType,
+ bool aIsHardwareDecryption);
static nsCString ToCString(
const Sequence<MediaKeySystemConfiguration>& aConfig);
diff --git a/dom/media/eme/MediaKeySystemAccessManager.cpp b/dom/media/eme/MediaKeySystemAccessManager.cpp
index 8ebe7ceee7..84389d1db0 100644
--- a/dom/media/eme/MediaKeySystemAccessManager.cpp
+++ b/dom/media/eme/MediaKeySystemAccessManager.cpp
@@ -368,8 +368,6 @@ void MediaKeySystemAccessManager::RequestMediaKeySystemAccess(
// 5. Let promise be a new promise.
// 6. Run the following steps in parallel:
- DecoderDoctorDiagnostics diagnostics;
-
// 1. If keySystem is not one of the Key Systems supported by the user
// agent, reject promise with a NotSupportedError. String comparison is
// case-sensitive.
@@ -383,7 +381,7 @@ void MediaKeySystemAccessManager::RequestMediaKeySystemAccess(
// supported.
aRequest->RejectPromiseWithNotSupportedError(
"Key system is unsupported"_ns);
- diagnostics.StoreMediaKeySystemAccess(
+ aRequest->mDiagnostics.StoreMediaKeySystemAccess(
mWindow->GetExtantDoc(), aRequest->mKeySystem, false, __func__);
return;
}
@@ -399,7 +397,7 @@ void MediaKeySystemAccessManager::RequestMediaKeySystemAccess(
MediaKeySystemStatus::Api_disabled);
}
aRequest->RejectPromiseWithNotSupportedError("EME has been preffed off"_ns);
- diagnostics.StoreMediaKeySystemAccess(
+ aRequest->mDiagnostics.StoreMediaKeySystemAccess(
mWindow->GetExtantDoc(), aRequest->mKeySystem, false, __func__);
return;
}
@@ -439,7 +437,7 @@ void MediaKeySystemAccessManager::RequestMediaKeySystemAccess(
// "I can't play, updating" notification.
aRequest->RejectPromiseWithNotSupportedError(
"Timed out while waiting for a CDM update"_ns);
- diagnostics.StoreMediaKeySystemAccess(
+ aRequest->mDiagnostics.StoreMediaKeySystemAccess(
mWindow->GetExtantDoc(), aRequest->mKeySystem, false, __func__);
return;
}
@@ -453,6 +451,7 @@ void MediaKeySystemAccessManager::RequestMediaKeySystemAccess(
keySystem = NS_ConvertUTF8toUTF16(kWidevineExperimentKeySystemName);
}
#endif
+ auto& diagnostics = aRequest->mDiagnostics;
if (AwaitInstall(std::move(aRequest))) {
// Notify chrome that we're going to wait for the CDM to download/update.
EME_LOG("Await %s for installation",
@@ -480,25 +479,6 @@ void MediaKeySystemAccessManager::RequestMediaKeySystemAccess(
return;
}
- nsCOMPtr<Document> doc = mWindow->GetExtantDoc();
- nsTHashMap<nsCharPtrHashKey, bool> warnings;
- std::function<void(const char*)> deprecationWarningLogFn =
- [&](const char* aMsgName) {
- EME_LOG(
- "MediaKeySystemAccessManager::DeprecationWarningLambda Logging "
- "deprecation warning '%s' to WebConsole.",
- aMsgName);
- warnings.InsertOrUpdate(aMsgName, true);
- AutoTArray<nsString, 1> params;
- nsString& uri = *params.AppendElement();
- if (doc) {
- Unused << doc->GetDocumentURI(uri);
- }
- nsContentUtils::ReportToConsole(nsIScriptError::warningFlag, "Media"_ns,
- doc, nsContentUtils::eDOM_PROPERTIES,
- aMsgName, params);
- };
-
bool isPrivateBrowsing =
mWindow->GetExtantDoc() &&
mWindow->GetExtantDoc()->NodePrincipal()->GetPrivateBrowsingId() > 0;
@@ -517,23 +497,28 @@ void MediaKeySystemAccessManager::RequestMediaKeySystemAccess(
// 3. Let the cdm implementation value be implementation.
// 2. Resolve promise with access and abort the parallel steps of this
// algorithm.
- MediaKeySystemConfiguration config;
- if (MediaKeySystemAccess::GetSupportedConfig(
- aRequest->mKeySystem, aRequest->mConfigs, config, &diagnostics,
- isPrivateBrowsing, deprecationWarningLogFn)) {
- aRequest->mSupportedConfig = Some(config);
- // The app gets the final say on if we provide access or not.
- CheckDoesAppAllowProtectedMedia(std::move(aRequest));
- return;
- }
- // 4. Reject promise with a NotSupportedError.
-
- // Not to inform user, because nothing to do if the corresponding keySystem
- // configuration is not supported.
- aRequest->RejectPromiseWithNotSupportedError(
- "Key system configuration is not supported"_ns);
- diagnostics.StoreMediaKeySystemAccess(mWindow->GetExtantDoc(),
- aRequest->mKeySystem, false, __func__);
+ MediaKeySystemAccess::GetSupportedConfig(aRequest.get(), isPrivateBrowsing,
+ mWindow->GetExtantDoc())
+ ->Then(GetMainThreadSerialEventTarget(), __func__,
+ [self = RefPtr<MediaKeySystemAccessManager>{this}, this,
+ request = UniquePtr<PendingRequest>{std::move(aRequest)}](
+ const KeySystemConfig::KeySystemConfigPromise::
+ ResolveOrRejectValue& aResult) mutable {
+ if (aResult.IsResolve()) {
+ request->mSupportedConfig = Some(aResult.ResolveValue());
+ // The app gets the final say on if we provide access or not.
+ CheckDoesAppAllowProtectedMedia(std::move(request));
+ } else {
+ // 4. Reject promise with a NotSupportedError.
+ // Not to inform user, because nothing to do if the
+ // corresponding keySystem configuration is not supported.
+ request->RejectPromiseWithNotSupportedError(
+ "Key system configuration is not supported"_ns);
+ request->mDiagnostics.StoreMediaKeySystemAccess(
+ mWindow->GetExtantDoc(), request->mKeySystem, false,
+ __func__);
+ }
+ });
}
void MediaKeySystemAccessManager::ProvideAccess(
diff --git a/dom/media/eme/MediaKeySystemAccessManager.h b/dom/media/eme/MediaKeySystemAccessManager.h
index 77feded701..9ea621df84 100644
--- a/dom/media/eme/MediaKeySystemAccessManager.h
+++ b/dom/media/eme/MediaKeySystemAccessManager.h
@@ -5,6 +5,7 @@
#ifndef DOM_MEDIA_MEDIAKEYSYSTEMACCESSMANAGER_H_
#define DOM_MEDIA_MEDIAKEYSYSTEMACCESSMANAGER_H_
+#include "DecoderDoctorDiagnostics.h"
#include "mozilla/dom/MediaKeySystemAccess.h"
#include "mozilla/MozPromise.h"
#include "nsCycleCollectionParticipant.h"
@@ -95,6 +96,7 @@ struct MediaKeySystemAccessRequest {
const nsString mKeySystem;
// The config(s) passed for this request.
const Sequence<MediaKeySystemConfiguration> mConfigs;
+ DecoderDoctorDiagnostics mDiagnostics;
};
class MediaKeySystemAccessManager final : public nsIObserver, public nsINamed {
diff --git a/dom/media/eme/mediafoundation/WMFCDMImpl.cpp b/dom/media/eme/mediafoundation/WMFCDMImpl.cpp
index add978f755..983a7c00f2 100644
--- a/dom/media/eme/mediafoundation/WMFCDMImpl.cpp
+++ b/dom/media/eme/mediafoundation/WMFCDMImpl.cpp
@@ -10,81 +10,12 @@
#include "mozilla/AppShutdown.h"
#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/ScopeExit.h"
#include "mozilla/dom/MediaKeySession.h"
#include "mozilla/dom/KeySystemNames.h"
namespace mozilla {
-bool WMFCDMImpl::GetCapabilities(bool aIsHardwareDecryption,
- nsTArray<KeySystemConfig>& aOutConfigs) {
- MOZ_ASSERT(NS_IsMainThread());
- if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownConfirmed)) {
- return false;
- }
-
- static std::unordered_map<std::string, nsTArray<KeySystemConfig>>
- sKeySystemConfigs{};
- static bool sSetRunOnShutdown = false;
- if (!sSetRunOnShutdown) {
- GetMainThreadSerialEventTarget()->Dispatch(
- NS_NewRunnableFunction("WMFCDMImpl::GetCapabilities", [&] {
- RunOnShutdown([&] { sKeySystemConfigs.clear(); },
- ShutdownPhase::XPCOMShutdown);
- }));
- sSetRunOnShutdown = true;
- }
-
- // Retrieve result from our cached key system
- auto keySystem = std::string{NS_ConvertUTF16toUTF8(mKeySystem).get()};
- if (auto rv = sKeySystemConfigs.find(keySystem);
- rv != sKeySystemConfigs.end()) {
- for (const auto& config : rv->second) {
- if (IsHardwareDecryptionSupported(config) == aIsHardwareDecryption) {
- EME_LOG("Return cached capabilities for %s (%s)", keySystem.c_str(),
- NS_ConvertUTF16toUTF8(config.GetDebugInfo()).get());
- aOutConfigs.AppendElement(config);
- return true;
- }
- }
- }
-
- // Not cached result, ask the remote process.
- nsCOMPtr<nsISerialEventTarget> backgroundTaskQueue;
- NS_CreateBackgroundTaskQueue(__func__, getter_AddRefs(backgroundTaskQueue));
- if (!mCDM) {
- mCDM = MakeRefPtr<MFCDMChild>(mKeySystem);
- }
- bool ok = false;
- media::Await(
- do_AddRef(backgroundTaskQueue),
- mCDM->GetCapabilities(aIsHardwareDecryption),
- [&ok, &aOutConfigs, keySystem,
- aIsHardwareDecryption](const MFCDMCapabilitiesIPDL& capabilities) {
- EME_LOG("capabilities: keySystem=%s (hw-secure=%d)", keySystem.c_str(),
- aIsHardwareDecryption);
- for (const auto& v : capabilities.videoCapabilities()) {
- EME_LOG("capabilities: video=%s",
- NS_ConvertUTF16toUTF8(v.contentType()).get());
- }
- for (const auto& a : capabilities.audioCapabilities()) {
- EME_LOG("capabilities: audio=%s",
- NS_ConvertUTF16toUTF8(a.contentType()).get());
- }
- for (const auto& v : capabilities.encryptionSchemes()) {
- EME_LOG("capabilities: encryptionScheme=%s", EncryptionSchemeStr(v));
- }
- KeySystemConfig* config = aOutConfigs.AppendElement();
- MFCDMCapabilitiesIPDLToKeySystemConfig(capabilities, *config);
- sKeySystemConfigs[keySystem].AppendElement(*config);
- ok = true;
- },
- [](nsresult rv) {
- EME_LOG("Fail to get key system capabilities. rv=%x", uint32_t(rv));
- });
-
- return ok;
-}
-
RefPtr<WMFCDMImpl::InitPromise> WMFCDMImpl::Init(
const WMFCDMImpl::InitParams& aParams) {
if (!mCDM) {
@@ -111,4 +42,88 @@ RefPtr<WMFCDMImpl::InitPromise> WMFCDMImpl::Init(
return mInitPromiseHolder.Ensure(__func__);
}
+RefPtr<KeySystemConfig::SupportedConfigsPromise>
+WMFCDMCapabilites::GetCapabilities(
+ const nsTArray<KeySystemConfigRequest>& aRequests) {
+ MOZ_ASSERT(NS_IsMainThread());
+ if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownConfirmed)) {
+ return SupportedConfigsPromise::CreateAndReject(false, __func__);
+ }
+
+ if (!mCapabilitiesPromiseHolder.IsEmpty()) {
+ return mCapabilitiesPromiseHolder.Ensure(__func__);
+ }
+
+ using CapabilitiesPromise = MFCDMChild::CapabilitiesPromise;
+ nsTArray<RefPtr<CapabilitiesPromise>> promises;
+ for (const auto& request : aRequests) {
+ RefPtr<MFCDMChild> cdm = new MFCDMChild(request.mKeySystem);
+ promises.AppendElement(cdm->GetCapabilities(MFCDMCapabilitiesRequest{
+ nsString{request.mKeySystem},
+ request.mDecryption == KeySystemConfig::DecryptionInfo::Hardware}));
+ mCDMs.AppendElement(std::move(cdm));
+ }
+
+ CapabilitiesPromise::AllSettled(GetCurrentSerialEventTarget(), promises)
+ ->Then(
+ GetMainThreadSerialEventTarget(), __func__,
+ [self = RefPtr<WMFCDMCapabilites>(this), this](
+ CapabilitiesPromise::AllSettledPromiseType::ResolveOrRejectValue&&
+ aResult) {
+ mCapabilitiesPromisesRequest.Complete();
+
+ // Reset cdm
+ auto exit = MakeScopeExit([&] {
+ for (auto& cdm : mCDMs) {
+ cdm->Shutdown();
+ }
+ mCDMs.Clear();
+ });
+
+ nsTArray<KeySystemConfig> outConfigs;
+ for (const auto& promiseRv : aResult.ResolveValue()) {
+ if (promiseRv.IsReject()) {
+ continue;
+ }
+ const MFCDMCapabilitiesIPDL& capabilities =
+ promiseRv.ResolveValue();
+ EME_LOG("capabilities: keySystem=%s (hw-secure=%d)",
+ NS_ConvertUTF16toUTF8(capabilities.keySystem()).get(),
+ capabilities.isHardwareDecryption());
+ for (const auto& v : capabilities.videoCapabilities()) {
+ EME_LOG("capabilities: video=%s",
+ NS_ConvertUTF16toUTF8(v.contentType()).get());
+ }
+ for (const auto& a : capabilities.audioCapabilities()) {
+ EME_LOG("capabilities: audio=%s",
+ NS_ConvertUTF16toUTF8(a.contentType()).get());
+ }
+ for (const auto& v : capabilities.encryptionSchemes()) {
+ EME_LOG("capabilities: encryptionScheme=%s",
+ EncryptionSchemeStr(v));
+ }
+ KeySystemConfig* config = outConfigs.AppendElement();
+ MFCDMCapabilitiesIPDLToKeySystemConfig(capabilities, *config);
+ }
+ if (outConfigs.IsEmpty()) {
+ EME_LOG(
+ "Failed on getting capabilities from all settled promise");
+ mCapabilitiesPromiseHolder.Reject(false, __func__);
+ return;
+ }
+ mCapabilitiesPromiseHolder.Resolve(std::move(outConfigs), __func__);
+ })
+ ->Track(mCapabilitiesPromisesRequest);
+
+ return mCapabilitiesPromiseHolder.Ensure(__func__);
+}
+
+WMFCDMCapabilites::~WMFCDMCapabilites() {
+ mCapabilitiesPromisesRequest.DisconnectIfExists();
+ mCapabilitiesPromiseHolder.RejectIfExists(false, __func__);
+ for (auto& cdm : mCDMs) {
+ cdm->Shutdown();
+ }
+}
+
} // namespace mozilla
diff --git a/dom/media/eme/mediafoundation/WMFCDMImpl.h b/dom/media/eme/mediafoundation/WMFCDMImpl.h
index b7e6308848..c5bf8234af 100644
--- a/dom/media/eme/mediafoundation/WMFCDMImpl.h
+++ b/dom/media/eme/mediafoundation/WMFCDMImpl.h
@@ -34,10 +34,6 @@ class WMFCDMImpl final {
explicit WMFCDMImpl(const nsAString& aKeySystem) : mKeySystem(aKeySystem) {}
- // TODO: make this async?
- bool GetCapabilities(bool aIsHardwareDecryption,
- nsTArray<KeySystemConfig>& aOutConfigs);
-
using InitPromise = GenericPromise;
struct InitParams {
nsString mOrigin;
@@ -119,6 +115,26 @@ class WMFCDMImpl final {
MozPromiseHolder<InitPromise> mInitPromiseHolder;
};
+// A helper class to get multiple capabilities from different key systems.
+class WMFCDMCapabilites final {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WMFCDMCapabilites);
+ WMFCDMCapabilites() = default;
+
+ using SupportedConfigsPromise = KeySystemConfig::SupportedConfigsPromise;
+ RefPtr<SupportedConfigsPromise> GetCapabilities(
+ const nsTArray<KeySystemConfigRequest>& aRequests);
+
+ private:
+ ~WMFCDMCapabilites();
+
+ nsTArray<RefPtr<MFCDMChild>> mCDMs;
+ MozPromiseHolder<SupportedConfigsPromise> mCapabilitiesPromiseHolder;
+ MozPromiseRequestHolder<
+ MFCDMChild::CapabilitiesPromise::AllSettledPromiseType>
+ mCapabilitiesPromisesRequest;
+};
+
} // namespace mozilla
#endif // DOM_MEDIA_EME_MEDIAFOUNDATION_WMFCDMIMPL_H_
diff --git a/dom/media/eme/mediafoundation/WMFCDMProxy.cpp b/dom/media/eme/mediafoundation/WMFCDMProxy.cpp
index f7e05dfb6a..5fd73c2dcf 100644
--- a/dom/media/eme/mediafoundation/WMFCDMProxy.cpp
+++ b/dom/media/eme/mediafoundation/WMFCDMProxy.cpp
@@ -158,7 +158,7 @@ void WMFCDMProxy::ResolvePromiseWithKeyStatus(
RETURN_IF_SHUTDOWN();
EME_LOG("WMFCDMProxy::ResolvePromiseWithKeyStatus(this=%p, pid=%" PRIu32
", status=%s)",
- this, aId, ToMediaKeyStatusStr(aStatus));
+ this, aId, dom::GetEnumString(aStatus).get());
if (!mKeys.IsNull()) {
mKeys->ResolvePromiseWithKeyStatus(aId, aStatus);
} else {
diff --git a/dom/media/eme/metrics.yaml b/dom/media/eme/metrics.yaml
new file mode 100644
index 0000000000..9a8cb0783a
--- /dev/null
+++ b/dom/media/eme/metrics.yaml
@@ -0,0 +1,42 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Adding a new metric? We have docs for that!
+# https://firefox-source-docs.mozilla.org/toolkit/components/glean/user/new_definitions_file.html
+
+---
+$schema: moz://mozilla.org/schemas/glean/metrics/2-0-0
+$tags:
+ - 'Core :: Audio/Video'
+
+mediadrm:
+ eme_playback:
+ type: event
+ description: >
+ Record the EME play time with the video codec and resolutions.
+ metadata:
+ tags:
+ - 'Core :: Audio/Video: Playback'
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1882567
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1882567#3
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - media-alerts@mozilla.com
+ extra_keys:
+ key_system:
+ description: The key system used for the EME playback
+ type: string
+ played_time:
+ description: How many seconds the EME content has been played since last record
+ type: quantity
+ resolution:
+ description: The video resolution used for EME playback
+ type: string
+ video_codec:
+ description: The video codec used for EME playback
+ type: string
+ expires: never
diff --git a/dom/media/gmp/ChromiumCDMChild.cpp b/dom/media/gmp/ChromiumCDMChild.cpp
index 4592f2e291..343622d37f 100644
--- a/dom/media/gmp/ChromiumCDMChild.cpp
+++ b/dom/media/gmp/ChromiumCDMChild.cpp
@@ -450,7 +450,7 @@ mozilla::ipc::IPCResult ChromiumCDMChild::RecvCreateSessionAndGenerateRequest(
"pid=%" PRIu32 ", sessionType=%" PRIu32 ", initDataType=%" PRIu32
") initDataLen=%zu",
aPromiseId, aSessionType, aInitDataType, aInitData.Length());
- MOZ_ASSERT(aSessionType <= cdm::SessionType::kPersistentUsageRecord);
+ MOZ_ASSERT(aSessionType <= cdm::SessionType::kPersistentLicense);
MOZ_ASSERT(aInitDataType <= cdm::InitDataType::kWebM);
if (mCDM) {
mCDM->CreateSessionAndGenerateRequest(
diff --git a/dom/media/gmp/ChromiumCDMProxy.cpp b/dom/media/gmp/ChromiumCDMProxy.cpp
index 566b386b0b..43ce3e50af 100644
--- a/dom/media/gmp/ChromiumCDMProxy.cpp
+++ b/dom/media/gmp/ChromiumCDMProxy.cpp
@@ -7,6 +7,7 @@
#include "ChromiumCDMProxy.h"
#include "ChromiumCDMCallbackProxy.h"
#include "MediaResult.h"
+#include "mozilla/StaticPrefs_media.h"
#include "mozilla/dom/MediaKeySession.h"
#include "mozilla/dom/MediaKeysBinding.h"
#include "GMPUtils.h"
@@ -382,13 +383,19 @@ void ChromiumCDMProxy::NotifyOutputProtectionStatus(
}
uint32_t linkMask{};
- uint32_t protectionMask{}; // Unused/always zeroed.
+ uint32_t protectionMask{};
if (aCheckStatus == OutputProtectionCheckStatus::CheckSuccessful &&
aCaptureStatus == OutputProtectionCaptureStatus::CapturePossilbe) {
// The result indicates the capture is possible, so set the mask
// to indicate this.
linkMask |= cdm::OutputLinkTypes::kLinkTypeNetwork;
}
+ // `kProtectionNone` can cause playback to stop if HDCP_V1 is required. Report
+ // HDCP protection if there's no potential capturing.
+ if (linkMask == cdm::OutputLinkTypes::kLinkTypeNone &&
+ StaticPrefs::media_widevine_hdcp_protection_mask()) {
+ protectionMask = cdm::OutputProtectionMethods::kProtectionHDCP;
+ }
mGMPThread->Dispatch(NewRunnableMethod<bool, uint32_t, uint32_t>(
"gmp::ChromiumCDMParent::NotifyOutputProtectionStatus", cdm,
&gmp::ChromiumCDMParent::NotifyOutputProtectionStatus,
diff --git a/dom/media/gmp/GMPChild.cpp b/dom/media/gmp/GMPChild.cpp
index d543d46387..ca8b6a4ed4 100644
--- a/dom/media/gmp/GMPChild.cpp
+++ b/dom/media/gmp/GMPChild.cpp
@@ -41,6 +41,7 @@
#include "nsXULAppAPI.h"
#include "nsIXULRuntime.h"
#include "nsXPCOM.h"
+#include "nsXPCOMPrivate.h" // for XUL_DLL
#include "prio.h"
#ifdef XP_WIN
# include <stdlib.h> // for _exit()
@@ -348,15 +349,11 @@ static bool IsFileLeafEqualToASCII(const nsCOMPtr<nsIFile>& aFile,
#endif
#if defined(XP_WIN)
-# define FIREFOX_FILE u"firefox.exe"_ns
-# define XUL_LIB_FILE u"xul.dll"_ns
-#elif defined(XP_MACOSX)
-# define FIREFOX_FILE u"firefox"_ns
-# define XUL_LIB_FILE u"XUL"_ns
+# define FIREFOX_FILE MOZ_APP_NAME u".exe"_ns
#else
-# define FIREFOX_FILE u"firefox"_ns
-# define XUL_LIB_FILE u"libxul.so"_ns
+# define FIREFOX_FILE MOZ_APP_NAME u""_ns
#endif
+#define XUL_LIB_FILE XUL_DLL u""_ns
static nsCOMPtr<nsIFile> GetFirefoxAppPath(
nsCOMPtr<nsIFile> aPluginContainerPath) {
diff --git a/dom/media/gmp/moz.build b/dom/media/gmp/moz.build
index 3c34021506..744209fa1c 100644
--- a/dom/media/gmp/moz.build
+++ b/dom/media/gmp/moz.build
@@ -129,6 +129,8 @@ PREPROCESSED_IPDL_SOURCES += [
if CONFIG["TARGET_OS"] in ["WINNT", "OSX"]:
DEFINES["SUPPORT_STORAGE_ID"] = 1
+DEFINES["MOZ_APP_NAME"] = '"%s"' % CONFIG["MOZ_APP_NAME"]
+
include("/ipc/chromium/chromium-config.mozbuild")
if CONFIG["MOZ_SANDBOX"]:
diff --git a/dom/media/gmp/mozIGeckoMediaPluginChromeService.idl b/dom/media/gmp/mozIGeckoMediaPluginChromeService.idl
index 51dc545092..bc782978c9 100644
--- a/dom/media/gmp/mozIGeckoMediaPluginChromeService.idl
+++ b/dom/media/gmp/mozIGeckoMediaPluginChromeService.idl
@@ -27,7 +27,7 @@ interface mozIGeckoMediaPluginChromeService : nsISupports
* @note Main-thread API.
*/
void removeAndDeletePluginDirectory(in AString directory,
- [optional] in bool defer);
+ [optional] in boolean defer);
/**
* Clears storage data associated with the site and the originAttributes
@@ -48,7 +48,7 @@ interface mozIGeckoMediaPluginChromeService : nsISupports
* persistently on disk. Private Browsing and local content are not
* allowed to store persistent data.
*/
- bool isPersistentStorageAllowed(in ACString nodeId);
+ boolean isPersistentStorageAllowed(in ACString nodeId);
/**
* Returns the directory to use as the base for storing data about GMPs.
diff --git a/dom/media/gmp/widevine-adapter/content_decryption_module.h b/dom/media/gmp/widevine-adapter/content_decryption_module.h
index 68fee35195..f9101fbaf0 100644
--- a/dom/media/gmp/widevine-adapter/content_decryption_module.h
+++ b/dom/media/gmp/widevine-adapter/content_decryption_module.h
@@ -411,7 +411,6 @@ CHECK_TYPE(InitDataType, 4, 4);
enum SessionType : uint32_t {
kTemporary = 0,
kPersistentLicense = 1,
- kPersistentUsageRecord = 2
};
CHECK_TYPE(SessionType, 4, 4);
diff --git a/dom/media/gtest/TestAudioInputProcessing.cpp b/dom/media/gtest/TestAudioInputProcessing.cpp
index 82c1831e84..d21c37a900 100644
--- a/dom/media/gtest/TestAudioInputProcessing.cpp
+++ b/dom/media/gtest/TestAudioInputProcessing.cpp
@@ -30,11 +30,21 @@ class MockGraph : public MediaTrackGraphImpl {
void Init(uint32_t aChannels) {
MediaTrackGraphImpl::Init(OFFLINE_THREAD_DRIVER, DIRECT_DRIVER, aChannels);
- // Remove this graph's driver since it holds a ref. If no AppendMessage
- // takes place, the driver never starts. This will also make sure no-one
- // tries to use it. We are still kept alive by the self-ref. Destroy() must
- // be called to break that cycle.
- SetCurrentDriver(nullptr);
+
+ MonitorAutoLock lock(mMonitor);
+ // We don't need a graph driver. Advance to
+ // LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION so that the driver never
+ // starts. Graph control messages run as in shutdown, synchronously.
+ // This permits the main thread part of track initialization through
+ // AudioProcessingTrack::Create().
+ mLifecycleState = LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION;
+#ifdef DEBUG
+ mCanRunMessagesSynchronously = true;
+#endif
+ // Remove this graph's driver since it holds a ref. We are still kept
+ // alive by the self-ref. Destroy() must be called to break that cycle if
+ // no tracks are created and destroyed.
+ mDriver = nullptr;
}
MOCK_CONST_METHOD0(OnGraphThread, bool());
@@ -53,6 +63,7 @@ TEST(TestAudioInputProcessing, Buffering)
const uint32_t channels = 1;
auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
graph->Init(channels);
+ RefPtr track = AudioProcessingTrack::Create(graph);
auto aip = MakeRefPtr<AudioInputProcessing>(channels);
@@ -62,19 +73,26 @@ TEST(TestAudioInputProcessing, Buffering)
GraphTime processedTime;
GraphTime nextTime;
AudioSegment output;
+ MediaEnginePrefs settings;
+ settings.mChannels = channels;
+ // pref "media.getusermedia.agc2_forced" defaults to true.
+ // mAgc would need to be set to something other than kAdaptiveAnalog
+ // for mobile, as asserted in AudioInputProcessing::ConfigForPrefs,
+ // if gain_controller1 were used.
+ settings.mAgc2Forced = true;
// Toggle pass-through mode without starting
{
- EXPECT_EQ(aip->PassThrough(graph), false);
- EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
-
- aip->SetPassThrough(graph, true);
+ EXPECT_EQ(aip->IsPassThrough(graph), true);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
- aip->SetPassThrough(graph, false);
+ settings.mAgcOn = true;
+ aip->ApplySettings(graph, nullptr, settings);
+ EXPECT_EQ(aip->IsPassThrough(graph), false);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
- aip->SetPassThrough(graph, true);
+ settings.mAgcOn = false;
+ aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
@@ -88,14 +106,15 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
// Set aip to processing/non-pass-through mode
- aip->SetPassThrough(graph, false);
+ settings.mAgcOn = true;
+ aip->ApplySettings(graph, nullptr, settings);
{
// Need (nextTime - processedTime) = 256 - 128 = 128 frames this round.
// aip has not started yet, so output will be filled with silence data
@@ -106,37 +125,35 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
- // aip has been started and set to processing mode so it will insert 80 frames
- // into aip's internal buffer as pre-buffering.
+ // aip has been set to processing mode and is started.
aip->Start(graph);
{
// Need (nextTime - processedTime) = 256 - 256 = 0 frames this round.
- // The Process() aip will take 0 frames from input, packetize and process
- // these frames into 0 80-frame packet(0 frames left in packetizer), insert
- // packets into aip's internal buffer, then move 0 frames the internal
- // buffer to output, leaving 80 + 0 - 0 = 80 frames in aip's internal
- // buffer.
+ // Process() will return early on 0 frames of input.
+ // Pre-buffering is not triggered.
processedTime = nextTime;
nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(3 * frames);
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
- EXPECT_EQ(aip->NumBufferedFrames(graph), 80);
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
{
// Need (nextTime - processedTime) = 384 - 256 = 128 frames this round.
- // The Process() aip will take 128 frames from input, packetize and process
+ // On receipt of these first frames, aip will insert 80 frames
+ // into its internal buffer as pre-buffering.
+ // Process() will take 128 frames from input, packetize and process
// these frames into floor(128/80) = 1 80-frame packet (48 frames left in
// packetizer), insert packets into aip's internal buffer, then move 128
// frames the internal buffer to output, leaving 80 + 80 - 128 = 32 frames
@@ -147,7 +164,7 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
@@ -161,7 +178,7 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
@@ -180,13 +197,15 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 64);
}
- aip->SetPassThrough(graph, true);
+ // Set aip to pass-through mode
+ settings.mAgcOn = false;
+ aip->ApplySettings(graph, nullptr, settings);
{
// Need (nextTime - processedTime) = 512 - 512 = 0 frames this round.
// No buffering in pass-through mode
@@ -196,14 +215,14 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), processedTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
aip->Stop(graph);
- graph->Destroy();
+ track->Destroy();
}
TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
@@ -212,6 +231,7 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
const uint32_t channels = 2;
auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
graph->Init(channels);
+ RefPtr track = AudioProcessingTrack::Create(graph);
auto aip = MakeRefPtr<AudioInputProcessing>(channels);
AudioGenerator<AudioDataValue> generator(channels, rate);
@@ -271,26 +291,30 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
};
// Check the principals in audio-processing mode.
- EXPECT_EQ(aip->PassThrough(graph), false);
+ MediaEnginePrefs settings;
+ settings.mChannels = channels;
+ settings.mAgcOn = true;
+ settings.mAgc2Forced = true;
+ aip->ApplySettings(graph, nullptr, settings);
+ EXPECT_EQ(aip->IsPassThrough(graph), false);
aip->Start(graph);
{
- EXPECT_EQ(aip->NumBufferedFrames(graph), 480);
AudioSegment output;
{
- // Trim the prebuffering silence.
-
AudioSegment data;
- aip->Process(graph, 0, 4800, &input, &data);
+ aip->Process(track, 0, 4800, &input, &data);
EXPECT_EQ(input.GetDuration(), 4800);
EXPECT_EQ(data.GetDuration(), 4800);
+ // Extract another 480 frames to account for delay from pre-buffering.
+ EXPECT_EQ(aip->NumBufferedFrames(graph), 480);
AudioSegment dummy;
dummy.AppendNullData(480);
- aip->Process(graph, 0, 480, &dummy, &data);
+ aip->Process(track, 0, 480, &dummy, &data);
EXPECT_EQ(dummy.GetDuration(), 480);
EXPECT_EQ(data.GetDuration(), 480 + 4800);
- // Ignore the pre-buffering data
+ // Ignore the pre-buffering silence.
output.AppendSlice(data, 480, 480 + 4800);
}
@@ -298,10 +322,12 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
}
// Check the principals in pass-through mode.
- aip->SetPassThrough(graph, true);
+ settings.mAgcOn = false;
+ aip->ApplySettings(graph, nullptr, settings);
+ EXPECT_EQ(aip->IsPassThrough(graph), true);
{
AudioSegment output;
- aip->Process(graph, 0, 4800, &input, &output);
+ aip->Process(track, 0, 4800, &input, &output);
EXPECT_EQ(input.GetDuration(), 4800);
EXPECT_EQ(output.GetDuration(), 4800);
@@ -309,7 +335,7 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
}
aip->Stop(graph);
- graph->Destroy();
+ track->Destroy();
}
TEST(TestAudioInputProcessing, Downmixing)
@@ -318,6 +344,7 @@ TEST(TestAudioInputProcessing, Downmixing)
const uint32_t channels = 4;
auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
graph->Init(channels);
+ RefPtr track = AudioProcessingTrack::Create(graph);
auto aip = MakeRefPtr<AudioInputProcessing>(channels);
@@ -327,7 +354,12 @@ TEST(TestAudioInputProcessing, Downmixing)
GraphTime processedTime;
GraphTime nextTime;
- aip->SetPassThrough(graph, false);
+ MediaEnginePrefs settings;
+ settings.mChannels = channels;
+ settings.mAgcOn = true;
+ settings.mAgc2Forced = true;
+ aip->ApplySettings(graph, nullptr, settings);
+ EXPECT_EQ(aip->IsPassThrough(graph), false);
aip->Start(graph);
processedTime = 0;
@@ -347,7 +379,7 @@ TEST(TestAudioInputProcessing, Downmixing)
// downmix to mono, scaling the input by 1/4 in the process.
// We can't compare the input and output signal because the sine is going to
// be mangledui
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(output.MaxChannelCount(), 1u);
@@ -367,15 +399,18 @@ TEST(TestAudioInputProcessing, Downmixing)
}
}
- // Now, repeat the test, checking we get the unmodified 4 channels.
- aip->SetPassThrough(graph, true);
+ // Now, repeat the test in pass-through mode, checking we get the unmodified
+ // 4 channels.
+ settings.mAgcOn = false;
+ aip->ApplySettings(graph, nullptr, settings);
+ EXPECT_EQ(aip->IsPassThrough(graph), true);
AudioSegment input, output;
processedTime = nextTime;
nextTime += MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
generator.Generate(input, nextTime - processedTime);
- aip->Process(graph, processedTime, nextTime, &input, &output);
+ aip->Process(track, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime - processedTime);
// This time, no downmix: 4 channels of input, 4 channels of output
@@ -391,5 +426,5 @@ TEST(TestAudioInputProcessing, Downmixing)
}
aip->Stop(graph);
- graph->Destroy();
+ track->Destroy();
}
diff --git a/dom/media/gtest/TestAudioPacketizer.cpp b/dom/media/gtest/TestAudioPacketizer.cpp
index 96a2d6f08c..2c9f86bb14 100644
--- a/dom/media/gtest/TestAudioPacketizer.cpp
+++ b/dom/media/gtest/TestAudioPacketizer.cpp
@@ -7,6 +7,7 @@
#include <math.h>
#include <memory>
#include "../AudioPacketizer.h"
+#include "../TimedPacketizer.h"
#include "gtest/gtest.h"
using namespace mozilla;
@@ -25,16 +26,15 @@ class AutoBuffer {
int16_t Sequence(int16_t* aBuffer, uint32_t aSize, uint32_t aStart = 0) {
uint32_t i;
for (i = 0; i < aSize; i++) {
- aBuffer[i] = aStart + i;
+ aBuffer[i] = (aStart + i) % INT16_MAX;
}
return aStart + i;
}
-void IsSequence(std::unique_ptr<int16_t[]> aBuffer, uint32_t aSize,
- uint32_t aStart = 0) {
+void IsSequence(int16_t* aBuffer, uint32_t aSize, uint32_t aStart = 0) {
for (uint32_t i = 0; i < aSize; i++) {
- ASSERT_TRUE(aBuffer[i] == static_cast<int64_t>(aStart + i))
- << "Buffer is not a sequence at offset " << i << '\n';
+ ASSERT_EQ(aBuffer[i], static_cast<int64_t>((aStart + i) % INT16_MAX))
+ << "Buffer is not a sequence at offset " << i << '\n';
}
// Buffer is a sequence.
}
@@ -70,7 +70,7 @@ TEST(AudioPacketizer, Test)
seqEnd = Sequence(b.Get(), channels * 441, prevEnd);
ap.Input(b.Get(), 441);
std::unique_ptr<int16_t[]> out(ap.Output());
- IsSequence(std::move(out), 441 * channels, prevEnd);
+ IsSequence(out.get(), 441 * channels, prevEnd);
}
}
// Simple test, with input/output buffer size aligned on the packet size,
@@ -89,8 +89,8 @@ TEST(AudioPacketizer, Test)
ap.Input(b1.Get(), 441);
std::unique_ptr<int16_t[]> out(ap.Output());
std::unique_ptr<int16_t[]> out2(ap.Output());
- IsSequence(std::move(out), 441 * channels, prevEnd0);
- IsSequence(std::move(out2), 441 * channels, prevEnd1);
+ IsSequence(out.get(), 441 * channels, prevEnd0);
+ IsSequence(out2.get(), 441 * channels, prevEnd1);
}
}
// Input/output buffer size not aligned on the packet size,
@@ -108,9 +108,9 @@ TEST(AudioPacketizer, Test)
ap.Input(b1.Get(), 480);
std::unique_ptr<int16_t[]> out(ap.Output());
std::unique_ptr<int16_t[]> out2(ap.Output());
- IsSequence(std::move(out), 441 * channels, prevEnd);
+ IsSequence(out.get(), 441 * channels, prevEnd);
prevEnd += 441 * channels;
- IsSequence(std::move(out2), 441 * channels, prevEnd);
+ IsSequence(out2.get(), 441 * channels, prevEnd);
prevEnd += 441 * channels;
}
printf("Available: %d\n", ap.PacketsAvailable());
@@ -161,3 +161,34 @@ TEST(AudioPacketizer, Test)
}
}
}
+
+TEST(TimedPacketizer, Test)
+{
+ const int channels = 2;
+ const int64_t rate = 48000;
+ const int64_t inputPacketSize = 240;
+ const int64_t packetSize = 96;
+ TimedPacketizer<int16_t, int16_t> tp(packetSize, channels, 0, rate);
+ int16_t prevEnd = 0;
+ int16_t prevSeq = 0;
+ nsTArray<int16_t> packet;
+ uint64_t tsCheck = 0;
+ packet.SetLength(tp.PacketSize() * channels);
+ for (int16_t i = 0; i < 10; i++) {
+ AutoBuffer<int16_t> b(inputPacketSize * channels);
+ prevSeq = Sequence(b.Get(), inputPacketSize * channels, prevSeq);
+ tp.Input(b.Get(), inputPacketSize);
+ while (tp.PacketsAvailable()) {
+ media::TimeUnit ts = tp.Output(packet.Elements());
+ IsSequence(packet.Elements(), packetSize * channels, prevEnd);
+ EXPECT_EQ(ts, media::TimeUnit(tsCheck, rate));
+ prevEnd += packetSize * channels;
+ tsCheck += packetSize;
+ }
+ }
+ EXPECT_TRUE(!tp.PacketsAvailable());
+ uint32_t drained;
+ media::TimeUnit ts = tp.Drain(packet.Elements(), drained);
+ EXPECT_EQ(ts, media::TimeUnit(tsCheck, rate));
+ EXPECT_LE(drained, packetSize);
+}
diff --git a/dom/media/gtest/TestAudioTrackGraph.cpp b/dom/media/gtest/TestAudioTrackGraph.cpp
index 1bd255bed1..7be1224ab9 100644
--- a/dom/media/gtest/TestAudioTrackGraph.cpp
+++ b/dom/media/gtest/TestAudioTrackGraph.cpp
@@ -59,39 +59,27 @@ struct StopInputProcessing : public ControlMessage {
void Run() override { mInputProcessing->Stop(mTrack->Graph()); }
};
-struct SetPassThrough : public ControlMessage {
- const RefPtr<AudioInputProcessing> mInputProcessing;
- const bool mPassThrough;
-
- SetPassThrough(MediaTrack* aTrack, AudioInputProcessing* aInputProcessing,
- bool aPassThrough)
- : ControlMessage(aTrack),
- mInputProcessing(aInputProcessing),
- mPassThrough(aPassThrough) {}
- void Run() override {
- EXPECT_EQ(mInputProcessing->PassThrough(mTrack->Graph()), !mPassThrough);
- mInputProcessing->SetPassThrough(mTrack->Graph(), mPassThrough);
- }
-};
-
-struct SetRequestedInputChannelCount : public ControlMessage {
- const CubebUtils::AudioDeviceID mDeviceId;
- const RefPtr<AudioInputProcessing> mInputProcessing;
- const uint32_t mChannelCount;
+void QueueApplySettings(AudioProcessingTrack* aTrack,
+ AudioInputProcessing* aInputProcessing,
+ const MediaEnginePrefs& aSettings) {
+ aTrack->QueueControlMessageWithNoShutdown(
+ [inputProcessing = RefPtr{aInputProcessing}, aSettings,
+ // If the track is not connected to a device then the particular
+ // AudioDeviceID (nullptr) passed to ReevaluateInputDevice() is not
+ // important.
+ deviceId = aTrack->DeviceId().valueOr(nullptr),
+ graph = aTrack->Graph()] {
+ inputProcessing->ApplySettings(graph, deviceId, aSettings);
+ });
+}
- SetRequestedInputChannelCount(MediaTrack* aTrack,
- CubebUtils::AudioDeviceID aDeviceId,
- AudioInputProcessing* aInputProcessing,
- uint32_t aChannelCount)
- : ControlMessage(aTrack),
- mDeviceId(aDeviceId),
- mInputProcessing(aInputProcessing),
- mChannelCount(aChannelCount) {}
- void Run() override {
- mInputProcessing->SetRequestedInputChannelCount(mTrack->Graph(), mDeviceId,
- mChannelCount);
- }
-};
+void QueueExpectIsPassThrough(AudioProcessingTrack* aTrack,
+ AudioInputProcessing* aInputProcessing) {
+ aTrack->QueueControlMessageWithNoShutdown(
+ [inputProcessing = RefPtr{aInputProcessing}, graph = aTrack->Graph()] {
+ EXPECT_EQ(inputProcessing->IsPassThrough(graph), true);
+ });
+}
#endif // MOZ_WEBRTC
class GoFaster : public ControlMessage {
@@ -557,8 +545,7 @@ class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
} else {
MOZ_ASSERT(mInputs.Length() == 1);
AudioSegment data;
- DeviceInputConsumerTrack::GetInputSourceData(data, mInputs[0], aFrom,
- aTo);
+ DeviceInputConsumerTrack::GetInputSourceData(data, aFrom, aTo);
GetData<AudioSegment>()->AppendFrom(&data);
}
};
@@ -620,8 +607,8 @@ TEST(TestAudioTrackGraph, DeviceChangedCallback)
TestDeviceInputConsumerTrack::Create(graphImpl);
track1->ConnectDeviceInput(device1, listener1.get(), PRINCIPAL_HANDLE_NONE);
- EXPECT_TRUE(track1->ConnectToNativeDevice());
- EXPECT_FALSE(track1->ConnectToNonNativeDevice());
+ EXPECT_TRUE(track1->ConnectedToNativeDevice());
+ EXPECT_FALSE(track1->ConnectedToNonNativeDevice());
auto started =
Invoke([&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
@@ -637,8 +624,8 @@ TEST(TestAudioTrackGraph, DeviceChangedCallback)
TestDeviceInputConsumerTrack::Create(graphImpl);
track2->ConnectDeviceInput(device2, listener2.get(), PRINCIPAL_HANDLE_NONE);
- EXPECT_FALSE(track2->ConnectToNativeDevice());
- EXPECT_TRUE(track2->ConnectToNonNativeDevice());
+ EXPECT_FALSE(track2->ConnectedToNativeDevice());
+ EXPECT_TRUE(track2->ConnectedToNonNativeDevice());
RefPtr<SmartMockCubebStream> stream2 = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream2->mHasInput);
EXPECT_FALSE(stream2->mHasOutput);
@@ -852,8 +839,8 @@ TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged)
track1->ConnectDeviceInput(nativeDevice, listener1.get(),
PRINCIPAL_HANDLE_NONE);
- EXPECT_TRUE(track1->ConnectToNativeDevice());
- EXPECT_FALSE(track1->ConnectToNonNativeDevice());
+ EXPECT_TRUE(track1->ConnectedToNativeDevice());
+ EXPECT_FALSE(track1->ConnectedToNonNativeDevice());
auto started =
Invoke([&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
nativeStream = WaitFor(cubeb->StreamInitEvent());
@@ -891,8 +878,8 @@ TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged)
TestDeviceInputConsumerTrack::Create(graphImpl);
track3->ConnectDeviceInput(nonNativeDevice, listener3.get(),
PRINCIPAL_HANDLE_NONE);
- EXPECT_FALSE(track3->ConnectToNativeDevice());
- EXPECT_TRUE(track3->ConnectToNonNativeDevice());
+ EXPECT_FALSE(track3->ConnectedToNativeDevice());
+ EXPECT_TRUE(track3->ConnectedToNonNativeDevice());
RefPtr<SmartMockCubebStream> nonNativeStream =
WaitFor(cubeb->StreamInitEvent());
@@ -1176,8 +1163,7 @@ TEST(TestAudioTrackGraph, ErrorCallback)
auto started = Invoke([&] {
processingTrack = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
- processingTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(processingTrack, listener, true));
+ QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -1247,8 +1233,7 @@ TEST(TestAudioTrackGraph, AudioProcessingTrack)
port = outputTrack->AllocateInputPort(processingTrack);
/* Primary graph: Open Audio Input through SourceMediaTrack */
listener = new AudioInputProcessing(2);
- processingTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(processingTrack, listener, true));
+ QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -1336,12 +1321,22 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
outputTrack->QueueSetAutoend(false);
outputTrack->AddAudioOutput(reinterpret_cast<void*>(1), nullptr);
port = outputTrack->AllocateInputPort(processingTrack);
- listener = new AudioInputProcessing(2);
+
+ const int32_t channelCount = 2;
+ listener = new AudioInputProcessing(channelCount);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
processingTrack->ConnectDeviceInput(deviceId, listener,
PRINCIPAL_HANDLE_NONE);
+ MediaEnginePrefs settings;
+ settings.mChannels = channelCount;
+ settings.mAgcOn = true; // Turn off pass-through.
+ // AGC1 Mode 0 interferes with AudioVerifier's frequency estimation
+ // through zero-crossing counts.
+ settings.mAgc2Forced = true;
+ QueueApplySettings(processingTrack, listener, settings);
+
return graph->NotifyWhenDeviceStarted(nullptr);
});
@@ -1494,8 +1489,7 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
port = outputTrack->AllocateInputPort(processingTrack);
/* Primary graph: Open Audio Input through SourceMediaTrack */
listener = new AudioInputProcessing(2);
- processingTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(processingTrack, listener, true));
+ QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->ConnectDeviceInput(deviceId, listener,
PRINCIPAL_HANDLE_NONE);
@@ -1511,32 +1505,40 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
stream->SetOutputRecordingEnabled(true);
// Wait for a second worth of audio data.
- uint32_t totalFrames = 0;
- WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
- totalFrames += aFrames;
- return totalFrames > static_cast<uint32_t>(graph->GraphRate());
- });
+ uint64_t targetPosition = graph->GraphRate();
+ auto AdvanceToTargetPosition = [&] {
+ DispatchFunction([&] {
+ processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
+ });
+ WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
+ // Position() gives a more up-to-date indication than summing aFrames if
+ // multiple events are queued.
+ if (stream->Position() < targetPosition) {
+ return false;
+ }
+ cubeb->DontGoFaster();
+ return true;
+ });
+ };
+ AdvanceToTargetPosition();
const uint32_t ITERATION_COUNT = 5;
uint32_t iterations = ITERATION_COUNT;
- DisabledTrackMode currentMode = DisabledTrackMode::SILENCE_BLACK;
+ DisabledTrackMode nextMode = DisabledTrackMode::SILENCE_BLACK;
while (iterations--) {
// toggle the track enabled mode, wait a second, do this ITERATION_COUNT
// times
DispatchFunction([&] {
- processingTrack->SetDisabledTrackMode(currentMode);
- if (currentMode == DisabledTrackMode::SILENCE_BLACK) {
- currentMode = DisabledTrackMode::ENABLED;
+ processingTrack->SetDisabledTrackMode(nextMode);
+ if (nextMode == DisabledTrackMode::SILENCE_BLACK) {
+ nextMode = DisabledTrackMode::ENABLED;
} else {
- currentMode = DisabledTrackMode::SILENCE_BLACK;
+ nextMode = DisabledTrackMode::SILENCE_BLACK;
}
});
- totalFrames = 0;
- WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
- totalFrames += aFrames;
- return totalFrames > static_cast<uint32_t>(graph->GraphRate());
- });
+ targetPosition += graph->GraphRate();
+ AdvanceToTargetPosition();
}
// Clean up.
@@ -1595,8 +1597,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(2);
track1->SetInputProcessing(listener1);
- track1->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track1, listener1, true));
+ QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
@@ -1617,8 +1618,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(1);
track2->SetInputProcessing(listener2);
- track2->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track2, listener2, true));
+ QueueExpectIsPassThrough(track2, listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track2, listener2));
track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
@@ -1635,7 +1635,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack> aTrack,
const RefPtr<AudioInputProcessing>& aListener,
RefPtr<SmartMockCubebStream>& aStream,
- uint32_t aChannelCount) {
+ int32_t aChannelCount) {
bool destroyed = false;
MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
AbstractThread::GetCurrent(),
@@ -1650,11 +1650,9 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
newStream = aCreated;
});
- DispatchFunction([&] {
- aTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetRequestedInputChannelCount>(aTrack, *aTrack->DeviceId(),
- aListener, aChannelCount));
- });
+ MediaEnginePrefs settings;
+ settings.mChannels = aChannelCount;
+ QueueApplySettings(aTrack, aListener, settings);
SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
"TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)"_ns,
@@ -1726,14 +1724,12 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack>& aTrack,
const RefPtr<AudioInputProcessing>& aListener,
RefPtr<SmartMockCubebStream>& aStream,
- uint32_t aChannelCount) {
+ int32_t aChannelCount) {
ASSERT_TRUE(!!aTrack);
ASSERT_TRUE(!!aListener);
ASSERT_TRUE(!!aStream);
ASSERT_TRUE(aStream->mHasInput);
- ASSERT_NE(aChannelCount, 0U);
-
- const CubebUtils::AudioDeviceID device = *aTrack->DeviceId();
+ ASSERT_NE(aChannelCount, 0);
bool destroyed = false;
MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
@@ -1749,11 +1745,9 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
newStream = aCreated;
});
- DispatchFunction([&] {
- aTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetRequestedInputChannelCount>(aTrack, device, aListener,
- aChannelCount));
- });
+ MediaEnginePrefs settings;
+ settings.mChannels = aChannelCount;
+ QueueApplySettings(aTrack, aListener, settings);
SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
"TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged) #1"_ns,
@@ -1794,8 +1788,7 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
aTrack = AudioProcessingTrack::Create(graph);
aListener = new AudioInputProcessing(aChannelCount);
aTrack->SetInputProcessing(aListener);
- aTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(aTrack, aListener, true));
+ QueueExpectIsPassThrough(aTrack, aListener);
aTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(aTrack, aListener));
@@ -1829,8 +1822,7 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
track1 = AudioProcessingTrack::Create(graph);
listener1 = new AudioInputProcessing(1);
track1->SetInputProcessing(listener1);
- track1->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track1, listener1, true));
+ QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(nativeDevice, listener1, PRINCIPAL_HANDLE_NONE);
@@ -1873,8 +1865,7 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
track3->SetInputProcessing(listener3);
- track3->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track3, listener3, true));
+ QueueExpectIsPassThrough(track3, listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track3, listener3));
track3->ConnectDeviceInput(nonNativeDevice, listener3,
@@ -1992,12 +1983,13 @@ TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
- track->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track, listener, true));
+ QueueExpectIsPassThrough(track, listener);
track->SetInputProcessing(listener);
- track->GraphImpl()->AppendMessage(
- MakeUnique<SetRequestedInputChannelCount>(track, deviceId, listener,
- 1));
+
+ MediaEnginePrefs settings;
+ settings.mChannels = 1;
+ QueueApplySettings(track, listener, settings);
+
track->GraphImpl()->AppendMessage(
MakeUnique<GuardMessage>(track, std::move(h)));
});
@@ -2058,8 +2050,7 @@ TEST(TestAudioTrackGraph, StartAudioDeviceBeforeStartingAudioProcessing)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
- track->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track, listener, true));
+ QueueExpectIsPassThrough(track, listener);
track->SetInputProcessing(listener);
// Start audio device without starting audio processing.
track->ConnectDeviceInput(deviceId, listener, PRINCIPAL_HANDLE_NONE);
@@ -2124,8 +2115,7 @@ TEST(TestAudioTrackGraph, StopAudioProcessingBeforeStoppingAudioDevice)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
- track->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track, listener, true));
+ QueueExpectIsPassThrough(track, listener);
track->SetInputProcessing(listener);
track->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track, listener));
@@ -2260,8 +2250,7 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(1);
track1->SetInputProcessing(listener1);
- track1->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track1, listener1, true));
+ QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
@@ -2284,8 +2273,7 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(2);
track2->SetInputProcessing(listener2);
- track2->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track2, listener2, true));
+ QueueExpectIsPassThrough(track2, listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track2, listener2));
track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
@@ -2304,8 +2292,7 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
track3->SetInputProcessing(listener3);
- track3->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(track3, listener3, true));
+ QueueExpectIsPassThrough(track3, listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track3, listener3));
track3->ConnectDeviceInput(device3, listener3, PRINCIPAL_HANDLE_NONE);
@@ -2410,8 +2397,7 @@ void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
/* Primary graph: Create input track and open it */
processingTrack = AudioProcessingTrack::Create(primary);
listener = new AudioInputProcessing(2);
- processingTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(processingTrack, listener, true));
+ QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -2632,8 +2618,7 @@ TEST(TestAudioTrackGraph, SecondaryOutputDevice)
/* Create an input track and connect it to a device */
processingTrack = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
- processingTrack->GraphImpl()->AppendMessage(
- MakeUnique<SetPassThrough>(processingTrack, listener, true));
+ QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -2719,6 +2704,116 @@ TEST(TestAudioTrackGraph, SecondaryOutputDevice)
});
WaitFor(primaryStream->OutputVerificationEvent());
}
+
+// Test when AudioInputProcessing expects clock drift
+TEST(TestAudioInputProcessing, ClockDriftExpectation)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const TrackRate rate = 44100;
+
+ MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+ MediaTrackGraph::SYSTEM_THREAD_DRIVER,
+ /*Window ID*/ 1, rate, nullptr, GetMainThreadSerialEventTarget());
+
+ auto createInputProcessing =
+ [&](CubebUtils::AudioDeviceID aDeviceID,
+ RefPtr<AudioProcessingTrack>* aProcessingTrack,
+ RefPtr<AudioInputProcessing>* aInputProcessing) {
+ /* Create an input track and connect it to a device */
+ const int32_t channelCount = 2;
+ RefPtr processingTrack = AudioProcessingTrack::Create(graph);
+ RefPtr inputProcessing = new AudioInputProcessing(channelCount);
+ processingTrack->SetInputProcessing(inputProcessing);
+ MediaEnginePrefs settings;
+ settings.mChannels = channelCount;
+ settings.mAecOn = true;
+ QueueApplySettings(processingTrack, inputProcessing, settings);
+ processingTrack->GraphImpl()->AppendMessage(
+ MakeUnique<StartInputProcessing>(processingTrack, inputProcessing));
+ processingTrack->ConnectDeviceInput(aDeviceID, inputProcessing,
+ PRINCIPAL_HANDLE_NONE);
+ aProcessingTrack->swap(processingTrack);
+ aInputProcessing->swap(inputProcessing);
+ };
+
+ // Native input, which uses a duplex stream
+ RefPtr<AudioProcessingTrack> processingTrack1;
+ RefPtr<AudioInputProcessing> inputProcessing1;
+ DispatchFunction([&] {
+ createInputProcessing(nullptr, &processingTrack1, &inputProcessing1);
+ });
+ // Non-native input
+ const auto* nonNativeInputDeviceID = CubebUtils::AudioDeviceID(1);
+ RefPtr<AudioProcessingTrack> processingTrack2;
+ RefPtr<AudioInputProcessing> inputProcessing2;
+ DispatchFunction([&] {
+ createInputProcessing(nonNativeInputDeviceID, &processingTrack2,
+ &inputProcessing2);
+ processingTrack2->AddAudioOutput(nullptr, nullptr, rate);
+ });
+
+ RefPtr<SmartMockCubebStream> primaryStream;
+ RefPtr<SmartMockCubebStream> nonNativeInputStream;
+ WaitUntil(cubeb->StreamInitEvent(),
+ [&](RefPtr<SmartMockCubebStream>&& stream) {
+ if (stream->OutputChannels() > 0) {
+ primaryStream = std::move(stream);
+ return false;
+ }
+ nonNativeInputStream = std::move(stream);
+ return true;
+ });
+ EXPECT_EQ(nonNativeInputStream->GetInputDeviceID(), nonNativeInputDeviceID);
+
+ // Wait until non-native input signal reaches the output, when input
+ // processing has run and so has been configured.
+ WaitFor(primaryStream->FramesVerifiedEvent());
+
+ const void* secondaryOutputDeviceID = CubebUtils::AudioDeviceID(2);
+ DispatchFunction([&] {
+ // Check input processing config with output to primary device.
+ processingTrack1->QueueControlMessageWithNoShutdown([&] {
+ EXPECT_FALSE(inputProcessing1->HadAECAndDrift());
+ EXPECT_TRUE(inputProcessing2->HadAECAndDrift());
+ });
+
+ // Switch output to a secondary device.
+ processingTrack2->RemoveAudioOutput(nullptr);
+ processingTrack2->AddAudioOutput(nullptr, secondaryOutputDeviceID, rate);
+ });
+
+ RefPtr<SmartMockCubebStream> secondaryOutputStream =
+ WaitFor(cubeb->StreamInitEvent());
+ EXPECT_EQ(secondaryOutputStream->GetOutputDeviceID(),
+ secondaryOutputDeviceID);
+
+ WaitFor(secondaryOutputStream->FramesVerifiedEvent());
+ DispatchFunction([&] {
+ // Check input processing config with output to secondary device.
+ processingTrack1->QueueControlMessageWithNoShutdown([&] {
+ EXPECT_TRUE(inputProcessing1->HadAECAndDrift());
+ EXPECT_TRUE(inputProcessing2->HadAECAndDrift());
+ });
+ });
+
+ auto destroyInputProcessing = [&](AudioProcessingTrack* aProcessingTrack,
+ AudioInputProcessing* aInputProcessing) {
+ aProcessingTrack->GraphImpl()->AppendMessage(
+ MakeUnique<StopInputProcessing>(aProcessingTrack, aInputProcessing));
+ aProcessingTrack->DisconnectDeviceInput();
+ aProcessingTrack->Destroy();
+ };
+
+ DispatchFunction([&] {
+ // Clean up
+ destroyInputProcessing(processingTrack1, inputProcessing1);
+ destroyInputProcessing(processingTrack2, inputProcessing2);
+ });
+ // Wait for stream stop to ensure that expectations have been checked.
+ WaitFor(nonNativeInputStream->OutputVerificationEvent());
+}
#endif // MOZ_WEBRTC
#undef Invoke
diff --git a/dom/media/gtest/TestDeviceInputTrack.cpp b/dom/media/gtest/TestDeviceInputTrack.cpp
index 6eb8c08774..14b5227f9d 100644
--- a/dom/media/gtest/TestDeviceInputTrack.cpp
+++ b/dom/media/gtest/TestDeviceInputTrack.cpp
@@ -150,16 +150,16 @@ TEST_F(TestDeviceInputTrack, DeviceInputConsumerTrack) {
RefPtr<TestDeviceInputConsumerTrack> track1 =
TestDeviceInputConsumerTrack::Create(mGraph);
track1->ConnectDeviceInput(device1, listener1.get(), testPrincipal);
- EXPECT_TRUE(track1->ConnectToNativeDevice());
- EXPECT_FALSE(track1->ConnectToNonNativeDevice());
+ EXPECT_TRUE(track1->ConnectedToNativeDevice());
+ EXPECT_FALSE(track1->ConnectedToNonNativeDevice());
const CubebUtils::AudioDeviceID device2 = (void*)2;
RefPtr<TestAudioDataListener> listener2 = new TestAudioDataListener(2, false);
RefPtr<TestDeviceInputConsumerTrack> track2 =
TestDeviceInputConsumerTrack::Create(mGraph);
track2->ConnectDeviceInput(device2, listener2.get(), testPrincipal);
- EXPECT_FALSE(track2->ConnectToNativeDevice());
- EXPECT_TRUE(track2->ConnectToNonNativeDevice());
+ EXPECT_FALSE(track2->ConnectedToNativeDevice());
+ EXPECT_TRUE(track2->ConnectedToNonNativeDevice());
track2->Destroy();
mGraph->RemoveTrackGraphThread(track2);
diff --git a/dom/media/gtest/TestMediaDataEncoder.cpp b/dom/media/gtest/TestMediaDataEncoder.cpp
index 27a6b7cd07..39c92fb19c 100644
--- a/dom/media/gtest/TestMediaDataEncoder.cpp
+++ b/dom/media/gtest/TestMediaDataEncoder.cpp
@@ -33,7 +33,7 @@
#define FRAME_RATE 30
#define FRAME_DURATION (1000000 / FRAME_RATE)
#define BIT_RATE (1000 * 1000) // 1Mbps
-#define BIT_RATE_MODE MediaDataEncoder::BitrateMode::Variable
+#define BIT_RATE_MODE BitrateMode::Variable
#define KEYFRAME_INTERVAL FRAME_RATE // 1 keyframe per second
using namespace mozilla;
@@ -156,9 +156,8 @@ class MediaDataEncoderTest : public testing::Test {
template <typename T>
already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
- CodecType aCodec, MediaDataEncoder::Usage aUsage,
- MediaDataEncoder::PixelFormat aPixelFormat, int32_t aWidth, int32_t aHeight,
- MediaDataEncoder::ScalabilityMode aScalabilityMode,
+ CodecType aCodec, Usage aUsage, dom::ImageBitmapFormat aPixelFormat,
+ int32_t aWidth, int32_t aHeight, ScalabilityMode aScalabilityMode,
const Maybe<T>& aSpecific) {
RefPtr<PEMFactory> f(new PEMFactory());
@@ -171,13 +170,7 @@ already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
"TestMediaDataEncoder"));
RefPtr<MediaDataEncoder> e;
-#ifdef MOZ_WIDGET_ANDROID
- const MediaDataEncoder::HardwarePreference pref =
- MediaDataEncoder::HardwarePreference::None;
-#else
- const MediaDataEncoder::HardwarePreference pref =
- MediaDataEncoder::HardwarePreference::None;
-#endif
+ const HardwarePreference pref = HardwarePreference::None;
e = f->CreateEncoder(
EncoderConfig(aCodec, gfx::IntSize{aWidth, aHeight}, aUsage, aPixelFormat,
aPixelFormat, FRAME_RATE /* FPS */,
@@ -190,12 +183,10 @@ already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
}
static already_AddRefed<MediaDataEncoder> CreateH264Encoder(
- MediaDataEncoder::Usage aUsage = MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat aPixelFormat =
- MediaDataEncoder::PixelFormat::YUV420P,
+ Usage aUsage = Usage::Realtime,
+ dom::ImageBitmapFormat aPixelFormat = dom::ImageBitmapFormat::YUV420P,
int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
- MediaDataEncoder::ScalabilityMode aScalabilityMode =
- MediaDataEncoder::ScalabilityMode::None,
+ ScalabilityMode aScalabilityMode = ScalabilityMode::None,
const Maybe<H264Specific>& aSpecific = Some(kH264SpecificAnnexB)) {
return CreateVideoEncoder(CodecType::H264, aUsage, aPixelFormat, aWidth,
aHeight, aScalabilityMode, aSpecific);
@@ -234,10 +225,7 @@ static bool EnsureInit(const RefPtr<MediaDataEncoder>& aEncoder) {
bool succeeded;
media::Await(
GetMediaThreadPool(MediaThreadType::SUPERVISOR), aEncoder->Init(),
- [&succeeded](TrackInfo::TrackType t) {
- EXPECT_EQ(TrackInfo::TrackType::kVideoTrack, t);
- succeeded = true;
- },
+ [&succeeded](bool) { succeeded = true; },
[&succeeded](const MediaResult& r) { succeeded = false; });
return succeeded;
}
@@ -246,9 +234,8 @@ TEST_F(MediaDataEncoderTest, H264Inits) {
RUN_IF_SUPPORTED(CodecType::H264, []() {
// w/o codec specific: should fail for h264.
RefPtr<MediaDataEncoder> e =
- CreateH264Encoder(MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::None, Nothing());
+ CreateH264Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P,
+ WIDTH, HEIGHT, ScalabilityMode::None, Nothing());
EXPECT_FALSE(e);
// w/ codec specific
@@ -319,9 +306,8 @@ TEST_F(MediaDataEncoderTest, H264Encodes) {
WaitForShutdown(e);
// Encode one frame and output in avcC format.
- e = CreateH264Encoder(MediaDataEncoder::Usage::Record,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::None,
+ e = CreateH264Encoder(Usage::Record, dom::ImageBitmapFormat::YUV420P, WIDTH,
+ HEIGHT, ScalabilityMode::None,
Some(kH264SpecificAVCC));
EnsureInit(e);
output = Encode(e, NUM_FRAMES, mData);
@@ -349,22 +335,19 @@ TEST_F(MediaDataEncoderTest, H264Duration) {
TEST_F(MediaDataEncoderTest, InvalidSize) {
RUN_IF_SUPPORTED(CodecType::H264, []() {
- RefPtr<MediaDataEncoder> e0x0 = CreateH264Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, 0, 0,
- MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ RefPtr<MediaDataEncoder> e0x0 =
+ CreateH264Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P, 0,
+ 0, ScalabilityMode::None, Some(kH264SpecificAnnexB));
EXPECT_EQ(e0x0, nullptr);
- RefPtr<MediaDataEncoder> e0x1 = CreateH264Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, 0, 1,
- MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ RefPtr<MediaDataEncoder> e0x1 =
+ CreateH264Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P, 0,
+ 1, ScalabilityMode::None, Some(kH264SpecificAnnexB));
EXPECT_EQ(e0x1, nullptr);
- RefPtr<MediaDataEncoder> e1x0 = CreateH264Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, 1, 0,
- MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ RefPtr<MediaDataEncoder> e1x0 =
+ CreateH264Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P, 1,
+ 0, ScalabilityMode::None, Some(kH264SpecificAnnexB));
EXPECT_EQ(e1x0, nullptr);
});
}
@@ -372,10 +355,9 @@ TEST_F(MediaDataEncoderTest, InvalidSize) {
#ifdef MOZ_WIDGET_ANDROID
TEST_F(MediaDataEncoderTest, AndroidNotSupportedSize) {
RUN_IF_SUPPORTED(CodecType::H264, []() {
- RefPtr<MediaDataEncoder> e = CreateH264Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, 1, 1,
- MediaDataEncoder::ScalabilityMode::None, Some(kH264SpecificAnnexB));
+ RefPtr<MediaDataEncoder> e =
+ CreateH264Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P, 1,
+ 1, ScalabilityMode::None, Some(kH264SpecificAnnexB));
EXPECT_NE(e, nullptr);
EXPECT_FALSE(EnsureInit(e));
});
@@ -387,9 +369,8 @@ TEST_F(MediaDataEncoderTest, H264AVCC) {
RUN_IF_SUPPORTED(CodecType::H264, [this]() {
// Encod frames in avcC format.
RefPtr<MediaDataEncoder> e = CreateH264Encoder(
- MediaDataEncoder::Usage::Record, MediaDataEncoder::PixelFormat::YUV420P,
- WIDTH, HEIGHT, MediaDataEncoder::ScalabilityMode::None,
- Some(kH264SpecificAVCC));
+ Usage::Record, dom::ImageBitmapFormat::YUV420P, WIDTH, HEIGHT,
+ ScalabilityMode::None, Some(kH264SpecificAVCC));
EnsureInit(e);
MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData);
EXPECT_EQ(output.Length(), NUM_FRAMES);
@@ -412,24 +393,20 @@ TEST_F(MediaDataEncoderTest, H264AVCC) {
#endif
static already_AddRefed<MediaDataEncoder> CreateVP8Encoder(
- MediaDataEncoder::Usage aUsage = MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat aPixelFormat =
- MediaDataEncoder::PixelFormat::YUV420P,
+ Usage aUsage = Usage::Realtime,
+ dom::ImageBitmapFormat aPixelFormat = dom::ImageBitmapFormat::YUV420P,
int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
- MediaDataEncoder::ScalabilityMode aScalabilityMode =
- MediaDataEncoder::ScalabilityMode::None,
+ ScalabilityMode aScalabilityMode = ScalabilityMode::None,
const Maybe<VP8Specific>& aSpecific = Some(VP8Specific())) {
return CreateVideoEncoder(CodecType::VP8, aUsage, aPixelFormat, aWidth,
aHeight, aScalabilityMode, aSpecific);
}
static already_AddRefed<MediaDataEncoder> CreateVP9Encoder(
- MediaDataEncoder::Usage aUsage = MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat aPixelFormat =
- MediaDataEncoder::PixelFormat::YUV420P,
+ Usage aUsage = Usage::Realtime,
+ dom::ImageBitmapFormat aPixelFormat = dom::ImageBitmapFormat::YUV420P,
int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
- MediaDataEncoder::ScalabilityMode aScalabilityMode =
- MediaDataEncoder::ScalabilityMode::None,
+ ScalabilityMode aScalabilityMode = ScalabilityMode::None,
const Maybe<VP9Specific>& aSpecific = Some(VP9Specific())) {
return CreateVideoEncoder(CodecType::VP9, aUsage, aPixelFormat, aWidth,
aHeight, aScalabilityMode, aSpecific);
@@ -447,9 +424,8 @@ TEST_F(MediaDataEncoderTest, VP8Inits) {
RUN_IF_SUPPORTED(CodecType::VP8, []() {
// w/o codec specific.
RefPtr<MediaDataEncoder> e =
- CreateVP8Encoder(MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::None, Nothing());
+ CreateVP8Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P,
+ WIDTH, HEIGHT, ScalabilityMode::None, Nothing());
EXPECT_TRUE(EnsureInit(e));
WaitForShutdown(e);
@@ -551,10 +527,9 @@ TEST_F(MediaDataEncoderTest, VP8EncodeWithScalabilityModeL1T2) {
false, /* mAutoResize */
false /* mFrameDropping */
);
- RefPtr<MediaDataEncoder> e = CreateVP8Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::L1T2, Some(specific));
+ RefPtr<MediaDataEncoder> e =
+ CreateVP8Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P,
+ WIDTH, HEIGHT, ScalabilityMode::L1T2, Some(specific));
EnsureInit(e);
const nsTArray<uint8_t> pattern({0, 1});
@@ -580,10 +555,9 @@ TEST_F(MediaDataEncoderTest, VP8EncodeWithScalabilityModeL1T3) {
false, /* mAutoResize */
false /* mFrameDropping */
);
- RefPtr<MediaDataEncoder> e = CreateVP8Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::L1T3, Some(specific));
+ RefPtr<MediaDataEncoder> e =
+ CreateVP8Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P,
+ WIDTH, HEIGHT, ScalabilityMode::L1T3, Some(specific));
EnsureInit(e);
const nsTArray<uint8_t> pattern({0, 2, 1, 2});
@@ -613,9 +587,8 @@ TEST_F(MediaDataEncoderTest, VP9Inits) {
RUN_IF_SUPPORTED(CodecType::VP9, []() {
// w/o codec specific.
RefPtr<MediaDataEncoder> e =
- CreateVP9Encoder(MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::None, Nothing());
+ CreateVP9Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P,
+ WIDTH, HEIGHT, ScalabilityMode::None, Nothing());
EXPECT_TRUE(EnsureInit(e));
WaitForShutdown(e);
@@ -719,10 +692,9 @@ TEST_F(MediaDataEncoderTest, VP9EncodeWithScalabilityModeL1T2) {
false /* mFlexible */
);
- RefPtr<MediaDataEncoder> e = CreateVP9Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::L1T2, Some(specific));
+ RefPtr<MediaDataEncoder> e =
+ CreateVP9Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P,
+ WIDTH, HEIGHT, ScalabilityMode::L1T2, Some(specific));
EnsureInit(e);
const nsTArray<uint8_t> pattern({0, 1});
@@ -751,10 +723,9 @@ TEST_F(MediaDataEncoderTest, VP9EncodeWithScalabilityModeL1T3) {
false /* mFlexible */
);
- RefPtr<MediaDataEncoder> e = CreateVP9Encoder(
- MediaDataEncoder::Usage::Realtime,
- MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT,
- MediaDataEncoder::ScalabilityMode::L1T3, Some(specific));
+ RefPtr<MediaDataEncoder> e =
+ CreateVP9Encoder(Usage::Realtime, dom::ImageBitmapFormat::YUV420P,
+ WIDTH, HEIGHT, ScalabilityMode::L1T3, Some(specific));
EnsureInit(e);
const nsTArray<uint8_t> pattern({0, 2, 1, 2});
diff --git a/dom/media/hls/HLSDecoder.cpp b/dom/media/hls/HLSDecoder.cpp
index 99bf4c0ff6..dbf2339bef 100644
--- a/dom/media/hls/HLSDecoder.cpp
+++ b/dom/media/hls/HLSDecoder.cpp
@@ -18,9 +18,11 @@
#include "mozilla/java/GeckoHLSResourceWrapperNatives.h"
#include "nsContentUtils.h"
#include "nsIChannel.h"
+#include "nsIURL.h"
#include "nsNetUtil.h"
#include "nsThreadUtils.h"
#include "mozilla/dom/HTMLMediaElement.h"
+#include "mozilla/glean/GleanMetrics.h"
#include "mozilla/NullPrincipal.h"
#include "mozilla/StaticPrefs_media.h"
@@ -169,7 +171,8 @@ nsresult HLSDecoder::Load(nsIChannel* aChannel) {
mChannel = aChannel;
nsCString spec;
Unused << mURI->GetSpec(spec);
- ;
+ mUsageRecorded = false;
+
HLSResourceCallbacksSupport::Init();
mJavaCallbacks = java::GeckoHLSResourceWrapper::Callbacks::New();
mCallbackSupport = new HLSResourceCallbacksSupport(this);
@@ -253,13 +256,36 @@ void HLSDecoder::NotifyDataArrived() {
void HLSDecoder::NotifyLoad(nsCString aMediaUrl) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
- UpdateCurrentPrincipal(aMediaUrl);
+
+ nsCOMPtr<nsIURI> uri;
+ nsresult rv = NS_NewURI(getter_AddRefs(uri), aMediaUrl.Data());
+ NS_ENSURE_SUCCESS_VOID(rv);
+
+ RecordMediaUsage(uri);
+ UpdateCurrentPrincipal(uri);
+}
+
+void HLSDecoder::RecordMediaUsage(nsIURI* aMediaUri) {
+ if (mUsageRecorded) {
+ return;
+ }
+
+ nsresult rv;
+ nsCOMPtr<nsIURL> url = do_QueryInterface(aMediaUri, &rv);
+ NS_ENSURE_SUCCESS_VOID(rv);
+
+ // TODO: get hostname. See bug 1887053.
+ nsAutoCString mediaExt;
+ Unused << url->GetFileExtension(mediaExt);
+ glean::hls::MediaLoadExtra extra = {.mediaExtension = Some(mediaExt.get())};
+ glean::hls::media_load.Record(Some(extra));
+ mUsageRecorded = true;
}
// Should be called when the decoder loads media from a URL to ensure the
// principal of the media element is appropriately set for CORS.
-void HLSDecoder::UpdateCurrentPrincipal(nsCString aMediaUrl) {
- nsCOMPtr<nsIPrincipal> principal = GetContentPrincipal(aMediaUrl);
+void HLSDecoder::UpdateCurrentPrincipal(nsIURI* aMediaUri) {
+ nsCOMPtr<nsIPrincipal> principal = GetContentPrincipal(aMediaUri);
MOZ_DIAGNOSTIC_ASSERT(principal);
// Check the subsumption of old and new principals. Should be either
@@ -280,12 +306,8 @@ void HLSDecoder::UpdateCurrentPrincipal(nsCString aMediaUrl) {
}
already_AddRefed<nsIPrincipal> HLSDecoder::GetContentPrincipal(
- nsCString aMediaUrl) {
- nsCOMPtr<nsIURI> uri;
- nsresult rv = NS_NewURI(getter_AddRefs(uri), aMediaUrl.Data());
- NS_ENSURE_SUCCESS(rv, nullptr);
+ nsIURI* aMediaUri) {
RefPtr<dom::HTMLMediaElement> element = GetOwner()->GetMediaElement();
- NS_ENSURE_SUCCESS(rv, nullptr);
nsSecurityFlags securityFlags =
element->ShouldCheckAllowOrigin()
? nsILoadInfo::SEC_REQUIRE_CORS_INHERITS_SEC_CONTEXT
@@ -294,9 +316,9 @@ already_AddRefed<nsIPrincipal> HLSDecoder::GetContentPrincipal(
securityFlags |= nsILoadInfo::SEC_COOKIES_INCLUDE;
}
nsCOMPtr<nsIChannel> channel;
- rv = NS_NewChannel(getter_AddRefs(channel), uri,
- static_cast<dom::Element*>(element), securityFlags,
- nsIContentPolicy::TYPE_INTERNAL_VIDEO);
+ nsresult rv = NS_NewChannel(
+ getter_AddRefs(channel), aMediaUri, static_cast<dom::Element*>(element),
+ securityFlags, nsIContentPolicy::TYPE_INTERNAL_VIDEO);
NS_ENSURE_SUCCESS(rv, nullptr);
nsCOMPtr<nsIPrincipal> principal;
nsIScriptSecurityManager* secMan = nsContentUtils::GetSecurityManager();
diff --git a/dom/media/hls/HLSDecoder.h b/dom/media/hls/HLSDecoder.h
index 0f65457765..3624a8c3f4 100644
--- a/dom/media/hls/HLSDecoder.h
+++ b/dom/media/hls/HLSDecoder.h
@@ -47,6 +47,8 @@ class HLSDecoder final : public MediaDecoder {
// Called when Exoplayer start to load media. Main thread only.
void NotifyLoad(nsCString aMediaUrl);
+ bool IsHLSDecoder() const override { return true; }
+
private:
friend class HLSResourceCallbacksSupport;
@@ -61,8 +63,9 @@ class HLSDecoder final : public MediaDecoder {
return true;
}
- void UpdateCurrentPrincipal(nsCString aMediaUrl);
- already_AddRefed<nsIPrincipal> GetContentPrincipal(nsCString aMediaUrl);
+ void UpdateCurrentPrincipal(nsIURI* aMediaUri);
+ already_AddRefed<nsIPrincipal> GetContentPrincipal(nsIURI* aMediaUri);
+ void RecordMediaUsage(nsIURI* aMediaUri);
static size_t sAllocatedInstances; // Access only in the main thread.
@@ -72,6 +75,9 @@ class HLSDecoder final : public MediaDecoder {
java::GeckoHLSResourceWrapper::Callbacks::GlobalRef mJavaCallbacks;
RefPtr<HLSResourceCallbacksSupport> mCallbackSupport;
nsCOMPtr<nsIPrincipal> mContentPrincipal;
+ // There can be multiple media files loaded for one HLS content. Use this flag
+ // to ensure we only record once per content.
+ bool mUsageRecorded;
};
} // namespace mozilla
diff --git a/dom/media/hls/metrics.yaml b/dom/media/hls/metrics.yaml
new file mode 100644
index 0000000000..ea27b358ff
--- /dev/null
+++ b/dom/media/hls/metrics.yaml
@@ -0,0 +1,70 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Adding a new metric? We have docs for that!
+# https://firefox-source-docs.mozilla.org/toolkit/components/glean/user/new_definitions_file.html
+
+---
+$schema: moz://mozilla.org/schemas/glean/metrics/2-0-0
+$tags:
+ - 'Core :: Audio/Video'
+
+hls:
+ canplay_requested:
+ type: counter
+ description: >
+ Record when a page requests canPlayType for an HLS media type.
+ metadata:
+ tags:
+ - 'Core :: Audio/Video: Playback'
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1672751
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1672751#8
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - media-alerts@mozilla.com
+ expires: 132
+
+ canplay_supported:
+ type: counter
+ description: >
+ Record when a canPlayType request supports HLS.
+ metadata:
+ tags:
+ - 'Core :: Audio/Video: Playback'
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1672751
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1672751#8
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - media-alerts@mozilla.com
+ expires: 132
+
+ media_load:
+ type: event
+ description: >
+ Record the information about the HLS playback on Android using ExoPlayer.
+ The value of this event contains the media format.
+ metadata:
+ tags:
+ - 'Core :: Audio/Video: Playback'
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1672751
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1672751#8
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - media-alerts@mozilla.com
+ extra_keys:
+ media_extension:
+ description: >
+ The extension in the media file name, could be 'ts' (for MPEG-TS), 'mp4',
+ 'aac', 'mp3', ...
+ type: string
+ expires: 132
diff --git a/dom/media/ipc/MFCDMChild.cpp b/dom/media/ipc/MFCDMChild.cpp
index aedae5bc36..2ba2bdaf4e 100644
--- a/dom/media/ipc/MFCDMChild.cpp
+++ b/dom/media/ipc/MFCDMChild.cpp
@@ -148,7 +148,7 @@ void MFCDMChild::Shutdown() {
}
RefPtr<MFCDMChild::CapabilitiesPromise> MFCDMChild::GetCapabilities(
- bool aIsHWSecured) {
+ MFCDMCapabilitiesRequest&& aRequest) {
MOZ_ASSERT(mManagerThread);
if (mShutdown) {
@@ -160,23 +160,21 @@ RefPtr<MFCDMChild::CapabilitiesPromise> MFCDMChild::GetCapabilities(
return CapabilitiesPromise::CreateAndReject(mState, __func__);
}
- auto doSend = [self = RefPtr{this}, aIsHWSecured, this]() {
- SendGetCapabilities(aIsHWSecured)
- ->Then(
- mManagerThread, __func__,
- [self, this](MFCDMCapabilitiesResult&& aResult) {
- if (aResult.type() == MFCDMCapabilitiesResult::Tnsresult) {
- mCapabilitiesPromiseHolder.RejectIfExists(
- aResult.get_nsresult(), __func__);
- return;
- }
- mCapabilitiesPromiseHolder.ResolveIfExists(
- std::move(aResult.get_MFCDMCapabilitiesIPDL()), __func__);
- },
- [self, this](const mozilla::ipc::ResponseRejectReason& aReason) {
- mCapabilitiesPromiseHolder.RejectIfExists(NS_ERROR_FAILURE,
- __func__);
- });
+ auto doSend = [self = RefPtr{this}, request = std::move(aRequest), this]() {
+ SendGetCapabilities(request)->Then(
+ mManagerThread, __func__,
+ [self, this](MFCDMCapabilitiesResult&& aResult) {
+ if (aResult.type() == MFCDMCapabilitiesResult::Tnsresult) {
+ mCapabilitiesPromiseHolder.RejectIfExists(aResult.get_nsresult(),
+ __func__);
+ return;
+ }
+ mCapabilitiesPromiseHolder.ResolveIfExists(
+ std::move(aResult.get_MFCDMCapabilitiesIPDL()), __func__);
+ },
+ [self, this](const mozilla::ipc::ResponseRejectReason& aReason) {
+ mCapabilitiesPromiseHolder.RejectIfExists(NS_ERROR_FAILURE, __func__);
+ });
};
return InvokeAsync(doSend, __func__, mCapabilitiesPromiseHolder);
diff --git a/dom/media/ipc/MFCDMChild.h b/dom/media/ipc/MFCDMChild.h
index e62f2b7184..3396b0c790 100644
--- a/dom/media/ipc/MFCDMChild.h
+++ b/dom/media/ipc/MFCDMChild.h
@@ -25,7 +25,8 @@ class MFCDMChild final : public PMFCDMChild {
explicit MFCDMChild(const nsAString& aKeySystem);
using CapabilitiesPromise = MozPromise<MFCDMCapabilitiesIPDL, nsresult, true>;
- RefPtr<CapabilitiesPromise> GetCapabilities(bool aIsHWSecured);
+ RefPtr<CapabilitiesPromise> GetCapabilities(
+ MFCDMCapabilitiesRequest&& aRequest);
template <typename PromiseType>
already_AddRefed<PromiseType> InvokeAsync(
diff --git a/dom/media/ipc/MFCDMParent.cpp b/dom/media/ipc/MFCDMParent.cpp
index 2e91048b88..4570fbe838 100644
--- a/dom/media/ipc/MFCDMParent.cpp
+++ b/dom/media/ipc/MFCDMParent.cpp
@@ -5,6 +5,7 @@
#include "MFCDMParent.h"
#include <mfmediaengine.h>
+#include <unknwnbase.h>
#include <wtypes.h>
#define INITGUID // Enable DEFINE_PROPERTYKEY()
#include <propkeydef.h> // For DEFINE_PROPERTYKEY() definition
@@ -92,6 +93,8 @@ StaticMutex sFactoryMutex;
static nsTHashMap<nsStringHashKey, ComPtr<IMFContentDecryptionModuleFactory>>
sFactoryMap;
static CopyableTArray<MFCDMCapabilitiesIPDL> sCapabilities;
+StaticMutex sCapabilitesMutex;
+static ComPtr<IUnknown> sMediaEngineClassFactory;
// RAIIized PROPVARIANT. See
// third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h
@@ -166,6 +169,11 @@ static nsString GetHdcpPolicy(const dom::HDCPVersion& aMinHdcpVersion) {
return nsString(u"hdcp=1");
}
+static bool RequireClearLead(const nsString& aKeySystem) {
+ return aKeySystem.EqualsLiteral(kWidevineExperiment2KeySystemName) ||
+ aKeySystem.EqualsLiteral(kPlayReadyHardwareClearLeadKeySystemName);
+}
+
static void BuildCapabilitiesArray(
const nsTArray<MFCDMMediaCapability>& aCapabilities,
AutoPropVar& capabilitiesPropOut) {
@@ -464,8 +472,10 @@ LPCWSTR MFCDMParent::GetCDMLibraryName(const nsString& aKeySystem) {
/* static */
void MFCDMParent::Shutdown() {
+ StaticMutexAutoLock lock(sCapabilitesMutex);
sFactoryMap.Clear();
sCapabilities.Clear();
+ sMediaEngineClassFactory.Reset();
}
/* static */
@@ -500,10 +510,13 @@ HRESULT MFCDMParent::LoadFactory(
NS_ConvertUTF16toUTF8(aKeySystem).get());
ComPtr<IMFContentDecryptionModuleFactory> cdmFactory;
if (loadFromPlatform) {
+ if (!sMediaEngineClassFactory) {
+ MFCDM_RETURN_IF_FAILED(CoCreateInstance(
+ CLSID_MFMediaEngineClassFactory, nullptr, CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&sMediaEngineClassFactory)));
+ }
ComPtr<IMFMediaEngineClassFactory4> clsFactory;
- MFCDM_RETURN_IF_FAILED(CoCreateInstance(CLSID_MFMediaEngineClassFactory,
- nullptr, CLSCTX_INPROC_SERVER,
- IID_PPV_ARGS(&clsFactory)));
+ MFCDM_RETURN_IF_FAILED(sMediaEngineClassFactory.As(&clsFactory));
MFCDM_RETURN_IF_FAILED(clsFactory->CreateContentDecryptionModuleFactory(
MapKeySystem(aKeySystem).get(), IID_PPV_ARGS(&cdmFactory)));
aFactoryOut.Swap(cdmFactory);
@@ -617,12 +630,8 @@ static bool FactorySupports(ComPtr<IMFContentDecryptionModuleFactory>& aFactory,
// use another way to check the capabilities.
if (IsPlayReadyKeySystemAndSupported(aKeySystem) &&
StaticPrefs::media_eme_playready_istypesupportedex()) {
- ComPtr<IMFMediaEngineClassFactory> spFactory;
ComPtr<IMFExtendedDRMTypeSupport> spDrmTypeSupport;
- MFCDM_RETURN_BOOL_IF_FAILED(
- CoCreateInstance(CLSID_MFMediaEngineClassFactory, NULL,
- CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&spFactory)));
- MFCDM_RETURN_BOOL_IF_FAILED(spFactory.As(&spDrmTypeSupport));
+ MFCDM_RETURN_BOOL_IF_FAILED(sMediaEngineClassFactory.As(&spDrmTypeSupport));
BSTR keySystem = aIsHWSecure
? CreateBSTRFromConstChar(kPlayReadyKeySystemHardware)
: CreateBSTRFromConstChar(kPlayReadyKeySystemName);
@@ -699,46 +708,55 @@ MFCDMParent::GetAllKeySystemsCapabilities() {
new CapabilitiesPromise::Private(__func__);
Unused << backgroundTaskQueue->Dispatch(NS_NewRunnableFunction(__func__, [p] {
MFCDM_PARENT_SLOG("GetAllKeySystemsCapabilities");
- if (sCapabilities.IsEmpty()) {
- enum SecureLevel : bool {
- Software = false,
- Hardware = true,
- };
- const nsTArray<std::pair<nsString, SecureLevel>> kKeySystems{
- std::pair<nsString, SecureLevel>(
- NS_ConvertUTF8toUTF16(kPlayReadyKeySystemName),
- SecureLevel::Software),
- std::pair<nsString, SecureLevel>(
- NS_ConvertUTF8toUTF16(kPlayReadyKeySystemHardware),
- SecureLevel::Hardware),
- std::pair<nsString, SecureLevel>(
- NS_ConvertUTF8toUTF16(kPlayReadyHardwareClearLeadKeySystemName),
- SecureLevel::Hardware),
- std::pair<nsString, SecureLevel>(
- NS_ConvertUTF8toUTF16(kWidevineExperimentKeySystemName),
- SecureLevel::Hardware),
- std::pair<nsString, SecureLevel>(
- NS_ConvertUTF8toUTF16(kWidevineExperiment2KeySystemName),
- SecureLevel::Hardware),
- };
- for (const auto& keySystem : kKeySystems) {
- // Only check the capabilites if the relative prefs for the key system
- // are ON.
- if (IsPlayReadyKeySystemAndSupported(keySystem.first) ||
- IsWidevineExperimentKeySystemAndSupported(keySystem.first)) {
- MFCDMCapabilitiesIPDL* c = sCapabilities.AppendElement();
- GetCapabilities(keySystem.first, keySystem.second, nullptr, *c);
+ enum SecureLevel : bool {
+ Software = false,
+ Hardware = true,
+ };
+ const nsTArray<std::pair<nsString, SecureLevel>> kKeySystems{
+ std::pair<nsString, SecureLevel>(
+ NS_ConvertUTF8toUTF16(kPlayReadyKeySystemName),
+ SecureLevel::Software),
+ std::pair<nsString, SecureLevel>(
+ NS_ConvertUTF8toUTF16(kPlayReadyKeySystemHardware),
+ SecureLevel::Hardware),
+ std::pair<nsString, SecureLevel>(
+ NS_ConvertUTF8toUTF16(kPlayReadyHardwareClearLeadKeySystemName),
+ SecureLevel::Hardware),
+ std::pair<nsString, SecureLevel>(
+ NS_ConvertUTF8toUTF16(kWidevineExperimentKeySystemName),
+ SecureLevel::Hardware),
+ std::pair<nsString, SecureLevel>(
+ NS_ConvertUTF8toUTF16(kWidevineExperiment2KeySystemName),
+ SecureLevel::Hardware),
+ };
+
+ CopyableTArray<MFCDMCapabilitiesIPDL> capabilitiesArr;
+ for (const auto& keySystem : kKeySystems) {
+ // Only check the capabilities if the relevant prefs for the key system
+ // are ON.
+ if (IsPlayReadyKeySystemAndSupported(keySystem.first) ||
+ IsWidevineExperimentKeySystemAndSupported(keySystem.first)) {
+ MFCDMCapabilitiesIPDL* c = capabilitiesArr.AppendElement();
+ CapabilitesFlagSet flags;
+ if (keySystem.second == SecureLevel::Hardware) {
+ flags += CapabilitesFlag::HarewareDecryption;
+ }
+ flags += CapabilitesFlag::NeedHDCPCheck;
+ if (RequireClearLead(keySystem.first)) {
+ flags += CapabilitesFlag::NeedClearLeadCheck;
}
+ GetCapabilities(keySystem.first, flags, nullptr, *c);
}
}
- p->Resolve(sCapabilities, __func__);
+
+ p->Resolve(std::move(capabilitiesArr), __func__);
}));
return p;
}
/* static */
void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
- const bool aIsHWSecure,
+ const CapabilitesFlagSet& aFlags,
IMFContentDecryptionModuleFactory* aFactory,
MFCDMCapabilitiesIPDL& aCapabilitiesOut) {
aCapabilitiesOut.keySystem() = aKeySystem;
@@ -747,9 +765,12 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
aCapabilitiesOut.persistentState() = KeySystemConfig::Requirement::Required;
aCapabilitiesOut.distinctiveID() = KeySystemConfig::Requirement::Required;
+ const bool isHardwareDecryption =
+ aFlags.contains(CapabilitesFlag::HarewareDecryption);
+ aCapabilitiesOut.isHardwareDecryption() = isHardwareDecryption;
// Return empty capabilites for SWDRM on Windows 10 because it has the process
// leaking problem.
- if (!IsWin11OrLater() && !aIsHWSecure) {
+ if (!IsWin11OrLater() && !isHardwareDecryption) {
return;
}
@@ -758,6 +779,30 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
RETURN_VOID_IF_FAILED(GetOrCreateFactory(aKeySystem, factory));
}
+ StaticMutexAutoLock lock(sCapabilitesMutex);
+ for (auto& capabilities : sCapabilities) {
+ if (capabilities.keySystem().Equals(aKeySystem) &&
+ capabilities.isHardwareDecryption() == isHardwareDecryption) {
+ MFCDM_PARENT_SLOG(
+ "Return cached capabilities for %s (hardwareDecryption=%d)",
+ NS_ConvertUTF16toUTF8(aKeySystem).get(), isHardwareDecryption);
+ if (capabilities.isHDCP22Compatible().isNothing() &&
+ aFlags.contains(CapabilitesFlag::NeedHDCPCheck)) {
+ const bool rv = IsHDCPVersionSupported(factory, aKeySystem,
+ dom::HDCPVersion::_2_2) == NS_OK;
+ MFCDM_PARENT_SLOG(
+ "Check HDCP 2.2 compatible (%d) for the cached capabilites", rv);
+ capabilities.isHDCP22Compatible() = Some(rv);
+ }
+ aCapabilitiesOut = capabilities;
+ return;
+ }
+ }
+
+ MFCDM_PARENT_SLOG(
+ "Query capabilities for %s from the factory (hardwareDecryption=%d)",
+ NS_ConvertUTF16toUTF8(aKeySystem).get(), isHardwareDecryption);
+
// Widevine requires codec type to be four CC, PlayReady is fine with both.
static auto convertCodecToFourCC =
[](const KeySystemConfig::EMECodecString& aCodec) {
@@ -809,12 +854,12 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
}
if (FactorySupports(factory, aKeySystem, convertCodecToFourCC(codec),
KeySystemConfig::EMECodecString(""), nsString(u""),
- aIsHWSecure)) {
+ isHardwareDecryption)) {
MFCDMMediaCapability* c =
aCapabilitiesOut.videoCapabilities().AppendElement();
c->contentType() = NS_ConvertUTF8toUTF16(codec);
c->robustness() =
- GetRobustnessStringForKeySystem(aKeySystem, aIsHWSecure);
+ GetRobustnessStringForKeySystem(aKeySystem, isHardwareDecryption);
MFCDM_PARENT_SLOG("%s: +video:%s", __func__, codec.get());
supportedVideoCodecs.AppendElement(codec);
}
@@ -831,52 +876,51 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
KeySystemConfig::EME_CODEC_VORBIS,
});
for (const auto& codec : kAudioCodecs) {
- if (FactorySupports(
- factory, aKeySystem, convertCodecToFourCC(supportedVideoCodecs[0]),
- convertCodecToFourCC(codec), nsString(u""), aIsHWSecure)) {
+ // Hardware decryption is usually only used for video, so we can just check
+ // the software capabilities for audio in order to save some time, because
+ // the Media Foundation would create a new D3D device every time we check
+ // hardware decryption, which takes much longer.
+ if (FactorySupports(factory, aKeySystem,
+ convertCodecToFourCC(supportedVideoCodecs[0]),
+ convertCodecToFourCC(codec), nsString(u""),
+ false /* aIsHWSecure */)) {
MFCDMMediaCapability* c =
aCapabilitiesOut.audioCapabilities().AppendElement();
c->contentType() = NS_ConvertUTF8toUTF16(codec);
- c->robustness() = GetRobustnessStringForKeySystem(aKeySystem, aIsHWSecure,
- false /* isVideo */);
+ c->robustness() = GetRobustnessStringForKeySystem(
+ aKeySystem, false /* aIsHWSecure */, false /* isVideo */);
MFCDM_PARENT_SLOG("%s: +audio:%s", __func__, codec.get());
}
}
- // Collect schemes supported by all video codecs.
- static nsTArray<std::pair<CryptoScheme, nsDependentString>> kSchemes = {
- std::pair<CryptoScheme, nsDependentString>(
- CryptoScheme::Cenc, u"encryption-type=cenc,encryption-iv-size=8,"),
- std::pair<CryptoScheme, nsDependentString>(
- CryptoScheme::Cbcs, u"encryption-type=cbcs,encryption-iv-size=16,")};
- for (auto& scheme : kSchemes) {
- bool ok = true;
- for (auto& codec : supportedVideoCodecs) {
- ok &= FactorySupports(
- factory, aKeySystem, convertCodecToFourCC(codec), nsCString(""),
- scheme.second /* additional feature */, aIsHWSecure);
- if (!ok) {
- break;
- }
- }
- if (ok) {
- aCapabilitiesOut.encryptionSchemes().AppendElement(scheme.first);
- MFCDM_PARENT_SLOG("%s: +scheme:%s", __func__,
- scheme.first == CryptoScheme::Cenc ? "cenc" : "cbcs");
- }
+ // 'If value is unspecified, default value of "cenc" is used.' See
+ // https://learn.microsoft.com/en-us/windows/win32/api/mfmediaengine/nf-mfmediaengine-imfextendeddrmtypesupport-istypesupportedex
+ if (!supportedVideoCodecs.IsEmpty()) {
+ aCapabilitiesOut.encryptionSchemes().AppendElement(CryptoScheme::Cenc);
+ MFCDM_PARENT_SLOG("%s: +scheme:cenc", __func__);
}
- static auto RequireClearLead = [](const nsString& aKeySystem) {
- if (aKeySystem.EqualsLiteral(kWidevineExperiment2KeySystemName) ||
- aKeySystem.EqualsLiteral(kPlayReadyHardwareClearLeadKeySystemName)) {
- return true;
+ // Check another scheme "cbcs"
+ static std::pair<CryptoScheme, nsDependentString> kCbcs =
+ std::pair<CryptoScheme, nsDependentString>(
+ CryptoScheme::Cbcs, u"encryption-type=cbcs,encryption-iv-size=16,");
+ bool ok = true;
+ for (const auto& codec : supportedVideoCodecs) {
+ ok &= FactorySupports(factory, aKeySystem, convertCodecToFourCC(codec),
+ nsCString(""), kCbcs.second /* additional feature */,
+ isHardwareDecryption);
+ if (!ok) {
+ break;
}
- return false;
- };
+ }
+ if (ok) {
+ aCapabilitiesOut.encryptionSchemes().AppendElement(kCbcs.first);
+ MFCDM_PARENT_SLOG("%s: +scheme:cbcs", __func__);
+ }
// For key system requires clearlead, every codec needs to have clear support.
// If not, then we will remove the codec from supported codec.
- if (RequireClearLead(aKeySystem)) {
+ if (aFlags.contains(CapabilitesFlag::NeedClearLeadCheck)) {
for (const auto& scheme : aCapabilitiesOut.encryptionSchemes()) {
nsTArray<KeySystemConfig::EMECodecString> noClearLeadCodecs;
for (const auto& codec : supportedVideoCodecs) {
@@ -894,9 +938,9 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
} else {
additionalFeature.AppendLiteral(u"cbcs-clearlead,");
}
- bool rv =
- FactorySupports(factory, aKeySystem, convertCodecToFourCC(codec),
- nsCString(""), additionalFeature, aIsHWSecure);
+ bool rv = FactorySupports(factory, aKeySystem,
+ convertCodecToFourCC(codec), nsCString(""),
+ additionalFeature, isHardwareDecryption);
MFCDM_PARENT_SLOG("clearlead %s IV 8 bytes %s %s",
CryptoSchemeToString(scheme), codec.get(),
rv ? "supported" : "not supported");
@@ -906,7 +950,8 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
// Try 16 bytes IV.
additionalFeature.AppendLiteral(u"encryption-iv-size=16,");
rv = FactorySupports(factory, aKeySystem, convertCodecToFourCC(codec),
- nsCString(""), additionalFeature, aIsHWSecure);
+ nsCString(""), additionalFeature,
+ isHardwareDecryption);
MFCDM_PARENT_SLOG("clearlead %s IV 16 bytes %s %s",
CryptoSchemeToString(scheme), codec.get(),
rv ? "supported" : "not supported");
@@ -926,9 +971,14 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
}
}
- if (IsHDCPVersionSupported(factory, aKeySystem, dom::HDCPVersion::_2_2) ==
- NS_OK) {
- aCapabilitiesOut.isHDCP22Compatible() = true;
+ // Only perform the HDCP check if necessary: "The hdcp query (item 4) has a
+ // computationally expensive first invocation cost". See
+ // https://learn.microsoft.com/en-us/windows/win32/api/mfmediaengine/nf-mfmediaengine-imfextendeddrmtypesupport-istypesupportedex
+ if (aFlags.contains(CapabilitesFlag::NeedHDCPCheck) &&
+ IsHDCPVersionSupported(factory, aKeySystem, dom::HDCPVersion::_2_2) ==
+ NS_OK) {
+ MFCDM_PARENT_SLOG("Capabilites is compatible with HDCP 2.2");
+ aCapabilitiesOut.isHDCP22Compatible() = Some(true);
}
// TODO: don't hardcode
@@ -938,13 +988,24 @@ void MFCDMParent::GetCapabilities(const nsString& aKeySystem,
KeySystemConfig::SessionType::Temporary);
aCapabilitiesOut.sessionTypes().AppendElement(
KeySystemConfig::SessionType::PersistentLicense);
+
+ // Cache capabilities for reuse.
+ sCapabilities.AppendElement(aCapabilitiesOut);
}
mozilla::ipc::IPCResult MFCDMParent::RecvGetCapabilities(
- const bool aIsHWSecure, GetCapabilitiesResolver&& aResolver) {
+ const MFCDMCapabilitiesRequest& aRequest,
+ GetCapabilitiesResolver&& aResolver) {
MFCDM_REJECT_IF(!mFactory, NS_ERROR_DOM_NOT_SUPPORTED_ERR);
MFCDMCapabilitiesIPDL capabilities;
- GetCapabilities(mKeySystem, aIsHWSecure, mFactory.Get(), capabilities);
+ CapabilitesFlagSet flags;
+ if (aRequest.isHardwareDecryption()) {
+ flags += CapabilitesFlag::HarewareDecryption;
+ }
+ if (RequireClearLead(aRequest.keySystem())) {
+ flags += CapabilitesFlag::NeedClearLeadCheck;
+ }
+ GetCapabilities(aRequest.keySystem(), flags, mFactory.Get(), capabilities);
aResolver(std::move(capabilities));
return IPC_OK();
}
diff --git a/dom/media/ipc/MFCDMParent.h b/dom/media/ipc/MFCDMParent.h
index b4ef1b831b..921d86be73 100644
--- a/dom/media/ipc/MFCDMParent.h
+++ b/dom/media/ipc/MFCDMParent.h
@@ -52,7 +52,8 @@ class MFCDMParent final : public PMFCDMParent {
uint64_t Id() const { return mId; }
mozilla::ipc::IPCResult RecvGetCapabilities(
- const bool aIsHWSecured, GetCapabilitiesResolver&& aResolver);
+ const MFCDMCapabilitiesRequest& aRequest,
+ GetCapabilitiesResolver&& aResolver);
mozilla::ipc::IPCResult RecvInit(const MFCDMInitParamsIPDL& aParams,
InitResolver&& aResolver);
@@ -97,6 +98,13 @@ class MFCDMParent final : public PMFCDMParent {
private:
~MFCDMParent();
+ enum class CapabilitesFlag {
+ HarewareDecryption,
+ NeedHDCPCheck,
+ NeedClearLeadCheck,
+ };
+ using CapabilitesFlagSet = EnumSet<CapabilitesFlag, uint8_t>;
+
static LPCWSTR GetCDMLibraryName(const nsString& aKeySystem);
static HRESULT GetOrCreateFactory(
@@ -108,7 +116,7 @@ class MFCDMParent final : public PMFCDMParent {
Microsoft::WRL::ComPtr<IMFContentDecryptionModuleFactory>& aFactoryOut);
static void GetCapabilities(const nsString& aKeySystem,
- const bool aIsHWSecure,
+ const CapabilitesFlagSet& aFlags,
IMFContentDecryptionModuleFactory* aFactory,
MFCDMCapabilitiesIPDL& aCapabilitiesOut);
diff --git a/dom/media/ipc/MFMediaEngineChild.cpp b/dom/media/ipc/MFMediaEngineChild.cpp
index 02013056d5..cc32d15ea4 100644
--- a/dom/media/ipc/MFMediaEngineChild.cpp
+++ b/dom/media/ipc/MFMediaEngineChild.cpp
@@ -47,20 +47,22 @@ MFMediaEngineChild::MFMediaEngineChild(MFMediaEngineWrapper* aOwner,
}
RefPtr<GenericNonExclusivePromise> MFMediaEngineChild::Init(
- bool aShouldPreload) {
+ const MediaInfo& aInfo, bool aShouldPreload) {
if (!mManagerThread) {
return GenericNonExclusivePromise::CreateAndReject(NS_ERROR_FAILURE,
__func__);
}
- CLOG("Init");
+ CLOG("Init, hasAudio=%d, hasVideo=%d, encrypted=%d", aInfo.HasAudio(),
+ aInfo.HasVideo(), aInfo.IsEncrypted());
+
MOZ_ASSERT(mMediaEngineId == 0);
RefPtr<MFMediaEngineChild> self = this;
RemoteDecoderManagerChild::LaunchUtilityProcessIfNeeded(
RemoteDecodeIn::UtilityProcess_MFMediaEngineCDM)
->Then(
mManagerThread, __func__,
- [self, this, aShouldPreload](bool) {
+ [self, this, aShouldPreload, info = aInfo](bool) {
RefPtr<RemoteDecoderManagerChild> manager =
RemoteDecoderManagerChild::GetSingleton(
RemoteDecodeIn::UtilityProcess_MFMediaEngineCDM);
@@ -72,8 +74,13 @@ RefPtr<GenericNonExclusivePromise> MFMediaEngineChild::Init(
mIPDLSelfRef = this;
Unused << manager->SendPMFMediaEngineConstructor(this);
- MediaEngineInfoIPDL info(aShouldPreload);
- SendInitMediaEngine(info)
+
+ MediaInfoIPDL mediaInfo(
+ info.HasAudio() ? Some(info.mAudio) : Nothing(),
+ info.HasVideo() ? Some(info.mVideo) : Nothing());
+
+ MediaEngineInfoIPDL initInfo(mediaInfo, aShouldPreload);
+ SendInitMediaEngine(initInfo)
->Then(
mManagerThread, __func__,
[self, this](uint64_t aId) {
@@ -256,9 +263,9 @@ MFMediaEngineWrapper::MFMediaEngineWrapper(ExternalEngineStateMachine* aOwner,
mCurrentTimeInSecond(0.0) {}
RefPtr<GenericNonExclusivePromise> MFMediaEngineWrapper::Init(
- bool aShouldPreload) {
+ const MediaInfo& aInfo, bool aShouldPreload) {
WLOG("Init");
- return mEngine->Init(aShouldPreload);
+ return mEngine->Init(aInfo, aShouldPreload);
}
MFMediaEngineWrapper::~MFMediaEngineWrapper() { mEngine->OwnerDestroyed(); }
@@ -335,18 +342,6 @@ void MFMediaEngineWrapper::NotifyEndOfStream(TrackInfo::TrackType aType) {
[engine = mEngine, aType] { engine->SendNotifyEndOfStream(aType); }));
}
-void MFMediaEngineWrapper::SetMediaInfo(const MediaInfo& aInfo) {
- WLOG("SetMediaInfo, hasAudio=%d, hasVideo=%d, encrypted=%d", aInfo.HasAudio(),
- aInfo.HasVideo(), aInfo.IsEncrypted());
- MOZ_ASSERT(IsInited());
- Unused << ManagerThread()->Dispatch(NS_NewRunnableFunction(
- "MFMediaEngineWrapper::SetMediaInfo", [engine = mEngine, aInfo] {
- MediaInfoIPDL info(aInfo.HasAudio() ? Some(aInfo.mAudio) : Nothing(),
- aInfo.HasVideo() ? Some(aInfo.mVideo) : Nothing());
- engine->SendNotifyMediaInfo(info);
- }));
-}
-
bool MFMediaEngineWrapper::SetCDMProxy(CDMProxy* aProxy) {
#ifdef MOZ_WMF_CDM
WMFCDMProxy* proxy = aProxy->AsWMFCDMProxy();
diff --git a/dom/media/ipc/MFMediaEngineChild.h b/dom/media/ipc/MFMediaEngineChild.h
index 92de3b9483..13b837b7d8 100644
--- a/dom/media/ipc/MFMediaEngineChild.h
+++ b/dom/media/ipc/MFMediaEngineChild.h
@@ -32,7 +32,8 @@ class MFMediaEngineChild final : public PMFMediaEngineChild {
void OwnerDestroyed();
void IPDLActorDestroyed();
- RefPtr<GenericNonExclusivePromise> Init(bool aShouldPreload);
+ RefPtr<GenericNonExclusivePromise> Init(const MediaInfo& aInfo,
+ bool aShouldPreload);
void Shutdown();
// Methods for PMFMediaEngineChild
@@ -99,7 +100,8 @@ class MFMediaEngineWrapper final : public ExternalPlaybackEngine {
~MFMediaEngineWrapper();
// Methods for ExternalPlaybackEngine
- RefPtr<GenericNonExclusivePromise> Init(bool aShouldPreload) override;
+ RefPtr<GenericNonExclusivePromise> Init(const MediaInfo& aInfo,
+ bool aShouldPreload) override;
void Play() override;
void Pause() override;
void Seek(const media::TimeUnit& aTargetTime) override;
@@ -111,7 +113,7 @@ class MFMediaEngineWrapper final : public ExternalPlaybackEngine {
media::TimeUnit GetCurrentPosition() override;
void NotifyEndOfStream(TrackInfo::TrackType aType) override;
uint64_t Id() const override { return mEngine->Id(); }
- void SetMediaInfo(const MediaInfo& aInfo) override;
+ bool IsInited() const { return mEngine->Id() != 0; }
bool SetCDMProxy(CDMProxy* aProxy) override;
void NotifyResizing(uint32_t aWidth, uint32_t aHeight) override;
@@ -121,7 +123,6 @@ class MFMediaEngineWrapper final : public ExternalPlaybackEngine {
private:
friend class MFMediaEngineChild;
- bool IsInited() const { return mEngine->Id() != 0; }
void UpdateCurrentTime(double aCurrentTimeInSecond);
void NotifyEvent(ExternalEngineEvent aEvent);
void NotifyError(const MediaResult& aError);
diff --git a/dom/media/ipc/MFMediaEngineParent.cpp b/dom/media/ipc/MFMediaEngineParent.cpp
index 5ed1b71160..3a9670f330 100644
--- a/dom/media/ipc/MFMediaEngineParent.cpp
+++ b/dom/media/ipc/MFMediaEngineParent.cpp
@@ -338,17 +338,17 @@ mozilla::ipc::IPCResult MFMediaEngineParent::RecvInitMediaEngine(
// TODO : really need this?
Unused << mMediaEngine->SetPreload(MF_MEDIA_ENGINE_PRELOAD_AUTOMATIC);
}
+ RETURN_PARAM_IF_FAILED(SetMediaInfo(aInfo.mediaInfo()), IPC_OK());
aResolver(mMediaEngineId);
return IPC_OK();
}
-mozilla::ipc::IPCResult MFMediaEngineParent::RecvNotifyMediaInfo(
- const MediaInfoIPDL& aInfo) {
+HRESULT MFMediaEngineParent::SetMediaInfo(const MediaInfoIPDL& aInfo) {
AssertOnManagerThread();
MOZ_ASSERT(mIsCreatedMediaEngine, "Hasn't created media engine?");
MOZ_ASSERT(!mMediaSource);
- LOG("RecvNotifyMediaInfo");
+ LOG("SetMediaInfo");
auto errorExit = MakeScopeExit([&] {
MediaResult error(NS_ERROR_DOM_MEDIA_FATAL_ERR,
@@ -378,9 +378,8 @@ mozilla::ipc::IPCResult MFMediaEngineParent::RecvNotifyMediaInfo(
if (aInfo.videoInfo()) {
ComPtr<IMFMediaEngineEx> mediaEngineEx;
- RETURN_PARAM_IF_FAILED(mMediaEngine.As(&mediaEngineEx), IPC_OK());
- RETURN_PARAM_IF_FAILED(mediaEngineEx->EnableWindowlessSwapchainMode(true),
- IPC_OK());
+ RETURN_IF_FAILED(mMediaEngine.As(&mediaEngineEx));
+ RETURN_IF_FAILED(mediaEngineEx->EnableWindowlessSwapchainMode(true));
LOG("Enabled dcomp swap chain mode");
ENGINE_MARKER("MFMediaEngineParent,EnabledSwapChain");
}
@@ -392,7 +391,7 @@ mozilla::ipc::IPCResult MFMediaEngineParent::RecvNotifyMediaInfo(
#ifdef MOZ_WMF_CDM
if (isEncryted && !mContentProtectionManager) {
// We will set the source later when the CDM proxy is ready.
- return IPC_OK();
+ return S_OK;
}
if (isEncryted && mContentProtectionManager) {
@@ -403,7 +402,7 @@ mozilla::ipc::IPCResult MFMediaEngineParent::RecvNotifyMediaInfo(
#endif
SetMediaSourceOnEngine();
- return IPC_OK();
+ return S_OK;
}
void MFMediaEngineParent::SetMediaSourceOnEngine() {
diff --git a/dom/media/ipc/MFMediaEngineParent.h b/dom/media/ipc/MFMediaEngineParent.h
index f606d3c44d..843ac91aaf 100644
--- a/dom/media/ipc/MFMediaEngineParent.h
+++ b/dom/media/ipc/MFMediaEngineParent.h
@@ -52,7 +52,6 @@ class MFMediaEngineParent final : public PMFMediaEngineParent {
// Methods for PMFMediaEngineParent
mozilla::ipc::IPCResult RecvInitMediaEngine(
const MediaEngineInfoIPDL& aInfo, InitMediaEngineResolver&& aResolver);
- mozilla::ipc::IPCResult RecvNotifyMediaInfo(const MediaInfoIPDL& aInfo);
mozilla::ipc::IPCResult RecvPlay();
mozilla::ipc::IPCResult RecvPause();
mozilla::ipc::IPCResult RecvSeek(double aTargetTimeInSecond);
@@ -69,6 +68,7 @@ class MFMediaEngineParent final : public PMFMediaEngineParent {
~MFMediaEngineParent();
void CreateMediaEngine();
+ HRESULT SetMediaInfo(const MediaInfoIPDL& aInfo);
void InitializeDXGIDeviceManager();
diff --git a/dom/media/ipc/MediaIPCUtils.h b/dom/media/ipc/MediaIPCUtils.h
index fecf41c325..9598e1557f 100644
--- a/dom/media/ipc/MediaIPCUtils.h
+++ b/dom/media/ipc/MediaIPCUtils.h
@@ -15,6 +15,7 @@
#include "ipc/EnumSerializer.h"
#include "mozilla/EnumSet.h"
#include "mozilla/GfxMessageUtils.h"
+#include "mozilla/dom/WebGLIpdl.h"
#include "mozilla/gfx/Rect.h"
#include "mozilla/dom/MFCDMSerializers.h"
@@ -139,7 +140,7 @@ struct ParamTraits<mozilla::FlacCodecSpecificData> {
template <>
struct ParamTraits<mozilla::Mp3CodecSpecificData>
- : public PlainOldDataSerializer<mozilla::Mp3CodecSpecificData> {};
+ : public ParamTraits_TiedFields<mozilla::Mp3CodecSpecificData> {};
template <>
struct ParamTraits<mozilla::OpusCodecSpecificData> {
diff --git a/dom/media/ipc/PMFCDM.ipdl b/dom/media/ipc/PMFCDM.ipdl
index e86b94c217..793cfa3808 100644
--- a/dom/media/ipc/PMFCDM.ipdl
+++ b/dom/media/ipc/PMFCDM.ipdl
@@ -12,6 +12,7 @@ using mozilla::CryptoScheme from "MediaData.h";
using mozilla::dom::MediaKeyMessageType from "mozilla/dom/MediaKeyMessageEventBinding.h";
using mozilla::dom::MediaKeyStatus from "mozilla/dom/MediaKeyStatusMapBinding.h";
using mozilla::dom::HDCPVersion from "mozilla/dom/MediaKeysBinding.h";
+using mozilla::KeySystemConfigRequest from "mozilla/KeySystemConfig.h";
namespace mozilla {
@@ -57,7 +58,8 @@ struct MFCDMCapabilitiesIPDL {
CryptoScheme[] encryptionSchemes;
Requirement distinctiveID;
Requirement persistentState;
- bool isHDCP22Compatible;
+ bool? isHDCP22Compatible;
+ bool isHardwareDecryption;
};
union MFCDMCapabilitiesResult {
@@ -95,12 +97,17 @@ union MFCDMSessionResult {
nsresult;
};
+struct MFCDMCapabilitiesRequest {
+ nsString keySystem;
+ bool isHardwareDecryption;
+};
+
[ManualDealloc]
async protocol PMFCDM
{
manager PRemoteDecoderManager;
parent:
- async GetCapabilities(bool isHwSecured) returns (MFCDMCapabilitiesResult result);
+ async GetCapabilities(MFCDMCapabilitiesRequest request) returns (MFCDMCapabilitiesResult result);
async Init(MFCDMInitParamsIPDL params) returns (MFCDMInitResult result);
async CreateSessionAndGenerateRequest(MFCDMCreateSessionParamsIPDL type)
returns (MFCDMSessionResult result);
diff --git a/dom/media/ipc/PMFMediaEngine.ipdl b/dom/media/ipc/PMFMediaEngine.ipdl
index 8edc44bb81..ebed4e101c 100644
--- a/dom/media/ipc/PMFMediaEngine.ipdl
+++ b/dom/media/ipc/PMFMediaEngine.ipdl
@@ -17,6 +17,7 @@ namespace mozilla {
struct MediaEngineInfoIPDL
{
+ MediaInfoIPDL mediaInfo;
bool preload;
};
@@ -39,7 +40,6 @@ async protocol PMFMediaEngine
parent:
// Return 0 if media engine can't be created.
async InitMediaEngine(MediaEngineInfoIPDL info) returns (uint64_t id);
- async NotifyMediaInfo(MediaInfoIPDL info);
async Play();
async Pause();
async Seek(double targetTimeInSecond);
diff --git a/dom/media/ipc/RemoteMediaDataDecoder.cpp b/dom/media/ipc/RemoteMediaDataDecoder.cpp
index 6db3c0d940..32e1ee6b31 100644
--- a/dom/media/ipc/RemoteMediaDataDecoder.cpp
+++ b/dom/media/ipc/RemoteMediaDataDecoder.cpp
@@ -18,7 +18,12 @@ namespace mozilla {
##__VA_ARGS__)
RemoteMediaDataDecoder::RemoteMediaDataDecoder(RemoteDecoderChild* aChild)
- : mChild(aChild) {
+ : mChild(aChild),
+ mDescription("RemoteMediaDataDecoder"_ns),
+ mProcessName("unknown"_ns),
+ mCodecName("unknown"_ns),
+ mIsHardwareAccelerated(false),
+ mConversion(ConversionRequired::kNeedNone) {
LOG("%p is created", this);
}
@@ -48,6 +53,7 @@ RefPtr<MediaDataDecoder::InitPromise> RemoteMediaDataDecoder::Init() {
->Then(
RemoteDecoderManagerChild::GetManagerThread(), __func__,
[self, this](TrackType aTrack) {
+ MutexAutoLock lock(mMutex);
// If shutdown has started in the meantime shutdown promise may
// be resloved before this task. In this case mChild will be null
// and the init promise has to be canceled.
@@ -127,6 +133,7 @@ RefPtr<ShutdownPromise> RemoteMediaDataDecoder::Shutdown() {
bool RemoteMediaDataDecoder::IsHardwareAccelerated(
nsACString& aFailureReason) const {
+ MutexAutoLock lock(mMutex);
aFailureReason = mHardwareAcceleratedReason;
return mIsHardwareAccelerated;
}
@@ -145,18 +152,24 @@ void RemoteMediaDataDecoder::SetSeekThreshold(const media::TimeUnit& aTime) {
MediaDataDecoder::ConversionRequired RemoteMediaDataDecoder::NeedsConversion()
const {
+ MutexAutoLock lock(mMutex);
return mConversion;
}
nsCString RemoteMediaDataDecoder::GetDescriptionName() const {
+ MutexAutoLock lock(mMutex);
return mDescription;
}
nsCString RemoteMediaDataDecoder::GetProcessName() const {
+ MutexAutoLock lock(mMutex);
return mProcessName;
}
-nsCString RemoteMediaDataDecoder::GetCodecName() const { return mCodecName; }
+nsCString RemoteMediaDataDecoder::GetCodecName() const {
+ MutexAutoLock lock(mMutex);
+ return mCodecName;
+}
#undef LOG
diff --git a/dom/media/ipc/RemoteMediaDataDecoder.h b/dom/media/ipc/RemoteMediaDataDecoder.h
index 4acc5801f7..5d8612529d 100644
--- a/dom/media/ipc/RemoteMediaDataDecoder.h
+++ b/dom/media/ipc/RemoteMediaDataDecoder.h
@@ -53,14 +53,16 @@ class RemoteMediaDataDecoder final
// destructor when we can guarantee no other threads are accessing it). Only
// read from the manager thread.
RefPtr<RemoteDecoderChild> mChild;
+
+ mutable Mutex mMutex{"RemoteMediaDataDecoder"};
+
// Only ever written/modified during decoder initialisation.
- // As such can be accessed from any threads after that.
- nsCString mDescription = "RemoteMediaDataDecoder"_ns;
- nsCString mProcessName = "unknown"_ns;
- nsCString mCodecName = "unknown"_ns;
- bool mIsHardwareAccelerated = false;
- nsCString mHardwareAcceleratedReason;
- ConversionRequired mConversion = ConversionRequired::kNeedNone;
+ nsCString mDescription MOZ_GUARDED_BY(mMutex);
+ nsCString mProcessName MOZ_GUARDED_BY(mMutex);
+ nsCString mCodecName MOZ_GUARDED_BY(mMutex);
+ bool mIsHardwareAccelerated MOZ_GUARDED_BY(mMutex);
+ nsCString mHardwareAcceleratedReason MOZ_GUARDED_BY(mMutex);
+ ConversionRequired mConversion MOZ_GUARDED_BY(mMutex);
};
} // namespace mozilla
diff --git a/dom/media/mediacontrol/ContentMediaController.cpp b/dom/media/mediacontrol/ContentMediaController.cpp
index c0b466ff0f..e1fe574d9b 100644
--- a/dom/media/mediacontrol/ContentMediaController.cpp
+++ b/dom/media/mediacontrol/ContentMediaController.cpp
@@ -229,7 +229,7 @@ void ContentMediaAgent::EnableAction(uint64_t aBrowsingContextId,
}
LOG("Notify to enable action '%s' in BC %" PRId64,
- ToMediaSessionActionStr(aAction), bc->Id());
+ GetEnumString(aAction).get(), bc->Id());
if (XRE_IsContentProcess()) {
ContentChild* contentChild = ContentChild::GetSingleton();
Unused << contentChild->SendNotifyMediaSessionSupportedActionChanged(
@@ -251,7 +251,7 @@ void ContentMediaAgent::DisableAction(uint64_t aBrowsingContextId,
}
LOG("Notify to disable action '%s' in BC %" PRId64,
- ToMediaSessionActionStr(aAction), bc->Id());
+ GetEnumString(aAction).get(), bc->Id());
if (XRE_IsContentProcess()) {
ContentChild* contentChild = ContentChild::GetSingleton();
Unused << contentChild->SendNotifyMediaSessionSupportedActionChanged(
@@ -325,7 +325,7 @@ void ContentMediaController::HandleMediaKey(MediaControlKey aKey) {
if (mReceivers.IsEmpty()) {
return;
}
- LOG("Handle '%s' event, receiver num=%zu", ToMediaControlKeyStr(aKey),
+ LOG("Handle '%s' event, receiver num=%zu", GetEnumString(aKey).get(),
mReceivers.Length());
// We have default handlers for play, pause and stop.
// https://w3c.github.io/mediasession/#ref-for-dom-mediasessionaction-play%E2%91%A3
diff --git a/dom/media/mediacontrol/ContentPlaybackController.cpp b/dom/media/mediacontrol/ContentPlaybackController.cpp
index fcc8e3ab58..ba06ea1cdb 100644
--- a/dom/media/mediacontrol/ContentPlaybackController.cpp
+++ b/dom/media/mediacontrol/ContentPlaybackController.cpp
@@ -46,7 +46,7 @@ void ContentPlaybackController::NotifyContentMediaControlKeyReceiver(
if (RefPtr<ContentMediaControlKeyReceiver> receiver =
ContentMediaControlKeyReceiver::Get(mBC)) {
LOG("Handle '%s' in default behavior for BC %" PRIu64,
- ToMediaControlKeyStr(aKey), mBC->Id());
+ GetEnumString(aKey).get(), mBC->Id());
receiver->HandleMediaKey(aKey);
}
}
@@ -61,7 +61,7 @@ void ContentPlaybackController::NotifyMediaSession(
const MediaSessionActionDetails& aDetails) {
if (RefPtr<MediaSession> session = GetMediaSession()) {
LOG("Handle '%s' in media session behavior for BC %" PRIu64,
- ToMediaSessionActionStr(aDetails.mAction), mBC->Id());
+ GetEnumString(aDetails.mAction).get(), mBC->Id());
MOZ_ASSERT(session->IsActive(), "Notify inactive media session!");
session->NotifyHandler(aDetails);
}
diff --git a/dom/media/mediacontrol/MediaControlKeyManager.cpp b/dom/media/mediacontrol/MediaControlKeyManager.cpp
index ba6ed3a524..b40d3af91e 100644
--- a/dom/media/mediacontrol/MediaControlKeyManager.cpp
+++ b/dom/media/mediacontrol/MediaControlKeyManager.cpp
@@ -161,7 +161,7 @@ void MediaControlKeyManager::SetSupportedMediaKeys(
const MediaKeysArray& aSupportedKeys) {
mSupportedKeys.Clear();
for (const auto& key : aSupportedKeys) {
- LOG_INFO("Supported keys=%s", ToMediaControlKeyStr(key));
+ LOG_INFO("Supported keys=%s", GetEnumString(key).get());
mSupportedKeys.AppendElement(key);
}
if (mEventSource && mEventSource->IsOpened()) {
diff --git a/dom/media/mediacontrol/MediaControlUtils.h b/dom/media/mediacontrol/MediaControlUtils.h
index e4e75e7c97..f013c40aa2 100644
--- a/dom/media/mediacontrol/MediaControlUtils.h
+++ b/dom/media/mediacontrol/MediaControlUtils.h
@@ -20,66 +20,12 @@ extern mozilla::LazyLogModule gMediaControlLog;
namespace mozilla::dom {
-inline const char* ToMediaControlKeyStr(MediaControlKey aKey) {
- switch (aKey) {
- case MediaControlKey::Focus:
- return "Focus";
- case MediaControlKey::Pause:
- return "Pause";
- case MediaControlKey::Play:
- return "Play";
- case MediaControlKey::Playpause:
- return "Play & pause";
- case MediaControlKey::Previoustrack:
- return "Previous track";
- case MediaControlKey::Nexttrack:
- return "Next track";
- case MediaControlKey::Seekbackward:
- return "Seek backward";
- case MediaControlKey::Seekforward:
- return "Seek forward";
- case MediaControlKey::Skipad:
- return "Skip Ad";
- case MediaControlKey::Seekto:
- return "Seek to";
- case MediaControlKey::Stop:
- return "Stop";
- default:
- MOZ_ASSERT_UNREACHABLE("Invalid action.");
- return "Unknown";
- }
-}
-
inline const char* ToMediaControlKeyStr(const Maybe<MediaControlKey>& aKey) {
if (aKey.isNothing()) {
MOZ_ASSERT_UNREACHABLE("Invalid action.");
return "Unknown";
}
- return ToMediaControlKeyStr(aKey.value());
-}
-
-inline const char* ToMediaSessionActionStr(MediaSessionAction aAction) {
- switch (aAction) {
- case MediaSessionAction::Play:
- return "play";
- case MediaSessionAction::Pause:
- return "pause";
- case MediaSessionAction::Seekbackward:
- return "seek backward";
- case MediaSessionAction::Seekforward:
- return "seek forward";
- case MediaSessionAction::Previoustrack:
- return "previous track";
- case MediaSessionAction::Nexttrack:
- return "next track";
- case MediaSessionAction::Skipad:
- return "skip ad";
- case MediaSessionAction::Seekto:
- return "Seek to";
- default:
- MOZ_ASSERT(aAction == MediaSessionAction::Stop);
- return "stop";
- }
+ return GetEnumString(aKey.value()).get();
}
inline MediaControlKey ConvertMediaSessionActionToControlKey(
diff --git a/dom/media/mediacontrol/MediaStatusManager.cpp b/dom/media/mediacontrol/MediaStatusManager.cpp
index 9187e56f25..633ae19a44 100644
--- a/dom/media/mediacontrol/MediaStatusManager.cpp
+++ b/dom/media/mediacontrol/MediaStatusManager.cpp
@@ -338,10 +338,10 @@ void MediaStatusManager::EnableAction(uint64_t aBrowsingContextId,
}
if (info->IsActionSupported(aAction)) {
LOG("Action '%s' has already been enabled for context %" PRIu64,
- ToMediaSessionActionStr(aAction), aBrowsingContextId);
+ GetEnumString(aAction).get(), aBrowsingContextId);
return;
}
- LOG("Enable action %s for context %" PRIu64, ToMediaSessionActionStr(aAction),
+ LOG("Enable action %s for context %" PRIu64, GetEnumString(aAction).get(),
aBrowsingContextId);
info->EnableAction(aAction);
NotifySupportedKeysChangedIfNeeded(aBrowsingContextId);
@@ -355,11 +355,11 @@ void MediaStatusManager::DisableAction(uint64_t aBrowsingContextId,
}
if (!info->IsActionSupported(aAction)) {
LOG("Action '%s' hasn't been enabled yet for context %" PRIu64,
- ToMediaSessionActionStr(aAction), aBrowsingContextId);
+ GetEnumString(aAction).get(), aBrowsingContextId);
return;
}
- LOG("Disable action %s for context %" PRIu64,
- ToMediaSessionActionStr(aAction), aBrowsingContextId);
+ LOG("Disable action %s for context %" PRIu64, GetEnumString(aAction).get(),
+ aBrowsingContextId);
info->DisableAction(aAction);
NotifySupportedKeysChangedIfNeeded(aBrowsingContextId);
}
diff --git a/dom/media/mediacontrol/tests/browser/browser.toml b/dom/media/mediacontrol/tests/browser/browser.toml
index 8b52f2aed4..faeebc0e94 100644
--- a/dom/media/mediacontrol/tests/browser/browser.toml
+++ b/dom/media/mediacontrol/tests/browser/browser.toml
@@ -16,7 +16,6 @@ support-files = [
"file_non_eligible_media.html",
"file_non_looping_media.html",
"head.js",
- "../../../test/bogus.ogv",
"../../../test/gizmo.mp4",
"../../../test/gizmo-noaudio.webm",
"../../../test/gizmo-short.mp4",
diff --git a/dom/media/mediacontrol/tests/browser/file_error_media.html b/dom/media/mediacontrol/tests/browser/file_error_media.html
index 7f54340dd1..dfcdeab65f 100644
--- a/dom/media/mediacontrol/tests/browser/file_error_media.html
+++ b/dom/media/mediacontrol/tests/browser/file_error_media.html
@@ -4,6 +4,6 @@
<title>Error media</title>
</head>
<body>
-<video id="video" src="bogus.ogv"></video>
+<video id="video" src="bogus.webm"></video>
</body>
</html>
diff --git a/dom/media/metrics.yaml b/dom/media/metrics.yaml
index fe2ed5ff6a..58e525174b 100644
--- a/dom/media/metrics.yaml
+++ b/dom/media/metrics.yaml
@@ -117,7 +117,24 @@ media.playback:
first_frame_loaded_time:
description:
How long (in milliseconds) does the our media pipeline take to load
- the first video frame.
+ the first video frame from "the creation of MDSM" to "the first frame
+ loaded".
+ type: quantity
+ metadata_loaded_time:
+ description:
+ How long (in milliseconds) does the our media pipeline take to load
+ the metadata, which happens before finishing loading the first frame.
+ type: quantity
+ total_waiting_data_time:
+ description:
+ How long (in milliseconds) does the our media pipeline has been in a
+ state of waiting video data due to lacking of data before the first
+ frame is loaded.
+ type: quantity
+ buffering_time:
+ description:
+ How long (in milliseconds) does the our media pipeline has been spent
+ on the buffering state before the first frame is loaded.
type: quantity
playback_type:
description:
@@ -139,4 +156,13 @@ media.playback:
key_system:
description: The key system used for the EME playback if exists
type: string
+ hls_decoder:
+ description:
+ This value will only be set on Android. It tells that whether playback
+ is performed by the HLS decoder, which utilizes the external player to
+ play video.
+ type: boolean
+ is_hardware_decoding:
+ description: True if the first frame is decoded by a hardware decoder.
+ type: boolean
expires: never
diff --git a/dom/media/moz.build b/dom/media/moz.build
index c78b794591..ac62e9b67e 100644
--- a/dom/media/moz.build
+++ b/dom/media/moz.build
@@ -200,6 +200,7 @@ EXPORTS += [
"SeekTarget.h",
"SelfRef.h",
"SharedBuffer.h",
+ "TimedPacketizer.h",
"TimeUnits.h",
"Tracing.h",
"VideoFrameContainer.h",
diff --git a/dom/media/ogg/OggDecoder.cpp b/dom/media/ogg/OggDecoder.cpp
index 5f6d61f694..1bacafcf3e 100644
--- a/dom/media/ogg/OggDecoder.cpp
+++ b/dom/media/ogg/OggDecoder.cpp
@@ -24,7 +24,10 @@ bool OggDecoder::IsSupportedType(const MediaContainerType& aContainerType) {
return false;
}
- const bool isOggVideo = (aContainerType.Type() != MEDIAMIMETYPE(AUDIO_OGG));
+ const bool isOggVideo = (aContainerType.Type() == MEDIAMIMETYPE(VIDEO_OGG));
+ if (isOggVideo && !StaticPrefs::media_theora_enabled()) {
+ return false;
+ }
const MediaCodecs& codecs = aContainerType.ExtendedType().Codecs();
if (codecs.IsEmpty()) {
@@ -40,8 +43,9 @@ bool OggDecoder::IsSupportedType(const MediaContainerType& aContainerType) {
}
// Note: Only accept Theora in a video container type, not in an audio
// container type.
- if (isOggVideo && codec.EqualsLiteral("theora")) {
- continue;
+ if (aContainerType.Type() != MEDIAMIMETYPE(AUDIO_OGG) &&
+ codec.EqualsLiteral("theora")) {
+ return StaticPrefs::media_theora_enabled();
}
// Some unsupported codec.
return false;
diff --git a/dom/media/platforms/EncoderConfig.cpp b/dom/media/platforms/EncoderConfig.cpp
new file mode 100644
index 0000000000..ed780b947c
--- /dev/null
+++ b/dom/media/platforms/EncoderConfig.cpp
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "EncoderConfig.h"
+#include "MP4Decoder.h"
+#include "VPXDecoder.h"
+
+namespace mozilla {
+
+CodecType EncoderConfig::CodecTypeForMime(const nsACString& aMimeType) {
+ if (MP4Decoder::IsH264(aMimeType)) {
+ return CodecType::H264;
+ }
+ if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8)) {
+ return CodecType::VP8;
+ }
+ if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)) {
+ return CodecType::VP9;
+ }
+ MOZ_ASSERT_UNREACHABLE("Unsupported Mimetype");
+ return CodecType::Unknown;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/EncoderConfig.h b/dom/media/platforms/EncoderConfig.h
new file mode 100644
index 0000000000..e0da1709d6
--- /dev/null
+++ b/dom/media/platforms/EncoderConfig.h
@@ -0,0 +1,190 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_EncoderConfig_h_
+#define mozilla_EncoderConfig_h_
+
+#include "mozilla/dom/ImageBitmapBinding.h"
+#include "H264.h"
+
+namespace mozilla {
+
+enum class CodecType {
+ _BeginVideo_,
+ H264,
+ VP8,
+ VP9,
+ AV1,
+ _EndVideo_,
+ _BeginAudio_ = _EndVideo_,
+ Opus,
+ Vorbis,
+ Flac,
+ AAC,
+ PCM,
+ G722,
+ _EndAudio_,
+ Unknown,
+};
+
+enum class Usage {
+ Realtime, // Low latency prefered
+ Record
+};
+enum class BitrateMode { Constant, Variable };
+// Scalable Video Coding (SVC) settings for WebCodecs:
+// https://www.w3.org/TR/webrtc-svc/
+enum class ScalabilityMode { None, L1T2, L1T3 };
+
+enum class HardwarePreference { RequireHardware, RequireSoftware, None };
+
+// TODO: Automatically generate this (Bug 1865896)
+const char* GetCodecTypeString(const CodecType& aCodecType);
+
+enum class H264BitStreamFormat { AVC, ANNEXB };
+
+struct H264Specific final {
+ const H264_PROFILE mProfile;
+ const H264_LEVEL mLevel;
+ const H264BitStreamFormat mFormat;
+
+ H264Specific(H264_PROFILE aProfile, H264_LEVEL aLevel,
+ H264BitStreamFormat aFormat)
+ : mProfile(aProfile), mLevel(aLevel), mFormat(aFormat) {}
+};
+
+enum class OpusBitstreamFormat { Opus, OGG };
+
+// The default values come from the Web Codecs specification.
+struct OpusSpecific final {
+ enum class Application { Unspecified, Voip, Audio, RestricedLowDelay };
+ Application mApplication = Application::Unspecified;
+ uint64_t mFrameDuration = 20000; // microseconds
+ uint8_t mComplexity = 10; // 0-10
+ OpusBitstreamFormat mFormat = OpusBitstreamFormat::Opus;
+ uint64_t mPacketLossPerc = 0; // 0-100
+ bool mUseInBandFEC = false;
+ bool mUseDTX = false;
+};
+
+enum class VPXComplexity { Normal, High, Higher, Max };
+struct VP8Specific {
+ VP8Specific() = default;
+ // Ignore webrtc::VideoCodecVP8::errorConcealmentOn,
+ // for it's always false in the codebase (except libwebrtc test cases).
+ VP8Specific(const VPXComplexity aComplexity, const bool aResilience,
+ const uint8_t aNumTemporalLayers, const bool aDenoising,
+ const bool aAutoResize, const bool aFrameDropping)
+ : mComplexity(aComplexity),
+ mResilience(aResilience),
+ mNumTemporalLayers(aNumTemporalLayers),
+ mDenoising(aDenoising),
+ mAutoResize(aAutoResize),
+ mFrameDropping(aFrameDropping) {}
+ const VPXComplexity mComplexity{VPXComplexity::Normal};
+ const bool mResilience{true};
+ const uint8_t mNumTemporalLayers{1};
+ const bool mDenoising{true};
+ const bool mAutoResize{false};
+ const bool mFrameDropping{false};
+};
+
+struct VP9Specific : public VP8Specific {
+ VP9Specific() = default;
+ VP9Specific(const VPXComplexity aComplexity, const bool aResilience,
+ const uint8_t aNumTemporalLayers, const bool aDenoising,
+ const bool aAutoResize, const bool aFrameDropping,
+ const bool aAdaptiveQp, const uint8_t aNumSpatialLayers,
+ const bool aFlexible)
+ : VP8Specific(aComplexity, aResilience, aNumTemporalLayers, aDenoising,
+ aAutoResize, aFrameDropping),
+ mAdaptiveQp(aAdaptiveQp),
+ mNumSpatialLayers(aNumSpatialLayers),
+ mFlexible(aFlexible) {}
+ const bool mAdaptiveQp{true};
+ const uint8_t mNumSpatialLayers{1};
+ const bool mFlexible{false};
+};
+
+// A class that holds the intial configuration of an encoder. For simplicity,
+// this is used for both audio and video encoding. Members irrelevant to the
+// instance are to be ignored, and are set at their default value.
+class EncoderConfig final {
+ public:
+ using PixelFormat = dom::ImageBitmapFormat;
+ using CodecSpecific =
+ Variant<H264Specific, OpusSpecific, VP8Specific, VP9Specific>;
+
+ EncoderConfig(const EncoderConfig& aConfig) = default;
+
+ // This constructor is used for video encoders
+ EncoderConfig(const CodecType aCodecType, gfx::IntSize aSize,
+ const Usage aUsage, const PixelFormat aPixelFormat,
+ const PixelFormat aSourcePixelFormat, const uint8_t aFramerate,
+ const size_t aKeyframeInterval, const uint32_t aBitrate,
+ const BitrateMode aBitrateMode,
+ const HardwarePreference aHardwarePreference,
+ const ScalabilityMode aScalabilityMode,
+ const Maybe<CodecSpecific>& aCodecSpecific)
+ : mCodec(aCodecType),
+ mSize(aSize),
+ mBitrateMode(aBitrateMode),
+ mBitrate(aBitrate),
+ mUsage(aUsage),
+ mHardwarePreference(aHardwarePreference),
+ mPixelFormat(aPixelFormat),
+ mSourcePixelFormat(aSourcePixelFormat),
+ mScalabilityMode(aScalabilityMode),
+ mFramerate(aFramerate),
+ mKeyframeInterval(aKeyframeInterval),
+ mCodecSpecific(aCodecSpecific) {
+ MOZ_ASSERT(IsVideo());
+ }
+
+ // This constructor is used for audio encoders
+ EncoderConfig(const CodecType aCodecType, uint32_t aNumberOfChannels,
+ const BitrateMode aBitrateMode, uint32_t aSampleRate,
+ uint32_t aBitrate, const Maybe<CodecSpecific>& aCodecSpecific)
+ : mCodec(aCodecType),
+ mBitrateMode(aBitrateMode),
+ mBitrate(aBitrate),
+ mNumberOfChannels(aNumberOfChannels),
+ mSampleRate(aSampleRate),
+ mCodecSpecific(aCodecSpecific) {
+ MOZ_ASSERT(IsAudio());
+ }
+
+ static CodecType CodecTypeForMime(const nsACString& aMimeType);
+
+ bool IsVideo() const {
+ return mCodec > CodecType::_BeginVideo_ && mCodec < CodecType::_EndVideo_;
+ }
+
+ bool IsAudio() const {
+ return mCodec > CodecType::_BeginAudio_ && mCodec < CodecType::_EndAudio_;
+ }
+
+ CodecType mCodec{};
+ gfx::IntSize mSize{};
+ BitrateMode mBitrateMode{};
+ uint32_t mBitrate{};
+ Usage mUsage{};
+ // Video-only
+ HardwarePreference mHardwarePreference{};
+ PixelFormat mPixelFormat{};
+ PixelFormat mSourcePixelFormat{};
+ ScalabilityMode mScalabilityMode{};
+ uint8_t mFramerate{};
+ size_t mKeyframeInterval{};
+ // Audio-only
+ uint32_t mNumberOfChannels{};
+ uint32_t mSampleRate{};
+ Maybe<CodecSpecific> mCodecSpecific{};
+};
+
+} // namespace mozilla
+
+#endif // mozilla_EncoderConfig_h_
diff --git a/dom/media/platforms/PlatformEncoderModule.cpp b/dom/media/platforms/PlatformEncoderModule.cpp
index 3eb4abd511..525729e756 100644
--- a/dom/media/platforms/PlatformEncoderModule.cpp
+++ b/dom/media/platforms/PlatformEncoderModule.cpp
@@ -32,6 +32,15 @@ const char* GetCodecTypeString(const CodecType& aCodecType) {
return "_EndVideo_/_BeginAudio_";
case CodecType::Opus:
return "Opus";
+ case CodecType::Vorbis:
+ return "Vorbis";
+ case CodecType::Flac:
+ return "Flac";
+ case CodecType::AAC:
+ return "AAC";
+ case CodecType::PCM:
+ return "PCM";
+ break;
case CodecType::G722:
return "G722";
case CodecType::_EndAudio_:
@@ -100,22 +109,28 @@ struct ConfigurationChangeToString {
return nsPrintfCString("Framerate: %lfHz", aFramerateChange.get().value());
}
nsCString operator()(const BitrateModeChange& aBitrateModeChange) {
- return nsPrintfCString(
- "Bitrate mode: %s",
- aBitrateModeChange.get() == MediaDataEncoder::BitrateMode::Constant
- ? "Constant"
- : "Variable");
+ return nsPrintfCString("Bitrate mode: %s",
+ aBitrateModeChange.get() == BitrateMode::Constant
+ ? "Constant"
+ : "Variable");
}
nsCString operator()(const UsageChange& aUsageChange) {
return nsPrintfCString(
"Usage mode: %s",
- aUsageChange.get() == MediaDataEncoder::Usage::Realtime ? "Realtime"
- : "Recoding");
+ aUsageChange.get() == Usage::Realtime ? "Realtime" : "Recoding");
}
nsCString operator()(const ContentHintChange& aContentHintChange) {
return nsPrintfCString("Content hint: %s",
MaybeToString(aContentHintChange.get()).get());
}
+ nsCString operator()(const SampleRateChange& aSampleRateChange) {
+ return nsPrintfCString("Sample rate %" PRIu32 "Hz",
+ aSampleRateChange.get());
+ }
+ nsCString operator()(const NumberOfChannelsChange& aNumberOfChannelsChange) {
+ return nsPrintfCString("Channels: %" PRIu32 "Hz",
+ aNumberOfChannelsChange.get());
+ }
};
nsString EncoderConfigurationChangeList::ToString() const {
@@ -132,7 +147,9 @@ bool CanLikelyEncode(const EncoderConfig& aConfig) {
if (aConfig.mCodec == CodecType::H264) {
if (!aConfig.mCodecSpecific ||
!aConfig.mCodecSpecific->is<H264Specific>()) {
- LOGD("Error: asking for support codec for h264 without h264 specific config.");
+ LOGD(
+ "Error: asking for support codec for h264 without h264 specific "
+ "config.");
return false;
}
H264Specific specific = aConfig.mCodecSpecific->as<H264Specific>();
diff --git a/dom/media/platforms/PlatformEncoderModule.h b/dom/media/platforms/PlatformEncoderModule.h
index 72dad430e6..222a9bb48c 100644
--- a/dom/media/platforms/PlatformEncoderModule.h
+++ b/dom/media/platforms/PlatformEncoderModule.h
@@ -8,11 +8,8 @@
# define PlatformEncoderModule_h_
# include "MP4Decoder.h"
-# include "MediaData.h"
-# include "MediaInfo.h"
# include "MediaResult.h"
# include "VPXDecoder.h"
-# include "mozilla/Attributes.h"
# include "mozilla/Maybe.h"
# include "mozilla/MozPromise.h"
# include "mozilla/RefPtr.h"
@@ -20,93 +17,14 @@
# include "mozilla/dom/ImageBitmapBinding.h"
# include "nsISupportsImpl.h"
# include "VideoUtils.h"
+# include "EncoderConfig.h"
namespace mozilla {
class MediaDataEncoder;
-class EncoderConfig;
+class MediaData;
struct EncoderConfigurationChangeList;
-enum class CodecType {
- _BeginVideo_,
- H264,
- VP8,
- VP9,
- AV1,
- _EndVideo_,
- _BeginAudio_ = _EndVideo_,
- Opus,
- G722,
- _EndAudio_,
- Unknown,
-};
-
-// TODO: Automatically generate this (Bug 1865896)
-const char* GetCodecTypeString(const CodecType& aCodecType);
-
-enum class H264BitStreamFormat { AVC, ANNEXB };
-
-struct H264Specific final {
- const H264_PROFILE mProfile;
- const H264_LEVEL mLevel;
- const H264BitStreamFormat mFormat;
-
- H264Specific(H264_PROFILE aProfile, H264_LEVEL aLevel,
- H264BitStreamFormat aFormat)
- : mProfile(aProfile), mLevel(aLevel), mFormat(aFormat) {}
-};
-
-struct OpusSpecific final {
- enum class Application { Voip, Audio, RestricedLowDelay };
-
- const Application mApplication;
- const uint8_t mComplexity; // from 0-10
-
- OpusSpecific(const Application aApplication, const uint8_t aComplexity)
- : mApplication(aApplication), mComplexity(aComplexity) {
- MOZ_ASSERT(mComplexity <= 10);
- }
-};
-
-enum class VPXComplexity { Normal, High, Higher, Max };
-struct VP8Specific {
- VP8Specific() = default;
- // Ignore webrtc::VideoCodecVP8::errorConcealmentOn,
- // for it's always false in the codebase (except libwebrtc test cases).
- VP8Specific(const VPXComplexity aComplexity, const bool aResilience,
- const uint8_t aNumTemporalLayers, const bool aDenoising,
- const bool aAutoResize, const bool aFrameDropping)
- : mComplexity(aComplexity),
- mResilience(aResilience),
- mNumTemporalLayers(aNumTemporalLayers),
- mDenoising(aDenoising),
- mAutoResize(aAutoResize),
- mFrameDropping(aFrameDropping) {}
- const VPXComplexity mComplexity{VPXComplexity::Normal};
- const bool mResilience{true};
- const uint8_t mNumTemporalLayers{1};
- const bool mDenoising{true};
- const bool mAutoResize{false};
- const bool mFrameDropping{false};
-};
-
-struct VP9Specific : public VP8Specific {
- VP9Specific() = default;
- VP9Specific(const VPXComplexity aComplexity, const bool aResilience,
- const uint8_t aNumTemporalLayers, const bool aDenoising,
- const bool aAutoResize, const bool aFrameDropping,
- const bool aAdaptiveQp, const uint8_t aNumSpatialLayers,
- const bool aFlexible)
- : VP8Specific(aComplexity, aResilience, aNumTemporalLayers, aDenoising,
- aAutoResize, aFrameDropping),
- mAdaptiveQp(aAdaptiveQp),
- mNumSpatialLayers(aNumSpatialLayers),
- mFlexible(aFlexible) {}
- const bool mAdaptiveQp{true};
- const uint8_t mNumSpatialLayers{1};
- const bool mFlexible{false};
-};
-
class PlatformEncoderModule {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PlatformEncoderModule)
@@ -144,18 +62,6 @@ class MediaDataEncoder {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataEncoder)
- enum class Usage {
- Realtime, // Low latency prefered
- Record
- };
- using PixelFormat = dom::ImageBitmapFormat;
- enum class BitrateMode { Constant, Variable };
- // Scalable Video Coding (SVC) settings for WebCodecs:
- // https://www.w3.org/TR/webrtc-svc/
- enum class ScalabilityMode { None, L1T2, L1T3 };
-
- enum class HardwarePreference { RequireHardware, RequireSoftware, None };
-
static bool IsVideo(const CodecType aCodec) {
return aCodec > CodecType::_BeginVideo_ && aCodec < CodecType::_EndVideo_;
}
@@ -163,8 +69,7 @@ class MediaDataEncoder {
return aCodec > CodecType::_BeginAudio_ && aCodec < CodecType::_EndAudio_;
}
- using InitPromise =
- MozPromise<TrackInfo::TrackType, MediaResult, /* IsExclusive = */ true>;
+ using InitPromise = MozPromise<bool, MediaResult, /* IsExclusive = */ true>;
using EncodedData = nsTArray<RefPtr<MediaRawData>>;
using EncodePromise =
MozPromise<EncodedData, MediaResult, /* IsExclusive = */ true>;
@@ -229,85 +134,6 @@ class MediaDataEncoder {
virtual ~MediaDataEncoder() = default;
};
-class EncoderConfig final {
- public:
- using CodecSpecific =
- Variant<H264Specific, OpusSpecific, VP8Specific, VP9Specific>;
-
- EncoderConfig(const EncoderConfig& aConfig)
- : mCodec(aConfig.mCodec),
- mSize(aConfig.mSize),
- mUsage(aConfig.mUsage),
- mHardwarePreference(aConfig.mHardwarePreference),
- mPixelFormat(aConfig.mPixelFormat),
- mSourcePixelFormat(aConfig.mSourcePixelFormat),
- mScalabilityMode(aConfig.mScalabilityMode),
- mFramerate(aConfig.mFramerate),
- mKeyframeInterval(aConfig.mKeyframeInterval),
- mBitrate(aConfig.mBitrate),
- mBitrateMode(aConfig.mBitrateMode),
- mCodecSpecific(aConfig.mCodecSpecific) {}
-
- template <typename... Ts>
- EncoderConfig(const CodecType aCodecType, gfx::IntSize aSize,
- const MediaDataEncoder::Usage aUsage,
- const MediaDataEncoder::PixelFormat aPixelFormat,
- const MediaDataEncoder::PixelFormat aSourcePixelFormat,
- const uint8_t aFramerate, const size_t aKeyframeInterval,
- const uint32_t aBitrate,
- const MediaDataEncoder::BitrateMode aBitrateMode,
- const MediaDataEncoder::HardwarePreference aHardwarePreference,
- const MediaDataEncoder::ScalabilityMode aScalabilityMode,
- const Maybe<CodecSpecific>& aCodecSpecific)
- : mCodec(aCodecType),
- mSize(aSize),
- mUsage(aUsage),
- mHardwarePreference(aHardwarePreference),
- mPixelFormat(aPixelFormat),
- mSourcePixelFormat(aSourcePixelFormat),
- mScalabilityMode(aScalabilityMode),
- mFramerate(aFramerate),
- mKeyframeInterval(aKeyframeInterval),
- mBitrate(aBitrate),
- mBitrateMode(aBitrateMode),
- mCodecSpecific(aCodecSpecific) {}
-
- static CodecType CodecTypeForMime(const nsACString& aMimeType) {
- if (MP4Decoder::IsH264(aMimeType)) {
- return CodecType::H264;
- }
- if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8)) {
- return CodecType::VP8;
- }
- if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)) {
- return CodecType::VP9;
- }
- MOZ_ASSERT_UNREACHABLE("Unsupported Mimetype");
- return CodecType::Unknown;
- }
-
- bool IsVideo() const {
- return mCodec > CodecType::_BeginVideo_ && mCodec < CodecType::_EndVideo_;
- }
-
- bool IsAudio() const {
- return mCodec > CodecType::_BeginAudio_ && mCodec < CodecType::_EndAudio_;
- }
-
- CodecType mCodec;
- gfx::IntSize mSize;
- MediaDataEncoder::Usage mUsage;
- MediaDataEncoder::HardwarePreference mHardwarePreference;
- MediaDataEncoder::PixelFormat mPixelFormat;
- MediaDataEncoder::PixelFormat mSourcePixelFormat;
- MediaDataEncoder::ScalabilityMode mScalabilityMode;
- uint8_t mFramerate{};
- size_t mKeyframeInterval{};
- uint32_t mBitrate{};
- MediaDataEncoder::BitrateMode mBitrateMode{};
- Maybe<CodecSpecific> mCodecSpecific;
-};
-
// Wrap a type to make it unique. This allows using ergonomically in the Variant
// below. Simply aliasing with `using` isn't enough, because typedefs in C++
// don't produce strong types, so two integer variants result in
@@ -341,20 +167,25 @@ using FramerateChange =
StrongTypedef<Maybe<double>, struct FramerateChangeType>;
// The bitrate mode (variable, constant) of the encoding
using BitrateModeChange =
- StrongTypedef<MediaDataEncoder::BitrateMode, struct BitrateModeChangeType>;
+ StrongTypedef<BitrateMode, struct BitrateModeChangeType>;
// The usage for the encoded stream, this influence latency, ordering, etc.
-using UsageChange =
- StrongTypedef<MediaDataEncoder::Usage, struct UsageChangeType>;
+using UsageChange = StrongTypedef<Usage, struct UsageChangeType>;
// If present, the expected content of the video frames (screen, movie, etc.).
// The value the string can have isn't decided just yet. When absent, the
// encoder uses generic settings.
using ContentHintChange =
StrongTypedef<Maybe<nsString>, struct ContentHintTypeType>;
+// If present, the new sample-rate of the audio
+using SampleRateChange = StrongTypedef<uint32_t, struct SampleRateChangeType>;
+// If present, the new sample-rate of the audio
+using NumberOfChannelsChange =
+ StrongTypedef<uint32_t, struct NumberOfChannelsChangeType>;
// A change to a parameter of an encoder instance.
using EncoderConfigurationItem =
Variant<DimensionsChange, DisplayDimensionsChange, BitrateModeChange,
- BitrateChange, FramerateChange, UsageChange, ContentHintChange>;
+ BitrateChange, FramerateChange, UsageChange, ContentHintChange,
+ SampleRateChange, NumberOfChannelsChange>;
// A list of changes to an encoder configuration, that _might_ be able to change
// on the fly. Not all encoder modules can adjust their configuration on the
diff --git a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
index 7bdc30b432..753dee0238 100644
--- a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
@@ -36,8 +36,9 @@ static bool IsAvailableInDefault(DecoderType type) {
case DecoderType::AV1:
return StaticPrefs::media_av1_enabled();
#endif
- case DecoderType::Opus:
case DecoderType::Theora:
+ return StaticPrefs::media_theora_enabled();
+ case DecoderType::Opus:
case DecoderType::Vorbis:
case DecoderType::VPX:
case DecoderType::Wave:
@@ -56,7 +57,8 @@ static bool IsAvailableInRdd(DecoderType type) {
case DecoderType::Opus:
return StaticPrefs::media_rdd_opus_enabled();
case DecoderType::Theora:
- return StaticPrefs::media_rdd_theora_enabled();
+ return StaticPrefs::media_rdd_theora_enabled() &&
+ StaticPrefs::media_theora_enabled();
case DecoderType::Vorbis:
#if defined(__MINGW32__)
// If this is a MinGW build we need to force AgnosticDecoderModule to
@@ -129,7 +131,8 @@ media::DecodeSupportSet AgnosticDecoderModule::Supports(
(AOMDecoder::IsAV1(mimeType) && IsAvailable(DecoderType::AV1)) ||
#endif
(VPXDecoder::IsVPX(mimeType) && IsAvailable(DecoderType::VPX)) ||
- (TheoraDecoder::IsTheora(mimeType) && IsAvailable(DecoderType::Theora));
+ (TheoraDecoder::IsTheora(mimeType) && IsAvailable(DecoderType::Theora) &&
+ StaticPrefs::media_theora_enabled());
MOZ_LOG(sPDMLog, LogLevel::Debug,
("Agnostic decoder %s requested type '%s'",
supports ? "supports" : "rejects", mimeType.BeginReading()));
@@ -164,7 +167,8 @@ already_AddRefed<MediaDataDecoder> AgnosticDecoderModule::CreateVideoDecoder(
}
}
#endif
- else if (TheoraDecoder::IsTheora(aParams.mConfig.mMimeType)) {
+ else if (TheoraDecoder::IsTheora(aParams.mConfig.mMimeType) &&
+ StaticPrefs::media_theora_enabled()) {
m = new TheoraDecoder(aParams);
}
diff --git a/dom/media/platforms/agnostic/bytestreams/H264.cpp b/dom/media/platforms/agnostic/bytestreams/H264.cpp
index 113be67d0e..ba8d15dc40 100644
--- a/dom/media/platforms/agnostic/bytestreams/H264.cpp
+++ b/dom/media/platforms/agnostic/bytestreams/H264.cpp
@@ -3,16 +3,17 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "H264.h"
-#include <limits>
#include "AnnexB.h"
#include "BitReader.h"
#include "BitWriter.h"
#include "BufferReader.h"
#include "ByteStreamsUtils.h"
#include "ByteWriter.h"
+#include "MediaInfo.h"
#include "mozilla/PodOperations.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/Try.h"
+#include <limits>
#define READSE(var, min, max) \
{ \
diff --git a/dom/media/platforms/agnostic/bytestreams/H264.h b/dom/media/platforms/agnostic/bytestreams/H264.h
index c3651d1a0f..6207a26113 100644
--- a/dom/media/platforms/agnostic/bytestreams/H264.h
+++ b/dom/media/platforms/agnostic/bytestreams/H264.h
@@ -6,11 +6,45 @@
#define MP4_DEMUXER_H264_H_
#include <stdint.h>
-#include "DecoderData.h"
+#include "ErrorList.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Result.h"
+#include "mozilla/Span.h"
+#include "mozilla/gfx/Point.h"
#include "mozilla/gfx/Types.h"
namespace mozilla {
class BitReader;
+class MediaByteBuffer;
+class MediaRawData;
+
+enum H264_PROFILE {
+ H264_PROFILE_UNKNOWN = 0,
+ H264_PROFILE_BASE = 0x42,
+ H264_PROFILE_MAIN = 0x4D,
+ H264_PROFILE_EXTENDED = 0x58,
+ H264_PROFILE_HIGH = 0x64,
+};
+
+enum H264_LEVEL {
+ H264_LEVEL_1 = 10,
+ H264_LEVEL_1_b = 11,
+ H264_LEVEL_1_1 = 11,
+ H264_LEVEL_1_2 = 12,
+ H264_LEVEL_1_3 = 13,
+ H264_LEVEL_2 = 20,
+ H264_LEVEL_2_1 = 21,
+ H264_LEVEL_2_2 = 22,
+ H264_LEVEL_3 = 30,
+ H264_LEVEL_3_1 = 31,
+ H264_LEVEL_3_2 = 32,
+ H264_LEVEL_4 = 40,
+ H264_LEVEL_4_1 = 41,
+ H264_LEVEL_4_2 = 42,
+ H264_LEVEL_5 = 50,
+ H264_LEVEL_5_1 = 51,
+ H264_LEVEL_5_2 = 52
+};
// Spec 7.4.2.1
#define MAX_SPS_COUNT 32
diff --git a/dom/media/platforms/apple/AppleDecoderModule.cpp b/dom/media/platforms/apple/AppleDecoderModule.cpp
index c54593a495..b92369601c 100644
--- a/dom/media/platforms/apple/AppleDecoderModule.cpp
+++ b/dom/media/platforms/apple/AppleDecoderModule.cpp
@@ -13,6 +13,7 @@
#include "MP4Decoder.h"
#include "VideoUtils.h"
#include "VPXDecoder.h"
+#include "AOMDecoder.h"
#include "mozilla/Logging.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/gfx/gfxVars.h"
@@ -34,6 +35,7 @@ using media::MediaCodec;
bool AppleDecoderModule::sInitialized = false;
bool AppleDecoderModule::sCanUseVP9Decoder = false;
+bool AppleDecoderModule::sCanUseAV1Decoder = false;
/* static */
void AppleDecoderModule::Init() {
@@ -45,6 +47,7 @@ void AppleDecoderModule::Init() {
if (RegisterSupplementalVP9Decoder()) {
sCanUseVP9Decoder = CanCreateHWDecoder(MediaCodec::VP9);
}
+ sCanUseAV1Decoder = CanCreateHWDecoder(MediaCodec::AV1);
}
nsresult AppleDecoderModule::Startup() {
@@ -83,7 +86,8 @@ DecodeSupportSet AppleDecoderModule::SupportsMimeType(
const nsACString& aMimeType, DecoderDoctorDiagnostics* aDiagnostics) const {
bool checkSupport = aMimeType.EqualsLiteral("audio/mp4a-latm") ||
MP4Decoder::IsH264(aMimeType) ||
- VPXDecoder::IsVP9(aMimeType);
+ VPXDecoder::IsVP9(aMimeType) ||
+ AOMDecoder::IsAV1(aMimeType);
DecodeSupportSet supportType{};
if (checkSupport) {
@@ -142,6 +146,35 @@ bool AppleDecoderModule::IsVideoSupported(
if (MP4Decoder::IsH264(aConfig.mMimeType)) {
return true;
}
+ if (AOMDecoder::IsAV1(aConfig.mMimeType)) {
+ if (!sCanUseAV1Decoder ||
+ aOptions.contains(
+ CreateDecoderParams::Option::HardwareDecoderNotAllowed)) {
+ return false;
+ }
+
+ // HW AV1 decoder only supports 8 or 10 bit color.
+ if (aConfig.mColorDepth != gfx::ColorDepth::COLOR_8 &&
+ aConfig.mColorDepth != gfx::ColorDepth::COLOR_10) {
+ return false;
+ }
+
+ if (aConfig.mColorSpace.isSome()) {
+ if (*aConfig.mColorSpace == gfx::YUVColorSpace::Identity) {
+ // HW AV1 decoder doesn't support RGB
+ return false;
+ }
+ }
+
+ if (aConfig.mExtraData && aConfig.mExtraData->Length() < 2) {
+ return true; // Assume it's okay.
+ }
+ // top 3 bits are the profile.
+ int profile = aConfig.mExtraData->ElementAt(1) >> 5;
+ // 0 is main profile
+ return profile == 0;
+ }
+
if (!VPXDecoder::IsVP9(aConfig.mMimeType) || !sCanUseVP9Decoder ||
aOptions.contains(
CreateDecoderParams::Option::HardwareDecoderNotAllowed)) {
@@ -187,6 +220,20 @@ bool AppleDecoderModule::CanCreateHWDecoder(MediaCodec aCodec) {
return false;
}
switch (aCodec) {
+ case MediaCodec::AV1: {
+ info.mMimeType = "video/av1";
+
+ // Build up a fake CBox
+ bool hasSeqHdr;
+ AOMDecoder::AV1SequenceInfo seqInfo;
+ AOMDecoder::OperatingPoint op;
+ seqInfo.mOperatingPoints.AppendElement(op);
+ seqInfo.mImage = {1920, 1080};
+ AOMDecoder::WriteAV1CBox(seqInfo, info.mExtraData, hasSeqHdr);
+
+ vtReportsSupport = VTIsHardwareDecodeSupported(kCMVideoCodecType_AV1);
+ break;
+ }
case MediaCodec::VP9:
info.mMimeType = "video/vp9";
VPXDecoder::GetVPCCBox(info.mExtraData, VPXDecoder::VPXStreamInfo());
diff --git a/dom/media/platforms/apple/AppleDecoderModule.h b/dom/media/platforms/apple/AppleDecoderModule.h
index f869243a5c..46b0223d75 100644
--- a/dom/media/platforms/apple/AppleDecoderModule.h
+++ b/dom/media/platforms/apple/AppleDecoderModule.h
@@ -39,6 +39,7 @@ class AppleDecoderModule : public PlatformDecoderModule {
static void Init();
static bool sCanUseVP9Decoder;
+ static bool sCanUseAV1Decoder;
static constexpr int kCMVideoCodecType_H264{'avc1'};
static constexpr int kCMVideoCodecType_VP9{'vp09'};
diff --git a/dom/media/platforms/apple/AppleVTDecoder.cpp b/dom/media/platforms/apple/AppleVTDecoder.cpp
index ae34c2d142..6a70ed19d5 100644
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -18,6 +18,7 @@
#include "MacIOSurfaceImage.h"
#include "MediaData.h"
#include "VPXDecoder.h"
+#include "AOMDecoder.h"
#include "VideoUtils.h"
#include "gfxMacUtils.h"
#include "mozilla/ArrayUtils.h"
@@ -55,6 +56,7 @@ AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
mColorDepth(aConfig.mColorDepth),
mStreamType(MP4Decoder::IsH264(aConfig.mMimeType) ? StreamType::H264
: VPXDecoder::IsVP9(aConfig.mMimeType) ? StreamType::VP9
+ : AOMDecoder::IsAV1(aConfig.mMimeType) ? StreamType::AV1
: StreamType::Unknown),
mTaskQueue(TaskQueue::Create(
GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
@@ -89,7 +91,10 @@ AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
MOZ_ASSERT(mStreamType != StreamType::Unknown);
// TODO: Verify aConfig.mime_type.
LOG("Creating AppleVTDecoder for %dx%d %s video", mDisplayWidth,
- mDisplayHeight, mStreamType == StreamType::H264 ? "H.264" : "VP9");
+ mDisplayHeight,
+ mStreamType == StreamType::H264 ? "H.264"
+ : mStreamType == StreamType::VP9 ? "VP9"
+ : "AV1");
}
AppleVTDecoder::~AppleVTDecoder() { MOZ_COUNT_DTOR(AppleVTDecoder); }
@@ -177,6 +182,9 @@ void AppleVTDecoder::ProcessDecode(MediaRawData* aSample) {
case StreamType::VP9:
flag |= MediaInfoFlag::VIDEO_VP9;
break;
+ case StreamType::AV1:
+ flag |= MediaInfoFlag::VIDEO_AV1;
+ break;
default:
break;
}
@@ -377,6 +385,8 @@ nsCString AppleVTDecoder::GetCodecName() const {
return "h264"_ns;
case StreamType::VP9:
return "vp9"_ns;
+ case StreamType::AV1:
+ return "av1"_ns;
default:
return "unknown"_ns;
}
@@ -598,13 +608,17 @@ MediaResult AppleVTDecoder::InitializeSession() {
OSStatus rv;
AutoCFRelease<CFDictionaryRef> extensions = CreateDecoderExtensions();
+ CMVideoCodecType streamType;
+ if (mStreamType == StreamType::H264) {
+ streamType = kCMVideoCodecType_H264;
+ } else if (mStreamType == StreamType::VP9) {
+ streamType = CMVideoCodecType(AppleDecoderModule::kCMVideoCodecType_VP9);
+ } else {
+ streamType = kCMVideoCodecType_AV1;
+ }
rv = CMVideoFormatDescriptionCreate(
- kCFAllocatorDefault,
- mStreamType == StreamType::H264
- ? kCMVideoCodecType_H264
- : CMVideoCodecType(AppleDecoderModule::kCMVideoCodecType_VP9),
- AssertedCast<int32_t>(mPictureWidth),
+ kCFAllocatorDefault, streamType, AssertedCast<int32_t>(mPictureWidth),
AssertedCast<int32_t>(mPictureHeight), extensions, &mFormat);
if (rv != noErr) {
return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
@@ -626,6 +640,7 @@ MediaResult AppleVTDecoder::InitializeSession() {
&cb, &mSession);
if (rv != noErr) {
+ LOG("AppleVTDecoder: VTDecompressionSessionCreate failed: %d", rv);
return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Couldn't create decompression session!"));
}
@@ -656,7 +671,10 @@ CFDictionaryRef AppleVTDecoder::CreateDecoderExtensions() {
AssertedCast<CFIndex>(mExtraData->Length()));
const void* atomsKey[1];
- atomsKey[0] = mStreamType == StreamType::H264 ? CFSTR("avcC") : CFSTR("vpcC");
+ atomsKey[0] = mStreamType == StreamType::H264 ? CFSTR("avcC")
+ : mStreamType == StreamType::VP9 ? CFSTR("vpcC")
+ : CFSTR("av1C");
+ ;
const void* atomsValue[] = {data};
static_assert(ArrayLength(atomsKey) == ArrayLength(atomsValue),
"Non matching keys/values array size");
diff --git a/dom/media/platforms/apple/AppleVTDecoder.h b/dom/media/platforms/apple/AppleVTDecoder.h
index 5b8f02b86f..a32bec112e 100644
--- a/dom/media/platforms/apple/AppleVTDecoder.h
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -111,7 +111,7 @@ class AppleVTDecoder final : public MediaDataDecoder,
CFDictionaryRef CreateDecoderSpecification();
CFDictionaryRef CreateDecoderExtensions();
- enum class StreamType { Unknown, H264, VP9 };
+ enum class StreamType { Unknown, H264, VP9, AV1 };
const StreamType mStreamType;
const RefPtr<TaskQueue> mTaskQueue;
const uint32_t mMaxRefFrames;
diff --git a/dom/media/platforms/apple/AppleVTEncoder.cpp b/dom/media/platforms/apple/AppleVTEncoder.cpp
index 5ec9abebe2..c464ddd6f3 100644
--- a/dom/media/platforms/apple/AppleVTEncoder.cpp
+++ b/dom/media/platforms/apple/AppleVTEncoder.cpp
@@ -80,9 +80,8 @@ static bool SetConstantBitrate(VTCompressionSessionRef& aSession,
}
static bool SetBitrateAndMode(VTCompressionSessionRef& aSession,
- MediaDataEncoder::BitrateMode aBitrateMode,
- uint32_t aBitsPerSec) {
- if (aBitrateMode == MediaDataEncoder::BitrateMode::Variable) {
+ BitrateMode aBitrateMode, uint32_t aBitsPerSec) {
+ if (aBitrateMode == BitrateMode::Variable) {
return SetAverageBitrate(aSession, aBitsPerSec);
}
return SetConstantBitrate(aSession, aBitsPerSec);
@@ -177,9 +176,8 @@ RefPtr<MediaDataEncoder::InitPromise> AppleVTEncoder::Init() {
if (mConfig.mBitrate) {
if (!SetBitrateAndMode(mSession, mConfig.mBitrateMode, mConfig.mBitrate)) {
LOGE("failed to set bitrate to %d and mode to %s", mConfig.mBitrate,
- mConfig.mBitrateMode == MediaDataEncoder::BitrateMode::Constant
- ? "constant"
- : "variable");
+ mConfig.mBitrateMode == BitrateMode::Constant ? "constant"
+ : "variable");
return InitPromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
"fail to configurate bitrate"),
@@ -228,26 +226,25 @@ RefPtr<MediaDataEncoder::InitPromise> AppleVTEncoder::Init() {
}
mError = NS_OK;
- return InitPromise::CreateAndResolve(TrackInfo::TrackType::kVideoTrack,
- __func__);
+ return InitPromise::CreateAndResolve(true, __func__);
}
-static Maybe<OSType> MapPixelFormat(MediaDataEncoder::PixelFormat aFormat) {
+static Maybe<OSType> MapPixelFormat(dom::ImageBitmapFormat aFormat) {
switch (aFormat) {
- case MediaDataEncoder::PixelFormat::RGBA32:
- case MediaDataEncoder::PixelFormat::BGRA32:
+ case dom::ImageBitmapFormat::RGBA32:
+ case dom::ImageBitmapFormat::BGRA32:
return Some(kCVPixelFormatType_32BGRA);
- case MediaDataEncoder::PixelFormat::RGB24:
+ case dom::ImageBitmapFormat::RGB24:
return Some(kCVPixelFormatType_24RGB);
- case MediaDataEncoder::PixelFormat::BGR24:
+ case dom::ImageBitmapFormat::BGR24:
return Some(kCVPixelFormatType_24BGR);
- case MediaDataEncoder::PixelFormat::GRAY8:
+ case dom::ImageBitmapFormat::GRAY8:
return Some(kCVPixelFormatType_OneComponent8);
- case MediaDataEncoder::PixelFormat::YUV444P:
+ case dom::ImageBitmapFormat::YUV444P:
return Some(kCVPixelFormatType_444YpCbCr8);
- case MediaDataEncoder::PixelFormat::YUV420P:
+ case dom::ImageBitmapFormat::YUV420P:
return Some(kCVPixelFormatType_420YpCbCr8PlanarFullRange);
- case MediaDataEncoder::PixelFormat::YUV420SP_NV12:
+ case dom::ImageBitmapFormat::YUV420SP_NV12:
return Some(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
default:
return Nothing();
@@ -459,11 +456,10 @@ void AppleVTEncoder::OutputFrame(CMSampleBufferRef aBuffer) {
LOGD("::OutputFrame");
RefPtr<MediaRawData> output(new MediaRawData());
-
bool forceAvcc = false;
if (mConfig.mCodecSpecific->is<H264Specific>()) {
forceAvcc = mConfig.mCodecSpecific->as<H264Specific>().mFormat ==
- H264BitStreamFormat::AVC;
+ H264BitStreamFormat::AVC;
}
bool asAnnexB = mConfig.mUsage == Usage::Realtime && !forceAvcc;
bool succeeded = WriteExtraData(output, aBuffer, asAnnexB) &&
@@ -590,7 +586,9 @@ AppleVTEncoder::ProcessReconfigure(
mConfig.mUsage = aChange.get();
return SetRealtime(mSession, aChange.get() == Usage::Realtime);
},
- [&](const ContentHintChange& aChange) -> bool { return false; });
+ [&](const ContentHintChange& aChange) -> bool { return false; },
+ [&](const SampleRateChange& aChange) -> bool { return false; },
+ [&](const NumberOfChannelsChange& aChange) -> bool { return false; });
};
using P = MediaDataEncoder::ReconfigurationPromise;
if (ok) {
@@ -599,18 +597,18 @@ AppleVTEncoder::ProcessReconfigure(
return P::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
-static size_t NumberOfPlanes(MediaDataEncoder::PixelFormat aPixelFormat) {
+static size_t NumberOfPlanes(dom::ImageBitmapFormat aPixelFormat) {
switch (aPixelFormat) {
- case MediaDataEncoder::PixelFormat::RGBA32:
- case MediaDataEncoder::PixelFormat::BGRA32:
- case MediaDataEncoder::PixelFormat::RGB24:
- case MediaDataEncoder::PixelFormat::BGR24:
- case MediaDataEncoder::PixelFormat::GRAY8:
+ case dom::ImageBitmapFormat::RGBA32:
+ case dom::ImageBitmapFormat::BGRA32:
+ case dom::ImageBitmapFormat::RGB24:
+ case dom::ImageBitmapFormat::BGR24:
+ case dom::ImageBitmapFormat::GRAY8:
return 1;
- case MediaDataEncoder::PixelFormat::YUV444P:
- case MediaDataEncoder::PixelFormat::YUV420P:
+ case dom::ImageBitmapFormat::YUV444P:
+ case dom::ImageBitmapFormat::YUV420P:
return 3;
- case MediaDataEncoder::PixelFormat::YUV420SP_NV12:
+ case dom::ImageBitmapFormat::YUV420SP_NV12:
return 2;
default:
LOGE("Unsupported input pixel format");
diff --git a/dom/media/platforms/apple/AppleVTEncoder.h b/dom/media/platforms/apple/AppleVTEncoder.h
index eded46c8c8..c7985a454c 100644
--- a/dom/media/platforms/apple/AppleVTEncoder.h
+++ b/dom/media/platforms/apple/AppleVTEncoder.h
@@ -24,9 +24,8 @@ class AppleVTEncoder final : public MediaDataEncoder {
const RefPtr<TaskQueue>& aTaskQueue)
: mConfig(aConfig),
mTaskQueue(aTaskQueue),
- mHardwareNotAllowed(
- aConfig.mHardwarePreference ==
- MediaDataEncoder::HardwarePreference::RequireSoftware),
+ mHardwareNotAllowed(aConfig.mHardwarePreference ==
+ HardwarePreference::RequireSoftware),
mFramesCompleted(false),
mError(NS_OK),
mSession(nullptr) {
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp b/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp
new file mode 100644
index 0000000000..28db667732
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp
@@ -0,0 +1,458 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FFmpegAudioEncoder.h"
+
+#include "FFmpegRuntimeLinker.h"
+#include "FFmpegLog.h"
+#include "FFmpegUtils.h"
+#include "MediaData.h"
+
+#include "AudioSegment.h"
+
+namespace mozilla {
+
+FFmpegAudioEncoder<LIBAV_VER>::FFmpegAudioEncoder(
+ const FFmpegLibWrapper* aLib, AVCodecID aCodecID,
+ const RefPtr<TaskQueue>& aTaskQueue, const EncoderConfig& aConfig)
+ : FFmpegDataEncoder(aLib, aCodecID, aTaskQueue, aConfig) {}
+
+nsCString FFmpegAudioEncoder<LIBAV_VER>::GetDescriptionName() const {
+#ifdef USING_MOZFFVPX
+ return "ffvpx audio encoder"_ns;
+#else
+ const char* lib =
+# if defined(MOZ_FFMPEG)
+ FFmpegRuntimeLinker::LinkStatusLibraryName();
+# else
+ "no library: ffmpeg disabled during build";
+# endif
+ return nsPrintfCString("ffmpeg audio encoder (%s)", lib);
+#endif
+}
+
+void FFmpegAudioEncoder<LIBAV_VER>::ResamplerDestroy::operator()(
+ SpeexResamplerState* aResampler) {
+ speex_resampler_destroy(aResampler);
+}
+
+nsresult FFmpegAudioEncoder<LIBAV_VER>::InitSpecific() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("FFmpegAudioEncoder::InitInternal");
+
+ // Initialize the common members of the encoder instance
+ AVCodec* codec = FFmpegDataEncoder<LIBAV_VER>::InitCommon();
+ if (!codec) {
+ FFMPEG_LOG("FFmpegDataEncoder::InitCommon failed");
+ return NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR;
+ }
+
+ // Find a compatible input rate for the codec, update the encoder config, and
+ // note the rate at which this instance was configured.
+ mInputSampleRate = AssertedCast<int>(mConfig.mSampleRate);
+ if (codec->supported_samplerates) {
+ // Ensure the sample-rate list is sorted, iterate and either find that the
+ // sample rate is supported, or pick the same rate just above the audio
+ // input sample-rate (as to not lose information). If the audio is higher
+ // than the highest supported sample-rate, down-sample to the highest
+ // sample-rate supported by the codec. This is the case when encoding high
+ // samplerate audio to opus.
+ AutoTArray<int, 16> supportedSampleRates;
+ IterateZeroTerminated(codec->supported_samplerates,
+ [&supportedSampleRates](int aRate) mutable {
+ supportedSampleRates.AppendElement(aRate);
+ });
+ supportedSampleRates.Sort();
+
+ for (const auto& rate : supportedSampleRates) {
+ if (mInputSampleRate == rate) {
+ mConfig.mSampleRate = rate;
+ break;
+ }
+ if (mInputSampleRate < rate) {
+ // This rate is the smallest supported rate above the content's rate.
+ mConfig.mSampleRate = rate;
+ break;
+ }
+ if (mInputSampleRate > rate) {
+ mConfig.mSampleRate = rate;
+ }
+ }
+ }
+
+ if (mConfig.mSampleRate != AssertedCast<uint32_t>(mInputSampleRate)) {
+ // Need to resample to targetRate
+ int err;
+ SpeexResamplerState* resampler = speex_resampler_init(
+ mConfig.mNumberOfChannels, mInputSampleRate, mConfig.mSampleRate,
+ SPEEX_RESAMPLER_QUALITY_DEFAULT, &err);
+ if (!err) {
+ mResampler.reset(resampler);
+ } else {
+ FFMPEG_LOG(
+ "Error creating resampler in FFmpegAudioEncoder %dHz -> %dHz (%dch)",
+ mInputSampleRate, mConfig.mSampleRate, mConfig.mNumberOfChannels);
+ }
+ }
+
+ // And now the audio-specific part
+ mCodecContext->sample_rate = AssertedCast<int>(mConfig.mSampleRate);
+ mCodecContext->channels = AssertedCast<int>(mConfig.mNumberOfChannels);
+
+#if LIBAVCODEC_VERSION_MAJOR >= 60
+ // Gecko's ordering intentionally matches ffmpeg's ordering
+ mLib->av_channel_layout_default(&mCodecContext->ch_layout,
+ AssertedCast<int>(mCodecContext->channels));
+#endif
+
+ switch (mConfig.mCodec) {
+ case CodecType::Opus:
+ // When using libopus, ffmpeg supports interleaved float and s16 input.
+ mCodecContext->sample_fmt = AV_SAMPLE_FMT_FLT;
+ break;
+ case CodecType::Vorbis:
+ // When using libvorbis, ffmpeg only supports planar f32 input.
+ mCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Not supported");
+ }
+
+ if (mConfig.mCodec == CodecType::Opus) {
+ // Default is VBR
+ if (mConfig.mBitrateMode == BitrateMode::Constant) {
+ mLib->av_opt_set(mCodecContext->priv_data, "vbr", "off", 0);
+ }
+ if (mConfig.mCodecSpecific.isSome()) {
+ MOZ_ASSERT(mConfig.mCodecSpecific->is<OpusSpecific>());
+ const OpusSpecific& specific = mConfig.mCodecSpecific->as<OpusSpecific>();
+ // This attribute maps directly to complexity
+ mCodecContext->compression_level = specific.mComplexity;
+ FFMPEG_LOG("Opus complexity set to %d", specific.mComplexity);
+ float frameDurationMs =
+ AssertedCast<float>(specific.mFrameDuration) / 1000.f;
+ if (mLib->av_opt_set_double(mCodecContext->priv_data, "frame_duration",
+ frameDurationMs, 0)) {
+ FFMPEG_LOG("Error setting the frame duration on Opus encoder");
+ return NS_ERROR_FAILURE;
+ }
+ FFMPEG_LOG("Opus frame duration set to %0.2f", frameDurationMs);
+ if (specific.mPacketLossPerc) {
+ if (mLib->av_opt_set_int(
+ mCodecContext->priv_data, "packet_loss",
+ AssertedCast<int64_t>(specific.mPacketLossPerc), 0)) {
+ FFMPEG_LOG("Error setting the packet loss percentage to %" PRIu64
+ " on Opus encoder",
+ specific.mPacketLossPerc);
+ return NS_ERROR_FAILURE;
+ }
+ FFMPEG_LOGV("Packet loss set to %d%% in Opus encoder",
+ AssertedCast<int>(specific.mPacketLossPerc));
+ }
+ if (specific.mUseInBandFEC) {
+ if (mLib->av_opt_set(mCodecContext->priv_data, "fec", "on", 0)) {
+ FFMPEG_LOG("Error %s FEC on Opus encoder",
+ specific.mUseInBandFEC ? "enabling" : "disabling");
+ return NS_ERROR_FAILURE;
+ }
+ FFMPEG_LOGV("In-band FEC enabled for Opus encoder.");
+ }
+ if (specific.mUseDTX) {
+ if (mLib->av_opt_set(mCodecContext->priv_data, "dtx", "on", 0)) {
+ FFMPEG_LOG("Error %s DTX on Opus encoder",
+ specific.mUseDTX ? "enabling" : "disabling");
+ return NS_ERROR_FAILURE;
+ }
+ // DTX packets are a TOC byte, and possibly one byte of length, packets
+ // 3 bytes and larger are to be returned.
+ mDtxThreshold = 3;
+ }
+ // TODO: format
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1876066
+ }
+ }
+ // Override the time base: always the sample-rate the encoder is running at
+ mCodecContext->time_base =
+ AVRational{.num = 1, .den = mCodecContext->sample_rate};
+
+ MediaResult rv = FinishInitCommon(codec);
+ if (NS_FAILED(rv)) {
+ FFMPEG_LOG("FFmpeg encode initialization failure.");
+ return rv.Code();
+ }
+
+ return NS_OK;
+}
+
+// avcodec_send_frame and avcodec_receive_packet were introduced in version 58.
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+
+Result<MediaDataEncoder::EncodedData, nsresult>
+FFmpegAudioEncoder<LIBAV_VER>::EncodeOnePacket(Span<float> aSamples,
+ media::TimeUnit aPts) {
+ // Allocate AVFrame.
+ if (!PrepareFrame()) {
+ FFMPEG_LOG("failed to allocate frame");
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+
+ uint32_t frameCount = aSamples.Length() / mConfig.mNumberOfChannels;
+
+ // This method assumes that the audio has been packetized appropriately --
+ // packets smaller than the packet size are allowed when draining.
+ MOZ_ASSERT(AssertedCast<int>(frameCount) <= mCodecContext->frame_size);
+
+ mFrame->channels = AssertedCast<int>(mConfig.mNumberOfChannels);
+
+# if LIBAVCODEC_VERSION_MAJOR >= 60
+ int rv = mLib->av_channel_layout_copy(&mFrame->ch_layout,
+ &mCodecContext->ch_layout);
+ if (rv < 0) {
+ FFMPEG_LOG("channel layout copy error: %s",
+ MakeErrorString(mLib, rv).get());
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+# endif
+
+ mFrame->sample_rate = AssertedCast<int>(mConfig.mSampleRate);
+ // Not a mistake, nb_samples is per channel in ffmpeg
+ mFrame->nb_samples = AssertedCast<int>(frameCount);
+ // Audio is converted below if needed
+ mFrame->format = mCodecContext->sample_fmt;
+ // Set presentation timestamp and duration of the AVFrame.
+# if LIBAVCODEC_VERSION_MAJOR >= 59
+ mFrame->time_base =
+ AVRational{.num = 1, .den = static_cast<int>(mConfig.mSampleRate)};
+# endif
+ mFrame->pts = aPts.ToTicksAtRate(mConfig.mSampleRate);
+ mFrame->pkt_duration = frameCount;
+# if LIBAVCODEC_VERSION_MAJOR >= 60
+ mFrame->duration = frameCount;
+# else
+ // Save duration in the time_base unit.
+ mDurationMap.Insert(mFrame->pts, mFrame->pkt_duration);
+# endif
+
+ if (int ret = mLib->av_frame_get_buffer(mFrame, 16); ret < 0) {
+ FFMPEG_LOG("failed to allocate frame data: %s",
+ MakeErrorString(mLib, ret).get());
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+
+ // Make sure AVFrame is writable.
+ if (int ret = mLib->av_frame_make_writable(mFrame); ret < 0) {
+ FFMPEG_LOG("failed to make frame writable: %s",
+ MakeErrorString(mLib, ret).get());
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+
+ // The input is always in f32 interleaved for now
+ if (mCodecContext->sample_fmt == AV_SAMPLE_FMT_FLT) {
+ PodCopy(reinterpret_cast<float*>(mFrame->data[0]), aSamples.data(),
+ aSamples.Length());
+ } else {
+ MOZ_ASSERT(mCodecContext->sample_fmt == AV_SAMPLE_FMT_FLTP);
+ for (uint32_t i = 0; i < mConfig.mNumberOfChannels; i++) {
+ DeinterleaveAndConvertBuffer(aSamples.data(), mFrame->nb_samples,
+ mFrame->channels, mFrame->data);
+ }
+ }
+
+ // Now send the AVFrame to ffmpeg for encoding, same code for audio and video.
+ return FFmpegDataEncoder<LIBAV_VER>::EncodeWithModernAPIs();
+}
+
+Result<MediaDataEncoder::EncodedData, nsresult> FFmpegAudioEncoder<
+ LIBAV_VER>::EncodeInputWithModernAPIs(RefPtr<const MediaData> aSample) {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+ MOZ_ASSERT(mCodecContext);
+ MOZ_ASSERT(aSample);
+
+ RefPtr<const AudioData> sample(aSample->As<AudioData>());
+
+ FFMPEG_LOG("Encoding %" PRIu32 " frames of audio at pts: %s",
+ sample->Frames(), sample->mTime.ToString().get());
+
+ if ((!mResampler && sample->mRate != mConfig.mSampleRate) ||
+ (mResampler &&
+ sample->mRate != AssertedCast<uint32_t>(mInputSampleRate)) ||
+ sample->mChannels != mConfig.mNumberOfChannels) {
+ FFMPEG_LOG(
+ "Rate or sample-rate at the inputof the encoder different from what "
+ "has been configured initially, erroring out");
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ }
+
+ // ffmpeg expects exactly sized input audio packets most of the time.
+ // Packetization is performed if needed, and audio packets of the correct size
+ // are fed to ffmpeg, with timestamps extrapolated the timestamp found on
+ // the input MediaData.
+
+ if (!mPacketizer) {
+ media::TimeUnit basePts = media::TimeUnit::Zero(mConfig.mSampleRate);
+ basePts += sample->mTime;
+ mPacketizer.emplace(mCodecContext->frame_size, sample->mChannels,
+ basePts.ToTicksAtRate(mConfig.mSampleRate),
+ mConfig.mSampleRate);
+ }
+
+ if (!mFirstPacketPts.IsValid()) {
+ mFirstPacketPts = sample->mTime;
+ }
+
+ Span<float> audio = sample->Data();
+
+ if (mResampler) {
+ // Ensure that all input frames are consumed each time by oversizing the
+ // output buffer.
+ int bufferLengthGuess = std::ceil(2. * static_cast<float>(audio.size()) *
+ mConfig.mSampleRate / mInputSampleRate);
+ mTempBuffer.SetLength(bufferLengthGuess);
+ uint32_t inputFrames = audio.size() / mConfig.mNumberOfChannels;
+ uint32_t inputFramesProcessed = inputFrames;
+ uint32_t outputFrames = bufferLengthGuess / mConfig.mNumberOfChannels;
+ DebugOnly<int> rv = speex_resampler_process_interleaved_float(
+ mResampler.get(), audio.data(), &inputFramesProcessed,
+ mTempBuffer.Elements(), &outputFrames);
+ audio = Span<float>(mTempBuffer.Elements(),
+ outputFrames * mConfig.mNumberOfChannels);
+ MOZ_ASSERT(inputFrames == inputFramesProcessed,
+ "increate the buffer to consume all input each time");
+ MOZ_ASSERT(rv == RESAMPLER_ERR_SUCCESS);
+ }
+
+ EncodedData output;
+ MediaResult rv = NS_OK;
+
+ mPacketizer->Input(audio.data(), audio.Length() / mConfig.mNumberOfChannels);
+
+ // Dequeue and encode each packet
+ while (mPacketizer->PacketsAvailable() && rv.Code() == NS_OK) {
+ mTempBuffer.SetLength(mCodecContext->frame_size *
+ mConfig.mNumberOfChannels);
+ media::TimeUnit pts = mPacketizer->Output(mTempBuffer.Elements());
+ auto audio = Span(mTempBuffer.Elements(), mTempBuffer.Length());
+ FFMPEG_LOG("Encoding %" PRIu32 " frames, pts: %s",
+ mPacketizer->PacketSize(), pts.ToString().get());
+ auto encodeResult = EncodeOnePacket(audio, pts);
+ if (encodeResult.isOk()) {
+ output.AppendElements(std::move(encodeResult.unwrap()));
+ } else {
+ return encodeResult;
+ }
+ pts += media::TimeUnit(mPacketizer->PacketSize(), mConfig.mSampleRate);
+ }
+ return Result<MediaDataEncoder::EncodedData, nsresult>(std::move(output));
+}
+
+Result<MediaDataEncoder::EncodedData, nsresult>
+FFmpegAudioEncoder<LIBAV_VER>::DrainWithModernAPIs() {
+ // If there's no packetizer, or it's empty, we can proceed immediately.
+ if (!mPacketizer || mPacketizer->FramesAvailable() == 0) {
+ return FFmpegDataEncoder<LIBAV_VER>::DrainWithModernAPIs();
+ }
+ EncodedData output;
+ MediaResult rv = NS_OK;
+ // Dequeue and encode each packet
+ mTempBuffer.SetLength(mCodecContext->frame_size *
+ mPacketizer->ChannelCount());
+ uint32_t written;
+ media::TimeUnit pts = mPacketizer->Drain(mTempBuffer.Elements(), written);
+ auto audio =
+ Span(mTempBuffer.Elements(), written * mPacketizer->ChannelCount());
+ auto encodeResult = EncodeOnePacket(audio, pts);
+ if (encodeResult.isOk()) {
+ auto array = encodeResult.unwrap();
+ output.AppendElements(std::move(array));
+ } else {
+ return encodeResult;
+ }
+ // Now, drain the encoder
+ auto drainResult = FFmpegDataEncoder<LIBAV_VER>::DrainWithModernAPIs();
+ if (drainResult.isOk()) {
+ auto array = drainResult.unwrap();
+ output.AppendElements(std::move(array));
+ } else {
+ return drainResult;
+ }
+ return Result<MediaDataEncoder::EncodedData, nsresult>(std::move(output));
+}
+#endif // if LIBAVCODEC_VERSION_MAJOR >= 58
+
+RefPtr<MediaRawData> FFmpegAudioEncoder<LIBAV_VER>::ToMediaRawData(
+ AVPacket* aPacket) {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+ MOZ_ASSERT(aPacket);
+
+ if (aPacket->size < mDtxThreshold) {
+ FFMPEG_LOG(
+ "DTX enabled and packet is %d bytes (threshold %d), not returning.",
+ aPacket->size, mDtxThreshold);
+ return nullptr;
+ }
+
+ RefPtr<MediaRawData> data = ToMediaRawDataCommon(aPacket);
+
+ data->mTime = media::TimeUnit(aPacket->pts, mConfig.mSampleRate);
+ data->mTimecode = data->mTime;
+ data->mDuration =
+ media::TimeUnit(mCodecContext->frame_size, mConfig.mSampleRate);
+
+ // Handle encoder delay
+ // Tracked in https://github.com/w3c/webcodecs/issues/626 because not quite
+ // specced yet.
+ if (mFirstPacketPts > data->mTime) {
+ data->mOriginalPresentationWindow =
+ Some(media::TimeInterval{data->mTime, data->GetEndTime()});
+ // Duration is likely to be adjusted when the above spec issue is fixed. For
+ // now, leave it as-is
+ // data->mDuration -= (mFirstPacketPts - data->mTime);
+ // if (data->mDuration.IsNegative()) {
+ // data->mDuration = media::TimeUnit::Zero();
+ // }
+ data->mTime = mFirstPacketPts;
+ }
+
+ if (mPacketsDelivered++ == 0) {
+ // Attach extradata, and the config (including any channel / samplerate
+ // modification to fit the encoder requirements), if needed.
+ if (auto r = GetExtraData(aPacket); r.isOk()) {
+ data->mExtraData = r.unwrap();
+ }
+ data->mConfig = MakeUnique<EncoderConfig>(mConfig);
+ }
+
+ if (data->mExtraData) {
+ FFMPEG_LOG(
+ "FFmpegAudioEncoder out: [%s,%s] (%zu bytes, extradata %zu bytes)",
+ data->mTime.ToString().get(), data->mDuration.ToString().get(),
+ data->Size(), data->mExtraData->Length());
+ } else {
+ FFMPEG_LOG("FFmpegAudioEncoder out: [%s,%s] (%zu bytes)",
+ data->mTime.ToString().get(), data->mDuration.ToString().get(),
+ data->Size());
+ }
+
+ return data;
+}
+
+Result<already_AddRefed<MediaByteBuffer>, nsresult>
+FFmpegAudioEncoder<LIBAV_VER>::GetExtraData(AVPacket* /* aPacket */) {
+ if (!mCodecContext->extradata_size) {
+ return Err(NS_ERROR_NOT_AVAILABLE);
+ }
+ // Create extra data -- they are on the context.
+ auto extraData = MakeRefPtr<MediaByteBuffer>();
+ extraData->SetLength(mCodecContext->extradata_size);
+ MOZ_ASSERT(extraData);
+ PodCopy(extraData->Elements(), mCodecContext->extradata,
+ mCodecContext->extradata_size);
+ return extraData.forget();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.h b/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.h
new file mode 100644
index 0000000000..51b0bfa44e
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGAUDIOENCODER_H_
+#define DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGAUDIOENCODER_H_
+
+#include "FFmpegDataEncoder.h"
+#include "FFmpegLibWrapper.h"
+#include "PlatformEncoderModule.h"
+#include "TimedPacketizer.h"
+
+// This must be the last header included
+#include "FFmpegLibs.h"
+#include "speex/speex_resampler.h"
+
+namespace mozilla {
+
+template <int V>
+class FFmpegAudioEncoder : public MediaDataEncoder {};
+
+template <>
+class FFmpegAudioEncoder<LIBAV_VER> : public FFmpegDataEncoder<LIBAV_VER> {
+ public:
+ FFmpegAudioEncoder(const FFmpegLibWrapper* aLib, AVCodecID aCodecID,
+ const RefPtr<TaskQueue>& aTaskQueue,
+ const EncoderConfig& aConfig);
+
+ nsCString GetDescriptionName() const override;
+
+ protected:
+ // Methods only called on mTaskQueue.
+ virtual nsresult InitSpecific() override;
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+ Result<EncodedData, nsresult> EncodeOnePacket(Span<float> aSamples,
+ media::TimeUnit aPts);
+ Result<EncodedData, nsresult> EncodeInputWithModernAPIs(
+ RefPtr<const MediaData> aSample) override;
+ Result<MediaDataEncoder::EncodedData, nsresult> DrainWithModernAPIs()
+ override;
+#endif
+ virtual RefPtr<MediaRawData> ToMediaRawData(AVPacket* aPacket) override;
+ Result<already_AddRefed<MediaByteBuffer>, nsresult> GetExtraData(
+ AVPacket* aPacket) override;
+ // Most audio codecs (except PCM) require a very specific frame size.
+ Maybe<TimedPacketizer<float, float>> mPacketizer;
+ // A temporary buffer kept around for shuffling audio frames, resampling,
+ // packetization, etc.
+ nsTArray<float> mTempBuffer;
+ // The pts of the first packet this encoder has seen, to be able to properly
+ // mark encoder delay as such.
+ media::TimeUnit mFirstPacketPts{media::TimeUnit::Invalid()};
+ struct ResamplerDestroy {
+ void operator()(SpeexResamplerState* aResampler);
+ };
+ // Rate at which this instance has been configured, which might be different
+ // from the rate the underlying encoder is running at.
+ int mInputSampleRate = 0;
+ UniquePtr<SpeexResamplerState, ResamplerDestroy> mResampler;
+ uint64_t mPacketsDelivered = 0;
+ // Threshold under which a packet isn't returned to the encoder user,
+ // because it is known to be silent and DTX is enabled.
+ int mDtxThreshold = 0;
+};
+
+} // namespace mozilla
+
+#endif // DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGAUDIOENCODER_H_
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
index 1acfc26a4c..30422987cf 100644
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -17,30 +17,14 @@
#include "mozilla/TaskQueue.h"
#include "prsystem.h"
#include "VideoUtils.h"
+#include "FFmpegUtils.h"
+
#include "FFmpegLibs.h"
namespace mozilla {
StaticMutex FFmpegDataDecoder<LIBAV_VER>::sMutex;
-static bool IsVideoCodec(AVCodecID aCodecID) {
- switch (aCodecID) {
- case AV_CODEC_ID_H264:
-#if LIBAVCODEC_VERSION_MAJOR >= 54
- case AV_CODEC_ID_VP8:
-#endif
-#if LIBAVCODEC_VERSION_MAJOR >= 55
- case AV_CODEC_ID_VP9:
-#endif
-#if LIBAVCODEC_VERSION_MAJOR >= 59
- case AV_CODEC_ID_AV1:
-#endif
- return true;
- default:
- return false;
- }
-}
-
FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FFmpegLibWrapper* aLib,
AVCodecID aCodecID)
: mLib(aLib),
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataEncoder.cpp b/dom/media/platforms/ffmpeg/FFmpegDataEncoder.cpp
new file mode 100644
index 0000000000..6b97a48156
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegDataEncoder.cpp
@@ -0,0 +1,495 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FFmpegDataEncoder.h"
+#include "PlatformEncoderModule.h"
+
+#include <utility>
+
+#include "FFmpegLog.h"
+#include "libavutil/error.h"
+#include "mozilla/StaticMutex.h"
+
+#include "FFmpegUtils.h"
+
+namespace mozilla {
+
+// TODO: Remove this function and simply use `avcodec_find_encoder` once
+// libopenh264 is supported.
+static AVCodec* FindEncoderWithPreference(const FFmpegLibWrapper* aLib,
+ AVCodecID aCodecId) {
+ MOZ_ASSERT(aLib);
+
+ AVCodec* codec = nullptr;
+
+ // Prioritize libx264 for now since it's the only h264 codec we tested.
+ if (aCodecId == AV_CODEC_ID_H264) {
+ codec = aLib->avcodec_find_encoder_by_name("libx264");
+ if (codec) {
+ FFMPEGV_LOG("Prefer libx264 for h264 codec");
+ return codec;
+ }
+ FFMPEGV_LOG("Fallback to other h264 library. Fingers crossed");
+ }
+
+ return aLib->avcodec_find_encoder(aCodecId);
+}
+
+template <>
+AVCodecID GetFFmpegEncoderCodecId<LIBAV_VER>(CodecType aCodec) {
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+ if (aCodec == CodecType::VP8) {
+ return AV_CODEC_ID_VP8;
+ }
+
+ if (aCodec == CodecType::VP9) {
+ return AV_CODEC_ID_VP9;
+ }
+
+# if !defined(USING_MOZFFVPX)
+ if (aCodec == CodecType::H264) {
+ return AV_CODEC_ID_H264;
+ }
+# endif
+
+ if (aCodec == CodecType::AV1) {
+ return AV_CODEC_ID_AV1;
+ }
+
+ if (aCodec == CodecType::Opus) {
+ return AV_CODEC_ID_OPUS;
+ }
+
+ if (aCodec == CodecType::Vorbis) {
+ return AV_CODEC_ID_VORBIS;
+ }
+#endif
+ return AV_CODEC_ID_NONE;
+}
+
+StaticMutex FFmpegDataEncoder<LIBAV_VER>::sMutex;
+
+FFmpegDataEncoder<LIBAV_VER>::FFmpegDataEncoder(
+ const FFmpegLibWrapper* aLib, AVCodecID aCodecID,
+ const RefPtr<TaskQueue>& aTaskQueue, const EncoderConfig& aConfig)
+ : mLib(aLib),
+ mCodecID(aCodecID),
+ mTaskQueue(aTaskQueue),
+ mConfig(aConfig),
+ mCodecName(EmptyCString()),
+ mCodecContext(nullptr),
+ mFrame(nullptr),
+ mVideoCodec(IsVideoCodec(aCodecID)) {
+ MOZ_ASSERT(mLib);
+ MOZ_ASSERT(mTaskQueue);
+#if LIBAVCODEC_VERSION_MAJOR < 58
+ MOZ_CRASH("FFmpegDataEncoder needs ffmpeg 58 at least.");
+#endif
+};
+
+RefPtr<MediaDataEncoder::InitPromise> FFmpegDataEncoder<LIBAV_VER>::Init() {
+ FFMPEG_LOG("Init");
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &FFmpegDataEncoder::ProcessInit);
+}
+
+RefPtr<MediaDataEncoder::EncodePromise> FFmpegDataEncoder<LIBAV_VER>::Encode(
+ const MediaData* aSample) {
+ MOZ_ASSERT(aSample != nullptr);
+
+ FFMPEG_LOG("Encode");
+ return InvokeAsync(mTaskQueue, __func__,
+ [self = RefPtr<FFmpegDataEncoder<LIBAV_VER>>(this),
+ sample = RefPtr<const MediaData>(aSample)]() {
+ return self->ProcessEncode(sample);
+ });
+}
+
+RefPtr<MediaDataEncoder::ReconfigurationPromise>
+FFmpegDataEncoder<LIBAV_VER>::Reconfigure(
+ const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges) {
+ return InvokeAsync<const RefPtr<const EncoderConfigurationChangeList>>(
+ mTaskQueue, this, __func__,
+ &FFmpegDataEncoder<LIBAV_VER>::ProcessReconfigure, aConfigurationChanges);
+}
+
+RefPtr<MediaDataEncoder::EncodePromise> FFmpegDataEncoder<LIBAV_VER>::Drain() {
+ FFMPEG_LOG("Drain");
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &FFmpegDataEncoder::ProcessDrain);
+}
+
+RefPtr<ShutdownPromise> FFmpegDataEncoder<LIBAV_VER>::Shutdown() {
+ FFMPEG_LOG("Shutdown");
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &FFmpegDataEncoder::ProcessShutdown);
+}
+
+RefPtr<GenericPromise> FFmpegDataEncoder<LIBAV_VER>::SetBitrate(
+ uint32_t aBitrate) {
+ FFMPEG_LOG("SetBitrate");
+ return GenericPromise::CreateAndReject(NS_ERROR_NOT_IMPLEMENTED, __func__);
+}
+
+RefPtr<MediaDataEncoder::InitPromise>
+FFmpegDataEncoder<LIBAV_VER>::ProcessInit() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("ProcessInit");
+ nsresult rv = InitSpecific();
+ return NS_FAILED(rv) ? InitPromise::CreateAndReject(rv, __func__)
+ : InitPromise::CreateAndResolve(true, __func__);
+}
+
+RefPtr<MediaDataEncoder::EncodePromise>
+FFmpegDataEncoder<LIBAV_VER>::ProcessEncode(RefPtr<const MediaData> aSample) {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("ProcessEncode");
+
+#if LIBAVCODEC_VERSION_MAJOR < 58
+ // TODO(Bug 1868253): implement encode with avcodec_encode_video2().
+ MOZ_CRASH("FFmpegDataEncoder needs ffmpeg 58 at least.");
+ return EncodePromise::CreateAndReject(NS_ERROR_NOT_IMPLEMENTED, __func__);
+#else
+
+ auto rv = EncodeInputWithModernAPIs(std::move(aSample));
+ if (rv.isErr()) {
+ return EncodePromise::CreateAndReject(rv.inspectErr(), __func__);
+ }
+
+ return EncodePromise::CreateAndResolve(rv.unwrap(), __func__);
+#endif
+}
+
+RefPtr<MediaDataEncoder::ReconfigurationPromise>
+FFmpegDataEncoder<LIBAV_VER>::ProcessReconfigure(
+ const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges) {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("ProcessReconfigure");
+
+ // Tracked in bug 1869583 -- for now this encoder always reports it cannot be
+ // reconfigured on the fly
+ return MediaDataEncoder::ReconfigurationPromise::CreateAndReject(
+ NS_ERROR_NOT_IMPLEMENTED, __func__);
+}
+
+RefPtr<MediaDataEncoder::EncodePromise>
+FFmpegDataEncoder<LIBAV_VER>::ProcessDrain() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("ProcessDrain");
+
+#if LIBAVCODEC_VERSION_MAJOR < 58
+ MOZ_CRASH("FFmpegDataEncoder needs ffmpeg 58 at least.");
+ return EncodePromise::CreateAndReject(NS_ERROR_NOT_IMPLEMENTED, __func__);
+#else
+ auto rv = DrainWithModernAPIs();
+ if (rv.isErr()) {
+ return EncodePromise::CreateAndReject(rv.inspectErr(), __func__);
+ }
+ return EncodePromise::CreateAndResolve(rv.unwrap(), __func__);
+#endif
+}
+
+RefPtr<ShutdownPromise> FFmpegDataEncoder<LIBAV_VER>::ProcessShutdown() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("ProcessShutdown");
+
+ ShutdownInternal();
+
+ // Don't shut mTaskQueue down since it's owned by others.
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+AVCodec* FFmpegDataEncoder<LIBAV_VER>::InitCommon() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("FFmpegDataEncoder::InitCommon");
+
+ AVCodec* codec = FindEncoderWithPreference(mLib, mCodecID);
+ if (!codec) {
+ FFMPEG_LOG("failed to find ffmpeg encoder for codec id %d", mCodecID);
+ return nullptr;
+ }
+ FFMPEG_LOG("found codec: %s", codec->name);
+ mCodecName = codec->name;
+
+ ForceEnablingFFmpegDebugLogs();
+
+ MOZ_ASSERT(!mCodecContext);
+ if (!(mCodecContext = mLib->avcodec_alloc_context3(codec))) {
+ FFMPEG_LOG("failed to allocate ffmpeg context for codec %s", codec->name);
+ return nullptr;
+ }
+
+ return codec;
+}
+
+MediaResult FFmpegDataEncoder<LIBAV_VER>::FinishInitCommon(AVCodec* aCodec) {
+ mCodecContext->bit_rate = static_cast<FFmpegBitRate>(mConfig.mBitrate);
+#if LIBAVCODEC_VERSION_MAJOR >= 60
+ mCodecContext->flags |= AV_CODEC_FLAG_FRAME_DURATION;
+#endif
+
+ AVDictionary* options = nullptr;
+ if (int ret = OpenCodecContext(aCodec, &options); ret < 0) {
+ FFMPEG_LOG("failed to open %s avcodec: %s", aCodec->name,
+ MakeErrorString(mLib, ret).get());
+ return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("avcodec_open2 error"));
+ }
+ mLib->av_dict_free(&options);
+
+ return MediaResult(NS_OK);
+}
+
+void FFmpegDataEncoder<LIBAV_VER>::ShutdownInternal() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ FFMPEG_LOG("ShutdownInternal");
+
+ DestroyFrame();
+
+ if (mCodecContext) {
+ CloseCodecContext();
+ mLib->av_freep(&mCodecContext);
+ mCodecContext = nullptr;
+ }
+}
+
+int FFmpegDataEncoder<LIBAV_VER>::OpenCodecContext(const AVCodec* aCodec,
+ AVDictionary** aOptions) {
+ MOZ_ASSERT(mCodecContext);
+
+ StaticMutexAutoLock mon(sMutex);
+ return mLib->avcodec_open2(mCodecContext, aCodec, aOptions);
+}
+
+void FFmpegDataEncoder<LIBAV_VER>::CloseCodecContext() {
+ MOZ_ASSERT(mCodecContext);
+
+ StaticMutexAutoLock mon(sMutex);
+ mLib->avcodec_close(mCodecContext);
+}
+
+bool FFmpegDataEncoder<LIBAV_VER>::PrepareFrame() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ // TODO: Merge the duplicate part with FFmpegDataDecoder's PrepareFrame.
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ if (mFrame) {
+ mLib->av_frame_unref(mFrame);
+ } else {
+ mFrame = mLib->av_frame_alloc();
+ }
+#elif LIBAVCODEC_VERSION_MAJOR == 54
+ if (mFrame) {
+ mLib->avcodec_get_frame_defaults(mFrame);
+ } else {
+ mFrame = mLib->avcodec_alloc_frame();
+ }
+#else
+ mLib->av_freep(&mFrame);
+ mFrame = mLib->avcodec_alloc_frame();
+#endif
+ return !!mFrame;
+}
+
+void FFmpegDataEncoder<LIBAV_VER>::DestroyFrame() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+ if (mFrame) {
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ mLib->av_frame_unref(mFrame);
+ mLib->av_frame_free(&mFrame);
+#elif LIBAVCODEC_VERSION_MAJOR == 54
+ mLib->avcodec_free_frame(&mFrame);
+#else
+ mLib->av_freep(&mFrame);
+#endif
+ mFrame = nullptr;
+ }
+}
+
+// avcodec_send_frame and avcodec_receive_packet were introduced in version 58.
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+Result<MediaDataEncoder::EncodedData, nsresult>
+FFmpegDataEncoder<LIBAV_VER>::EncodeWithModernAPIs() {
+ // Initialize AVPacket.
+ AVPacket* pkt = mLib->av_packet_alloc();
+
+ if (!pkt) {
+ FFMPEG_LOG("failed to allocate packet");
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+
+ auto freePacket = MakeScopeExit([this, &pkt] { mLib->av_packet_free(&pkt); });
+
+ // Send frame and receive packets.
+ if (int ret = mLib->avcodec_send_frame(mCodecContext, mFrame); ret < 0) {
+    // In theory, avcodec_send_frame could send -EAGAIN to signal its internal
+    // buffers are full. In practice this can't happen as we only feed one frame
+ // at a time, and we immediately call avcodec_receive_packet right after.
+ // TODO: Create a NS_ERROR_DOM_MEDIA_ENCODE_ERR in ErrorList.py?
+ FFMPEG_LOG("avcodec_send_frame error: %s",
+ MakeErrorString(mLib, ret).get());
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+
+ EncodedData output;
+ while (true) {
+ int ret = mLib->avcodec_receive_packet(mCodecContext, pkt);
+ if (ret == AVERROR(EAGAIN)) {
+ // The encoder is asking for more inputs.
+ FFMPEG_LOG("encoder is asking for more input!");
+ break;
+ }
+
+ if (ret < 0) {
+ // AVERROR_EOF is returned when the encoder has been fully flushed, but it
+ // shouldn't happen here.
+ FFMPEG_LOG("avcodec_receive_packet error: %s",
+ MakeErrorString(mLib, ret).get());
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+
+ RefPtr<MediaRawData> d = ToMediaRawData(pkt);
+ mLib->av_packet_unref(pkt);
+ if (!d) {
+ // This can happen if e.g. DTX is enabled
+ FFMPEG_LOG("No encoded packet output");
+ continue;
+ }
+ output.AppendElement(std::move(d));
+ }
+
+ FFMPEG_LOG("Got %zu encoded data", output.Length());
+ return std::move(output);
+}
+
+Result<MediaDataEncoder::EncodedData, nsresult>
+FFmpegDataEncoder<LIBAV_VER>::DrainWithModernAPIs() {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+ MOZ_ASSERT(mCodecContext);
+
+ // TODO: Create a Result<EncodedData, nsresult> EncodeWithModernAPIs(AVFrame
+ // *aFrame) to merge the duplicate code below with EncodeWithModernAPIs above.
+
+ // Initialize AVPacket.
+ AVPacket* pkt = mLib->av_packet_alloc();
+ if (!pkt) {
+ FFMPEG_LOG("failed to allocate packet");
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+ auto freePacket = MakeScopeExit([this, &pkt] { mLib->av_packet_free(&pkt); });
+
+ // Enter draining mode by sending NULL to the avcodec_send_frame(). Note that
+ // this can leave the encoder in a permanent EOF state after draining. As a
+ // result, the encoder is unable to continue encoding. A new
+ // AVCodecContext/encoder creation is required if users need to encode after
+ // draining.
+ //
+ // TODO: Use `avcodec_flush_buffers` to drain the pending packets if
+ // AV_CODEC_CAP_ENCODER_FLUSH is set in mCodecContext->codec->capabilities.
+ if (int ret = mLib->avcodec_send_frame(mCodecContext, nullptr); ret < 0) {
+ if (ret == AVERROR_EOF) {
+      // The encoder has been flushed. Drain can be called multiple times.
+ FFMPEG_LOG("encoder has been flushed!");
+ return EncodedData();
+ }
+
+ FFMPEG_LOG("avcodec_send_frame error: %s",
+ MakeErrorString(mLib, ret).get());
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+
+ EncodedData output;
+ while (true) {
+ int ret = mLib->avcodec_receive_packet(mCodecContext, pkt);
+ if (ret == AVERROR_EOF) {
+ FFMPEG_LOG("encoder has no more output packet!");
+ break;
+ }
+
+ if (ret < 0) {
+ // avcodec_receive_packet should not result in a -EAGAIN once it's in
+ // draining mode.
+ FFMPEG_LOG("avcodec_receive_packet error: %s",
+ MakeErrorString(mLib, ret).get());
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+
+ RefPtr<MediaRawData> d = ToMediaRawData(pkt);
+ mLib->av_packet_unref(pkt);
+ if (!d) {
+ FFMPEG_LOG("failed to create a MediaRawData from the AVPacket");
+ return Err(NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+ output.AppendElement(std::move(d));
+ }
+
+ FFMPEG_LOG("Encoding successful, %zu packets", output.Length());
+
+ // TODO: Evaluate a better solution (Bug 1869466)
+ // TODO: Only re-create AVCodecContext when avcodec_flush_buffers is
+ // unavailable.
+ ShutdownInternal();
+ nsresult r = InitSpecific();
+ return NS_FAILED(r) ? Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR)
+ : Result<MediaDataEncoder::EncodedData, nsresult>(
+ std::move(output));
+}
+#endif // LIBAVCODEC_VERSION_MAJOR >= 58
+
+RefPtr<MediaRawData> FFmpegDataEncoder<LIBAV_VER>::ToMediaRawDataCommon(
+ AVPacket* aPacket) {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+ MOZ_ASSERT(aPacket);
+
+ // Copy frame data from AVPacket.
+ auto data = MakeRefPtr<MediaRawData>();
+ UniquePtr<MediaRawDataWriter> writer(data->CreateWriter());
+ if (!writer->Append(aPacket->data, static_cast<size_t>(aPacket->size))) {
+ FFMPEG_LOG("fail to allocate MediaRawData buffer");
+ return nullptr; // OOM
+ }
+
+ data->mKeyframe = (aPacket->flags & AV_PKT_FLAG_KEY) != 0;
+ // TODO(bug 1869560): The unit of pts, dts, and duration is time_base, which
+ // is recommended to be the reciprocal of the frame rate, but we set it to
+ // microsecond for now.
+ data->mTime = media::TimeUnit::FromMicroseconds(aPacket->pts);
+#if LIBAVCODEC_VERSION_MAJOR >= 60
+ data->mDuration = media::TimeUnit::FromMicroseconds(aPacket->duration);
+#else
+ int64_t duration;
+ if (mDurationMap.Find(aPacket->pts, duration)) {
+ data->mDuration = media::TimeUnit::FromMicroseconds(duration);
+ } else {
+ data->mDuration = media::TimeUnit::FromMicroseconds(aPacket->duration);
+ }
+#endif
+ data->mTimecode = media::TimeUnit::FromMicroseconds(aPacket->dts);
+
+ if (auto r = GetExtraData(aPacket); r.isOk()) {
+ data->mExtraData = r.unwrap();
+ }
+
+ return data;
+}
+void FFmpegDataEncoder<LIBAV_VER>::ForceEnablingFFmpegDebugLogs() {
+#if DEBUG
+ if (!getenv("MOZ_AV_LOG_LEVEL") &&
+ MOZ_LOG_TEST(sFFmpegVideoLog, LogLevel::Debug)) {
+ mLib->av_log_set_level(AV_LOG_DEBUG);
+ }
+#endif // DEBUG
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataEncoder.h b/dom/media/platforms/ffmpeg/FFmpegDataEncoder.h
new file mode 100644
index 0000000000..de80ed36ca
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegDataEncoder.h
@@ -0,0 +1,107 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGDATAENCODER_H_
+#define DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGDATAENCODER_H_
+
+#include "FFmpegLibWrapper.h"
+#include "PlatformEncoderModule.h"
+#include "SimpleMap.h"
+#include "mozilla/ThreadSafety.h"
+
+// This must be the last header included
+#include "FFmpegLibs.h"
+
+namespace mozilla {
+
+template <int V>
+AVCodecID GetFFmpegEncoderCodecId(CodecType aCodec);
+
+template <>
+AVCodecID GetFFmpegEncoderCodecId<LIBAV_VER>(CodecType aCodec);
+
+template <int V>
+class FFmpegDataEncoder : public MediaDataEncoder {};
+
+template <>
+class FFmpegDataEncoder<LIBAV_VER> : public MediaDataEncoder {
+ using DurationMap = SimpleMap<int64_t>;
+
+ public:
+ FFmpegDataEncoder(const FFmpegLibWrapper* aLib, AVCodecID aCodecID,
+ const RefPtr<TaskQueue>& aTaskQueue,
+ const EncoderConfig& aConfig);
+
+ /* MediaDataEncoder Methods */
+ // All methods run on the task queue, except for GetDescriptionName.
+ RefPtr<InitPromise> Init() override;
+ RefPtr<EncodePromise> Encode(const MediaData* aSample) override;
+ RefPtr<ReconfigurationPromise> Reconfigure(
+ const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges)
+ override;
+ RefPtr<EncodePromise> Drain() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
+ RefPtr<GenericPromise> SetBitrate(uint32_t aBitRate) override;
+
+ protected:
+ // Methods only called on mTaskQueue.
+ RefPtr<InitPromise> ProcessInit();
+ RefPtr<EncodePromise> ProcessEncode(RefPtr<const MediaData> aSample);
+ RefPtr<ReconfigurationPromise> ProcessReconfigure(
+ const RefPtr<const EncoderConfigurationChangeList>&
+ aConfigurationChanges);
+ RefPtr<EncodePromise> ProcessDrain();
+ RefPtr<ShutdownPromise> ProcessShutdown();
+ // Initialize the audio or video-specific members of an encoder instance.
+ virtual nsresult InitSpecific() = 0;
+ // nullptr in case of failure. This is to be called by the
+ // audio/video-specific InitInternal methods in the sub-class, and initializes
+ // the common members.
+ AVCodec* InitCommon();
+ MediaResult FinishInitCommon(AVCodec* aCodec);
+ void ShutdownInternal();
+ int OpenCodecContext(const AVCodec* aCodec, AVDictionary** aOptions)
+ MOZ_EXCLUDES(sMutex);
+ void CloseCodecContext() MOZ_EXCLUDES(sMutex);
+ bool PrepareFrame();
+ void DestroyFrame();
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+ virtual Result<EncodedData, nsresult> EncodeInputWithModernAPIs(
+ RefPtr<const MediaData> aSample) = 0;
+ Result<EncodedData, nsresult> EncodeWithModernAPIs();
+ virtual Result<EncodedData, nsresult> DrainWithModernAPIs();
+#endif
+ // Convert an AVPacket to a MediaRawData. This can return nullptr if a packet
+ // has been processed by the encoder, but is not to be returned to the caller,
+ // because DTX is enabled.
+ virtual RefPtr<MediaRawData> ToMediaRawData(AVPacket* aPacket) = 0;
+ RefPtr<MediaRawData> ToMediaRawDataCommon(AVPacket* aPacket);
+ virtual Result<already_AddRefed<MediaByteBuffer>, nsresult> GetExtraData(
+ AVPacket* aPacket) = 0;
+ void ForceEnablingFFmpegDebugLogs();
+
+ // This refers to a static FFmpegLibWrapper, so raw pointer is adequate.
+ const FFmpegLibWrapper* mLib;
+ const AVCodecID mCodecID;
+ const RefPtr<TaskQueue> mTaskQueue;
+
+ // set in constructor, modified when parameters change
+ EncoderConfig mConfig;
+
+ // mTaskQueue only.
+ nsCString mCodecName;
+ AVCodecContext* mCodecContext;
+ AVFrame* mFrame;
+ DurationMap mDurationMap;
+
+ // Provide critical-section for open/close mCodecContext.
+ static StaticMutex sMutex;
+ const bool mVideoCodec;
+};
+
+} // namespace mozilla
+
+#endif /* DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGDATAENCODER_H_ */
diff --git a/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp b/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp
index 42c54a48ed..b6e734268d 100644
--- a/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp
@@ -7,6 +7,7 @@
#include "FFmpegEncoderModule.h"
#include "FFmpegLog.h"
+#include "FFmpegAudioEncoder.h"
#include "FFmpegVideoEncoder.h"
// This must be the last header included
@@ -44,6 +45,23 @@ already_AddRefed<MediaDataEncoder> FFmpegEncoderModule<V>::CreateVideoEncoder(
return encoder.forget();
}
+template <int V>
+already_AddRefed<MediaDataEncoder> FFmpegEncoderModule<V>::CreateAudioEncoder(
+ const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue) const {
+ AVCodecID codecId = GetFFmpegEncoderCodecId<V>(aConfig.mCodec);
+ if (codecId == AV_CODEC_ID_NONE) {
+ FFMPEGV_LOG("No ffmpeg encoder for %s", GetCodecTypeString(aConfig.mCodec));
+ return nullptr;
+ }
+
+ RefPtr<MediaDataEncoder> encoder =
+ new FFmpegAudioEncoder<V>(mLib, codecId, aTaskQueue, aConfig);
+ FFMPEGA_LOG("ffmpeg %s encoder: %s has been created",
+ GetCodecTypeString(aConfig.mCodec),
+ encoder->GetDescriptionName().get());
+ return encoder.forget();
+}
+
template class FFmpegEncoderModule<LIBAV_VER>;
} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegEncoderModule.h b/dom/media/platforms/ffmpeg/FFmpegEncoderModule.h
index 1c9e94b78f..6d0e4b1c30 100644
--- a/dom/media/platforms/ffmpeg/FFmpegEncoderModule.h
+++ b/dom/media/platforms/ffmpeg/FFmpegEncoderModule.h
@@ -30,6 +30,10 @@ class FFmpegEncoderModule final : public PlatformEncoderModule {
const EncoderConfig& aConfig,
const RefPtr<TaskQueue>& aTaskQueue) const override;
+ already_AddRefed<MediaDataEncoder> CreateAudioEncoder(
+ const EncoderConfig& aConfig,
+ const RefPtr<TaskQueue>& aTaskQueue) const override;
+
protected:
explicit FFmpegEncoderModule(FFmpegLibWrapper* aLib) : mLib(aLib) {
MOZ_ASSERT(mLib);
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
index bfb3105a57..5fd6102a34 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -200,6 +200,7 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(av_image_get_buffer_size, AV_FUNC_AVUTIL_ALL)
AV_FUNC_OPTION(av_channel_layout_default, AV_FUNC_AVUTIL_60)
AV_FUNC_OPTION(av_channel_layout_from_mask, AV_FUNC_AVUTIL_60)
+ AV_FUNC_OPTION(av_channel_layout_copy, AV_FUNC_AVUTIL_60)
AV_FUNC_OPTION(av_buffer_get_opaque,
(AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58 |
AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
@@ -218,6 +219,8 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(av_dict_set, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_dict_free, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_opt_set, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_opt_set_double, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_opt_set_int, AV_FUNC_AVUTIL_ALL)
#ifdef MOZ_WIDGET_GTK
AV_FUNC_OPTION_SILENT(avcodec_get_hw_config,
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
index eacbba286a..226b4fc8cb 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
@@ -161,11 +161,16 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
int nb_channels);
void (*av_channel_layout_from_mask)(AVChannelLayout* ch_layout,
uint64_t mask);
+ int (*av_channel_layout_copy)(AVChannelLayout* dst, AVChannelLayout* src);
int (*av_dict_set)(AVDictionary** pm, const char* key, const char* value,
int flags);
void (*av_dict_free)(AVDictionary** m);
int (*av_opt_set)(void* obj, const char* name, const char* val,
int search_flags);
+ int (*av_opt_set_double)(void* obj, const char* name, double val,
+ int search_flags);
+ int (*av_opt_set_int)(void* obj, const char* name, int64_t val,
+ int search_flags);
// libavutil v55 and later only
AVFrame* (*av_frame_alloc)();
diff --git a/dom/media/platforms/ffmpeg/FFmpegLog.h b/dom/media/platforms/ffmpeg/FFmpegLog.h
index 45ea700936..676c5e4ba1 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLog.h
+++ b/dom/media/platforms/ffmpeg/FFmpegLog.h
@@ -19,6 +19,9 @@ static mozilla::LazyLogModule sFFmpegAudioLog("FFmpegAudio");
# define FFMPEGV_LOG(str, ...) \
MOZ_LOG(sFFmpegVideoLog, mozilla::LogLevel::Debug, \
("FFVPX: " str, ##__VA_ARGS__))
+# define FFMPEGA_LOG(str, ...) \
+ MOZ_LOG(sFFmpegAudioLog, mozilla::LogLevel::Debug, \
+ ("FFVPX: " str, ##__VA_ARGS__))
# define FFMPEGP_LOG(str, ...) \
MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("FFVPX: " str, ##__VA_ARGS__))
#else
@@ -28,11 +31,15 @@ static mozilla::LazyLogModule sFFmpegAudioLog("FFmpegAudio");
# define FFMPEGV_LOG(str, ...) \
MOZ_LOG(sFFmpegVideoLog, mozilla::LogLevel::Debug, \
("FFMPEG: " str, ##__VA_ARGS__))
+# define FFMPEGA_LOG(str, ...) \
+ MOZ_LOG(sFFmpegAudioLog, mozilla::LogLevel::Debug, \
+ ("FFMPEG: " str, ##__VA_ARGS__))
# define FFMPEGP_LOG(str, ...) \
MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("FFMPEG: " str, ##__VA_ARGS__))
#endif
-#define FFMPEG_LOGV(...) \
- MOZ_LOG(sFFmpegVideoLog, mozilla::LogLevel::Verbose, (__VA_ARGS__))
+#define FFMPEG_LOGV(...) \
+ MOZ_LOG(mVideoCodec ? sFFmpegVideoLog : sFFmpegAudioLog, \
+ mozilla::LogLevel::Verbose, (__VA_ARGS__))
#endif // __FFmpegLog_h__
diff --git a/dom/media/platforms/ffmpeg/FFmpegUtils.cpp b/dom/media/platforms/ffmpeg/FFmpegUtils.cpp
new file mode 100644
index 0000000000..e209306133
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegUtils.cpp
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FFmpegUtils.h"
+
+#include "FFmpegLibWrapper.h"
+#include "mozilla/Assertions.h"
+#include "nsString.h"
+
+namespace mozilla {
+
+nsCString MakeErrorString(const FFmpegLibWrapper* aLib, int aErrNum) {
+ MOZ_ASSERT(aLib);
+
+ char errStr[FFmpegErrorMaxStringSize];
+ aLib->av_strerror(aErrNum, errStr, FFmpegErrorMaxStringSize);
+ return nsCString(errStr);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegUtils.h b/dom/media/platforms/ffmpeg/FFmpegUtils.h
new file mode 100644
index 0000000000..fe588ed14c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegUtils.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGUTILS_H_
+#define DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGUTILS_H_
+
+#include <cstddef>
+#include "nsStringFwd.h"
+#include "FFmpegLibWrapper.h"
+
+// This must be the last header included
+#include "FFmpegLibs.h"
+
+namespace mozilla {
+
+#if LIBAVCODEC_VERSION_MAJOR >= 57
+using FFmpegBitRate = int64_t;
+constexpr size_t FFmpegErrorMaxStringSize = AV_ERROR_MAX_STRING_SIZE;
+#else
+using FFmpegBitRate = int;
+constexpr size_t FFmpegErrorMaxStringSize = 64;
+#endif
+
+nsCString MakeErrorString(const FFmpegLibWrapper* aLib, int aErrNum);
+
+template <typename T, typename F>
+void IterateZeroTerminated(const T& aList, F&& aLambda) {
+ for (size_t i = 0; aList[i] != 0; i++) {
+ aLambda(aList[i]);
+ }
+}
+
+inline bool IsVideoCodec(AVCodecID aCodecID) {
+ switch (aCodecID) {
+ case AV_CODEC_ID_H264:
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+ case AV_CODEC_ID_VP8:
+#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ case AV_CODEC_ID_VP9:
+#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 59
+ case AV_CODEC_ID_AV1:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace mozilla
+
+#endif // DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGUTILS_H_
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
index 040b2e72a1..3fe46938fd 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -46,6 +46,7 @@
# define AV_PIX_FMT_YUV444P10LE PIX_FMT_YUV444P10LE
# define AV_PIX_FMT_GBRP PIX_FMT_GBRP
# define AV_PIX_FMT_NONE PIX_FMT_NONE
+# define AV_PIX_FMT_VAAPI_VLD PIX_FMT_VAAPI_VLD
#endif
#if LIBAVCODEC_VERSION_MAJOR > 58
# define AV_PIX_FMT_VAAPI_VLD AV_PIX_FMT_VAAPI
@@ -618,6 +619,9 @@ static gfx::ColorDepth GetColorDepth(const AVPixelFormat& aFormat) {
case AV_PIX_FMT_YUV444P12LE:
return gfx::ColorDepth::COLOR_12;
#endif
+ case AV_PIX_FMT_VAAPI_VLD:
+ // Placeholder, it could be deeper colors
+ return gfx::ColorDepth::COLOR_8;
default:
MOZ_ASSERT_UNREACHABLE("Not supported format?");
return gfx::ColorDepth::COLOR_8;
@@ -662,7 +666,7 @@ static int GetVideoBufferWrapper(struct AVCodecContext* aCodecContext,
static void ReleaseVideoBufferWrapper(void* opaque, uint8_t* data) {
if (opaque) {
- FFMPEG_LOGV("ReleaseVideoBufferWrapper: PlanarYCbCrImage=%p", opaque);
+ FFMPEGV_LOG("ReleaseVideoBufferWrapper: PlanarYCbCrImage=%p", opaque);
RefPtr<ImageBufferWrapper> image = static_cast<ImageBufferWrapper*>(opaque);
image->ReleaseBuffer();
}
@@ -1199,6 +1203,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
return Some(DecodeStage::YUV444P);
case AV_PIX_FMT_GBRP:
return Some(DecodeStage::GBRP);
+ case AV_PIX_FMT_VAAPI_VLD:
+ return Some(DecodeStage::VAAPI_SURFACE);
default:
return Nothing();
}
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
index a3cfdf1b1d..9d1dbcf80f 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
@@ -8,32 +8,21 @@
#include "BufferReader.h"
#include "FFmpegLog.h"
-#include "FFmpegRuntimeLinker.h"
+#include "FFmpegUtils.h"
#include "H264.h"
#include "ImageContainer.h"
#include "libavutil/error.h"
#include "libavutil/pixfmt.h"
-#include "mozilla/CheckedInt.h"
-#include "mozilla/PodOperations.h"
-#include "mozilla/StaticMutex.h"
-#include "mozilla/dom/ImageBitmapBinding.h"
#include "mozilla/dom/ImageUtils.h"
#include "nsPrintfCString.h"
#include "ImageToI420.h"
#include "libyuv.h"
+#include "FFmpegRuntimeLinker.h"
// The ffmpeg namespace is introduced to avoid the PixelFormat's name conflicts
// with MediaDataEncoder::PixelFormat in MediaDataEncoder class scope.
namespace ffmpeg {
-#if LIBAVCODEC_VERSION_MAJOR >= 57
-using FFmpegBitRate = int64_t;
-constexpr size_t FFmpegErrorMaxStringSize = AV_ERROR_MAX_STRING_SIZE;
-#else
-using FFmpegBitRate = int;
-constexpr size_t FFmpegErrorMaxStringSize = 64;
-#endif
-
// TODO: WebCodecs' I420A should map to MediaDataEncoder::PixelFormat and then
// to AV_PIX_FMT_YUVA420P here.
#if LIBAVCODEC_VERSION_MAJOR < 54
@@ -166,9 +155,9 @@ struct VPXSVCSetting {
nsTArray<uint32_t> mTargetBitrates;
};
-static Maybe<VPXSVCSetting> GetVPXSVCSetting(
- const MediaDataEncoder::ScalabilityMode& aMode, uint32_t aBitPerSec) {
- if (aMode == MediaDataEncoder::ScalabilityMode::None) {
+static Maybe<VPXSVCSetting> GetVPXSVCSetting(const ScalabilityMode& aMode,
+ uint32_t aBitPerSec) {
+ if (aMode == ScalabilityMode::None) {
return Nothing();
}
@@ -183,7 +172,7 @@ static Maybe<VPXSVCSetting> GetVPXSVCSetting(
nsTArray<uint8_t> layerIds;
nsTArray<uint8_t> rateDecimators;
nsTArray<uint32_t> bitrates;
- if (aMode == MediaDataEncoder::ScalabilityMode::L1T2) {
+ if (aMode == ScalabilityMode::L1T2) {
// Two temporal layers. 0-1...
//
// Frame pattern:
@@ -208,7 +197,7 @@ static Maybe<VPXSVCSetting> GetVPXSVCSetting(
bitrates.AppendElement(kbps * 3 / 5);
bitrates.AppendElement(kbps);
} else {
- MOZ_ASSERT(aMode == MediaDataEncoder::ScalabilityMode::L1T3);
+ MOZ_ASSERT(aMode == ScalabilityMode::L1T3);
// Three temporal layers. 0-2-1-2...
//
// Frame pattern:
@@ -245,59 +234,6 @@ static Maybe<VPXSVCSetting> GetVPXSVCSetting(
std::move(rateDecimators), std::move(bitrates)});
}
-static nsCString MakeErrorString(const FFmpegLibWrapper* aLib, int aErrNum) {
- MOZ_ASSERT(aLib);
-
- char errStr[ffmpeg::FFmpegErrorMaxStringSize];
- aLib->av_strerror(aErrNum, errStr, ffmpeg::FFmpegErrorMaxStringSize);
- return nsCString(errStr);
-}
-
-// TODO: Remove this function and simply use `avcodec_find_encoder` once
-// libopenh264 is supported.
-static AVCodec* FindEncoderWithPreference(const FFmpegLibWrapper* aLib,
- AVCodecID aCodecId) {
- MOZ_ASSERT(aLib);
-
- AVCodec* codec = nullptr;
-
- // Prioritize libx264 for now since it's the only h264 codec we tested.
- if (aCodecId == AV_CODEC_ID_H264) {
- codec = aLib->avcodec_find_encoder_by_name("libx264");
- if (codec) {
- FFMPEGV_LOG("Prefer libx264 for h264 codec");
- return codec;
- }
- }
-
- FFMPEGV_LOG("Fallback to other h264 library. Fingers crossed");
- return aLib->avcodec_find_encoder(aCodecId);
-}
-
-template <>
-AVCodecID GetFFmpegEncoderCodecId<LIBAV_VER>(CodecType aCodec) {
-#if LIBAVCODEC_VERSION_MAJOR >= 58
- if (aCodec == CodecType::VP8) {
- return AV_CODEC_ID_VP8;
- }
-
- if (aCodec == CodecType::VP9) {
- return AV_CODEC_ID_VP9;
- }
-
-# if !defined(USING_MOZFFVPX)
- if (aCodec == CodecType::H264) {
- return AV_CODEC_ID_H264;
- }
-# endif
-
- if (aCodec == CodecType::AV1) {
- return AV_CODEC_ID_AV1;
- }
-#endif
- return AV_CODEC_ID_NONE;
-}
-
uint8_t FFmpegVideoEncoder<LIBAV_VER>::SVCInfo::UpdateTemporalLayerId() {
MOZ_ASSERT(!mTemporalLayerIds.IsEmpty());
@@ -306,70 +242,10 @@ uint8_t FFmpegVideoEncoder<LIBAV_VER>::SVCInfo::UpdateTemporalLayerId() {
return static_cast<uint8_t>(mTemporalLayerIds[currentIndex]);
}
-StaticMutex FFmpegVideoEncoder<LIBAV_VER>::sMutex;
-
FFmpegVideoEncoder<LIBAV_VER>::FFmpegVideoEncoder(
const FFmpegLibWrapper* aLib, AVCodecID aCodecID,
const RefPtr<TaskQueue>& aTaskQueue, const EncoderConfig& aConfig)
- : mLib(aLib),
- mCodecID(aCodecID),
- mTaskQueue(aTaskQueue),
- mConfig(aConfig),
- mCodecName(EmptyCString()),
- mCodecContext(nullptr),
- mFrame(nullptr),
- mSVCInfo(Nothing()) {
- MOZ_ASSERT(mLib);
- MOZ_ASSERT(mTaskQueue);
-#if LIBAVCODEC_VERSION_MAJOR < 58
- MOZ_CRASH("FFmpegVideoEncoder needs ffmpeg 58 at least.");
-#endif
-};
-
-RefPtr<MediaDataEncoder::InitPromise> FFmpegVideoEncoder<LIBAV_VER>::Init() {
- FFMPEGV_LOG("Init");
- return InvokeAsync(mTaskQueue, this, __func__,
- &FFmpegVideoEncoder::ProcessInit);
-}
-
-RefPtr<MediaDataEncoder::EncodePromise> FFmpegVideoEncoder<LIBAV_VER>::Encode(
- const MediaData* aSample) {
- MOZ_ASSERT(aSample != nullptr);
-
- FFMPEGV_LOG("Encode");
- return InvokeAsync(mTaskQueue, __func__,
- [self = RefPtr<FFmpegVideoEncoder<LIBAV_VER>>(this),
- sample = RefPtr<const MediaData>(aSample)]() {
- return self->ProcessEncode(std::move(sample));
- });
-}
-
-RefPtr<MediaDataEncoder::ReconfigurationPromise>
-FFmpegVideoEncoder<LIBAV_VER>::Reconfigure(
- const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges) {
- return InvokeAsync<const RefPtr<const EncoderConfigurationChangeList>>(
- mTaskQueue, this, __func__,
- &FFmpegVideoEncoder<LIBAV_VER>::ProcessReconfigure,
- aConfigurationChanges);
-}
-
-RefPtr<MediaDataEncoder::EncodePromise> FFmpegVideoEncoder<LIBAV_VER>::Drain() {
- FFMPEGV_LOG("Drain");
- return InvokeAsync(mTaskQueue, this, __func__,
- &FFmpegVideoEncoder::ProcessDrain);
-}
-
-RefPtr<ShutdownPromise> FFmpegVideoEncoder<LIBAV_VER>::Shutdown() {
- FFMPEGV_LOG("Shutdown");
- return InvokeAsync(mTaskQueue, this, __func__,
- &FFmpegVideoEncoder::ProcessShutdown);
-}
-
-RefPtr<GenericPromise> FFmpegVideoEncoder<LIBAV_VER>::SetBitrate(
- uint32_t aBitrate) {
- FFMPEGV_LOG("SetBitrate");
- return GenericPromise::CreateAndReject(NS_ERROR_NOT_IMPLEMENTED, __func__);
-}
+ : FFmpegDataEncoder(aLib, aCodecID, aTaskQueue, aConfig) {}
nsCString FFmpegVideoEncoder<LIBAV_VER>::GetDescriptionName() const {
#ifdef USING_MOZFFVPX
@@ -385,112 +261,23 @@ nsCString FFmpegVideoEncoder<LIBAV_VER>::GetDescriptionName() const {
#endif
}
-RefPtr<MediaDataEncoder::InitPromise>
-FFmpegVideoEncoder<LIBAV_VER>::ProcessInit() {
+nsresult FFmpegVideoEncoder<LIBAV_VER>::InitSpecific() {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
- FFMPEGV_LOG("ProcessInit");
- MediaResult r = InitInternal();
- return NS_FAILED(r)
- ? InitPromise::CreateAndReject(r, __func__)
- : InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
-}
-
-RefPtr<MediaDataEncoder::EncodePromise>
-FFmpegVideoEncoder<LIBAV_VER>::ProcessEncode(RefPtr<const MediaData> aSample) {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+ FFMPEGV_LOG("FFmpegVideoEncoder::InitSpecific");
- FFMPEGV_LOG("ProcessEncode");
-
-#if LIBAVCODEC_VERSION_MAJOR < 58
- // TODO(Bug 1868253): implement encode with avcodec_encode_video2().
- MOZ_CRASH("FFmpegVideoEncoder needs ffmpeg 58 at least.");
- return EncodePromise::CreateAndReject(NS_ERROR_NOT_IMPLEMENTED, __func__);
-#else
- RefPtr<const VideoData> sample(aSample->As<const VideoData>());
- MOZ_ASSERT(sample);
-
- return EncodeWithModernAPIs(sample);
-#endif
-}
-
-RefPtr<MediaDataEncoder::ReconfigurationPromise>
-FFmpegVideoEncoder<LIBAV_VER>::ProcessReconfigure(
- const RefPtr<const EncoderConfigurationChangeList> aConfigurationChanges) {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
-
- FFMPEGV_LOG("ProcessReconfigure");
-
- // Tracked in bug 1869583 -- for now this encoder always reports it cannot be
- // reconfigured on the fly
- return MediaDataEncoder::ReconfigurationPromise::CreateAndReject(
- NS_ERROR_NOT_IMPLEMENTED, __func__);
-}
-
-RefPtr<MediaDataEncoder::EncodePromise>
-FFmpegVideoEncoder<LIBAV_VER>::ProcessDrain() {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
-
- FFMPEGV_LOG("ProcessDrain");
-
-#if LIBAVCODEC_VERSION_MAJOR < 58
- MOZ_CRASH("FFmpegVideoEncoder needs ffmpeg 58 at least.");
- return EncodePromise::CreateAndReject(NS_ERROR_NOT_IMPLEMENTED, __func__);
-#else
- return DrainWithModernAPIs();
-#endif
-}
-
-RefPtr<ShutdownPromise> FFmpegVideoEncoder<LIBAV_VER>::ProcessShutdown() {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
-
- FFMPEGV_LOG("ProcessShutdown");
-
- ShutdownInternal();
-
- // Don't shut mTaskQueue down since it's owned by others.
- return ShutdownPromise::CreateAndResolve(true, __func__);
-}
-
-MediaResult FFmpegVideoEncoder<LIBAV_VER>::InitInternal() {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
-
- FFMPEGV_LOG("InitInternal");
-
- if (mCodecID == AV_CODEC_ID_H264) {
- // H264Specific is required to get the format (avcc vs annexb).
- if (!mConfig.mCodecSpecific ||
- !mConfig.mCodecSpecific->is<H264Specific>()) {
- return MediaResult(
- NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Unable to get H264 necessary encoding info"));
- }
- }
-
- AVCodec* codec = FindEncoderWithPreference(mLib, mCodecID);
+ // Initialize the common members of the encoder instance
+ AVCodec* codec = FFmpegDataEncoder<LIBAV_VER>::InitCommon();
if (!codec) {
- FFMPEGV_LOG("failed to find ffmpeg encoder for codec id %d", mCodecID);
- return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Unable to find codec"));
+ FFMPEGV_LOG("FFmpegDataEncoder::InitCommon failed");
+ return NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR;
}
- FFMPEGV_LOG("find codec: %s", codec->name);
- mCodecName = codec->name;
- ForceEnablingFFmpegDebugLogs();
-
- MOZ_ASSERT(!mCodecContext);
- if (!(mCodecContext = mLib->avcodec_alloc_context3(codec))) {
- FFMPEGV_LOG("failed to allocate ffmpeg context for codec %s", codec->name);
- return MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Failed to initialize ffmpeg context"));
- }
-
- // Set up AVCodecContext.
+ // And now the video-specific part
mCodecContext->pix_fmt = ffmpeg::FFMPEG_PIX_FMT_YUV420P;
- mCodecContext->bit_rate =
- static_cast<ffmpeg::FFmpegBitRate>(mConfig.mBitrate);
mCodecContext->width = static_cast<int>(mConfig.mSize.width);
mCodecContext->height = static_cast<int>(mConfig.mSize.height);
+ mCodecContext->gop_size = static_cast<int>(mConfig.mKeyframeInterval);
// TODO(bug 1869560): The recommended time_base is the reciprocal of the frame
// rate, but we set it to microsecond for now.
mCodecContext->time_base =
@@ -500,12 +287,13 @@ MediaResult FFmpegVideoEncoder<LIBAV_VER>::InitInternal() {
mCodecContext->framerate =
AVRational{.num = static_cast<int>(mConfig.mFramerate), .den = 1};
#endif
+
#if LIBAVCODEC_VERSION_MAJOR >= 60
mCodecContext->flags |= AV_CODEC_FLAG_FRAME_DURATION;
#endif
mCodecContext->gop_size = static_cast<int>(mConfig.mKeyframeInterval);
- if (mConfig.mUsage == MediaDataEncoder::Usage::Realtime) {
+ if (mConfig.mUsage == Usage::Realtime) {
mLib->av_opt_set(mCodecContext->priv_data, "deadline", "realtime", 0);
// Explicitly ask encoder do not keep in flight at any one time for
// lookahead purposes.
@@ -578,14 +366,11 @@ MediaResult FFmpegVideoEncoder<LIBAV_VER>::InitInternal() {
// encoder.
mCodecContext->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
- AVDictionary* options = nullptr;
- if (int ret = OpenCodecContext(codec, &options); ret < 0) {
- FFMPEGV_LOG("failed to open %s avcodec: %s", codec->name,
- MakeErrorString(mLib, ret).get());
- return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Unable to open avcodec"));
+ MediaResult rv = FinishInitCommon(codec);
+ if (NS_FAILED(rv)) {
+ FFMPEGV_LOG("FFmpeg video encoder initialization failure.");
+ return rv;
}
- mLib->av_dict_free(&options);
FFMPEGV_LOG("%s has been initialized with format: %s, bitrate: %" PRIi64
", width: %d, height: %d, time_base: %d/%d%s",
@@ -595,74 +380,7 @@ MediaResult FFmpegVideoEncoder<LIBAV_VER>::InitInternal() {
mCodecContext->time_base.num, mCodecContext->time_base.den,
h264Log.IsEmpty() ? "" : h264Log.get());
- return MediaResult(NS_OK);
-}
-
-void FFmpegVideoEncoder<LIBAV_VER>::ShutdownInternal() {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
-
- FFMPEGV_LOG("ShutdownInternal");
-
- DestroyFrame();
-
- if (mCodecContext) {
- CloseCodecContext();
- mLib->av_freep(&mCodecContext);
- mCodecContext = nullptr;
- }
-}
-
-int FFmpegVideoEncoder<LIBAV_VER>::OpenCodecContext(const AVCodec* aCodec,
- AVDictionary** aOptions) {
- MOZ_ASSERT(mCodecContext);
-
- StaticMutexAutoLock mon(sMutex);
- return mLib->avcodec_open2(mCodecContext, aCodec, aOptions);
-}
-
-void FFmpegVideoEncoder<LIBAV_VER>::CloseCodecContext() {
- MOZ_ASSERT(mCodecContext);
-
- StaticMutexAutoLock mon(sMutex);
- mLib->avcodec_close(mCodecContext);
-}
-
-bool FFmpegVideoEncoder<LIBAV_VER>::PrepareFrame() {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
-
- // TODO: Merge the duplicate part with FFmpegDataDecoder's PrepareFrame.
-#if LIBAVCODEC_VERSION_MAJOR >= 55
- if (mFrame) {
- mLib->av_frame_unref(mFrame);
- } else {
- mFrame = mLib->av_frame_alloc();
- }
-#elif LIBAVCODEC_VERSION_MAJOR == 54
- if (mFrame) {
- mLib->avcodec_get_frame_defaults(mFrame);
- } else {
- mFrame = mLib->avcodec_alloc_frame();
- }
-#else
- mLib->av_freep(&mFrame);
- mFrame = mLib->avcodec_alloc_frame();
-#endif
- return !!mFrame;
-}
-
-void FFmpegVideoEncoder<LIBAV_VER>::DestroyFrame() {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
- if (mFrame) {
-#if LIBAVCODEC_VERSION_MAJOR >= 55
- mLib->av_frame_unref(mFrame);
- mLib->av_frame_free(&mFrame);
-#elif LIBAVCODEC_VERSION_MAJOR == 54
- mLib->avcodec_free_frame(&mFrame);
-#else
- mLib->av_freep(&mFrame);
-#endif
- mFrame = nullptr;
- }
+ return NS_OK;
}
bool FFmpegVideoEncoder<LIBAV_VER>::ScaleInputFrame() {
@@ -709,71 +427,62 @@ bool FFmpegVideoEncoder<LIBAV_VER>::ScaleInputFrame() {
// avcodec_send_frame and avcodec_receive_packet were introduced in version 58.
#if LIBAVCODEC_VERSION_MAJOR >= 58
-RefPtr<MediaDataEncoder::EncodePromise> FFmpegVideoEncoder<
- LIBAV_VER>::EncodeWithModernAPIs(RefPtr<const VideoData> aSample) {
+Result<MediaDataEncoder::EncodedData, nsresult> FFmpegVideoEncoder<
+ LIBAV_VER>::EncodeInputWithModernAPIs(RefPtr<const MediaData> aSample) {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
MOZ_ASSERT(mCodecContext);
MOZ_ASSERT(aSample);
+ RefPtr<const VideoData> sample(aSample->As<VideoData>());
+
// Validate input.
- if (!aSample->mImage) {
+ if (!sample->mImage) {
FFMPEGV_LOG("No image");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_ILLEGAL_INPUT,
- RESULT_DETAIL("No image in sample")),
- __func__);
- } else if (aSample->mImage->GetSize().IsEmpty()) {
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR);
+ }
+ if (sample->mImage->GetSize().IsEmpty()) {
FFMPEGV_LOG("image width or height is invalid");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_ILLEGAL_INPUT,
- RESULT_DETAIL("Invalid image size")),
- __func__);
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR);
}
// Allocate AVFrame.
if (!PrepareFrame()) {
FFMPEGV_LOG("failed to allocate frame");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Unable to allocate frame")),
- __func__);
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR);
}
// Set AVFrame properties for its internal data allocation. For now, we always
// convert into ffmpeg's buffer.
mFrame->format = ffmpeg::FFMPEG_PIX_FMT_YUV420P;
- mFrame->width = static_cast<int>(aSample->mImage->GetSize().width);
- mFrame->height = static_cast<int>(aSample->mImage->GetSize().height);
+ mFrame->width = static_cast<int>(sample->mImage->GetSize().width);
+ mFrame->height = static_cast<int>(sample->mImage->GetSize().height);
// Allocate AVFrame data.
if (int ret = mLib->av_frame_get_buffer(mFrame, 0); ret < 0) {
FFMPEGV_LOG("failed to allocate frame data: %s",
MakeErrorString(mLib, ret).get());
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Unable to allocate frame data")),
- __func__);
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR);
}
// Make sure AVFrame is writable.
if (int ret = mLib->av_frame_make_writable(mFrame); ret < 0) {
FFMPEGV_LOG("failed to make frame writable: %s",
MakeErrorString(mLib, ret).get());
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_NOT_AVAILABLE,
- RESULT_DETAIL("Unable to make frame writable")),
- __func__);
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR);
}
nsresult rv = ConvertToI420(
- aSample->mImage, mFrame->data[0], mFrame->linesize[0], mFrame->data[1],
+ sample->mImage, mFrame->data[0], mFrame->linesize[0], mFrame->data[1],
mFrame->linesize[1], mFrame->data[2], mFrame->linesize[2]);
if (NS_FAILED(rv)) {
FFMPEGV_LOG("Conversion error!");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_ILLEGAL_INPUT,
- RESULT_DETAIL("libyuv conversion error")),
- __func__);
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR);
}
// Scale the YUV input frame if needed -- the encoded frame will have the
@@ -781,10 +490,8 @@ RefPtr<MediaDataEncoder::EncodePromise> FFmpegVideoEncoder<
if (mFrame->width != mConfig.mSize.Width() ||
mFrame->height != mConfig.mSize.Height()) {
if (!ScaleInputFrame()) {
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("libyuv scaling error")),
- __func__);
+ return Result<MediaDataEncoder::EncodedData, nsresult>(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR);
}
}
@@ -805,193 +512,17 @@ RefPtr<MediaDataEncoder::EncodePromise> FFmpegVideoEncoder<
# endif
mFrame->pkt_duration = aSample->mDuration.ToMicroseconds();
- // Initialize AVPacket.
- AVPacket* pkt = mLib->av_packet_alloc();
-
- if (!pkt) {
- FFMPEGV_LOG("failed to allocate packet");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Unable to allocate packet")),
- __func__);
- }
-
- auto freePacket = MakeScopeExit([this, &pkt] { mLib->av_packet_free(&pkt); });
-
- // Send frame and receive packets.
-
- if (int ret = mLib->avcodec_send_frame(mCodecContext, mFrame); ret < 0) {
- // In theory, avcodec_send_frame could sent -EAGAIN to signal its internal
- // buffers is full. In practice this can't happen as we only feed one frame
- // at a time, and we immediately call avcodec_receive_packet right after.
- // TODO: Create a NS_ERROR_DOM_MEDIA_ENCODE_ERR in ErrorList.py?
- FFMPEGV_LOG("avcodec_send_frame error: %s",
- MakeErrorString(mLib, ret).get());
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("avcodec_send_frame error")),
- __func__);
- }
-
- EncodedData output;
- while (true) {
- int ret = mLib->avcodec_receive_packet(mCodecContext, pkt);
- if (ret == AVERROR(EAGAIN)) {
- // The encoder is asking for more inputs.
- FFMPEGV_LOG("encoder is asking for more input!");
- break;
- }
-
- if (ret < 0) {
- // AVERROR_EOF is returned when the encoder has been fully flushed, but it
- // shouldn't happen here.
- FFMPEGV_LOG("avcodec_receive_packet error: %s",
- MakeErrorString(mLib, ret).get());
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("avcodec_receive_packet error")),
- __func__);
- }
-
- RefPtr<MediaRawData> d = ToMediaRawData(pkt);
- mLib->av_packet_unref(pkt);
- if (!d) {
- FFMPEGV_LOG("failed to create a MediaRawData from the AVPacket");
- return EncodePromise::CreateAndReject(
- MediaResult(
- NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Unable to get MediaRawData from AVPacket")),
- __func__);
- }
- output.AppendElement(std::move(d));
- }
-
- FFMPEGV_LOG("get %zu encoded data", output.Length());
- return EncodePromise::CreateAndResolve(std::move(output), __func__);
+ // Now send the AVFrame to ffmpeg for encoding, same code for audio and video.
+ return FFmpegDataEncoder<LIBAV_VER>::EncodeWithModernAPIs();
}
-
-RefPtr<MediaDataEncoder::EncodePromise>
-FFmpegVideoEncoder<LIBAV_VER>::DrainWithModernAPIs() {
- MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
- MOZ_ASSERT(mCodecContext);
-
- // TODO: Create a Result<EncodedData, nsresult> EncodeWithModernAPIs(AVFrame
- // *aFrame) to merge the duplicate code below with EncodeWithModernAPIs above.
-
- // Initialize AVPacket.
- AVPacket* pkt = mLib->av_packet_alloc();
- if (!pkt) {
- FFMPEGV_LOG("failed to allocate packet");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Unable to allocate packet")),
- __func__);
- }
- auto freePacket = MakeScopeExit([this, &pkt] { mLib->av_packet_free(&pkt); });
-
- // Enter draining mode by sending NULL to the avcodec_send_frame(). Note that
- // this can leave the encoder in a permanent EOF state after draining. As a
- // result, the encoder is unable to continue encoding. A new
- // AVCodecContext/encoder creation is required if users need to encode after
- // draining.
- //
- // TODO: Use `avcodec_flush_buffers` to drain the pending packets if
- // AV_CODEC_CAP_ENCODER_FLUSH is set in mCodecContext->codec->capabilities.
- if (int ret = mLib->avcodec_send_frame(mCodecContext, nullptr); ret < 0) {
- if (ret == AVERROR_EOF) {
- // The encoder has been flushed. Drain can be called multiple time.
- FFMPEGV_LOG("encoder has been flushed!");
- return EncodePromise::CreateAndResolve(EncodedData(), __func__);
- }
-
- FFMPEGV_LOG("avcodec_send_frame error: %s",
- MakeErrorString(mLib, ret).get());
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("avcodec_send_frame error")),
- __func__);
- }
-
- EncodedData output;
- while (true) {
- int ret = mLib->avcodec_receive_packet(mCodecContext, pkt);
- if (ret == AVERROR_EOF) {
- FFMPEGV_LOG("encoder has no more output packet!");
- break;
- }
-
- if (ret < 0) {
- // avcodec_receive_packet should not result in a -EAGAIN once it's in
- // draining mode.
- FFMPEGV_LOG("avcodec_receive_packet error: %s",
- MakeErrorString(mLib, ret).get());
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("avcodec_receive_packet error")),
- __func__);
- }
-
- RefPtr<MediaRawData> d = ToMediaRawData(pkt);
- mLib->av_packet_unref(pkt);
- if (!d) {
- FFMPEGV_LOG("failed to create a MediaRawData from the AVPacket");
- return EncodePromise::CreateAndReject(
- MediaResult(
- NS_ERROR_OUT_OF_MEMORY,
- RESULT_DETAIL("Unable to get MediaRawData from AVPacket")),
- __func__);
- }
- output.AppendElement(std::move(d));
- }
-
- FFMPEGV_LOG("get %zu encoded data", output.Length());
-
- // TODO: Evaluate a better solution (Bug 1869466)
- // TODO: Only re-create AVCodecContext when avcodec_flush_buffers is
- // unavailable.
- ShutdownInternal();
- MediaResult r = InitInternal();
- return NS_FAILED(r)
- ? EncodePromise::CreateAndReject(r, __func__)
- : EncodePromise::CreateAndResolve(std::move(output), __func__);
-}
-#endif
+#endif // if LIBAVCODEC_VERSION_MAJOR >= 58
RefPtr<MediaRawData> FFmpegVideoEncoder<LIBAV_VER>::ToMediaRawData(
AVPacket* aPacket) {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
MOZ_ASSERT(aPacket);
- // TODO: Do we need to check AV_PKT_FLAG_CORRUPT?
-
- // Copy frame data from AVPacket.
- auto data = MakeRefPtr<MediaRawData>();
- UniquePtr<MediaRawDataWriter> writer(data->CreateWriter());
- if (!writer->Append(aPacket->data, static_cast<size_t>(aPacket->size))) {
- FFMPEGV_LOG("fail to allocate MediaRawData buffer");
- return nullptr; // OOM
- }
-
- data->mKeyframe = (aPacket->flags & AV_PKT_FLAG_KEY) != 0;
- // TODO(bug 1869560): The unit of pts, dts, and duration is time_base, which
- // is recommended to be the reciprocal of the frame rate, but we set it to
- // microsecond for now.
- data->mTime = media::TimeUnit::FromMicroseconds(aPacket->pts);
-#if LIBAVCODEC_VERSION_MAJOR >= 60
- data->mDuration = media::TimeUnit::FromMicroseconds(aPacket->duration);
-#else
- int64_t duration;
- if (mDurationMap.Find(aPacket->pts, duration)) {
- data->mDuration = media::TimeUnit::FromMicroseconds(duration);
- } else {
- data->mDuration = media::TimeUnit::FromMicroseconds(aPacket->duration);
- }
-#endif
- data->mTimecode = media::TimeUnit::FromMicroseconds(aPacket->dts);
-
- if (auto r = GetExtraData(aPacket); r.isOk()) {
- data->mExtraData = r.unwrap();
- }
+ RefPtr<MediaRawData> data = ToMediaRawDataCommon(aPacket);
// TODO: Is it possible to retrieve temporal layer id from underlying codec
// instead?
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
index 07c433ddd7..0ee5f52aec 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
@@ -7,10 +7,10 @@
#ifndef DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGVIDEOENCODER_H_
#define DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGVIDEOENCODER_H_
+#include "FFmpegDataEncoder.h"
#include "FFmpegLibWrapper.h"
#include "PlatformEncoderModule.h"
#include "SimpleMap.h"
-#include "mozilla/ThreadSafety.h"
// This must be the last header included
#include "FFmpegLibs.h"
@@ -18,17 +18,10 @@
namespace mozilla {
template <int V>
-AVCodecID GetFFmpegEncoderCodecId(CodecType aCodec);
-
-template <>
-AVCodecID GetFFmpegEncoderCodecId<LIBAV_VER>(CodecType aCodec);
-
-template <int V>
class FFmpegVideoEncoder : public MediaDataEncoder {};
-// TODO: Bug 1860925: FFmpegDataEncoder
template <>
-class FFmpegVideoEncoder<LIBAV_VER> final : public MediaDataEncoder {
+class FFmpegVideoEncoder<LIBAV_VER> : public FFmpegDataEncoder<LIBAV_VER> {
using DurationMap = SimpleMap<int64_t>;
public:
@@ -36,44 +29,19 @@ class FFmpegVideoEncoder<LIBAV_VER> final : public MediaDataEncoder {
const RefPtr<TaskQueue>& aTaskQueue,
const EncoderConfig& aConfig);
- /* MediaDataEncoder Methods */
- // All methods run on the task queue, except for GetDescriptionName.
- RefPtr<InitPromise> Init() override;
- RefPtr<EncodePromise> Encode(const MediaData* aSample) override;
- RefPtr<ReconfigurationPromise> Reconfigure(
- const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges)
- override;
- RefPtr<EncodePromise> Drain() override;
- RefPtr<ShutdownPromise> Shutdown() override;
- RefPtr<GenericPromise> SetBitrate(uint32_t aBitRate) override;
nsCString GetDescriptionName() const override;
- private:
- ~FFmpegVideoEncoder() = default;
-
+ protected:
// Methods only called on mTaskQueue.
- RefPtr<InitPromise> ProcessInit();
- RefPtr<EncodePromise> ProcessEncode(RefPtr<const MediaData> aSample);
- RefPtr<ReconfigurationPromise> ProcessReconfigure(
- const RefPtr<const EncoderConfigurationChangeList> aConfigurationChanges);
- RefPtr<EncodePromise> ProcessDrain();
- RefPtr<ShutdownPromise> ProcessShutdown();
- MediaResult InitInternal();
- void ShutdownInternal();
- // TODO: Share these with FFmpegDataDecoder.
- int OpenCodecContext(const AVCodec* aCodec, AVDictionary** aOptions)
- MOZ_EXCLUDES(sMutex);
- void CloseCodecContext() MOZ_EXCLUDES(sMutex);
- bool PrepareFrame();
- void DestroyFrame();
- bool ScaleInputFrame();
+ virtual nsresult InitSpecific() override;
#if LIBAVCODEC_VERSION_MAJOR >= 58
- RefPtr<EncodePromise> EncodeWithModernAPIs(RefPtr<const VideoData> aSample);
- RefPtr<EncodePromise> DrainWithModernAPIs();
+ Result<EncodedData, nsresult> EncodeInputWithModernAPIs(
+ RefPtr<const MediaData> aSample) override;
#endif
- RefPtr<MediaRawData> ToMediaRawData(AVPacket* aPacket);
+ bool ScaleInputFrame();
+ virtual RefPtr<MediaRawData> ToMediaRawData(AVPacket* aPacket) override;
Result<already_AddRefed<MediaByteBuffer>, nsresult> GetExtraData(
- AVPacket* aPacket);
+ AVPacket* aPacket) override;
void ForceEnablingFFmpegDebugLogs();
struct SVCSettings {
nsTArray<uint8_t> mTemporalLayerIds;
@@ -88,21 +56,6 @@ class FFmpegVideoEncoder<LIBAV_VER> final : public MediaDataEncoder {
nsTArray<std::pair<nsCString, nsCString>> mSettingKeyValuePairs;
};
H264Settings GetH264Settings(const H264Specific& aH264Specific);
-
- // This refers to a static FFmpegLibWrapper, so raw pointer is adequate.
- const FFmpegLibWrapper* mLib;
- const AVCodecID mCodecID;
- const RefPtr<TaskQueue> mTaskQueue;
-
- // set in constructor, modified when parameters change
- EncoderConfig mConfig;
-
- // mTaskQueue only.
- nsCString mCodecName;
- AVCodecContext* mCodecContext;
- AVFrame* mFrame;
- DurationMap mDurationMap;
-
struct SVCInfo {
explicit SVCInfo(nsTArray<uint8_t>&& aTemporalLayerIds)
: mTemporalLayerIds(std::move(aTemporalLayerIds)), mNextIndex(0) {}
@@ -111,13 +64,9 @@ class FFmpegVideoEncoder<LIBAV_VER> final : public MediaDataEncoder {
// Return the current temporal layer id and update the next.
uint8_t UpdateTemporalLayerId();
};
- Maybe<SVCInfo> mSVCInfo;
-
- // Provide critical-section for open/close mCodecContext.
- // TODO: Merge this with FFmpegDataDecoder's one.
- static StaticMutex sMutex;
+ Maybe<SVCInfo> mSVCInfo{};
};
} // namespace mozilla
-#endif /* DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGVIDEOENCODER_H_ */
+#endif // DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGVIDEOENCODER_H_
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/moz.build b/dom/media/platforms/ffmpeg/ffmpeg57/moz.build
index f26edcdc7f..db48b36f6b 100644
--- a/dom/media/platforms/ffmpeg/ffmpeg57/moz.build
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/moz.build
@@ -6,7 +6,9 @@
UNIFIED_SOURCES += [
'../FFmpegAudioDecoder.cpp',
+ '../FFmpegAudioEncoder.cpp',
'../FFmpegDataDecoder.cpp',
+ "../FFmpegDataEncoder.cpp",
'../FFmpegDecoderModule.cpp',
'../FFmpegEncoderModule.cpp',
'../FFmpegVideoDecoder.cpp',
diff --git a/dom/media/platforms/ffmpeg/ffmpeg58/moz.build b/dom/media/platforms/ffmpeg/ffmpeg58/moz.build
index a22bf98abd..12e48c44f0 100644
--- a/dom/media/platforms/ffmpeg/ffmpeg58/moz.build
+++ b/dom/media/platforms/ffmpeg/ffmpeg58/moz.build
@@ -6,7 +6,9 @@
UNIFIED_SOURCES += [
'../FFmpegAudioDecoder.cpp',
+ '../FFmpegAudioEncoder.cpp',
'../FFmpegDataDecoder.cpp',
+ "../FFmpegDataEncoder.cpp",
'../FFmpegDecoderModule.cpp',
'../FFmpegEncoderModule.cpp',
'../FFmpegVideoDecoder.cpp',
diff --git a/dom/media/platforms/ffmpeg/ffmpeg59/moz.build b/dom/media/platforms/ffmpeg/ffmpeg59/moz.build
index e0c6c10ecd..c4f7b89951 100644
--- a/dom/media/platforms/ffmpeg/ffmpeg59/moz.build
+++ b/dom/media/platforms/ffmpeg/ffmpeg59/moz.build
@@ -6,7 +6,9 @@
UNIFIED_SOURCES += [
"../FFmpegAudioDecoder.cpp",
+ '../FFmpegAudioEncoder.cpp',
"../FFmpegDataDecoder.cpp",
+ "../FFmpegDataEncoder.cpp",
"../FFmpegDecoderModule.cpp",
"../FFmpegEncoderModule.cpp",
"../FFmpegVideoDecoder.cpp",
diff --git a/dom/media/platforms/ffmpeg/ffmpeg60/moz.build b/dom/media/platforms/ffmpeg/ffmpeg60/moz.build
index e0c6c10ecd..c4f7b89951 100644
--- a/dom/media/platforms/ffmpeg/ffmpeg60/moz.build
+++ b/dom/media/platforms/ffmpeg/ffmpeg60/moz.build
@@ -6,7 +6,9 @@
UNIFIED_SOURCES += [
"../FFmpegAudioDecoder.cpp",
+ '../FFmpegAudioEncoder.cpp',
"../FFmpegDataDecoder.cpp",
+ "../FFmpegDataEncoder.cpp",
"../FFmpegDecoderModule.cpp",
"../FFmpegEncoderModule.cpp",
"../FFmpegVideoDecoder.cpp",
diff --git a/dom/media/platforms/ffmpeg/ffvpx/moz.build b/dom/media/platforms/ffmpeg/ffvpx/moz.build
index 97a224b08b..bc72b6d1a7 100644
--- a/dom/media/platforms/ffmpeg/ffvpx/moz.build
+++ b/dom/media/platforms/ffmpeg/ffvpx/moz.build
@@ -11,9 +11,12 @@ EXPORTS += [
UNIFIED_SOURCES += [
"../FFmpegAudioDecoder.cpp",
+ "../FFmpegAudioEncoder.cpp",
"../FFmpegDataDecoder.cpp",
+ "../FFmpegDataEncoder.cpp",
"../FFmpegDecoderModule.cpp",
"../FFmpegEncoderModule.cpp",
+ "../FFmpegUtils.cpp",
"../FFmpegVideoDecoder.cpp",
"../FFmpegVideoEncoder.cpp",
]
diff --git a/dom/media/platforms/ffmpeg/libav53/moz.build b/dom/media/platforms/ffmpeg/libav53/moz.build
index 06b226e1f1..81b8b8dcc6 100644
--- a/dom/media/platforms/ffmpeg/libav53/moz.build
+++ b/dom/media/platforms/ffmpeg/libav53/moz.build
@@ -6,7 +6,9 @@
UNIFIED_SOURCES += [
'../FFmpegAudioDecoder.cpp',
+ '../FFmpegAudioEncoder.cpp',
'../FFmpegDataDecoder.cpp',
+ "../FFmpegDataEncoder.cpp",
'../FFmpegDecoderModule.cpp',
'../FFmpegEncoderModule.cpp',
'../FFmpegVideoDecoder.cpp',
diff --git a/dom/media/platforms/ffmpeg/libav54/moz.build b/dom/media/platforms/ffmpeg/libav54/moz.build
index 06b226e1f1..81b8b8dcc6 100644
--- a/dom/media/platforms/ffmpeg/libav54/moz.build
+++ b/dom/media/platforms/ffmpeg/libav54/moz.build
@@ -6,7 +6,9 @@
UNIFIED_SOURCES += [
'../FFmpegAudioDecoder.cpp',
+ '../FFmpegAudioEncoder.cpp',
'../FFmpegDataDecoder.cpp',
+ "../FFmpegDataEncoder.cpp",
'../FFmpegDecoderModule.cpp',
'../FFmpegEncoderModule.cpp',
'../FFmpegVideoDecoder.cpp',
diff --git a/dom/media/platforms/ffmpeg/libav55/moz.build b/dom/media/platforms/ffmpeg/libav55/moz.build
index af2d4f1831..2c3d89b9b3 100644
--- a/dom/media/platforms/ffmpeg/libav55/moz.build
+++ b/dom/media/platforms/ffmpeg/libav55/moz.build
@@ -6,7 +6,9 @@
UNIFIED_SOURCES += [
'../FFmpegAudioDecoder.cpp',
+ '../FFmpegAudioEncoder.cpp',
'../FFmpegDataDecoder.cpp',
+ "../FFmpegDataEncoder.cpp",
'../FFmpegDecoderModule.cpp',
'../FFmpegEncoderModule.cpp',
'../FFmpegVideoDecoder.cpp',
diff --git a/dom/media/platforms/ffmpeg/moz.build b/dom/media/platforms/ffmpeg/moz.build
index f519b30cec..ac78eee289 100644
--- a/dom/media/platforms/ffmpeg/moz.build
+++ b/dom/media/platforms/ffmpeg/moz.build
@@ -18,9 +18,7 @@ DIRS += [
"ffmpeg60",
]
-UNIFIED_SOURCES += [
- "FFmpegRuntimeLinker.cpp",
-]
+UNIFIED_SOURCES += ["FFmpegRuntimeLinker.cpp"]
if CONFIG["MOZ_WIDGET_TOOLKIT"] == "gtk":
include("/ipc/chromium/chromium-config.mozbuild")
diff --git a/dom/media/platforms/moz.build b/dom/media/platforms/moz.build
index 6f71c5cc12..9a4f19aa4b 100644
--- a/dom/media/platforms/moz.build
+++ b/dom/media/platforms/moz.build
@@ -11,6 +11,7 @@ EXPORTS += [
"agnostic/TheoraDecoder.h",
"agnostic/VPXDecoder.h",
"AllocationPolicy.h",
+ "EncoderConfig.h",
"MediaCodecsSupport.h",
"MediaTelemetryConstants.h",
"PDMFactory.h",
@@ -32,6 +33,7 @@ UNIFIED_SOURCES += [
"agnostic/TheoraDecoder.cpp",
"agnostic/VPXDecoder.cpp",
"AllocationPolicy.cpp",
+ "EncoderConfig.cpp",
"MediaCodecsSupport.cpp",
"PDMFactory.cpp",
"PEMFactory.cpp",
diff --git a/dom/media/platforms/wmf/DXVA2Manager.cpp b/dom/media/platforms/wmf/DXVA2Manager.cpp
index 36b424ab8e..9efe9dab55 100644
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -21,6 +21,8 @@
#include "gfxCrashReporterUtils.h"
#include "gfxWindowsPlatform.h"
#include "mfapi.h"
+#include "mozilla/AppShutdown.h"
+#include "mozilla/ClearOnShutdown.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/Telemetry.h"
@@ -122,6 +124,38 @@ using layers::ImageContainer;
using namespace layers;
using namespace gfx;
+StaticRefPtr<ID3D11Device> sDevice;
+StaticMutex sDeviceMutex;
+
+// We found an issue where the ID3D11VideoDecoder won't release its underlying
+// resources properly if the decoder is created from a compositor device by
+// ourselves. This problem has been observed with both VP9 and, reportedly, AV1
+// decoders; it does not seem to affect the H264 decoder, whose underlying
+// decoder is created by the MFT and does not appear to have this issue.
+// Therefore, when checking whether we can use hardware decoding, we should use
+// a non-compositor device to create a decoder in order to prevent resource
+// leaking that can significantly degrade the performance. For the actual
+// decoding, we will still use the compositor device if it's available in order
+// to avoid video copying.
+static ID3D11Device* GetDeviceForDecoderCheck() {
+ StaticMutexAutoLock lock(sDeviceMutex);
+ if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdown)) {
+ return nullptr;
+ }
+ if (!sDevice) {
+ sDevice = gfx::DeviceManagerDx::Get()->CreateDecoderDevice(
+ {DeviceManagerDx::DeviceFlag::disableDeviceReuse});
+ auto clearOnShutdown = [] { ClearOnShutdown(&sDevice); };
+ if (!NS_IsMainThread()) {
+ Unused << NS_DispatchToMainThread(
+ NS_NewRunnableFunction(__func__, clearOnShutdown));
+ } else {
+ clearOnShutdown();
+ }
+ }
+ return sDevice.get();
+}
+
void GetDXVA2ExtendedFormatFromMFMediaType(IMFMediaType* pType,
DXVA2_ExtendedFormat* pFormat) {
// Get the interlace mode.
@@ -362,10 +396,10 @@ class D3D11DXVA2Manager : public DXVA2Manager {
HRESULT CreateOutputSample(RefPtr<IMFSample>& aSample,
ID3D11Texture2D* aTexture);
+  // This is used to check whether hw decoding is possible before using MFT for
+  // decoding.
bool CanCreateDecoder(const D3D11_VIDEO_DECODER_DESC& aDesc) const;
- already_AddRefed<ID3D11VideoDecoder> CreateDecoder(
- const D3D11_VIDEO_DECODER_DESC& aDesc) const;
void RefreshIMFSampleWrappers();
void ReleaseAllIMFSamples();
@@ -618,10 +652,11 @@ D3D11DXVA2Manager::InitInternal(layers::KnowsCompositor* aKnowsCompositor,
mDevice = aDevice;
if (!mDevice) {
- bool useHardwareWebRender =
- aKnowsCompositor && aKnowsCompositor->UsingHardwareWebRender();
- mDevice =
- gfx::DeviceManagerDx::Get()->CreateDecoderDevice(useHardwareWebRender);
+ DeviceManagerDx::DeviceFlagSet flags;
+ if (aKnowsCompositor && aKnowsCompositor->UsingHardwareWebRender()) {
+ flags += DeviceManagerDx::DeviceFlag::isHardwareWebRenderInUse;
+ }
+ mDevice = gfx::DeviceManagerDx::Get()->CreateDecoderDevice(flags);
if (!mDevice) {
aFailureReason.AssignLiteral("Failed to create D3D11 device for decoder");
return E_FAIL;
@@ -1155,20 +1190,26 @@ D3D11DXVA2Manager::ConfigureForSize(IMFMediaType* aInputType,
bool D3D11DXVA2Manager::CanCreateDecoder(
const D3D11_VIDEO_DECODER_DESC& aDesc) const {
- RefPtr<ID3D11VideoDecoder> decoder = CreateDecoder(aDesc);
- return decoder.get() != nullptr;
-}
+ RefPtr<ID3D11Device> device = GetDeviceForDecoderCheck();
+ if (!device) {
+ LOG("Can't create decoder due to lacking of ID3D11Device!");
+ return false;
+ }
-already_AddRefed<ID3D11VideoDecoder> D3D11DXVA2Manager::CreateDecoder(
- const D3D11_VIDEO_DECODER_DESC& aDesc) const {
RefPtr<ID3D11VideoDevice> videoDevice;
- HRESULT hr = mDevice->QueryInterface(
+ HRESULT hr = device->QueryInterface(
static_cast<ID3D11VideoDevice**>(getter_AddRefs(videoDevice)));
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+ if (FAILED(hr)) {
+ LOG("Failed to query ID3D11VideoDevice!");
+ return false;
+ }
UINT configCount = 0;
hr = videoDevice->GetVideoDecoderConfigCount(&aDesc, &configCount);
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+ if (FAILED(hr)) {
+ LOG("Failed to get decoder config count!");
+ return false;
+ }
for (UINT i = 0; i < configCount; i++) {
D3D11_VIDEO_DECODER_CONFIG config;
@@ -1177,10 +1218,10 @@ already_AddRefed<ID3D11VideoDecoder> D3D11DXVA2Manager::CreateDecoder(
RefPtr<ID3D11VideoDecoder> decoder;
hr = videoDevice->CreateVideoDecoder(&aDesc, &config,
decoder.StartAssignment());
- return decoder.forget();
+ return decoder != nullptr;
}
}
- return nullptr;
+ return false;
}
/* static */
diff --git a/dom/media/platforms/wmf/MFCDMSession.cpp b/dom/media/platforms/wmf/MFCDMSession.cpp
index b797898abb..0ae4614f3b 100644
--- a/dom/media/platforms/wmf/MFCDMSession.cpp
+++ b/dom/media/platforms/wmf/MFCDMSession.cpp
@@ -11,6 +11,7 @@
#include "MFMediaEngineUtils.h"
#include "GMPUtils.h" // ToHexString
#include "mozilla/EMEUtils.h"
+#include "mozilla/dom/BindingUtils.h"
#include "mozilla/dom/MediaKeyMessageEventBinding.h"
#include "mozilla/dom/MediaKeyStatusMapBinding.h"
#include "nsThreadUtils.h"
@@ -244,7 +245,7 @@ void MFCDMSession::OnSessionKeysChange() {
nsAutoCString keyIdString(ToHexString(keyId));
LOG("Append keyid-sz=%u, keyid=%s, status=%s", keyStatus.cbKeyId,
keyIdString.get(),
- ToMediaKeyStatusStr(ToMediaKeyStatus(keyStatus.eMediaKeyStatus)));
+ dom::GetEnumString(ToMediaKeyStatus(keyStatus.eMediaKeyStatus)).get());
keyInfos.AppendElement(MFCDMKeyInformation{
std::move(keyId), ToMediaKeyStatus(keyStatus.eMediaKeyStatus)});
}
diff --git a/dom/media/platforms/wmf/MFMediaEngineStream.cpp b/dom/media/platforms/wmf/MFMediaEngineStream.cpp
index 70ffa50142..5875b5a17c 100644
--- a/dom/media/platforms/wmf/MFMediaEngineStream.cpp
+++ b/dom/media/platforms/wmf/MFMediaEngineStream.cpp
@@ -11,6 +11,7 @@
#include "TimeUnits.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/ProfilerMarkerTypes.h"
+#include "mozilla/ScopeExit.h"
#include "WMF.h"
#include "WMFUtils.h"
@@ -126,6 +127,13 @@ HRESULT MFMediaEngineStream::RuntimeClassInitialize(
mTaskQueue = aParentSource->GetTaskQueue();
MOZ_ASSERT(mTaskQueue);
mStreamId = aStreamId;
+
+ auto errorExit = MakeScopeExit([&] {
+ SLOG("Failed to initialize media stream (id=%" PRIu64 ")", aStreamId);
+ mIsShutdown = true;
+ Unused << mMediaEventQueue->Shutdown();
+ });
+
RETURN_IF_FAILED(wmf::MFCreateEventQueue(&mMediaEventQueue));
ComPtr<IMFMediaType> mediaType;
@@ -134,6 +142,7 @@ HRESULT MFMediaEngineStream::RuntimeClassInitialize(
RETURN_IF_FAILED(GenerateStreamDescriptor(mediaType));
SLOG("Initialized %s (id=%" PRIu64 ", descriptorId=%lu)",
GetDescriptionName().get(), aStreamId, mStreamDescriptorId);
+ errorExit.release();
return S_OK;
}
diff --git a/dom/media/platforms/wmf/WMFDataEncoderUtils.h b/dom/media/platforms/wmf/WMFDataEncoderUtils.h
index 7472827b49..19f04e768f 100644
--- a/dom/media/platforms/wmf/WMFDataEncoderUtils.h
+++ b/dom/media/platforms/wmf/WMFDataEncoderUtils.h
@@ -32,7 +32,6 @@ static const GUID CodecToSubtype(CodecType aCodec) {
case CodecType::VP9:
return MFVideoFormat_VP90;
default:
- MOZ_ASSERT(false, "Unsupported codec");
return GUID_NULL;
}
}
diff --git a/dom/media/platforms/wmf/WMFEncoderModule.cpp b/dom/media/platforms/wmf/WMFEncoderModule.cpp
index f9f35db653..7b5af9bf50 100644
--- a/dom/media/platforms/wmf/WMFEncoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFEncoderModule.cpp
@@ -12,6 +12,10 @@ namespace mozilla {
extern LazyLogModule sPEMLog;
bool WMFEncoderModule::SupportsCodec(CodecType aCodecType) const {
+ if (aCodecType > CodecType::_BeginAudio_ &&
+ aCodecType < CodecType::_EndAudio_) {
+ return false;
+ }
return CanCreateWMFEncoder(aCodecType);
}
@@ -19,6 +23,9 @@ bool WMFEncoderModule::Supports(const EncoderConfig& aConfig) const {
if (!CanLikelyEncode(aConfig)) {
return false;
}
+ if (aConfig.IsAudio()) {
+ return false;
+ }
return SupportsCodec(aConfig.mCodec);
}
diff --git a/dom/media/platforms/wmf/WMFUtils.cpp b/dom/media/platforms/wmf/WMFUtils.cpp
index dda9df808e..bf5b8fe67d 100644
--- a/dom/media/platforms/wmf/WMFUtils.cpp
+++ b/dom/media/platforms/wmf/WMFUtils.cpp
@@ -333,7 +333,9 @@ GUID VideoMimeTypeToMediaFoundationSubtype(const nsACString& aMimeType) {
if (MP4Decoder::IsHEVC(aMimeType)) {
return MFVideoFormat_HEVC;
}
- NS_WARNING("Unsupport video mimetype");
+ NS_WARNING(nsAutoCString(nsDependentCString("Unsupported video mimetype ") +
+ aMimeType)
+ .get());
return GUID_NULL;
}
diff --git a/dom/media/platforms/wrappers/MediaChangeMonitor.cpp b/dom/media/platforms/wrappers/MediaChangeMonitor.cpp
index 46989840bf..bb7b015fab 100644
--- a/dom/media/platforms/wrappers/MediaChangeMonitor.cpp
+++ b/dom/media/platforms/wrappers/MediaChangeMonitor.cpp
@@ -800,6 +800,7 @@ RefPtr<ShutdownPromise> MediaChangeMonitor::ShutdownDecoder() {
AssertOnThread();
mConversionRequired.reset();
if (mDecoder) {
+ MutexAutoLock lock(mMutex);
RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
return decoder->Shutdown();
}
@@ -847,6 +848,7 @@ MediaChangeMonitor::CreateDecoder() {
->Then(
GetCurrentSerialEventTarget(), __func__,
[self = RefPtr{this}, this](RefPtr<MediaDataDecoder>&& aDecoder) {
+ MutexAutoLock lock(mMutex);
mDecoder = std::move(aDecoder);
DDLINKCHILD("decoder", mDecoder.get());
return CreateDecoderPromise::CreateAndResolve(true, __func__);
@@ -1095,6 +1097,11 @@ void MediaChangeMonitor::FlushThenShutdownDecoder(
->Track(mFlushRequest);
}
+MediaDataDecoder* MediaChangeMonitor::GetDecoderOnNonOwnerThread() const {
+ MutexAutoLock lock(mMutex);
+ return mDecoder;
+}
+
#undef LOG
} // namespace mozilla
diff --git a/dom/media/platforms/wrappers/MediaChangeMonitor.h b/dom/media/platforms/wrappers/MediaChangeMonitor.h
index a3ee5b5aa0..ff4f6921f6 100644
--- a/dom/media/platforms/wrappers/MediaChangeMonitor.h
+++ b/dom/media/platforms/wrappers/MediaChangeMonitor.h
@@ -41,34 +41,34 @@ class MediaChangeMonitor final
RefPtr<ShutdownPromise> Shutdown() override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
nsCString GetDescriptionName() const override {
- if (mDecoder) {
- return mDecoder->GetDescriptionName();
+ if (RefPtr<MediaDataDecoder> decoder = GetDecoderOnNonOwnerThread()) {
+ return decoder->GetDescriptionName();
}
return "MediaChangeMonitor decoder (pending)"_ns;
}
nsCString GetProcessName() const override {
- if (mDecoder) {
- return mDecoder->GetProcessName();
+ if (RefPtr<MediaDataDecoder> decoder = GetDecoderOnNonOwnerThread()) {
+ return decoder->GetProcessName();
}
return "MediaChangeMonitor"_ns;
}
nsCString GetCodecName() const override {
- if (mDecoder) {
- return mDecoder->GetCodecName();
+ if (RefPtr<MediaDataDecoder> decoder = GetDecoderOnNonOwnerThread()) {
+ return decoder->GetCodecName();
}
return "MediaChangeMonitor"_ns;
}
void SetSeekThreshold(const media::TimeUnit& aTime) override;
bool SupportDecoderRecycling() const override {
- if (mDecoder) {
- return mDecoder->SupportDecoderRecycling();
+ if (RefPtr<MediaDataDecoder> decoder = GetDecoderOnNonOwnerThread()) {
+ return decoder->SupportDecoderRecycling();
}
return false;
}
ConversionRequired NeedsConversion() const override {
- if (mDecoder) {
- return mDecoder->NeedsConversion();
+ if (RefPtr<MediaDataDecoder> decoder = GetDecoderOnNonOwnerThread()) {
+ return decoder->NeedsConversion();
}
// Default so no conversion is performed.
return ConversionRequired::kNeedNone;
@@ -100,6 +100,9 @@ class MediaChangeMonitor final
MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
}
+ // This is used for getting decoder debug info on other threads. Thread-safe.
+ MediaDataDecoder* GetDecoderOnNonOwnerThread() const;
+
bool CanRecycleDecoder() const;
typedef MozPromise<bool, MediaResult, true /* exclusive */>
@@ -140,6 +143,13 @@ class MediaChangeMonitor final
const CreateDecoderParamsForAsync mParams;
// Keep any seek threshold set for after decoder creation and initialization.
Maybe<media::TimeUnit> mPendingSeekThreshold;
+
+  // This lock is used for mDecoder specifically, but it doesn't need to be
+  // held in every place accessing mDecoder, which is mostly on the owner
+  // thread. However, when requesting decoder debug info, it can happen on
+  // other threads, so we need this mutex to avoid the data race between
+  // creating/destroying the decoder and accessing the decoder's debug info.
+ mutable Mutex MOZ_ANNOTATED mMutex{"MediaChangeMonitor"};
};
} // namespace mozilla
diff --git a/dom/media/test/320x240.ogv b/dom/media/test/320x240.ogv
deleted file mode 100644
index 093158432a..0000000000
--- a/dom/media/test/320x240.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/320x240.webm b/dom/media/test/320x240.webm
new file mode 100644
index 0000000000..16ecdbf688
--- /dev/null
+++ b/dom/media/test/320x240.webm
Binary files differ
diff --git a/dom/media/test/320x240.ogv^headers^ b/dom/media/test/320x240.webm^headers^
index 4030ea1d3d..4030ea1d3d 100644
--- a/dom/media/test/320x240.ogv^headers^
+++ b/dom/media/test/320x240.webm^headers^
diff --git a/dom/media/test/448636.ogv b/dom/media/test/448636.ogv
deleted file mode 100644
index 628df924f8..0000000000
--- a/dom/media/test/448636.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bogus.ogv b/dom/media/test/bogus.ogv
deleted file mode 100644
index 528ae275d0..0000000000
--- a/dom/media/test/bogus.ogv
+++ /dev/null
@@ -1,45 +0,0 @@
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
-bogus bogus bogus
diff --git a/dom/media/test/browser/browser_glean_first_frame_loaded_time.js b/dom/media/test/browser/browser_glean_first_frame_loaded_time.js
index 1acfa9957e..2ee5274dae 100644
--- a/dom/media/test/browser/browser_glean_first_frame_loaded_time.js
+++ b/dom/media/test/browser/browser_glean_first_frame_loaded_time.js
@@ -17,7 +17,7 @@ const testCases = [
key_system: undefined,
},
async run(tab) {
- await loadVideo(tab);
+ await loadVideo(tab, "mozfirstframeloadedprobe");
},
},
{
@@ -28,7 +28,7 @@ const testCases = [
key_system: undefined,
},
async run(tab) {
- await loadMseVideo(tab);
+ await loadMseVideo(tab, "mozfirstframeloadedprobe");
},
},
{
@@ -39,11 +39,17 @@ const testCases = [
key_system: "org.w3.clearkey",
},
async run(tab) {
- await loadEmeVideo(tab);
+ await loadEmeVideo(tab, "mozfirstframeloadedprobe");
},
},
];
+add_task(async function setTestPref() {
+ await SpecialPowers.pushPrefEnv({
+ set: [["media.testing-only-events", true]],
+ });
+});
+
add_task(async function testGleanMediaPlayackFirstFrameLoaded() {
for (let test of testCases) {
Services.fog.testResetFOG();
diff --git a/dom/media/test/browser/head.js b/dom/media/test/browser/head.js
index 7ef578a804..489d107be5 100644
--- a/dom/media/test/browser/head.js
+++ b/dom/media/test/browser/head.js
@@ -13,60 +13,94 @@ function openTab() {
// Creates and configures a video element for non-MSE playback in `tab`. Does not
// start playback for the element. Returns a promise that will resolve once
// the element is setup and ready for playback.
-function loadVideo(tab) {
- return SpecialPowers.spawn(tab.linkedBrowser, [], async _ => {
- let video = content.document.createElement("video");
- video.id = "media";
- content.document.body.appendChild(video);
+function loadVideo(tab, extraEvent = undefined) {
+ return SpecialPowers.spawn(
+ tab.linkedBrowser,
+ [extraEvent],
+ async _extraEvent => {
+ let video = content.document.createElement("video");
+ video.id = "media";
+ content.document.body.appendChild(video);
- video.src = "gizmo.mp4";
- video.load();
+ video.src = "gizmo.mp4";
+ video.load();
- info(`waiting 'loadeddata' event to ensure playback is ready`);
- await new Promise(r => (video.onloadeddata = r));
- });
+ info(`waiting 'loadeddata' event to ensure playback is ready`);
+ let promises = [];
+ promises.push(new Promise(r => (video.onloadeddata = r)));
+ if (_extraEvent != undefined) {
+ info(
+ `waiting '${_extraEvent}' event to ensure the probe has been recorded`
+ );
+ promises.push(
+ new Promise(r =>
+ video.addEventListener(_extraEvent, r, { once: true })
+ )
+ );
+ }
+ await Promise.allSettled(promises);
+ }
+ );
}
// Creates and configures a video element for MSE playback in `tab`. Does not
// start playback for the element. Returns a promise that will resolve once
// the element is setup and ready for playback.
-function loadMseVideo(tab) {
- return SpecialPowers.spawn(tab.linkedBrowser, [], async _ => {
- async function once(target, name) {
- return new Promise(r => target.addEventListener(name, r, { once: true }));
- }
+function loadMseVideo(tab, extraEvent = undefined) {
+ return SpecialPowers.spawn(
+ tab.linkedBrowser,
+ [extraEvent],
+ async _extraEvent => {
+ async function once(target, name) {
+ return new Promise(r =>
+ target.addEventListener(name, r, { once: true })
+ );
+ }
- let video = content.document.createElement("video");
- video.id = "media";
- content.document.body.appendChild(video);
+ let video = content.document.createElement("video");
+ video.id = "media";
+ content.document.body.appendChild(video);
- info(`starting setup MSE`);
- const ms = new content.wrappedJSObject.MediaSource();
- video.src = content.wrappedJSObject.URL.createObjectURL(ms);
- await once(ms, "sourceopen");
- const sb = ms.addSourceBuffer("video/mp4");
- const videoFile = "bipbop2s.mp4";
- let fetchResponse = await content.fetch(videoFile);
- sb.appendBuffer(await fetchResponse.arrayBuffer());
- await once(sb, "updateend");
- ms.endOfStream();
- await once(ms, "sourceended");
+ info(`starting setup MSE`);
+ const ms = new content.wrappedJSObject.MediaSource();
+ video.src = content.wrappedJSObject.URL.createObjectURL(ms);
+ await once(ms, "sourceopen");
+ const sb = ms.addSourceBuffer("video/mp4");
+ const videoFile = "bipbop2s.mp4";
+ let fetchResponse = await content.fetch(videoFile);
+ sb.appendBuffer(await fetchResponse.arrayBuffer());
+ await once(sb, "updateend");
+ ms.endOfStream();
+ await once(ms, "sourceended");
- info(`waiting 'loadeddata' event to ensure playback is ready`);
- await once(video, "loadeddata");
- });
+ info(`waiting 'loadeddata' event to ensure playback is ready`);
+ let promises = [];
+ promises.push(once(video, "loadeddata"));
+ if (_extraEvent != undefined) {
+ info(
+ `waiting '${_extraEvent}' event to ensure the probe has been recorded`
+ );
+ promises.push(
+ new Promise(r =>
+ video.addEventListener(_extraEvent, r, { once: true })
+ )
+ );
+ }
+ await Promise.allSettled(promises);
+ }
+ );
}
// Creates and configures a video element for EME playback in `tab`. Does not
// start playback for the element. Returns a promise that will resolve once
// the element is setup and ready for playback.
-function loadEmeVideo(tab) {
+function loadEmeVideo(tab, extraEvent = undefined) {
const emeHelperUri =
gTestPath.substr(0, gTestPath.lastIndexOf("/")) + "/eme_standalone.js";
return SpecialPowers.spawn(
tab.linkedBrowser,
- [emeHelperUri],
- async _emeHelperUri => {
+ [emeHelperUri, extraEvent],
+ async (_emeHelperUri, _extraEvent) => {
async function once(target, name) {
return new Promise(r =>
target.addEventListener(name, r, { once: true })
@@ -113,7 +147,19 @@ function loadEmeVideo(tab) {
await once(ms, "sourceended");
info(`waiting 'loadeddata' event to ensure playback is ready`);
- await once(video, "loadeddata");
+ let promises = [];
+ promises.push(once(video, "loadeddata"));
+ if (_extraEvent != undefined) {
+ info(
+ `waiting '${_extraEvent}' event to ensure the probe has been recorded`
+ );
+ promises.push(
+ new Promise(r =>
+ video.addEventListener(_extraEvent, r, { once: true })
+ )
+ );
+ }
+ await Promise.allSettled(promises);
}
);
}
diff --git a/dom/media/test/bug482461-theora.ogv b/dom/media/test/bug482461-theora.ogv
deleted file mode 100644
index 941b8d8efd..0000000000
--- a/dom/media/test/bug482461-theora.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug482461.ogv b/dom/media/test/bug482461.ogv
deleted file mode 100644
index 6cf6aed330..0000000000
--- a/dom/media/test/bug482461.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug495129.ogv b/dom/media/test/bug495129.ogv
deleted file mode 100644
index 44eb9296f5..0000000000
--- a/dom/media/test/bug495129.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug498380.ogv b/dom/media/test/bug498380.ogv
deleted file mode 100644
index 1179ecb70a..0000000000
--- a/dom/media/test/bug498380.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug498855-1.ogv b/dom/media/test/bug498855-1.ogv
deleted file mode 100644
index 95a524da4c..0000000000
--- a/dom/media/test/bug498855-1.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug498855-2.ogv b/dom/media/test/bug498855-2.ogv
deleted file mode 100644
index 795a308ae1..0000000000
--- a/dom/media/test/bug498855-2.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug498855-3.ogv b/dom/media/test/bug498855-3.ogv
deleted file mode 100644
index 714858dfed..0000000000
--- a/dom/media/test/bug498855-3.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug499519.ogv b/dom/media/test/bug499519.ogv
deleted file mode 100644
index 62c0922d36..0000000000
--- a/dom/media/test/bug499519.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug500311.ogv b/dom/media/test/bug500311.ogv
deleted file mode 100644
index 2cf27ef1ee..0000000000
--- a/dom/media/test/bug500311.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug504613.ogv b/dom/media/test/bug504613.ogv
deleted file mode 100644
index 5c7fd015e9..0000000000
--- a/dom/media/test/bug504613.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug504644.ogv b/dom/media/test/bug504644.ogv
deleted file mode 100644
index 46fb4a876b..0000000000
--- a/dom/media/test/bug504644.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug504843.ogv b/dom/media/test/bug504843.ogv
deleted file mode 100644
index 94b4750865..0000000000
--- a/dom/media/test/bug504843.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug506094.ogv b/dom/media/test/bug506094.ogv
deleted file mode 100644
index 142b7b9ad1..0000000000
--- a/dom/media/test/bug506094.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug516323.indexed.ogv b/dom/media/test/bug516323.indexed.ogv
deleted file mode 100644
index 7bd76eeccc..0000000000
--- a/dom/media/test/bug516323.indexed.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug516323.ogv b/dom/media/test/bug516323.ogv
deleted file mode 100644
index 8f2f38b983..0000000000
--- a/dom/media/test/bug516323.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug523816.ogv b/dom/media/test/bug523816.ogv
deleted file mode 100644
index ca9a31b6da..0000000000
--- a/dom/media/test/bug523816.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug556821.ogv b/dom/media/test/bug556821.ogv
deleted file mode 100644
index 8d76fee45e..0000000000
--- a/dom/media/test/bug556821.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/bug557094.ogv b/dom/media/test/bug557094.ogv
deleted file mode 100644
index b4fc0799a6..0000000000
--- a/dom/media/test/bug557094.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/can_play_type_ogg.js b/dom/media/test/can_play_type_ogg.js
index 6572ab7c0f..e2c974d601 100644
--- a/dom/media/test/can_play_type_ogg.js
+++ b/dom/media/test/can_play_type_ogg.js
@@ -5,18 +5,23 @@ function check_ogg(v, enabled, finish) {
function basic_test() {
return new Promise(function (resolve) {
- // Ogg types
- check("video/ogg", "maybe");
+ if (SpecialPowers.getBoolPref("media.theora.enabled")) {
+ check("video/ogg", "maybe");
+ check("video/ogg; codecs=vorbis", "probably");
+ check("video/ogg; codecs=vorbis,theora", "probably");
+ check('video/ogg; codecs="vorbis, theora"', "probably");
+ check("video/ogg; codecs=theora", "probably");
+ } else {
+ check("video/ogg", "");
+ check("video/ogg; codecs=vorbis", "");
+ check("video/ogg; codecs=vorbis,theora", "");
+ check('video/ogg; codecs="vorbis, theora"', "");
+ check("video/ogg; codecs=theora", "");
+ }
check("audio/ogg", "maybe");
check("application/ogg", "maybe");
- // Supported Ogg codecs
check("audio/ogg; codecs=vorbis", "probably");
- check("video/ogg; codecs=vorbis", "probably");
- check("video/ogg; codecs=vorbis,theora", "probably");
- check('video/ogg; codecs="vorbis, theora"', "probably");
- check("video/ogg; codecs=theora", "probably");
-
resolve();
});
}
diff --git a/dom/media/test/chained-video.ogv b/dom/media/test/chained-video.ogv
deleted file mode 100644
index a6288ef6c9..0000000000
--- a/dom/media/test/chained-video.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/crashtests/576612-1.html b/dom/media/test/crashtests/576612-1.html
deleted file mode 100644
index 04f993e780..0000000000
--- a/dom/media/test/crashtests/576612-1.html
+++ /dev/null
@@ -1,15 +0,0 @@
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<script>
-function boom()
-{
-
- var v = document.getElementById("v");
- v.src = "data:text/plain,_";
- document.documentElement.appendChild(v);
-
-}
-</script>
-</head>
-<body onload="boom();"><video id="v" src="data:video/ogg;codecs=&quot;theora,vorbis&quot;,1"></video></body>
-</html>
diff --git a/dom/media/test/make-headers.sh b/dom/media/test/make-headers.sh
index 35d9bd90f8..d2ac0610e2 100644
--- a/dom/media/test/make-headers.sh
+++ b/dom/media/test/make-headers.sh
@@ -8,9 +8,9 @@
# requests is not interferred with by Necko's cache. See bug 977398
# for details. Necko will fix this in bug 977314.
-FILES=(`ls *.ogg *.ogv *.webm *.mp3 *.opus *.mp4 *.m4s *.wav`)
+FILES=(`ls *.ogg *.webm *.mp3 *.opus *.mp4 *.m4s *.wav`)
-rm -f *.ogg^headers^ *.ogv^headers^ *.webm^headers^ *.mp3^headers^ *.opus^headers^ *.mp4^headers^ *.m4s^headers^ *.wav^headers^
+rm -f *.ogg^headers^ *.webm^headers^ *.mp3^headers^ *.opus^headers^ *.mp4^headers^ *.m4s^headers^ *.wav^headers^
for i in "${FILES[@]}"
do
diff --git a/dom/media/test/manifest.js b/dom/media/test/manifest.js
index c357309021..686e5b07ed 100644
--- a/dom/media/test/manifest.js
+++ b/dom/media/test/manifest.js
@@ -52,14 +52,6 @@ var gSmallTests = [
{ name: "small-shot.flac", type: "audio/flac", duration: 0.197 },
{ name: "r11025_s16_c1-short.wav", type: "audio/x-wav", duration: 0.37 },
{
- name: "320x240.ogv",
- type: "video/ogg",
- width: 320,
- height: 240,
- duration: 0.266,
- contentDuration: 0.133,
- },
- {
name: "seek-short.webm",
type: "video/webm",
width: 320,
@@ -94,7 +86,6 @@ var gFrameCountTests = [
{ name: "gizmo.mp4", type: "video/mp4", totalFrameCount: 166 },
{ name: "seek-short.webm", type: "video/webm", totalFrameCount: 8 },
{ name: "seek.webm", type: "video/webm", totalFrameCount: 120 },
- { name: "320x240.ogv", type: "video/ogg", totalFrameCount: 8 },
{ name: "av1.mp4", type: "video/mp4", totalFrameCount: 24 },
];
@@ -106,13 +97,6 @@ gSmallTests = gSmallTests.concat([
// Used by test_bug654550.html, for videoStats preference
var gVideoTests = [
{
- name: "320x240.ogv",
- type: "video/ogg",
- width: 320,
- height: 240,
- duration: 0.266,
- },
- {
name: "seek-short.webm",
type: "video/webm",
width: 320,
@@ -145,15 +129,6 @@ var gLongerTests = [
var gProgressTests = [
{ name: "r11025_u8_c1.wav", type: "audio/x-wav", duration: 1.0, size: 11069 },
{ name: "big-short.wav", type: "audio/x-wav", duration: 1.11, size: 12366 },
- { name: "seek-short.ogv", type: "video/ogg", duration: 1.03, size: 79921 },
- {
- name: "320x240.ogv",
- type: "video/ogg",
- width: 320,
- height: 240,
- duration: 0.266,
- size: 28942,
- },
{ name: "seek-short.webm", type: "video/webm", duration: 0.23, size: 19267 },
{ name: "gizmo-short.mp4", type: "video/mp4", duration: 0.27, size: 29905 },
{ name: "bogus.duh", type: "bogus/duh" },
@@ -162,7 +137,6 @@ var gProgressTests = [
// Used by test_played.html
var gPlayedTests = [
{ name: "big-short.wav", type: "audio/x-wav", duration: 1.11 },
- { name: "seek-short.ogv", type: "video/ogg", duration: 1.03 },
{ name: "seek-short.webm", type: "video/webm", duration: 0.23 },
{ name: "gizmo-short.mp4", type: "video/mp4", duration: 0.27 },
{ name: "owl-short.mp3", type: "audio/mpeg", duration: 0.52 },
@@ -187,14 +161,14 @@ if (
// anything for testing clone-specific bugs.
var cloneKey = Math.floor(Math.random() * 100000000);
var gCloneTests = [
- // short-video is more like 1s, so if you load this twice you'll get an unexpected duration
+ // vp9.webm is more like 4s, so if you load this twice you'll get an unexpected duration
{
name:
"dynamic_resource.sjs?key=" +
cloneKey +
- "&res1=320x240.ogv&res2=short-video.ogv",
- type: "video/ogg",
- duration: 0.266,
+ "&res1=seek-short.webm&res2=vp9.webm",
+ type: "video/webm",
+ duration: 0.23,
},
];
@@ -223,23 +197,6 @@ var gTrackTests = [
hasVideo: false,
},
{
- name: "320x240.ogv",
- type: "video/ogg",
- width: 320,
- height: 240,
- duration: 0.266,
- size: 28942,
- hasAudio: false,
- hasVideo: true,
- },
- {
- name: "short-video.ogv",
- type: "video/ogg",
- duration: 1.081,
- hasAudio: true,
- hasVideo: true,
- },
- {
name: "seek-short.webm",
type: "video/webm",
duration: 0.23,
@@ -257,10 +214,6 @@ var gTrackTests = [
{ name: "bogus.duh", type: "bogus/duh" },
];
-var gClosingConnectionsTest = [
- { name: "seek-short.ogv", type: "video/ogg", duration: 1.03 },
-];
-
// Used by any media recorder test. Need one test file per decoder backend
// currently supported by the media encoder.
var gMediaRecorderTests = [
@@ -318,36 +271,6 @@ var gPlayTests = [
// Data length 0xFFFFFFFF and odd chunk lengths.
{ name: "bug1301226-odd.wav", type: "audio/x-wav", duration: 0.003673 },
- // Ogg stream without eof marker
- { name: "bug461281.ogg", type: "application/ogg", duration: 2.208 },
-
- // oggz-chop stream
- { name: "bug482461.ogv", type: "video/ogg", duration: 4.34 },
- // Theora only oggz-chop stream
- { name: "bug482461-theora.ogv", type: "video/ogg", duration: 4.138 },
- // With first frame a "duplicate" (empty) frame.
- {
- name: "bug500311.ogv",
- type: "video/ogg",
- duration: 1.96,
- contentDuration: 1.958,
- },
- // Small audio file
- { name: "small-shot.ogg", type: "audio/ogg", duration: 0.276 },
- // More audio in file than video.
- { name: "short-video.ogv", type: "video/ogg", duration: 1.081 },
- // First Theora data packet is zero bytes.
- { name: "bug504613.ogv", type: "video/ogg", duration: Number.NaN },
- // Multiple audio streams.
- { name: "bug516323.ogv", type: "video/ogg", duration: 4.208 },
- // oggz-chop with non-keyframe as first frame
- {
- name: "bug556821.ogv",
- type: "video/ogg",
- duration: 2.936,
- contentDuration: 2.903,
- },
-
// Encoded with vorbis beta1, includes unusually sized codebooks
{ name: "beta-phrasebook.ogg", type: "audio/ogg", duration: 4.01 },
// Small file, only 1 frame with audio only.
@@ -355,45 +278,7 @@ var gPlayTests = [
// Small file with vorbis comments with 0 length values and names.
{ name: "bug520500.ogg", type: "audio/ogg", duration: 0.123 },
- // Various weirdly formed Ogg files
- {
- name: "bug499519.ogv",
- type: "video/ogg",
- duration: 0.24,
- contentDuration: 0.22,
- },
- { name: "bug506094.ogv", type: "video/ogg", duration: 0 },
- { name: "bug498855-1.ogv", type: "video/ogg", duration: 0.24 },
- { name: "bug498855-2.ogv", type: "video/ogg", duration: 0.24 },
- { name: "bug498855-3.ogv", type: "video/ogg", duration: 0.24 },
- {
- name: "bug504644.ogv",
- type: "video/ogg",
- duration: 1.6,
- contentDuration: 1.52,
- },
- {
- name: "chain.ogv",
- type: "video/ogg",
- duration: Number.NaN,
- contentDuration: 0.266,
- },
- {
- name: "bug523816.ogv",
- type: "video/ogg",
- duration: 0.766,
- contentDuration: 0,
- },
- { name: "bug495129.ogv", type: "video/ogg", duration: 2.41 },
- {
- name: "bug498380.ogv",
- type: "video/ogg",
- duration: 0.7663,
- contentDuration: 0,
- },
{ name: "bug495794.ogg", type: "audio/ogg", duration: 0.3 },
- { name: "bug557094.ogv", type: "video/ogg", duration: 0.24 },
- { name: "multiple-bos.ogg", type: "video/ogg", duration: 0.431 },
{ name: "audio-overhang.ogg", type: "video/ogg", duration: 2.3 },
{ name: "video-overhang.ogg", type: "video/ogg", duration: 3.966 },
@@ -402,9 +287,8 @@ var gPlayTests = [
// Test playback/metadata work after a redirect
{
- name: "redirect.sjs?domain=mochi.test:8888&file=320x240.ogv",
- type: "video/ogg",
- duration: 0.266,
+ name: "redirect.sjs?domain=mochi.test:8888&file=vp9.webm",
+ type: "video/webm",
},
// Test playback of a webm file
@@ -559,14 +443,6 @@ var gPlayTests = [
duration: 4.95,
contentDuration: 5.03,
},
- // Ogg with theora video and flac audio.
- {
- name: "A4.ogv",
- type: "video/ogg",
- width: 320,
- height: 240,
- duration: 3.13,
- },
// A file that has no codec delay at the container level, but has a delay at
// the codec level.
{
@@ -651,37 +527,11 @@ var gSeekToNextFrameTests = [
// Test playback of a WebM file with vp9 video
{ name: "vp9-short.webm", type: "video/webm", duration: 0.2 },
{ name: "vp9cake-short.webm", type: "video/webm", duration: 1.0 },
- // oggz-chop stream
- { name: "bug482461.ogv", type: "video/ogg", duration: 4.34 },
- // Theora only oggz-chop stream
- { name: "bug482461-theora.ogv", type: "video/ogg", duration: 4.138 },
- // With first frame a "duplicate" (empty) frame.
- { name: "bug500311.ogv", type: "video/ogg", duration: 1.96 },
-
- // More audio in file than video.
- { name: "short-video.ogv", type: "video/ogg", duration: 1.081 },
- // First Theora data packet is zero bytes.
- { name: "bug504613.ogv", type: "video/ogg", duration: Number.NaN },
- // Multiple audio streams.
- { name: "bug516323.ogv", type: "video/ogg", duration: 4.208 },
- // oggz-chop with non-keyframe as first frame
- { name: "bug556821.ogv", type: "video/ogg", duration: 2.936 },
- // Various weirdly formed Ogg files
- { name: "bug498855-1.ogv", type: "video/ogg", duration: 0.24 },
- { name: "bug498855-2.ogv", type: "video/ogg", duration: 0.24 },
- { name: "bug498855-3.ogv", type: "video/ogg", duration: 0.24 },
- { name: "bug504644.ogv", type: "video/ogg", duration: 1.6 },
-
- { name: "bug523816.ogv", type: "video/ogg", duration: 0.766 },
-
- { name: "bug498380.ogv", type: "video/ogg", duration: 0.2 },
- { name: "bug557094.ogv", type: "video/ogg", duration: 0.24 },
- { name: "multiple-bos.ogg", type: "video/ogg", duration: 0.431 },
+
// Test playback/metadata work after a redirect
{
- name: "redirect.sjs?domain=mochi.test:8888&file=320x240.ogv",
- type: "video/ogg",
- duration: 0.266,
+ name: "redirect.sjs?domain=mochi.test:8888&file=vp9.webm",
+ type: "video/webm",
},
// Test playback of a webm file
{ name: "seek-short.webm", type: "video/webm", duration: 0.23 },
@@ -698,14 +548,6 @@ var gSeekToNextFrameTests = [
// A file for each type we can support.
var gSnifferTests = [
{ name: "big.wav", type: "audio/x-wav", duration: 9.278982, size: 102444 },
- {
- name: "320x240.ogv",
- type: "video/ogg",
- width: 320,
- height: 240,
- duration: 0.233,
- size: 28942,
- },
{ name: "seek.webm", type: "video/webm", duration: 3.966, size: 215529 },
{ name: "gizmo.mp4", type: "video/mp4", duration: 5.56, size: 383631 },
// A mp3 file with id3 tags.
@@ -743,49 +585,20 @@ var gInvalidPlayTests = [
];
// Files to check different cases of ogg skeleton information.
-// sample-fisbone-skeleton4.ogv
-// - Skeleton v4, w/ Content-Type,Role,Name,Language,Title for both theora/vorbis
-// sample-fisbone-wrong-header.ogv
-// - Skeleton v4, wrong message field sequence for vorbis
// multiple-bos-more-header-fields.ogg
// - Skeleton v3, w/ Content-Type,Role,Name,Language,Title for both theora/vorbis
-// seek-short.ogv
-// - No skeleton, but theora
// audio-gaps-short.ogg
// - No skeleton, but vorbis
var gMultitrackInfoOggPlayList = [
- { name: "sample-fisbone-skeleton4.ogv", type: "video/ogg", duration: 1.0 },
- { name: "sample-fisbone-wrong-header.ogv", type: "video/ogg", duration: 1.0 },
{
name: "multiple-bos-more-header-fileds.ogg",
type: "video/ogg",
duration: 0.431,
},
- { name: "seek-short.ogv", type: "video/ogg", duration: 1.03 },
{ name: "audio-gaps-short.ogg", type: "audio/ogg", duration: 0.5 },
];
// Pre-parsed results of gMultitrackInfoOggPlayList.
var gOggTrackInfoResults = {
- "sample-fisbone-skeleton4.ogv": {
- audio_id: " audio_1",
- audio_kind: "main",
- audio_language: " en-US",
- audio_label: " Audio track for test",
- video_id: " video_1",
- video_kind: "main",
- video_language: " fr",
- video_label: " Video track for test",
- },
- "sample-fisbone-wrong-header.ogv": {
- audio_id: "1",
- audio_kind: "main",
- audio_language: "",
- audio_label: "",
- video_id: " video_1",
- video_kind: "main",
- video_language: " fr",
- video_label: " Video track for test",
- },
"multiple-bos-more-header-fileds.ogg": {
audio_id: "1",
audio_kind: "main",
@@ -796,12 +609,6 @@ var gOggTrackInfoResults = {
video_language: "",
video_label: "",
},
- "seek-short.ogv": {
- video_id: "2",
- video_kind: "main",
- video_language: "",
- video_label: "",
- },
"audio-gaps-short.ogg": {
audio_id: "1",
audio_kind: "main",
@@ -865,14 +672,6 @@ function range_equals(r1, r2) {
function makeInfoLeakTests() {
return makeAbsolutePathConverter().then(fileUriToSrc => [
{
- type: "video/ogg",
- src: fileUriToSrc("tests/dom/media/test/320x240.ogv", true),
- },
- {
- type: "video/ogg",
- src: fileUriToSrc("tests/dom/media/test/404.ogv", false),
- },
- {
type: "audio/x-wav",
src: fileUriToSrc("tests/dom/media/test/r11025_s16_c1.wav", true),
},
@@ -882,10 +681,6 @@ function makeInfoLeakTests() {
},
{
type: "audio/ogg",
- src: fileUriToSrc("tests/dom/media/test/bug461281.ogg", true),
- },
- {
- type: "audio/ogg",
src: fileUriToSrc("tests/dom/media/test/404.ogg", false),
},
{
@@ -897,10 +692,6 @@ function makeInfoLeakTests() {
src: fileUriToSrc("tests/dom/media/test/404.webm", false),
},
{
- type: "video/ogg",
- src: "http://localhost/404.ogv",
- },
- {
type: "audio/x-wav",
src: "http://localhost/404.wav",
},
@@ -925,9 +716,6 @@ function makeInfoLeakTests() {
// something crashes we have some idea of which backend is responsible.
var gErrorTests = [
{ name: "bogus.wav", type: "audio/x-wav" },
- { name: "bogus.ogv", type: "video/ogg" },
- { name: "448636.ogv", type: "video/ogg" },
- { name: "bug504843.ogv", type: "video/ogg" },
{ name: "bug501279.ogg", type: "audio/ogg" },
{ name: "bug604067.webm", type: "video/webm" },
{ name: "bug1535980.webm", type: "video/webm" },
@@ -943,11 +731,8 @@ var gDurationTests = [{ name: "bug604067.webm", duration: 6.076 }];
var gSeekTests = [
{ name: "r11025_s16_c1.wav", type: "audio/x-wav", duration: 1.0 },
{ name: "audio.wav", type: "audio/x-wav", duration: 0.031247 },
- { name: "seek.ogv", type: "video/ogg", duration: 3.966 },
- { name: "320x240.ogv", type: "video/ogg", duration: 0.266 },
{ name: "seek.webm", type: "video/webm", duration: 3.966 },
{ name: "sine.webm", type: "audio/webm", duration: 4.001 },
- { name: "bug516323.indexed.ogv", type: "video/ogg", duration: 4.208333 },
{ name: "split.webm", type: "video/webm", duration: 1.967 },
{ name: "detodos.opus", type: "audio/ogg; codecs=opus", duration: 2.9135 },
{ name: "gizmo.mp4", type: "video/mp4", duration: 5.56 },
@@ -958,9 +743,6 @@ var gSeekTests = [
},
{ name: "owl.mp3", type: "audio/mpeg", duration: 3.343 },
{ name: "bogus.duh", type: "bogus/duh", duration: 123 },
-
- // Bug 1242338: hit a numerical problem while seeking to the duration.
- { name: "bug482461-theora.ogv", type: "video/ogg", duration: 4.138 },
];
var gFastSeekTests = [
@@ -971,14 +753,6 @@ var gFastSeekTests = [
},
// Note: Not all keyframes in the file are actually referenced in the Cues in this file.
{ name: "seek.webm", type: "video/webm", keyframes: [0, 0.8, 1.6, 2.4, 3.2] },
- // Note: the sync points are the points on both the audio and video streams
- // before the keyframes. You can't just assume that the keyframes are the sync
- // points, as the audio required for that sync point may be before the keyframe.
- {
- name: "bug516323.indexed.ogv",
- type: "video/ogg",
- keyframes: [0, 0.46, 3.06],
- },
];
// These files are WebMs without cues. They're seekable within their buffered
@@ -1019,7 +793,6 @@ var gAudioTests = [
// various backends.
var g404Tests = [
{ name: "404.wav", type: "audio/x-wav" },
- { name: "404.ogv", type: "video/ogg" },
{ name: "404.oga", type: "audio/ogg" },
{ name: "404.webm", type: "video/webm" },
{ name: "bogus.duh", type: "bogus/duh" },
@@ -1034,7 +807,6 @@ var gDecodeErrorTests = [
{ name: "dirac.ogg", type: "video/ogg" },
// Invalid files
{ name: "bogus.wav", type: "audio/x-wav" },
- { name: "bogus.ogv", type: "video/ogg" },
{ name: "bogus.duh", type: "bogus/duh" },
];
@@ -1062,12 +834,6 @@ var gChainingTests = [
// original sample rate, so we can safely play Opus chained media that have
// different samplerate accross links.
{ name: "variable-samplerate.opus", type: "audio/ogg; codec=opus", links: 2 },
- // A chained video file. We don't support those, so only one link should be
- // reported.
- { name: "chained-video.ogv", type: "video/ogg", links: 1 },
- // A file that consist in 4 links of audio, then another link that has video.
- // We should stop right after the 4 audio links.
- { name: "chained-audio-video.ogg", type: "video/ogg", links: 4 },
// An opus file that has two links, with a different preskip value for each
// link. We should be able to play both links.
{ name: "variable-preskip.opus", type: "audio/ogg; codec=opus", links: 2 },
@@ -1086,36 +852,6 @@ var gAspectRatioTests = [
var gMetadataTests = [
// Ogg Vorbis files
{
- name: "short-video.ogv",
- tags: {
- TITLE: "Lepidoptera",
- ARTIST: "Epoq",
- ALBUM: "Kahvi Collective",
- DATE: "2002",
- COMMENT: "http://www.kahvi.org",
- },
- },
- {
- name: "bug516323.ogv",
- tags: {
- GENRE: "Open Movie",
- ENCODER: "Audacity",
- TITLE: "Elephants Dream",
- ARTIST: "Silvia Pfeiffer",
- COMMENTS: "Audio Description",
- },
- },
- {
- name: "bug516323.indexed.ogv",
- tags: {
- GENRE: "Open Movie",
- ENCODER: "Audacity",
- TITLE: "Elephants Dream",
- ARTIST: "Silvia Pfeiffer",
- COMMENTS: "Audio Description",
- },
- },
- {
name: "detodos.opus",
tags: {
title: "De todos. Para todos.",
@@ -2208,7 +1944,6 @@ var gDecodeSuspendTests = [
// durations that are looped while we check telemetry for macOS video
// low power mode.
var gVideoLowPowerTests = [
- { name: "seek.ogv", type: "video/ogg", duration: 3.966 },
{ name: "gizmo.mp4", type: "video/mp4", duration: 5.56 },
];
diff --git a/dom/media/test/mochitest.toml b/dom/media/test/mochitest.toml
index 3c8a382766..99bd1c41c8 100644
--- a/dom/media/test/mochitest.toml
+++ b/dom/media/test/mochitest.toml
@@ -31,12 +31,7 @@ prefs = [
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
+ "320x240.webm",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -410,63 +405,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -487,14 +438,10 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
"chained-audio-video.ogg",
"chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -575,8 +522,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -631,14 +576,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -651,7 +588,6 @@ support-files = [
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
"short-cenc.mp4",
- "short-video.ogv",
"short.mp4",
"short.mp4.gz",
"short.mp4^headers^",
@@ -678,7 +614,6 @@ support-files = [
"sintel-short-clearkey-subsample-encrypted-audio.webm^headers^",
"sintel-short-clearkey-subsample-encrypted-video.webm",
"sintel-short-clearkey-subsample-encrypted-video.webm^headers^",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
diff --git a/dom/media/test/mochitest_background_video.toml b/dom/media/test/mochitest_background_video.toml
index e1bc542264..6eed2c3eb8 100644
--- a/dom/media/test/mochitest_background_video.toml
+++ b/dom/media/test/mochitest_background_video.toml
@@ -27,12 +27,6 @@ tags = "suspend media-gpu"
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -406,63 +400,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -481,14 +431,8 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
- "chained-audio-video.ogg",
- "chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -568,8 +512,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -624,14 +566,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -653,8 +587,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
diff --git a/dom/media/test/mochitest_bugs.toml b/dom/media/test/mochitest_bugs.toml
index 5c68d0e795..9e5f785408 100644
--- a/dom/media/test/mochitest_bugs.toml
+++ b/dom/media/test/mochitest_bugs.toml
@@ -27,12 +27,6 @@ tags = "media-gpu"
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -406,63 +400,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -483,14 +433,8 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
- "chained-audio-video.ogg",
- "chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -570,8 +514,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -626,14 +568,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -655,8 +589,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
diff --git a/dom/media/test/mochitest_compat.toml b/dom/media/test/mochitest_compat.toml
index 86f76f1464..fdff340f39 100644
--- a/dom/media/test/mochitest_compat.toml
+++ b/dom/media/test/mochitest_compat.toml
@@ -35,12 +35,6 @@ prefs = ["media.wmf.hevc.enabled=1"] # for test_hevc_playback
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"adts.aac",
@@ -416,63 +410,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -491,14 +441,8 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
- "chained-audio-video.ogg",
- "chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -592,8 +536,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -648,14 +590,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -677,8 +611,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
@@ -823,6 +755,10 @@ skip-if = ["true"] # bug 475110 - disabled since we don't play Wave files standa
["test_can_play_type_webm.html"]
["test_closing_connections.html"]
+# This test attempts to load 20 videos to test something network-related, and
+# Android devices that aren't an emulator hit a device-specific decoder limit,
+# that make the test fail.
+skip-if = ["os == 'android' && !is_emulator"]
["test_constants.html"]
diff --git a/dom/media/test/mochitest_eme.toml b/dom/media/test/mochitest_eme.toml
index d7f39c3eb8..fb3c12fbc6 100644
--- a/dom/media/test/mochitest_eme.toml
+++ b/dom/media/test/mochitest_eme.toml
@@ -27,12 +27,6 @@ skip-if = ["os == 'linux' && (asan || debug)"] # Bug 1476870: common fatal error
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -406,63 +400,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -481,14 +431,8 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
- "chained-audio-video.ogg",
- "chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -568,8 +512,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -624,14 +566,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -653,8 +587,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
diff --git a/dom/media/test/mochitest_eme_compat.toml b/dom/media/test/mochitest_eme_compat.toml
index 43a5b510fc..cc093b3d28 100644
--- a/dom/media/test/mochitest_eme_compat.toml
+++ b/dom/media/test/mochitest_eme_compat.toml
@@ -28,12 +28,6 @@ skip-if = ["os == 'linux' && (asan || debug)"] # Bug 1476870: common fatal error
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -403,63 +397,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -478,14 +428,8 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
- "chained-audio-video.ogg",
- "chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -565,8 +509,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -621,14 +563,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -650,8 +584,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
diff --git a/dom/media/test/mochitest_media_recorder.toml b/dom/media/test/mochitest_media_recorder.toml
index a4893d9cf4..3bebaab839 100644
--- a/dom/media/test/mochitest_media_recorder.toml
+++ b/dom/media/test/mochitest_media_recorder.toml
@@ -27,12 +27,6 @@ tags = "mtg"
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -406,63 +400,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -481,14 +431,10 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
"chained-audio-video.ogg",
"chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -568,8 +514,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -624,14 +568,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -653,8 +589,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
@@ -793,7 +727,7 @@ skip-if = ["os == 'android'"] # android(bug 1232305)
["test_mediarecorder_principals.html"]
skip-if = [
- "os == 'win' && os_version == '10.0'", # Bug 1453375
+ "os == 'win' && os_version == '10.2009'", # Bug 1453375
"os == 'android'", # Bug 1694645
]
diff --git a/dom/media/test/mochitest_seek.toml b/dom/media/test/mochitest_seek.toml
index d71aac775a..7d7e703ab3 100644
--- a/dom/media/test/mochitest_seek.toml
+++ b/dom/media/test/mochitest_seek.toml
@@ -27,12 +27,6 @@ tags = "media-gpu"
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -406,63 +400,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -481,14 +431,8 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
- "chained-audio-video.ogg",
- "chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -568,8 +512,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -624,14 +566,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -653,8 +587,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
diff --git a/dom/media/test/mochitest_stream.toml b/dom/media/test/mochitest_stream.toml
index 0badfc52ab..429e694a96 100644
--- a/dom/media/test/mochitest_stream.toml
+++ b/dom/media/test/mochitest_stream.toml
@@ -27,12 +27,6 @@ tags = "mtg capturestream"
support-files = [
"16bit_wave_extrametadata.wav",
"16bit_wave_extrametadata.wav^headers^",
- "320x240.ogv",
- "320x240.ogv^headers^",
- "448636.ogv",
- "448636.ogv^headers^",
- "A4.ogv",
- "A4.ogv^headers^",
"VID_0001.ogg",
"VID_0001.ogg^headers^",
"allowed.sjs",
@@ -406,63 +400,19 @@ support-files = [
"bipbop_short_vp8.webm^headers^",
"bipbop-lateaudio.mp4",
"bipbop-lateaudio.mp4^headers^",
- "black100x100-aspect3to2.ogv",
- "black100x100-aspect3to2.ogv^headers^",
"bogus.duh",
- "bogus.ogv",
- "bogus.ogv^headers^",
"bogus.wav",
"bogus.wav^headers^",
- "bug461281.ogg",
- "bug461281.ogg^headers^",
- "bug482461-theora.ogv",
- "bug482461-theora.ogv^headers^",
- "bug482461.ogv",
- "bug482461.ogv^headers^",
- "bug495129.ogv",
- "bug495129.ogv^headers^",
"bug495794.ogg",
"bug495794.ogg^headers^",
- "bug498380.ogv",
- "bug498380.ogv^headers^",
- "bug498855-1.ogv",
- "bug498855-1.ogv^headers^",
- "bug498855-2.ogv",
- "bug498855-2.ogv^headers^",
- "bug498855-3.ogv",
- "bug498855-3.ogv^headers^",
- "bug499519.ogv",
- "bug499519.ogv^headers^",
- "bug500311.ogv",
- "bug500311.ogv^headers^",
"bug501279.ogg",
"bug501279.ogg^headers^",
- "bug504613.ogv",
- "bug504613.ogv^headers^",
- "bug504644.ogv",
- "bug504644.ogv^headers^",
- "bug504843.ogv",
- "bug504843.ogv^headers^",
- "bug506094.ogv",
- "bug506094.ogv^headers^",
- "bug516323.indexed.ogv",
- "bug516323.indexed.ogv^headers^",
- "bug516323.ogv",
- "bug516323.ogv^headers^",
"bug520493.ogg",
"bug520493.ogg^headers^",
"bug520500.ogg",
"bug520500.ogg^headers^",
- "bug520908.ogv",
- "bug520908.ogv^headers^",
- "bug523816.ogv",
- "bug523816.ogv^headers^",
"bug533822.ogg",
"bug533822.ogg^headers^",
- "bug556821.ogv",
- "bug556821.ogv^headers^",
- "bug557094.ogv",
- "bug557094.ogv^headers^",
"bug604067.webm",
"bug604067.webm^headers^",
"bug1066943.webm",
@@ -481,14 +431,8 @@ support-files = [
"cancellable_request.sjs",
"chain.ogg",
"chain.ogg^headers^",
- "chain.ogv",
- "chain.ogv^headers^",
"chain.opus",
"chain.opus^headers^",
- "chained-audio-video.ogg",
- "chained-audio-video.ogg^headers^",
- "chained-video.ogv",
- "chained-video.ogv^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
"contentType.sjs",
@@ -568,8 +512,6 @@ support-files = [
"invalid-preskip.webm^headers^",
"manifest.js",
"midflight-redirect.sjs",
- "multiple-bos.ogg",
- "multiple-bos.ogg^headers^",
"multiple-bos-more-header-fileds.ogg",
"multiple-bos-more-header-fileds.ogg^headers^",
"multi_id3v2.mp3",
@@ -624,14 +566,6 @@ support-files = [
"sample.3g2",
"sample-encrypted-sgpdstbl-sbgptraf.mp4",
"sample-encrypted-sgpdstbl-sbgptraf.mp4^headers^",
- "sample-fisbone-skeleton4.ogv",
- "sample-fisbone-skeleton4.ogv^headers^",
- "sample-fisbone-wrong-header.ogv",
- "sample-fisbone-wrong-header.ogv^headers^",
- "seek.ogv",
- "seek.ogv^headers^",
- "seek-short.ogv",
- "seek-short.ogv^headers^",
"seek.webm",
"seek.webm^headers^",
"seek-short.webm",
@@ -653,8 +587,6 @@ support-files = [
"short-aac-encrypted-audio.mp4^headers^",
"short-audio-fragmented-cenc-without-pssh.mp4",
"short-audio-fragmented-cenc-without-pssh.mp4^headers^",
- "short-video.ogv",
- "short-video.ogv^headers^",
"short-vp9-encrypted-video.mp4",
"short-vp9-encrypted-video.mp4^headers^",
"small-shot-mp3.mp4",
@@ -763,11 +695,15 @@ support-files = [
"hls/960x720_seg1.ts",
"sync.webm",
]
+# This test requires software decoding on Android to be able to check that
+# painting a video to the canvas throws. bug 1372457, bug 1526207
+prefs = [
+ "media.android-media-codec.preferred=false",
+]
["test_streams_capture_origin.html"]
["test_streams_element_capture.html"]
-skip-if = ["os == 'android'"] # bug 1372457, bug 1526207 for drawImage
["test_streams_element_capture_mediatrack.html"]
@@ -788,9 +724,7 @@ tags = "mtg"
["test_streams_srcObject.html"]
skip-if = [
- "os == 'android'", # bug 1300443, android(bug 1232305)
"os == 'mac' && debug", # Bug 1756880 - temp due to high frequency shutdown hang
]
["test_streams_tracks.html"]
-skip-if = ["os == 'android'"] # android(bug 1232305)
diff --git a/dom/media/test/multiple-bos.ogg b/dom/media/test/multiple-bos.ogg
deleted file mode 100644
index 193200868e..0000000000
--- a/dom/media/test/multiple-bos.ogg
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/reftest/color_quads/reftest.list b/dom/media/test/reftest/color_quads/reftest.list
index 63a538b78a..10b62dc817 100644
--- a/dom/media/test/reftest/color_quads/reftest.list
+++ b/dom/media/test/reftest/color_quads/reftest.list
@@ -18,7 +18,7 @@ fuzzy(16-51,5234-5622) fuzzy-if(swgl,32-38,1600-91746) fuzzy-if(useDrawSnapshot,
fuzzy-if(winWidget&&swgl,0-20,0-5620) fuzzy-if(winWidget&&!swgl,0-1,0-78) fuzzy-if(Android,254-255,273680-273807) fuzzy-if(cocoaWidget,0-35,0-1947) fuzzy-if(cocoaWidget&&swgl,0-67,0-5451) fuzzy-if(appleSilicon,30-48,1760-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
fuzzy-if(winWidget,0-1,0-78) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
skip-if(winWidget&&isCoverageBuild) fuzzy(0-16,75-1941) fuzzy-if(Android,28-255,273680-359920) fuzzy-if(cocoaWidget,30-32,187326-187407) fuzzy-if(appleSilicon,30-48,1835-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
-fuzzy-if(winWidget&&swgl,0-20,0-5620) fuzzy-if(winWidget&&!swgl,0-1,0-78) fuzzy-if(Android,254-255,273680-273807) fuzzy-if(cocoaWidget,0-35,0-1947) fuzzy-if(cocoaWidget&&swgl,0-67,0-5451) fuzzy-if(appleSilicon,30-48,1760-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
+fuzzy-if(winWidget&&swgl,0-20,0-5620) fuzzy-if(winWidget&&!swgl,0-1,0-78) skip-if(Android) fuzzy-if(cocoaWidget,0-35,0-1947) fuzzy-if(cocoaWidget&&swgl,0-67,0-5451) fuzzy-if(appleSilicon,30-48,1760-187409) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
skip-if(Android) fuzzy(16-48,8107-8818) fuzzy-if(winWidget&&swgl,31-38,8240-184080) fuzzy-if(appleSilicon,33-38,8819-11705) fuzzy-if(useDrawSnapshot,20-20,187200-187200) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
diff --git a/dom/media/test/reftest/reftest.list b/dom/media/test/reftest/reftest.list
index 0f709a35ee..bd4cb2d030 100644
--- a/dom/media/test/reftest/reftest.list
+++ b/dom/media/test/reftest/reftest.list
@@ -1,15 +1,15 @@
skip-if(Android) fuzzy-if(cocoaWidget,0-80,0-76800) fuzzy-if(appleSilicon,0-80,0-76800) fuzzy-if(winWidget,0-63,0-76799) fuzzy-if(gtkWidget,0-70,0-2032) HTTP(..) == short.mp4.firstframe.html short.mp4.firstframe-ref.html
skip-if(Android) fuzzy-if(cocoaWidget,0-87,0-76797) fuzzy-if(appleSilicon,0-87,0-76797) fuzzy-if(winWidget,0-60,0-76797) fuzzy-if(gtkWidget,0-60,0-6070) HTTP(..) == short.mp4.lastframe.html short.mp4.lastframe-ref.html
skip-if(Android) skip-if(cocoaWidget) skip-if(winWidget) fuzzy-if(gtkWidget,0-57,0-4282) fuzzy-if(cocoaWidget,55-80,4173-4417) HTTP(..) == bipbop_300_215kbps.mp4.lastframe.html bipbop_300_215kbps.mp4.lastframe-ref.html
-skip-if(Android) fuzzy-if(cocoaWidget,0-25,0-175921) fuzzy-if(appleSilicon,34-34,40100-40100) fuzzy-if(winWidget,0-71,0-179198) HTTP(..) == gizmo.mp4.seek.html gizmo.mp4.55thframe-ref.html
+skip-if(Android) fuzzy-if(cocoaWidget,0-25,0-175921) fuzzy-if(appleSilicon,34-34,40100-40100) fuzzy-if(winWidget,0-71,0-179198) fuzzy-if(gtkWidget,0-46,0-173482) HTTP(..) == gizmo.mp4.seek.html gizmo.mp4.55thframe-ref.html
# Bug 1758718
skip-if(Android) skip-if(cocoaWidget) fuzzy(0-10,0-778236) == image-10bits-rendering-video.html image-10bits-rendering-ref.html
skip-if(Android) fuzzy(0-10,0-778536) fuzzy-if(appleSilicon,0-37,0-699614) == image-10bits-rendering-90-video.html image-10bits-rendering-90-ref.html
# Bug 1758718
skip-if(Android) fuzzy(0-27,0-573106) skip-if(cocoaWidget) == image-10bits-rendering-720-video.html image-10bits-rendering-720-ref.html
skip-if(Android) fuzzy(0-31,0-573249) fuzzy-if(appleSilicon,0-37,0-543189) == image-10bits-rendering-720-90-video.html image-10bits-rendering-720-90-ref.html
-skip-if(Android) fuzzy(0-84,0-771156) fails-if(useDrawSnapshot) == uneven_frame_duration_video.html uneven_frame_duration_video-ref.html # Skip on Windows 7 as the resolution of the video is too high for test machines and will fail in the decoder.
+skip-if(Android) fuzzy(0-84,0-774213) fails-if(useDrawSnapshot) == uneven_frame_duration_video.html uneven_frame_duration_video-ref.html # Skip on Windows 7 as the resolution of the video is too high for test machines and will fail in the decoder.
# Set media.dormant-on-pause-timeout-ms to avoid decoders becoming dormant and busting test, skip on android as test is too noisy and unstable
skip-if(Android) pref(media.dormant-on-pause-timeout-ms,-1) fuzzy(0-20,0-500) == frame_order_mp4.html frame_order_mp4-ref.html
skip-if(Android) fuzzy(0-30,0-270000) == incorrect_display_in_bytestream_vp8.html incorrect_display_in_bytestream_vp8-ref.html
-skip-if(Android) fuzzy(0-22,0-377335) == incorrect_display_in_bytestream_vp9.html incorrect_display_in_bytestream_vp9-ref.html
+skip-if(Android) fuzzy(0-22,0-381481) == incorrect_display_in_bytestream_vp9.html incorrect_display_in_bytestream_vp9-ref.html
diff --git a/dom/media/test/sample-fisbone-skeleton4.ogv b/dom/media/test/sample-fisbone-skeleton4.ogv
deleted file mode 100644
index 8afe0be7a4..0000000000
--- a/dom/media/test/sample-fisbone-skeleton4.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/sample-fisbone-wrong-header.ogv b/dom/media/test/sample-fisbone-wrong-header.ogv
deleted file mode 100644
index 46c3933da5..0000000000
--- a/dom/media/test/sample-fisbone-wrong-header.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/seek-short.ogv b/dom/media/test/seek-short.ogv
deleted file mode 100644
index a5ca6951d0..0000000000
--- a/dom/media/test/seek-short.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/seek.ogv b/dom/media/test/seek.ogv
deleted file mode 100644
index ac7ece3519..0000000000
--- a/dom/media/test/seek.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/seekLies.sjs b/dom/media/test/seekLies.sjs
index 4fc528a0a5..3b2b19921a 100644
--- a/dom/media/test/seekLies.sjs
+++ b/dom/media/test/seekLies.sjs
@@ -6,7 +6,7 @@ function handleRequest(request, response) {
var bis = Cc["@mozilla.org/binaryinputstream;1"].createInstance(
Ci.nsIBinaryInputStream
);
- var paths = "tests/dom/media/test/seek.ogv";
+ var paths = "tests/dom/media/test/vp9.webm";
var split = paths.split("/");
for (var i = 0; i < split.length; ++i) {
file.append(split[i]);
@@ -15,7 +15,7 @@ function handleRequest(request, response) {
bis.setInputStream(fis);
var bytes = bis.readBytes(bis.available());
response.setHeader("Content-Length", "" + bytes.length, false);
- response.setHeader("Content-Type", "video/ogg", false);
+ response.setHeader("Content-Type", "video/webm", false);
response.setHeader("Accept-Ranges", "bytes", false);
response.write(bytes, bytes.length);
bis.close();
diff --git a/dom/media/test/short-video.ogv b/dom/media/test/short-video.ogv
deleted file mode 100644
index 68dee3cf2b..0000000000
--- a/dom/media/test/short-video.ogv
+++ /dev/null
Binary files differ
diff --git a/dom/media/test/test_bug1248229.html b/dom/media/test/test_bug1248229.html
index 3165795622..e0ca1c96b5 100644
--- a/dom/media/test/test_bug1248229.html
+++ b/dom/media/test/test_bug1248229.html
@@ -7,7 +7,7 @@
<script type="text/javascript" src="manifest.js"></script>
</head>
<body onload="doTest()">
-<video id="v" src="black100x100-aspect3to2.ogv"></video>
+<video id="v" src="vp9.webm"></video>
<pre id="test">
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();
diff --git a/dom/media/test/test_closing_connections.html b/dom/media/test/test_closing_connections.html
index c5eb565447..fc438d8531 100644
--- a/dom/media/test/test_closing_connections.html
+++ b/dom/media/test/test_closing_connections.html
@@ -32,7 +32,7 @@ window.onload = function() {
we've got the first frame.
*/
-var resource = getPlayableVideo(gClosingConnectionsTest);
+var resource = getPlayableVideo(gPlayTests);
SimpleTest.waitForExplicitFinish();
function beginTest() {
diff --git a/dom/media/test/test_decoder_disable.html b/dom/media/test/test_decoder_disable.html
index dd0d2cc51b..d29b62a5fa 100644
--- a/dom/media/test/test_decoder_disable.html
+++ b/dom/media/test/test_decoder_disable.html
@@ -66,10 +66,10 @@ function videoError(event, id) {
</div>
<script>
function makeVideos() {
- document.getElementById('content').innerHTML = '<video id="video1" preload="metadata"><source type="video/ogg" src="320x240.ogv?decoder_disabled=1" onerror="videoError(event, \'video1\');"/><source type="audio/wave" src="r11025_u8_c1.wav?decoder_disabled=1" id=\'s2\' onerror="videoError(event, \'video1\');"/></video><video id="video2" preload="metadata" src="320x240.ogv?decoder_disabled=2" onerror="videoError(event, \'video2\');"></video><video id="video3" preload="metadata" src="r11025_u8_c1.wav?decoder_disabled=2" onerror="videoError(event, \'video3\');"></video>';
+ document.getElementById('content').innerHTML = '<video id="video1" preload="metadata"><source type="video/webm" src="vp9.webm?decoder_disabled=1" onerror="videoError(event, \'video1\');"/><source type="audio/wave" src="r11025_u8_c1.wav?decoder_disabled=1" id=\'s2\' onerror="videoError(event, \'video1\');"/></video><video id="video2" preload="metadata" src="vp9.webm?decoder_disabled=2" onerror="videoError(event, \'video2\');"></video><video id="video3" preload="metadata" src="r11025_u8_c1.wav?decoder_disabled=2" onerror="videoError(event, \'video3\');"></video>';
}
-SpecialPowers.pushPrefEnv({"set": [["media.ogg.enabled", false], ["media.wave.enabled", false]]}, makeVideos);
+SpecialPowers.pushPrefEnv({"set": [["media.webm.enabled", false], ["media.wave.enabled", false]]}, makeVideos);
</script>
</pre>
diff --git a/dom/media/test/test_error_in_video_document.html b/dom/media/test/test_error_in_video_document.html
index e376ea95e3..b48e7745d0 100644
--- a/dom/media/test/test_error_in_video_document.html
+++ b/dom/media/test/test_error_in_video_document.html
@@ -31,7 +31,7 @@ function check() {
// Debug info for Bug 608634
ok(true, "iframe src=" + document.body.getElementsByTagName("iframe")[0].src);
- is(v.readyState, v.HAVE_NOTHING, "Ready state");
+ is(v.readyState, v.HAVE_NOTHING, "Ready state for " + document.body.getElementsByTagName("iframe")[0].src);
isnot(v.error, null, "Error object");
is(v.networkState, v.NETWORK_NO_SOURCE, "Network state");
@@ -40,15 +40,16 @@ function check() {
}
// Find an error test that we'd think we should be able to play (if it
-// wasn't already known to fail).
-var t = getPlayableVideo(gErrorTests);
+// wasn't already known to fail). This needs to fail early: for example,
+// incorrect metadata, not correct metadata but incorrect encoded packets.
+var t = "bug1535980.webm";
if (!t) {
todo(false, "No types supported");
} else {
SimpleTest.waitForExplicitFinish();
var f = document.createElement("iframe");
- f.src = t.name;
+ f.src = t;
f.addEventListener("load", check);
document.body.appendChild(f);
}
diff --git a/dom/media/test/test_load_same_resource.html b/dom/media/test/test_load_same_resource.html
index f3e6992e8c..d3351226cd 100644
--- a/dom/media/test/test_load_same_resource.html
+++ b/dom/media/test/test_load_same_resource.html
@@ -78,7 +78,7 @@ async function initTest(test, token) {
e.token = token;
manager.started(token);
- // Since 320x240.ogv is less than 32KB, we need to wait for the
+ // Since 320x240.webm is less than 32KB, we need to wait for the
// 'suspend' event to ensure the partial block is flushed to the cache
// otherwise the cloned resource will create a new channel when it
// has no data to read at position 0. The new channel will download
diff --git a/dom/media/test/test_media_selection.html b/dom/media/test/test_media_selection.html
index 33ecabfd58..72bde2dd9e 100644
--- a/dom/media/test/test_media_selection.html
+++ b/dom/media/test/test_media_selection.html
@@ -111,7 +111,7 @@ for (var i = 0; i < gSmallTests.length; ++i) {
checkMetadata(t.name, e, t);
}}(test);
- var otherType = type.match(/^video\//) ? "audio/x-wav" : "video/ogg";
+ var otherType = type.match(/^video\//) ? "audio/x-wav" : "video/webm";
subtests.push(maketest(set_src, src, null, check),
maketest(add_source, src, null, check),
maketest(add_source, src, type, check),
diff --git a/dom/media/test/test_preload_suspend.html b/dom/media/test/test_preload_suspend.html
index b715a58dc8..5fefe251ae 100644
--- a/dom/media/test/test_preload_suspend.html
+++ b/dom/media/test/test_preload_suspend.html
@@ -89,7 +89,7 @@ function startTest(test) {
var v = document.createElement("video");
v.name = test.name;
var key = Math.random();
- v.src = "seek.ogv?key=" + key + "&id=" + v.name;
+ v.src = "vp9.webm?key=" + key + "&id=" + v.name;
v.preload = test.preload;
v.suspendCount = 0;
v.expectedSuspendCount = test.expectedSuspendCount;
diff --git a/dom/media/test/test_standalone.html b/dom/media/test/test_standalone.html
index 620878a394..3b61f66d0a 100644
--- a/dom/media/test/test_standalone.html
+++ b/dom/media/test/test_standalone.html
@@ -6,56 +6,41 @@
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
<script type="text/javascript" src="manifest.js"></script>
</head>
-<body onload="doTest()">
-
-<pre id="test">
<script class="testbody" type="text/javascript">
-var iframes = [];
-
-for (let i=0; i<gSmallTests.length; ++i) {
- var test = gSmallTests[i];
+// Test whether video can be played correctly in a video document
+add_task(async function testStandAloneVideoDocument() {
+ for (let i=0; i<gSmallTests.length; ++i) {
+ const test = gSmallTests[i];
- // We can't play WAV files in stand alone documents, so just don't
- // run the test on non-video content types.
- var tag = getMajorMimeType(test.type);
- if (tag != "video" || !document.createElement("video").canPlayType(test.type))
- continue;
+ // We can't play WAV files in stand alone documents, so just don't
+ // run the test on non-video content types.
+ if (getMajorMimeType(test.type) != "video" ||
+ !document.createElement("video").canPlayType(test.type)) {
+ continue;
+ }
- let f = document.createElement("iframe");
- f.src = test.name;
- f._test = test;
- f.id = "frame" + i;
- iframes.push(f);
- document.body.appendChild(f);
-}
+ let f = document.createElement("iframe");
+ f.src = test.name;
+ document.body.appendChild(f);
+ info(`waiting iframe loading ${test.name}`);
+ await new Promise(r => f.onload = r);
-function filename(uri) {
- return uri.substr(uri.lastIndexOf("/")+1);
-}
-
-function doTest()
-{
- for (let i=0; i<iframes.length; ++i) {
- let f = document.getElementById(iframes[i].id);
- var v = f.contentDocument.body.firstChild;
+ const v = f.contentDocument.body.firstChild;
is(v.tagName.toLowerCase(), "video", "Is video element");
- var src = filename(v.currentSrc);
- is(src, iframes[i]._test.name, "Name ("+src+") should match ("+iframes[i]._test.name+")");
- is(v.controls, true, "Controls set (" + src + ")");
- is(v.autoplay, true, "Autoplay set (" + src + ")");
+ const src = filename(v.currentSrc);
+ is(src, test.name, `Name (${src}) should match (${test.name})`);
+ is(v.controls, true, `Controls set (${src})`);
+ is(v.autoplay, true, `Autoplay set (${src})`);
}
- SimpleTest.finish();
-}
+});
-if (!iframes.length) {
- todo(false, "No types supported");
-} else {
- SimpleTest.waitForExplicitFinish();
+// Helper function
+function filename(uri) {
+ return uri.substr(uri.lastIndexOf("/")+1);
}
</script>
-</pre>
</body>
</html>
diff --git a/dom/media/test/test_streams_element_capture.html b/dom/media/test/test_streams_element_capture.html
index 098136dba7..8695dbf63b 100644
--- a/dom/media/test/test_streams_element_capture.html
+++ b/dom/media/test/test_streams_element_capture.html
@@ -109,6 +109,11 @@ async function startTest(test, token) {
let tests = gPlayTests;
// Filter out bug1377278.webm due to bug 1541401.
tests = tests.filter(t => !t.name.includes("1377278"));
+ // bug 1372457, bug 1526207 for drawImage -- restrict to codecs that can be
+ // decoded in software.
+ if (navigator.userAgent.includes("Android")) {
+ tests = tests.filter(t => !t.name.includes("mp4"));
+ }
manager.runTests(tests, async (test, token) => {
try {
diff --git a/dom/media/test/test_streams_element_capture_twice.html b/dom/media/test/test_streams_element_capture_twice.html
index 0e30be1801..f180fd12fd 100644
--- a/dom/media/test/test_streams_element_capture_twice.html
+++ b/dom/media/test/test_streams_element_capture_twice.html
@@ -66,7 +66,7 @@ async function startTest(src) {
(async function() {
try {
- await startTest("short-video.ogv");
+ await startTest("vp9cake.webm");
} catch(e) {
ok(false, `Caught error: ${e}${e.stack ? '\n' + e.stack : ''}`);
} finally {
diff --git a/dom/media/test/test_videoDocumentTitle.html b/dom/media/test/test_videoDocumentTitle.html
index dd52dba26c..b03c41a30c 100644
--- a/dom/media/test/test_videoDocumentTitle.html
+++ b/dom/media/test/test_videoDocumentTitle.html
@@ -21,8 +21,8 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=463830
/** Test for Bug 463830 **/
var gTests = [
- { file: "320x240.ogv", title: "320x240.ogv" },
- { file: "bug461281.ogg", title: "bug461281.ogg" },
+ { file: "vp9.webm", title: "vp9.webm" },
+ { file: "vp9cake.webm", title: "vp9cake.webm" },
];
var gTestNum = 0;
diff --git a/dom/media/test/test_video_stats_resistfingerprinting.html b/dom/media/test/test_video_stats_resistfingerprinting.html
index 2bc239b367..90c8d2a3d3 100644
--- a/dom/media/test/test_video_stats_resistfingerprinting.html
+++ b/dom/media/test/test_video_stats_resistfingerprinting.html
@@ -35,7 +35,6 @@ https://trac.torproject.org/projects/tor/ticket/15757
["privacy.resistFingerprinting.target_video_res", 240]
);
var testCases = [
- { name:"320x240.ogv", type:"video/ogg", width:320, height:240, duration:0.266, drop: false },
{ name:"seek.webm", type:"video/webm", width:320, height:240, duration:3.966, drop: false },
{ name:"gizmo.mp4", type:"video/mp4", width:560, height:320, duration:5.56, drop: true }
];
diff --git a/dom/media/utils/PerformanceRecorder.cpp b/dom/media/utils/PerformanceRecorder.cpp
index d6124e8cf6..3dc2c24a5d 100644
--- a/dom/media/utils/PerformanceRecorder.cpp
+++ b/dom/media/utils/PerformanceRecorder.cpp
@@ -140,6 +140,8 @@ static void AppendImageFormatToName(nsCString& aName,
return "gbrp,";
case DecodeStage::ANDROID_SURFACE:
return "android.Surface,";
+ case DecodeStage::VAAPI_SURFACE:
+ return "VAAPI.Surface,";
}
MOZ_ASSERT_UNREACHABLE("Unhandled DecodeStage::ImageFormat");
return "";
diff --git a/dom/media/utils/PerformanceRecorder.h b/dom/media/utils/PerformanceRecorder.h
index 34c6676b01..e423c3fb5d 100644
--- a/dom/media/utils/PerformanceRecorder.h
+++ b/dom/media/utils/PerformanceRecorder.h
@@ -216,6 +216,7 @@ class DecodeStage {
RGB24,
GBRP,
ANDROID_SURFACE,
+ VAAPI_SURFACE,
};
DecodeStage(nsCString aSource, TrackingId aTrackingId, MediaInfoFlag aFlag)
diff --git a/dom/media/utils/TelemetryProbesReporter.cpp b/dom/media/utils/TelemetryProbesReporter.cpp
index 8c5614c048..377cee9abc 100644
--- a/dom/media/utils/TelemetryProbesReporter.cpp
+++ b/dom/media/utils/TelemetryProbesReporter.cpp
@@ -293,23 +293,30 @@ void TelemetryProbesReporter::OnDecodeResumed() {
}
void TelemetryProbesReporter::OntFirstFrameLoaded(
- const TimeDuration& aLoadedFirstFrameTime, bool aIsMSE,
- bool aIsExternalEngineStateMachine) {
- const MediaInfo& info = mOwner->GetMediaInfo();
- MOZ_ASSERT(info.HasVideo());
+ const double aLoadedFirstFrameTime, const double aLoadedMetadataTime,
+ const double aTotalWaitingDataTime, const double aTotalBufferingTime,
+ const FirstFrameLoadedFlagSet aFlags, const MediaInfo& aInfo) {
+ MOZ_ASSERT(aInfo.HasVideo());
nsCString resolution;
- DetermineResolutionForTelemetry(info, resolution);
+ DetermineResolutionForTelemetry(aInfo, resolution);
+
+ const bool isMSE = aFlags.contains(FirstFrameLoadedFlag::IsMSE);
+ const bool isExternalEngineStateMachine =
+ aFlags.contains(FirstFrameLoadedFlag::IsExternalEngineStateMachine);
glean::media_playback::FirstFrameLoadedExtra extraData;
- extraData.firstFrameLoadedTime = Some(aLoadedFirstFrameTime.ToMilliseconds());
- if (!aIsMSE && !aIsExternalEngineStateMachine) {
+ extraData.firstFrameLoadedTime = Some(aLoadedFirstFrameTime);
+ extraData.metadataLoadedTime = Some(aLoadedMetadataTime);
+ extraData.totalWaitingDataTime = Some(aTotalWaitingDataTime);
+ extraData.bufferingTime = Some(aTotalBufferingTime);
+ if (!isMSE && !isExternalEngineStateMachine) {
extraData.playbackType = Some("Non-MSE playback"_ns);
- } else if (aIsMSE && !aIsExternalEngineStateMachine) {
+ } else if (isMSE && !isExternalEngineStateMachine) {
extraData.playbackType = !mOwner->IsEncrypted() ? Some("MSE playback"_ns)
: Some("EME playback"_ns);
- } else if (!aIsMSE && aIsExternalEngineStateMachine) {
+ } else if (!isMSE && isExternalEngineStateMachine) {
extraData.playbackType = Some("Non-MSE media-engine playback"_ns);
- } else if (aIsMSE && aIsExternalEngineStateMachine) {
+ } else if (isMSE && isExternalEngineStateMachine) {
extraData.playbackType = !mOwner->IsEncrypted()
? Some("MSE media-engine playback"_ns)
: Some("EME media-engine playback"_ns);
@@ -317,18 +324,35 @@ void TelemetryProbesReporter::OntFirstFrameLoaded(
extraData.playbackType = Some("ERROR TYPE"_ns);
MOZ_ASSERT(false, "Unexpected playback type!");
}
- extraData.videoCodec = Some(info.mVideo.mMimeType);
+ extraData.videoCodec = Some(aInfo.mVideo.mMimeType);
extraData.resolution = Some(resolution);
if (const auto keySystem = mOwner->GetKeySystem()) {
extraData.keySystem = Some(NS_ConvertUTF16toUTF8(*keySystem));
}
+ if (aFlags.contains(FirstFrameLoadedFlag::IsHardwareDecoding)) {
+ extraData.isHardwareDecoding = Some(true);
+ }
+
+#ifdef MOZ_WIDGET_ANDROID
+ if (aFlags.contains(FirstFrameLoadedFlag::IsHLS)) {
+ extraData.hlsDecoder = Some(true);
+ }
+#endif
if (MOZ_LOG_TEST(gTelemetryProbesReporterLog, LogLevel::Debug)) {
nsPrintfCString logMessage{
- "Media_Playabck First_Frame_Loaded event, time(ms)=%f, "
- "playback-type=%s, videoCodec=%s, resolution=%s",
- aLoadedFirstFrameTime.ToMilliseconds(), extraData.playbackType->get(),
- extraData.videoCodec->get(), extraData.resolution->get()};
+ "Media_Playabck First_Frame_Loaded event, time(ms)=["
+ "full:%f, loading-meta:%f, waiting-data:%f, buffering:%f], "
+ "playback-type=%s, "
+ "videoCodec=%s, resolution=%s, hardware=%d",
+ aLoadedFirstFrameTime,
+ aLoadedMetadataTime,
+ aTotalWaitingDataTime,
+ aTotalBufferingTime,
+ extraData.playbackType->get(),
+ extraData.videoCodec->get(),
+ extraData.resolution->get(),
+ aFlags.contains(FirstFrameLoadedFlag::IsHardwareDecoding)};
if (const auto keySystem = mOwner->GetKeySystem()) {
logMessage.Append(nsPrintfCString{
", keySystem=%s", NS_ConvertUTF16toUTF8(*keySystem).get()});
@@ -336,6 +360,7 @@ void TelemetryProbesReporter::OntFirstFrameLoaded(
LOG("%s", logMessage.get());
}
glean::media_playback::first_frame_loaded.Record(Some(extraData));
+ mOwner->DispatchAsyncTestingEvent(u"mozfirstframeloadedprobe"_ns);
}
void TelemetryProbesReporter::OnShutdown() {
@@ -465,6 +490,7 @@ void TelemetryProbesReporter::ReportResultForVideo() {
SECONDS_TO_MS(totalVideoPlayTimeS));
}
+ // TODO: deprecate the old probes.
// Report result for video using CDM
auto keySystem = mOwner->GetKeySystem();
if (keySystem) {
@@ -519,6 +545,10 @@ void TelemetryProbesReporter::ReportResultForVideo() {
ReportResultForMFCDMPlaybackIfNeeded(totalVideoPlayTimeS, key);
}
#endif
+ if (keySystem) {
+ ReportPlaytimeForKeySystem(*keySystem, totalVideoPlayTimeS,
+ info.mVideo.mMimeType, key);
+ }
}
#ifdef MOZ_WMF_CDM
@@ -564,6 +594,17 @@ void TelemetryProbesReporter::ReportResultForMFCDMPlaybackIfNeeded(
}
#endif
+void TelemetryProbesReporter::ReportPlaytimeForKeySystem(
+ const nsAString& aKeySystem, const double aTotalPlayTimeS,
+ const nsCString& aCodec, const nsCString& aResolution) {
+ glean::mediadrm::EmePlaybackExtra extra = {
+ .keySystem = Some(NS_ConvertUTF16toUTF8(aKeySystem)),
+ .playedTime = Some(aTotalPlayTimeS),
+ .resolution = Some(aResolution),
+ .videoCodec = Some(aCodec)};
+ glean::mediadrm::eme_playback.Record(Some(extra));
+}
+
void TelemetryProbesReporter::ReportResultForAudio() {
// Don't record telemetry for a media that didn't have a valid audio or video
// to play, or hasn't played.
diff --git a/dom/media/utils/TelemetryProbesReporter.h b/dom/media/utils/TelemetryProbesReporter.h
index be81e8022c..43e05dcadd 100644
--- a/dom/media/utils/TelemetryProbesReporter.h
+++ b/dom/media/utils/TelemetryProbesReporter.h
@@ -8,6 +8,7 @@
#include "MediaInfo.h"
#include "mozilla/Maybe.h"
#include "mozilla/AwakeTimeStamp.h"
+#include "mozilla/EnumSet.h"
#include "AudioChannelService.h"
#include "nsISupportsImpl.h"
@@ -66,8 +67,20 @@ class TelemetryProbesReporter final {
void OnMutedChanged(bool aMuted);
void OnDecodeSuspended();
void OnDecodeResumed();
- void OntFirstFrameLoaded(const TimeDuration& aLoadedFirstFrameTime,
- bool aIsMSE, bool aIsExternalEngineStateMachine);
+
+ enum class FirstFrameLoadedFlag {
+ IsMSE,
+ IsExternalEngineStateMachine,
+ IsHLS,
+ IsHardwareDecoding,
+ };
+ using FirstFrameLoadedFlagSet = EnumSet<FirstFrameLoadedFlag, uint8_t>;
+ void OntFirstFrameLoaded(const double aLoadedFirstFrameTime,
+ const double aLoadedMetadataTime,
+ const double aTotalWaitingDataTime,
+ const double aTotalBufferingTime,
+ const FirstFrameLoadedFlagSet aFlags,
+ const MediaInfo& aInfo);
double GetTotalVideoPlayTimeInSeconds() const;
double GetTotalVideoHDRPlayTimeInSeconds() const;
@@ -100,7 +113,10 @@ class TelemetryProbesReporter final {
void ReportResultForMFCDMPlaybackIfNeeded(double aTotalPlayTimeS,
const nsCString& aResolution);
#endif
-
+ void ReportPlaytimeForKeySystem(const nsAString& aKeySystem,
+ const double aTotalPlayTimeS,
+ const nsCString& aCodec,
+ const nsCString& aResolution);
// Helper class to measure times for playback telemetry stats
class TimeDurationAccumulator {
public:
diff --git a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
index a915e78859..4ade20d16d 100644
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
@@ -23,12 +23,14 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaStreamAudioSourceNode)
tmp->Destroy();
NS_IMPL_CYCLE_COLLECTION_UNLINK(mInputStream)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mInputTrack)
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(MediaStreamAudioSourceNode,
AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mInputStream)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mInputTrack)
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaStreamAudioSourceNode)
@@ -65,12 +67,13 @@ already_AddRefed<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::Create(
void MediaStreamAudioSourceNode::Init(DOMMediaStream& aMediaStream,
ErrorResult& aRv) {
+ mListener = new TrackListener(this);
mInputStream = &aMediaStream;
AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this);
mTrack = AudioNodeExternalInputTrack::Create(Context()->Graph(), engine);
mInputStream->AddConsumerToKeepAlive(ToSupports(this));
- mInputStream->RegisterTrackListener(this);
+ mInputStream->RegisterTrackListener(mListener);
if (mInputStream->Audible()) {
NotifyAudible();
}
@@ -79,8 +82,9 @@ void MediaStreamAudioSourceNode::Init(DOMMediaStream& aMediaStream,
void MediaStreamAudioSourceNode::Destroy() {
if (mInputStream) {
- mInputStream->UnregisterTrackListener(this);
+ mInputStream->UnregisterTrackListener(mListener);
mInputStream = nullptr;
+ mListener = nullptr;
}
DetachFromTrack();
}
@@ -275,4 +279,14 @@ JSObject* MediaStreamAudioSourceNode::WrapObject(
return MediaStreamAudioSourceNode_Binding::Wrap(aCx, this, aGivenProto);
}
+NS_IMPL_CYCLE_COLLECTION_INHERITED(MediaStreamAudioSourceNode::TrackListener,
+ DOMMediaStream::TrackListener, mNode)
+NS_IMPL_ADDREF_INHERITED(MediaStreamAudioSourceNode::TrackListener,
+ DOMMediaStream::TrackListener)
+NS_IMPL_RELEASE_INHERITED(MediaStreamAudioSourceNode::TrackListener,
+ DOMMediaStream::TrackListener)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
+ MediaStreamAudioSourceNode::TrackListener)
+NS_INTERFACE_MAP_END_INHERITING(DOMMediaStream::TrackListener)
+
} // namespace mozilla::dom
diff --git a/dom/media/webaudio/MediaStreamAudioSourceNode.h b/dom/media/webaudio/MediaStreamAudioSourceNode.h
index 1875fc2e83..82ef67d4b7 100644
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.h
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.h
@@ -40,7 +40,6 @@ class MediaStreamAudioSourceNodeEngine final : public AudioNodeEngine {
class MediaStreamAudioSourceNode
: public AudioNode,
- public DOMMediaStream::TrackListener,
public PrincipalChangeObserver<MediaStreamTrack> {
public:
static already_AddRefed<MediaStreamAudioSourceNode> Create(
@@ -87,9 +86,28 @@ class MediaStreamAudioSourceNode
ErrorResult& aRv);
// From DOMMediaStream::TrackListener.
- void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
- void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override;
- void NotifyAudible() override;
+ void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack);
+ void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack);
+ void NotifyAudible();
+
+ class TrackListener final : public DOMMediaStream::TrackListener {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(TrackListener,
+ DOMMediaStream::TrackListener)
+ explicit TrackListener(MediaStreamAudioSourceNode* aNode) : mNode(aNode) {}
+ void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override {
+ mNode->NotifyTrackAdded(aTrack);
+ }
+ void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override {
+ mNode->NotifyTrackRemoved(aTrack);
+ }
+ void NotifyAudible() override { mNode->NotifyAudible(); }
+
+ private:
+ virtual ~TrackListener() = default;
+ RefPtr<MediaStreamAudioSourceNode> mNode;
+ };
// From PrincipalChangeObserver<MediaStreamTrack>.
void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;
@@ -120,6 +138,7 @@ class MediaStreamAudioSourceNode
// On construction we set this to the first audio track of mInputStream.
RefPtr<MediaStreamTrack> mInputTrack;
+ RefPtr<TrackListener> mListener;
};
} // namespace mozilla::dom
diff --git a/dom/media/webaudio/test/mochitest_audio.toml b/dom/media/webaudio/test/mochitest_audio.toml
index 56e612b102..1f037c0ec6 100644
--- a/dom/media/webaudio/test/mochitest_audio.toml
+++ b/dom/media/webaudio/test/mochitest_audio.toml
@@ -6,7 +6,6 @@ support-files = [
"audio-mono-expected-2.wav",
"audio-mono-expected.wav",
"audio-quad.wav",
- "audio.ogv",
"audiovideo.mp4",
"audioBufferSourceNodeDetached_worker.js",
"corsServer.sjs",
diff --git a/dom/media/webaudio/test/test_mediaDecoding.html b/dom/media/webaudio/test/test_mediaDecoding.html
index e76a533e4a..c2105b53b9 100644
--- a/dom/media/webaudio/test/test_mediaDecoding.html
+++ b/dom/media/webaudio/test/test_mediaDecoding.html
@@ -197,18 +197,6 @@
{ url: "invalid.txt", valid: false, sampleRate: 44100 },
// A webm file with no audio
{ url: "noaudio.webm", valid: false, sampleRate: 48000 },
- // A video ogg file with audio
- {
- url: "audio.ogv",
- valid: true,
- expectedUrl: "audio-expected.wav",
- numberOfChannels: 2,
- sampleRate: 44100,
- frames: 47680,
- duration: 1.0807,
- fuzzTolerance: 106,
- fuzzToleranceMobile: 3482
- },
{
url: "nil-packet.ogg",
expectedUrl: null,
diff --git a/dom/media/webcodecs/AudioData.cpp b/dom/media/webcodecs/AudioData.cpp
index 0b21798be8..aae58fb32c 100644
--- a/dom/media/webcodecs/AudioData.cpp
+++ b/dom/media/webcodecs/AudioData.cpp
@@ -151,25 +151,6 @@ JSObject* AudioData::WrapObject(JSContext* aCx,
return AudioData_Binding::Wrap(aCx, this, aGivenProto);
}
-uint32_t BytesPerSamples(const mozilla::dom::AudioSampleFormat& aFormat) {
- switch (aFormat) {
- case AudioSampleFormat::U8:
- case AudioSampleFormat::U8_planar:
- return sizeof(uint8_t);
- case AudioSampleFormat::S16:
- case AudioSampleFormat::S16_planar:
- return sizeof(int16_t);
- case AudioSampleFormat::S32:
- case AudioSampleFormat::F32:
- case AudioSampleFormat::S32_planar:
- case AudioSampleFormat::F32_planar:
- return sizeof(float);
- default:
- MOZ_ASSERT_UNREACHABLE("wrong enum value");
- }
- return 0;
-}
-
Result<Ok, nsCString> IsValidAudioDataInit(const AudioDataInit& aInit) {
if (aInit.mSampleRate <= 0.0) {
auto msg = nsLiteralCString("sampleRate must be positive");
@@ -205,37 +186,13 @@ Result<Ok, nsCString> IsValidAudioDataInit(const AudioDataInit& aInit) {
return Ok();
}
-const char* FormatToString(AudioSampleFormat aFormat) {
- switch (aFormat) {
- case AudioSampleFormat::U8:
- return "u8";
- case AudioSampleFormat::S16:
- return "s16";
- case AudioSampleFormat::S32:
- return "s32";
- case AudioSampleFormat::F32:
- return "f32";
- case AudioSampleFormat::U8_planar:
- return "u8-planar";
- case AudioSampleFormat::S16_planar:
- return "s16-planar";
- case AudioSampleFormat::S32_planar:
- return "s32-planar";
- case AudioSampleFormat::F32_planar:
- return "f32-planar";
- default:
- MOZ_ASSERT_UNREACHABLE("wrong enum value");
- }
- return "unsupported";
-}
-
/* static */
already_AddRefed<AudioData> AudioData::Constructor(const GlobalObject& aGlobal,
const AudioDataInit& aInit,
ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
LOGD("[%p] AudioData(fmt: %s, rate: %f, ch: %" PRIu32 ", ts: %" PRId64 ")",
- global.get(), FormatToString(aInit.mFormat), aInit.mSampleRate,
+ global.get(), GetEnumString(aInit.mFormat).get(), aInit.mSampleRate,
aInit.mNumberOfChannels, aInit.mTimestamp);
if (!global) {
LOGE("Global unavailable");
@@ -311,6 +268,9 @@ struct CopyToSpec {
const uint32_t mFrameOffset;
const uint32_t mPlaneIndex;
const AudioSampleFormat mFormat;
+ // False if this is used internally, and this copy call doesn't come from
+ // script.
+ DebugOnly<bool> mFromScript = true;
};
bool IsInterleaved(const AudioSampleFormat& aFormat) {
@@ -463,7 +423,7 @@ void CopySamples(Span<S> aSource, Span<D> aDest, uint32_t aSourceChannelCount,
}
if (!IsInterleaved(aSourceFormat) && IsInterleaved(aCopyToSpec.mFormat)) {
- MOZ_CRASH("This should never be hit -- current spec doesn't support it");
+ MOZ_ASSERT(!aCopyToSpec.mFromScript);
// Planar to interleaved -- copy of all channels of the source into the
// destination buffer.
MOZ_ASSERT(aCopyToSpec.mPlaneIndex == 0);
@@ -505,7 +465,7 @@ nsCString AudioData::ToString() const {
return nsPrintfCString("AudioData[%zu bytes %s %fHz %" PRIu32 "x%" PRIu32
"ch]",
mResource->Data().LengthBytes(),
- FormatToString(mAudioSampleFormat.value()),
+ GetEnumString(mAudioSampleFormat.value()).get(),
mSampleRate, mNumberOfFrames, mNumberOfChannels);
}
@@ -515,8 +475,9 @@ nsCString CopyToToString(size_t aDestBufSize,
"AudioDataCopyToOptions[data: %zu bytes %s frame count:%" PRIu32
" frame offset: %" PRIu32 " plane: %" PRIu32 "]",
aDestBufSize,
- aOptions.mFormat.WasPassed() ? FormatToString(aOptions.mFormat.Value())
- : "null",
+ aOptions.mFormat.WasPassed()
+ ? GetEnumString(aOptions.mFormat.Value()).get()
+ : "null",
aOptions.mFrameCount.WasPassed() ? aOptions.mFrameCount.Value() : 0,
aOptions.mFrameOffset, aOptions.mPlaneIndex);
}
@@ -650,6 +611,8 @@ void AudioData::Close() {
mAudioSampleFormat = Nothing();
}
+bool AudioData::IsClosed() const { return !mResource; }
+
// https://w3c.github.io/webcodecs/#ref-for-deserialization-steps%E2%91%A1
/* static */
JSObject* AudioData::ReadStructuredClone(JSContext* aCx,
@@ -724,6 +687,31 @@ void AudioData::CloseIfNeeded() {
}
}
+RefPtr<mozilla::AudioData> AudioData::ToAudioData() const {
+ // Always convert to f32 interleaved for now, as this Gecko's prefered
+ // internal audio representation for encoding and decoding.
+ Span<uint8_t> data = mResource->Data();
+ DebugOnly<uint32_t> frames = mNumberOfFrames;
+ uint32_t bytesPerSample = BytesPerSamples(mAudioSampleFormat.value());
+ uint32_t samples = data.Length() / bytesPerSample;
+ DebugOnly<uint32_t> computedFrames = samples / mNumberOfChannels;
+ MOZ_ASSERT(frames == computedFrames);
+ AlignedAudioBuffer buf(samples);
+ Span<uint8_t> storage(reinterpret_cast<uint8_t*>(buf.Data()),
+ samples * sizeof(float));
+
+ CopyToSpec spec(mNumberOfFrames, 0, 0, AudioSampleFormat::F32);
+#ifdef DEBUG
+ spec.mFromScript = false;
+#endif
+
+ DoCopy(data, storage, mNumberOfChannels, mAudioSampleFormat.value(), spec);
+
+ return MakeRefPtr<mozilla::AudioData>(
+ 0, media::TimeUnit::FromMicroseconds(mTimestamp), std::move(buf),
+ mNumberOfChannels, mSampleRate);
+}
+
#undef LOGD
#undef LOGE
#undef LOG_INTERNAL
diff --git a/dom/media/webcodecs/AudioData.h b/dom/media/webcodecs/AudioData.h
index 4ae69a225a..43af638d11 100644
--- a/dom/media/webcodecs/AudioData.h
+++ b/dom/media/webcodecs/AudioData.h
@@ -90,6 +90,7 @@ class AudioData final : public nsISupports, public nsWrapperCache {
already_AddRefed<AudioData> Clone(ErrorResult& aRv);
void Close();
+ bool IsClosed() const;
// [Serializable] implementations: {Read, Write}StructuredClone
static JSObject* ReadStructuredClone(JSContext* aCx, nsIGlobalObject* aGlobal,
@@ -107,11 +108,13 @@ class AudioData final : public nsISupports, public nsWrapperCache {
static already_AddRefed<AudioData> FromTransferred(nsIGlobalObject* aGlobal,
TransferredData* aData);
+ nsCString ToString() const;
+
+ RefPtr<mozilla::AudioData> ToAudioData() const;
+
private:
size_t ComputeCopyElementCount(const AudioDataCopyToOptions& aOptions,
ErrorResult& aRv);
-
- nsCString ToString() const;
// AudioData can run on either main thread or worker thread.
void AssertIsOnOwningThread() const { NS_ASSERT_OWNINGTHREAD(AudioData); }
void CloseIfNeeded();
diff --git a/dom/media/webcodecs/AudioDecoder.cpp b/dom/media/webcodecs/AudioDecoder.cpp
index 6b554dcacf..ef2acd4eae 100644
--- a/dom/media/webcodecs/AudioDecoder.cpp
+++ b/dom/media/webcodecs/AudioDecoder.cpp
@@ -68,11 +68,11 @@ NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
AudioDecoderConfigInternal::AudioDecoderConfigInternal(
const nsAString& aCodec, uint32_t aSampleRate, uint32_t aNumberOfChannels,
- Maybe<RefPtr<MediaByteBuffer>>&& aDescription)
+ already_AddRefed<MediaByteBuffer> aDescription)
: mCodec(aCodec),
mSampleRate(aSampleRate),
mNumberOfChannels(aNumberOfChannels),
- mDescription(std::move(aDescription)) {}
+ mDescription(aDescription) {}
/*static*/
UniquePtr<AudioDecoderConfigInternal> AudioDecoderConfigInternal::Create(
@@ -83,7 +83,7 @@ UniquePtr<AudioDecoderConfigInternal> AudioDecoderConfigInternal::Create(
return nullptr;
}
- Maybe<RefPtr<MediaByteBuffer>> description;
+ RefPtr<MediaByteBuffer> description;
if (aConfig.mDescription.WasPassed()) {
auto rv = GetExtraDataFromArrayBuffer(aConfig.mDescription.Value());
if (rv.isErr()) { // Invalid description data.
@@ -95,12 +95,28 @@ UniquePtr<AudioDecoderConfigInternal> AudioDecoderConfigInternal::Create(
error.get());
return nullptr;
}
- description.emplace(rv.unwrap());
+ description = rv.unwrap();
}
return UniquePtr<AudioDecoderConfigInternal>(new AudioDecoderConfigInternal(
aConfig.mCodec, aConfig.mSampleRate, aConfig.mNumberOfChannels,
- std::move(description)));
+ description.forget()));
+}
+
+nsCString AudioDecoderConfigInternal::ToString() const {
+ nsCString rv;
+
+ rv.AppendLiteral("AudioDecoderConfigInternal: ");
+ rv.AppendPrintf("%s %" PRIu32 "Hz %" PRIu32 " ch",
+ NS_ConvertUTF16toUTF8(mCodec).get(), mSampleRate,
+ mNumberOfChannels);
+ if (mDescription) {
+ rv.AppendPrintf("(%zu bytes of extradata)", mDescription->Length());
+ } else {
+ rv.AppendLiteral("(no extradata)");
+ }
+
+ return rv;
}
/*
@@ -118,24 +134,6 @@ struct AudioMIMECreateParam {
// Map between WebCodecs pcm types as strings and codec numbers
// All other codecs
-nsCString ConvertCodecName(const nsCString& aContainer,
- const nsCString& aCodec) {
- if (!aContainer.EqualsLiteral("x-wav")) {
- return aCodec;
- }
- if (aCodec.EqualsLiteral("ulaw")) {
- return nsCString("7");
- }
- if (aCodec.EqualsLiteral("alaw")) {
- return nsCString("6");
- }
- if (aCodec.Find("f32")) {
- return nsCString("3");
- }
- // Linear PCM
- return nsCString("1");
-}
-
static nsTArray<nsCString> GuessMIMETypes(const AudioMIMECreateParam& aParam) {
nsCString codec = NS_ConvertUTF16toUTF8(aParam.mParsedCodec);
nsTArray<nsCString> types;
@@ -147,16 +145,6 @@ static nsTArray<nsCString> GuessMIMETypes(const AudioMIMECreateParam& aParam) {
return types;
}
-static bool IsSupportedAudioCodec(const nsAString& aCodec) {
- LOG("IsSupportedAudioCodec: %s", NS_ConvertUTF16toUTF8(aCodec).get());
- return aCodec.EqualsLiteral("flac") || aCodec.EqualsLiteral("mp3") ||
- IsAACCodecString(aCodec) || aCodec.EqualsLiteral("opus") ||
- aCodec.EqualsLiteral("ulaw") || aCodec.EqualsLiteral("alaw") ||
- aCodec.EqualsLiteral("pcm-u8") || aCodec.EqualsLiteral("pcm-s16") ||
- aCodec.EqualsLiteral("pcm-s24") || aCodec.EqualsLiteral("pcm-s32") ||
- aCodec.EqualsLiteral("pcm-f32");
-}
-
// https://w3c.github.io/webcodecs/#check-configuration-support
template <typename Config>
static bool CanDecodeAudio(const Config& aConfig) {
@@ -259,13 +247,12 @@ Result<UniquePtr<TrackInfo>, nsresult> AudioDecoderTraits::CreateTrackInfo(
return Err(NS_ERROR_INVALID_ARG);
}
- if (aConfig.mDescription.isSome()) {
- RefPtr<MediaByteBuffer> buf;
- buf = aConfig.mDescription.value();
- if (buf) {
- LOG("The given config has %zu bytes of description data", buf->Length());
- ai->mCodecSpecificConfig =
- AudioCodecSpecificVariant{AudioCodecSpecificBinaryBlob{buf}};
+ if (aConfig.mDescription) {
+ if (!aConfig.mDescription->IsEmpty()) {
+ LOG("The given config has %zu bytes of description data",
+ aConfig.mDescription->Length());
+ ai->mCodecSpecificConfig = AudioCodecSpecificVariant{
+ AudioCodecSpecificBinaryBlob{aConfig.mDescription}};
}
}
@@ -275,7 +262,7 @@ Result<UniquePtr<TrackInfo>, nsresult> AudioDecoderTraits::CreateTrackInfo(
LOG("Created AudioInfo %s (%" PRIu32 "ch %" PRIu32
"Hz - with extra-data: %s)",
NS_ConvertUTF16toUTF8(aConfig.mCodec).get(), ai->mChannels, ai->mChannels,
- aConfig.mDescription.isSome() ? "yes" : "no");
+ aConfig.mDescription && !aConfig.mDescription->IsEmpty() ? "yes" : "no");
return track;
}
diff --git a/dom/media/webcodecs/AudioEncoder.cpp b/dom/media/webcodecs/AudioEncoder.cpp
new file mode 100644
index 0000000000..7204a13200
--- /dev/null
+++ b/dom/media/webcodecs/AudioEncoder.cpp
@@ -0,0 +1,488 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/AudioEncoder.h"
+#include "EncoderTraits.h"
+#include "mozilla/dom/AudioEncoderBinding.h"
+
+#include "EncoderConfig.h"
+#include "EncoderTypes.h"
+#include "MediaData.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Logging.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/dom/AudioDataBinding.h"
+#include "mozilla/dom/EncodedAudioChunk.h"
+#include "mozilla/dom/EncodedAudioChunkBinding.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/WebCodecsUtils.h"
+#include "EncoderConfig.h"
+
+extern mozilla::LazyLogModule gWebCodecsLog;
+
+namespace mozilla::dom {
+
+#ifdef LOG_INTERNAL
+# undef LOG_INTERNAL
+#endif // LOG_INTERNAL
+#define LOG_INTERNAL(level, msg, ...) \
+ MOZ_LOG(gWebCodecsLog, LogLevel::level, (msg, ##__VA_ARGS__))
+
+#ifdef LOG
+# undef LOG
+#endif // LOG
+#define LOG(msg, ...) LOG_INTERNAL(Debug, msg, ##__VA_ARGS__)
+
+#ifdef LOGW
+# undef LOGW
+#endif // LOGW
+#define LOGW(msg, ...) LOG_INTERNAL(Warning, msg, ##__VA_ARGS__)
+
+#ifdef LOGE
+# undef LOGE
+#endif // LOGE
+#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__)
+
+#ifdef LOGV
+# undef LOGV
+#endif // LOGV
+#define LOGV(msg, ...) LOG_INTERNAL(Verbose, msg, ##__VA_ARGS__)
+
+NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioEncoder, DOMEventTargetHelper,
+ mErrorCallback, mOutputCallback)
+NS_IMPL_ADDREF_INHERITED(AudioEncoder, DOMEventTargetHelper)
+NS_IMPL_RELEASE_INHERITED(AudioEncoder, DOMEventTargetHelper)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioEncoder)
+NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
+
+/*
+ * Below are helper classes
+ */
+AudioEncoderConfigInternal::AudioEncoderConfigInternal(
+ const nsAString& aCodec, Maybe<uint32_t> aSampleRate,
+ Maybe<uint32_t> aNumberOfChannels, Maybe<uint32_t> aBitrate,
+ BitrateMode aBitrateMode)
+ : mCodec(aCodec),
+ mSampleRate(aSampleRate),
+ mNumberOfChannels(aNumberOfChannels),
+ mBitrate(aBitrate),
+ mBitrateMode(aBitrateMode) {}
+
+AudioEncoderConfigInternal::AudioEncoderConfigInternal(
+ const AudioEncoderConfig& aConfig)
+ : AudioEncoderConfigInternal(
+ aConfig.mCodec, OptionalToMaybe(aConfig.mSampleRate),
+ OptionalToMaybe(aConfig.mNumberOfChannels),
+ OptionalToMaybe(aConfig.mBitrate), aConfig.mBitrateMode) {
+ DebugOnly<nsCString> errorMessage;
+ if (aConfig.mCodec.EqualsLiteral("opus") && aConfig.mOpus.WasPassed()) {
+ // All values are in range at this point, the config is known valid.
+ OpusSpecific specific;
+ if (aConfig.mOpus.Value().mComplexity.WasPassed()) {
+ specific.mComplexity = aConfig.mOpus.Value().mComplexity.Value();
+ } else {
+ // https://w3c.github.io/webcodecs/opus_codec_registration.html#dom-opusencoderconfig-complexity
+      // If no value is specified, the default value is platform-specific:
+ // User Agents SHOULD set a default of 5 for mobile platforms, and a
+ // default of 9 for all other platforms.
+ if (IsOnAndroid()) {
+ specific.mComplexity = 5;
+ } else {
+ specific.mComplexity = 9;
+ }
+ }
+ specific.mApplication = OpusSpecific::Application::Unspecified;
+ specific.mFrameDuration = aConfig.mOpus.Value().mFrameDuration;
+ specific.mPacketLossPerc = aConfig.mOpus.Value().mPacketlossperc;
+ specific.mUseDTX = aConfig.mOpus.Value().mUsedtx;
+ specific.mUseInBandFEC = aConfig.mOpus.Value().mUseinbandfec;
+ mSpecific.emplace(specific);
+ }
+ MOZ_ASSERT(AudioEncoderTraits::Validate(aConfig, errorMessage));
+}
+
+AudioEncoderConfigInternal::AudioEncoderConfigInternal(
+ const AudioEncoderConfigInternal& aConfig)
+ : AudioEncoderConfigInternal(aConfig.mCodec, aConfig.mSampleRate,
+ aConfig.mNumberOfChannels, aConfig.mBitrate,
+ aConfig.mBitrateMode) {}
+
+void AudioEncoderConfigInternal::SetSpecific(
+ const EncoderConfig::CodecSpecific& aSpecific) {
+ mSpecific.emplace(aSpecific);
+}
+
+/*
+ * The followings are helpers for AudioEncoder methods
+ */
+
+static void CloneConfiguration(RootedDictionary<AudioEncoderConfig>& aDest,
+ JSContext* aCx,
+ const AudioEncoderConfig& aConfig) {
+ aDest.mCodec = aConfig.mCodec;
+
+ if (aConfig.mNumberOfChannels.WasPassed()) {
+ aDest.mNumberOfChannels.Construct(aConfig.mNumberOfChannels.Value());
+ }
+ if (aConfig.mSampleRate.WasPassed()) {
+ aDest.mSampleRate.Construct(aConfig.mSampleRate.Value());
+ }
+ if (aConfig.mBitrate.WasPassed()) {
+ aDest.mBitrate.Construct(aConfig.mBitrate.Value());
+ }
+ if (aConfig.mOpus.WasPassed()) {
+ aDest.mOpus.Construct(aConfig.mOpus.Value());
+ // Handle the default value manually since it's different on mobile
+ if (!aConfig.mOpus.Value().mComplexity.WasPassed()) {
+ if (IsOnAndroid()) {
+ aDest.mOpus.Value().mComplexity.Construct(5);
+ } else {
+ aDest.mOpus.Value().mComplexity.Construct(9);
+ }
+ }
+ }
+ aDest.mBitrateMode = aConfig.mBitrateMode;
+}
+
+static bool IsAudioEncodeSupported(const nsAString& aCodec) {
+ LOG("IsEncodeSupported: %s", NS_ConvertUTF16toUTF8(aCodec).get());
+
+ return aCodec.EqualsLiteral("opus") || aCodec.EqualsLiteral("vorbis");
+}
+
+static bool CanEncode(const RefPtr<AudioEncoderConfigInternal>& aConfig,
+ nsCString& aErrorMessage) {
+ auto parsedCodecString =
+ ParseCodecString(aConfig->mCodec).valueOr(EmptyString());
+ // TODO: Enable WebCodecs on Android (Bug 1840508)
+ if (IsOnAndroid()) {
+ return false;
+ }
+ if (!IsAudioEncodeSupported(parsedCodecString)) {
+ return false;
+ }
+
+ if (aConfig->mNumberOfChannels.value() > 256) {
+ aErrorMessage.AppendPrintf(
+ "Invalid number of channels, supported range is between 1 and 256");
+ return false;
+ }
+
+ // Somewhat arbitrarily chosen, but reflects real-life and what the rest of
+ // Gecko does.
+ if (aConfig->mSampleRate.value() < 3000 ||
+ aConfig->mSampleRate.value() > 384000) {
+ aErrorMessage.AppendPrintf(
+ "Invalid sample-rate of %d, supported range is 3000Hz to 384000Hz",
+ aConfig->mSampleRate.value());
+ return false;
+ }
+
+ return EncoderSupport::Supports(aConfig);
+}
+
+nsCString AudioEncoderConfigInternal::ToString() const {
+ nsCString rv;
+
+ rv.AppendPrintf("AudioEncoderConfigInternal: %s",
+ NS_ConvertUTF16toUTF8(mCodec).get());
+ if (mSampleRate) {
+ rv.AppendPrintf(" %" PRIu32 "Hz", mSampleRate.value());
+ }
+ if (mNumberOfChannels) {
+ rv.AppendPrintf(" %" PRIu32 "ch", mNumberOfChannels.value());
+ }
+ if (mBitrate) {
+ rv.AppendPrintf(" %" PRIu32 "bps", mBitrate.value());
+ }
+ rv.AppendPrintf(" (%s)", mBitrateMode == mozilla::dom::BitrateMode::Constant
+ ? "CRB"
+ : "VBR");
+
+ return rv;
+}
+
+EncoderConfig AudioEncoderConfigInternal::ToEncoderConfig() const {
+ const mozilla::BitrateMode bitrateMode =
+ mBitrateMode == mozilla::dom::BitrateMode::Constant
+ ? mozilla::BitrateMode::Constant
+ : mozilla::BitrateMode::Variable;
+
+ CodecType type = CodecType::Opus;
+ Maybe<EncoderConfig::CodecSpecific> specific;
+ if (mCodec.EqualsLiteral("opus")) {
+ type = CodecType::Opus;
+ MOZ_ASSERT(mSpecific.isNothing() || mSpecific->is<OpusSpecific>());
+ specific = mSpecific;
+ } else if (mCodec.EqualsLiteral("vorbis")) {
+ type = CodecType::Vorbis;
+ } else if (mCodec.EqualsLiteral("flac")) {
+ type = CodecType::Flac;
+ } else if (StringBeginsWith(mCodec, u"pcm-"_ns)) {
+ type = CodecType::PCM;
+ } else if (mCodec.EqualsLiteral("ulaw")) {
+ type = CodecType::PCM;
+ } else if (mCodec.EqualsLiteral("alaw")) {
+ type = CodecType::PCM;
+ } else if (StringBeginsWith(mCodec, u"mp4a."_ns)) {
+ type = CodecType::AAC;
+ }
+
+ // This should have been checked ahead of time -- we can't encode without
+ // knowing the sample-rate or the channel count at the very least.
+ MOZ_ASSERT(mSampleRate.value());
+ MOZ_ASSERT(mNumberOfChannels.value());
+
+ return EncoderConfig(type, mNumberOfChannels.value(), bitrateMode,
+ AssertedCast<uint32_t>(mSampleRate.value()),
+ mBitrate.valueOr(0), specific);
+}
+
+bool AudioEncoderConfigInternal::Equals(
+ const AudioEncoderConfigInternal& aOther) const {
+ return false;
+}
+
+bool AudioEncoderConfigInternal::CanReconfigure(
+ const AudioEncoderConfigInternal& aOther) const {
+ return false;
+}
+
+already_AddRefed<WebCodecsConfigurationChangeList>
+AudioEncoderConfigInternal::Diff(
+ const AudioEncoderConfigInternal& aOther) const {
+ return MakeRefPtr<WebCodecsConfigurationChangeList>().forget();
+}
+
+/* static */
+bool AudioEncoderTraits::IsSupported(
+ const AudioEncoderConfigInternal& aConfig) {
+ nsCString errorMessage;
+ bool canEncode =
+ CanEncode(MakeRefPtr<AudioEncoderConfigInternal>(aConfig), errorMessage);
+ if (!canEncode) {
+ LOGE("Can't encode configuration %s: %s", aConfig.ToString().get(),
+ errorMessage.get());
+ }
+ return canEncode;
+}
+
+// https://w3c.github.io/webcodecs/#valid-audioencoderconfig
+/* static */
+bool AudioEncoderTraits::Validate(const AudioEncoderConfig& aConfig,
+ nsCString& aErrorMessage) {
+ Maybe<nsString> codec = ParseCodecString(aConfig.mCodec);
+ if (!codec || codec->IsEmpty()) {
+ LOGE("Validating AudioEncoderConfig: invalid codec string");
+ return false;
+ }
+
+ if (!aConfig.mNumberOfChannels.WasPassed()) {
+ aErrorMessage.AppendPrintf("Channel count required");
+ return false;
+ }
+ if (aConfig.mNumberOfChannels.Value() == 0) {
+ aErrorMessage.AppendPrintf(
+ "Invalid number of channels, supported range is between 1 and 256");
+ return false;
+ }
+ if (!aConfig.mSampleRate.WasPassed()) {
+ aErrorMessage.AppendPrintf("Sample-rate required");
+ return false;
+ }
+ if (aConfig.mSampleRate.Value() == 0) {
+ aErrorMessage.AppendPrintf("Invalid sample-rate of 0");
+ return false;
+ }
+
+ if (aConfig.mBitrate.WasPassed() &&
+ aConfig.mBitrate.Value() > std::numeric_limits<int>::max()) {
+ aErrorMessage.AppendPrintf("Invalid config: bitrate value too large");
+ return false;
+ }
+
+ if (codec->EqualsLiteral("opus")) {
+ // This comes from
+ // https://w3c.github.io/webcodecs/opus_codec_registration.html#opus-encoder-config
+ if (aConfig.mBitrate.WasPassed() && (aConfig.mBitrate.Value() < 6000 ||
+ aConfig.mBitrate.Value() > 510000)) {
+ aErrorMessage.AppendPrintf(
+ "Invalid config: bitrate value outside of [6k, 510k] for opus");
+ return false;
+ }
+ if (aConfig.mOpus.WasPassed()) {
+ // Verify value ranges
+ const std::array validFrameDurationUs = {2500, 5000, 10000,
+ 20000, 40000, 60000};
+ if (std::find(validFrameDurationUs.begin(), validFrameDurationUs.end(),
+ aConfig.mOpus.Value().mFrameDuration) ==
+ validFrameDurationUs.end()) {
+ aErrorMessage.AppendPrintf("Invalid config: invalid frame duration");
+ return false;
+ }
+ if (aConfig.mOpus.Value().mComplexity.WasPassed() &&
+ aConfig.mOpus.Value().mComplexity.Value() > 10) {
+ aErrorMessage.AppendPrintf(
+ "Invalid config: Opus complexity greater than 10");
+ return false;
+ }
+ if (aConfig.mOpus.Value().mPacketlossperc > 100) {
+ aErrorMessage.AppendPrintf(
+ "Invalid config: Opus packet loss percentage greater than 100");
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/* static */
+RefPtr<AudioEncoderConfigInternal> AudioEncoderTraits::CreateConfigInternal(
+ const AudioEncoderConfig& aConfig) {
+ nsCString errorMessage;
+ if (!AudioEncoderTraits::Validate(aConfig, errorMessage)) {
+ return nullptr;
+ }
+ return MakeRefPtr<AudioEncoderConfigInternal>(aConfig);
+}
+
+/* static */
+RefPtr<mozilla::AudioData> AudioEncoderTraits::CreateInputInternal(
+ const dom::AudioData& aInput,
+ const dom::VideoEncoderEncodeOptions& /* unused */) {
+ return aInput.ToAudioData();
+}
+
+/*
+ * Below are AudioEncoder implementation
+ */
+
+AudioEncoder::AudioEncoder(
+ nsIGlobalObject* aParent, RefPtr<WebCodecsErrorCallback>&& aErrorCallback,
+ RefPtr<EncodedAudioChunkOutputCallback>&& aOutputCallback)
+ : EncoderTemplate(aParent, std::move(aErrorCallback),
+ std::move(aOutputCallback)) {
+ MOZ_ASSERT(mErrorCallback);
+ MOZ_ASSERT(mOutputCallback);
+ LOG("AudioEncoder %p ctor", this);
+}
+
+AudioEncoder::~AudioEncoder() {
+ LOG("AudioEncoder %p dtor", this);
+ Unused << ResetInternal(NS_ERROR_DOM_ABORT_ERR);
+}
+
+JSObject* AudioEncoder::WrapObject(JSContext* aCx,
+ JS::Handle<JSObject*> aGivenProto) {
+ AssertIsOnOwningThread();
+
+ return AudioEncoder_Binding::Wrap(aCx, this, aGivenProto);
+}
+
+// https://w3c.github.io/webcodecs/#dom-audioencoder-audioencoder
+/* static */
+already_AddRefed<AudioEncoder> AudioEncoder::Constructor(
+ const GlobalObject& aGlobal, const AudioEncoderInit& aInit,
+ ErrorResult& aRv) {
+ nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
+ if (!global) {
+ aRv.Throw(NS_ERROR_FAILURE);
+ return nullptr;
+ }
+
+ return MakeAndAddRef<AudioEncoder>(
+ global.get(), RefPtr<WebCodecsErrorCallback>(aInit.mError),
+ RefPtr<EncodedAudioChunkOutputCallback>(aInit.mOutput));
+}
+
+// https://w3c.github.io/webcodecs/#dom-audioencoder-isconfigsupported
+/* static */
+already_AddRefed<Promise> AudioEncoder::IsConfigSupported(
+ const GlobalObject& aGlobal, const AudioEncoderConfig& aConfig,
+ ErrorResult& aRv) {
+ LOG("AudioEncoder::IsConfigSupported, config: %s",
+ NS_ConvertUTF16toUTF8(aConfig.mCodec).get());
+
+ nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
+ if (!global) {
+ aRv.Throw(NS_ERROR_FAILURE);
+ return nullptr;
+ }
+
+ RefPtr<Promise> p = Promise::Create(global.get(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return p.forget();
+ }
+
+ nsCString errorMessage;
+ if (!AudioEncoderTraits::Validate(aConfig, errorMessage)) {
+ p->MaybeRejectWithTypeError(errorMessage);
+ return p.forget();
+ }
+
+  // TODO: Move the following work to another thread to unblock the current
+  // thread, as the spec suggests.
+
+ RootedDictionary<AudioEncoderConfig> config(aGlobal.Context());
+ CloneConfiguration(config, aGlobal.Context(), aConfig);
+
+ bool supportedAudioCodec = IsSupportedAudioCodec(aConfig.mCodec);
+ auto configInternal = MakeRefPtr<AudioEncoderConfigInternal>(aConfig);
+ bool canEncode = CanEncode(configInternal, errorMessage);
+ if (!canEncode) {
+ LOG("CanEncode failed: %s", errorMessage.get());
+ }
+ RootedDictionary<AudioEncoderSupport> s(aGlobal.Context());
+ s.mConfig.Construct(std::move(config));
+ s.mSupported.Construct(supportedAudioCodec && canEncode);
+
+ p->MaybeResolve(s);
+ return p.forget();
+}
+
+RefPtr<EncodedAudioChunk> AudioEncoder::EncodedDataToOutputType(
+ nsIGlobalObject* aGlobalObject, const RefPtr<MediaRawData>& aData) {
+ AssertIsOnOwningThread();
+
+ // Package into an EncodedAudioChunk
+ auto buffer =
+ MakeRefPtr<MediaAlignedByteBuffer>(aData->Data(), aData->Size());
+ auto encoded = MakeRefPtr<EncodedAudioChunk>(
+ aGlobalObject, buffer.forget(), EncodedAudioChunkType::Key,
+ aData->mTime.ToMicroseconds(),
+ aData->mDuration.IsZero() ? Nothing()
+ : Some(aData->mDuration.ToMicroseconds()));
+ return encoded;
+}
+
+AudioDecoderConfigInternal AudioEncoder::EncoderConfigToDecoderConfig(
+ nsIGlobalObject* aGlobal, const RefPtr<MediaRawData>& aRawData,
+ const AudioEncoderConfigInternal& aOutputConfig) const {
+ MOZ_ASSERT(aOutputConfig.mSampleRate.isSome());
+ MOZ_ASSERT(aOutputConfig.mNumberOfChannels.isSome());
+ uint32_t sampleRate = aOutputConfig.mSampleRate.value();
+ uint32_t channelCount = aOutputConfig.mNumberOfChannels.value();
+ // Check if the encoder had to modify the settings because of codec
+ // constraints. e.g. FFmpegAudioEncoder can encode any sample-rate, but if the
+ // codec is Opus, then it will resample the audio one of the specific rates
+ // supported by the encoder.
+ if (aRawData->mConfig) {
+ sampleRate = aRawData->mConfig->mSampleRate;
+ channelCount = aRawData->mConfig->mNumberOfChannels;
+ }
+ return AudioDecoderConfigInternal(aOutputConfig.mCodec, sampleRate,
+ channelCount,
+ do_AddRef(aRawData->mExtraData));
+}
+
+#undef LOG
+#undef LOGW
+#undef LOGE
+#undef LOGV
+#undef LOG_INTERNAL
+
+} // namespace mozilla::dom
diff --git a/dom/media/webcodecs/AudioEncoder.h b/dom/media/webcodecs/AudioEncoder.h
new file mode 100644
index 0000000000..0df6cd23d6
--- /dev/null
+++ b/dom/media/webcodecs/AudioEncoder.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_AudioEncoder_h
+#define mozilla_dom_AudioEncoder_h
+
+#include "js/TypeDecls.h"
+#include "mozilla/ErrorResult.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/dom/BindingDeclarations.h"
+#include "mozilla/dom/EncoderTemplate.h"
+#include "mozilla/dom/AudioData.h"
+#include "nsCycleCollectionParticipant.h"
+#include "EncoderTypes.h"
+#include "EncoderAgent.h"
+
+class nsIGlobalObject;
+
+namespace mozilla::dom {
+
+class AudioDataOutputCallback;
+class EncodedAudioChunk;
+class EncodedAudioChunkData;
+class EventHandlerNonNull;
+class GlobalObject;
+class Promise;
+class WebCodecsErrorCallback;
+struct AudioEncoderConfig;
+struct AudioEncoderInit;
+
+} // namespace mozilla::dom
+
+namespace mozilla::dom {
+
+class AudioEncoder final : public EncoderTemplate<AudioEncoderTraits> {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioEncoder, DOMEventTargetHelper)
+
+ public:
+ AudioEncoder(nsIGlobalObject* aParent,
+ RefPtr<WebCodecsErrorCallback>&& aErrorCallback,
+ RefPtr<EncodedAudioChunkOutputCallback>&& aOutputCallback);
+
+ protected:
+ ~AudioEncoder();
+
+ public:
+ JSObject* WrapObject(JSContext* aCx,
+ JS::Handle<JSObject*> aGivenProto) override;
+
+ static already_AddRefed<AudioEncoder> Constructor(
+ const GlobalObject& aGlobal, const AudioEncoderInit& aInit,
+ ErrorResult& aRv);
+
+ static already_AddRefed<Promise> IsConfigSupported(
+ const GlobalObject& aGlobal, const AudioEncoderConfig& aConfig,
+ ErrorResult& aRv);
+
+ protected:
+ virtual RefPtr<EncodedAudioChunk> EncodedDataToOutputType(
+ nsIGlobalObject* aGlobalObject,
+ const RefPtr<MediaRawData>& aData) override;
+
+ virtual AudioDecoderConfigInternal EncoderConfigToDecoderConfig(
+ nsIGlobalObject* aGlobal, const RefPtr<MediaRawData>& aRawData,
+ const AudioEncoderConfigInternal& aOutputConfig) const override;
+};
+
+} // namespace mozilla::dom
+
+#endif // mozilla_dom_AudioEncoder_h
diff --git a/dom/media/webcodecs/DecoderTemplate.cpp b/dom/media/webcodecs/DecoderTemplate.cpp
index 4d1c310737..2fc2471a24 100644
--- a/dom/media/webcodecs/DecoderTemplate.cpp
+++ b/dom/media/webcodecs/DecoderTemplate.cpp
@@ -296,8 +296,7 @@ void DecoderTemplate<DecoderType>::CloseInternal(const nsresult& aResult) {
if (r.isErr()) {
nsCString name;
GetErrorName(r.unwrapErr(), name);
- LOGE("Error in ResetInternal: %s", name.get());
- MOZ_CRASH();
+ LOGE("Error in ResetInternal during CloseInternal: %s", name.get());
}
mState = CodecState::Closed;
nsCString error;
@@ -473,7 +472,6 @@ MessageProcessedResult DecoderTemplate<DecoderType>::ProcessConfigureMessage(
mProcessingMessage.reset();
QueueATask("Error while configuring decoder",
[self = RefPtr{this}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
- MOZ_ASSERT(self->mState != CodecState::Closed);
self->CloseInternal(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
});
return MessageProcessedResult::Processed;
diff --git a/dom/media/webcodecs/DecoderTypes.h b/dom/media/webcodecs/DecoderTypes.h
index 339a164f70..4817a66f17 100644
--- a/dom/media/webcodecs/DecoderTypes.h
+++ b/dom/media/webcodecs/DecoderTypes.h
@@ -50,22 +50,22 @@ class VideoDecoderConfigInternal {
Maybe<uint32_t>&& aCodedHeight,
Maybe<uint32_t>&& aCodedWidth,
Maybe<VideoColorSpaceInternal>&& aColorSpace,
- Maybe<RefPtr<MediaByteBuffer>>&& aDescription,
+ already_AddRefed<MediaByteBuffer> aDescription,
Maybe<uint32_t>&& aDisplayAspectHeight,
Maybe<uint32_t>&& aDisplayAspectWidth,
const HardwareAcceleration& aHardwareAcceleration,
Maybe<bool>&& aOptimizeForLatency);
~VideoDecoderConfigInternal() = default;
- nsString ToString() const;
+ nsCString ToString() const;
bool Equals(const VideoDecoderConfigInternal& aOther) const {
- if (mDescription.isSome() != aOther.mDescription.isSome()) {
+ if (mDescription != aOther.mDescription) {
return false;
}
- if (mDescription.isSome() && aOther.mDescription.isSome()) {
- auto lhs = mDescription.value();
- auto rhs = aOther.mDescription.value();
+ if (mDescription && aOther.mDescription) {
+ auto lhs = mDescription;
+ auto rhs = aOther.mDescription;
if (lhs->Length() != rhs->Length()) {
return false;
}
@@ -86,7 +86,7 @@ class VideoDecoderConfigInternal {
Maybe<uint32_t> mCodedHeight;
Maybe<uint32_t> mCodedWidth;
Maybe<VideoColorSpaceInternal> mColorSpace;
- Maybe<RefPtr<MediaByteBuffer>> mDescription;
+ RefPtr<MediaByteBuffer> mDescription;
Maybe<uint32_t> mDisplayAspectHeight;
Maybe<uint32_t> mDisplayAspectWidth;
HardwareAcceleration mHardwareAcceleration;
@@ -116,24 +116,42 @@ class VideoDecoderTraits {
class AudioDecoderConfigInternal {
public:
+ AudioDecoderConfigInternal(const nsAString& aCodec, uint32_t aSampleRate,
+ uint32_t aNumberOfChannels,
+ already_AddRefed<MediaByteBuffer> aDescription);
static UniquePtr<AudioDecoderConfigInternal> Create(
const AudioDecoderConfig& aConfig);
~AudioDecoderConfigInternal() = default;
+ bool Equals(const AudioDecoderConfigInternal& aOther) const {
+ if (mDescription != aOther.mDescription) {
+ return false;
+ }
+ if (mDescription && aOther.mDescription) {
+ auto lhs = mDescription;
+ auto rhs = aOther.mDescription;
+ if (lhs->Length() != rhs->Length()) {
+ return false;
+ }
+ if (!ArrayEqual(lhs->Elements(), rhs->Elements(), lhs->Length())) {
+ return false;
+ }
+ }
+ return mCodec.Equals(aOther.mCodec) && mSampleRate == aOther.mSampleRate &&
+ mNumberOfChannels == aOther.mNumberOfChannels &&
+ mOptimizeForLatency == aOther.mOptimizeForLatency;
+ }
+ nsCString ToString() const;
+
nsString mCodec;
uint32_t mSampleRate;
uint32_t mNumberOfChannels;
- Maybe<RefPtr<MediaByteBuffer>> mDescription;
+ RefPtr<MediaByteBuffer> mDescription;
// Compilation fix, should be abstracted by DecoderAgent since those are not
// supported
HardwareAcceleration mHardwareAcceleration =
HardwareAcceleration::No_preference;
Maybe<bool> mOptimizeForLatency;
-
- private:
- AudioDecoderConfigInternal(const nsAString& aCodec, uint32_t aSampleRate,
- uint32_t aNumberOfChannels,
- Maybe<RefPtr<MediaByteBuffer>>&& aDescription);
};
class AudioDecoderTraits {
diff --git a/dom/media/webcodecs/EncoderTemplate.cpp b/dom/media/webcodecs/EncoderTemplate.cpp
index 35c8feb3f8..34edfae822 100644
--- a/dom/media/webcodecs/EncoderTemplate.cpp
+++ b/dom/media/webcodecs/EncoderTemplate.cpp
@@ -127,7 +127,7 @@ void EncoderTemplate<EncoderType>::Configure(const ConfigType& aConfig,
RefPtr<ConfigTypeInternal> config =
EncoderType::CreateConfigInternal(aConfig);
if (!config) {
- aRv.Throw(NS_ERROR_UNEXPECTED); // Invalid description data.
+ CloseInternal(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return;
}
@@ -237,9 +237,9 @@ template <typename EncoderType>
void EncoderTemplate<EncoderType>::Close(ErrorResult& aRv) {
AssertIsOnOwningThread();
- LOG("%s::Close %p", EncoderType::Name.get(), this);
+ LOG("%s %p, Close", EncoderType::Name.get(), this);
- if (auto r = CloseInternal(NS_ERROR_DOM_ABORT_ERR); r.isErr()) {
+ if (auto r = CloseInternalWithAbort(); r.isErr()) {
aRv.Throw(r.unwrapErr());
}
}
@@ -273,23 +273,33 @@ Result<Ok, nsresult> EncoderTemplate<EncoderType>::ResetInternal(
}
template <typename EncoderType>
-Result<Ok, nsresult> EncoderTemplate<EncoderType>::CloseInternal(
- const nsresult& aResult) {
+Result<Ok, nsresult> EncoderTemplate<EncoderType>::CloseInternalWithAbort() {
AssertIsOnOwningThread();
- MOZ_TRY(ResetInternal(aResult));
+ MOZ_TRY(ResetInternal(NS_ERROR_DOM_ABORT_ERR));
mState = CodecState::Closed;
- if (aResult != NS_ERROR_DOM_ABORT_ERR) {
- nsCString error;
- GetErrorName(aResult, error);
- LOGE("%s %p Close on error: %s", EncoderType::Name.get(), this,
- error.get());
- ReportError(aResult);
- }
return Ok();
}
template <typename EncoderType>
+void EncoderTemplate<EncoderType>::CloseInternal(const nsresult& aResult) {
+ AssertIsOnOwningThread();
+ MOZ_ASSERT(aResult != NS_ERROR_DOM_ABORT_ERR, "Use CloseInternalWithAbort");
+
+ auto r = ResetInternal(aResult);
+ if (r.isErr()) {
+ nsCString name;
+ GetErrorName(r.unwrapErr(), name);
+ LOGE("Error during ResetInternal during CloseInternal: %s", name.get());
+ }
+ mState = CodecState::Closed;
+ nsCString error;
+ GetErrorName(aResult, error);
+ LOGE("%s %p Close on error: %s", EncoderType::Name.get(), this, error.get());
+ ReportError(aResult);
+}
+
+template <typename EncoderType>
void EncoderTemplate<EncoderType>::ReportError(const nsresult& aResult) {
AssertIsOnOwningThread();
@@ -299,8 +309,28 @@ void EncoderTemplate<EncoderType>::ReportError(const nsresult& aResult) {
}
template <typename EncoderType>
-void EncoderTemplate<EncoderType>::OutputEncodedData(
- nsTArray<RefPtr<MediaRawData>>&& aData) {
+template <typename T, typename U>
+void EncoderTemplate<EncoderType>::CopyExtradataToDescriptionIfNeeded(
+ nsIGlobalObject* aGlobal, const T& aConfigInternal, U& aConfig) {
+ if (aConfigInternal.mDescription &&
+ !aConfigInternal.mDescription->IsEmpty()) {
+ auto& abov = aConfig.mDescription.Construct();
+ AutoEntryScript aes(aGlobal, "EncoderConfigToaConfigConfig");
+ size_t lengthBytes = aConfigInternal.mDescription->Length();
+ UniquePtr<uint8_t[], JS::FreePolicy> extradata(new uint8_t[lengthBytes]);
+ PodCopy(extradata.get(), aConfigInternal.mDescription->Elements(),
+ lengthBytes);
+ JS::Rooted<JSObject*> description(
+ aes.cx(), JS::NewArrayBufferWithContents(aes.cx(), lengthBytes,
+ std::move(extradata)));
+ JS::Rooted<JS::Value> value(aes.cx(), JS::ObjectValue(*description));
+ DebugOnly<bool> rv = abov.Init(aes.cx(), value);
+ }
+}
+
+template <>
+void EncoderTemplate<VideoEncoderTraits>::OutputEncodedVideoData(
+ const nsTArray<RefPtr<MediaRawData>>&& aData) {
AssertIsOnOwningThread();
MOZ_ASSERT(mState == CodecState::Configured);
MOZ_ASSERT(mActiveConfig);
@@ -313,7 +343,7 @@ void EncoderTemplate<EncoderType>::OutputEncodedData(
jsapi.Init(GetParentObject()); // TODO: check returned value?
JSContext* cx = jsapi.cx();
- RefPtr<typename EncoderType::OutputCallbackType> cb(mOutputCallback);
+ RefPtr<EncodedVideoChunkOutputCallback> cb(mOutputCallback);
for (auto& data : aData) {
// It's possible to have reset() called in between this task having been
// dispatched, and running -- no output callback should happen when that's
@@ -323,10 +353,10 @@ void EncoderTemplate<EncoderType>::OutputEncodedData(
if (!mActiveConfig) {
return;
}
- RefPtr<typename EncoderType::OutputType> encodedData =
+ RefPtr<EncodedVideoChunk> encodedData =
EncodedDataToOutputType(GetParentObject(), data);
- RootedDictionary<typename EncoderType::MetadataType> metadata(cx);
+ RootedDictionary<EncodedVideoChunkMetadata> metadata(cx);
if (mOutputNewDecoderConfig) {
VideoDecoderConfigInternal decoderConfigInternal =
EncoderConfigToDecoderConfig(GetParentObject(), data, *mActiveConfig);
@@ -354,23 +384,10 @@ void EncoderTemplate<EncoderType>::OutputEncodedData(
MaybeToNullable(decoderConfigInternal.mColorSpace->mTransfer);
decoderConfig.mColorSpace.Construct(std::move(colorSpace));
}
- if (decoderConfigInternal.mDescription &&
- !decoderConfigInternal.mDescription.value()->IsEmpty()) {
- auto& abov = decoderConfig.mDescription.Construct();
- AutoEntryScript aes(GetParentObject(), "EncoderConfigToDecoderConfig");
- size_t lengthBytes =
- decoderConfigInternal.mDescription.value()->Length();
- UniquePtr<uint8_t[], JS::FreePolicy> extradata(
- new uint8_t[lengthBytes]);
- PodCopy(extradata.get(),
- decoderConfigInternal.mDescription.value()->Elements(),
- lengthBytes);
- JS::Rooted<JSObject*> description(
- aes.cx(), JS::NewArrayBufferWithContents(aes.cx(), lengthBytes,
- std::move(extradata)));
- JS::Rooted<JS::Value> value(aes.cx(), JS::ObjectValue(*description));
- DebugOnly<bool> rv = abov.Init(aes.cx(), value);
- }
+
+ CopyExtradataToDescriptionIfNeeded(GetParentObject(),
+ decoderConfigInternal, decoderConfig);
+
if (decoderConfigInternal.mDisplayAspectHeight) {
decoderConfig.mDisplayAspectHeight.Construct(
decoderConfigInternal.mDisplayAspectHeight.value());
@@ -387,7 +404,7 @@ void EncoderTemplate<EncoderType>::OutputEncodedData(
metadata.mDecoderConfig.Construct(std::move(decoderConfig));
mOutputNewDecoderConfig = false;
LOGE("New config passed to output callback: %s",
- NS_ConvertUTF16toUTF8(decoderConfigInternal.ToString()).get());
+ decoderConfigInternal.ToString().get());
}
nsAutoCString metadataInfo;
@@ -407,125 +424,74 @@ void EncoderTemplate<EncoderType>::OutputEncodedData(
LOG("EncoderTemplate:: output callback (ts: % " PRId64 ")%s",
encodedData->Timestamp(), metadataInfo.get());
- cb->Call((typename EncoderType::OutputType&)(*encodedData), metadata);
+ cb->Call((EncodedVideoChunk&)(*encodedData), metadata);
}
}
-template <typename EncoderType>
-class EncoderTemplate<EncoderType>::ErrorRunnable final
- : public DiscardableRunnable {
- public:
- ErrorRunnable(Self* aEncoder, const nsresult& aError)
- : DiscardableRunnable("Decoder ErrorRunnable"),
- mEncoder(aEncoder),
- mError(aError) {
- MOZ_ASSERT(mEncoder);
- }
- ~ErrorRunnable() = default;
-
- // MOZ_CAN_RUN_SCRIPT_BOUNDARY until Runnable::Run is MOZ_CAN_RUN_SCRIPT.
- // See bug 1535398.
- MOZ_CAN_RUN_SCRIPT_BOUNDARY NS_IMETHOD Run() override {
- nsCString error;
- GetErrorName(mError, error);
- LOGE("%s %p report error: %s", EncoderType::Name.get(), mEncoder.get(),
- error.get());
- RefPtr<Self> d = std::move(mEncoder);
- d->ReportError(mError);
- return NS_OK;
- }
-
- private:
- RefPtr<Self> mEncoder;
- const nsresult mError;
-};
+template <>
+void EncoderTemplate<AudioEncoderTraits>::OutputEncodedAudioData(
+ const nsTArray<RefPtr<MediaRawData>>&& aData) {
+ AssertIsOnOwningThread();
+ MOZ_ASSERT(mState == CodecState::Configured);
+ MOZ_ASSERT(mActiveConfig);
-template <typename EncoderType>
-class EncoderTemplate<EncoderType>::OutputRunnable final
- : public DiscardableRunnable {
- public:
- OutputRunnable(Self* aEncoder, WebCodecsId aConfigureId,
- const nsACString& aLabel,
- nsTArray<RefPtr<MediaRawData>>&& aData)
- : DiscardableRunnable("Decoder OutputRunnable"),
- mEncoder(aEncoder),
- mConfigureId(aConfigureId),
- mLabel(aLabel),
- mData(std::move(aData)) {
- MOZ_ASSERT(mEncoder);
- }
- ~OutputRunnable() = default;
-
- // MOZ_CAN_RUN_SCRIPT_BOUNDARY until Runnable::Run is MOZ_CAN_RUN_SCRIPT.
- // See bug 1535398.
- MOZ_CAN_RUN_SCRIPT_BOUNDARY NS_IMETHOD Run() override {
- if (mEncoder->mState != CodecState::Configured) {
- LOGV("%s %p has been %s. Discard %s-result for EncoderAgent #%zu",
- EncoderType::Name.get(), mEncoder.get(),
- mEncoder->mState == CodecState::Closed ? "closed" : "reset",
- mLabel.get(), mConfigureId);
- return NS_OK;
- }
+ // Get JSContext for RootedDictionary.
+  // The EncodedAudioChunkMetadata and AudioDecoderConfig dictionaries
+  // below are rooted to work around the JS hazard issues.
+ AutoJSAPI jsapi;
+ DebugOnly<bool> ok =
+ jsapi.Init(GetParentObject()); // TODO: check returned value?
+ JSContext* cx = jsapi.cx();
- MOZ_ASSERT(mEncoder->mAgent);
- if (mConfigureId != mEncoder->mAgent->mId) {
- LOGW(
- "%s %p has been re-configured. Still yield %s-result for "
- "EncoderAgent #%zu",
- EncoderType::Name.get(), mEncoder.get(), mLabel.get(), mConfigureId);
+ RefPtr<EncodedAudioChunkOutputCallback> cb(mOutputCallback);
+ for (auto& data : aData) {
+ // It's possible to have reset() called in between this task having been
+ // dispatched, and running -- no output callback should happen when that's
+ // the case.
+ // This is imprecise in the spec, but discussed in
+ // https://github.com/w3c/webcodecs/issues/755 and agreed upon.
+ if (!mActiveConfig) {
+ return;
}
+ RefPtr<EncodedAudioChunk> encodedData =
+ EncodedDataToOutputType(GetParentObject(), data);
- LOGV("%s %p, yields %s-result for EncoderAgent #%zu",
- EncoderType::Name.get(), mEncoder.get(), mLabel.get(), mConfigureId);
- RefPtr<Self> d = std::move(mEncoder);
- d->OutputEncodedData(std::move(mData));
-
- return NS_OK;
- }
+ RootedDictionary<EncodedAudioChunkMetadata> metadata(cx);
+ if (mOutputNewDecoderConfig) {
+ AudioDecoderConfigInternal decoderConfigInternal =
+ this->EncoderConfigToDecoderConfig(GetParentObject(), data,
+ *mActiveConfig);
- private:
- RefPtr<Self> mEncoder;
- const WebCodecsId mConfigureId;
- const nsCString mLabel;
- nsTArray<RefPtr<MediaRawData>> mData;
-};
+      // Convert AudioDecoderConfigInternal to AudioDecoderConfig
+ RootedDictionary<AudioDecoderConfig> decoderConfig(cx);
+ decoderConfig.mCodec = decoderConfigInternal.mCodec;
+ decoderConfig.mNumberOfChannels = decoderConfigInternal.mNumberOfChannels;
+ decoderConfig.mSampleRate = decoderConfigInternal.mSampleRate;
-template <typename EncoderType>
-void EncoderTemplate<EncoderType>::ScheduleOutputEncodedData(
- nsTArray<RefPtr<MediaRawData>>&& aData, const nsACString& aLabel) {
- MOZ_ASSERT(mState == CodecState::Configured);
- MOZ_ASSERT(mAgent);
+ CopyExtradataToDescriptionIfNeeded(GetParentObject(),
+ decoderConfigInternal, decoderConfig);
- MOZ_ALWAYS_SUCCEEDS(NS_DispatchToCurrentThread(MakeAndAddRef<OutputRunnable>(
- this, mAgent->mId, aLabel, std::move(aData))));
-}
+ metadata.mDecoderConfig.Construct(std::move(decoderConfig));
+ mOutputNewDecoderConfig = false;
+ LOGE("New config passed to output callback: %s",
+ decoderConfigInternal.ToString().get());
+ }
-template <typename EncoderType>
-void EncoderTemplate<EncoderType>::ScheduleClose(const nsresult& aResult) {
- AssertIsOnOwningThread();
- MOZ_ASSERT(mState == CodecState::Configured);
+ nsAutoCString metadataInfo;
- auto task = [self = RefPtr{this}, result = aResult] {
- if (self->mState == CodecState::Closed) {
- nsCString error;
- GetErrorName(result, error);
- LOGW("%s %p has been closed. Ignore close with %s",
- EncoderType::Name.get(), self.get(), error.get());
- return;
+ if (metadata.mDecoderConfig.WasPassed()) {
+ metadataInfo.Append(", new decoder config");
}
- DebugOnly<Result<Ok, nsresult>> r = self->CloseInternal(result);
- MOZ_ASSERT(r.value.isOk());
- };
- nsISerialEventTarget* target = GetCurrentSerialEventTarget();
- if (NS_IsMainThread()) {
- MOZ_ALWAYS_SUCCEEDS(target->Dispatch(
- NS_NewRunnableFunction("ScheduleClose Runnable (main)", task)));
- return;
+ LOG("EncoderTemplate:: output callback (ts: % " PRId64
+ ", duration: % " PRId64 ", %zu bytes, %" PRIu64 " so far)",
+ encodedData->Timestamp(),
+ !encodedData->GetDuration().IsNull()
+ ? encodedData->GetDuration().Value()
+ : 0,
+ data->Size(), mPacketsOutput++);
+ cb->Call((EncodedAudioChunk&)(*encodedData), metadata);
}
-
- MOZ_ALWAYS_SUCCEEDS(target->Dispatch(NS_NewCancelableRunnableFunction(
- "ScheduleClose Runnable (worker)", task)));
}
template <typename EncoderType>
@@ -537,20 +503,10 @@ void EncoderTemplate<EncoderType>::ScheduleDequeueEvent() {
}
mDequeueEventScheduled = true;
- auto dispatcher = [self = RefPtr{this}] {
+ QueueATask("dequeue event task", [self = RefPtr{this}]() {
self->FireEvent(nsGkAtoms::ondequeue, u"dequeue"_ns);
self->mDequeueEventScheduled = false;
- };
- nsISerialEventTarget* target = GetCurrentSerialEventTarget();
-
- if (NS_IsMainThread()) {
- MOZ_ALWAYS_SUCCEEDS(target->Dispatch(NS_NewRunnableFunction(
- "ScheduleDequeueEvent Runnable (main)", dispatcher)));
- return;
- }
-
- MOZ_ALWAYS_SUCCEEDS(target->Dispatch(NS_NewCancelableRunnableFunction(
- "ScheduleDequeueEvent Runnable (worker)", dispatcher)));
+ });
}
template <typename EncoderType>
@@ -655,6 +611,15 @@ void EncoderTemplate<EncoderType>::CancelPendingControlMessages(
}
template <typename EncoderType>
+template <typename Func>
+void EncoderTemplate<EncoderType>::QueueATask(const char* aName,
+ Func&& aSteps) {
+ AssertIsOnOwningThread();
+ MOZ_ALWAYS_SUCCEEDS(NS_DispatchToCurrentThread(
+ NS_NewRunnableFunction(aName, std::forward<Func>(aSteps))));
+}
+
+template <typename EncoderType>
MessageProcessedResult EncoderTemplate<EncoderType>::ProcessConfigureMessage(
RefPtr<ConfigureMessage> aMessage) {
AssertIsOnOwningThread();
@@ -677,15 +642,13 @@ MessageProcessedResult EncoderTemplate<EncoderType>::ProcessConfigureMessage(
LOGE("%s %p ProcessConfigureMessage error (sync): Not supported",
EncoderType::Name.get(), this);
mProcessingMessage = nullptr;
- NS_DispatchToCurrentThread(NS_NewRunnableFunction(
- "ProcessConfigureMessage (async): not supported",
+ QueueATask(
+ "Error while configuring encoder",
[self = RefPtr(this)]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
LOGE("%s %p ProcessConfigureMessage (async close): Not supported",
EncoderType::Name.get(), self.get());
- DebugOnly<Result<Ok, nsresult>> r =
- self->CloseInternal(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
- MOZ_ASSERT(r.value.isOk());
- }));
+ self->CloseInternal(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+ });
return MessageProcessedResult::Processed;
}
@@ -711,19 +674,30 @@ void EncoderTemplate<EncoderType>::StopBlockingMessageQueue() {
}
template <typename EncoderType>
+void EncoderTemplate<EncoderType>::OutputEncodedData(
+ const nsTArray<RefPtr<MediaRawData>>&& aData) {
+ if constexpr (std::is_same_v<EncoderType, VideoEncoderTraits>) {
+ OutputEncodedVideoData(std::move(aData));
+ } else {
+ OutputEncodedAudioData(std::move(aData));
+ }
+}
+
+template <typename EncoderType>
void EncoderTemplate<EncoderType>::Reconfigure(
RefPtr<ConfigureMessage> aMessage) {
MOZ_ASSERT(mAgent);
- LOG("Reconfiguring encoder: %s",
- NS_ConvertUTF16toUTF8(aMessage->Config()->ToString()).get());
+ LOG("Reconfiguring encoder: %s", aMessage->Config()->ToString().get());
RefPtr<ConfigTypeInternal> config = aMessage->Config();
RefPtr<WebCodecsConfigurationChangeList> configDiff =
config->Diff(*mActiveConfig);
- // Nothing to do, return now
+ // Nothing to do; return now. Per spec, however, the decoder config
+ // must still be output alongside the next packet that is output.
if (configDiff->Empty()) {
+ mOutputNewDecoderConfig = true;
LOG("Reconfigure with identical config, returning.");
mProcessingMessage = nullptr;
StopBlockingMessageQueue();
@@ -731,9 +705,8 @@ void EncoderTemplate<EncoderType>::Reconfigure(
}
LOG("Attempting to reconfigure encoder: old: %s new: %s, diff: %s",
- NS_ConvertUTF16toUTF8(mActiveConfig->ToString()).get(),
- NS_ConvertUTF16toUTF8(config->ToString()).get(),
- NS_ConvertUTF16toUTF8(configDiff->ToString()).get());
+ mActiveConfig->ToString().get(), config->ToString().get(),
+ configDiff->ToString().get());
RefPtr<EncoderConfigurationChangeList> changeList =
configDiff->ToPEMChangeList();
@@ -766,16 +739,20 @@ void EncoderTemplate<EncoderType>::Reconfigure(
message](EncoderAgent::EncodePromise::ResolveOrRejectValue&&
aResult) {
if (aResult.IsReject()) {
+ // The spec asks to close the encoder with a
+ // NotSupportedError, so we log the exact error here.
const MediaResult& error = aResult.RejectValue();
- LOGE(
- "%s %p, EncoderAgent #%zu failed to flush during "
- "reconfigure, closing: %s",
- EncoderType::Name.get(), self.get(), id,
- error.Description().get());
-
- self->mProcessingMessage = nullptr;
- self->ScheduleClose(
- NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ LOGE("%s %p, EncoderAgent #%zu failed to configure: %s",
+ EncoderType::Name.get(), self.get(), id,
+ error.Description().get());
+
+ self->QueueATask(
+ "Error during drain during reconfigure",
+ [self = RefPtr{self}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ MOZ_ASSERT(self->mState != CodecState::Closed);
+ self->CloseInternal(
+ NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ });
return;
}
@@ -797,12 +774,15 @@ void EncoderTemplate<EncoderType>::Reconfigure(
LOG("%s %p Outputing %zu frames during flush "
" for reconfiguration with encoder destruction",
EncoderType::Name.get(), self.get(), data.Length());
- self->ScheduleOutputEncodedData(
- std::move(data),
- nsLiteralCString("Flush before reconfigure"));
+ self->QueueATask(
+ "Output encoded Data",
+ [self = RefPtr{self}, data = std::move(data)]()
+ MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ self->OutputEncodedData(std::move(data));
+ });
}
- NS_DispatchToCurrentThread(NS_NewRunnableFunction(
+ self->QueueATask(
"Destroy + recreate encoder after failed reconfigure",
[self = RefPtr(self), message]()
MOZ_CAN_RUN_SCRIPT_BOUNDARY {
@@ -810,7 +790,7 @@ void EncoderTemplate<EncoderType>::Reconfigure(
// encoder with the new configuration.
self->DestroyEncoderAgentIfAny();
self->Configure(message);
- }));
+ });
});
return;
}
@@ -833,32 +813,30 @@ void EncoderTemplate<EncoderType>::Configure(
RefPtr<ConfigureMessage> aMessage) {
MOZ_ASSERT(!mAgent);
- LOG("Configuring encoder: %s",
- NS_ConvertUTF16toUTF8(aMessage->Config()->ToString()).get());
+ LOG("Configuring encoder: %s", aMessage->Config()->ToString().get());
mOutputNewDecoderConfig = true;
mActiveConfig = aMessage->Config();
- bool decoderAgentCreated =
+ bool encoderAgentCreated =
CreateEncoderAgent(aMessage->mMessageId, aMessage->Config());
- if (!decoderAgentCreated) {
+ if (!encoderAgentCreated) {
LOGE(
"%s %p ProcessConfigureMessage error (sync): encoder agent "
"creation "
"failed",
EncoderType::Name.get(), this);
mProcessingMessage = nullptr;
- NS_DispatchToCurrentThread(NS_NewRunnableFunction(
- "ProcessConfigureMessage (async): encoder agent creating failed",
+ QueueATask(
+ "Error when configuring encoder (encoder agent creation failed)",
[self = RefPtr(this)]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ MOZ_ASSERT(self->mState != CodecState::Closed);
LOGE(
"%s %p ProcessConfigureMessage (async close): encoder agent "
"creation failed",
EncoderType::Name.get(), self.get());
- DebugOnly<Result<Ok, nsresult>> r =
- self->CloseInternal(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
- MOZ_ASSERT(r.value.isOk());
- }));
+ self->CloseInternal(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+ });
return;
}
@@ -866,7 +844,7 @@ void EncoderTemplate<EncoderType>::Configure(
MOZ_ASSERT(mActiveConfig);
LOG("Real configuration with fresh config: %s",
- NS_ConvertUTF16toUTF8(mActiveConfig->ToString().get()).get());
+ mActiveConfig->ToString().get());
EncoderConfig config = mActiveConfig->ToEncoderConfig();
mAgent->Configure(config)
@@ -897,10 +875,15 @@ void EncoderTemplate<EncoderType>::Configure(
LOGE("%s %p, EncoderAgent #%zu failed to configure: %s",
EncoderType::Name.get(), self.get(), id,
error.Description().get());
- DebugOnly<Result<Ok, nsresult>> r = self->CloseInternal(
- NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
- MOZ_ASSERT(r.value.isOk());
- return; // No further process
+
+ self->QueueATask(
+ "Error during configure",
+ [self = RefPtr{self}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ MOZ_ASSERT(self->mState != CodecState::Closed);
+ self->CloseInternal(
+ NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ });
+ return;
}
self->StopBlockingMessageQueue();
@@ -933,7 +916,11 @@ MessageProcessedResult EncoderTemplate<EncoderType>::ProcessEncodeMessage(
// data is invalid.
auto closeOnError = [&]() {
mProcessingMessage = nullptr;
- ScheduleClose(NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ QueueATask("Error during encode",
+ [self = RefPtr{this}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ MOZ_ASSERT(self->mState != CodecState::Closed);
+ self->CloseInternal(NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ });
return MessageProcessedResult::Processed;
};
@@ -973,8 +960,14 @@ MessageProcessedResult EncoderTemplate<EncoderType>::ProcessEncodeMessage(
LOGE("%s %p, EncoderAgent #%zu %s failed: %s",
EncoderType::Name.get(), self.get(), id, msgStr.get(),
error.Description().get());
- self->ScheduleClose(NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
- return; // No further process
+ self->QueueATask(
+ "Error during encode runnable",
+ [self = RefPtr{self}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ MOZ_ASSERT(self->mState != CodecState::Closed);
+ self->CloseInternal(
+ NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ });
+ return;
}
MOZ_ASSERT(aResult.IsResolve());
@@ -984,11 +977,16 @@ MessageProcessedResult EncoderTemplate<EncoderType>::ProcessEncodeMessage(
LOGV("%s %p got no data for %s", EncoderType::Name.get(),
self.get(), msgStr.get());
} else {
- LOGV("%s %p, schedule %zu encoded data output",
- EncoderType::Name.get(), self.get(), data.Length());
- self->ScheduleOutputEncodedData(std::move(data), msgStr);
+ LOGV("%s %p, schedule %zu encoded data output for %s",
+ EncoderType::Name.get(), self.get(), data.Length(),
+ msgStr.get());
+ self->QueueATask(
+ "Output encoded Data",
+ [self = RefPtr{self}, data2 = std::move(data)]()
+ MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ self->OutputEncodedData(std::move(data2));
+ });
}
-
self->ProcessControlMessageQueue();
})
->Track(aMessage->Request());
@@ -1022,69 +1020,78 @@ MessageProcessedResult EncoderTemplate<EncoderType>::ProcessFlushMessage(
}
mAgent->Drain()
- ->Then(GetCurrentSerialEventTarget(), __func__,
- [self = RefPtr{this}, id = mAgent->mId, aMessage](
- EncoderAgent::EncodePromise::ResolveOrRejectValue&& aResult) {
- MOZ_ASSERT(self->mProcessingMessage);
- MOZ_ASSERT(self->mProcessingMessage->AsFlushMessage());
- MOZ_ASSERT(self->mState == CodecState::Configured);
- MOZ_ASSERT(self->mAgent);
- MOZ_ASSERT(id == self->mAgent->mId);
- MOZ_ASSERT(self->mActiveConfig);
-
- LOG("%s %p, EncoderAgent #%zu %s has been %s",
- EncoderType::Name.get(), self.get(), id,
- aMessage->ToString().get(),
- aResult.IsResolve() ? "resolved" : "rejected");
-
- nsCString msgStr = aMessage->ToString();
-
- aMessage->Complete();
-
- // If flush failed, it means encoder fails to encode the data
- // sent before, so we treat it like an encode error. We reject
- // the promise first and then queue a task to close VideoEncoder
- // with an EncodingError.
- if (aResult.IsReject()) {
- const MediaResult& error = aResult.RejectValue();
- LOGE("%s %p, EncoderAgent #%zu failed to flush: %s",
- EncoderType::Name.get(), self.get(), id,
- error.Description().get());
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [self = RefPtr{this}, id = mAgent->mId, aMessage,
+ this](EncoderAgent::EncodePromise::ResolveOrRejectValue&& aResult) {
+ MOZ_ASSERT(self->mProcessingMessage);
+ MOZ_ASSERT(self->mProcessingMessage->AsFlushMessage());
+ MOZ_ASSERT(self->mState == CodecState::Configured);
+ MOZ_ASSERT(self->mAgent);
+ MOZ_ASSERT(id == self->mAgent->mId);
+ MOZ_ASSERT(self->mActiveConfig);
- // Reject with an EncodingError instead of the error we got
- // above.
- self->SchedulePromiseResolveOrReject(
- aMessage->TakePromise(),
- NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ LOG("%s %p, EncoderAgent #%zu %s has been %s",
+ EncoderType::Name.get(), self.get(), id,
+ aMessage->ToString().get(),
+ aResult.IsResolve() ? "resolved" : "rejected");
- self->mProcessingMessage = nullptr;
+ nsCString msgStr = aMessage->ToString();
- self->ScheduleClose(NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
- return; // No further process
- }
+ aMessage->Complete();
- // If flush succeeded, schedule to output encoded data first
- // and then resolve the promise, then keep processing the
- // control messages.
- MOZ_ASSERT(aResult.IsResolve());
- nsTArray<RefPtr<MediaRawData>> data =
- std::move(aResult.ResolveValue());
+ // If flush failed, it means the encoder failed to encode the data
+ // sent before, so we treat it like an encode error. We reject
+ // the promise first and then queue a task to close the encoder
+ // with an EncodingError.
+ if (aResult.IsReject()) {
+ const MediaResult& error = aResult.RejectValue();
+ LOGE("%s %p, EncoderAgent #%zu failed to flush: %s",
+ EncoderType::Name.get(), self.get(), id,
+ error.Description().get());
+ RefPtr<Promise> promise = aMessage->TakePromise();
+ // Reject with an EncodingError instead of the error we got
+ // above.
+ self->QueueATask(
+ "Error during flush runnable",
+ [self = RefPtr{this}, promise]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ promise->MaybeReject(
+ NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ self->mProcessingMessage = nullptr;
+ MOZ_ASSERT(self->mState != CodecState::Closed);
+ self->CloseInternal(
+ NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ });
+ return;
+ }
- if (data.IsEmpty()) {
- LOG("%s %p gets no data for %s", EncoderType::Name.get(),
- self.get(), msgStr.get());
- } else {
- LOG("%s %p, schedule %zu encoded data output for %s",
- EncoderType::Name.get(), self.get(), data.Length(),
- msgStr.get());
- self->ScheduleOutputEncodedData(std::move(data), msgStr);
- }
+ // If flush succeeded, schedule to output encoded data first
+ // and then resolve the promise, then keep processing the
+ // control messages.
+ MOZ_ASSERT(aResult.IsResolve());
+ nsTArray<RefPtr<MediaRawData>> data =
+ std::move(aResult.ResolveValue());
+
+ if (data.IsEmpty()) {
+ LOG("%s %p gets no data for %s", EncoderType::Name.get(),
+ self.get(), msgStr.get());
+ } else {
+ LOG("%s %p, schedule %zu encoded data output for %s",
+ EncoderType::Name.get(), self.get(), data.Length(),
+ msgStr.get());
+ }
- self->SchedulePromiseResolveOrReject(aMessage->TakePromise(),
- NS_OK);
- self->mProcessingMessage = nullptr;
- self->ProcessControlMessageQueue();
- })
+ RefPtr<Promise> promise = aMessage->TakePromise();
+ self->QueueATask(
+ "Flush: output encoded data task",
+ [self = RefPtr{self}, promise, data = std::move(data)]()
+ MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ self->OutputEncodedData(std::move(data));
+ promise->MaybeResolveWithUndefined();
+ });
+ self->mProcessingMessage = nullptr;
+ self->ProcessControlMessageQueue();
+ })
->Track(aMessage->Request());
return MessageProcessedResult::Processed;
@@ -1218,6 +1225,7 @@ void EncoderTemplate<EncoderType>::DestroyEncoderAgentIfAny() {
}
template class EncoderTemplate<VideoEncoderTraits>;
+template class EncoderTemplate<AudioEncoderTraits>;
#undef LOG
#undef LOGW
diff --git a/dom/media/webcodecs/EncoderTemplate.h b/dom/media/webcodecs/EncoderTemplate.h
index e53d7166d1..bc65edca46 100644
--- a/dom/media/webcodecs/EncoderTemplate.h
+++ b/dom/media/webcodecs/EncoderTemplate.h
@@ -18,6 +18,7 @@
#include "mozilla/Result.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/dom/VideoEncoderBinding.h"
+#include "mozilla/dom/AudioEncoderBinding.h"
#include "mozilla/dom/WorkerRef.h"
#include "mozilla/media/MediaUtils.h"
#include "nsStringFwd.h"
@@ -81,10 +82,8 @@ class EncoderTemplate : public DOMEventTargetHelper {
RefPtr<ConfigTypeInternal> Config() { return mConfig; }
nsCString ToString() const override {
nsCString rv;
- rv.AppendPrintf(
- "ConfigureMessage(#%zu): %s", this->mMessageId,
- mConfig ? NS_ConvertUTF16toUTF8(mConfig->ToString().get()).get()
- : "null cfg");
+ rv.AppendPrintf("ConfigureMessage(#%zu): %s", this->mMessageId,
+ mConfig ? mConfig->ToString().get() : "null cfg");
return rv;
}
@@ -149,10 +148,14 @@ class EncoderTemplate : public DOMEventTargetHelper {
void StartBlockingMessageQueue();
void StopBlockingMessageQueue();
+ MOZ_CAN_RUN_SCRIPT
+ void OutputEncodedData(const nsTArray<RefPtr<MediaRawData>>&& aData);
+
CodecState State() const { return mState; };
uint32_t EncodeQueueSize() const { return mEncodeQueueSize; };
+ MOZ_CAN_RUN_SCRIPT
void Configure(const ConfigType& aConfig, ErrorResult& aRv);
void EncodeAudioData(InputType& aInput, ErrorResult& aRv);
@@ -170,10 +173,13 @@ class EncoderTemplate : public DOMEventTargetHelper {
/* Type conversion functions for the Encoder implementation */
protected:
virtual RefPtr<OutputType> EncodedDataToOutputType(
- nsIGlobalObject* aGlobalObject, RefPtr<MediaRawData>& aData) = 0;
+ nsIGlobalObject* aGlobalObject, const RefPtr<MediaRawData>& aData) = 0;
virtual OutputConfigType EncoderConfigToDecoderConfig(
nsIGlobalObject* aGlobalObject, const RefPtr<MediaRawData>& aData,
const ConfigTypeInternal& aOutputConfig) const = 0;
+ template <typename T, typename U>
+ void CopyExtradataToDescriptionIfNeeded(nsIGlobalObject* aGlobal,
+ const T& aConfigInternal, U& aConfig);
/* Internal member variables and functions */
protected:
// EncoderTemplate can run on either main thread or worker thread.
@@ -182,21 +188,17 @@ class EncoderTemplate : public DOMEventTargetHelper {
}
Result<Ok, nsresult> ResetInternal(const nsresult& aResult);
- MOZ_CAN_RUN_SCRIPT_BOUNDARY
- Result<Ok, nsresult> CloseInternal(const nsresult& aResult);
+ MOZ_CAN_RUN_SCRIPT
+ Result<Ok, nsresult> CloseInternalWithAbort();
+ MOZ_CAN_RUN_SCRIPT
+ void CloseInternal(const nsresult& aResult);
MOZ_CAN_RUN_SCRIPT void ReportError(const nsresult& aResult);
- MOZ_CAN_RUN_SCRIPT void OutputEncodedData(
- nsTArray<RefPtr<MediaRawData>>&& aData);
-
- class ErrorRunnable;
- void ScheduleReportError(const nsresult& aResult);
- class OutputRunnable;
- void ScheduleOutputEncodedData(nsTArray<RefPtr<MediaRawData>>&& aData,
- const nsACString& aLabel);
-
- void ScheduleClose(const nsresult& aResult);
+ MOZ_CAN_RUN_SCRIPT void OutputEncodedVideoData(
+ const nsTArray<RefPtr<MediaRawData>>&& aData);
+ MOZ_CAN_RUN_SCRIPT void OutputEncodedAudioData(
+ const nsTArray<RefPtr<MediaRawData>>&& aData);
void ScheduleDequeueEvent();
nsresult FireEvent(nsAtom* aTypeWithOn, const nsAString& aEventType);
@@ -207,6 +209,9 @@ class EncoderTemplate : public DOMEventTargetHelper {
void ProcessControlMessageQueue();
void CancelPendingControlMessages(const nsresult& aResult);
+ template <typename Func>
+ void QueueATask(const char* aName, Func&& aSteps);
+
MessageProcessedResult ProcessConfigureMessage(
RefPtr<ConfigureMessage> aMessage);
@@ -244,14 +249,14 @@ class EncoderTemplate : public DOMEventTargetHelper {
// used as the FlushMessage's Id.
size_t mFlushCounter;
- // EncoderAgent will be created the first time "configure" is being processed,
- // and will be destroyed when "reset" is called. If another "configure" is
- // called, either it's possible to reconfigure the underlying encoder without
- // tearing eveyrthing down (e.g. a bitrate change), or it's not possible, and
- // the current encoder will be destroyed and a new one create.
- // In both cases, the encoder is implicitely flushed before the configuration
- // change.
- // See CanReconfigure on the {Audio,Video}EncoderConfigInternal
+ // EncoderAgent will be created the first time "configure" is being
+ // processed, and will be destroyed when "reset" is called. If another
+ // "configure" is called, either it's possible to reconfigure the underlying
+ // encoder without tearing everything down (e.g. a bitrate change), or it's
+ // not possible, and the current encoder will be destroyed and a new one
+ // created. In both cases, the encoder is implicitly flushed before the
+ // configuration change. See CanReconfigure on the
+ // {Audio,Video}EncoderConfigInternal
RefPtr<EncoderAgent> mAgent;
RefPtr<ConfigTypeInternal> mActiveConfig;
// This is true when a configure call has just been processed, and it's
@@ -283,6 +288,7 @@ class EncoderTemplate : public DOMEventTargetHelper {
// TODO: Use StrongWorkerRef instead if this is always used in the same
// thread?
RefPtr<ThreadSafeWorkerRef> mWorkerRef;
+ uint64_t mPacketsOutput = 0;
};
} // namespace mozilla::dom
diff --git a/dom/media/webcodecs/EncoderTypes.h b/dom/media/webcodecs/EncoderTypes.h
index d58d7c54c8..39f660203b 100644
--- a/dom/media/webcodecs/EncoderTypes.h
+++ b/dom/media/webcodecs/EncoderTypes.h
@@ -9,13 +9,14 @@
#include "mozilla/Maybe.h"
#include "mozilla/dom/EncodedVideoChunk.h"
+#include "mozilla/dom/MediaRecorderBinding.h"
#include "mozilla/dom/VideoEncoderBinding.h"
+#include "mozilla/dom/AudioEncoderBinding.h"
#include "mozilla/dom/VideoFrame.h"
#include "mozilla/dom/VideoFrameBinding.h"
#include "nsStringFwd.h"
#include "nsTLiteralString.h"
#include "VideoDecoder.h"
-#include "PlatformEncoderModule.h"
namespace mozilla {
@@ -24,6 +25,68 @@ class MediaByteBuffer;
namespace dom {
+class AudioEncoderConfigInternal {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioEncoderConfigInternal);
+ explicit AudioEncoderConfigInternal(const AudioEncoderConfig& aConfig);
+ explicit AudioEncoderConfigInternal(
+ const AudioEncoderConfigInternal& aConfig);
+
+ void SetSpecific(const EncoderConfig::CodecSpecific& aSpecific);
+
+ nsCString ToString() const;
+
+ bool Equals(const AudioEncoderConfigInternal& aOther) const;
+ bool CanReconfigure(const AudioEncoderConfigInternal& aOther) const;
+
+ // Returns an EncoderConfig struct with as many filled members as
+ // possible.
+ EncoderConfig ToEncoderConfig() const;
+
+ already_AddRefed<WebCodecsConfigurationChangeList> Diff(
+ const AudioEncoderConfigInternal& aOther) const;
+
+ nsString mCodec;
+ Maybe<uint32_t> mSampleRate;
+ Maybe<uint32_t> mNumberOfChannels;
+ Maybe<uint32_t> mBitrate;
+ BitrateMode mBitrateMode;
+ Maybe<EncoderConfig::CodecSpecific> mSpecific;
+
+ private:
+ AudioEncoderConfigInternal(const nsAString& aCodec,
+ Maybe<uint32_t> aSampleRate,
+ Maybe<uint32_t> aNumberOfChannels,
+ Maybe<uint32_t> aBitRate,
+ BitrateMode aBitratemode);
+ ~AudioEncoderConfigInternal() = default;
+};
+
+class AudioEncoderTraits {
+ public:
+ static constexpr nsLiteralCString Name = "AudioEncoder"_ns;
+ using ConfigType = AudioEncoderConfig;
+ using ConfigTypeInternal = AudioEncoderConfigInternal;
+ using InputType = dom::AudioData;
+ using OutputConfigType = mozilla::dom::AudioDecoderConfigInternal;
+ using InputTypeInternal = mozilla::AudioData;
+ using OutputType = EncodedAudioChunk;
+ using OutputCallbackType = EncodedAudioChunkOutputCallback;
+ using MetadataType = EncodedAudioChunkMetadata;
+
+ static bool IsSupported(const ConfigTypeInternal& aConfig);
+ static Result<UniquePtr<TrackInfo>, nsresult> CreateTrackInfo(
+ const ConfigTypeInternal& aConfig);
+ static bool Validate(const ConfigType& aConfig, nsCString& aErrorMessage);
+ static RefPtr<ConfigTypeInternal> CreateConfigInternal(
+ const ConfigType& aConfig);
+ static RefPtr<InputTypeInternal> CreateInputInternal(
+ const InputType& aInput, const VideoEncoderEncodeOptions& aOptions);
+ static already_AddRefed<OutputConfigType> EncoderConfigToDecoderConfig(
+ nsIGlobalObject* aGlobal, const RefPtr<MediaRawData>& aData,
+ const ConfigTypeInternal& mOutputConfig);
+};
+
class VideoEncoderConfigInternal {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoEncoderConfigInternal);
@@ -40,7 +103,7 @@ class VideoEncoderConfigInternal {
bool CanReconfigure(const VideoEncoderConfigInternal& aOther) const;
already_AddRefed<WebCodecsConfigurationChangeList> Diff(
const VideoEncoderConfigInternal& aOther) const;
- nsString ToString() const;
+ nsCString ToString() const;
nsString mCodec;
uint32_t mWidth;
@@ -84,17 +147,14 @@ class VideoEncoderTraits {
static bool IsSupported(const ConfigTypeInternal& aConfig);
static bool CanEncodeVideo(const ConfigTypeInternal& aConfig);
- static Result<UniquePtr<TrackInfo>, nsresult> CreateTrackInfo(
- const ConfigTypeInternal& aConfig);
static bool Validate(const ConfigType& aConfig, nsCString& aErrorMessage);
static RefPtr<ConfigTypeInternal> CreateConfigInternal(
const ConfigType& aConfig);
static RefPtr<InputTypeInternal> CreateInputInternal(
const InputType& aInput, const VideoEncoderEncodeOptions& aOptions);
static already_AddRefed<OutputConfigType> EncoderConfigToDecoderConfig(
- nsIGlobalObject* aGlobal,
- const RefPtr<MediaRawData>& aData,
- const ConfigTypeInternal& mOutputConfig);
+ nsIGlobalObject* aGlobal, const RefPtr<MediaRawData>& aData,
+ const ConfigTypeInternal& mOutputConfig);
};
} // namespace dom
diff --git a/dom/media/webcodecs/VideoDecoder.cpp b/dom/media/webcodecs/VideoDecoder.cpp
index 18855e5cea..dfc1dff093 100644
--- a/dom/media/webcodecs/VideoDecoder.cpp
+++ b/dom/media/webcodecs/VideoDecoder.cpp
@@ -96,7 +96,7 @@ VideoColorSpaceInit VideoColorSpaceInternal::ToColorSpaceInit() const {
VideoDecoderConfigInternal::VideoDecoderConfigInternal(
const nsAString& aCodec, Maybe<uint32_t>&& aCodedHeight,
Maybe<uint32_t>&& aCodedWidth, Maybe<VideoColorSpaceInternal>&& aColorSpace,
- Maybe<RefPtr<MediaByteBuffer>>&& aDescription,
+ already_AddRefed<MediaByteBuffer> aDescription,
Maybe<uint32_t>&& aDisplayAspectHeight,
Maybe<uint32_t>&& aDisplayAspectWidth,
const HardwareAcceleration& aHardwareAcceleration,
@@ -105,7 +105,7 @@ VideoDecoderConfigInternal::VideoDecoderConfigInternal(
mCodedHeight(std::move(aCodedHeight)),
mCodedWidth(std::move(aCodedWidth)),
mColorSpace(std::move(aColorSpace)),
- mDescription(std::move(aDescription)),
+ mDescription(aDescription),
mDisplayAspectHeight(std::move(aDisplayAspectHeight)),
mDisplayAspectWidth(std::move(aDisplayAspectWidth)),
mHardwareAcceleration(aHardwareAcceleration),
@@ -120,7 +120,7 @@ UniquePtr<VideoDecoderConfigInternal> VideoDecoderConfigInternal::Create(
return nullptr;
}
- Maybe<RefPtr<MediaByteBuffer>> description;
+ RefPtr<MediaByteBuffer> description;
if (aConfig.mDescription.WasPassed()) {
auto rv = GetExtraDataFromArrayBuffer(aConfig.mDescription.Value());
if (rv.isErr()) { // Invalid description data.
@@ -130,7 +130,7 @@ UniquePtr<VideoDecoderConfigInternal> VideoDecoderConfigInternal::Create(
static_cast<uint32_t>(rv.unwrapErr()));
return nullptr;
}
- description.emplace(rv.unwrap());
+ description = rv.unwrap();
}
Maybe<VideoColorSpaceInternal> colorSpace;
@@ -141,16 +141,16 @@ UniquePtr<VideoDecoderConfigInternal> VideoDecoderConfigInternal::Create(
return UniquePtr<VideoDecoderConfigInternal>(new VideoDecoderConfigInternal(
aConfig.mCodec, OptionalToMaybe(aConfig.mCodedHeight),
OptionalToMaybe(aConfig.mCodedWidth), std::move(colorSpace),
- std::move(description), OptionalToMaybe(aConfig.mDisplayAspectHeight),
+ description.forget(), OptionalToMaybe(aConfig.mDisplayAspectHeight),
OptionalToMaybe(aConfig.mDisplayAspectWidth),
aConfig.mHardwareAcceleration,
OptionalToMaybe(aConfig.mOptimizeForLatency)));
}
-nsString VideoDecoderConfigInternal::ToString() const {
- nsString rv;
+nsCString VideoDecoderConfigInternal::ToString() const {
+ nsCString rv;
- rv.Append(mCodec);
+ rv.Append(NS_ConvertUTF16toUTF8(mCodec));
if (mCodedWidth.isSome()) {
rv.AppendPrintf("coded: %dx%d", mCodedWidth.value(), mCodedHeight.value());
}
@@ -161,8 +161,8 @@ nsString VideoDecoderConfigInternal::ToString() const {
if (mColorSpace.isSome()) {
rv.AppendPrintf("colorspace %s", "todo");
}
- if (mDescription.isSome() && mDescription.value()) {
- rv.AppendPrintf("extradata: %zu bytes", mDescription.value()->Length());
+ if (mDescription) {
+ rv.AppendPrintf("extradata: %zu bytes", mDescription->Length());
}
rv.AppendPrintf("hw accel: %s", GetEnumString(mHardwareAcceleration).get());
if (mOptimizeForLatency.isSome()) {
@@ -579,8 +579,7 @@ bool VideoDecoderTraits::IsSupported(
/* static */
Result<UniquePtr<TrackInfo>, nsresult> VideoDecoderTraits::CreateTrackInfo(
const VideoDecoderConfigInternal& aConfig) {
- LOG("Create a VideoInfo from %s config",
- NS_ConvertUTF16toUTF8(aConfig.ToString()).get());
+ LOG("Create a VideoInfo from %s config", aConfig.ToString().get());
nsTArray<UniquePtr<TrackInfo>> tracks = GetTracksInfo(aConfig);
if (tracks.Length() != 1 || tracks[0]->GetType() != TrackInfo::kVideoTrack) {
@@ -668,15 +667,14 @@ Result<UniquePtr<TrackInfo>, nsresult> VideoDecoderTraits::CreateTrackInfo(
}
}
- if (aConfig.mDescription.isSome()) {
- RefPtr<MediaByteBuffer> buf;
- buf = aConfig.mDescription.value();
- if (buf) {
- LOG("The given config has %zu bytes of description data", buf->Length());
+ if (aConfig.mDescription) {
+ if (!aConfig.mDescription->IsEmpty()) {
+ LOG("The given config has %zu bytes of description data",
+ aConfig.mDescription->Length());
if (vi->mExtraData) {
LOGW("The default extra data is overwritten");
}
- vi->mExtraData = buf;
+ vi->mExtraData = aConfig.mDescription;
}
// TODO: Make this utility and replace the similar one in MP4Demuxer.cpp.
diff --git a/dom/media/webcodecs/VideoEncoder.cpp b/dom/media/webcodecs/VideoEncoder.cpp
index f593f70c77..5407e917b6 100644
--- a/dom/media/webcodecs/VideoEncoder.cpp
+++ b/dom/media/webcodecs/VideoEncoder.cpp
@@ -120,11 +120,12 @@ VideoEncoderConfigInternal::VideoEncoderConfigInternal(
mContentHint(OptionalToMaybe(aConfig.mContentHint)),
mAvc(OptionalToMaybe(aConfig.mAvc)) {}
-nsString VideoEncoderConfigInternal::ToString() const {
- nsString rv;
+nsCString VideoEncoderConfigInternal::ToString() const {
+ nsCString rv;
- rv.AppendPrintf("Codec: %s, [%" PRIu32 "x%" PRIu32 "],",
- NS_ConvertUTF16toUTF8(mCodec).get(), mWidth, mHeight);
+ rv.AppendLiteral("Codec: ");
+ rv.Append(NS_ConvertUTF16toUTF8(mCodec));
+ rv.AppendPrintf(" [%" PRIu32 "x%" PRIu32 "],", mWidth, mHeight);
if (mDisplayWidth.isSome()) {
rv.AppendPrintf(", display[%" PRIu32 "x%" PRIu32 "]", mDisplayWidth.value(),
mDisplayHeight.value());
@@ -194,20 +195,19 @@ bool VideoEncoderConfigInternal::CanReconfigure(
}
EncoderConfig VideoEncoderConfigInternal::ToEncoderConfig() const {
- MediaDataEncoder::Usage usage;
+ Usage usage;
if (mLatencyMode == LatencyMode::Quality) {
- usage = MediaDataEncoder::Usage::Record;
+ usage = Usage::Record;
} else {
- usage = MediaDataEncoder::Usage::Realtime;
+ usage = Usage::Realtime;
}
- MediaDataEncoder::HardwarePreference hwPref =
- MediaDataEncoder::HardwarePreference::None;
+ HardwarePreference hwPref = HardwarePreference::None;
if (mHardwareAcceleration ==
mozilla::dom::HardwareAcceleration::Prefer_hardware) {
- hwPref = MediaDataEncoder::HardwarePreference::RequireHardware;
+ hwPref = HardwarePreference::RequireHardware;
} else if (mHardwareAcceleration ==
mozilla::dom::HardwareAcceleration::Prefer_software) {
- hwPref = MediaDataEncoder::HardwarePreference::RequireSoftware;
+ hwPref = HardwarePreference::RequireSoftware;
}
CodecType codecType;
auto maybeCodecType = CodecStringToCodecType(mCodec);
@@ -236,19 +236,19 @@ EncoderConfig VideoEncoderConfigInternal::ToEncoderConfig() const {
}
}
uint8_t numTemporalLayers = 1;
- MediaDataEncoder::ScalabilityMode scalabilityMode;
+ ScalabilityMode scalabilityMode;
if (mScalabilityMode) {
if (mScalabilityMode->EqualsLiteral("L1T2")) {
- scalabilityMode = MediaDataEncoder::ScalabilityMode::L1T2;
+ scalabilityMode = ScalabilityMode::L1T2;
numTemporalLayers = 2;
} else if (mScalabilityMode->EqualsLiteral("L1T3")) {
- scalabilityMode = MediaDataEncoder::ScalabilityMode::L1T3;
+ scalabilityMode = ScalabilityMode::L1T3;
numTemporalLayers = 3;
} else {
- scalabilityMode = MediaDataEncoder::ScalabilityMode::None;
+ scalabilityMode = ScalabilityMode::None;
}
} else {
- scalabilityMode = MediaDataEncoder::ScalabilityMode::None;
+ scalabilityMode = ScalabilityMode::None;
}
// Only for vp9, not vp8
if (codecType == CodecType::VP9) {
@@ -278,8 +278,8 @@ EncoderConfig VideoEncoderConfigInternal::ToEncoderConfig() const {
AssertedCast<uint8_t>(mFramerate.refOr(0.f)), 0,
mBitrate.refOr(0),
mBitrateMode == VideoEncoderBitrateMode::Constant
- ? MediaDataEncoder::BitrateMode::Constant
- : MediaDataEncoder::BitrateMode::Variable,
+ ? mozilla::BitrateMode::Constant
+ : mozilla::BitrateMode::Variable,
hwPref, scalabilityMode, specific);
}
already_AddRefed<WebCodecsConfigurationChangeList>
@@ -558,7 +558,7 @@ already_AddRefed<Promise> VideoEncoder::IsConfigSupported(
}
RefPtr<EncodedVideoChunk> VideoEncoder::EncodedDataToOutputType(
- nsIGlobalObject* aGlobalObject, RefPtr<MediaRawData>& aData) {
+ nsIGlobalObject* aGlobalObject, const RefPtr<MediaRawData>& aData) {
AssertIsOnOwningThread();
MOZ_RELEASE_ASSERT(aData->mType == MediaData::Type::RAW_DATA);
@@ -591,8 +591,8 @@ VideoDecoderConfigInternal VideoEncoder::EncoderConfigToDecoderConfig(
Some(mOutputConfig.mWidth), /* aCodedWidth */
Some(init), /* aColorSpace */
aRawData->mExtraData && !aRawData->mExtraData->IsEmpty()
- ? Some(aRawData->mExtraData)
- : Nothing(), /* aDescription*/
+ ? aRawData->mExtraData.forget()
+ : nullptr, /* aDescription*/
Maybe<uint32_t>(mOutputConfig.mDisplayHeight), /* aDisplayAspectHeight*/
Maybe<uint32_t>(mOutputConfig.mDisplayWidth), /* aDisplayAspectWidth */
mOutputConfig.mHardwareAcceleration, /* aHardwareAcceleration */
diff --git a/dom/media/webcodecs/VideoEncoder.h b/dom/media/webcodecs/VideoEncoder.h
index 9251b5023a..f6d1bfffb7 100644
--- a/dom/media/webcodecs/VideoEncoder.h
+++ b/dom/media/webcodecs/VideoEncoder.h
@@ -65,7 +65,7 @@ class VideoEncoder final : public EncoderTemplate<VideoEncoderTraits> {
protected:
virtual RefPtr<EncodedVideoChunk> EncodedDataToOutputType(
- nsIGlobalObject* aGlobal, RefPtr<MediaRawData>& aData) override;
+ nsIGlobalObject* aGlobal, const RefPtr<MediaRawData>& aData) override;
virtual VideoDecoderConfigInternal EncoderConfigToDecoderConfig(
nsIGlobalObject* aGlobal /* TODO: delete */,
diff --git a/dom/media/webcodecs/WebCodecsUtils.cpp b/dom/media/webcodecs/WebCodecsUtils.cpp
index 3507aba440..db4d79220e 100644
--- a/dom/media/webcodecs/WebCodecsUtils.cpp
+++ b/dom/media/webcodecs/WebCodecsUtils.cpp
@@ -7,6 +7,7 @@
#include "WebCodecsUtils.h"
#include "DecoderTypes.h"
+#include "PlatformEncoderModule.h"
#include "VideoUtils.h"
#include "js/experimental/TypedData.h"
#include "mozilla/Assertions.h"
@@ -15,8 +16,6 @@
#include "mozilla/dom/VideoFrameBinding.h"
#include "mozilla/gfx/Types.h"
#include "nsDebug.h"
-#include "PlatformEncoderModule.h"
-#include "PlatformEncoderModule.h"
extern mozilla::LazyLogModule gWebCodecsLog;
@@ -412,8 +411,8 @@ struct ConfigurationChangeToString {
}
};
-nsString WebCodecsConfigurationChangeList::ToString() const {
- nsString rv;
+nsCString WebCodecsConfigurationChangeList::ToString() const {
+ nsCString rv;
for (const WebCodecsEncoderConfigurationItem& change : mChanges) {
nsCString str = change.match(ConfigurationChangeToString());
rv.AppendPrintf("- %s\n", str.get());
@@ -470,24 +469,24 @@ WebCodecsConfigurationChangeList::ToPEMChangeList() const {
} else if (change.is<FramerateChange>()) {
rv->Push(mozilla::FramerateChange(change.as<FramerateChange>().get()));
} else if (change.is<dom::BitrateModeChange>()) {
- MediaDataEncoder::BitrateMode mode;
+ mozilla::BitrateMode mode;
if (change.as<dom::BitrateModeChange>().get() ==
dom::VideoEncoderBitrateMode::Constant) {
- mode = MediaDataEncoder::BitrateMode::Constant;
+ mode = mozilla::BitrateMode::Constant;
} else if (change.as<BitrateModeChange>().get() ==
dom::VideoEncoderBitrateMode::Variable) {
- mode = MediaDataEncoder::BitrateMode::Variable;
+ mode = mozilla::BitrateMode::Variable;
} else {
// Quantizer, not underlying support yet.
- mode = MediaDataEncoder::BitrateMode::Variable;
+ mode = mozilla::BitrateMode::Variable;
}
rv->Push(mozilla::BitrateModeChange(mode));
} else if (change.is<LatencyModeChange>()) {
- MediaDataEncoder::Usage usage;
+ Usage usage;
if (change.as<LatencyModeChange>().get() == dom::LatencyMode::Quality) {
- usage = MediaDataEncoder::Usage::Record;
+ usage = Usage::Record;
} else {
- usage = MediaDataEncoder::Usage::Realtime;
+ usage = Usage::Realtime;
}
rv->Push(UsageChange(usage));
} else if (change.is<ContentHintChange>()) {
@@ -570,7 +569,7 @@ Maybe<CodecType> CodecStringToCodecType(const nsAString& aCodecString) {
return Nothing();
}
-nsString ConfigToString(const VideoDecoderConfig& aConfig) {
+nsCString ConfigToString(const VideoDecoderConfig& aConfig) {
nsString rv;
auto internal = VideoDecoderConfigInternal::Create(aConfig);
@@ -606,4 +605,52 @@ bool IsSupportedVideoCodec(const nsAString& aCodec) {
return true;
}
+nsCString ConvertCodecName(const nsCString& aContainer,
+ const nsCString& aCodec) {
+ if (!aContainer.EqualsLiteral("x-wav")) {
+ return aCodec;
+ }
+
+ // https://www.rfc-editor.org/rfc/rfc2361.txt
+ if (aCodec.EqualsLiteral("ulaw")) {
+ return nsCString("7");
+ }
+ if (aCodec.EqualsLiteral("alaw")) {
+ return nsCString("6");
+ }
+ if (aCodec.Find("f32")) {
+ return nsCString("3");
+ }
+ // Linear PCM
+ return nsCString("1");
+}
+
+bool IsSupportedAudioCodec(const nsAString& aCodec) {
+ LOG("IsSupportedAudioCodec: %s", NS_ConvertUTF16toUTF8(aCodec).get());
+ return aCodec.EqualsLiteral("flac") || aCodec.EqualsLiteral("mp3") ||
+ IsAACCodecString(aCodec) || aCodec.EqualsLiteral("opus") ||
+ aCodec.EqualsLiteral("ulaw") || aCodec.EqualsLiteral("alaw") ||
+ aCodec.EqualsLiteral("pcm-u8") || aCodec.EqualsLiteral("pcm-s16") ||
+ aCodec.EqualsLiteral("pcm-s24") || aCodec.EqualsLiteral("pcm-s32") ||
+ aCodec.EqualsLiteral("pcm-f32");
+}
+
+uint32_t BytesPerSamples(const mozilla::dom::AudioSampleFormat& aFormat) {
+ switch (aFormat) {
+ case AudioSampleFormat::U8:
+ case AudioSampleFormat::U8_planar:
+ return sizeof(uint8_t);
+ case AudioSampleFormat::S16:
+ case AudioSampleFormat::S16_planar:
+ return sizeof(int16_t);
+ case AudioSampleFormat::S32:
+ case AudioSampleFormat::F32:
+ case AudioSampleFormat::S32_planar:
+ case AudioSampleFormat::F32_planar:
+ return sizeof(float);
+ }
+ MOZ_ASSERT_UNREACHABLE("Invalid enum value");
+ return 0;
+}
+
} // namespace mozilla::dom
diff --git a/dom/media/webcodecs/WebCodecsUtils.h b/dom/media/webcodecs/WebCodecsUtils.h
index 196c57421d..b2a8f17398 100644
--- a/dom/media/webcodecs/WebCodecsUtils.h
+++ b/dom/media/webcodecs/WebCodecsUtils.h
@@ -9,17 +9,18 @@
#include "ErrorList.h"
#include "MediaData.h"
+#include "PlatformEncoderModule.h"
#include "js/TypeDecls.h"
#include "mozilla/Maybe.h"
#include "mozilla/MozPromise.h"
#include "mozilla/Result.h"
#include "mozilla/TaskQueue.h"
+#include "mozilla/dom/AudioDataBinding.h"
#include "mozilla/dom/BindingDeclarations.h"
#include "mozilla/dom/Nullable.h"
#include "mozilla/dom/UnionTypes.h"
#include "mozilla/dom/VideoEncoderBinding.h"
#include "mozilla/dom/VideoFrameBinding.h"
-#include "PlatformEncoderModule.h"
namespace mozilla {
@@ -218,7 +219,7 @@ struct WebCodecsConfigurationChangeList {
// Convert this to the format the underlying PEM can understand
RefPtr<EncoderConfigurationChangeList> ToPEMChangeList() const;
- nsString ToString() const;
+ nsCString ToString() const;
nsTArray<WebCodecsEncoderConfigurationItem> mChanges;
@@ -235,12 +236,20 @@ VideoColorSpaceInit FallbackColorSpaceForWebContent();
Maybe<CodecType> CodecStringToCodecType(const nsAString& aCodecString);
-nsString ConfigToString(const VideoDecoderConfig& aConfig);
+nsCString ConfigToString(const VideoDecoderConfig& aConfig);
+// Returns true if a particular codec is supported by WebCodecs.
bool IsSupportedVideoCodec(const nsAString& aCodec);
+bool IsSupportedAudioCodec(const nsAString& aCodec);
-} // namespace dom
+// Returns the codec string to use in Gecko for a particular container and
+// codec name given by WebCodecs. This maps pcm description to the profile
+// number, and simply returns the codec name for all other codecs.
+nsCString ConvertCodecName(const nsCString& aContainer,
+ const nsCString& aCodec);
+uint32_t BytesPerSamples(const mozilla::dom::AudioSampleFormat& aFormat);
+} // namespace dom
} // namespace mozilla
#endif // MOZILLA_DOM_WEBCODECS_WEBCODECSUTILS_H
diff --git a/dom/media/webcodecs/crashtests/1881079.html b/dom/media/webcodecs/crashtests/1881079.html
new file mode 100644
index 0000000000..15fd26ff74
--- /dev/null
+++ b/dom/media/webcodecs/crashtests/1881079.html
@@ -0,0 +1,35 @@
+<!DOCTYPE html>
+<script>
+document.addEventListener("DOMContentLoaded", async () => {
+ const encoder = new VideoEncoder({
+ 'output': (e) => {},
+ 'error': (e) => {},
+ })
+ encoder.configure({
+ 'codec': 'ó ‹šá©¿',
+ 'width': 2147483648,
+ 'height': 60,
+ })
+ encoder.reset()
+ encoder.configure({
+ 'codec': 'ó ‹šá©¿',
+ 'width': 4294967295,
+ 'height': 29,
+ })
+ const decoder = new VideoDecoder({
+ 'output': (e) => {},
+ 'error': (e) => {},
+ })
+ decoder.configure({
+ 'codec': 'ó ‹šá©¿',
+ 'width': 2147483648,
+ 'height': 60,
+ })
+ decoder.reset()
+ decoder.configure({
+ 'codec': 'ó ‹šá©¿',
+ 'width': 4294967295,
+ 'height': 29,
+ })
+})
+</script>
diff --git a/dom/media/webcodecs/crashtests/crashtests.list b/dom/media/webcodecs/crashtests/crashtests.list
index cea5139fe9..16fbd90ff5 100644
--- a/dom/media/webcodecs/crashtests/crashtests.list
+++ b/dom/media/webcodecs/crashtests/crashtests.list
@@ -1,4 +1,6 @@
skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1839270.html
skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1848460.html
skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1849271.html
-skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1864475.html \ No newline at end of file
+skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1864475.html
+skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1881079.html
+
diff --git a/dom/media/webcodecs/moz.build b/dom/media/webcodecs/moz.build
index ddb5aad5cb..1c398439a3 100644
--- a/dom/media/webcodecs/moz.build
+++ b/dom/media/webcodecs/moz.build
@@ -23,6 +23,7 @@ EXPORTS.mozilla += [
EXPORTS.mozilla.dom += [
"AudioData.h",
"AudioDecoder.h",
+ "AudioEncoder.h",
"DecoderTemplate.h",
"DecoderTypes.h",
"EncodedAudioChunk.h",
@@ -40,6 +41,7 @@ EXPORTS.mozilla.dom += [
UNIFIED_SOURCES += [
"AudioData.cpp",
"AudioDecoder.cpp",
+ "AudioEncoder.cpp",
"DecoderAgent.cpp",
"DecoderTemplate.cpp",
"EncodedAudioChunk.cpp",
diff --git a/dom/media/webrtc/MediaEnginePrefs.h b/dom/media/webrtc/MediaEnginePrefs.h
index cedb7f457c..de5daf0ad9 100644
--- a/dom/media/webrtc/MediaEnginePrefs.h
+++ b/dom/media/webrtc/MediaEnginePrefs.h
@@ -35,6 +35,7 @@ class MediaEnginePrefs {
mNoiseOn(false),
mTransientOn(false),
mAgc2Forced(false),
+ mExpectDrift(-1), // auto
mAgc(0),
mNoise(0),
mChannels(0) {}
@@ -50,6 +51,7 @@ class MediaEnginePrefs {
bool mNoiseOn;
bool mTransientOn;
bool mAgc2Forced;
+ int32_t mExpectDrift;
int32_t mAgc;
int32_t mNoise;
int32_t mChannels;
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
index 9d778d411d..220dcf3bd8 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -20,6 +20,7 @@
#include "mozilla/Sprintf.h"
#include "mozilla/Logging.h"
+#include "api/audio/echo_canceller3_factory.h"
#include "common_audio/include/audio_util.h"
#include "modules/audio_processing/include/audio_processing.h"
@@ -146,22 +147,17 @@ nsresult MediaEngineWebRTCMicrophoneSource::Reconfigure(
return NS_OK;
}
-void MediaEngineWebRTCMicrophoneSource::ApplySettings(
+AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
const MediaEnginePrefs& aPrefs) {
- AssertIsOnOwningThread();
-
- TRACE("ApplySettings");
- MOZ_ASSERT(
- mTrack,
- "ApplySetting is to be called only after SetTrack has been called");
+ AudioProcessing::Config config;
- mAudioProcessingConfig.pipeline.multi_channel_render = true;
- mAudioProcessingConfig.pipeline.multi_channel_capture = true;
+ config.pipeline.multi_channel_render = true;
+ config.pipeline.multi_channel_capture = true;
- mAudioProcessingConfig.echo_canceller.enabled = aPrefs.mAecOn;
- mAudioProcessingConfig.echo_canceller.mobile_mode = aPrefs.mUseAecMobile;
+ config.echo_canceller.enabled = aPrefs.mAecOn;
+ config.echo_canceller.mobile_mode = aPrefs.mUseAecMobile;
- if ((mAudioProcessingConfig.gain_controller1.enabled =
+ if ((config.gain_controller1.enabled =
aPrefs.mAgcOn && !aPrefs.mAgc2Forced)) {
auto mode = static_cast<AudioProcessing::Config::GainController1::Mode>(
aPrefs.mAgc);
@@ -169,7 +165,7 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
mode != AudioProcessing::Config::GainController1::kAdaptiveDigital &&
mode != AudioProcessing::Config::GainController1::kFixedDigital) {
LOG_ERROR("AudioInputProcessing %p Attempt to set invalid AGC mode %d",
- mInputProcessing.get(), static_cast<int>(mode));
+ this, static_cast<int>(mode));
mode = AudioProcessing::Config::GainController1::kAdaptiveDigital;
}
#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
@@ -177,20 +173,20 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
LOG_ERROR(
"AudioInputProcessing %p Invalid AGC mode kAdaptiveAnalog on "
"mobile",
- mInputProcessing.get());
+ this);
MOZ_ASSERT_UNREACHABLE(
"Bad pref set in all.js or in about:config"
" for the auto gain, on mobile.");
mode = AudioProcessing::Config::GainController1::kFixedDigital;
}
#endif
- mAudioProcessingConfig.gain_controller1.mode = mode;
+ config.gain_controller1.mode = mode;
}
- mAudioProcessingConfig.gain_controller2.enabled =
- mAudioProcessingConfig.gain_controller2.adaptive_digital.enabled =
+ config.gain_controller2.enabled =
+ config.gain_controller2.adaptive_digital.enabled =
aPrefs.mAgcOn && aPrefs.mAgc2Forced;
- if ((mAudioProcessingConfig.noise_suppression.enabled = aPrefs.mNoiseOn)) {
+ if ((config.noise_suppression.enabled = aPrefs.mNoiseOn)) {
auto level = static_cast<AudioProcessing::Config::NoiseSuppression::Level>(
aPrefs.mNoise);
if (level != AudioProcessing::Config::NoiseSuppression::kLow &&
@@ -200,49 +196,44 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
LOG_ERROR(
"AudioInputProcessing %p Attempt to set invalid noise suppression "
"level %d",
- mInputProcessing.get(), static_cast<int>(level));
+ this, static_cast<int>(level));
level = AudioProcessing::Config::NoiseSuppression::kModerate;
}
- mAudioProcessingConfig.noise_suppression.level = level;
+ config.noise_suppression.level = level;
}
- mAudioProcessingConfig.transient_suppression.enabled = aPrefs.mTransientOn;
+ config.transient_suppression.enabled = aPrefs.mTransientOn;
+
+ config.high_pass_filter.enabled = aPrefs.mHPFOn;
- mAudioProcessingConfig.high_pass_filter.enabled = aPrefs.mHPFOn;
+ return config;
+}
+
+void MediaEngineWebRTCMicrophoneSource::ApplySettings(
+ const MediaEnginePrefs& aPrefs) {
+ AssertIsOnOwningThread();
+
+ TRACE("ApplySettings");
+ MOZ_ASSERT(
+ mTrack,
+ "ApplySetting is to be called only after SetTrack has been called");
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
NS_DispatchToMainThread(NS_NewRunnableFunction(
- __func__, [this, that, deviceID, track = mTrack, prefs = aPrefs,
- audioProcessingConfig = mAudioProcessingConfig] {
+ __func__, [this, that, deviceID, track = mTrack, prefs = aPrefs] {
mSettings->mEchoCancellation.Value() = prefs.mAecOn;
mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
mSettings->mChannelCount.Value() = prefs.mChannels;
- // The high-pass filter is not taken into account when activating the
- // pass through, since it's not controllable from content.
- bool passThrough = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
-
if (track->IsDestroyed()) {
return;
}
track->QueueControlMessageWithNoShutdown(
- [track, deviceID, inputProcessing = mInputProcessing,
- audioProcessingConfig, passThrough,
- requestedInputChannelCount = prefs.mChannels] {
- inputProcessing->ApplyConfig(track->Graph(),
- audioProcessingConfig);
- {
- TRACE("SetRequestedInputChannelCount");
- inputProcessing->SetRequestedInputChannelCount(
- track->Graph(), deviceID, requestedInputChannelCount);
- }
- {
- TRACE("SetPassThrough");
- inputProcessing->SetPassThrough(track->Graph(), passThrough);
- }
+ [track, deviceID, prefs, inputProcessing = mInputProcessing] {
+ inputProcessing->ApplySettings(track->Graph(), deviceID, prefs);
});
}));
}
@@ -408,57 +399,51 @@ void MediaEngineWebRTCMicrophoneSource::GetSettings(
}
AudioInputProcessing::AudioInputProcessing(uint32_t aMaxChannelCount)
- : mAudioProcessing(AudioProcessingBuilder().Create().release()),
- mRequestedInputChannelCount(aMaxChannelCount),
- mSkipProcessing(false),
- mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
+ : mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
mEnabled(false),
mEnded(false),
- mPacketCount(0) {}
+ mPacketCount(0) {
+ mSettings.mChannels = static_cast<int32_t>(std::min<uint32_t>(
+ std::numeric_limits<int32_t>::max(), aMaxChannelCount));
+}
void AudioInputProcessing::Disconnect(MediaTrackGraph* aGraph) {
// This method is just for asserts.
aGraph->AssertOnGraphThread();
}
-bool AudioInputProcessing::PassThrough(MediaTrackGraph* aGraph) const {
+bool AudioInputProcessing::IsPassThrough(MediaTrackGraph* aGraph) const {
aGraph->AssertOnGraphThread();
- return mSkipProcessing;
+ // The high-pass filter is not taken into account when activating the
+ // pass through, since it's not controllable from content.
+ return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
}
-void AudioInputProcessing::SetPassThrough(MediaTrackGraph* aGraph,
- bool aPassThrough) {
+void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
- if (aPassThrough == mSkipProcessing) {
- return;
- }
- mSkipProcessing = aPassThrough;
-
if (!mEnabled) {
MOZ_ASSERT(!mPacketizerInput);
return;
}
- if (aPassThrough) {
- // Turn on pass-through
+ if (IsPassThrough(aGraph)) {
+ // Switching to pass-through. Clear state so that it doesn't affect any
+ // future processing, if re-enabled.
ResetAudioProcessing(aGraph);
} else {
- // Turn off pass-through
+ // No longer pass-through. Processing will not use old state.
+ // Packetizer setup is deferred until needed.
MOZ_ASSERT(!mPacketizerInput);
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
}
}
uint32_t AudioInputProcessing::GetRequestedInputChannelCount() {
- return mRequestedInputChannelCount;
+ return mSettings.mChannels;
}
-void AudioInputProcessing::SetRequestedInputChannelCount(
- MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId,
- uint32_t aRequestedInputChannelCount) {
- mRequestedInputChannelCount = aRequestedInputChannelCount;
-
+void AudioInputProcessing::RequestedInputChannelCountChanged(
+ MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId) {
aGraph->ReevaluateInputDevice(aDeviceId);
}
@@ -470,12 +455,7 @@ void AudioInputProcessing::Start(MediaTrackGraph* aGraph) {
}
mEnabled = true;
- if (mSkipProcessing) {
- return;
- }
-
MOZ_ASSERT(!mPacketizerInput);
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
}
void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
@@ -487,7 +467,7 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
mEnabled = false;
- if (mSkipProcessing) {
+ if (IsPassThrough(aGraph)) {
return;
}
@@ -605,10 +585,11 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
//
// The D(N) frames of data are just forwarded from input to output without any
// processing
-void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
- GraphTime aTo, AudioSegment* aInput,
+void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
+ GraphTime aFrom, GraphTime aTo,
+ AudioSegment* aInput,
AudioSegment* aOutput) {
- aGraph->AssertOnGraphThread();
+ aTrack->AssertOnGraphThread();
MOZ_ASSERT(aFrom <= aTo);
MOZ_ASSERT(!mEnded);
@@ -617,10 +598,11 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
return;
}
+ MediaTrackGraph* graph = aTrack->Graph();
if (!mEnabled) {
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Filling %" PRId64
" frames of silence to output (disabled)",
- aGraph, aGraph->CurrentDriver(), this, need);
+ graph, graph->CurrentDriver(), this, need);
aOutput->AppendNullData(need);
return;
}
@@ -628,22 +610,20 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(aInput->GetDuration() == need,
"Wrong data length from input port source");
- if (PassThrough(aGraph)) {
+ if (IsPassThrough(graph)) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Forwarding %" PRId64
" frames of input data to output directly (PassThrough)",
- aGraph, aGraph->CurrentDriver(), this, aInput->GetDuration());
+ graph, graph->CurrentDriver(), this, aInput->GetDuration());
aOutput->AppendSegment(aInput);
return;
}
- // SetPassThrough(false) must be called before reaching here.
- MOZ_ASSERT(mPacketizerInput);
- // If mRequestedInputChannelCount is updated, create a new packetizer. No
- // need to change the pre-buffering since the rate is always the same. The
- // frames left in the packetizer would be replaced by null data and then
- // transferred to mSegment.
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
+ // If the requested input channel count is updated, create a new
+ // packetizer. No need to change the pre-buffering since the rate is always
+ // the same. The frames left in the packetizer would be replaced by null
+ // data and then transferred to mSegment.
+ EnsurePacketizer(aTrack);
// Preconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@@ -655,10 +635,10 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(mSegment.GetDuration() >= 1);
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
- PacketizeAndProcess(aGraph, *aInput);
+ PacketizeAndProcess(aTrack, *aInput);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Buffer has %" PRId64
" frames of data now, after packetizing and processing",
- aGraph, aGraph->CurrentDriver(), this, mSegment.GetDuration());
+ graph, graph->CurrentDriver(), this, mSegment.GetDuration());
// By setting pre-buffering to the number of frames of one packet, and
// because the maximum number of frames stuck in the packetizer before
@@ -669,8 +649,7 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
mSegment.RemoveLeading(need);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p moving %" PRId64
" frames of data to output, leaving %" PRId64 " frames in buffer",
- aGraph, aGraph->CurrentDriver(), this, need,
- mSegment.GetDuration());
+ graph, graph->CurrentDriver(), this, need, mSegment.GetDuration());
// Postconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@@ -680,16 +659,16 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
}
-void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
+void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
const AudioChunk& aChunk) {
MOZ_ASSERT(aChunk.ChannelCount() > 0);
- aGraph->AssertOnGraphThread();
+ aTrack->AssertOnGraphThread();
- if (!mEnabled || PassThrough(aGraph)) {
+ if (!mEnabled || IsPassThrough(aTrack->Graph())) {
return;
}
- TrackRate sampleRate = aGraph->GraphRate();
+ TrackRate sampleRate = aTrack->mSampleRate;
uint32_t framesPerPacket = GetPacketSize(sampleRate); // in frames
// Downmix from aChannels to MAX_CHANNELS if needed.
uint32_t channelCount =
@@ -727,6 +706,7 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
if (mOutputBufferFrameCount == framesPerPacket) {
// Have a complete packet. Analyze it.
+ EnsureAudioProcessing(aTrack);
for (uint32_t channel = 0; channel < channelCount; channel++) {
channelPtrs[channel] = &mOutputBuffer[channel * framesPerPacket];
}
@@ -743,14 +723,15 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
}
// Only called if we're not in passthrough mode
-void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
+void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
const AudioSegment& aSegment) {
- MOZ_ASSERT(!PassThrough(aGraph),
+ MediaTrackGraph* graph = aTrack->Graph();
+ MOZ_ASSERT(!IsPassThrough(graph),
"This should be bypassed when in PassThrough mode.");
MOZ_ASSERT(mEnabled);
MOZ_ASSERT(mPacketizerInput);
MOZ_ASSERT(mPacketizerInput->mPacketSize ==
- GetPacketSize(aGraph->GraphRate()));
+ GetPacketSize(aTrack->mSampleRate));
// Calculate number of the pending frames in mChunksInPacketizer.
auto pendingFrames = [&]() {
@@ -792,7 +773,7 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Packetizing %zu frames. "
"Packetizer has %u frames (enough for %u packets) now",
- aGraph, aGraph->CurrentDriver(), this, frameCount,
+ graph, graph->CurrentDriver(), this, frameCount,
mPacketizerInput->FramesAvailable(),
mPacketizerInput->PacketsAvailable());
@@ -850,9 +831,10 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
deinterleavedPacketizedInputDataChannelPointers.Elements());
}
- StreamConfig inputConfig(aGraph->GraphRate(), channelCountInput);
+ StreamConfig inputConfig(aTrack->mSampleRate, channelCountInput);
StreamConfig outputConfig = inputConfig;
+ EnsureAudioProcessing(aTrack);
// Bug 1404965: Get the right delay here, it saves some work down the line.
mAudioProcessing->set_stream_delay_ms(0);
@@ -958,7 +940,7 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
"(Graph %p, Driver %p) AudioInputProcessing %p Appending %u frames of "
"packetized audio, leaving %u frames in packetizer (%" PRId64
" frames in mChunksInPacketizer)",
- aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
+ graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
mPacketizerInput->FramesAvailable(), pendingFrames());
// Postcondition of the Principal-labelling logic.
@@ -971,17 +953,36 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
// Reset some processing
- mAudioProcessing->Initialize();
+ if (mAudioProcessing) {
+ mAudioProcessing->Initialize();
+ }
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Reinitializing audio "
"processing",
aGraph, aGraph->CurrentDriver(), this);
}
-void AudioInputProcessing::ApplyConfig(MediaTrackGraph* aGraph,
- const AudioProcessing::Config& aConfig) {
+void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceID,
+ const MediaEnginePrefs& aSettings) {
+ TRACE("AudioInputProcessing::ApplySettings");
aGraph->AssertOnGraphThread();
- mAudioProcessing->ApplyConfig(aConfig);
+
+ // Read previous state from mSettings.
+ uint32_t oldChannelCount = GetRequestedInputChannelCount();
+ bool wasPassThrough = IsPassThrough(aGraph);
+
+ mSettings = aSettings;
+ if (mAudioProcessing) {
+ mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
+ }
+
+ if (oldChannelCount != GetRequestedInputChannelCount()) {
+ RequestedInputChannelCountChanged(aGraph, aDeviceID);
+ }
+ if (wasPassThrough != IsPassThrough(aGraph)) {
+ PassThroughChanged(aGraph);
+ }
}
void AudioInputProcessing::End() {
@@ -995,14 +996,15 @@ TrackTime AudioInputProcessing::NumBufferedFrames(
return mSegment.GetDuration();
}
-void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
- uint32_t aChannels) {
- aGraph->AssertOnGraphThread();
- MOZ_ASSERT(aChannels > 0);
+void AudioInputProcessing::EnsurePacketizer(AudioProcessingTrack* aTrack) {
+ aTrack->AssertOnGraphThread();
MOZ_ASSERT(mEnabled);
- MOZ_ASSERT(!mSkipProcessing);
+ MediaTrackGraph* graph = aTrack->Graph();
+ MOZ_ASSERT(!IsPassThrough(graph));
- if (mPacketizerInput && mPacketizerInput->mChannels == aChannels) {
+ uint32_t channelCount = GetRequestedInputChannelCount();
+ MOZ_ASSERT(channelCount > 0);
+ if (mPacketizerInput && mPacketizerInput->mChannels == channelCount) {
return;
}
@@ -1010,7 +1012,7 @@ void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
// need to change pre-buffering since the packet size is the same as the old
// one, since the rate is a constant.
MOZ_ASSERT_IF(mPacketizerInput, mPacketizerInput->mPacketSize ==
- GetPacketSize(aGraph->GraphRate()));
+ GetPacketSize(aTrack->mSampleRate));
bool needPreBuffering = !mPacketizerInput;
if (mPacketizerInput) {
const TrackTime numBufferedFrames =
@@ -1020,24 +1022,62 @@ void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
mChunksInPacketizer.clear();
}
- mPacketizerInput.emplace(GetPacketSize(aGraph->GraphRate()), aChannels);
+ mPacketizerInput.emplace(GetPacketSize(aTrack->mSampleRate), channelCount);
if (needPreBuffering) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p: Adding %u frames of "
"silence as pre-buffering",
- aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
+ graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
AudioSegment buffering;
buffering.AppendNullData(
static_cast<TrackTime>(mPacketizerInput->mPacketSize));
- PacketizeAndProcess(aGraph, buffering);
+ PacketizeAndProcess(aTrack, buffering);
+ }
+}
+
+void AudioInputProcessing::EnsureAudioProcessing(AudioProcessingTrack* aTrack) {
+ aTrack->AssertOnGraphThread();
+
+ MediaTrackGraph* graph = aTrack->Graph();
+ // If the AEC might need to deal with drift then inform it of this and it
+ // will be less conservative about echo suppression. This can lead to some
+ // suppression of non-echo signal, so do this only when drift is expected.
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=11985#c2
+ bool haveAECAndDrift = mSettings.mAecOn;
+ if (haveAECAndDrift) {
+ if (mSettings.mExpectDrift < 0) {
+ haveAECAndDrift =
+ graph->OutputForAECMightDrift() ||
+ aTrack->GetDeviceInputTrackGraphThread()->AsNonNativeInputTrack();
+ } else {
+ haveAECAndDrift = mSettings.mExpectDrift > 0;
+ }
+ }
+ if (!mAudioProcessing || haveAECAndDrift != mHadAECAndDrift) {
+ TRACE("AudioProcessing creation");
+ LOG("Track %p AudioInputProcessing %p creating AudioProcessing. "
+ "aec+drift: %s",
+ aTrack, this, haveAECAndDrift ? "Y" : "N");
+ mHadAECAndDrift = haveAECAndDrift;
+ AudioProcessingBuilder builder;
+ builder.SetConfig(ConfigForPrefs(mSettings));
+ if (haveAECAndDrift) {
+ // Setting an EchoControlFactory always enables AEC, overriding
+ // Config::echo_canceller.enabled, so do this only when AEC is enabled.
+ EchoCanceller3Config aec3Config;
+ aec3Config.echo_removal_control.has_clock_drift = true;
+ builder.SetEchoControlFactory(
+ std::make_unique<EchoCanceller3Factory>(aec3Config));
+ }
+ mAudioProcessing.reset(builder.Create().release());
}
}
void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
- MOZ_ASSERT(mSkipProcessing || !mEnabled);
+ MOZ_ASSERT(IsPassThrough(aGraph) || !mEnabled);
MOZ_ASSERT(mPacketizerInput);
LOG_FRAME(
@@ -1047,7 +1087,9 @@ void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
// Reset AudioProcessing so that if we resume processing in the future it
// doesn't depend on old state.
- mAudioProcessing->Initialize();
+ if (mAudioProcessing) {
+ mAudioProcessing->Initialize();
+ }
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
mPacketizerInput->FramesAvailable() ==
@@ -1124,9 +1166,8 @@ void AudioProcessingTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
} else {
MOZ_ASSERT(mInputs.Length() == 1);
AudioSegment data;
- DeviceInputConsumerTrack::GetInputSourceData(data, mInputs[0], aFrom,
- aTo);
- mInputProcessing->Process(Graph(), aFrom, aTo, &data,
+ DeviceInputConsumerTrack::GetInputSourceData(data, aFrom, aTo);
+ mInputProcessing->Process(this, aFrom, aTo, &data,
GetData<AudioSegment>());
}
MOZ_ASSERT(TrackTimeToGraphTime(GetEnd()) == aTo);
@@ -1142,7 +1183,7 @@ void AudioProcessingTrack::NotifyOutputData(MediaTrackGraph* aGraph,
MOZ_ASSERT(mGraph == aGraph, "Cannot feed audio output to another graph");
AssertOnGraphThread();
if (mInputProcessing) {
- mInputProcessing->ProcessOutputData(aGraph, aChunk);
+ mInputProcessing->ProcessOutputData(this, aChunk);
}
}
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.h b/dom/media/webrtc/MediaEngineWebRTCAudio.h
index e71b5ef826..6b1fbf0089 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -91,8 +91,7 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
// Current state of the resource for this source.
MediaEngineSourceState mState;
- // The current preferences that will be forwarded to mAudioProcessingConfig
- // below.
+ // The current preferences that will be forwarded to mInputProcessing below.
MediaEnginePrefs mCurrentPrefs;
// The AudioProcessingTrack used to inteface with the MediaTrackGraph. Set in
@@ -101,10 +100,6 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
// See note at the top of this class.
RefPtr<AudioInputProcessing> mInputProcessing;
-
- // Copy of the config currently applied to AudioProcessing through
- // mInputProcessing.
- webrtc::AudioProcessing::Config mAudioProcessingConfig;
};
// This class is created on the MediaManager thread, and then exclusively used
@@ -113,15 +108,16 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
class AudioInputProcessing : public AudioDataListener {
public:
explicit AudioInputProcessing(uint32_t aMaxChannelCount);
- void Process(MediaTrackGraph* aGraph, GraphTime aFrom, GraphTime aTo,
+ void Process(AudioProcessingTrack* aTrack, GraphTime aFrom, GraphTime aTo,
AudioSegment* aInput, AudioSegment* aOutput);
- void ProcessOutputData(MediaTrackGraph* aGraph, const AudioChunk& aChunk);
+ void ProcessOutputData(AudioProcessingTrack* aTrack,
+ const AudioChunk& aChunk);
bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
// If we're passing data directly without AEC or any other process, this
// means that all voice-processing has been disabled intentionaly. In this
// case, consider that the device is not used for voice input.
- return !PassThrough(aGraph);
+ return !IsPassThrough(aGraph);
}
void Start(MediaTrackGraph* aGraph);
@@ -135,23 +131,20 @@ class AudioInputProcessing : public AudioDataListener {
void Disconnect(MediaTrackGraph* aGraph) override;
- void PacketizeAndProcess(MediaTrackGraph* aGraph,
+ void PacketizeAndProcess(AudioProcessingTrack* aTrack,
const AudioSegment& aSegment);
- void SetPassThrough(MediaTrackGraph* aGraph, bool aPassThrough);
uint32_t GetRequestedInputChannelCount();
- void SetRequestedInputChannelCount(MediaTrackGraph* aGraph,
- CubebUtils::AudioDeviceID aDeviceId,
- uint32_t aRequestedInputChannelCount);
- // This is true when all processing is disabled, we can skip
+ // This is true when all processing is disabled, in which case we can skip
// packetization, resampling and other processing passes.
- bool PassThrough(MediaTrackGraph* aGraph) const;
+ bool IsPassThrough(MediaTrackGraph* aGraph) const;
// This allow changing the APM options, enabling or disabling processing
- // steps. The config gets applied the next time we're about to process input
+ // steps. The settings get applied the next time we're about to process input
// data.
- void ApplyConfig(MediaTrackGraph* aGraph,
- const webrtc::AudioProcessing::Config& aConfig);
+ void ApplySettings(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceID,
+ const MediaEnginePrefs& aSettings);
void End();
@@ -164,9 +157,18 @@ class AudioInputProcessing : public AudioDataListener {
bool IsEnded() const { return mEnded; }
+ // For testing:
+ bool HadAECAndDrift() const { return mHadAECAndDrift; }
+
private:
~AudioInputProcessing() = default;
- void EnsureAudioProcessing(MediaTrackGraph* aGraph, uint32_t aChannels);
+ webrtc::AudioProcessing::Config ConfigForPrefs(
+ const MediaEnginePrefs& aPrefs);
+ void PassThroughChanged(MediaTrackGraph* aGraph);
+ void RequestedInputChannelCountChanged(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceId);
+ void EnsurePacketizer(AudioProcessingTrack* aTrack);
+ void EnsureAudioProcessing(AudioProcessingTrack* aTrack);
void ResetAudioProcessing(MediaTrackGraph* aGraph);
PrincipalHandle GetCheckedPrincipal(const AudioSegment& aSegment);
// This implements the processing algoritm to apply to the input (e.g. a
@@ -174,17 +176,16 @@ class AudioInputProcessing : public AudioDataListener {
// class only accepts audio chunks of 10ms. It has two inputs and one output:
// it is fed the speaker data and the microphone data. It outputs processed
// input data.
- const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
+ UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
+ // Whether mAudioProcessing was created for AEC with clock drift.
+  // Meaningful only when mAudioProcessing is non-null.
+ bool mHadAECAndDrift = false;
// Packetizer to be able to feed 10ms packets to the input side of
// mAudioProcessing. Not used if the processing is bypassed.
Maybe<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
- // The number of channels asked for by content, after clamping to the range of
- // legal channel count for this particular device.
- uint32_t mRequestedInputChannelCount;
- // mSkipProcessing is true if none of the processing passes are enabled,
- // because of prefs or constraints. This allows simply copying the audio into
- // the MTG, skipping resampling and the whole webrtc.org code.
- bool mSkipProcessing;
+ // The current settings from about:config preferences and content-provided
+ // constraints.
+ MediaEnginePrefs mSettings;
// Buffer for up to one 10ms packet of planar mixed audio output for the
// reverse-stream (speaker data) of mAudioProcessing AEC.
// Length is packet size * channel count, regardless of how many frames are
diff --git a/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp b/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp
index d293fa0be6..b9b9ab8fc5 100644
--- a/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp
+++ b/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp
@@ -129,11 +129,12 @@ class DummyAudioProcessing : public AudioProcessing {
}
void set_stream_key_pressed(bool) override { MOZ_CRASH("Unexpected call"); }
bool CreateAndAttachAecDump(absl::string_view, int64_t,
- rtc::TaskQueue*) override {
+ absl::Nonnull<TaskQueueBase*>) override {
MOZ_CRASH("Unexpected call");
return false;
}
- bool CreateAndAttachAecDump(FILE*, int64_t, rtc::TaskQueue*) override {
+ bool CreateAndAttachAecDump(FILE*, int64_t,
+ absl::Nonnull<TaskQueueBase*>) override {
MOZ_CRASH("Unexpected call");
return false;
}
diff --git a/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp b/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp
index 43f34c456f..8fa0bade00 100644
--- a/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp
+++ b/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp
@@ -47,6 +47,22 @@ already_AddRefed<RTCRtpScriptTransform> RTCRtpScriptTransform::Constructor(
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
+
+ // The spec currently fails to describe what to do when the worker is closing
+ // or closed; the following placeholder text can be found in the spec at:
+ // https://w3c.github.io/webrtc-encoded-transform/#dom-rtcrtpscripttransform-rtcrtpscripttransform
+ //
+ // > FIXME: Describe error handling (worker closing flag true at
+ // > RTCRtpScriptTransform creation time. And worker being terminated while
+ // > transform is processing data).
+ //
+  // Because our worker runnables do not like to be pointed at a nonexistent
+ // worker, we throw in this case.
+ if (!aWorker.IsEligibleForMessaging()) {
+ aRv.Throw(NS_ERROR_FAILURE);
+ return nullptr;
+ }
+
auto newTransform = MakeRefPtr<RTCRtpScriptTransform>(ownerWindow);
RefPtr<RTCTransformEventRunnable> runnable =
new RTCTransformEventRunnable(aWorker, &newTransform->GetProxy());
diff --git a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp
index 126020a94f..f2fbd6d637 100644
--- a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp
+++ b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp
@@ -148,7 +148,7 @@ WritableStreamRTCFrameSink::WritableStreamRTCFrameSink(
WritableStreamRTCFrameSink::~WritableStreamRTCFrameSink() = default;
-already_AddRefed<Promise> WritableStreamRTCFrameSink::WriteCallback(
+already_AddRefed<Promise> WritableStreamRTCFrameSink::WriteCallbackImpl(
JSContext* aCx, JS::Handle<JS::Value> aChunk,
WritableStreamDefaultController& aController, ErrorResult& aError) {
// Spec does not say to do this right now. Might be a spec bug, needs
diff --git a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h
index 6d61ac3cd5..7a22612254 100644
--- a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h
+++ b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h
@@ -87,7 +87,7 @@ class WritableStreamRTCFrameSink final
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(WritableStreamRTCFrameSink,
UnderlyingSinkAlgorithmsWrapper)
- already_AddRefed<Promise> WriteCallback(
+ already_AddRefed<Promise> WriteCallbackImpl(
JSContext* aCx, JS::Handle<JS::Value> aChunk,
WritableStreamDefaultController& aController,
ErrorResult& aError) override;
diff --git a/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp b/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp
index 4e4bf9ab93..eabf7ee335 100644
--- a/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp
+++ b/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp
@@ -234,7 +234,7 @@ void WebrtcGlobalInformation::GetStatsHistorySince(
auto statsAfter = aAfter.WasPassed() ? Some(aAfter.Value()) : Nothing();
auto sdpAfter = aSdpAfter.WasPassed() ? Some(aSdpAfter.Value()) : Nothing();
- WebrtcGlobalStatsHistory::GetHistory(pcIdFilter).apply([&](auto& hist) {
+ WebrtcGlobalStatsHistory::GetHistory(pcIdFilter).apply([&](const auto& hist) {
if (!history.mReports.AppendElements(hist->Since(statsAfter), fallible)) {
mozalloc_handle_oom(0);
}
diff --git a/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp b/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
index 49f049cd21..91ad0d848c 100644
--- a/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
+++ b/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
@@ -907,7 +907,7 @@ RtpExtList WebrtcAudioConduit::FilterExtensions(LocalDirection aDirection,
webrtc::SdpAudioFormat WebrtcAudioConduit::CodecConfigToLibwebrtcFormat(
const AudioCodecConfig& aConfig) {
- webrtc::SdpAudioFormat::Parameters parameters;
+ webrtc::CodecParameterMap parameters;
if (aConfig.mName == kOpusCodecName) {
if (aConfig.mChannels == 2) {
parameters[kCodecParamStereo] = kParamValueTrue;
diff --git a/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
index 73e59f5ee2..5862237711 100644
--- a/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
+++ b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
@@ -190,7 +190,7 @@ webrtc::VideoCodecType SupportedCodecType(webrtc::VideoCodecType aType) {
rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
ConfigureVideoEncoderSettings(const VideoCodecConfig& aConfig,
const WebrtcVideoConduit* aConduit,
- webrtc::SdpVideoFormat::Parameters& aParameters) {
+ webrtc::CodecParameterMap& aParameters) {
bool is_screencast =
aConduit->CodecMode() == webrtc::VideoCodecMode::kScreensharing;
// No automatic resizing when using simulcast or screencast.
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp
index 824f1cf6eb..b03c1772c4 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp
@@ -11,6 +11,8 @@
#include "TaskQueueWrapper.h"
// libwebrtc includes
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
#include "call/rtp_transport_controller_send_factory.h"
namespace mozilla {
@@ -28,17 +30,16 @@ namespace mozilla {
std::move(eventLog), std::move(taskQueueFactory), aTimestampMaker,
std::move(aShutdownTicket));
+ webrtc::Environment env = CreateEnvironment(
+ wrapper->mEventLog.get(), wrapper->mClock.GetRealTimeClockRaw(),
+ wrapper->mTaskQueueFactory.get(), aSharedState->mTrials.get());
+
wrapper->mCallThread->Dispatch(
- NS_NewRunnableFunction(__func__, [wrapper, aSharedState] {
- webrtc::CallConfig config(wrapper->mEventLog.get());
+ NS_NewRunnableFunction(__func__, [wrapper, aSharedState, env] {
+ webrtc::CallConfig config(env, nullptr);
config.audio_state =
webrtc::AudioState::Create(aSharedState->mAudioStateConfig);
- config.task_queue_factory = wrapper->mTaskQueueFactory.get();
- config.trials = aSharedState->mTrials.get();
- wrapper->SetCall(WrapUnique(webrtc::Call::Create(
- config, &wrapper->mClock,
- webrtc::RtpTransportControllerSendFactory().Create(
- config.ExtractTransportConfig(), &wrapper->mClock)).release()));
+ wrapper->SetCall(WrapUnique(webrtc::Call::Create(config).release()));
}));
return wrapper;
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
index 865f9afff0..b8ee44c6b3 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
@@ -289,7 +289,7 @@ class WebrtcGmpVideoEncoder : public GMPVideoEncoderCallbackProxy,
GMPVideoHost* mHost;
GMPVideoCodec mCodecParams;
uint32_t mMaxPayloadSize;
- const webrtc::SdpVideoFormat::Parameters mFormatParams;
+ const webrtc::CodecParameterMap mFormatParams;
webrtc::CodecSpecificInfo mCodecSpecificInfo;
webrtc::H264BitstreamParser mH264BitstreamParser;
// Protects mCallback
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
index 844542cd0d..f5240ffa22 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
@@ -75,7 +75,7 @@ static const char* PacketModeStr(const webrtc::CodecSpecificInfo& aInfo) {
}
static std::pair<H264_PROFILE, H264_LEVEL> ConvertProfileLevel(
- const webrtc::SdpVideoFormat::Parameters& aParameters) {
+ const webrtc::CodecParameterMap& aParameters) {
const absl::optional<webrtc::H264ProfileLevelId> profileLevel =
webrtc::ParseSdpForH264ProfileLevelId(aParameters);
@@ -143,9 +143,9 @@ WebrtcMediaDataEncoder::~WebrtcMediaDataEncoder() {
}
}
-static void InitCodecSpecficInfo(
- webrtc::CodecSpecificInfo& aInfo, const webrtc::VideoCodec* aCodecSettings,
- const webrtc::SdpVideoFormat::Parameters& aParameters) {
+static void InitCodecSpecficInfo(webrtc::CodecSpecificInfo& aInfo,
+ const webrtc::VideoCodec* aCodecSettings,
+ const webrtc::CodecParameterMap& aParameters) {
MOZ_ASSERT(aCodecSettings);
aInfo.codecType = aCodecSettings->codecType;
@@ -290,13 +290,11 @@ already_AddRefed<MediaDataEncoder> WebrtcMediaDataEncoder::CreateEncoder(
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unsupported codec type");
}
EncoderConfig config(
- type, {aCodecSettings->width, aCodecSettings->height},
- MediaDataEncoder::Usage::Realtime, MediaDataEncoder::PixelFormat::YUV420P,
- MediaDataEncoder::PixelFormat::YUV420P, aCodecSettings->maxFramerate,
- keyframeInterval, mBitrateAdjuster.GetTargetBitrateBps(),
- MediaDataEncoder::BitrateMode::Variable,
- MediaDataEncoder::HardwarePreference::None,
- MediaDataEncoder::ScalabilityMode::None, specific);
+ type, {aCodecSettings->width, aCodecSettings->height}, Usage::Realtime,
+ dom::ImageBitmapFormat::YUV420P, dom::ImageBitmapFormat::YUV420P,
+ aCodecSettings->maxFramerate, keyframeInterval,
+ mBitrateAdjuster.GetTargetBitrateBps(), BitrateMode::Variable,
+ HardwarePreference::None, ScalabilityMode::None, specific);
return mFactory->CreateEncoder(config, mTaskQueue);
}
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
index 9d750e85b2..0c2070f6a9 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
@@ -65,7 +65,7 @@ class WebrtcMediaDataEncoder : public RefCountedWebrtcVideoEncoder {
MediaResult mError = NS_OK;
VideoInfo mInfo;
- webrtc::SdpVideoFormat::Parameters mFormatParams;
+ webrtc::CodecParameterMap mFormatParams;
webrtc::CodecSpecificInfo mCodecSpecific;
webrtc::BitrateAdjuster mBitrateAdjuster;
uint32_t mMaxFrameRate = {0};
diff --git a/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html b/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html
index 96c2c42b78..1f3662d9fc 100644
--- a/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html
+++ b/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html
@@ -28,10 +28,11 @@
sdpMid: "test",
sdpMLineIndex: 3 });
jsonCopy = JSON.parse(JSON.stringify(rtcIceCandidate));
- for (key in rtcIceCandidate) {
- if (typeof(rtcIceCandidate[key]) == "function") continue;
- is(rtcIceCandidate[key], jsonCopy[key], "key " + key + " should match.");
- }
+ is(jsonCopy.candidate, "dummy");
+ is(jsonCopy.sdpMid, "test");
+ is(jsonCopy.sdpMLineIndex, 3);
+ is(jsonCopy.usernameFragment, rtcIceCandidate.usernameFragment);
+ is(Object.keys(jsonCopy).length, 4, "JSON limited to those four members.");
});
</script>
</pre>
diff --git a/dom/media/webrtc/third_party_build/default_config_env b/dom/media/webrtc/third_party_build/default_config_env
index 7013520a30..be3c5ba7c1 100644
--- a/dom/media/webrtc/third_party_build/default_config_env
+++ b/dom/media/webrtc/third_party_build/default_config_env
@@ -5,41 +5,41 @@
export MOZ_LIBWEBRTC_SRC=$STATE_DIR/moz-libwebrtc
# The previous fast-forward bug number is used for some error messaging.
-export MOZ_PRIOR_FASTFORWARD_BUG="1871981"
+export MOZ_PRIOR_FASTFORWARD_BUG="1876843"
# Fast-forwarding each Chromium version of libwebrtc should be done
# under a separate bugzilla bug. This bug number is used when crafting
# the commit summary as each upstream commit is vendored into the
# mercurial repository. The bug used for the v106 fast-forward was
# 1800920.
-export MOZ_FASTFORWARD_BUG="1876843"
+export MOZ_FASTFORWARD_BUG="1883116"
# MOZ_NEXT_LIBWEBRTC_MILESTONE and MOZ_NEXT_FIREFOX_REL_TARGET are
# not used during fast-forward processing, but facilitate generating this
# default config. To generate an default config for the next update, run
# bash dom/media/webrtc/third_party_build/update_default_config_env.sh
-export MOZ_NEXT_LIBWEBRTC_MILESTONE=121
-export MOZ_NEXT_FIREFOX_REL_TARGET=125
+export MOZ_NEXT_LIBWEBRTC_MILESTONE=122
+export MOZ_NEXT_FIREFOX_REL_TARGET=126
# For Chromium release branches, see:
# https://chromiumdash.appspot.com/branches
-# Chromium's v120 release branch was 6099. This is used to pre-stack
+# Chromium's v121 release branch was 6167. This is used to pre-stack
# the previous release branch's commits onto the appropriate base commit
# (the first common commit between trunk and the release branch).
-export MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM="6099"
+export MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM="6167"
-# New target release branch for v121 is branch-heads/6167. This is used
+# New target release branch for v122 is branch-heads/6261. This is used
# to calculate the next upstream commit.
-export MOZ_TARGET_UPSTREAM_BRANCH_HEAD="branch-heads/6167"
+export MOZ_TARGET_UPSTREAM_BRANCH_HEAD="branch-heads/6261"
# For local development 'mozpatches' is fine for a branch name, but when
# pushing the patch stack to github, it should be named something like
-# 'moz-mods-chr121-for-rel125'.
+# 'moz-mods-chr122-for-rel126'.
export MOZ_LIBWEBRTC_BRANCH="mozpatches"
# After elm has been merged to mozilla-central, the patch stack in
# moz-libwebrtc should be pushed to github. The script
# push_official_branch.sh uses this branch name when pushing to the
# public repo.
-export MOZ_LIBWEBRTC_OFFICIAL_BRANCH="moz-mods-chr121-for-rel125"
+export MOZ_LIBWEBRTC_OFFICIAL_BRANCH="moz-mods-chr122-for-rel126"
diff --git a/dom/media/webrtc/third_party_build/elm_rebase.sh b/dom/media/webrtc/third_party_build/elm_rebase.sh
index ba0028b7a4..0dbf93d3ce 100644
--- a/dom/media/webrtc/third_party_build/elm_rebase.sh
+++ b/dom/media/webrtc/third_party_build/elm_rebase.sh
@@ -153,6 +153,15 @@ export MOZ_BOOKMARK=$MOZ_BOOKMARK
" > $STATE_DIR/rebase_resume_state
fi # if [ -f $STATE_DIR/rebase_resume_state ]; then ; else
+if [ "x$STOP_FOR_REORDER" = "x1" ]; then
+ echo ""
+ echo "Stopping after generating commit list ($COMMIT_LIST_FILE) to"
+ echo "allow tweaking commit ordering. Re-running $0 will resume the"
+ echo "rebase processing. To stop processing during the rebase,"
+ echo "insert a line with only 'STOP'."
+ exit
+fi
+
# grab all commits
COMMITS=`cat $COMMIT_LIST_FILE | awk '{print $1;}'`
@@ -171,6 +180,12 @@ for commit in $COMMITS; do
ed -s $COMMIT_LIST_FILE <<< $'1d\nw\nq'
}
+ if [ "$FULL_COMMIT_LINE" == "STOP" ]; then
+ echo "Stopping for history editing. Re-run $0 to resume."
+ remove_commit
+ exit
+ fi
+
IS_BUILD_COMMIT=`hg log -T '{desc|firstline}' -r $commit \
| grep "file updates" | wc -l | tr -d " " || true`
echo "IS_BUILD_COMMIT: $IS_BUILD_COMMIT"
diff --git a/dom/media/webrtc/third_party_build/fetch_github_repo.py b/dom/media/webrtc/third_party_build/fetch_github_repo.py
index b9d10e0b6c..8caa55d5c5 100644
--- a/dom/media/webrtc/third_party_build/fetch_github_repo.py
+++ b/dom/media/webrtc/third_party_build/fetch_github_repo.py
@@ -87,6 +87,10 @@ def fetch_repo(github_path, clone_protocol, force_fetch, tar_path):
else:
print("Upstream remote branch-heads already configured")
+ # prevent changing line endings when moving things out of the git repo
+ # (and into hg for instance)
+ run_git("git config --local core.autocrlf false")
+
# do a sanity fetch in case this was not a freshly cloned copy of the
# repo, meaning it may not have all the mozilla branches present.
run_git("git fetch --all", github_path)
diff --git a/dom/media/webrtc/third_party_build/vendor-libwebrtc.py b/dom/media/webrtc/third_party_build/vendor-libwebrtc.py
index d820d8c006..1c44fbd749 100644
--- a/dom/media/webrtc/third_party_build/vendor-libwebrtc.py
+++ b/dom/media/webrtc/third_party_build/vendor-libwebrtc.py
@@ -27,7 +27,6 @@ def get_excluded_files():
".clang-format",
".git-blame-ignore-revs",
".gitignore",
- ".vpython",
"CODE_OF_CONDUCT.md",
"ENG_REVIEW_OWNERS",
"PRESUBMIT.py",
diff --git a/dom/media/webrtc/transport/test/ice_unittest.cpp b/dom/media/webrtc/transport/test/ice_unittest.cpp
index 50febb3cdd..7df379e1c4 100644
--- a/dom/media/webrtc/transport/test/ice_unittest.cpp
+++ b/dom/media/webrtc/transport/test/ice_unittest.cpp
@@ -58,9 +58,9 @@ using namespace mozilla;
static unsigned int kDefaultTimeout = 7000;
-// TODO(nils@mozilla.com): This should get replaced with some non-external
-// solution like discussed in bug 860775.
-const std::string kDefaultStunServerHostname((char*)"stun.l.google.com");
+// TODO: It would be nice to have a test STUN/TURN server that can run with
+// gtest.
+const std::string kDefaultStunServerHostname((char*)"");
const std::string kBogusStunServerHostname(
(char*)"stun-server-nonexistent.invalid");
const uint16_t kDefaultStunServerPort = 19305;
@@ -1628,12 +1628,17 @@ class WebRtcIceConnectTest : public StunTest {
peer->SetMappingType(mapping_type_);
peer->SetBlockUdp(block_udp_);
} else if (setup_stun_servers) {
- std::vector<NrIceStunServer> stun_servers;
+ if (stun_server_address_.empty()) {
+ InitTestStunServer();
+ peer->UseTestStunServer();
+ } else {
+ std::vector<NrIceStunServer> stun_servers;
- stun_servers.push_back(*NrIceStunServer::Create(
- stun_server_address_, kDefaultStunServerPort, kNrIceTransportUdp));
+ stun_servers.push_back(*NrIceStunServer::Create(
+ stun_server_address_, kDefaultStunServerPort, kNrIceTransportUdp));
- peer->SetStunServers(stun_servers);
+ peer->SetStunServers(stun_servers);
+ }
}
}
diff --git a/dom/media/webspeech/recognition/SpeechRecognition.cpp b/dom/media/webspeech/recognition/SpeechRecognition.cpp
index 7239a88237..44ad5fdd61 100644
--- a/dom/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognition.cpp
@@ -136,7 +136,7 @@ CreateSpeechRecognitionService(nsPIDOMWindowInner* aWindow,
NS_IMPL_CYCLE_COLLECTION_WEAK_PTR_INHERITED(SpeechRecognition,
DOMEventTargetHelper, mStream,
mTrack, mRecognitionService,
- mSpeechGrammarList)
+ mSpeechGrammarList, mListener)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(SpeechRecognition)
NS_INTERFACE_MAP_ENTRY(nsIObserver)
@@ -145,6 +145,16 @@ NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
NS_IMPL_ADDREF_INHERITED(SpeechRecognition, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(SpeechRecognition, DOMEventTargetHelper)
+NS_IMPL_CYCLE_COLLECTION_INHERITED(SpeechRecognition::TrackListener,
+ DOMMediaStream::TrackListener,
+ mSpeechRecognition)
+NS_IMPL_ADDREF_INHERITED(SpeechRecognition::TrackListener,
+ DOMMediaStream::TrackListener)
+NS_IMPL_RELEASE_INHERITED(SpeechRecognition::TrackListener,
+ DOMMediaStream::TrackListener)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(SpeechRecognition::TrackListener)
+NS_INTERFACE_MAP_END_INHERITING(DOMMediaStream::TrackListener)
+
SpeechRecognition::SpeechRecognition(nsPIDOMWindowInner* aOwnerWindow)
: DOMEventTargetHelper(aOwnerWindow),
mEndpointer(kSAMPLE_RATE),
@@ -472,8 +482,9 @@ void SpeechRecognition::Reset() {
++mStreamGeneration;
if (mStream) {
- mStream->UnregisterTrackListener(this);
+ mStream->UnregisterTrackListener(mListener);
mStream = nullptr;
+ mListener = nullptr;
}
mTrack = nullptr;
mTrackIsOwned = false;
@@ -642,7 +653,8 @@ RefPtr<GenericNonExclusivePromise> SpeechRecognition::StopRecording() {
if (mStream) {
// Ensure we don't start recording because a track became available
// before we get reset.
- mStream->UnregisterTrackListener(this);
+ mStream->UnregisterTrackListener(mListener);
+ mListener = nullptr;
}
return GenericNonExclusivePromise::CreateAndResolve(true, __func__);
}
@@ -801,10 +813,13 @@ void SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream,
MediaStreamConstraints constraints;
constraints.mAudio.SetAsBoolean() = true;
+ MOZ_ASSERT(!mListener);
+ mListener = new TrackListener(this);
+
if (aStream.WasPassed()) {
mStream = &aStream.Value();
mTrackIsOwned = false;
- mStream->RegisterTrackListener(this);
+ mStream->RegisterTrackListener(mListener);
nsTArray<RefPtr<AudioStreamTrack>> tracks;
mStream->GetAudioTracks(tracks);
for (const RefPtr<AudioStreamTrack>& track : tracks) {
@@ -839,7 +854,7 @@ void SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream,
return;
}
mStream = std::move(aStream);
- mStream->RegisterTrackListener(this);
+ mStream->RegisterTrackListener(mListener);
for (const RefPtr<AudioStreamTrack>& track : tracks) {
if (!track->Ended()) {
NotifyTrackAdded(track);
diff --git a/dom/media/webspeech/recognition/SpeechRecognition.h b/dom/media/webspeech/recognition/SpeechRecognition.h
index 687f38041e..465cadc8cb 100644
--- a/dom/media/webspeech/recognition/SpeechRecognition.h
+++ b/dom/media/webspeech/recognition/SpeechRecognition.h
@@ -52,7 +52,6 @@ LogModule* GetSpeechRecognitionLog();
class SpeechRecognition final : public DOMEventTargetHelper,
public nsIObserver,
- public DOMMediaStream::TrackListener,
public SupportsWeakPtr {
public:
explicit SpeechRecognition(nsPIDOMWindowInner* aOwnerWindow);
@@ -133,7 +132,24 @@ class SpeechRecognition final : public DOMEventTargetHelper,
EVENT_COUNT
};
- void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
+ void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack);
+
+ class TrackListener final : public DOMMediaStream::TrackListener {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(TrackListener,
+ DOMMediaStream::TrackListener)
+ explicit TrackListener(SpeechRecognition* aSpeechRecognition)
+ : mSpeechRecognition(aSpeechRecognition) {}
+ void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override {
+ mSpeechRecognition->NotifyTrackAdded(aTrack);
+ }
+
+ private:
+ virtual ~TrackListener() = default;
+ RefPtr<SpeechRecognition> mSpeechRecognition;
+ };
+
// aMessage should be valid UTF-8, but invalid UTF-8 byte sequences are
// replaced with the REPLACEMENT CHARACTER on conversion to UTF-16.
void DispatchError(EventType aErrorType,
@@ -266,6 +282,8 @@ class SpeechRecognition final : public DOMEventTargetHelper,
// a conforming implementation.
uint32_t mMaxAlternatives;
+ RefPtr<TrackListener> mListener;
+
void ProcessTestEventRequest(nsISupports* aSubject,
const nsAString& aEventName);
diff --git a/dom/media/webspeech/synth/nsISynthVoiceRegistry.idl b/dom/media/webspeech/synth/nsISynthVoiceRegistry.idl
index 8192eff045..e5189e0bc1 100644
--- a/dom/media/webspeech/synth/nsISynthVoiceRegistry.idl
+++ b/dom/media/webspeech/synth/nsISynthVoiceRegistry.idl
@@ -55,9 +55,9 @@ interface nsISynthVoiceRegistry : nsISupports
AString getVoice(in uint32_t aIndex);
- bool isDefaultVoice(in AString aUri);
+ boolean isDefaultVoice(in AString aUri);
- bool isLocalVoice(in AString aUri);
+ boolean isLocalVoice(in AString aUri);
AString getVoiceLang(in AString aUri);
diff --git a/dom/media/webvtt/TextTrack.cpp b/dom/media/webvtt/TextTrack.cpp
index 7ee9ee63d2..7a5b398d2d 100644
--- a/dom/media/webvtt/TextTrack.cpp
+++ b/dom/media/webvtt/TextTrack.cpp
@@ -23,20 +23,6 @@ extern mozilla::LazyLogModule gTextTrackLog;
namespace mozilla::dom {
-static const char* ToStateStr(const TextTrackMode aMode) {
- switch (aMode) {
- case TextTrackMode::Disabled:
- return "DISABLED";
- case TextTrackMode::Hidden:
- return "HIDDEN";
- case TextTrackMode::Showing:
- return "SHOWING";
- default:
- MOZ_ASSERT_UNREACHABLE("Invalid state.");
- }
- return "Unknown";
-}
-
static const char* ToReadyStateStr(const TextTrackReadyState aState) {
switch (aState) {
case TextTrackReadyState::NotLoaded:
@@ -53,24 +39,6 @@ static const char* ToReadyStateStr(const TextTrackReadyState aState) {
return "Unknown";
}
-static const char* ToTextTrackKindStr(const TextTrackKind aKind) {
- switch (aKind) {
- case TextTrackKind::Subtitles:
- return "Subtitles";
- case TextTrackKind::Captions:
- return "Captions";
- case TextTrackKind::Descriptions:
- return "Descriptions";
- case TextTrackKind::Chapters:
- return "Chapters";
- case TextTrackKind::Metadata:
- return "Metadata";
- default:
- MOZ_ASSERT_UNREACHABLE("Invalid kind.");
- }
- return "Unknown";
-}
-
NS_IMPL_CYCLE_COLLECTION_INHERITED(TextTrack, DOMEventTargetHelper, mCueList,
mActiveCueList, mTextTrackList,
mTrackElement)
@@ -129,8 +97,8 @@ void TextTrack::SetMode(TextTrackMode aValue) {
if (mMode == aValue) {
return;
}
- WEBVTT_LOG("Set mode=%s for track kind %s", ToStateStr(aValue),
- ToTextTrackKindStr(mKind));
+ WEBVTT_LOG("Set mode=%s for track kind %s", GetEnumString(aValue).get(),
+ GetEnumString(mKind).get());
mMode = aValue;
HTMLMediaElement* mediaElement = GetMediaElement();